diff --git a/.changes/2.0.0-preview-1.json b/.changes/2.0.0-preview-1.json index 1b7d507e5d9f..5fe894b47755 100644 --- a/.changes/2.0.0-preview-1.json +++ b/.changes/2.0.0-preview-1.json @@ -5,7 +5,7 @@ { "category": "AWS SDK for Java v2", "type": "feature", - "description": "Initial release of the AWS SDK for Java v2. See our [blog post](https://aws.amazon.com/blogs/developer/aws-sdk-for-java-2-0-developer-preview) for information about this new major veresion. This release is considered a developer preview and is not intended for production use cases." + "description": "Initial release of the AWS SDK for Java v2. See our [blog post](https://aws.amazon.com/blogs/developer/aws-sdk-for-java-2-0-developer-preview) for information about this new major version. This release is considered a developer preview and is not intended for production use cases." } ] } diff --git a/.changes/2.10.0.json b/.changes/2.10.0.json new file mode 100644 index 000000000000..ef32bdd3c4bf --- /dev/null +++ b/.changes/2.10.0.json @@ -0,0 +1,41 @@ +{ + "version": "2.10.0", + "date": "2019-10-24", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updating to use Jackson 2.10.0 and Netty 4.1.42.Final" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Adds support for the new family of Elastic Inference Accelerators (eia2) for SageMaker Hosting and Notebook Services" + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "This release adds support for the gRPC and HTTP/2 protocols." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release updates CreateFpgaImage to support tagging FPGA images on creation" + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "This release introduces Voice Connector PDX region and defaults previously created Voice Connectors to IAD. You can create Voice Connector Groups and add region specific Voice Connectors to direct telephony traffic across AWS regions in case of regional failures. With this release you can add phone numbers to Voice Connector Groups and can bulk move phone numbers between Voice Connectors, between Voice Connector and Voice Connector Groups and between Voice Connector Groups. Voice Connector now supports additional settings to enable SIP Log capture. This is in addition to the launch of Voice Connector Cloud Watch metrics in this release. This release also supports assigning outbound calling name (CNAM) to AWS account and individual phone numbers assigned to Voice Connectors. * Voice Connector now supports a setting to enable real time audio streaming delivered via Kinesis Audio streams. Please note that recording Amazon Chime Voice Connector calls with this feature may be subject to laws or regulations regarding the recording of telephone calls and other electronic communications. AWS customers and their end users have the responsibility to comply with all applicable laws regarding the recording, including properly notifying all participants in a recorded session or communication that the session or communication is being recorded, and obtaining their consent." + }, + { + "type": "feature", + "category": "Amazon GameLift", + "description": "Amazon GameLift offers expanded hardware options for game hosting: Custom game builds can use the Amazon Linux 2 operating system, and fleets for both custom builds and Realtime servers can now use C5, M5, and R5 instance types."
+ }, + { + "type": "bugfix", + "category": "Netty NIO Http Client", + "description": "Fix a race condition where the channel is closed right after all content is buffered, causing `server failed to complete the response` error by adding a flag when `LastHttpContentHandler` is received." + } + ] +} diff --git a/.changes/2.10.1.json b/.changes/2.10.1.json new file mode 100644 index 000000000000..2ba7f6dff5d7 --- /dev/null +++ b/.changes/2.10.1.json @@ -0,0 +1,11 @@ +{ + "version": "2.10.1", + "date": "2019-10-25", + "entries": [ + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "description": "With this release, Amazon Transcribe Streaming now supports audio sources in Australian English (en-AU)." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.10.json b/.changes/2.10.10.json new file mode 100644 index 000000000000..30862214c3a5 --- /dev/null +++ b/.changes/2.10.10.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.10", + "date": "2019-11-06", + "entries": [ + { + "type": "feature", + "category": "AWS Budgets", + "description": "Documentation updates for budgets to track Savings Plans utilization and coverage" + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "This launch provides customers with access to Savings Plans management APIs." + }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "description": "EFS customers can select a lifecycle policy that automatically moves files that have not been accessed for 7 days into the EFS Infrequent Access (EFS IA) storage class. EFS IA provides price/performance that is cost-optimized for files that are not accessed every day." + }, + { + "type": "feature", + "category": "AWS Signer", + "description": "This release adds support for tagging code-signing profiles in AWS Signer." + }, + { + "type": "feature", + "category": "AWS Savings Plans", + "description": "This is the first release of Savings Plans, a new flexible pricing model that offers low prices on Amazon EC2 and AWS Fargate usage." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "Add support for Build Number, Secrets Manager and Exported Environment Variables." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.11.json b/.changes/2.10.11.json new file mode 100644 index 000000000000..d00a706f5582 --- /dev/null +++ b/.changes/2.10.11.json @@ -0,0 +1,16 @@ +{ + "version": "2.10.11", + "date": "2019-11-06", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Added the web identity credentials provider to the default credential chain" + }, + { + "type": "feature", + "category": "AWS Savings Plans", + "description": "This is the first release of Savings Plans, a new flexible pricing model that offers low prices on Amazon EC2 and AWS Fargate usage." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.12.json b/.changes/2.10.12.json new file mode 100644 index 000000000000..f5493dd810b4 --- /dev/null +++ b/.changes/2.10.12.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.12", + "date": "2019-11-07", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "AWS Systems Manager Session Manager target length increased to 400." 
+ }, + { + "type": "feature", + "category": "Netty NIO HTTP Client", + "description": "Switch from setting the absolute URI in HTTP requests with no `Host` header to setting the absolute request path and query parameters and a `Host` header." + }, + { + "type": "feature", + "category": "AWS SSO OIDC", + "description": "This is an initial release of AWS Single Sign-On OAuth device code authorization service." + }, + { + "type": "feature", + "category": "AWS S3", + "description": "Added support for presignPutObject in S3Presigner." + }, + { + "type": "feature", + "category": "AWS Single Sign-On", + "description": "This is an initial release of AWS Single Sign-On (SSO) end-user access. This release adds support for accessing AWS accounts assigned in AWS SSO using short term credentials." + }, + { + "type": "feature", + "category": "Amazon Comprehend", + "description": "This release adds new languages (ar, hi, ko, ja, zh, zh-TW) for Amazon Comprehend's DetectSentiment, DetectEntities, DetectKeyPhrases, BatchDetectSentiment, BatchDetectEntities and BatchDetectKeyPhrases APIs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.13.json b/.changes/2.10.13.json new file mode 100644 index 000000000000..962c28a5fa65 --- /dev/null +++ b/.changes/2.10.13.json @@ -0,0 +1,16 @@ +{ + "version": "2.10.13", + "date": "2019-11-08", + "entries": [ + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "This release contains ticket fixes for Amazon ECR." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity", + "description": "This release adds support for disabling classic flow." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.14.json b/.changes/2.10.14.json new file mode 100644 index 000000000000..ff057f494e75 --- /dev/null +++ b/.changes/2.10.14.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.14", + "date": "2019-11-11", + "entries": [ + { + "type": "feature", + "category": "Amazon Polly", + "description": "Add `PollyPresigner` which enables support for presigning `SynthesizeSpeech` requests." + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "The Resource Import feature enables customers to import existing AWS resources into new or existing CloudFormation Stacks." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "This launch provides customers with access to GetCostAndUsageWithResources API." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.15.json b/.changes/2.10.15.json new file mode 100644 index 000000000000..d4b7f744262d --- /dev/null +++ b/.changes/2.10.15.json @@ -0,0 +1,31 @@ +{ + "version": "2.10.15", + "date": "2019-11-12", + "entries": [ + { + "type": "feature", + "category": "AWS Marketplace Catalog Service", + "description": "This is the first release for the AWS Marketplace Catalog service which allows you to list, describe and manage change requests on your published entities on AWS Marketplace." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "description": "Amazon DynamoDB enables you to restore your data to a new DynamoDB table using a point-in-time or on-demand backup. You now can modify the settings on the new restored table. Specifically, you can exclude some or all of the local and global secondary indexes from being created with the restored table. In addition, you can change the billing mode and provisioned capacity settings."
+ }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "With this release, Amazon Transcribe now supports transcriptions from audio sources in Welsh English (en-WL), Scottish English(en-AB), Irish English(en-IE), Farsi(fa-IR), Tamil(ta-IN), Indonesian(id-ID), Portuguese (pt-PT), Dutch(nl-NL)." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "You can configure your Application Load Balancer to either drop invalid header fields or forward them to targets." + }, + { + "type": "feature", + "category": "AWS CodePipeline", + "description": "AWS CodePipeline now supports the use of variables in action configuration." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.16.json b/.changes/2.10.16.json new file mode 100644 index 000000000000..3214de451465 --- /dev/null +++ b/.changes/2.10.16.json @@ -0,0 +1,31 @@ +{ + "version": "2.10.16", + "date": "2019-11-13", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Email Service", + "description": "This is the first release of version 2 of the Amazon SES API. You can use this API to configure your Amazon SES account, and to send email. This API extends the functionality that exists in the previous version of the Amazon SES API." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "This release adds the custom fields definition support in the index definition for AWS IoT Fleet Indexing Service. Custom fields can be used as an aggregation field to run aggregations with both existing GetStatistics API and newly added GetCardinality, GetPercentiles APIs. GetStatistics will return all statistics (min/max/sum/avg/count...) with this release. For more information, please refer to our latest documentation: https://docs.aws.amazon.com/iot/latest/developerguide/iot-indexing.html" + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "You can now add tags to a lifecycle policy in Data Lifecycle Manager (DLM). Tags allow you to categorize your policies in different ways, such as by department, purpose or owner. You can also enable resource level permissions based on tags to set access control on ability to modify or delete a tagged policy." + }, + { + "type": "feature", + "category": "Amazon CloudSearch", + "description": "Amazon CloudSearch domains let you require that all traffic to the domain arrive over HTTPS. This security feature helps you block clients that send unencrypted requests to the domain." + }, + { + "type": "feature", + "category": "AWS Data Exchange", + "description": "Introducing AWS Data Exchange, a service that makes it easy for AWS customers to securely create, manage, access, and exchange data sets in the cloud." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.17.json b/.changes/2.10.17.json new file mode 100644 index 000000000000..d8b7ac2d3058 --- /dev/null +++ b/.changes/2.10.17.json @@ -0,0 +1,31 @@ +{ + "version": "2.10.17", + "date": "2019-11-14", + "entries": [ + { + "type": "feature", + "category": "AWSMarketplace Metering", + "description": "Added CustomerNotEntitledException in MeterUsage API for Container use case." 
+ }, + { + "type": "feature", + "category": "Amazon Connect Service", + "description": "This release enhances the existing user management APIs and adds 3 new APIs - TagResource, UntagResource, and ListTagsForResource to support tagging Amazon Connect users, which facilitates more granular access controls for Amazon Connect users within an Amazon Connect instance. You can learn more about the new APIs here: https://docs.aws.amazon.com/connect/latest/APIReference/Welcome.html." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Updates support for adding attachments to Systems Manager Automation documents" + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "This release adds a new setting at user pool client to prevent user existence related errors during authentication, confirmation, and password recovery related operations. This release also adds support to enable or disable specific authentication flows for a user pool client." + }, + { + "type": "feature", + "category": "Amazon Personalize", + "description": "Amazon Personalize: Adds ability to get batch recommendations by creating a batch inference job." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.18.json b/.changes/2.10.18.json new file mode 100644 index 000000000000..a8ea4c81e4af --- /dev/null +++ b/.changes/2.10.18.json @@ -0,0 +1,66 @@ +{ + "version": "2.10.18", + "date": "2019-11-15", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "description": "Documentation updates for logs" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This release updates AWS Systems Manager Parameter Store documentation for the enhanced search capability." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "You can now add tags while copying snapshots. Previously, a user had to first copy the snapshot and then add tags to the copied snapshot manually. Moving forward, you can specify the list of tags you wish to be applied to the copied snapshot as a parameter on the Copy Snapshot API." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "This release adds support for Chime Room Management APIs" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "When SdkException or one of its children is created without a 'message', inherit the message from the exception 'cause' (if any). This should reduce the chance of an exception being raised by the SDK with a null message." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "This release adds a new option in the User Pool to allow specifying sender's name in the emails sent by Amazon Cognito. This release also adds support to add SES Configuration Set to the emails sent by Amazon Cognito." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Added APIs to register your directories with Amazon WorkSpaces and to modify directory details." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Access to the cluster ARN makes it easier for you to author resource-level permissions policies in AWS Identity and Access Management. To simplify the process of obtaining the cluster ARN, Amazon EMR has added a new field containing the cluster ARN to all API responses that include the cluster ID." 
+ }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for DolbyVision encoding, and SCTE35 & ESAM insertion to DASH ISO EMSG." + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "Introducing Amazon EKS managed node groups, a new feature that lets you easily provision worker nodes for Amazon EKS clusters and keep them up to date using the Amazon EKS management console, CLI, and APIs." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "Documentation-only change to the default value of the routing.http.drop_invalid_header_fields.enabled attribute." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "This release includes new operations related to findings export, including: CreatePublishingDestination, UpdatePublishingDestination, DescribePublishingDestination, DeletePublishingDestination and ListPublishingDestinations." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.19.json b/.changes/2.10.19.json new file mode 100644 index 000000000000..fc378ff7f1c8 --- /dev/null +++ b/.changes/2.10.19.json @@ -0,0 +1,51 @@ +{ + "version": "2.10.19", + "date": "2019-11-18", + "entries": [ + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "add EstimatedOnDemandCostWithCurrentCommitment to GetSavingsPlansPurchaseRecommendationRequest API" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "The release contains new API and API changes for AWS Systems Manager Explorer product." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Update default connectionMaxIdleTimeout of NettyNioAsyncClient to 5 seconds" + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Added support for S3 Replication for existing objects. This release allows customers who have requested and been granted access to replicate existing S3 objects across buckets." + }, + { + "type": "feature", + "category": "Amazon SageMaker Runtime", + "description": "Amazon SageMaker Runtime now supports a new TargetModel header to invoke a specific model hosted on multi model endpoints." + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "This release introduces APIs for the CloudFormation Registry, a new service to submit and discover resource providers with which you can manage third-party resources natively in CloudFormation." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Documentation updates for rds" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Amazon SageMaker now supports multi-model endpoints to host multiple models on an endpoint using a single inference container." + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "This release of the Amazon Pinpoint API introduces support for using and managing message templates for messages that are sent through the voice channel. It also introduces support for specifying default values for message variables in message templates." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.10.2.json b/.changes/2.10.2.json new file mode 100644 index 000000000000..f80f518240d9 --- /dev/null +++ b/.changes/2.10.2.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.2", + "date": "2019-10-28", + "entries": [ + { + "type": "feature", + "category": "AWS Transfer for SFTP", + "description": "This release adds logical directories support to your AWS SFTP server endpoint, so you can now create logical directory structures mapped to Amazon Simple Storage Service (Amazon S3) bucket paths for users created and stored within the service. Amazon S3 bucket names and paths can now be hidden from AWS SFTP users, providing an additional level of privacy to meet security requirements. You can lock down your SFTP users' access to designated folders (commonly referred to as 'chroot'), and simplify complex folder structures for data distribution through SFTP without replicating files across multiple users." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "This release of Amazon Elastic Container Registry Service (Amazon ECR) introduces support for image scanning. This identifies the software vulnerabilities in the container image based on the Common Vulnerabilities and Exposures (CVE) database." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Amazon ElastiCache adds support for migrating Redis workloads hosted on Amazon EC2 into ElastiCache by syncing the data between the source Redis cluster and target ElastiCache for Redis cluster in real time. For more information, see https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/migrate-to-elasticache.html." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.20.json b/.changes/2.10.20.json new file mode 100644 index 000000000000..ed52fb1dd804 --- /dev/null +++ b/.changes/2.10.20.json @@ -0,0 +1,51 @@ +{ + "version": "2.10.20", + "date": "2019-11-19", + "entries": [ + { + "type": "feature", + "category": "AWS Identity and Access Management", + "description": "IAM reports the timestamp when a role's credentials were last used to make an AWS request. This helps you identify unused roles and remove them confidently from your AWS accounts." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "As part of this release, we are extending the capability of AWS IoT Rules Engine to send messages directly to customer's own web services/applications. Customers can now create topic rules with HTTP actions to route messages from IoT Core directly to URL's that they own. Ownership is proved by creating and confirming topic rule destinations." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "Add support for ARM and GPU-enhanced build environments and a new SSD-backed Linux compute type with additional CPU and memory in CodeBuild" + }, + { + "type": "feature", + "category": "AWS Config", + "description": "AWSConfig launches support for conformance packs. A conformance pack is a new resource type that allows you to package a collection of Config rules and remediation actions into a single entity. 
You can create and deploy conformance packs into your account or across all accounts in your organization" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds support for RunInstances to specify the metadata options for new instances; adds a new API, ModifyInstanceMetadataOptions, which lets you modify the metadata options for a running or stopped instance; and adds support for CreateCustomerGateway to specify a device name." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "This release allows forward actions on Application Load Balancers to route requests to multiple target groups, based on the weight you specify for each target group." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "This release provides three new runtimes to support Node.js 12 (initially 12.13.0), Python 3.8 and Java 11." + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "This release of AWS CloudFormation StackSets enables users to detect drift on a stack set and the stack instances that belong to that stack set." + }, + { + "type": "feature", + "category": "Auto Scaling", + "description": "Amazon EC2 Auto Scaling now supports Instance Weighting and Max Instance Lifetime. Instance Weighting allows specifying the capacity units for each instance type included in the MixedInstancesPolicy and how they would contribute to your application's performance. Max Instance Lifetime allows specifying the maximum length of time that an instance can be in service. If any instances are approaching this limit, Amazon EC2 Auto Scaling gradually replaces them." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.21.json b/.changes/2.10.21.json new file mode 100644 index 000000000000..b4c55737c270 --- /dev/null +++ b/.changes/2.10.21.json @@ -0,0 +1,96 @@ +{ + "version": "2.10.21", + "date": "2019-11-20", + "entries": [ + { + "type": "feature", + "category": "AWS Application Discovery Service", + "description": "New exception type for use with Migration Hub home region" + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "Added support for CPU and memory task-level overrides on the RunTask and StartTask APIs. Added location information to Tasks." + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "DLM now supports Fast Snapshot Restore. You can enable Fast Restore on snapshots created by DLM, provide the AZs and the number of snapshots to be enabled with this capability." + }, + { + "type": "feature", + "category": "Amazon Kinesis Firehose", + "description": "With this release, Amazon Kinesis Data Firehose allows server side encryption with customer managed CMKs. Customer managed CMKs ( \"Customer Master Keys\") are AWS Key Management Service (KMS) keys that are fully managed by the customer. With customer managed CMKs, customers can establish and maintain their key policies, IAM policies, rotating policies and add tags. For more information about AWS KMS and CMKs, please refer to: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html. 
Please refer to the following link to create CMKs: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html" + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "This release introduces support for Amazon S3 Replication Time Control, a new feature of S3 Replication that provides a predictable replication time backed by a Service Level Agreement. S3 Replication Time Control helps customers meet compliance or business requirements for data replication, and provides visibility into the replication process with new Amazon CloudWatch Metrics." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release of Amazon Elastic Compute Cloud (Amazon EC2) introduces support for Amazon Elastic Block Store (Amazon EBS) fast snapshot restores." + }, + { + "type": "feature", + "category": "AWS CloudTrail", + "description": "1. This release adds two new APIs, GetInsightSelectors and PutInsightSelectors, which let you configure CloudTrail Insights event delivery on a trail. An Insights event is a new type of event that is generated when CloudTrail detects unusual activity in your AWS account. In this release, only \"ApiCallRateInsight\" is a supported Insights event type. 2. This release also adds the new \"ExcludeManagementEventSource\" option to the existing PutEventSelectors API. This field currently supports only AWS Key Management Services." + }, + { + "type": "feature", + "category": "AWS Migration Hub", + "description": "New exception type for use with Migration Hub home region" + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Amazon QuickSight now supports programmatic creation and management of data sources, data sets, dashboards and templates with new APIs. Templates hold dashboard metadata, and can be used to create copies connected to the same or different dataset as required. Also included in this release are APIs for SPICE ingestions, fine-grained access control over AWS resources using AWS Identity and Access Management (IAM) policies, as well AWS tagging. APIs are supported for both Standard and Enterprise Edition, with edition-specific support for specific functionality." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "The new DescribeAvailabilityMonitorTest API provides the results of the most recent High Availability monitoring test. The new StartAvailabilityMonitorTest API verifies the storage gateway is configured for High Availability monitoring. The new ActiveDirectoryStatus response element has been added to the DescribeSMBSettings and JoinDomain APIs to indicate the status of the gateway after the most recent JoinDomain operation. The new TimeoutInSeconds parameter of the JoinDomain API allows for the configuration of the timeout in which the JoinDomain operation must complete." + }, + { + "type": "feature", + "category": "AWS Elemental MediaStore", + "description": "This release fixes a broken link in the SDK documentation." 
+ }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "Adds APIs to create and manage meeting session resources for the Amazon Chime SDK" + }, + { + "type": "feature", + "category": "AWS Migration Hub Config", + "description": "AWS Migration Hub Config Service allows you to get and set the Migration Hub home region for use with AWS Migration Hub and Application Discovery Service" + }, + { + "type": "feature", + "category": "AWS DataSync", + "description": "Update to configure task to run periodically on a schedule" + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "With this release Amazon Transcribe enables alternative transcriptions so that you can see different interpretations of transcribed audio." + }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "Announcing a Multi-AZ deployment type for Amazon FSx for Windows File Server, providing fully-managed Windows file storage with high availability and redundancy across multiple AWS Availability Zones." + }, + { + "type": "feature", + "category": "AWS CodeCommit", + "description": "This release adds support for creating pull request approval rules and pull request approval rule templates in AWS CodeCommit. This allows developers to block merges of pull requests, contingent on the approval rules being satisfied." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Upgrades to Instance Metadata Service version 2 (IMDS v2). With IMDS v2, a session token is used to make requests for EC2 instance metadata and credentials." + } + ] +} diff --git a/.changes/2.10.22.json b/.changes/2.10.22.json new file mode 100644 index 000000000000..0117625e976a --- /dev/null +++ b/.changes/2.10.22.json @@ -0,0 +1,71 @@ +{ + "version": "2.10.22", + "date": "2019-11-21", + "entries": [ + { + "type": "feature", + "category": "AWS AppSync", + "description": "AppSync: AWS AppSync now supports the ability to add, configure, and maintain caching for your AWS AppSync GraphQL API." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "description": "This release adds a new API: StartChatContact. You can use it to programmatically start a chat on the specified Amazon Connect instance. Learn more here: https://docs.aws.amazon.com/connect/latest/APIReference/Welcome.html" + }, + { + "type": "feature", + "category": "Amazon Connect Participant Service", + "description": "This release adds 5 new APIs: CreateParticipantConnection, DisconnectParticipant, GetTranscript, SendEvent, and SendMessage. For Amazon Connect chat, you can use them to programmatically perform participant actions on the configured Amazon Connect instance. Learn more here: https://docs.aws.amazon.com/connect-participant/latest/APIReference/Welcome.html" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "description": "With this release, you can convert an existing Amazon DynamoDB table to a global table by adding replicas in other AWS Regions." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "description": "Amazon Lex now supports Sentiment Analysis" + }, + { + "type": "feature", + "category": "Amazon Lex Runtime Service", + "description": "Amazon Lex now supports Sentiment Analysis" + }, + { + "type": "feature", + "category": "AWSMarketplace Metering", + "description": "Documentation updates for the AWS Marketplace Metering Service."
+ }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "The release contains new API and API changes for AWS Systems Manager Explorer product." + }, + { + "type": "feature", + "category": "AWS Config", + "description": "AWS Config launches Custom Configuration Items. A new feature which allows customers to publish resource configuration for third-party resources, custom, or on-premises servers." + }, + { + "type": "feature", + "category": "AWS Amplify", + "description": "This release of AWS Amplify Console introduces support for backend environments. Backend environments are containers for AWS deployments. Each environment is a collection of AWS resources." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "With this release, Amazon Transcribe now supports transcriptions from audio sources in Hebrew (he-IL), Swiss German (de-CH), Japanese (ja-JP), Turkish (tr-TR), Arabic-Gulf (ar-AE), Malay (ms-MY), Telugu (te-IN)" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds support for attaching AWS License Manager Configurations to Amazon Machine Image (AMI) using ImportImage API; and adds support for running different instance sizes on EC2 Dedicated Hosts" + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "This release adds support for Glue 1.0 compatible ML Transforms." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.23.json b/.changes/2.10.23.json new file mode 100644 index 000000000000..b80453b65e8b --- /dev/null +++ b/.changes/2.10.23.json @@ -0,0 +1,71 @@ +{ + "version": "2.10.23", + "date": "2019-11-22", + "entries": [ + { + "type": "feature", + "category": "AWS Certificate Manager", + "description": "This release adds support for Tag-Based IAM for AWS Certificate Manager and adding tags to certificates upon creation." + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "description": "Added documentation for the dead-letter queue feature." + }, + { + "type": "feature", + "category": "Amazon Forecast Service", + "description": "This release adds two key updates to existing APIs. 1. Amazon Forecast can now generate forecasts in any quantile using the optional parameter forecastTypes in the CreateForecast API and 2. You can get additional details (metrics and relevant error messages) on your AutoML runs using the DescribePredictor and GetAccuracyMetrics APIs." + }, + { + "type": "feature", + "category": "AWS Auto Scaling Plans", + "description": "Update default endpoint for AWS Auto Scaling." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "Update default endpoint for Application Auto Scaling." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Amazon EMR adds support for concurrent step execution and cancelling running steps. Amazon EMR has added a new Outpost ARN field in the ListCluster and DescribeCluster API responses that is populated for clusters launched in an AWS Outpost subnet." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds two new APIs (DescribeInstanceTypes and DescribeInstanceTypeOfferings) that give customers access to instance type attributes and regional and zonal offerings." 
+ }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage VOD", + "description": "Includes the submission time of Asset ingestion request in the API response for Create/List/Describe Assets." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "The ProcessCredentialsProvider now supports credential files up to 64 KB by default through an increase of the processOutputLimit from 1024 bytes to 64000 bytes." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "Add Canonical ARN to LogsLocation." + }, + { + "type": "feature", + "category": "AWS Security Token Service", + "description": "Support tagging for STS sessions and tag based access control for the STS APIs" + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "This release adds enhanced face filtering support to the IndexFaces API operation, and introduces face filtering for CompareFaces and SearchFacesByImage API operations." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Add RebootOption and LastNoRebootInstallOperationTime for DescribeInstancePatchStates and DescribeInstancePatchStatesForPatchGroup API" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.24.json b/.changes/2.10.24.json new file mode 100644 index 000000000000..ea4f4eafb49f --- /dev/null +++ b/.changes/2.10.24.json @@ -0,0 +1,151 @@ +{ + "version": "2.10.24", + "date": "2019-11-25", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaPackage VOD", + "description": "Adds a domain name to PackagingGroups, representing the fully qualified domain name for Assets created in the group." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "This launch provides customers with access to Cost Category Public Beta APIs." + }, + { + "type": "feature", + "category": "AWS Resource Access Manager", + "description": "AWS RAM provides new APIs to view the permissions granted to principals in a resource share. This release also creates corresponding resource shares for supported services that use resource policies, as well as an API to promote them to standard shares that can be managed in RAM." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "This release adds: 1) APIs for fleet provisioning claim and template, 2) endpoint configuration and custom domains, 3) support for enhanced custom authentication, d) support for 4 additional audit checks: Device and CA certificate key quality checks, IoT role alias over-permissive check and IoT role alias access to unused services check, 5) extended capability of AWS IoT Rules Engine to support IoT SiteWise rule action. The IoT SiteWise rule action lets you send messages from IoT sensors and applications to IoT SiteWise asset properties" + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "This release of Elastic Load Balancing V2 adds new subnet features for Network Load Balancers and a new routing algorithm for Application Load Balancers." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "CodeBuild adds support for test reporting" + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for 8K outputs and support for QuickTime Animation Codec (RLE) inputs." 
+ }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports the ability to create a multiple program transport stream (MPTS)." + }, + { + "type": "feature", + "category": "Amazon Comprehend", + "description": "Amazon Comprehend now supports real-time analysis with Custom Classification" + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "Amazon Cognito Userpools now supports Sign in with Apple as an Identity Provider." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Added the function state and update status to the output of GetFunctionConfiguration and other actions. Check the state information to ensure that a function is ready before you perform operations on it. Functions take time to become ready when you connect them to a VPC. Added the EventInvokeConfig type and operations to configure error handling options for asynchronous invocation. Use PutFunctionEventInvokeConfig to configure the number of retries and the maximum age of events when you invoke the function asynchronously. Added on-failure and on-success destination settings for asynchronous invocation. Configure destinations to send an invocation record to an SNS topic, an SQS queue, an EventBridge event bus, or a Lambda function. Added error handling options to event source mappings. This enables you to configure the number of retries, configure the maximum age of records, or retry with smaller batches when an error occurs when a function processes a Kinesis or DynamoDB stream. Added the on-failure destination setting to event source mappings. This enables you to send discarded events to an SNS topic or SQS queue when all retries fail or when the maximum record age is exceeded when a function processes a Kinesis or DynamoDB stream. Added the ParallelizationFactor option to event source mappings to increase concurrency per shard when a function processes a Kinesis or DynamoDB stream." + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "You can now set time based retention policies on Data Lifecycle Manager. With this launch, DLM allows you to set snapshot retention period in the following interval units: days, weeks, months and years." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "AWS Systems Manager Documents now supports more Document Types: ApplicationConfiguration, ApplicationConfigurationSchema and DeploymentStrategy. This release also extends Document Permissions capabilities and introduces a new Force flag for DeleteDocument API." + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "description": "AWS Key Management Service (KMS) now enables creation and use of asymmetric Customer Master Keys (CMKs) and the generation of asymmetric data key pairs." + }, + { + "type": "feature", + "category": "AWS IoT Secure Tunneling", + "description": "This release adds support for IoT Secure Tunneling to remote access devices behind restricted firewalls." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Cluster Endpoints can now be tagged by using --tags in the create-db-cluster-endpoint API" + }, + { + "type": "feature", + "category": "Amazon Athena", + "description": "This release adds additional query lifecycle metrics to the QueryExecutionStatistics object in GetQueryExecution response."
+ }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "description": "This release includes support for automatically suppressing email addresses that result in hard bounce or complaint events at the account level, and for managing addresses on this account-level suppression list." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "This release supports auto scaling of document classifier endpoints for Comprehend; and supports target tracking based on the average capacity utilization metric for AppStream 2.0 fleets." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "description": "This release introduces a new set of APIs (\"wafv2\") for AWS WAF. Major changes include single set of APIs for creating/updating resources in global and regional scope, and rules are configured directly into web ACL instead of being referenced. The previous APIs (\"waf\" and \"waf-regional\") are now referred to as AWS WAF Classic. For more information visit: https://docs.aws.amazon.com/waf/latest/APIReference/Welcome.html" + }, + { + "type": "feature", + "category": "Amazon AppConfig", + "description": "Introducing AWS AppConfig, a new service that enables customers to quickly deploy validated configurations to applications of any size in a controlled and monitored fashion." + }, + { + "type": "feature", + "category": "Amazon Lex Runtime Service", + "description": "Amazon Lex adds \"sessionId\" attribute to the PostText and PostContent response." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Application Insights", + "description": "CloudWatch Application Insights for .NET and SQL Server includes the following features: - Tagging: Create and manage tags for your applications. - Custom log pattern matching: Define custom log patterns to be detected and monitored. - Resource-level permissions: Specify which applications users can access." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds two new APIs: 1. ModifyDefaultCreditSpecification, which allows you to set default credit specification at the account level per AWS Region, per burstable performance instance family, so that all new burstable performance instances in the account launch using the new default credit specification. 2. GetDefaultCreditSpecification, which allows you to get current default credit specification per AWS Region, per burstable performance instance family. This release also adds new client exceptions for StartInstances and StopInstances." + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "description": "This release adds a new feature called \"Contributor Insights\". \"Contributor Insights\" supports the following 6 new APIs (PutInsightRule, DeleteInsightRules, EnableInsightRules, DisableInsightRules, DescribeInsightRules and GetInsightRuleReport)." + }, + { + "type": "feature", + "category": "Amazon Kinesis Analytics", + "description": "Kinesis Data Analytics service adds support to configure Java applications to access resources in a VPC. Also releasing support to configure Java applications to set allowNonRestoreState flag through the service APIs." + }, + { + "type": "feature", + "category": "Alexa For Business", + "description": "API update for Alexa for Business: This update enables the use of meeting room configuration that can be applied to a room profile. These settings help improve and measure utilization on Alexa for Business enabled rooms.
New features include end meeting reminders, intelligent room release and room utilization analytics report." + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "This release contains changes for 1. Redshift Scheduler 2. Update to the DescribeNodeConfigurationOptions to include a new action type recommend-node-config" + }, + { + "type": "feature", + "category": "AWS Greengrass", + "description": "IoT Greengrass supports machine learning resources in 'No container' mode." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.25.json b/.changes/2.10.25.json new file mode 100644 index 000000000000..e5890b50688f --- /dev/null +++ b/.changes/2.10.25.json @@ -0,0 +1,71 @@ +{ + "version": "2.10.25", + "date": "2019-11-26", + "entries": [ + { + "type": "feature", + "category": "AWS Directory Service", + "description": "This release will introduce optional encryption over LDAP network traffic using SSL certificates between customer's self-managed AD and AWS Directory Services instances. The release also provides APIs for Certificate management." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "Introduces the DescribeEffectivePolicy action, which returns the contents of the policy that's in effect for the account." + }, + { + "type": "feature", + "category": "AWS RDS DataService", + "description": "Type hints to improve handling of some specific parameter types (date/time, decimal etc) for ExecuteStatement and BatchExecuteStatement APIs" + }, + { + "type": "feature", + "category": "AWS Resource Groups Tagging API", + "description": "You can use tag policies to help standardize on tags across your organization's resources." + }, + { + "type": "feature", + "category": "AWSServerlessApplicationRepository", + "description": "AWS Serverless Application Repository now supports verified authors. Verified means that AWS has made a good faith review, as a reasonable and prudent service provider, of the information provided by the requester and has confirmed that the requester's identity is as claimed." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "For the WorkspaceBundle API, added the image identifier and the time of the last update." + }, + { + "type": "bugfix", + "category": "AWS Kinesis", + "description": "Reducing default read timeout and write timeout to 10 seconds for Kinesis client." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "This release adds a new setting for a user pool to configure which recovery methods a user can use to recover their account via the forgot password operation." + }, + { + "type": "feature", + "category": "Netty NIO HTTP Client", + "description": "Detect unhealthy http2 connections when read or write times out by sending PING frames" + }, + { + "type": "feature", + "category": "AWS MediaTailor", + "description": "AWS Elemental MediaTailor SDK now allows configuration of the Live Pre-Roll feature for HLS and DASH streams." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "description": "1) Amazon Contributor Insights for Amazon DynamoDB is a diagnostic tool for identifying frequently accessed keys and understanding database traffic trends. 2) Support for displaying new fields when a table's encryption state is Inaccessible or the table has been Archived."
+ }, + { + "type": "feature", + "category": "Amazon Elastic Inference", + "description": "Amazon Elastic Inference allows customers to attach Elastic Inference Accelerators to Amazon EC2 and Amazon ECS tasks, thus providing low-cost GPU-powered acceleration and reducing the cost of running deep learning inference. This release allows customers to add or remove tags for their Elastic Inference Accelerators." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Documentation updates for QuickSight" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.26.json b/.changes/2.10.26.json new file mode 100644 index 000000000000..defe9cc2190b --- /dev/null +++ b/.changes/2.10.26.json @@ -0,0 +1,31 @@ +{ + "version": "2.10.26", + "date": "2019-12-02", + "entries": [ + { + "type": "feature", + "category": "Schemas", + "description": "This release introduces support for Amazon EventBridge schema registry, making it easy to discover and write code for events in EventBridge." + }, + { + "type": "feature", + "category": "AWS License Manager", + "description": "AWS License Manager now automates discovery of bring-your-own-license usage across the customer's organization. With a few simple settings, customers can add bring your own license product information along with licensing rules, which would enable License Manager to automatically track the instances that have the specified products installed. If License Manager detects any violation of licensing rules, it would notify the customer's designated license administrator to take corrective action." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Write operations (put, get, delete) now support 'conditionExpression'" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "AWS now provides a new BYOL experience for software licenses, such as Windows and SQL Server, that require a dedicated physical server. You can now enjoy the flexibility and cost effectiveness of using your own licenses on Amazon EC2 Dedicated Hosts, but with the simplicity, resiliency, and elasticity of AWS. You can specify your Dedicated Host management preferences, such as host allocation, host capacity utilization, and instance placement in AWS License Manager. Once set up, AWS takes care of these administrative tasks on your behalf, so that you can seamlessly launch virtual machines (instances) on Dedicated Hosts just like you would launch an EC2 instance with AWS provided licenses." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "This is the first release of EC2 Image Builder, a service that provides a managed experience for automating the creation of EC2 AMIs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.27.json b/.changes/2.10.27.json new file mode 100644 index 000000000000..735404abfa66 --- /dev/null +++ b/.changes/2.10.27.json @@ -0,0 +1,11 @@ +{ + "version": "2.10.27", + "date": "2019-12-02", + "entries": [ + { + "type": "feature", + "category": "Access Analyzer", + "description": "Introducing AWS IAM Access Analyzer, an IAM feature that makes it easy for AWS customers to ensure that their resource-based policies provide only the intended access to resources outside their AWS accounts."
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.10.28.json b/.changes/2.10.28.json new file mode 100644 index 000000000000..7b8732bc8899 --- /dev/null +++ b/.changes/2.10.28.json @@ -0,0 +1,81 @@ +{ + "version": "2.10.28", + "date": "2019-12-03", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "Introducing Amazon EKS with Fargate. Customers can now use Amazon EKS to launch pods directly onto AWS Fargate, the serverless compute engine built for containers on AWS." + }, + { + "type": "feature", + "category": "AWS Network Manager", + "description": "This is the initial SDK release for AWS Network Manager." + }, + { + "type": "feature", + "category": "Amazon Fraud Detector", + "description": "Amazon Fraud Detector is a fully managed service that makes it easy to identify potentially fraudulent online activities such as online payment fraud and the creation of fake accounts. Amazon Fraud Detector uses your data, machine learning (ML), and more than 20 years of fraud detection expertise from Amazon to automatically identify potentially fraudulent online activity so you can catch more fraud faster." + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "description": "Initial release of AWS Compute Optimizer. AWS Compute Optimizer recommends optimal AWS Compute resources to reduce costs and improve performance for your workloads." + }, + { + "type": "feature", + "category": "Amazon Textract", + "description": "This SDK Release introduces Amazon Augmented AI support for Amazon Textract AnalyzeDocument API. Image byte payloads for synchronous operations have increased from 5 MB to 10 MB." + }, + { + "type": "feature", + "category": "AWS S3 Control", + "description": "Amazon S3 Access Points is a new S3 feature that simplifies managing data access at scale for shared data sets on Amazon S3. Access Points provide a customizable way to access the objects in a bucket, with a unique hostname and access policy that enforces the specific permissions and network controls for any request made through the access point. This represents a new way of provisioning access to shared data sets." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release supports ECS Capacity Providers, Fargate Spot, and ECS Cluster Auto Scaling. These features enable new ways for ECS to manage compute capacity used by tasks." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "It is a preview launch of Amazon Kendra. Amazon Kendra is a managed, highly accurate and easy to use enterprise search service that is powered by machine learning." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "description": "UltraWarm storage provides a cost-effective way to store large amounts of read-only data on Amazon Elasticsearch Service. Rather than attached storage, UltraWarm nodes use Amazon S3 and a sophisticated caching solution to improve performance. For indices that you are not actively writing to and query less frequently, UltraWarm storage offers significantly lower costs per GiB. In Elasticsearch, these warm indices behave just like any other index. You can query them using the same APIs or use them to create dashboards in Kibana." 
+ }, + { + "type": "feature", + "category": "AWS Outposts", + "description": "This is the initial release for AWS Outposts, a fully managed service that extends AWS infrastructure, services, APIs, and tools to customer sites. AWS Outposts enables you to launch and run EC2 instances and EBS volumes locally at your on-premises location. This release introduces new APIs for creating and viewing Outposts." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Amazon S3 Access Points is a new S3 feature that simplifies managing data access at scale for shared data sets on Amazon S3. Access Points provide a customizable way to access the objects in a bucket, with a unique hostname and access policy that enforces the specific permissions and network controls for any request made through the access point. This represents a new way of provisioning access to shared data sets." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Profiler", + "description": "(New Service) Amazon CodeGuru Profiler analyzes application CPU utilization and latency characteristics to show you where you are spending the most cycles in your application. This analysis is presented in an interactive flame graph that helps you easily understand which paths consume the most resources, verify that your application is performing as expected, and uncover areas that can be optimized further." + }, + { + "type": "feature", + "category": "Amazon Augmented AI Runtime", + "description": "This release adds support for Amazon Augmented AI, which makes it easy to build workflows for human review of machine learning predictions." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "description": "This is the preview release of Amazon CodeGuru Reviewer." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds support for the following features: 1. An option to enable acceleration for Site-to-Site VPN connections; 2. Inf1 instances featuring up to 16 AWS Inferentia chips; 3. The ability to associate route tables with internet gateways and virtual private gateways; 4. AWS Local Zones that place compute, storage, database, and other select services; 5. Launching and viewing EC2 instances and EBS volumes running locally in Outposts; 6. Peering Transit Gateways between regions simplifying creation of secure and private global networks on AWS; 7. Transit Gateway Multicast, enabling multicast routing within and between VPCs using Transit Gateway as a multicast router." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.29.json b/.changes/2.10.29.json new file mode 100644 index 000000000000..2be5a2908220 --- /dev/null +++ b/.changes/2.10.29.json @@ -0,0 +1,46 @@ +{ + "version": "2.10.29", + "date": "2019-12-03", + "entries": [ + { + "type": "feature", + "category": "AWS Step Functions", + "description": "This release of the AWS Step Functions SDK introduces support for Express Workflows." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release adds support for the Amazon RDS Proxy" + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "This SDK Release introduces APIs for Amazon Rekognition Custom Labels feature (CreateProjects, CreateProjectVersion,DescribeProjects, DescribeProjectVersions, StartProjectVersion, StopProjectVersion and DetectCustomLabels). 
Also new is AugmentedAI (Human In The Loop) Support for DetectModerationLabels in Amazon Rekognition." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "You can now use SageMaker Autopilot for automatically training and tuning candidate models using a combination of various feature engineering, ML algorithms, and hyperparameters determined from the user's input data. SageMaker Automatic Model Tuning now supports tuning across multiple algorithms. With Amazon SageMaker Experiments users can create Experiments, ExperimentTrials, and ExperimentTrialComponents to track, organize, and evaluate their ML training jobs. With Amazon SageMaker Debugger, users can easily debug training jobs using a number of pre-built rules provided by Amazon SageMaker, or build custom rules. With Amazon SageMaker Processing, users can run on-demand, distributed, and fully managed jobs for data pre- or post- processing or model evaluation. With Amazon SageMaker Model Monitor, a user can create MonitoringSchedules to automatically monitor endpoints to detect data drift and other issues and get alerted on them. This release also includes the preview version of Amazon SageMaker Studio with Domains, UserProfiles, and Apps. This release also includes the preview version of Amazon Augmented AI to easily implement human review of machine learning predictions by creating FlowDefinitions, HumanTaskUis, and HumanLoops." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "- Added the ProvisionedConcurrency type and operations. Allocate provisioned concurrency to enable your function to scale up without fluctuations in latency. Use PutProvisionedConcurrencyConfig to configure provisioned concurrency on a version of a function, or on an alias." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "This release supports auto scaling of provisioned concurrency for AWS Lambda." + }, + { + "type": "feature", + "category": "Amazon Elastic Block Store", + "description": "This release introduces the EBS direct APIs for Snapshots: 1. ListSnapshotBlocks, which lists the block indexes and block tokens for blocks in an Amazon EBS snapshot. 2. ListChangedBlocks, which lists the block indexes and block tokens for blocks that are different between two snapshots of the same volume/snapshot lineage. 3. GetSnapshotBlock, which returns the data in a block of an Amazon EBS snapshot." + }, + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Interacting with an access point in a different region to the one the S3 client is configured for will no longer result in the request being signed for the wrong region and rejected by S3." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.3.json b/.changes/2.10.3.json new file mode 100644 index 000000000000..4e2011dd54c2 --- /dev/null +++ b/.changes/2.10.3.json @@ -0,0 +1,11 @@ +{ + "version": "2.10.3", + "date": "2019-10-28", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Adding support in SelectObjectContent for scanning a portion of an object specified by a scan range." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.10.30.json b/.changes/2.10.30.json new file mode 100644 index 000000000000..72427a8b0f5e --- /dev/null +++ b/.changes/2.10.30.json @@ -0,0 +1,31 @@ +{ + "version": "2.10.30", + "date": "2019-12-04", + "entries": [ + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "description": "Amazon API Gateway now supports HTTP APIs (beta), enabling customers to quickly build high performance RESTful APIs that are up to 71% cheaper than REST APIs also available from API Gateway. HTTP APIs are optimized for building APIs that proxy to AWS Lambda functions or HTTP backends, making them ideal for serverless workloads. Using HTTP APIs, you can secure your APIs using OIDC and OAuth 2 out of box, quickly build web applications using a simple CORS experience, and get started immediately with automatic deployment and simple create workflows." + }, + { + "type": "feature", + "category": "Amazon Kinesis Video Signaling Channels", + "description": "Announcing support for WebRTC in Kinesis Video Streams, as fully managed capability. You can now use simple APIs to enable your connected devices, web, and mobile apps with real-time two-way media streaming capabilities." + }, + { + "type": "feature", + "category": "Amazon Kinesis Video Streams", + "description": "Introduces management of signaling channels for Kinesis Video Streams." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Fixed an issue where receiving a GOAWAY that would cause the closing of all streams could cause all outstanding streams to be completed successfully instead of exceptionally." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Fixed an issue where closing the last stream on a connection that had been closed or received a GOAWAY did not close the connection." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.31.json b/.changes/2.10.31.json new file mode 100644 index 000000000000..f2a27b9ed723 --- /dev/null +++ b/.changes/2.10.31.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.31", + "date": "2019-12-09", + "entries": [ + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "AWS MSK has added support for Open Monitoring with Prometheus." + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "description": "The Verify operation now returns KMSInvalidSignatureException on invalid signatures. The Sign and Verify operations now return KMSInvalidStateException when a request is made against a CMK pending deletion." + }, + { + "type": "feature", + "category": "Netty NIO HTTP Client", + "description": "Close HTTP/2 connections if they have had 0 streams for 5 seconds. This can be disabled using `useIdleConnectionReaper(false)` or have the time period adjusted using `connectionMaxIdleTime(...)` on the `NettyNioAsyncHttpClient.Builder`." + }, + { + "type": "feature", + "category": "Netty NIO HTTP Client", + "description": "Periodically ping HTTP/2 connections and close them if the service does not respond. The ping periodicity and timeout time is not currently configurable." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Adds the SSM GetCalendarState API and ChangeCalendar SSM Document type. These features enable the forthcoming Systems Manager Change Calendar feature, which will allow you to schedule events during which actions should (or should not) be performed." 
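For reference, a minimal sketch of the Netty NIO HTTP Client idle-connection settings named in the 2.10.31 entries above (`useIdleConnectionReaper` and `connectionMaxIdleTime` on `NettyNioAsyncHttpClient.Builder`); the 30-second duration chosen here is illustrative only:

```java
import java.time.Duration;

import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;

public class IdleConnectionConfig {
    public static void main(String[] args) {
        // Lengthen the idle period before connections with no open streams are closed.
        SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder()
                .connectionMaxIdleTime(Duration.ofSeconds(30))
                // .useIdleConnectionReaper(false) // alternatively, disable reaping entirely
                .build();

        client.close();
    }
}
```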
+ }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Documentation updates for QuickSight" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.32.json b/.changes/2.10.32.json new file mode 100644 index 000000000000..937ef7cab10f --- /dev/null +++ b/.changes/2.10.32.json @@ -0,0 +1,11 @@ +{ + "version": "2.10.32", + "date": "2019-12-10", + "entries": [ + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "1. Adding DocumentTitleFieldName as an optional configuration for SharePoint. 2. updating s3 object pattern to support all s3 keys." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.33.json b/.changes/2.10.33.json new file mode 100644 index 000000000000..d52799f34216 --- /dev/null +++ b/.changes/2.10.33.json @@ -0,0 +1,16 @@ +{ + "version": "2.10.33", + "date": "2019-12-11", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release allows customers to attach multiple Elastic Inference Accelerators to a single EC2 instance. It adds support for a Count parameter for each Elastic Inference Accelerator type you specify on the RunInstances and LaunchTemplate APIs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Adds a `has*` method to requests and responses that have a List or Map property." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.34.json b/.changes/2.10.34.json new file mode 100644 index 000000000000..240cb63b85cf --- /dev/null +++ b/.changes/2.10.34.json @@ -0,0 +1,16 @@ +{ + "version": "2.10.34", + "date": "2019-12-12", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixing exception using `RequestBody.fromInputStream` on non-resettable `InputStreams` by making `reset` conditional on `markSupported`. See [#1544](https://github.com/aws/aws-sdk-java-v2/issues/1544) / [#1545](https://github.com/aws/aws-sdk-java-v2/issues/1545)" + }, + { + "type": "feature", + "category": "Access Analyzer", + "description": "This release includes improvements and fixes bugs for the IAM Access Analyzer feature." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.35.json b/.changes/2.10.35.json new file mode 100644 index 000000000000..2b511b0bb05e --- /dev/null +++ b/.changes/2.10.35.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.35", + "date": "2019-12-13", + "entries": [ + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "CodeBuild adds support for cross account" + }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "description": "Added the ability to use your own public-private key pair to configure DKIM authentication for a domain identity." + }, + { + "type": "feature", + "category": "Amazon Detective", + "description": "This is the initial release of Amazon Detective." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.36.json b/.changes/2.10.36.json new file mode 100644 index 000000000000..81f1306f41f6 --- /dev/null +++ b/.changes/2.10.36.json @@ -0,0 +1,26 @@ +{ + "version": "2.10.36", + "date": "2019-12-16", + "entries": [ + { + "type": "feature", + "category": "AmazonMQ", + "description": "Amazon MQ now supports throughput-optimized message brokers, backed by Amazon EBS." 
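To illustrate the `has*` accessors added in the 2.10.33 entry above, a hedged sketch using the S3 client; the bucket name is a placeholder:

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;

public class HasMethodExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            ListObjectsV2Response response =
                    s3.listObjectsV2(b -> b.bucket("example-bucket"));

            // hasContents() reports whether the list was returned at all, which a
            // plain isEmpty() check on contents() cannot distinguish from "absent".
            if (response.hasContents()) {
                response.contents().forEach(o -> System.out.println(o.key()));
            }
        }
    }
}
```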
+ }, + { + "type": "feature", + "category": "AWS Comprehend Medical", + "description": "New Ontology linking APIs will provide medication concepts normalization and Diagnoses codes from input text. In this release we will provide two APIs - RxNorm and ICD10-CM." + }, + { + "type": "feature", + "category": "Amazon S3", + "description": "CopyObjectRequest now has `destinationBucket` and `destinationKey` properties for clarity.\nThe existing names, `bucket` and `key`, are deprecated." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "You can now configure your EC2 Fleet to preferentially use EC2 Capacity Reservations for launching On-Demand instances, enabling you to fully utilize the available (and unused) Capacity Reservations before launching On-Demand instances on net new capacity." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.37.json b/.changes/2.10.37.json new file mode 100644 index 000000000000..108eb477483d --- /dev/null +++ b/.changes/2.10.37.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.37", + "date": "2019-12-17", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Added support for Cloud Watch Output and Document Version to the Run Command tasks in Maintenance Windows." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "Added a new Over-the-Air (OTA) Update feature that allows you to use different, or multiple, protocols to transfer an image from the AWS cloud to IoT devices." + }, + { + "type": "feature", + "category": "Amazon Kinesis Analytics", + "description": "Kinesis Data Analytics service now supports running Java applications using Flink 1.8." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports HLS ID3 segment tagging, HLS redundant manifests for CDNs that support different publishing/viewing endpoints, fragmented MP4 (fMP4), and frame capture intervals specified in milliseconds." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "Documentation updates for Amazon ECS." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Documentation updates for Amazon EC2" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.38.json b/.changes/2.10.38.json new file mode 100644 index 000000000000..56d491584d3e --- /dev/null +++ b/.changes/2.10.38.json @@ -0,0 +1,31 @@ +{ + "version": "2.10.38", + "date": "2019-12-18", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Updates Amazon S3 endpoints allowing you to configure your client to opt-in to using S3 with the us-east-1 regional endpoint, instead of global." + }, + { + "type": "feature", + "category": "AWS Resource Groups Tagging API", + "description": "Documentation updates for resourcegroupstaggingapi" + }, + { + "type": "feature", + "category": "AWS OpsWorks CM", + "description": "AWS OpsWorks CM now supports tagging, and tag-based access control, of servers and backups." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release introduces the ability to tag Elastic Graphics accelerators. You can use tags to organize and identify your accelerators for cost allocation."
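As a sketch of the 2.10.36 `CopyObjectRequest` rename noted above, assuming the pre-existing `copySource` property for naming the source object; bucket and key names are placeholders:

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CopyObjectRequest;

public class CopyObjectExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            CopyObjectRequest request = CopyObjectRequest.builder()
                    .copySource("source-bucket/source-key")
                    .destinationBucket("destination-bucket") // previously bucket(...)
                    .destinationKey("destination-key")       // previously key(...)
                    .build();

            s3.copyObject(request);
        }
    }
}
```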
+ }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Documentation updates for CloudFront" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.39.json b/.changes/2.10.39.json new file mode 100644 index 000000000000..ea5f3af373e6 --- /dev/null +++ b/.changes/2.10.39.json @@ -0,0 +1,56 @@ +{ + "version": "2.10.39", + "date": "2019-12-19", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "We are updating the supportedRootDevices field to supportedRootDeviceTypes for DescribeInstanceTypes API to ensure that the actual value is returned, correcting a previous error in the model." + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "You can now copy snapshots across regions using Data Lifecycle Manager (DLM). You can enable policies which, along with create, can now also copy snapshots to one or more AWS region(s). Copies can be scheduled for up to three regions from a single policy and retention periods are set for each region separately." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This release allows customers to add tags to Automation execution, enabling them to sort and filter executions in different ways, such as by resource, purpose, owner, or environment." + }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "description": "Add context map to get-recommendations and get-personalized-ranking request objects to provide contextual metadata at inference time" + }, + { + "type": "feature", + "category": "Amazon GameLift", + "description": "Amazon GameLift now supports ARNs for all key GameLift resources, tagging for GameLift resource authorization management, and updated documentation that articulates GameLift's resource authorization strategy." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "description": "Amazon Lex now supports conversation logs and slot obfuscation." + }, + { + "type": "feature", + "category": "Netty NIO HTTP Client", + "description": "`SETTINGS_INITIAL_WINDOW_SIZE` is now configurable on HTTP/2 connections opened by the Netty client using `Http2Configuration#initialWindowSize(Integer)` along with `NettyNioAsyncHttpClient.Builder#http2Configuration(Http2Configuration)`. See https://tools.ietf.org/html/rfc7540#section-6.5.2 for more information." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "Amazon Transcribe supports job queuing for the StartTranscriptionJob API." + }, + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Fixed an issue where a 'checksum mismatch' error is raised whenever a PutObject request is retried while using an async client." + }, + { + "type": "feature", + "category": "AWS CodeStar connections", + "description": "Public beta for Bitbucket Cloud support in AWS CodePipeline through integration with AWS CodeStar connections." 
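A minimal sketch of the HTTP/2 initial window size setting called out in the 2.10.39 Netty entry above, using the builder methods the entry names; the 1 MiB value is only an example:

```java
import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
import software.amazon.awssdk.http.nio.netty.Http2Configuration;
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;

public class Http2WindowSizeConfig {
    public static void main(String[] args) {
        // Raise SETTINGS_INITIAL_WINDOW_SIZE for streams on HTTP/2 connections
        // opened by the Netty client (see RFC 7540, section 6.5.2).
        SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder()
                .http2Configuration(Http2Configuration.builder()
                        .initialWindowSize(1_048_576)
                        .build())
                .build();

        client.close();
    }
}
```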
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.10.4.json b/.changes/2.10.4.json new file mode 100644 index 000000000000..6dc02fe5bdb3 --- /dev/null +++ b/.changes/2.10.4.json @@ -0,0 +1,16 @@ +{ + "version": "2.10.4", + "date": "2019-10-29", + "entries": [ + { + "type": "feature", + "category": "Amazon AppStream", + "description": "Adds support for providing domain names that can embed streaming sessions" + }, + { + "type": "feature", + "category": "AWS Cloud9", + "description": "Added CREATING and CREATE_FAILED environment lifecycle statuses." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.40.json b/.changes/2.10.40.json new file mode 100644 index 000000000000..27e005b45fca --- /dev/null +++ b/.changes/2.10.40.json @@ -0,0 +1,56 @@ +{ + "version": "2.10.40", + "date": "2019-12-20", + "entries": [ + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release adds an operation that enables users to specify whether a database is restarted when its SSL/TLS certificate is rotated. Only customers who do not use SSL/TLS should use this operation." + }, + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Fixed an issue where the SDK would attempt to validate the checksum on a PutObjectRequest when S3 was returning invalid checksums. This would cause all requests to buckets with customer-managed-key service-side encryption to fail." + }, + { + "type": "feature", + "category": "AWS Device Farm", + "description": "Introduced browser testing support through AWS Device Farm" + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Documentation updates for Amazon Redshift RA3 node types." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Additional resource types are now fully supported in the AWS Security Finding Format (ASFF). These resources include AwsElbv2LoadBalancer, AwsKmsKey, AwsIamRole, AwsSqsQueue, AwsLambdaFunction, AwsSnsTopic, and AwsCloudFrontDistribution. Each of these resource types includes an accompanying resource details object with fields for security finding providers to populate. Updates were made to the AwsIamAccessKey resource details object to include information on principal ID and name. To learn more, visit our documentation on the ASFF." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release introduces the ability to tag key pairs, placement groups, export tasks, import image tasks, import snapshot tasks and export image tasks. You can use tags to organize and identify your resources for cost allocation." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This release updates the attachments support to include AttachmentReference source for Automation documents." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "AWS Transcribe now supports vocabulary filtering that allows customers to input words to the service that they don't want to see in the output transcript." + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "Amazon EKS now supports restricting access to the API server public endpoint by applying CIDR blocks" + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "This release of the Amazon Pinpoint API introduces versioning support for message templates." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.10.41.json b/.changes/2.10.41.json new file mode 100644 index 000000000000..1236ebe1d24b --- /dev/null +++ b/.changes/2.10.41.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.41", + "date": "2019-12-23", + "entries": [ + { + "type": "feature", + "category": "Amazon Detective", + "description": "Updated the documentation for Amazon Detective." + }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "This release adds a new family of APIs (create-data-repository-task, describe-data-repository-task, and cancel-data-repository-task) that allow users to perform operations between their file system and its linked data repository." + }, + { + "type": "feature", + "category": "AWS Health APIs and Notifications", + "description": "With this release, you can now centrally aggregate AWS Health events from all accounts in your AWS organization. Visit AWS Health documentation to learn more about enabling and using this feature: https://docs.aws.amazon.com/health/latest/ug/organizational-view-health.html." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.42.json b/.changes/2.10.42.json new file mode 100644 index 000000000000..f248a856c652 --- /dev/null +++ b/.changes/2.10.42.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.42", + "date": "2020-01-02", + "entries": [ + { + "type": "feature", + "category": "Amazon Lightsail", + "description": "This release adds support for Certificate Authority (CA) certificate identifier to managed databases in Amazon Lightsail." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "Adds waiters for ImageScanComplete and LifecyclePolicyPreviewComplete" + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "description": "Documentation updates for Amazon Lex." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "Documentation updates for GetReservationUtilization for the Cost Explorer API." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix unmarshalling for models with xml attributes. See [#1488](https://github.com/aws/aws-sdk-java-v2/issues/1488)." + }, + { + "type": "bugfix", + "category": "Netty NIO Http Client", + "description": "Propagate exception properly when an exception is thrown from protocol initialization." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.43.json b/.changes/2.10.43.json new file mode 100644 index 000000000000..5babc4d8bb6d --- /dev/null +++ b/.changes/2.10.43.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.43", + "date": "2020-01-06", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Add `RequestBody.fromRemainingByteBuffer(ByteBuffer)` that copies only the remaining readable bytes of the buffer. See [#1534](https://github.com/aws/aws-sdk-java-v2/issues/1534)" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release supports service providers configuring a private DNS name for services other than AWS services and services available in the AWS marketplace. This feature allows consumers to access the service using an existing DNS name without making changes to their applications." 
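As a sketch of the `RequestBody.fromRemainingByteBuffer(ByteBuffer)` addition in the 2.10.43 entries above; the buffer contents and position are illustrative:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import software.amazon.awssdk.core.sync.RequestBody;

public class RemainingByteBufferExample {
    public static void main(String[] args) {
        ByteBuffer buffer =
                ByteBuffer.wrap("header|payload".getBytes(StandardCharsets.UTF_8));
        buffer.position(7); // suppose the first 7 bytes were already consumed

        // Copies only the bytes between position() and limit() ("payload"),
        // rather than the buffer's entire backing content.
        RequestBody body = RequestBody.fromRemainingByteBuffer(buffer);
        System.out.println(body.contentLength()); // 7
    }
}
```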
+ }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Reduce ReadTimeout and ConnectTimeout for accessing EC2 metadata instance service" + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage", + "description": "You can now restrict direct access to AWS Elemental MediaPackage by securing requests for live content using CDN authorization. With CDN authorization, content requests require a specific HTTP header and authorization code." + }, + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Requests that return an error response in the body of the HTTP response with a successful (200) status code will now correctly be handled as a failed request by the SDK." + }, + { + "type": "feature", + "category": "Amazon Comprehend", + "description": "Amazon Comprehend now supports Multilabel document classification" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.44.json b/.changes/2.10.44.json new file mode 100644 index 000000000000..a20340f5a968 --- /dev/null +++ b/.changes/2.10.44.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.44", + "date": "2020-01-07", + "entries": [ + { + "type": "feature", + "category": "AWS X-Ray", + "description": "Documentation updates for xray" + }, + { + "type": "feature", + "category": "AWS Migration Hub", + "description": "ListApplicationStates API provides a list of all application migration states" + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "Add encryption key override to StartBuild API in AWS CodeBuild." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.45.json b/.changes/2.10.45.json new file mode 100644 index 000000000000..345285efb682 --- /dev/null +++ b/.changes/2.10.45.json @@ -0,0 +1,31 @@ +{ + "version": "2.10.45", + "date": "2020-01-08", + "entries": [ + { + "type": "feature", + "category": "Amazon Translate", + "description": "This release adds a new family of APIs for asynchronous batch translation service that provides option to translate large collection of text or HTML documents stored in Amazon S3 folder. This service accepts a batch of up to 5 GB in size per API call with each document not exceeding 1 MB size and the number of documents not exceeding 1 million per batch. See documentation for more information." + }, + { + "type": "feature", + "category": "Firewall Management Service", + "description": "AWS Firewall Manager now supports tagging, and tag-based access control, of policies." + }, + { + "type": "feature", + "category": "AWS Step Functions", + "description": "Add sfn specific http configurations. See [#1325](https://github.com/aws/aws-sdk-java-v2/issues/1325)" + }, + { + "type": "bugfix", + "category": "Amazon EC2", + "description": "Fix NPE when calling `CopySnapshot`. 
Fixes [#1564](https://github.com/aws/aws-sdk-java-v2/issues/1564)" + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "Documentation updates for CreateCostCategoryDefinition and UpdateCostCategoryDefinition API" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.46.json b/.changes/2.10.46.json new file mode 100644 index 000000000000..fa6f8c9dad67 --- /dev/null +++ b/.changes/2.10.46.json @@ -0,0 +1,26 @@ +{ + "version": "2.10.46", + "date": "2020-01-09", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "description": "Documentation updates for logs" + }, + { + "type": "feature", + "category": "AWS Security Token Service", + "description": "Documentation updates for sts" + }, + { + "type": "feature", + "category": "Amazon S3", + "description": "Add support for Tagging builder in `CreateMultipartUploadRequest`. See [#1440](https://github.com/aws/aws-sdk-java-v2/issues/1440)" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Increase the priority of the AWS_WEB_IDENTITY_TOKEN_FILE/AWS_ROLE_ARN/AWS_ROLE_SESSION_NAME environment variables when loading credentials so that they are considered before web_identity_token_file/role_arn/role_session_name profile properties. This is consistent with the other AWS SDKs, including the CLI." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.47.json b/.changes/2.10.47.json new file mode 100644 index 000000000000..67a82e693156 --- /dev/null +++ b/.changes/2.10.47.json @@ -0,0 +1,51 @@ +{ + "version": "2.10.47", + "date": "2020-01-10", + "entries": [ + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release adds an operation that enables users to override the system-default SSL/TLS certificate for new Amazon RDS DB instances temporarily, or remove the customer override." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release introduces the ability to tag egress only internet gateways, local gateways, local gateway route tables, local gateway virtual interfaces, local gateway virtual interface groups, local gateway route table VPC association and local gateway route table virtual interface group association. You can use tags to organize and identify your resources for cost allocation." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Added the migrate feature to Amazon WorkSpaces." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "Add shared profile support to new and existing users" + }, + { + "type": "feature", + "category": "AWS Transfer for SFTP", + "description": "This release introduces a new endpoint type that allows you to attach Elastic IP addresses from your AWS account with your server's endpoint directly and whitelist access to your server by client's internet IP address(es) using VPC Security Groups." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoints and added global endpoints for iso and iso-b." + }, + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Fix an issue where s3#listObjects incorrectly decoded marker field. See [#1574](https://github.com/aws/aws-sdk-java-v2/issues/1574)." 
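Illustrating the `CreateMultipartUploadRequest` Tagging builder support from the 2.10.46 entries above, assuming a `tagging(Tagging)` builder overload; bucket, key, and tag values are placeholders:

```java
import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.Tag;
import software.amazon.awssdk.services.s3.model.Tagging;

public class MultipartTaggingExample {
    public static void main(String[] args) {
        // Tags can be supplied as a structured Tagging object rather than
        // a pre-encoded query string.
        CreateMultipartUploadRequest request = CreateMultipartUploadRequest.builder()
                .bucket("example-bucket")
                .key("example-key")
                .tagging(Tagging.builder()
                        .tagSet(Tag.builder().key("project").value("demo").build())
                        .build())
                .build();

        System.out.println(request.tagging());
    }
}
```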
+ }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "SageMaker ListTrialComponents API filter by TrialName and ExperimentName." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.48.json b/.changes/2.10.48.json new file mode 100644 index 000000000000..f1c00df53d78 --- /dev/null +++ b/.changes/2.10.48.json @@ -0,0 +1,26 @@ +{ + "version": "2.10.48", + "date": "2020-01-13", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Documentation updates for the StopInstances API. You can now stop and start an Amazon EBS-backed Spot Instance at will, instead of relying on the Stop interruption behavior to stop your Spot Instances when interrupted." + }, + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Fixed a bug preventing GetBucketPolicy from ever being successful using the asynchronous S3 client." + }, + { + "type": "feature", + "category": "AWS Backup", + "description": "Cross-region backup is a new AWS Backup feature that allows enterprises to copy backups across multiple AWS services to different regions." + }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "description": "This release adds support for managing EFS file system policies and EFS Access Points." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.49.json b/.changes/2.10.49.json new file mode 100644 index 000000000000..2964ff467b6d --- /dev/null +++ b/.changes/2.10.49.json @@ -0,0 +1,11 @@ +{ + "version": "2.10.49", + "date": "2020-01-14", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds support for partition placement groups and instance metadata option in Launch Templates" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.5.json b/.changes/2.10.5.json new file mode 100644 index 000000000000..f9a13c4ad0c4 --- /dev/null +++ b/.changes/2.10.5.json @@ -0,0 +1,11 @@ +{ + "version": "2.10.5", + "date": "2019-10-30", + "entries": [ + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Amazon ElastiCache for Redis 5.0.5 now allows you to modify authentication tokens by setting and rotating new tokens. You can now modify active tokens while in use, or add brand-new tokens to existing encryption-in-transit enabled clusters that were previously setup without authentication tokens. This is a two-step process that allows you to set and rotate the token without interrupting client requests." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.50.json b/.changes/2.10.50.json new file mode 100644 index 000000000000..29de159bf5d0 --- /dev/null +++ b/.changes/2.10.50.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.50", + "date": "2020-01-15", + "entries": [ + { + "type": "bugfix", + "category": "Amazon Transcribe Service", + "description": "Fixed an issue where streaming transcriptions would fail with signature validation errors if the date changed during the request." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Document updates for Patch Manager 'NoReboot' feature." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "General Update to EC2 Docs and SDKs" + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "Updated description for PolicyID parameter and ConstraintViolationException."
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Add support for DescribeStandardsControls and UpdateStandardsControl. These new Security Hub API operations are used to track and manage whether a compliance standards control is enabled." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.51.json b/.changes/2.10.51.json new file mode 100644 index 000000000000..a11e9ee1cfc1 --- /dev/null +++ b/.changes/2.10.51.json @@ -0,0 +1,26 @@ +{ + "version": "2.10.51", + "date": "2020-01-16", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Client VPN now supports Port Configuration for VPN Endpoints, allowing usage of either port 443 or port 1194." + }, + { + "type": "feature", + "category": "AWS Directory Service", + "description": "To reduce the number of errors our customers are facing, we have modified the requirements of input parameters for two of Directory Service APIs." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "This release adds two new APIs (UpdateWorkforce and DescribeWorkforce) to SageMaker Ground Truth service for workforce IP whitelisting." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.52.json b/.changes/2.10.52.json new file mode 100644 index 000000000000..70463cd52e87 --- /dev/null +++ b/.changes/2.10.52.json @@ -0,0 +1,41 @@ +{ + "version": "2.10.52", + "date": "2020-01-17", + "entries": [ + { + "type": "feature", + "category": "AWS CloudHSM V2", + "description": "This release introduces resource-level and tag-based access control for AWS CloudHSM resources. You can now tag CloudHSM backups, tag CloudHSM clusters on creation, and tag a backup as you copy it to another region." + }, + { + "type": "feature", + "category": "Amazon Neptune", + "description": "This release includes Deletion Protection for Amazon Neptune databases." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release provides a public preview for specifying Amazon EFS file systems as volumes in your Amazon ECS task definitions." + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Documentation updates for redshift" + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for MP3 audio only outputs." + }, + { + "type": "feature", + "category": "AWS Batch", + "description": "This release ensures INACTIVE job definitions are permanently deleted after 180 days." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.53.json b/.changes/2.10.53.json new file mode 100644 index 000000000000..91314cfc854f --- /dev/null +++ b/.changes/2.10.53.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.53", + "date": "2020-01-20", + "entries": [ + { + "type": "feature", + "category": "AWS Key Management Service", + "description": "The ConnectCustomKeyStore operation now provides new error codes (USER_LOGGED_IN and USER_NOT_FOUND) for customers to better troubleshoot if their connect custom key store operation fails. 
Password length validation during CreateCustomKeyStore now also occurs on the client side." + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "description": "Updating DescribeAnomalyDetectors API to return AnomalyDetector Status value in response." + }, + { + "type": "feature", + "category": "Alexa For Business", + "description": "Add support for CreatedTime and ConnectionStatusUpdatedTime in response of SearchDevices API." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release provides support for a preview of bringing your own IPv6 addresses (BYOIP for IPv6) for use in AWS." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Application Insights", + "description": "This release adds support for a list API to retrieve the configuration events logged during periodic updates to an application by Amazon CloudWatch Application Insights." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Added reason codes to StateReasonCode (InvalidSubnet, InvalidSecurityGroup) and LastUpdateStatusReasonCode (SubnetOutOfIPAddresses, InvalidSubnet, InvalidSecurityGroup) for functions that connect to a VPC." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.54.json b/.changes/2.10.54.json new file mode 100644 index 000000000000..5a4d34a1dbcc --- /dev/null +++ b/.changes/2.10.54.json @@ -0,0 +1,41 @@ +{ + "version": "2.10.54", + "date": "2020-01-21", + "entries": [ + { + "type": "feature", + "category": "AWS CodePipeline", + "description": "AWS CodePipeline enables an ability to stop pipeline executions." + }, + { + "type": "feature", + "category": "AWS Application Discovery Service", + "description": "Documentation updates for the AWS Application Discovery Service." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS IoT Events", + "description": "Documentation updates for iotcolumbo" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Add an enum value to the result of DescribeByoipCidrs to support CIDRs that are not publicly advertisable." + }, + { + "type": "bugfix", + "category": "Netty NIO Http Client", + "description": "Fixed a bug where an inactive http2 connection without `GOAWAY` frame received might get reused in a new request, causing `ClosedChannelException`" + }, + { + "type": "feature", + "category": "AWS Marketplace Commerce Analytics", + "description": "Remove 4 deprecated data sets, change some data sets available dates to 2017-09-15" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.55.json b/.changes/2.10.55.json new file mode 100644 index 000000000000..27994f2cf066 --- /dev/null +++ b/.changes/2.10.55.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.55", + "date": "2020-01-23", + "entries": [ + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This SDK release introduces APIs that automate the export of Amazon RDS snapshot data to Amazon S3. The new APIs include: StartExportTask, CancelExportTask, DescribeExportTasks. These APIs automate the extraction of data from an RDS snapshot and export it to an Amazon S3 bucket. The data is stored in a compressed, consistent, and query-able format. After the data is exported, you can query it directly using tools such as Amazon Athena or Redshift Spectrum. You can also consume the data as part of a data lake solution. 
If you archive the data in S3 Infrequent Access or Glacier, you can reduce long term data storage costs by applying data lifecycle policies." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Added ServiceMetadata.servicePartitions() to get partition metadata for a specific service" + }, + { + "type": "feature", + "category": "AWS Identity and Access Management", + "description": "This release enables the Identity and Access Management policy simulator to simulate permissions boundary policies." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Support for non-blocking asynchronous calling of all mapper operations" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Improved error messages on UnknownHostExceptions" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.56.json b/.changes/2.10.56.json new file mode 100644 index 000000000000..dab2bee26028 --- /dev/null +++ b/.changes/2.10.56.json @@ -0,0 +1,46 @@ +{ + "version": "2.10.56", + "date": "2020-01-24", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "Adding new error codes for Nodegroups in EKS" + }, + { + "type": "feature", + "category": "Amazon EC2", + "description": "Adds EC2ThrottledException as a recognized throttling exception to be retried" + }, + { + "type": "feature", + "category": "AWS OpsWorks CM", + "description": "AWS OpsWorks for Chef Automate now supports in-place upgrade to Chef Automate 2. Eligible servers can be updated through the management console, CLI and APIs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Documentation updates for WorkSpaces" + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release provides support for tagging Amazon ECS task sets for services using external deployment controllers." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Fix issue where DNS resolution for a host is only made once for the initial request to the host. If the DNS entries change for a hostname, the client will not resolve the new address until the client is closed and recreated." + }, + { + "type": "feature", + "category": "AWS DataSync", + "description": "AWS DataSync now supports FSx for Windows File Server Locations" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.57.json b/.changes/2.10.57.json new file mode 100644 index 000000000000..6de1d762eff8 --- /dev/null +++ b/.changes/2.10.57.json @@ -0,0 +1,71 @@ +{ + "version": "2.10.57", + "date": "2020-02-04", + "entries": [ + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Adding KVM as a supported hypervisor" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Deliver exceptions to stream channels correctly if there's an exception thrown on connection. This also fixes a bug where the publisher signals onComplete if the stream is closed as a result of outbound GOAWAY."
+ }, + { + "type": "feature", + "category": "Amazon WorkMail", + "description": "This release adds support for tagging Amazon WorkMail organizations." + }, + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Fixed an issue where fields in `ListObjectVersionsResponse` and `ListMultipartUploadsResponse` are not decoded correctly when encodingType is specified as url. See [#1601](https://github.com/aws/aws-sdk-java-v2/issues/1601)" + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "Updated ThrottlingException documentation to report that the error code is 400, and not 429, to reflect actual system behaviour." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Changing usage of typed builders for PutItem, UpdateItem and StaticTableSchema to explicitly provide class type." + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "This release enables AWS MSK customers to list Apache Kafka versions that are supported on AWS MSK clusters. Also includes changes to expose additional details of a cluster's state in DescribeCluster and ListClusters APIs." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Throws `IOException` for the race condition where an HTTP2 connection gets reused at the same time it gets inactive so that failed requests can be retried" + }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Renames top level sync/async MappedDatabase interfaces as DynamoDbEnhancedClient interfaces. Also adds builder definitions to the interfaces together with a static method that returns the default implementation of the builder." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This feature ensures that an instance is patched up to the available patches on a particular date. It can be enabled by selecting the 'ApproveUntilDate' option as the auto-approval rule while creating the patch baseline. ApproveUntilDate - The cutoff date for auto approval of released patches. Any patches released on or before this date will be installed automatically." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Documentation updates for CloudFront" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon VPC Flow Logs adds support for 1-minute aggregation intervals." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.58.json b/.changes/2.10.58.json new file mode 100644 index 000000000000..52ddb0cc73b8 --- /dev/null +++ b/.changes/2.10.58.json @@ -0,0 +1,41 @@ +{ + "version": "2.10.58", + "date": "2020-02-05", + "entries": [ + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "Updated the maximum number of tags that can be added to a snapshot using DLM to 45." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release provides support for tagging when you create a VPC endpoint, or VPC endpoint service." + }, + { + "type": "feature", + "category": "AWS Ground Station", + "description": "Adds dataflowEndpointRegion property to DataflowEndpointConfig. The dateCreated, lastUpdated, and tags properties on GetSatellite have been deprecated." 
+ }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for fine-tuned QVBR quality level." + }, + { + "type": "feature", + "category": "AWS Resource Groups Tagging API", + "description": "Documentation-only update that adds services to the list of supported services." + }, + { + "type": "feature", + "category": "Amazon Forecast Query Service", + "description": "Documentation updates for Amazon Forecast." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Additional resource types are now supported in the AWS Security Finding Format (ASFF). The following new resource types are added, each having an accompanying resource details object with fields for security finding providers to populate: AwsCodeBuildProject, AwsEc2NetworkInterface, AwsEc2SecurityGroup, AwsElasticsearchDomain, AwsLambdaLayerVersion, AwsRdsDbInstance, and AwsWafWebAcl. The following resource types are added without an accompanying details object: AutoscalingAutoscalingGroup, AwsDynamoDbTable, AwsEc2Eip, AwsEc2Snapshot, AwsEc2Volume, AwsRdsDbSnapshot, AwsRedshiftCluster, and AwsS3Object. The number of allowed resources per finding is increased from 10 to 32. A new field is added in the Compliance object, RelatedRequirements. To learn more, visit our documentation on the ASFF." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.59.json b/.changes/2.10.59.json new file mode 100644 index 000000000000..131bac6dd0a2 --- /dev/null +++ b/.changes/2.10.59.json @@ -0,0 +1,41 @@ +{ + "version": "2.10.59", + "date": "2020-02-06", + "entries": [ + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "AWS CodeBuild adds support for Amazon Elastic File Systems" + }, + { + "type": "feature", + "category": "AWS AppSync", + "description": "AWS AppSync now supports X-Ray" + }, + { + "type": "feature", + "category": "Amazon Elastic Block Store", + "description": "Documentation updates for EBS direct APIs." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "In order to make operations more easily discoverable by an IDE, specific operation methods have been added to the enhanced client interface. An operation method takes a corresponding request object as parameter. Meanwhile, the generic execute() method is removed. This change affects only batch and transcribe operations at the database level." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "This release contains updated text for the GetAuthorizationToken API." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "description": "Amazon Lex now supports AMAZON.AlphaNumeric with regular expressions." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds platform details and billing info to the DescribeImages API." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.6.json b/.changes/2.10.6.json new file mode 100644 index 000000000000..dffc0242a46e --- /dev/null +++ b/.changes/2.10.6.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.6", + "date": "2019-10-31", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "S3 Inventory now supports a new field 'IntelligentTieringAccessTier' that reports the access tier (frequent or infrequent) of objects stored in Intelligent-Tiering storage class." 
+ }, + { + "type": "feature", + "category": "AWS Amplify", + "description": "This release of AWS Amplify Console introduces support for Web Previews. This feature allows user to create ephemeral branch deployments from pull request submissions made to a connected repository. A pull-request preview deploys every pull request made to your Git repository to a unique preview URL." + }, + { + "type": "feature", + "category": "AWS Support", + "description": "The status descriptions for TrustedAdvisorCheckRefreshStatus have been updated" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.60.json b/.changes/2.10.60.json new file mode 100644 index 000000000000..7daeb137527c --- /dev/null +++ b/.changes/2.10.60.json @@ -0,0 +1,31 @@ +{ + "version": "2.10.60", + "date": "2020-02-07", + "entries": [ + { + "type": "feature", + "category": "AWS RoboMaker", + "description": "This release adds support for simulation job batches" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Documentation updates for RDS: when restoring a DB cluster from a snapshot, must create DB instances" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "This version of the SDK includes bug fixes and documentation updates." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Improves discoverability by renaming the table and index interfaces to be consistent with the client interface naming, and by adding operation methods for createTable(), scan() and query(), as applicable. These methods take a request object as parameter. Execute() methods for the index interface is removed since they are no longer needed." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.61.json b/.changes/2.10.61.json new file mode 100644 index 000000000000..8fbb38ecde08 --- /dev/null +++ b/.changes/2.10.61.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.61", + "date": "2020-02-10", + "entries": [ + { + "type": "feature", + "category": "Amazon DocumentDB with MongoDB compatibility", + "description": "Added clarifying information that Amazon DocumentDB shares operational technology with Amazon RDS and Amazon Neptune." + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "description": "The ConnectCustomKeyStore API now provides a new error code (SUBNET_NOT_FOUND) for customers to better troubleshoot if their \"connect-custom-key-store\" operation fails." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.62.json b/.changes/2.10.62.json new file mode 100644 index 000000000000..ee20a57c70c1 --- /dev/null +++ b/.changes/2.10.62.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.62", + "date": "2020-02-11", + "entries": [ + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "This release of AWS CloudFormation StackSets allows you to centrally manage deployments to all the accounts in your organization or specific organizational units (OUs) in AWS Organizations. You will also be able to enable automatic deployments to any new accounts added to your organization or OUs. The permissions needed to deploy across accounts will automatically be taken care of by the StackSets service." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon EC2 Now Supports Tagging Spot Fleet." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "This release adds a new user pool setting that lets customers make user sign-up and sign-in case insensitive with respect to username. The current default setting is case sensitive; a later release will change the default to case insensitive." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.63.json b/.changes/2.10.63.json new file mode 100644 index 000000000000..4bb888d063d0 --- /dev/null +++ b/.changes/2.10.63.json @@ -0,0 +1,41 @@ +{ + "version": "2.10.63", + "date": "2020-02-12", + "entries": [ + { + "type": "feature", + "category": "Amazon Neptune", + "description": "This launch enables Neptune start-db-cluster and stop-db-cluster. Stopping and starting Amazon Neptune clusters helps you manage costs for development and test environments. You can temporarily stop all the DB instances in your cluster, instead of setting up and tearing down all the DB instances each time that you use the cluster." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds support for tagging public IPv4 pools." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "Documentation updates for Amazon Chime" + }, + { + "type": "feature", + "category": "AWS Directory Service", + "description": "Adds ExpirationDateTime as an output of ListCertificates so that customers can review certificate lifetimes and make timely renewal decisions." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "description": "Amazon Elasticsearch Service now offers fine-grained access control, which adds multiple capabilities to give tighter control over data. New features include the ability to use roles to define granular permissions for indices, documents, or fields and to extend Kibana with read-only views and secure multi-tenant support." + }, + { + "type": "feature", + "category": "Amazon WorkMail", + "description": "This release adds support for access control rules management in Amazon WorkMail." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Adding ability to add arguments that cannot be overridden to AWS Glue jobs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.64.json b/.changes/2.10.64.json new file mode 100644 index 000000000000..1f93f2b9406a --- /dev/null +++ b/.changes/2.10.64.json @@ -0,0 +1,26 @@ +{ + "version": "2.10.64", + "date": "2020-02-13", + "entries": [ + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Improves discoverability by adding operation methods for deleteItem(), getItem(), putItem() and updateItem(), as applicable. These methods take a request object as a parameter. Execute() methods for the table interface are removed since they are no longer needed." + }, + { + "type": "feature", + "category": "Netty NIO HTTP Client", + "description": "When there is an I/O error on an http2 request, the SDK will start shutting down the connection - stopping using the http2 connection for new requests and closing it after all streams are finished." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata."
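The table-level entry in 2.10.64 above follows the same request-object pattern. A minimal sketch, again using the later GA names (PutItemEnhancedRequest, GetItemEnhancedRequest), which may differ slightly from this preview build; the Order bean and table name are illustrative.

```java
import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient;
import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable;
import software.amazon.awssdk.enhanced.dynamodb.Key;
import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest;
import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest;

public class EnhancedTableRequestObjectSketch {

    // Illustrative mapped bean.
    @DynamoDbBean
    public static class Order {
        private String orderId;

        @DynamoDbPartitionKey
        public String getOrderId() { return orderId; }
        public void setOrderId(String orderId) { this.orderId = orderId; }
    }

    public static void main(String[] args) {
        DynamoDbEnhancedClient enhanced = DynamoDbEnhancedClient.create();
        DynamoDbTable<Order> orders =
                enhanced.table("orders", TableSchema.fromBean(Order.class));

        Order order = new Order();
        order.setOrderId("o-1001");

        // Named operation methods that each take a request object,
        // in place of the previous generic execute(...) entry point.
        orders.putItem(PutItemEnhancedRequest.builder(Order.class).item(order).build());

        Order fetched = orders.getItem(GetItemEnhancedRequest.builder()
                .key(Key.builder().partitionValue("o-1001").build())
                .build());
        System.out.println(fetched.getOrderId());
    }
}
```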
+ }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage VOD", + "description": "Adds support for DASH with multiple media presentation description periods triggered by presence of SCTE-35 ad markers in the manifest.Also adds optional configuration for DASH SegmentTemplateFormat to refer to segments by Number with Duration, Number with Timeline or Time with Timeline and compact the manifest by combining duplicate SegmentTemplate tags." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.65.json b/.changes/2.10.65.json new file mode 100644 index 000000000000..ae1367df3b1d --- /dev/null +++ b/.changes/2.10.65.json @@ -0,0 +1,31 @@ +{ + "version": "2.10.65", + "date": "2020-02-14", + "entries": [ + { + "type": "feature", + "category": "AWS MediaTailor", + "description": "AWS Elemental MediaTailor SDK now allows configuration of Personalization Threshold for HLS and DASH streams." + }, + { + "type": "feature", + "category": "AWS Shield", + "description": "This release adds support for associating Amazon Route 53 health checks to AWS Shield Advanced protected resources." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "You can now enable Multi-Attach on Provisioned IOPS io1 volumes through the create-volume API." + }, + { + "type": "feature", + "category": "Amazon S3", + "description": "Added support for presigning `CreateMultipartUpload`, `UploadPart`, `CompleteMultipartUpload`, and `AbortMultipartUpload` requests." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Security Hub has released a new DescribeStandards API action. This API action allows a customer to list all of the standards available in an account. For each standard, the list provides the customer with the standard name, description, and ARN. Customers can use the ARN as an input to the BatchEnableStandards API action. To learn more, visit our API documentation." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.66.json b/.changes/2.10.66.json new file mode 100644 index 000000000000..bf3812e1dffd --- /dev/null +++ b/.changes/2.10.66.json @@ -0,0 +1,31 @@ +{ + "version": "2.10.66", + "date": "2020-02-17", + "entries": [ + { + "type": "feature", + "category": "AWS Cloud9", + "description": "AWS Cloud9 now supports the ability to tag Cloud9 development environments." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Documentation updates for EC2" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "description": "Amazon DynamoDB enables you to restore your DynamoDB backup or table data across AWS Regions such that the restored table is created in a different AWS Region from where the source table or backup resides. You can do cross-region restores between AWS commercial Regions, AWS China Regions, and AWS GovCloud (US) Regions." + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "This update adds the ability to detect text in videos and adds filters to image and video text detection." 
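For the Amazon S3 entry in 2.10.65 above, presigning the multipart-upload calls follows the same pattern as the existing GetObject/PutObject presigning. A hedged sketch: the CreateMultipartUploadPresignRequest and UploadPartPresignRequest names are assumed to mirror the GetObjectPresignRequest convention, and the bucket, key, and upload ID are placeholders.

```java
import java.time.Duration;
import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.UploadPartRequest;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.CreateMultipartUploadPresignRequest;
import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest;
import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest;
import software.amazon.awssdk.services.s3.presigner.model.UploadPartPresignRequest;

public class MultipartPresignSketch {
    public static void main(String[] args) {
        try (S3Presigner presigner = S3Presigner.create()) {

            // Presign the CreateMultipartUpload call that starts the upload.
            // (Presign request class names assumed to follow the GetObjectPresignRequest pattern.)
            PresignedCreateMultipartUploadRequest createUpload =
                    presigner.presignCreateMultipartUpload(CreateMultipartUploadPresignRequest.builder()
                            .signatureDuration(Duration.ofMinutes(15))
                            .createMultipartUploadRequest(CreateMultipartUploadRequest.builder()
                                    .bucket("my-bucket")            // illustrative bucket and key
                                    .key("backups/archive.bin")
                                    .build())
                            .build());
            System.out.println("CreateMultipartUpload URL: " + createUpload.url());

            // Presign an UploadPart call; the upload ID would come from the
            // CreateMultipartUpload response once the presigned URL has been invoked.
            PresignedUploadPartRequest partOne =
                    presigner.presignUploadPart(UploadPartPresignRequest.builder()
                            .signatureDuration(Duration.ofMinutes(15))
                            .uploadPartRequest(UploadPartRequest.builder()
                                    .bucket("my-bucket")
                                    .key("backups/archive.bin")
                                    .uploadId("example-upload-id")  // illustrative
                                    .partNumber(1)
                                    .build())
                            .build());
            System.out.println("UploadPart URL: " + partOne.url());
        }
    }
}
```

The URL and signed headers on the returned presigned objects can then be handed to a caller that holds no AWS credentials.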
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.10.67.json b/.changes/2.10.67.json new file mode 100644 index 000000000000..5e45aa097449 --- /dev/null +++ b/.changes/2.10.67.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.67", + "date": "2020-02-18", + "entries": [ + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release supports Microsoft Active Directory authentication for Amazon Aurora." + }, + { + "type": "feature", + "category": "Auto Scaling", + "description": "Amazon EC2 Auto Scaling now supports the ability to enable/disable target tracking, step scaling, and simple scaling policies." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "Added AudioFallbackUrl to support Chime SDK client." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.68.json b/.changes/2.10.68.json new file mode 100644 index 000000000000..ea76797ecf61 --- /dev/null +++ b/.changes/2.10.68.json @@ -0,0 +1,26 @@ +{ + "version": "2.10.68", + "date": "2020-02-19", + "entries": [ + { + "type": "feature", + "category": "AWS Lambda", + "description": "AWS Lambda now supports Ruby 2.7" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Auto Scaling", + "description": "Doc update for EC2 Auto Scaling: Add Enabled parameter for PutScalingPolicy" + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "\"ListPortfolioAccess\" API now has a new optional parameter \"OrganizationParentId\". When it is provided and if the portfolio with the \"PortfolioId\" given was shared with an organization or organizational unit with \"OrganizationParentId\", all accounts in the organization sub-tree under parent which inherit an organizational portfolio share will be listed, rather than all accounts with external shares. To accommodate long lists returned from the new option, the API now supports pagination." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.69.json b/.changes/2.10.69.json new file mode 100644 index 000000000000..468ca3ee42f2 --- /dev/null +++ b/.changes/2.10.69.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.69", + "date": "2020-02-20", + "entries": [ + { + "type": "feature", + "category": "AWS Savings Plans", + "description": "Added support for AWS Lambda in Compute Savings Plans" + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "As of this release of the Amazon Pinpoint API, the Title property is optional for the CampaignEmailMessage object." + }, + { + "type": "feature", + "category": "Amazon AppConfig", + "description": "This release adds exponential growth type support for deployment strategies." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.7.json b/.changes/2.10.7.json new file mode 100644 index 000000000000..3b7657d25986 --- /dev/null +++ b/.changes/2.10.7.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.7", + "date": "2019-11-01", + "entries": [ + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "This release of the Amazon Pinpoint API introduces support for using and managing journeys, and querying analytics data for journeys." + }, + { + "type": "feature", + "category": "AWS CloudTrail", + "description": "This release adds two new APIs, GetTrail and ListTrails, and support for adding tags when you create a trail by using a new TagsList parameter on CreateTrail operations." 
+ }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "This release contains task timeline attributes in replication task statistics. This release also adds a note to the documentation for the CdcStartPosition task request parameter. This note describes how to enable the use of native CDC start points for a PostgreSQL source by setting the new slotName extra connection attribute on the source endpoint to the name of an existing logical replication slot." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.70.json b/.changes/2.10.70.json new file mode 100644 index 000000000000..6d775f264f33 --- /dev/null +++ b/.changes/2.10.70.json @@ -0,0 +1,26 @@ +{ + "version": "2.10.70", + "date": "2020-02-21", + "entries": [ + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Improves discoverability by adding consumer-style methods for all client, table and index operations." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "This release of EC2 Image Builder increases the maximum policy document size for Image Builder resource-based policy APIs." + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Extend elastic resize to support resizing clusters to different instance types." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "description": "Documentation updates for AWS WAF (wafv2) to correct the guidance for associating a web ACL to a CloudFront distribution." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.71.json b/.changes/2.10.71.json new file mode 100644 index 000000000000..7160a6a2ec6d --- /dev/null +++ b/.changes/2.10.71.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.71", + "date": "2020-02-24", + "entries": [ + { + "type": "feature", + "category": "Amazon DocumentDB with MongoDB compatibility", + "description": "Documentation updates for docdb" + }, + { + "type": "feature", + "category": "AWS IoT Events", + "description": "Documentation updates for iotcolumbo" + }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "Announcing persistent file systems for Amazon FSx for Lustre that are ideal for longer-term storage and workloads, and a new generation of scratch file systems that offer higher burst throughput for spiky workloads." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Events", + "description": "This release allows you to create and manage tags for event buses." + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "description": "This release allows you to create and manage tags for event buses." + }, + { + "type": "feature", + "category": "Amazon Import/Export Snowball", + "description": "AWS Snowball adds a field for entering your GSTIN when creating AWS Snowball jobs in the Asia Pacific (Mumbai) region." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.72.json b/.changes/2.10.72.json new file mode 100644 index 000000000000..8d4773e1d13e --- /dev/null +++ b/.changes/2.10.72.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.72", + "date": "2020-02-25", + "entries": [ + { + "type": "feature", + "category": "AWS Secrets Manager", + "description": "This release increases the maximum allowed size of SecretString or SecretBinary from 10KB to 64KB in the CreateSecret, UpdateSecret, PutSecretValue and GetSecretValue APIs." 
+ }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Improves discoverability by adding consumer-style methods for all client, table and index operations." + }, + { + "type": "feature", + "category": "AWS Step Functions", + "description": "This release adds support for CloudWatch Logs for Standard Workflows." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "Amazon MSK has added support for Broker Log delivery to CloudWatch, S3, and Firehose." + }, + { + "type": "feature", + "category": "AWS Outposts", + "description": "This release adds DeleteSite and DeleteOutpost." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.73.json b/.changes/2.10.73.json new file mode 100644 index 000000000000..f1d6943a2737 --- /dev/null +++ b/.changes/2.10.73.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.73", + "date": "2020-02-26", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release changes the RunInstances CLI and SDK's so that if you do not specify a client token, a randomly generated token is used for the request to ensure idempotency." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "SageMaker UpdateEndpoint API now supports retained variant properties, e.g., instance count, variant weight. SageMaker ListTrials API filter by TrialComponentName. Make ExperimentConfig name length limits consistent with CreateExperiment, CreateTrial, and CreateTrialComponent APIs." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "Amazon Transcribe's Automatic Content Redaction feature enables you to automatically redact sensitive personally identifiable information (PII) from transcription results. It replaces each instance of an identified PII utterance with a [PII] tag in the transcript." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Security Hub has added to the DescribeProducts API operation a new response field called IntegrationTypes. The IntegrationTypes field lists the types of actions that a product performs relative to Security Hub such as send findings to Security Hub and receive findings from Security Hub." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Added the BeanTableSchema implementation of TableSchema that allows a TableSchema to be instantiated from an annotated Java bean class which can then be used with the DynamoDB Enhanced Client." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.74.json b/.changes/2.10.74.json new file mode 100644 index 000000000000..532b19f173c2 --- /dev/null +++ b/.changes/2.10.74.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.74", + "date": "2020-02-27", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Global Accelerator", + "description": "This release adds support for adding tags to accelerators and bringing your own IP address to AWS Global Accelerator (BYOIP)." 
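To illustrate the BeanTableSchema entry in 2.10.73 above: a TableSchema can be derived from an annotated Java bean and handed to the enhanced client. A minimal sketch using the GA-style annotation and method names, which may differ slightly in this preview build; the Invoice bean and table name are illustrative.

```java
import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient;
import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable;
import software.amazon.awssdk.enhanced.dynamodb.Key;
import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbSortKey;

public class BeanTableSchemaSketch {

    // A plain annotated bean; BeanTableSchema derives the attribute mapping from it.
    @DynamoDbBean
    public static class Invoice {
        private String customerId;
        private String invoiceId;
        private long amountCents;

        @DynamoDbPartitionKey
        public String getCustomerId() { return customerId; }
        public void setCustomerId(String customerId) { this.customerId = customerId; }

        @DynamoDbSortKey
        public String getInvoiceId() { return invoiceId; }
        public void setInvoiceId(String invoiceId) { this.invoiceId = invoiceId; }

        public long getAmountCents() { return amountCents; }
        public void setAmountCents(long amountCents) { this.amountCents = amountCents; }
    }

    public static void main(String[] args) {
        // fromBean(...) reflects over the annotations to build a BeanTableSchema.
        TableSchema<Invoice> schema = TableSchema.fromBean(Invoice.class);

        DynamoDbEnhancedClient enhanced = DynamoDbEnhancedClient.create();
        DynamoDbTable<Invoice> invoices = enhanced.table("invoices", schema);

        Invoice invoice = new Invoice();
        invoice.setCustomerId("c-42");
        invoice.setInvoiceId("inv-1001");
        invoice.setAmountCents(2500L);

        invoices.putItem(invoice);   // the bean maps straight to an item

        Invoice fetched = invoices.getItem(
                Key.builder().partitionValue("c-42").sortValue("inv-1001").build());
        System.out.println(fetched.getAmountCents());
    }
}
```

The consumer-style overloads called out in 2.10.72 provide an equivalent shorthand, for example `invoices.getItem(r -> r.key(Key.builder().partitionValue("c-42").sortValue("inv-1001").build()))`.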
+ }, + { + "type": "feature", + "category": "Amazon Lightsail", + "description": "Adds support to create notification contacts in Amazon Lightsail, and to create instance, database, and load balancer metric alarms that notify you based on the value of a metric relative to a threshold that you specify." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.75.json b/.changes/2.10.75.json new file mode 100644 index 000000000000..eeccf6eea273 --- /dev/null +++ b/.changes/2.10.75.json @@ -0,0 +1,51 @@ +{ + "version": "2.10.75", + "date": "2020-02-28", + "entries": [ + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Added SearchDashboards API that allows listing of dashboards that a specific user has access to." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "AWS Glue adds resource tagging support for Machine Learning Transforms and adds a new API, ListMLTransforms to support tag filtering. With this feature, customers can use tags in AWS Glue to organize and control access to Machine Learning Transforms." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "Added a target group attribute to support sticky sessions for Network Load Balancers." + }, + { + "type": "feature", + "category": "Access Analyzer", + "description": "This release includes improvements and fixes bugs for the IAM Access Analyzer feature." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Profiler", + "description": "Documentation updates for Amazon CodeGuru Profiler" + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "App Mesh now supports Transport Layer Security (TLS) between Virtual Nodes in a Mesh. Customers can use managed certificates from an AWS Certificate Manager Private Certificate Authority or bring their own certificates from the local file system to encrypt traffic between their workloads. See https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual-node-tls.html for details." + }, + { + "type": "feature", + "category": "AWS Config", + "description": "Accepts a structured query language (SQL) SELECT command and an aggregator name, performs the corresponding search on resources aggregated by the aggregator, and returns resource configurations matching the properties." + }, + { + "type": "feature", + "category": "Amazon Augmented AI Runtime", + "description": "This release updates Amazon Augmented AI ListHumanLoops API, DescribeHumanLoop response, StartHumanLoop response and type names of SDK fields." + }, + { + "type": "feature", + "category": "Amazon WorkDocs", + "description": "Documentation updates for workdocs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.76.json b/.changes/2.10.76.json new file mode 100644 index 000000000000..60ce783decd5 --- /dev/null +++ b/.changes/2.10.76.json @@ -0,0 +1,11 @@ +{ + "version": "2.10.76", + "date": "2020-02-28", + "entries": [ + { + "type": "feature", + "category": "AWS Config", + "description": "Correcting list of supported resource types." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.10.77.json b/.changes/2.10.77.json new file mode 100644 index 000000000000..adfc3c738537 --- /dev/null +++ b/.changes/2.10.77.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.77", + "date": "2020-03-02", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudWatch", + "description": "Introducing Amazon CloudWatch Composite Alarms" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Comprehend Medical", + "description": "New Time Expression feature, part of DetectEntitiesV2 API will provide temporal relations to existing NERe entities such as Medication, Test, Treatment, Procedure and Medical conditions." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.78.json b/.changes/2.10.78.json new file mode 100644 index 000000000000..9701522dba63 --- /dev/null +++ b/.changes/2.10.78.json @@ -0,0 +1,16 @@ +{ + "version": "2.10.78", + "date": "2020-03-03", + "entries": [ + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Fix an issue where the Netty client was prematurely considering an HTTP/2 request body as sent, but was still in the process of being transferred to the remote endpoint." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon VPC Flow Logs adds support for tags and tagging on resource creation." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.79.json b/.changes/2.10.79.json new file mode 100644 index 000000000000..97fe6ea69f16 --- /dev/null +++ b/.changes/2.10.79.json @@ -0,0 +1,11 @@ +{ + "version": "2.10.79", + "date": "2020-03-04", + "entries": [ + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "This release of the Amazon Pinpoint API introduces support for integrating recommender models with email, push notification, and SMS message templates. You can now use these types of templates to connect to recommender models and add personalized recommendations to messages that you send from campaigns and journeys." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.8.json b/.changes/2.10.8.json new file mode 100644 index 000000000000..7347f9ec3b50 --- /dev/null +++ b/.changes/2.10.8.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.8", + "date": "2019-11-04", + "entries": [ + { + "type": "feature", + "category": "AWS RoboMaker", + "description": "RoboMaker Fleet Management launch a feature to verify your robot is ready to download and install the new robot application using a download condition file, which is a script run on the robot prior to downloading the new deployment." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Accelerator (DAX)", + "description": "Documentation updates for dax" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Documentation updates for ec2" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.80.json b/.changes/2.10.80.json new file mode 100644 index 000000000000..7502199d9427 --- /dev/null +++ b/.changes/2.10.80.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.80", + "date": "2020-03-05", + "entries": [ + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Adds javadoc to operation methods and request/response objects." 
+ }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "Add a new finding field for EC2 findings indicating the instance's local IP address involved in the threat." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS OpsWorks CM", + "description": "Updated the Tag regex pattern to align with AWS tagging APIs." + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "Amazon EKS now supports adding a KMS key to your cluster for envelope encryption of Kubernetes secrets." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "You can now create AWS Client VPN Endpoints with a specified VPC and Security Group. Additionally, you can modify these attributes when modifying the endpoint." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.81.json b/.changes/2.10.81.json new file mode 100644 index 000000000000..a1860553e473 --- /dev/null +++ b/.changes/2.10.81.json @@ -0,0 +1,41 @@ +{ + "version": "2.10.81", + "date": "2020-03-06", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release provides customers with a self-service option to enable Local Zones." + }, + { + "type": "feature", + "category": "AWS RoboMaker", + "description": "Added support for streaming a GUI from robot and simulation applications" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "Amazon GuardDuty findings now include the OutpostArn if the finding is generated for an AWS Outposts EC2 host." + }, + { + "type": "bugfix", + "category": "Netty NIO Http Client", + "description": "Expand Http2 connection-level flow control window when a new stream is acquired on that connection so that the connection-level window size is proportional to the number of streams." + }, + { + "type": "feature", + "category": "AWS Signer", + "description": "This release enables signing image format override in PutSigningProfile requests, adding two more enum fields, JSONEmbedded and JSONDetached. This release also extends the length limit of SigningProfile name from 20 to 64." + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "App Mesh now supports sharing a Mesh with other AWS accounts. Customers can use AWS Resource Access Manager to share their Mesh with other accounts in their organization to connection applications within a single service mesh. See https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html for details." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.82.json b/.changes/2.10.82.json new file mode 100644 index 000000000000..d8424bd86bca --- /dev/null +++ b/.changes/2.10.82.json @@ -0,0 +1,46 @@ +{ + "version": "2.10.82", + "date": "2020-03-09", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Use the last seen HTTP/1.1 header value for headers defined to only appear once in an HTTP message instead of merging them all into a list. The order in which header values are inspected is: headers set by the request marshaller, overridden headers set on the client, then finally overridden headers set on the SDK request object. See https://tools.ietf.org/html/rfc2616#section-4.2 for more information." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Added support for \"retry modes\". A retry mode allows configuring multiple SDK parameters at once using default retry profiles, some of which are standardized between AWS SDK languages. See RetryMode javadoc for more information." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed an issue where specifying your own retry policy would override AWS and service-specific retry conditions. By default, all retry policies now have AWS and service-specific retry conditions added. This can be disabled via the new `RetryPolicy.furtherRefinementsAllowed(false)`." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed an issue where the retry condition returned by `RetryPolicy.retryCondition` differed from the one specified by `RetryPolicy.Builder.retryCondition`. The old value can be accessed via the new `RetryPolicy.aggregateRetryCondition`." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Added the ability to configure or disable the default retry throttling behavior of the SDK that 'kicks in' during a large volume of retriable service call errors. This behavior can now be configured via `RetryPolicy.retryCapacityCondition`." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports the ability to configure the Preferred Channel Pipeline for channels contributing to a Multiplex." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "Added new settings for Kinesis target to include detailed transaction info; to capture table DDL details; to use single-line unformatted json, which can be directly queried by AWS Athena if data is streamed into S3 through AWS Kinesis Firehose. Added CdcInsertsAndUpdates in S3 target settings to allow capture ongoing insertions and updates only." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon Virtual Private Cloud (VPC) NAT Gateway adds support for tagging on resource creation." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.83.json b/.changes/2.10.83.json new file mode 100644 index 000000000000..b9cff19f5f85 --- /dev/null +++ b/.changes/2.10.83.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.83", + "date": "2020-03-10", + "entries": [ + { + "type": "feature", + "category": "AWS Marketplace Commerce Analytics", + "description": "Change the disbursement data set to look past 31 days instead until the beginning of the month." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "Amazon Transcribe's Automatic Content Redaction feature enables you to automatically redact sensitive personally identifiable information (PII) from transcription results. It replaces each instance of an identified PII utterance with a [PII] tag in the transcript." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Documentation updates for EC2" + }, + { + "type": "feature", + "category": "AWSServerlessApplicationRepository", + "description": "AWS Serverless Application Repository now supports sharing applications privately with AWS Organizations." 
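For the retry-related entries in 2.10.82 above, a client can opt into one of the retry modes through its override configuration. A minimal sketch, assuming the RetryPolicy.forRetryMode factory that accompanied this change (the RetryMode javadoc referenced above is the authoritative source); the same choice can also be made outside of code, for example via the AWS_RETRY_MODE environment variable or the retry_mode profile setting.

```java
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.core.retry.RetryMode;
import software.amazon.awssdk.core.retry.RetryPolicy;
import software.amazon.awssdk.services.s3.S3Client;

public class RetryModeSketch {
    public static void main(String[] args) {
        // Build a retry policy from one of the standardized retry profiles.
        // (forRetryMode is assumed here; see the RetryMode javadoc for the supported modes.)
        RetryPolicy standard = RetryPolicy.forRetryMode(RetryMode.STANDARD);

        S3Client s3 = S3Client.builder()
                .overrideConfiguration(ClientOverrideConfiguration.builder()
                        .retryPolicy(standard)
                        .build())
                .build();

        System.out.println(s3.listBuckets().buckets().size());
        s3.close();
    }
}
```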
+ }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Reverts a recent change from 2.10.70 where the json protocol type was changed to application/json, this is now back to application/x-amz-json-1.1." + }, + { + "type": "feature", + "category": "AWS IoT Events", + "description": "API update that adds a new parameter, durationExpression, to SetTimerAction, and deprecates seconds" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.84.json b/.changes/2.10.84.json new file mode 100644 index 000000000000..b66000168d94 --- /dev/null +++ b/.changes/2.10.84.json @@ -0,0 +1,16 @@ +{ + "version": "2.10.84", + "date": "2020-03-11", + "entries": [ + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Amazon Redshift now supports operations to pause and resume a cluster on demand or on a schedule." + }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "description": "Documentation updates for elasticfilesystem" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.85.json b/.changes/2.10.85.json new file mode 100644 index 000000000000..f34778978c26 --- /dev/null +++ b/.changes/2.10.85.json @@ -0,0 +1,36 @@ +{ + "version": "2.10.85", + "date": "2020-03-12", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Documentation updates for EC2" + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "As part of this release, we are extending capability of AWS IoT Rules Engine to support IoT Cloudwatch log action. The IoT Cloudwatch log rule action lets you send messages from IoT sensors and applications to Cloudwatch logs for troubleshooting and debugging." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "description": "Amazon Lex now supports tagging for bots, bot aliases and bot channels." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "description": "Amazon API Gateway HTTP APIs is now generally available. HTTP APIs offer the core functionality of REST API at up to 71% lower price compared to REST API, 60% lower p99 latency, and is significantly easier to use. As part of general availability, we added new features to route requests to private backends such as private ALBs, NLBs, and IP/ports. We also brought over a set of features from REST API such as Stage Variables, and Stage/Route level throttling. Custom domain names can also now be used with both REST And HTTP APIs." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "The AWS Security Finding Format is being augmented with the following changes. 21 new resource types without corresponding details objects are added. Another new resource type, AwsS3Object, has an accompanying details object. Severity.Label is a new string field that indicates the severity of a finding. The available values are: INFORMATIONAL, LOW, MEDIUM, HIGH, CRITICAL. The new string field Workflow.Status indicates the status of the investigation into a finding. The available values are: NEW, NOTIFIED, RESOLVED, SUPPRESSED." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.10.86.json b/.changes/2.10.86.json new file mode 100644 index 000000000000..de89348d4a8e --- /dev/null +++ b/.changes/2.10.86.json @@ -0,0 +1,11 @@ +{ + "version": "2.10.86", + "date": "2020-03-13", + "entries": [ + { + "type": "feature", + "category": "Amazon AppConfig", + "description": "This release adds S3 as a configuration source provider." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.87.json b/.changes/2.10.87.json new file mode 100644 index 000000000000..1290e9d73f89 --- /dev/null +++ b/.changes/2.10.87.json @@ -0,0 +1,41 @@ +{ + "version": "2.10.87", + "date": "2020-03-16", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Resource data sync for AWS Systems Manager Inventory now includes destination data sharing. This feature enables you to synchronize inventory data from multiple AWS accounts into a central Amazon S3 bucket. To use this feature, all AWS accounts must be listed in AWS Organizations." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release adds the ability to update the task placement strategy and constraints for Amazon ECS services." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Amazon ElastiCache now supports Global Datastore for Redis. Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. You can create, modify and describe a Global Datastore, as well as add or remove regions from your Global Datastore and promote a region as primary in Global Datastore." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "The enhanced DDB client table schema now supports custom AttributeConverterProviders, and StaticAttribute can take individual AttributeConverter to override default attribute converter behavior." + }, + { + "type": "feature", + "category": "AWS S3 Control", + "description": "Amazon S3 now supports Batch Operations job tagging." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "Additional response field \"CompromisedCredentialsDetected\" added to AdminListUserAuthEvents." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.88.json b/.changes/2.10.88.json new file mode 100644 index 000000000000..018bc306c56b --- /dev/null +++ b/.changes/2.10.88.json @@ -0,0 +1,21 @@ +{ + "version": "2.10.88", + "date": "2020-03-17", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for: AV1 encoding in File Group MP4, DASH and CMAF DASH outputs; PCM/WAV audio output in MPEG2-TS containers; and Opus audio in Webm inputs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix an issue where the signing key is created only once at the start of the request for event streaming requests. 
This causes requests that span two or more days to have signing errors once the date changes because the signing key was derived only once using the date at the beginning of the request." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.89.json b/.changes/2.10.89.json new file mode 100644 index 000000000000..51e74182eb77 --- /dev/null +++ b/.changes/2.10.89.json @@ -0,0 +1,26 @@ +{ + "version": "2.10.89", + "date": "2020-03-18", + "entries": [ + { + "type": "feature", + "category": "Amazon Personalize", + "description": "[Personalize] Adds support for returning hyperparameter values of the best performing model in a HPO job." + }, + { + "type": "feature", + "category": "AWS MediaConnect", + "description": "Feature adds the ability for a flow to have multiple redundant sources that provides resiliency to a source failing. The new APIs added to enable the feature are, AddFlowSources, RemoveFlowSource and UpdateFlow." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Updated the MaxRecords type in DescribeExportTasks to Integer." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.9.json b/.changes/2.10.9.json new file mode 100644 index 000000000000..edfdcbe9a4a3 --- /dev/null +++ b/.changes/2.10.9.json @@ -0,0 +1,16 @@ +{ + "version": "2.10.9", + "date": "2019-11-05", + "entries": [ + { + "type": "feature", + "category": "AWS CodeStar Notifications", + "description": "This release adds a notification manager for events in repositories, build projects, deployments, and pipelines. You can now configure rules and receive notifications about events that occur for resources. Each notification includes a status message as well as a link to the resource (repository, build project, deployment application, or pipeline) whose event generated the notification." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Documentation updates for Amazon RDS" + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.90.json b/.changes/2.10.90.json new file mode 100644 index 000000000000..1bc2885b9927 --- /dev/null +++ b/.changes/2.10.90.json @@ -0,0 +1,16 @@ +{ + "version": "2.10.90", + "date": "2020-03-19", + "entries": [ + { + "type": "feature", + "category": "AWS Outposts", + "description": "Documentation updates for AWS Outposts." + }, + { + "type": "feature", + "category": "AWS Certificate Manager", + "description": "AWS Certificate Manager documentation updated on API calls ImportCertificate and ListCertificate. Specific updates included input constraints, private key size for import and next token size for list." + } + ] +} \ No newline at end of file diff --git a/.changes/2.10.91.json b/.changes/2.10.91.json new file mode 100644 index 000000000000..d9deebd17e5a --- /dev/null +++ b/.changes/2.10.91.json @@ -0,0 +1,16 @@ +{ + "version": "2.10.91", + "date": "2020-03-20", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "Added \"productId\" and \"portfolioId\" to responses from CreateConstraint, UpdateConstraint, ListConstraintsForPortfolio, and DescribeConstraint APIs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.0.json b/.changes/2.11.0.json new file mode 100644 index 000000000000..c7ab334f3017 --- /dev/null +++ b/.changes/2.11.0.json @@ -0,0 +1,31 @@ +{ + "version": "2.11.0", + "date": "2020-03-23", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "Adding new error code IamLimitExceeded for Nodegroups in EKS" + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "description": "Documentation updates to reflect that the default timeout for integrations is now 30 seconds for HTTP APIs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updating dependency version: netty 4.1.42.Final -> 4.1.46.Final (contains the fix for reducing heap usage for netty client)" + }, + { + "type": "feature", + "category": "Amazon Route 53", + "description": "Documentation updates for Route 53." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Bump minor version to '2.11.0-SNAPSHOT' because of [#1692](https://github.com/aws/aws-sdk-java-v2/issues/1692)" + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.1.json b/.changes/2.11.1.json new file mode 100644 index 000000000000..9100677a3b14 --- /dev/null +++ b/.changes/2.11.1.json @@ -0,0 +1,31 @@ +{ + "version": "2.11.1", + "date": "2020-03-24", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "Adding new error codes: Ec2SubnetInvalidConfiguration and NodeCreationFailure for Nodegroups in EKS" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "Introduces actions for giving a member account administrative Organizations permissions for an AWS service. You can run this action only for AWS services that support this feature." + }, + { + "type": "feature", + "category": "AWS RDS DataService", + "description": "Documentation updates for rds-data" + }, + { + "type": "feature", + "category": "Amazon Athena", + "description": "Documentation updates for Athena, including QueryExecutionStatus QUEUED and RUNNING states. QUEUED now indicates that the query has been submitted to the service. RUNNING indicates that the query is in execution phase." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.10.json b/.changes/2.11.10.json new file mode 100644 index 000000000000..50678182d3f0 --- /dev/null +++ b/.changes/2.11.10.json @@ -0,0 +1,31 @@ +{ + "version": "2.11.10", + "date": "2020-04-06", + "entries": [ + { + "type": "feature", + "category": "Amazon Chime", + "description": "Amazon Chime proxy phone sessions let you provide two users with a shared phone number to communicate via voice or text for up to 12 hours without revealing personal phone numbers. When users call or message the provided phone number, they are connected to the other party and their private phone numbers are replaced with the shared number in Caller ID." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "This release adds support for batch transcription jobs within Amazon Transcribe Medical." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Elastic Beanstalk", + "description": "This release adds a new action, ListPlatformBranches, and updates two actions, ListPlatformVersions and DescribePlatformVersion, to support the concept of Elastic Beanstalk platform branches." + }, + { + "type": "feature", + "category": "AWS Identity and Access Management", + "description": "Documentation updates for AWS Identity and Access Management (IAM)." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.11.json b/.changes/2.11.11.json new file mode 100644 index 000000000000..c95d042380f4 --- /dev/null +++ b/.changes/2.11.11.json @@ -0,0 +1,21 @@ +{ + "version": "2.11.11", + "date": "2020-04-07", + "entries": [ + { + "type": "feature", + "category": "Amazon API Gateway", + "description": "Documentation updates for Amazon API Gateway." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "description": "API updates for CodeGuruReviewer" + }, + { + "type": "feature", + "category": "AWS MediaConnect", + "description": "You can now send content from your MediaConnect flow to your virtual private cloud (VPC) without going over the public internet." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.12.json b/.changes/2.11.12.json new file mode 100644 index 000000000000..3a48687df2cd --- /dev/null +++ b/.changes/2.11.12.json @@ -0,0 +1,46 @@ +{ + "version": "2.11.12", + "date": "2020-04-08", + "entries": [ + { + "type": "feature", + "category": "AWS Migration Hub Config", + "description": "Adding ThrottlingException" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Profiler", + "description": "CodeGuruProfiler adds support for resource based authorization to submit profile data." + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "The OrganizationalUnitIds parameter on StackSet and the OrganizationalUnitId parameter on StackInstance, StackInstanceSummary, and StackSetOperationResultSummary are now reserved for internal use. No data is returned for this parameter." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release provides the ability to include tags in EC2 event notifications." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release provides native support for specifying Amazon EFS file systems as volumes in your Amazon ECS task definitions." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "feature: Chime: This release introduces the ability to tag Amazon Chime SDK meeting resources. You can use tags to organize and identify your resources for cost allocation." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK adds support for queue hopping. Jobs can now hop from their original queue to a specified alternate queue, based on the maximum wait time that you specify in the job settings." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.11.13.json b/.changes/2.11.13.json new file mode 100644 index 000000000000..112e1d2ce94d --- /dev/null +++ b/.changes/2.11.13.json @@ -0,0 +1,81 @@ +{ + "version": "2.11.13", + "date": "2020-04-16", + "entries": [ + { + "type": "feature", + "category": "Amazon Augmented AI Runtime", + "description": "This release updates Amazon Augmented AI ListHumanLoops and StartHumanLoop APIs." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert now allows you to specify your input captions frame rate for SCC captions sources." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Added a new BatchUpdateFindings action, which allows customers to update selected information about their findings. Security Hub customers use BatchUpdateFindings to track their investigation into a finding. BatchUpdateFindings is intended to replace the UpdateFindings action, which is deprecated." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon EC2 now supports adding AWS resource tags for placement groups and key pairs, at creation time. The CreatePlacementGroup API will now return placement group information when created successfully. The DeleteKeyPair API now supports deletion by resource ID." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "This release adds support for querying GetUserDefinedFunctions API without databaseName." + }, + { + "type": "feature", + "category": "AWS MediaTailor", + "description": "AWS Elemental MediaTailor SDK now allows configuration of Avail Suppression." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release adds support for Amazon RDS Proxy with PostgreSQL compatibility." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "This release includes support for additional OS Versions within EC2 Image Builder." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Sample code for AWS Lambda operations" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Amazon SageMaker now supports running training jobs on ml.g4dn and ml.c5n instance types. Amazon SageMaker supports in \"IN\" operation for Search now." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Mark a connection as unreusable if there was a 5xx server error so that a new request will establish a new connection." + }, + { + "type": "feature", + "category": "Amazon Import/Export Snowball", + "description": "An update to the Snowball Edge Storage Optimized device has been launched. Like the previous version, it has 80 TB of capacity for data transfer. Now it has 40 vCPUs, 80 GiB, and a 1 TiB SATA SSD of memory for EC2 compatible compute. The 80 TB of capacity can also be used for EBS-like volumes for AMIs." + }, + { + "type": "feature", + "category": "AWS Migration Hub", + "description": "Adding ThrottlingException" + }, + { + "type": "feature", + "category": "AWS IoT Events", + "description": "API update that allows users to customize event action payloads, and adds support for Amazon DynamoDB actions." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.11.14.json b/.changes/2.11.14.json new file mode 100644 index 000000000000..abeb535e8253 --- /dev/null +++ b/.changes/2.11.14.json @@ -0,0 +1,21 @@ +{ + "version": "2.11.14", + "date": "2020-04-17", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS OpsWorks CM", + "description": "Documentation updates for opsworkscm" + }, + { + "type": "feature", + "category": "Amazon Fraud Detector", + "description": "Added support for a new rule engine execution mode. Customers will be able to configure their detector versions to evaluate all rules and return outcomes from all 'matched' rules in the GetPrediction API response. Added support for deleting Detectors (DeleteDetector) and Rule Versions (DeleteRuleVersion)." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.2.json b/.changes/2.11.2.json new file mode 100644 index 000000000000..93008d01a6b2 --- /dev/null +++ b/.changes/2.11.2.json @@ -0,0 +1,76 @@ +{ + "version": "2.11.2", + "date": "2020-03-25", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Reduced the number of times the profile file configuration is read from disk on client creation from 3-5 to 1." + }, + { + "type": "feature", + "category": "AWS X-Ray", + "description": "GetTraceSummaries - Now provides additional root cause attribute ClientImpacting which indicates whether root cause impacted trace client." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "description": "Adding support for customer packages (dictionary files) to Amazon Elasticsearch Service" + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB", + "description": "Fixed an issue that could cause a null-pointer-exception when using anonymous credentials with endpoint discovery enabled." + }, + { + "type": "feature", + "category": "Amazon Managed Blockchain", + "description": "Amazon Managed Blockchain now has support to publish Hyperledger Fabric peer node, chaincode, and certificate authority (CA) logs to Amazon CloudWatch Logs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client [Preview]", + "description": "Performance improvements." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "Customers can now receive Savings Plans recommendations at the member (linked) account level." + }, + { + "type": "feature", + "category": "Amazon Detective", + "description": "The new ACCEPTED_BUT_DISABLED member account status indicates that a member account that accepted the invitation is blocked from contributing data to the behavior graph. The reason is provided in the new DISABLED_REASON property. The new StartMonitoringMember operation enables a blocked member account." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Application Insights", + "description": "Amazon CloudWatch Application Insights for .NET and SQL Server now integrates with Amazon CloudWatch Events (AWS CodeDeploy, AWS Health and Amazon EC2 state changes). This feature enables customers to view events related to problems detected by CloudWatch Application Insights, and reduce mean-time-to-resolution (MTTR)." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Added a `defaultProfileFile` and `defaultProfileName` option to the client override configuration. Setting this configuration value is equivalent to setting the environment or system properties for the profile file and profile name. Specifically, it sets the default profile file and profile name used by the client." + }, + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Fixed a bug where explicitly disabling use-arn-region on S3Configuration would have lower priority than the environment variable, system property or profile property." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB", + "description": "Fixed an issue where endpoint discovery configuration specified in the profile file was being ignored." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "description": "When endpoint discovery is enabled, the endpoint discovery process is now initialized with the first request, instead of 60 seconds after the first request." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.3.json b/.changes/2.11.3.json new file mode 100644 index 000000000000..615edf7e4152 --- /dev/null +++ b/.changes/2.11.3.json @@ -0,0 +1,26 @@ +{ + "version": "2.11.3", + "date": "2020-03-26", + "entries": [ + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "This release updates Amazon Augmented AI CreateFlowDefinition API and DescribeFlowDefinition response." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Security Hub has now made it easier to opt out of default standards when you enable Security Hub. We added a new Boolean parameter to EnableSecurityHub called EnableDefaultStandards. If that parameter is true, Security Hub's default standards are enabled. A new Boolean parameter for standards, EnabledByDefault, indicates whether a standard is a default standard. Today, the only default standard is CIS AWS Foundations Benchmark v1.2. Additional default standards will be added in the future.To learn more, visit our documentation on the EnableSecurityHub API action." + }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "This release includes two changes: a new lower-cost, storage type called HDD (Hard Disk Drive), and a new generation of the Single-AZ deployment type called Single AZ 2. The HDD storage type can be selected on Multi AZ 1 and Single AZ 2 deployment types." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.4.json b/.changes/2.11.4.json new file mode 100644 index 000000000000..dbab31a8ff86 --- /dev/null +++ b/.changes/2.11.4.json @@ -0,0 +1,31 @@ +{ + "version": "2.11.4", + "date": "2020-03-27", + "entries": [ + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "Added \"LocalRoleName\" as an acceptable Parameter for Launch type in CreateConstraint and UpdateConstraint APIs" + }, + { + "type": "feature", + "category": "AWS Global Accelerator", + "description": "This update adds an event history to the ListByoipCidr API call. This enables you to see the changes that you've made for an IP address range that you bring to AWS Global Accelerator through bring your own IP address (BYOIP)." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
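To put the `defaultProfileFile`/`defaultProfileName` entry in 2.11.2 above in context: the override configuration can point a single client at a specific profile file and profile name, equivalent to setting the corresponding environment variables or system properties. A minimal sketch; the config file path and profile name are placeholders.

```java
import java.nio.file.Paths;
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.profiles.ProfileFile;
import software.amazon.awssdk.services.s3.S3Client;

public class DefaultProfileOverrideSketch {
    public static void main(String[] args) {
        // Load a profile file from a non-standard location and make it this
        // client's default, as if AWS_CONFIG_FILE / AWS_PROFILE had been set.
        ProfileFile profileFile = ProfileFile.builder()
                .content(Paths.get("/opt/app/aws-config"))   // illustrative path
                .type(ProfileFile.Type.CONFIGURATION)
                .build();

        S3Client s3 = S3Client.builder()
                .overrideConfiguration(ClientOverrideConfiguration.builder()
                        .defaultProfileFile(profileFile)
                        .defaultProfileName("batch-jobs")     // illustrative profile name
                        .build())
                .build();

        System.out.println(s3.listBuckets().buckets());
        s3.close();
    }
}
```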
+ }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "The Amazon Kendra Microsoft SharePoint data source now supports include and exclude regular expressions and change log features. Include and exclude regular expressions enable you to provide a list of regular expressions to match the display URL of SharePoint documents to either include or exclude documents respectively. When you enable the changelog feature it enables Amazon Kendra to use the SharePoint change log to determine which documents to update in the index." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Remove the `LimitExceededException` as a throttling error as it seems many services don't treat it as a throttling error." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.5.json b/.changes/2.11.5.json new file mode 100644 index 000000000000..b24f9e69994d --- /dev/null +++ b/.changes/2.11.5.json @@ -0,0 +1,16 @@ +{ + "version": "2.11.5", + "date": "2020-03-30", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Access Analyzer", + "description": "This release adds support for the creation and management of IAM Access Analyzer analyzers with type organization. An analyzer with type organization continuously monitors all supported resources within the AWS organization and reports findings when they allow access from outside the organization." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.6.json b/.changes/2.11.6.json new file mode 100644 index 000000000000..41dd8a73c063 --- /dev/null +++ b/.changes/2.11.6.json @@ -0,0 +1,76 @@ +{ + "version": "2.11.6", + "date": "2020-03-31", + "entries": [ + { + "type": "feature", + "category": "AWS Organizations", + "description": "Documentation updates for AWS Organizations" + }, + { + "type": "feature", + "category": "AWS OpsWorks CM", + "description": "Documentation updates for OpsWorks-CM CreateServer values." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Add two enums for MongoDB connection: Added \"CONNECTION_URL\" to \"ConnectionPropertyKey\" and added \"MONGODB\" to \"ConnectionType\"" + }, + { + "type": "feature", + "category": "Amazon Elastic Inference", + "description": "This release includes improvements for the Amazon Elastic Inference service." + }, + { + "type": "feature", + "category": "Amazon Detective", + "description": "Removing the notes that Detective is in preview, in preparation for the Detective GA release." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "description": "Added support for AWS Firewall Manager for WAFv2 and PermissionPolicy APIs for WAFv2." + }, + { + "type": "feature", + "category": "Amazon AppConfig", + "description": "This release adds an event log to deployments. In the case of a deployment rollback, the event log details the rollback reason." + }, + { + "type": "feature", + "category": "AWS Elemental MediaStore", + "description": "This release adds support for CloudWatch Metrics. You can now set a policy on your container to dictate which metrics MediaStore sends to CloudWatch." + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "This release adds DeleteProject and DeleteProjectVersion APIs to Amazon Rekognition Custom Labels." 
+ }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Adding audit logging support for SMB File Shares" + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "This release of the Amazon Pinpoint API introduces MMS support for SMS messages." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "AWS Lambda now supports .NET Core 3.1" + }, + { + "type": "feature", + "category": "Firewall Management Service", + "description": "This release contains FMS wafv2 support." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.7.json b/.changes/2.11.7.json new file mode 100644 index 000000000000..61f57b57194d --- /dev/null +++ b/.changes/2.11.7.json @@ -0,0 +1,21 @@ +{ + "version": "2.11.7", + "date": "2020-04-01", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS MediaConnect", + "description": "You can now send content from your virtual private cloud (VPC) to your MediaConnect flow without going over the public internet." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "This release introduces Dimensions for AWS IoT Device Defender. Dimensions can be used in Security Profiles to collect and monitor fine-grained metrics." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.8.json b/.changes/2.11.8.json new file mode 100644 index 000000000000..edde3d1b2916 --- /dev/null +++ b/.changes/2.11.8.json @@ -0,0 +1,36 @@ +{ + "version": "2.11.8", + "date": "2020-04-02", + "entries": [ + { + "type": "feature", + "category": "Amazon GameLift", + "description": "Public preview of GameLift FleetIQ as a standalone feature. GameLift FleetIQ makes it possible to use low-cost Spot instances by limiting the chance of interruptions affecting game sessions. FleetIQ is a feature of the managed GameLift service, and can now be used with game hosting in EC2 Auto Scaling groups that you manage in your own account." + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Documentation updates for redshift" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports Automatic Input Failover. This feature provides resiliency upstream of the channel, before ingest starts." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Documentation updates for RDS: creating read replicas is now supported for SQL Server DB instances" + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "description": "Amazon CloudWatch Contributor Insights adds support for tags and tagging on resource creation." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.9.json b/.changes/2.11.9.json new file mode 100644 index 000000000000..fda913d4e111 --- /dev/null +++ b/.changes/2.11.9.json @@ -0,0 +1,26 @@ +{ + "version": "2.11.9", + "date": "2020-04-03", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "AWS RoboMaker", + "description": "Added support for limiting simulation unit usage, giving more predictable control over simulation cost" + }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "description": "Amazon Personalize: Add new response field \"score\" to each item returned by GetRecommendations and GetPersonalizedRanking (HRNN-based recipes only)" + }, + { + "type": "feature", + "category": "AWS S3", + "description": "Allow DefaultS3Presigner.Builder to take a custom S3Configuration" + } + ] +} \ No newline at end of file diff --git a/.changes/2.12.0.json b/.changes/2.12.0.json new file mode 100644 index 000000000000..a9af6d6cccc5 --- /dev/null +++ b/.changes/2.12.0.json @@ -0,0 +1,41 @@ +{ + "version": "2.12.0", + "date": "2020-04-20", + "entries": [ + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "Cost Categories API is now Generally Available with new dimensions and operations support. You can map costs by account name, service, and charge type dimensions as well as use contains, starts with, and ends with operations. Cost Categories can also be used in RI and SP coverage reports." + }, + { + "type": "feature", + "category": "AWS IoT Events", + "description": "API update that allows users to add AWS IoT SiteWise actions while creating a Detector Model in AWS IoT Events" + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "description": "You can now export an OpenAPI 3.0 compliant API definition file for Amazon API Gateway HTTP APIs using the Export API." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client", + "description": "The Amazon DynamoDB Enhanced Client is now generally available and provides a natural and intuitive interface for developers to integrate their applications with Amazon DynamoDB by means of an adaptive API that will map inputs and results to and from Java objects modeled by the application, rather than requiring the developers to implement that transformation themselves." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Synthetics", + "description": "Introducing CloudWatch Synthetics. This is the first public release of CloudWatch Synthetics." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Added a new ConnectionType \"KAFKA\" and a ConnectionProperty \"KAFKA_BOOTSTRAP_SERVERS\" to support Kafka connection." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.0.json b/.changes/2.13.0.json new file mode 100644 index 000000000000..905b2c86afc3 --- /dev/null +++ b/.changes/2.13.0.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.0", + "date": "2020-04-21", + "entries": [ + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "Cost Explorer Rightsizing Recommendations integrates with Compute Optimizer and begins offering across instance family rightsizing recommendations, adding to existing support for within instance family rightsizing recommendations." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updating dependency version: Jackson 2.10.0 -> 2.10.3, Jackson-annotations 2.9.0 -> 2.10.0." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata."
+ }, + { + "type": "feature", + "category": "Amazon Route 53 Domains", + "description": "You can now programmatically transfer domains between AWS accounts without having to contact AWS Support" + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "AWS GuardDuty now supports using AWS Organizations delegated administrators to create and manage GuardDuty master and member accounts. The feature also allows GuardDuty to be automatically enabled on associated organization accounts." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Bump minor version to '2.13.0-SNAPSHOT' because of the Jackson version upgrade." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Amazon EMR adds support for configuring a managed scaling policy for an Amazon EMR cluster. This enables automatic resizing of a cluster to optimize for job execution speed and reduced cluster cost." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.1.json b/.changes/2.13.1.json new file mode 100644 index 000000000000..2e9062c21aa5 --- /dev/null +++ b/.changes/2.13.1.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.1", + "date": "2020-04-22", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "description": "Add support for code review and recommendation feedback APIs." + }, + { + "type": "feature", + "category": "Firewall Management Service", + "description": "This release is to support AWS Firewall Manager policy with Organizational Unit scope." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "description": "This change adds a new field 'OptionalDeployment' to ServiceSoftwareOptions to indicate whether a service software update is optional or mandatory. If True, it indicates that the update is optional, and the service software is not automatically updated. If False, the service software is automatically updated after AutomatedUpdateDate." + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "description": "Adding ServiceUnavailableException as one of the expected exceptions" + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Amazon Redshift support for usage limits" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.10.json b/.changes/2.13.10.json new file mode 100644 index 000000000000..5a3ec49b5bb3 --- /dev/null +++ b/.changes/2.13.10.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.10", + "date": "2020-05-05", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updating dependency version: Jackson 2.10.3 -> 2.10.4, and combine dependency Jackson-annotations with Jackson." + }, + { + "type": "feature", + "category": "AWS Support", + "description": "Documentation updates for support" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "With this release, you can call ModifySubnetAttribute with two new parameters: MapCustomerOwnedIpOnLaunch and CustomerOwnedIpv4Pool, to map a customerOwnedIpv4Pool to a subnet. You will also see these two new fields in the DescribeSubnets response. If your subnet has a customerOwnedIpv4Pool mapped, your network interface will get an auto assigned customerOwnedIpv4 address when placed onto an instance."
+ }, + { + "type": "bugfix", + "category": "Amazon DynamoDB", + "description": "Tweaked the javadocs for Get/Update, since they were previously wrongly copied over from Delete and mentioned the \"delete operation\"." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "AWS Systems Manager Parameter Store launches new data type to support aliases in EC2 APIs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.11.json b/.changes/2.13.11.json new file mode 100644 index 000000000000..449231d9b4ec --- /dev/null +++ b/.changes/2.13.11.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.11", + "date": "2020-05-06", + "entries": [ + { + "type": "feature", + "category": "AWS CodeStar connections", + "description": "Added support for tagging resources in AWS CodeStar Connections" + }, + { + "type": "feature", + "category": "AWS Comprehend Medical", + "description": "New Batch Ontology APIs for ICD-10 and RxNorm will provide batch capability of linking the information extracted by Comprehend Medical to medical ontologies. The new ontology linking APIs make it easy to detect medications and medical conditions in unstructured clinical text and link them to RxNorm and ICD-10-CM codes respectively. This new feature can help you reduce the cost, time and effort of processing large amounts of unstructured medical text with high accuracy." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.12.json b/.changes/2.13.12.json new file mode 100644 index 000000000000..7b2c157f292c --- /dev/null +++ b/.changes/2.13.12.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.12", + "date": "2020-05-07", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "description": "Amazon CloudWatch Logs now offers the ability to interact with Logs Insights queries via the new PutQueryDefinition, DescribeQueryDefinitions, and DeleteQueryDefinition APIs." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "Add COMMIT_MESSAGE enum for webhook filter types" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon EC2 now adds warnings to identify issues when creating a launch template or launch template version." + }, + { + "type": "feature", + "category": "Amazon Route 53", + "description": "Amazon Route 53 now supports the EU (Milan) Region (eu-south-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "description": "This release adds support for the following options in instance public ports: Specify source IP addresses, specify ICMP protocol like PING, and enable/disable the Lightsail browser-based SSH and RDP clients' access to your instance." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This Patch Manager release supports creating patch baselines for Oracle Linux and Debian" + }, + { + "type": "feature", + "category": "Amazon AppConfig", + "description": "The description of the AWS AppConfig GetConfiguration API action was amended to include important information about calling ClientConfigurationVersion when you configure clients to call GetConfiguration."
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.13.json b/.changes/2.13.13.json new file mode 100644 index 000000000000..c073c66f9fe6 --- /dev/null +++ b/.changes/2.13.13.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.13", + "date": "2020-05-08", + "entries": [ + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "This release adds a new parameter (EnableInterContainerTrafficEncryption) to CreateProcessingJob API to allow for enabling inter-container traffic encryption on processing jobs." + }, + { + "type": "feature", + "category": "AWS Resource Groups Tagging API", + "description": "Documentation updates for resourcegroupstaggingapi" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "A helpful error message is now raised when an obviously-invalid region name is given to the SDK, instead of the previous NullPointerException. Fixes [#1642](https://github.com/aws/aws-sdk-java-v2/issues/1642)." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "Documentation updates for GuardDuty" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.14.json b/.changes/2.13.14.json new file mode 100644 index 000000000000..5f3cee6f8d07 --- /dev/null +++ b/.changes/2.13.14.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.14", + "date": "2020-05-11", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "M6g instances are our next-generation general purpose instances powered by AWS Graviton2 processors" + }, + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "description": "Add Bitbucket integration APIs" + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "Amazon Kendra is now generally available. As part of general availability, we are launching Metrics for query & storage utilization" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.15.json b/.changes/2.13.15.json new file mode 100644 index 000000000000..7d4bc3e225d2 --- /dev/null +++ b/.changes/2.13.15.json @@ -0,0 +1,16 @@ +{ + "version": "2.13.15", + "date": "2020-05-12", + "entries": [ + { + "type": "feature", + "category": "Amazon WorkMail", + "description": "Minor API fixes and updates to the documentation." + }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "description": "Documentation updates for iot-bifrost" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.16.json b/.changes/2.13.16.json new file mode 100644 index 000000000000..70cf75d0eb56 --- /dev/null +++ b/.changes/2.13.16.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.16", + "date": "2020-05-13", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix a race condition in `FileAsyncResponseTransformer` where the future fails to complete when the onComplete event is dispatched on the same thread that executed the request" + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release introduces a new major version of the Amazon Macie API. You can use this version of the API to develop tools and applications that interact with the new Amazon Macie."
+ }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Amazon ElastiCache now supports auto-update of ElastiCache clusters after the \"recommended apply by date\" of service update has passed. ElastiCache will use your maintenance window to schedule the auto-update of applicable clusters. For more information, see https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Self-Service-Updates.html and https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Self-Service-Updates.html" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.17.json b/.changes/2.13.17.json new file mode 100644 index 000000000000..3813e0448731 --- /dev/null +++ b/.changes/2.13.17.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.17", + "date": "2020-05-14", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Support event streams that are shared between two operations." + }, + { + "type": "feature", + "category": "Amazon RDS", + "description": "Add SourceRegion to CopyDBClusterSnapshot and CreateDBCluster operations. As with CopyDBSnapshot and CreateDBInstanceReadReplica, specifying this field will automatically populate the PresignedURL field with a valid value." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon EC2 now supports adding AWS resource tags for associations between VPCs and local gateways, at creation time." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix generation for operations that share an output shape." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Expose the `extendedRequestId` from `SdkServiceException`, so it can be provided to AWS Support to investigate issues." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix unmarshalling of events when the structure member name and shape name do not match." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "This release adds a new parameter (SupportedOsVersions) to the Components API. This parameter lists the OS versions supported by a component." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.18.json b/.changes/2.13.18.json new file mode 100644 index 000000000000..467682fdac88 --- /dev/null +++ b/.changes/2.13.18.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.18", + "date": "2020-05-15", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Allow event structures to be used as operation outputs outside of streaming contexts." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Starting today, you can stop the execution of Glue workflows that are running. AWS Glue workflows are directed acyclic graphs (DAGs) of Glue triggers, crawlers and jobs. Using a workflow, you can design a complex multi-job extract, transform, and load (ETL) activity that AWS Glue can execute and track as a single entity." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "This release adds support for specifying an image manifest media type when pushing a manifest to Amazon ECR."
+ }, + { + "type": "feature", + "category": "AWS Security Token Service", + "description": "API updates for STS" + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "This release adds support for the following features: 1. DescribeType and ListTypeVersions APIs now output a field IsDefaultVersion, indicating if a version is the default version for its type; 2. Add StackRollbackComplete waiter feature to wait until stack status is UPDATE_ROLLBACK_COMPLETE; 3. Add paginators in DescribeAccountLimits, ListChangeSets, ListStackInstances, ListStackSetOperationResults, ListStackSetOperations, ListStackSets APIs." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix generation for services that contain operations with the same name as the service." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.19.json b/.changes/2.13.19.json new file mode 100644 index 000000000000..af01c484cf54 --- /dev/null +++ b/.changes/2.13.19.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.19", + "date": "2020-05-18", + "entries": [ + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "Documentation updates for Amazon Macie" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "description": "Documentation updates for dynamodb" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release adds support for specifying environment files to add environment variables to your containers." + }, + { + "type": "feature", + "category": "Amazon QLDB", + "description": "Amazon QLDB now supports Amazon Kinesis data streams. You can now emit QLDB journal data, via the new QLDB Streams feature, directly to Amazon Kinesis supporting event processing and analytics among related use cases." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release changes the RunInstances CLI and SDK's so that if you do not specify a client token, a randomly generated token is used for the request to ensure idempotency." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "Amazon Chime now supports redacting chat messages." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.2.json b/.changes/2.13.2.json new file mode 100644 index 000000000000..5ff3d4a2259b --- /dev/null +++ b/.changes/2.13.2.json @@ -0,0 +1,51 @@ +{ + "version": "2.13.2", + "date": "2020-04-23", + "entries": [ + { + "type": "feature", + "category": "AWS Transfer Family", + "description": "This release adds support for transfers over FTPS and FTP in and out of Amazon S3, which makes it easy to migrate File Transfer Protocol over SSL (FTPS) and FTP workloads to AWS, in addition to the existing support for Secure File Transfer Protocol (SFTP)." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Added AutomaticTapeCreation APIs" + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "This release of the Amazon Pinpoint API enhances support for sending campaigns through custom channels to locations such as AWS Lambda functions or web applications. Campaigns can now use CustomDeliveryConfiguration and CampaignCustomMessage to configure custom channel settings for a campaign." 
+ }, + { + "type": "feature", + "category": "AWS Resource Access Manager", + "description": "AWS Resource Access Manager (RAM) provides a new ListResourceTypes action. This action lets you list the resource types that can be shared using AWS RAM." + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage VOD", + "description": "Adds tagging support for PackagingGroups, PackagingConfigurations, and Assets" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Adds support for AWS Local Zones, including a new optional parameter AvailabilityZoneGroup for the DescribeOrderableDBInstanceOptions operation." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "This release supports Auto Scaling in Amazon Keyspaces for Apache Cassandra." + }, + { + "type": "feature", + "category": "Amazon Kinesis Firehose", + "description": "You can now deliver streaming data to an Amazon Elasticsearch Service domain in an Amazon VPC. You can now compress streaming data delivered to S3 using Hadoop-Snappy in addition to Gzip, Zip and Snappy formats." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.20.json b/.changes/2.13.20.json new file mode 100644 index 000000000000..92ec01704b9c --- /dev/null +++ b/.changes/2.13.20.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.20", + "date": "2020-05-19", + "entries": [ + { + "type": "feature", + "category": "AWS Health APIs and Notifications", + "description": "Feature: Health: AWS Health added a new field to differentiate Public events from Account-Specific events in the API request and response. Visit https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html to learn more." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Endpoint discovery is now enabled by default for future services that will require it. A new method 'endpointDiscoveryEnabled' has been added to client builders that support endpoint discovery allowing a true or false value to be set. 'enableEndpointDiscovery' has been deprecated on the client builders as it is now superseded by 'endpointDiscoveryEnabled'." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "You can now receive Voice Connector call events through SNS or SQS." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds support for Federated Authentication via SAML-2.0 in AWS ClientVPN." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "Documentation updates for Amazon Transcribe." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.21.json b/.changes/2.13.21.json new file mode 100644 index 000000000000..aad080c3af8d --- /dev/null +++ b/.changes/2.13.21.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.21", + "date": "2020-05-20", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports the ability to ingest the content that is streaming from an AWS Elemental Link device: https://aws.amazon.com/medialive/features/link/. This release also adds support for SMPTE-2038 and input state waiters." 
+ }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "For findings related to controls, the finding information now includes the reason behind the current status of the control. A new field for the findings original severity allows finding providers to use the severity values from the system they use to assign severity." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "Amazon Chime enterprise account administrators can now set custom retention policies on chat data in the Amazon Chime application." + }, + { + "type": "feature", + "category": "AWS CodeDeploy", + "description": "Amazon ECS customers using application and network load balancers can use CodeDeploy BlueGreen hook to invoke a CloudFormation stack update. With this update you can view CloudFormation deployment and target details via existing APIs and use your stack Id to list or delete all deployments associated with the stack." + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "description": "This release adds support for vocabulary filtering in streaming with which you can filter unwanted words from the real-time transcription results. Visit https://docs.aws.amazon.com/transcribe/latest/dg/how-it-works.html to learn more." + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "List APIs for all resources now contain additional information: when a resource was created, last updated, and its current version number." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "Documentation updates for Application Auto Scaling" + }, + { + "type": "feature", + "category": "AWS Backup", + "description": "This release allows customers to enable or disable AWS Backup support for an AWS resource type. This release also includes new APIs, update-region-settings and describe-region-settings, which can be used to opt in to a specific resource type. For all current AWS Backup customers, the default settings enable support for EBS, EC2, StorageGateway, EFS, DDB and RDS resource types." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.22.json b/.changes/2.13.22.json new file mode 100644 index 000000000000..0d686bfd92d4 --- /dev/null +++ b/.changes/2.13.22.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.22", + "date": "2020-05-21", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed an issue where a service returning an unknown response event type would cause a failure." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Synthetics", + "description": "AWS CloudWatch Synthetics now supports configuration of allocated memory for a canary." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "CodeBuild adds support for tagging with report groups" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "From this release onwards ProvisionByoipCidr publicly supports IPv6. Updated ProvisionByoipCidr API to support tags for public IPv4 and IPv6 pools. Added NetworkBorderGroup to the DescribePublicIpv4Pools response." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Deprecates unusable input members bound to Content-MD5 header. Updates example and documentation." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.23.json b/.changes/2.13.23.json new file mode 100644 index 000000000000..76f107963e0a --- /dev/null +++ b/.changes/2.13.23.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.23", + "date": "2020-05-22", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "description": "This release adds support for the standard deviation auto-computed aggregate and improved support for portal logo images in SiteWise." + }, + { + "type": "feature", + "category": "Auto Scaling", + "description": "Documentation updates for Amazon EC2 Auto Scaling" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.24.json b/.changes/2.13.24.json new file mode 100644 index 000000000000..347220932f55 --- /dev/null +++ b/.changes/2.13.24.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.24", + "date": "2020-05-26", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "ebsOptimizedInfo, efaSupported and supportedVirtualizationTypes added to DescribeInstanceTypes API" + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "Allowing cron expression in the DLM policy creation schedule." + }, + { + "type": "feature", + "category": "Amazon Macie", + "description": "This is a documentation-only update to the Amazon Macie Classic API. This update corrects out-of-date references to the service name." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Amazon ElastiCache now allows you to use resource based policies to manage access to operations performed on ElastiCache resources. Also, Amazon ElastiCache now exposes ARN (Amazon Resource Names) for ElastiCache resources such as Cache Clusters and Parameter Groups. ARNs can be used to apply IAM policies to ElastiCache resources." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Add DataSetArns to QuickSight DescribeDashboard API response." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "The AWS Systems Manager GetOpsSummary API action now supports multiple OpsResultAttributes in the request. Currently, this feature only supports OpsResultAttributes with the following TypeNames: [AWS:EC2InstanceComputeOptimizer] or [AWS:EC2InstanceInformation, AWS:EC2InstanceComputeOptimizer]. These TypeNames can be used along with either or both of the following: [AWS:EC2InstanceRecommendation, AWS:RecommendationSource]" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.25.json b/.changes/2.13.25.json new file mode 100644 index 000000000000..73ef7b4eecbb --- /dev/null +++ b/.changes/2.13.25.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.25", + "date": "2020-05-27", + "entries": [ + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Check the `x-amz-content-range` header for `GetObject` responses when the `Content-Range` header is not returned by the service. Fixes [#1209](https://github.com/aws/aws-sdk-java-v2/issues/1209)." 
+ }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "This release added support for HTTP/2 ALPN preference lists for Network Load Balancers" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "Documentation updates for GuardDuty" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.26.json b/.changes/2.13.26.json new file mode 100644 index 000000000000..cd65b21304ed --- /dev/null +++ b/.changes/2.13.26.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.26", + "date": "2020-05-28", + "entries": [ + { + "type": "feature", + "category": "Amazon QLDB Session", + "description": "Documentation updates for Amazon QLDB Session" + }, + { + "type": "feature", + "category": "Amazon WorkMail", + "description": "This release adds support for Amazon WorkMail organization-level retention policies." + }, + { + "type": "feature", + "category": "AWS Marketplace Catalog Service", + "description": "AWS Marketplace Catalog now supports accessing initial change payloads with DescribeChangeSet operation." + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "New APIs for upgrading the Apache Kafka version of a cluster and to find out compatible upgrade paths" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.27.json b/.changes/2.13.27.json new file mode 100644 index 000000000000..f546be4f0bf5 --- /dev/null +++ b/.changes/2.13.27.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.27", + "date": "2020-06-01", + "entries": [ + { + "type": "feature", + "category": "AWS Key Management Service", + "description": "AWS Key Management Service (AWS KMS): If the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext APIs are called on a CMK in a custom key store (origin == AWS_CLOUDHSM), they return an UnsupportedOperationException. If a call to UpdateAlias causes a customer to exceed the Alias resource quota, the UpdateAlias API returns a LimitExceededException." + }, + { + "type": "feature", + "category": "Amazon Athena", + "description": "This release adds support for connecting Athena to your own Apache Hive Metastores in addition to the AWS Glue Data Catalog. For more information, please see https://docs.aws.amazon.com/athena/latest/ug/connect-to-data-source-hive.html" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Amazon EMR now supports encrypting log files with AWS Key Management Service (KMS) customer managed keys." + }, + { + "type": "feature", + "category": "AWS Maven Lambda Archetype", + "description": "Updated the `archetype-lambda` to generate SDK client that uses region from environment variable." + }, + { + "type": "feature", + "category": "Amazon WorkLink", + "description": "Amazon WorkLink now supports resource tagging for fleets." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "We are releasing HumanTaskUiArn as a new parameter in CreateLabelingJob and RenderUiTemplate which can take an ARN for a system managed UI to render a task." 
+ }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "New capabilities to update storage capacity and throughput capacity of your file systems, providing the flexibility to grow file storage and to scale up or down the available performance as needed to meet evolving storage needs over time." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.28.json b/.changes/2.13.28.json new file mode 100644 index 000000000000..5d785c1b88dd --- /dev/null +++ b/.changes/2.13.28.json @@ -0,0 +1,11 @@ +{ + "version": "2.13.28", + "date": "2020-06-02", + "entries": [ + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "Amazon GuardDuty findings now include S3 bucket details under the resource section if an S3 Bucket was one of the affected resources" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.29.json b/.changes/2.13.29.json new file mode 100644 index 000000000000..46f23d0e5331 --- /dev/null +++ b/.changes/2.13.29.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.29", + "date": "2020-06-03", + "entries": [ + { + "type": "feature", + "category": "AWS Identity and Access Management", + "description": "GenerateServiceLastAccessedDetails will now return ActionLastAccessed details for certain S3 control plane actions" + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "description": "Amazon Elasticsearch Service now offers support for cross-cluster search, enabling you to perform searches, aggregations, and visualizations across multiple Amazon Elasticsearch Service domains with a single query or from a single Kibana interface. New feature includes the ability to setup connection, required to perform cross-cluster search, between domains using an approval workflow." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Adding databaseName in the response for GetUserDefinedFunctions() API." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for the encoding of VP8 or VP9 video in WebM container with Vorbis or Opus audio." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Direct Connect", + "description": "This release supports the virtual interface failover test, which allows you to verify that traffic routes over redundant virtual interfaces when you bring your primary virtual interface out of service." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "This release improves the Multi-AZ feature in ElastiCache by adding a separate flag and proper validations." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.3.json b/.changes/2.13.3.json new file mode 100644 index 000000000000..d2a6ac7ea768 --- /dev/null +++ b/.changes/2.13.3.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.3", + "date": "2020-04-24", + "entries": [ + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "Enable 1hour frequency in the schedule creation for Data LifeCycle Manager." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "This release adds a new exception type to the AWS IoT SetV2LoggingLevel API." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Inference", + "description": "This feature allows customers to describe the accelerator types and offerings on any region where Elastic Inference is available." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed bean-style setter names on serializable builders to match bean-style getter names." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.30.json b/.changes/2.13.30.json new file mode 100644 index 000000000000..24c4bca4a469 --- /dev/null +++ b/.changes/2.13.30.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.30", + "date": "2020-06-04", + "entries": [ + { + "type": "feature", + "category": "AWSMarketplace Metering", + "description": "Documentation updates for meteringmarketplace" + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage VOD", + "description": "You can now restrict direct access to AWS Elemental MediaPackage by securing requests for VOD content using CDN authorization. With CDN authorization, content requests require a specific HTTP header and authorization code." + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "description": "This release adds the BurstCapacityPercentage and BurstCapacityTime instance metrics, which allow you to track the burst capacity available to your instance." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "New C5a instances, the latest generation of EC2's compute-optimized instances featuring AMD's 2nd Generation EPYC processors. C5a instances offer up to 96 vCPUs, 192 GiB of instance memory, 20 Gbps in Network bandwidth; New G4dn.metal bare metal instance with 8 NVIDIA T4 GPUs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "SSM State Manager support for executing an association only at specified CRON schedule after creating/updating an association." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.31.json b/.changes/2.13.31.json new file mode 100644 index 000000000000..5d5cabdf1b83 --- /dev/null +++ b/.changes/2.13.31.json @@ -0,0 +1,51 @@ +{ + "version": "2.13.31", + "date": "2020-06-05", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Amazon CloudFront adds support for configurable origin connection attempts and origin connection timeout." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "This release adds support for DescribeProduct and DescribeProductAsAdmin by product name, DescribeProvisioningArtifact by product name or provisioning artifact name, returning launch paths as part of DescribeProduct output and adds maximum length for provisioning artifact name and provisioning artifact description." + }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "description": "[Personalize] Adds ability to apply filter to real-time recommendations" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "description": "Amazon API Gateway now allows customers of REST APIs to skip trust chain validation for backend server certificates for HTTP and VPC Link Integration. 
This feature enables customers to configure their REST APIs to integrate with backends that are secured with certificates vended from private certificate authorities (CA) or certificates that are self-signed." + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "This release enables additional functionality for the Amazon Pinpoint journeys feature. With this release, you can send messages through additional channels, including SMS, push notifications, and custom channels." + }, + { + "type": "feature", + "category": "Amazon Personalize", + "description": "[Personalize] Adds ability to create and apply filters." + }, + { + "type": "feature", + "category": "Amazon SageMaker Runtime", + "description": "You can now specify the production variant to send the inference request to, when invoking a SageMaker Endpoint that is running two or more variants." + }, + { + "type": "feature", + "category": "AWS Elastic Beanstalk", + "description": "These API changes enable an IAM user to associate an operations role with an Elastic Beanstalk environment, so that the IAM user can call Elastic Beanstalk actions without having access to underlying downstream AWS services that these actions call." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.32.json b/.changes/2.13.32.json new file mode 100644 index 000000000000..c026b2237a9c --- /dev/null +++ b/.changes/2.13.32.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.32", + "date": "2020-06-08", + "entries": [ + { + "type": "feature", + "category": "AWS Cloud Map", + "description": "Added support for tagging Service and Namespace type resources in Cloud Map" + }, + { + "type": "feature", + "category": "AWS Shield", + "description": "This release adds the option for customers to identify a contact name and method that the DDoS Response Team can proactively engage when a Route 53 Health Check that is associated with a Shield protected resource fails." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Update javadoc annotation for AwsBasicCredentials" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.33.json b/.changes/2.13.33.json new file mode 100644 index 000000000000..655146bc5866 --- /dev/null +++ b/.changes/2.13.33.json @@ -0,0 +1,16 @@ +{ + "version": "2.13.33", + "date": "2020-06-09", + "entries": [ + { + "type": "feature", + "category": "AWS Transfer Family", + "description": "This release updates the API so customers can test use of Source IP to allow, deny or limit access to data in their S3 buckets after integrating their identity provider." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.34.json b/.changes/2.13.34.json new file mode 100644 index 000000000000..b58df9e2886f --- /dev/null +++ b/.changes/2.13.34.json @@ -0,0 +1,51 @@ +{ + "version": "2.13.34", + "date": "2020-06-10", + "entries": [ + { + "type": "feature", + "category": "AWS Compute Optimizer", + "description": "Compute Optimizer supports exporting recommendations to Amazon S3." + }, + { + "type": "feature", + "category": "Amazon AppConfig", + "description": "This release allows customers to choose from a list of predefined deployment strategies while starting deployments." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "New C6g instances powered by AWS Graviton2 processors and ideal for running advanced, compute-intensive workloads; New R6g instances powered by AWS Graviton2 processors and ideal for running memory-intensive workloads." + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "description": "Documentation updates for lightsail" + }, + { + "type": "feature", + "category": "AWS Shield", + "description": "Corrections to the supported format for contact phone numbers and to the description for the create subscription action." + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "Reducing the schedule name of DLM Lifecycle policy from 500 to 120 characters." + }, + { + "type": "feature", + "category": "CodeArtifact", + "description": "Added support for AWS CodeArtifact." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release of the Amazon Macie API removes support for the ArchiveFindings and UnarchiveFindings operations. This release also adds UNKNOWN as an encryption type for S3 bucket metadata." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "Service Catalog Documentation Update for Integration with AWS Organizations Delegated Administrator feature" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.35.json b/.changes/2.13.35.json new file mode 100644 index 000000000000..8b23113e363c --- /dev/null +++ b/.changes/2.13.35.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.35", + "date": "2020-06-11", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Avoid unnecessary copying in `AsyncRequestBody.fromBytes()`" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS IoT Data Plane", + "description": "As part of this release, we are introducing a new feature called named shadow, which extends the capability of AWS IoT Device Shadow to support multiple shadows for a single IoT device. With this release, customers can store different device state data into different shadows, and as a result access only the required state data when needed and reduce individual shadow size." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "description": "This change adds the built-in AMAZON.KendraSearchIntent that enables integration with Amazon Kendra." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release adds support for deleting capacity providers." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "EC2 Image Builder now supports specifying a custom working directory for your build and test workflows. In addition, Image Builder now supports defining tags that are applied to ephemeral resources created by EC2 Image Builder as part of the image creation workflow." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.36.json b/.changes/2.13.36.json new file mode 100644 index 000000000000..199f1a1ed4da --- /dev/null +++ b/.changes/2.13.36.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.36", + "date": "2020-06-12", + "entries": [ + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "The following parameters now return the organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets: the OrganizationalUnitIds parameter on StackSet and the OrganizationalUnitId parameter on StackInstance, StackInstanceSummary, and StackSetOperationResultSummary" + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "You can now choose to crawl the entire table or just a sample of records in DynamoDB when using AWS Glue crawlers. Additionally, you can also specify a scanning rate for crawling DynamoDB tables." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2 DynamoDB Enhanced Client", + "description": "Added ClientRequestToken in class TransactWriteItemsEnhancedRequest." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Display EndpointType in DescribeGatewayInformation" + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "description": "Documentation updates for Amazon API Gateway" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.37.json b/.changes/2.13.37.json new file mode 100644 index 000000000000..283abad01548 --- /dev/null +++ b/.changes/2.13.37.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.37", + "date": "2020-06-15", + "entries": [ + { + "type": "feature", + "category": "Amazon Chime", + "description": "feature: Chime: This release introduces the ability to create an AWS Chime SDK meeting with attendees." + }, + { + "type": "feature", + "category": "Alexa For Business", + "description": "Adding support for optional tags in CreateBusinessReportSchedule, CreateProfile and CreateSkillGroup APIs" + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "Updated all AuthParameters to be sensitive." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "Added support for job executions rollout configuration, job abort configuration, and job executions timeout configuration for AWS IoT Over-the-Air (OTA) Update Feature." + }, + { + "type": "feature", + "category": "Amazon AppConfig", + "description": "This release adds a hosted configuration source provider. Customers can now store their application configurations directly in AppConfig, without the need for an external configuration source." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.38.json b/.changes/2.13.38.json new file mode 100644 index 000000000000..04497c4057d3 --- /dev/null +++ b/.changes/2.13.38.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.38", + "date": "2020-06-16", + "entries": [ + { + "type": "feature", + "category": "Auto Scaling", + "description": "Introducing instance refresh, a feature that helps you update all instances in an Auto Scaling group in a rolling fashion (for example, to apply a new AMI or instance type). You can control the pace of the refresh by defining the percentage of the group that must remain running/healthy during the replacement process and the time for new instances to warm up between replacements." 
+ }, + { + "type": "feature", + "category": "AWS Data Exchange", + "description": "This release fixes a bug in the AWS Data Exchange Python and NodeJS SDKs. The 'KmsKeyArn' field in the create-job API was configured to be required instead of optional. We updated this field to be optional in this release." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Documentation updates for CloudFront" + }, + { + "type": "feature", + "category": "Amazon Polly", + "description": "Amazon Polly adds new US English child voice - Kevin. Kevin is available as Neural voice only." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Adds support for using Amazon Elastic File System (persistent storage) with AWS Lambda. This enables customers to share data across function invocations, read large reference data files, and write function output to a persistent and shared store." + }, + { + "type": "feature", + "category": "Amazon QLDB", + "description": "Documentation updates for Amazon QLDB" + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Fix an NPE in `OptionalAttributeConverter` that can happen the if the `nul()` property of the `AttributeValue` is `null`." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.39.json b/.changes/2.13.39.json new file mode 100644 index 000000000000..0da636d54ca1 --- /dev/null +++ b/.changes/2.13.39.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.39", + "date": "2020-06-17", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "nvmeSupport added to DescribeInstanceTypes API" + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "Adds support for route and virtual node listener timeouts." + }, + { + "type": "feature", + "category": "Amazon Route 53", + "description": "Add PriorRequestNotComplete exception to AssociateVPCWithHostedZone API" + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This is a documentation-only update to the Amazon Macie API. This update contains miscellaneous editorial improvements to various API descriptions." + }, + { + "type": "feature", + "category": "Amazon Import/Export Snowball", + "description": "AWS Snowcone is a portable, rugged and secure device for edge computing and data transfer. You can use Snowcone to collect, process, and move data to AWS, either offline by shipping the device to AWS or online by using AWS DataSync. With 2 CPUs and 4 GB RAM of compute and 8 TB of storage, Snowcone can run edge computing workloads and store data securely. Snowcone's small size (8.94\" x 5.85\" x 3.25\" / 227 mm x 148.6 mm x 82.65 mm) allows you to set it next to machinery in a factory. Snowcone weighs about 4.5 lbs. (2 kg), so you can carry one in a backpack, use it with battery-based operation, and use the Wi-Fi interface to gather sensor data. Snowcone supports a file interface with NFS support." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.4.json b/.changes/2.13.4.json new file mode 100644 index 000000000000..9a1fc0862c0a --- /dev/null +++ b/.changes/2.13.4.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.4", + "date": "2020-04-27", + "entries": [ + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "Adding minimum replication engine version for describe-endpoint-types api." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Various performance improvements." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Change to the input, ResourceSpec, changing EnvironmentArn to SageMakerImageArn. This affects the following preview APIs: CreateDomain, DescribeDomain, UpdateDomain, CreateUserProfile, DescribeUserProfile, UpdateUserProfile, CreateApp and DescribeApp." + }, + { + "type": "feature", + "category": "AWS Data Exchange", + "description": "This release introduces AWS Data Exchange support for configurable encryption parameters when exporting data sets to Amazon S3." + }, + { + "type": "feature", + "category": "Access Analyzer", + "description": "This release adds support for inclusion of S3 Access Point policies in IAM Access Analyzer evaluation of S3 bucket access. IAM Access Analyzer now reports findings for buckets shared through access points and identifies the access point that permits access." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.40.json b/.changes/2.13.40.json new file mode 100644 index 000000000000..6c93707308b9 --- /dev/null +++ b/.changes/2.13.40.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.40", + "date": "2020-06-18", + "entries": [ + { + "type": "feature", + "category": "AWS Support", + "description": "Documentation updates for support" + }, + { + "type": "feature", + "category": "AWSMarketplace Metering", + "description": "Documentation updates for meteringmarketplace" + }, + { + "type": "feature", + "category": "Amazon Route 53", + "description": "Added a new ListHostedZonesByVPC API for customers to list all the private hosted zones that a specified VPC is associated with." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for NexGuard FileMarker SDK, which allows NexGuard partners to watermark proprietary content in mezzanine and OTT streaming contexts." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Adding support for global write forwarding on secondary clusters in an Aurora global database." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Added offset support for specifying the number of days to wait after the date and time specified by a CRON expression before running the maintenance window." + }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "description": "You can now configure Amazon SES to send event notifications when the delivery of an email is delayed because of a temporary issue. For example, you can receive a notification if the recipient's inbox is full, or if there's a temporary problem with the receiving email server." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.41.json b/.changes/2.13.41.json new file mode 100644 index 000000000000..b9fe63843fa2 --- /dev/null +++ b/.changes/2.13.41.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.41", + "date": "2020-06-19", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Adds support to tag elastic-gpu on the RunInstances api" + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Documentation updates for elasticache" + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports Input Prepare schedule actions. This feature improves existing input switching by allowing users to prepare an input prior to switching to it." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS OpsWorks CM", + "description": "Documentation updates for AWS OpsWorks CM." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.42.json b/.changes/2.13.42.json new file mode 100644 index 000000000000..822c23f0ec8e --- /dev/null +++ b/.changes/2.13.42.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.42", + "date": "2020-06-22", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds Tag On Create feature support for the ImportImage, ImportSnapshot, ExportImage and CreateInstanceExportTask APIs." + }, + { + "type": "feature", + "category": "Amazon Simple Queue Service", + "description": "AWS SQS adds pagination support for ListQueues and ListDeadLetterSourceQueues APIs" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Added paginators for various APIs." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Adding support for MaximumCoreCapacityUnits parameter for EMR Managed Scaling. It allows users to control how many units/nodes are added to the CORE group/fleet. Remaining units/nodes are added to the TASK groups/fleet in the cluster." + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "This update adds the ability to detect black frames, end credits, shots, and color bars in stored videos" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.43.json b/.changes/2.13.43.json new file mode 100644 index 000000000000..efc5877aa6c9 --- /dev/null +++ b/.changes/2.13.43.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.43", + "date": "2020-06-23", + "entries": [ + { + "type": "feature", + "category": "AWS MediaTailor", + "description": "AWS Elemental MediaTailor SDK now allows configuration of Bumper." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "Added a new error message to support the requirement for a Business License on AWS accounts in China to create an organization." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.44.json b/.changes/2.13.44.json new file mode 100644 index 000000000000..bb9816351c6d --- /dev/null +++ b/.changes/2.13.44.json @@ -0,0 +1,56 @@ +{ + "version": "2.13.44", + "date": "2020-06-24", + "entries": [ + { + "type": "feature", + "category": "Auto Scaling", + "description": "Documentation updates for Amazon EC2 Auto Scaling." 
+ }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "This release adds support for a new backup policy type for AWS Organizations." + }, + { + "type": "feature", + "category": "AWS CodeCommit", + "description": "This release introduces support for reactions to CodeCommit comments. Users will be able to select from a pre-defined list of emojis to express their reaction to any comments." + }, + { + "type": "feature", + "category": "AWS Identity and Access Management", + "description": "Documentation updates for iam" + }, + { + "type": "feature", + "category": "AWS Amplify", + "description": "This release of AWS Amplify Console introduces support for automatically creating custom subdomains for branches based on user-defined glob patterns, as well as automatically cleaning up Amplify branches when their corresponding git branches are deleted." + }, + { + "type": "feature", + "category": "AWS Backup", + "description": "Customers can now manage and monitor their backups in a policied manner across their AWS accounts, via an integration between AWS Backup and AWS Organizations" + }, + { + "type": "feature", + "category": "Amazon Honeycode", + "description": "Introducing Amazon Honeycode - a fully managed service that allows you to quickly build mobile and web apps for teams without programming." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Amazon EMR customers can now set allocation strategies for On-Demand and Spot instances in their EMR clusters with instance fleets. These allocation strategies use real-time capacity insights to provision clusters faster and make the most efficient use of available spare capacity to allocate Spot instances to reduce interruptions." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "This release adds the capability to take highly-durable, incremental backups of your FSx for Lustre persistent file systems. This capability makes it easy to further protect your file system data and to meet business and regulatory compliance requirements." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.45.json b/.changes/2.13.45.json new file mode 100644 index 000000000000..5886730a8d09 --- /dev/null +++ b/.changes/2.13.45.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.45", + "date": "2020-06-25", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Added support for tag-on-create for Host Reservations in Dedicated Hosts. You can now specify tags when you create a Host Reservation for a Dedicated Host. For more information about tagging, see AWS Tagging Strategies." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "This release adds new APIs to support column level statistics in AWS Glue Data Catalog" + }, + { + "type": "bugfix", + "category": "AWS DynamoDB Enhanced Client", + "description": "Fixed a bug causing a NullPointerException to be thrown in the enhanced DeleteItem operation if a conditionExpression was given with null attributeNames or null attributeValues." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.46.json b/.changes/2.13.46.json new file mode 100644 index 000000000000..684303a46f36 --- /dev/null +++ b/.changes/2.13.46.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.46", + "date": "2020-06-26", + "entries": [ + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "Don't require Authorization for InitiateAuth and RespondToAuthChallenge." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Added support for cross-region DataSource credentials copying." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "This release contains miscellaneous API documentation updates for AWS DMS in response to several customer reported issues." + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "ListStackInstances and DescribeStackInstance now return a new `StackInstanceStatus` object that contains `DetailedStatus` values: a disambiguation of the more generic `Status` value. ListStackInstances output can now be filtered on `DetailedStatus` using the new `Filters` parameter." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "The new 'ModelClientConfig' parameter being added for CreateTransformJob and DescribeTransformJob api actions enable customers to configure model invocation related parameters such as timeout and retry." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.47.json b/.changes/2.13.47.json new file mode 100644 index 000000000000..aa613631389c --- /dev/null +++ b/.changes/2.13.47.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.47", + "date": "2020-06-29", + "entries": [ + { + "type": "feature", + "category": "Auto Scaling", + "description": "Documentation updates for Amazon EC2 Auto Scaling." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Profiler", + "description": "Amazon CodeGuru Profiler is now generally available. The Profiler helps developers to optimize their software, troubleshoot issues in production, and identify their most expensive lines of code. As part of general availability, we are launching: Profiling of AWS Lambda functions, Anomaly detection in CPU profiles, Color My Code on flame graphs, Expanding presence to 10 AWS regions." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Virtual Private Cloud (VPC) customers can now create and manage their own Prefix Lists to simplify VPC configurations." + }, + { + "type": "feature", + "category": "AWS CodeStar connections", + "description": "Updated and new APIs in support of hosts for connections to installed provider types. New integration with the GitHub Enterprise Server provider type." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.48.json b/.changes/2.13.48.json new file mode 100644 index 000000000000..56384c1db86a --- /dev/null +++ b/.changes/2.13.48.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.48", + "date": "2020-06-30", + "entries": [ + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "description": "Release GitHub Enterprise Server source provider integration" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix marshaller binding for input event streams when member name and shape name are not equal." + }, + { + "type": "feature", + "category": "AWS Comprehend Medical", + "description": "This release adds the relationships between MedicalCondition and Anatomy in DetectEntitiesV2 API." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Documentation updates for rds" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Added support for tag-on-create for CreateVpc, CreateEgressOnlyInternetGateway, CreateSecurityGroup, CreateSubnet, CreateNetworkInterface, CreateNetworkAcl, CreateDhcpOptions and CreateInternetGateway. You can now specify tags when creating any of these resources. For more information about tagging, see AWS Tagging Strategies." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "Add a new parameter (ImageDigest) and a new exception (ImageDigestDoesNotMatchException) to PutImage API to support pushing image by digest." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.49.json b/.changes/2.13.49.json new file mode 100644 index 000000000000..cc67573cc5bd --- /dev/null +++ b/.changes/2.13.49.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.49", + "date": "2020-07-01", + "entries": [ + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release adds the exceptions KMSKeyNotAccessibleFault and InvalidDBClusterStateFault to the Amazon RDS ModifyDBInstance API." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "Support build status config in project source" + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "This release supports third party emergency call routing configuration for Amazon Chime Voice Connectors." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "This release adds additional details for findings. There are now finding details for auto scaling groups, EC2 volumes, and EC2 VPCs. You can identify detected vulnerabilities and provide related network paths." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "EC2 Image Builder adds support for encrypted AMI distribution." + }, + { + "type": "feature", + "category": "AWS AppSync", + "description": "AWS AppSync supports new 12xlarge instance for server-side API caching" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.5.json b/.changes/2.13.5.json new file mode 100644 index 000000000000..93ba9eed939a --- /dev/null +++ b/.changes/2.13.5.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.5", + "date": "2020-04-28", + "entries": [ + { + "type": "feature", + "category": "Amazon Kinesis Video Streams", + "description": "Add \"GET_CLIP\" to the list of supported API names for the GetDataEndpoint API." 
+ }, + { + "type": "feature", + "category": "Amazon Kinesis Video Streams Archived Media", + "description": "Add support for the GetClip API for retrieving media from a video stream in the MP4 format." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhacned", + "description": "Fix NPE on EnhancedType, created with documentOf, when calling innerToString" + }, + { + "type": "feature", + "category": "Amazon Route 53", + "description": "Amazon Route 53 now supports the Africa (Cape Town) Region (af-south-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "This release adds support for multi-architecture images also known as a manifest list" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "SSM State Manager support for adding list association filter for Resource Group and manual mode of managing compliance for an association." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports several new features: enhanced VQ for H.264 (AVC) output encodes; passthrough of timed metadata and of Nielsen ID3 metadata in fMP4 containers in HLS outputs; the ability to generate a SCTE-35 sparse track without additional segmentation, in Microsoft Smooth outputs; the ability to select the audio from a TS input by specifying the audio track; and conversion of HDR colorspace in the input to an SDR colorspace in the output." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.50.json b/.changes/2.13.50.json new file mode 100644 index 000000000000..8003dfb2ca88 --- /dev/null +++ b/.changes/2.13.50.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.50", + "date": "2020-07-02", + "entries": [ + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Documentation updates for elasticache" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "description": "Documentation updates for Amazon Connect." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.51.json b/.changes/2.13.51.json new file mode 100644 index 000000000000..5b99c518dd98 --- /dev/null +++ b/.changes/2.13.51.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.51", + "date": "2020-07-06", + "entries": [ + { + "type": "feature", + "category": "AWS IoT SiteWise", + "description": "This release supports optional start date and end date parameters for the GetAssetPropertyValueHistory API." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Add Theme APIs and update Dashboard APIs to support theme overrides." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Adds support for Amazon RDS on AWS Outposts." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.52.json b/.changes/2.13.52.json new file mode 100644 index 000000000000..d5a046c4561b --- /dev/null +++ b/.changes/2.13.52.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.52", + "date": "2020-07-07", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "DescribeAvailabilityZones now returns additional data about Availability Zones and Local Zones." + }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "description": "This release adds support for automatic backups of Amazon EFS file systems to further simplify backup management." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "This release includes the preview release of the client-side metrics for the AWS SDK for Java v2. The SPI can be found in the `metrics-spi` module, and this release also includes a metric publisher for CloudWatch in `cloudwatch-metric-publisher`. See our post over at the [AWS Developer Blog](https://aws.amazon.com/blogs/developer/category/developer-tools/aws-sdk-for-java/) for additional information." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Adding support for file-system driven directory refresh, Case Sensitivity toggle for SMB File Shares, and S3 Prefixes and custom File Share names" + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "AWS Glue Data Catalog supports cross account sharing of tables through AWS Lake Formation" + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Amazon CloudFront adds support for a new security policy, TLSv1.2_2019." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Lake Formation", + "description": "AWS Lake Formation supports sharing tables with other AWS accounts and organizations" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.53.json b/.changes/2.13.53.json new file mode 100644 index 000000000000..9ca5887e8cf1 --- /dev/null +++ b/.changes/2.13.53.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.53", + "date": "2020-07-08", + "entries": [ + { + "type": "feature", + "category": "Amazon Forecast Service", + "description": "With this release, Amazon Forecast now supports the ability to add a tag to any resource via the launch of three new APIs: TagResource, UntagResource and ListTagsForResource. A tag is a simple label consisting of a customer-defined key and an optional value allowing for easier resource management." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "We have launched a self-service option to make it easier for customers to manage the use of their content by AI services. Certain AI services (Amazon CodeGuru Profiler, Amazon Comprehend, Amazon Lex, Amazon Polly, Amazon Rekognition, Amazon Textract, Amazon Transcribe, and Amazon Translate), may use content to improve the service. Customers have been able to opt out of this use by contacting AWS Support, and now they can opt out on a self-service basis by setting an Organizations policy for all or an individual AI service as listed above. Please refer to the technical documentation for more details." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "EC2 Spot now enables customers to tag their Spot Instances Requests on creation."
+ }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "Customers can now see Instance Name alongside each rightsizing recommendation." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.54.json b/.changes/2.13.54.json new file mode 100644 index 000000000000..55d8ea3895fb --- /dev/null +++ b/.changes/2.13.54.json @@ -0,0 +1,76 @@ +{ + "version": "2.13.54", + "date": "2020-07-09", + "entries": [ + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Support converting \"0\" and \"1\" numbers read from DynamoDB to Boolean and AtomicBoolean." + }, + { + "type": "feature", + "category": "Amazon Elastic Block Store", + "description": "This release introduces the following set of actions for the EBS direct APIs: 1. StartSnapshot, which creates a new Amazon EBS snapshot. 2. PutSnapshotBlock, which writes a block of data to a snapshot. 3. CompleteSnapshot, which seals and completes a snapshot after blocks of data have been written to it." + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "description": "This release adds support for SMS origination number as an attribute in the MessageAttributes parameter for the SNS Publish API." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Events", + "description": "Amazon CloudWatch Events/EventBridge adds support for API Gateway as a target." + }, + { + "type": "feature", + "category": "Alexa For Business", + "description": "Added support for registering an AVS device directly to a room using RegisterAVSDevice with a room ARN" + }, + { + "type": "feature", + "category": "AWS Secrets Manager", + "description": "Adds support for filters on the ListSecrets API to allow filtering results by name, tag key, tag value, or description. Adds support for the BlockPublicPolicy option on the PutResourcePolicy API to block resource policies which grant a wide range of IAM principals access to secrets. Adds support for the ValidateResourcePolicy API to validate resource policies for syntax and prevent lockout error scenarios and wide access to secrets." + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "description": "Amazon EventBridge adds support for API Gateway as a target." + }, + { + "type": "feature", + "category": "AWS Amplify", + "description": "Documentation update to the introduction text to specify that this is the Amplify Console API." + }, + { + "type": "feature", + "category": "AWS CloudHSM V2", + "description": "Documentation updates for cloudhsmv2" + }, + { + "type": "feature", + "category": "Amazon Comprehend", + "description": "AWS Comprehend now supports Real-time Analysis with Custom Entity Recognition." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "This release adds the DeleteHumanTaskUi API to Amazon Augmented AI" + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "AppMesh now supports Ingress which allows resources outside a mesh to communicate to resources that are inside the mesh. See https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_gateways.html" + }, + { + "type": "feature", + "category": "AWS WAFV2", + "description": "Added the option to use IP addresses from an HTTP header that you specify, instead of using the web request origin. 
Available for IP set matching, geo matching, and rate-based rule count aggregation." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.55.json b/.changes/2.13.55.json new file mode 100644 index 000000000000..1d0467399bf3 --- /dev/null +++ b/.changes/2.13.55.json @@ -0,0 +1,16 @@ +{ + "version": "2.13.55", + "date": "2020-07-15", + "entries": [ + { + "type": "feature", + "category": "Amazon Interactive Video Service", + "description": "Introducing Amazon Interactive Video Service - a managed live streaming solution that is quick and easy to set up, and ideal for creating interactive video experiences." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.56.json b/.changes/2.13.56.json new file mode 100644 index 000000000000..f4bfea0a8201 --- /dev/null +++ b/.changes/2.13.56.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.56", + "date": "2020-07-17", + "entries": [ + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release of the Amazon Macie API includes miscellaneous updates and improvements to the documentation." + }, + { + "type": "feature", + "category": "AWS Elastic Beanstalk", + "description": "Add waiters for `EnvironmentExists`, `EnvironmentUpdated`, and `EnvironmentTerminated`. Add paginators for `DescribeEnvironmentManagedActionHistory` and `ListPlatformVersions`." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Documentation updates for EC2" + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "description": "This release adds a set of Amazon Connect APIs to programmatically control call recording with start, stop, pause and resume functions." + }, + { + "type": "feature", + "category": "AWS AppSync", + "description": "Documentation update to Cachingconfig.cachingKeys to include $context.source as a valid value." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "Documentation updates for Application Auto Scaling" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.57.json b/.changes/2.13.57.json new file mode 100644 index 000000000000..ca510d82292d --- /dev/null +++ b/.changes/2.13.57.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.57", + "date": "2020-07-20", + "entries": [ + { + "type": "feature", + "category": "Firewall Management Service", + "description": "Added managed policies for auditing security group rules, including the use of managed application and protocol lists." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Add a new SupportsParallelQuery output field to DescribeDBEngineVersions. This field shows whether the engine version supports parallelquery. Add a new SupportsGlobalDatabases output field to DescribeDBEngineVersions and DescribeOrderableDBInstanceOptions. This field shows whether global database is supported by engine version or the combination of engine version and instance class." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "CloudFront adds support for cache policies and origin request policies. 
With these new policies, you can now more granularly control the query string, header, and cookie values that are included in the cache key and in requests that CloudFront sends to your origin." + }, + { + "type": "feature", + "category": "Amazon Fraud Detector", + "description": "Introduced flexible model training dataset requirements for Online Fraud Insights so that customers can choose any two inputs to train a model instead of being required to use 'email' and 'IP address' at minimum. Added support for resource ARNs, resource tags, resource-based IAM policies and identity-based policies that limit access to a resource based on tags. Added support for customer-managed customer master key (CMK) data encryption. Added new Event Type, Entity Type, and Label APIs. An event type defines the structure for an event sent to Amazon Fraud Detector, including the variables sent as part of the event, the entity performing the event, and the labels that classify the event. Introduced the GetEventPrediction API." + }, + { + "type": "feature", + "category": "AWS Ground Station", + "description": "Adds optional MTU property to DataflowEndpoint and adds contact source and destination details to DescribeContact response." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "AWS CodeBuild adds support for Session Manager and Windows 2019 Environment type" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Added support for tag-on-create for CreateVpcPeeringConnection and CreateRouteTable. You can now specify tags when creating any of these resources. For more information about tagging, see AWS Tagging Strategies. Add poolArn to the response of DescribeCoipPools." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.58.json b/.changes/2.13.58.json new file mode 100644 index 000000000000..c07b840b5fc5 --- /dev/null +++ b/.changes/2.13.58.json @@ -0,0 +1,11 @@ +{ + "version": "2.13.58", + "date": "2020-07-21", + "entries": [ + { + "type": "feature", + "category": "Amazon CodeGuru Profiler", + "description": "Amazon CodeGuru Profiler now supports resource tagging APIs, tags-on-create and tag-based access control features. You can now tag profiling groups for better resource and access control management." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.59.json b/.changes/2.13.59.json new file mode 100644 index 000000000000..b08751b79f1b --- /dev/null +++ b/.changes/2.13.59.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.59", + "date": "2020-07-22", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "The AWS Elemental MediaLive APIs and SDKs now support the ability to get thumbnails for MediaLive devices that are attached or not attached to a channel. Previously, this thumbnail feature was available only on the console." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "New API operations - GetSessionEmbedUrl, CreateNamespace, DescribeNamespace, ListNamespaces, DeleteNamespace, DescribeAccountSettings, UpdateAccountSettings, CreateAccountCustomization, DescribeAccountCustomization, UpdateAccountCustomization, DeleteAccountCustomization. Modified API operations to support custom permissions restrictions - RegisterUser, UpdateUser, UpdateDashboardPermissions" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.6.json b/.changes/2.13.6.json new file mode 100644 index 000000000000..d2e47da6c865 --- /dev/null +++ b/.changes/2.13.6.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.6", + "date": "2020-04-29", + "entries": [ + { + "type": "feature", + "category": "AWS IoT SiteWise", + "description": "AWS IoT SiteWise is a managed service that makes it easy to collect, store, organize and monitor data from industrial equipment at scale. You can use AWS IoT SiteWise to model your physical assets, processes and facilities, quickly compute common industrial performance metrics, and create fully managed web applications to help analyze industrial equipment data, prevent costly equipment issues, and reduce production inefficiencies." + }, + { + "type": "feature", + "category": "AWS WAF Regional", + "description": "This release add migration API for AWS WAF Classic (\"waf\" and \"waf-regional\"). The migration API will parse through your web ACL and generate a CloudFormation template into your S3 bucket. Deploying this template will create equivalent web ACL under new AWS WAF (\"wafv2\")." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS WAF", + "description": "This release add migration API for AWS WAF Classic (\"waf\" and \"waf-regional\"). The migration API will parse through your web ACL and generate a CloudFormation template into your S3 bucket. Deploying this template will create equivalent web ACL under new AWS WAF (\"wafv2\")." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "With this release, you can now use Amazon Transcribe to create medical custom vocabularies and use them in both medical real-time streaming and medical batch transcription jobs." + }, + { + "type": "feature", + "category": "AWS Cloud Map", + "description": "Documentation updates for servicediscovery" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.60.json b/.changes/2.13.60.json new file mode 100644 index 000000000000..8140091dd04d --- /dev/null +++ b/.changes/2.13.60.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.60", + "date": "2020-07-23", + "entries": [ + { + "type": "feature", + "category": "Amazon FSx", + "description": "Adds support for AutoImport, a new FSx for Lustre feature that allows customers to configure their FSx file system to automatically update its contents when new objects are added to S3 or existing objects are overwritten." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Added UpdateWorkspaceImagePermission API to share Amazon WorkSpaces images across AWS accounts." + }, + { + "type": "feature", + "category": "AWS Config", + "description": "Adding service linked configuration aggregation support along with new enums for config resource coverage" + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "description": "This release adds support for Amazon Lightsail content delivery network (CDN) distributions and SSL/TLS certificates." 
+ }, + { + "type": "feature", + "category": "AWS Direct Connect", + "description": "Documentation updates for AWS Direct Connect" + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Added new ConnectionProperties: \"KAFKA_SSL_ENABLED\" (to toggle SSL connections) and \"KAFKA_CUSTOM_CERT\" (import CA certificate file)" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.61.json b/.changes/2.13.61.json new file mode 100644 index 000000000000..cc8dcde50247 --- /dev/null +++ b/.changes/2.13.61.json @@ -0,0 +1,51 @@ +{ + "version": "2.13.61", + "date": "2020-07-24", + "entries": [ + { + "type": "feature", + "category": "Amazon Fraud Detector", + "description": "GetPrediction has been replaced with GetEventPrediction. PutExternalModel has been simplified to accept a role ARN." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "Amazon Kendra now supports sorting query results based on document attributes. Amazon Kendra also introduced an option to enclose table and column names with double quotes for database data sources." + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage", + "description": "The release adds daterange as a new ad marker option. This option enables MediaPackage to insert EXT-X-DATERANGE tags in HLS and CMAF manifests. The EXT-X-DATERANGE tag is used to signal ad and program transition events." + }, + { + "type": "feature", + "category": "AmazonMQ", + "description": "Amazon MQ now supports LDAP (Lightweight Directory Access Protocol), providing authentication and authorization of Amazon MQ users via a customer designated LDAP server." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release of the Amazon Macie API introduces additional criteria for sorting and filtering query results for account quotas and usage statistics." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Sagemaker Ground Truth:Added support for OIDC (OpenID Connect) to authenticate workers via their own identity provider instead of through Amazon Cognito. This release adds new APIs (CreateWorkforce, DeleteWorkforce, and ListWorkforces) to SageMaker Ground Truth service. Sagemaker Neo: Added support for detailed target device description by using TargetPlatform fields - OS, architecture, and accelerator. Added support for additional compilation parameters by using JSON field CompilerOptions. Sagemaker Search: SageMaker Search supports transform job details in trial components." + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "description": "AWS CloudWatch ListMetrics now supports an optional parameter (RecentlyActive) to filter results by only metrics that have received new datapoints in the past 3 hours. This enables more targeted metric data retrieval through the Get APIs" + }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "Documentation update for FSx for Lustre" + }, + { + "type": "feature", + "category": "AWS MediaConnect", + "description": "You can now disable an entitlement to stop streaming content to the subscriber's flow temporarily. When you are ready to allow content to start streaming to the subscriber's flow again, you can enable the entitlement." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.62.json b/.changes/2.13.62.json new file mode 100644 index 000000000000..d714e60a3b60 --- /dev/null +++ b/.changes/2.13.62.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.62", + "date": "2020-07-27", + "entries": [ + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "Basic endpoint settings for relational databases, Preflight validation API." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Add ability to manually resume workflows in AWS Glue providing customers further control over the orchestration of ETL workloads." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "m6gd, c6gd, r6gd instances are powered by AWS Graviton2 processors and support local NVMe instance storage" + }, + { + "type": "feature", + "category": "AWS DataSync", + "description": "Today AWS DataSync releases support for self-managed object storage Locations and the new TransferMode Option." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Assorted doc ticket-fix updates for Systems Manager." + }, + { + "type": "feature", + "category": "Amazon Fraud Detector", + "description": "Moved the eventTypeName attribute for PutExternalModel API to inputConfiguration. Model ID's no longer allow hyphens." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.63.json b/.changes/2.13.63.json new file mode 100644 index 000000000000..1e518347e25a --- /dev/null +++ b/.changes/2.13.63.json @@ -0,0 +1,56 @@ +{ + "version": "2.13.63", + "date": "2020-07-28", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Slight performance improvement when metrics are disabled." + }, + { + "type": "feature", + "category": "Auto Scaling", + "description": "Now you can enable Instance Metadata Service Version 2 (IMDSv2) or disable the instance metadata endpoint with Launch Configurations." + }, + { + "type": "feature", + "category": "Amazon Interactive Video Service", + "description": "Added a new error code, PendingVerification, to differentiate between errors caused by insufficient IAM permissions and errors caused by account verification." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports several new features: EBU-TT-D captions in Microsoft Smooth outputs; interlaced video in HEVC outputs; video noise reduction (using temporal filtering) in HEVC outputs." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Added UpdateSecurityHubConfiguration API. Security Hub now allows customers to choose whether to automatically enable new controls that are added to an existing standard that the customer enabled. For example, if you enabled Foundational Security Best Practices for an account, you can automatically enable new controls as we add them to that standard. By default, new controls are enabled." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed an issue where HTTP status code metrics were not always published for async clients." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "This release updates distribution configurations to allow periods in AMI names." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Introduces support for tag-on-create capability for the following APIs: CreateVpnConnection, CreateVpnGateway, and CreateCustomerGateway. A user can now add tags while creating these resources. For further detail, please see AWS Tagging Strategies." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Adds reporting of manual cluster snapshot quota to DescribeAccountAttributes API" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.64.json b/.changes/2.13.64.json new file mode 100644 index 000000000000..c12dc9730b50 --- /dev/null +++ b/.changes/2.13.64.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.64", + "date": "2020-07-29", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Adding support to target EC2 On-Demand Capacity Reservations within an AWS Resource Group to launch EC2 instances." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "This release adds support for encrypting the contents of your Amazon ECR repository with customer master keys (CMKs) stored in AWS Key Management Service." + }, + { + "type": "feature", + "category": "AWS Resource Groups", + "description": "Resource Groups released a new feature that enables you to create a group with an associated configuration that specifies how other AWS services interact with the group. There are two new operations `GroupResources` and `UngroupResources` to work on a group with a configuration. In this release, you can associate EC2 Capacity Reservations with a resource group. Resource Groups also added a new request parameter `Group` to replace `GroupName` for all existing operations." + }, + { + "type": "feature", + "category": "AWS Cloud Map", + "description": "Added new attribute AWS_EC2_INSTANCE_ID for RegisterInstance API" + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "GuardDuty now supports S3 Data Events as a configurable data source type. This feature expands GuardDuty's monitoring scope to include S3 data plane operations, such as GetObject and PutObject. This data source is optional and can be enabled or disabled at anytime. Accounts already using GuardDuty must first enable the new feature to use it; new accounts will be enabled by default. GuardDuty masters can configure this data source for individual member accounts and GuardDuty masters associated through AWS Organizations can automatically enable the data source in member accounts." + }, + { + "type": "feature", + "category": "Amazon Kinesis Firehose", + "description": "This release includes a new Kinesis Data Firehose feature that supports data delivery to Https endpoint and to partners. You can now use Kinesis Data Firehose to ingest real-time data and deliver to Https endpoint and partners in a serverless, reliable, and salable manner." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.65.json b/.changes/2.13.65.json new file mode 100644 index 000000000000..ae4f830f267f --- /dev/null +++ b/.changes/2.13.65.json @@ -0,0 +1,56 @@ +{ + "version": "2.13.65", + "date": "2020-07-30", + "entries": [ + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "Adding support for BuildBatch, and CodeCoverage APIs. BuildBatch allows you to model your project environment in source, and helps start multiple builds with a single API call. CodeCoverage allows you to track your code coverage using AWS CodeBuild." + }, + { + "type": "feature", + "category": "AWS Resource Groups", + "description": "Improved documentation for Resource Groups API operations." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "GuardDuty can now provide detailed cost metrics broken down by account, data source, and S3 resources, based on the past 30 days of usage. This new feature also supports viewing cost metrics for all member accounts as a GuardDuty master." + }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "description": "This release makes more API operations available to customers in version 2 of the Amazon SES API. With these additions, customers can now access sending authorization, custom verification email, and template API operations. With this release, Amazon SES is also providing new and updated APIs to allow customers to request production access." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "EC2 On-Demand Capacity Reservations now adds support to bring your own licenses (BYOL) of Windows operating system to launch EC2 instances." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "This release adds support for ProvisionProduct, UpdateProvisionedProduct & DescribeProvisioningParameters by product name, provisioning artifact name and path name. In addition DescribeProvisioningParameters now returns a list of provisioning artifact outputs." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "Documentation updates for AWS Organizations" + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "Amazon MSK has added a new API that allows you to reboot brokers within a cluster." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Documentation updates for CloudFront" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.66.json b/.changes/2.13.66.json new file mode 100644 index 000000000000..cae6c8b02b61 --- /dev/null +++ b/.changes/2.13.66.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.66", + "date": "2020-07-31", + "entries": [ + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "description": "Adds support to use filters with Personalized Ranking recipe" + }, + { + "type": "feature", + "category": "AWS Resource Groups Tagging API", + "description": "Updates to the list of services supported by this API." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "description": "Add ManagedByFirewallManager flag to the logging configuration, which indicates whether AWS Firewall Manager controls the configuration." 
+ }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "This release increases the CreateMeetingWithAttendee max attendee limit to 10." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Add support for gateway VM deprecation dates" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.67.json b/.changes/2.13.67.json new file mode 100644 index 000000000000..c12a733a294b --- /dev/null +++ b/.changes/2.13.67.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.67", + "date": "2020-08-03", + "entries": [ + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Fixed an issue where, under rare circumstances, streaming request bytes could be misordered." + }, + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Fixed an issue that could cause \"Data read has a different checksum than expected\" errors." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Adds a waiter for CommandExecuted and paginators for various other APIs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.68.json b/.changes/2.13.68.json new file mode 100644 index 000000000000..a8ba0123a7c3 --- /dev/null +++ b/.changes/2.13.68.json @@ -0,0 +1,16 @@ +{ + "version": "2.13.68", + "date": "2020-08-04", + "entries": [ + { + "type": "feature", + "category": "AWS Health APIs and Notifications", + "description": "Documentation updates for health" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.69.json b/.changes/2.13.69.json new file mode 100644 index 000000000000..7ae24c31ba3c --- /dev/null +++ b/.changes/2.13.69.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.69", + "date": "2020-08-05", + "entries": [ + { + "type": "feature", + "category": "AWS AppSync", + "description": "AWS AppSync releases support for Direct Lambda Resolvers." + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "description": "Documentation updates for SNS." + }, + { + "type": "feature", + "category": "AWS Resource Groups Tagging API", + "description": "Documentation updates for the Resource Group Tagging API namespace." + }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "Documentation updates for StorageCapacity input value format." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "Amazon Transcribe now supports custom language models, which can improve transcription accuracy for your specific use case." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.7.json b/.changes/2.13.7.json new file mode 100644 index 000000000000..2f0e8e0869dd --- /dev/null +++ b/.changes/2.13.7.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.7", + "date": "2020-04-30", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for including AFD signaling in MXF wrapper." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Adding support for S3_INTELLIGENT_TIERING as a storage class option" + }, + { + "type": "feature", + "category": "Schemas", + "description": "Add support for resource policies for Amazon EventBridge Schema Registry, which is now generally available." + }, + { + "type": "feature", + "category": "AWS IoT Events", + "description": "Doc only update to correct APIs and related descriptions" + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "AWS IoT Core released Fleet Provisioning for scalable onboarding of IoT devices to the cloud. This release includes support for customer's Lambda functions to validate devices during onboarding. Fleet Provisioning also allows devices to send Certificate Signing Requests (CSR) to AWS IoT Core for signing and getting a unique certificate. Lastly, AWS IoT Core added a feature to register the same certificate for multiple accounts in the same region without needing to register the certificate authority (CA)." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Made OperationContext a public interface and moved it into public namespace as it was already exposed through another public interface. This will only impact extensions that have been written to reference the old internal-only class that should now switch to the approved stable public interface." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Documentation updates for Lambda" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.70.json b/.changes/2.13.70.json new file mode 100644 index 000000000000..d73c4e5455a0 --- /dev/null +++ b/.changes/2.13.70.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.70", + "date": "2020-08-06", + "entries": [ + { + "type": "feature", + "category": "Amazon Lex Runtime Service", + "description": "Amazon Lex supports intent classification confidence scores along with a list of the top five intents." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release supports Wavelength resources, including carrier gateways, and carrier IP addresses." + }, + { + "type": "feature", + "category": "Amazon Personalize Events", + "description": "Adds support implicit and explicit impression input" + }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "description": "Adds support for implicit impressions" + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "description": "Amazon Lex supports the option to enable accuracy improvements and specify an intent classification confidence score threshold." + }, + { + "type": "feature", + "category": "Amazon Personalize", + "description": "Add 'exploration' functionality" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.71.json b/.changes/2.13.71.json new file mode 100644 index 000000000000..c35a5818c4c3 --- /dev/null +++ b/.changes/2.13.71.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.71", + "date": "2020-08-07", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Updates Amazon S3 API reference documentation." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "Documentation updates for some new error reasons." 
+ }, + { + "type": "feature", + "category": "AWS Server Migration Service", + "description": "In this release, AWS Server Migration Service (SMS) has added new features: 1. APIs to work with application and instance level validation 2. Import application catalog from AWS Application Discovery Service 3. For an application you can start on-demand replication" + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Fixed the issue where `connectionTimeToLive` was not allowed to be set to zero. See [#1976](https://github.com/aws/aws-sdk-java-v2/issues/1976)" + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "AWS Glue now adds support for Network connection type enabling you to access resources inside your VPC using Glue crawlers and Glue ETL jobs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.72.json b/.changes/2.13.72.json new file mode 100644 index 000000000000..15f887f8a33b --- /dev/null +++ b/.changes/2.13.72.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.72", + "date": "2020-08-10", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Remove CoIP Auto-Assign feature references." + }, + { + "type": "feature", + "category": "AWS Savings Plans", + "description": "Updates to the list of services supported by this API." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Starting today, you can further control orchestration of your ETL workloads in AWS Glue by specifying the maximum number of concurrent runs for a Glue workflow." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.73.json b/.changes/2.13.73.json new file mode 100644 index 000000000000..2f307039c7ce --- /dev/null +++ b/.changes/2.13.73.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.73", + "date": "2020-08-11", + "entries": [ + { + "type": "feature", + "category": "AWS Organizations", + "description": "Minor documentation update for AWS Organizations" + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Support Managed Streaming for Kafka as an Event Source. Support retry until record expiration for Kinesis and Dynamodb streams event source mappings." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Add support for in-region CopyObject and UploadPartCopy through S3 Access Points" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release rolls back the EC2 On-Demand Capacity Reservations (ODCRs) release 1.11.831 published on 2020-07-30, which was deployed in error." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.74.json b/.changes/2.13.74.json new file mode 100644 index 000000000000..6272523921e7 --- /dev/null +++ b/.changes/2.13.74.json @@ -0,0 +1,51 @@ +{ + "version": "2.13.74", + "date": "2020-08-12", + "entries": [ + { + "type": "feature", + "category": "AWS IoT", + "description": "Audit finding suppressions: Device Defender enables customers to turn off non-compliant findings for specific resources on a per check basis." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Introduces support for IPv6-in-IPv4 IPsec tunnels. 
A user can now send traffic from their on-premise IPv6 network to AWS VPCs that have IPv6 support enabled." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Support for creating Lambda Functions using 'java8.al2' and 'provided.al2'" + }, + { + "type": "feature", + "category": "AWS Cloud9", + "description": "Add ConnectionType input parameter to CreateEnvironmentEC2 endpoint. New parameter enables creation of environments with SSM connection." + }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "This release adds the capability to create persistent file systems for throughput-intensive workloads using Hard Disk Drive (HDD) storage and an optional read-only Solid-State Drive (SSD) cache." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Adds optional EnableWorkDocs property to WorkspaceCreationProperties in the ModifyWorkspaceCreationProperties API" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Added an \"unsafe\" way to retrieve a byte array from `SdkBytes` and `ResponseBytes` without copying the data." + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "description": "Adds security policies to control cryptographic algorithms advertised by your server, additional characters in usernames and length increase, and FIPS compliant endpoints in the US and Canada regions." + }, + { + "type": "feature", + "category": "Amazon Comprehend", + "description": "Amazon Comprehend Custom Entity Recognition now supports Spanish, German, French, Italian and Portuguese and up to 25 entity types per model." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.75.json b/.changes/2.13.75.json new file mode 100644 index 000000000000..ba43c9199bc4 --- /dev/null +++ b/.changes/2.13.75.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.75", + "date": "2020-08-13", + "entries": [ + { + "type": "feature", + "category": "Braket", + "description": "Amazon Braket general availability with Device and Quantum Task operations." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "Adding ability to customize expiry for Refresh, Access and ID tokens." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release of the Amazon Macie API includes miscellaneous updates and improvements to the documentation." + }, + { + "type": "feature", + "category": "AWS AppSync", + "description": "Documentation update for AWS AppSync support for Direct Lambda Resolvers." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Added MapCustomerOwnedIpOnLaunch and CustomerOwnedIpv4Pool to ModifySubnetAttribute to allow CoIP auto assign. Fields are returned in DescribeSubnets and DescribeNetworkInterfaces responses." + }, + { + "type": "bugfix", + "category": "CloudWatch Metrics Publisher", + "description": "Fixed a bug where `CloudWatchPublisher#close` would not always complete flushing pending metrics before returning." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release allows customers to specify a replica mode when creating or modifying a Read Replica, for DB engines which support this feature." + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "Adding support for customer provided EC2 launch templates and AMIs to EKS Managed Nodegroups. 
Also adds support for Arm-based instances to EKS Managed Nodegroups." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.76.json b/.changes/2.13.76.json new file mode 100644 index 000000000000..096e24822ce4 --- /dev/null +++ b/.changes/2.13.76.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.76", + "date": "2020-08-14", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "New C5ad instances featuring AMD's 2nd Generation EPYC processors, offering up to 96 vCPUs, 192 GiB of instance memory, 3.8 TB of NVMe based SSD instance storage, and 20 Gbps in Network bandwidth" + }, + { + "type": "feature", + "category": "AWS License Manager", + "description": "This release includes ability to enforce license assignment rules with EC2 Dedicated Hosts." + }, + { + "type": "feature", + "category": "Braket", + "description": "Fixing bug in our SDK model where device status and device type had been flipped." + }, + { + "type": "feature", + "category": "Amazon AppStream", + "description": "Adds support for the Desktop View feature" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Amazon SageMaker now supports 1) creating real-time inference endpoints using model container images from Docker registries in customers' VPC 2) AUC(Area under the curve) as AutoPilot objective metric" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.8.json b/.changes/2.13.8.json new file mode 100644 index 000000000000..62b0bbab76b4 --- /dev/null +++ b/.changes/2.13.8.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.8", + "date": "2020-05-01", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "description": "Change the TagKeys argument for UntagResource to a URL parameter to address an issue with the Java and .NET SDKs." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Added TimeoutSeconds as part of ListCommands API response." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.9.json b/.changes/2.13.9.json new file mode 100644 index 000000000000..8f1ef3329be9 --- /dev/null +++ b/.changes/2.13.9.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.9", + "date": "2020-05-04", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS S3 Control", + "description": "Amazon S3 Batch Operations now supports Object Lock." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "With this release, you can include enriched metadata in Amazon Virtual Private Cloud (Amazon VPC) flow logs published to Amazon CloudWatch Logs or Amazon Simple Storage Service (S3). Prior to this, custom format VPC flow logs enriched with additional metadata could be published only to S3. With this launch, we are also adding additional metadata fields that provide insights about the location such as AWS Region, AWS Availability Zone, AWS Local Zone, AWS Wavelength Zone, or AWS Outpost where the network interface where flow logs are captured exists." 
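The 2.13.74 SDK entry above adds an "unsafe", copy-free way to read the bytes backing `SdkBytes` and `ResponseBytes`. A minimal sketch follows, assuming the accessor is `asByteArrayUnsafe()` with a matching `fromByteArrayUnsafe(byte[])` factory; only use these when the array will not be mutated afterwards.

```java
import java.nio.charset.StandardCharsets;

import software.amazon.awssdk.core.SdkBytes;

public class UnsafeBytesExample {
    public static void main(String[] args) {
        byte[] original = "hello".getBytes(StandardCharsets.UTF_8);

        // Unlike fromByteArray/asByteArray, the "unsafe" variants skip the defensive copy.
        SdkBytes bytes = SdkBytes.fromByteArrayUnsafe(original);
        byte[] view = bytes.asByteArrayUnsafe();

        // Typically prints true: the same backing array is returned, no copy is made.
        System.out.println(view == original);
    }
}
```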
+ }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Bugfix for handling special characters ':' and '#' in attribute names" + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "description": "Documentation updates for Amazon API Gateway" + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.0.json b/.changes/2.14.0.json new file mode 100644 index 000000000000..2225d74e19eb --- /dev/null +++ b/.changes/2.14.0.json @@ -0,0 +1,51 @@ +{ + "version": "2.14.0", + "date": "2020-08-17", + "entries": [ + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "This feature adds support for pushing and pulling Open Container Initiative (OCI) artifacts." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "The client-side metrics feature is out of developer preview and is now generally available." + }, + { + "type": "feature", + "category": "Amazon Kinesis", + "description": "Introducing ShardFilter for ListShards API to filter the shards using a position in the stream, and ChildShards support for GetRecords and SubscribeToShard API to discover children shards on shard end" + }, + { + "type": "feature", + "category": "AWS RoboMaker", + "description": "This release introduces RoboMaker Simulation WorldForge, a capability that automatically generates one or more simulation worlds." + }, + { + "type": "feature", + "category": "AWS Certificate Manager Private Certificate Authority", + "description": "ACM Private CA is launching cross-account support. This allows customers to share their private CAs with other accounts, AWS Organizations, and organizational units to issue end-entity certificates." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "Adds support for HTTP Desync Mitigation in Application Load Balancers." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "Adds support for HTTP Desync Mitigation in Classic Load Balancers." + }, + { + "type": "feature", + "category": "AWS Certificate Manager", + "description": "ACM provides support for the new Private CA feature Cross-account CA sharing. ACM users can issue certificates signed by a private CA belonging to another account where the CA was shared with them." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Amazon QuickSight now supports programmatic creation and management of analyses with new APIs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.1.json b/.changes/2.14.1.json new file mode 100644 index 000000000000..c24a44233c05 --- /dev/null +++ b/.changes/2.14.1.json @@ -0,0 +1,36 @@ +{ + "version": "2.14.1", + "date": "2020-08-18", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Email Service", + "description": "This release includes new APIs to allow customers to add or remove email addresses from their account-level suppression list in bulk." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "New details for DynamoDB tables, Elastic IP addresses, IAM policies and users, RDS DB clusters and snapshots, and Secrets Manager secrets. Added details for AWS KMS keys and RDS DB instances." 
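For the 2.14.0 entry above announcing that client-side metrics are generally available, here is a hedged sketch of wiring a CloudWatch metric publisher into a client; it assumes the `cloudwatch-metric-publisher` module is on the classpath and uses DynamoDB only as an example client.

```java
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.metrics.publishers.cloudwatch.CloudWatchMetricPublisher;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;

public class ClientMetricsExample {
    public static void main(String[] args) {
        // Publishes client-side metrics (API call duration, retries, etc.) to CloudWatch.
        CloudWatchMetricPublisher publisher = CloudWatchMetricPublisher.create();

        DynamoDbClient dynamoDb = DynamoDbClient.builder()
                .overrideConfiguration(ClientOverrideConfiguration.builder()
                        .addMetricPublisher(publisher)
                        .build())
                .build();

        dynamoDb.listTables(); // metrics for this call are collected and published asynchronously

        dynamoDb.close();
        publisher.close(); // flush any buffered metrics before shutting down
    }
}
```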
+ }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "Documentation updates for codebuild" + }, + { + "type": "feature", + "category": "AWS DataSync", + "description": "DataSync support for filters as input arguments to the ListTasks and ListLocations API calls." + }, + { + "type": "feature", + "category": "AWS SSO Identity Store", + "description": "AWS Single Sign-On (SSO) Identity Store service provides an interface to retrieve all of your users and groups. It enables entitlement management per user or group for AWS SSO and other IDPs." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "Adding the option to use a service linked role to publish events to Pinpoint." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.10.json b/.changes/2.14.10.json new file mode 100644 index 000000000000..39eb78700bdf --- /dev/null +++ b/.changes/2.14.10.json @@ -0,0 +1,26 @@ +{ + "version": "2.14.10", + "date": "2020-09-02", + "entries": [ + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release of the Amazon Macie API introduces additional statistics for the size and count of Amazon S3 objects that Macie can analyze as part of a classification job." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds a new transit gateway attachment state and resource type." + }, + { + "type": "feature", + "category": "DynamoDB Enhanced Client", + "description": "Support for mapping to and from immutable Java objects using ImmutableTableSchema and StaticImmutableTableSchema." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.11.json b/.changes/2.14.11.json new file mode 100644 index 000000000000..ce017974be8c --- /dev/null +++ b/.changes/2.14.11.json @@ -0,0 +1,26 @@ +{ + "version": "2.14.11", + "date": "2020-09-03", + "entries": [ + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "GuardDuty findings triggered by failed events now include the error code name within the AwsApiCallAction section." + }, + { + "type": "feature", + "category": "AWS Step Functions", + "description": "This release of the AWS Step Functions SDK introduces support for payloads up to 256KB for Standard and Express workflows" + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "Amazon Kendra now returns confidence scores for both 'answer' and 'question and answer' query responses." + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage", + "description": "Enables inserting a UTCTiming XML tag in the output manifest of a DASH endpoint which a media player will use to help with time synchronization." 
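The 2.14.10 DynamoDB Enhanced Client entry above adds mapping for immutable Java objects. Here is a sketch under the assumption that the documented `@DynamoDbImmutable(builder = ...)` pattern with fluent accessors applies; the class and attribute names are illustrative only.

```java
import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;

@DynamoDbImmutable(builder = Customer.Builder.class)
public final class Customer {
    private final String id;
    private final String name;

    private Customer(Builder b) {
        this.id = b.id;
        this.name = b.name;
    }

    @DynamoDbPartitionKey
    public String id() { return id; }

    public String name() { return name; }

    public static Builder builder() { return new Builder(); }

    public static final class Builder {
        private String id;
        private String name;

        public Builder id(String id) { this.id = id; return this; }
        public Builder name(String name) { this.name = name; return this; }
        public Customer build() { return new Customer(this); }
    }

    public static void main(String[] args) {
        // The schema is derived from the annotations on the immutable class and its builder.
        TableSchema<Customer> schema = TableSchema.fromImmutableClass(Customer.class);
        System.out.println(schema.itemToMap(
                Customer.builder().id("c-1").name("Ada").build(), true));
    }
}
```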
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.14.12.json b/.changes/2.14.12.json new file mode 100644 index 000000000000..af31b2c33dc0 --- /dev/null +++ b/.changes/2.14.12.json @@ -0,0 +1,26 @@ +{ + "version": "2.14.12", + "date": "2020-09-04", + "entries": [ + { + "type": "feature", + "category": "AWS X-Ray", + "description": "Enhancing CreateGroup, UpdateGroup, GetGroup and GetGroups APIs to support configuring X-Ray Insights" + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Adding support for Microsoft Office 2016 and Microsoft Office 2019 in BYOL Images" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Documentation-only updates for AWS Systems Manager" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.13.json b/.changes/2.14.13.json new file mode 100644 index 000000000000..8128d2e99fa4 --- /dev/null +++ b/.changes/2.14.13.json @@ -0,0 +1,41 @@ +{ + "version": "2.14.13", + "date": "2020-09-08", + "entries": [ + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "description": "Amazon Lex supports en-AU locale" + }, + { + "type": "feature", + "category": "AWS Common Runtime HTTP Client", + "description": "This release includes the preview release of the AWS Common Runtime HTTP client for the AWS SDK for Java v2. The code can be found in the `aws-crt-client` module." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Adds tagging support for QuickSight customization resources. A user can now specify a list of tags when creating a customization resource and use a customization ARN in QuickSight's tagging APIs." + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "description": "You can now secure HTTP APIs using Lambda authorizers and IAM authorizers. These options enable you to make flexible auth decisions using a Lambda function, or using IAM policies, respectively." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "AWS CodeBuild - Support keyword search for test cases in DescribeTestCases API. Allow deletion of reports in the report group, before deletion of report group using the deleteReports flag." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Fix for stack overflow caused by using self-referencing DynamoDB annotated classes." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "Adds support for Application Load Balancers on Outposts." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.14.json b/.changes/2.14.14.json new file mode 100644 index 000000000000..495f0da034ea --- /dev/null +++ b/.changes/2.14.14.json @@ -0,0 +1,26 @@ +{ + "version": "2.14.14", + "date": "2020-09-09", + "entries": [ + { + "type": "feature", + "category": "Amazon Kinesis Analytics", + "description": "Kinesis Data Analytics is adding new AUTOSCALING application status for applications during auto scaling and also adding FlinkRunConfigurationDescription in the ApplicationDetails." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Adding support for partitionIndexes to improve GetPartitions performance." + }, + { + "type": "feature", + "category": "Redshift Data API Service", + "description": "The Amazon Redshift Data API is generally available. 
This release enables querying Amazon Redshift data and listing various database objects." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.15.json b/.changes/2.14.15.json new file mode 100644 index 000000000000..c1ec1f9c302e --- /dev/null +++ b/.changes/2.14.15.json @@ -0,0 +1,31 @@ +{ + "version": "2.14.15", + "date": "2020-09-10", + "entries": [ + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "Update SMS message model description to clearly indicate that the MediaUrl field is reserved for future use and is not supported by Pinpoint as of today." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Bucket owner verification feature added. This feature introduces the x-amz-expected-bucket-owner and x-amz-source-expected-bucket-owner headers." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Cloudfront adds support for Brotli. You can enable brotli caching and compression support by enabling it in your Cache Policy." + }, + { + "type": "feature", + "category": "AWS Single Sign-On Admin", + "description": "This is an initial release of AWS Single Sign-On (SSO) Access Management APIs. This release adds support for SSO operations which could be used for managing access to AWS accounts." + }, + { + "type": "feature", + "category": "Amazon Elastic Block Store", + "description": "Documentation updates for Amazon EBS direct APIs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.16.json b/.changes/2.14.16.json new file mode 100644 index 000000000000..ff80482ec837 --- /dev/null +++ b/.changes/2.14.16.json @@ -0,0 +1,16 @@ +{ + "version": "2.14.16", + "date": "2020-09-11", + "entries": [ + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Adds API support for WorkSpaces Cross-Region Redirection feature." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.17.json b/.changes/2.14.17.json new file mode 100644 index 000000000000..cbb052c97d3f --- /dev/null +++ b/.changes/2.14.17.json @@ -0,0 +1,36 @@ +{ + "version": "2.14.17", + "date": "2020-09-14", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds support for the T4G instance family to the EC2 ModifyDefaultCreditSpecification and GetDefaultCreditSpecification APIs." + }, + { + "type": "feature", + "category": "Amazon Managed Blockchain", + "description": "Introducing support for Hyperledger Fabric 1.4. When using framework version 1.4, the state database may optionally be specified when creating peer nodes (defaults to CouchDB)." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix a bug in the SDK where the contents of the `AsyncRequestBody` are not included in the AWS Signature Version 4 calculation, which is required for some streaming operations such as Glacier `UploadArchive` and CloudSearch Domain `UploadDocuments`." + }, + { + "type": "feature", + "category": "AWS Step Functions", + "description": "This release of the AWS Step Functions SDK introduces support for AWS X-Ray." 
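For the `aws-crt-client` preview announced in 2.14.13 above, here is a minimal sketch of swapping the default Netty async HTTP client for the AWS Common Runtime client; as a preview module, the builder surface may still change.

```java
import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient;
import software.amazon.awssdk.services.s3.S3AsyncClient;

public class CrtHttpClientExample {
    public static void main(String[] args) {
        // Build an async S3 client that uses the AWS Common Runtime HTTP implementation
        // from the aws-crt-client module instead of the default Netty client.
        S3AsyncClient s3 = S3AsyncClient.builder()
                .httpClientBuilder(AwsCrtAsyncHttpClient.builder())
                .build();

        s3.listBuckets()
          .thenAccept(response -> response.buckets()
                  .forEach(bucket -> System.out.println(bucket.name())))
          .join();

        s3.close();
    }
}
```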
+ }, + { + "type": "feature", + "category": "Amazon DocumentDB with MongoDB compatibility", + "description": "Updated API documentation and added paginators for DescribeCertificates, DescribeDBClusterParameterGroups, DescribeDBClusterParameters, DescribeDBClusterSnapshots and DescribePendingMaintenanceActions" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.18.json b/.changes/2.14.18.json new file mode 100644 index 000000000000..142e0a161600 --- /dev/null +++ b/.changes/2.14.18.json @@ -0,0 +1,51 @@ +{ + "version": "2.14.18", + "date": "2020-09-15", + "entries": [ + { + "type": "feature", + "category": "AWS Budgets", + "description": "Documentation updates for Daily Cost and Usage budgets" + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "Amazon Transcribe now supports automatic language identification, which enables you to transcribe audio files without needing to know the language in advance." + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "Added new API's to support SASL SCRAM Authentication with MSK Clusters." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Sagemaker Ground Truth: Added support for a new Streaming feature which helps to continuously feed data and receive labels in real time. This release adds a new input and output SNS data channel." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "Amazon Kendra now returns confidence scores for 'document' query responses." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "AWS Organizations now enables you to add tags to the AWS accounts, organizational units, organization root, and policies in your organization." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "T4g instances are powered by AWS Graviton2 processors" + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports CDI (Cloud Digital Interface) inputs which enable uncompressed video from applications on Elastic Cloud Compute (EC2), AWS Media Services, and from AWS partners" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.19.json b/.changes/2.14.19.json new file mode 100644 index 000000000000..7f91a7c3786e --- /dev/null +++ b/.changes/2.14.19.json @@ -0,0 +1,41 @@ +{ + "version": "2.14.19", + "date": "2020-09-16", + "entries": [ + { + "type": "feature", + "category": "AWS Greengrass", + "description": "This release includes the ability to set run-time configuration for a Greengrass core. The Telemetry feature, also included in this release, can be configured via run-time configuration per core." + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "Customers can now provide multiple schedules within a single Data Lifecycle Manager (DLM) policy. Each schedule supports tagging, Fast Snapshot Restore (FSR) and cross region copy individually." 
+ }, + { + "type": "feature", + "category": "Amazon STS", + "description": "Make the STSCredentialsProvider stale and prefetch times configurable so clients can control when session credentials are refreshed" + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "Enhance DescribeProvisionedProduct API to allow useProvisionedProduct Name as Input, so customer can provide ProvisionedProduct Name instead of ProvisionedProduct Id to describe a ProvisionedProduct." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "description": "This release adds support for contact flows and routing profiles. For details, see the Release Notes in the Amazon Connect Administrator Guide." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "The ComplianceItemEntry Status description was updated to address Windows patches that aren't applicable." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.2.json b/.changes/2.14.2.json new file mode 100644 index 000000000000..94441c1835f8 --- /dev/null +++ b/.changes/2.14.2.json @@ -0,0 +1,41 @@ +{ + "version": "2.14.2", + "date": "2020-08-19", + "entries": [ + { + "type": "feature", + "category": "AWS Lake Formation", + "description": "Adding additional field in ListPermissions API response to return RAM resource share ARN if a resource were shared through AWS RAM service." + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "description": "Amazon Transcribe and Amazon Transcribe Medical now enable you to identify different speakers in your real-time streams with speaker identification." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "Minor documentation updates for AWS Organizations" + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "Enhance SearchProvisionedProducts API to allow queries using productName and provisioningArtifactName. Added lastProvisioningRecordId and lastSuccessfulRecordId to Read ProvisionedProduct APIs" + }, + { + "type": "feature", + "category": "Amazon Interactive Video Service", + "description": "Amazon Interactive Video Service (IVS) now offers customers the ability to create private channels, allowing customers to restrict their streams by channel or viewer." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Added WORM, tape retention lock, and custom pool features for virtual tapes." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.20.json b/.changes/2.14.20.json new file mode 100644 index 000000000000..382b6819af63 --- /dev/null +++ b/.changes/2.14.20.json @@ -0,0 +1,46 @@ +{ + "version": "2.14.20", + "date": "2020-09-17", + "entries": [ + { + "type": "feature", + "category": "Amazon Comprehend", + "description": "Amazon Comprehend now supports detecting Personally Identifiable Information (PII) entities in a document." 
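The Amazon STS entry in 2.14.19 above makes the credentials provider's stale and prefetch times configurable. A sketch follows, assuming the builder setters are named `staleTime` and `prefetchTime`; the role ARN and session name are placeholders.

```java
import java.time.Duration;

import software.amazon.awssdk.services.sts.StsClient;
import software.amazon.awssdk.services.sts.auth.StsAssumeRoleCredentialsProvider;
import software.amazon.awssdk.services.sts.model.AssumeRoleRequest;

public class StsRefreshTimesExample {
    public static void main(String[] args) {
        StsAssumeRoleCredentialsProvider provider = StsAssumeRoleCredentialsProvider.builder()
                .stsClient(StsClient.create())
                .refreshRequest(AssumeRoleRequest.builder()
                        .roleArn("arn:aws:iam::123456789012:role/example-role") // placeholder
                        .roleSessionName("example-session")
                        .build())
                // Refresh session credentials 5 minutes before expiry and treat them as
                // unusable (stale) 2 minutes before expiry.
                .prefetchTime(Duration.ofMinutes(5))
                .staleTime(Duration.ofMinutes(2))
                .build();

        System.out.println(provider.resolveCredentials().accessKeyId());
    }
}
```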
+ }, + { + "type": "feature", + "category": "Amazon API Gateway", + "description": "Adds support for mutual TLS authentication for public regional REST Apis" + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "description": "Amazon Transcribe now supports channel identification in real-time streaming, which enables you to transcribe multi-channel streaming audio." + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "description": "Adds support for mutual TLS authentication and disableAPIExecuteEndpoint for public regional HTTP Apis" + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "Amazon Kendra now supports additional file formats and metadata for FAQs." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "description": "Adds support for data plane audit logging in Amazon Elasticsearch Service." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Documentation updates for CloudFront" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.21.json b/.changes/2.14.21.json new file mode 100644 index 000000000000..b0b8755e7cfe --- /dev/null +++ b/.changes/2.14.21.json @@ -0,0 +1,26 @@ +{ + "version": "2.14.21", + "date": "2020-09-18", + "entries": [ + { + "type": "feature", + "category": "AWS Single Sign-On Admin", + "description": "Documentation updates for AWS SSO APIs." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports batch operations, which allow users to start, stop, and delete multiple MediaLive resources with a single request." + }, + { + "type": "feature", + "category": "AWS CodeStar connections", + "description": "New integration with the GitHub provider type." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.22.json b/.changes/2.14.22.json new file mode 100644 index 000000000000..5776a59791bd --- /dev/null +++ b/.changes/2.14.22.json @@ -0,0 +1,46 @@ +{ + "version": "2.14.22", + "date": "2020-09-21", + "entries": [ + { + "type": "feature", + "category": "AWS Resource Groups", + "description": "Documentation updates and corrections for Resource Groups API Reference and SDKs." + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "description": "Add support for Redshift Data API Targets" + }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "description": "This release supports IAM mode for SiteWise Monitor portals" + }, + { + "type": "feature", + "category": "AWS Resource Groups Tagging API", + "description": "Documentation updates for the Resource Groups Tagging API." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Adding support to update multiple partitions of a table in a single request" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Documentation updates for the RDS DescribeExportTasks API" + }, + { + "type": "feature", + "category": "Amazon CloudWatch Events", + "description": "Add support for Redshift Data API Targets" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.14.23.json b/.changes/2.14.23.json new file mode 100644 index 000000000000..07a28ef39188 --- /dev/null +++ b/.changes/2.14.23.json @@ -0,0 +1,36 @@ +{ + "version": "2.14.23", + "date": "2020-09-22", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Formal parameter names of synchronous streaming methods were aligned with their javadocs." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Streams", + "description": "Documentation updates for streams.dynamodb" + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "description": "Lex now supports es-US locales" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "This change makes the `FileStoreTlsKeyManagersProvider` and `SystemPropertyTlsKeyManagersProvider` respect the `ssl.KeyManagerFactory.algorithm` when instantiating the `KeyManagerFactory` rather than always using the hardcoded value of `SunX509`." + }, + { + "type": "feature", + "category": "Amazon WorkMail", + "description": "Adding support for Mailbox Export APIs" + }, + { + "type": "feature", + "category": "Amazon Comprehend", + "description": "Amazon Comprehend integrates with Amazon SageMaker GroundTruth to allow its customers to annotate their datasets using GroundTruth and train their models using Comprehend Custom APIs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.24.json b/.changes/2.14.24.json new file mode 100644 index 000000000000..7829f30c1a92 --- /dev/null +++ b/.changes/2.14.24.json @@ -0,0 +1,31 @@ +{ + "version": "2.14.24", + "date": "2020-09-23", + "entries": [ + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Added Sheet information to DescribeDashboard, DescribeTemplate and DescribeAnalysis API response." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "This release provides access to Cost Anomaly Detection Public Preview APIs. Cost Anomaly Detection finds cost anomalies based on your historical cost and usage using Machine Learning models." + }, + { + "type": "feature", + "category": "Amazon Translate", + "description": "Improvements to DeleteTerminology API." + }, + { + "type": "bugfix", + "category": "AWS DynamoDB Enhanced Client", + "description": "Fixed incorrect 'duplicate key' error triggered when flattening a TableSchema that has key tags and more than one attribute." + }, + { + "type": "feature", + "category": "AWS Backup", + "description": "This release allows customers to enable or disable advanced backup settings in backup plan. As part of this feature AWS Backup added support for Windows VSS backup option for EC2 resources." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.25.json b/.changes/2.14.25.json new file mode 100644 index 000000000000..21e5ad48c166 --- /dev/null +++ b/.changes/2.14.25.json @@ -0,0 +1,51 @@ +{ + "version": "2.14.25", + "date": "2020-09-24", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "Amazon EKS now supports configuring your cluster's service CIDR during cluster creation." 
+ }, + { + "type": "feature", + "category": "AWS Amplify", + "description": "Allow Oauth Token in CreateApp call to be a maximum of 1000 characters instead of 100" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed an issue where successful JSON service responses were required to include a payload (fixes NullPointerException originating from JsonProtocolUnmarshaller)." + }, + { + "type": "feature", + "category": "AWS Savings Plans", + "description": "Introducing Queued SavingsPlans that will enable customers to queue their purchase request of Savings Plans for future dates." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "Amazon Transcribe now supports WebM, OGG, AMR and AMR-WB as input formats. You can also specify an output key as a location within your S3 buckets to store the output of your transcription jobs." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed an issue with XML services, where the service responding with no payload would treat the payload as empty. Now, empty payloads will properly be populated within the XML response as \"null\"." + }, + { + "type": "feature", + "category": "Synthetics", + "description": "AWS Synthetics now supports AWS X-Ray Active Tracing feature. RunConfig is now an optional parameter with timeout updated from (60 - 900 seconds) to (3 - 840 seconds)." + }, + { + "type": "feature", + "category": "Amazon Textract", + "description": "AWS Textract now supports output results for asynchronous jobs to customer specified s3 bucket." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.26.json b/.changes/2.14.26.json new file mode 100644 index 000000000000..3a384fbd3e5d --- /dev/null +++ b/.changes/2.14.26.json @@ -0,0 +1,41 @@ +{ + "version": "2.14.26", + "date": "2020-09-25", + "entries": [ + { + "type": "feature", + "category": "Amazon DocumentDB with MongoDB compatibility", + "description": "Documentation updates for docdb" + }, + { + "type": "feature", + "category": "AWS Config", + "description": "Make the delivery-s3-bucket as an optional parameter for conformance packs and organizational conformance packs" + }, + { + "type": "feature", + "category": "Amazon Fraud Detector", + "description": "Increased maximum length of eventVariables values for GetEventPrediction from 256 to 1024." + }, + { + "type": "feature", + "category": "AWS Batch", + "description": "Support custom logging, executionRole, secrets, and linuxParameters (initProcessEnabled, maxSwap, swappiness, sharedMemorySize, and tmpfs). Also, add new context keys for awslogs." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release supports returning additional information about local gateway resources, such as the local gateway route table." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "AWS Security Token Service", + "description": "Documentation update for AssumeRole error" + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.27.json b/.changes/2.14.27.json new file mode 100644 index 000000000000..f51b078f5cc4 --- /dev/null +++ b/.changes/2.14.27.json @@ -0,0 +1,16 @@ +{ + "version": "2.14.27", + "date": "2020-09-28", + "entries": [ + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "This release extends Application Auto Scaling support to AWS Comprehend Entity Recognizer endpoint, allowing automatic updates to provisioned Inference Units to maintain targeted utilization level." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release adds the InsufficientAvailableIPsInSubnetFault error for RDS Proxy." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.28.json b/.changes/2.14.28.json new file mode 100644 index 000000000000..3fedaa68a4a5 --- /dev/null +++ b/.changes/2.14.28.json @@ -0,0 +1,41 @@ +{ + "version": "2.14.28", + "date": "2020-09-29", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds support for Client to Client routing for AWS Client VPN." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Simple update to description of ComplianceItemStatus." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "description": "Update TagResource API documentation to include Contact Flows and Routing Profiles as supported resources." + }, + { + "type": "feature", + "category": "Schemas", + "description": "Added support for schemas of type JSONSchemaDraft4. Added ExportSchema API that converts schemas in AWS Events registry and Discovered schemas from OpenApi3 to JSONSchemaDraft4." + }, + { + "type": "feature", + "category": "Amazon Timestream Query", + "description": "(New Service) Amazon Timestream is a fast, scalable, fully managed, purpose-built time series database that makes it easy to store and analyze trillions of time series data points per day." + }, + { + "type": "feature", + "category": "Amazon Timestream Write", + "description": "(New Service) Amazon Timestream is a fast, scalable, fully managed, purpose-built time series database that makes it easy to store and analyze trillions of time series data points per day." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.3.json b/.changes/2.14.3.json new file mode 100644 index 000000000000..8e197f6a9c9c --- /dev/null +++ b/.changes/2.14.3.json @@ -0,0 +1,26 @@ +{ + "version": "2.14.3", + "date": "2020-08-20", + "entries": [ + { + "type": "feature", + "category": "Amazon FSx", + "description": "Documentation updates for Amazon FSx" + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "Documentation updates for chime" + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "description": "Customers can now create Amazon API Gateway HTTP APIs that route requests to AWS AppConfig, Amazon EventBridge, Amazon Kinesis Data Streams, Amazon SQS, and AWS Step Functions." 
+ }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Streaming Client", + "description": "Fix for CRC not working correctly for compressed responses" + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.4.json b/.changes/2.14.4.json new file mode 100644 index 000000000000..d8e4c5ab6736 --- /dev/null +++ b/.changes/2.14.4.json @@ -0,0 +1,51 @@ +{ + "version": "2.14.4", + "date": "2020-08-24", + "entries": [ + { + "type": "feature", + "category": "AWS IoT SiteWise", + "description": "Add traversalDirection to ListAssociatedAssetsRequest and add portal status to ListPortalsResponse" + }, + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "description": "Documentation updates for CloudWatch Logs" + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "Add UpdateConfiguration and DeleteConfiguration operations." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "For JSON protocols, when unmarshalling a response, if a member is declared to be located in the URI, the member is treated as being located in the payload instead." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Add string length constraints to OpsDataAttributeName and OpsFilterValue." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "Added new endpoint settings to include columns with Null and Empty value when using Kinesis and Kafka as target. Added a new endpoint setting to set maximum message size when using Kafka as target." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release enables customers to use VPC prefix lists in their transit gateway route tables, and it adds support for Provisioned IOPS SSD (io2) EBS volumes." + }, + { + "type": "feature", + "category": "AWS X-Ray", + "description": "AWS X-Ray now supports tagging on sampling rules and groups." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.5.json b/.changes/2.14.5.json new file mode 100644 index 000000000000..8eda59e84a5f --- /dev/null +++ b/.changes/2.14.5.json @@ -0,0 +1,21 @@ +{ + "version": "2.14.5", + "date": "2020-08-26", + "entries": [ + { + "type": "feature", + "category": "Amazon Appflow", + "description": "Amazon AppFlow is a fully managed integration service that securely transfers data between AWS services and SaaS applications. This update releases the first version of Amazon AppFlow APIs and SDK." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Route 53 Resolver", + "description": "Route 53 Resolver adds support for resolver query logs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.6.json b/.changes/2.14.6.json new file mode 100644 index 000000000000..5ebe376ab27f --- /dev/null +++ b/.changes/2.14.6.json @@ -0,0 +1,31 @@ +{ + "version": "2.14.6", + "date": "2020-08-27", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for WebM DASH outputs as well as H.264 4:2:2 10-bit output in MOV and MP4." + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Documentation updates for Amazon Redshift." 
+ }, + { + "type": "feature", + "category": "Amazon GameLift", + "description": "GameLift FleetIQ as a standalone feature is now generally available. FleetIQ makes low-cost Spot instances viable for game hosting. Use GameLift FleetIQ with your EC2 Auto Scaling groups." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Introduces support to initiate Internet Key Exchange (IKE) negotiations for VPN connections from AWS. A user can now send the initial IKE message to their Customer Gateway (CGW) from VPN endpoints." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.7.json b/.changes/2.14.7.json new file mode 100644 index 000000000000..a1445a7116d8 --- /dev/null +++ b/.changes/2.14.7.json @@ -0,0 +1,31 @@ +{ + "version": "2.14.7", + "date": "2020-08-28", + "entries": [ + { + "type": "feature", + "category": "AWS Cost and Usage Report Service", + "description": "This release add MONTHLY as the new supported TimeUnit for ReportDefinition." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "You can now manage CloudFront's additional, real-time metrics with the CloudFront API." + }, + { + "type": "feature", + "category": "Amazon Route 53", + "description": "Documentation updates for Route 53" + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Amazon EMR adds support for ICMP, port -1, in Block Public Access Exceptions and API access for EMR Notebooks execution. You can now non-interactively execute EMR Notebooks and pass input parameters." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.8.json b/.changes/2.14.8.json new file mode 100644 index 000000000000..86413f31d44c --- /dev/null +++ b/.changes/2.14.8.json @@ -0,0 +1,31 @@ +{ + "version": "2.14.8", + "date": "2020-08-31", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "CloudFront now supports real-time logging for CloudFront distributions. CloudFront real-time logs are more detailed, configurable, and are available in real time." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon EC2 and Spot Fleet now support modification of launch template configs for a running fleet enabling instance type, instance weight, AZ, and AMI updates without losing the current fleet ID." + }, + { + "type": "feature", + "category": "Amazon Simple Queue Service", + "description": "Documentation updates for SQS." + }, + { + "type": "feature", + "category": "AWS Backup", + "description": "Documentation updates for Cryo" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.14.9.json b/.changes/2.14.9.json new file mode 100644 index 000000000000..f834fe151ace --- /dev/null +++ b/.changes/2.14.9.json @@ -0,0 +1,21 @@ +{ + "version": "2.14.9", + "date": "2020-09-01", + "entries": [ + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Added a PatchSummary object for security findings. The PatchSummary object provides details about the patch compliance status of an instance." 
+ }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Code Generator test failures on Windows systems were fixed." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "description": "Add support for repository analysis based code reviews" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.0.json b/.changes/2.15.0.json new file mode 100644 index 000000000000..ee7375f1426f --- /dev/null +++ b/.changes/2.15.0.json @@ -0,0 +1,76 @@ +{ + "version": "2.15.0", + "date": "2020-09-30", + "entries": [ + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Added several new resource details objects. Added additional details for CloudFront distributions, IAM roles, and IAM access keys. Added a new ResourceRole attribute for resources." + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "Amazon Pinpoint - Features - Customers can start a journey based on an event being triggered by an endpoint or user." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "AWS IoT Rules Engine adds Timestream action. The Timestream rule action lets you stream time-series data from IoT sensors and applications to Amazon Timestream databases for time series analysis." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Amazon S3 on Outposts expands object storage to on-premises AWS Outposts environments, enabling you to store and retrieve objects using S3 APIs and features." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Amazon EMR customers can now use EC2 placement group to influence the placement of master nodes in a high-availability (HA) cluster across distinct underlying hardware to improve cluster availability." + }, + { + "type": "feature", + "category": "AWS DataSync", + "description": "This release enables customers to create s3 location for S3 bucket's located on an AWS Outpost." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "This release introduces Waiters for the AWS SDK for Java v2! Waiters make it easier for customers to wait for a resource to transition into a desired state. It comes handy when customers are interacting with operations that are eventually consistent on the service side. For more information on Waiters, head on over to the [AWS Developer Blog](https://aws.amazon.com/blogs/developer/category/developer-tools/aws-sdk-for-java/) and check out the [Developer Guide](http://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/welcome.html)." + }, + { + "type": "feature", + "category": "AWS MediaConnect", + "description": "MediaConnect now supports reservations to provide a discounted rate for a specific outbound bandwidth over a period of time." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "This release extends Auto Scaling support for cluster storage of Managed Streaming for Kafka. Auto Scaling monitors and automatically expands storage capacity when a critical usage threshold is met." + }, + { + "type": "feature", + "category": "AWS S3 Control", + "description": "Amazon S3 on Outposts expands object storage to on-premises AWS Outposts environments, enabling you to store and retrieve objects using S3 APIs and features." 
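To illustrate the Waiters feature introduced for the SDK in 2.15.0 above, here is a small sketch that waits for an S3 bucket to exist instead of hand-rolling a polling loop; the bucket name is a placeholder.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.waiters.S3Waiter;

public class WaiterExample {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create(); S3Waiter waiter = s3.waiter()) {
            String bucket = "my-example-bucket"; // placeholder bucket name
            s3.createBucket(CreateBucketRequest.builder().bucket(bucket).build());

            // Polls HeadBucket until the bucket is reported as existing (or the waiter gives up).
            waiter.waitUntilBucketExists(HeadBucketRequest.builder().bucket(bucket).build());
            System.out.println("Bucket is ready: " + bucket);
        }
    }
}
```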
+ }, + { + "type": "feature", + "category": "Amazon S3 on Outposts", + "description": "Amazon S3 on Outposts expands object storage to on-premises AWS Outposts environments, enabling you to store and retrieve objects using S3 APIs and features." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "EC2 Image Builder adds support for copying AMIs created by Image Builder to accounts specific to each Region." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Direct Connect", + "description": "Documentation updates for AWS Direct Connect." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.1.json b/.changes/2.15.1.json new file mode 100644 index 000000000000..4582ffd63c47 --- /dev/null +++ b/.changes/2.15.1.json @@ -0,0 +1,41 @@ +{ + "version": "2.15.1", + "date": "2020-10-01", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Documentation updates for elasticmapreduce" + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Adding additional optional map parameter to get-plan api" + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "Added support for Enabling Zookeeper Encryption in Transit for AWS MSK." + }, + { + "type": "feature", + "category": "AWS AppSync", + "description": "Exposes the wafWebAclArn field on GraphQL api records. The wafWebAclArn field contains the amazon resource name of a WAF Web ACL if the AWS AppSync API is associated with one." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "QuickSight now supports connecting to AWS Timestream data source" + }, + { + "type": "feature", + "category": "AWS WAFV2", + "description": "AWS WAF is now available for AWS AppSync GraphQL APIs. AWS WAF protects against malicious attacks with AWS Managed Rules or your own custom rules. For more information see the AWS WAF Developer Guide." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.10.json b/.changes/2.15.10.json new file mode 100644 index 000000000000..ad3529cf9916 --- /dev/null +++ b/.changes/2.15.10.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.10", + "date": "2020-10-19", + "entries": [ + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "An Admin can now update the launch role associated with a Provisioned Product. Admins and End Users can now view the launch role associated with a Provisioned Product." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This Patch Manager release now supports Common Vulnerabilities and Exposure (CVE) Ids for missing packages via the DescribeInstancePatches API." + }, + { + "type": "feature", + "category": "HTTP Client SPI", + "description": "Calling the SdkHttpFullRequest uri() builder method, query parameters of the provided URI will be kept.\nThis can be useful in case you want to provide an already fully formed URI like a callback URI." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Amazon CloudFront adds support for Origin Shield." 
+ }, + { + "type": "feature", + "category": "Amazon DocumentDB with MongoDB compatibility", + "description": "Documentation updates for docdb" + }, + { + "type": "feature", + "category": "AWS Backup", + "description": "Documentation updates for Cryo" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.11.json b/.changes/2.15.11.json new file mode 100644 index 000000000000..5f5b3016ea01 --- /dev/null +++ b/.changes/2.15.11.json @@ -0,0 +1,46 @@ +{ + "version": "2.15.11", + "date": "2020-10-20", + "entries": [ + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Fixed an issue where metrics were not being collected for Amazon S3 (or other XML services)" + }, + { + "type": "feature", + "category": "AWS Batch", + "description": "Adding evaluateOnExit to job retry strategies." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed an issue where requestBody and asyncRequestBody were not visible in ExecutionInterceptor.afterMarshalling." + }, + { + "type": "feature", + "category": "AWS Elastic Beanstalk", + "description": "EnvironmentStatus enum update to include Aborting, LinkingFrom and LinkingTo" + }, + { + "type": "feature", + "category": "AWS AppSync", + "description": "Documentation updates to AppSync to correct several typos." + }, + { + "type": "feature", + "category": "Amazon S3", + "description": "Moved the logic for calculating the Content-MD5 checksums from s3 to sdk-core. As always, make sure to use a version of 'sdk-core' greater than or equal to your version of 's3'. If you use an old version of 'sdk-core' and a new version of 's3', you will receive errors that Content-MD5 is required." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Fix for handling special characters in attribute names with WRITE_IF_NOT_EXISTS update behavior" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.12.json b/.changes/2.15.12.json new file mode 100644 index 000000000000..dd77de617bf4 --- /dev/null +++ b/.changes/2.15.12.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.12", + "date": "2020-10-21", + "entries": [ + { + "type": "feature", + "category": "AWS Global Accelerator", + "description": "This release adds support for specifying port overrides on AWS Global Accelerator endpoint groups." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "AWS Organizations renamed the 'master account' to 'management account'." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "This release adds custom data sources: a new data source type that gives you full control of the documents added, modified or deleted during a data source sync while providing run history metrics." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "AWS Glue crawlers now support incremental crawls for the Amazon Simple Storage Service (Amazon S3) data source." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "instance-storage-info nvmeSupport added to DescribeInstanceTypes API" + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "CloudFront adds support for managing the public keys for signed URLs and signed cookies directly in CloudFront (it no longer requires the AWS root account)." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.13.json b/.changes/2.15.13.json new file mode 100644 index 000000000000..293eccb6f4cb --- /dev/null +++ b/.changes/2.15.13.json @@ -0,0 +1,31 @@ +{ + "version": "2.15.13", + "date": "2020-10-22", + "entries": [ + { + "type": "feature", + "category": "Amazon Appflow", + "description": "Salesforce connector creation with customer provided client id and client secret, incremental pull configuration, salesforce upsert write operations and execution ID when on-demand flows are executed." + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "description": "SNS now supports a new class of topics: FIFO (First-In-First-Out). FIFO topics provide strictly-ordered, deduplicated, filterable, encryptable, many-to-many messaging at scale." + }, + { + "type": "feature", + "category": "Access Analyzer", + "description": "API Documentation updates for IAM Access Analyzer." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "Documentation updates for servicecatalog" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.14.json b/.changes/2.15.14.json new file mode 100644 index 000000000000..1fcb96571caa --- /dev/null +++ b/.changes/2.15.14.json @@ -0,0 +1,21 @@ +{ + "version": "2.15.14", + "date": "2020-10-23", + "entries": [ + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release of the Amazon Macie API includes miscellaneous updates and improvements to the documentation." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Support description on columns." + }, + { + "type": "feature", + "category": "AWS MediaTailor", + "description": "MediaTailor now supports ad marker passthrough for HLS. Use AdMarkerPassthrough to pass EXT-X-CUE-IN, EXT-X-CUE-OUT, and EXT-X-SPLICEPOINT-SCTE35 from origin manifests into personalized manifests." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.15.json b/.changes/2.15.15.json new file mode 100644 index 000000000000..2d738bc63a8a --- /dev/null +++ b/.changes/2.15.15.json @@ -0,0 +1,30 @@ +{ + "version": "2.15.15", + "date": "2020-10-26", + "entries": [ + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This release enables customers to bring custom images for use with SageMaker Studio notebooks." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "contributor": "", + "description": "Amazon Kendra now supports indexing data from Confluence Server." + }, + { + "type": "feature", + "category": "Amazon Neptune", + "contributor": "", + "description": "This feature enables custom endpoints for Amazon Neptune clusters. Custom endpoints simplify connection management when clusters contain instances with different capacities and configuration settings." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.16.json b/.changes/2.15.16.json new file mode 100644 index 000000000000..11bf666c2268 --- /dev/null +++ b/.changes/2.15.16.json @@ -0,0 +1,18 @@ +{ + "version": "2.15.16", + "date": "2020-10-27", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "AWS Glue machine learning transforms now support encryption-at-rest for labels and trained models." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.17.json b/.changes/2.15.17.json new file mode 100644 index 000000000000..bd993a2fa2ee --- /dev/null +++ b/.changes/2.15.17.json @@ -0,0 +1,42 @@ +{ + "version": "2.15.17", + "date": "2020-10-28", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "This release adds support for GG-Managed Job Namespace" + }, + { + "type": "feature", + "category": "Amazon WorkMail", + "contributor": "", + "description": "Documentation update for Amazon WorkMail" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": null, + "description": "Fixed an issue where marshalling of a modeled object was not honoring the has* method on a list/map." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "AWS Nitro Enclaves general availability. Added support to RunInstances for creating enclave-enabled EC2 instances. New APIs to associate an ACM certificate with an IAM role, for enclave consumption." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": null, + "description": "Fixed an issue where the toString/equals/hashCode on a modeled object were not honoring the has* methods for lists and maps." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.18.json b/.changes/2.15.18.json new file mode 100644 index 000000000000..8cbbc17e81db --- /dev/null +++ b/.changes/2.15.18.json @@ -0,0 +1,48 @@ +{ + "version": "2.15.18", + "date": "2020-10-29", + "entries": [ + { + "type": "feature", + "category": "AWS Marketplace Commerce Analytics", + "contributor": "", + "description": "Documentation updates for marketplacecommerceanalytics to specify four data sets which are deprecated." + }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "contributor": "", + "description": "This release enables customers to manage their own contact lists and end-user subscription preferences." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "Application Load Balancer (ALB) now supports the gRPC protocol-version. With this release, customers can use ALB to route and load balance gRPC traffic between gRPC enabled clients and microservices." 
+ }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "contributor": "", + "description": "Adding support for access based enumeration on SMB file shares, file share visibility on SMB file shares, and file upload notifications for all file shares" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Support for Appliance mode on Transit Gateway that simplifies deployment of stateful network appliances. Added support for AWS Client VPN Self-Service Portal." + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "contributor": "", + "description": "Support disabling the default execute-api endpoint for REST APIs." + }, + { + "type": "feature", + "category": "CodeArtifact", + "contributor": "", + "description": "Add support for tagging of CodeArtifact domain and repository resources." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.19.json b/.changes/2.15.19.json new file mode 100644 index 000000000000..16c9e44bb4c6 --- /dev/null +++ b/.changes/2.15.19.json @@ -0,0 +1,54 @@ +{ + "version": "2.15.19", + "date": "2020-10-30", + "entries": [ + { + "type": "feature", + "category": "Braket", + "contributor": "", + "description": "This release supports tagging for Amazon Braket quantum-task resources. It also supports tag-based access control for quantum-task APIs." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixing FilteringSubscriber and LimitingSubscriber to complete when subscribing criteria is completed." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Adding DocDbSettings to support DocumentDB as a source." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "contributor": "", + "description": "This feature increases the number of accounts that can be added to the Launch permissions within an Image Builder Distribution configuration." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "contributor": "", + "description": "Documentation updates for AWS ElastiCache" + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "contributor": "", + "description": "Documentation updates for Amazon SNS" + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "contributor": "", + "description": "This release of the Amazon Macie API adds an eqExactMatch operator for filtering findings. With this operator you can increase the precision of your finding filters and suppression rules." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "Support for HLS discontinuity tags in the child manifests. Support for incomplete segment behavior in the media output. Support for automatic input failover condition settings." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.2.json b/.changes/2.15.2.json new file mode 100644 index 000000000000..ed55ebaa432a --- /dev/null +++ b/.changes/2.15.2.json @@ -0,0 +1,46 @@ +{ + "version": "2.15.2", + "date": "2020-10-02", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Amazon S3 Object Ownership is a new S3 feature that enables bucket owners to automatically assume ownership of objects that are uploaded to their buckets by other AWS Accounts." 
+ }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "This release adds support for tagging listeners, rules, and target groups on creation. This release also supported tagging operations through tagging api's for listeners and rules." + }, + { + "type": "feature", + "category": "AWS Cloud Map", + "description": "Added support for optional parameters for DiscoverInstances API in AWS Cloud Map" + }, + { + "type": "feature", + "category": "Amazon Personalize Events", + "description": "Adds new APIs to write item and user records to Datasets." + }, + { + "type": "feature", + "category": "AWS Batch", + "description": "Support tagging for Batch resources (compute environment, job queue, job definition and job) and tag based access control on Batch APIs" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Adds the NCHAR Character Set ID parameter to the CreateDbInstance API for RDS Oracle." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "bugfix", + "category": "Apache HTTP Client", + "description": "Fixed an issue in Apache HTTP client where a request with path parameter as a single slash threw invalid host name error." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.20.json b/.changes/2.15.20.json new file mode 100644 index 000000000000..01a97f5581b6 --- /dev/null +++ b/.changes/2.15.20.json @@ -0,0 +1,48 @@ +{ + "version": "2.15.20", + "date": "2020-11-02", + "entries": [ + { + "type": "bugfix", + "category": "AWS DynamoDB Enhanced Client", + "contributor": "", + "description": "Publisher streams returned by async resources in the DynamoDB Enhanced Client now correctly handle mapping errors when they are encountered in the stream by calling onError on the subscriber and then implicitly cancelling the subscription. Previously the stream would just permanently hang and never complete." + }, + { + "type": "deprecation", + "category": "AWS SSO OIDC", + "contributor": "", + "description": "Renamed/deprecated 'error_description' fields in exceptions in favor of 'errorDescription'." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Added code generation validation that customer-visible identifiers are idiomatic (do not contain underscores). Services with underscores in their models can use rename customizations to fix these issues, or apply the 'underscoresInNameBehavior = ALLOW' customization." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Upgrade `org.apache.httpcomponents:httpclient` version to `4.5.13`" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": null, + "description": "Fixing race condition in EventStreamAsyncResponseTransformer. Field eventsToDeliver is a LinkedList, i.e., not thread-safe. Accesses to field eventsToDeliver are protected by synchronization on itself, but not in 1 location." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "The mapped publisher returned by SdkPublisher.map will now handle exceptions thrown by the mapping function by calling onError on its subscriber and then cancelling the subscription rather than throwing it back to the publishing process when it attempts to publish data." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for the following features: 1. P4d instances based on NVIDIA A100 GPUs. 2. NetworkCardIndex attribute to support multiple network cards." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.21.json b/.changes/2.15.21.json new file mode 100644 index 000000000000..6251577513f5 --- /dev/null +++ b/.changes/2.15.21.json @@ -0,0 +1,72 @@ +{ + "version": "2.15.21", + "date": "2020-11-04", + "entries": [ + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "Service Catalog API ListPortfolioAccess can now support a maximum PageSize of 100." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "contributor": "", + "description": "Amazon Elasticsearch Service now supports native SAML authentication that seamlessly integrates with the customers' existing SAML 2.0 Identity Provider (IdP)." + }, + { + "type": "feature", + "category": "AWSMarketplace Metering", + "contributor": "", + "description": "Adding Vendor Tagging Support in MeterUsage and BatchMeterUsage API." + }, + { + "type": "feature", + "category": "AmazonMQ", + "contributor": "", + "description": "Amazon MQ introduces support for RabbitMQ, a popular message-broker with native support for AMQP 0.9.1. You can now create fully-managed RabbitMQ brokers in the cloud." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Capacity Rebalance helps you manage and maintain workload availability during Spot interruptions by proactively augmenting your Auto Scaling group with a new instance before interrupting an old one." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Added support for Client Connect Handler for AWS Client VPN. Fleet supports launching replacement instances in response to Capacity Rebalance recommendation." + }, + { + "type": "feature", + "category": "AWS X-Ray", + "contributor": "", + "description": "Releasing new APIs GetInsightSummaries, GetInsightEvents, GetInsight, GetInsightImpactGraph and updating GetTimeSeriesServiceStatistics API for AWS X-Ray Insights feature" + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "contributor": "", + "description": "Documentation updates for monitoring" + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "contributor": "", + "description": "With this release, Amazon Transcribe now supports real-time transcription from audio sources in Italian (it-IT) and German(de-DE)." + }, + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "Updated API documentation and added paginator for AWS Iot Registry ListThingPrincipals API." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.22.json b/.changes/2.15.22.json new file mode 100644 index 000000000000..ca04f429f1d0 --- /dev/null +++ b/.changes/2.15.22.json @@ -0,0 +1,72 @@ +{ + "version": "2.15.22", + "date": "2020-11-05", + "entries": [ + { + "type": "feature", + "category": "Amazon Fraud Detector", + "contributor": "", + "description": "Added support for deleting resources like Variables, ExternalModels, Outcomes, Models, ModelVersions, Labels, EventTypes and EntityTypes. 
Updated DeleteEvent operation to catch missing exceptions." + }, + { + "type": "feature", + "category": "AWS App Mesh", + "contributor": "", + "description": "This release adds circuit breaking capabilities to your mesh with connection pooling and outlier detection support." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "This release adds a new ReplicaStatus INACCESSIBLE_ENCRYPTION_CREDENTIALS for the Table description, indicating when a key used to encrypt a regional replica table is not accessible." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "contributor": "", + "description": "With this release, customers can now reprocess past events by storing the events published on event bus in an encrypted archive." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Events", + "contributor": "", + "description": "With this release, customers can now reprocess past events by storing the events published on event bus in an encrypted archive." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "contributor": "", + "description": "Amazon Elasticsearch Service now provides the ability to define a custom endpoint for your domain and link an SSL certificate from ACM, making it easier to refer to Kibana and the domain endpoint." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Supports a new parameter to set the max allocated storage in gigabytes for the CreateDBInstanceReadReplica API." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Support Amazon MQ as an Event Source." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Documentation updates for EC2." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "contributor": "", + "description": "Amazon Kendra now supports providing user context in your query requests, Tokens can be JSON or JWT format. This release also introduces support for Confluence cloud datasources." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.23.json b/.changes/2.15.23.json new file mode 100644 index 000000000000..403b6fb79a9d --- /dev/null +++ b/.changes/2.15.23.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.23", + "date": "2020-11-06", + "entries": [ + { + "type": "feature", + "category": "AWS IoT SiteWise", + "contributor": "", + "description": "Remove the CreatePresignedPortalUrl API" + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "Support for SCTE35 ad markers in OnCuePoint style in RTMP outputs." 
+ }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "contributor": "", + "description": "Amazon Data Lifecycle Manager now supports the creation and retention of EBS-backed Amazon Machine Images" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "Documentation updates for Systems Manager" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Network card support with four new attributes: NetworkCardIndex, NetworkPerformance, DefaultNetworkCardIndex, and MaximumNetworkInterfaces, added to the DescribeInstanceTypes API." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.24.json b/.changes/2.15.24.json new file mode 100644 index 000000000000..47b0cda8e15c --- /dev/null +++ b/.changes/2.15.24.json @@ -0,0 +1,66 @@ +{ + "version": "2.15.24", + "date": "2020-11-09", + "entries": [ + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "contributor": "", + "description": "Adding support for package versioning in Amazon Elasticsearch Service" + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "contributor": "", + "description": "Added bandwidth rate limit schedule for Tape and Volume Gateways" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "This release adds supports for exporting Amazon DynamoDB table data to Amazon S3 to perform analytics at any scale." + }, + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "DataSync now enables customers to adjust the network bandwidth used by a running AWS DataSync task." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "S3 Intelligent-Tiering adds support for Archive and Deep Archive Access tiers; S3 Replication adds replication metrics and failure notifications, brings feature parity for delete marker replication" + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "This release adds support for creating DNS aliases for Amazon FSx for Windows File Server, and using AWS Backup to automate scheduled, policy-driven backup plans for Amazon FSx file systems." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "add a new filter to allow customer to filter automation executions by using resource-group which used for execute automation" + }, + { + "type": "feature", + "category": "AWS IoT Analytics", + "contributor": "", + "description": "AWS IoT Analytics now supports Late Data Notifications for datasets, dataset content creation using previous version IDs, and includes the LastMessageArrivalTime attribute for channels and datastores." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This release provides native support for specifying Amazon FSx for Windows File Server file systems as volumes in your Amazon ECS task definitions." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "contributor": "", + "description": "Sensitive data findings in Amazon Macie now include enhanced location data for Apache Avro object containers and Apache Parquet files." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.25.json b/.changes/2.15.25.json new file mode 100644 index 000000000000..3af2421867a2 --- /dev/null +++ b/.changes/2.15.25.json @@ -0,0 +1,42 @@ +{ + "version": "2.15.25", + "date": "2020-11-10", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for Gateway Load Balancer VPC endpoints and VPC endpoint services" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "Add SessionId as a filter for DescribeSessions API" + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "Added support for Gateway Load Balancers, which make it easy to deploy, scale, and run third-party virtual networking appliances." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Documentation updates and corrections for Amazon EC2 Auto Scaling API Reference and SDKs." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Replaced class loading from Thread.currentThread().getContextClassLoader() to ClassLoaderHelper in ProfileCredentialsUtils and WebIdentityCredentialsUtils, since it was causing Class not found error." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "frosforever", + "description": "Fix default client error to have spaces between words." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.26.json b/.changes/2.15.26.json new file mode 100644 index 000000000000..57558403b69d --- /dev/null +++ b/.changes/2.15.26.json @@ -0,0 +1,54 @@ +{ + "version": "2.15.26", + "date": "2020-11-11", + "entries": [ + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "Adding support to remove a Provisioned Product launch role via UpdateProvisionedProductProperties" + }, + { + "type": "feature", + "category": "Netty NIO HTTP Client", + "contributor": "", + "description": "Upgrade Netty libraries to `4.1.53.Final`, and `netty-tcnative-boringssl-static` to `2.0.34.Final`." + }, + { + "type": "feature", + "category": "AWS Glue DataBrew", + "contributor": "", + "description": "This is the initial SDK release for AWS Glue DataBrew. DataBrew is a visual data preparation tool that enables users to clean and normalize data without writing any code." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "AWS Elemental MediaConvert SDK has added support for Automated ABR encoding and improved the reliability of embedded captions in accelerated outputs." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "contributor": "", + "description": "Fix a bug where the Netty HTTP client can leak memory when a response stream is cancelled prematurely but the upstream publisher continues to invoke onNext for some time before stopping. Fixes [#2051](https://github.com/aws/aws-sdk-java-v2/issues/2051)." + }, + { + "type": "feature", + "category": "AWS Amplify", + "contributor": "", + "description": "Whereas previously custom headers were set via the app's buildspec, custom headers can now be set directly on the Amplify app for both ci/cd and manual deploy apps." 
+ }, + { + "type": "feature", + "category": "Amazon Forecast Service", + "contributor": "", + "description": "Providing support of custom quantiles in CreatePredictor API." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "QuickSight now supports Column-level security and connecting to Oracle data source." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.27.json b/.changes/2.15.27.json new file mode 100644 index 000000000000..cc9e30fd7cbf --- /dev/null +++ b/.changes/2.15.27.json @@ -0,0 +1,48 @@ +{ + "version": "2.15.27", + "date": "2020-11-12", + "entries": [ + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "This release adds a batchMode parameter to the IotEvents, IotAnalytics, and Firehose actions which allows customers to send an array of messages to the corresponding services" + }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "contributor": "", + "description": "Adds support to use dynamic filters with Personalize." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "contributor": "", + "description": "Lex now supports es-ES, it-IT, fr-FR and fr-CA locales" + }, + { + "type": "feature", + "category": "AWS RoboMaker", + "contributor": "", + "description": "This release introduces Robomaker Worldforge TagsOnCreate which allows customers to tag worlds as they are being generated by providing the tags while configuring a world generation job." + }, + { + "type": "feature", + "category": "AWS Service Catalog App Registry", + "contributor": "", + "description": "AWS Service Catalog AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise." + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "contributor": "", + "description": "This release adds support for Amazon Lightsail container services. You can now create a Lightsail container service, and deploy Docker images to it." + }, + { + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds new Australian English female voice - Olivia. Olivia is available as Neural voice only." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.28.json b/.changes/2.15.28.json new file mode 100644 index 000000000000..324074327277 --- /dev/null +++ b/.changes/2.15.28.json @@ -0,0 +1,30 @@ +{ + "version": "2.15.28", + "date": "2020-11-13", + "entries": [ + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "Adds dualstack support for Network Load Balancers (TCP/TLS only), an attribute for WAF fail open for Application Load Balancers, and an attribute for connection draining for Network Load Balancers." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Shield", + "contributor": "", + "description": "This release adds APIs for two new features: 1) Allow customers to bundle resources into protection groups and treat as a single unit. 2) Provide per-account event summaries to all AWS customers." 
+ }, + { + "type": "feature", + "category": "Amazon Textract", + "contributor": "", + "description": "AWS Textract now allows customers to specify their own KMS key to be used for asynchronous jobs output results, AWS Textract now also recognizes handwritten text from English documents." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.29.json b/.changes/2.15.29.json new file mode 100644 index 000000000000..2e5641407367 --- /dev/null +++ b/.changes/2.15.29.json @@ -0,0 +1,60 @@ +{ + "version": "2.15.29", + "date": "2020-11-16", + "entries": [ + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Adding MoveReplicationTask feature to move replication tasks between instances" + }, + { + "type": "feature", + "category": "AWS IoT Secure Tunneling", + "contributor": "", + "description": "Support using multiple data streams per tunnel using the Secure Tunneling multiplexing feature." + }, + { + "type": "feature", + "category": "Synthetics", + "contributor": "", + "description": "AWS Synthetics now supports Environment Variables to assign runtime parameters in the canary scripts." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This feature enables customers to encrypt their Amazon SageMaker Studio storage volumes with customer master keys (CMKs) managed by them in AWS Key Management Service (KMS)." + }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "contributor": "", + "description": "This release supports Unicode characters for string operations in formulae computes in SiteWise. For more information, search for SiteWise in Amazon What's new or refer the SiteWise documentation." + }, + { + "type": "feature", + "category": "AWS CodePipeline", + "contributor": "", + "description": "We show details about inbound executions and id of action executions in GetPipelineState API. We also add ConflictException to StartPipelineExecution, RetryStageExecution, StopPipelineExecution APIs." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "Support import of CloudFormation stacks into Service Catalog provisioned products." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "Adding new parameters for dashboard persistence" + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "contributor": "", + "description": "Documentation updates for Amazon SNS." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.3.json b/.changes/2.15.3.json new file mode 100644 index 000000000000..31f3c7de3abb --- /dev/null +++ b/.changes/2.15.3.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.3", + "date": "2020-10-05", + "entries": [ + { + "type": "feature", + "category": "AWS DynamoDB Enhanced Client", + "description": "Added support for attribute level custom update behaviors such as 'write if not exists'." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Streams", + "description": "Documentation updates for streams.dynamodb" + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for AVC-I and VC3 encoding in the MXF OP1a container, Nielsen non-linear watermarking, and InSync FrameFormer frame rate conversion." 
+ }, + { + "type": "feature", + "category": "AWS Glue", + "description": "AWS Glue crawlers now support Amazon DocumentDB (with MongoDB compatibility) and MongoDB collections. You can choose to crawl the entire data set or only a small sample to reduce crawl time." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "This release adds support for launching Amazon SageMaker Studio in your VPC. Use AppNetworkAccessType in CreateDomain API to disable access to public internet and restrict the network traffic to VPC." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "description": "This release adds a new ReplicaStatus REGION DISABLED for the Table description. This state indicates that the AWS Region for the replica is inaccessible because the AWS Region is disabled." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.30.json b/.changes/2.15.30.json new file mode 100644 index 000000000000..9bcbb635553e --- /dev/null +++ b/.changes/2.15.30.json @@ -0,0 +1,42 @@ +{ + "version": "2.15.30", + "date": "2020-11-17", + "entries": [ + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Support copy-db-snapshot in the one region on cross clusters and local cluster for RDSonVmware. Add target-custom-availability-zone parameter to specify where a snapshot should be copied." + }, + { + "type": "feature", + "category": "Firewall Management Service", + "contributor": "", + "description": "Added Firewall Manager policy support for AWS Network Firewall resources." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds support for user hierarchy group and user hierarchy structure. For details, see the Release Notes in the Amazon Connect Administrator Guide." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "contributor": "", + "description": "The Amazon Macie API now has a lastRunErrorStatus property to indicate if account- or bucket-level errors occurred during the run of a one-time classification job or the latest run of a recurring job." + }, + { + "type": "feature", + "category": "AWS Network Firewall", + "contributor": "", + "description": "(New Service) AWS Network Firewall is a managed network layer firewall service that makes it easy to secure your virtual private cloud (VPC) networks and block malicious traffic." + }, + { + "type": "feature", + "category": "Amazon Chime", + "contributor": "", + "description": "This release adds CRUD APIs for Amazon Chime SipMediaApplications and SipRules. It also adds the API for creating outbound PSTN calls for Amazon Chime meetings." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.31.json b/.changes/2.15.31.json new file mode 100644 index 000000000000..de3ca883f608 --- /dev/null +++ b/.changes/2.15.31.json @@ -0,0 +1,54 @@ +{ + "version": "2.15.31", + "date": "2020-11-18", + "entries": [ + { + "type": "feature", + "category": "AWS S3 Control", + "contributor": "", + "description": "AWS S3 Storage Lens provides visibility into your storage usage and activity trends at the organization or account level, with aggregations by Region, storage class, bucket, and prefix." 
+ }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "contributor": "", + "description": "Adding Memcached 1.6 to parameter family" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Outposts", + "contributor": "", + "description": "Mark the Name parameter in CreateOutpost as required." + }, + { + "type": "feature", + "category": "AWS Backup", + "contributor": "", + "description": "AWS Backup now supports cross-account backup, enabling AWS customers to securely copy their backups across their AWS accounts within their AWS organizations." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "EC2 Fleet adds support of DeleteFleets API for instant type fleets. Now you can delete an instant type fleet and terminate all associated instances with a single API call." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "contributor": "", + "description": "AWS CodeBuild - Adding Status field for Report Group" + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "This release adds ChangeSets support for Nested Stacks. ChangeSets offer a preview of how proposed changes to a stack might impact existing resources or create new ones." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.32.json b/.changes/2.15.32.json new file mode 100644 index 000000000000..1659d30f59fd --- /dev/null +++ b/.changes/2.15.32.json @@ -0,0 +1,84 @@ +{ + "version": "2.15.32", + "date": "2020-11-19", + "entries": [ + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "You can now create Auto Scaling groups with multiple launch templates using a mixed instances policy, making it easy to deploy an AMI with an architecture that is different from the rest of the group." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "contributor": "", + "description": "Amazon Lex supports managing input and output contexts as well as default values for slots." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "The AWS Elemental MediaLive APIs and SDKs now support the ability to see the software update status on Link devices" + }, + { + "type": "feature", + "category": "Amazon Redshift", + "contributor": "", + "description": "Amazon Redshift support for returning ClusterNamespaceArn in describeClusters" + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "contributor": "", + "description": "EventBridge now supports Resource-based policy authorization on event buses. This enables cross-account PutEvents API calls, creating cross-account rules, and simplifies permission management." + }, + { + "type": "feature", + "category": "AWS Directory Service", + "contributor": "", + "description": "Adding multi-region replication feature for AWS Managed Microsoft AD" + }, + { + "type": "feature", + "category": "Amazon Kinesis Analytics", + "contributor": "", + "description": "Amazon Kinesis Data Analytics now supports building and running streaming applications using Apache Flink 1.11 and provides a way to access the Apache Flink dashboard for supported Flink versions." 
+ }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Added the starting position and starting position timestamp to ESM Configuration. Now customers will be able to view these fields for their ESM." + }, + { + "type": "feature", + "category": "Amazon Lex Runtime Service", + "contributor": "", + "description": "Amazon Lex now supports the ability to view and manage active contexts associated with a user session." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "Additional metadata that may be applicable to the recommendation." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Events", + "contributor": "", + "description": "EventBridge now supports Resource-based policy authorization on event buses. This enables cross-account PutEvents API calls, creating cross-account rules, and simplifies permission management." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Adding support for Glue Schema Registry. The AWS Glue Schema Registry is a new feature that allows you to centrally discover, control, and evolve data stream schemas." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.33.json b/.changes/2.15.33.json new file mode 100644 index 000000000000..19d7726cfee1 --- /dev/null +++ b/.changes/2.15.33.json @@ -0,0 +1,78 @@ +{ + "version": "2.15.33", + "date": "2020-11-20", + "entries": [ + { + "type": "feature", + "category": "AWS CloudHSM V2", + "contributor": "", + "description": "Added managed backup retention, a feature that enables customers to retain backups for a configurable period after which CloudHSM service will automatically delete them." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity", + "contributor": "", + "description": "Added SDK pagination support for ListIdentityPools" + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "contributor": "", + "description": "This release adds support for PER TOPIC PER PARTITION monitoring on AWS MSK clusters." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds a set of Amazon Connect APIs to programmatically control instance creation, modification, description and deletion." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "contributor": "", + "description": "The Amazon Macie API now provides S3 bucket metadata that indicates whether any one-time or recurring classification jobs are configured to analyze data in a bucket." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Add new documentation regarding automatically generated Content-MD5 headers when using the SDK or CLI." + }, + { + "type": "feature", + "category": "Amazon Chime", + "contributor": "", + "description": "The Amazon Chime SDK for messaging provides the building blocks needed to build chat and other real-time collaboration features." 
+ }, + { + "type": "feature", + "category": "AWS Service Catalog App Registry", + "contributor": "", + "description": "AWS Service Catalog AppRegistry Documentation update" + }, + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "contributor": "", + "description": "This release supports tagging repository association resources in Amazon CodeGuru Reviewer." + }, + { + "type": "feature", + "category": "AWS Single Sign-on", + "contributor": "", + "description": "Added support for retrieving SSO credentials: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html." + }, + { + "type": "feature", + "category": "AWS App Mesh", + "contributor": "", + "description": "This release makes tag value a required attribute of the tag's key-value pair." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.34.json b/.changes/2.15.34.json new file mode 100644 index 000000000000..bf6631bf63c0 --- /dev/null +++ b/.changes/2.15.34.json @@ -0,0 +1,138 @@ +{ + "version": "2.15.34", + "date": "2020-11-23", + "entries": [ + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "With this release, you can capture data changes in any Amazon DynamoDB table as an Amazon Kinesis data stream. You also can use PartiQL (SQL-compatible language) to manipulate data in DynamoDB tables." + }, + { + "type": "feature", + "category": "AWS Outposts", + "contributor": "", + "description": "Support specifying tags during the creation of the Outpost resource. Tags are now returned in the response body of Outpost APIs." + }, + { + "type": "feature", + "category": "AWS Single Sign-On Admin", + "contributor": "", + "description": "AWS Single Sign-On now enables attribute-based access control for workforce identities to simplify permissions in AWS" + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Feature1 - Glue crawler adds data lineage configuration option. Feature2 - AWS Glue Data Catalog adds APIs for PartitionIndex creation and deletion as part of Enhancement Partition Management feature." + }, + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "This release enables users to identify different file types in the over-the-air update (OTA) functionality using fileType parameter for CreateOTAUpdate API" + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "contributor": "", + "description": "Add API support for EMR Studio, a new notebook-first IDE for data scientists and data engineers with single sign-on, Jupyter notebooks, automated infrastructure provisioning, and job diagnosis." + }, + { + "type": "feature", + "category": "Amazon Translate", + "contributor": "", + "description": "This update adds new operations to create and manage parallel data in Amazon Translate. Parallel data is a resource that you can use to run Active Custom Translation jobs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This release adds support for updating capacity providers, specifying custom instance warmup periods for capacity providers, and using deployment circuit breaker for your ECS Services." + }, + { + "type": "feature", + "category": "CodeArtifact", + "contributor": "", + "description": "Add support for the NuGet package format." 
+ }, + { + "type": "feature", + "category": "AWS License Manager", + "contributor": "", + "description": "AWS License Manager now provides the ability for license administrators to be able to associate license configurations to AMIs shared with their AWS account" + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Documentation updates and corrections for Amazon EC2 Auto Scaling API Reference and SDKs." + }, + { + "type": "feature", + "category": "AWS CodeStar connections", + "contributor": "", + "description": "Added support for the UpdateHost API." + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "contributor": "", + "description": "Adding MAINTENANCE and REBOOTING_BROKER to Cluster states." + }, + { + "type": "feature", + "category": "Amazon Timestream Query", + "contributor": "", + "description": "Amazon Timestream now supports \"QueryStatus\" in Query API which has information about cumulative bytes scanned, metered, as well as progress percentage for the query." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "contributor": "", + "description": "Documentation updates for elasticache" + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Updated the account management API to support the integration with AWS Organizations. Added new methods to allow users to view and manage the delegated administrator account for Security Hub." + }, + { + "type": "feature", + "category": "Amazon Forecast Service", + "contributor": "", + "description": "Releasing the set of PredictorBacktestExportJob APIs which allow customers to export backtest values and item-level metrics data from Predictor training." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Application Insights", + "contributor": "", + "description": "Add Detected Workload to ApplicationComponent which shows the workloads that installed in the component" + }, + { + "type": "feature", + "category": "AWS Signer", + "contributor": "", + "description": "AWS Signer is launching code-signing for AWS Lambda. Now customers can cryptographically sign Lambda code to ensure trust, integrity, and functionality." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "This release includes support for new feature: Code Signing for AWS Lambda. This adds new resources and APIs to configure Lambda functions to accept and verify signed code artifacts at deployment." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for Multiple Private DNS names to DescribeVpcEndpointServices response." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.35.json b/.changes/2.15.35.json new file mode 100644 index 000000000000..27df17571e74 --- /dev/null +++ b/.changes/2.15.35.json @@ -0,0 +1,120 @@ +{ + "version": "2.15.35", + "date": "2020-11-24", + "entries": [ + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "Support for embedding without user registration. New enum EmbeddingIdentityType. A potential breaking change. Affects code that refers IdentityType enum type directly instead of literal string value." + }, + { + "type": "feature", + "category": "AWS Batch", + "contributor": "", + "description": "Add Ec2Configuration in ComputeEnvironment.ComputeResources. 
Use in CreateComputeEnvironment API to enable AmazonLinux2 support." + }, + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "CloudTrail now includes advanced event selectors, which give you finer-grained control over the events that are logged to your trail." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "AWS Elemental MediaConvert SDK has added support for Vorbis and Opus audio in OGG/OGA containers." + }, + { + "type": "feature", + "category": "Amazon GameLift", + "contributor": "", + "description": "GameLift FlexMatch is now available as a standalone matchmaking solution. FlexMatch now provides customizable matchmaking for games hosted peer-to-peer, on-premises, or on cloud compute primitives." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "contributor": "", + "description": "Adding GetReportGroupTrend API for Test Reports." + }, + { + "type": "feature", + "category": "Amazon Comprehend", + "contributor": "", + "description": "Support Comprehend events detection APIs" + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "contributor": "", + "description": "Amazon Transcribe Medical streaming added medical specialties and HTTP/2 support. Amazon Transcribe streaming supports additional languages. Both support OGG/OPUS and FLAC codecs for streaming." + }, + { + "type": "feature", + "category": "Amazon Appflow", + "contributor": "", + "description": "Upsolver as a destination connector and documentation update." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "contributor": "", + "description": "This release adds ability to configure Cognito User Pools with third party sms and email providers for sending notifications to users." + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "This release adds the capability to increase storage capacity of Amazon FSx for Lustre file systems, providing the flexibility to meet evolving storage needs over time." + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "Adds support for the new Modules feature for CloudFormation. A module encapsulates one or more resources and their respective configurations for reuse across your organization." + }, + { + "type": "feature", + "category": "AmazonMWAA", + "contributor": "", + "description": "(New Service) Amazon MWAA is a managed service for Apache Airflow that makes it easy for data engineers and data scientists to execute data processing workflows in the cloud." + }, + { + "type": "feature", + "category": "Amazon Timestream Write", + "contributor": "", + "description": "Adds support of upserts for idempotent updates to Timestream." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "contributor": "", + "description": "Lex now supports es-419, de-DE locales" + }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "contributor": "", + "description": "This release adds support for customer managed customer master key (CMK) based encryption in IoT SiteWise." 
+ }, + { + "type": "feature", + "category": "AWS Step Functions", + "contributor": "", + "description": "This release of the AWS Step Functions SDK introduces support for Synchronous Express Workflows" + }, + { + "type": "feature", + "category": "AWS Elastic Beanstalk", + "contributor": "", + "description": "Updates the Integer constraint of DescribeEnvironmentManagedActionHistory's MaxItems parameter to [1, 100]." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.36.json b/.changes/2.15.36.json new file mode 100644 index 000000000000..8094b61486c6 --- /dev/null +++ b/.changes/2.15.36.json @@ -0,0 +1,30 @@ +{ + "version": "2.15.36", + "date": "2020-11-30", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixed a resource leak that could occur when closing the default credentials provider (or a client using the default credentials provider), when `closeable` credentials like STS or SSO were in use. Fixes [#2149](https://github.com/aws/aws-sdk-java-v2/issues/2149)." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release introduces Amazon EC2 Mac1 instances, a new Amazon EC2 instance family built on Apple Mac mini computers, powered by AWS Nitro System, and support running macOS workloads on Amazon EC2" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add LICENSE.txt and NOTICE.txt to META-INF directory of generated JARs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.37.json b/.changes/2.15.37.json new file mode 100644 index 000000000000..a6db8da9a1cc --- /dev/null +++ b/.changes/2.15.37.json @@ -0,0 +1,102 @@ +{ + "version": "2.15.37", + "date": "2020-12-01", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for: EBS gp3 volumes; and D3/D3en/R5b/M5zn instances powered by Intel Cascade Lake CPUs" + }, + { + "type": "feature", + "category": "AmplifyBackend", + "contributor": "", + "description": "(New Service) The Amplify Admin UI offers an accessible way to develop app backends and manage app content. We recommend that you use the Amplify Admin UI to manage the backend of your Amplify app." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds an Amazon Connect API that provides the ability to create tasks, and a set of APIs (in preview) to configure AppIntegrations associations with Amazon Connect instances." + }, + { + "type": "feature", + "category": "Amazon AppIntegrations Service", + "contributor": "", + "description": "The Amazon AppIntegrations service (in preview release) enables you to configure and reuse connections to external applications." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Connect Contact Lens", + "contributor": "", + "description": "Contact Lens for Amazon Connect analyzes conversations, both real-time and post-call. The ListRealtimeContactAnalysisSegments API returns a list of analysis segments for a real-time analysis session." 
+ }, + { + "type": "feature", + "category": "Amazon SageMaker Feature Store Runtime", + "contributor": "", + "description": "This release adds support for Amazon SageMaker Feature Store, which makes it easy for customers to create, version, share, and manage curated data for machine learning (ML) development." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "S3 adds support for multiple-destination replication, option to sync replica modifications; S3 Bucket Keys to reduce cost of S3 SSE with AWS KMS" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Amazon SageMaker Pipelines for ML workflows. Amazon SageMaker Feature Store, a fully managed repository for ML features." + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "contributor": "", + "description": "Amazon EKS now allows you to define and manage the lifecycle for Kubernetes add-ons for your clusters. This release adds support for the AWS VPC CNI (vpc-cni)." + }, + { + "type": "feature", + "category": "Amazon DevOps Guru", + "contributor": "", + "description": "(New Service) Amazon DevOps Guru is available in public preview. It's a fully managed service that uses machine learning to analyze your operational solutions to help you find and troubleshoot issues." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "This release includes support for a new feature: Container images support in AWS Lambda. This adds APIs for deploying functions as container images. AWS Lambda now supports memory up to 10240MB." + }, + { + "type": "feature", + "category": "AWS Directory Service", + "contributor": "", + "description": "Adding client authentication feature for AWS AD Connector" + }, + { + "type": "feature", + "category": "Amazon Lookout for Vision", + "contributor": "", + "description": "This release introduces support for Amazon Lookout for Vision." + }, + { + "type": "feature", + "category": "Amazon Honeycode", + "contributor": "", + "description": "Introducing APIs to read and write directly from Honeycode tables. Use APIs to pull table and column metadata, then use the read and write APIs to programmatically read and write from the tables." + }, + { + "type": "feature", + "category": "Amazon Elastic Container Registry Public", + "contributor": "", + "description": "Supports Amazon Elastic Container Registry (Amazon ECR) Public, a fully managed registry that makes it easy for a developer to publicly share container software worldwide for anyone to download." + } + ] +} diff --git a/.changes/2.15.38.json b/.changes/2.15.38.json new file mode 100644 index 000000000000..28f4e9b06414 --- /dev/null +++ b/.changes/2.15.38.json @@ -0,0 +1,18 @@ +{ + "version": "2.15.38", + "date": "2020-12-01", + "entries": [ + { + "type": "feature", + "category": "Amazon Connect Customer Profiles", + "contributor": "", + "description": "This is the first release of Amazon Connect Customer Profiles, a unified customer profile for your Amazon Connect contact center." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.39.json b/.changes/2.15.39.json new file mode 100644 index 000000000000..d3e28bac8e74 --- /dev/null +++ b/.changes/2.15.39.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.39", + "date": "2020-12-03", + "entries": [ + { + "type": "feature", + "category": "AWS License Manager", + "contributor": "", + "description": "AWS License Manager enables managed entitlements for AWS customers and Software Vendors (ISV). You can track and distribute license entitlements from AWS Marketplace and supported ISVs." + }, + { + "type": "feature", + "category": "AWS Batch", + "contributor": "", + "description": "This release adds support for customers to run Batch Jobs on ECS Fargate, the serverless compute engine built for containers on AWS. Customers can also propagate Job and Job Definition Tags to ECS Tasks." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AmplifyBackend", + "contributor": "", + "description": "Regular documentation updates." + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "contributor": "", + "description": "This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for EBS volumes that are attached to instances." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.4.json b/.changes/2.15.4.json new file mode 100644 index 000000000000..dd83d2944df1 --- /dev/null +++ b/.changes/2.15.4.json @@ -0,0 +1,26 @@ +{ + "version": "2.15.4", + "date": "2020-10-06", + "entries": [ + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "Added new S3 endpoint settings to allow partitioning CDC data by date for S3 as target. Exposed some Extra Connection Attributes as endpoint settings for relational databases as target." + }, + { + "type": "feature", + "category": "AWS Marketplace Catalog Service", + "description": "AWS Marketplace Catalog now supports FailureCode for change workflows to help differentiate client errors and server faults." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release supports returning additional information about local gateway virtual interfaces, and virtual interface groups." + }, + { + "type": "feature", + "category": "Amazon Kinesis Analytics", + "description": "Amazon Kinesis Analytics now supports StopApplication with 'force' option" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.40.json b/.changes/2.15.40.json new file mode 100644 index 000000000000..de79065cba9b --- /dev/null +++ b/.changes/2.15.40.json @@ -0,0 +1,66 @@ +{ + "version": "2.15.40", + "date": "2020-12-04", + "entries": [ + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Added the additional enum InvalidImage to StateReasonCode and LastUpdateStatusReasonCode fields." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "AWS Elemental MediaLive now supports black video and audio silence as new conditions to trigger automatic input failover."
+ }, + { + "type": "feature", + "category": "AWS Directory Service", + "contributor": "", + "description": "Documentation updates for ds - updated descriptions" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release introduces tag-on-create capability for the CreateImage API. A user can now specify tags that will be applied to the new resources (image, snapshots or both), during creation time." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "croudet", + "description": "Avoid costly metrics collection when metric collector is NoOpMetricCollector." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "contributor": "", + "description": "Update the import-workspace-image API to have \"BYOL_REGULAR_WSP\" as a valid input string for ingestion-process." + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "contributor": "", + "description": "Adding HEALING to ClusterState." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "AWS Systems Manager Patch Manager MAC OS Support and OpsMetadata Store APIs to store operational metadata for an Application." + }, + { + "type": "feature", + "category": "AWS License Manager", + "contributor": "", + "description": "Automated Discovery now has support for custom tags, and detects software uninstalls." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adds support for Amazon RDS Cross-Region Automated Backups, the ability to setup automatic replication of snapshots and transaction logs from a primary AWS Region to a secondary AWS Region." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.41.json b/.changes/2.15.41.json new file mode 100644 index 000000000000..831ab9b956bf --- /dev/null +++ b/.changes/2.15.41.json @@ -0,0 +1,42 @@ +{ + "version": "2.15.41", + "date": "2020-12-07", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "contributor": "", + "description": "Fixed the issue where certain handshake errors manifested as acquire connection timeout error when using TLS1.3 and proxy." + }, + { + "type": "feature", + "category": "AWS Common Runtime HTTP Client", + "contributor": "", + "description": "Bump up `aws-crt` version to `0.9.0`" + }, + { + "type": "feature", + "category": "AWS Service Catalog App Registry", + "contributor": "", + "description": "AWS Service Catalog AppRegistry now supports adding, removing, and listing tags on resources after they are created." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Added PreserveTransaction setting to preserve order of CDC for S3 as target. Added CsvNoSupValue setting to replace empty value for columns not included in the supplemental log for S3 as target." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Added amz-sdk-request and removed amz-sdk-retry header. The new header matches the behavior of the other SDKs." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.42.json b/.changes/2.15.42.json new file mode 100644 index 000000000000..6ab3fa186efd --- /dev/null +++ b/.changes/2.15.42.json @@ -0,0 +1,72 @@ +{ + "version": "2.15.42", + "date": "2020-12-08", + "entries": [ + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "Added new parameters for join optimization." + }, + { + "type": "feature", + "category": "Amazon HealthLake", + "contributor": "", + "description": "This release introduces Amazon HealthLake (preview), a HIPAA-eligible service that enables healthcare and life sciences customers to store, transform, query, and analyze health data in the AWS Cloud." + }, + { + "type": "feature", + "category": "Amazon Sagemaker Edge Manager", + "contributor": "", + "description": "Amazon SageMaker Edge Manager makes it easy to optimize, secure, monitor, and maintain machine learning (ML) models across fleets of edge devices such as smart cameras, smart speakers, and robots." + }, + { + "type": "feature", + "category": "Amazon EMR Containers", + "contributor": "", + "description": "This release adds support for Amazon EMR on EKS, a simple way to run Spark on Kubernetes." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "The SDK will now retry on `TransactionInProgressException` error code." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "contributor": "", + "description": "1. Amazon Kendra connector for Google Drive repositories 2. Amazon Kendra's relevance ranking models are regularly tuned for each customer by capturing end-user search patterns and feedback." + }, + { + "type": "feature", + "category": "AWS Audit Manager", + "contributor": "", + "description": "AWS Audit Manager helps you continuously audit your AWS usage to simplify how you manage risk and compliance. This update releases the first version of the AWS Audit Manager APIs and SDK." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "contributor": "", + "description": "This release adds support for configuring cross-region and cross-account replication of your Amazon ECR images." + }, + { + "type": "feature", + "category": "Amazon Forecast Service", + "contributor": "", + "description": "This release adds support for the Amazon Forecast Weather Index which can increase forecasting accuracy by automatically including weather forecasts in demand forecasts." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This feature helps you monitor model performance characteristics such as accuracy, identify undesired bias in your ML models, and explain model decisions better with explainability drift detection." + }, + { + "type": "feature", + "category": "Amazon SageMaker Runtime", + "contributor": "", + "description": "This feature allows customers to invoke their endpoint with an inference ID. If used and data capture for the endpoint is enabled, this ID will be captured along with request data." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.43.json b/.changes/2.15.43.json new file mode 100644 index 000000000000..e43788f35ce8 --- /dev/null +++ b/.changes/2.15.43.json @@ -0,0 +1,30 @@ +{ + "version": "2.15.43", + "date": "2020-12-09", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "Amazon Redshift", + "contributor": "", + "description": "Add support for availability zone relocation feature." + }, + { + "type": "feature", + "category": "AWS Global Accelerator", + "contributor": "", + "description": "This release adds support for custom routing accelerators" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for G4ad instances powered by AMD Radeon Pro V520 GPUs and AMD 2nd Generation EPYC processors" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.44.json b/.changes/2.15.44.json new file mode 100644 index 000000000000..896fe0cae244 --- /dev/null +++ b/.changes/2.15.44.json @@ -0,0 +1,24 @@ +{ + "version": "2.15.44", + "date": "2020-12-10", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "TGW connect simplifies connectivity of SD-WAN appliances; IGMP support for TGW multicast; VPC Reachability Analyzer for VPC resources connectivity analysis." + }, + { + "type": "feature", + "category": "AWS Network Manager", + "contributor": "", + "description": "This release adds API support for Transit Gateway Connect integration into AWS Network Manager." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "contributor": "", + "description": "Amazon Kendra now supports adding synonyms to an index through the new Thesaurus resource." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.45.json b/.changes/2.15.45.json new file mode 100644 index 000000000000..60c7dcb8b05a --- /dev/null +++ b/.changes/2.15.45.json @@ -0,0 +1,42 @@ +{ + "version": "2.15.45", + "date": "2020-12-11", + "entries": [ + { + "type": "feature", + "category": "AWS IoT SiteWise", + "contributor": "", + "description": "Added the ListAssetRelationships operation and support for composite asset models, which represent structured sets of properties within asset models." + }, + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "CloudTrailInvalidClientTokenIdException is now thrown when a call results in the InvalidClientTokenId error code. The Name parameter of the AdvancedEventSelector data type is now optional." + }, + { + "type": "feature", + "category": "AWS Performance Insights", + "contributor": "", + "description": "You can group DB load according to the dimension groups for database, application, and session type. Amazon RDS also supports the dimensions db.name, db.application.name, and db.session_type.name." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Documentation updates and corrections for Amazon EC2 Auto Scaling API Reference and SDKs." 
+ }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "contributor": "", + "description": "Documentation updates for monitoring" + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "contributor": "", + "description": "Documentation updates for GuardDuty" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.46.json b/.changes/2.15.46.json new file mode 100644 index 000000000000..afb7ef16565a --- /dev/null +++ b/.changes/2.15.46.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.46", + "date": "2020-12-14", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Treat zero-byte responses from async HTTP clients as not having a payload, regardless of the response content-length. This fixes an issue that could cause HEAD responses (e.g. s3's headObject responses) with a content-length specified from being treated as having a payload. This fixes issues like [#1216](https://github.com/aws/aws-sdk-java-v2/issues/1216) where the SDK attempts to read data from the response based on the content-length, not based on whether there was actually a payload." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Add c5n.metal to ec2 instance types list" + }, + { + "type": "feature", + "category": "Amazon DevOps Guru", + "contributor": "", + "description": "Documentation updates for DevOps Guru." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Global Accelerator", + "contributor": "", + "description": "This release adds support for custom routing accelerators" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.47.json b/.changes/2.15.47.json new file mode 100644 index 000000000000..813154a8339e --- /dev/null +++ b/.changes/2.15.47.json @@ -0,0 +1,60 @@ +{ + "version": "2.15.47", + "date": "2020-12-15", + "entries": [ + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "AWS IoT Rules Engine adds Kafka Action that allows sending data to Apache Kafka clusters inside a VPC. AWS IoT Device Defender adds custom metrics and machine-learning based anomaly detection." + }, + { + "type": "feature", + "category": "AWS IoT Greengrass V2", + "contributor": "", + "description": "AWS IoT Greengrass V2 is a new major version of AWS IoT Greengrass. This release adds several updates such as modular components, continuous deployments, and improved ease of use." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "Adding support for Change Manager API content" + }, + { + "type": "feature", + "category": "AWS IoT Wireless", + "contributor": "", + "description": "AWS IoT for LoRaWAN enables customers to setup a private LoRaWAN network by connecting their LoRaWAN devices and gateways to the AWS cloud without managing a LoRaWAN Network Server." + }, + { + "type": "feature", + "category": "AWS IoT Core Device Advisor", + "contributor": "", + "description": "AWS IoT Core Device Advisor is fully managed test capability for IoT devices. Device manufacturers can use Device Advisor to test their IoT devices for reliable and secure connectivity with AWS IoT." 
+ }, + { + "type": "feature", + "category": "Amazon Prometheus Service", + "contributor": "", + "description": "(New Service) Amazon Managed Service for Prometheus is a fully managed Prometheus-compatible monitoring service that makes it easy to monitor containerized applications securely and at scale." + }, + { + "type": "feature", + "category": "AWS IoT Fleet Hub", + "contributor": "", + "description": "AWS IoT Fleet Hub, a new feature of AWS IoT Device Management that provides a web application for monitoring and managing device fleets connected to AWS IoT at scale." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Added support for Apache Kafka as a event source. Added support for TumblingWindowInSeconds for streams event source mappings. Added support for FunctionResponseTypes for streams event source mappings" + }, + { + "type": "feature", + "category": "AWS IoT Analytics", + "contributor": "", + "description": "FileFormatConfiguration enables data store to save data in JSON or Parquet format. S3Paths enables you to specify the S3 objects that save your channel messages when you reprocess the pipeline." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.48.json b/.changes/2.15.48.json new file mode 100644 index 000000000000..3da182e74a76 --- /dev/null +++ b/.changes/2.15.48.json @@ -0,0 +1,42 @@ +{ + "version": "2.15.48", + "date": "2020-12-16", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Well-Architected Tool", + "contributor": "", + "description": "This is the first release of AWS Well-Architected Tool API support, use to review your workload and compare against the latest AWS architectural best practices." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "This release updates the \"MonitorArnList\" from a list of String to be a list of Arn for both CreateAnomalySubscription and UpdateAnomalySubscription APIs" + }, + { + "type": "feature", + "category": "Amazon Location Service", + "contributor": "", + "description": "Initial release of Amazon Location Service. A new geospatial service providing capabilities to render maps, geocode/reverse geocode, track device locations, and detect geofence entry/exit events." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "QuickSight now supports connecting to federated data sources of Athena" + }, + { + "type": "feature", + "category": "Amazon Prometheus Service", + "contributor": "", + "description": "Documentation updates for Amazon Managed Service for Prometheus" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.49.json b/.changes/2.15.49.json new file mode 100644 index 000000000000..aff73e283bc3 --- /dev/null +++ b/.changes/2.15.49.json @@ -0,0 +1,72 @@ +{ + "version": "2.15.49", + "date": "2020-12-17", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Route 53 Resolver", + "contributor": "", + "description": "Route 53 Resolver adds support for enabling resolver DNSSEC validation in virtual private cloud (VPC)." 
+ }, + { + "type": "feature", + "category": "AWS Config", + "contributor": "", + "description": "Adding PutExternalEvaluation API which grants permission to deliver evaluation result to AWS Config" + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "contributor": "", + "description": "This release adds support for building and distributing container images within EC2 Image Builder." + }, + { + "type": "feature", + "category": "Amazon Route 53", + "contributor": "", + "description": "This release adds support for DNSSEC signing in Amazon Route 53." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "This change fixes a bug in the code generation related to eventstreams that prevents multiple events to share the same shape." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "Support TagOptions sharing with Service Catalog portfolio sharing." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "C6gn instances are powered by AWS Graviton2 processors and offer 100 Gbps networking bandwidth. These instances deliver up to 40% better price-performance benefit versus comparable x86-based instances" + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "contributor": "", + "description": "Provide Cross-account copy event based policy support in DataLifecycleManager (DLM)" + }, + { + "type": "feature", + "category": "Amazon Simple Queue Service", + "contributor": "", + "description": "Amazon SQS adds queue attributes to enable high throughput FIFO." + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "contributor": "", + "description": "Added CreationDate and LastUpdatedDate timestamps to ListAliases API response" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.5.json b/.changes/2.15.5.json new file mode 100644 index 000000000000..e84041396e66 --- /dev/null +++ b/.changes/2.15.5.json @@ -0,0 +1,26 @@ +{ + "version": "2.15.5", + "date": "2020-10-07", + "entries": [ + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "This release introduces User and UserGroup to allow customers to have access control list of the Redis resources for AWS ElastiCache. This release also adds support for Outposts for AWS ElastiCache." + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage", + "description": "AWS Elemental MediaPackage provides access logs that capture detailed information about requests sent to a customer's MediaPackage channel." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "Enables Rightsizing Recommendations to analyze and present EC2 instance-level EBS metrics when generating recommendations. Returns AccessDeniedException if the account is not opted into Rightsizing" + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "description": "This release enables AWS Compute Optimizer to analyze EC2 instance-level EBS read and write operations, and throughput when generating recommendations for your EC2 instances and Auto Scaling groups." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.50.json b/.changes/2.15.50.json new file mode 100644 index 000000000000..daeafb502af2 --- /dev/null +++ b/.changes/2.15.50.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.50", + "date": "2020-12-18", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Upgrading jackson.databind.version to 2.10.5.1" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adds IAM DB authentication information to the PendingModifiedValues output of the DescribeDBInstances API. Adds ClusterPendingModifiedValues information to the output of the DescribeDBClusters API." + }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "contributor": "", + "description": "Updated FilterValues regex pattern to align with Filter Expression." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "EBS io2 volumes now supports Multi-Attach" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.51.json b/.changes/2.15.51.json new file mode 100644 index 000000000000..5260d96b6d9d --- /dev/null +++ b/.changes/2.15.51.json @@ -0,0 +1,96 @@ +{ + "version": "2.15.51", + "date": "2020-12-21", + "entries": [ + { + "type": "feature", + "category": "Amazon Connect Participant Service", + "contributor": "", + "description": "This release adds three new APIs: StartAttachmentUpload, CompleteAttachmentUpload, and GetAttachment. For Amazon Connect Chat, you can use these APIs to share files in chat conversations." + }, + { + "type": "feature", + "category": "Service Quotas", + "contributor": "", + "description": "Added the ability to tag applied quotas." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "AWS DMS launches support for AWS Secrets Manager to manage source and target database credentials." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Add 4 connection properties: SECRET_ID, CONNECTOR_URL, CONNECTOR_TYPE, CONNECTOR_CLASS_NAME. Add two connection types: MARKETPLACE, CUSTOM" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "contributor": "", + "description": "Documentation updates for Amazon API Gateway." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds Tag On Create feature support for the AllocateAddress API." + }, + { + "type": "feature", + "category": "AWS Batch", + "contributor": "", + "description": "Documentation updates for batch" + }, + { + "type": "feature", + "category": "AWS Outposts", + "contributor": "", + "description": "In this release, AWS Outposts adds support for three new APIs: TagResource, UntagResource, and ListTagsForResource. Customers can now manage tags for their resources through the SDK." 
+ }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Format GetObject's Expires header to be an http-date instead of iso8601" + }, + { + "type": "feature", + "category": "Amazon QLDB Session", + "contributor": "", + "description": "Adds \"TimingInformation\" to all SendCommand API results and \"IOUsage\" to ExecuteStatementResult, FetchPageResult and CommitTransactionResult." + }, + { + "type": "feature", + "category": "AWS Config", + "contributor": "", + "description": "AWS Config adds support to save advanced queries. New API operations - GetStoredQuery, PutStoredQuery, ListStoredQueries, DeleteStoredQuery" + }, + { + "type": "feature", + "category": "AWS Service Catalog App Registry", + "contributor": "", + "description": "New API `SyncResouce` to update AppRegistry system tags." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Finding providers can now use BatchImportFindings to update Confidence, Criticality, RelatedFindings, Severity, and Types." + }, + { + "type": "feature", + "category": "Amazon Managed Blockchain", + "contributor": "", + "description": "Added support for provisioning and managing public Ethereum nodes on main and test networks supporting secure access using Sigv4 and standard open-source Ethereum APIs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.52.json b/.changes/2.15.52.json new file mode 100644 index 000000000000..5585ced508f0 --- /dev/null +++ b/.changes/2.15.52.json @@ -0,0 +1,54 @@ +{ + "version": "2.15.52", + "date": "2020-12-22", + "entries": [ + { + "type": "feature", + "category": "Amazon ElastiCache", + "contributor": "", + "description": "Documentation updates for elasticache" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds support for quick connects. For details, see the Release Notes in the Amazon Connect Administrator Guide." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "AWS Glue Find Matches machine learning transforms now support column importance scores." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "SSM Maintenance Window support for registering/updating maintenance window tasks without targets." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "This release adds additional metadata that may be applicable to the Rightsizing Recommendations." + }, + { + "type": "feature", + "category": "AWS IoT Wireless", + "contributor": "", + "description": "Adding the ability to use Fingerprint in GetPartnerAccount and ListPartnerAccounts API responses to protect sensitive customer account information." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adds customer-owned IP address (CoIP) support to Amazon RDS on AWS Outposts." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.53.json b/.changes/2.15.53.json new file mode 100644 index 000000000000..4df17c2d5f45 --- /dev/null +++ b/.changes/2.15.53.json @@ -0,0 +1,30 @@ +{ + "version": "2.15.53", + "date": "2020-12-23", + "entries": [ + { + "type": "feature", + "category": "AWS Resource Groups", + "contributor": "", + "description": "Add operation `PutGroupConfiguration`. Support dedicated hosts and add `Pending` in operations `Un/GroupResources`. Add `Resources` in `ListGroupResources` and deprecate `ResourceIdentifiers`." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "contributor": "", + "description": "This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for lambda functions." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "AWS DMS launches support for AWS Secrets Manager to manage Oracle ASM Database credentials" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.54.json b/.changes/2.15.54.json new file mode 100644 index 000000000000..367a2e41ddef --- /dev/null +++ b/.changes/2.15.54.json @@ -0,0 +1,12 @@ +{ + "version": "2.15.54", + "date": "2020-12-28", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudFront", + "contributor": "", + "description": "Amazon CloudFront has deprecated the CreateStreamingDistribution and CreateStreamingDistributionWithTags APIs as part of discontinuing support for Real-Time Messaging Protocol (RTMP) distributions." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.55.json b/.changes/2.15.55.json new file mode 100644 index 000000000000..96bf2f3cd08b --- /dev/null +++ b/.changes/2.15.55.json @@ -0,0 +1,18 @@ +{ + "version": "2.15.55", + "date": "2020-12-29", + "entries": [ + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "contributor": "", + "description": "Amazon API Gateway now supports data mapping for HTTP APIs which allows customers to modify HTTP Request before sending it to their integration and HTTP Response before sending it to the invoker." + }, + { + "type": "feature", + "category": "AWS Certificate Manager Private Certificate Authority", + "contributor": "", + "description": "This release adds a new parameter \"CsrExtensions\" in the \"CertificateAuthorityConfiguration\" data structure, which allows customers to add the addition of KU and SIA into the CA CSR." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.56.json b/.changes/2.15.56.json new file mode 100644 index 000000000000..37f78cace7ef --- /dev/null +++ b/.changes/2.15.56.json @@ -0,0 +1,24 @@ +{ + "version": "2.15.56", + "date": "2020-12-30", + "entries": [ + { + "type": "feature", + "category": "Amazon Macie 2", + "contributor": "", + "description": "This release of the Amazon Macie API includes miscellaneous updates and improvements to the documentation." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "contributor": "", + "description": "Documentation updates for elasticache" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.57.json b/.changes/2.15.57.json new file mode 100644 index 000000000000..25325ce78e90 --- /dev/null +++ b/.changes/2.15.57.json @@ -0,0 +1,12 @@ +{ + "version": "2.15.57", + "date": "2020-12-31", + "entries": [ + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "Enhanced Service Catalog DescribeProvisioningParameters API to return new parameter constraints, i.e., MinLength, MaxLength, MinValue, MaxValue, ConstraintDescription and AllowedPattern" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.58.json b/.changes/2.15.58.json new file mode 100644 index 000000000000..8fa86414a3a6 --- /dev/null +++ b/.changes/2.15.58.json @@ -0,0 +1,24 @@ +{ + "version": "2.15.58", + "date": "2021-01-04", + "entries": [ + { + "type": "feature", + "category": "Amazon HealthLake", + "contributor": "", + "description": "Amazon HealthLake now supports exporting data from FHIR Data Stores in Preview." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix for [#1684](https://github.com/aws/aws-sdk-java-v2/issues/1684): some retry attempts that failed due to API timeouts were not successfully retried and instead ended up with AbortedException." + }, + { + "type": "feature", + "category": "Amazon CloudSearch", + "contributor": "", + "description": "This release adds support for new Amazon CloudSearch instances." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.59.json b/.changes/2.15.59.json new file mode 100644 index 000000000000..88e09a910bd0 --- /dev/null +++ b/.changes/2.15.59.json @@ -0,0 +1,24 @@ +{ + "version": "2.15.59", + "date": "2021-01-05", + "entries": [ + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "Add new GetCostCategories API. Support filters for the GetDimensions, GetTags and GetCostCategories APIs. Support sortBy metrics for the GetDimensions, GetTags and GetCostCategories APIs." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "contributor": "", + "description": "Documentation updates for Application Auto Scaling" + }, + { + "type": "feature", + "category": "Amazon S3", + "contributor": "", + "description": "`S3Utilities#getUrl` now supports versionId. See [#2224](https://github.com/aws/aws-sdk-java-v2/issues/2224)" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.6.json b/.changes/2.15.6.json new file mode 100644 index 000000000000..c587cbfd9be5 --- /dev/null +++ b/.changes/2.15.6.json @@ -0,0 +1,46 @@ +{ + "version": "2.15.6", + "date": "2020-10-08", + "entries": [ + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "This release provides location information for the manifest validation files." + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "description": "Documentation updates for SNS." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "This release enables SageMaker customers to convert TensorFlow and PyTorch models to CoreML (ML Model) format." + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "description": "Amazon EventBridge adds support for target Dead Letter Queues (DLQs) and custom retry policies."
+ }, + { + "type": "feature", + "category": "Amazon CloudWatch Events", + "description": "Amazon EventBridge (formerly called CloudWatch Events) adds support for target Dead-letter Queues and custom retry policies." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Supports a new parameter to set the max allocated storage in gigabytes for restore database instance from S3 and restore database instance to a point in time APIs." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "You can now create hierarchical cost categories by choosing \"Cost Category\" as a dimension. You can also track the status of your cost category updates to your cost and usage information." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "AWS EC2 RevokeSecurityGroupIngress and RevokeSecurityGroupEgress APIs will return IpPermissions which do not match with any existing IpPermissions for security groups in default VPC and EC2-Classic." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.60.json b/.changes/2.15.60.json new file mode 100644 index 000000000000..d57fec95309a --- /dev/null +++ b/.changes/2.15.60.json @@ -0,0 +1,24 @@ +{ + "version": "2.15.60", + "date": "2021-01-06", + "entries": [ + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "This update increases the number of instance types that can be added to the overrides within an mixed instances group configuration." + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "contributor": "", + "description": "This release adds support for Amazon EFS, so customers can transfer files over SFTP, FTPS and FTP in and out of Amazon S3 as well as Amazon EFS." + }, + { + "type": "feature", + "category": "AWS Auto Scaling Plans", + "contributor": "", + "description": "Documentation updates for AWS Auto Scaling" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.61.json b/.changes/2.15.61.json new file mode 100644 index 000000000000..810dadcebdde --- /dev/null +++ b/.changes/2.15.61.json @@ -0,0 +1,30 @@ +{ + "version": "2.15.61", + "date": "2021-01-07", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "AWS Elemental MediaConvert SDK has added support for I-Frame-only HLS manifest generation in CMAF outputs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS CodePipeline", + "contributor": "", + "description": "Adding cancelled status and summary for executions aborted by pipeline updates." + }, + { + "type": "feature", + "category": "Amazon DevOps Guru", + "contributor": "", + "description": "Add resourceHours field in GetAccountHealth API to show total number of resource hours AWS Dev Ops Guru has done work for in the last hour." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.7.json b/.changes/2.15.7.json new file mode 100644 index 000000000000..5b1e43d7dac9 --- /dev/null +++ b/.changes/2.15.7.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.7", + "date": "2020-10-09", + "entries": [ + { + "type": "feature", + "category": "AWS Amplify", + "description": "Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval - when enabled, code changes can take up to 10 minutes to roll out." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "WAV audio output. Extracting ancillary captions in MP4 file inputs. Priority on channels feeding a multiplex (higher priority channels will tend to have higher video quality)." + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "This release introduces a new Amazon EKS error code: \"ClusterUnreachable\"" + }, + { + "type": "feature", + "category": "Amazon Import/Export Snowball", + "description": "We added new APIs to allow customers to better manage their device shipping. You can check if your shipping label expired, generate a new label, and tell us that you received or shipped your job." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "This new API takes either a ProvisionedProductId or a ProvisionedProductName, along with a list of 1 or more output keys and responds with the (key,value) pairs of those outputs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.8.json b/.changes/2.15.8.json new file mode 100644 index 000000000000..61104ed6bd0a --- /dev/null +++ b/.changes/2.15.8.json @@ -0,0 +1,96 @@ +{ + "version": "2.15.8", + "date": "2020-10-15", + "entries": [ + { + "type": "bugfix", + "category": "AWS Lambda Maven Archetype", + "description": "Fixed an issue where archetype generation failed with the latest maven-archetype-plugin. See [#1981](https://github.com/aws/aws-sdk-java-v2/issues/1981)" + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "This SDK Release introduces new API (DetectProtectiveEquipment) for Amazon Rekognition. This release also adds ServiceQuotaExceeded exception to Amazon Rekognition IndexFaces API." + }, + { + "type": "feature", + "category": "AWS Ground Station", + "description": "Adds error message attribute to DescribeContact DataflowDetails" + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "When creating Endpoints, Replication Instances, and Replication Tasks, the feature provides you the option to specify a friendly name for the resources." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Documentation updates for WorkSpaces" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This Patch Manager release now supports searching for available packages from Amazon Linux and Amazon Linux 2 via the DescribeAvailablePatches API." + }, + { + "type": "feature", + "category": "Amazon WorkMail", + "description": "Add CreateOrganization and DeleteOrganization API operations."
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Add support for plus (+) character in profile names" + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "description": "Add support to associate VPC Security Groups at server creation." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "API Documentation updates for Glue Get-Plan API" + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "This release improves email validation for subscriptions on the SDK endpoints." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "Add new variable, lastStatusChangeDate, to DescribeDomainConfiguration API" + }, + { + "type": "feature", + "category": "AWS X-Ray", + "description": "Enhancing CreateGroup, UpdateGroup, GetGroup and GetGroups APIs to support configuring X-Ray Insights Notifications. Adding TraceLimit information into X-Ray BatchGetTraces API response." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Return tags for all resources in the output of DescribeDBInstances, DescribeDBSnapshots, DescribeDBClusters, and DescribeDBClusterSnapshots API operations." + }, + { + "type": "feature", + "category": "AWS Budgets", + "description": "This release introduces AWS Budgets Actions, allowing you to define an explicit response(or set of responses) to take when your budget exceeds it's action threshold." + }, + { + "type": "feature", + "category": "Access Analyzer", + "description": "This release adds support for the ApplyArchiveRule api in IAM Access Analyzer. The ApplyArchiveRule api allows users to apply an archive rule retroactively to existing findings in an analyzer." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release of the Amazon Macie API adds support for pausing and resuming classification jobs. Also, sensitive data findings now include location data for up to 15 occurrences of sensitive data." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.9.json b/.changes/2.15.9.json new file mode 100644 index 000000000000..3bc1c441b5f2 --- /dev/null +++ b/.changes/2.15.9.json @@ -0,0 +1,21 @@ +{ + "version": "2.15.9", + "date": "2020-10-16", + "entries": [ + { + "type": "feature", + "category": "AWS Organizations", + "description": "Documentation updates for AWS Organizations." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "The AWS Elemental MediaLive APIs and SDKs now support the ability to transfer the ownership of MediaLive Link devices across AWS accounts." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.16.json b/.changes/2.7.16.json new file mode 100644 index 000000000000..cf6b25ad9dd7 --- /dev/null +++ b/.changes/2.7.16.json @@ -0,0 +1,16 @@ +{ + "version": "2.7.16", + "date": "2019-08-02", + "entries": [ + { + "type": "feature", + "category": "AWS Security Token Service", + "description": "Documentation updates for sts" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix the issue where the `content-length` set on the request is not honored for streaming operations." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.7.17.json b/.changes/2.7.17.json new file mode 100644 index 000000000000..8f4a816fbb42 --- /dev/null +++ b/.changes/2.7.17.json @@ -0,0 +1,21 @@ +{ + "version": "2.7.17", + "date": "2019-08-05", + "entries": [ + { + "type": "feature", + "category": "AWS DataSync", + "description": "Support VPC endpoints." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon EC2 now supports a new Spot allocation strategy \"Capacity-optimized\" that fulfills your request using Spot Instance pools that are optimally chosen based on the available Spot capacity." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "In this release, AWS IoT Device Defender introduces audit mitigation actions that can be applied to audit findings to help mitigate security issues." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.18.json b/.changes/2.7.18.json new file mode 100644 index 000000000000..43ddb32b91e9 --- /dev/null +++ b/.changes/2.7.18.json @@ -0,0 +1,11 @@ +{ + "version": "2.7.18", + "date": "2019-08-06", + "entries": [ + { + "type": "feature", + "category": "AWS Batch", + "description": "Documentation updates for AWS Batch" + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.19.json b/.changes/2.7.19.json new file mode 100644 index 000000000000..ba1a57141f42 --- /dev/null +++ b/.changes/2.7.19.json @@ -0,0 +1,11 @@ +{ + "version": "2.7.19", + "date": "2019-08-07", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudWatch Application Insights", + "description": "CloudWatch Application Insights for .NET and SQL Server now provides integration with AWS Systems Manager OpsCenter. This integration allows you to view and resolve problems and operational issues detected for selected applications." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.20.json b/.changes/2.7.20.json new file mode 100644 index 000000000000..744b0a97a969 --- /dev/null +++ b/.changes/2.7.20.json @@ -0,0 +1,26 @@ +{ + "version": "2.7.20", + "date": "2019-08-08", + "entries": [ + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "CodeBuild adds CloudFormation support for SourceCredential" + }, + { + "type": "feature", + "category": "AWS OpsWorks CM", + "description": "This release adds support for Chef Automate 2 specific engine attributes." + }, + { + "type": "feature", + "category": "AWS Lake Formation", + "description": "Lake Formation: (New Service) AWS Lake Formation is a fully managed service that makes it easier for customers to build, secure and manage data lakes. AWS Lake Formation simplifies and automates many of the complex manual steps usually required to create data lakes including collecting, cleaning and cataloging data and securely making that data available for analytics and machine learning." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "You can now use AWS Glue to find matching records across datasets even without identifiers to join on by using the new FindMatches ML Transform. Find related products, places, suppliers, customers, and more by teaching a custom machine learning transformation that you can use to identify matching records as part of your analysis, data cleaning, or master data management project by adding the FindMatches transformation to your Glue ETL Jobs.
If your problem is more along the lines of deduplication, you can use the FindMatches in much the same way to identify customers who have signed up more than ones, products that have accidentally been added to your product catalog more than once, and so forth. Using the FindMatches MLTransform, you can teach a Transform your definition of a duplicate through examples, and it will use machine learning to identify other potential duplicates in your dataset. As with data integration, you can then use your new Transform in your deduplication projects by adding the FindMatches transformation to your Glue ETL Jobs. This release also contains additional APIs that support AWS Lake Formation." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.21.json b/.changes/2.7.21.json new file mode 100644 index 000000000000..99439e955e88 --- /dev/null +++ b/.changes/2.7.21.json @@ -0,0 +1,41 @@ +{ + "version": "2.7.21", + "date": "2019-08-09", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed the issue where ByteArrayAsyncRequestBody can send duplicate requests when another request comes in at the same time the subscription completes." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert has added support for multi-DRM SPEKE with CMAF outputs, MP3 ingest, and options for improved video quality." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "New \"evidence\" field in the finding model to provide evidence information explaining why the finding has been triggered. Currently only threat-intelligence findings have this field. Some documentation updates." + }, + { + "type": "feature", + "category": "Amazon Lex Runtime Service", + "description": "Manage Amazon Lex session state using APIs on the client" + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Add expectedNextSnapshotScheduleTime and expectedNextSnapshotScheduleTimeStatus to redshift cluster object." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "This release adds Quality of Service (QoS) support for AWS IoT rules engine republish action." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "For APIs that support input event streams, set the `Content-Type` to `application/vnd.amazon.eventstream` on the request." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.22.json b/.changes/2.7.22.json new file mode 100644 index 000000000000..579fa7457334 --- /dev/null +++ b/.changes/2.7.22.json @@ -0,0 +1,26 @@ +{ + "version": "2.7.22", + "date": "2019-08-12", + "entries": [ + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "Adding new Emotion, Fear" + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "description": "Documentation updates for monitoring" + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "Documentation updates for Application Auto Scaling" + }, + { + "type": "feature", + "category": "Auto Scaling", + "description": "Amazon EC2 Auto Scaling now supports a new Spot allocation strategy \"capacity-optimized\" that fulfills your request using Spot Instance pools that are optimally chosen based on the available Spot capacity." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.7.23.json b/.changes/2.7.23.json new file mode 100644 index 000000000000..a162d3b436f2 --- /dev/null +++ b/.changes/2.7.23.json @@ -0,0 +1,11 @@ +{ + "version": "2.7.23", + "date": "2019-08-13", + "entries": [ + { + "type": "feature", + "category": "AWS AppSync", + "description": "Adds a configuration option for AppSync GraphQL APIs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.24.json b/.changes/2.7.24.json new file mode 100644 index 000000000000..95c26c40a2a4 --- /dev/null +++ b/.changes/2.7.24.json @@ -0,0 +1,11 @@ +{ + "version": "2.7.24", + "date": "2019-08-14", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds a new API called SendDiagnosticInterrupt, which allows you to send diagnostic interrupts to your EC2 instance." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.25.json b/.changes/2.7.25.json new file mode 100644 index 000000000000..ee228d641945 --- /dev/null +++ b/.changes/2.7.25.json @@ -0,0 +1,36 @@ +{ + "version": "2.7.25", + "date": "2019-08-15", + "entries": [ + { + "type": "feature", + "category": "AWS CodeCommit", + "description": "This release adds an API, BatchGetCommits, that allows retrieval of metadata for multiple commits in an AWS CodeCommit repository." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds an option to use private certificates from AWS Certificate Manager (ACM) to authenticate a Site-to-Site VPN connection's tunnel endpoints and customer gateway device." + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "This release adds support for http header based routing and route prioritization." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "GetJobBookmarks API is withdrawn." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "CreateSnapshotFromVolumeRecoveryPoint API supports new parameter: Tags (to be attached to the created resource)" + }, + { + "type": "feature", + "category": "Amazon Athena", + "description": "This release adds support for querying S3 Requester Pays buckets. Users can enable this feature through their Workgroup settings." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.26.json b/.changes/2.7.26.json new file mode 100644 index 000000000000..36c2ce021937 --- /dev/null +++ b/.changes/2.7.26.json @@ -0,0 +1,21 @@ +{ + "version": "2.7.26", + "date": "2019-08-16", + "entries": [ + { + "type": "feature", + "category": "AWS RoboMaker", + "description": "Two feature release: 1. AWS RoboMaker introduces log-based simulation. Log-based simulation allows you to play back pre-recorded log data such as sensor streams for testing robotic functions like localization, mapping, and object detection. Use the AWS RoboMaker SDK to test your robotic applications. 2. AWS RoboMaker allow customer to setup a robot deployment timeout when CreateDeploymentJob." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Amazon EMR has introduced an account level configuration called Block Public Access that allows you to block clusters with ports open to traffic from public IP sources (i.e. 0.0.0.0/0 for IPv4 and ::/0 for IPv6) from launching. Individual ports or port ranges can be added as exceptions to allow public access." 
+ }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release of Amazon Elastic Container Service (Amazon ECS) introduces support for controlling the usage of swap space on a per-container basis for Linux containers." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.27.json b/.changes/2.7.27.json new file mode 100644 index 000000000000..fac59ea54122 --- /dev/null +++ b/.changes/2.7.27.json @@ -0,0 +1,16 @@ +{ + "version": "2.7.27", + "date": "2019-08-19", + "entries": [ + { + "type": "feature", + "category": "AWS Cost and Usage Report Service", + "description": "New IAM permission required for editing AWS Cost and Usage Reports - Starting today, you can allow or deny IAM users permission to edit Cost & Usage Reports through the API and the Billing and Cost Management console. To allow users to edit Cost & Usage Reports, ensure that they have 'cur: ModifyReportDefinition' permission. Refer to the technical documentation (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_cur_ModifyReportDefinition.html) for additional details." + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "Fix for HttpMethod enum" + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.28.json b/.changes/2.7.28.json new file mode 100644 index 000000000000..97f1db0b1e36 --- /dev/null +++ b/.changes/2.7.28.json @@ -0,0 +1,26 @@ +{ + "version": "2.7.28", + "date": "2019-08-20", + "entries": [ + { + "type": "feature", + "category": "Amazon AppStream", + "description": "Includes API updates to support streaming through VPC endpoints for image builders and stacks." + }, + { + "type": "feature", + "category": "AWS Transfer for SFTP", + "description": "New field in response of TestIdentityProvider" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Amazon SageMaker introduces Managed Spot Training. Increases the maximum number of metric definitions to 40 for SageMaker Training and Hyperparameter Tuning Jobs. SageMaker Neo adds support for Acer aiSage and Qualcomm QCS605 and QCS603." + }, + { + "type": "feature", + "category": "Alexa For Business", + "description": "Adding support for optional locale input in CreateProfile and UpdateProfile APIs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.29.json b/.changes/2.7.29.json new file mode 100644 index 000000000000..3ca89a7c2401 --- /dev/null +++ b/.changes/2.7.29.json @@ -0,0 +1,61 @@ +{ + "version": "2.7.29", + "date": "2019-08-21", + "entries": [ + { + "type": "feature", + "category": "Apache HTTP Client", + "description": "Enable TLS client authentication support for the Apache HTTP Client by allowing customers to specify a `TlsKeyManagersProvider` on the builder. The `KeyManger`s provided will be used when the remote server wants to authenticate the client." + }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "description": "Increased limits on number of items recommended and reranked: The maximum number of results returned from getRecommendations API has been increased to 200. The maximum number of items which can be reranked via getPersonalizedRanking API has been increased to 200." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Amazon SageMaker now supports Amazon EFS and Amazon FSx for Lustre file systems as data sources for training machine learning models. 
Amazon SageMaker now supports running training jobs on ml.p3dn.24xlarge instance type. This instance type is offered as a limited private preview for certain SageMaker customers. If you are interested in joining the private preview, please reach out to the SageMaker Product Management team via AWS Support.\"" + }, + { + "type": "feature", + "category": "Amazon Forecast Query Service", + "description": "Amazon Forecast is a fully managed machine learning service that makes it easy for customers to generate accurate forecasts using their historical time-series data" + }, + { + "type": "feature", + "category": "Amazon Simple Queue Service", + "description": "This release provides a way to add metadata tags to a queue when it is created. You can use tags to organize and identify your Amazon SQS queues for cost allocation." + }, + { + "type": "feature", + "category": "Netty NIO HTTP Client", + "description": "Add ability to to use HTTP proxies with the Netty async client." + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "Documentation updates for Amazon Rekognition." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "description": "Public preview version of 'dynamodb-enhanced' that has a new DynamoDb mapper library that can be used with the v2 SDK. See README.md in the module for more detailed information about this module." + }, + { + "type": "feature", + "category": "Amazon Forecast Service", + "description": "Amazon Forecast is a fully managed machine learning service that makes it easy for customers to generate accurate forecasts using their historical time-series data" + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "ElastiCache extends support for Scale down for Redis Cluster-mode enabled and disabled replication groups" + }, + { + "type": "feature", + "category": "HTTP Client SPI", + "description": "Add `TlsKeyManagersProvider` interface for supporting TLS client auth in HTTP client implementations." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.30.json b/.changes/2.7.30.json new file mode 100644 index 000000000000..88854e1baef3 --- /dev/null +++ b/.changes/2.7.30.json @@ -0,0 +1,16 @@ +{ + "version": "2.7.30", + "date": "2019-08-22", + "entries": [ + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release allows users to enable RDS Data API while creating Aurora Serverless databases." + }, + { + "type": "feature", + "category": "AWS DataSync", + "description": "This release adds support for SMB location type." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.31.json b/.changes/2.7.31.json new file mode 100644 index 000000000000..797aeb1d6425 --- /dev/null +++ b/.changes/2.7.31.json @@ -0,0 +1,21 @@ +{ + "version": "2.7.31", + "date": "2019-08-23", + "entries": [ + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "Amazon Transcribe - support transcriptions from audio sources in Russian (ru-RU) and Chinese (zh-CN)." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release of EC2 VM Import Export adds support for exporting Amazon Machine Image(AMI)s to a VM file" + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage VOD", + "description": "Adds optional Constant Initialization Vector (IV) to HLS Encryption for MediaPackage VOD." 
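The 2.7.29 entries above for the Apache HTTP Client and the HTTP Client SPI describe TLS client authentication via a `TlsKeyManagersProvider` set on the client builder. The sketch below shows one way a caller could wire that up, assuming the builder method is named `tlsKeyManagersProvider` as the entry implies; the keystore path and password are placeholders, and building the key managers with standard JSSE is just one option rather than the SDK's prescribed approach.

```java
import software.amazon.awssdk.http.TlsKeyManagersProvider;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.services.s3.S3Client;

import javax.net.ssl.KeyManagerFactory;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.KeyStore;

public class TlsClientAuthExample {
    public static void main(String[] args) throws Exception {
        char[] password = "changeit".toCharArray();                         // placeholder password
        KeyStore keyStore = KeyStore.getInstance("PKCS12");
        try (InputStream in = Files.newInputStream(Paths.get("client-cert.p12"))) { // placeholder path
            keyStore.load(in, password);
        }
        KeyManagerFactory kmf =
                KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        kmf.init(keyStore, password);

        // TlsKeyManagersProvider is the SPI-level hook; here it simply hands
        // the Apache client the key managers built above.
        TlsKeyManagersProvider keyManagers = kmf::getKeyManagers;

        try (S3Client s3 = S3Client.builder()
                .httpClient(ApacheHttpClient.builder()
                        .tlsKeyManagersProvider(keyManagers)
                        .build())
                .build()) {
            // The client presents the configured certificate whenever the
            // remote endpoint requests TLS client authentication.
            s3.listBuckets();
        }
    }
}
```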
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.7.32.json b/.changes/2.7.32.json new file mode 100644 index 000000000000..013a3947582a --- /dev/null +++ b/.changes/2.7.32.json @@ -0,0 +1,16 @@ +{ + "version": "2.7.32", + "date": "2019-08-26", + "entries": [ + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "This release resolves an issue with the DescribeHub action, changes the MasterId and InvitationId parameters for AcceptInvitation to Required, and changes the AccountIds parameter for DeleteInvitations and DeclineInvitations to Required." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This feature adds \"default tier\" to the AWS Systems Manager Parameter Store for parameter creation and update. AWS customers can now set the \"default tier\" to one of the following values: Standard (default), Advanced or Intelligent-Tiering. This allows customers to create advanced parameters or parameters in corresponding tiers with one setting rather than code change to specify parameter tiers." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.33.json b/.changes/2.7.33.json new file mode 100644 index 000000000000..f77169cdc178 --- /dev/null +++ b/.changes/2.7.33.json @@ -0,0 +1,11 @@ +{ + "version": "2.7.33", + "date": "2019-08-27", + "entries": [ + { + "type": "feature", + "category": "AWS Organizations", + "description": "Documentation updates for organizations" + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.34.json b/.changes/2.7.34.json new file mode 100644 index 000000000000..152cbf7d5bc6 --- /dev/null +++ b/.changes/2.7.34.json @@ -0,0 +1,21 @@ +{ + "version": "2.7.34", + "date": "2019-08-28", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Queue Service", + "description": "Added support for message system attributes, which currently lets you send AWS X-Ray trace IDs through Amazon SQS." + }, + { + "type": "feature", + "category": "AWS Global Accelerator", + "description": "API Update for AWS Global Accelerator Client IP Preservation" + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "This release adds the ability to send a job to an on-demand queue while simulating the performance of a job sent to a reserved queue. Use this setting to estimate the number of reserved transcoding slots (RTS) you need for a reserved queue." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.35.json b/.changes/2.7.35.json new file mode 100644 index 000000000000..175c5903ec47 --- /dev/null +++ b/.changes/2.7.35.json @@ -0,0 +1,31 @@ +{ + "version": "2.7.35", + "date": "2019-08-29", + "entries": [ + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "With the current release, you can suspend and later resume any of the following scaling actions in Application Auto Scaling: scheduled scaling actions, dynamic scaling in actions, dynamic scaling out actions." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release of Amazon Elastic Container Service (Amazon ECS) introduces support for including Docker container IDs in the API response when describing and stopping tasks. This enables customers to easily map containers to the tasks they are associated with." + }, + { + "type": "feature", + "category": "AWS CodePipeline", + "description": "Introducing pipeline execution trigger details in ListPipelineExecutions API." 
+ }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Amazon ElastiCache for Redis now supports encryption at rest using customer managed customer master keys (CMKs) in AWS Key Management Service (KMS). Amazon ElastiCache now supports cluster names up to 40 characters for replicationGroups and up to 50 characters for cacheClusters." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Adds a \"MaximumBatchingWindowInSeconds\" parameter to event source mapping APIs. Usable by DynamoDB and Kinesis event sources." + } + ] +} \ No newline at end of file diff --git a/.changes/2.7.36.json b/.changes/2.7.36.json new file mode 100644 index 000000000000..a6531612229d --- /dev/null +++ b/.changes/2.7.36.json @@ -0,0 +1,21 @@ +{ + "version": "2.7.36", + "date": "2019-08-30", + "entries": [ + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release of Amazon Elastic Container Service (Amazon ECS) introduces support for modifying the cluster settings for existing clusters, which enables you to toggle whether Container Insights is enabled or not. Support is also introduced for custom log routing using the ECS FireLens integration." + }, + { + "type": "feature", + "category": "AmazonMQ", + "description": "Adds support for updating security groups selection of an Amazon MQ broker." + }, + { + "type": "feature", + "category": "AmazonApiGatewayManagementApi", + "description": "You can use getConnection to return information about the connection (when it is connected, IP address, etc.) and deleteConnection to disconnect the given connection" + } + ] +} \ No newline at end of file diff --git a/.changes/2.8.0.json b/.changes/2.8.0.json new file mode 100644 index 000000000000..ee4068120b02 --- /dev/null +++ b/.changes/2.8.0.json @@ -0,0 +1,31 @@ +{ + "version": "2.8.0", + "date": "2019-09-03", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Upgrade Netty version to 4.1.39.Final, netty reactive streams version to 2.0.3, netty open ssl version to 2.0.25.Final" + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release of Amazon Elastic Container Service (Amazon ECS) introduces support for attaching Amazon Elastic Inference accelerators to your containers. This enables you to run deep learning inference workloads with hardware acceleration in a more efficient way." + }, + { + "type": "feature", + "category": "AWS Resource Groups Tagging API", + "description": "Documentation updates for resourcegroupstaggingapi" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Bump minor version to 2.8.0-SNAPSHOT because of [#1391](https://github.com/aws/aws-sdk-java-v2/issues/1391)." + }, + { + "type": "feature", + "category": "Amazon GameLift", + "description": "You can now make use of PKI resources to provide more secure connections between your game clients and servers. To learn more, please refer to the public Amazon GameLift documentation."
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.8.1.json b/.changes/2.8.1.json new file mode 100644 index 000000000000..a8a603677df8 --- /dev/null +++ b/.changes/2.8.1.json @@ -0,0 +1,21 @@ +{ + "version": "2.8.1", + "date": "2019-09-04", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "Amazon EKS DescribeCluster API returns a new OIDC issuer field that can be used to create OIDC identity provider for IAM for Service Accounts feature." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "MediaFormat is now optional for StartTranscriptionJob API." + }, + { + "type": "feature", + "category": "AWS Step Functions", + "description": "Added support for new history events" + } + ] +} \ No newline at end of file diff --git a/.changes/2.8.2.json b/.changes/2.8.2.json new file mode 100644 index 000000000000..3205f90f54c1 --- /dev/null +++ b/.changes/2.8.2.json @@ -0,0 +1,11 @@ +{ + "version": "2.8.2", + "date": "2019-09-05", + "entries": [ + { + "type": "feature", + "category": "AWS Config", + "description": "AWS Config now includes the option for marking RemediationConfigurations as automatic, removing the need to call the StartRemediationExecution API. Manual control over resource execution rate is also included, and RemediationConfigurations are now ARN addressable. Exceptions to exclude account resources from being remediated can be configured with the new PutRemediationExceptions, DescribeRemediationExceptions, and DeleteRemediationExceptions APIs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.8.3.json b/.changes/2.8.3.json new file mode 100644 index 000000000000..ff9c03e79bad --- /dev/null +++ b/.changes/2.8.3.json @@ -0,0 +1,11 @@ +{ + "version": "2.8.3", + "date": "2019-09-06", + "entries": [ + { + "type": "feature", + "category": "Amazon Kinesis Analytics", + "description": "Documentation updates for kinesisanalytics" + } + ] +} \ No newline at end of file diff --git a/.changes/2.8.4.json b/.changes/2.8.4.json new file mode 100644 index 000000000000..4939aabb9c8a --- /dev/null +++ b/.changes/2.8.4.json @@ -0,0 +1,41 @@ +{ + "version": "2.8.4", + "date": "2019-09-09", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release expands Site-to-Site VPN tunnel options to allow customers to restrict security algorithms and configure timer settings for VPN connections. Customers can specify these new options while creating new VPN connections, or they can modify the tunnel options on existing connections using a new API." + }, + { + "type": "feature", + "category": "Amazon QLDB Session", + "description": "(New Service) Amazon QLDB is a fully managed ledger database that provides a transparent, immutable, and cryptographically verifiable transaction log owned by a central trusted authority. Amazon QLDB is a new class of serverless database that eliminates the need to engage in the complex development effort of building your own ledger-like applications and it automatically scales to support the demands of your application. Introduces Amazon QLDB API operations needed for interacting with data in Amazon QLDB ledgers." + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "This release adds support for http retry policies." 
+ }, + { + "type": "feature", + "category": "AWS Marketplace Commerce Analytics", + "description": "Add FDP+FPS (monthly_revenue_field_demonstration_usage + monthly_revenue_flexible_payment_schedule) to Marketplace Commerce Analytics Service" + }, + { + "type": "feature", + "category": "Amazon AppStream", + "description": "IamRoleArn support in CreateFleet, UpdateFleet, CreateImageBuilder APIs" + }, + { + "type": "feature", + "category": "Amazon QLDB", + "description": "(New Service) Amazon QLDB is a fully managed ledger database that provides a transparent, immutable, and cryptographically verifiable transaction log owned by a central trusted authority. Amazon QLDB is a new class of serverless database that eliminates the need to engage in the complex development effort of building your own ledger-like applications and it automatically scales to support the demands of your application. Introduces Amazon QLDB API operations needed for managing Amazon QLDB ledgers. This includes the ability to manage Amazon QLDB ledgers, cryptographically verify documents, and export the journal in a ledger." + }, + { + "type": "feature", + "category": "AWS RoboMaker", + "description": "Support for Connectivity to Simulation. When you need to interact with the applications in your simulation job, you can connect to your robot application or simulation application with port forwarding. When you configure port forwarding, traffic will be forwarded from the simulation job port to the application port. Port forwarding makes it easy to connect with tools such as ROS Bridge and other tools. This can be useful when you want to debug or run custom tools to interact with your applications." + } + ] +} \ No newline at end of file diff --git a/.changes/2.8.5.json b/.changes/2.8.5.json new file mode 100644 index 000000000000..ecd956c158bb --- /dev/null +++ b/.changes/2.8.5.json @@ -0,0 +1,16 @@ +{ + "version": "2.8.5", + "date": "2019-09-10", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix marshalling for models with xml attribute. See [#1182](https://github.com/aws/aws-sdk-java-v2/issues/1182)" + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "The CloudWatchLogGroupARN parameter of the UpdateGatewayInformation API allows for configuring the gateway to use a CloudWatch log-group where Storage Gateway health events will be logged." + } + ] +} \ No newline at end of file diff --git a/.changes/2.8.6.json b/.changes/2.8.6.json new file mode 100644 index 000000000000..ed4e232b007e --- /dev/null +++ b/.changes/2.8.6.json @@ -0,0 +1,36 @@ +{ + "version": "2.8.6", + "date": "2019-09-11", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds support for new data fields and log format in VPC flow logs." + }, + { + "type": "feature", + "category": "AWS Config", + "description": "Adding input validation for the OrganizationConfigRuleName string." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release allows customers to specify a custom parameter group when creating a Read Replica, for DB engines which support this feature." + }, + { + "type": "feature", + "category": "AWS MediaConnect", + "description": "This release adds support for the RIST protocol on sources and outputs." 
+ }, + { + "type": "feature", + "category": "AWS Step Functions", + "description": "Fixing letter case in Map history event details to be small case" + }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "description": "Updated API documentation to correct broken links, and to update content based on customer feedback." + } + ] +} \ No newline at end of file diff --git a/.changes/2.8.7.json b/.changes/2.8.7.json new file mode 100644 index 000000000000..8f50dfe0c291 --- /dev/null +++ b/.changes/2.8.7.json @@ -0,0 +1,26 @@ +{ + "version": "2.8.7", + "date": "2019-09-12", + "entries": [ + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "Documentation updates for elasticloadbalancingv2: This release adds support for TLS SNI on Network Load Balancers" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Fix for FleetActivityStatus and FleetStateCode enum" + }, + { + "type": "feature", + "category": "Amazon WorkMail Message Flow", + "description": "This release allows customers to access email messages as they flow to and from Amazon WorkMail." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports High Efficiency Video Coding (HEVC) for standard-definition (SD), high-definition (HD), and ultra-high-definition (UHD) encoding with HDR support.Encoding with HEVC offers a number of advantages. While UHD video requires an advanced codec beyond H.264 (AVC), high frame rate (HFR) or High Dynamic Range (HDR) content in HD also benefit from HEVC's advancements. In addition, benefits can be achieved with HD and SD content even if HDR and HFR are not needed." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.0.json b/.changes/2.9.0.json new file mode 100644 index 000000000000..f55163582366 --- /dev/null +++ b/.changes/2.9.0.json @@ -0,0 +1,26 @@ +{ + "version": "2.9.0", + "date": "2019-09-16", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for multi-DRM SPEKE with CMAF outputs, MP3 ingest, and options for improved video quality." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Bump minor version to `2.9.0-SNAPSHOT` because of [#1413](https://github.com/aws/aws-sdk-java-v2/issues/1413)." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updating dependencies versions: jackson 2.9.8 -> 2.9.9, slf4j 1.7.35 -> 1.7.38, netty 4.1.39.Final -> 4.1.41.Final (contains the fix for the performance regression in 4.1.39)" + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "description": "This release lets customers add tags to an Amazon EKS cluster. These tags can be used to control access to the EKS API for managing the cluster using IAM. The Amazon EKS TagResource API allows customers to associate tags with their cluster. Customers can list tags for a cluster using the ListTagsForResource API and remove tags from a cluster with the UntagResource API. Note: tags are specific to the EKS cluster resource, they do not propagate to other AWS resources used by the cluster." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.9.1.json b/.changes/2.9.1.json new file mode 100644 index 000000000000..ae13902b847d --- /dev/null +++ b/.changes/2.9.1.json @@ -0,0 +1,21 @@ +{ + "version": "2.9.1", + "date": "2019-09-17", + "entries": [ + { + "type": "feature", + "category": "Amazon Athena", + "description": "This release adds DataManifestLocation field indicating the location and file name of the data manifest file. Users can get a list of files that the Athena query wrote or intended to write from the manifest file." + }, + { + "type": "feature", + "category": "AWS Identity and Access Management", + "description": "Documentation updates for iam" + }, + { + "type": "feature", + "category": "Amazon Personalize", + "description": "[Personalize] Adds trainingHours to solutionVersion properties." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.10.json b/.changes/2.9.10.json new file mode 100644 index 000000000000..db222221218d --- /dev/null +++ b/.changes/2.9.10.json @@ -0,0 +1,21 @@ +{ + "version": "2.9.10", + "date": "2019-09-30", + "entries": [ + { + "type": "feature", + "category": "AWS WAF", + "description": "Lowering the threshold for Rate Based rule from 2000 to 100." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release adds support for creating a Read Replica with Active Directory domain information. This release updates RDS API to indicate whether an OrderableDBInstanceOption supports Kerberos Authentication." + }, + { + "type": "feature", + "category": "AmazonMQ", + "description": "Amazon MQ now includes the ability to scale your brokers by changing the host instance type. See the hostInstanceType property of UpdateBrokerInput (https://docs.aws.amazon.com/amazon-mq/latest/api-reference/brokers-broker-id.html#brokers-broker-id-model-updatebrokerinput), and pendingHostInstanceType property of DescribeBrokerOutput (https://docs.aws.amazon.com/amazon-mq/latest/api-reference/brokers-broker-id.html#brokers-broker-id-model-describebrokeroutput)." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.11.json b/.changes/2.9.11.json new file mode 100644 index 000000000000..aab62e0b204e --- /dev/null +++ b/.changes/2.9.11.json @@ -0,0 +1,11 @@ +{ + "version": "2.9.11", + "date": "2019-10-01", + "entries": [ + { + "type": "feature", + "category": "Amazon DocumentDB with MongoDB compatibility", + "description": "This release provides support for describe and modify CA certificates." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.12.json b/.changes/2.9.12.json new file mode 100644 index 000000000000..9e6c95d8ac74 --- /dev/null +++ b/.changes/2.9.12.json @@ -0,0 +1,11 @@ +{ + "version": "2.9.12", + "date": "2019-10-02", + "entries": [ + { + "type": "feature", + "category": "Amazon Lightsail", + "description": "This release adds support for the automatic snapshots add-on for instances and block storage disks." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.9.13.json b/.changes/2.9.13.json new file mode 100644 index 000000000000..1695dc7efeba --- /dev/null +++ b/.changes/2.9.13.json @@ -0,0 +1,26 @@ +{ + "version": "2.9.13", + "date": "2019-10-03", + "entries": [ + { + "type": "feature", + "category": "AWS Device Farm", + "description": "Documentation updates for devicefarm" + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "description": "Amazon Elasticsearch Service now supports configuring additional options for domain endpoint, such as whether to require HTTPS for all traffic." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "Documentation updates for Application Auto Scaling" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release allows customers to purchase regional EC2 RIs on a future date." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.14.json b/.changes/2.9.14.json new file mode 100644 index 000000000000..984867c1bb1a --- /dev/null +++ b/.changes/2.9.14.json @@ -0,0 +1,36 @@ +{ + "version": "2.9.14", + "date": "2019-10-04", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Documentation updates for Systems Manager / StartSession." + }, + { + "type": "feature", + "category": "Amazon S3 Control", + "description": "Adds support for the Amazon S3 Control service to the SDK." + }, + { + "type": "bugfix", + "category": "Amazon CloudWatch", + "description": "Add cloudwatch specific http configurations, specifically reducing `connectionMaxIdleTime`. Related to [#1380](https://github.com/aws/aws-sdk-java-v2/issues/1380)" + }, + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Add s3 specific http configurations, specifically reducing `connectionMaxIdleTime`. Related to [#1122](https://github.com/aws/aws-sdk-java-v2/issues/1122)" + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "This release adds ClientMetadata input parameter to multiple Cognito User Pools operations, making this parameter available to the customer configured lambda triggers as applicable." + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage", + "description": "New Harvest Job APIs to export segment-accurate content windows from MediaPackage Origin Endpoints to S3. See https://docs.aws.amazon.com/mediapackage/latest/ug/harvest-jobs.html for more info" + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.15.json b/.changes/2.9.15.json new file mode 100644 index 000000000000..3d651b8ecc00 --- /dev/null +++ b/.changes/2.9.15.json @@ -0,0 +1,36 @@ +{ + "version": "2.9.15", + "date": "2019-10-07", + "entries": [ + { + "type": "feature", + "category": "Amazon Kinesis Firehose", + "description": "Amazon Kinesis Data Firehose now allows delivering data to Elasticsearch clusters set up in a different AWS account than the Firehose AWS account. For technical documentation, look for ElasticsearchDestinationConfiguration in the Amazon Kinesis Firehose API reference." + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "This release of the Amazon Pinpoint API introduces support for using and managing message templates." 
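The 2.9.14 bugfix entries above reduce `connectionMaxIdleTime` internally for the CloudWatch and S3 clients. As a related illustration, a caller can apply the same kind of tuning on their own HTTP client; the sketch below is a minimal example, and the five-second value is illustrative rather than the SDK's internal default.

```java
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;

import java.time.Duration;

public class IdleConnectionTuningExample {
    public static void main(String[] args) {
        // A short max idle time discards pooled connections before the service
        // side is likely to have closed them, reducing reuse of stale sockets.
        try (CloudWatchClient cloudWatch = CloudWatchClient.builder()
                .region(Region.US_WEST_2)
                .httpClientBuilder(ApacheHttpClient.builder()
                        .connectionMaxIdleTime(Duration.ofSeconds(5)))
                .build()) {
            cloudWatch.listMetrics();
        }
    }
}
```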
+ }, + { + "type": "feature", + "category": "AWS Direct Connect", + "description": "This release adds a service provider field for physical connection creation and provides a list of available partner providers for each Direct Connect location." + }, + { + "type": "feature", + "category": "Amazon Pinpoint Email Service", + "description": "This release of the Amazon Pinpoint Email API introduces support for using and managing message templates." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "AWS Glue now provides ability to use custom certificates for JDBC Connections." + }, + { + "type": "feature", + "category": "Amazon Import/Export Snowball", + "description": "AWS Snowball Edge now allows you to perform an offline update to the software of your Snowball Edge device when your device is not connected to the internet. Previously, updating your Snowball Edge's software required that the device be connected to the internet or be sent back to AWS. Now, you can keep your Snowball Edge software up to date even if your device(s) cannot connect to the internet, or are required to run in an air-gapped environment. To complete offline updates, download the software update from a client machine with connection to the internet using the AWS Command Line Interface (CLI). Then, have the Snowball Edge device download and install the software update using the Snowball Edge device API. For more information about offline updates, visit the Snowball Edge documentation page." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.16.json b/.changes/2.9.16.json new file mode 100644 index 000000000000..de5a320d83d2 --- /dev/null +++ b/.changes/2.9.16.json @@ -0,0 +1,36 @@ +{ + "version": "2.9.16", + "date": "2019-10-08", + "entries": [ + { + "type": "feature", + "category": "AWS Organizations", + "description": "Documentation updates for organizations" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "EC2MetadataUtils: add marketplaceProductCodes inside InstanceInfo's POJO" + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "description": "Documentation updates for Amazon EventBridge." + }, + { + "type": "feature", + "category": "AWS DataSync", + "description": "Add Sync options to enable/disable TaskQueueing" + }, + { + "type": "feature", + "category": "Amazon S3 Control", + "description": "Adds support for the Amazon S3 Control service to the SDK." + }, + { + "type": "feature", + "category": "Amazon Kinesis Firehose", + "description": "With this release, you can use Amazon Kinesis Firehose delivery streams to deliver streaming data to Amazon Elasticsearch Service version 7.x clusters. For technical documentation, look for CreateDeliveryStream operation in Amazon Kinesis Firehose API reference." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.17.json b/.changes/2.9.17.json new file mode 100644 index 000000000000..641c6a85b456 --- /dev/null +++ b/.changes/2.9.17.json @@ -0,0 +1,31 @@ +{ + "version": "2.9.17", + "date": "2019-10-09", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for Dolby Atmos encoding, up to 36 outputs, accelerated transcoding with frame capture and preferred acceleration feature." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Expose instance signature through EC2MetadataUtils" + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Amazon ElastiCache now allows you to apply available service updates on demand to your Memcached and Redis Cache Clusters. Features included: (1) Access to the list of applicable service updates and their priorities. (2) Service update monitoring and regular status updates. (3) Recommended apply-by-dates for scheduling the service updates. (4) Ability to stop and later re-apply updates. For more information, see https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Self-Service-Updates.html and https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Self-Service-Updates.html" + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "Updated documentation for Amazon Managed Streaming for Kafka service." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix the implementations of `equals(Object)` and `hashCode()` for `DefaultSdkAutoConstructList` and `DefaultSdkAutoConstructMap` so that they follow the Java `equals` and `hashCode` contract. In addition, ensure that these implementations' `toString()` methods return nicely readable results. Fixes [#1445](https://github.com/aws/aws-sdk-java-v2/issues/1445)" + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.18.json b/.changes/2.9.18.json new file mode 100644 index 000000000000..40bddf65c2cf --- /dev/null +++ b/.changes/2.9.18.json @@ -0,0 +1,31 @@ +{ + "version": "2.9.18", + "date": "2019-10-10", + "entries": [ + { + "type": "feature", + "category": "AWS IoT Analytics", + "description": "Add `completionTime` to API call ListDatasetContents." + }, + { + "type": "feature", + "category": "Amazon Lex Runtime Service", + "description": "Amazon Lex now supports Session API checkpoints" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Implement arn parser functions in `arns` module." + }, + { + "type": "feature", + "category": "Firewall Management Service", + "description": "Firewall Manager now supports Amazon VPC security groups, making it easier to configure and manage security groups across multiple accounts from a single place." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "New EC2 M5n, M5dn, R5n, R5dn instances with 100 Gbps network performance and Elastic Fabric Adapter (EFA) for ultra low latency; New A1.metal bare metal instance powered by AWS Graviton Processors" + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.19.json b/.changes/2.9.19.json new file mode 100644 index 000000000000..c9dfdf68e84e --- /dev/null +++ b/.changes/2.9.19.json @@ -0,0 +1,11 @@ +{ + "version": "2.9.19", + "date": "2019-10-11", + "entries": [ + { + "type": "feature", + "category": "AWS Greengrass", + "description": "Greengrass OTA service supports Raspbian/Armv6l platforms." 
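The 2.9.18 entry above introduces ARN parser functions in the `arns` module. The sketch below shows the intended usage pattern, assuming the accessor names shown here (`partition`, `service`, `region`, `accountId`, `resourceAsString`); the ARN itself is a placeholder.

```java
import software.amazon.awssdk.arns.Arn;

public class ArnParsingExample {
    public static void main(String[] args) {
        // Placeholder ARN for illustration.
        Arn arn = Arn.fromString("arn:aws:sqs:us-west-2:123456789012:example-queue");

        System.out.println(arn.partition());              // aws
        System.out.println(arn.service());                // sqs
        System.out.println(arn.region().orElse("-"));     // us-west-2
        System.out.println(arn.accountId().orElse("-"));  // 123456789012
        System.out.println(arn.resourceAsString());       // example-queue
    }
}
```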
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.9.2.json b/.changes/2.9.2.json new file mode 100644 index 000000000000..082d5ea8c7e6 --- /dev/null +++ b/.changes/2.9.2.json @@ -0,0 +1,21 @@ +{ + "version": "2.9.2", + "date": "2019-09-18", + "entries": [ + { + "type": "feature", + "category": "Amazon API Gateway", + "description": "Amazon API Gateway simplifies accessing PRIVATE APIs by allowing you to associate one or more Amazon Virtual Private Cloud (VPC) Endpoints to a private API. API Gateway will create and manage DNS alias records necessary for easily invoking the private APIs. With this feature, you can leverage private APIs in web applications hosted within your VPCs." + }, + { + "type": "feature", + "category": "AWS WAF Regional", + "description": "Lowering the threshold for Rate Based rule from 2000 to 100." + }, + { + "type": "feature", + "category": "AWS Resource Access Manager", + "description": "AWS RAM provides a new ListPendingInvitationResources API action that lists the resources in a resource share that is shared with you but that the invitation is still pending for" + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.20.json b/.changes/2.9.20.json new file mode 100644 index 000000000000..4a6106d26e8b --- /dev/null +++ b/.changes/2.9.20.json @@ -0,0 +1,16 @@ +{ + "version": "2.9.20", + "date": "2019-10-14", + "entries": [ + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Documentation updates for WorkSpaces" + }, + { + "type": "feature", + "category": "Amazon Personalize", + "description": "AWS Personalize: Adds ability to create a solution version using FULL or UPDATE training mode" + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.21.json b/.changes/2.9.21.json new file mode 100644 index 000000000000..45f40144993d --- /dev/null +++ b/.changes/2.9.21.json @@ -0,0 +1,11 @@ +{ + "version": "2.9.21", + "date": "2019-10-15", + "entries": [ + { + "type": "feature", + "category": "Amazon Kinesis Video Streams Archived Media", + "description": "Add ON_DISCONTINUITY mode to the GetHLSStreamingSessionURL API" + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.22.json b/.changes/2.9.22.json new file mode 100644 index 000000000000..1de38bca5b5e --- /dev/null +++ b/.changes/2.9.22.json @@ -0,0 +1,21 @@ +{ + "version": "2.9.22", + "date": "2019-10-16", + "entries": [ + { + "type": "feature", + "category": "AWS RoboMaker", + "description": "This release adds support for ROS2 Dashing as a beta feature" + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "AWS MSK has added support for adding brokers to a cluster." + }, + { + "type": "feature", + "category": "AWS Marketplace Commerce Analytics", + "description": "add 2 more values for the supporting sections - age of past due funds + uncollected funds breakdown" + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.23.json b/.changes/2.9.23.json new file mode 100644 index 000000000000..b69f89a56888 --- /dev/null +++ b/.changes/2.9.23.json @@ -0,0 +1,16 @@ +{ + "version": "2.9.23", + "date": "2019-10-17", + "entries": [ + { + "type": "feature", + "category": "AWS Batch", + "description": "Adding support for Compute Environment Allocation Strategies" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Amazon RDS now supports Amazon RDS on VMware with the introduction of APIs related to Custom Availability Zones and Media installation." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.9.24.json b/.changes/2.9.24.json new file mode 100644 index 000000000000..897c07ca66ad --- /dev/null +++ b/.changes/2.9.24.json @@ -0,0 +1,16 @@ +{ + "version": "2.9.24", + "date": "2019-10-18", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudWatch", + "description": "New Period parameter added to MetricDataQuery structure." + }, + { + "type": "bugfix", + "category": "Netty NIO Http Client", + "description": "Update `HealthCheckedChannelPool` to check `KEEP_ALIVE` when acquiring a channel from the pool to avoid soon-to-be inactive channels being picked up by a new request. This should reduce the frequency of `IOException: Server failed to complete response` errors. See [#1380](https://github.com/aws/aws-sdk-java-v2/issues/1380), [#1466](https://github.com/aws/aws-sdk-java-v2/issues/1466)." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.25.json b/.changes/2.9.25.json new file mode 100644 index 000000000000..6daf2fdea78d --- /dev/null +++ b/.changes/2.9.25.json @@ -0,0 +1,21 @@ +{ + "version": "2.9.25", + "date": "2019-10-22", + "entries": [ + { + "type": "feature", + "category": "AWS OpsWorks CM", + "description": "AWS OpsWorks for Chef Automate (OWCA) now allows customers to use a custom domain and respective certificate, for their AWS OpsWorks For Chef Automate servers. Customers can now provide a CustomDomain, CustomCertificate and CustomPrivateKey in CreateServer API to configure their Chef Automate servers with a custom domain and certificate." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Better handle `GOAWAY` messages from the remote endpoint." + }, + { + "type": "feature", + "category": "AWS IoT Events", + "description": "Add support for new serial evaluation method for events in a detector model." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.26.json b/.changes/2.9.26.json new file mode 100644 index 000000000000..f316ca06c2d2 --- /dev/null +++ b/.changes/2.9.26.json @@ -0,0 +1,21 @@ +{ + "version": "2.9.26", + "date": "2019-10-23", + "entries": [ + { + "type": "feature", + "category": "AWS Security Token Service", + "description": "AWS Security Token Service (STS) now supports a regional configuration flag to make the client respect the region without the need for the endpoint parameter." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "description": "This release adds 4 new APIs ListQueues, ListPhoneNumbers, ListContactFlows, and ListHoursOfOperations, which can be used to programmatically list Queues, PhoneNumbers, ContactFlows, and HoursOfOperations configured for an Amazon Connect instance respectively. You can learn more about the new APIs here: https://docs.aws.amazon.com/connect/latest/APIReference/Welcome.html." + }, + { + "type": "feature", + "category": "Amazon Polly", + "description": "Amazon Polly adds new female voices: US Spanish - Lupe and Brazilian Portuguese - Camila; both voices are available in Standard and Neural engine." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.9.3.json b/.changes/2.9.3.json new file mode 100644 index 000000000000..e35abe3f2801 --- /dev/null +++ b/.changes/2.9.3.json @@ -0,0 +1,21 @@ +{ + "version": "2.9.3", + "date": "2019-09-19", + "entries": [ + { + "type": "feature", + "category": "AWS Glue", + "description": "AWS Glue DevEndpoints now supports GlueVersion, enabling you to choose Apache Spark 2.4.3 (in addition to Apache Spark 2.2.1). In addition to supporting the latest version of Spark, you will also have the ability to choose between Python 2 and Python 3." + }, + { + "type": "feature", + "category": "AWS MediaConnect", + "description": "When you grant an entitlement, you can now specify the percentage of the entitlement data transfer that you want the subscriber to be responsible for." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release of Amazon Elastic Container Service (Amazon ECS) introduces support for container image manifest digests. This enables you to identify all tasks launched using a container image pulled from ECR in order to correlate what was built with where it is running." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.4.json b/.changes/2.9.4.json new file mode 100644 index 000000000000..cb90e705e0e9 --- /dev/null +++ b/.changes/2.9.4.json @@ -0,0 +1,26 @@ +{ + "version": "2.9.4", + "date": "2019-09-20", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "G4 instances are Amazon EC2 instances based on NVIDIA T4 GPUs and are designed to provide cost-effective machine learning inference for applications, like image classification, object detection, recommender systems, automated speech recognition, and language translation. G4 instances are also a cost-effective platform for building and running graphics-intensive applications, such as remote graphics workstations, video transcoding, photo-realistic design, and game streaming in the cloud. To get started with G4 instances visit https://aws.amazon.com/ec2/instance-types/g4." + }, + { + "type": "feature", + "category": "AWS Greengrass", + "description": "Greengrass OTA service now returns the updated software version in the PlatformSoftwareVersion parameter of a CreateSoftwareUpdateJob response" + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Adds the WorkSpaces restore feature" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Add a new LeaseID output field to DescribeReservedDBInstances, which shows the unique identifier for the lease associated with the reserved DB instance. AWS Support might request the lease ID for an issue related to a reserved DB instance." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.5.json b/.changes/2.9.5.json new file mode 100644 index 000000000000..58781893e4d2 --- /dev/null +++ b/.changes/2.9.5.json @@ -0,0 +1,16 @@ +{ + "version": "2.9.5", + "date": "2019-09-23", + "entries": [ + { + "type": "feature", + "category": "AWS RDS DataService", + "description": "RDS Data API now supports Amazon Aurora Serverless PostgreSQL databases." + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Adds API operation DescribeNodeConfigurationOptions and associated data structures." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.9.6.json b/.changes/2.9.6.json new file mode 100644 index 000000000000..9cd9616180f1 --- /dev/null +++ b/.changes/2.9.6.json @@ -0,0 +1,21 @@ +{ + "version": "2.9.6", + "date": "2019-09-24", + "entries": [ + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "With this update Amazon Transcribe enables you to provide an AWS KMS key to encrypt your transcription output." + }, + { + "type": "feature", + "category": "AWS DataSync", + "description": "Added S3StorageClass, OverwriteMode sync option, and ONLY_FILES_TRANSFERRED setting for the VerifyMode sync option." + }, + { + "type": "feature", + "category": "AWS Comprehend Medical", + "description": "Use Amazon Comprehend Medical to analyze medical text stored in the specified Amazon S3 bucket. Use the console to create and manage batch analysis jobs, or use the batch APIs to detect both medical entities and protected health information (PHI). The batch APIs start, stop, list, and retrieve information about batch analysis jobs. This release also includes DetectEntitiesV2 operation which returns the Acuity and Direction entities as attributes instead of types." + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.7.json b/.changes/2.9.7.json new file mode 100644 index 000000000000..a3d28b1bf7c5 --- /dev/null +++ b/.changes/2.9.7.json @@ -0,0 +1,21 @@ +{ + "version": "2.9.7", + "date": "2019-09-25", + "entries": [ + { + "type": "feature", + "category": "AWS Global Accelerator", + "description": "API Update for AWS Global Accelerator to support for DNS aliasing." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "This release adds a new DeleteConnection API to delete the connection between a replication instance and an endpoint. It also adds an optional S3 setting to specify the precision of any TIMESTAMP column values written to an S3 object file in .parquet format." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Enable G4D and R5 instances in SageMaker Hosting Services" + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.8.json b/.changes/2.9.8.json new file mode 100644 index 000000000000..59ea447f60d6 --- /dev/null +++ b/.changes/2.9.8.json @@ -0,0 +1,16 @@ +{ + "version": "2.9.8", + "date": "2019-09-26", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This release updates the AWS Systems Manager Parameter Store PutParameter and LabelParameterVersion APIs to return the \"Tier\" of parameter created/updated and the \"parameter version\" labeled respectively." + }, + { + "type": "feature", + "category": "AWS CodePipeline", + "description": "Documentation updates for CodePipeline" + } + ] +} \ No newline at end of file diff --git a/.changes/2.9.9.json b/.changes/2.9.9.json new file mode 100644 index 000000000000..8629cb2b9e3d --- /dev/null +++ b/.changes/2.9.9.json @@ -0,0 +1,21 @@ +{ + "version": "2.9.9", + "date": "2019-09-27", + "entries": [ + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release of Amazon Elastic Container Service (Amazon ECS) removes FirelensConfiguration from the DescribeTask output during the FireLens public preview." + }, + { + "type": "feature", + "category": "AWS Amplify", + "description": "This release adds access logs APIs and artifact APIs for AWS Amplify Console." 
+ }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Update the pool size for default async future completion executor service. See [#1251](https://github.com/aws/aws-sdk-java-v2/issues/1251), [#994](https://github.com/aws/aws-sdk-java-v2/issues/994)" + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/bugfix-AWSSDKforJavav2-bbc8ecb.json b/.changes/next-release/bugfix-AWSSDKforJavav2-bbc8ecb.json deleted file mode 100644 index 4e9db6b1eaca..000000000000 --- a/.changes/next-release/bugfix-AWSSDKforJavav2-bbc8ecb.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "category": "AWS SDK for Java v2", - "type": "bugfix", - "description": "Fix the issue where the `content-length` set on the request is not honored for streaming operations." -} diff --git a/.changes/next-release/bugfix-NettyNIOHTTPClient-dd43cf4.json b/.changes/next-release/bugfix-NettyNIOHTTPClient-dd43cf4.json new file mode 100644 index 000000000000..2402e9647bb5 --- /dev/null +++ b/.changes/next-release/bugfix-NettyNIOHTTPClient-dd43cf4.json @@ -0,0 +1,6 @@ +{ + "category": "Netty NIO HTTP Client", + "contributor": "", + "type": "bugfix", + "description": "Use `SystemPropretyTlsKeyManagersProvider` if no `KeyManger` is provided." +} diff --git a/.changes/next-release/feature-NettyNIOHTTPClient-e7d2844.json b/.changes/next-release/feature-NettyNIOHTTPClient-e7d2844.json new file mode 100644 index 000000000000..ec1b2d53c87d --- /dev/null +++ b/.changes/next-release/feature-NettyNIOHTTPClient-e7d2844.json @@ -0,0 +1,6 @@ +{ + "category": "Netty NIO HTTP Client", + "contributor": "", + "type": "bugfix", + "description": "Correctly select the cipher suites based on the HTTP protocol. See [#2159](https://github.com/aws/aws-sdk-java-v2/issues/2159)" +} diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 000000000000..b3680fa48f95 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,38 @@ +--- +name: "\U0001F41B Bug report" +about: Create a report to help us improve +labels: bug, needs-triage +--- + + + +## Describe the bug + + +## Expected Behavior + + +## Current Behavior + + + + + + +## Steps to Reproduce + + + + +## Possible Solution + + +## Context + + + +## Your Environment + +* AWS Java SDK version used: +* JDK version used: +* Operating System and version: diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md new file mode 100644 index 000000000000..fac624d37159 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/documentation.md @@ -0,0 +1,15 @@ +--- +name: "\U0001F4D5 Documentation Issue" +about: Report an issue in the API Reference documentation or Developer Guide +labels: documentation, needs-triage +--- + + + +## Describe the issue + + +## Links + + + diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 000000000000..9492a02c8a94 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,36 @@ +--- +name: "\U0001F680 Feature Request" +about: Suggest an idea for this project +labels: feature-request, needs-triage +--- + + + +## Describe the Feature + + +## Is your Feature Request related to a problem? 
+ + +## Proposed Solution + + +## Describe alternatives you've considered + + +## Additional Context + + + + + + + +- [ ] I may be able to implement this feature request + + +## Your Environment + +* AWS Java SDK version used: +* JDK version used: +* Operating System and version: diff --git a/.github/ISSUE_TEMPLATE/general-issue.md b/.github/ISSUE_TEMPLATE/general-issue.md new file mode 100644 index 000000000000..52273edcc004 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/general-issue.md @@ -0,0 +1,28 @@ +--- +name: "\U0001F4AC General Issue" +about: Create a new issue +labels: guidance, needs-triage +--- + + + +## Describe the issue + + +## Steps to Reproduce + + + + +## Current Behavior + + + + + + +## Your Environment + +* AWS Java SDK version used: +* JDK version used: +* Operating System and version: diff --git a/.github/workflows/closed-issue-message.yml b/.github/workflows/closed-issue-message.yml new file mode 100644 index 000000000000..e1137cf61719 --- /dev/null +++ b/.github/workflows/closed-issue-message.yml @@ -0,0 +1,17 @@ +name: Closed Issue Message +on: + issues: + types: [closed] +jobs: + auto_comment: + runs-on: ubuntu-latest + steps: + - uses: aws-actions/closed-issue-message@v1 + with: + # These inputs are both required + repo-token: "${{ secrets.GITHUB_TOKEN }}" + message: | + ### ⚠️COMMENT VISIBILITY WARNING⚠️ + Comments on closed issues are hard for our team to see. + If you need more assistance, please open a new issue that references this one. + If you wish to keep having a conversation with other community members under this issue feel free to do so. diff --git a/.github/workflows/stale-issue.yml b/.github/workflows/stale-issue.yml new file mode 100644 index 000000000000..607980c6c377 --- /dev/null +++ b/.github/workflows/stale-issue.yml @@ -0,0 +1,56 @@ +name: "Close stale issues" + +# Controls when the action will run. +on: + schedule: + - cron: "0 0/3 * * *" + +jobs: + cleanup: + name: Stale issue job + runs-on: ubuntu-latest + steps: + - uses: aws-actions/stale-issue-cleanup@v3 + with: + # Setting messages to an empty string will cause the automation to skip + # that category + ancient-issue-message: This is a very old issue that is probably not getting as much + attention as it deserves. We encourage you to check if this is still an issue in + the latest release and if you find that this is still a problem, please feel free + to provide a comment or open a new issue. + stale-issue-message: It looks like this issue hasn’t been active in longer than a week. + In the absence of more information, we will be closing this issue soon. If you find + that this is still a problem, please add a comment to prevent automatic closure, or + if the issue is already closed please feel free to reopen it. + stale-pr-message: It looks like this PR hasn’t been active in longer than a week. In + the absence of more information, we will be closing this PR soon. Please add a + comment to prevent automatic closure, or if the PR is already closed please feel + free to open a new one. 
+ + # These labels are required + stale-issue-label: closing-soon + exempt-issue-label: no-auto-closure + stale-pr-label: closing-soon + exempt-pr-label: no-auto-closure + response-requested-label: response-requested + + # Don't set closed-for-staleness label to skip closing very old issues + # regardless of label + closed-for-staleness-label: closed-for-staleness + + # Issue timing + days-before-stale: 7 + days-before-close: 4 + days-before-ancient: 1095 + + # If you don't want to mark an issue as being ancient based on a + # threshold of "upvotes", you can set this here. An "upvote" is + # the total number of +1, heart, hooray, and rocket reactions + # on an issue. + minimum-upvotes-to-exempt: 1 + + repo-token: ${{ secrets.GITHUB_TOKEN }} + loglevel: DEBUG + # Set dry-run to true to not perform label or close actions. + #dry-run: true + diff --git a/.travis.yml b/.travis.yml index 5682fd599751..a439eda0bba7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,10 +1,11 @@ language: java jdk: - - oraclejdk8 - - openjdk11 + - openjdk8 sudo: true -dist: precise +dist: xenial install: /bin/true +env: + - AWS_REGION=us-west-2 notifications: email: - github-awsforjava@amazon.com diff --git a/CHANGELOG.md b/CHANGELOG.md index 43a7813f5b85..cd4e74216f97 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7587 @@ +# __2.15.61__ __2021-01-07__ +## __AWS CodePipeline__ + - ### Features + - Adding cancelled status and summary for executions aborted by pipeline updates. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for I-Frame-only HLS manifest generation in CMAF outputs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon DevOps Guru__ + - ### Features + - Add resourceHours field in GetAccountHealth API to show the total number of resource hours AWS DevOps Guru has done work for in the last hour. + +# __2.15.60__ __2021-01-06__ +## __AWS Auto Scaling Plans__ + - ### Features + - Documentation updates for AWS Auto Scaling + +## __AWS Transfer Family__ + - ### Features + - This release adds support for Amazon EFS, so customers can transfer files over SFTP, FTPS and FTP in and out of Amazon S3 as well as Amazon EFS. + +## __Auto Scaling__ + - ### Features + - This update increases the number of instance types that can be added to the overrides within a mixed instances group configuration. + +# __2.15.59__ __2021-01-05__ +## __AWS Cost Explorer Service__ + - ### Features + - Add new GetCostcategories API. Support filters for the GetDimensions, GetTags and GetCostcategories APIs. Support sortBy metrics for the GetDimensions, GetTags and GetCostcategories APIs. + +## __Amazon S3__ + - ### Features + - `S3Utilities#getUrl` now supports versionId. See [#2224](https://github.com/aws/aws-sdk-java-v2/issues/2224); a short usage sketch follows below. + +## __Application Auto Scaling__ + - ### Features + - Documentation updates for Application Auto Scaling + +# __2.15.58__ __2021-01-04__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix for [#1684](https://github.com/aws/aws-sdk-java-v2/issues/1684): some retry attempts that failed due to API timeouts were not retried successfully and instead surfaced as an AbortedException. + +## __Amazon CloudSearch__ + - ### Features + - This release adds support for new Amazon CloudSearch instances. + +## __Amazon HealthLake__ + - ### Features + - Amazon HealthLake now supports exporting data from FHIR Data Stores in Preview.
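An aside on the `S3Utilities#getUrl` versionId support noted under 2.15.59 above: a minimal sketch of what the call might look like. The bucket, key, and version values are placeholders; only the `versionId` field is new, and the rest of the `S3Utilities` API is unchanged.

```java
import java.net.URL;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Utilities;
import software.amazon.awssdk.services.s3.model.GetUrlRequest;

public class GetVersionedUrl {
    public static void main(String[] args) {
        // S3Utilities can be built standalone, without constructing a full S3Client.
        S3Utilities utilities = S3Utilities.builder().region(Region.US_WEST_2).build();

        // versionId is the field added in 2.15.59; bucket/key/version are placeholders.
        URL url = utilities.getUrl(GetUrlRequest.builder()
                .bucket("my-bucket")
                .key("my-object")
                .versionId("my-version-id")
                .build());

        System.out.println(url);
    }
}
```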
+ +# __2.15.57__ __2020-12-31__ +## __AWS Service Catalog__ + - ### Features + - Enhanced Service Catalog DescribeProvisioningParameters API to return new parameter constraints, i.e., MinLength, MaxLength, MinValue, MaxValue, ConstraintDescription and AllowedPattern + +# __2.15.56__ __2020-12-30__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon ElastiCache__ + - ### Features + - Documentation updates for elasticache + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API includes miscellaneous updates and improvements to the documentation. + +# __2.15.55__ __2020-12-29__ +## __AWS Certificate Manager Private Certificate Authority__ + - ### Features + - This release adds a new parameter "CsrExtensions" in the "CertificateAuthorityConfiguration" data structure, which allows customers to add the addition of KU and SIA into the CA CSR. + +## __AmazonApiGatewayV2__ + - ### Features + - Amazon API Gateway now supports data mapping for HTTP APIs which allows customers to modify HTTP Request before sending it to their integration and HTTP Response before sending it to the invoker. + +# __2.15.54__ __2020-12-28__ +## __Amazon CloudFront__ + - ### Features + - Amazon CloudFront has deprecated the CreateStreamingDistribution and CreateStreamingDistributionWithTags APIs as part of discontinuing support for Real-Time Messaging Protocol (RTMP) distributions. + +# __2.15.53__ __2020-12-23__ +## __AWS Compute Optimizer__ + - ### Features + - This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for lambda functions. + +## __AWS Database Migration Service__ + - ### Features + - AWS DMS launches support for AWS Secrets Manager to manage Oracle ASM Database credentials + +## __AWS Resource Groups__ + - ### Features + - Add operation `PutGroupConfiguration`. Support dedicated hosts and add `Pending` in operations `Un/GroupResources`. Add `Resources` in `ListGroupResources` and deprecate `ResourceIdentifiers`. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.15.52__ __2020-12-22__ +## __AWS Cost Explorer Service__ + - ### Features + - This release adds additional metadata that may be applicable to the Rightsizing Recommendations. + +## __AWS Glue__ + - ### Features + - AWS Glue Find Matches machine learning transforms now support column importance scores. + +## __AWS IoT Wireless__ + - ### Features + - Adding the ability to use Fingerprint in GetPartnerAccount and ListPartnerAccounts API responses to protect sensitive customer account information. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Connect Service__ + - ### Features + - This release adds support for quick connects. For details, see the Release Notes in the Amazon Connect Administrator Guide. + +## __Amazon ElastiCache__ + - ### Features + - Documentation updates for elasticache + +## __Amazon Relational Database Service__ + - ### Features + - Adds customer-owned IP address (CoIP) support to Amazon RDS on AWS Outposts. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - SSM Maintenance Window support for registering/updating maintenance window tasks without targets. + +# __2.15.51__ __2020-12-21__ +## __AWS Batch__ + - ### Features + - Documentation updates for batch + +## __AWS Config__ + - ### Features + - AWS Config adds support to save advanced queries. 
New API operations - GetStoredQuery, PutStoredQuery, ListStoredQueries, DeleteStoredQuery + +## __AWS Database Migration Service__ + - ### Features + - AWS DMS launches support for AWS Secrets Manager to manage source and target database credentials. + +## __AWS Glue__ + - ### Features + - Add 4 connection properties: SECRET_ID, CONNECTOR_URL, CONNECTOR_TYPE, CONNECTOR_CLASS_NAME. Add two connection types: MARKETPLACE, CUSTOM + +## __AWS Outposts__ + - ### Features + - In this release, AWS Outposts adds support for three new APIs: TagResource, UntagResource, and ListTagsForResource. Customers can now manage tags for their resources through the SDK; a short usage sketch follows below. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS SecurityHub__ + - ### Features + - Finding providers can now use BatchImportFindings to update Confidence, Criticality, RelatedFindings, Severity, and Types. + +## __AWS Service Catalog App Registry__ + - ### Features + - New API `SyncResource` to update AppRegistry system tags. + +## __Amazon API Gateway__ + - ### Features + - Documentation updates for Amazon API Gateway. + +## __Amazon Connect Participant Service__ + - ### Features + - This release adds three new APIs: StartAttachmentUpload, CompleteAttachmentUpload, and GetAttachment. For Amazon Connect Chat, you can use these APIs to share files in chat conversations. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds Tag On Create feature support for the AllocateAddress API. + +## __Amazon Managed Blockchain__ + - ### Features + - Added support for provisioning and managing public Ethereum nodes on main and test networks supporting secure access using Sigv4 and standard open-source Ethereum APIs. + +## __Amazon QLDB Session__ + - ### Features + - Adds "TimingInformation" to all SendCommand API results and "IOUsage" to ExecuteStatementResult, FetchPageResult and CommitTransactionResult. + +## __Amazon Simple Storage Service__ + - ### Features + - Format GetObject's Expires header to be an http-date instead of iso8601 + +## __Service Quotas__ + - ### Features + - Added the ability to tag applied quotas. + +# __2.15.50__ __2020-12-18__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Upgrading jackson.databind.version to 2.10.5.1 + +## __Amazon Elastic Compute Cloud__ + - ### Features + - EBS io2 volumes now support Multi-Attach + +## __Amazon Personalize Runtime__ + - ### Features + - Updated FilterValues regex pattern to align with Filter Expression. + +## __Amazon Relational Database Service__ + - ### Features + - Adds IAM DB authentication information to the PendingModifiedValues output of the DescribeDBInstances API. Adds ClusterPendingModifiedValues information to the output of the DescribeDBClusters API. + +# __2.15.49__ __2020-12-17__ +## __AWS Config__ + - ### Features + - Adding PutExternalEvaluation API which grants permission to deliver evaluation results to AWS Config + +## __AWS Key Management Service__ + - ### Features + - Added CreationDate and LastUpdatedDate timestamps to ListAliases API response + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - This change fixes a bug in the code generation related to eventstreams that prevented multiple events from sharing the same shape. + +## __AWS Service Catalog__ + - ### Features + - Support TagOptions sharing with Service Catalog portfolio sharing.
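Returning to the AWS Outposts tagging APIs noted under 2.15.51 above (TagResource, UntagResource, ListTagsForResource): a rough sketch of tagging an Outpost and reading the tags back. The ARN is a placeholder, and the request shapes are assumed to follow the usual `resourceArn`/`tags` pattern of AWS tagging APIs.

```java
import java.util.Collections;
import java.util.Map;
import software.amazon.awssdk.services.outposts.OutpostsClient;
import software.amazon.awssdk.services.outposts.model.ListTagsForResourceRequest;
import software.amazon.awssdk.services.outposts.model.TagResourceRequest;

public class TagOutpost {
    public static void main(String[] args) {
        // Placeholder ARN for illustration only.
        String outpostArn = "arn:aws:outposts:us-west-2:123456789012:outpost/op-0123456789abcdef0";

        try (OutpostsClient outposts = OutpostsClient.create()) {
            // Tag the Outpost (TagResource, new in 2.15.51).
            outposts.tagResource(TagResourceRequest.builder()
                    .resourceArn(outpostArn)
                    .tags(Collections.singletonMap("team", "infra"))
                    .build());

            // Read the tags back (ListTagsForResource).
            Map<String, String> tags = outposts.listTagsForResource(
                    ListTagsForResourceRequest.builder().resourceArn(outpostArn).build()).tags();

            tags.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }
}
```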
+ +## __Amazon Data Lifecycle Manager__ + - ### Features + - Provide Cross-account copy event based policy support in DataLifecycleManager (DLM) + +## __Amazon Elastic Compute Cloud__ + - ### Features + - C6gn instances are powered by AWS Graviton2 processors and offer 100 Gbps networking bandwidth. These instances deliver up to 40% better price-performance benefit versus comparable x86-based instances + +## __Amazon Route 53__ + - ### Features + - This release adds support for DNSSEC signing in Amazon Route 53. + +## __Amazon Route 53 Resolver__ + - ### Features + - Route 53 Resolver adds support for enabling resolver DNSSEC validation in virtual private cloud (VPC). + +## __Amazon Simple Queue Service__ + - ### Features + - Amazon SQS adds queue attributes to enable high throughput FIFO. + +## __EC2 Image Builder__ + - ### Features + - This release adds support for building and distributing container images within EC2 Image Builder. + +# __2.15.48__ __2020-12-16__ +## __AWS Cost Explorer Service__ + - ### Features + - This release updates the "MonitorArnList" from a list of String to be a list of Arn for both CreateAnomalySubscription and UpdateAnomalySubscription APIs + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Well-Architected Tool__ + - ### Features + - This is the first release of AWS Well-Architected Tool API support, use to review your workload and compare against the latest AWS architectural best practices. + +## __Amazon Location Service__ + - ### Features + - Initial release of Amazon Location Service. A new geospatial service providing capabilities to render maps, geocode/reverse geocode, track device locations, and detect geofence entry/exit events. + +## __Amazon Prometheus Service__ + - ### Features + - Documentation updates for Amazon Managed Service for Prometheus + +## __Amazon QuickSight__ + - ### Features + - QuickSight now supports connecting to federated data sources of Athena + +# __2.15.47__ __2020-12-15__ +## __AWS IoT__ + - ### Features + - AWS IoT Rules Engine adds Kafka Action that allows sending data to Apache Kafka clusters inside a VPC. AWS IoT Device Defender adds custom metrics and machine-learning based anomaly detection. + +## __AWS IoT Analytics__ + - ### Features + - FileFormatConfiguration enables data store to save data in JSON or Parquet format. S3Paths enables you to specify the S3 objects that save your channel messages when you reprocess the pipeline. + +## __AWS IoT Core Device Advisor__ + - ### Features + - AWS IoT Core Device Advisor is fully managed test capability for IoT devices. Device manufacturers can use Device Advisor to test their IoT devices for reliable and secure connectivity with AWS IoT. + +## __AWS IoT Fleet Hub__ + - ### Features + - AWS IoT Fleet Hub, a new feature of AWS IoT Device Management that provides a web application for monitoring and managing device fleets connected to AWS IoT at scale. + +## __AWS IoT Greengrass V2__ + - ### Features + - AWS IoT Greengrass V2 is a new major version of AWS IoT Greengrass. This release adds several updates such as modular components, continuous deployments, and improved ease of use. + +## __AWS IoT Wireless__ + - ### Features + - AWS IoT for LoRaWAN enables customers to setup a private LoRaWAN network by connecting their LoRaWAN devices and gateways to the AWS cloud without managing a LoRaWAN Network Server. + +## __AWS Lambda__ + - ### Features + - Added support for Apache Kafka as a event source. 
Added support for TumblingWindowInSeconds for streams event source mappings. Added support for FunctionResponseTypes for streams event source mappings + +## __Amazon Prometheus Service__ + - ### Features + - (New Service) Amazon Managed Service for Prometheus is a fully managed Prometheus-compatible monitoring service that makes it easy to monitor containerized applications securely and at scale. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Adding support for Change Manager API content + +# __2.15.46__ __2020-12-14__ +## __AWS Global Accelerator__ + - ### Features + - This release adds support for custom routing accelerators + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Treat zero-byte responses from async HTTP clients as not having a payload, regardless of the response content-length. This fixes an issue that could cause HEAD responses (e.g. s3's headObject responses) with a content-length specified from being treated as having a payload. This fixes issues like [#1216](https://github.com/aws/aws-sdk-java-v2/issues/1216) where the SDK attempts to read data from the response based on the content-length, not based on whether there was actually a payload. + +## __Amazon DevOps Guru__ + - ### Features + - Documentation updates for DevOps Guru. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Add c5n.metal to ec2 instance types list + +# __2.15.45__ __2020-12-11__ +## __AWS CloudTrail__ + - ### Features + - CloudTrailInvalidClientTokenIdException is now thrown when a call results in the InvalidClientTokenId error code. The Name parameter of the AdvancedEventSelector data type is now optional. + +## __AWS IoT SiteWise__ + - ### Features + - Added the ListAssetRelationships operation and support for composite asset models, which represent structured sets of properties within asset models. + +## __AWS Performance Insights__ + - ### Features + - You can group DB load according to the dimension groups for database, application, and session type. Amazon RDS also supports the dimensions db.name, db.application.name, and db.session_type.name. + +## __Amazon CloudWatch__ + - ### Features + - Documentation updates for monitoring + +## __Amazon GuardDuty__ + - ### Features + - Documentation updates for GuardDuty + +## __Auto Scaling__ + - ### Features + - Documentation updates and corrections for Amazon EC2 Auto Scaling API Reference and SDKs. + +# __2.15.44__ __2020-12-10__ +## __AWS Network Manager__ + - ### Features + - This release adds API support for Transit Gateway Connect integration into AWS Network Manager. + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra now supports adding synonyms to an index through the new Thesaurus resource. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - TGW connect simplifies connectivity of SD-WAN appliances; IGMP support for TGW multicast; VPC Reachability Analyzer for VPC resources connectivity analysis. + +# __2.15.43__ __2020-12-09__ +## __AWS Global Accelerator__ + - ### Features + - This release adds support for custom routing accelerators + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for G4ad instances powered by AMD Radeon Pro V520 GPUs and AMD 2nd Generation EPYC processors + +## __Amazon Redshift__ + - ### Features + - Add support for availability zone relocation feature. 
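Relating to the 2.15.46 bugfix above (zero-byte responses from async HTTP clients treated as having no payload, e.g. S3 `headObject` responses): a small sketch of the kind of call the fix affects, assuming the default async HTTP client and placeholder bucket/key values.

```java
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
import software.amazon.awssdk.services.s3.model.HeadObjectResponse;

public class HeadObjectExample {
    public static void main(String[] args) {
        try (S3AsyncClient s3 = S3AsyncClient.create()) {
            // HEAD responses carry a Content-Length header but no body;
            // the 2.15.46 fix keeps the SDK from trying to read a payload here.
            HeadObjectResponse head = s3.headObject(HeadObjectRequest.builder()
                            .bucket("my-bucket")   // placeholder
                            .key("my-object")      // placeholder
                            .build())
                    .join();

            System.out.println("Size: " + head.contentLength() + ", ETag: " + head.eTag());
        }
    }
}
```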
+ +# __2.15.42__ __2020-12-08__ +## __AWS Audit Manager__ + - ### Features + - AWS Audit Manager helps you continuously audit your AWS usage to simplify how you manage risk and compliance. This update releases the first version of the AWS Audit Manager APIs and SDK. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - The SDK will now retry on `TransactionInProgressException` error code. + +## __AWSKendraFrontendService__ + - ### Features + - 1. Amazon Kendra connector for Google Drive repositories 2. Amazon Kendra's relevance ranking models are regularly tuned for each customer by capturing end-user search patterns and feedback. + +## __Amazon EC2 Container Registry__ + - ### Features + - This release adds support for configuring cross-region and cross-account replication of your Amazon ECR images. + +## __Amazon EMR Containers__ + - ### Features + - This release adds support for Amazon EMR on EKS, a simple way to run Spark on Kubernetes. + +## __Amazon Forecast Service__ + - ### Features + - This release adds support for the Amazon Forecast Weather Index which can increase forecasting accuracy by automatically including weather forecasts in demand forecasts. + +## __Amazon HealthLake__ + - ### Features + - This release introduces Amazon HealthLake (preview), a HIPAA-eligible service that enables healthcare and life sciences customers to store, transform, query, and analyze health data in the AWS Cloud. + +## __Amazon QuickSight__ + - ### Features + - Added new parameters for join optimization. + +## __Amazon SageMaker Runtime__ + - ### Features + - This feature allows customers to invoke their endpoint with an inference ID. If used and data capture for the endpoint is enabled, this ID will be captured along with request data. + +## __Amazon SageMaker Service__ + - ### Features + - This feature helps you monitor model performance characteristics such as accuracy, identify undesired bias in your ML models, and explain model decisions better with explainability drift detection. + +## __Amazon Sagemaker Edge Manager__ + - ### Features + - Amazon SageMaker Edge Manager makes it easy to optimize, secure, monitor, and maintain machine learning (ML) models across fleets of edge devices such as smart cameras, smart speakers, and robots. + +# __2.15.41__ __2020-12-07__ +## __AWS Common Runtime HTTP Client__ + - ### Features + - Bump up `aws-crt` version to `0.9.0` + +## __AWS Database Migration Service__ + - ### Features + - Added PreserveTransaction setting to preserve order of CDC for S3 as target. Added CsvNoSupValue setting to replace empty value for columns not included in the supplemental log for S3 as target. + +## __AWS SDK for Java v2__ + - ### Features + - Added amz-sdk-request and removed amz-sdk-retry header. The new header matches the behavior of the other SDKs. + - Updated service endpoint metadata. + +## __AWS Service Catalog App Registry__ + - ### Features + - AWS Service Catalog AppRegistry now supports adding, removing, and listing tags on resources after they are created. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Fixed the issue where certain handshake errors manifested as acquire connection timeout error when using TLS1.3 and proxy. + +# __2.15.40__ __2020-12-04__ +## __AWS Directory Service__ + - ### Features + - Documentation updates for ds - updated descriptions + +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports black video and audio silence as new conditions to trigger automatic input failover. 
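For the SageMaker Runtime note under 2.15.42 above (invoking an endpoint with an inference ID), a hedged sketch. The endpoint name, payload, and content type are placeholders, and `inferenceId` is assumed to be the generated builder method for the new request field described in the entry.

```java
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.sagemakerruntime.SageMakerRuntimeClient;
import software.amazon.awssdk.services.sagemakerruntime.model.InvokeEndpointRequest;
import software.amazon.awssdk.services.sagemakerruntime.model.InvokeEndpointResponse;

public class InvokeWithInferenceId {
    public static void main(String[] args) {
        try (SageMakerRuntimeClient runtime = SageMakerRuntimeClient.create()) {
            InvokeEndpointResponse response = runtime.invokeEndpoint(InvokeEndpointRequest.builder()
                    .endpointName("my-endpoint")                        // placeholder
                    .contentType("application/json")
                    .body(SdkBytes.fromUtf8String("{\"features\": [1, 2, 3]}"))
                    .inferenceId("request-42")                          // captured with the request data when data capture is enabled
                    .build());

            System.out.println(response.body().asUtf8String());
        }
    }
}
```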
+ +## __AWS Lambda__ + - ### Features + - Added the additional enum InvalidImage to StateReasonCode and LastUpdateStatusReasonCode fields. + +## __AWS License Manager__ + - ### Features + - Automated Discovery now has support for custom tags, and detects software uninstalls. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Avoid costly metrics collection when metric collector is NoOpMetricCollector. + - Contributed by: [@croudet](https://github.com/croudet) + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release introduces tag-on-create capability for the CreateImage API. A user can now specify tags that will be applied to the new resources (image, snapshots or both), during creation time. + +## __Amazon Relational Database Service__ + - ### Features + - Adds support for Amazon RDS Cross-Region Automated Backups, the ability to setup automatic replication of snapshots and transaction logs from a primary AWS Region to a secondary AWS Region. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - AWS Systems Manager Patch Manager MAC OS Support and OpsMetadata Store APIs to store operational metadata for an Application. + +## __Amazon WorkSpaces__ + - ### Features + - Update the import-workspace-image API to have "BYOL_REGULAR_WSP" as a valid input string for ingestion-process. + +## __Managed Streaming for Kafka__ + - ### Features + - Adding HEALING to ClusterState. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@croudet](https://github.com/croudet) +# __2.15.39__ __2020-12-03__ +## __AWS Batch__ + - ### Features + - This release adds support for customer to run Batch Jobs on ECS Fargate, the serverless compute engine built for containers on AWS. Customer can also propagate Job and Job Definition Tags to ECS Task. + +## __AWS Compute Optimizer__ + - ### Features + - This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for EBS volumes that are attached to instances. + +## __AWS License Manager__ + - ### Features + - AWS License Manager enables managed entitlements for AWS customers and Software Vendors (ISV). You can track and distribute license entitlements from AWS Marketplace and supported ISVs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AmplifyBackend__ + - ### Features + - Regular documentation updates. + +# __2.15.38__ __2020-12-01__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Connect Customer Profiles__ + - ### Features + - This is the first release of Amazon Connect Customer Profiles, a unified customer profile for your Amazon Connect contact center. + +# __2.15.37__ __2020-12-01__ +## __AWS Directory Service__ + - ### Features + - Adding client authentication feature for AWS AD Connector + +## __AWS Lambda__ + - ### Features + - This release includes support for a new feature: Container images support in AWS Lambda. This adds APIs for deploying functions as container images. AWS Lambda now supports memory up to 10240MB. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon AppIntegrations Service__ + - ### Features + - The Amazon AppIntegrations service (in preview release) enables you to configure and reuse connections to external applications. + +## __Amazon Connect Contact Lens__ + - ### Features + - Contact Lens for Amazon Connect analyzes conversations, both real-time and post-call. 
The ListRealtimeContactAnalysisSegments API returns a list of analysis segments for a real-time analysis session. + +## __Amazon Connect Service__ + - ### Features + - This release adds an Amazon Connect API that provides the ability to create tasks, and a set of APIs (in preview) to configure AppIntegrations associations with Amazon Connect instances. + +## __Amazon DevOps Guru__ + - ### Features + - (New Service) Amazon DevOps Guru is available in public preview. It's a fully managed service that uses machine learning to analyze your operational solutions to help you find and troubleshoot issues. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for: EBS gp3 volumes; and D3/D3en/R5b/M5zn instances powered by Intel Cascade Lake CPUs + +## __Amazon Elastic Container Registry Public__ + - ### Features + - Supports Amazon Elastic Container Registry (Amazon ECR) Public, a fully managed registry that makes it easy for a developer to publicly share container software worldwide for anyone to download. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Amazon EKS now allows you to define and manage the lifecycle for Kubernetes add-ons for your clusters. This release adds support for the AWS VPC CNI (vpc-cni). + +## __Amazon Honeycode__ + - ### Features + - Introducing APIs to read and write directly from Honeycode tables. Use APIs to pull table and column metadata, then use the read and write APIs to programmatically read and write from the tables. + +## __Amazon Lookout for Vision__ + - ### Features + - This release introduces support for Amazon Lookout for Vision. + +## __Amazon SageMaker Feature Store Runtime__ + - ### Features + - This release adds support for Amazon SageMaker Feature Store, which makes it easy for customers to create, version, share, and manage curated data for machine learning (ML) development. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker Pipelines for ML workflows. Amazon SageMaker Feature Store, a fully managed repository for ML features. + +## __Amazon Simple Storage Service__ + - ### Features + - S3 adds support for multiple-destination replication, option to sync replica modifications; S3 Bucket Keys to reduce cost of S3 SSE with AWS KMS + +## __AmplifyBackend__ + - ### Features + - (New Service) The Amplify Admin UI offers an accessible way to develop app backends and manage app content. We recommend that you use the Amplify Admin UI to manage the backend of your Amplify app. + +# __2.15.36__ __2020-11-30__ +## __AWS SDK for Java v2__ + - ### Features + - Add LICENSE.txt and NOTICE.txt to META-INF directory of generated JARs + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed a resource leak that could occur when closing the default credentials provider (or a client using the default credentials provider), when `closeable` credentials like STS or SSO were in use. Fixes [#2149](https://github.com/aws/aws-sdk-java-v2/issues/2149). + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release introduces Amazon EC2 Mac1 instances, a new Amazon EC2 instance family built on Apple Mac mini computers, powered by AWS Nitro System, and support running macOS workloads on Amazon EC2 + +# __2.15.35__ __2020-11-24__ +## __AWS Batch__ + - ### Features + - Add Ec2Configuration in ComputeEnvironment.ComputeResources. Use in CreateComputeEnvironment API to enable AmazonLinux2 support. 
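In connection with the 2.15.36 bugfix above (resource leak when closing the default credentials provider while closeable STS or SSO credentials are in use): a small sketch of the pattern the fix is about, closing both the provider and the client so background refresh resources are released. This assumes default credentials are already configured in the environment.

```java
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.services.sts.StsClient;

public class CloseableCredentials {
    public static void main(String[] args) {
        // DefaultCredentialsProvider is SdkAutoCloseable; closing it releases any
        // underlying closeable providers (for example STS- or SSO-backed credentials).
        try (DefaultCredentialsProvider credentials = DefaultCredentialsProvider.create();
             StsClient sts = StsClient.builder().credentialsProvider(credentials).build()) {

            System.out.println("Account: " + sts.getCallerIdentity().account());
        } // provider and client are both closed here; 2.15.36 fixed a leak on this path
    }
}
```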
+ +## __AWS CloudFormation__ + - ### Features + - Adds support for the new Modules feature for CloudFormation. A module encapsulates one or more resources and their respective configurations for reuse across your organization. + +## __AWS CloudTrail__ + - ### Features + - CloudTrail now includes advanced event selectors, which give you finer-grained control over the events that are logged to your trail. + +## __AWS CodeBuild__ + - ### Features + - Adding GetReportGroupTrend API for Test Reports. + +## __AWS Elastic Beanstalk__ + - ### Features + - Updates the Integer constraint of DescribeEnvironmentManagedActionHistory's MaxItems parameter to [1, 100]. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for Vorbis and Opus audio in OGG/OGA containers. + +## __AWS IoT SiteWise__ + - ### Features + - This release adds support for customer managed customer master key (CMK) based encryption in IoT SiteWise. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Step Functions__ + - ### Features + - This release of the AWS Step Functions SDK introduces support for Synchronous Express Workflows + +## __Amazon Appflow__ + - ### Features + - Upsolver as a destination connector and documentation update. + +## __Amazon Cognito Identity Provider__ + - ### Features + - This release adds ability to configure Cognito User Pools with third party sms and email providers for sending notifications to users. + +## __Amazon Comprehend__ + - ### Features + - Support Comprehend events detection APIs + +## __Amazon FSx__ + - ### Features + - This release adds the capability to increase storage capacity of Amazon FSx for Lustre file systems, providing the flexibility to meet evolving storage needs over time. + +## __Amazon GameLift__ + - ### Features + - GameLift FlexMatch is now available as a standalone matchmaking solution. FlexMatch now provides customizable matchmaking for games hosted peer-to-peer, on-premises, or on cloud compute primitives. + +## __Amazon Lex Model Building Service__ + - ### Features + - Lex now supports es-419, de-DE locales + +## __Amazon QuickSight__ + - ### Features + - Support for embedding without user registration. New enum EmbeddingIdentityType. A potential breaking change. Affects code that refers IdentityType enum type directly instead of literal string value. + +## __Amazon Timestream Write__ + - ### Features + - Adds support of upserts for idempotent updates to Timestream. + +## __Amazon Transcribe Streaming Service__ + - ### Features + - Amazon Transcribe Medical streaming added medical specialties and HTTP/2 support. Amazon Transcribe streaming supports additional languages. Both support OGG/OPUS and FLAC codecs for streaming. + +## __AmazonMWAA__ + - ### Features + - (New Service) Amazon MWAA is a managed service for Apache Airflow that makes it easy for data engineers and data scientists to execute data processing workflows in the cloud. + +# __2.15.34__ __2020-11-23__ +## __AWS CodeStar connections__ + - ### Features + - Added support for the UpdateHost API. + +## __AWS Glue__ + - ### Features + - Feature1 - Glue crawler adds data lineage configuration option. Feature2 - AWS Glue Data Catalog adds APIs for PartitionIndex creation and deletion as part of Enhancement Partition Management feature. 
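For the AWS Step Functions entry under 2.15.35 above (Synchronous Express Workflows), a hedged sketch of `startSyncExecution`. The state machine ARN and input are placeholders, and the response fields read here are assumed from the operation's documented shape.

```java
import software.amazon.awssdk.services.sfn.SfnClient;
import software.amazon.awssdk.services.sfn.model.StartSyncExecutionRequest;
import software.amazon.awssdk.services.sfn.model.StartSyncExecutionResponse;

public class SyncExpressWorkflow {
    public static void main(String[] args) {
        try (SfnClient sfn = SfnClient.create()) {
            // Runs an Express workflow synchronously and waits for its result.
            StartSyncExecutionResponse result = sfn.startSyncExecution(StartSyncExecutionRequest.builder()
                    .stateMachineArn("arn:aws:states:us-west-2:123456789012:stateMachine:my-express-workflow") // placeholder
                    .input("{\"orderId\": \"1234\"}")
                    .build());

            System.out.println(result.statusAsString() + ": " + result.output());
        }
    }
}
```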
+ +## __AWS IoT__ + - ### Features + - This release enables users to identify different file types in the over-the-air update (OTA) functionality using fileType parameter for CreateOTAUpdate API + +## __AWS Lambda__ + - ### Features + - This release includes support for new feature: Code Signing for AWS Lambda. This adds new resources and APIs to configure Lambda functions to accept and verify signed code artifacts at deployment. + +## __AWS License Manager__ + - ### Features + - AWS License Manager now provides the ability for license administrators to be able to associate license configurations to AMIs shared with their AWS account + +## __AWS Outposts__ + - ### Features + - Support specifying tags during the creation of the Outpost resource. Tags are now returned in the response body of Outpost APIs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS SecurityHub__ + - ### Features + - Updated the account management API to support the integration with AWS Organizations. Added new methods to allow users to view and manage the delegated administrator account for Security Hub. + +## __AWS Signer__ + - ### Features + - AWS Signer is launching code-signing for AWS Lambda. Now customers can cryptographically sign Lambda code to ensure trust, integrity, and functionality. + +## __AWS Single Sign-On Admin__ + - ### Features + - AWS Single Sign-On now enables attribute-based access control for workforce identities to simplify permissions in AWS + +## __Amazon CloudWatch Application Insights__ + - ### Features + - Add Detected Workload to ApplicationComponent which shows the workloads that installed in the component + +## __Amazon DynamoDB__ + - ### Features + - With this release, you can capture data changes in any Amazon DynamoDB table as an Amazon Kinesis data stream. You also can use PartiQL (SQL-compatible language) to manipulate data in DynamoDB tables. + +## __Amazon EC2 Container Service__ + - ### Features + - This release adds support for updating capacity providers, specifying custom instance warmup periods for capacity providers, and using deployment circuit breaker for your ECS Services. + +## __Amazon ElastiCache__ + - ### Features + - Documentation updates for elasticache + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for Multiple Private DNS names to DescribeVpcEndpointServices response. + +## __Amazon Elastic MapReduce__ + - ### Features + - Add API support for EMR Studio, a new notebook-first IDE for data scientists and data engineers with single sign-on, Jupyter notebooks, automated infrastructure provisioning, and job diagnosis. + +## __Amazon Forecast Service__ + - ### Features + - Releasing the set of PredictorBacktestExportJob APIs which allow customers to export backtest values and item-level metrics data from Predictor training. + +## __Amazon Timestream Query__ + - ### Features + - Amazon Timestream now supports "QueryStatus" in Query API which has information about cumulative bytes scanned, metered, as well as progress percentage for the query. + +## __Amazon Translate__ + - ### Features + - This update adds new operations to create and manage parallel data in Amazon Translate. Parallel data is a resource that you can use to run Active Custom Translation jobs. + +## __Auto Scaling__ + - ### Features + - Documentation updates and corrections for Amazon EC2 Auto Scaling API Reference and SDKs. + +## __CodeArtifact__ + - ### Features + - Add support for the NuGet package format. 
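For the Amazon DynamoDB entry under 2.15.34 above (PartiQL support), a minimal sketch using `executeStatement`. The table and attribute names are placeholders.

```java
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.ExecuteStatementRequest;

public class PartiQlQuery {
    public static void main(String[] args) {
        try (DynamoDbClient dynamoDb = DynamoDbClient.create()) {
            // PartiQL SELECT with a positional parameter; table/attribute names are placeholders.
            ExecuteStatementRequest request = ExecuteStatementRequest.builder()
                    .statement("SELECT * FROM \"Orders\" WHERE \"customerId\" = ?")
                    .parameters(AttributeValue.builder().s("customer-123").build())
                    .build();

            dynamoDb.executeStatement(request).items()
                    .forEach(item -> System.out.println(item));
        }
    }
}
```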
+ +## __Managed Streaming for Kafka__ + - ### Features + - Adding MAINTENANCE and REBOOTING_BROKER to Cluster states. + +# __2.15.33__ __2020-11-20__ +## __AWS App Mesh__ + - ### Features + - This release makes tag value a required attribute of the tag's key-value pair. + +## __AWS CloudHSM V2__ + - ### Features + - Added managed backup retention, a feature that enables customers to retain backups for a configurable period after which CloudHSM service will automatically delete them. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog App Registry__ + - ### Features + - AWS Service Catalog AppRegistry Documentation update + +## __AWS Single Sign-on__ + - ### Features + - Added support for retrieving SSO credentials: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html. + +## __Amazon Chime__ + - ### Features + - The Amazon Chime SDK for messaging provides the building blocks needed to build chat and other real-time collaboration features. + +## __Amazon CodeGuru Reviewer__ + - ### Features + - This release supports tagging repository association resources in Amazon CodeGuru Reviewer. + +## __Amazon Cognito Identity__ + - ### Features + - Added SDK pagination support for ListIdentityPools + +## __Amazon Connect Service__ + - ### Features + - This release adds a set of Amazon Connect APIs to programmatically control instance creation, modification, description and deletion. + +## __Amazon Macie 2__ + - ### Features + - The Amazon Macie API now provides S3 bucket metadata that indicates whether any one-time or recurring classification jobs are configured to analyze data in a bucket. + +## __Amazon Simple Storage Service__ + - ### Features + - Add new documentation regarding automatically generated Content-MD5 headers when using the SDK or CLI. + +## __Managed Streaming for Kafka__ + - ### Features + - This release adds support for PER TOPIC PER PARTITION monitoring on AWS MSK clusters. + +# __2.15.32__ __2020-11-19__ +## __AWS Cost Explorer Service__ + - ### Features + - Additional metadata that may be applicable to the recommendation. + +## __AWS Directory Service__ + - ### Features + - Adding multi-region replication feature for AWS Managed Microsoft AD + +## __AWS Elemental MediaLive__ + - ### Features + - The AWS Elemental MediaLive APIs and SDKs now support the ability to see the software update status on Link devices + +## __AWS Glue__ + - ### Features + - Adding support for Glue Schema Registry. The AWS Glue Schema Registry is a new feature that allows you to centrally discover, control, and evolve data stream schemas. + +## __AWS Lambda__ + - ### Features + - Added the starting position and starting position timestamp to ESM Configuration. Now customers will be able to view these fields for their ESM. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CloudWatch Events__ + - ### Features + - EventBridge now supports Resource-based policy authorization on event buses. This enables cross-account PutEvents API calls, creating cross-account rules, and simplifies permission management. + +## __Amazon EventBridge__ + - ### Features + - EventBridge now supports Resource-based policy authorization on event buses. This enables cross-account PutEvents API calls, creating cross-account rules, and simplifies permission management. 
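For the Amazon Cognito Identity entry under 2.15.33 above (SDK pagination support for ListIdentityPools), a hedged sketch using the generated paginator; it is assumed to follow the SDK's usual naming and to expose a flattened `identityPools()` accessor. The page size is arbitrary.

```java
import software.amazon.awssdk.services.cognitoidentity.CognitoIdentityClient;
import software.amazon.awssdk.services.cognitoidentity.model.ListIdentityPoolsRequest;

public class ListAllIdentityPools {
    public static void main(String[] args) {
        try (CognitoIdentityClient cognito = CognitoIdentityClient.create()) {
            // The paginator transparently follows NextToken across pages.
            cognito.listIdentityPoolsPaginator(ListIdentityPoolsRequest.builder()
                            .maxResults(10)
                            .build())
                    .identityPools()
                    .forEach(pool -> System.out.println(pool.identityPoolId() + " " + pool.identityPoolName()));
        }
    }
}
```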
+ +## __Amazon Kinesis Analytics__ + - ### Features + - Amazon Kinesis Data Analytics now supports building and running streaming applications using Apache Flink 1.11 and provides a way to access the Apache Flink dashboard for supported Flink versions. + +## __Amazon Lex Model Building Service__ + - ### Features + - Amazon Lex supports managing input and output contexts as well as default values for slots. + +## __Amazon Lex Runtime Service__ + - ### Features + - Amazon Lex now supports the ability to view and manage active contexts associated with a user session. + +## __Amazon Redshift__ + - ### Features + - Amazon Redshift support for returning ClusterNamespaceArn in describeClusters + +## __Auto Scaling__ + - ### Features + - You can now create Auto Scaling groups with multiple launch templates using a mixed instances policy, making it easy to deploy an AMI with an architecture that is different from the rest of the group. + +# __2.15.31__ __2020-11-18__ +## __AWS Backup__ + - ### Features + - AWS Backup now supports cross-account backup, enabling AWS customers to securely copy their backups across their AWS accounts within their AWS organizations. + +## __AWS CloudFormation__ + - ### Features + - This release adds ChangeSets support for Nested Stacks. ChangeSets offer a preview of how proposed changes to a stack might impact existing resources or create new ones. + +## __AWS CodeBuild__ + - ### Features + - AWS CodeBuild - Adding Status field for Report Group + +## __AWS Outposts__ + - ### Features + - Mark the Name parameter in CreateOutpost as required. + +## __AWS S3 Control__ + - ### Features + - AWS S3 Storage Lens provides visibility into your storage usage and activity trends at the organization or account level, with aggregations by Region, storage class, bucket, and prefix. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon ElastiCache__ + - ### Features + - Adding Memcached 1.6 to parameter family + +## __Amazon Elastic Compute Cloud__ + - ### Features + - EC2 Fleet adds support of DeleteFleets API for instant type fleets. Now you can delete an instant type fleet and terminate all associated instances with a single API call. + +# __2.15.30__ __2020-11-17__ +## __AWS Network Firewall__ + - ### Features + - (New Service) AWS Network Firewall is a managed network layer firewall service that makes it easy to secure your virtual private cloud (VPC) networks and block malicious traffic. + +## __Amazon Chime__ + - ### Features + - This release adds CRUD APIs for Amazon Chime SipMediaApplications and SipRules. It also adds the API for creating outbound PSTN calls for Amazon Chime meetings. + +## __Amazon Connect Service__ + - ### Features + - This release adds support for user hierarchy group and user hierarchy structure. For details, see the Release Notes in the Amazon Connect Administrator Guide. + +## __Amazon Macie 2__ + - ### Features + - The Amazon Macie API now has a lastRunErrorStatus property to indicate if account- or bucket-level errors occurred during the run of a one-time classification job or the latest run of a recurring job. + +## __Amazon Relational Database Service__ + - ### Features + - Support copy-db-snapshot in the one region on cross clusters and local cluster for RDSonVmware. Add target-custom-availability-zone parameter to specify where a snapshot should be copied. + +## __Firewall Management Service__ + - ### Features + - Added Firewall Manager policy support for AWS Network Firewall resources. 
+ +# __2.15.29__ __2020-11-16__ +## __AWS CodePipeline__ + - ### Features + - We show details about inbound executions and id of action executions in GetPipelineState API. We also add ConflictException to StartPipelineExecution, RetryStageExecution, StopPipelineExecution APIs. + +## __AWS Database Migration Service__ + - ### Features + - Adding MoveReplicationTask feature to move replication tasks between instances + +## __AWS IoT Secure Tunneling__ + - ### Features + - Support using multiple data streams per tunnel using the Secure Tunneling multiplexing feature. + +## __AWS IoT SiteWise__ + - ### Features + - This release supports Unicode characters for string operations in formulae computes in SiteWise. For more information, search for SiteWise in Amazon What's new or refer the SiteWise documentation. + +## __AWS Service Catalog__ + - ### Features + - Support import of CloudFormation stacks into Service Catalog provisioned products. + +## __Amazon QuickSight__ + - ### Features + - Adding new parameters for dashboard persistence + +## __Amazon SageMaker Service__ + - ### Features + - This feature enables customers to encrypt their Amazon SageMaker Studio storage volumes with customer master keys (CMKs) managed by them in AWS Key Management Service (KMS). + +## __Amazon Simple Notification Service__ + - ### Features + - Documentation updates for Amazon SNS. + +## __Synthetics__ + - ### Features + - AWS Synthetics now supports Environment Variables to assign runtime parameters in the canary scripts. + +# __2.15.28__ __2020-11-13__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Shield__ + - ### Features + - This release adds APIs for two new features: 1) Allow customers to bundle resources into protection groups and treat as a single unit. 2) Provide per-account event summaries to all AWS customers. + +## __Amazon Textract__ + - ### Features + - AWS Textract now allows customers to specify their own KMS key to be used for asynchronous jobs output results, AWS Textract now also recognizes handwritten text from English documents. + +## __Elastic Load Balancing__ + - ### Features + - Adds dualstack support for Network Load Balancers (TCP/TLS only), an attribute for WAF fail open for Application Load Balancers, and an attribute for connection draining for Network Load Balancers. + +# __2.15.27__ __2020-11-12__ +## __AWS IoT__ + - ### Features + - This release adds a batchMode parameter to the IotEvents, IotAnalytics, and Firehose actions which allows customers to send an array of messages to the corresponding services + +## __AWS RoboMaker__ + - ### Features + - This release introduces Robomaker Worldforge TagsOnCreate which allows customers to tag worlds as they are being generated by providing the tags while configuring a world generation job. + +## __AWS Service Catalog App Registry__ + - ### Features + - AWS Service Catalog AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise. + +## __Amazon Lex Model Building Service__ + - ### Features + - Lex now supports es-ES, it-IT, fr-FR and fr-CA locales + +## __Amazon Lightsail__ + - ### Features + - This release adds support for Amazon Lightsail container services. You can now create a Lightsail container service, and deploy Docker images to it. + +## __Amazon Personalize Runtime__ + - ### Features + - Adds support to use dynamic filters with Personalize. 
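For the Amazon Personalize Runtime entry directly above (2.15.27, dynamic filters), a hedged sketch of passing filter values at request time. The campaign and filter ARNs, the user id, and the `filterValues` key/value are all placeholders, and the field names are assumed from the service model referenced by the entry.

```java
import java.util.Collections;
import software.amazon.awssdk.services.personalizeruntime.PersonalizeRuntimeClient;
import software.amazon.awssdk.services.personalizeruntime.model.GetRecommendationsRequest;

public class DynamicFilterRecommendations {
    public static void main(String[] args) {
        try (PersonalizeRuntimeClient personalize = PersonalizeRuntimeClient.create()) {
            personalize.getRecommendations(GetRecommendationsRequest.builder()
                            .campaignArn("arn:aws:personalize:us-west-2:123456789012:campaign/my-campaign") // placeholder
                            .userId("user-1")
                            .filterArn("arn:aws:personalize:us-west-2:123456789012:filter/my-filter")       // placeholder
                            .filterValues(Collections.singletonMap("GENRE", "\"Comedy\""))                  // placeholder filter parameter
                            .build())
                    .itemList()
                    .forEach(item -> System.out.println(item.itemId()));
        }
    }
}
```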
+ +## __Amazon Polly__ + - ### Features + - Amazon Polly adds new Australian English female voice - Olivia. Olivia is available as Neural voice only. + +# __2.15.26__ __2020-11-11__ +## __AWS Amplify__ + - ### Features + - Whereas previously custom headers were set via the app's buildspec, custom headers can now be set directly on the Amplify app for both ci/cd and manual deploy apps. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for Automated ABR encoding and improved the reliability of embedded captions in accelerated outputs. + +## __AWS Glue DataBrew__ + - ### Features + - This is the initial SDK release for AWS Glue DataBrew. DataBrew is a visual data preparation tool that enables users to clean and normalize data without writing any code. + +## __AWS Service Catalog__ + - ### Features + - Adding support to remove a Provisioned Product launch role via UpdateProvisionedProductProperties + +## __Amazon Forecast Service__ + - ### Features + - Providing support of custom quantiles in CreatePredictor API. + +## __Amazon QuickSight__ + - ### Features + - QuickSight now supports Column-level security and connecting to Oracle data source. + +## __Netty NIO HTTP Client__ + - ### Features + - Upgrade Netty libraries to `4.1.53.Final`, and `netty-tcnative-boringssl-static` to `2.0.34.Final`. + + - ### Bugfixes + - Fix a bug where the Netty HTTP client can leak memory when a response stream is cancelled prematurely but the upstream publisher continues to invoke onNext for some time before stopping. Fixes [#2051](https://github.com/aws/aws-sdk-java-v2/issues/2051). + +# __2.15.25__ __2020-11-10__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix default client error to have spaces between words. + - Contributed by: [@frosforever](https://github.com/frosforever) + - Replaced class loading from Thread.currentThread().getContextClassLoader() to ClassLoaderHelper in ProfileCredentialsUtils and WebIdentityCredentialsUtils, since it was causing Class not found error. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for Gateway Load Balancer VPC endpoints and VPC endpoint services + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Add SessionId as a filter for DescribeSessions API + +## __Auto Scaling__ + - ### Features + - Documentation updates and corrections for Amazon EC2 Auto Scaling API Reference and SDKs. + +## __Elastic Load Balancing__ + - ### Features + - Added support for Gateway Load Balancers, which make it easy to deploy, scale, and run third-party virtual networking appliances. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@frosforever](https://github.com/frosforever) +# __2.15.24__ __2020-11-09__ +## __AWS DataSync__ + - ### Features + - DataSync now enables customers to adjust the network bandwidth used by a running AWS DataSync task. + +## __AWS IoT Analytics__ + - ### Features + - AWS IoT Analytics now supports Late Data Notifications for datasets, dataset content creation using previous version IDs, and includes the LastMessageArrivalTime attribute for channels and datastores. + +## __AWS Storage Gateway__ + - ### Features + - Added bandwidth rate limit schedule for Tape and Volume Gateways + +## __Amazon DynamoDB__ + - ### Features + - This release adds supports for exporting Amazon DynamoDB table data to Amazon S3 to perform analytics at any scale. 
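For the Amazon DynamoDB entry directly above (2.15.24, exporting table data to Amazon S3), a hedged sketch of `exportTableToPointInTime`. The table ARN, bucket, and prefix are placeholders, and the field names are assumed from the documented export API; point-in-time recovery must be enabled on the table.

```java
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.ExportFormat;
import software.amazon.awssdk.services.dynamodb.model.ExportTableToPointInTimeRequest;
import software.amazon.awssdk.services.dynamodb.model.ExportTableToPointInTimeResponse;

public class ExportTableToS3 {
    public static void main(String[] args) {
        try (DynamoDbClient dynamoDb = DynamoDbClient.create()) {
            ExportTableToPointInTimeResponse export = dynamoDb.exportTableToPointInTime(
                    ExportTableToPointInTimeRequest.builder()
                            .tableArn("arn:aws:dynamodb:us-west-2:123456789012:table/Orders") // placeholder
                            .s3Bucket("my-export-bucket")                                     // placeholder
                            .s3Prefix("orders/2020-11-09")
                            .exportFormat(ExportFormat.DYNAMODB_JSON)
                            .build());

            System.out.println("Export ARN: " + export.exportDescription().exportArn());
        }
    }
}
```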
+ +## __Amazon EC2 Container Service__ + - ### Features + - This release provides native support for specifying Amazon FSx for Windows File Server file systems as volumes in your Amazon ECS task definitions. + +## __Amazon Elasticsearch Service__ + - ### Features + - Adding support for package versioning in Amazon Elasticsearch Service + +## __Amazon FSx__ + - ### Features + - This release adds support for creating DNS aliases for Amazon FSx for Windows File Server, and using AWS Backup to automate scheduled, policy-driven backup plans for Amazon FSx file systems. + +## __Amazon Macie 2__ + - ### Features + - Sensitive data findings in Amazon Macie now include enhanced location data for Apache Avro object containers and Apache Parquet files. + +## __Amazon Simple Storage Service__ + - ### Features + - S3 Intelligent-Tiering adds support for Archive and Deep Archive Access tiers; S3 Replication adds replication metrics and failure notifications, brings feature parity for delete marker replication + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - add a new filter to allow customer to filter automation executions by using resource-group which used for execute automation + +# __2.15.23__ __2020-11-06__ +## __AWS Elemental MediaLive__ + - ### Features + - Support for SCTE35 ad markers in OnCuePoint style in RTMP outputs. + +## __AWS IoT SiteWise__ + - ### Features + - Remove the CreatePresignedPortalUrl API + +## __Amazon Data Lifecycle Manager__ + - ### Features + - Amazon Data Lifecycle Manager now supports the creation and retention of EBS-backed Amazon Machine Images + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Network card support with four new attributes: NetworkCardIndex, NetworkPerformance, DefaultNetworkCardIndex, and MaximumNetworkInterfaces, added to the DescribeInstanceTypes API. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Documentation updates for Systems Manager + +# __2.15.22__ __2020-11-05__ +## __AWS App Mesh__ + - ### Features + - This release adds circuit breaking capabilities to your mesh with connection pooling and outlier detection support. + +## __AWS Lambda__ + - ### Features + - Support Amazon MQ as an Event Source. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra now supports providing user context in your query requests, Tokens can be JSON or JWT format. This release also introduces support for Confluence cloud datasources. + +## __Amazon CloudWatch Events__ + - ### Features + - With this release, customers can now reprocess past events by storing the events published on event bus in an encrypted archive. + +## __Amazon DynamoDB__ + - ### Features + - This release adds a new ReplicaStatus INACCESSIBLE_ENCRYPTION_CREDENTIALS for the Table description, indicating when a key used to encrypt a regional replica table is not accessible. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for EC2. + +## __Amazon Elasticsearch Service__ + - ### Features + - Amazon Elasticsearch Service now provides the ability to define a custom endpoint for your domain and link an SSL certificate from ACM, making it easier to refer to Kibana and the domain endpoint. + +## __Amazon EventBridge__ + - ### Features + - With this release, customers can now reprocess past events by storing the events published on event bus in an encrypted archive. 
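For the Amazon EventBridge entry directly above (2.15.22, storing published events in an encrypted archive for later replay), a hedged sketch of creating an archive on the default event bus. The archive name, event bus ARN, and retention period are placeholders, and the field names are assumed from the CreateArchive operation the entry describes.

```java
import software.amazon.awssdk.services.eventbridge.EventBridgeClient;
import software.amazon.awssdk.services.eventbridge.model.CreateArchiveRequest;
import software.amazon.awssdk.services.eventbridge.model.CreateArchiveResponse;

public class CreateEventArchive {
    public static void main(String[] args) {
        try (EventBridgeClient events = EventBridgeClient.create()) {
            CreateArchiveResponse archive = events.createArchive(CreateArchiveRequest.builder()
                    .archiveName("orders-archive")                                             // placeholder
                    .eventSourceArn("arn:aws:events:us-west-2:123456789012:event-bus/default") // placeholder
                    .description("Archive of order events for replay")
                    .retentionDays(30)
                    .build());

            System.out.println("Archive ARN: " + archive.archiveArn());
        }
    }
}
```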
+ +## __Amazon Fraud Detector__ + - ### Features + - Added support for deleting resources like Variables, ExternalModels, Outcomes, Models, ModelVersions, Labels, EventTypes and EntityTypes. Updated DeleteEvent operation to catch missing exceptions. + +## __Amazon Relational Database Service__ + - ### Features + - Supports a new parameter to set the max allocated storage in gigabytes for the CreateDBInstanceReadReplica API. + +# __2.15.21__ __2020-11-04__ +## __AWS IoT__ + - ### Features + - Updated API documentation and added paginator for AWS Iot Registry ListThingPrincipals API. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - Service Catalog API ListPortfolioAccess can now support a maximum PageSize of 100. + +## __AWS X-Ray__ + - ### Features + - Releasing new APIs GetInsightSummaries, GetInsightEvents, GetInsight, GetInsightImpactGraph and updating GetTimeSeriesServiceStatistics API for AWS X-Ray Insights feature + +## __AWSMarketplace Metering__ + - ### Features + - Adding Vendor Tagging Support in MeterUsage and BatchMeterUsage API. + +## __Amazon CloudWatch__ + - ### Features + - Documentation updates for monitoring + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Added support for Client Connect Handler for AWS Client VPN. Fleet supports launching replacement instances in response to Capacity Rebalance recommendation. + +## __Amazon Elasticsearch Service__ + - ### Features + - Amazon Elasticsearch Service now supports native SAML authentication that seamlessly integrates with the customers' existing SAML 2.0 Identity Provider (IdP). + +## __Amazon Transcribe Streaming Service__ + - ### Features + - With this release, Amazon Transcribe now supports real-time transcription from audio sources in Italian (it-IT) and German(de-DE). + +## __AmazonMQ__ + - ### Features + - Amazon MQ introduces support for RabbitMQ, a popular message-broker with native support for AMQP 0.9.1. You can now create fully-managed RabbitMQ brokers in the cloud. + +## __Auto Scaling__ + - ### Features + - Capacity Rebalance helps you manage and maintain workload availability during Spot interruptions by proactively augmenting your Auto Scaling group with a new instance before interrupting an old one. + +# __2.15.20__ __2020-11-02__ +## __AWS DynamoDB Enhanced Client__ + - ### Bugfixes + - Publisher streams returned by async resources in the DynamoDB Enhanced Client now correctly handle mapping errors when they are encountered in the stream by calling onError on the subscriber and then implicitly cancelling the subscription. Previously the stream would just permanently hang and never complete. + +## __AWS SDK for Java v2__ + - ### Features + - Added code generation validation that customer-visible identifiers are idiomatic (do not contain underscores). Services with underscores in their models can use rename customizations to fix these issues, or apply the 'underscoresInNameBehavior = ALLOW' customization. + - Upgrade `org.apache.httpcomponents:httpclient` version to `4.5.13` + + - ### Bugfixes + - Fixing race condition in EventStreamAsyncResponseTransformer. Field eventsToDeliver is a LinkedList, i.e., not thread-safe. Accesses to field eventsToDeliver are protected by synchronization on itself, but not in 1 location. 
+ - The mapped publisher returned by SdkPublisher.map will now handle exceptions thrown by the mapping function by calling onError on its subscriber and then cancelling the subscription rather than throwing it back to the publishing process when it attempts to publish data. + +## __AWS SSO OIDC__ + - ### Deprecations + - Renamed/deprecated 'error_description' fields in exceptions in favor of 'errorDescription'. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for the following features: 1. P4d instances based on NVIDIA A100 GPUs. 2. NetworkCardIndex attribute to support multiple network cards. + +# __2.15.19__ __2020-10-30__ +## __AWS Database Migration Service__ + - ### Features + - Adding DocDbSettings to support DocumentDB as a source. + +## __AWS Elemental MediaLive__ + - ### Features + - Support for HLS discontinuity tags in the child manifests. Support for incomplete segment behavior in the media output. Support for automatic input failover condition settings. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fixing FilteringSubscriber and LimitingSubscriber to complete when subscribing criteria is completed. + +## __Amazon ElastiCache__ + - ### Features + - Documentation updates for AWS ElastiCache + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API adds an eqExactMatch operator for filtering findings. With this operator you can increase the precision of your finding filters and suppression rules. + +## __Amazon Simple Notification Service__ + - ### Features + - Documentation updates for Amazon SNS + +## __Braket__ + - ### Features + - This release supports tagging for Amazon Braket quantum-task resources. It also supports tag-based access control for quantum-task APIs. + +## __EC2 Image Builder__ + - ### Features + - This feature increases the number of accounts that can be added to the Launch permissions within an Image Builder Distribution configuration. + +# __2.15.18__ __2020-10-29__ +## __AWS Marketplace Commerce Analytics__ + - ### Features + - Documentation updates for marketplacecommerceanalytics to specify four data sets which are deprecated. + +## __AWS Storage Gateway__ + - ### Features + - Adding support for access based enumeration on SMB file shares, file share visibility on SMB file shares, and file upload notifications for all file shares + +## __Amazon API Gateway__ + - ### Features + - Support disabling the default execute-api endpoint for REST APIs. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Support for Appliance mode on Transit Gateway that simplifies deployment of stateful network appliances. Added support for AWS Client VPN Self-Service Portal. + +## __Amazon Simple Email Service__ + - ### Features + - This release enables customers to manage their own contact lists and end-user subscription preferences. + +## __CodeArtifact__ + - ### Features + - Add support for tagging of CodeArtifact domain and repository resources. + +## __Elastic Load Balancing__ + - ### Features + - Application Load Balancer (ALB) now supports the gRPC protocol-version. With this release, customers can use ALB to route and load balance gRPC traffic between gRPC enabled clients and microservices. + +# __2.15.17__ __2020-10-28__ +## __AWS IoT__ + - ### Features + - This release adds support for GG-Managed Job Namespace + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ + - ### Bugfixes + - Fixed an issue where marshalling of a modeled object was not honoring the has* method on a list/map. + - Fixed an issue where the toString/equals/hashCode on a modeled object were not honoring the has* methods for lists and maps. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - AWS Nitro Enclaves general availability. Added support to RunInstances for creating enclave-enabled EC2 instances. New APIs to associate an ACM certificate with an IAM role, for enclave consumption. + +## __Amazon WorkMail__ + - ### Features + - Documentation update for Amazon WorkMail + +# __2.15.16__ __2020-10-27__ +## __AWS Glue__ + - ### Features + - AWS Glue machine learning transforms now support encryption-at-rest for labels and trained models. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.15.15__ __2020-10-26__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra now supports indexing data from Confluence Server. + +## __Amazon Neptune__ + - ### Features + - This feature enables custom endpoints for Amazon Neptune clusters. Custom endpoints simplify connection management when clusters contain instances with different capacities and configuration settings. + +## __Amazon SageMaker Service__ + - ### Features + - This release enables customers to bring custom images for use with SageMaker Studio notebooks. + +# __2.15.14__ __2020-10-23__ +## __AWS MediaTailor__ + - ### Features + - MediaTailor now supports ad marker passthrough for HLS. Use AdMarkerPassthrough to pass EXT-X-CUE-IN, EXT-X-CUE-OUT, and EXT-X-SPLICEPOINT-SCTE35 from origin manifests into personalized manifests. + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API includes miscellaneous updates and improvements to the documentation. + +## __Amazon QuickSight__ + - ### Features + - Support description on columns. + +# __2.15.13__ __2020-10-22__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - Documentation updates for servicecatalog + +## __Access Analyzer__ + - ### Features + - API Documentation updates for IAM Access Analyzer. + +## __Amazon Appflow__ + - ### Features + - Salesforce connector creation with customer provided client id and client secret, incremental pull configuration, salesforce upsert write operations and execution ID when on-demand flows are executed. + +## __Amazon Simple Notification Service__ + - ### Features + - SNS now supports a new class of topics: FIFO (First-In-First-Out). FIFO topics provide strictly-ordered, deduplicated, filterable, encryptable, many-to-many messaging at scale. + +# __2.15.12__ __2020-10-21__ +## __AWS Global Accelerator__ + - ### Features + - This release adds support for specifying port overrides on AWS Global Accelerator endpoint groups. + +## __AWS Glue__ + - ### Features + - AWS Glue crawlers now support incremental crawls for the Amazon Simple Storage Service (Amazon S3) data source. + +## __AWS Organizations__ + - ### Features + - AWS Organizations renamed the 'master account' to 'management account'. + +## __AWSKendraFrontendService__ + - ### Features + - This release adds custom data sources: a new data source type that gives you full control of the documents added, modified or deleted during a data source sync while providing run history metrics. 
+ +## __Amazon CloudFront__ + - ### Features + - CloudFront adds support for managing the public keys for signed URLs and signed cookies directly in CloudFront (it no longer requires the AWS root account). + +## __Amazon Elastic Compute Cloud__ + - ### Features + - instance-storage-info nvmeSupport added to DescribeInstanceTypes API + +# __2.15.11__ __2020-10-20__ +## __AWS AppSync__ + - ### Features + - Documentation updates to AppSync to correct several typos. + +## __AWS Batch__ + - ### Features + - Adding evaluateOnExit to job retry strategies. + +## __AWS Elastic Beanstalk__ + - ### Features + - EnvironmentStatus enum update to include Aborting, LinkingFrom and LinkingTo + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed an issue where requestBody and asyncRequestBody were not visible in ExecutionInterceptor.afterMarshalling. + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Fix for handling special characters in attribute names with WRITE_IF_NOT_EXISTS update behavior + +## __Amazon S3__ + - ### Features + - Moved the logic for calculating the Content-MD5 checksums from s3 to sdk-core. As always, make sure to use a version of 'sdk-core' greater than or equal to your version of 's3'. If you use an old version of 'sdk-core' and a new version of 's3', you will receive errors that Content-MD5 is required. + + - ### Bugfixes + - Fixed an issue where metrics were not being collected for Amazon S3 (or other XML services) + +# __2.15.10__ __2020-10-19__ +## __AWS Backup__ + - ### Features + - Documentation updates for Cryo + +## __AWS Service Catalog__ + - ### Features + - An Admin can now update the launch role associated with a Provisioned Product. Admins and End Users can now view the launch role associated with a Provisioned Product. + +## __Amazon CloudFront__ + - ### Features + - Amazon CloudFront adds support for Origin Shield. + +## __Amazon DocumentDB with MongoDB compatibility__ + - ### Features + - Documentation updates for docdb + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This Patch Manager release now supports Common Vulnerabilities and Exposure (CVE) Ids for missing packages via the DescribeInstancePatches API. + +## __HTTP Client SPI__ + - ### Features + - When calling the SdkHttpFullRequest uri() builder method, the query parameters of the provided URI are now kept. + This can be useful when you want to provide an already fully formed URI, such as a callback URI (see the sketch after this section). + +# __2.15.9__ __2020-10-16__ +## __AWS Elemental MediaLive__ + - ### Features + - The AWS Elemental MediaLive APIs and SDKs now support the ability to transfer the ownership of MediaLive Link devices across AWS accounts. + +## __AWS Organizations__ + - ### Features + - Documentation updates for AWS Organizations. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.15.8__ __2020-10-15__ +## __AWS Budgets__ + - ### Features + - This release introduces AWS Budgets Actions, allowing you to define an explicit response (or set of responses) to take when your budget exceeds its action threshold. + +## __AWS Cost Explorer Service__ + - ### Features + - This release improves email validation for subscriptions on the SDK endpoints. + +## __AWS Database Migration Service__ + - ### Features + - When creating Endpoints, Replication Instances, and Replication Tasks, this feature provides the option to specify a friendly name for the resources.
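+
+A minimal sketch (not part of the release notes) of the `SdkHttpFullRequest` `uri()` behavior described under HTTP Client SPI in 2.15.10 above; the callback URL and its parameters are illustrative:
+
+```java
+import java.net.URI;
+
+import software.amazon.awssdk.http.SdkHttpFullRequest;
+import software.amazon.awssdk.http.SdkHttpMethod;
+
+public class CallbackUriExample {
+    public static void main(String[] args) {
+        // Build a request from an already fully formed callback URI.
+        SdkHttpFullRequest request = SdkHttpFullRequest.builder()
+                .uri(URI.create("https://example.com/oauth/callback?code=abc123&state=xyz"))
+                .method(SdkHttpMethod.GET)
+                .build();
+
+        // As of 2.15.10, the query parameters carried by the URI are retained.
+        System.out.println(request.rawQueryParameters());
+    }
+}
+```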
+ +## __AWS Glue__ + - ### Features + - API Documentation updates for Glue Get-Plan API + +## __AWS Ground Station__ + - ### Features + - Adds error message attribute to DescribeContact DataflowDetails + +## __AWS IoT__ + - ### Features + - Add new variable, lastStatusChangeDate, to DescribeDomainConfiguration API + +## __AWS Lambda Maven Archetype__ + - ### Bugfixes + - Fixed an issue where archetype generation failed with latest maven-archetype-plugin. See [#1981](https://github.com/aws/aws-sdk-java-v2/issues/1981) + +## __AWS SDK for Java v2__ + - ### Features + - Add support for plus (+) character in profile names + - Updated service endpoint metadata. + +## __AWS Transfer Family__ + - ### Features + - Add support to associate VPC Security Groups at server creation. + +## __AWS X-Ray__ + - ### Features + - Enhancing CreateGroup, UpdateGroup, GetGroup and GetGroups APIs to support configuring X-Ray Insights Notifications. Adding TraceLimit information into X-Ray BatchGetTraces API response. + +## __Access Analyzer__ + - ### Features + - This release adds support for the ApplyArchiveRule api in IAM Access Analyzer. The ApplyArchiveRule api allows users to apply an archive rule retroactively to existing findings in an analyzer. + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API adds support for pausing and resuming classification jobs. Also, sensitive data findings now include location data for up to 15 occurrences of sensitive data. + +## __Amazon Rekognition__ + - ### Features + - This SDK release introduces a new API (DetectProtectiveEquipment) for Amazon Rekognition. This release also adds ServiceQuotaExceeded exception to Amazon Rekognition IndexFaces API. + +## __Amazon Relational Database Service__ + - ### Features + - Return tags for all resources in the output of DescribeDBInstances, DescribeDBSnapshots, DescribeDBClusters, and DescribeDBClusterSnapshots API operations. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This Patch Manager release now supports searching for available packages from Amazon Linux and Amazon Linux 2 via the DescribeAvailablePatches API. + +## __Amazon WorkMail__ + - ### Features + - Add CreateOrganization and DeleteOrganization API operations. + +## __Amazon WorkSpaces__ + - ### Features + - Documentation updates for WorkSpaces + +# __2.15.7__ __2020-10-09__ +## __AWS Amplify__ + - ### Features + - Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval - when enabled, code changes can take up to 10 minutes to roll out. + +## __AWS Elemental MediaLive__ + - ### Features + - WAV audio output. Extracting ancillary captions in MP4 file inputs. Priority on channels feeding a multiplex (higher priority channels will tend to have higher video quality). + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - This new API takes either a ProvisionedProductId or a ProvisionedProductName, along with a list of 1 or more output keys and responds with the (key,value) pairs of those outputs. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - This release introduces a new Amazon EKS error code: "ClusterUnreachable" + +## __Amazon Import/Export Snowball__ + - ### Features + - We added new APIs to allow customers to better manage their device shipping.
You can check if your shipping label expired, generate a new label, and tell us that you received or shipped your job. + +# __2.15.6__ __2020-10-08__ +## __AWS Cost Explorer Service__ + - ### Features + - You can now create hierarchical cost categories by choosing "Cost Category" as a dimension. You can also track the status of your cost category updates to your cost and usage information. + +## __Amazon CloudWatch Events__ + - ### Features + - Amazon EventBridge (formerly called CloudWatch Events) adds support for target Dead-letter Queues and custom retry policies. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - AWS EC2 RevokeSecurityGroupIngress and RevokeSecurityGroupEgress APIs will return IpPermissions which do not match with any existing IpPermissions for security groups in default VPC and EC2-Classic. + +## __Amazon EventBridge__ + - ### Features + - Amazon EventBridge adds support for target Dead Letter Queues (DLQs) and custom retry policies. + +## __Amazon Rekognition__ + - ### Features + - This release provides location information for the manifest validation files. + +## __Amazon Relational Database Service__ + - ### Features + - Supports a new parameter to set the max allocated storage in gigabytes for restore database instance from S3 and restore database instance to a point in time APIs. + +## __Amazon SageMaker Service__ + - ### Features + - This release enables Sagemaker customers to convert Tensorflow and PyTorch models to CoreML (ML Model) format. + +## __Amazon Simple Notification Service__ + - ### Features + - Documentation updates for SNS. + +# __2.15.5__ __2020-10-07__ +## __AWS Compute Optimizer__ + - ### Features + - This release enables AWS Compute Optimizer to analyze EC2 instance-level EBS read and write operations, and throughput when generating recommendations for your EC2 instances and Auto Scaling groups. + +## __AWS Cost Explorer Service__ + - ### Features + - Enables Rightsizing Recommendations to analyze and present EC2 instance-level EBS metrics when generating recommendations. Returns AccessDeniedException if the account is not opted into Rightsizing + +## __AWS Elemental MediaPackage__ + - ### Features + - AWS Elemental MediaPackage provides access logs that capture detailed information about requests sent to a customer's MediaPackage channel. + +## __Amazon ElastiCache__ + - ### Features + - This release introduces User and UserGroup to allow customers to have access control list of the Redis resources for AWS ElastiCache. This release also adds support for Outposts for AWS ElastiCache. + +# __2.15.4__ __2020-10-06__ +## __AWS Database Migration Service__ + - ### Features + - Added new S3 endpoint settings to allow partitioning CDC data by date for S3 as target. Exposed some Extra Connection Attributes as endpoint settings for relational databases as target. + +## __AWS Marketplace Catalog Service__ + - ### Features + - AWS Marketplace Catalog now supports FailureCode for change workflows to help differentiate client errors and server faults. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release supports returning additional information about local gateway virtual interfaces, and virtual interface groups. + +## __Amazon Kinesis Analytics__ + - ### Features + - Amazon Kinesis Analytics now supports StopApplication with 'force' option + +# __2.15.3__ __2020-10-05__ +## __AWS DynamoDB Enhanced Client__ + - ### Features + - Added support for attribute level custom update behaviors such as 'write if not exists'. 
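+
+A minimal sketch (not part of the release notes) of the attribute-level update behavior added to the DynamoDB Enhanced Client in 2.15.3 above; the bean and attribute names are illustrative, and it assumes the `@DynamoDbUpdateBehavior` annotation and the `UpdateBehavior.WRITE_IF_NOT_EXISTS` value:
+
+```java
+import java.time.Instant;
+
+import software.amazon.awssdk.enhanced.dynamodb.mapper.UpdateBehavior;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbUpdateBehavior;
+
+@DynamoDbBean
+public class AuditedRecord {
+    private String id;
+    private Instant createdOn;
+
+    @DynamoDbPartitionKey
+    public String getId() { return id; }
+    public void setId(String id) { this.id = id; }
+
+    // Written by the first update only; later updates leave the stored value untouched.
+    @DynamoDbUpdateBehavior(UpdateBehavior.WRITE_IF_NOT_EXISTS)
+    public Instant getCreatedOn() { return createdOn; }
+    public void setCreatedOn(Instant createdOn) { this.createdOn = createdOn; }
+}
+```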
+ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for AVC-I and VC3 encoding in the MXF OP1a container, Nielsen non-linear watermarking, and InSync FrameFormer frame rate conversion. + +## __AWS Glue__ + - ### Features + - AWS Glue crawlers now support Amazon DocumentDB (with MongoDB compatibility) and MongoDB collections. You can choose to crawl the entire data set or only a small sample to reduce crawl time. + +## __Amazon DynamoDB__ + - ### Features + - This release adds a new ReplicaStatus REGION DISABLED for the Table description. This state indicates that the AWS Region for the replica is inaccessible because the AWS Region is disabled. + +## __Amazon DynamoDB Streams__ + - ### Features + - Documentation updates for streams.dynamodb + +## __Amazon SageMaker Service__ + - ### Features + - This release adds support for launching Amazon SageMaker Studio in your VPC. Use AppNetworkAccessType in CreateDomain API to disable access to public internet and restrict the network traffic to VPC. + +# __2.15.2__ __2020-10-02__ +## __AWS Batch__ + - ### Features + - Support tagging for Batch resources (compute environment, job queue, job definition and job) and tag based access control on Batch APIs + +## __AWS Cloud Map__ + - ### Features + - Added support for optional parameters for DiscoverInstances API in AWS Cloud Map + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Personalize Events__ + - ### Features + - Adds new APIs to write item and user records to Datasets. + +## __Amazon Relational Database Service__ + - ### Features + - Adds the NCHAR Character Set ID parameter to the CreateDbInstance API for RDS Oracle. + +## __Amazon Simple Storage Service__ + - ### Features + - Amazon S3 Object Ownership is a new S3 feature that enables bucket owners to automatically assume ownership of objects that are uploaded to their buckets by other AWS Accounts. + +## __Apache HTTP Client__ + - ### Bugfixes + - Fixed an issue in Apache HTTP client where a request with path parameter as a single slash threw invalid host name error. + +## __Elastic Load Balancing__ + - ### Features + - This release adds support for tagging listeners, rules, and target groups on creation. This release also supported tagging operations through tagging api's for listeners and rules. + +# __2.15.1__ __2020-10-01__ +## __AWS AppSync__ + - ### Features + - Exposes the wafWebAclArn field on GraphQL api records. The wafWebAclArn field contains the amazon resource name of a WAF Web ACL if the AWS AppSync API is associated with one. + +## __AWS Glue__ + - ### Features + - Adding additional optional map parameter to get-plan api + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS WAFV2__ + - ### Features + - AWS WAF is now available for AWS AppSync GraphQL APIs. AWS WAF protects against malicious attacks with AWS Managed Rules or your own custom rules. For more information see the AWS WAF Developer Guide. + +## __Amazon Elastic MapReduce__ + - ### Features + - Documentation updates for elasticmapreduce + +## __Amazon QuickSight__ + - ### Features + - QuickSight now supports connecting to AWS Timestream data source + +## __Managed Streaming for Kafka__ + - ### Features + - Added support for Enabling Zookeeper Encryption in Transit for AWS MSK. 
+ +# __2.15.0__ __2020-09-30__ +## __AWS DataSync__ + - ### Features + - This release enables customers to create s3 location for S3 bucket's located on an AWS Outpost. + +## __AWS Direct Connect__ + - ### Features + - Documentation updates for AWS Direct Connect. + +## __AWS IoT__ + - ### Features + - AWS IoT Rules Engine adds Timestream action. The Timestream rule action lets you stream time-series data from IoT sensors and applications to Amazon Timestream databases for time series analysis. + +## __AWS MediaConnect__ + - ### Features + - MediaConnect now supports reservations to provide a discounted rate for a specific outbound bandwidth over a period of time. + +## __AWS S3 Control__ + - ### Features + - Amazon S3 on Outposts expands object storage to on-premises AWS Outposts environments, enabling you to store and retrieve objects using S3 APIs and features. + +## __AWS SDK for Java v2__ + - ### Features + - This release introduces Waiters for the AWS SDK for Java v2! Waiters make it easier for customers to wait for a resource to transition into a desired state. It comes handy when customers are interacting with operations that are eventually consistent on the service side. For more information on Waiters, head on over to the [AWS Developer Blog](https://aws.amazon.com/blogs/developer/category/developer-tools/aws-sdk-for-java/) and check out the [Developer Guide](http://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/welcome.html). + - Updated service endpoint metadata. + +## __AWS SecurityHub__ + - ### Features + - Added several new resource details objects. Added additional details for CloudFront distributions, IAM roles, and IAM access keys. Added a new ResourceRole attribute for resources. + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR customers can now use EC2 placement group to influence the placement of master nodes in a high-availability (HA) cluster across distinct underlying hardware to improve cluster availability. + +## __Amazon Pinpoint__ + - ### Features + - Amazon Pinpoint - Features - Customers can start a journey based on an event being triggered by an endpoint or user. + +## __Amazon S3 on Outposts__ + - ### Features + - Amazon S3 on Outposts expands object storage to on-premises AWS Outposts environments, enabling you to store and retrieve objects using S3 APIs and features. + +## __Amazon Simple Storage Service__ + - ### Features + - Amazon S3 on Outposts expands object storage to on-premises AWS Outposts environments, enabling you to store and retrieve objects using S3 APIs and features. + +## __Application Auto Scaling__ + - ### Features + - This release extends Auto Scaling support for cluster storage of Managed Streaming for Kafka. Auto Scaling monitors and automatically expands storage capacity when a critical usage threshold is met. + +## __EC2 Image Builder__ + - ### Features + - EC2 Image Builder adds support for copying AMIs created by Image Builder to accounts specific to each Region. + +# __2.14.28__ __2020-09-29__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Connect Service__ + - ### Features + - Update TagResource API documentation to include Contact Flows and Routing Profiles as supported resources. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for Client to Client routing for AWS Client VPN. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Simple update to description of ComplianceItemStatus. 
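+
+A minimal sketch (not part of the release notes) of the Waiters feature introduced in 2.15.0 above; the bucket name is illustrative:
+
+```java
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
+import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
+
+public class WaiterExample {
+    public static void main(String[] args) {
+        try (S3Client s3 = S3Client.create()) {
+            s3.createBucket(CreateBucketRequest.builder().bucket("my-example-bucket").build());
+
+            // Block until S3 reports the bucket as existing (or the waiter gives up).
+            s3.waiter().waitUntilBucketExists(
+                    HeadBucketRequest.builder().bucket("my-example-bucket").build());
+        }
+    }
+}
+```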
+ +## __Amazon Timestream Query__ + - ### Features + - (New Service) Amazon Timestream is a fast, scalable, fully managed, purpose-built time series database that makes it easy to store and analyze trillions of time series data points per day. + +## __Amazon Timestream Write__ + - ### Features + - (New Service) Amazon Timestream is a fast, scalable, fully managed, purpose-built time series database that makes it easy to store and analyze trillions of time series data points per day. + +## __Schemas__ + - ### Features + - Added support for schemas of type JSONSchemaDraft4. Added ExportSchema API that converts schemas in AWS Events registry and Discovered schemas from OpenApi3 to JSONSchemaDraft4. + +# __2.14.27__ __2020-09-28__ +## __Amazon Relational Database Service__ + - ### Features + - This release adds the InsufficientAvailableIPsInSubnetFault error for RDS Proxy. + +## __Application Auto Scaling__ + - ### Features + - This release extends Application Auto Scaling support to AWS Comprehend Entity Recognizer endpoint, allowing automatic updates to provisioned Inference Units to maintain targeted utilization level. + +# __2.14.26__ __2020-09-25__ +## __AWS Batch__ + - ### Features + - Support custom logging, executionRole, secrets, and linuxParameters (initProcessEnabled, maxSwap, swappiness, sharedMemorySize, and tmpfs). Also, add new context keys for awslogs. + +## __AWS Config__ + - ### Features + - Make the delivery-s3-bucket as an optional parameter for conformance packs and organizational conformance packs + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Security Token Service__ + - ### Features + - Documentation update for AssumeRole error + +## __Amazon DocumentDB with MongoDB compatibility__ + - ### Features + - Documentation updates for docdb + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release supports returning additional information about local gateway resources, such as the local gateway route table. + +## __Amazon Fraud Detector__ + - ### Features + - Increased maximum length of eventVariables values for GetEventPrediction from 256 to 1024. + +# __2.14.25__ __2020-09-24__ +## __AWS Amplify__ + - ### Features + - Allow Oauth Token in CreateApp call to be a maximum of 1000 characters instead of 100 + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed an issue where successful JSON service responses were required to include a payload (fixes NullPointerException originating from JsonProtocolUnmarshaller). + - Fixed an issue with XML services, where the service responding with no payload would treat the payload as empty. Now, empty payloads will properly be populated within the XML response as "null". + +## __AWS Savings Plans__ + - ### Features + - Introducing Queued SavingsPlans that will enable customers to queue their purchase request of Savings Plans for future dates. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Amazon EKS now supports configuring your cluster's service CIDR during cluster creation. + +## __Amazon Textract__ + - ### Features + - AWS Textract now supports output results for asynchronous jobs to customer specified s3 bucket. + +## __Amazon Transcribe Service__ + - ### Features + - Amazon Transcribe now supports WebM, OGG, AMR and AMR-WB as input formats. You can also specify an output key as a location within your S3 buckets to store the output of your transcription jobs. 
+ +## __Synthetics__ + - ### Features + - AWS Synthetics now supports AWS X-Ray Active Tracing feature. RunConfig is now an optional parameter with timeout updated from (60 - 900 seconds) to (3 - 840 seconds). + +# __2.14.24__ __2020-09-23__ +## __AWS Backup__ + - ### Features + - This release allows customers to enable or disable advanced backup settings in backup plan. As part of this feature AWS Backup added support for Windows VSS backup option for EC2 resources. + +## __AWS Cost Explorer Service__ + - ### Features + - This release provides access to Cost Anomaly Detection Public Preview APIs. Cost Anomaly Detection finds cost anomalies based on your historical cost and usage using Machine Learning models. + +## __AWS DynamoDB Enhanced Client__ + - ### Bugfixes + - Fixed incorrect 'duplicate key' error triggered when flattening a TableSchema that has key tags and more than one attribute. + +## __Amazon QuickSight__ + - ### Features + - Added Sheet information to DescribeDashboard, DescribeTemplate and DescribeAnalysis API response. + +## __Amazon Translate__ + - ### Features + - Improvements to DeleteTerminology API. + +# __2.14.23__ __2020-09-22__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Formal parameter names of synchronous streaming methods were aligned with their javadocs. + - This change makes the `FileStoreTlsKeyManagersProvider` and `SystemPropertyTlsKeyManagersProvider` respect the `ssl.KeyManagerFactory.algorithm` when instantiating the `KeyManagerFactory` rather than always using the hardcoded value of `SunX509`. + +## __Amazon Comprehend__ + - ### Features + - Amazon Comprehend integrates with Amazon SageMaker GroundTruth to allow its customers to annotate their datasets using GroundTruth and train their models using Comprehend Custom APIs. + +## __Amazon DynamoDB Streams__ + - ### Features + - Documentation updates for streams.dynamodb + +## __Amazon Lex Model Building Service__ + - ### Features + - Lex now supports es-US locales + +## __Amazon WorkMail__ + - ### Features + - Adding support for Mailbox Export APIs + +# __2.14.22__ __2020-09-21__ +## __AWS Glue__ + - ### Features + - Adding support to update multiple partitions of a table in a single request + +## __AWS IoT SiteWise__ + - ### Features + - This release supports IAM mode for SiteWise Monitor portals + +## __AWS Resource Groups__ + - ### Features + - Documentation updates and corrections for Resource Groups API Reference and SDKs. + +## __AWS Resource Groups Tagging API__ + - ### Features + - Documentation updates for the Resource Groups Tagging API. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CloudWatch Events__ + - ### Features + - Add support for Redshift Data API Targets + +## __Amazon EventBridge__ + - ### Features + - Add support for Redshift Data API Targets + +## __Amazon Relational Database Service__ + - ### Features + - Documentation updates for the RDS DescribeExportTasks API + +# __2.14.21__ __2020-09-18__ +## __AWS CodeStar connections__ + - ### Features + - New integration with the GitHub provider type. + +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports batch operations, which allow users to start, stop, and delete multiple MediaLive resources with a single request. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Single Sign-On Admin__ + - ### Features + - Documentation updates for AWS SSO APIs.
+ +# __2.14.20__ __2020-09-17__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra now supports additional file formats and metadata for FAQs. + +## __Amazon API Gateway__ + - ### Features + - Adds support for mutual TLS authentication for public regional REST Apis + +## __Amazon CloudFront__ + - ### Features + - Documentation updates for CloudFront + +## __Amazon Comprehend__ + - ### Features + - Amazon Comprehend now supports detecting Personally Identifiable Information (PII) entities in a document. + +## __Amazon Elasticsearch Service__ + - ### Features + - Adds support for data plane audit logging in Amazon Elasticsearch Service. + +## __Amazon Transcribe Streaming Service__ + - ### Features + - Amazon Transcribe now supports channel identification in real-time streaming, which enables you to transcribe multi-channel streaming audio. + +## __AmazonApiGatewayV2__ + - ### Features + - Adds support for mutual TLS authentication and disableAPIExecuteEndpoint for public regional HTTP Apis + +# __2.14.19__ __2020-09-16__ +## __AWS Greengrass__ + - ### Features + - This release includes the ability to set run-time configuration for a Greengrass core. The Telemetry feature, also included in this release, can be configured via run-time configuration per core. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - Enhance DescribeProvisionedProduct API to allow useProvisionedProduct Name as Input, so customer can provide ProvisionedProduct Name instead of ProvisionedProduct Id to describe a ProvisionedProduct. + +## __Amazon Connect Service__ + - ### Features + - This release adds support for contact flows and routing profiles. For details, see the Release Notes in the Amazon Connect Administrator Guide. + +## __Amazon Data Lifecycle Manager__ + - ### Features + - Customers can now provide multiple schedules within a single Data Lifecycle Manager (DLM) policy. Each schedule supports tagging, Fast Snapshot Restore (FSR) and cross region copy individually. + +## __Amazon STS__ + - ### Features + - Make the STSCredentialsProvider stale and prefetch times configurable so clients can control when session credentials are refreshed + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - The ComplianceItemEntry Status description was updated to address Windows patches that aren't applicable. + +# __2.14.18__ __2020-09-15__ +## __AWS Budgets__ + - ### Features + - Documentation updates for Daily Cost and Usage budgets + +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports CDI (Cloud Digital Interface) inputs which enable uncompressed video from applications on Elastic Cloud Compute (EC2), AWS Media Services, and from AWS partners + +## __AWS Organizations__ + - ### Features + - AWS Organizations now enables you to add tags to the AWS accounts, organizational units, organization root, and policies in your organization. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra now returns confidence scores for 'document' query responses. 
+ +## __Amazon Elastic Compute Cloud__ + - ### Features + - T4g instances are powered by AWS Graviton2 processors + +## __Amazon SageMaker Service__ + - ### Features + - Sagemaker Ground Truth: Added support for a new Streaming feature which helps to continuously feed data and receive labels in real time. This release adds a new input and output SNS data channel. + +## __Amazon Transcribe Service__ + - ### Features + - Amazon Transcribe now supports automatic language identification, which enables you to transcribe audio files without needing to know the language in advance. + +## __Managed Streaming for Kafka__ + - ### Features + - Added new APIs to support SASL SCRAM Authentication with MSK Clusters. + +# __2.14.17__ __2020-09-14__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Fix a bug in the SDK where the contents of the `AsyncRequestBody` are not included in the AWS Signature Version 4 calculation, which is required for some streaming operations such as Glacier `UploadArchive` and CloudSearch Domain `UploadDocuments`. + +## __AWS Step Functions__ + - ### Features + - This release of the AWS Step Functions SDK introduces support for AWS X-Ray. + +## __Amazon DocumentDB with MongoDB compatibility__ + - ### Features + - Updated API documentation and added paginators for DescribeCertificates, DescribeDBClusterParameterGroups, DescribeDBClusterParameters, DescribeDBClusterSnapshots and DescribePendingMaintenanceActions + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for the T4G instance family to the EC2 ModifyDefaultCreditSpecification and GetDefaultCreditSpecification APIs. + +## __Amazon Managed Blockchain__ + - ### Features + - Introducing support for Hyperledger Fabric 1.4. When using framework version 1.4, the state database may optionally be specified when creating peer nodes (defaults to CouchDB). + +# __2.14.16__ __2020-09-11__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon WorkSpaces__ + - ### Features + - Adds API support for WorkSpaces Cross-Region Redirection feature. + +# __2.14.15__ __2020-09-10__ +## __AWS Single Sign-On Admin__ + - ### Features + - This is an initial release of AWS Single Sign-On (SSO) Access Management APIs. This release adds support for SSO operations which could be used for managing access to AWS accounts. + +## __Amazon CloudFront__ + - ### Features + - CloudFront adds support for Brotli. You can enable brotli caching and compression support by enabling it in your Cache Policy. + +## __Amazon Elastic Block Store__ + - ### Features + - Documentation updates for Amazon EBS direct APIs. + +## __Amazon Pinpoint__ + - ### Features + - Update SMS message model description to clearly indicate that the MediaUrl field is reserved for future use and is not supported by Pinpoint as of today. + +## __Amazon Simple Storage Service__ + - ### Features + - Bucket owner verification feature added. This feature introduces the x-amz-expected-bucket-owner and x-amz-source-expected-bucket-owner headers. + +# __2.14.14__ __2020-09-09__ +## __AWS Glue__ + - ### Features + - Adding support for partitionIndexes to improve GetPartitions performance. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata.
+ +## __Amazon Kinesis Analytics__ + - ### Features + - Kinesis Data Analytics is adding new AUTOSCALING application status for applications during auto scaling and also adding FlinkRunConfigurationDescription in the ApplicationDetails. + +## __Redshift Data API Service__ + - ### Features + - The Amazon Redshift Data API is generally available. This release enables querying Amazon Redshift data and listing various database objects. + +# __2.14.13__ __2020-09-08__ +## __AWS CodeBuild__ + - ### Features + - AWS CodeBuild - Support keyword search for test cases in the DescribeTestCases API. Allow deletion of reports in the report group, before deletion of the report group, using the deleteReports flag. + +## __AWS Common Runtime HTTP Client__ + - ### Features + - This release includes the preview release of the AWS Common Runtime HTTP client for the AWS SDK for Java v2. The code can be found in the `aws-crt-client` module (see the sketch after this section). + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Fix for stack overflow caused by using self-referencing DynamoDB annotated classes. + +## __Amazon Lex Model Building Service__ + - ### Features + - Amazon Lex supports en-AU locale + +## __Amazon QuickSight__ + - ### Features + - Adds tagging support for QuickSight customization resources. A user can now specify a list of tags when creating a customization resource and use a customization ARN in QuickSight's tagging APIs. + +## __AmazonApiGatewayV2__ + - ### Features + - You can now secure HTTP APIs using Lambda authorizers and IAM authorizers. These options enable you to make flexible auth decisions using a Lambda function, or using IAM policies, respectively. + +## __Elastic Load Balancing__ + - ### Features + - Adds support for Application Load Balancers on Outposts. + +# __2.14.12__ __2020-09-04__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS X-Ray__ + - ### Features + - Enhancing CreateGroup, UpdateGroup, GetGroup and GetGroups APIs to support configuring X-Ray Insights + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Documentation-only updates for AWS Systems Manager + +## __Amazon WorkSpaces__ + - ### Features + - Adding support for Microsoft Office 2016 and Microsoft Office 2019 in BYOL Images + +# __2.14.11__ __2020-09-03__ +## __AWS Elemental MediaPackage__ + - ### Features + - Enables inserting a UTCTiming XML tag in the output manifest of a DASH endpoint which a media player will use to help with time synchronization. + +## __AWS Step Functions__ + - ### Features + - This release of the AWS Step Functions SDK introduces support for payloads up to 256KB for Standard and Express workflows + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra now returns confidence scores for both 'answer' and 'question and answer' query responses. + +## __Amazon GuardDuty__ + - ### Features + - GuardDuty findings triggered by failed events now include the error code name within the AwsApiCallAction section. + +# __2.14.10__ __2020-09-02__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds a new transit gateway attachment state and resource type. + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API introduces additional statistics for the size and count of Amazon S3 objects that Macie can analyze as part of a classification job.
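+
+A minimal sketch (not part of the release notes) of wiring the preview AWS Common Runtime HTTP client from 2.14.13 above into an async service client; it assumes the builder class shipped in `aws-crt-client` is `AwsCrtAsyncHttpClient`:
+
+```java
+import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+
+public class CrtHttpClientExample {
+    public static void main(String[] args) {
+        // Use the preview CRT-based HTTP client instead of the default Netty client.
+        S3AsyncClient s3 = S3AsyncClient.builder()
+                .httpClientBuilder(AwsCrtAsyncHttpClient.builder())
+                .build();
+
+        s3.listBuckets().join().buckets().forEach(b -> System.out.println(b.name()));
+        s3.close();
+    }
+}
+```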
+ +## __DynamoDB Enhanced Client__ + - ### Features + - Support for mapping to and from immutable Java objects using ImmutableTableSchema and StaticImmutableTableSchema. + +# __2.14.9__ __2020-09-01__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Code Generator test failures on Windows systems were fixed. + +## __AWS SecurityHub__ + - ### Features + - Added a PatchSummary object for security findings. The PatchSummary object provides details about the patch compliance status of an instance. + +## __Amazon CodeGuru Reviewer__ + - ### Features + - Add support for repository analysis based code reviews + +# __2.14.8__ __2020-08-31__ +## __AWS Backup__ + - ### Features + - Documentation updates for Cryo + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CloudFront__ + - ### Features + - CloudFront now supports real-time logging for CloudFront distributions. CloudFront real-time logs are more detailed, configurable, and are available in real time. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 and Spot Fleet now support modification of launch template configs for a running fleet enabling instance type, instance weight, AZ, and AMI updates without losing the current fleet ID. + +## __Amazon Simple Queue Service__ + - ### Features + - Documentation updates for SQS. + +# __2.14.7__ __2020-08-28__ +## __AWS Cost and Usage Report Service__ + - ### Features + - This release add MONTHLY as the new supported TimeUnit for ReportDefinition. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CloudFront__ + - ### Features + - You can now manage CloudFront's additional, real-time metrics with the CloudFront API. + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR adds support for ICMP, port -1, in Block Public Access Exceptions and API access for EMR Notebooks execution. You can now non-interactively execute EMR Notebooks and pass input parameters. + +## __Amazon Route 53__ + - ### Features + - Documentation updates for Route 53 + +# __2.14.6__ __2020-08-27__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for WebM DASH outputs as well as H.264 4:2:2 10-bit output in MOV and MP4. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Introduces support to initiate Internet Key Exchange (IKE) negotiations for VPN connections from AWS. A user can now send the initial IKE message to their Customer Gateway (CGW) from VPN endpoints. + +## __Amazon GameLift__ + - ### Features + - GameLift FleetIQ as a standalone feature is now generally available. FleetIQ makes low-cost Spot instances viable for game hosting. Use GameLift FleetIQ with your EC2 Auto Scaling groups. + +## __Amazon Redshift__ + - ### Features + - Documentation updates for Amazon Redshift. + +# __2.14.5__ __2020-08-26__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Appflow__ + - ### Features + - Amazon AppFlow is a fully managed integration service that securely transfers data between AWS services and SaaS applications. This update releases the first version of Amazon AppFlow APIs and SDK. 
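+
+A rough sketch (not part of the release notes) of the immutable-object mapping added to the DynamoDB Enhanced Client in 2.14.10 above; the `Customer` class is illustrative, and the exact getter/builder naming requirements should be checked against the Enhanced Client documentation:
+
+```java
+import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
+
+@DynamoDbImmutable(builder = Customer.Builder.class)
+public class Customer {
+    private final String id;
+    private final String name;
+
+    private Customer(Builder b) {
+        this.id = b.id;
+        this.name = b.name;
+    }
+
+    @DynamoDbPartitionKey
+    public String id() { return id; }
+    public String name() { return name; }
+
+    public static final class Builder {
+        private String id;
+        private String name;
+
+        public Builder id(String id) { this.id = id; return this; }
+        public Builder name(String name) { this.name = name; return this; }
+        public Customer build() { return new Customer(this); }
+    }
+}
+
+// Derives an ImmutableTableSchema from the annotated class:
+// TableSchema<Customer> schema = TableSchema.fromImmutableClass(Customer.class);
+```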
+ +## __Amazon Route 53 Resolver__ + - ### Features + - Route 53 Resolver adds support for resolver query logs + +# __2.14.4__ __2020-08-24__ +## __AWS Database Migration Service__ + - ### Features + - Added new endpoint settings to include columns with Null and Empty value when using Kinesis and Kafka as target. Added a new endpoint setting to set maximum message size when using Kafka as target. + +## __AWS IoT SiteWise__ + - ### Features + - Add traversalDirection to ListAssociatedAssetsRequest and add portal status to ListPortalsResponse + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - For JSON protocols, when unmarshalling a response, if a member is declared to be located in the URI, the member is treated as being located in the payload instead. + +## __AWS X-Ray__ + - ### Features + - AWS X-Ray now supports tagging on sampling rules and groups. + +## __Amazon CloudWatch Logs__ + - ### Features + - Documentation updates for CloudWatch Logs + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release enables customers to use VPC prefix lists in their transit gateway route tables, and it adds support for Provisioned IOPS SSD (io2) EBS volumes. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Add string length constraints to OpsDataAttributeName and OpsFilterValue. + +## __Managed Streaming for Kafka__ + - ### Features + - Add UpdateConfiguration and DeleteConfiguration operations. + +# __2.14.3__ __2020-08-20__ +## __Amazon Chime__ + - ### Features + - Documentation updates for chime + +## __Amazon DynamoDB Streaming Client__ + - ### Bugfixes + - Fix for CRC not working correctly for compressed responses + +## __Amazon FSx__ + - ### Features + - Documentation updates for Amazon FSx + +## __AmazonApiGatewayV2__ + - ### Features + - Customers can now create Amazon API Gateway HTTP APIs that route requests to AWS AppConfig, Amazon EventBridge, Amazon Kinesis Data Streams, Amazon SQS, and AWS Step Functions. + +# __2.14.2__ __2020-08-19__ +## __AWS Lake Formation__ + - ### Features + - Adding additional field in ListPermissions API response to return RAM resource share ARN if a resource were shared through AWS RAM service. + +## __AWS Organizations__ + - ### Features + - Minor documentation updates for AWS Organizations + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - Enhance SearchProvisionedProducts API to allow queries using productName and provisioningArtifactName. Added lastProvisioningRecordId and lastSuccessfulRecordId to Read ProvisionedProduct APIs + +## __AWS Storage Gateway__ + - ### Features + - Added WORM, tape retention lock, and custom pool features for virtual tapes. + +## __Amazon Interactive Video Service__ + - ### Features + - Amazon Interactive Video Service (IVS) now offers customers the ability to create private channels, allowing customers to restrict their streams by channel or viewer. + +## __Amazon Transcribe Streaming Service__ + - ### Features + - Amazon Transcribe and Amazon Transcribe Medical now enable you to identify different speakers in your real-time streams with speaker identification. + +# __2.14.1__ __2020-08-18__ +## __AWS CodeBuild__ + - ### Features + - Documentation updates for codebuild + +## __AWS DataSync__ + - ### Features + - DataSync support for filters as input arguments to the ListTasks and ListLocations API calls. 
+ +## __AWS SSO Identity Store__ + - ### Features + - AWS Single Sign-On (SSO) Identity Store service provides an interface to retrieve all of your users and groups. It enables entitlement management per user or group for AWS SSO and other IDPs. + +## __AWS SecurityHub__ + - ### Features + - New details for DynamoDB tables, Elastic IP addresses, IAM policies and users, RDS DB clusters and snapshots, and Secrets Manager secrets. Added details for AWS KMS keys and RDS DB instances. + +## __Amazon Cognito Identity Provider__ + - ### Features + - Adding the option to use a service linked role to publish events to Pinpoint. + +## __Amazon Simple Email Service__ + - ### Features + - This release includes new APIs to allow customers to add or remove email addresses from their account-level suppression list in bulk. + +# __2.14.0__ __2020-08-17__ +## __AWS Certificate Manager__ + - ### Features + - ACM provides support for the new Private CA feature Cross-account CA sharing. ACM users can issue certificates signed by a private CA belonging to another account where the CA was shared with them. + +## __AWS Certificate Manager Private Certificate Authority__ + - ### Features + - ACM Private CA is launching cross-account support. This allows customers to share their private CAs with other accounts, AWS Organizations, and organizational units to issue end-entity certificates. + +## __AWS RoboMaker__ + - ### Features + - This release introduces RoboMaker Simulation WorldForge, a capability that automatically generates one or more simulation worlds. + +## __AWS SDK for Java v2__ + - ### Features + - The client-side metrics feature is out of developer preview and is now generally available (see the sketch after this section). + +## __Amazon EC2 Container Registry__ + - ### Features + - This feature adds support for pushing and pulling Open Container Initiative (OCI) artifacts. + +## __Amazon Kinesis__ + - ### Features + - Introducing ShardFilter for ListShards API to filter the shards using a position in the stream, and ChildShards support for GetRecords and SubscribeToShard API to discover children shards on shard end + +## __Amazon QuickSight__ + - ### Features + - Amazon QuickSight now supports programmatic creation and management of analyses with new APIs. + +## __Elastic Load Balancing__ + - ### Features + - Adds support for HTTP Desync Mitigation in Application Load Balancers. + - Adds support for HTTP Desync Mitigation in Classic Load Balancers. + +# __2.13.76__ __2020-08-14__ +## __AWS License Manager__ + - ### Features + - This release includes ability to enforce license assignment rules with EC2 Dedicated Hosts. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon AppStream__ + - ### Features + - Adds support for the Desktop View feature + +## __Amazon Elastic Compute Cloud__ + - ### Features + - New C5ad instances featuring AMD's 2nd Generation EPYC processors, offering up to 96 vCPUs, 192 GiB of instance memory, 3.8 TB of NVMe based SSD instance storage, and 20 Gbps of network bandwidth + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker now supports 1) creating real-time inference endpoints using model container images from Docker registries in customers' VPC 2) AUC (Area under the curve) as AutoPilot objective metric + +## __Braket__ + - ### Features + - Fixing bug in our SDK model where device status and device type had been flipped.
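+
+A minimal sketch (not part of the release notes) of enabling the now generally available client-side metrics from 2.14.0 above; it assumes the CloudWatch publisher class `CloudWatchMetricPublisher` from the optional metric-publisher module:
+
+```java
+import software.amazon.awssdk.metrics.MetricPublisher;
+import software.amazon.awssdk.metrics.publishers.cloudwatch.CloudWatchMetricPublisher;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+
+public class ClientMetricsExample {
+    public static void main(String[] args) {
+        MetricPublisher publisher = CloudWatchMetricPublisher.create();
+
+        // Publish client-side metrics (latency, retries, etc.) for every call made by this client.
+        DynamoDbClient dynamo = DynamoDbClient.builder()
+                .overrideConfiguration(c -> c.addMetricPublisher(publisher))
+                .build();
+
+        dynamo.listTables().tableNames().forEach(System.out::println);
+
+        dynamo.close();
+        publisher.close();
+    }
+}
+```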
+ +# __2.13.75__ __2020-08-13__ +## __AWS AppSync__ + - ### Features + - Documentation update for AWS AppSync support for Direct Lambda Resolvers. + +## __Amazon Cognito Identity Provider__ + - ### Features + - Adding ability to customize expiry for Refresh, Access and ID tokens. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Added MapCustomerOwnedIpOnLaunch and CustomerOwnedIpv4Pool to ModifySubnetAttribute to allow CoIP auto assign. Fields are returned in DescribeSubnets and DescribeNetworkInterfaces responses. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Adding support for customer provided EC2 launch templates and AMIs to EKS Managed Nodegroups. Also adds support for Arm-based instances to EKS Managed Nodegroups. + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API includes miscellaneous updates and improvements to the documentation. + +## __Amazon Relational Database Service__ + - ### Features + - This release allows customers to specify a replica mode when creating or modifying a Read Replica, for DB engines which support this feature. + +## __Braket__ + - ### Features + - Amazon Braket general availability with Device and Quantum Task operations. + +## __CloudWatch Metrics Publisher__ + - ### Bugfixes + - Fixed a bug where `CloudWatchPublisher#close` would not always complete flushing pending metrics before returning. + +# __2.13.74__ __2020-08-12__ +## __AWS Cloud9__ + - ### Features + - Add ConnectionType input parameter to CreateEnvironmentEC2 endpoint. New parameter enables creation of environments with SSM connection. + +## __AWS IoT__ + - ### Features + - Audit finding suppressions: Device Defender enables customers to turn off non-compliant findings for specific resources on a per check basis. + +## __AWS Lambda__ + - ### Features + - Support for creating Lambda Functions using 'java8.al2' and 'provided.al2' + +## __AWS SDK for Java v2__ + - ### Features + - Added an "unsafe" way to retrieve a byte array from `SdkBytes` and `ResponseBytes` without copying the data. + +## __AWS Transfer Family__ + - ### Features + - Adds security policies to control cryptographic algorithms advertised by your server, additional characters in usernames and length increase, and FIPS compliant endpoints in the US and Canada regions. + +## __Amazon Comprehend__ + - ### Features + - Amazon Comprehend Custom Entity Recognition now supports Spanish, German, French, Italian and Portuguese and up to 25 entity types per model. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Introduces support for IPv6-in-IPv4 IPsec tunnels. A user can now send traffic from their on-premise IPv6 network to AWS VPCs that have IPv6 support enabled. + +## __Amazon FSx__ + - ### Features + - This release adds the capability to create persistent file systems for throughput-intensive workloads using Hard Disk Drive (HDD) storage and an optional read-only Solid-State Drive (SSD) cache. + +## __Amazon WorkSpaces__ + - ### Features + - Adds optional EnableWorkDocs property to WorkspaceCreationProperties in the ModifyWorkspaceCreationProperties API + +# __2.13.73__ __2020-08-11__ +## __AWS Lambda__ + - ### Features + - Support Managed Streaming for Kafka as an Event Source. Support retry until record expiration for Kinesis and Dynamodb streams event source mappings. 
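+
+A minimal sketch (not part of the release notes) of the "unsafe" byte-array accessor added in 2.13.74 above; it assumes the new method is named `asByteArrayUnsafe()`:
+
+```java
+import software.amazon.awssdk.core.SdkBytes;
+
+public class UnsafeBytesExample {
+    public static void main(String[] args) {
+        SdkBytes bytes = SdkBytes.fromUtf8String("hello");
+
+        byte[] copied = bytes.asByteArray();       // defensive copy
+        byte[] shared = bytes.asByteArrayUnsafe(); // no copy; the returned array must not be modified
+
+        System.out.println(copied.length + " / " + shared.length);
+    }
+}
+```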
+ +## __AWS Organizations__ + - ### Features + - Minor documentation update for AWS Organizations + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release rolls back the EC2 On-Demand Capacity Reservations (ODCRs) release 1.11.831 published on 2020-07-30, which was deployed in error. + +## __Amazon Simple Storage Service__ + - ### Features + - Add support for in-region CopyObject and UploadPartCopy through S3 Access Points + +# __2.13.72__ __2020-08-10__ +## __AWS Glue__ + - ### Features + - Starting today, you can further control orchestration of your ETL workloads in AWS Glue by specifying the maximum number of concurrent runs for a Glue workflow. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Savings Plans__ + - ### Features + - Updates to the list of services supported by this API. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Remove CoIP Auto-Assign feature references. + +# __2.13.71__ __2020-08-07__ +## __AWS Glue__ + - ### Features + - AWS Glue now adds support for Network connection type enabling you to access resources inside your VPC using Glue crawlers and Glue ETL jobs. + +## __AWS Organizations__ + - ### Features + - Documentation updates for some new error reasons. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Server Migration Service__ + - ### Features + - In this release, AWS Server Migration Service (SMS) has added new features: 1. APIs to work with application and instance level validation 2. Import application catalog from AWS Application Discovery Service 3. For an application you can start on-demand replication + +## __Amazon Simple Storage Service__ + - ### Features + - Updates Amazon S3 API reference documentation. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Fixed the issue where `connectionTimeToLive` was not allowed to set to zero. See [#1976](https://github.com/aws/aws-sdk-java-v2/issues/1976) + +# __2.13.70__ __2020-08-06__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release supports Wavelength resources, including carrier gateways, and carrier IP addresses. + +## __Amazon Lex Model Building Service__ + - ### Features + - Amazon Lex supports the option to enable accuracy improvements and specify an intent classification confidence score threshold. + +## __Amazon Lex Runtime Service__ + - ### Features + - Amazon Lex supports intent classification confidence scores along with a list of the top five intents. + +## __Amazon Personalize__ + - ### Features + - Add 'exploration' functionality + +## __Amazon Personalize Events__ + - ### Features + - Adds support implicit and explicit impression input + +## __Amazon Personalize Runtime__ + - ### Features + - Adds support for implicit impressions + +# __2.13.69__ __2020-08-05__ +## __AWS AppSync__ + - ### Features + - AWS AppSync releases support for Direct Lambda Resolvers. + +## __AWS Resource Groups Tagging API__ + - ### Features + - Documentation updates for the Resource Group Tagging API namespace. + +## __Amazon FSx__ + - ### Features + - Documentation updates for StorageCapacity input value format. + +## __Amazon Simple Notification Service__ + - ### Features + - Documentation updates for SNS. 
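+
+A minimal sketch (not part of the release notes) of the `connectionTimeToLive` setting whose zero-value handling was fixed in the Netty NIO HTTP Client under 2.13.71 above; the service client used here is illustrative:
+
+```java
+import java.time.Duration;
+
+import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+
+public class ConnectionTtlExample {
+    public static void main(String[] args) {
+        // Duration.ZERO is accepted after the 2.13.71 fix (previously rejected by validation).
+        S3AsyncClient s3 = S3AsyncClient.builder()
+                .httpClientBuilder(NettyNioAsyncHttpClient.builder()
+                        .connectionTimeToLive(Duration.ZERO))
+                .build();
+
+        s3.listBuckets().join().buckets().forEach(b -> System.out.println(b.name()));
+        s3.close();
+    }
+}
+```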
+ +## __Amazon Transcribe Service__ + - ### Features + - Amazon Transcribe now supports custom language models, which can improve transcription accuracy for your specific use case. + +# __2.13.68__ __2020-08-04__ +## __AWS Health APIs and Notifications__ + - ### Features + - Documentation updates for health + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.13.67__ __2020-08-03__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon S3__ + - ### Bugfixes + - Fixed an issue that could cause "Data read has a different checksum than expected" errors. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Adds a waiter for CommandExecuted and paginators for various other APIs. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Fixed an issue where, under rare circumstances, streaming request bytes could be misordered. + +# __2.13.66__ __2020-07-31__ +## __AWS Resource Groups Tagging API__ + - ### Features + - Updates to the list of services supported by this API. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Storage Gateway__ + - ### Features + - Add support for gateway VM deprecation dates + +## __AWS WAFV2__ + - ### Features + - Add ManagedByFirewallManager flag to the logging configuration, which indicates whether AWS Firewall Manager controls the configuration. + +## __Amazon Chime__ + - ### Features + - This release increases the CreateMeetingWithAttendee max attendee limit to 10. + +## __Amazon Personalize Runtime__ + - ### Features + - Adds support to use filters with Personalized Ranking recipe + +# __2.13.65__ __2020-07-30__ +## __AWS CodeBuild__ + - ### Features + - Adding support for BuildBatch, and CodeCoverage APIs. BuildBatch allows you to model your project environment in source, and helps start multiple builds with a single API call. CodeCoverage allows you to track your code coverage using AWS CodeBuild. + +## __AWS Organizations__ + - ### Features + - Documentation updates for AWS Organizations + +## __AWS Resource Groups__ + - ### Features + - Improved documentation for Resource Groups API operations. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - This release adds support for ProvisionProduct, UpdateProvisionedProduct & DescribeProvisioningParameters by product name, provisioning artifact name and path name. In addition DescribeProvisioningParameters now returns a list of provisioning artifact outputs. + +## __Amazon CloudFront__ + - ### Features + - Documentation updates for CloudFront + +## __Amazon Elastic Compute Cloud__ + - ### Features + - EC2 On-Demand Capacity Reservations now adds support to bring your own licenses (BYOL) of Windows operating system to launch EC2 instances. + +## __Amazon GuardDuty__ + - ### Features + - GuardDuty can now provide detailed cost metrics broken down by account, data source, and S3 resources, based on the past 30 days of usage. This new feature also supports viewing cost metrics for all member accounts as a GuardDuty master. + +## __Amazon Simple Email Service__ + - ### Features + - This release makes more API operations available to customers in version 2 of the Amazon SES API. With these additions, customers can now access sending authorization, custom verification email, and template API operations. 
With this release, Amazon SES is also providing new and updated APIs to allow customers to request production access. + +## __Managed Streaming for Kafka__ + - ### Features + - Amazon MSK has added a new API that allows you to reboot brokers within a cluster. + +# __2.13.64__ __2020-07-29__ +## __AWS Cloud Map__ + - ### Features + - Added new attribute AWS_EC2_INSTANCE_ID for RegisterInstance API + +## __AWS Resource Groups__ + - ### Features + - Resource Groups released a new feature that enables you to create a group with an associated configuration that specifies how other AWS services interact with the group. There are two new operations `GroupResources` and `UngroupResources` to work on a group with a configuration. In this release, you can associate EC2 Capacity Reservations with a resource group. Resource Groups also added a new request parameter `Group` to replace `GroupName` for all existing operations. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon EC2 Container Registry__ + - ### Features + - This release adds support for encrypting the contents of your Amazon ECR repository with customer master keys (CMKs) stored in AWS Key Management Service. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Adding support to target EC2 On-Demand Capacity Reservations within an AWS Resource Group to launch EC2 instances. + +## __Amazon GuardDuty__ + - ### Features + - GuardDuty now supports S3 Data Events as a configurable data source type. This feature expands GuardDuty's monitoring scope to include S3 data plane operations, such as GetObject and PutObject. This data source is optional and can be enabled or disabled at any time. Accounts already using GuardDuty must first enable the new feature to use it; new accounts will be enabled by default. GuardDuty masters can configure this data source for individual member accounts and GuardDuty masters associated through AWS Organizations can automatically enable the data source in member accounts. + +## __Amazon Kinesis Firehose__ + - ### Features + - This release includes a new Kinesis Data Firehose feature that supports data delivery to HTTPS endpoints and to partners. You can now use Kinesis Data Firehose to ingest real-time data and deliver to HTTPS endpoints and partners in a serverless, reliable, and scalable manner. + +# __2.13.63__ __2020-07-28__ +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports several new features: EBU-TT-D captions in Microsoft Smooth outputs; interlaced video in HEVC outputs; video noise reduction (using temporal filtering) in HEVC outputs. + +## __AWS SDK for Java v2__ + - ### Features + - Slight performance improvement when metrics are disabled. + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed an issue where HTTP status code metrics were not always published for async clients. + +## __AWS SecurityHub__ + - ### Features + - Added UpdateSecurityHubConfiguration API. Security Hub now allows customers to choose whether to automatically enable new controls that are added to an existing standard that the customer enabled. For example, if you enabled Foundational Security Best Practices for an account, you can automatically enable new controls as we add them to that standard. By default, new controls are enabled. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Introduces support for tag-on-create capability for the following APIs: CreateVpnConnection, CreateVpnGateway, and CreateCustomerGateway.
A user can now add tags while creating these resources. For further detail, please see AWS Tagging Strategies. + +## __Amazon Interactive Video Service__ + - ### Features + - Added a new error code, PendingVerification, to differentiate between errors caused by insufficient IAM permissions and errors caused by account verification. + +## __Amazon Relational Database Service__ + - ### Features + - Adds reporting of manual cluster snapshot quota to DescribeAccountAttributes API + +## __Auto Scaling__ + - ### Features + - Now you can enable Instance Metadata Service Version 2 (IMDSv2) or disable the instance metadata endpoint with Launch Configurations. + +## __EC2 Image Builder__ + - ### Features + - This release updates distribution configurations to allow periods in AMI names. + +# __2.13.62__ __2020-07-27__ +## __AWS DataSync__ + - ### Features + - Today AWS DataSync releases support for self-managed object storage Locations and the new TransferMode Option. + +## __AWS Database Migration Service__ + - ### Features + - Basic endpoint settings for relational databases, Preflight validation API. + +## __AWS Glue__ + - ### Features + - Add ability to manually resume workflows in AWS Glue providing customers further control over the orchestration of ETL workloads. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - m6gd, c6gd, r6gd instances are powered by AWS Graviton2 processors and support local NVMe instance storage + +## __Amazon Fraud Detector__ + - ### Features + - Moved the eventTypeName attribute for PutExternalModel API to inputConfiguration. Model ID's no longer allow hyphens. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Assorted doc ticket-fix updates for Systems Manager. + +# __2.13.61__ __2020-07-24__ +## __AWS Elemental MediaPackage__ + - ### Features + - The release adds daterange as a new ad marker option. This option enables MediaPackage to insert EXT-X-DATERANGE tags in HLS and CMAF manifests. The EXT-X-DATERANGE tag is used to signal ad and program transition events. + +## __AWS MediaConnect__ + - ### Features + - You can now disable an entitlement to stop streaming content to the subscriber's flow temporarily. When you are ready to allow content to start streaming to the subscriber's flow again, you can enable the entitlement. + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra now supports sorting query results based on document attributes. Amazon Kendra also introduced an option to enclose table and column names with double quotes for database data sources. + +## __Amazon CloudWatch__ + - ### Features + - AWS CloudWatch ListMetrics now supports an optional parameter (RecentlyActive) to filter results by only metrics that have received new datapoints in the past 3 hours. This enables more targeted metric data retrieval through the Get APIs + +## __Amazon FSx__ + - ### Features + - Documentation update for FSx for Lustre + +## __Amazon Fraud Detector__ + - ### Features + - GetPrediction has been replaced with GetEventPrediction. PutExternalModel has been simplified to accept a role ARN. + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API introduces additional criteria for sorting and filtering query results for account quotas and usage statistics. + +## __Amazon SageMaker Service__ + - ### Features + - Sagemaker Ground Truth:Added support for OIDC (OpenID Connect) to authenticate workers via their own identity provider instead of through Amazon Cognito. 
This release adds new APIs (CreateWorkforce, DeleteWorkforce, and ListWorkforces) to SageMaker Ground Truth service. Sagemaker Neo: Added support for detailed target device description by using TargetPlatform fields - OS, architecture, and accelerator. Added support for additional compilation parameters by using JSON field CompilerOptions. Sagemaker Search: SageMaker Search supports transform job details in trial components. + +## __AmazonMQ__ + - ### Features + - Amazon MQ now supports LDAP (Lightweight Directory Access Protocol), providing authentication and authorization of Amazon MQ users via a customer designated LDAP server. + +# __2.13.60__ __2020-07-23__ +## __AWS Config__ + - ### Features + - Adding service linked configuration aggregation support along with new enums for config resource coverage + +## __AWS Direct Connect__ + - ### Features + - Documentation updates for AWS Direct Connect + +## __AWS Glue__ + - ### Features + - Added new ConnectionProperties: "KAFKA_SSL_ENABLED" (to toggle SSL connections) and "KAFKA_CUSTOM_CERT" (import CA certificate file) + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon FSx__ + - ### Features + - Adds support for AutoImport, a new FSx for Lustre feature that allows customers to configure their FSx file system to automatically update its contents when new objects are added to S3 or existing objects are overwritten. + +## __Amazon Lightsail__ + - ### Features + - This release adds support for Amazon Lightsail content delivery network (CDN) distributions and SSL/TLS certificates. + +## __Amazon WorkSpaces__ + - ### Features + - Added UpdateWorkspaceImagePermission API to share Amazon WorkSpaces images across AWS accounts. + +# __2.13.59__ __2020-07-22__ +## __AWS Elemental MediaLive__ + - ### Features + - The AWS Elemental MediaLive APIs and SDKs now support the ability to get thumbnails for MediaLive devices that are attached or not attached to a channel. Previously, this thumbnail feature was available only on the console. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon QuickSight__ + - ### Features + - New API operations - GetSessionEmbedUrl, CreateNamespace, DescribeNamespace, ListNamespaces, DeleteNamespace, DescribeAccountSettings, UpdateAccountSettings, CreateAccountCustomization, DescribeAccountCustomization, UpdateAccountCustomization, DeleteAccountCustomization. Modified API operations to support custom permissions restrictions - RegisterUser, UpdateUser, UpdateDashboardPermissions + +# __2.13.58__ __2020-07-21__ +## __Amazon CodeGuru Profiler__ + - ### Features + - Amazon CodeGuru Profiler now supports resource tagging APIs, tags-on-create and tag-based access control features. You can now tag profiling groups for better resource and access control management. + +# __2.13.57__ __2020-07-20__ +## __AWS CodeBuild__ + - ### Features + - AWS CodeBuild adds support for Session Manager and Windows 2019 Environment type + +## __AWS Ground Station__ + - ### Features + - Adds optional MTU property to DataflowEndpoint and adds contact source and destination details to DescribeContact response. + +## __Amazon CloudFront__ + - ### Features + - CloudFront adds support for cache policies and origin request policies. With these new policies, you can now more granularly control the query string, header, and cookie values that are included in the cache key and in requests that CloudFront sends to your origin. 
+ +## __Amazon Elastic Compute Cloud__ + - ### Features + - Added support for tag-on-create for CreateVpcPeeringConnection and CreateRouteTable. You can now specify tags when creating any of these resources. For more information about tagging, see AWS Tagging Strategies. Add poolArn to the response of DescribeCoipPools. + +## __Amazon Fraud Detector__ + - ### Features + - Introduced flexible model training dataset requirements for Online Fraud Insights so that customers can choose any two inputs to train a model instead of being required to use 'email' and 'IP address' at minimum. Added support for resource ARNs, resource tags, resource-based IAM policies and identity-based policies that limit access to a resource based on tags. Added support for customer-managed customer master key (CMK) data encryption. Added new Event Type, Entity Type, and Label APIs. An event type defines the structure for an event sent to Amazon Fraud Detector, including the variables sent as part of the event, the entity performing the event, and the labels that classify the event. Introduced the GetEventPrediction API. + +## __Amazon Relational Database Service__ + - ### Features + - Add a new SupportsParallelQuery output field to DescribeDBEngineVersions. This field shows whether the engine version supports parallelquery. Add a new SupportsGlobalDatabases output field to DescribeDBEngineVersions and DescribeOrderableDBInstanceOptions. This field shows whether global database is supported by engine version or the combination of engine version and instance class. + +## __Firewall Management Service__ + - ### Features + - Added managed policies for auditing security group rules, including the use of managed application and protocol lists. + +# __2.13.56__ __2020-07-17__ +## __AWS AppSync__ + - ### Features + - Documentation update to Cachingconfig.cachingKeys to include $context.source as a valid value. + +## __AWS Elastic Beanstalk__ + - ### Features + - Add waiters for `EnvironmentExists`, `EnvironmentUpdated`, and `EnvironmentTerminated`. Add paginators for `DescribeEnvironmentManagedActionHistory` and `ListPlatformVersions`. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Connect Service__ + - ### Features + - This release adds a set of Amazon Connect APIs to programmatically control call recording with start, stop, pause and resume functions. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for EC2 + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API includes miscellaneous updates and improvements to the documentation. + +## __Application Auto Scaling__ + - ### Features + - Documentation updates for Application Auto Scaling + +# __2.13.55__ __2020-07-15__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Interactive Video Service__ + - ### Features + - Introducing Amazon Interactive Video Service - a managed live streaming solution that is quick and easy to set up, and ideal for creating interactive video experiences. + +# __2.13.54__ __2020-07-09__ +## __AWS Amplify__ + - ### Features + - Documentation update to the introduction text to specify that this is the Amplify Console API. + +## __AWS App Mesh__ + - ### Features + - AppMesh now supports Ingress which allows resources outside a mesh to communicate to resources that are inside the mesh. 
See https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_gateways.html + +## __AWS CloudHSM V2__ + - ### Features + - Documentation updates for cloudhsmv2 + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Secrets Manager__ + - ### Features + - Adds support for filters on the ListSecrets API to allow filtering results by name, tag key, tag value, or description. Adds support for the BlockPublicPolicy option on the PutResourcePolicy API to block resource policies which grant a wide range of IAM principals access to secrets. Adds support for the ValidateResourcePolicy API to validate resource policies for syntax and prevent lockout error scenarios and wide access to secrets. + +## __AWS WAFV2__ + - ### Features + - Added the option to use IP addresses from an HTTP header that you specify, instead of using the web request origin. Available for IP set matching, geo matching, and rate-based rule count aggregation. + +## __Alexa For Business__ + - ### Features + - Added support for registering an AVS device directly to a room using RegisterAVSDevice with a room ARN + +## __Amazon CloudWatch Events__ + - ### Features + - Amazon CloudWatch Events/EventBridge adds support for API Gateway as a target. + +## __Amazon Comprehend__ + - ### Features + - AWS Comprehend now supports Real-time Analysis with Custom Entity Recognition. + +## __Amazon DynamoDB Enhanced Client__ + - ### Features + - Support converting "0" and "1" numbers read from DynamoDB to Boolean and AtomicBoolean. + +## __Amazon Elastic Block Store__ + - ### Features + - This release introduces the following set of actions for the EBS direct APIs: 1. StartSnapshot, which creates a new Amazon EBS snapshot. 2. PutSnapshotBlock, which writes a block of data to a snapshot. 3. CompleteSnapshot, which seals and completes a snapshot after blocks of data have been written to it. + +## __Amazon EventBridge__ + - ### Features + - Amazon EventBridge adds support for API Gateway as a target. + +## __Amazon SageMaker Service__ + - ### Features + - This release adds the DeleteHumanTaskUi API to Amazon Augmented AI + +## __Amazon Simple Notification Service__ + - ### Features + - This release adds support for SMS origination number as an attribute in the MessageAttributes parameter for the SNS Publish API. + +# __2.13.53__ __2020-07-08__ +## __AWS Cost Explorer Service__ + - ### Features + - Customers can now see Instance Name alongside each rightsizing recommendation. + +## __AWS Organizations__ + - ### Features + - We have launched a self-service option to make it easier for customers to manage the use of their content by AI services. Certain AI services (Amazon CodeGuru Profiler, Amazon Comprehend, Amazon Lex, Amazon Polly, Amazon Rekognition, Amazon Textract, Amazon Transcribe, and Amazon Translate), may use content to improve the service. Customers have been able to opt out of this use by contacting AWS Support, and now they can opt out on a self-service basis by setting an Organizations policy for all or an individual AI service as listed above. Please refer to the technical documentation for more details. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - EC2 Spot now enables customers to tag their Spot Instances Requests on creation. 
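+
+The 2.13.54 DynamoDB Enhanced Client entry above means numeric `0`/`1` attribute values read from a table now convert to `Boolean` and `AtomicBoolean` properties. A sketch using the usual bean-based schema; the `FeatureFlag` bean and table name are illustrative only:
+
+```java
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient;
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable;
+import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
+
+public class BooleanConversionExample {
+
+    @DynamoDbBean
+    public static class FeatureFlag {
+        private String name;
+        private Boolean enabled; // an item storing the number 0 or 1 now maps to false/true
+
+        @DynamoDbPartitionKey
+        public String getName() { return name; }
+        public void setName(String name) { this.name = name; }
+
+        public Boolean getEnabled() { return enabled; }
+        public void setEnabled(Boolean enabled) { this.enabled = enabled; }
+    }
+
+    public static void main(String[] args) {
+        DynamoDbEnhancedClient enhanced = DynamoDbEnhancedClient.create();
+        DynamoDbTable<FeatureFlag> table =
+                enhanced.table("feature-flags", TableSchema.fromBean(FeatureFlag.class));
+        System.out.println("Mapped table: " + table.tableName());
+    }
+}
+```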
+ +## __Amazon Forecast Service__ + - ### Features + - With this release, Amazon Forecast now supports the ability to add a tag to any resource via the launch of three new APIs: TagResource, UntagResource and ListTagsForResource. A tag is a simple label consisting of a customer-defined key and an optional value allowing for easier resource management. + +# __2.13.52__ __2020-07-07__ +## __AWS Glue__ + - ### Features + - AWS Glue Data Catalog supports cross account sharing of tables through AWS Lake Formation + +## __AWS Lake Formation__ + - ### Features + - AWS Lake Formation supports sharing tables with other AWS accounts and organizations + +## __AWS SDK for Java v2__ + - ### Features + - This release includes the preview release of the client-side metrics for the AWS SDK for Java v2. The SPI can be found in the `metrics-spi` module, and this release also includes a metric publisher for CloudWatch in `cloudwatch-metric-publisher`. See our post over at the [AWS Developer Blog](https://aws.amazon.com/blogs/developer/category/developer-tools/aws-sdk-for-java/) for additional information. + - Updated service endpoint metadata. + +## __AWS Storage Gateway__ + - ### Features + - Adding support for file-system driven directory refresh, Case Sensitivity toggle for SMB File Shares, and S3 Prefixes and custom File Share names + +## __Amazon CloudFront__ + - ### Features + - Amazon CloudFront adds support for a new security policy, TLSv1.2_2019. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - DescribeAvailabilityZones now returns additional data about Availability Zones and Local Zones. + +## __Amazon Elastic File System__ + - ### Features + - This release adds support for automatic backups of Amazon EFS file systems to further simplify backup management. + +# __2.13.51__ __2020-07-06__ +## __AWS IoT SiteWise__ + - ### Features + - This release supports optional start date and end date parameters for the GetAssetPropertyValueHistory API. + +## __Amazon QuickSight__ + - ### Features + - Add Theme APIs and update Dashboard APIs to support theme overrides. + +## __Amazon Relational Database Service__ + - ### Features + - Adds support for Amazon RDS on AWS Outposts. + +# __2.13.50__ __2020-07-02__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Connect Service__ + - ### Features + - Documentation updates for Amazon Connect. + +## __Amazon ElastiCache__ + - ### Features + - Documentation updates for elasticache + +# __2.13.49__ __2020-07-01__ +## __AWS AppSync__ + - ### Features + - AWS AppSync supports new 12xlarge instance for server-side API caching + +## __AWS CodeBuild__ + - ### Features + - Support build status config in project source + +## __AWS SecurityHub__ + - ### Features + - This release adds additional details for findings. There are now finding details for auto scaling groups, EC2 volumes, and EC2 VPCs. You can identify detected vulnerabilities and provide related network paths. + +## __Amazon Chime__ + - ### Features + - This release supports third party emergency call routing configuration for Amazon Chime Voice Connectors. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds the exceptions KMSKeyNotAccessibleFault and InvalidDBClusterStateFault to the Amazon RDS ModifyDBInstance API. + +## __EC2 Image Builder__ + - ### Features + - EC2 Image Builder adds support for encrypted AMI distribution.
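+
+The 2.13.52 client-side metrics preview above is wired up through a `MetricPublisher` on the client override configuration. A sketch under the assumption that the publisher factory is `CloudWatchMetricPublisher.create()` and the override hook is `addMetricPublisher` (these are preview APIs, so names may shift between releases); the DynamoDB client and region are placeholders:
+
+```java
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.metrics.MetricPublisher;
+import software.amazon.awssdk.metrics.publishers.cloudwatch.CloudWatchMetricPublisher;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+
+public class ClientMetricsExample {
+    public static void main(String[] args) {
+        // Publishes SDK client-side metrics (latency, retries, ...) to CloudWatch.
+        MetricPublisher publisher = CloudWatchMetricPublisher.create();
+
+        ClientOverrideConfiguration overrides = ClientOverrideConfiguration.builder()
+                .addMetricPublisher(publisher)
+                .build();
+
+        DynamoDbClient dynamo = DynamoDbClient.builder()
+                .region(Region.US_EAST_1)
+                .overrideConfiguration(overrides)
+                .build();
+
+        dynamo.listTables();
+
+        dynamo.close();
+        publisher.close();
+    }
+}
+```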
+ +# __2.13.48__ __2020-06-30__ +## __AWS Comprehend Medical__ + - ### Features + - This release adds the relationships between MedicalCondition and Anatomy in DetectEntitiesV2 API. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix marshaller binding for input event streams when member name and shape name are not equal. + +## __Amazon CodeGuru Reviewer__ + - ### Features + - Release GitHub Enterprise Server source provider integration + +## __Amazon EC2 Container Registry__ + - ### Features + - Add a new parameter (ImageDigest) and a new exception (ImageDigestDoesNotMatchException) to PutImage API to support pushing image by digest. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Added support for tag-on-create for CreateVpc, CreateEgressOnlyInternetGateway, CreateSecurityGroup, CreateSubnet, CreateNetworkInterface, CreateNetworkAcl, CreateDhcpOptions and CreateInternetGateway. You can now specify tags when creating any of these resources. For more information about tagging, see AWS Tagging Strategies. + +## __Amazon Relational Database Service__ + - ### Features + - Documentation updates for rds + +# __2.13.47__ __2020-06-29__ +## __AWS CodeStar connections__ + - ### Features + - Updated and new APIs in support of hosts for connections to installed provider types. New integration with the GitHub Enterprise Server provider type. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CodeGuru Profiler__ + - ### Features + - Amazon CodeGuru Profiler is now generally available. The Profiler helps developers to optimize their software, troubleshoot issues in production, and identify their most expensive lines of code. As part of general availability, we are launching: Profiling of AWS Lambda functions, Anomaly detection in CPU profiles, Color My Code on flame graphs, Expanding presence to 10 AWS regions. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Virtual Private Cloud (VPC) customers can now create and manage their own Prefix Lists to simplify VPC configurations. + +## __Auto Scaling__ + - ### Features + - Documentation updates for Amazon EC2 Auto Scaling. + +# __2.13.46__ __2020-06-26__ +## __AWS CloudFormation__ + - ### Features + - ListStackInstances and DescribeStackInstance now return a new `StackInstanceStatus` object that contains `DetailedStatus` values: a disambiguation of the more generic `Status` value. ListStackInstances output can now be filtered on `DetailedStatus` using the new `Filters` parameter. + +## __AWS Database Migration Service__ + - ### Features + - This release contains miscellaneous API documentation updates for AWS DMS in response to several customer reported issues. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Cognito Identity Provider__ + - ### Features + - Don't require Authorization for InitiateAuth and RespondToAuthChallenge. + +## __Amazon QuickSight__ + - ### Features + - Added support for cross-region DataSource credentials copying. + +## __Amazon SageMaker Service__ + - ### Features + - The new 'ModelClientConfig' parameter being added for CreateTransformJob and DescribeTransformJob api actions enable customers to configure model invocation related parameters such as timeout and retry. 
+ +# __2.13.45__ __2020-06-25__ +## __AWS DynamoDB Enhanced Client__ + - ### Bugfixes + - Fixed a bug causing a NullPointerException to be thrown in the enhanced DeleteItem operation if a conditionExpression was given with null attributeNames or null attributeValues. + +## __AWS Glue__ + - ### Features + - This release adds new APIs to support column level statistics in AWS Glue Data Catalog + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Added support for tag-on-create for Host Reservations in Dedicated Hosts. You can now specify tags when you create a Host Reservation for a Dedicated Host. For more information about tagging, see AWS Tagging Strategies. + +# __2.13.44__ __2020-06-24__ +## __AWS Amplify__ + - ### Features + - This release of AWS Amplify Console introduces support for automatically creating custom subdomains for branches based on user-defined glob patterns, as well as automatically cleaning up Amplify branches when their corresponding git branches are deleted. + +## __AWS Backup__ + - ### Features + - Customers can now manage and monitor their backups in a policied manner across their AWS accounts, via an integration between AWS Backup and AWS Organizations + +## __AWS CodeCommit__ + - ### Features + - This release introduces support for reactions to CodeCommit comments. Users will be able to select from a pre-defined list of emojis to express their reaction to any comments. + +## __AWS Identity and Access Management__ + - ### Features + - Documentation updates for iam + +## __AWS Organizations__ + - ### Features + - This release adds support for a new backup policy type for AWS Organizations. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR customers can now set allocation strategies for On-Demand and Spot instances in their EMR clusters with instance fleets. These allocation strategies use real-time capacity insights to provision clusters faster and make the most efficient use of available spare capacity to allocate Spot instances to reduce interruptions. + +## __Amazon FSx__ + - ### Features + - This release adds the capability to take highly-durable, incremental backups of your FSx for Lustre persistent file systems. This capability makes it easy to further protect your file system data and to meet business and regulatory compliance requirements. + +## __Amazon Honeycode__ + - ### Features + - Introducing Amazon Honeycode - a fully managed service that allows you to quickly build mobile and web apps for teams without programming. + +## __Auto Scaling__ + - ### Features + - Documentation updates for Amazon EC2 Auto Scaling. + +# __2.13.43__ __2020-06-23__ +## __AWS MediaTailor__ + - ### Features + - AWS Elemental MediaTailor SDK now allows configuration of Bumper. + +## __AWS Organizations__ + - ### Features + - Added a new error message to support the requirement for a Business License on AWS accounts in China to create an organization. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.13.42__ __2020-06-22__ +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds Tag On Create feature support for the ImportImage, ImportSnapshot, ExportImage and CreateInstanceExportTask APIs. + +## __Amazon Elastic MapReduce__ + - ### Features + - Adding support for MaximumCoreCapacityUnits parameter for EMR Managed Scaling. 
It allows users to control how many units/nodes are added to the CORE group/fleet. Remaining units/nodes are added to the TASK groups/fleet in the cluster. + +## __Amazon Rekognition__ + - ### Features + - This update adds the ability to detect black frames, end credits, shots, and color bars in stored videos + +## __Amazon Relational Database Service__ + - ### Features + - Added paginators for various APIs. + +## __Amazon Simple Queue Service__ + - ### Features + - AWS SQS adds pagination support for ListQueues and ListDeadLetterSourceQueues APIs + +# __2.13.41__ __2020-06-19__ +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports Input Prepare schedule actions. This feature improves existing input switching by allowing users to prepare an input prior to switching to it. + +## __AWS OpsWorks CM__ + - ### Features + - Documentation updates for AWS OpsWorks CM. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon ElastiCache__ + - ### Features + - Documentation updates for elasticache + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Adds support to tag elastic-gpu on the RunInstances api + +# __2.13.40__ __2020-06-18__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for NexGuard FileMarker SDK, which allows NexGuard partners to watermark proprietary content in mezzanine and OTT streaming contexts. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Support__ + - ### Features + - Documentation updates for support + +## __AWSMarketplace Metering__ + - ### Features + - Documentation updates for meteringmarketplace + +## __Amazon Relational Database Service__ + - ### Features + - Adding support for global write forwarding on secondary clusters in an Aurora global database. + +## __Amazon Route 53__ + - ### Features + - Added a new ListHostedZonesByVPC API for customers to list all the private hosted zones that a specified VPC is associated with. + +## __Amazon Simple Email Service__ + - ### Features + - You can now configure Amazon SES to send event notifications when the delivery of an email is delayed because of a temporary issue. For example, you can receive a notification if the recipient's inbox is full, or if there's a temporary problem with the receiving email server. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Added offset support for specifying the number of days to wait after the date and time specified by a CRON expression before running the maintenance window. + +# __2.13.39__ __2020-06-17__ +## __AWS App Mesh__ + - ### Features + - Adds support for route and virtual node listener timeouts. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - nvmeSupport added to DescribeInstanceTypes API + +## __Amazon Import/Export Snowball__ + - ### Features + - AWS Snowcone is a portable, rugged and secure device for edge computing and data transfer. You can use Snowcone to collect, process, and move data to AWS, either offline by shipping the device to AWS or online by using AWS DataSync. With 2 CPUs and 4 GB RAM of compute and 8 TB of storage, Snowcone can run edge computing workloads and store data securely. Snowcone's small size (8.94" x 5.85" x 3.25" / 227 mm x 148.6 mm x 82.65 mm) allows you to set it next to machinery in a factory. Snowcone weighs about 4.5 lbs. 
(2 kg), so you can carry one in a backpack, use it with battery-based operation, and use the Wi-Fi interface to gather sensor data. Snowcone supports a file interface with NFS support. + +## __Amazon Macie 2__ + - ### Features + - This is a documentation-only update to the Amazon Macie API. This update contains miscellaneous editorial improvements to various API descriptions. + +## __Amazon Route 53__ + - ### Features + - Add PriorRequestNotComplete exception to AssociateVPCWithHostedZone API + +# __2.13.38__ __2020-06-16__ +## __AWS Data Exchange__ + - ### Features + - This release fixes a bug in the AWS Data Exchange Python and NodeJS SDKs. The 'KmsKeyArn' field in the create-job API was configured to be required instead of optional. We updated this field to be optional in this release. + +## __AWS Lambda__ + - ### Features + - Adds support for using Amazon Elastic File System (persistent storage) with AWS Lambda. This enables customers to share data across function invocations, read large reference data files, and write function output to a persistent and shared store. + +## __Amazon CloudFront__ + - ### Features + - Documentation updates for CloudFront + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Fix an NPE in `OptionalAttributeConverter` that can happen the if the `nul()` property of the `AttributeValue` is `null`. + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds new US English child voice - Kevin. Kevin is available as Neural voice only. + +## __Amazon QLDB__ + - ### Features + - Documentation updates for Amazon QLDB + +## __Auto Scaling__ + - ### Features + - Introducing instance refresh, a feature that helps you update all instances in an Auto Scaling group in a rolling fashion (for example, to apply a new AMI or instance type). You can control the pace of the refresh by defining the percentage of the group that must remain running/healthy during the replacement process and the time for new instances to warm up between replacements. + +# __2.13.37__ __2020-06-15__ +## __AWS IoT__ + - ### Features + - Added support for job executions rollout configuration, job abort configuration, and job executions timeout configuration for AWS IoT Over-the-Air (OTA) Update Feature. + +## __Alexa For Business__ + - ### Features + - Adding support for optional tags in CreateBusinessReportSchedule, CreateProfile and CreateSkillGroup APIs + +## __Amazon AppConfig__ + - ### Features + - This release adds a hosted configuration source provider. Customers can now store their application configurations directly in AppConfig, without the need for an external configuration source. + +## __Amazon Chime__ + - ### Features + - feature: Chime: This release introduces the ability to create an AWS Chime SDK meeting with attendees. + +## __Amazon Cognito Identity Provider__ + - ### Features + - Updated all AuthParameters to be sensitive. + +# __2.13.36__ __2020-06-12__ +## __AWS CloudFormation__ + - ### Features + - The following parameters now return the organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets: the OrganizationalUnitIds parameter on StackSet and the OrganizationalUnitId parameter on StackInstance, StackInstanceSummary, and StackSetOperationResultSummary + +## __AWS Glue__ + - ### Features + - You can now choose to crawl the entire table or just a sample of records in DynamoDB when using AWS Glue crawlers. Additionally, you can also specify a scanning rate for crawling DynamoDB tables. 
+ +## __AWS SDK for Java v2 DynamoDB Enhanced Client__ + - ### Bugfixes + - Added ClientRequestToken in class TransactWriteItemsEnhancedRequest. + +## __AWS Storage Gateway__ + - ### Features + - Display EndpointType in DescribeGatewayInformation + +## __Amazon API Gateway__ + - ### Features + - Documentation updates for Amazon API Gateway + +# __2.13.35__ __2020-06-11__ +## __AWS IoT Data Plane__ + - ### Features + - As part of this release, we are introducing a new feature called named shadow, which extends the capability of AWS IoT Device Shadow to support multiple shadows for a single IoT device. With this release, customers can store different device state data into different shadows, and as a result access only the required state data when needed and reduce individual shadow size. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Avoid unnecessary copying in `AsyncRequestBody.fromBytes()` + +## __Amazon EC2 Container Service__ + - ### Features + - This release adds support for deleting capacity providers. + +## __Amazon Lex Model Building Service__ + - ### Features + - This change adds the built-in AMAZON.KendraSearchIntent that enables integration with Amazon Kendra. + +## __EC2 Image Builder__ + - ### Features + - EC2 Image Builder now supports specifying a custom working directory for your build and test workflows. In addition, Image Builder now supports defining tags that are applied to ephemeral resources created by EC2 Image Builder as part of the image creation workflow. + +# __2.13.34__ __2020-06-10__ +## __AWS Compute Optimizer__ + - ### Features + - Compute Optimizer supports exporting recommendations to Amazon S3. + +## __AWS Service Catalog__ + - ### Features + - Service Catalog Documentation Update for Integration with AWS Organizations Delegated Administrator feature + +## __AWS Shield__ + - ### Features + - Corrections to the supported format for contact phone numbers and to the description for the create subscription action. + +## __Amazon AppConfig__ + - ### Features + - This release allows customers to choose from a list of predefined deployment strategies while starting deployments. + +## __Amazon Data Lifecycle Manager__ + - ### Features + - Reducing the schedule name of DLM Lifecycle policy from 500 to 120 characters. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - New C6g instances powered by AWS Graviton2 processors and ideal for running advanced, compute-intensive workloads; New R6g instances powered by AWS Graviton2 processors and ideal for running memory-intensive workloads. + +## __Amazon Lightsail__ + - ### Features + - Documentation updates for lightsail + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API removes support for the ArchiveFindings and UnarchiveFindings operations. This release also adds UNKNOWN as an encryption type for S3 bucket metadata. + +## __CodeArtifact__ + - ### Features + - Added support for AWS CodeArtifact. + +# __2.13.33__ __2020-06-09__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Transfer Family__ + - ### Features + - This release updates the API so customers can test use of Source IP to allow, deny or limit access to data in their S3 buckets after integrating their identity provider. 
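+
+The 2.13.35 change above trims an unnecessary copy inside `AsyncRequestBody.fromBytes()`. A brief sketch of where that method typically appears; the bucket, key, and region are placeholders:
+
+```java
+import java.nio.charset.StandardCharsets;
+import software.amazon.awssdk.core.async.AsyncRequestBody;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.model.PutObjectRequest;
+
+public class FromBytesExample {
+    public static void main(String[] args) {
+        S3AsyncClient s3 = S3AsyncClient.builder()
+                .region(Region.US_EAST_1)
+                .build();
+
+        byte[] body = "hello".getBytes(StandardCharsets.UTF_8);
+
+        // fromBytes() turns the array into the asynchronous request payload.
+        s3.putObject(PutObjectRequest.builder()
+                        .bucket("example-bucket") // placeholder
+                        .key("example-key")       // placeholder
+                        .build(),
+                AsyncRequestBody.fromBytes(body))
+          .join();
+
+        s3.close();
+    }
+}
+```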
+ +# __2.13.32__ __2020-06-08__ +## __AWS Cloud Map__ + - ### Features + - Added support for tagging Service and Namespace type resources in Cloud Map + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Update javadoc annotation for AwsBasicCredentials + +## __AWS Shield__ + - ### Features + - This release adds the option for customers to identify a contact name and method that the DDoS Response Team can proactively engage when a Route 53 Health Check that is associated with a Shield protected resource fails. + +# __2.13.31__ __2020-06-05__ +## __AWS Elastic Beanstalk__ + - ### Features + - These API changes enable an IAM user to associate an operations role with an Elastic Beanstalk environment, so that the IAM user can call Elastic Beanstalk actions without having access to underlying downstream AWS services that these actions call. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - This release adds support for DescribeProduct and DescribeProductAsAdmin by product name, DescribeProvisioningArtifact by product name or provisioning artifact name, returning launch paths as part of DescribeProduct output and adds maximum length for provisioning artifact name and provisioning artifact description. + +## __Amazon API Gateway__ + - ### Features + - Amazon API Gateway now allows customers of REST APIs to skip trust chain validation for backend server certificates for HTTP and VPC Link Integration. This feature enables customers to configure their REST APIs to integrate with backends that are secured with certificates vended from private certificate authorities (CA) or certificates that are self-signed. + +## __Amazon CloudFront__ + - ### Features + - Amazon CloudFront adds support for configurable origin connection attempts and origin connection timeout. + +## __Amazon Personalize__ + - ### Features + - [Personalize] Adds ability to create and apply filters. + +## __Amazon Personalize Runtime__ + - ### Features + - [Personalize] Adds ability to apply filter to real-time recommendations + +## __Amazon Pinpoint__ + - ### Features + - This release enables additional functionality for the Amazon Pinpoint journeys feature. With this release, you can send messages through additional channels, including SMS, push notifications, and custom channels. + +## __Amazon SageMaker Runtime__ + - ### Features + - You can now specify the production variant to send the inference request to, when invoking a SageMaker Endpoint that is running two or more variants. + +# __2.13.30__ __2020-06-04__ +## __AWS Elemental MediaPackage VOD__ + - ### Features + - You can now restrict direct access to AWS Elemental MediaPackage by securing requests for VOD content using CDN authorization. With CDN authorization, content requests require a specific HTTP header and authorization code. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWSMarketplace Metering__ + - ### Features + - Documentation updates for meteringmarketplace + +## __Amazon Elastic Compute Cloud__ + - ### Features + - New C5a instances, the latest generation of EC2's compute-optimized instances featuring AMD's 2nd Generation EPYC processors. C5a instances offer up to 96 vCPUs, 192 GiB of instance memory, 20 Gbps in Network bandwidth; New G4dn.metal bare metal instance with 8 NVIDIA T4 GPUs. 
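+
+The 2.13.32 note above touches the `AwsBasicCredentials` javadoc. For context, a short sketch of how that class is normally paired with a static credentials provider; the key values are placeholders, and real applications should usually prefer the default credential chain:
+
+```java
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+
+public class StaticCredentialsExample {
+    public static void main(String[] args) {
+        AwsBasicCredentials credentials =
+                AwsBasicCredentials.create("accessKeyId-placeholder", "secretAccessKey-placeholder");
+
+        S3Client s3 = S3Client.builder()
+                .region(Region.US_EAST_1)
+                .credentialsProvider(StaticCredentialsProvider.create(credentials))
+                .build();
+
+        s3.close();
+    }
+}
+```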
+ +## __Amazon Lightsail__ + - ### Features + - This release adds the BurstCapacityPercentage and BurstCapacityTime instance metrics, which allow you to track the burst capacity available to your instance. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - SSM State Manager support for executing an association only at specified CRON schedule after creating/updating an association. + +# __2.13.29__ __2020-06-03__ +## __AWS Direct Connect__ + - ### Features + - This release supports the virtual interface failover test, which allows you to verify that traffic routes over redundant virtual interfaces when you bring your primary virtual interface out of service. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for the encoding of VP8 or VP9 video in WebM container with Vorbis or Opus audio. + +## __AWS Glue__ + - ### Features + - Adding databaseName in the response for GetUserDefinedFunctions() API. + +## __AWS Identity and Access Management__ + - ### Features + - GenerateServiceLastAccessedDetails will now return ActionLastAccessed details for certain S3 control plane actions + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon ElastiCache__ + - ### Features + - This release improves the Multi-AZ feature in ElastiCache by adding a separate flag and proper validations. + +## __Amazon Elasticsearch Service__ + - ### Features + - Amazon Elasticsearch Service now offers support for cross-cluster search, enabling you to perform searches, aggregations, and visualizations across multiple Amazon Elasticsearch Service domains with a single query or from a single Kibana interface. New feature includes the ability to setup connection, required to perform cross-cluster search, between domains using an approval workflow. + +# __2.13.28__ __2020-06-02__ +## __Amazon GuardDuty__ + - ### Features + - Amazon GuardDuty findings now include S3 bucket details under the resource section if an S3 Bucket was one of the affected resources + +# __2.13.27__ __2020-06-01__ +## __AWS Key Management Service__ + - ### Features + - AWS Key Management Service (AWS KMS): If the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext APIs are called on a CMK in a custom key store (origin == AWS_CLOUDHSM), they return an UnsupportedOperationException. If a call to UpdateAlias causes a customer to exceed the Alias resource quota, the UpdateAlias API returns a LimitExceededException. + +## __AWS Maven Lambda Archetype__ + - ### Features + - Updated the `archetype-lambda` to generate SDK client that uses region from environment variable. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Athena__ + - ### Features + - This release adds support for connecting Athena to your own Apache Hive Metastores in addition to the AWS Glue Data Catalog. For more information, please see https://docs.aws.amazon.com/athena/latest/ug/connect-to-data-source-hive.html + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR now supports encrypting log files with AWS Key Management Service (KMS) customer managed keys. + +## __Amazon FSx__ + - ### Features + - New capabilities to update storage capacity and throughput capacity of your file systems, providing the flexibility to grow file storage and to scale up or down the available performance as needed to meet evolving storage needs over time. 
+ +## __Amazon SageMaker Service__ + - ### Features + - We are releasing HumanTaskUiArn as a new parameter in CreateLabelingJob and RenderUiTemplate which can take an ARN for a system managed UI to render a task. + +## __Amazon WorkLink__ + - ### Features + - Amazon WorkLink now supports resource tagging for fleets. + +# __2.13.26__ __2020-05-28__ +## __AWS Marketplace Catalog Service__ + - ### Features + - AWS Marketplace Catalog now supports accessing initial change payloads with DescribeChangeSet operation. + +## __Amazon QLDB Session__ + - ### Features + - Documentation updates for Amazon QLDB Session + +## __Amazon WorkMail__ + - ### Features + - This release adds support for Amazon WorkMail organization-level retention policies. + +## __Managed Streaming for Kafka__ + - ### Features + - New APIs for upgrading the Apache Kafka version of a cluster and to find out compatible upgrade paths + +# __2.13.25__ __2020-05-27__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon GuardDuty__ + - ### Features + - Documentation updates for GuardDuty + +## __Amazon S3__ + - ### Bugfixes + - Check the `x-amz-content-range` header for `GetObject` responses when the `Content-Range` header is not returned by the service. Fixes [#1209](https://github.com/aws/aws-sdk-java-v2/issues/1209). + +## __Elastic Load Balancing__ + - ### Features + - This release added support for HTTP/2 ALPN preference lists for Network Load Balancers + +# __2.13.24__ __2020-05-26__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Data Lifecycle Manager__ + - ### Features + - Allowing cron expression in the DLM policy creation schedule. + +## __Amazon ElastiCache__ + - ### Features + - Amazon ElastiCache now allows you to use resource based policies to manage access to operations performed on ElastiCache resources. Also, Amazon ElastiCache now exposes ARN (Amazon Resource Names) for ElastiCache resources such as Cache Clusters and Parameter Groups. ARNs can be used to apply IAM policies to ElastiCache resources. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - ebsOptimizedInfo, efaSupported and supportedVirtualizationTypes added to DescribeInstanceTypes API + +## __Amazon Macie__ + - ### Features + - This is a documentation-only update to the Amazon Macie Classic API. This update corrects out-of-date references to the service name. + +## __Amazon QuickSight__ + - ### Features + - Add DataSetArns to QuickSight DescribeDashboard API response. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - The AWS Systems Manager GetOpsSummary API action now supports multiple OpsResultAttributes in the request. Currently, this feature only supports OpsResultAttributes with the following TypeNames: [AWS:EC2InstanceComputeOptimizer] or [AWS:EC2InstanceInformation, AWS:EC2InstanceComputeOptimizer]. These TypeNames can be used along with either or both of the following: [AWS:EC2InstanceRecommendation, AWS:RecommendationSource] + +# __2.13.23__ __2020-05-22__ +## __AWS IoT SiteWise__ + - ### Features + - This release adds support for the standard deviation auto-computed aggregate and improved support for portal logo images in SiteWise. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ +## __Auto Scaling__ + - ### Features + - Documentation updates for Amazon EC2 Auto Scaling + +# __2.13.22__ __2020-05-21__ +## __AWS CodeBuild__ + - ### Features + - CodeBuild adds support for tagging with report groups + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed an issue where a service returning an unknown response event type would cause a failure. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - From this release onwards ProvisionByoipCidr publicly supports IPv6. Updated ProvisionByoipCidr API to support tags for public IPv4 and IPv6 pools. Added NetworkBorderGroup to the DescribePublicIpv4Pools response. + +## __Amazon Simple Storage Service__ + - ### Features + - Deprecates unusable input members bound to Content-MD5 header. Updates example and documentation. + +## __Synthetics__ + - ### Features + - AWS CloudWatch Synthetics now supports configuration of allocated memory for a canary. + +# __2.13.21__ __2020-05-20__ +## __AWS App Mesh__ + - ### Features + - List APIs for all resources now contain additional information: when a resource was created, last updated, and its current version number. + +## __AWS Backup__ + - ### Features + - This release allows customers to enable or disable AWS Backup support for an AWS resource type. This release also includes new APIs, update-region-settings and describe-region-settings, which can be used to opt in to a specific resource type. For all current AWS Backup customers, the default settings enable support for EBS, EC2, StorageGateway, EFS, DDB and RDS resource types. + +## __AWS CodeDeploy__ + - ### Features + - Amazon ECS customers using application and network load balancers can use CodeDeploy BlueGreen hook to invoke a CloudFormation stack update. With this update you can view CloudFormation deployment and target details via existing APIs and use your stack Id to list or delete all deployments associated with the stack. + +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports the ability to ingest the content that is streaming from an AWS Elemental Link device: https://aws.amazon.com/medialive/features/link/. This release also adds support for SMPTE-2038 and input state waiters. + +## __AWS SecurityHub__ + - ### Features + - For findings related to controls, the finding information now includes the reason behind the current status of the control. A new field for the findings original severity allows finding providers to use the severity values from the system they use to assign severity. + +## __Amazon Chime__ + - ### Features + - Amazon Chime enterprise account administrators can now set custom retention policies on chat data in the Amazon Chime application. + +## __Amazon Transcribe Streaming Service__ + - ### Features + - This release adds support for vocabulary filtering in streaming with which you can filter unwanted words from the real-time transcription results. Visit https://docs.aws.amazon.com/transcribe/latest/dg/how-it-works.html to learn more. + +## __Application Auto Scaling__ + - ### Features + - Documentation updates for Application Auto Scaling + +# __2.13.20__ __2020-05-19__ +## __AWS Health APIs and Notifications__ + - ### Features + - Feature: Health: AWS Health added a new field to differentiate Public events from Account-Specific events in the API request and response. Visit https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html to learn more. 
+ +## __AWS SDK for Java v2__ + - ### Features + - Endpoint discovery is now enabled by default for future services that will require it. A new method 'endpointDiscoveryEnabled' has been added to client builders that support endpoint discovery allowing a true or false value to be set. 'enableEndpointDiscovery' has been deprecated on the client builders as it is now superseded by 'endpointDiscoveryEnabled'. + - Updated service endpoint metadata. + +## __Amazon Chime__ + - ### Features + - You can now receive Voice Connector call events through SNS or SQS. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for Federated Authentication via SAML-2.0 in AWS ClientVPN. + +## __Amazon Transcribe Service__ + - ### Features + - Documentation updates for Amazon Transcribe. + +# __2.13.19__ __2020-05-18__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Chime__ + - ### Features + - Amazon Chime now supports redacting chat messages. + +## __Amazon DynamoDB__ + - ### Features + - Documentation updates for dynamodb + +## __Amazon EC2 Container Service__ + - ### Features + - This release adds support for specifying environment files to add environment variables to your containers. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release changes the RunInstances CLI and SDK's so that if you do not specify a client token, a randomly generated token is used for the request to ensure idempotency. + +## __Amazon Macie 2__ + - ### Features + - Documentation updates for Amazon Macie + +## __Amazon QLDB__ + - ### Features + - Amazon QLDB now supports Amazon Kinesis data streams. You can now emit QLDB journal data, via the new QLDB Streams feature, directly to Amazon Kinesis supporting event processing and analytics among related use cases. + +# __2.13.18__ __2020-05-15__ +## __AWS CloudFormation__ + - ### Features + - This release adds support for the following features: 1. DescribeType and ListTypeVersions APIs now output a field IsDefaultVersion, indicating if a version is the default version for its type; 2. Add StackRollbackComplete waiter feature to wait until stack status is UPDATE_ROLLBACK_COMPLETE; 3. Add paginators in DescribeAccountLimits, ListChangeSets, ListStackInstances, ListStackSetOperationResults, ListStackSetOperations, ListStackSets APIs. + +## __AWS Glue__ + - ### Features + - Starting today, you can stop the execution of Glue workflows that are running. AWS Glue workflows are directed acyclic graphs (DAGs) of Glue triggers, crawlers and jobs. Using a workflow, you can design a complex multi-job extract, transform, and load (ETL) activity that AWS Glue can execute and track as single entity. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Allow event structures to be used as operation outputs outside of streaming contexts. + - Fix generation for services that contain operations with the same name as the service. + +## __AWS Security Token Service__ + - ### Features + - API updates for STS + +## __Amazon EC2 Container Registry__ + - ### Features + - This release adds support for specifying an image manifest media type when pushing a manifest to Amazon ECR. + +# __2.13.17__ __2020-05-14__ +## __AWS SDK for Java v2__ + - ### Features + - Expose the `extendedRequestId` from `SdkServiceException`, so it can be provided to support to investigate issues. + - Updated service endpoint metadata. 
+ + - ### Bugfixes + - Fix generation for operations that share an output shape. + - Fix unmarshalling of events when structure member name and shape name mismatch. + - Support event streams that are shared between two operations. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 now supports adding AWS resource tags for associations between VPCs and local gateways, at creation time. + +## __Amazon RDS__ + - ### Features + - Add SourceRegion to CopyDBClusterSnapshot and CreateDBCluster operations. As with CopyDBSnapshot and CreateDBInstanceReadReplica, specifying this field will automatically populate the PresignedURL field with a valid value. + +## __EC2 Image Builder__ + - ### Features + - This release adds a new parameter (SupportedOsVersions) to the Components API. This parameter lists the OS versions supported by a component. + +# __2.13.16__ __2020-05-13__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix a race condition in `FileAsyncResponseTransformer` where the future fails to complete when onComplete event is dispatched on the same thread that executed request + +## __Amazon ElastiCache__ + - ### Features + - Amazon ElastiCache now supports auto-update of ElastiCache clusters after the "recommended apply by date" of service update has passed. ElastiCache will use your maintenance window to schedule the auto-update of applicable clusters. For more information, see https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Self-Service-Updates.html and https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Self-Service-Updates.html + +## __Amazon Macie 2__ + - ### Features + - This release introduces a new major version of the Amazon Macie API. You can use this version of the API to develop tools and applications that interact with the new Amazon Macie. + +# __2.13.15__ __2020-05-12__ +## __AWS IoT SiteWise__ + - ### Features + - Documentation updates for iot-bifrost + +## __Amazon WorkMail__ + - ### Features + - Minor API fixes and updates to the documentation. + +# __2.13.14__ __2020-05-11__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra is now generally available. As part of general availability, we are launching Metrics for query & storage utilization + +## __Amazon CodeGuru Reviewer__ + - ### Features + - Add Bitbucket integration APIs + +## __Amazon Elastic Compute Cloud__ + - ### Features + - M6g instances are our next-generation general purpose instances powered by AWS Graviton2 processors + +# __2.13.13__ __2020-05-08__ +## __AWS Resource Groups Tagging API__ + - ### Features + - Documentation updates for resourcegroupstaggingapi + +## __AWS SDK for Java v2__ + - ### Features + - A helpful error message is now raised when an obviously-invalid region name is given to the SDK, instead of the previous NullPointerException. Fixes [#1642](https://github.com/aws/aws-sdk-java-v2/issues/1642). + - Updated service endpoint metadata. + +## __Amazon GuardDuty__ + - ### Features + - Documentation updates for GuardDuty + +## __Amazon SageMaker Service__ + - ### Features + - This release adds a new parameter (EnableInterContainerTrafficEncryption) to CreateProcessingJob API to allow for enabling inter-container traffic encryption on processing jobs. 
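+
+The 2.13.16 bugfix above concerns `FileAsyncResponseTransformer`, the transformer behind `AsyncResponseTransformer.toFile(...)`. A short sketch of the asynchronous download-to-file pattern it fixes; the bucket, key, and target path are placeholders.
+
+```java
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import software.amazon.awssdk.core.async.AsyncResponseTransformer;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;
+
+public class DownloadToFileExample {
+    public static void main(String[] args) {
+        S3AsyncClient s3 = S3AsyncClient.create();
+
+        GetObjectRequest request = GetObjectRequest.builder()
+                .bucket("my-bucket")
+                .key("reports/2020-05.csv")
+                .build();
+        Path target = Paths.get("/tmp/2020-05.csv");
+
+        // The file transformer streams the response body to disk and completes the
+        // future once the last bytes have been written.
+        GetObjectResponse response =
+                s3.getObject(request, AsyncResponseTransformer.toFile(target)).join();
+
+        System.out.println("Downloaded " + response.contentLength() + " bytes");
+        s3.close();
+    }
+}
+```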
+ +# __2.13.12__ __2020-05-07__ +## __AWS CodeBuild__ + - ### Features + - Add COMMIT_MESSAGE enum for webhook filter types + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon AppConfig__ + - ### Features + - The description of the AWS AppConfig GetConfiguration API action was amended to include important information about calling ClientConfigurationVersion when you configure clients to call GetConfiguration. + +## __Amazon CloudWatch Logs__ + - ### Features + - Amazon CloudWatch Logs now offers the ability to interact with Logs Insights queries via the new PutQueryDefinition, DescribeQueryDefinitions, and DeleteQueryDefinition APIs. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 now adds warnings to identify issues when creating a launch template or launch template version. + +## __Amazon Lightsail__ + - ### Features + - This release adds support for the following options in instance public ports: Specify source IP addresses, specify ICMP protocol like PING, and enable/disable the Lightsail browser-based SSH and RDP clients' access to your instance. + +## __Amazon Route 53__ + - ### Features + - Amazon Route 53 now supports the EU (Milan) Region (eu-south-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This Patch Manager release supports creating patch baselines for Oracle Linux and Debian + +# __2.13.11__ __2020-05-06__ +## __AWS CodeStar connections__ + - ### Features + - Added support for tagging resources in AWS CodeStar Connections + +## __AWS Comprehend Medical__ + - ### Features + - New Batch Ontology APIs for ICD-10 and RxNorm will provide batch capability of linking the information extracted by Comprehend Medical to medical ontologies. The new ontology linking APIs make it easy to detect medications and medical conditions in unstructured clinical text and link them to RxNorm and ICD-10-CM codes respectively. This new feature can help you reduce the cost, time and effort of processing large amounts of unstructured medical text with high accuracy. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.13.10__ __2020-05-05__ +## __AWS SDK for Java v2__ + - ### Features + - Updating dependency version: Jackson 2.10.3 -> 2.10.4, and combining the Jackson-annotations dependency with Jackson. + +## __AWS Support__ + - ### Features + - Documentation updates for support + +## __Amazon DynamoDB__ + - ### Bugfixes + - Tweaked the javadocs for Get/Update, since they were previously wrongly copied over from Delete and mentioned the "delete operation". + +## __Amazon Elastic Compute Cloud__ + - ### Features + - With this release, you can call ModifySubnetAttribute with two new parameters: MapCustomerOwnedIpOnLaunch and CustomerOwnedIpv4Pool, to map a customerOwnedIpv4Pool to a subnet. You will also see these two new fields in the DescribeSubnets response. If your subnet has a customerOwnedIpv4Pool mapped, your network interface will get an auto-assigned customerOwnedIpv4 address when placed onto an instance. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - AWS Systems Manager Parameter Store launches new data type to support aliases in EC2 APIs + +# __2.13.9__ __2020-05-04__ +## __AWS S3 Control__ + - ### Features + - Amazon S3 Batch Operations now supports Object Lock. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata.
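+
+The 2.13.12 Amazon AppConfig entry above stresses passing ClientConfigurationVersion on repeated GetConfiguration calls so that unchanged configuration is not transferred again. A rough sketch of that polling pattern follows; the application, environment, configuration, and client identifiers are placeholders, and the setter names mirror the request fields of the service API.
+
+```java
+import software.amazon.awssdk.services.appconfig.AppConfigClient;
+import software.amazon.awssdk.services.appconfig.model.GetConfigurationResponse;
+
+public class AppConfigPollingExample {
+    public static void main(String[] args) {
+        AppConfigClient appConfig = AppConfigClient.create();
+        String lastVersion = null;
+
+        for (int i = 0; i < 2; i++) {
+            final String previousVersion = lastVersion;
+            GetConfigurationResponse response = appConfig.getConfiguration(r -> r
+                    .application("my-app")
+                    .environment("prod")
+                    .configuration("feature-flags")
+                    .clientId("instance-42")
+                    // Pass the last seen version so unchanged content is not returned again.
+                    .clientConfigurationVersion(previousVersion));
+
+            if (response.content() != null && response.content().asByteArray().length > 0) {
+                System.out.println("New configuration: " + response.content().asUtf8String());
+            } else {
+                System.out.println("Configuration unchanged");
+            }
+            lastVersion = response.configurationVersion();
+        }
+        appConfig.close();
+    }
+}
+```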
+ +## __Amazon API Gateway__ + - ### Features + - Documentation updates for Amazon API Gateway + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Bugfix for handling special characters ':' and '#' in attribute names + +## __Amazon Elastic Compute Cloud__ + - ### Features + - With this release, you can include enriched metadata in Amazon Virtual Private Cloud (Amazon VPC) flow logs published to Amazon CloudWatch Logs or Amazon Simple Storage Service (S3). Prior to this, custom format VPC flow logs enriched with additional metadata could be published only to S3. With this launch, we are also adding additional metadata fields that provide insights about the location such as AWS Region, AWS Availability Zone, AWS Local Zone, AWS Wavelength Zone, or AWS Outpost where the network interface where flow logs are captured exists. + +# __2.13.8__ __2020-05-01__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic File System__ + - ### Features + - Change the TagKeys argument for UntagResource to a URL parameter to address an issue with the Java and .NET SDKs. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Added TimeoutSeconds as part of ListCommands API response. + +# __2.13.7__ __2020-04-30__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for including AFD signaling in MXF wrapper. + +## __AWS IoT__ + - ### Features + - AWS IoT Core released Fleet Provisioning for scalable onboarding of IoT devices to the cloud. This release includes support for customer's Lambda functions to validate devices during onboarding. Fleet Provisioning also allows devices to send Certificate Signing Requests (CSR) to AWS IoT Core for signing and getting a unique certificate. Lastly, AWS IoT Core added a feature to register the same certificate for multiple accounts in the same region without needing to register the certificate authority (CA). + +## __AWS IoT Events__ + - ### Features + - Doc only update to correct APIs and related descriptions + +## __AWS Lambda__ + - ### Features + - Documentation updates for Lambda + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Storage Gateway__ + - ### Features + - Adding support for S3_INTELLIGENT_TIERING as a storage class option + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Made OperationContext a public interface and moved it into public namespace as it was already exposed through another public interface. This will only impact extensions that have been written to reference the old internal-only class that should now switch to the approved stable public interface. + +## __Schemas__ + - ### Features + - Add support for resource policies for Amazon EventBridge Schema Registry, which is now generally available. + +# __2.13.6__ __2020-04-29__ +## __AWS Cloud Map__ + - ### Features + - Documentation updates for servicediscovery + +## __AWS IoT SiteWise__ + - ### Features + - AWS IoT SiteWise is a managed service that makes it easy to collect, store, organize and monitor data from industrial equipment at scale. You can use AWS IoT SiteWise to model your physical assets, processes and facilities, quickly compute common industrial performance metrics, and create fully managed web applications to help analyze industrial equipment data, prevent costly equipment issues, and reduce production inefficiencies. 
+ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS WAF__ + - ### Features + - This release adds a migration API for AWS WAF Classic ("waf" and "waf-regional"). The migration API will parse through your web ACL and generate a CloudFormation template into your S3 bucket. Deploying this template will create an equivalent web ACL under the new AWS WAF ("wafv2"). + +## __AWS WAF Regional__ + - ### Features + - This release adds a migration API for AWS WAF Classic ("waf" and "waf-regional"). The migration API will parse through your web ACL and generate a CloudFormation template into your S3 bucket. Deploying this template will create an equivalent web ACL under the new AWS WAF ("wafv2"). + +## __Amazon Transcribe Service__ + - ### Features + - With this release, you can now use Amazon Transcribe to create medical custom vocabularies and use them in both medical real-time streaming and medical batch transcription jobs. + +# __2.13.5__ __2020-04-28__ +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports several new features: enhanced VQ for H.264 (AVC) output encodes; passthrough of timed metadata and of Nielsen ID3 metadata in fMP4 containers in HLS outputs; the ability to generate a SCTE-35 sparse track without additional segmentation, in Microsoft Smooth outputs; the ability to select the audio from a TS input by specifying the audio track; and conversion of HDR colorspace in the input to an SDR colorspace in the output. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Fix NPE on EnhancedType, created with documentOf, when calling innerToString + +## __Amazon EC2 Container Registry__ + - ### Features + - This release adds support for multi-architecture images, also known as a manifest list + +## __Amazon Kinesis Video Streams__ + - ### Features + - Add "GET_CLIP" to the list of supported API names for the GetDataEndpoint API. + +## __Amazon Kinesis Video Streams Archived Media__ + - ### Features + - Add support for the GetClip API for retrieving media from a video stream in the MP4 format. + +## __Amazon Route 53__ + - ### Features + - Amazon Route 53 now supports the Africa (Cape Town) Region (af-south-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - SSM State Manager support for adding list association filter for Resource Group and manual mode of managing compliance for an association. + +# __2.13.4__ __2020-04-27__ +## __AWS Data Exchange__ + - ### Features + - This release introduces AWS Data Exchange support for configurable encryption parameters when exporting data sets to Amazon S3. + +## __AWS Database Migration Service__ + - ### Features + - Adding minimum replication engine version for describe-endpoint-types api. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + - Various performance improvements. + +## __Access Analyzer__ + - ### Features + - This release adds support for inclusion of S3 Access Point policies in IAM Access Analyzer evaluation of S3 bucket access. IAM Access Analyzer now reports findings for buckets shared through access points and identifies the access point that permits access. + +## __Amazon SageMaker Service__ + - ### Features + - Change to the input, ResourceSpec, changing EnvironmentArn to SageMakerImageArn.
This affects the following preview APIs: CreateDomain, DescribeDomain, UpdateDomain, CreateUserProfile, DescribeUserProfile, UpdateUserProfile, CreateApp and DescribeApp. + +# __2.13.3__ __2020-04-24__ +## __AWS IoT__ + - ### Features + - This release adds a new exception type to the AWS IoT SetV2LoggingLevel API. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed bean-style setter names on serializable builders to match bean-style getter names. + +## __Amazon Data Lifecycle Manager__ + - ### Features + - Enable 1hour frequency in the schedule creation for Data LifeCycle Manager. + +## __Amazon Elastic Inference__ + - ### Features + - This feature allows customers to describe the accelerator types and offerings on any region where Elastic Inference is available. + +# __2.13.2__ __2020-04-23__ +## __AWS Elemental MediaPackage VOD__ + - ### Features + - Adds tagging support for PackagingGroups, PackagingConfigurations, and Assets + +## __AWS Resource Access Manager__ + - ### Features + - AWS Resource Access Manager (RAM) provides a new ListResourceTypes action. This action lets you list the resource types that can be shared using AWS RAM. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Storage Gateway__ + - ### Features + - Added AutomaticTapeCreation APIs + +## __AWS Transfer Family__ + - ### Features + - This release adds support for transfers over FTPS and FTP in and out of Amazon S3, which makes it easy to migrate File Transfer Protocol over SSL (FTPS) and FTP workloads to AWS, in addition to the existing support for Secure File Transfer Protocol (SFTP). + +## __Amazon Kinesis Firehose__ + - ### Features + - You can now deliver streaming data to an Amazon Elasticsearch Service domain in an Amazon VPC. You can now compress streaming data delivered to S3 using Hadoop-Snappy in addition to Gzip, Zip and Snappy formats. + +## __Amazon Pinpoint__ + - ### Features + - This release of the Amazon Pinpoint API enhances support for sending campaigns through custom channels to locations such as AWS Lambda functions or web applications. Campaigns can now use CustomDeliveryConfiguration and CampaignCustomMessage to configure custom channel settings for a campaign. + +## __Amazon Relational Database Service__ + - ### Features + - Adds support for AWS Local Zones, including a new optional parameter AvailabilityZoneGroup for the DescribeOrderableDBInstanceOptions operation. + +## __Application Auto Scaling__ + - ### Features + - This release supports Auto Scaling in Amazon Keyspaces for Apache Cassandra. + +# __2.13.1__ __2020-04-22__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CodeGuru Reviewer__ + - ### Features + - Add support for code review and recommendation feedback APIs. + +## __Amazon Elasticsearch Service__ + - ### Features + - This change adds a new field 'OptionalDeployment' to ServiceSoftwareOptions to indicate whether a service software update is optional or mandatory. If True, it indicates that the update is optional, and the service software is not automatically updated. If False, the service software is automatically updated after AutomatedUpdateDate. 
+ +## __Amazon Redshift__ + - ### Features + - Amazon Redshift support for usage limits + +## __Amazon Transcribe Streaming Service__ + - ### Features + - Adding ServiceUnavailableException as one of the expected exceptions + +## __Firewall Management Service__ + - ### Features + - This release is to support AWS Firewall Manager policy with Organizational Unit scope. + +# __2.13.0__ __2020-04-21__ +## __AWS Cost Explorer Service__ + - ### Features + - Cost Explorer Rightsizing Recommendations integrates with Compute Optimizer and begins offering across instance family rightsizing recommendations, adding to existing support for within instance family rightsizing recommendations. + +## __AWS SDK for Java v2__ + - ### Features + - Bump minor version to '2.13.0-SNAPSHOT' because of upgrade of Jackson version. + - Updated service endpoint metadata. + - Updating dependency version: Jackson 2.10.0 -> 2.10.3, Jackson-annotations 2.9.0 -> 2.10.0. + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR adds support for configuring a managed scaling policy for an Amazon EMR cluster. This enables automatic resizing of a cluster to optimize for job execution speed and reduced cluster cost. + +## __Amazon GuardDuty__ + - ### Features + - AWS GuardDuty now supports using AWS Organizations delegated administrators to create and manage GuardDuty master and member accounts. The feature also allows GuardDuty to be automatically enabled on associated organization accounts. + +## __Amazon Route 53 Domains__ + - ### Features + - You can now programmatically transfer domains between AWS accounts without having to contact AWS Support + +# __2.12.0__ __2020-04-20__ +## __AWS Cost Explorer Service__ + - ### Features + - Cost Categories API is now General Available with new dimensions and operations support. You can map costs by account name, service, and charge type dimensions as well as use contains, starts with, and ends with operations. Cost Categories can also be used in RI and SP coverage reports. + +## __AWS Glue__ + - ### Features + - Added a new ConnectionType "KAFKA" and a ConnectionProperty "KAFKA_BOOTSTRAP_SERVERS" to support Kafka connection. + +## __AWS IoT Events__ + - ### Features + - API update that allows users to add AWS Iot SiteWise actions while creating Detector Model in AWS Iot Events + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon DynamoDB Enhanced Client__ + - ### Features + - The Amazon DynamoDB Enhanced Client is now generally available and provides a natural and intuitive interface for developers to integrate their applications with Amazon DynamoDB by means of an adaptive API that will map inputs and results to and from Java objects modeled by the application, rather than requiring the developers to implement that transformation themselves. + +## __AmazonApiGatewayV2__ + - ### Features + - You can now export an OpenAPI 3.0 compliant API definition file for Amazon API Gateway HTTP APIs using the Export API. + +## __Synthetics__ + - ### Features + - Introducing CloudWatch Synthetics. This is the first public release of CloudWatch Synthetics. + +# __2.11.14__ __2020-04-17__ +## __AWS OpsWorks CM__ + - ### Features + - Documentation updates for opsworkscm + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Fraud Detector__ + - ### Features + - Added support for a new rule engine execution mode. 
Customers will be able to configure their detector versions to evaluate all rules and return outcomes from all 'matched' rules in the GetPrediction API response. Added support for deleting Detectors (DeleteDetector) and Rule Versions (DeleteRuleVersion). + +# __2.11.13__ __2020-04-16__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert now allows you to specify your input captions frame rate for SCC captions sources. + +## __AWS Glue__ + - ### Features + - This release adds support for querying GetUserDefinedFunctions API without databaseName. + +## __AWS IoT Events__ + - ### Features + - API update that allows users to customize event action payloads, and adds support for Amazon DynamoDB actions. + +## __AWS Lambda__ + - ### Features + - Sample code for AWS Lambda operations + +## __AWS MediaTailor__ + - ### Features + - AWS Elemental MediaTailor SDK now allows configuration of Avail Suppression. + +## __AWS Migration Hub__ + - ### Features + - Adding ThrottlingException + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS SecurityHub__ + - ### Features + - Added a new BatchUpdateFindings action, which allows customers to update selected information about their findings. Security Hub customers use BatchUpdateFindings to track their investigation into a finding. BatchUpdateFindings is intended to replace the UpdateFindings action, which is deprecated. + +## __Amazon Augmented AI Runtime__ + - ### Features + - This release updates Amazon Augmented AI ListHumanLoops and StartHumanLoop APIs. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 now supports adding AWS resource tags for placement groups and key pairs, at creation time. The CreatePlacementGroup API will now return placement group information when created successfully. The DeleteKeyPair API now supports deletion by resource ID. + +## __Amazon Import/Export Snowball__ + - ### Features + - An update to the Snowball Edge Storage Optimized device has been launched. Like the previous version, it has 80 TB of capacity for data transfer. Now it has 40 vCPUs, 80 GiB, and a 1 TiB SATA SSD of memory for EC2 compatible compute. The 80 TB of capacity can also be used for EBS-like volumes for AMIs. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds support for Amazon RDS Proxy with PostgreSQL compatibility. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker now supports running training jobs on ml.g4dn and ml.c5n instance types. Amazon SageMaker supports in "IN" operation for Search now. + +## __EC2 Image Builder__ + - ### Features + - This release includes support for additional OS Versions within EC2 Image Builder. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Mark a connection as unreusable if there was a 5xx server error so that a new request will establish a new connection. + +# __2.11.12__ __2020-04-08__ +## __AWS CloudFormation__ + - ### Features + - The OrganizationalUnitIds parameter on StackSet and the OrganizationalUnitId parameter on StackInstance, StackInstanceSummary, and StackSetOperationResultSummary are now reserved for internal use. No data is returned for this parameter. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK adds support for queue hopping. Jobs can now hop from their original queue to a specified alternate queue, based on the maximum wait time that you specify in the job settings. 
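+
+The 2.12.0 entry further above announces general availability of the DynamoDB Enhanced Client. A minimal sketch of the object-mapping workflow it describes, assuming the `dynamodb-enhanced` module is on the classpath and a table named "Customer" already exists.
+
+```java
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient;
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable;
+import software.amazon.awssdk.enhanced.dynamodb.Key;
+import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+
+public class EnhancedClientExample {
+
+    @DynamoDbBean
+    public static class Customer {
+        private String id;
+        private String name;
+
+        @DynamoDbPartitionKey
+        public String getId() { return id; }
+        public void setId(String id) { this.id = id; }
+        public String getName() { return name; }
+        public void setName(String name) { this.name = name; }
+    }
+
+    public static void main(String[] args) {
+        DynamoDbEnhancedClient enhanced = DynamoDbEnhancedClient.builder()
+                .dynamoDbClient(DynamoDbClient.create())
+                .build();
+
+        // Map the annotated bean onto the existing "Customer" table.
+        DynamoDbTable<Customer> customers =
+                enhanced.table("Customer", TableSchema.fromBean(Customer.class));
+
+        Customer customer = new Customer();
+        customer.setId("c-123");
+        customer.setName("Jane Doe");
+
+        customers.putItem(customer);
+        Customer loaded = customers.getItem(Key.builder().partitionValue("c-123").build());
+        System.out.println(loaded.getName());
+    }
+}
+```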
+ +## __AWS Migration Hub Config__ + - ### Features + - Adding ThrottlingException + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Chime__ + - ### Features + - feature: Chime: This release introduces the ability to tag Amazon Chime SDK meeting resources. You can use tags to organize and identify your resources for cost allocation. + +## __Amazon CodeGuru Profiler__ + - ### Features + - CodeGuruProfiler adds support for resource based authorization to submit profile data. + +## __Amazon EC2 Container Service__ + - ### Features + - This release provides native support for specifying Amazon EFS file systems as volumes in your Amazon ECS task definitions. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release provides the ability to include tags in EC2 event notifications. + +# __2.11.11__ __2020-04-07__ +## __AWS MediaConnect__ + - ### Features + - You can now send content from your MediaConnect flow to your virtual private cloud (VPC) without going over the public internet. + +## __Amazon API Gateway__ + - ### Features + - Documentation updates for Amazon API Gateway. + +## __Amazon CodeGuru Reviewer__ + - ### Features + - API updates for CodeGuruReviewer + +# __2.11.10__ __2020-04-06__ +## __AWS Elastic Beanstalk__ + - ### Features + - This release adds a new action, ListPlatformBranches, and updates two actions, ListPlatformVersions and DescribePlatformVersion, to support the concept of Elastic Beanstalk platform branches. + +## __AWS Identity and Access Management__ + - ### Features + - Documentation updates for AWS Identity and Access Management (IAM). + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Chime__ + - ### Features + - Amazon Chime proxy phone sessions let you provide two users with a shared phone number to communicate via voice or text for up to 12 hours without revealing personal phone numbers. When users call or message the provided phone number, they are connected to the other party and their private phone numbers are replaced with the shared number in Caller ID. + +## __Amazon Transcribe Service__ + - ### Features + - This release adds support for batch transcription jobs within Amazon Transcribe Medical. + +# __2.11.9__ __2020-04-03__ +## __AWS RoboMaker__ + - ### Features + - Added support for limiting simulation unit usage, giving more predictable control over simulation cost + +## __AWS S3__ + - ### Features + - Allow DefaultS3Presigner.Builder to take a custom S3Configuration + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Personalize Runtime__ + - ### Features + - Amazon Personalize: Add new response field "score" to each item returned by GetRecommendations and GetPersonalizedRanking (HRNN-based recipes only) + +# __2.11.8__ __2020-04-02__ +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports Automatic Input Failover. This feature provides resiliency upstream of the channel, before ingest starts. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CloudWatch__ + - ### Features + - Amazon CloudWatch Contributor Insights adds support for tags and tagging on resource creation. + +## __Amazon GameLift__ + - ### Features + - Public preview of GameLift FleetIQ as a standalone feature. GameLift FleetIQ makes it possible to use low-cost Spot instances by limiting the chance of interruptions affecting game sessions. 
FleetIQ is a feature of the managed GameLift service, and can now be used with game hosting in EC2 Auto Scaling groups that you manage in your own account. + +## __Amazon Redshift__ + - ### Features + - Documentation updates for redshift + +## __Amazon Relational Database Service__ + - ### Features + - Documentation updates for RDS: creating read replicas is now supported for SQL Server DB instances + +# __2.11.7__ __2020-04-01__ +## __AWS IoT__ + - ### Features + - This release introduces Dimensions for AWS IoT Device Defender. Dimensions can be used in Security Profiles to collect and monitor fine-grained metrics. + +## __AWS MediaConnect__ + - ### Features + - You can now send content from your virtual private cloud (VPC) to your MediaConnect flow without going over the public internet. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.11.6__ __2020-03-31__ +## __AWS Elemental MediaStore__ + - ### Features + - This release adds support for CloudWatch Metrics. You can now set a policy on your container to dictate which metrics MediaStore sends to CloudWatch. + +## __AWS Glue__ + - ### Features + - Add two enums for MongoDB connection: Added "CONNECTION_URL" to "ConnectionPropertyKey" and added "MONGODB" to "ConnectionType" + +## __AWS Lambda__ + - ### Features + - AWS Lambda now supports .NET Core 3.1 + +## __AWS OpsWorks CM__ + - ### Features + - Documentation updates for OpsWorks-CM CreateServer values. + +## __AWS Organizations__ + - ### Features + - Documentation updates for AWS Organizations + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Storage Gateway__ + - ### Features + - Adding audit logging support for SMB File Shares + +## __AWS WAFV2__ + - ### Features + - Added support for AWS Firewall Manager for WAFv2 and PermissionPolicy APIs for WAFv2. + +## __Amazon AppConfig__ + - ### Features + - This release adds an event log to deployments. In the case of a deployment rollback, the event log details the rollback reason. + +## __Amazon Detective__ + - ### Features + - Removing the notes that Detective is in preview, in preparation for the Detective GA release. + +## __Amazon Elastic Inference__ + - ### Features + - This release includes improvements for the Amazon Elastic Inference service. + +## __Amazon Pinpoint__ + - ### Features + - This release of the Amazon Pinpoint API introduces MMS support for SMS messages. + +## __Amazon Rekognition__ + - ### Features + - This release adds DeleteProject and DeleteProjectVersion APIs to Amazon Rekognition Custom Labels. + +## __Firewall Management Service__ + - ### Features + - This release contains FMS wafv2 support. + +# __2.11.5__ __2020-03-30__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Access Analyzer__ + - ### Features + - This release adds support for the creation and management of IAM Access Analyzer analyzers with type organization. An analyzer with type organization continuously monitors all supported resources within the AWS organization and reports findings when they allow access from outside the organization. + +# __2.11.4__ __2020-03-27__ +## __AWS Global Accelerator__ + - ### Features + - This update adds an event history to the ListByoipCidr API call. This enables you to see the changes that you've made for an IP address range that you bring to AWS Global Accelerator through bring your own IP address (BYOIP). 
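+
+The 2.11.9 entry above allows `DefaultS3Presigner.Builder` to accept a custom `S3Configuration`. The sketch below presigns a GET with path-style access enabled; the `serviceConfiguration(...)` method name, bucket, and key are assumptions for illustration.
+
+```java
+import java.time.Duration;
+import software.amazon.awssdk.services.s3.S3Configuration;
+import software.amazon.awssdk.services.s3.presigner.S3Presigner;
+import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest;
+import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest;
+
+public class PresignWithConfigurationExample {
+    public static void main(String[] args) {
+        // S3-specific behaviour (for example path-style access) now applies to presigned URLs too.
+        S3Presigner presigner = S3Presigner.builder()
+                .serviceConfiguration(S3Configuration.builder()
+                        .pathStyleAccessEnabled(true)
+                        .build())
+                .build();
+
+        PresignedGetObjectRequest presigned = presigner.presignGetObject(
+                GetObjectPresignRequest.builder()
+                        .signatureDuration(Duration.ofMinutes(15))
+                        .getObjectRequest(get -> get.bucket("my-bucket").key("data/object.txt"))
+                        .build());
+
+        System.out.println("Presigned URL: " + presigned.url());
+        presigner.close();
+    }
+}
+```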
+ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Remove the `LimitExceededException` as a throttling error as it seems many services don't treat it as a throttling error. + +## __AWS Service Catalog__ + - ### Features + - Added "LocalRoleName" as an acceptable Parameter for Launch type in CreateConstraint and UpdateConstraint APIs + +## __AWSKendraFrontendService__ + - ### Features + - The Amazon Kendra Microsoft SharePoint data source now supports include and exclude regular expressions and change log features. Include and exclude regular expressions enable you to provide a list of regular expressions to match the display URL of SharePoint documents to either include or exclude documents respectively. When you enable the changelog feature it enables Amazon Kendra to use the SharePoint change log to determine which documents to update in the index. + +# __2.11.3__ __2020-03-26__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS SecurityHub__ + - ### Features + - Security Hub has now made it easier to opt out of default standards when you enable Security Hub. We added a new Boolean parameter to EnableSecurityHub called EnableDefaultStandards. If that parameter is true, Security Hub's default standards are enabled. A new Boolean parameter for standards, EnabledByDefault, indicates whether a standard is a default standard. Today, the only default standard is CIS AWS Foundations Benchmark v1.2. Additional default standards will be added in the future.To learn more, visit our documentation on the EnableSecurityHub API action. + +## __Amazon FSx__ + - ### Features + - This release includes two changes: a new lower-cost, storage type called HDD (Hard Disk Drive), and a new generation of the Single-AZ deployment type called Single AZ 2. The HDD storage type can be selected on Multi AZ 1 and Single AZ 2 deployment types. + +## __Amazon SageMaker Service__ + - ### Features + - This release updates Amazon Augmented AI CreateFlowDefinition API and DescribeFlowDefinition response. + +# __2.11.2__ __2020-03-25__ +## __AWS Cost Explorer Service__ + - ### Features + - Customers can now receive Savings Plans recommendations at the member (linked) account level. + +## __AWS SDK for Java v2__ + - ### Features + - Added a `defaultProfileFile` and `defaultProfileName` option to the client override configuration. Setting this configuration value is equivalent to setting the environment or system properties for the profile file and profile name. Specifically, it sets the default profile file and profile name used by the client. + - Reduced the number of times the profile file configuration is read from disk on client creation from 3-5 to 1. + - Updated service endpoint metadata. + +## __AWS X-Ray__ + - ### Features + - GetTraceSummaries - Now provides additional root cause attribute ClientImpacting which indicates whether root cause impacted trace client. + +## __Amazon CloudWatch Application Insights__ + - ### Features + - Amazon CloudWatch Application Insights for .NET and SQL Server now integrates with Amazon CloudWatch Events (AWS CodeDeploy, AWS Health and Amazon EC2 state changes). This feature enables customers to view events related to problems detected by CloudWatch Application Insights, and reduce mean-time-to-resolution (MTTR). 
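+
+The 2.11.2 SDK entry above adds `defaultProfileFile` and `defaultProfileName` to the client override configuration. A small sketch pointing a single client at a non-default profile file; the file path and profile name are placeholders.
+
+```java
+import java.nio.file.Paths;
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.profiles.ProfileFile;
+import software.amazon.awssdk.services.s3.S3Client;
+
+public class ProfileOverrideExample {
+    public static void main(String[] args) {
+        ProfileFile profileFile = ProfileFile.builder()
+                .content(Paths.get("/opt/app/aws-profiles"))
+                .type(ProfileFile.Type.CONFIGURATION)
+                .build();
+
+        // Equivalent to setting the profile file/name via environment or system properties,
+        // but scoped to this one client.
+        S3Client s3 = S3Client.builder()
+                .overrideConfiguration(ClientOverrideConfiguration.builder()
+                        .defaultProfileFile(profileFile)
+                        .defaultProfileName("staging")
+                        .build())
+                .build();
+
+        s3.listBuckets().buckets().forEach(b -> System.out.println(b.name()));
+        s3.close();
+    }
+}
+```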
+ +## __Amazon Detective__ + - ### Features + - The new ACCEPTED_BUT_DISABLED member account status indicates that a member account that accepted the invitation is blocked from contributing data to the behavior graph. The reason is provided in the new DISABLED_REASON property. The new StartMonitoringMember operation enables a blocked member account. + +## __Amazon DynamoDB__ + - ### Features + - When endpoint discovery is enabled, the endpoint discovery process is now initialized with the first request, instead of 60 seconds after the first request. + + - ### Bugfixes + - Fixed an issue that could cause a null-pointer-exception when using anonymous credentials with endpoint discovery enabled. + - Fixed an issue where endpoint discovery configuration specified in the profile file was being ignored. + +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Bugfixes + - Performance improvements. + +## __Amazon Elasticsearch Service__ + - ### Features + - Adding support for customer packages (dictionary files) to Amazon Elasticsearch Service + +## __Amazon Managed Blockchain__ + - ### Features + - Amazon Managed Blockchain now has support to publish Hyperledger Fabric peer node, chaincode, and certificate authority (CA) logs to Amazon CloudWatch Logs. + +## __Amazon S3__ + - ### Bugfixes + - Fixed a bug where explicitly disabling use-arn-region on S3Configuration would have lower priority than the environment variable, system property or profile property. + +# __2.11.1__ __2020-03-24__ +## __AWS Organizations__ + - ### Features + - Introduces actions for giving a member account administrative Organizations permissions for an AWS service. You can run this action only for AWS services that support this feature. + +## __AWS RDS DataService__ + - ### Features + - Documentation updates for rds-data + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Athena__ + - ### Features + - Documentation updates for Athena, including QueryExecutionStatus QUEUED and RUNNING states. QUEUED now indicates that the query has been submitted to the service. RUNNING indicates that the query is in execution phase. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Adding new error codes: Ec2SubnetInvalidConfiguration and NodeCreationFailure for Nodegroups in EKS + +# __2.11.0__ __2020-03-23__ +## __AWS SDK for Java v2__ + - ### Features + - Bump minor version to '2.11.0-SNAPSHOT' because of [#1692](https://github.com/aws/aws-sdk-java-v2/issues/1692) + - Updating dependency version: netty 4.1.42.Final -> 4.1.46.Final (contains the fix for reducing heap usage for netty client) + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Adding new error code IamLimitExceeded for Nodegroups in EKS + +## __Amazon Route 53__ + - ### Features + - Documentation updates for Route 53. + +## __AmazonApiGatewayV2__ + - ### Features + - Documentation updates to reflect that the default timeout for integrations is now 30 seconds for HTTP APIs. + +# __2.10.91__ __2020-03-20__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - Added "productId" and "portfolioId" to responses from CreateConstraint, UpdateConstraint, ListConstraintsForPortfolio, and DescribeConstraint APIs + +# __2.10.90__ __2020-03-19__ +## __AWS Certificate Manager__ + - ### Features + - AWS Certificate Manager documentation updated on API calls ImportCertificate and ListCertificate. 
Specific updates included input constraints, private key size for import and next token size for list. + +## __AWS Outposts__ + - ### Features + - Documentation updates for AWS Outposts. + +# __2.10.89__ __2020-03-18__ +## __AWS MediaConnect__ + - ### Features + - Feature adds the ability for a flow to have multiple redundant sources that provides resiliency to a source failing. The new APIs added to enable the feature are, AddFlowSources, RemoveFlowSource and UpdateFlow. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Personalize__ + - ### Features + - [Personalize] Adds support for returning hyperparameter values of the best performing model in a HPO job. + +## __Amazon Relational Database Service__ + - ### Features + - Updated the MaxRecords type in DescribeExportTasks to Integer. + +# __2.10.88__ __2020-03-17__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for: AV1 encoding in File Group MP4, DASH and CMAF DASH outputs; PCM/WAV audio output in MPEG2-TS containers; and Opus audio in Webm inputs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Fix an issue where the signing key is created only once at the start of the request for event streaming requests. This causes requests that span two or more days to have signing errors once the date changes because the signing key was derived only once using the date at the beginning of the request. + +# __2.10.87__ __2020-03-16__ +## __AWS S3 Control__ + - ### Features + - Amazon S3 now supports Batch Operations job tagging. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Cognito Identity Provider__ + - ### Features + - Additional response field "CompromisedCredentialsDetected" added to AdminListUserAuthEvents. + +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - The enhanced DDB client table schema now supports custom AttributeConverterProviders, and StaticAttribute can take individual AttributeConverter to override default attribute converter behavior. + +## __Amazon EC2 Container Service__ + - ### Features + - This release adds the ability to update the task placement strategy and constraints for Amazon ECS services. + +## __Amazon ElastiCache__ + - ### Features + - Amazon ElastiCache now supports Global Datastore for Redis. Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. You can create, modify and describe a Global Datastore, as well as add or remove regions from your Global Datastore and promote a region as primary in Global Datastore. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Resource data sync for AWS Systems Manager Inventory now includes destination data sharing. This feature enables you to synchronize inventory data from multiple AWS accounts into a central Amazon S3 bucket. To use this feature, all AWS accounts must be listed in AWS Organizations. + +# __2.10.86__ __2020-03-13__ +## __Amazon AppConfig__ + - ### Features + - This release adds S3 as a configuration source provider. 
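+
+The 2.10.87 AWS S3 Control entry above adds tagging for Batch Operations jobs. A rough sketch of attaching a tag to an existing job; the account ID, job ID, and tag values are placeholders, and the setter names are assumed from the PutJobTagging API's AccountId, JobId, and Tags parameters.
+
+```java
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3control.S3ControlClient;
+import software.amazon.awssdk.services.s3control.model.S3Tag;
+
+public class BatchJobTaggingExample {
+    public static void main(String[] args) {
+        S3ControlClient s3Control = S3ControlClient.builder().region(Region.US_EAST_1).build();
+
+        // Attach a cost-allocation tag to an existing Batch Operations job.
+        s3Control.putJobTagging(r -> r
+                .accountId("111122223333")
+                .jobId("00e123a4-example-job-id")
+                .tags(S3Tag.builder().key("department").value("analytics").build()));
+
+        s3Control.close();
+    }
+}
+```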
+ +# __2.10.85__ __2020-03-12__ +## __AWS IoT__ + - ### Features + - As part of this release, we are extending capability of AWS IoT Rules Engine to support IoT Cloudwatch log action. The IoT Cloudwatch log rule action lets you send messages from IoT sensors and applications to Cloudwatch logs for troubleshooting and debugging. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS SecurityHub__ + - ### Features + - The AWS Security Finding Format is being augmented with the following changes. 21 new resource types without corresponding details objects are added. Another new resource type, AwsS3Object, has an accompanying details object. Severity.Label is a new string field that indicates the severity of a finding. The available values are: INFORMATIONAL, LOW, MEDIUM, HIGH, CRITICAL. The new string field Workflow.Status indicates the status of the investigation into a finding. The available values are: NEW, NOTIFIED, RESOLVED, SUPPRESSED. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for EC2 + +## __Amazon Lex Model Building Service__ + - ### Features + - Amazon Lex now supports tagging for bots, bot aliases and bot channels. + +## __AmazonApiGatewayV2__ + - ### Features + - Amazon API Gateway HTTP APIs is now generally available. HTTP APIs offer the core functionality of REST API at up to 71% lower price compared to REST API, 60% lower p99 latency, and is significantly easier to use. As part of general availability, we added new features to route requests to private backends such as private ALBs, NLBs, and IP/ports. We also brought over a set of features from REST API such as Stage Variables, and Stage/Route level throttling. Custom domain names can also now be used with both REST And HTTP APIs. + +# __2.10.84__ __2020-03-11__ +## __Amazon Elastic File System__ + - ### Features + - Documentation updates for elasticfilesystem + +## __Amazon Redshift__ + - ### Features + - Amazon Redshift now supports operations to pause and resume a cluster on demand or on a schedule. + +# __2.10.83__ __2020-03-10__ +## __AWS IoT Events__ + - ### Features + - API update that adds a new parameter, durationExpression, to SetTimerAction, and deprecates seconds + +## __AWS Marketplace Commerce Analytics__ + - ### Features + - Change the disbursement data set to look past 31 days instead until the beginning of the month. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Reverts a recent change from 2.10.70 where the json protocol type was changed to application/json, this is now back to application/x-amz-json-1.1. + +## __AWSServerlessApplicationRepository__ + - ### Features + - AWS Serverless Application Repository now supports sharing applications privately with AWS Organizations. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for EC2 + +## __Amazon Transcribe Service__ + - ### Features + - Amazon Transcribe's Automatic Content Redaction feature enables you to automatically redact sensitive personally identifiable information (PII) from transcription results. It replaces each instance of an identified PII utterance with a [PII] tag in the transcript. + +# __2.10.82__ __2020-03-09__ +## __AWS Database Migration Service__ + - ### Features + - Added new settings for Kinesis target to include detailed transaction info; to capture table DDL details; to use single-line unformatted json, which can be directly queried by AWS Athena if data is streamed into S3 through AWS Kinesis Firehose. 
Added CdcInsertsAndUpdates in S3 target settings to allow capture ongoing insertions and updates only. + +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports the ability to configure the Preferred Channel Pipeline for channels contributing to a Multiplex. + +## __AWS SDK for Java v2__ + - ### Features + - Added support for "retry modes". A retry mode allows configuring multiple SDK parameters at once using default retry profiles, some of which are standardized between AWS SDK languages. See RetryMode javadoc for more information. + - Added the ability to configure or disable the default retry throttling behavior of the SDK that 'kicks in' during a large volume of retriable service call errors. This behavior can now be configured via `RetryPolicy.retryCapacityCondition`. + + - ### Bugfixes + - Fixed an issue where specifying your own retry policy would override AWS and service-specific retry conditions. By default, all retry policies now have AWS and service-specific retry conditions added. This can be disabled via the new `RetryPolicy.furtherRefinementsAllowed(false)`. + - Fixed an issue where the retry condition returned by `RetryPolicy.retryCondition` differed from the one specified by `RetryPolicy.Builder.retryCondition`. The old value can be accessed via the new `RetryPolicy.aggregateRetryCondition`. + - Use the last seen HTTP/1.1 header value for headers defined to only appear once in an HTTP message instead of merging them all into a list. The order in which header values are inspected is: headers set by the request marshaller, overridden headers set on the client, then finally overridden headers set on the SDK request object. See https://tools.ietf.org/html/rfc2616#section-4.2 for more information. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon Virtual Private Cloud (VPC) NAT Gateway adds support for tagging on resource creation. + +# __2.10.81__ __2020-03-06__ +## __AWS App Mesh__ + - ### Features + - App Mesh now supports sharing a Mesh with other AWS accounts. Customers can use AWS Resource Access Manager to share their Mesh with other accounts in their organization to connection applications within a single service mesh. See https://docs.aws.amazon.com/app-mesh/latest/userguide/sharing.html for details. + +## __AWS RoboMaker__ + - ### Features + - Added support for streaming a GUI from robot and simulation applications + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Signer__ + - ### Features + - This release enables signing image format override in PutSigningProfile requests, adding two more enum fields, JSONEmbedded and JSONDetached. This release also extends the length limit of SigningProfile name from 20 to 64. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release provides customers with a self-service option to enable Local Zones. + +## __Amazon GuardDuty__ + - ### Features + - Amazon GuardDuty findings now include the OutpostArn if the finding is generated for an AWS Outposts EC2 host. + +## __Netty NIO Http Client__ + - ### Bugfixes + - Expand Http2 connection-level flow control window when a new stream is acquired on that connection so that the connection-level window size is proportional to the number of streams. + +# __2.10.80__ __2020-03-05__ +## __AWS OpsWorks CM__ + - ### Features + - Updated the Tag regex pattern to align with AWS tagging APIs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
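+
+The 2.10.82 SDK entry above introduces retry modes and the `RetryPolicy.retryCapacityCondition` setting. Below is a sketch of opting one client into the standard retry mode; `RetryPolicy.forRetryMode(...)` is assumed to be the factory that maps a mode to a policy, and the same choice can normally also be made globally through the `retry_mode` profile property or the `AWS_RETRY_MODE` environment variable.
+
+```java
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.core.retry.RetryMode;
+import software.amazon.awssdk.core.retry.RetryPolicy;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+
+public class RetryModeExample {
+    public static void main(String[] args) {
+        // Derive the whole retry configuration from the standardized STANDARD mode instead of
+        // tuning max attempts, backoff, and retry conditions individually.
+        // Retry throttling during persistent failures can be tuned or disabled via
+        // RetryPolicy.Builder#retryCapacityCondition, as noted in the 2.10.82 entry.
+        RetryPolicy standardRetries = RetryPolicy.forRetryMode(RetryMode.STANDARD);
+
+        DynamoDbClient dynamoDb = DynamoDbClient.builder()
+                .overrideConfiguration(ClientOverrideConfiguration.builder()
+                        .retryPolicy(standardRetries)
+                        .build())
+                .build();
+
+        dynamoDb.listTables().tableNames().forEach(System.out::println);
+        dynamoDb.close();
+    }
+}
+```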
+ +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - Adds javadoc to operation methods and request/response objects. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - You can now create AWS Client VPN Endpoints with a specified VPC and Security Group. Additionally, you can modify these attributes when modifying the endpoint. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Amazon EKS now supports adding a KMS key to your cluster for envelope encryption of Kubernetes secrets. + +## __Amazon GuardDuty__ + - ### Features + - Add a new finding field for EC2 findings indicating the instance's local IP address involved in the threat. + +# __2.10.79__ __2020-03-04__ +## __Amazon Pinpoint__ + - ### Features + - This release of the Amazon Pinpoint API introduces support for integrating recommender models with email, push notification, and SMS message templates. You can now use these types of templates to connect to recommender models and add personalized recommendations to messages that you send from campaigns and journeys. + +# __2.10.78__ __2020-03-03__ +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon VPC Flow Logs adds support for tags and tagging on resource creation. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Fix an issue where the Netty client was prematurely considering an HTTP/2 request body as sent, but was still in the process of being transferred to the remote endpoint. + +# __2.10.77__ __2020-03-02__ +## __AWS Comprehend Medical__ + - ### Features + - New Time Expression feature, part of DetectEntitiesV2 API will provide temporal relations to existing NERe entities such as Medication, Test, Treatment, Procedure and Medical conditions. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CloudWatch__ + - ### Features + - Introducing Amazon CloudWatch Composite Alarms + +# __2.10.76__ __2020-02-28__ +## __AWS Config__ + - ### Features + - Correcting list of supported resource types. + +# __2.10.75__ __2020-02-28__ +## __AWS App Mesh__ + - ### Features + - App Mesh now supports Transport Layer Security (TLS) between Virtual Nodes in a Mesh. Customers can use managed certificates from an AWS Certificate Manager Private Certificate Authority or bring their own certificates from the local file system to encrypt traffic between their workloads. See https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual-node-tls.html for details. + +## __AWS Config__ + - ### Features + - Accepts a structured query language (SQL) SELECT command and an aggregator name, performs the corresponding search on resources aggregated by the aggregator, and returns resource configurations matching the properties. + +## __AWS Glue__ + - ### Features + - AWS Glue adds resource tagging support for Machine Learning Transforms and adds a new API, ListMLTransforms to support tag filtering. With this feature, customers can use tags in AWS Glue to organize and control access to Machine Learning Transforms. + +## __Access Analyzer__ + - ### Features + - This release includes improvements and fixes bugs for the IAM Access Analyzer feature. + +## __Amazon Augmented AI Runtime__ + - ### Features + - This release updates Amazon Augmented AI ListHumanLoops API, DescribeHumanLoop response, StartHumanLoop response and type names of SDK fields. 
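+
+The 2.10.77 Amazon CloudWatch entry above introduces composite alarms. A minimal sketch that combines two existing metric alarms into one composite alarm; the alarm names and rule expression are illustrative.
+
+```java
+import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
+
+public class CompositeAlarmExample {
+    public static void main(String[] args) {
+        CloudWatchClient cloudWatch = CloudWatchClient.create();
+
+        // Fire only when both underlying metric alarms are in ALARM state.
+        cloudWatch.putCompositeAlarm(r -> r
+                .alarmName("service-degraded")
+                .alarmRule("ALARM(\"cpu-high\") AND ALARM(\"error-rate-high\")")
+                .actionsEnabled(true));
+
+        cloudWatch.close();
+    }
+}
+```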
+ +## __Amazon CodeGuru Profiler__ + - ### Features + - Documentation updates for Amazon CodeGuru Profiler + +## __Amazon QuickSight__ + - ### Features + - Added SearchDashboards API that allows listing of dashboards that a specific user has access to. + +## __Amazon WorkDocs__ + - ### Features + - Documentation updates for workdocs + +## __Elastic Load Balancing__ + - ### Features + - Added a target group attribute to support sticky sessions for Network Load Balancers. + +# __2.10.74__ __2020-02-27__ +## __AWS Global Accelerator__ + - ### Features + - This release adds support for adding tags to accelerators and bringing your own IP address to AWS Global Accelerator (BYOIP). + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Lightsail__ + - ### Features + - Adds support to create notification contacts in Amazon Lightsail, and to create instance, database, and load balancer metric alarms that notify you based on the value of a metric relative to a threshold that you specify. + +# __2.10.73__ __2020-02-26__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS SecurityHub__ + - ### Features + - Security Hub has added to the DescribeProducts API operation a new response field called IntegrationTypes. The IntegrationTypes field lists the types of actions that a product performs relative to Security Hub such as send findings to Security Hub and receive findings from Security Hub. + +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - Added the BeanTableSchema implementation of TableSchema that allows a TableSchema to be instantiated from an annotated Java bean class which can then be used with the DynamoDB Enhanced Client. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release changes the RunInstances CLI and SDK's so that if you do not specify a client token, a randomly generated token is used for the request to ensure idempotency. + +## __Amazon SageMaker Service__ + - ### Features + - SageMaker UpdateEndpoint API now supports retained variant properties, e.g., instance count, variant weight. SageMaker ListTrials API filter by TrialComponentName. Make ExperimentConfig name length limits consistent with CreateExperiment, CreateTrial, and CreateTrialComponent APIs. + +## __Amazon Transcribe Service__ + - ### Features + - Amazon Transcribe's Automatic Content Redaction feature enables you to automatically redact sensitive personally identifiable information (PII) from transcription results. It replaces each instance of an identified PII utterance with a [PII] tag in the transcript. + +# __2.10.72__ __2020-02-25__ +## __AWS Outposts__ + - ### Features + - This release adds DeleteSite and DeleteOutpost. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Secrets Manager__ + - ### Features + - This release increases the maximum allowed size of SecretString or SecretBinary from 10KB to 64KB in the CreateSecret, UpdateSecret, PutSecretValue and GetSecretValue APIs. + +## __AWS Step Functions__ + - ### Features + - This release adds support for CloudWatch Logs for Standard Workflows. + +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - Improves discoverability by adding consumer-style methods for all client, table and index operations. + +## __Managed Streaming for Kafka__ + - ### Features + - Amazon MSK has added support for Broker Log delivery to CloudWatch, S3, and Firehose. 
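+
+The Elastic Load Balancing entry under 2.10.75 above adds a target group attribute for sticky sessions on Network Load Balancers. The sketch below enables it through ModifyTargetGroupAttributes; the attribute keys (`stickiness.enabled`, `stickiness.type`) and the target group ARN are assumptions for illustration.
+
+```java
+import software.amazon.awssdk.services.elasticloadbalancingv2.ElasticLoadBalancingV2Client;
+import software.amazon.awssdk.services.elasticloadbalancingv2.model.TargetGroupAttribute;
+
+public class NlbStickinessExample {
+    public static void main(String[] args) {
+        ElasticLoadBalancingV2Client elb = ElasticLoadBalancingV2Client.create();
+
+        // Turn on source-IP stickiness for targets behind a Network Load Balancer.
+        elb.modifyTargetGroupAttributes(r -> r
+                .targetGroupArn("arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/my-tg/abc123")
+                .attributes(
+                        TargetGroupAttribute.builder().key("stickiness.enabled").value("true").build(),
+                        TargetGroupAttribute.builder().key("stickiness.type").value("source_ip").build()));
+
+        elb.close();
+    }
+}
+```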
+ +# __2.10.71__ __2020-02-24__ +## __AWS IoT Events__ + - ### Features + - Documentation updates for iotcolumbo + +## __Amazon CloudWatch Events__ + - ### Features + - This release allows you to create and manage tags for event buses. + +## __Amazon DocumentDB with MongoDB compatibility__ + - ### Features + - Documentation updates for docdb + +## __Amazon EventBridge__ + - ### Features + - This release allows you to create and manage tags for event buses. + +## __Amazon FSx__ + - ### Features + - Announcing persistent file systems for Amazon FSx for Lustre that are ideal for longer-term storage and workloads, and a new generation of scratch file systems that offer higher burst throughput for spiky workloads. + +## __Amazon Import/Export Snowball__ + - ### Features + - AWS Snowball adds a field for entering your GSTIN when creating AWS Snowball jobs in the Asia Pacific (Mumbai) region. + +# __2.10.70__ __2020-02-21__ +## __AWS WAFV2__ + - ### Features + - Documentation updates for AWS WAF (wafv2) to correct the guidance for associating a web ACL to a CloudFront distribution. + +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - Improves discoverability by adding consumer-style methods for all client, table and index operations. + +## __Amazon Redshift__ + - ### Features + - Extend elastic resize to support resizing clusters to different instance types. + +## __EC2 Image Builder__ + - ### Features + - This release of EC2 Image Builder increases the maximum policy document size for Image Builder resource-based policy APIs. + +# __2.10.69__ __2020-02-20__ +## __AWS Savings Plans__ + - ### Features + - Added support for AWS Lambda in Compute Savings Plans + +## __Amazon AppConfig__ + - ### Features + - This release adds exponential growth type support for deployment strategies. + +## __Amazon Pinpoint__ + - ### Features + - As of this release of the Amazon Pinpoint API, the Title property is optional for the CampaignEmailMessage object. + +# __2.10.68__ __2020-02-19__ +## __AWS Lambda__ + - ### Features + - AWS Lambda now supports Ruby 2.7 + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - "ListPortfolioAccess" API now has a new optional parameter "OrganizationParentId". When it is provided and if the portfolio with the "PortfolioId" given was shared with an organization or organizational unit with "OrganizationParentId", all accounts in the organization sub-tree under parent which inherit an organizational portfolio share will be listed, rather than all accounts with external shares. To accommodate long lists returned from the new option, the API now supports pagination. + +## __Auto Scaling__ + - ### Features + - Doc update for EC2 Auto Scaling: Add Enabled parameter for PutScalingPolicy + +# __2.10.67__ __2020-02-18__ +## __Amazon Chime__ + - ### Features + - Added AudioFallbackUrl to support Chime SDK client. + +## __Amazon Relational Database Service__ + - ### Features + - This release supports Microsoft Active Directory authentication for Amazon Aurora. + +## __Auto Scaling__ + - ### Features + - Amazon EC2 Auto Scaling now supports the ability to enable/disable target tracking, step scaling, and simple scaling policies. + +# __2.10.66__ __2020-02-17__ +## __AWS Cloud9__ + - ### Features + - AWS Cloud9 now supports the ability to tag Cloud9 development environments. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ +## __Amazon DynamoDB__ + - ### Features + - Amazon DynamoDB enables you to restore your DynamoDB backup or table data across AWS Regions such that the restored table is created in a different AWS Region from where the source table or backup resides. You can do cross-region restores between AWS commercial Regions, AWS China Regions, and AWS GovCloud (US) Regions. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for EC2 + +## __Amazon Rekognition__ + - ### Features + - This update adds the ability to detect text in videos and adds filters to image and video text detection. + +# __2.10.65__ __2020-02-14__ +## __AWS MediaTailor__ + - ### Features + - AWS Elemental MediaTailor SDK now allows configuration of Personalization Threshold for HLS and DASH streams. + +## __AWS SecurityHub__ + - ### Features + - Security Hub has released a new DescribeStandards API action. This API action allows a customer to list all of the standards available in an account. For each standard, the list provides the customer with the standard name, description, and ARN. Customers can use the ARN as an input to the BatchEnableStandards API action. To learn more, visit our API documentation. + +## __AWS Shield__ + - ### Features + - This release adds support for associating Amazon Route 53 health checks to AWS Shield Advanced protected resources. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - You can now enable Multi-Attach on Provisioned IOPS io1 volumes through the create-volume API. + +## __Amazon S3__ + - ### Features + - Added support for presigning `CreateMultipartUpload`, `UploadPart`, `CompleteMultipartUpload`, and `AbortMultipartUpload` requests. + +# __2.10.64__ __2020-02-13__ +## __AWS Elemental MediaPackage VOD__ + - ### Features + - Adds support for DASH with multiple media presentation description periods triggered by presence of SCTE-35 ad markers in the manifest.Also adds optional configuration for DASH SegmentTemplateFormat to refer to segments by Number with Duration, Number with Timeline or Time with Timeline and compact the manifest by combining duplicate SegmentTemplate tags. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - Improves discoverability by adding operation methods for deleteItem(), getItem(), putItem and updateItem(), as applicable. These methods take a request object as parameter. Execute() methods for the table interface is removed since they are no longer needed. + +## __Netty NIO HTTP Client__ + - ### Features + - When there is an I/O error on an http2 request, the SDK will start shutting down the connection - stopping using the http2 connection for new requests and closing it after all streams are finished. + +# __2.10.63__ __2020-02-12__ +## __AWS Directory Service__ + - ### Features + - Release to add the ExpirationDateTime as an output to ListCertificates so as to ease customers to look into their certificate lifetime and make timely decisions about renewing them. + +## __AWS Glue__ + - ### Features + - Adding ability to add arguments that cannot be overridden to AWS Glue jobs + +## __Amazon Chime__ + - ### Features + - Documentation updates for Amazon Chime + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for tagging public IPv4 pools. 
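+
+For the multipart presigning support noted under Amazon S3 in 2.10.65 above, here is a minimal sketch of presigning a single `UploadPart` call. The bucket, key, and upload ID are placeholders, and the presign request/response class names are assumed to follow the existing `presignGetObject` pattern.
+
+```java
+import java.time.Duration;
+import software.amazon.awssdk.services.s3.model.UploadPartRequest;
+import software.amazon.awssdk.services.s3.presigner.S3Presigner;
+import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest;
+import software.amazon.awssdk.services.s3.presigner.model.UploadPartPresignRequest;
+
+public class PresignUploadPartExample {
+    public static void main(String[] args) {
+        try (S3Presigner presigner = S3Presigner.create()) {
+            UploadPartRequest uploadPart = UploadPartRequest.builder()
+                    .bucket("example-bucket")      // placeholder bucket
+                    .key("example-object")         // placeholder key
+                    .uploadId("example-upload-id") // from a prior CreateMultipartUpload call
+                    .partNumber(1)
+                    .build();
+
+            PresignedUploadPartRequest presigned = presigner.presignUploadPart(
+                    UploadPartPresignRequest.builder()
+                            .signatureDuration(Duration.ofMinutes(15))
+                            .uploadPartRequest(uploadPart)
+                            .build());
+
+            System.out.println(presigned.url());
+        }
+    }
+}
+```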
+ +## __Amazon Elasticsearch Service__ + - ### Features + - Amazon Elasticsearch Service now offers fine-grained access control, which adds multiple capabilities to give tighter control over data. New features include the ability to use roles to define granular permissions for indices, documents, or fields and to extend Kibana with read-only views and secure multi-tenant support. + +## __Amazon Neptune__ + - ### Features + - This launch enables Neptune start-db-cluster and stop-db-cluster. Stopping and starting Amazon Neptune clusters helps you manage costs for development and test environments. You can temporarily stop all the DB instances in your cluster, instead of setting up and tearing down all the DB instances each time that you use the cluster. + +## __Amazon WorkMail__ + - ### Features + - This release adds support for access control rules management in Amazon WorkMail. + +# __2.10.62__ __2020-02-11__ +## __AWS CloudFormation__ + - ### Features + - This release of AWS CloudFormation StackSets allows you to centrally manage deployments to all the accounts in your organization or specific organizational units (OUs) in AWS Organizations. You will also be able to enable automatic deployments to any new accounts added to your organization or OUs. The permissions needed to deploy across accounts will automatically be taken care of by the StackSets service. + +## __Amazon Cognito Identity Provider__ + - ### Features + - Features:This release adds a new setting for a user pool to allow if customer wants their user signup/signin with case insensitive username. The current default setting is case sensitive, and for our next release we will change it to case insensitive. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 Now Supports Tagging Spot Fleet. + +# __2.10.61__ __2020-02-10__ +## __AWS Key Management Service__ + - ### Features + - The ConnectCustomKeyStore API now provides a new error code (SUBNET_NOT_FOUND) for customers to better troubleshoot if their "connect-custom-key-store" operation fails. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon DocumentDB with MongoDB compatibility__ + - ### Features + - Added clarifying information that Amazon DocumentDB shares operational technology with Amazon RDS and Amazon Neptune. + +# __2.10.60__ __2020-02-07__ +## __AWS RoboMaker__ + - ### Features + - This release adds support for simulation job batches + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - Improves discoverability by renaming the table and index interfaces to be consistent with the client interface naming, and by adding operation methods for createTable(), scan() and query(), as applicable. These methods take a request object as parameter. Execute() methods for the index interface is removed since they are no longer needed. + +## __Amazon Relational Database Service__ + - ### Features + - Documentation updates for RDS: when restoring a DB cluster from a snapshot, must create DB instances + +## __EC2 Image Builder__ + - ### Features + - This version of the SDK includes bug fixes and documentation updates. 
+ +# __2.10.59__ __2020-02-06__ +## __AWS AppSync__ + - ### Features + - AWS AppSync now supports X-Ray + +## __AWS CodeBuild__ + - ### Features + - AWS CodeBuild adds support for Amazon Elastic File Systems + +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - In order to make operations more easily discoverable by an IDE, specific operation methods have been added to the enhanced client interface. An operation method takes a corresponding request object as parameter. Meanwhile, the generic execute() method is removed. This change affects only batch and transact operations at the database level. + +## __Amazon EC2 Container Registry__ + - ### Features + - This release contains updated text for the GetAuthorizationToken API. + +## __Amazon Elastic Block Store__ + - ### Features + - Documentation updates for EBS direct APIs. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds platform details and billing info to the DescribeImages API. + +## __Amazon Lex Model Building Service__ + - ### Features + - Amazon Lex now supports AMAZON.AlphaNumeric with regular expressions. + +# __2.10.58__ __2020-02-05__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for fine-tuned QVBR quality level. + +## __AWS Ground Station__ + - ### Features + - Adds dataflowEndpointRegion property to DataflowEndpointConfig. The dateCreated, lastUpdated, and tags properties on GetSatellite have been deprecated. + +## __AWS Resource Groups Tagging API__ + - ### Features + - Documentation-only update that adds services to the list of supported services. + +## __AWS SecurityHub__ + - ### Features + - Additional resource types are now supported in the AWS Security Finding Format (ASFF). The following new resource types are added, each having an accompanying resource details object with fields for security finding providers to populate: AwsCodeBuildProject, AwsEc2NetworkInterface, AwsEc2SecurityGroup, AwsElasticsearchDomain, AwsLambdaLayerVersion, AwsRdsDbInstance, and AwsWafWebAcl. The following resource types are added without an accompanying details object: AutoscalingAutoscalingGroup, AwsDynamoDbTable, AwsEc2Eip, AwsEc2Snapshot, AwsEc2Volume, AwsRdsDbSnapshot, AwsRedshiftCluster, and AwsS3Object. The number of allowed resources per finding is increased from 10 to 32. A new field is added in the Compliance object, RelatedRequirements. To learn more, visit our documentation on the ASFF. + +## __Amazon Data Lifecycle Manager__ + - ### Features + - Updated the maximum number of tags that can be added to a snapshot using DLM to 45. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release provides support for tagging when you create a VPC endpoint, or VPC endpoint service. + +## __Amazon Forecast Query Service__ + - ### Features + - Documentation updates for Amazon Forecast. + +# __2.10.57__ __2020-02-04__ +## __AWS IoT__ + - ### Features + - Updated ThrottlingException documentation to report that the error code is 400, and not 429, to reflect actual system behaviour. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Storage Gateway__ + - ### Features + - Adding KVM as a supported hypervisor + +## __Amazon CloudFront__ + - ### Features + - Documentation updates for CloudFront + +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - Changing usage of typed builders for PutItem, UpdateItem and StaticTableSchema to explicitly provide class type.
+ - Renames top level sync/async MappedDatabase interfaces as DynamoDbEnhancedClient interfaces. Also adds builder definitions to the interfaces together with a static method that returns the default implementation of the builder. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon VPC Flow Logs adds support for 1-minute aggregation intervals. + +## __Amazon S3__ + - ### Bugfixes + - Fixed an issue where fields in `ListObjectVersionsResponse` and `ListMultipartUploadsResponse` are not decoded correctly when encodingType is specified as url. See [#1601](https://github.com/aws/aws-sdk-java-v2/issues/1601) + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This feature ensures that an instance is patched up to the available patches on a particular date. It can be enabled by selecting the 'ApproveUntilDate' option as the auto-approval rule while creating the patch baseline. ApproveUntilDate - The cutoff date for auto approval of released patches. Any patches released on or before this date will be installed automatically. + +## __Amazon WorkMail__ + - ### Features + - This release adds support for tagging Amazon WorkMail organizations. + +## __Managed Streaming for Kafka__ + - ### Features + - This release enables AWS MSK customers to list Apache Kafka versions that are supported on AWS MSK clusters. Also includes changes to expose additional details of a cluster's state in DescribeCluster and ListClusters APIs. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Deliver exceptions to stream channels correctly if there's an exception thrown on connection. This also fixes a bug where publisher signals onComplete if the stream is closed as a result of outbound GOAWAY. + - Throws `IOException` for the race condition where an HTTP2 connection gets reused at the same time it gets inactive so that failed requests can be retried + +# __2.10.56__ __2020-01-24__ +## __AWS DataSync__ + - ### Features + - AWS DataSync now supports FSx for Windows File Server Locations + +## __AWS OpsWorks CM__ + - ### Features + - AWS OpsWorks for Chef Automate now supports in-place upgrade to Chef Automate 2. Eligible servers can be updated through the management console, CLI and APIs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon EC2__ + - ### Features + - Adds EC2ThrottledException as a recognized throttling exception to be retried + +## __Amazon EC2 Container Service__ + - ### Features + - This release provides support for tagging Amazon ECS task sets for services using external deployment controllers. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Adding new error codes for Nodegroups in EKS + +## __Amazon WorkSpaces__ + - ### Features + - Documentation updates for WorkSpaces + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Fix issue where DNS resolution for a host is only made once for the initial request to the host. If the DNS entries change for a hostname, the client will resolve the new address until the client is closed and recreated. + +# __2.10.55__ __2020-01-23__ +## __AWS Identity and Access Management__ + - ### Features + - This release enables the Identity and Access Management policy simulator to simulate permissions boundary policies. + +## __AWS SDK for Java v2__ + - ### Features + - Added ServiceMetadata.servicePartitions() to get partition metadata for a specific service + - Improved error messages on UnknownHostExceptions + - Updated service endpoint metadata. 
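+
+The `ServiceMetadata.servicePartitions()` addition under AWS SDK for Java v2 in 2.10.55 above is reachable from any service client's static `serviceMetadata()` method. A small sketch follows, with S3 chosen arbitrarily as the service to inspect.
+
+```java
+import software.amazon.awssdk.regions.ServiceMetadata;
+import software.amazon.awssdk.services.s3.S3Client;
+
+public class ServicePartitionsExample {
+    public static void main(String[] args) {
+        ServiceMetadata s3Metadata = S3Client.serviceMetadata();
+
+        // Partition metadata for the service (e.g. aws, aws-cn, aws-us-gov).
+        s3Metadata.servicePartitions().forEach(partition -> System.out.println(partition));
+
+        // Regions where the service has endpoints.
+        s3Metadata.regions().forEach(region -> System.out.println(region.id()));
+    }
+}
+```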
+ +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - Support for non-blocking asynchronous calling of all mapper operations + +## __Amazon Relational Database Service__ + - ### Features + - This SDK release introduces APIs that automate the export of Amazon RDS snapshot data to Amazon S3. The new APIs include: StartExportTask, CancelExportTask, DescribeExportTasks. These APIs automate the extraction of data from an RDS snapshot and export it to an Amazon S3 bucket. The data is stored in a compressed, consistent, and query-able format. After the data is exported, you can query it directly using tools such as Amazon Athena or Redshift Spectrum. You can also consume the data as part of a data lake solution. If you archive the data in S3 Infrequent Access or Glacier, you can reduce long term data storage costs by applying data lifecycle policies. + +# __2.10.54__ __2020-01-21__ +## __AWS Application Discovery Service__ + - ### Features + - Documentation updates for the AWS Application Discovery Service. + +## __AWS CodePipeline__ + - ### Features + - AWS CodePipeline enables an ability to stop pipeline executions. + +## __AWS IoT Events__ + - ### Features + - Documentation updates for iotcolumbo + +## __AWS Marketplace Commerce Analytics__ + - ### Features + - Remove 4 deprecated data sets, change some data sets available dates to 2017-09-15 + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Add an enum value to the result of DescribeByoipCidrs to support CIDRs that are not publicly advertisable. + +## __Netty NIO Http Client__ + - ### Bugfixes + - Fixed a bug where an inactive http2 connection without `GOAWAY` frame received might get reused in a new request, causing `ClosedChannelException` + +# __2.10.53__ __2020-01-20__ +## __AWS Key Management Service__ + - ### Features + - The ConnectCustomKeyStore operation now provides new error codes (USER_LOGGED_IN and USER_NOT_FOUND) for customers to better troubleshoot if their connect custom key store operation fails. Password length validation during CreateCustomKeyStore now also occurs on the client side. + +## __AWS Lambda__ + - ### Features + - Added reason codes to StateReasonCode (InvalidSubnet, InvalidSecurityGroup) and LastUpdateStatusReasonCode (SubnetOutOfIPAddresses, InvalidSubnet, InvalidSecurityGroup) for functions that connect to a VPC. + +## __Alexa For Business__ + - ### Features + - Add support for CreatedTime and ConnectionStatusUpdatedTime in response of SearchDevices API. + +## __Amazon CloudWatch__ + - ### Features + - Updating DescribeAnomalyDetectors API to return AnomalyDetector Status value in response. + +## __Amazon CloudWatch Application Insights__ + - ### Features + - This release adds support for a list API to retrieve the configuration events logged during periodic updates to an application by Amazon CloudWatch Application Insights. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release provides support for a preview of bringing your own IPv6 addresses (BYOIP for IPv6) for use in AWS. + +# __2.10.52__ __2020-01-17__ +## __AWS Batch__ + - ### Features + - This release ensures INACTIVE job definitions are permanently deleted after 180 days. + +## __AWS CloudHSM V2__ + - ### Features + - This release introduces resource-level and tag-based access control for AWS CloudHSM resources. 
You can now tag CloudHSM backups, tag CloudHSM clusters on creation, and tag a backup as you copy it to another region. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for MP3 audio only outputs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon EC2 Container Service__ + - ### Features + - This release provides a public preview for specifying Amazon EFS file systems as volumes in your Amazon ECS task definitions. + +## __Amazon Neptune__ + - ### Features + - This release includes Deletion Protection for Amazon Neptune databases. + +## __Amazon Redshift__ + - ### Features + - Documentation updates for redshift + +# __2.10.51__ __2020-01-16__ +## __AWS Directory Service__ + - ### Features + - To reduce the number of errors our customers are facing, we have modified the requirements of input parameters for two of the Directory Service APIs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Client VPN now supports Port Configuration for VPN Endpoints, allowing usage of either port 443 or port 1194. + +## __Amazon SageMaker Service__ + - ### Features + - This release adds two new APIs (UpdateWorkforce and DescribeWorkforce) to SageMaker Ground Truth service for workforce IP whitelisting. + +# __2.10.50__ __2020-01-15__ +## __AWS Organizations__ + - ### Features + - Updated description for PolicyID parameter and ConstraintViolationException. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS SecurityHub__ + - ### Features + - Add support for DescribeStandardsControls and UpdateStandardsControl. These new Security Hub API operations are used to track and manage whether a compliance standards control is enabled. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - General Update to EC2 Docs and SDKs + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Document updates for Patch Manager 'NoReboot' feature. + +## __Amazon Transcribe Service__ + - ### Bugfixes + - Fixed an issue where streaming transcriptions would fail with signature validation errors if the date changed during the request. + +# __2.10.49__ __2020-01-14__ +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for partition placement groups and instance metadata option in Launch Templates + +# __2.10.48__ __2020-01-13__ +## __AWS Backup__ + - ### Features + - Cross-region backup is a new AWS Backup feature that allows enterprises to copy backups across multiple AWS services to different regions. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for the StopInstances API. You can now stop and start an Amazon EBS-backed Spot Instance at will, instead of relying on the Stop interruption behavior to stop your Spot Instances when interrupted. + +## __Amazon Elastic File System__ + - ### Features + - This release adds support for managing EFS file system policies and EFS Access Points. + +## __Amazon S3__ + - ### Bugfixes + - Fixed a bug that prevented GetBucketPolicy from ever being successful using the asynchronous S3 client. + +# __2.10.47__ __2020-01-10__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + - Updated service endpoints and added global endpoints for iso and iso-b.
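+
+The Amazon S3 bugfix in 2.10.48 above concerns `GetBucketPolicy` on the asynchronous client; the call pattern it fixes looks roughly like the sketch below (the bucket name is a placeholder).
+
+```java
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.model.GetBucketPolicyResponse;
+
+public class GetBucketPolicyAsyncExample {
+    public static void main(String[] args) {
+        try (S3AsyncClient s3 = S3AsyncClient.create()) {
+            GetBucketPolicyResponse response =
+                    s3.getBucketPolicy(req -> req.bucket("example-bucket")) // placeholder bucket
+                      .join(); // block for the example; real code would compose the future
+            System.out.println(response.policy());
+        }
+    }
+}
+```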
+ +## __AWS Transfer for SFTP__ + - ### Features + - This release introduces a new endpoint type that allows you to attach Elastic IP addresses from your AWS account with your server's endpoint directly and whitelist access to your server by client's internet IP address(es) using VPC Security Groups. + +## __Amazon Chime__ + - ### Features + - Add shared profile support to new and existing users + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release introduces the ability to tag egress only internet gateways, local gateways, local gateway route tables, local gateway virtual interfaces, local gateway virtual interface groups, local gateway route table VPC association and local gateway route table virtual interface group association. You can use tags to organize and identify your resources for cost allocation. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds an operation that enables users to override the system-default SSL/TLS certificate for new Amazon RDS DB instances temporarily, or remove the customer override. + +## __Amazon S3__ + - ### Bugfixes + - Fix an issue where s3#listObjects incorrectly decoded marker field. See [#1574](https://github.com/aws/aws-sdk-java-v2/issues/1574). + +## __Amazon SageMaker Service__ + - ### Features + - SageMaker ListTrialComponents API filter by TrialName and ExperimentName. + +## __Amazon WorkSpaces__ + - ### Features + - Added the migrate feature to Amazon WorkSpaces. + +# __2.10.46__ __2020-01-09__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Increase the priority of the AWS_WEB_IDENTITY_TOKEN_FILE/AWS_ROLE_ARN/AWS_ROLE_SESSION_NAME environment variables when loading credentials so that they are considered before web_identity_token_file/role_arn/role_session_name profile properties. This is consistent with the other AWS SDKs, including the CLI. + +## __AWS Security Token Service__ + - ### Features + - Documentation updates for sts + +## __Amazon CloudWatch Logs__ + - ### Features + - Documentation updates for logs + +## __Amazon S3__ + - ### Features + - Add support for Tagging builder in `CreateMultipartUploadRequest`. See [#1440](https://github.com/aws/aws-sdk-java-v2/issues/1440) + +# __2.10.45__ __2020-01-08__ +## __AWS Cost Explorer Service__ + - ### Features + - Documentation updates for CreateCostCategoryDefinition and UpdateCostCategoryDefinition API + +## __AWS Step Functions__ + - ### Features + - Add sfn specific http configurations. See [#1325](https://github.com/aws/aws-sdk-java-v2/issues/1325) + +## __Amazon EC2__ + - ### Bugfixes + - Fix NPE when calling `CopySnapshot`. Fixes [#1564](https://github.com/aws/aws-sdk-java-v2/issues/1564) + +## __Amazon Translate__ + - ### Features + - This release adds a new family of APIs for asynchronous batch translation service that provides option to translate large collection of text or HTML documents stored in Amazon S3 folder. This service accepts a batch of up to 5 GB in size per API call with each document not exceeding 1 MB size and the number of documents not exceeding 1 million per batch. See documentation for more information. + +## __Firewall Management Service__ + - ### Features + - AWS Firewall Manager now supports tagging, and tag-based access control, of policies. + +# __2.10.44__ __2020-01-07__ +## __AWS CodeBuild__ + - ### Features + - Add encryption key override to StartBuild API in AWS CodeBuild. 
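+
+For the `CreateMultipartUploadRequest` tagging support under Amazon S3 in 2.10.46 above, a minimal sketch using the `Tagging` builder follows; the bucket, key, and tag values are placeholders, and the `tagging(Tagging)` overload is assumed from the linked issue.
+
+```java
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.Tag;
+import software.amazon.awssdk.services.s3.model.Tagging;
+
+public class TaggedMultipartUploadExample {
+    public static void main(String[] args) {
+        try (S3Client s3 = S3Client.create()) {
+            CreateMultipartUploadRequest request = CreateMultipartUploadRequest.builder()
+                    .bucket("example-bucket") // placeholder
+                    .key("example-object")    // placeholder
+                    .tagging(Tagging.builder()
+                            .tagSet(Tag.builder().key("project").value("demo").build())
+                            .build())
+                    .build();
+
+            String uploadId = s3.createMultipartUpload(request).uploadId();
+            System.out.println(uploadId);
+        }
+    }
+}
+```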
+ +## __AWS Migration Hub__ + - ### Features + - ListApplicationStates API provides a list of all application migration states + +## __AWS X-Ray__ + - ### Features + - Documentation updates for xray + +# __2.10.43__ __2020-01-06__ +## __AWS Elemental MediaPackage__ + - ### Features + - You can now restrict direct access to AWS Elemental MediaPackage by securing requests for live content using CDN authorization. With CDN authorization, content requests require a specific HTTP header and authorization code. + +## __AWS SDK for Java v2__ + - ### Features + - Add `RequestBody.fromRemainingByteBuffer(ByteBuffer)` that copies only the remaining readable bytes of the buffer. See [#1534](https://github.com/aws/aws-sdk-java-v2/issues/1534) + + - ### Bugfixes + - Reduce ReadTimeout and ConnectTimeout for accessing EC2 metadata instance service + +## __Amazon Comprehend__ + - ### Features + - Amazon Comprehend now supports Multilabel document classification + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release supports service providers configuring a private DNS name for services other than AWS services and services available in the AWS marketplace. This feature allows consumers to access the service using an existing DNS name without making changes to their applications. + +## __Amazon S3__ + - ### Bugfixes + - Requests that return an error response in the body of the HTTP response with a successful (200) status code will now correctly be handled as a failed request by the SDK. + +# __2.10.42__ __2020-01-02__ +## __AWS Cost Explorer Service__ + - ### Features + - Documentation updates for GetReservationUtilization for the Cost Explorer API. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix unmarshalling for models with xml attributes. See [#1488](https://github.com/aws/aws-sdk-java-v2/issues/1488). + +## __Amazon EC2 Container Registry__ + - ### Features + - Adds waiters for ImageScanComplete and LifecyclePolicyPreviewComplete + +## __Amazon Lex Model Building Service__ + - ### Features + - Documentation updates for Amazon Lex. + +## __Amazon Lightsail__ + - ### Features + - This release adds support for Certificate Authority (CA) certificate identifier to managed databases in Amazon Lightsail. + +## __Netty NIO Http Client__ + - ### Bugfixes + - Propagate exception properly when an exception is thrown from protocol initialization. + +# __2.10.41__ __2019-12-23__ +## __AWS Health APIs and Notifications__ + - ### Features + - With this release, you can now centrally aggregate AWS Health events from all accounts in your AWS organization. Visit AWS Health documentation to learn more about enabling and using this feature: https://docs.aws.amazon.com/health/latest/ug/organizational-view-health.html. + +## __Amazon Detective__ + - ### Features + - Updated the documentation for Amazon Detective. + +## __Amazon FSx__ + - ### Features + - This release adds a new family of APIs (create-data-repository-task, describe-data-repository-task, and cancel-data-repository-task) that allow users to perform operations between their file system and its linked data repository. + +# __2.10.40__ __2019-12-20__ +## __AWS Device Farm__ + - ### Features + - Introduced browser testing support through AWS Device Farm + +## __AWS SecurityHub__ + - ### Features + - Additional resource types are now fully supported in the AWS Security Finding Format (ASFF). 
These resources include AwsElbv2LoadBalancer, AwsKmsKey, AwsIamRole, AwsSqsQueue, AwsLambdaFunction, AwsSnsTopic, and AwsCloudFrontDistribution. Each of these resource types includes an accompanying resource details object with fields for security finding providers to populate. Updates were made to the AwsIamAccessKey resource details object to include information on principal ID and name. To learn more, visit our documentation on the ASFF. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release introduces the ability to tag key pairs, placement groups, export tasks, import image tasks, import snapshot tasks and export image tasks. You can use tags to organize and identify your resources for cost allocation. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Amazon EKS now supports restricting access to the API server public endpoint by applying CIDR blocks + +## __Amazon Pinpoint__ + - ### Features + - This release of the Amazon Pinpoint API introduces versioning support for message templates. + +## __Amazon Redshift__ + - ### Features + - Documentation updates for Amazon Redshift RA3 node types. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds an operation that enables users to specify whether a database is restarted when its SSL/TLS certificate is rotated. Only customers who do not use SSL/TLS should use this operation. + +## __Amazon S3__ + - ### Bugfixes + - Fixed an issue where the SDK would attempt to validate the checksum on a PutObjectRequest when S3 was returning invalid checksums. This would cause all requests to buckets with customer-managed-key service-side encryption to fail. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This release updates the attachments support to include AttachmentReference source for Automation documents. + +## __Amazon Transcribe Service__ + - ### Features + - AWS Transcribe now supports vocabulary filtering that allows customers to input words to the service that they don't want to see in the output transcript. + +# __2.10.39__ __2019-12-19__ +## __AWS CodeStar connections__ + - ### Features + - Public beta for Bitbucket Cloud support in AWS CodePipeline through integration with AWS CodeStar connections. + +## __Amazon Data Lifecycle Manager__ + - ### Features + - You can now copy snapshots across regions using Data Lifecycle Manager (DLM). You can enable policies which, along with create, can now also copy snapshots to one or more AWS region(s). Copies can be scheduled for up to three regions from a single policy and retention periods are set for each region separately. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - We are updating the supportedRootDevices field to supportedRootDeviceTypes for DescribeInstanceTypes API to ensure that the actual value is returned, correcting a previous error in the model. + +## __Amazon GameLift__ + - ### Features + - Amazon GameLift now supports ARNs for all key GameLift resources, tagging for GameLift resource authorization management, and updated documentation that articulates GameLift's resource authorization strategy. + +## __Amazon Lex Model Building Service__ + - ### Features + - Amazon Lex now supports conversation logs and slot obfuscation. 
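+
+The `RequestBody.fromRemainingByteBuffer(ByteBuffer)` addition under AWS SDK for Java v2 in 2.10.43 above copies only the bytes between the buffer's position and limit; a small sketch follows (bucket and key are placeholders).
+
+```java
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+
+public class RemainingByteBufferExample {
+    public static void main(String[] args) {
+        ByteBuffer buffer = ByteBuffer.wrap("header|payload".getBytes(StandardCharsets.UTF_8));
+        buffer.position("header|".length()); // skip the prefix; only "payload" remains readable
+
+        try (S3Client s3 = S3Client.create()) {
+            s3.putObject(req -> req.bucket("example-bucket").key("example-object"), // placeholders
+                         RequestBody.fromRemainingByteBuffer(buffer));
+        }
+    }
+}
+```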
+ +## __Amazon Personalize Runtime__ + - ### Features + - Add context map to get-recommendations and get-personalized-ranking request objects to provide contextual metadata at inference time + +## __Amazon S3__ + - ### Bugfixes + - Fixed an issue where a 'checksum mismatch' error is raised whenever a PutObject request is retried while using an async client. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This release allows customers to add tags to Automation execution, enabling them to sort and filter executions in different ways, such as by resource, purpose, owner, or environment. + +## __Amazon Transcribe Service__ + - ### Features + - Amazon Transcribe supports job queuing for the StartTranscriptionJob API. + +## __Netty NIO HTTP Client__ + - ### Features + - `SETTINGS_INITIAL_WINDOW_SIZE` is now configurable on HTTP/2 connections opened by the Netty client using `Http2Configuration#initialWindowSize(Integer)` along with `NettyNioAsyncHttpClient.Builder#http2Configuration(Http2Configuration)`. See https://tools.ietf.org/html/rfc7540#section-6.5.2 for more information. + +# __2.10.38__ __2019-12-18__ +## __AWS OpsWorks CM__ + - ### Features + - AWS OpsWorks CM now supports tagging, and tag-based access control, of servers and backups. + +## __AWS Resource Groups Tagging API__ + - ### Features + - Documentation updates for resourcegroupstaggingapi + +## __Amazon CloudFront__ + - ### Features + - Documentation updates for CloudFront + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release introduces the ability to tag Elastic Graphics accelerators. You can use tags to organize and identify your accelerators for cost allocation. + +## __Amazon Simple Storage Service__ + - ### Features + - Updates Amazon S3 endpoints allowing you to configure your client to opt-in to using S3 with the us-east-1 regional endpoint, instead of global. + +# __2.10.37__ __2019-12-17__ +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports HLS ID3 segment tagging, HLS redundant manifests for CDNs that support different publishing/viewing endpoints, fragmented MP4 (fMP4), and frame capture intervals specified in milliseconds. + +## __AWS IoT__ + - ### Features + - Added a new Over-the-Air (OTA) Update feature that allows you to use different, or multiple, protocols to transfer an image from the AWS cloud to IoT devices. + +## __Amazon EC2 Container Service__ + - ### Features + - Documentation updates for Amazon ECS. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for Amazon EC2 + +## __Amazon Kinesis Analytics__ + - ### Features + - Kinesis Data Analytics service now supports running Java applications using Flink 1.8. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Added support for Cloud Watch Output and Document Version to the Run Command tasks in Maintenance Windows. + +# __2.10.36__ __2019-12-16__ +## __AWS Comprehend Medical__ + - ### Features + - New Ontology linking APIs will provides medication concepts normalization and Diagnoses codes from input text. In this release we will provide two APIs - RxNorm and ICD10-CM. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - You can now configure your EC2 Fleet to preferentially use EC2 Capacity Reservations for launching On-Demand instances, enabling you to fully utilize the available (and unused) Capacity Reservations before launching On-Demand instances on net new capacity. 
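+
+The `SETTINGS_INITIAL_WINDOW_SIZE` entry under Netty NIO HTTP Client in 2.10.39 above is configured through the builder chain sketched below; the 1 MiB value is only an example.
+
+```java
+import software.amazon.awssdk.http.Protocol;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.Http2Configuration;
+import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
+
+public class Http2WindowSizeExample {
+    public static void main(String[] args) {
+        SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder()
+                .protocol(Protocol.HTTP2)
+                .http2Configuration(Http2Configuration.builder()
+                        .initialWindowSize(1_048_576) // example value: 1 MiB per stream
+                        .build())
+                .build();
+
+        // Pass httpClient to an async service client builder via httpClient(...).
+        httpClient.close();
+    }
+}
+```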
+ +## __Amazon S3__ + - ### Features + - CopyObjectRequest now has `destinationBucket` and `destinationKey` properties for clarity. + The existing names, `bucket` and `key`, are deprecated. + +## __AmazonMQ__ + - ### Features + - Amazon MQ now supports throughput-optimized message brokers, backed by Amazon EBS. + +# __2.10.35__ __2019-12-13__ +## __AWS CodeBuild__ + - ### Features + - CodeBuild adds support for cross account + +## __Amazon Detective__ + - ### Features + - This is the initial release of Amazon Detective. + +## __Amazon Simple Email Service__ + - ### Features + - Added the ability to use your own public-private key pair to configure DKIM authentication for a domain identity. + +# __2.10.34__ __2019-12-12__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fixing exception using `RequestBody.fromInputStream` on non-resettable `InputStreams` by making `reset` conditional on `markSupported`. See [#1544](https://github.com/aws/aws-sdk-java-v2/issues/1544) / [#1545](https://github.com/aws/aws-sdk-java-v2/issues/1545) + +## __Access Analyzer__ + - ### Features + - This release includes improvements and fixes bugs for the IAM Access Analyzer feature. + +# __2.10.33__ __2019-12-11__ +## __AWS SDK for Java v2__ + - ### Features + - Adds a `has*` method to requests and responses that have a List or Map property. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release allows customers to attach multiple Elastic Inference Accelerators to a single EC2 instance. It adds support for a Count parameter for each Elastic Inference Accelerator type you specify on the RunInstances and LaunchTemplate APIs. + +# __2.10.32__ __2019-12-10__ +## __AWSKendraFrontendService__ + - ### Features + - 1. Adding DocumentTitleFieldName as an optional configuration for SharePoint. 2. updating s3 object pattern to support all s3 keys. + +# __2.10.31__ __2019-12-09__ +## __AWS Key Management Service__ + - ### Features + - The Verify operation now returns KMSInvalidSignatureException on invalid signatures. The Sign and Verify operations now return KMSInvalidStateException when a request is made against a CMK pending deletion. + +## __Amazon QuickSight__ + - ### Features + - Documentation updates for QuickSight + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Adds the SSM GetCalendarState API and ChangeCalendar SSM Document type. These features enable the forthcoming Systems Manager Change Calendar feature, which will allow you to schedule events during which actions should (or should not) be performed. + +## __Managed Streaming for Kafka__ + - ### Features + - AWS MSK has added support for Open Monitoring with Prometheus. + +## __Netty NIO HTTP Client__ + - ### Features + - Close HTTP/2 connections if they have had 0 streams for 5 seconds. This can be disabled using `useIdleConnectionReaper(false)` or have the time period adjusted using `connectionMaxIdleTime(...)` on the `NettyNioAsyncHttpClient.Builder`. + - Periodically ping HTTP/2 connections and close them if the service does not respond. The ping periodicity and timeout time is not currently configurable. + +# __2.10.30__ __2019-12-04__ +## __Amazon Kinesis Video Signaling Channels__ + - ### Features + - Announcing support for WebRTC in Kinesis Video Streams, as fully managed capability. You can now use simple APIs to enable your connected devices, web, and mobile apps with real-time two-way media streaming capabilities. 
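+
+For the `CopyObjectRequest` property rename under Amazon S3 in 2.10.36 above, a minimal sketch is below; the bucket and key names are placeholders, and the source is still expressed through the existing `copySource` string at this SDK version.
+
+```java
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
+
+public class CopyObjectExample {
+    public static void main(String[] args) {
+        try (S3Client s3 = S3Client.create()) {
+            s3.copyObject(CopyObjectRequest.builder()
+                    .copySource("source-bucket/source-key") // placeholder source
+                    .destinationBucket("dest-bucket")       // replaces the deprecated bucket()
+                    .destinationKey("dest-key")             // replaces the deprecated key()
+                    .build());
+        }
+    }
+}
+```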
+ +## __Amazon Kinesis Video Streams__ + - ### Features + - Introduces management of signaling channels for Kinesis Video Streams. + +## __AmazonApiGatewayV2__ + - ### Features + - Amazon API Gateway now supports HTTP APIs (beta), enabling customers to quickly build high performance RESTful APIs that are up to 71% cheaper than REST APIs also available from API Gateway. HTTP APIs are optimized for building APIs that proxy to AWS Lambda functions or HTTP backends, making them ideal for serverless workloads. Using HTTP APIs, you can secure your APIs using OIDC and OAuth 2 out of box, quickly build web applications using a simple CORS experience, and get started immediately with automatic deployment and simple create workflows. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Fixed an issue where closing the last stream on a connection that had been closed or received a GOAWAY did not close the connection. + - Fixed an issue where receiving a GOAWAY that would cause the closing of all streams could cause all outstanding streams to be completed successfully instead of exceptionally. + +# __2.10.29__ __2019-12-03__ +## __AWS Lambda__ + - ### Features + - - Added the ProvisionedConcurrency type and operations. Allocate provisioned concurrency to enable your function to scale up without fluctuations in latency. Use PutProvisionedConcurrencyConfig to configure provisioned concurrency on a version of a function, or on an alias. + +## __AWS Step Functions__ + - ### Features + - This release of the AWS Step Functions SDK introduces support for Express Workflows. + +## __Amazon Elastic Block Store__ + - ### Features + - This release introduces the EBS direct APIs for Snapshots: 1. ListSnapshotBlocks, which lists the block indexes and block tokens for blocks in an Amazon EBS snapshot. 2. ListChangedBlocks, which lists the block indexes and block tokens for blocks that are different between two snapshots of the same volume/snapshot lineage. 3. GetSnapshotBlock, which returns the data in a block of an Amazon EBS snapshot. + +## __Amazon Rekognition__ + - ### Features + - This SDK Release introduces APIs for Amazon Rekognition Custom Labels feature (CreateProjects, CreateProjectVersion,DescribeProjects, DescribeProjectVersions, StartProjectVersion, StopProjectVersion and DetectCustomLabels). Also new is AugmentedAI (Human In The Loop) Support for DetectModerationLabels in Amazon Rekognition. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds support for the Amazon RDS Proxy + +## __Amazon S3__ + - ### Bugfixes + - Interacting with an access point in a different region to the one the S3 client is configured for will no longer result in the request being signed for the wrong region and rejected by S3. + +## __Amazon SageMaker Service__ + - ### Features + - You can now use SageMaker Autopilot for automatically training and tuning candidate models using a combination of various feature engineering, ML algorithms, and hyperparameters determined from the user's input data. SageMaker Automatic Model Tuning now supports tuning across multiple algorithms. With Amazon SageMaker Experiments users can create Experiments, ExperimentTrials, and ExperimentTrialComponents to track, organize, and evaluate their ML training jobs. With Amazon SageMaker Debugger, users can easily debug training jobs using a number of pre-built rules provided by Amazon SageMaker, or build custom rules. 
With Amazon SageMaker Processing, users can run on-demand, distributed, and fully managed jobs for data pre- or post- processing or model evaluation. With Amazon SageMaker Model Monitor, a user can create MonitoringSchedules to automatically monitor endpoints to detect data drift and other issues and get alerted on them. This release also includes the preview version of Amazon SageMaker Studio with Domains, UserProfiles, and Apps. This release also includes the preview version of Amazon Augmented AI to easily implement human review of machine learning predictions by creating FlowDefinitions, HumanTaskUis, and HumanLoops. + +## __Application Auto Scaling__ + - ### Features + - This release supports auto scaling of provisioned concurrency for AWS Lambda. + +# __2.10.28__ __2019-12-03__ +## __AWS Compute Optimizer__ + - ### Features + - Initial release of AWS Compute Optimizer. AWS Compute Optimizer recommends optimal AWS Compute resources to reduce costs and improve performance for your workloads. + +## __AWS Network Manager__ + - ### Features + - This is the initial SDK release for AWS Network Manager. + +## __AWS Outposts__ + - ### Features + - This is the initial release for AWS Outposts, a fully managed service that extends AWS infrastructure, services, APIs, and tools to customer sites. AWS Outposts enables you to launch and run EC2 instances and EBS volumes locally at your on-premises location. This release introduces new APIs for creating and viewing Outposts. + +## __AWS S3 Control__ + - ### Features + - Amazon S3 Access Points is a new S3 feature that simplifies managing data access at scale for shared data sets on Amazon S3. Access Points provide a customizable way to access the objects in a bucket, with a unique hostname and access policy that enforces the specific permissions and network controls for any request made through the access point. This represents a new way of provisioning access to shared data sets. + +## __AWSKendraFrontendService__ + - ### Features + - It is a preview launch of Amazon Kendra. Amazon Kendra is a managed, highly accurate and easy to use enterprise search service that is powered by machine learning. + +## __Amazon Augmented AI Runtime__ + - ### Features + - This release adds support for Amazon Augmented AI, which makes it easy to build workflows for human review of machine learning predictions. + +## __Amazon CodeGuru Profiler__ + - ### Features + - (New Service) Amazon CodeGuru Profiler analyzes application CPU utilization and latency characteristics to show you where you are spending the most cycles in your application. This analysis is presented in an interactive flame graph that helps you easily understand which paths consume the most resources, verify that your application is performing as expected, and uncover areas that can be optimized further. + +## __Amazon CodeGuru Reviewer__ + - ### Features + - This is the preview release of Amazon CodeGuru Reviewer. + +## __Amazon EC2 Container Service__ + - ### Features + - This release supports ECS Capacity Providers, Fargate Spot, and ECS Cluster Auto Scaling. These features enable new ways for ECS to manage compute capacity used by tasks. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for the following features: 1. An option to enable acceleration for Site-to-Site VPN connections; 2. Inf1 instances featuring up to 16 AWS Inferentia chips; 3. The ability to associate route tables with internet gateways and virtual private gateways; 4. 
AWS Local Zones that place compute, storage, database, and other select services; 5. Launching and viewing EC2 instances and EBS volumes running locally in Outposts; 6. Peering Transit Gateways between regions simplifying creation of secure and private global networks on AWS; 7. Transit Gateway Multicast, enabling multicast routing within and between VPCs using Transit Gateway as a multicast router. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Introducing Amazon EKS with Fargate. Customers can now use Amazon EKS to launch pods directly onto AWS Fargate, the serverless compute engine built for containers on AWS. + +## __Amazon Elasticsearch Service__ + - ### Features + - UltraWarm storage provides a cost-effective way to store large amounts of read-only data on Amazon Elasticsearch Service. Rather than attached storage, UltraWarm nodes use Amazon S3 and a sophisticated caching solution to improve performance. For indices that you are not actively writing to and query less frequently, UltraWarm storage offers significantly lower costs per GiB. In Elasticsearch, these warm indices behave just like any other index. You can query them using the same APIs or use them to create dashboards in Kibana. + +## __Amazon Fraud Detector__ + - ### Features + - Amazon Fraud Detector is a fully managed service that makes it easy to identify potentially fraudulent online activities such as online payment fraud and the creation of fake accounts. Amazon Fraud Detector uses your data, machine learning (ML), and more than 20 years of fraud detection expertise from Amazon to automatically identify potentially fraudulent online activity so you can catch more fraud faster. + +## __Amazon Simple Storage Service__ + - ### Features + - Amazon S3 Access Points is a new S3 feature that simplifies managing data access at scale for shared data sets on Amazon S3. Access Points provide a customizable way to access the objects in a bucket, with a unique hostname and access policy that enforces the specific permissions and network controls for any request made through the access point. This represents a new way of provisioning access to shared data sets. + +## __Amazon Textract__ + - ### Features + - This SDK Release introduces Amazon Augmented AI support for Amazon Textract AnalyzeDocument API. Image byte payloads for synchronous operations have increased from 5 MB to 10 MB. + +# __2.10.27__ __2019-12-02__ +## __Access Analyzer__ + - ### Features + - Introducing AWS IAM Access Analyzer, an IAM feature that makes it easy for AWS customers to ensure that their resource-based policies provide only the intended access to resources outside their AWS accounts. + +# __2.10.26__ __2019-12-02__ +## __AWS License Manager__ + - ### Features + - AWS License Manager now automates discovery of bring-your-own-license usage across the customers organization. With few simple settings, customers can add bring your own license product information along with licensing rules, which would enable License Manager to automatically track the instances that have the specified products installed. If License Manager detects any violation of licensing rules, it would notify the customers designated license administrator to take corrective action. 
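+
+The Amazon S3 Access Points entries in 2.10.28 above can be exercised from the SDK by passing the access point ARN where a bucket name normally goes; a minimal sketch follows, with a made-up account ID, access point name, and object key.
+
+```java
+import software.amazon.awssdk.services.s3.S3Client;
+
+public class AccessPointExample {
+    public static void main(String[] args) {
+        String accessPointArn =
+                "arn:aws:s3:us-west-2:123456789012:accesspoint/example-ap"; // hypothetical ARN
+
+        try (S3Client s3 = S3Client.create()) {
+            String body = s3.getObjectAsBytes(req -> req
+                            .bucket(accessPointArn) // access point ARN in place of a bucket name
+                            .key("example-object"))
+                    .asUtf8String();
+            System.out.println(body);
+        }
+    }
+}
+```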
+ +## __Amazon DynamoDB Enhanced Client [Preview]__ + - ### Features + - Write operations (put, get, delete) now support 'conditionExpression' + +## __Amazon Elastic Compute Cloud__ + - ### Features + - AWS now provides a new BYOL experience for software licenses, such as Windows and SQL Server, that require a dedicated physical server. You can now enjoy the flexibility and cost effectiveness of using your own licenses on Amazon EC2 Dedicated Hosts, but with the simplicity, resiliency, and elasticity of AWS. You can specify your Dedicated Host management preferences, such as host allocation, host capacity utilization, and instance placement in AWS License Manager. Once set up, AWS takes care of these administrative tasks on your behalf, so that you can seamlessly launch virtual machines (instances) on Dedicated Hosts just like you would launch an EC2 instance with AWS provided licenses. + +## __EC2 Image Builder__ + - ### Features + - This is the first release of EC2 Image Builder, a service that provides a managed experience for automating the creation of EC2 AMIs. + +## __Schemas__ + - ### Features + - This release introduces support for Amazon EventBridge schema registry, making it easy to discover and write code for events in EventBridge. + +# __2.10.25__ __2019-11-26__ +## __AWS Directory Service__ + - ### Features + - This release will introduce optional encryption over LDAP network traffic using SSL certificates between customer's self-managed AD and AWS Directory Services instances. The release also provides APIs for Certificate management. + +## __AWS Kinesis__ + - ### Bugfixes + - Reducing default read timeout and write timeout to 10 seconds for Kinesis client. + +## __AWS MediaTailor__ + - ### Features + - AWS Elemental MediaTailor SDK now allows configuration of the Live Pre-Roll feature for HLS and DASH streams. + +## __AWS Organizations__ + - ### Features + - Introduces the DescribeEffectivePolicy action, which returns the contents of the policy that's in effect for the account. + +## __AWS RDS DataService__ + - ### Features + - Type hints to improve handling of some specific parameter types (date/time, decimal etc) for ExecuteStatement and BatchExecuteStatement APIs + +## __AWS Resource Groups Tagging API__ + - ### Features + - You can use tag policies to help standardize on tags across your organization's resources. + +## __AWSServerlessApplicationRepository__ + - ### Features + - AWS Serverless Application Repository now supports verified authors. Verified means that AWS has made a good faith review, as a reasonable and prudent service provider, of the information provided by the requester and has confirmed that the requester's identity is as claimed. + +## __Amazon Cognito Identity Provider__ + - ### Features + - This release adds a new setting for a user pool to configure which recovery methods a user can use to recover their account via the forgot password operation. + +## __Amazon DynamoDB__ + - ### Features + - 1) Amazon Contributor Insights for Amazon DynamoDB is a diagnostic tool for identifying frequently accessed keys and understanding database traffic trends. 2) Support for displaying new fields when a table's encryption state is Inaccessible or the table have been Archived. + +## __Amazon Elastic Inference__ + - ### Features + - Amazon Elastic Inference allows customers to attach Elastic Inference Accelerators to Amazon EC2 and Amazon ECS tasks, thus providing low-cost GPU-powered acceleration and reducing the cost of running deep learning inference. 
This release allows customers to add or remove tags for their Elastic Inference Accelerators. + +## __Amazon QuickSight__ + - ### Features + - Documentation updates for QuickSight + +## __Amazon WorkSpaces__ + - ### Features + - For the WorkspaceBundle API, added the image identifier and the time of the last update. + +## __Netty NIO HTTP Client__ + - ### Features + - Detect unhealthy http2 connections when read or write times out by sending PING frames + +# __2.10.24__ __2019-11-25__ +## __AWS CodeBuild__ + - ### Features + - CodeBuild adds support for test reporting + +## __AWS Cost Explorer Service__ + - ### Features + - This launch provides customers with access to Cost Category Public Beta APIs. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for 8K outputs and support for QuickTime Animation Codec (RLE) inputs. + +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports the ability to create a multiple program transport stream (MPTS). + +## __AWS Elemental MediaPackage VOD__ + - ### Features + - Adds a domain name to PackagingGroups, representing the fully qualified domain name for Assets created in the group. + +## __AWS Greengrass__ + - ### Features + - IoT Greengrass supports machine learning resources in 'No container' mode. + +## __AWS IoT__ + - ### Features + - This release adds: 1) APIs for fleet provisioning claim and template, 2) endpoint configuration and custom domains, 3) support for enhanced custom authentication, 4) support for 4 additional audit checks: Device and CA certificate key quality checks, IoT role alias over-permissive check and IoT role alias access to unused services check, 5) extended capability of AWS IoT Rules Engine to support IoT SiteWise rule action. The IoT SiteWise rule action lets you send messages from IoT sensors and applications to IoT SiteWise asset properties. + +## __AWS IoT Secure Tunneling__ + - ### Features + - This release adds support for IoT Secure Tunneling to remotely access devices behind restricted firewalls. + +## __AWS Key Management Service__ + - ### Features + - AWS Key Management Service (KMS) now enables creation and use of asymmetric Customer Master Keys (CMKs) and the generation of asymmetric data key pairs. + +## __AWS Lambda__ + - ### Features + - Added the function state and update status to the output of GetFunctionConfiguration and other actions. Check the state information to ensure that a function is ready before you perform operations on it. Functions take time to become ready when you connect them to a VPC. Added the EventInvokeConfig type and operations to configure error handling options for asynchronous invocation. Use PutFunctionEventInvokeConfig to configure the number of retries and the maximum age of events when you invoke the function asynchronously. Added on-failure and on-success destination settings for asynchronous invocation. Configure destinations to send an invocation record to an SNS topic, an SQS queue, an EventBridge event bus, or a Lambda function. Added error handling options to event source mappings. This enables you to configure the number of retries, configure the maximum age of records, or retry with smaller batches when an error occurs when a function processes a Kinesis or DynamoDB stream. Added the on-failure destination setting to event source mappings.
This enables you to send discarded events to an SNS topic or SQS queue when all retries fail or when the maximum record age is exceeded when a function processes a Kinesis or DynamoDB stream. Added the ParallelizationFactor option to event source mappings to increase concurrency per shard when a function processes a Kinesis or DynamoDB stream. + +## __AWS Resource Access Manager__ + - ### Features + - AWS RAM provides new APIs to view the permissions granted to principals in a resource share. This release also creates corresponding resource shares for supported services that use resource policies, as well as an API to promote them to standard shares that can be managed in RAM. + +## __AWS WAFV2__ + - ### Features + - This release introduces a new set of APIs ("wafv2") for AWS WAF. Major changes include a single set of APIs for creating/updating resources in global and regional scope, and rules are configured directly into the web ACL instead of being referenced. The previous APIs ("waf" and "waf-regional") are now referred to as AWS WAF Classic. For more information visit: https://docs.aws.amazon.com/waf/latest/APIReference/Welcome.html + +## __Alexa For Business__ + - ### Features + - API update for Alexa for Business: This update enables the use of meeting room configuration that can be applied to a room profile. These settings help improve and measure utilization on Alexa for Business enabled rooms. New features include end meeting reminders, intelligent room release and room utilization analytics report. + +## __Amazon AppConfig__ + - ### Features + - Introducing AWS AppConfig, a new service that enables customers to quickly deploy validated configurations to applications of any size in a controlled and monitored fashion. + +## __Amazon Athena__ + - ### Features + - This release adds additional query lifecycle metrics to the QueryExecutionStatistics object in GetQueryExecution response. + +## __Amazon CloudWatch__ + - ### Features + - This release adds a new feature called "Contributor Insights". "Contributor Insights" supports the following 6 new APIs (PutInsightRule, DeleteInsightRules, EnableInsightRules, DisableInsightRules, DescribeInsightRules and GetInsightRuleReport). + +## __Amazon CloudWatch Application Insights__ + - ### Features + - CloudWatch Application Insights for .NET and SQL Server includes the following features: tagging (create and manage tags for your applications), custom log pattern matching (define custom log patterns to be detected and monitored), and resource-level permissions (specify which applications users can access). + +## __Amazon Cognito Identity Provider__ + - ### Features + - Amazon Cognito Userpools now supports Sign in with Apple as an Identity Provider. + +## __Amazon Comprehend__ + - ### Features + - Amazon Comprehend now supports real-time analysis with Custom Classification + +## __Amazon Data Lifecycle Manager__ + - ### Features + - You can now set time-based retention policies on Data Lifecycle Manager. With this launch, DLM allows you to set the snapshot retention period in the following interval units: days, weeks, months and years. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds two new APIs: 1. ModifyDefaultCreditSpecification, which allows you to set default credit specification at the account level per AWS Region, per burstable performance instance family, so that all new burstable performance instances in the account launch using the new default credit specification. 2.
GetDefaultCreditSpecification, which allows you to get current default credit specification per AWS Region, per burstable performance instance family. This release also adds new client exceptions for StartInstances and StopInstances. + +## __Amazon Kinesis Analytics__ + - ### Features + - Kinesis Data Analytics service adds support to configure Java applications to access resources in a VPC. Also releasing support to configure Java applications to set allowNonRestoreState flag through the service APIs. + +## __Amazon Lex Runtime Service__ + - ### Features + - Amazon Lex adds "sessionId" attribute to the PostText and PostContent response. + +## __Amazon Redshift__ + - ### Features + - This release contains changes for 1. Redshift Scheduler 2. Update to the DescribeNodeConfigurationOptions to include a new action type recommend-node-config + +## __Amazon Relational Database Service__ + - ### Features + - Cluster Endpoints can now be tagged by using --tags in the create-db-cluster-endpoint API + +## __Amazon Simple Email Service__ + - ### Features + - This release includes support for automatically suppressing email addresses that result in hard bounce or complaint events at the account level, and for managing addresses on this account-level suppression list. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - AWS Systems Manager Documents now supports more Document Types: ApplicationConfiguration, ApplicationConfigurationSchema and DeploymentStrategy. This release also extends Document Permissions capabilities and introduces a new Force flag for DeleteDocument API. + +## __Application Auto Scaling__ + - ### Features + - This release supports auto scaling of document classifier endpoints for Comprehend; and supports target tracking based on the average capacity utilization metric for AppStream 2.0 fleets. + +## __Elastic Load Balancing__ + - ### Features + - This release of Elastic Load Balancing V2 adds new subnet features for Network Load Balancers and a new routing algorithm for Application Load Balancers. + +# __2.10.23__ __2019-11-22__ +## __AWS Auto Scaling Plans__ + - ### Features + - Update default endpoint for AWS Auto Scaling. + +## __AWS Certificate Manager__ + - ### Features + - This release adds support for Tag-Based IAM for AWS Certificate Manager and adding tags to certificates upon creation. + +## __AWS CodeBuild__ + - ### Features + - Add Canonical ARN to LogsLocation. + +## __AWS Elemental MediaPackage VOD__ + - ### Features + - Includes the submission time of Asset ingestion request in the API response for Create/List/Describe Assets. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - The ProcessCredentialsProvider now supports credential files up to 64 KB by default through an increase of the processOutputLimit from 1024 bytes to 64000 bytes. + +## __AWS Security Token Service__ + - ### Features + - Support tagging for STS sessions and tag based access control for the STS APIs + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds two new APIs (DescribeInstanceTypes and DescribeInstanceTypeOfferings) that give customers access to instance type attributes and regional and zonal offerings. + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR adds support for concurrent step execution and cancelling running steps. Amazon EMR has added a new Outpost ARN field in the ListCluster and DescribeCluster API responses that is populated for clusters launched in an AWS Outpost subnet. 
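As a rough illustration of the `ProcessCredentialsProvider` change noted under 2.10.23 above, the sketch below configures the provider explicitly. This is only a sketch: the helper command path is a placeholder, the STS client is used purely as an example consumer, and the 64 KB limit simply mirrors the new default rather than being required.

```java
import software.amazon.awssdk.auth.credentials.ProcessCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.sts.StsClient;

public class ProcessCredentialsExample {
    public static void main(String[] args) {
        // External process that prints credentials; the path is a placeholder.
        ProcessCredentialsProvider credentials = ProcessCredentialsProvider.builder()
                .command("/usr/local/bin/credential-helper")
                .processOutputLimit(64_000) // mirrors the new 64 KB default described above
                .build();

        // Any service client can use the provider; STS is just an example here.
        try (StsClient sts = StsClient.builder()
                .region(Region.US_EAST_1)
                .credentialsProvider(credentials)
                .build()) {
            System.out.println(sts.getCallerIdentity().arn());
        }
    }
}
```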
+ +## __Amazon Forecast Service__ + - ### Features + - This release adds two key updates to existing APIs. 1. Amazon Forecast can now generate forecasts in any quantile using the optional parameter forecastTypes in the CreateForecast API and 2. You can get additional details (metrics and relevant error messages) on your AutoML runs using the DescribePredictor and GetAccuracyMetrics APIs. + +## __Amazon Rekognition__ + - ### Features + - This release adds enhanced face filtering support to the IndexFaces API operation, and introduces face filtering for CompareFaces and SearchFacesByImage API operations. + +## __Amazon Simple Notification Service__ + - ### Features + - Added documentation for the dead-letter queue feature. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Add RebootOption and LastNoRebootInstallOperationTime for DescribeInstancePatchStates and DescribeInstancePatchStatesForPatchGroup API + +## __Application Auto Scaling__ + - ### Features + - Update default endpoint for Application Auto Scaling. + +# __2.10.22__ __2019-11-21__ +## __AWS Amplify__ + - ### Features + - This release of AWS Amplify Console introduces support for backend environments. Backend environments are containers for AWS deployments. Each environment is a collection of AWS resources. + +## __AWS AppSync__ + - ### Features + - AppSync: AWS AppSync now supports the ability to add, configure, and maintain caching for your AWS AppSync GraphQL API. + +## __AWS Config__ + - ### Features + - AWS Config launches Custom Configuration Items. A new feature which allows customers to publish resource configuration for third-party resources, custom, or on-premises servers. + +## __AWS Glue__ + - ### Features + - This release adds support for Glue 1.0 compatible ML Transforms. + +## __AWSMarketplace Metering__ + - ### Features + - Documentation updates for the AWS Marketplace Metering Service. + +## __Amazon Connect Participant Service__ + - ### Features + - This release adds 5 new APIs: CreateParticipantConnection, DisconnectParticipant, GetTranscript, SendEvent, and SendMessage. For Amazon Connect chat, you can use them to programmatically perform participant actions on the configured Amazon Connect instance. Learn more here: https://docs.aws.amazon.com/connect-participant/latest/APIReference/Welcome.html + +## __Amazon Connect Service__ + - ### Features + - This release adds a new API: StartChatContact. You can use it to programmatically start a chat on the specified Amazon Connect instance. Learn more here: https://docs.aws.amazon.com/connect/latest/APIReference/Welcome.html + +## __Amazon DynamoDB__ + - ### Features + - With this release, you can convert an existing Amazon DynamoDB table to a global table by adding replicas in other AWS Regions. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for attaching AWS License Manager Configurations to Amazon Machine Image (AMI) using ImportImage API; and adds support for running different instance sizes on EC2 Dedicated Hosts + +## __Amazon Lex Model Building Service__ + - ### Features + - Amazon Lex now supports Sentiment Analysis + +## __Amazon Lex Runtime Service__ + - ### Features + - Amazon Lex now supports Sentiment Analysis + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - The release contains new API and API changes for AWS Systems Manager Explorer product. 
+ +## __Amazon Transcribe Service__ + - ### Features + - With this release, Amazon Transcribe now supports transcriptions from audio sources in Hebrew (he-IL), Swiss German (de-CH), Japanese (ja-JP), Turkish (tr-TR), Arabic-Gulf (ar-AE), Malay (ms-MY), Telugu (te-IN) + +# __2.10.21__ __2019-11-20__ +## __AWS Application Discovery Service__ + - ### Features + - New exception type for use with Migration Hub home region + +## __AWS CloudTrail__ + - ### Features + - 1. This release adds two new APIs, GetInsightSelectors and PutInsightSelectors, which let you configure CloudTrail Insights event delivery on a trail. An Insights event is a new type of event that is generated when CloudTrail detects unusual activity in your AWS account. In this release, only "ApiCallRateInsight" is a supported Insights event type. 2. This release also adds the new "ExcludeManagementEventSource" option to the existing PutEventSelectors API. This field currently supports only AWS Key Management Services. + +## __AWS CodeCommit__ + - ### Features + - This release adds support for creating pull request approval rules and pull request approval rule templates in AWS CodeCommit. This allows developers to block merges of pull requests, contingent on the approval rules being satisfied. + +## __AWS DataSync__ + - ### Features + - Update to configure task to run periodically on a schedule + +## __AWS Elemental MediaStore__ + - ### Features + - This release fixes a broken link in the SDK documentation. + +## __AWS Migration Hub__ + - ### Features + - New exception type for use with Migration Hub home region + +## __AWS Migration Hub Config__ + - ### Features + - AWS Migration Hub Config Service allows you to get and set the Migration Hub home region for use with AWS Migration Hub and Application Discovery Service + +## __AWS Storage Gateway__ + - ### Features + - The new DescribeAvailabilityMonitorTest API provides the results of the most recent High Availability monitoring test. The new StartAvailabilityMonitorTest API verifies the storage gateway is configured for High Availability monitoring. The new ActiveDirectoryStatus response element has been added to the DescribeSMBSettings and JoinDomain APIs to indicate the status of the gateway after the most recent JoinDomain operation. The new TimeoutInSeconds parameter of the JoinDomain API allows for the configuration of the timeout in which the JoinDomain operation must complete. + +## __Amazon Chime__ + - ### Features + - Adds APIs to create and manage meeting session resources for the Amazon Chime SDK + +## __Amazon Data Lifecycle Manager__ + - ### Features + - DLM now supports Fast Snapshot Restore. You can enable Fast Restore on snapshots created by DLM, provide the AZs and the number of snapshots to be enabled with this capability. + +## __Amazon EC2 Container Service__ + - ### Features + - Added support for CPU and memory task-level overrides on the RunTask and StartTask APIs. Added location information to Tasks. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release of Amazon Elastic Compute Cloud (Amazon EC2) introduces support for Amazon Elastic Block Store (Amazon EBS) fast snapshot restores. + - Upgrades to Instance Metadata Service version 2 (IMDS v2). With IMDS v2, a session token is used to make requests for EC2 instance metadata and credentials.
+ +## __Amazon FSx__ + - ### Features + - Announcing a Multi-AZ deployment type for Amazon FSx for Windows File Server, providing fully-managed Windows file storage with high availability and redundancy across multiple AWS Availability Zones. + +## __Amazon Kinesis Firehose__ + - ### Features + - With this release, Amazon Kinesis Data Firehose allows server side encryption with customer managed CMKs. Customer managed CMKs ("Customer Master Keys") are AWS Key Management Service (KMS) keys that are fully managed by the customer. With customer managed CMKs, customers can establish and maintain their key policies, IAM policies, rotating policies and add tags. For more information about AWS KMS and CMKs, please refer to: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html. Please refer to the following link to create CMKs: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html + +## __Amazon QuickSight__ + - ### Features + - Amazon QuickSight now supports programmatic creation and management of data sources, data sets, dashboards and templates with new APIs. Templates hold dashboard metadata, and can be used to create copies connected to the same or different dataset as required. Also included in this release are APIs for SPICE ingestions, fine-grained access control over AWS resources using AWS Identity and Access Management (IAM) policies, as well as AWS tagging. APIs are supported for both Standard and Enterprise Edition, with edition-specific support for specific functionality. + +## __Amazon Simple Storage Service__ + - ### Features + - This release introduces support for Amazon S3 Replication Time Control, a new feature of S3 Replication that provides a predictable replication time backed by a Service Level Agreement. S3 Replication Time Control helps customers meet compliance or business requirements for data replication, and provides visibility into the replication process with new Amazon CloudWatch Metrics. + +## __Amazon Transcribe Service__ + - ### Features + - With this release Amazon Transcribe enables alternative transcriptions so that you can see different interpretations of transcribed audio. + +# __2.10.20__ __2019-11-19__ +## __AWS CloudFormation__ + - ### Features + - This release of AWS CloudFormation StackSets enables users to detect drift on a stack set and the stack instances that belong to that stack set. + +## __AWS CodeBuild__ + - ### Features + - Add support for ARM and GPU-enhanced build environments and a new SSD-backed Linux compute type with additional CPU and memory in CodeBuild + +## __AWS Config__ + - ### Features + - AWS Config launches support for conformance packs. A conformance pack is a new resource type that allows you to package a collection of Config rules and remediation actions into a single entity. You can create and deploy conformance packs into your account or across all accounts in your organization + +## __AWS Identity and Access Management__ + - ### Features + - IAM reports the timestamp when a role's credentials were last used to make an AWS request. This helps you identify unused roles and remove them confidently from your AWS accounts. + +## __AWS IoT__ + - ### Features + - As part of this release, we are extending the capability of AWS IoT Rules Engine to send messages directly to customers' own web services/applications. Customers can now create topic rules with HTTP actions to route messages from IoT Core directly to URLs that they own.
Ownership is proved by creating and confirming topic rule destinations. + +## __AWS Lambda__ + - ### Features + - This release provides three new runtimes to support Node.js 12 (initially 12.13.0), Python 3.8 and Java 11. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for RunInstances to specify the metadata options for new instances; adds a new API, ModifyInstanceMetadataOptions, which lets you modify the metadata options for a running or stopped instance; and adds support for CreateCustomerGateway to specify a device name. + +## __Auto Scaling__ + - ### Features + - Amazon EC2 Auto Scaling now supports Instance Weighting and Max Instance Lifetime. Instance Weighting allows specifying the capacity units for each instance type included in the MixedInstancesPolicy and how they would contribute to your application's performance. Max Instance Lifetime allows specifying the maximum length of time that an instance can be in service. If any instances are approaching this limit, Amazon EC2 Auto Scaling gradually replaces them. + +## __Elastic Load Balancing__ + - ### Features + - This release allows forward actions on Application Load Balancers to route requests to multiple target groups, based on the weight you specify for each target group. + +# __2.10.19__ __2019-11-18__ +## __AWS CloudFormation__ + - ### Features + - This release introduces APIs for the CloudFormation Registry, a new service to submit and discover resource providers with which you can manage third-party resources natively in CloudFormation. + +## __AWS Cost Explorer Service__ + - ### Features + - add EstimatedOnDemandCostWithCurrentCommitment to GetSavingsPlansPurchaseRecommendationRequest API + +## __Amazon Pinpoint__ + - ### Features + - This release of the Amazon Pinpoint API introduces support for using and managing message templates for messages that are sent through the voice channel. It also introduces support for specifying default values for message variables in message templates. + +## __Amazon Relational Database Service__ + - ### Features + - Documentation updates for rds + +## __Amazon SageMaker Runtime__ + - ### Features + - Amazon SageMaker Runtime now supports a new TargetModel header to invoke a specific model hosted on multi model endpoints. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker now supports multi-model endpoints to host multiple models on an endpoint using a single inference container. + +## __Amazon Simple Storage Service__ + - ### Features + - Added support for S3 Replication for existing objects. This release allows customers who have requested and been granted access to replicate existing S3 objects across buckets. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - The release contains new API and API changes for AWS Systems Manager Explorer product. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Update default connectionMaxIdleTimeout of NettyNioAsyncClient to 5 seconds + +# __2.10.18__ __2019-11-15__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for DolbyVision encoding, and SCTE35 & ESAM insertion to DASH ISO EMSG. + +## __AWS SDK for Java v2__ + - ### Features + - When SdkException or one of its children is created without a 'message', inherit the message from the exception 'cause' (if any). This should reduce the chance of an exception being raised by the SDK with a null message. 
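As a tiny illustration of the `SdkException` message change noted directly above (a sketch only; the cause message here is made up):

```java
import software.amazon.awssdk.core.exception.SdkClientException;

public class ExceptionMessageExample {
    public static void main(String[] args) {
        // Build an exception with a cause but no explicit message; after this change,
        // getMessage() is expected to fall back to the cause's message instead of null.
        RuntimeException cause = new RuntimeException("connection reset by peer");
        SdkClientException exception = SdkClientException.builder()
                .cause(cause)
                .build();

        System.out.println(exception.getMessage()); // no longer null; derived from the cause
    }
}
```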
+ +## __Amazon Chime__ + - ### Features + - This release adds support for Chime Room Management APIs + +## __Amazon CloudWatch Logs__ + - ### Features + - Documentation updates for logs + +## __Amazon Cognito Identity Provider__ + - ### Features + - This release adds a new option in the User Pool to allow specifying sender's name in the emails sent by Amazon Cognito. This release also adds support to add SES Configuration Set to the emails sent by Amazon Cognito. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - You can now add tags while copying snapshots. Previously, a user had to first copy the snapshot and then add tags to the copied snapshot manually. Moving forward, you can specify the list of tags you wish to be applied to the copied snapshot as a parameter on the Copy Snapshot API. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Introducing Amazon EKS managed node groups, a new feature that lets you easily provision worker nodes for Amazon EKS clusters and keep them up to date using the Amazon EKS management console, CLI, and APIs. + +## __Amazon Elastic MapReduce__ + - ### Features + - Access to the cluster ARN makes it easier for you to author resource-level permissions policies in AWS Identity and Access Management. To simplify the process of obtaining the cluster ARN, Amazon EMR has added a new field containing the cluster ARN to all API responses that include the cluster ID. + +## __Amazon GuardDuty__ + - ### Features + - This release includes new operations related to findings export, including: CreatePublishingDestination, UpdatePublishingDestination, DescribePublishingDestination, DeletePublishingDestination and ListPublishingDestinations. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This release updates AWS Systems Manager Parameter Store documentation for the enhanced search capability. + +## __Amazon WorkSpaces__ + - ### Features + - Added APIs to register your directories with Amazon WorkSpaces and to modify directory details. + +## __Elastic Load Balancing__ + - ### Features + - Documentation-only change to the default value of the routing.http.drop_invalid_header_fields.enabled attribute. + +# __2.10.17__ __2019-11-14__ +## __AWSMarketplace Metering__ + - ### Features + - Added CustomerNotEntitledException in MeterUsage API for Container use case. + +## __Amazon Cognito Identity Provider__ + - ### Features + - This release adds a new setting at user pool client to prevent user existence related errors during authentication, confirmation, and password recovery related operations. This release also adds support to enable or disable specific authentication flows for a user pool client. + +## __Amazon Connect Service__ + - ### Features + - This release enhances the existing user management APIs and adds 3 new APIs - TagResource, UntagResource, and ListTagsForResource to support tagging Amazon Connect users, which facilitates more granular access controls for Amazon Connect users within an Amazon Connect instance. You can learn more about the new APIs here: https://docs.aws.amazon.com/connect/latest/APIReference/Welcome.html. + +## __Amazon Personalize__ + - ### Features + - Amazon Personalize: Adds ability to get batch recommendations by creating a batch inference job. 
+ +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Updates support for adding attachments to Systems Manager Automation documents + +# __2.10.16__ __2019-11-13__ +## __AWS Data Exchange__ + - ### Features + - Introducing AWS Data Exchange, a service that makes it easy for AWS customers to securely create, manage, access, and exchange data sets in the cloud. + +## __AWS IoT__ + - ### Features + - This release adds the custom fields definition support in the index definition for AWS IoT Fleet Indexing Service. Custom fields can be used as an aggregation field to run aggregations with both existing GetStatistics API and newly added GetCardinality, GetPercentiles APIs. GetStatistics will return all statistics (min/max/sum/avg/count...) with this release. For more information, please refer to our latest documentation: https://docs.aws.amazon.com/iot/latest/developerguide/iot-indexing.html + +## __Amazon CloudSearch__ + - ### Features + - Amazon CloudSearch domains let you require that all traffic to the domain arrive over HTTPS. This security feature helps you block clients that send unencrypted requests to the domain. + +## __Amazon Data Lifecycle Manager__ + - ### Features + - You can now add tags to a lifecycle policy in Data Lifecycle Manager (DLM). Tags allow you to categorize your policies in different ways, such as by department, purpose or owner. You can also enable resource level permissions based on tags to set access control on ability to modify or delete a tagged policy. + +## __Amazon Simple Email Service__ + - ### Features + - This is the first release of version 2 of the Amazon SES API. You can use this API to configure your Amazon SES account, and to send email. This API extends the functionality that exists in the previous version of the Amazon SES API. + +# __2.10.15__ __2019-11-12__ +## __AWS CodePipeline__ + - ### Features + - AWS CodePipeline now supports the use of variables in action configuration. + +## __AWS Marketplace Catalog Service__ + - ### Features + - This is the first release for the AWS Marketplace Catalog service which allows you to list, describe and manage change requests on your published entities on AWS Marketplace. + +## __Amazon DynamoDB__ + - ### Features + - Amazon DynamoDB enables you to restore your data to a new DynamoDB table using a point-in-time or on-demand backup. You now can modify the settings on the new restored table. Specifically, you can exclude some or all of the local and global secondary indexes from being created with the restored table. In addition, you can change the billing mode and provisioned capacity settings. + +## __Amazon Transcribe Service__ + - ### Features + - With this release, Amazon Transcribe now supports transcriptions from audio sources in Welsh English (en-WL), Scottish English(en-AB), Irish English(en-IE), Farsi(fa-IR), Tamil(ta-IN), Indonesian(id-ID), Portuguese (pt-PT), Dutch(nl-NL). + +## __Elastic Load Balancing__ + - ### Features + - You can configure your Application Load Balancer to either drop invalid header fields or forward them to targets. + +# __2.10.14__ __2019-11-11__ +## __AWS CloudFormation__ + - ### Features + - The Resource Import feature enables customers to import existing AWS resources into new or existing CloudFormation Stacks. + +## __AWS Cost Explorer Service__ + - ### Features + - This launch provides customers with access to GetCostAndUsageWithResources API. 
+ +## __Amazon Polly__ + - ### Features + - Add `PollyPresigner` which enables support for presigning `SynthesizeSpeech` requests. + +# __2.10.13__ __2019-11-08__ +## __Amazon Cognito Identity__ + - ### Features + - This release adds support for disabling classic flow. + +## __Amazon EC2 Container Registry__ + - ### Features + - This release contains ticket fixes for Amazon ECR. + +# __2.10.12__ __2019-11-07__ +## __AWS S3__ + - ### Features + - Added support for presignPutObject in S3Presigner. + +## __AWS SSO OIDC__ + - ### Features + - This is an initial release of AWS Single Sign-On OAuth device code authorization service. + +## __AWS Single Sign-On__ + - ### Features + - This is an initial release of AWS Single Sign-On (SSO) end-user access. This release adds support for accessing AWS accounts assigned in AWS SSO using short term credentials. + +## __Amazon Comprehend__ + - ### Features + - This release adds new languages (ar, hi, ko, ja, zh, zh-TW) for Amazon Comprehend's DetectSentiment, DetectEntities, DetectKeyPhrases, BatchDetectSentiment, BatchDetectEntities and BatchDetectKeyPhrases APIs + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - AWS Systems Manager Session Manager target length increased to 400. + +## __Netty NIO HTTP Client__ + - ### Features + - Switch from setting the absolute URI in HTTP requests with no `Host` header to setting the absolute request path and query parameters and a `Host` header. + +# __2.10.11__ __2019-11-06__ +## __AWS SDK for Java v2__ + - ### Features + - Added the web identity credentials provider to the default credential chain + +## __AWS Savings Plans__ + - ### Features + - This is the first release of Savings Plans, a new flexible pricing model that offers low prices on Amazon EC2 and AWS Fargate usage. + +# __2.10.10__ __2019-11-06__ +## __AWS Budgets__ + - ### Features + - Documentation updates for budgets to track Savings Plans utilization and coverage + +## __AWS CodeBuild__ + - ### Features + - Add support for Build Number, Secrets Manager and Exported Environment Variables. + +## __AWS Cost Explorer Service__ + - ### Features + - This launch provides customers with access to Savings Plans management APIs. + +## __AWS Savings Plans__ + - ### Features + - This is the first release of Savings Plans, a new flexible pricing model that offers low prices on Amazon EC2 and AWS Fargate usage. + +## __AWS Signer__ + - ### Features + - This release adds support for tagging code-signing profiles in AWS Signer. + +## __Amazon Elastic File System__ + - ### Features + - EFS customers can select a lifecycle policy that automatically moves files that have not been accessed for 7 days into the EFS Infrequent Access (EFS IA) storage class. EFS IA provides price/performance that is cost-optimized for files that are not accessed every day. + +# __2.10.9__ __2019-11-05__ +## __AWS CodeStar Notifications__ + - ### Features + - This release adds a notification manager for events in repositories, build projects, deployments, and pipelines. You can now configure rules and receive notifications about events that occur for resources. Each notification includes a status message as well as a link to the resource (repository, build project, deployment application, or pipeline) whose event generated the notification.
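A minimal sketch of the `presignPutObject` support noted under 2.10.12 above; the bucket name, object key, and expiry duration are placeholder values:

```java
import java.time.Duration;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest;
import software.amazon.awssdk.services.s3.presigner.model.PutObjectPresignRequest;

public class PresignPutObjectExample {
    public static void main(String[] args) {
        // Bucket and key are placeholders.
        PutObjectRequest putObjectRequest = PutObjectRequest.builder()
                .bucket("example-bucket")
                .key("reports/daily.csv")
                .build();

        try (S3Presigner presigner = S3Presigner.create()) {
            PresignedPutObjectRequest presigned = presigner.presignPutObject(
                    PutObjectPresignRequest.builder()
                            .signatureDuration(Duration.ofMinutes(15))
                            .putObjectRequest(putObjectRequest)
                            .build());

            // Anyone holding this URL can upload the object until the signature expires.
            System.out.println(presigned.url());
        }
    }
}
```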
+ +## __Amazon Relational Database Service__ + - ### Features + - Documentation updates for Amazon RDS + +# __2.10.8__ __2019-11-04__ +## __AWS RoboMaker__ + - ### Features + - RoboMaker Fleet Management launches a feature to verify your robot is ready to download and install the new robot application using a download condition file, which is a script run on the robot prior to downloading the new deployment. + +## __Amazon DynamoDB Accelerator (DAX)__ + - ### Features + - Documentation updates for dax + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for ec2 + +# __2.10.7__ __2019-11-01__ +## __AWS CloudTrail__ + - ### Features + - This release adds two new APIs, GetTrail and ListTrails, and support for adding tags when you create a trail by using a new TagsList parameter on CreateTrail operations. + +## __AWS Database Migration Service__ + - ### Features + - This release contains task timeline attributes in replication task statistics. This release also adds a note to the documentation for the CdcStartPosition task request parameter. This note describes how to enable the use of native CDC start points for a PostgreSQL source by setting the new slotName extra connection attribute on the source endpoint to the name of an existing logical replication slot. + +## __Amazon Pinpoint__ + - ### Features + - This release of the Amazon Pinpoint API introduces support for using and managing journeys, and querying analytics data for journeys. + +# __2.10.6__ __2019-10-31__ +## __AWS Amplify__ + - ### Features + - This release of AWS Amplify Console introduces support for Web Previews. This feature allows users to create ephemeral branch deployments from pull request submissions made to a connected repository. A pull-request preview deploys every pull request made to your Git repository to a unique preview URL. + +## __AWS Support__ + - ### Features + - The status descriptions for TrustedAdvisorCheckRefreshStatus have been updated + +## __Amazon Simple Storage Service__ + - ### Features + - S3 Inventory now supports a new field 'IntelligentTieringAccessTier' that reports the access tier (frequent or infrequent) of objects stored in Intelligent-Tiering storage class. + +# __2.10.5__ __2019-10-30__ +## __Amazon ElastiCache__ + - ### Features + - Amazon ElastiCache for Redis 5.0.5 now allows you to modify authentication tokens by setting and rotating new tokens. You can now modify active tokens while in use, or add brand-new tokens to existing encryption-in-transit enabled clusters that were previously set up without authentication tokens. This is a two-step process that allows you to set and rotate the token without interrupting client requests. + +# __2.10.4__ __2019-10-29__ +## __AWS Cloud9__ + - ### Features + - Added CREATING and CREATE_FAILED environment lifecycle statuses. + +## __Amazon AppStream__ + - ### Features + - Adds support for providing domain names that can embed streaming sessions + +# __2.10.3__ __2019-10-28__ +## __Amazon Simple Storage Service__ + - ### Features + - Adding support in SelectObjectContent for scanning a portion of an object specified by a scan range. + +# __2.10.2__ __2019-10-28__ +## __AWS Transfer for SFTP__ + - ### Features + - This release adds logical directories support to your AWS SFTP server endpoint, so you can now create logical directory structures mapped to Amazon Simple Storage Service (Amazon S3) bucket paths for users created and stored within the service.
Amazon S3 bucket names and paths can now be hidden from AWS SFTP users, providing an additional level of privacy to meet security requirements. You can lock down your SFTP users' access to designated folders (commonly referred to as 'chroot'), and simplify complex folder structures for data distribution through SFTP without replicating files across multiple users. + +## __Amazon EC2 Container Registry__ + - ### Features + - This release of Amazon Elastic Container Registry Service (Amazon ECR) introduces support for image scanning. This identifies the software vulnerabilities in the container image based on the Common Vulnerabilities and Exposures (CVE) database. + +## __Amazon ElastiCache__ + - ### Features + - Amazon ElastiCache adds support for migrating Redis workloads hosted on Amazon EC2 into ElastiCache by syncing the data between the source Redis cluster and target ElastiCache for Redis cluster in real time. For more information, see https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/migrate-to-elasticache.html. + +# __2.10.1__ __2019-10-25__ +## __Amazon Transcribe Streaming Service__ + - ### Features + - With this release, Amazon Transcribe Streaming now supports audio sources in Australian English (en-AU). + +# __2.10.0__ __2019-10-24__ +## __AWS App Mesh__ + - ### Features + - This release adds support for the gRPC and HTTP/2 protocols. + +## __AWS SDK for Java v2__ + - ### Features + - Updating to use Jackson 2.10.0 and Netty 4.1.42.Final + +## __Amazon Chime__ + - ### Features + - This release introduces Voice Connector PDX region and defaults previously created Voice Connectors to IAD. You can create Voice Connector Groups and add region specific Voice Connectors to direct telephony traffic across AWS regions in case of regional failures. With this release you can add phone numbers to Voice Connector Groups and can bulk move phone numbers between Voice Connectors, between Voice Connector and Voice Connector Groups and between Voice Connector Groups. Voice Connector now supports additional settings to enable SIP Log capture. This is in addition to the launch of Voice Connector CloudWatch metrics in this release. This release also supports assigning outbound calling name (CNAM) to AWS account and individual phone numbers assigned to Voice Connectors. * Voice Connector now supports a setting to enable real time audio streaming delivered via Kinesis Audio streams. Please note that recording Amazon Chime Voice Connector calls with this feature may be subject to laws or regulations regarding the recording of telephone calls and other electronic communications. AWS customers and their end users have the responsibility to comply with all applicable laws regarding the recording, including properly notifying all participants in a recorded session or to a recorded communication that the session or communication is being recorded and obtaining their consent. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release updates CreateFpgaImage to support tagging FPGA images on creation + +## __Amazon GameLift__ + - ### Features + - Amazon GameLift offers expanded hardware options for game hosting: Custom game builds can use the Amazon Linux 2 operating system, and fleets for both custom builds and Realtime servers can now use C5, M5, and R5 instance types.
+ +## __Amazon SageMaker Service__ + - ### Features + - Adds support for the new family of Elastic Inference Accelerators (eia2) for SageMaker Hosting and Notebook Services + +## __Netty NIO Http Client__ + - ### Bugfixes + - Fix a race condition where the channel is closed right after all content is buffered, causing `server failed to complete the response` error by adding a flag when `LastHttpContentHandler` is received. + +# __2.9.26__ __2019-10-23__ +## __AWS Security Token Service__ + - ### Features + - AWS Security Token Service (STS) now supports a regional configuration flag to make the client respect the region without the need for the endpoint parameter. + +## __Amazon Connect Service__ + - ### Features + - This release adds 4 new APIs ListQueues, ListPhoneNumbers, ListContactFlows, and ListHoursOfOperations, which can be used to programmatically list Queues, PhoneNumbers, ContactFlows, and HoursOfOperations configured for an Amazon Connect instance respectively. You can learn more about the new APIs here: https://docs.aws.amazon.com/connect/latest/APIReference/Welcome.html. + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds new female voices: US Spanish - Lupe and Brazilian Portuguese - Camila; both voices are available in Standard and Neural engine. + +# __2.9.25__ __2019-10-22__ +## __AWS IoT Events__ + - ### Features + - Add support for new serial evaluation method for events in a detector model. + +## __AWS OpsWorks CM__ + - ### Features + - AWS OpsWorks for Chef Automate (OWCA) now allows customers to use a custom domain and respective certificate, for their AWS OpsWorks For Chef Automate servers. Customers can now provide a CustomDomain, CustomCertificate and CustomPrivateKey in CreateServer API to configure their Chef Automate servers with a custom domain and certificate. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Better handle `GOAWAY` messages from the remote endpoint. + +# __2.9.24__ __2019-10-18__ +## __Amazon CloudWatch__ + - ### Features + - New Period parameter added to MetricDataQuery structure. + +## __Netty NIO Http Client__ + - ### Bugfixes + - Update `HealthCheckedChannelPool` to check `KEEP_ALIVE` when acquiring a channel from the pool to avoid soon-to-be inactive channels being picked up by a new request. This should reduce the frequency of `IOException: Server failed to complete response` errors. See [#1380](https://github.com/aws/aws-sdk-java-v2/issues/1380), [#1466](https://github.com/aws/aws-sdk-java-v2/issues/1466). + +# __2.9.23__ __2019-10-17__ +## __AWS Batch__ + - ### Features + - Adding support for Compute Environment Allocation Strategies + +## __Amazon Relational Database Service__ + - ### Features + - Amazon RDS now supports Amazon RDS on VMware with the introduction of APIs related to Custom Availability Zones and Media installation. + +# __2.9.22__ __2019-10-16__ +## __AWS Marketplace Commerce Analytics__ + - ### Features + - add 2 more values for the supporting sections - age of past due funds + uncollected funds breakdown + +## __AWS RoboMaker__ + - ### Features + - This release adds support for ROS2 Dashing as a beta feature + +## __Managed Streaming for Kafka__ + - ### Features + - AWS MSK has added support for adding brokers to a cluster. 
+ +# __2.9.21__ __2019-10-15__ +## __Amazon Kinesis Video Streams Archived Media__ + - ### Features + - Add ON_DISCONTINUITY mode to the GetHLSStreamingSessionURL API + +# __2.9.20__ __2019-10-14__ +## __Amazon Personalize__ + - ### Features + - AWS Personalize: Adds ability to create a solution version using FULL or UPDATE training mode + +## __Amazon WorkSpaces__ + - ### Features + - Documentation updates for WorkSpaces + +# __2.9.19__ __2019-10-11__ +## __AWS Greengrass__ + - ### Features + - Greengrass OTA service supports Raspbian/Armv6l platforms. + +# __2.9.18__ __2019-10-10__ +## __AWS IoT Analytics__ + - ### Features + - Add `completionTime` to API call ListDatasetContents. + +## __AWS SDK for Java v2__ + - ### Features + - Implement arn parser functions in `arns` module. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - New EC2 M5n, M5dn, R5n, R5dn instances with 100 Gbps network performance and Elastic Fabric Adapter (EFA) for ultra low latency; New A1.metal bare metal instance powered by AWS Graviton Processors + +## __Amazon Lex Runtime Service__ + - ### Features + - Amazon Lex now supports Session API checkpoints + +## __Firewall Management Service__ + - ### Features + - Firewall Manager now supports Amazon VPC security groups, making it easier to configure and manage security groups across multiple accounts from a single place. + +# __2.9.17__ __2019-10-09__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for Dolby Atmos encoding, up to 36 outputs, accelerated transcoding with frame capture and preferred acceleration feature. + +## __AWS SDK for Java v2__ + - ### Features + - Expose instance signature through EC2MetadataUtils + + - ### Bugfixes + - Fix the implementations of `equals(Object)` and `hashCode()` for `DefaultSdkAutoConstructList` and `DefaultSdkAutoConstructMap` so that they follow the Java `equals` and `hashCode` contract. In addition, ensure that these implementations' `toString()` methods return nicely readable results. Fixes [#1445](https://github.com/aws/aws-sdk-java-v2/issues/1445) + +## __Amazon ElastiCache__ + - ### Features + - Amazon ElastiCache now allows you to apply available service updates on demand to your Memcached and Redis Cache Clusters. Features included: (1) Access to the list of applicable service updates and their priorities. (2) Service update monitoring and regular status updates. (3) Recommended apply-by-dates for scheduling the service updates. (4) Ability to stop and later re-apply updates. For more information, see https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Self-Service-Updates.html and https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Self-Service-Updates.html + +## __Managed Streaming for Kafka__ + - ### Features + - Updated documentation for Amazon Managed Streaming for Kafka service. + +# __2.9.16__ __2019-10-08__ +## __AWS DataSync__ + - ### Features + - Add Sync options to enable/disable TaskQueueing + +## __AWS Organizations__ + - ### Features + - Documentation updates for organizations + +## __AWS SDK for Java v2__ + - ### Features + - EC2MetadataUtils: add marketplaceProductCodes inside InstanceInfo's POJO + +## __Amazon EventBridge__ + - ### Features + - Documentation updates for Amazon EventBridge. + +## __Amazon Kinesis Firehose__ + - ### Features + - With this release, you can use Amazon Kinesis Firehose delivery streams to deliver streaming data to Amazon Elasticsearch Service version 7.x clusters. 
For technical documentation, look for CreateDeliveryStream operation in Amazon Kinesis Firehose API reference. + +## __Amazon S3 Control__ + - ### Features + - Adds support for the Amazon S3 Control service to the SDK. + +# __2.9.15__ __2019-10-07__ +## __AWS Direct Connect__ + - ### Features + - This release adds a service provider field for physical connection creation and provides a list of available partner providers for each Direct Connect location. + +## __AWS Glue__ + - ### Features + - AWS Glue now provides ability to use custom certificates for JDBC Connections. + +## __Amazon Import/Export Snowball__ + - ### Features + - AWS Snowball Edge now allows you to perform an offline update to the software of your Snowball Edge device when your device is not connected to the internet. Previously, updating your Snowball Edge's software required that the device be connected to the internet or be sent back to AWS. Now, you can keep your Snowball Edge software up to date even if your device(s) cannot connect to the internet, or are required to run in an air-gapped environment. To complete offline updates, download the software update from a client machine with connection to the internet using the AWS Command Line Interface (CLI). Then, have the Snowball Edge device download and install the software update using the Snowball Edge device API. For more information about offline updates, visit the Snowball Edge documentation page. + +## __Amazon Kinesis Firehose__ + - ### Features + - Amazon Kinesis Data Firehose now allows delivering data to Elasticsearch clusters set up in a different AWS account than the Firehose AWS account. For technical documentation, look for ElasticsearchDestinationConfiguration in the Amazon Kinesis Firehose API reference. + +## __Amazon Pinpoint__ + - ### Features + - This release of the Amazon Pinpoint API introduces support for using and managing message templates. + +## __Amazon Pinpoint Email Service__ + - ### Features + - This release of the Amazon Pinpoint Email API introduces support for using and managing message templates. + +# __2.9.14__ __2019-10-04__ +## __AWS Elemental MediaPackage__ + - ### Features + - New Harvest Job APIs to export segment-accurate content windows from MediaPackage Origin Endpoints to S3. See https://docs.aws.amazon.com/mediapackage/latest/ug/harvest-jobs.html for more info + +## __Amazon CloudWatch__ + - ### Bugfixes + - Add cloudwatch specific http configurations, specifically reducing `connectionMaxIdleTime`. Related to [#1380](https://github.com/aws/aws-sdk-java-v2/issues/1380) + +## __Amazon Cognito Identity Provider__ + - ### Features + - This release adds ClientMetadata input parameter to multiple Cognito User Pools operations, making this parameter available to the customer configured lambda triggers as applicable. + +## __Amazon S3__ + - ### Bugfixes + - Add s3 specific http configurations, specifically reducing `connectionMaxIdleTime`. Related to [#1122](https://github.com/aws/aws-sdk-java-v2/issues/1122) + +## __Amazon S3 Control__ + - ### Features + - Adds support for the Amazon S3 Control service to the SDK. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Documentation updates for Systems Manager / StartSession. + +# __2.9.13__ __2019-10-03__ +## __AWS Device Farm__ + - ### Features + - Documentation updates for devicefarm + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release allows customers to purchase regional EC2 RIs on a future date. 
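The 2.9.14 bugfix entries above describe service-specific tuning of `connectionMaxIdleTime` inside the SDK. The sketch below shows how a caller could apply the same kind of tuning on their own client; the five-second value and the use of the S3 sync client with the Apache-based HTTP client are illustrative choices, not recommendations from this release:

```java
import java.time.Duration;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.services.s3.S3Client;

public class IdleConnectionTuningExample {
    public static void main(String[] args) {
        // Explicitly cap how long pooled connections may sit idle before being reused.
        try (S3Client s3 = S3Client.builder()
                .httpClientBuilder(ApacheHttpClient.builder()
                        .connectionMaxIdleTime(Duration.ofSeconds(5)))
                .build()) {
            s3.listBuckets().buckets().forEach(b -> System.out.println(b.name()));
        }
    }
}
```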
+ +## __Amazon Elasticsearch Service__ + - ### Features + - Amazon Elasticsearch Service now supports configuring additional options for domain endpoint, such as whether to require HTTPS for all traffic. + +## __Application Auto Scaling__ + - ### Features + - Documentation updates for Application Auto Scaling + +# __2.9.12__ __2019-10-02__ +## __Amazon Lightsail__ + - ### Features + - This release adds support for the automatic snapshots add-on for instances and block storage disks. + +# __2.9.11__ __2019-10-01__ +## __Amazon DocumentDB with MongoDB compatibility__ + - ### Features + - This release provides support for describe and modify CA certificates. + +# __2.9.10__ __2019-09-30__ +## __AWS WAF__ + - ### Features + - Lowering the threshold for Rate Based rule from 2000 to 100. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds support for creating a Read Replica with Active Directory domain information. This release updates RDS API to indicate whether an OrderableDBInstanceOption supports Kerberos Authentication. + +## __AmazonMQ__ + - ### Features + - Amazon MQ now includes the ability to scale your brokers by changing the host instance type. See the hostInstanceType property of UpdateBrokerInput (https://docs.aws.amazon.com/amazon-mq/latest/api-reference/brokers-broker-id.html#brokers-broker-id-model-updatebrokerinput), and pendingHostInstanceType property of DescribeBrokerOutput (https://docs.aws.amazon.com/amazon-mq/latest/api-reference/brokers-broker-id.html#brokers-broker-id-model-describebrokeroutput). + +# __2.9.9__ __2019-09-27__ +## __AWS Amplify__ + - ### Features + - This release adds access logs APIs and artifact APIs for AWS Amplify Console. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Update the pool size for default async future completion executor service. See [#1251](https://github.com/aws/aws-sdk-java-v2/issues/1251), [#994](https://github.com/aws/aws-sdk-java-v2/issues/994) + +## __Amazon EC2 Container Service__ + - ### Features + - This release of Amazon Elastic Container Service (Amazon ECS) removes FirelensConfiguration from the DescribeTask output during the FireLens public preview. + +# __2.9.8__ __2019-09-26__ +## __AWS CodePipeline__ + - ### Features + - Documentation updates for CodePipeline + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This release updates the AWS Systems Manager Parameter Store PutParameter and LabelParameterVersion APIs to return the "Tier" of parameter created/updated and the "parameter version" labeled respectively. + +# __2.9.7__ __2019-09-25__ +## __AWS Database Migration Service__ + - ### Features + - This release adds a new DeleteConnection API to delete the connection between a replication instance and an endpoint. It also adds an optional S3 setting to specify the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. + +## __AWS Global Accelerator__ + - ### Features + - API Update for AWS Global Accelerator to support for DNS aliasing. + +## __Amazon SageMaker Service__ + - ### Features + - Enable G4D and R5 instances in SageMaker Hosting Services + +# __2.9.6__ __2019-09-24__ +## __AWS Comprehend Medical__ + - ### Features + - Use Amazon Comprehend Medical to analyze medical text stored in the specified Amazon S3 bucket. Use the console to create and manage batch analysis jobs, or use the batch APIs to detect both medical entities and protected health information (PHI). 
The batch APIs start, stop, list, and retrieve information about batch analysis jobs. This release also includes DetectEntitiesV2 operation which returns the Acuity and Direction entities as attributes instead of types. + +## __AWS DataSync__ + - ### Features + - Added S3StorageClass, OverwriteMode sync option, and ONLY_FILES_TRANSFERRED setting for the VerifyMode sync option. + +## __Amazon Transcribe Service__ + - ### Features + - With this update Amazon Transcribe enables you to provide an AWS KMS key to encrypt your transcription output. + +# __2.9.5__ __2019-09-23__ +## __AWS RDS DataService__ + - ### Features + - RDS Data API now supports Amazon Aurora Serverless PostgreSQL databases. + +## __Amazon Redshift__ + - ### Features + - Adds API operation DescribeNodeConfigurationOptions and associated data structures. + +# __2.9.4__ __2019-09-20__ +## __AWS Greengrass__ + - ### Features + - Greengrass OTA service now returns the updated software version in the PlatformSoftwareVersion parameter of a CreateSoftwareUpdateJob response + +## __Amazon Elastic Compute Cloud__ + - ### Features + - G4 instances are Amazon EC2 instances based on NVIDIA T4 GPUs and are designed to provide cost-effective machine learning inference for applications, like image classification, object detection, recommender systems, automated speech recognition, and language translation. G4 instances are also a cost-effective platform for building and running graphics-intensive applications, such as remote graphics workstations, video transcoding, photo-realistic design, and game streaming in the cloud. To get started with G4 instances visit https://aws.amazon.com/ec2/instance-types/g4. + +## __Amazon Relational Database Service__ + - ### Features + - Add a new LeaseID output field to DescribeReservedDBInstances, which shows the unique identifier for the lease associated with the reserved DB instance. AWS Support might request the lease ID for an issue related to a reserved DB instance. + +## __Amazon WorkSpaces__ + - ### Features + - Adds the WorkSpaces restore feature + +# __2.9.3__ __2019-09-19__ +## __AWS Glue__ + - ### Features + - AWS Glue DevEndpoints now supports GlueVersion, enabling you to choose Apache Spark 2.4.3 (in addition to Apache Spark 2.2.1). In addition to supporting the latest version of Spark, you will also have the ability to choose between Python 2 and Python 3. + +## __AWS MediaConnect__ + - ### Features + - When you grant an entitlement, you can now specify the percentage of the entitlement data transfer that you want the subscriber to be responsible for. + +## __Amazon EC2 Container Service__ + - ### Features + - This release of Amazon Elastic Container Service (Amazon ECS) introduces support for container image manifest digests. This enables you to identify all tasks launched using a container image pulled from ECR in order to correlate what was built with where it is running. + +# __2.9.2__ __2019-09-18__ +## __AWS Resource Access Manager__ + - ### Features + - AWS RAM provides a new ListPendingInvitationResources API action that lists the resources in a resource share that is shared with you but that the invitation is still pending for + +## __AWS WAF Regional__ + - ### Features + - Lowering the threshold for Rate Based rule from 2000 to 100. + +## __Amazon API Gateway__ + - ### Features + - Amazon API Gateway simplifies accessing PRIVATE APIs by allowing you to associate one or more Amazon Virtual Private Cloud (VPC) Endpoints to a private API. 
API Gateway will create and manage DNS alias records necessary for easily invoking the private APIs. With this feature, you can leverage private APIs in web applications hosted within your VPCs. + +# __2.9.1__ __2019-09-17__ +## __AWS Identity and Access Management__ + - ### Features + - Documentation updates for iam + +## __Amazon Athena__ + - ### Features + - This release adds DataManifestLocation field indicating the location and file name of the data manifest file. Users can get a list of files that the Athena query wrote or intended to write from the manifest file. + +## __Amazon Personalize__ + - ### Features + - [Personalize] Adds trainingHours to solutionVersion properties. + +# __2.9.0__ __2019-09-16__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for multi-DRM SPEKE with CMAF outputs, MP3 ingest, and options for improved video quality. + +## __AWS SDK for Java v2__ + - ### Features + - Bump minor version to `2.9.0-SNAPSHOT` because of [#1413](https://github.com/aws/aws-sdk-java-v2/issues/1413). + - Updating dependencies versions: jackson 2.9.8 -> 2.9.9, slf4j 1.7.35 -> 1.7.38, netty 4.1.39.Final -> 4.1.41.Final (contains the fix for the performance regression in 4.1.39) + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - This release lets customers add tags to an Amazon EKS cluster. These tags can be used to control access to the EKS API for managing the cluster using IAM. The Amazon EKS TagResource API allows customers to associate tags with their cluster. Customers can list tags for a cluster using the ListTagsForResource API and remove tags from a cluster with the UntagResource API. Note: tags are specific to the EKS cluster resource, they do not propagate to other AWS resources used by the cluster. + +# __2.8.7__ __2019-09-12__ +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports High Efficiency Video Coding (HEVC) for standard-definition (SD), high-definition (HD), and ultra-high-definition (UHD) encoding with HDR support. Encoding with HEVC offers a number of advantages. While UHD video requires an advanced codec beyond H.264 (AVC), high frame rate (HFR) or High Dynamic Range (HDR) content in HD also benefits from HEVC's advancements. In addition, benefits can be achieved with HD and SD content even if HDR and HFR are not needed. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Fix for FleetActivityStatus and FleetStateCode enum + +## __Amazon WorkMail Message Flow__ + - ### Features + - This release allows customers to access email messages as they flow to and from Amazon WorkMail. + +## __Elastic Load Balancing__ + - ### Features + - Documentation updates for elasticloadbalancingv2: This release adds support for TLS SNI on Network Load Balancers + +# __2.8.6__ __2019-09-11__ +## __AWS Config__ + - ### Features + - Adding input validation for the OrganizationConfigRuleName string. + +## __AWS MediaConnect__ + - ### Features + - This release adds support for the RIST protocol on sources and outputs. + +## __AWS Step Functions__ + - ### Features + - Fixing letter case in Map history event details to be small case + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for new data fields and log format in VPC flow logs.
+ +## __Amazon Relational Database Service__ + - ### Features + - This release allows customers to specify a custom parameter group when creating a Read Replica, for DB engines which support this feature. + +## __Amazon Simple Email Service__ + - ### Features + - Updated API documentation to correct broken links, and to update content based on customer feedback. + +# __2.8.5__ __2019-09-10__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix marshalling for models with xml attribute. See [#1182](https://github.com/aws/aws-sdk-java-v2/issues/1182) + +## __AWS Storage Gateway__ + - ### Features + - The CloudWatchLogGroupARN parameter of the UpdateGatewayInformation API allows for configuring the gateway to use a CloudWatch log-group where Storage Gateway health events will be logged. + +# __2.8.4__ __2019-09-09__ +## __AWS App Mesh__ + - ### Features + - This release adds support for http retry policies. + +## __AWS Marketplace Commerce Analytics__ + - ### Features + - Add FDP+FPS (monthly_revenue_field_demonstration_usage + monthly_revenue_flexible_payment_schedule) to Marketplace Commerce Analytics Service + +## __AWS RoboMaker__ + - ### Features + - Support for Connectivity to Simulation. When you need to interact with the applications in your simulation job, you can connect to your robot application or simulation application with port forwarding. When you configure port forwarding, traffic will be forwarded from the simulation job port to the application port. Port forwarding makes it easy to connect with tools such as ROS Bridge and other tools. This can be useful when you want to debug or run custom tools to interact with your applications. + +## __Amazon AppStream__ + - ### Features + - IamRoleArn support in CreateFleet, UpdateFleet, CreateImageBuilder APIs + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release expands Site-to-Site VPN tunnel options to allow customers to restrict security algorithms and configure timer settings for VPN connections. Customers can specify these new options while creating new VPN connections, or they can modify the tunnel options on existing connections using a new API. + +## __Amazon QLDB__ + - ### Features + - (New Service) Amazon QLDB is a fully managed ledger database that provides a transparent, immutable, and cryptographically verifiable transaction log owned by a central trusted authority. Amazon QLDB is a new class of serverless database that eliminates the need to engage in the complex development effort of building your own ledger-like applications and it automatically scales to support the demands of your application. Introduces Amazon QLDB API operations needed for managing Amazon QLDB ledgers. This includes the ability to manage Amazon QLDB ledgers, cryptographically verify documents, and export the journal in a ledger. + +## __Amazon QLDB Session__ + - ### Features + - (New Service) Amazon QLDB is a fully managed ledger database that provides a transparent, immutable, and cryptographically verifiable transaction log owned by a central trusted authority. Amazon QLDB is a new class of serverless database that eliminates the need to engage in the complex development effort of building your own ledger-like applications and it automatically scales to support the demands of your application. Introduces Amazon QLDB API operations needed for interacting with data in Amazon QLDB ledgers. 
+ +# __2.8.3__ __2019-09-06__ +## __Amazon Kinesis Analytics__ + - ### Features + - Documentation updates for kinesisanalytics + +# __2.8.2__ __2019-09-05__ +## __AWS Config__ + - ### Features + - AWS Config now includes the option for marking RemediationConfigurations as automatic, removing the need to call the StartRemediationExecution API. Manual control over resource execution rate is also included, and RemediationConfigurations are now ARN addressable. Exceptions to exclude account resources from being remediated can be configured with the new PutRemediationExceptions, DescribeRemediationExceptions, and DeleteRemediationExceptions APIs. + +# __2.8.1__ __2019-09-04__ +## __AWS Step Functions__ + - ### Features + - Added support for new history events + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Amazon EKS DescribeCluster API returns a new OIDC issuer field that can be used to create an OIDC identity provider for the IAM for Service Accounts feature. + +## __Amazon Transcribe Service__ + - ### Features + - MediaFormat is now optional for StartTranscriptionJob API. + +# __2.8.0__ __2019-09-03__ +## __AWS Resource Groups Tagging API__ + - ### Features + - Documentation updates for resourcegroupstaggingapi + +## __AWS SDK for Java v2__ + - ### Features + - Bump minor version to 2.8.0-SNAPSHOT because of [#1391](https://github.com/aws/aws-sdk-java-v2/issues/1391). + - Upgrade Netty version to 4.1.39.Final, netty reactive streams version to 2.0.3, netty open ssl version to 2.0.25.Final + +## __Amazon EC2 Container Service__ + - ### Features + - This release of Amazon Elastic Container Service (Amazon ECS) introduces support for attaching Amazon Elastic Inference accelerators to your containers. This enables you to run deep learning inference workloads with hardware acceleration in a more efficient way. + +## __Amazon GameLift__ + - ### Features + - You can now make use of PKI resources to provide more secure connections between your game clients and servers. To learn more, please refer to the public Amazon GameLift documentation. + +# __2.7.36__ __2019-08-30__ +## __Amazon EC2 Container Service__ + - ### Features + - This release of Amazon Elastic Container Service (Amazon ECS) introduces support for modifying the cluster settings for existing clusters, which enables you to toggle whether Container Insights is enabled or not. Support is also introduced for custom log routing using the ECS FireLens integration. + +## __AmazonApiGatewayManagementApi__ + - ### Features + - You can use getConnection to return information about the connection (when it is connected, IP address, etc.) and deleteConnection to disconnect the given connection + +## __AmazonMQ__ + - ### Features + - Adds support for updating the security groups selection of an Amazon MQ broker. + +# __2.7.35__ __2019-08-29__ +## __AWS CodePipeline__ + - ### Features + - Introducing pipeline execution trigger details in ListPipelineExecutions API. + +## __AWS Lambda__ + - ### Features + - Adds a "MaximumBatchingWindowInSeconds" parameter to event source mapping APIs. Usable by DynamoDB and Kinesis event sources. + +## __Amazon EC2 Container Service__ + - ### Features + - This release of Amazon Elastic Container Service (Amazon ECS) introduces support for including Docker container IDs in the API response when describing and stopping tasks. This enables customers to easily map containers to the tasks they are associated with.
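+
+      A minimal sketch of reading those Docker container IDs with the v2 `EcsClient` (the cluster name and task ARN are placeholders, and the new field is assumed here to surface as `runtimeId()` on the container model):
+
+```java
+import software.amazon.awssdk.services.ecs.EcsClient;
+import software.amazon.awssdk.services.ecs.model.DescribeTasksRequest;
+
+public class EcsContainerIdExample {
+    public static void main(String[] args) {
+        try (EcsClient ecs = EcsClient.create()) {
+            ecs.describeTasks(DescribeTasksRequest.builder()
+                    .cluster("example-cluster")                                           // placeholder cluster
+                    .tasks("arn:aws:ecs:us-east-1:111122223333:task/example-task-id")     // placeholder task ARN
+                    .build())
+               .tasks()
+               .forEach(task -> task.containers().forEach(container ->
+                       // Print each container name with the Docker container ID returned by DescribeTasks.
+                       System.out.println(container.name() + " -> " + container.runtimeId())));
+        }
+    }
+}
+```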
+ +## __Amazon ElastiCache__ + - ### Features + - Amazon ElastiCache for Redis now supports encryption at rest using customer managed customer master keys (CMKs) in AWS Key Management Service (KMS). Amazon ElastiCache now supports cluster names up to 40 characters for replicationGroups and up to 50 characters for cacheClusters. + +## __Application Auto Scaling__ + - ### Features + - With the current release, you can suspend and later resume any of the following scaling actions in Application Auto Scaling: scheduled scaling actions, dynamic scaling in actions, dynamic scaling out actions. + +# __2.7.34__ __2019-08-28__ +## __AWS Elemental MediaConvert__ + - ### Features + - This release adds the ability to send a job to an on-demand queue while simulating the performance of a job sent to a reserved queue. Use this setting to estimate the number of reserved transcoding slots (RTS) you need for a reserved queue. + +## __AWS Global Accelerator__ + - ### Features + - API Update for AWS Global Accelerator Client IP Preservation + +## __Amazon Simple Queue Service__ + - ### Features + - Added support for message system attributes, which currently lets you send AWS X-Ray trace IDs through Amazon SQS. + +# __2.7.33__ __2019-08-27__ +## __AWS Organizations__ + - ### Features + - Documentation updates for organizations + +# __2.7.32__ __2019-08-26__ +## __AWS SecurityHub__ + - ### Features + - This release resolves an issue with the DescribeHub action, changes the MasterId and InvitationId parameters for AcceptInvitation to Required, and changes the AccountIds parameter for DeleteInvitations and DeclineInvitations to Required. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This feature adds "default tier" to the AWS Systems Manager Parameter Store for parameter creation and update. AWS customers can now set the "default tier" to one of the following values: Standard (default), Advanced or Intelligent-Tiering. This allows customers to create advanced parameters or parameters in corresponding tiers with one setting rather than a code change to specify parameter tiers. + +# __2.7.31__ __2019-08-23__ +## __AWS Elemental MediaPackage VOD__ + - ### Features + - Adds optional Constant Initialization Vector (IV) to HLS Encryption for MediaPackage VOD. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release of EC2 VM Import Export adds support for exporting Amazon Machine Images (AMIs) to a VM file + +## __Amazon Transcribe Service__ + - ### Features + - Amazon Transcribe - support transcriptions from audio sources in Russian (ru-RU) and Chinese (zh-CN). + +# __2.7.30__ __2019-08-22__ +## __AWS DataSync__ + - ### Features + - This release adds support for SMB location type. + +## __Amazon Relational Database Service__ + - ### Features + - This release allows users to enable RDS Data API while creating Aurora Serverless databases. + +# __2.7.29__ __2019-08-21__ +## __Amazon DynamoDB__ + - ### Features + - Public preview version of 'dynamodb-enhanced' that has a new DynamoDb mapper library that can be used with the v2 SDK. See README.md in the module for more detailed information about this module.
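+
+      The preview interface of `dynamodb-enhanced` changed before its general availability; as a rough sketch of what the bean-based mapper looks like in the published module (the table name, key, and attributes below are hypothetical):
+
+```java
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient;
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable;
+import software.amazon.awssdk.enhanced.dynamodb.Key;
+import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
+
+public class EnhancedClientExample {
+
+    @DynamoDbBean
+    public static class Customer {
+        private String id;
+        private String name;
+
+        @DynamoDbPartitionKey
+        public String getId() { return id; }
+        public void setId(String id) { this.id = id; }
+        public String getName() { return name; }
+        public void setName(String name) { this.name = name; }
+    }
+
+    public static void main(String[] args) {
+        DynamoDbEnhancedClient enhanced = DynamoDbEnhancedClient.create();
+        DynamoDbTable<Customer> table = enhanced.table("Customer", TableSchema.fromBean(Customer.class));
+
+        // Write a bean and read it back by partition key.
+        Customer customer = new Customer();
+        customer.setId("cust-1");
+        customer.setName("Jane Doe");
+        table.putItem(customer);
+
+        Customer loaded = table.getItem(Key.builder().partitionValue("cust-1").build());
+        System.out.println(loaded.getName());
+    }
+}
+```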
+ +## __Amazon ElastiCache__ + - ### Features + - ElastiCache extends support for Scale down for Redis Cluster-mode enabled and disabled replication groups + +## __Amazon Forecast Query Service__ + - ### Features + - Amazon Forecast is a fully managed machine learning service that makes it easy for customers to generate accurate forecasts using their historical time-series data + +## __Amazon Forecast Service__ + - ### Features + - Amazon Forecast is a fully managed machine learning service that makes it easy for customers to generate accurate forecasts using their historical time-series data + +## __Amazon Personalize Runtime__ + - ### Features + - Increased limits on number of items recommended and reranked: The maximum number of results returned from getRecommendations API has been increased to 200. The maximum number of items which can be reranked via getPersonalizedRanking API has been increased to 200. + +## __Amazon Rekognition__ + - ### Features + - Documentation updates for Amazon Rekognition. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker now supports Amazon EFS and Amazon FSx for Lustre file systems as data sources for training machine learning models. Amazon SageMaker now supports running training jobs on the ml.p3dn.24xlarge instance type. This instance type is offered as a limited private preview for certain SageMaker customers. If you are interested in joining the private preview, please reach out to the SageMaker Product Management team via AWS Support. + +## __Amazon Simple Queue Service__ + - ### Features + - This release provides a way to add metadata tags to a queue when it is created. You can use tags to organize and identify your Amazon SQS queues for cost allocation. + +## __Apache HTTP Client__ + - ### Features + - Enable TLS client authentication support for the Apache HTTP Client by allowing customers to specify a `TlsKeyManagersProvider` on the builder. The `KeyManager`s provided will be used when the remote server wants to authenticate the client. A configuration sketch appears below, after the 2.7.21 entries. + +## __HTTP Client SPI__ + - ### Features + - Add `TlsKeyManagersProvider` interface for supporting TLS client auth in HTTP client implementations. + +## __Netty NIO HTTP Client__ + - ### Features + - Add ability to use HTTP proxies with the Netty async client. + +# __2.7.28__ __2019-08-20__ +## __AWS Transfer for SFTP__ + - ### Features + - New field in response of TestIdentityProvider + +## __Alexa For Business__ + - ### Features + - Adding support for optional locale input in CreateProfile and UpdateProfile APIs + +## __Amazon AppStream__ + - ### Features + - Includes API updates to support streaming through VPC endpoints for image builders and stacks. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker introduces Managed Spot Training. Increases the maximum number of metric definitions to 40 for SageMaker Training and Hyperparameter Tuning Jobs. SageMaker Neo adds support for Acer aiSage and Qualcomm QCS605 and QCS603. + +# __2.7.27__ __2019-08-19__ +## __AWS App Mesh__ + - ### Features + - Fix for HttpMethod enum + +## __AWS Cost and Usage Report Service__ + - ### Features + - New IAM permission required for editing AWS Cost and Usage Reports - Starting today, you can allow or deny IAM users permission to edit Cost & Usage Reports through the API and the Billing and Cost Management console. To allow users to edit Cost & Usage Reports, ensure that they have 'cur: ModifyReportDefinition' permission.
Refer to the technical documentation (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_cur_ModifyReportDefinition.html) for additional details. + +# __2.7.26__ __2019-08-16__ +## __AWS RoboMaker__ + - ### Features + - Two-feature release: 1. AWS RoboMaker introduces log-based simulation. Log-based simulation allows you to play back pre-recorded log data such as sensor streams for testing robotic functions like localization, mapping, and object detection. Use the AWS RoboMaker SDK to test your robotic applications. 2. AWS RoboMaker allows customers to set up a robot deployment timeout when calling CreateDeploymentJob. + +## __Amazon EC2 Container Service__ + - ### Features + - This release of Amazon Elastic Container Service (Amazon ECS) introduces support for controlling the usage of swap space on a per-container basis for Linux containers. + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR has introduced an account-level configuration called Block Public Access that allows you to block clusters with ports open to traffic from public IP sources (i.e. 0.0.0.0/0 for IPv4 and ::/0 for IPv6) from launching. Individual ports or port ranges can be added as exceptions to allow public access. + +# __2.7.25__ __2019-08-15__ +## __AWS App Mesh__ + - ### Features + - This release adds support for HTTP header-based routing and route prioritization. + +## __AWS CodeCommit__ + - ### Features + - This release adds an API, BatchGetCommits, that allows retrieval of metadata for multiple commits in an AWS CodeCommit repository. + +## __AWS Glue__ + - ### Features + - GetJobBookmarks API is withdrawn. + +## __AWS Storage Gateway__ + - ### Features + - CreateSnapshotFromVolumeRecoveryPoint API supports new parameter: Tags (to be attached to the created resource) + +## __Amazon Athena__ + - ### Features + - This release adds support for querying S3 Requester Pays buckets. Users can enable this feature through their Workgroup settings. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds an option to use private certificates from AWS Certificate Manager (ACM) to authenticate a Site-to-Site VPN connection's tunnel endpoints and customer gateway device. + +# __2.7.24__ __2019-08-14__ +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds a new API called SendDiagnosticInterrupt, which allows you to send diagnostic interrupts to your EC2 instance. + +# __2.7.23__ __2019-08-13__ +## __AWS AppSync__ + - ### Features + - Adds a configuration option for AppSync GraphQL APIs + +# __2.7.22__ __2019-08-12__ +## __Amazon CloudWatch__ + - ### Features + - Documentation updates for monitoring + +## __Amazon Rekognition__ + - ### Features + - Adding new Emotion, Fear + +## __Application Auto Scaling__ + - ### Features + - Documentation updates for Application Auto Scaling + +## __Auto Scaling__ + - ### Features + - Amazon EC2 Auto Scaling now supports a new Spot allocation strategy "capacity-optimized" that fulfills your request using Spot Instance pools that are optimally chosen based on the available Spot capacity. + +# __2.7.21__ __2019-08-09__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert has added support for multi-DRM SPEKE with CMAF outputs, MP3 ingest, and options for improved video quality. + +## __AWS IoT__ + - ### Features + - This release adds Quality of Service (QoS) support for AWS IoT rules engine republish action.
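+
+      For the TLS client authentication support added to the Apache HTTP Client in 2.7.29 above, a configuration sketch; the key store path and password are placeholders, and the builder setter is assumed here to be `tlsKeyManagersProvider`:
+
+```java
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.security.KeyStore;
+import javax.net.ssl.KeyManagerFactory;
+import software.amazon.awssdk.http.SdkHttpClient;
+import software.amazon.awssdk.http.TlsKeyManagersProvider;
+import software.amazon.awssdk.http.apache.ApacheHttpClient;
+import software.amazon.awssdk.services.s3.S3Client;
+
+public class TlsClientAuthExample {
+    public static void main(String[] args) throws Exception {
+        // Load the client certificate and private key from a PKCS#12 store.
+        char[] password = "changeit".toCharArray();
+        KeyStore keyStore = KeyStore.getInstance("PKCS12");
+        try (InputStream in = Files.newInputStream(Paths.get("/path/to/client-cert.p12"))) {
+            keyStore.load(in, password);
+        }
+        KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
+        kmf.init(keyStore, password);
+
+        // The SPI interface exposes a single keyManagers() method, so a method reference suffices.
+        TlsKeyManagersProvider provider = kmf::getKeyManagers;
+
+        SdkHttpClient httpClient = ApacheHttpClient.builder()
+                .tlsKeyManagersProvider(provider)
+                .build();
+
+        S3Client s3 = S3Client.builder().httpClient(httpClient).build();
+        s3.listBuckets().buckets().forEach(b -> System.out.println(b.name()));
+    }
+}
+```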
+ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fixed the issue where ByteArrayAsyncRequestBody can send duplicate requests when another request comes in at the same time the subscription completes. + - For APIs that support input event streams, set the `Content-Type` to `application/vnd.amazon.eventstream` on the request. + +## __Amazon GuardDuty__ + - ### Features + - New "evidence" field in the finding model to provide evidence information explaining why the finding has been triggered. Currently only threat-intelligence findings have this field. Some documentation updates. + +## __Amazon Lex Runtime Service__ + - ### Features + - Manage Amazon Lex session state using APIs on the client. A usage sketch appears further below, after the 2.7.17 AWS IoT entry. + +## __Amazon Redshift__ + - ### Features + - Add expectedNextSnapshotScheduleTime and expectedNextSnapshotScheduleTimeStatus to the Redshift cluster object. + +# __2.7.20__ __2019-08-08__ +## __AWS CodeBuild__ + - ### Features + - CodeBuild adds CloudFormation support for SourceCredential + +## __AWS Glue__ + - ### Features + - You can now use AWS Glue to find matching records across datasets even without identifiers to join on by using the new FindMatches ML Transform. Find related products, places, suppliers, customers, and more by teaching a custom machine learning transformation that you can use to identify matching records as part of your analysis, data cleaning, or master data management project by adding the FindMatches transformation to your Glue ETL Jobs. If your problem is more along the lines of deduplication, you can use FindMatches in much the same way to identify customers who have signed up more than once, products that have accidentally been added to your product catalog more than once, and so forth. Using the FindMatches ML Transform, you can teach a Transform your definition of a duplicate through examples, and it will use machine learning to identify other potential duplicates in your dataset. As with data integration, you can then use your new Transform in your deduplication projects by adding the FindMatches transformation to your Glue ETL Jobs. This release also contains additional APIs that support AWS Lake Formation. + +## __AWS Lake Formation__ + - ### Features + - Lake Formation: (New Service) AWS Lake Formation is a fully managed service that makes it easier for customers to build, secure and manage data lakes. AWS Lake Formation simplifies and automates many of the complex manual steps usually required to create data lakes including collecting, cleaning and cataloging data and securely making that data available for analytics and machine learning. + +## __AWS OpsWorks CM__ + - ### Features + - This release adds support for Chef Automate 2 specific engine attributes. + +# __2.7.19__ __2019-08-07__ +## __Amazon CloudWatch Application Insights__ + - ### Features + - CloudWatch Application Insights for .NET and SQL Server now provides integration with AWS Systems Manager OpsCenter. This integration allows you to view and resolve problems and operational issues detected for selected applications. + +# __2.7.18__ __2019-08-06__ +## __AWS Batch__ + - ### Features + - Documentation updates for AWS Batch + +# __2.7.17__ __2019-08-05__ +## __AWS DataSync__ + - ### Features + - Support VPC endpoints. + +## __AWS IoT__ + - ### Features + - In this release, AWS IoT Device Defender introduces audit mitigation actions that can be applied to audit findings to help mitigate security issues.
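+
+      For the Amazon Lex session state APIs noted in 2.7.21 above, a usage sketch; the bot name, alias, and user ID are placeholders, and the client and operation names are assumed to follow the service model (`getSession`, `deleteSession`):
+
+```java
+import software.amazon.awssdk.services.lexruntime.LexRuntimeClient;
+import software.amazon.awssdk.services.lexruntime.model.DeleteSessionRequest;
+import software.amazon.awssdk.services.lexruntime.model.GetSessionRequest;
+import software.amazon.awssdk.services.lexruntime.model.GetSessionResponse;
+
+public class LexSessionExample {
+    public static void main(String[] args) {
+        try (LexRuntimeClient lex = LexRuntimeClient.create()) {
+            // Read the current session state for one user of a bot.
+            GetSessionResponse session = lex.getSession(GetSessionRequest.builder()
+                    .botName("OrderFlowers")       // placeholder bot
+                    .botAlias("prod")              // placeholder alias
+                    .userId("user-1234")           // placeholder user
+                    .build());
+            System.out.println("Session attributes: " + session.sessionAttributes());
+
+            // Discard the session once the conversation is finished.
+            lex.deleteSession(DeleteSessionRequest.builder()
+                    .botName("OrderFlowers")
+                    .botAlias("prod")
+                    .userId("user-1234")
+                    .build());
+        }
+    }
+}
+```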
+ +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 now supports a new Spot allocation strategy "Capacity-optimized" that fulfills your request using Spot Instance pools that are optimally chosen based on the available Spot capacity. + +# __2.7.16__ __2019-08-02__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix the issue where the `content-length` set on the request is not honored for streaming operations. + +## __AWS Security Token Service__ + - ### Features + - Documentation updates for sts + # __2.7.15__ __2019-07-30__ ## __AWS Elemental MediaConvert__ - ### Features @@ -2764,5 +10348,5 @@ # __2.0.0-preview-1__ __2017-06-28__ ## __AWS SDK for Java v2__ - ### Features - - Initial release of the AWS SDK for Java v2. See our [blog post](https://aws.amazon.com/blogs/developer/aws-sdk-for-java-2-0-developer-preview) for information about this new major veresion. This release is considered a developer preview and is not intended for production use cases. + - Initial release of the AWS SDK for Java v2. See our [blog post](https://aws.amazon.com/blogs/developer/aws-sdk-for-java-2-0-developer-preview) for information about this new major version. This release is considered a developer preview and is not intended for production use cases. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1f040805e1bd..410da6297a96 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -88,7 +88,7 @@ Please keep the following in mind when considering a code contribution: Any code you submit will be released under this license. If you are contributing a large/substantial feature, you may be asked to sign a - [Contributor License Agreement (CLA)][cla]. + Contributor License Agreement (CLA). * For anything but very small or quick changes, you should always start by checking the [Issues][issues] page to see if the work is already being done by another person. @@ -123,7 +123,7 @@ checklist below: * [ ] If the change is related to an existing Bug Report or Feature Request, the issue number is referenced * [ ] A short description of the change added to - [CHANGELOG.md](./CHANGELOG.md). Adding a new entry can be accomplished by + [CHANGELOG.md](./CHANGELOG.md). Adding a new entry must be accomplished by running the `scripts/new-change` script and following the instructions. Commit the new file created by the script in `.changes/next-release` with your changes. @@ -165,7 +165,6 @@ when contributing to the SDK. [markdown]: https://guides.github.com/features/mastering-markdown/ [issues]: https://github.com/aws/aws-sdk-java-v2/issues [pull-requests]: https://github.com/aws/aws-sdk-java-v2/pulls -[cla]: https://github.com/aws/aws-cla [label-bug]: https://github.com/aws/aws-sdk-java-v2/labels/Bug [label-doc-issue]: https://github.com/aws/aws-sdk-java-v2/labels/Documentation%20Issue [label-feature-request]: https://github.com/aws/aws-sdk-java-v2/labels/Feature%20Request diff --git a/NOTICE.txt b/NOTICE.txt index 434630a6dbfc..e277e4c2c445 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,5 +1,5 @@ AWS SDK for Java 2.0 -Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. This product includes software developed by Amazon Technologies, Inc (http://www.amazon.com/). 
@@ -10,5 +10,7 @@ THIRD PARTY COMPONENTS This software includes third party software subject to the following copyrights: - XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty. - PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. +- Apache Commons Lang - https://github.com/apache/commons-lang +- Netty Reactive Streams - https://github.com/playframework/netty-reactive-streams The licenses for these third party components are included in LICENSE.txt diff --git a/README.md b/README.md index 9c05544b2984..2a2c003cb3c2 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ artifact source. * [Sample Code](#sample-code) * [API Docs][docs-api] * [Developer Guide][docs-guide] ([source][docs-guide-source]) +* [Maven Archetypes](archetypes/README.md) * [Issues][sdk-issues] * [SDK Blog][blog] * [Giving Feedback](#giving-feedback) @@ -36,7 +37,7 @@ section of the developer guide. ## Using the SDK -The recommended way to use the AWS SDK for Java in your project is to consume it from Maven. +The recommended way to use the AWS SDK for Java in your project is to consume it from Maven Central. #### Importing the BOM #### @@ -48,7 +49,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.7.15 + 2.15.61 pom import @@ -82,12 +83,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.7.15 + 2.15.61 software.amazon.awssdk s3 - 2.7.15 + 2.15.61 ``` @@ -99,7 +100,7 @@ You can import the whole SDK into your project (includes *ALL* services). Please software.amazon.awssdk aws-sdk-java - 2.7.15 + 2.15.61 ``` @@ -131,13 +132,19 @@ You can find sample code for v2 in the following places: * [aws-doc-sdk-examples] repo. * Integration tests in this repo. They are located in the `it` directory under each service module, eg: [s3-integration-tests] +## Maintenance and Support for SDK Major Versions +For information about maintenance and support for SDK major versions and their underlying dependencies, see the following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide: + +* [AWS SDKs and Tools Maintenance Policy][maintenance-policy] +* [AWS SDKs and Tools Version Support Matrix][version-matrix] + ## Giving Feedback -We need your help in making this SDK great. Please participate in the community and contribute to this effort by submitting issues, participating in discussion forums and submitting pull requests through the following channels. +We need your help in making this SDK great. Please participate in the community and contribute to this effort by submitting issues, participating in discussion forums and submitting pull requests through the following channels: -* Come join the AWS Java community chat on [Gitter][gitter]. -* Articulate your feature request or upvote existing ones on our [Issues][features] page. -* Submit [issues][sdk-issues]. -* Send feedback directly to the team at aws-java-sdk-v2-feedback@amazon.com. 
+* Submit [issues][sdk-issues] - this is the preferred channel to interact with our team +* Come join the AWS Java community chat on [Gitter][gitter] +* Articulate your feature request or upvote existing ones on our [Issues][features] page +* Send feedback directly to the team at aws-java-sdk-v2-feedback@amazon.com [aws-iam-credentials]: http://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/java-dg-roles.html [aws]: http://aws.amazon.com/ @@ -154,9 +161,11 @@ We need your help in making this SDK great. Please participate in the community [aws-java-sdk-bom]: https://github.com/aws/aws-sdk-java-v2/tree/master/bom [stack-overflow]: http://stackoverflow.com/questions/tagged/aws-java-sdk [gitter]: https://gitter.im/aws/aws-sdk-java-v2 -[features]: https://github.com/aws/aws-sdk-java-v2/issues?q=is%3Aopen+is%3Aissue+label%3A%22Feature+Request%22 +[features]: https://github.com/aws/aws-sdk-java-v2/issues?q=is%3Aopen+is%3Aissue+label%3A%22feature-request%22 [support-center]: https://console.aws.amazon.com/support/ [console]: https://console.aws.amazon.com [bom]: http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22software.amazon.awssdk%22%20AND%20a%3A%22bom%22 [aws-doc-sdk-examples]: https://github.com/awsdocs/aws-doc-sdk-examples/tree/master/javav2 [s3-integration-tests]: https://github.com/aws/aws-sdk-java-v2/tree/master/services/s3/src/it/java/software/amazon/awssdk/services/s3 +[maintenance-policy]: https://docs.aws.amazon.com/credref/latest/refdocs/maint-policy.html +[version-matrix]: https://docs.aws.amazon.com/credref/latest/refdocs/version-support-matrix.html diff --git a/archetypes/README.md b/archetypes/README.md new file mode 100755 index 000000000000..70980eb12890 --- /dev/null +++ b/archetypes/README.md @@ -0,0 +1,9 @@ +# Maven Archetypes for AWS SDK for Java 2.x + +## Description +This module contains maven archetypes for AWS Java SDK 2.x. + +## Archetypes + +- [archetype-lambda](archetype-lambda/README.md) - a lambda function template using AWS Java SDK 2.x + diff --git a/archetypes/archetype-lambda/README.md b/archetypes/archetype-lambda/README.md new file mode 100755 index 000000000000..5702a787b739 --- /dev/null +++ b/archetypes/archetype-lambda/README.md @@ -0,0 +1,64 @@ +# Maven Archetype for lambda function using AWS SDK for Java 2.x + +## Description +This is an Apache Maven Archetype to create a lambda function template using [AWS Java SDK 2.x][aws-java-sdk-v2]. The generated template +has the optimized configurations and follows the best practices to reduce start up time. + +## Usage + +You can use `mvn archetype:generate` to generate a project using this archetype. See [maven archetype usage guidance][maven-archetype-usage] for more information. + +- Interactive mode + +``` +mvn archetype:generate \ + -DarchetypeGroupId=software.amazon.awssdk \ + -DarchetypeArtifactId=archetype-lambda \ + -DarchetypeVersion=2.x +``` + +- Batch mode + +``` +mvn archetype:generate \ + -DarchetypeGroupId=software.amazon.awssdk \ + -DarchetypeArtifactId=archetype-lambda \ + -DarchetypeVersion=2.x \ + -DgroupId=com.test \ + -DartifactId=sample-project \ + -Dservice=s3 \ + -DinteractiveMode=false +``` + +### Parameters + +Parameter Name | Default Value | Description +---|---|--- +`service` (required) | n/a | Specifies the service client to be used in the lambda function, eg: s3, dynamodb. You can find available services [here][java-sdk-v2-services]. 
+`groupId`(required) | n/a | Specifies the group ID of the project +`artifactId`(required) | n/a | Specifies the artifact ID of the project +`region` | n/a | Specifies the region to be set for the SDK client in the application +`httpClient` | url-connection-client | Specifies the http client to be used by the SDK client. Available options are `url-connection-client` (sync), `apache-client` (sync), `netty-nio-client` (async). See [http clients][sdk-http-clients] +`handlerClassName` | `"App"`| Specifies the class name of the handler, which will be used as the lambda function name. It should be camel case. +`javaSdkVersion` | Same version as the archetype version | Specifies the version of the AWS Java SDK 2.x to be used +`version` | 1.0-SNAPSHOT | Specifies the version of the project +`package` | ${groupId} | Specifies the package name for the classes + +### Deployment + +To deploy the lambda function, you can use [SAM CLI][sam-cli]. The generated project contains a default [SAM template][sam-template] file `template.yaml` where you can +configure different properties of your lambda function such as memory size and timeout. + +``` +sam deploy --guided +``` + +Please refer to [deploying lambda apps][deploying-lambda-apps] for more info. + +[aws-java-sdk-v2]: https://github.com/aws/aws-sdk-java-v2 +[java-sdk-v2-services]: https://github.com/aws/aws-sdk-java-v2/tree/master/services +[sdk-http-clients]: https://github.com/aws/aws-sdk-java-v2/tree/master/http-clients +[deploying-lambda-apps]: https://docs.aws.amazon.com/lambda/latest/dg/deploying-lambda-apps.html +[sam-cli]:https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-getting-started.html +[maven-archetype-usage]: https://maven.apache.org/archetype/maven-archetype-plugin/usage.html +[sam-template]: https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml new file mode 100644 index 000000000000..72676fa8298d --- /dev/null +++ b/archetypes/archetype-lambda/pom.xml @@ -0,0 +1,174 @@ + + + + + + archetypes + software.amazon.awssdk + 2.15.62-SNAPSHOT + + 4.0.0 + archetype-lambda + maven-archetype + AWS Java SDK :: Archetype Lambda + + The AWS SDK for Java - Maven archetype for Java lambda function using AWS Java SDK 2.x + + + + 3.2.0 + 3.2.0 + 1.6.0 + + + + + + software.amazon.awssdk + aws-sdk-java + ${awsjavasdk.version} + test + + + + + + + src/main/resources + true + + META-INF/maven/archetype-metadata.xml + + + + src/main/resources + false + + META-INF/maven/archetype-metadata.xml + + + + + + org.apache.maven.archetype + archetype-packaging + ${maven.archetype.version} + + + + + + exec-maven-plugin + org.codehaus.mojo + ${exec-maven-plugin.version} + + + map-service-to-client-prefix + generate-resources + + exec + + + python + ${basedir}/src/main/resources/map-service-to-client-prefix + + + + + + maven-archetype-plugin + ${maven.archetype.version} + + true + true + ${skip.unit.tests} + + + + integration-test + verify + + integration-test + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + ${maven-dependency-plugin.version} + + true + + + + + + maven-resources-plugin + ${maven.resource.plugin.version} + + + copy-resources + process-classes + + copy-resources + + + ${basedir}/target/classes/archetype-resources + UTF-8 + + + ${basedir}/target/classes/ + + global.vm + serviceMapping.vm + + + + + + + copy-resources-to-sub-folder 
+ process-classes + + copy-resources + + + ${basedir}/target/classes/archetype-resources/src/main/java + UTF-8 + + + ${basedir}/target/classes/ + + global.vm + serviceMapping.vm + + + + + + + + + + \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/main/resources/META-INF/maven/archetype-metadata.xml b/archetypes/archetype-lambda/src/main/resources/META-INF/maven/archetype-metadata.xml new file mode 100644 index 000000000000..82f7c91a528c --- /dev/null +++ b/archetypes/archetype-lambda/src/main/resources/META-INF/maven/archetype-metadata.xml @@ -0,0 +1,50 @@ + + + + + src/main/java + + **/*.java + + + + src/test/java + + **/*.java + + + + + + .gitignore + template.yaml + README.md + + + + + + App + + + ${project.version} + \d+\.\d+.\d+ + + + + + url-connection-client + (url-connection-client|apache-client|netty-nio-client) + + + null + ^\w+-(\w+-)+\d+$ + + + + ${netty-open-ssl-version} + + + diff --git a/archetypes/archetype-lambda/src/main/resources/archetype-resources/.gitignore b/archetypes/archetype-lambda/src/main/resources/archetype-resources/.gitignore new file mode 100644 index 000000000000..a500caff54ac --- /dev/null +++ b/archetypes/archetype-lambda/src/main/resources/archetype-resources/.gitignore @@ -0,0 +1,17 @@ +# Eclipse +.classpath +.project +.settings/ + +# Intellij +.idea/ +*.iml +*.iws + +# Mac +.DS_Store + +# Maven +target/ + +**/dependency-reduced-pom.xml diff --git a/archetypes/archetype-lambda/src/main/resources/archetype-resources/README.md b/archetypes/archetype-lambda/src/main/resources/archetype-resources/README.md new file mode 100644 index 000000000000..f8effbaa17ad --- /dev/null +++ b/archetypes/archetype-lambda/src/main/resources/archetype-resources/README.md @@ -0,0 +1,45 @@ +#[[#]]# ${handlerClassName} + +This project contains an AWS Lambda maven application with [AWS Java SDK 2.x](https://github.com/aws/aws-sdk-java-v2) dependencies. + +#[[##]]# Prerequisites +- Java 1.8+ +- Apache Maven +- [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) +- Docker + +#[[##]]# Development + +The generated function handler class just returns the input. The configured AWS Java SDK client is created in `DependencyFactory` class and you can +add the code to interact with the SDK client based on your use case. + +#[[####]]# Building the project +``` +mvn clean install +``` + +#[[####]]# Testing it locally +``` +sam local invoke +``` + +#[[####]]# Adding more SDK clients +To add more service clients, you need to add the specific services modules in `pom.xml` and create the clients in `DependencyFactory` following the same +pattern as ${serviceClientVariable}Client. + +#[[##]]# Deployment + +The generated project contains a default [SAM template](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html) file `template.yaml` where you can +configure different properties of your lambda function such as memory size and timeout. You might also need to add specific policies to the lambda function +so that it can access other AWS resources. + +To deploy the application, you can run the following command: + +``` +sam deploy --guided +``` + +See [Deploying Serverless Applications](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-deploying.html) for more info. 
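+
+#[[##]]# Example handler
+
+As a sketch of how the generated pieces fit together (assuming the archetype was generated with `-Dservice=dynamodb`, `-DgroupId=com.test` and the default handler name `App`), the handler created by this archetype can be filled in to call the SDK client built by `DependencyFactory`, for example listing the account's tables instead of echoing the input:
+
+```java
+package com.test;
+
+import com.amazonaws.services.lambda.runtime.Context;
+import com.amazonaws.services.lambda.runtime.RequestHandler;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+
+public class App implements RequestHandler<Object, Object> {
+    private final DynamoDbClient dynamoDbClient;
+
+    public App() {
+        // The client is created once by the generated DependencyFactory and reused across invocations.
+        dynamoDbClient = DependencyFactory.dynamoDbClient();
+    }
+
+    @Override
+    public Object handleRequest(final Object input, final Context context) {
+        // Replace the default echo behaviour with a real call, e.g. list the account's tables.
+        return dynamoDbClient.listTables().tableNames();
+    }
+}
+```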
+ + + diff --git a/archetypes/archetype-lambda/src/main/resources/archetype-resources/pom.xml b/archetypes/archetype-lambda/src/main/resources/archetype-resources/pom.xml new file mode 100644 index 000000000000..b51f5f667e6a --- /dev/null +++ b/archetypes/archetype-lambda/src/main/resources/archetype-resources/pom.xml @@ -0,0 +1,117 @@ +#parse ( "global.vm") + + + 4.0.0 + + ${groupId} + ${artifactId} + ${version} + jar + + UTF-8 + 1.8 + 1.8 + 3.2.1 + 3.6.1 + 1.6.0 + ${javaSdkVersion} + 1.2.0 + 5.4.2 +#if( $httpClient == 'netty-nio-client') + ${nettyOpenSslVersion} +#end + + + + + + software.amazon.awssdk + bom + ${aws.java.sdk.version} + pom + import + + + + + + + software.amazon.awssdk + ${moduleName} + + + software.amazon.awssdk + netty-nio-client + + + software.amazon.awssdk + apache-client + + + + + + software.amazon.awssdk + ${httpClient} + + +#if( $httpClient == 'netty-nio-client') + + + io.netty + netty-tcnative-boringssl-static + ${netty.openssl.version} + +#end + + com.amazonaws + aws-lambda-java-core + ${aws.lambda.java.version} + + + + + org.junit.jupiter + junit-jupiter + ${junit5.version} + test + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + ${maven.compiler.plugin.version} + + + org.apache.maven.plugins + maven-shade-plugin + ${maven.shade.plugin.version} + + false + ${artifactId} + + + *:* + + + module-info.class + + + + + + + package + + shade + + + + + + + diff --git a/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/main/java/DependencyFactory.java b/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/main/java/DependencyFactory.java new file mode 100644 index 000000000000..947a0c4d4d03 --- /dev/null +++ b/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/main/java/DependencyFactory.java @@ -0,0 +1,34 @@ +#parse ( "global.vm") + +package ${package}; + +import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider; +#if ($region == 'null') +import software.amazon.awssdk.core.SdkSystemSetting; +#end +import software.amazon.awssdk.http.${httpClientPackageName}; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.${servicePackage}.${serviceClientClassName}; + +/** + * The module containing all dependencies required by the {@link ${handlerClassName}}. 
+ */ +public class DependencyFactory { + + private DependencyFactory() {} + + /** + * @return an instance of ${serviceClientClassName} + */ + public static ${serviceClientClassName} ${serviceClientVariable}Client() { + return ${serviceClientClassName}.builder() + .credentialsProvider(EnvironmentVariableCredentialsProvider.create()) +#if ($region == 'null') + .region(Region.of(System.getenv(SdkSystemSetting.AWS_REGION.environmentVariable()))) +#else + .region(Region.${regionEnum}) +#end + .httpClientBuilder(${httpClientClassName}.builder()) + .build(); + } +} diff --git a/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/main/java/__handlerClassName__.java b/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/main/java/__handlerClassName__.java new file mode 100644 index 000000000000..8a66c4aa925d --- /dev/null +++ b/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/main/java/__handlerClassName__.java @@ -0,0 +1,29 @@ +#parse ( "global.vm") +package ${package}; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import software.amazon.awssdk.services.${servicePackage}.${serviceClientClassName}; + +/** + * Lambda function entry point. You can change to use other pojo type or implement + * a different RequestHandler. + * + * @see Lambda Java Handler for more information + */ +public class ${handlerClassName} implements RequestHandler { + private final ${serviceClientClassName} ${serviceClientVariable}Client; + + public ${handlerClassName}() { + // Initialize the SDK client outside of the handler method so that it can be reused for subsequent invocations. + // It is initialized when the class is loaded. + ${serviceClientVariable}Client = DependencyFactory.${serviceClientVariable}Client(); + // Consider invoking a simple api here to pre-warm up the application, eg: dynamodb#listTables + } + + @Override + public Object handleRequest(final Object input, final Context context) { + // TODO: invoking the api call using ${serviceClientVariable}Client. 
+ return input; + } +} diff --git a/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/test/java/__handlerClassName__Test.java b/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/test/java/__handlerClassName__Test.java new file mode 100644 index 000000000000..beecf7a3c67a --- /dev/null +++ b/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/test/java/__handlerClassName__Test.java @@ -0,0 +1,18 @@ +#set( $symbol_pound = '#' ) +#set( $symbol_dollar = '$' ) +#set( $symbol_escape = '\' ) +package ${package}; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +public class ${handlerClassName}Test { + + @Test + public void handleRequest_shouldReturnConstantValue() { + ${handlerClassName} function = new ${handlerClassName}(); + Object result = function.handleRequest("echo", null); + assertEquals("echo", result); + } +} diff --git a/archetypes/archetype-lambda/src/main/resources/archetype-resources/template.yaml b/archetypes/archetype-lambda/src/main/resources/archetype-resources/template.yaml new file mode 100644 index 000000000000..5d6bb1592002 --- /dev/null +++ b/archetypes/archetype-lambda/src/main/resources/archetype-resources/template.yaml @@ -0,0 +1,19 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Resources: + # See https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html + # for more info to see how to tune the lambda function configs based on your use case. + ${handlerClassName}Function: + Type: AWS::Serverless::Function + Properties: + Runtime: java8 + Handler: ${package}.${handlerClassName}::handleRequest + Timeout: 60 + MemorySize: 512 + CodeUri: ./target/${artifactId}.jar + # Attach policies here to give the function permission to access other AWS resources if needed + # See: https://github.com/awslabs/serverless-application-model/blob/master/docs/policy_templates.rst + # eg: + #Policies: + # - S3ReadPolicy: + # BucketName: test-bucket \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/main/resources/global.vm b/archetypes/archetype-lambda/src/main/resources/global.vm new file mode 100644 index 000000000000..610b556958ff --- /dev/null +++ b/archetypes/archetype-lambda/src/main/resources/global.vm @@ -0,0 +1,36 @@ +## global variables used by the project +#parse ("serviceMapping.vm") +#set( $symbol_pound = '#' ) +#set( $symbol_dollar = '$' ) +#set( $symbol_escape = '\' ) +## customize wafregional and dynamodbstreams because they reside in waf and dynamodb modules respectively. 
+#if ($service == 'dynamodbstreams') + #set ($moduleName = 'dynamodb') + #set ($serviceClientPrefix = 'DynamoDbStreams') + #set ($servicePackage = 'dynamodb.streams') +#elseif ($service == 'wafregional') + #set ($moduleName = 'waf') + #set ($serviceClientPrefix = 'WafRegional') + #set ($servicePackage = 'waf.regional') +#else +## map the serviceId to service package and service client class name + #set ( $servicePackage = $service) + #set ($moduleName = $service) + #set ( $serviceClientPrefix = $serviceMapping[$service]) +#end +#set ( $serviceClientVariable = $serviceClientPrefix.substring(0,1).toLowerCase() + $serviceClientPrefix.substring(1)) +#set( $regionEnum = $region.replace("-", "_").toUpperCase() ) +## map the client module name to the client class name and pacakge name +#if( $httpClient == 'url-connection-client') + #set ($httpClientClassName = 'UrlConnectionHttpClient') + #set ($httpClientPackageName = 'urlconnection.' + $httpClientClassName) + #set ($serviceClientClassName = $serviceClientPrefix + 'Client') +#elseif ( $httpClient == 'apache-client') + #set ($httpClientClassName = 'ApacheHttpClient') + #set ($httpClientPackageName = 'apache.' + $httpClientClassName) + #set ($serviceClientClassName = $serviceClientPrefix + 'Client') +#elseif ( $httpClient == 'netty-nio-client') + #set ($httpClientClassName = 'NettyNioAsyncHttpClient') + #set ($httpClientPackageName = 'nio.netty.' + $httpClientClassName) + #set ($serviceClientClassName = $serviceClientPrefix + 'AsyncClient') +#end diff --git a/archetypes/archetype-lambda/src/main/resources/map-service-to-client-prefix b/archetypes/archetype-lambda/src/main/resources/map-service-to-client-prefix new file mode 100755 index 000000000000..8574844e7283 --- /dev/null +++ b/archetypes/archetype-lambda/src/main/resources/map-service-to-client-prefix @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +import json +import os +import string + +MAPPING_FILE_NAME = 'serviceMapping.vm' +RESOURCES_ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__))) +ARCHETYPE_LAMBDA_ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(os.path.join(RESOURCES_ROOT_DIR, "../")))) +SERVICE_DIR = os.path.join( + os.path.dirname(os.path.dirname(os.path.abspath(os.path.join(__file__, "../../../../")))), + 'services' +) + +def load_all_service_modules(): + service_mapping = {} + for f in [f for f in os.listdir(SERVICE_DIR) if os.path.isdir(os.path.join(SERVICE_DIR, f)) & os.path.exists(os.path.join(SERVICE_DIR, f, 'target'))]: + for s in [s for s in os.listdir(os.path.join(SERVICE_DIR, f, 'target', 'generated-sources/sdk/software/amazon/awssdk/services', f)) if s.endswith('AsyncClient.java') & s.startswith('Default')]: + service_mapping[f] = find_client_prefix(s) + return service_mapping + +def find_client_prefix(d): + index = d.find('AsyncClient.java') + return d[7:index] + +def write_to_vm_file(service_mapping): + target = os.path.join(ARCHETYPE_LAMBDA_ROOT_DIR, 'target') + + if not os.path.exists(target): + os.mkdir(target) + + target = os.path.join(ARCHETYPE_LAMBDA_ROOT_DIR, 'target', 'classes') + + if not os.path.exists(target): + os.mkdir(target) + + filename = os.path.join(target, MAPPING_FILE_NAME) + + with open(filename, 'w') as f: + f.write('#set ( $serviceMapping =') + f.write(json.dumps(service_mapping)) + f.write(')') + return filename + +def main(): + service_mapping = load_all_service_modules() + write_to_vm_file(service_mapping) + +if __name__ == '__main__': + main() diff --git 
a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/archetype.properties b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/archetype.properties new file mode 100644 index 000000000000..63a575c729e8 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/archetype.properties @@ -0,0 +1,10 @@ +groupId=software.amazonaws.test +artifactId=test-apache-artifact +version=1.0-SNAPSHOT +package=software.amazonaws.test +service=dynamodb +httpClient=apache-client +handlerClassName=MyApacheFunction +region=null +javaSdkVersion=2.11.0 +nettyOpenSslVersion=2.0.29.Final \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/goal.txt b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/goal.txt new file mode 100644 index 000000000000..4a1a71d3364c --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/goal.txt @@ -0,0 +1 @@ +verify \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/README.md b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/README.md new file mode 100644 index 000000000000..30bf8a316d7e --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/README.md @@ -0,0 +1,45 @@ +# MyApacheFunction + +This project contains an AWS Lambda maven application with [AWS Java SDK 2.x](https://github.com/aws/aws-sdk-java-v2) dependencies. + +## Prerequisites +- Java 1.8+ +- Apache Maven +- [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) +- Docker + +## Development + +The generated function handler class just returns the input. The configured AWS Java SDK client is created in `DependencyFactory` class and you can +add the code to interact with the SDK client based on your use case. + +#### Building the project +``` +mvn clean install +``` + +#### Testing it locally +``` +sam local invoke +``` + +#### Adding more SDK clients +To add more service clients, you need to add the specific services modules in `pom.xml` and create the clients in `DependencyFactory` following the same +pattern as dynamoDbClient. + +## Deployment + +The generated project contains a default [SAM template](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html) file `template.yaml` where you can +configure different properties of your lambda function such as memory size and timeout. You might also need to add specific policies to the lambda function +so that it can access other AWS resources. + +To deploy the application, you can run the following command: + +``` +sam deploy --guided +``` + +See [Deploying Serverless Applications](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-deploying.html) for more info. 
+ + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/pom.xml b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/pom.xml new file mode 100644 index 000000000000..d34684ea4c6d --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/pom.xml @@ -0,0 +1,104 @@ + + + 4.0.0 + + software.amazonaws.test + test-apache-artifact + 1.0-SNAPSHOT + jar + + UTF-8 + 1.8 + 1.8 + 3.2.1 + 3.6.1 + 1.6.0 + 2.11.0 + 1.2.0 + 5.4.2 + + + + + + software.amazon.awssdk + bom + ${aws.java.sdk.version} + pom + import + + + + + + + software.amazon.awssdk + dynamodb + + + software.amazon.awssdk + netty-nio-client + + + software.amazon.awssdk + apache-client + + + + + + software.amazon.awssdk + apache-client + + + + com.amazonaws + aws-lambda-java-core + ${aws.lambda.java.version} + + + + + org.junit.jupiter + junit-jupiter + ${junit5.version} + test + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + ${maven.compiler.plugin.version} + + + org.apache.maven.plugins + maven-shade-plugin + ${maven.shade.plugin.version} + + false + test-apache-artifact + + + *:* + + + module-info.class + + + + + + + package + + shade + + + + + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java new file mode 100644 index 000000000000..f79a15985d7e --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java @@ -0,0 +1,27 @@ + +package software.amazonaws.test; + +import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * The module containing all dependencies required by the {@link MyApacheFunction}. + */ +public class DependencyFactory { + + private DependencyFactory() {} + + /** + * @return an instance of DynamoDbClient + */ + public static DynamoDbClient dynamoDbClient() { + return DynamoDbClient.builder() + .credentialsProvider(EnvironmentVariableCredentialsProvider.create()) + .region(Region.of(System.getenv(SdkSystemSetting.AWS_REGION.environmentVariable()))) + .httpClientBuilder(ApacheHttpClient.builder()) + .build(); + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/main/java/software/amazonaws/test/MyApacheFunction.java b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/main/java/software/amazonaws/test/MyApacheFunction.java new file mode 100644 index 000000000000..8f9f860811c6 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/main/java/software/amazonaws/test/MyApacheFunction.java @@ -0,0 +1,28 @@ +package software.amazonaws.test; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * Lambda function entry point. You can change to use other pojo type or implement + * a different RequestHandler. 
+ * + * @see Lambda Java Handler for more information + */ +public class MyApacheFunction implements RequestHandler { + private final DynamoDbClient dynamoDbClient; + + public MyApacheFunction() { + // Initialize the SDK client outside of the handler method so that it can be reused for subsequent invocations. + // It is initialized when the class is loaded. + dynamoDbClient = DependencyFactory.dynamoDbClient(); + // Consider invoking a simple api here to pre-warm up the application, eg: dynamodb#listTables + } + + @Override + public Object handleRequest(final Object input, final Context context) { + // TODO: invoking the api call using dynamoDbClient. + return input; + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/test/java/software/amazonaws/test/MyApacheFunctionTest.java b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/test/java/software/amazonaws/test/MyApacheFunctionTest.java new file mode 100644 index 000000000000..b55287348392 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/test/java/software/amazonaws/test/MyApacheFunctionTest.java @@ -0,0 +1,15 @@ +package software.amazonaws.test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +public class MyApacheFunctionTest { + + @Test + public void handleRequest_shouldReturnConstantValue() { + MyApacheFunction function = new MyApacheFunction(); + Object result = function.handleRequest("echo", null); + assertEquals("echo", result); + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/template.yaml b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/template.yaml new file mode 100644 index 000000000000..513c0aed185b --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/template.yaml @@ -0,0 +1,19 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Resources: + # See https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html + # for more info to see how to tune the lambda function configs based on your use case. + MyApacheFunctionFunction: + Type: AWS::Serverless::Function + Properties: + Runtime: java8 + Handler: software.amazonaws.test.MyApacheFunction::handleRequest + Timeout: 60 + MemorySize: 512 + CodeUri: ./target/test-apache-artifact.jar + # Attach policies here to give the function permission to access other AWS resources if needed + # See: https://github.com/awslabs/serverless-application-model/blob/master/docs/policy_templates.rst + # eg: + #Policies: + # - S3ReadPolicy: + # BucketName: test-bucket \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/archetype.properties b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/archetype.properties new file mode 100644 index 000000000000..148e110f2ee0 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/archetype.properties @@ -0,0 +1,25 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. 
+# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +groupId=software.amazonaws.test +artifactId=test-dynamodbstreams-artifact +version=1.0-SNAPSHOT +package=software.amazonaws.test +service=dynamodbstreams +httpClient=apache-client +handlerClassName=MyDynamoDbStreamsFunction +region=ap-southeast-1 +javaSdkVersion=2.11.0 +nettyOpenSslVersion=2.0.29.Final \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/goal.txt b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/goal.txt new file mode 100644 index 000000000000..4a1a71d3364c --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/goal.txt @@ -0,0 +1 @@ +verify \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/README.md b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/README.md new file mode 100644 index 000000000000..2fc00d2e4ac0 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/README.md @@ -0,0 +1,45 @@ +# MyDynamoDbStreamsFunction + +This project contains an AWS Lambda maven application with [AWS Java SDK 2.x](https://github.com/aws/aws-sdk-java-v2) dependencies. + +## Prerequisites +- Java 1.8+ +- Apache Maven +- [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) +- Docker + +## Development + +The generated function handler class just returns the input. The configured AWS Java SDK client is created in `DependencyFactory` class and you can +add the code to interact with the SDK client based on your use case. + +#### Building the project +``` +mvn clean install +``` + +#### Testing it locally +``` +sam local invoke +``` + +#### Adding more SDK clients +To add more service clients, you need to add the specific services modules in `pom.xml` and create the clients in `DependencyFactory` following the same +pattern as dynamoDbStreamsClient. + +## Deployment + +The generated project contains a default [SAM template](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html) file `template.yaml` where you can +configure different properties of your lambda function such as memory size and timeout. You might also need to add specific policies to the lambda function +so that it can access other AWS resources. + +To deploy the application, you can run the following command: + +``` +sam deploy --guided +``` + +See [Deploying Serverless Applications](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-deploying.html) for more info. 
+ + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/pom.xml b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/pom.xml new file mode 100644 index 000000000000..e434477f8aea --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/pom.xml @@ -0,0 +1,104 @@ + + + 4.0.0 + + software.amazonaws.test + test-dynamodbstreams-artifact + 1.0-SNAPSHOT + jar + + UTF-8 + 1.8 + 1.8 + 3.2.1 + 3.6.1 + 1.6.0 + 2.11.0 + 1.2.0 + 5.4.2 + + + + + + software.amazon.awssdk + bom + ${aws.java.sdk.version} + pom + import + + + + + + + software.amazon.awssdk + dynamodb + + + software.amazon.awssdk + netty-nio-client + + + software.amazon.awssdk + apache-client + + + + + + software.amazon.awssdk + apache-client + + + + com.amazonaws + aws-lambda-java-core + ${aws.lambda.java.version} + + + + + org.junit.jupiter + junit-jupiter + ${junit5.version} + test + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + ${maven.compiler.plugin.version} + + + org.apache.maven.plugins + maven-shade-plugin + ${maven.shade.plugin.version} + + false + test-dynamodbstreams-artifact + + + *:* + + + module-info.class + + + + + + + package + + shade + + + + + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java new file mode 100644 index 000000000000..b8bdd17acc85 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java @@ -0,0 +1,26 @@ + +package software.amazonaws.test; + +import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.dynamodb.streams.DynamoDbStreamsClient; + +/** + * The module containing all dependencies required by the {@link MyDynamoDbStreamsFunction}. + */ +public class DependencyFactory { + + private DependencyFactory() {} + + /** + * @return an instance of DynamoDbStreamsClient + */ + public static DynamoDbStreamsClient dynamoDbStreamsClient() { + return DynamoDbStreamsClient.builder() + .credentialsProvider(EnvironmentVariableCredentialsProvider.create()) + .region(Region.AP_SOUTHEAST_1) + .httpClientBuilder(ApacheHttpClient.builder()) + .build(); + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/src/main/java/software/amazonaws/test/MyDynamoDbStreamsFunction.java b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/src/main/java/software/amazonaws/test/MyDynamoDbStreamsFunction.java new file mode 100644 index 000000000000..2722fbfdba68 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/src/main/java/software/amazonaws/test/MyDynamoDbStreamsFunction.java @@ -0,0 +1,28 @@ +package software.amazonaws.test; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import software.amazon.awssdk.services.dynamodb.streams.DynamoDbStreamsClient; + +/** + * Lambda function entry point. 
You can change to use other pojo type or implement + * a different RequestHandler. + * + * @see Lambda Java Handler for more information + */ +public class MyDynamoDbStreamsFunction implements RequestHandler { + private final DynamoDbStreamsClient dynamoDbStreamsClient; + + public MyDynamoDbStreamsFunction() { + // Initialize the SDK client outside of the handler method so that it can be reused for subsequent invocations. + // It is initialized when the class is loaded. + dynamoDbStreamsClient = DependencyFactory.dynamoDbStreamsClient(); + // Consider invoking a simple api here to pre-warm up the application, eg: dynamodb#listTables + } + + @Override + public Object handleRequest(final Object input, final Context context) { + // TODO: invoking the api call using dynamoDbStreamsClient. + return input; + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/src/test/java/software/amazonaws/test/MyDynamoDbStreamsFunctionTest.java b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/src/test/java/software/amazonaws/test/MyDynamoDbStreamsFunctionTest.java new file mode 100644 index 000000000000..7553684cf11a --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/src/test/java/software/amazonaws/test/MyDynamoDbStreamsFunctionTest.java @@ -0,0 +1,15 @@ +package software.amazonaws.test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +public class MyDynamoDbStreamsFunctionTest { + + @Test + public void handleRequest_shouldReturnConstantValue() { + MyDynamoDbStreamsFunction function = new MyDynamoDbStreamsFunction(); + Object result = function.handleRequest("echo", null); + assertEquals("echo", result); + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/template.yaml b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/template.yaml new file mode 100644 index 000000000000..797f24dc0751 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/template.yaml @@ -0,0 +1,19 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Resources: + # See https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html + # for more info to see how to tune the lambda function configs based on your use case. 
+ MyDynamoDbStreamsFunctionFunction: + Type: AWS::Serverless::Function + Properties: + Runtime: java8 + Handler: software.amazonaws.test.MyDynamoDbStreamsFunction::handleRequest + Timeout: 60 + MemorySize: 512 + CodeUri: ./target/test-dynamodbstreams-artifact.jar + # Attach policies here to give the function permission to access other AWS resources if needed + # See: https://github.com/awslabs/serverless-application-model/blob/master/docs/policy_templates.rst + # eg: + #Policies: + # - S3ReadPolicy: + # BucketName: test-bucket \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/archetype.properties b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/archetype.properties new file mode 100644 index 000000000000..b5ef44cbd770 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/archetype.properties @@ -0,0 +1,11 @@ +groupId=software.amazonaws.test +artifactId=test-netty-artifact +version=1.0-SNAPSHOT +package=software.amazonaws.test +service=kinesis +httpClient=netty-nio-client +handlerClassName=MyNettyFunction +region=us-east-1 +javaSdkVersion=2.11.0 +nettyOpenSslVersion=2.0.29.Final + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/goal.txt b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/goal.txt new file mode 100644 index 000000000000..4a1a71d3364c --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/goal.txt @@ -0,0 +1 @@ +verify \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/README.md b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/README.md new file mode 100644 index 000000000000..e265f49bbdc4 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/README.md @@ -0,0 +1,45 @@ +# MyNettyFunction + +This project contains an AWS Lambda maven application with [AWS Java SDK 2.x](https://github.com/aws/aws-sdk-java-v2) dependencies. + +## Prerequisites +- Java 1.8+ +- Apache Maven +- [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) +- Docker + +## Development + +The generated function handler class just returns the input. The configured AWS Java SDK client is created in `DependencyFactory` class and you can +add the code to interact with the SDK client based on your use case. + +#### Building the project +``` +mvn clean install +``` + +#### Testing it locally +``` +sam local invoke +``` + +#### Adding more SDK clients +To add more service clients, you need to add the specific services modules in `pom.xml` and create the clients in `DependencyFactory` following the same +pattern as kinesisClient. + +## Deployment + +The generated project contains a default [SAM template](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html) file `template.yaml` where you can +configure different properties of your lambda function such as memory size and timeout. You might also need to add specific policies to the lambda function +so that it can access other AWS resources. 
+ +To deploy the application, you can run the following command: + +``` +sam deploy --guided +``` + +See [Deploying Serverless Applications](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-deploying.html) for more info. + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/pom.xml b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/pom.xml new file mode 100644 index 000000000000..177dfbb0440b --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/pom.xml @@ -0,0 +1,112 @@ + + + 4.0.0 + + software.amazonaws.test + test-netty-artifact + 1.0-SNAPSHOT + jar + + UTF-8 + 1.8 + 1.8 + 3.2.1 + 3.6.1 + 1.6.0 + 2.11.0 + 1.2.0 + 5.4.2 + 2.0.29.Final + + + + + + software.amazon.awssdk + bom + ${aws.java.sdk.version} + pom + import + + + + + + + software.amazon.awssdk + kinesis + + + software.amazon.awssdk + netty-nio-client + + + software.amazon.awssdk + apache-client + + + + + + software.amazon.awssdk + netty-nio-client + + + + + io.netty + netty-tcnative-boringssl-static + ${netty.openssl.version} + + + com.amazonaws + aws-lambda-java-core + ${aws.lambda.java.version} + + + + + org.junit.jupiter + junit-jupiter + ${junit5.version} + test + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + ${maven.compiler.plugin.version} + + + org.apache.maven.plugins + maven-shade-plugin + ${maven.shade.plugin.version} + + false + test-netty-artifact + + + *:* + + + module-info.class + + + + + + + package + + shade + + + + + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java new file mode 100644 index 000000000000..ab3f3792bb7e --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java @@ -0,0 +1,26 @@ + +package software.amazonaws.test; + +import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; + +/** + * The module containing all dependencies required by the {@link MyNettyFunction}. 
+ */ +public class DependencyFactory { + + private DependencyFactory() {} + + /** + * @return an instance of KinesisAsyncClient + */ + public static KinesisAsyncClient kinesisClient() { + return KinesisAsyncClient.builder() + .credentialsProvider(EnvironmentVariableCredentialsProvider.create()) + .region(Region.US_EAST_1) + .httpClientBuilder(NettyNioAsyncHttpClient.builder()) + .build(); + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/src/main/java/software/amazonaws/test/MyNettyFunction.java b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/src/main/java/software/amazonaws/test/MyNettyFunction.java new file mode 100644 index 000000000000..0016c30c56e0 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/src/main/java/software/amazonaws/test/MyNettyFunction.java @@ -0,0 +1,28 @@ +package software.amazonaws.test; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; + +/** + * Lambda function entry point. You can change to use other pojo type or implement + * a different RequestHandler. + * + * @see Lambda Java Handler for more information + */ +public class MyNettyFunction implements RequestHandler { + private final KinesisAsyncClient kinesisClient; + + public MyNettyFunction() { + // Initialize the SDK client outside of the handler method so that it can be reused for subsequent invocations. + // It is initialized when the class is loaded. + kinesisClient = DependencyFactory.kinesisClient(); + // Consider invoking a simple api here to pre-warm up the application, eg: dynamodb#listTables + } + + @Override + public Object handleRequest(final Object input, final Context context) { + // TODO: invoking the api call using kinesisClient. + return input; + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/src/test/java/software/amazonaws/test/MyNettyFunctionTest.java b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/src/test/java/software/amazonaws/test/MyNettyFunctionTest.java new file mode 100644 index 000000000000..46e9272647aa --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/src/test/java/software/amazonaws/test/MyNettyFunctionTest.java @@ -0,0 +1,15 @@ +package software.amazonaws.test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +public class MyNettyFunctionTest { + + @Test + public void handleRequest_shouldReturnConstantValue() { + MyNettyFunction function = new MyNettyFunction(); + Object result = function.handleRequest("echo", null); + assertEquals("echo", result); + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/template.yaml b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/template.yaml new file mode 100644 index 000000000000..e674e2599da3 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/template.yaml @@ -0,0 +1,19 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Resources: + # See https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html + # for more info to see how to tune the lambda function configs based on your use case. 
+ MyNettyFunctionFunction: + Type: AWS::Serverless::Function + Properties: + Runtime: java8 + Handler: software.amazonaws.test.MyNettyFunction::handleRequest + Timeout: 60 + MemorySize: 512 + CodeUri: ./target/test-netty-artifact.jar + # Attach policies here to give the function permission to access other AWS resources if needed + # See: https://github.com/awslabs/serverless-application-model/blob/master/docs/policy_templates.rst + # eg: + #Policies: + # - S3ReadPolicy: + # BucketName: test-bucket \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/archetype.properties b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/archetype.properties new file mode 100644 index 000000000000..4b987f49a274 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/archetype.properties @@ -0,0 +1,10 @@ +groupId=software.amazonaws.test +artifactId=test-url-connection-client-artifact +version=1.0-SNAPSHOT +package=software.amazonaws.test +service=s3 +httpClient=url-connection-client +handlerClassName=App +region=us-west-2 +javaSdkVersion=2.11.0 +nettyOpenSslVersion=2.0.29.Final \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/goal.txt b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/goal.txt new file mode 100644 index 000000000000..4a1a71d3364c --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/goal.txt @@ -0,0 +1 @@ +verify \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/README.md b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/README.md new file mode 100644 index 000000000000..6b17a7840213 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/README.md @@ -0,0 +1,45 @@ +# App + +This project contains an AWS Lambda maven application with [AWS Java SDK 2.x](https://github.com/aws/aws-sdk-java-v2) dependencies. + +## Prerequisites +- Java 1.8+ +- Apache Maven +- [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) +- Docker + +## Development + +The generated function handler class just returns the input. The configured AWS Java SDK client is created in `DependencyFactory` class and you can +add the code to interact with the SDK client based on your use case. + +#### Building the project +``` +mvn clean install +``` + +#### Testing it locally +``` +sam local invoke +``` + +#### Adding more SDK clients +To add more service clients, you need to add the specific services modules in `pom.xml` and create the clients in `DependencyFactory` following the same +pattern as s3Client. + +## Deployment + +The generated project contains a default [SAM template](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html) file `template.yaml` where you can +configure different properties of your lambda function such as memory size and timeout. You might also need to add specific policies to the lambda function +so that it can access other AWS resources. 
+ +To deploy the application, you can run the following command: + +``` +sam deploy --guided +``` + +See [Deploying Serverless Applications](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-deploying.html) for more info. + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/pom.xml b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/pom.xml new file mode 100644 index 000000000000..510579f12687 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/pom.xml @@ -0,0 +1,104 @@ + + + 4.0.0 + + software.amazonaws.test + test-url-connection-client-artifact + 1.0-SNAPSHOT + jar + + UTF-8 + 1.8 + 1.8 + 3.2.1 + 3.6.1 + 1.6.0 + 2.11.0 + 1.2.0 + 5.4.2 + + + + + + software.amazon.awssdk + bom + ${aws.java.sdk.version} + pom + import + + + + + + + software.amazon.awssdk + s3 + + + software.amazon.awssdk + netty-nio-client + + + software.amazon.awssdk + apache-client + + + + + + software.amazon.awssdk + url-connection-client + + + + com.amazonaws + aws-lambda-java-core + ${aws.lambda.java.version} + + + + + org.junit.jupiter + junit-jupiter + ${junit5.version} + test + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + ${maven.compiler.plugin.version} + + + org.apache.maven.plugins + maven-shade-plugin + ${maven.shade.plugin.version} + + false + test-url-connection-client-artifact + + + *:* + + + module-info.class + + + + + + + package + + shade + + + + + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/src/main/java/software/amazonaws/test/App.java b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/src/main/java/software/amazonaws/test/App.java new file mode 100644 index 000000000000..3d3588fee28a --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/src/main/java/software/amazonaws/test/App.java @@ -0,0 +1,28 @@ +package software.amazonaws.test; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import software.amazon.awssdk.services.s3.S3Client; + +/** + * Lambda function entry point. You can change to use other pojo type or implement + * a different RequestHandler. + * + * @see Lambda Java Handler for more information + */ +public class App implements RequestHandler { + private final S3Client s3Client; + + public App() { + // Initialize the SDK client outside of the handler method so that it can be reused for subsequent invocations. + // It is initialized when the class is loaded. + s3Client = DependencyFactory.s3Client(); + // Consider invoking a simple api here to pre-warm up the application, eg: dynamodb#listTables + } + + @Override + public Object handleRequest(final Object input, final Context context) { + // TODO: invoking the api call using s3Client. 
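        // For illustration only (not part of the generated handler): one way the TODO above could be
        // filled in, assuming the function's execution role allows s3:ListAllMyBuckets. Kept commented
        // out so the reference unit test, which invokes this handler with a null Context, still passes.
        // software.amazon.awssdk.services.s3.model.ListBucketsResponse buckets = s3Client.listBuckets();
        // context.getLogger().log("Bucket count: " + buckets.buckets().size());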
+ return input; + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java new file mode 100644 index 000000000000..926269195a94 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java @@ -0,0 +1,26 @@ + +package software.amazonaws.test; + +import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider; +import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; + +/** + * The module containing all dependencies required by the {@link App}. + */ +public class DependencyFactory { + + private DependencyFactory() {} + + /** + * @return an instance of S3Client + */ + public static S3Client s3Client() { + return S3Client.builder() + .credentialsProvider(EnvironmentVariableCredentialsProvider.create()) + .region(Region.US_WEST_2) + .httpClientBuilder(UrlConnectionHttpClient.builder()) + .build(); + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/src/test/java/software/amazonaws/test/AppTest.java b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/src/test/java/software/amazonaws/test/AppTest.java new file mode 100644 index 000000000000..8400b37496b7 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/src/test/java/software/amazonaws/test/AppTest.java @@ -0,0 +1,15 @@ +package software.amazonaws.test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +public class AppTest { + + @Test + public void handleRequest_shouldReturnConstantValue() { + App function = new App(); + Object result = function.handleRequest("echo", null); + assertEquals("echo", result); + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/template.yaml b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/template.yaml new file mode 100644 index 000000000000..ca0bb619fd4e --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/template.yaml @@ -0,0 +1,19 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Resources: + # See https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html + # for more info to see how to tune the lambda function configs based on your use case. 
+ AppFunction: + Type: AWS::Serverless::Function + Properties: + Runtime: java8 + Handler: software.amazonaws.test.App::handleRequest + Timeout: 60 + MemorySize: 512 + CodeUri: ./target/test-url-connection-client-artifact.jar + # Attach policies here to give the function permission to access other AWS resources if needed + # See: https://github.com/awslabs/serverless-application-model/blob/master/docs/policy_templates.rst + # eg: + #Policies: + # - S3ReadPolicy: + # BucketName: test-bucket \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/archetype.properties b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/archetype.properties new file mode 100644 index 000000000000..de95a87a7f91 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/archetype.properties @@ -0,0 +1,25 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +groupId=software.amazonaws.test +artifactId=test-wafregional-artifact +version=1.0-SNAPSHOT +package=software.amazonaws.test +service=wafregional +httpClient=apache-client +handlerClassName=MyWafRegionalFunction +region=ap-southeast-1 +javaSdkVersion=2.11.0 +nettyOpenSslVersion=2.0.29.Final \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/goal.txt b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/goal.txt new file mode 100644 index 000000000000..4a1a71d3364c --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/goal.txt @@ -0,0 +1 @@ +verify \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/README.md b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/README.md new file mode 100644 index 000000000000..cdff80c819bd --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/README.md @@ -0,0 +1,45 @@ +# MyWafRegionalFunction + +This project contains an AWS Lambda maven application with [AWS Java SDK 2.x](https://github.com/aws/aws-sdk-java-v2) dependencies. + +## Prerequisites +- Java 1.8+ +- Apache Maven +- [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) +- Docker + +## Development + +The generated function handler class just returns the input. The configured AWS Java SDK client is created in `DependencyFactory` class and you can +add the code to interact with the SDK client based on your use case. + +#### Building the project +``` +mvn clean install +``` + +#### Testing it locally +``` +sam local invoke +``` + +#### Adding more SDK clients +To add more service clients, you need to add the specific services modules in `pom.xml` and create the clients in `DependencyFactory` following the same +pattern as wafRegionalClient. 
+ +## Deployment + +The generated project contains a default [SAM template](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html) file `template.yaml` where you can +configure different properties of your lambda function such as memory size and timeout. You might also need to add specific policies to the lambda function +so that it can access other AWS resources. + +To deploy the application, you can run the following command: + +``` +sam deploy --guided +``` + +See [Deploying Serverless Applications](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-deploying.html) for more info. + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/pom.xml b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/pom.xml new file mode 100644 index 000000000000..9394f5abb2cc --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/pom.xml @@ -0,0 +1,104 @@ + + + 4.0.0 + + software.amazonaws.test + test-wafregional-artifact + 1.0-SNAPSHOT + jar + + UTF-8 + 1.8 + 1.8 + 3.2.1 + 3.6.1 + 1.6.0 + 2.11.0 + 1.2.0 + 5.4.2 + + + + + + software.amazon.awssdk + bom + ${aws.java.sdk.version} + pom + import + + + + + + + software.amazon.awssdk + waf + + + software.amazon.awssdk + netty-nio-client + + + software.amazon.awssdk + apache-client + + + + + + software.amazon.awssdk + apache-client + + + + com.amazonaws + aws-lambda-java-core + ${aws.lambda.java.version} + + + + + org.junit.jupiter + junit-jupiter + ${junit5.version} + test + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + ${maven.compiler.plugin.version} + + + org.apache.maven.plugins + maven-shade-plugin + ${maven.shade.plugin.version} + + false + test-wafregional-artifact + + + *:* + + + module-info.class + + + + + + + package + + shade + + + + + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java new file mode 100644 index 000000000000..31bacbfa33ef --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java @@ -0,0 +1,26 @@ + +package software.amazonaws.test; + +import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.waf.regional.WafRegionalClient; + +/** + * The module containing all dependencies required by the {@link MyWafRegionalFunction}. 
+ */ +public class DependencyFactory { + + private DependencyFactory() {} + + /** + * @return an instance of WafRegionalClient + */ + public static WafRegionalClient wafRegionalClient() { + return WafRegionalClient.builder() + .credentialsProvider(EnvironmentVariableCredentialsProvider.create()) + .region(Region.AP_SOUTHEAST_1) + .httpClientBuilder(ApacheHttpClient.builder()) + .build(); + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/src/main/java/software/amazonaws/test/MyWafRegionalFunction.java b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/src/main/java/software/amazonaws/test/MyWafRegionalFunction.java new file mode 100644 index 000000000000..9f02b456977a --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/src/main/java/software/amazonaws/test/MyWafRegionalFunction.java @@ -0,0 +1,28 @@ +package software.amazonaws.test; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import software.amazon.awssdk.services.waf.regional.WafRegionalClient; + +/** + * Lambda function entry point. You can change to use other pojo type or implement + * a different RequestHandler. + * + * @see Lambda Java Handler for more information + */ +public class MyWafRegionalFunction implements RequestHandler { + private final WafRegionalClient wafRegionalClient; + + public MyWafRegionalFunction() { + // Initialize the SDK client outside of the handler method so that it can be reused for subsequent invocations. + // It is initialized when the class is loaded. + wafRegionalClient = DependencyFactory.wafRegionalClient(); + // Consider invoking a simple api here to pre-warm up the application, eg: dynamodb#listTables + } + + @Override + public Object handleRequest(final Object input, final Context context) { + // TODO: invoking the api call using wafRegionalClient. 
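        // For illustration only (not part of the generated handler): a minimal call that could replace
        // the TODO above, assuming WAF Regional's ListWebACLs operation maps to listWebACLs on this
        // client and that the execution role allows waf-regional:ListWebACLs. Kept commented out so the
        // reference unit test, which invokes this handler with a null Context, still passes.
        // wafRegionalClient.listWebACLs(r -> r.limit(10));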
+ return input; + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/src/test/java/software/amazonaws/test/MyWafRegionalFunctionTest.java b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/src/test/java/software/amazonaws/test/MyWafRegionalFunctionTest.java new file mode 100644 index 000000000000..adc0157faf76 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/src/test/java/software/amazonaws/test/MyWafRegionalFunctionTest.java @@ -0,0 +1,15 @@ +package software.amazonaws.test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +public class MyWafRegionalFunctionTest { + + @Test + public void handleRequest_shouldReturnConstantValue() { + MyWafRegionalFunction function = new MyWafRegionalFunction(); + Object result = function.handleRequest("echo", null); + assertEquals("echo", result); + } +} diff --git a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/template.yaml b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/template.yaml new file mode 100644 index 000000000000..70ee17fae8a3 --- /dev/null +++ b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/template.yaml @@ -0,0 +1,19 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Resources: + # See https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html + # for more info to see how to tune the lambda function configs based on your use case. + MyWafRegionalFunctionFunction: + Type: AWS::Serverless::Function + Properties: + Runtime: java8 + Handler: software.amazonaws.test.MyWafRegionalFunction::handleRequest + Timeout: 60 + MemorySize: 512 + CodeUri: ./target/test-wafregional-artifact.jar + # Attach policies here to give the function permission to access other AWS resources if needed + # See: https://github.com/awslabs/serverless-application-model/blob/master/docs/policy_templates.rst + # eg: + #Policies: + # - S3ReadPolicy: + # BucketName: test-bucket \ No newline at end of file diff --git a/archetypes/pom.xml b/archetypes/pom.xml new file mode 100644 index 000000000000..d21e59882802 --- /dev/null +++ b/archetypes/pom.xml @@ -0,0 +1,35 @@ + + + + + + aws-sdk-java-pom + software.amazon.awssdk + 2.15.62-SNAPSHOT + + 4.0.0 + archetypes + AWS Java SDK :: Archetypes + + archetype-lambda + + pom + + Maven Archetypes for applications using Java SDK 2.x + + \ No newline at end of file diff --git a/aws-sdk-java/build.properties b/aws-sdk-java/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/aws-sdk-java/build.properties +++ b/aws-sdk-java/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 6afb1545f0d8..dba5d834b698 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -1,10 +1,23 @@ - + 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT ../pom.xml aws-sdk-java @@ -710,6 +723,11 @@ Amazon AutoScaling, etc). 
s3 ${awsjavasdk.version} + + software.amazon.awssdk + s3control + ${awsjavasdk.version} + software.amazon.awssdk sagemaker @@ -920,6 +938,361 @@ Amazon AutoScaling, etc). eventbridge ${awsjavasdk.version} + + software.amazon.awssdk + lakeformation + ${awsjavasdk.version} + + + software.amazon.awssdk + forecast + ${awsjavasdk.version} + + + software.amazon.awssdk + forecastquery + ${awsjavasdk.version} + + + software.amazon.awssdk + qldb + ${awsjavasdk.version} + + + software.amazon.awssdk + qldbsession + ${awsjavasdk.version} + + + software.amazon.awssdk + workmailmessageflow + ${awsjavasdk.version} + + + software.amazon.awssdk + codestarnotifications + ${awsjavasdk.version} + + + software.amazon.awssdk + savingsplans + ${awsjavasdk.version} + + + software.amazon.awssdk + sso + ${awsjavasdk.version} + + + software.amazon.awssdk + ssooidc + ${awsjavasdk.version} + + + software.amazon.awssdk + marketplacecatalog + ${awsjavasdk.version} + + + software.amazon.awssdk + sesv2 + ${awsjavasdk.version} + + + software.amazon.awssdk + dataexchange + ${awsjavasdk.version} + + + software.amazon.awssdk + migrationhubconfig + ${awsjavasdk.version} + + + software.amazon.awssdk + connectparticipant + ${awsjavasdk.version} + + + software.amazon.awssdk + wafv2 + ${awsjavasdk.version} + + + software.amazon.awssdk + appconfig + ${awsjavasdk.version} + + + software.amazon.awssdk + iotsecuretunneling + ${awsjavasdk.version} + + + software.amazon.awssdk + elasticinference + ${awsjavasdk.version} + + + software.amazon.awssdk + imagebuilder + ${awsjavasdk.version} + + + software.amazon.awssdk + schemas + ${awsjavasdk.version} + + + software.amazon.awssdk + accessanalyzer + ${awsjavasdk.version} + + + software.amazon.awssdk + computeoptimizer + ${awsjavasdk.version} + + + software.amazon.awssdk + networkmanager + ${awsjavasdk.version} + + + software.amazon.awssdk + kendra + ${awsjavasdk.version} + + + software.amazon.awssdk + frauddetector + ${awsjavasdk.version} + + + software.amazon.awssdk + codegurureviewer + ${awsjavasdk.version} + + + software.amazon.awssdk + codeguruprofiler + ${awsjavasdk.version} + + + software.amazon.awssdk + outposts + ${awsjavasdk.version} + + + software.amazon.awssdk + sagemakera2iruntime + ${awsjavasdk.version} + + + software.amazon.awssdk + ebs + ${awsjavasdk.version} + + + software.amazon.awssdk + kinesisvideosignaling + ${awsjavasdk.version} + + + software.amazon.awssdk + detective + ${awsjavasdk.version} + + + software.amazon.awssdk + codestarconnections + ${awsjavasdk.version} + + + software.amazon.awssdk + synthetics + ${awsjavasdk.version} + + + software.amazon.awssdk + iotsitewise + ${awsjavasdk.version} + + + software.amazon.awssdk + macie2 + ${awsjavasdk.version} + + + software.amazon.awssdk + codeartifact + ${awsjavasdk.version} + + + software.amazon.awssdk + honeycode + ${awsjavasdk.version} + + + software.amazon.awssdk + ivs + ${awsjavasdk.version} + + + software.amazon.awssdk + braket + ${awsjavasdk.version} + + + software.amazon.awssdk + identitystore + ${awsjavasdk.version} + + + software.amazon.awssdk + appflow + ${awsjavasdk.version} + + + software.amazon.awssdk + redshiftdata + ${awsjavasdk.version} + + + software.amazon.awssdk + ssoadmin + ${awsjavasdk.version} + + + software.amazon.awssdk + timestreamwrite + ${awsjavasdk.version} + + + software.amazon.awssdk + timestreamquery + ${awsjavasdk.version} + + + software.amazon.awssdk + s3outposts + ${awsjavasdk.version} + + + software.amazon.awssdk + databrew + ${awsjavasdk.version} + + + software.amazon.awssdk + 
servicecatalogappregistry + ${awsjavasdk.version} + + + software.amazon.awssdk + networkfirewall + ${awsjavasdk.version} + + + software.amazon.awssdk + mwaa + ${awsjavasdk.version} + + + software.amazon.awssdk + devopsguru + ${awsjavasdk.version} + + + software.amazon.awssdk + sagemakerfeaturestoreruntime + ${awsjavasdk.version} + + + software.amazon.awssdk + appintegrations + ${awsjavasdk.version} + + + software.amazon.awssdk + ecrpublic + ${awsjavasdk.version} + + + software.amazon.awssdk + amplifybackend + ${awsjavasdk.version} + + + software.amazon.awssdk + connectcontactlens + ${awsjavasdk.version} + + + software.amazon.awssdk + lookoutvision + ${awsjavasdk.version} + + + software.amazon.awssdk + customerprofiles + ${awsjavasdk.version} + + + software.amazon.awssdk + emrcontainers + ${awsjavasdk.version} + + + software.amazon.awssdk + sagemakeredge + ${awsjavasdk.version} + + + software.amazon.awssdk + healthlake + ${awsjavasdk.version} + + + software.amazon.awssdk + auditmanager + ${awsjavasdk.version} + + + software.amazon.awssdk + amp + ${awsjavasdk.version} + + + software.amazon.awssdk + greengrassv2 + ${awsjavasdk.version} + + + software.amazon.awssdk + iotwireless + ${awsjavasdk.version} + + + software.amazon.awssdk + iotfleethub + ${awsjavasdk.version} + + + software.amazon.awssdk + iotdeviceadvisor + ${awsjavasdk.version} + + + software.amazon.awssdk + location + ${awsjavasdk.version} + + + software.amazon.awssdk + wellarchitected + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index 7ae45c129da5..f6765c54bd54 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -1,11 +1,26 @@ + + aws-sdk-java-pom software.amazon.awssdk - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT 4.0.0 @@ -27,7 +42,12 @@ com.fasterxml.jackson.jr jackson-jr-objects - ${jackson.version} + ${jacksonjr.version} + + + com.fasterxml.jackson.jr + jackson-jr-stree + ${jacksonjr.version} org.slf4j @@ -37,7 +57,7 @@ com.fasterxml.jackson.core jackson-databind - ${jackson.version} + ${jackson.databind.version} com.fasterxml.jackson.core @@ -47,7 +67,7 @@ com.fasterxml.jackson.core jackson-annotations - ${jackson.annotations.version} + ${jackson.version} com.fasterxml.jackson.dataformat @@ -186,7 +206,7 @@ com.puppycrawl.tools checkstyle - 8.7 + 8.38 org.apache.maven.plugins @@ -285,6 +305,12 @@ ${junit.version} test + + org.testng + testng + ${testng.version} + test + org.hamcrest hamcrest-all @@ -315,6 +341,53 @@ ${netty-open-ssl-version} test + + com.amazonaws + DynamoDBLocal + ${dynamodb-local.version} + test + + + com.almworks.sqlite4java + sqlite4java + ${sqllite.version} + test + + + com.almworks.sqlite4java + sqlite4java-win32-x86 + ${sqllite.version} + dll + test + + + com.almworks.sqlite4java + sqlite4java-win32-x64 + ${sqllite.version} + dll + test + + + com.almworks.sqlite4java + libsqlite4java-osx + ${sqllite.version} + dylib + test + + + com.almworks.sqlite4java + libsqlite4java-linux-i386 + ${sqllite.version} + so + test + + + com.almworks.sqlite4java + libsqlite4java-linux-amd64 + ${sqllite.version} + so + test + diff --git a/bom/build.properties b/bom/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/bom/build.properties +++ b/bom/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). 
# You may not use this file except in compliance with the License. diff --git a/bom/pom.xml b/bom/pom.xml index efd013c77c54..164a30a67b82 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -1,10 +1,23 @@ - + 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT ../pom.xml bom @@ -44,6 +57,11 @@ annotations ${awsjavasdk.version} + + software.amazon.awssdk + arns + ${awsjavasdk.version} + software.amazon.awssdk auth @@ -400,6 +418,11 @@ dynamodb ${awsjavasdk.version} + + software.amazon.awssdk + dynamodb-enhanced + ${awsjavasdk.version} + software.amazon.awssdk ec2 @@ -820,6 +843,11 @@ s3 ${awsjavasdk.version} + + software.amazon.awssdk + s3control + ${awsjavasdk.version} + software.amazon.awssdk sagemaker @@ -1030,6 +1058,366 @@ eventbridge ${awsjavasdk.version} + + software.amazon.awssdk + lakeformation + ${awsjavasdk.version} + + + software.amazon.awssdk + forecast + ${awsjavasdk.version} + + + software.amazon.awssdk + forecastquery + ${awsjavasdk.version} + + + software.amazon.awssdk + qldb + ${awsjavasdk.version} + + + software.amazon.awssdk + qldbsession + ${awsjavasdk.version} + + + software.amazon.awssdk + workmailmessageflow + ${awsjavasdk.version} + + + software.amazon.awssdk + codestarnotifications + ${awsjavasdk.version} + + + software.amazon.awssdk + savingsplans + ${awsjavasdk.version} + + + software.amazon.awssdk + sso + ${awsjavasdk.version} + + + software.amazon.awssdk + ssooidc + ${awsjavasdk.version} + + + software.amazon.awssdk + marketplacecatalog + ${awsjavasdk.version} + + + software.amazon.awssdk + sesv2 + ${awsjavasdk.version} + + + software.amazon.awssdk + dataexchange + ${awsjavasdk.version} + + + software.amazon.awssdk + migrationhubconfig + ${awsjavasdk.version} + + + software.amazon.awssdk + connectparticipant + ${awsjavasdk.version} + + + software.amazon.awssdk + wafv2 + ${awsjavasdk.version} + + + software.amazon.awssdk + appconfig + ${awsjavasdk.version} + + + software.amazon.awssdk + iotsecuretunneling + ${awsjavasdk.version} + + + software.amazon.awssdk + elasticinference + ${awsjavasdk.version} + + + software.amazon.awssdk + imagebuilder + ${awsjavasdk.version} + + + software.amazon.awssdk + schemas + ${awsjavasdk.version} + + + software.amazon.awssdk + accessanalyzer + ${awsjavasdk.version} + + + software.amazon.awssdk + computeoptimizer + ${awsjavasdk.version} + + + software.amazon.awssdk + networkmanager + ${awsjavasdk.version} + + + software.amazon.awssdk + kendra + ${awsjavasdk.version} + + + software.amazon.awssdk + frauddetector + ${awsjavasdk.version} + + + software.amazon.awssdk + codegurureviewer + ${awsjavasdk.version} + + + software.amazon.awssdk + codeguruprofiler + ${awsjavasdk.version} + + + software.amazon.awssdk + outposts + ${awsjavasdk.version} + + + software.amazon.awssdk + sagemakera2iruntime + ${awsjavasdk.version} + + + software.amazon.awssdk + ebs + ${awsjavasdk.version} + + + software.amazon.awssdk + kinesisvideosignaling + ${awsjavasdk.version} + + + software.amazon.awssdk + detective + ${awsjavasdk.version} + + + software.amazon.awssdk + codestarconnections + ${awsjavasdk.version} + + + software.amazon.awssdk + synthetics + ${awsjavasdk.version} + + + software.amazon.awssdk + iotsitewise + ${awsjavasdk.version} + + + software.amazon.awssdk + macie2 + ${awsjavasdk.version} + + + software.amazon.awssdk + codeartifact + ${awsjavasdk.version} + + + software.amazon.awssdk + honeycode + ${awsjavasdk.version} + + + software.amazon.awssdk + ivs + ${awsjavasdk.version} + + + software.amazon.awssdk + braket + 
${awsjavasdk.version} + + + software.amazon.awssdk + identitystore + ${awsjavasdk.version} + + + software.amazon.awssdk + appflow + ${awsjavasdk.version} + + + software.amazon.awssdk + redshiftdata + ${awsjavasdk.version} + + + software.amazon.awssdk + ssoadmin + ${awsjavasdk.version} + + + software.amazon.awssdk + timestreamwrite + ${awsjavasdk.version} + + + software.amazon.awssdk + timestreamquery + ${awsjavasdk.version} + + + software.amazon.awssdk + s3outposts + ${awsjavasdk.version} + + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + + + software.amazon.awssdk + databrew + ${awsjavasdk.version} + + + software.amazon.awssdk + servicecatalogappregistry + ${awsjavasdk.version} + + + software.amazon.awssdk + networkfirewall + ${awsjavasdk.version} + + + software.amazon.awssdk + mwaa + ${awsjavasdk.version} + + + software.amazon.awssdk + devopsguru + ${awsjavasdk.version} + + + software.amazon.awssdk + sagemakerfeaturestoreruntime + ${awsjavasdk.version} + + + software.amazon.awssdk + appintegrations + ${awsjavasdk.version} + + + software.amazon.awssdk + ecrpublic + ${awsjavasdk.version} + + + software.amazon.awssdk + amplifybackend + ${awsjavasdk.version} + + + software.amazon.awssdk + connectcontactlens + ${awsjavasdk.version} + + + software.amazon.awssdk + lookoutvision + ${awsjavasdk.version} + + + software.amazon.awssdk + customerprofiles + ${awsjavasdk.version} + + + software.amazon.awssdk + emrcontainers + ${awsjavasdk.version} + + + software.amazon.awssdk + sagemakeredge + ${awsjavasdk.version} + + + software.amazon.awssdk + healthlake + ${awsjavasdk.version} + + + software.amazon.awssdk + auditmanager + ${awsjavasdk.version} + + + software.amazon.awssdk + amp + ${awsjavasdk.version} + + + software.amazon.awssdk + greengrassv2 + ${awsjavasdk.version} + + + software.amazon.awssdk + iotwireless + ${awsjavasdk.version} + + + software.amazon.awssdk + iotfleethub + ${awsjavasdk.version} + + + software.amazon.awssdk + iotdeviceadvisor + ${awsjavasdk.version} + + + software.amazon.awssdk + location + ${awsjavasdk.version} + + + software.amazon.awssdk + wellarchitected + ${awsjavasdk.version} + diff --git a/build-tools/pom.xml b/build-tools/pom.xml index 58ebb2a6a409..5a159448f6d5 100644 --- a/build-tools/pom.xml +++ b/build-tools/pom.xml @@ -1,6 +1,6 @@ + + + + + + diff --git a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml index 3f7b933aa0d6..f02fa4c0f460 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml @@ -1,6 +1,6 @@ + value="/*\n * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the "License").\n * You may not use this file except in compliance with the License.\n * A copy of the License is located at\n *\n * http://aws.amazon.com/apache2.0\n *\n * or in the "license" file accompanying this file. This file is distributed\n * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied. 
See the License for the specific language governing\n * permissions and limitations under the License.\n */"/> @@ -69,12 +69,6 @@ - - - - - - @@ -371,6 +365,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + @@ -386,4 +406,11 @@ + + + + + + + diff --git a/build-tools/src/main/resources/software/amazon/awssdk/intellij-codestyle.xml b/build-tools/src/main/resources/software/amazon/awssdk/intellij-codestyle.xml index e6e6e5ef6a68..81a1535e2327 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/intellij-codestyle.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/intellij-codestyle.xml @@ -1,5 +1,5 @@ + - diff --git a/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml b/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml index 821b7255619f..448b951a533d 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml @@ -1,6 +1,6 @@ + + + + + + + + + + + + @@ -32,6 +46,13 @@ + + + + + + + @@ -143,4 +164,58 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/buildspecs/build.yml b/buildspecs/build.yml index 10e0ccbdd756..c2b63e4a83ad 100644 --- a/buildspecs/build.yml +++ b/buildspecs/build.yml @@ -15,4 +15,4 @@ phases: cd test/module-path-tests mvn package mvn exec:exec -P mock-tests - fi + fi \ No newline at end of file diff --git a/buildspecs/integ-test.yml b/buildspecs/integ-test.yml index ff4a67b98e8e..0280c3beb323 100644 --- a/buildspecs/integ-test.yml +++ b/buildspecs/integ-test.yml @@ -7,7 +7,15 @@ phases: build: commands: - - mvn clean install -Dskip.unit.tests -P integration-tests -Dfindbugs.skip -Dcheckstyle.skip -pl !:dynamodbdocument-v1,!:dynamodbmapper-v1 -T1C + - | + if [ ! -z "$INTEGRATION_TEST_ROLE_ARN" ]; then + ASSUME_ROLE_OUTPUT=`aws sts assume-role --role-arn "$INTEGRATION_TEST_ROLE_ARN" --role-session-name "integration-tests" --duration-seconds 7200 --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]' --output text` + AWS_ACCESS_KEY_ID=`echo $ASSUME_ROLE_OUTPUT | awk '{ print $1 }'` + AWS_SECRET_ACCESS_KEY=`echo $ASSUME_ROLE_OUTPUT | awk '{ print $2 }'` + AWS_SESSION_TOKEN=`echo $ASSUME_ROLE_OUTPUT | awk '{ print $3 }'` + echo "Using role $INTEGRATION_TEST_ROLE_ARN with access key $AWS_ACCESS_KEY_ID." + fi + - mvn clean install -Dskip.unit.tests -P integration-tests -Dfindbugs.skip -Dcheckstyle.skip -T1C - JAVA_VERSION=$(java -version 2>&1 | grep -i version | cut -d'"' -f2 | cut -d'.' -f1-1) - echo $JAVA_VERSION - | diff --git a/buildspecs/noop.yml b/buildspecs/noop.yml index a9a5a43b89a4..44b503c0d07d 100644 --- a/buildspecs/noop.yml +++ b/buildspecs/noop.yml @@ -1,9 +1,6 @@ version: 0.2 phases: - install: - runtime-versions: - java: openjdk8 build: commands: - echo "No-op." 
\ No newline at end of file diff --git a/buildspecs/on-demand-integ-test.yml b/buildspecs/on-demand-integ-test.yml index 72889bad78db..29e21c8777df 100644 --- a/buildspecs/on-demand-integ-test.yml +++ b/buildspecs/on-demand-integ-test.yml @@ -7,7 +7,7 @@ phases: build: commands: - - mvn clean install -Dskip.unit.tests -P integration-tests -Dfindbugs.skip -Dcheckstyle.skip -pl !:dynamodbmapper-v1 -Dfailsafe.rerunFailingTestsCount=1 --fail-at-end + - mvn clean install -Dskip.unit.tests -P integration-tests -Dfindbugs.skip -Dcheckstyle.skip -Dfailsafe.rerunFailingTestsCount=1 --fail-at-end - JAVA_VERSION=$(java -version 2>&1 | grep -i version | cut -d'"' -f2 | cut -d'.' -f1-1) - echo $JAVA_VERSION - | diff --git a/buildspecs/release-javadoc.yml b/buildspecs/release-javadoc.yml index ebdbdbe0979f..835d727a45aa 100644 --- a/buildspecs/release-javadoc.yml +++ b/buildspecs/release-javadoc.yml @@ -15,10 +15,12 @@ phases: build: commands: - mvn install -P quick -T1C - - mvn install javadoc:aggregate -B -Ppublic-javadoc -Dcheckstyle.skip -Dspotbugs.skip -DskipTests -Ddoclint=none -pl '!:dynamodbdocument-v1,!:dynamodbmapper-v1,!:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:module-path-tests,!:test-utils,!:http-client-tests,!:tests-coverage-reporting' + - mvn install javadoc:aggregate -B -Ppublic-javadoc -Dcheckstyle.skip -Dspotbugs.skip -DskipTests -Ddoclint=none -pl '!:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:module-path-tests,!:test-utils,!:http-client-tests,!:tests-coverage-reporting' - RELEASE_VERSION=`mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec` - - aws s3 sync target/site/apidocs/ $DOC_PATH/$RELEASE_VERSION/ - aws s3 sync $DOC_PATH/$RELEASE_VERSION/ $DOC_PATH/latest/ --acl=public-read --delete - jar cf aws-java-sdk-v2-docs.jar -C target/site/apidocs . - aws s3 cp aws-java-sdk-v2-docs.jar $DOC_PATH/ --acl="public-read" + - python ./scripts/doc_crosslinks/generate_cross_link_data.py --apiDefinitionsBasePath ./services/ --apiDefinitionsRelativeFilePath src/main/resources/codegen-resources/service-2.json --templateFilePath ./scripts/doc_crosslinks/crosslink_redirect.html --outputFilePath ./scripts/crosslink_redirect.html + - aws s3 cp ./scripts/crosslink_redirect.html $DOC_PATH/latest/ --acl="public-read" \ No newline at end of file diff --git a/buildspecs/release-to-maven.yml b/buildspecs/release-to-maven.yml index d5ce90554f9e..1b0c144354ae 100644 --- a/buildspecs/release-to-maven.yml +++ b/buildspecs/release-to-maven.yml @@ -15,12 +15,12 @@ phases: build: commands: - RELEASE_VERSION=`mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec` - - SONATYPE_URL="https://oss.sonatype.org/service/local/repositories/releases/content/software/amazon/awssdk/aws-sdk-java/$RELEASE_VERSION/" + - SONATYPE_URL="https://aws.oss.sonatype.org/service/local/repositories/releases/content/software/amazon/awssdk/aws-sdk-java/$RELEASE_VERSION/" - | if ! 
curl -f --head $SONATYPE_URL; then mkdir -p $CREDENTIALS aws s3 cp s3://aws-java-sdk-release-credentials/ $CREDENTIALS/ --recursive - mvn clean deploy -B -s $SETTINGS_XML -Dgpg.homedir=$GPG_HOME -Ppublishing -DperformRelease -Dspotbugs.skip -DskipTests -Dcheckstyle.skip -Ddoclint=none -pl !:dynamodbdocument-v1,!:dynamodbmapper-v1,!:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:module-path-tests,!:tests-coverage-reporting,!:stability-tests -DautoReleaseAfterClose=true -DstagingProgressTimeoutMinutes=30 + mvn clean deploy -B -s $SETTINGS_XML -Dgpg.homedir=$GPG_HOME -Ppublishing -DperformRelease -Dspotbugs.skip -DskipTests -Dcheckstyle.skip -Djapicmp.skip -Ddoclint=none -pl !:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:module-path-tests,!:tests-coverage-reporting,!:stability-tests -DautoReleaseAfterClose=true -DstagingProgressTimeoutMinutes=30 else echo "This version was already released." - fi \ No newline at end of file + fi diff --git a/buildspecs/update-master-from-release.yml b/buildspecs/update-master-from-release.yml index c7ab70da2ecb..d6b604093532 100644 --- a/buildspecs/update-master-from-release.yml +++ b/buildspecs/update-master-from-release.yml @@ -33,6 +33,7 @@ phases: - - mvn versions:set -DnewVersion=$NEW_VERSION_SNAPSHOT -DgenerateBackupPoms=false -DprocessAllModules=true - sed -i -E "s/().+(<\/version>)/\1$RELEASE_VERSION\2/" README.md + - sed -i -E "s/().+(<\/awsjavasdk.previous.version>)/\1$RELEASE_VERSION\2/" pom.xml - - 'git commit -am "Update to next snapshot version: $NEW_VERSION_SNAPSHOT"' - diff --git a/bundle/build.properties b/bundle/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/bundle/build.properties +++ b/bundle/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/bundle/pom.xml b/bundle/pom.xml index 1c67fa985597..b695188a19f0 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -1,6 +1,6 @@ + @@ -7,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT ../pom.xml codegen-lite-maven-plugin @@ -61,7 +76,7 @@ org.apache.maven.plugins maven-plugin-plugin - 3.5 + 3.6.0 default-descriptor diff --git a/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java b/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java index 94d10946405b..ceee556742d1 100644 --- a/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java +++ b/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -92,13 +92,9 @@ public void generateRegionClass(Path baseSourcesDirectory, Partitions partitions public void generateServiceMetadata(Path baseSourcesDirectory, Partitions partitions) { Path sourcesDirectory = baseSourcesDirectory.resolve(SERVICE_METADATA_BASE.replace(".", "/")); Set services = new HashSet<>(); - partitions.getPartitions().stream().forEach(p -> services.addAll(p.getServices().keySet())); + partitions.getPartitions().forEach(p -> services.addAll(p.getServices().keySet())); - services.stream() - // Use hardcoded file for elasticache until the incorrect fips endpoint is fixed - //TODO Remove once elasticache endpoints are fixed at source - .filter(s -> !"elasticache".equals(s)) - .forEach(s -> new CodeGenerator(sourcesDirectory.toString(), new ServiceMetadataGenerator(partitions, + services.forEach(s -> new CodeGenerator(sourcesDirectory.toString(), new ServiceMetadataGenerator(partitions, s, SERVICE_METADATA_BASE, REGION_BASE)) diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index dc9623d13719..d71db4432efb 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -1,4 +1,19 @@ + + @@ -6,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/CodeGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/CodeGenerator.java index 6238e07c7a52..56c6798c6a80 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/CodeGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/CodeGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/PoetClass.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/PoetClass.java index 2769d71ae266..d38f0593c615 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/PoetClass.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/PoetClass.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/Utils.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/Utils.java index 371e38e6a690..2e12c8ca10c1 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/Utils.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/Utils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
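Note on the hunk above: it drops the hard-coded elasticache exclusion (so every service in the endpoints file now gets a generated metadata class) and replaces `stream().forEach(...)` with `Iterable.forEach(...)`. A minimal sketch of that same simplification, using hypothetical stand-in data rather than the real `Partitions` model:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ForEachSketch {
    public static void main(String[] args) {
        // Hypothetical stand-in for partitions.getPartitions()
        List<String> partitions = Arrays.asList("aws", "aws-cn", "aws-us-gov");
        Set<String> services = new HashSet<>();

        // Before the change: an intermediate Stream is created only to iterate
        partitions.stream().forEach(services::add);

        // After the change: Iterable.forEach does the same work without the Stream
        partitions.forEach(services::add);

        System.out.println(services);
    }
}
```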
diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/CodeTransformer.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/CodeTransformer.java index c98cffc6609c..056b5eadff42 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/CodeTransformer.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/CodeTransformer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/CodeWriter.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/CodeWriter.java index 86c60b76bd25..4a2bb49277db 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/CodeWriter.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/CodeWriter.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/JavaCodeFormatter.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/JavaCodeFormatter.java index 4ffd3cd0891f..e4ac3581dcf3 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/JavaCodeFormatter.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/JavaCodeFormatter.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/LinkRemover.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/LinkRemover.java index 35f5cbab6230..d1955ad5ae17 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/LinkRemover.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/LinkRemover.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/UnusedImportRemover.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/UnusedImportRemover.java index c4bf9616b0cc..a2410d38dce6 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/UnusedImportRemover.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/emitters/UnusedImportRemover.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionMetadataGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionMetadataGenerator.java index 25ad7b565336..5f0f505a4e89 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionMetadataGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionMetadataGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionMetadataProviderGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionMetadataProviderGenerator.java index d5b67c3a5cf0..4555dd209725 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionMetadataProviderGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionMetadataProviderGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -82,7 +82,6 @@ private CodeBlock partitions(Partitions partitions) { CodeBlock.Builder builder = CodeBlock.builder().add("$T.builder()", ImmutableMap.class); partitions.getPartitions() - .stream() .forEach(p -> builder.add(".put($S, new $T())", p.getPartition(), partitionMetadataClass(p.getPartition()))); return builder.add(".build()").build(); diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java index 30f5de26d759..fa5467bc847f 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -40,6 +40,7 @@ import software.amazon.awssdk.codegen.lite.PoetClass; import software.amazon.awssdk.codegen.lite.regions.model.Partitions; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.http.SdkHttpUtils; public class RegionGenerator implements PoetClass { @@ -108,7 +109,12 @@ private void regions(TypeSpec.Builder builder) { addGlobalRegions(builder); - regionsArray.add(regionsCodeBlock + ", ").add("AWS_GLOBAL, ").add("AWS_CN_GLOBAL, ").add("AWS_US_GOV_GLOBAL"); + regionsArray.add(regionsCodeBlock + ", ") + .add("AWS_GLOBAL, ") + .add("AWS_CN_GLOBAL, ") + .add("AWS_US_GOV_GLOBAL, ") + .add("AWS_ISO_GLOBAL, ") + .add("AWS_ISO_B_GLOBAL"); regionsArray.add("))"); TypeName listOfRegions = ParameterizedTypeName.get(ClassName.get(List.class), className()); @@ -129,6 +135,14 @@ private void addGlobalRegions(TypeSpec.Builder builder) { .addField(FieldSpec.builder(className(), "AWS_US_GOV_GLOBAL") .addModifiers(PUBLIC, STATIC, FINAL) .initializer("$T.of($S, true)", className(), "aws-us-gov-global") + .build()) + .addField(FieldSpec.builder(className(), "AWS_ISO_GLOBAL") + .addModifiers(PUBLIC, STATIC, FINAL) + .initializer("$T.of($S, true)", className(), "aws-iso-global") + .build()) + .addField(FieldSpec.builder(className(), "AWS_ISO_B_GLOBAL") + .addModifiers(PUBLIC, STATIC, FINAL) + .initializer("$T.of($S, true)", className(), "aws-iso-b-global") .build()); } @@ -153,7 +167,9 @@ private MethodSpec regionOfGlobal() { .addParameter(boolean.class, "isGlobalRegion") .returns(className()) .addStatement("$T.paramNotBlank($L, $S)", Validate.class, "value", "region") - .addStatement("return $L.put($L, $L)", "RegionCache", "value", "isGlobalRegion") + .addStatement("$T $L = $T.urlEncode($L)", + String.class, "urlEncodedValue", SdkHttpUtils.class, "value") + .addStatement("return $L.put($L, $L)", "RegionCache", "urlEncodedValue", "isGlobalRegion") .build(); } diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java index a1cfe810f271..92011139d2c6 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataLoader.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataLoader.java index 366414225840..984cc6d930b5 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataLoader.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataLoader.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
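The RegionGenerator changes above add the aws-iso-global and aws-iso-b-global pseudo-regions and make the generated `Region.of` factory URL-encode the supplied identifier before it is cached, so an untrusted region string cannot inject unescaped characters into a generated endpoint URI. A small sketch of the encoding step, assuming only the `SdkHttpUtils.urlEncode` utility that the new import brings in (the sample values are made up):

```java
import software.amazon.awssdk.utils.http.SdkHttpUtils;

public class RegionEncodingSketch {
    public static void main(String[] args) {
        // A well-formed region id is unchanged by URL encoding
        System.out.println(SdkHttpUtils.urlEncode("eu-west-1"));

        // Characters that could corrupt a generated endpoint URI are escaped
        System.out.println(SdkHttpUtils.urlEncode("us-east-1/evil")); // e.g. us-east-1%2Fevil
    }
}
```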
diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java index 3c49b91a3871..0203bbbfb649 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -83,7 +83,6 @@ private CodeBlock regions(Partitions partitions) { CodeBlock.Builder builder = CodeBlock.builder().add("$T.builder()", ImmutableMap.class); partitions.getPartitions() - .stream() .forEach(p -> p.getRegions() .keySet() .forEach(r -> builder.add(".put(Region.$L, new $T())", regionClass(r), regionMetadataClass(r)))); diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionValidationUtil.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionValidationUtil.java index 65f2389b839d..63343bea5ddf 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionValidationUtil.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionValidationUtil.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -24,7 +24,8 @@ public final class RegionValidationUtil { private static final String FIPS_PREFIX = "fips-"; - private RegionValidationUtil() {} + private RegionValidationUtil() { + } /** * Determines if a given region string is a "valid" AWS region. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/ServiceMetadataGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/ServiceMetadataGenerator.java index 3bb16edab5ce..d00774ef1d5a 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/ServiceMetadataGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/ServiceMetadataGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -50,16 +50,16 @@ public class ServiceMetadataGenerator implements PoetClass { private final Partitions partitions; - private final String service; + private final String serviceEndpointPrefix; private final String basePackage; private final String regionBasePackage; public ServiceMetadataGenerator(Partitions partitions, - String service, + String serviceEndpointPrefix, String basePackage, String regionBasePackage) { this.partitions = partitions; - this.service = service; + this.serviceEndpointPrefix = serviceEndpointPrefix; this.basePackage = basePackage; this.regionBasePackage = regionBasePackage; } @@ -68,6 +68,8 @@ public ServiceMetadataGenerator(Partitions partitions, public TypeSpec poetClass() { TypeName listOfRegions = ParameterizedTypeName.get(ClassName.get(List.class), ClassName.get(regionBasePackage, "Region")); TypeName mapOfStringString = ParameterizedTypeName.get(Map.class, String.class, String.class); + TypeName listOfServicePartitionMetadata = + ParameterizedTypeName.get(ClassName.get(List.class), ClassName.get(regionBasePackage, "ServicePartitionMetadata")); return TypeSpec.classBuilder(className()) .addModifiers(Modifier.PUBLIC) @@ -79,7 +81,7 @@ public TypeSpec poetClass() { .addSuperinterface(ClassName.get(regionBasePackage, "ServiceMetadata")) .addField(FieldSpec.builder(String.class, "ENDPOINT_PREFIX") .addModifiers(PRIVATE, FINAL, STATIC) - .initializer("$S", service) + .initializer("$S", serviceEndpointPrefix) .build()) .addField(FieldSpec.builder(mapOfStringString, "PARTITION_OVERRIDDEN_ENDPOINTS") .addModifiers(PRIVATE, FINAL, STATIC) @@ -97,15 +99,20 @@ public TypeSpec poetClass() { .addModifiers(PRIVATE, FINAL, STATIC) .initializer(signingRegionOverrides(partitions)) .build()) + .addField(FieldSpec.builder(listOfServicePartitionMetadata, "PARTITIONS") + .addModifiers(PRIVATE, FINAL, STATIC) + .initializer(servicePartitions(partitions)) + .build()) .addMethod(regions()) .addMethod(endpointFor()) .addMethod(signingRegion()) + .addMethod(partitions(listOfServicePartitionMetadata)) .build(); } @Override public ClassName className() { - String sanitizedServiceName = service.replace(".", "-"); + String sanitizedServiceName = serviceEndpointPrefix.replace(".", "-"); return ClassName.get(basePackage, Stream.of(sanitizedServiceName.split("-")) .map(Utils::capitalize) .collect(Collectors.joining()) + "ServiceMetadata"); @@ -131,7 +138,6 @@ private CodeBlock serviceEndpoints(Partitions partitions) { CodeBlock.Builder builder = CodeBlock.builder().add("$T.builder()", ImmutableMap.class); services.entrySet() - .stream() .forEach(s -> s.getValue().getEndpoints() .entrySet() .stream() @@ -151,8 +157,8 @@ private CodeBlock regionsField(Partitions partitions) { partitions.getPartitions() .stream() - .filter(p -> p.getServices().containsKey(service)) - .forEach(p -> regions.addAll(p.getServices().get(service).getEndpoints().keySet() + .filter(p -> p.getServices().containsKey(serviceEndpointPrefix)) + .forEach(p -> regions.addAll(p.getServices().get(serviceEndpointPrefix).getEndpoints().keySet() .stream() .filter(r -> RegionValidationUtil.validRegion(r, p.getRegionRegex())) .collect(Collectors.toList()))); @@ -173,7 +179,6 @@ private CodeBlock signingRegionOverrides(Partitions partitions) { CodeBlock.Builder builder = CodeBlock.builder().add("$T.builder()", ImmutableMap.class); serviceData.entrySet() - .stream() .forEach(s -> s.getValue().getEndpoints() .entrySet() .stream() @@ -187,6 +192,39 @@ private CodeBlock signingRegionOverrides(Partitions partitions) { return 
builder.add(".build()").build(); } + private CodeBlock servicePartitions(Partitions partitions) { + return CodeBlock.builder() + .add("$T.unmodifiableList($T.asList(", Collections.class, Arrays.class) + .add(commaSeparatedServicePartitions(partitions)) + .add("))") + .build(); + } + + private CodeBlock commaSeparatedServicePartitions(Partitions partitions) { + ClassName defaultServicePartitionMetadata = ClassName.get(regionBasePackage + ".internal", + "DefaultServicePartitionMetadata"); + return partitions.getPartitions() + .stream() + .filter(p -> p.getServices().containsKey(serviceEndpointPrefix)) + .map(p -> CodeBlock.of("new $T($S, $L)", + defaultServicePartitionMetadata, + p.getPartition(), + globalRegion(p))) + .collect(CodeBlock.joining(",")); + } + + private CodeBlock globalRegion(Partition partition) { + ClassName region = ClassName.get(regionBasePackage, "Region"); + Service service = partition.getServices().get(this.serviceEndpointPrefix); + boolean hasGlobalRegionForPartition = service.isRegionalized() != null && + !service.isRegionalized() && + service.isPartitionWideEndpointAvailable(); + String globalRegionForPartition = hasGlobalRegionForPartition ? service.getPartitionEndpoint() : null; + return globalRegionForPartition == null + ? CodeBlock.of("null") + : CodeBlock.of("$T.of($S)", region, globalRegionForPartition); + } + private MethodSpec regions() { TypeName listOfRegions = ParameterizedTypeName.get(ClassName.get(List.class), ClassName.get(regionBasePackage, "Region")); @@ -221,15 +259,23 @@ private MethodSpec signingRegion() { .build(); } + private MethodSpec partitions(TypeName listOfServicePartitionMetadata) { + return MethodSpec.methodBuilder("servicePartitions") + .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) + .returns(listOfServicePartitionMetadata) + .addStatement("return $L", "PARTITIONS") + .build(); + } + private Map getServiceData(Partitions partitions) { Map serviceData = new TreeMap<>(Comparator.comparing(Partition::getPartition)); partitions.getPartitions() - .stream() .forEach(p -> p.getServices() .entrySet() .stream() - .filter(s -> s.getKey().equalsIgnoreCase(service)) + .filter(s -> s.getKey().equalsIgnoreCase(serviceEndpointPrefix)) .forEach(s -> serviceData.put(p, s.getValue()))); return serviceData; diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/ServiceMetadataProviderGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/ServiceMetadataProviderGenerator.java index 395891000fcd..f169fac71f7a 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/ServiceMetadataProviderGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/ServiceMetadataProviderGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -86,10 +86,8 @@ private CodeBlock regions(Partitions partitions) { Set seenServices = new HashSet<>(); partitions.getPartitions() - .stream() .forEach(p -> p.getServices() .keySet() - .stream() .forEach(s -> { if (!seenServices.contains(s)) { builder.add(".put($S, new $T())", s, serviceMetadataClass(s)); @@ -101,6 +99,10 @@ private CodeBlock regions(Partitions partitions) { } private ClassName serviceMetadataClass(String service) { + if ("s3".equals(service)) { + // This class contains extra logic for detecting the regional endpoint flag + return ClassName.get(basePackage, "EnhancedS3ServiceMetadata"); + } String sanitizedServiceName = service.replace(".", "-"); return ClassName.get(basePackage, Stream.of(sanitizedServiceName.split("-")) .map(Utils::capitalize) diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/CredentialScope.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/CredentialScope.java index 341c6c19418e..d34bc65d319a 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/CredentialScope.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/CredentialScope.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Endpoint.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Endpoint.java index 8c89a3e29f76..ace3435126ec 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Endpoint.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Endpoint.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -54,7 +54,8 @@ public final class Endpoint implements Cloneable { */ private String sslCommonName; - public Endpoint() {} + public Endpoint() { + } /** * Merges the given endpoints and returns the merged one. diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java index b457a97d3a9e..e225d47bfd82 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -62,7 +62,8 @@ public final class Partition { */ private Endpoint defaults; - public Partition() {} + public Partition() { + } public Partition(@JsonProperty(value = "partition") String partition, @JsonProperty(value = "regions") Map diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegion.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegion.java index f437b3aa167b..1d4b7d4bdf0d 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegion.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegion.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -30,7 +30,8 @@ public final class PartitionRegion { */ private String description; - public PartitionRegion() {} + public PartitionRegion() { + } public PartitionRegion(@JsonProperty(value = "description") String description) { this.description = Validate.notNull(description, "Region description"); diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partitions.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partitions.java index 074456022064..c0d5dd4c1b12 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partitions.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partitions.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -36,7 +36,8 @@ public final class Partitions { */ private List partitions; - public Partitions() {} + public Partitions() { + } public Partitions(@JsonProperty(value = "version") String version, @JsonProperty(value = "partitions") List partitions) { diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Service.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Service.java index db89355e5a83..4a95600c5416 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Service.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Service.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -45,9 +45,10 @@ public final class Service { /** * Returns true if the service is regionalized. */ - private boolean isRegionalized; + private Boolean isRegionalized; - public Service() {} + public Service() { + } public Service(@JsonProperty(value = "endpoints") Map endpoints) { this.endpoints = Validate.paramNotNull(endpoints, "endpoints"); @@ -99,7 +100,7 @@ public void setPartitionEndpoint(String partitionEndpoint) { /** * returns true if the service is regionalized. 
*/ - public boolean isRegionalized() { + public Boolean isRegionalized() { return isRegionalized; } @@ -107,7 +108,7 @@ public boolean isRegionalized() { * sets the regionalized property for a service.. */ @JsonProperty(value = "isRegionalized") - public void setIsRegionalized(boolean regionalized) { + public void setIsRegionalized(Boolean regionalized) { isRegionalized = regionalized; } diff --git a/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/PoetMatchers.java b/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/PoetMatchers.java index 727009dec96f..e7c9ed5d2ee5 100644 --- a/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/PoetMatchers.java +++ b/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/PoetMatchers.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java b/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java index b3c214c8fdcb..223ec4d50532 100644 --- a/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java +++ b/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ public class RegionGenerationTest { - private static final String ENDPOINTS = "/software/amazon/awssdk/codegen/lite/endpoints.json"; + private static final String ENDPOINTS = "/software/amazon/awssdk/codegen/lite/test-endpoints.json"; private static final String SERVICE_METADATA_BASE = "software.amazon.awssdk.regions.servicemetadata"; private static final String REGION_METADATA_BASE = "software.amazon.awssdk.regions.regionmetadata"; private static final String PARTITION_METADATA_BASE = "software.amazon.awssdk.regions.partitionmetadata"; diff --git a/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionValidationUtilTest.java b/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionValidationUtilTest.java index 497669e8ef39..80c47e3c2f90 100644 --- a/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionValidationUtilTest.java +++ b/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionValidationUtilTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
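Together with the PARTITIONS field and partitions(...) method emitted by ServiceMetadataGenerator above, every generated ServiceMetadata implementation now exposes a servicePartitions() accessor (see the regenerated S3 and STS test fixtures below). A minimal consumption sketch, assuming the ServiceMetadata.of(...) lookup that the regions module provides for resolving a generated metadata class by endpoint prefix:

```java
import java.util.List;
import software.amazon.awssdk.regions.ServiceMetadata;
import software.amazon.awssdk.regions.ServicePartitionMetadata;

public class ServicePartitionsSketch {
    public static void main(String[] args) {
        // Resolve the generated metadata class registered for the "s3" endpoint prefix
        ServiceMetadata s3 = ServiceMetadata.of("s3");

        // New in this change: the partitions in which the service is modeled
        List<ServicePartitionMetadata> partitions = s3.servicePartitions();
        System.out.println("s3 is modeled in " + partitions.size() + " partition(s)");
    }
}
```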
diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java index 33965dc7bad3..6b3750874bd6 100644 --- a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java +++ b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java @@ -7,6 +7,7 @@ import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.http.SdkHttpUtils; /** * An Amazon Web Services region that hosts a set of Amazon services. @@ -79,10 +80,14 @@ public final class Region { public static final Region AWS_US_GOV_GLOBAL = Region.of("aws-us-gov-global", true); + public static final Region AWS_ISO_GLOBAL = Region.of("aws-iso-global", true); + + public static final Region AWS_ISO_B_GLOBAL = Region.of("aws-iso-b-global", true); + private static final List REGIONS = Collections.unmodifiableList(Arrays.asList(AP_SOUTH_1, EU_WEST_3, EU_WEST_2, - EU_WEST_1, AP_NORTHEAST_3, AP_NORTHEAST_2, AP_NORTHEAST_1, CA_CENTRAL_1, SA_EAST_1, CN_NORTH_1, US_GOV_WEST_1, - AP_SOUTHEAST_1, AP_SOUTHEAST_2, EU_CENTRAL_1, US_EAST_1, US_EAST_2, US_WEST_1, CN_NORTHWEST_1, US_WEST_2, AWS_GLOBAL, - AWS_CN_GLOBAL, AWS_US_GOV_GLOBAL)); + EU_WEST_1, AP_NORTHEAST_3, AP_NORTHEAST_2, AP_NORTHEAST_1, CA_CENTRAL_1, SA_EAST_1, CN_NORTH_1, US_GOV_WEST_1, + AP_SOUTHEAST_1, AP_SOUTHEAST_2, EU_CENTRAL_1, US_EAST_1, US_EAST_2, US_WEST_1, CN_NORTHWEST_1, US_WEST_2, AWS_GLOBAL, + AWS_CN_GLOBAL, AWS_US_GOV_GLOBAL, AWS_ISO_GLOBAL, AWS_ISO_B_GLOBAL)); private final boolean isGlobalRegion; @@ -99,7 +104,8 @@ public static Region of(String value) { private static Region of(String value, boolean isGlobalRegion) { Validate.paramNotBlank(value, "region"); - return RegionCache.put(value, isGlobalRegion); + String urlEncodedValue = SdkHttpUtils.urlEncode(value); + return RegionCache.put(urlEncodedValue, isGlobalRegion); } public static List regions() { diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/s3-service-metadata.java b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/s3-service-metadata.java index bae9ee6da3e3..027d9a3bc765 100644 --- a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/s3-service-metadata.java +++ b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/s3-service-metadata.java @@ -9,6 +9,8 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.regions.ServiceMetadata; +import software.amazon.awssdk.regions.ServicePartitionMetadata; +import software.amazon.awssdk.regions.internal.DefaultServicePartitionMetadata; import software.amazon.awssdk.utils.ImmutableMap; @Generated("software.amazon.awssdk:codegen") @@ -19,22 +21,27 @@ public final class S3ServiceMetadata implements ServiceMetadata { private static final Map PARTITION_OVERRIDDEN_ENDPOINTS = ImmutableMap. builder().build(); private static final Map REGION_OVERRIDDEN_ENDPOINTS = ImmutableMap. 
builder() - .put("ap-northeast-1", "s3.ap-northeast-1.amazonaws.com").put("ap-southeast-1", "s3.ap-southeast-1.amazonaws.com") - .put("ap-southeast-2", "s3.ap-southeast-2.amazonaws.com").put("eu-west-1", "s3.eu-west-1.amazonaws.com") - .put("sa-east-1", "s3.sa-east-1.amazonaws.com").put("us-east-1", "s3.amazonaws.com") - .put("us-west-1", "s3.us-west-1.amazonaws.com").put("us-west-2", "s3.us-west-2.amazonaws.com") - .put("fips-us-gov-west-1", "s3-fips-us-gov-west-1.amazonaws.com") - .put("us-gov-west-1", "s3.us-gov-west-1.amazonaws.com").build(); - - private static final List REGIONS = Collections.unmodifiableList(Arrays.asList(Region.of("ap-northeast-1"), - Region.of("ap-northeast-2"), Region.of("ap-northeast-3"), Region.of("ap-south-1"), Region.of("ap-southeast-1"), - Region.of("ap-southeast-2"), Region.of("ca-central-1"), Region.of("eu-central-1"), Region.of("eu-west-1"), - Region.of("eu-west-2"), Region.of("eu-west-3"), Region.of("sa-east-1"), Region.of("us-east-1"), - Region.of("us-east-2"), Region.of("us-west-1"), Region.of("us-west-2"), Region.of("cn-north-1"), - Region.of("cn-northwest-1"), Region.of("fips-us-gov-west-1"), Region.of("us-gov-west-1"))); + .put("ap-northeast-1", "s3.ap-northeast-1.amazonaws.com").put("ap-southeast-1", "s3.ap-southeast-1.amazonaws.com") + .put("ap-southeast-2", "s3.ap-southeast-2.amazonaws.com").put("eu-west-1", "s3.eu-west-1.amazonaws.com") + .put("sa-east-1", "s3.sa-east-1.amazonaws.com").put("us-east-1", "s3.amazonaws.com") + .put("us-west-1", "s3.us-west-1.amazonaws.com").put("us-west-2", "s3.us-west-2.amazonaws.com") + .put("fips-us-gov-west-1", "s3-fips-us-gov-west-1.amazonaws.com") + .put("us-gov-west-1", "s3.us-gov-west-1.amazonaws.com").build(); + + private static final List REGIONS = Collections.unmodifiableList( + Arrays.asList(Region.of("ap-northeast-1"), + Region.of("ap-northeast-2"), Region.of("ap-northeast-3"), Region.of("ap-south-1"), Region.of("ap-southeast-1"), + Region.of("ap-southeast-2"), Region.of("ca-central-1"), Region.of("eu-central-1"), Region.of("eu-west-1"), + Region.of("eu-west-2"), Region.of("eu-west-3"), Region.of("sa-east-1"), Region.of("us-east-1"), + Region.of("us-east-2"), Region.of("us-west-1"), Region.of("us-west-2"), Region.of("cn-north-1"), + Region.of("cn-northwest-1"), Region.of("fips-us-gov-west-1"), Region.of("us-gov-west-1"))); private static final Map SIGNING_REGION_OVERRIDES = ImmutableMap. builder() - .put("fips-us-gov-west-1", "us-gov-west-1").build(); + .put("fips-us-gov-west-1", "us-gov-west-1").build(); + + private static final List PARTITIONS = Collections.unmodifiableList(Arrays.asList( + new DefaultServicePartitionMetadata("aws", null), new DefaultServicePartitionMetadata("aws-cn", null), + new DefaultServicePartitionMetadata("aws-us-gov", null))); @Override public List regions() { @@ -44,11 +51,16 @@ public List regions() { @Override public URI endpointFor(Region region) { return URI.create(REGION_OVERRIDDEN_ENDPOINTS.containsKey(region.id()) ? 
REGION_OVERRIDDEN_ENDPOINTS.get(region.id()) - : computeEndpoint(ENDPOINT_PREFIX, PARTITION_OVERRIDDEN_ENDPOINTS, region)); + : computeEndpoint(ENDPOINT_PREFIX, PARTITION_OVERRIDDEN_ENDPOINTS, region)); } @Override public Region signingRegion(Region region) { return Region.of(SIGNING_REGION_OVERRIDES.getOrDefault(region.id(), region.id())); } + + @Override + public List servicePartitions() { + return PARTITIONS; + } } diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/service-metadata-provider.java b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/service-metadata-provider.java index b781aa2503c5..ee84f112bdba 100644 --- a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/service-metadata-provider.java +++ b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/service-metadata-provider.java @@ -1,5 +1,4 @@ package software.amazon.awssdk.regions; - import java.util.Map; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkPublicApi; @@ -56,6 +55,7 @@ import software.amazon.awssdk.regions.servicemetadata.ElasticmapreduceServiceMetadata; import software.amazon.awssdk.regions.servicemetadata.ElastictranscoderServiceMetadata; import software.amazon.awssdk.regions.servicemetadata.EmailServiceMetadata; +import software.amazon.awssdk.regions.servicemetadata.EnhancedS3ServiceMetadata; import software.amazon.awssdk.regions.servicemetadata.EntitlementMarketplaceServiceMetadata; import software.amazon.awssdk.regions.servicemetadata.EsServiceMetadata; import software.amazon.awssdk.regions.servicemetadata.EventsServiceMetadata; @@ -107,7 +107,6 @@ import software.amazon.awssdk.regions.servicemetadata.Route53domainsServiceMetadata; import software.amazon.awssdk.regions.servicemetadata.RuntimeLexServiceMetadata; import software.amazon.awssdk.regions.servicemetadata.RuntimeSagemakerServiceMetadata; -import software.amazon.awssdk.regions.servicemetadata.S3ServiceMetadata; import software.amazon.awssdk.regions.servicemetadata.SagemakerServiceMetadata; import software.amazon.awssdk.regions.servicemetadata.SdbServiceMetadata; import software.amazon.awssdk.regions.servicemetadata.SecretsmanagerServiceMetadata; @@ -196,7 +195,7 @@ public final class GeneratedServiceMetadataProvider implements ServiceMetadataPr .put("redshift", new RedshiftServiceMetadata()).put("rekognition", new RekognitionServiceMetadata()) .put("resource-groups", new ResourceGroupsServiceMetadata()).put("route53", new Route53ServiceMetadata()) .put("route53domains", new Route53domainsServiceMetadata()).put("runtime.lex", new RuntimeLexServiceMetadata()) - .put("runtime.sagemaker", new RuntimeSagemakerServiceMetadata()).put("s3", new S3ServiceMetadata()) + .put("runtime.sagemaker", new RuntimeSagemakerServiceMetadata()).put("s3", new EnhancedS3ServiceMetadata()) .put("sagemaker", new SagemakerServiceMetadata()).put("sdb", new SdbServiceMetadata()) .put("secretsmanager", new SecretsmanagerServiceMetadata()) .put("serverlessrepo", new ServerlessrepoServiceMetadata()) diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/sts-service-metadata.java b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/sts-service-metadata.java index b622f4a7ee45..a88b40b8378a 100644 --- a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/sts-service-metadata.java +++ 
b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/sts-service-metadata.java @@ -1,18 +1,3 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - package software.amazon.awssdk.regions.servicemetadata; import java.net.URI; @@ -24,6 +9,8 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.regions.ServiceMetadata; +import software.amazon.awssdk.regions.ServicePartitionMetadata; +import software.amazon.awssdk.regions.internal.DefaultServicePartitionMetadata; import software.amazon.awssdk.utils.ImmutableMap; @Generated("software.amazon.awssdk:codegen") @@ -51,6 +38,10 @@ public final class StsServiceMetadata implements ServiceMetadata { .put("ap-northeast-2", "ap-northeast-2").put("us-east-1-fips", "us-east-1").put("us-east-2-fips", "us-east-2") .put("us-west-1-fips", "us-west-1").put("us-west-2-fips", "us-west-2").build(); + private static final List PARTITIONS = Collections.unmodifiableList(Arrays.asList( + new DefaultServicePartitionMetadata("aws", null), new DefaultServicePartitionMetadata("aws-cn", null), + new DefaultServicePartitionMetadata("aws-us-gov", null))); + @Override public List regions() { return REGIONS; @@ -66,4 +57,9 @@ public URI endpointFor(Region region) { public Region signingRegion(Region region) { return Region.of(SIGNING_REGION_OVERRIDES.getOrDefault(region.id(), region.id())); } + + @Override + public List servicePartitions() { + return PARTITIONS; + } } diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/endpoints.json b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/test-endpoints.json similarity index 100% rename from codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/endpoints.json rename to codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/test-endpoints.json diff --git a/codegen-maven-plugin/build.properties b/codegen-maven-plugin/build.properties index ecf2dae6fcb1..59c37a425758 100644 --- a/codegen-maven-plugin/build.properties +++ b/codegen-maven-plugin/build.properties @@ -1,12 +1,12 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at -# +# # http://aws.amazon.com/apache2.0 -# +# # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. 
See the License for the specific language governing diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index d073b970a67b..700cbaf4c160 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -1,6 +1,6 @@ + core software.amazon.awssdk - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT 4.0.0 diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/Generated.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/Generated.java index 82102c3d0924..3c649551a73e 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/Generated.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/Generated.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/Immutable.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/Immutable.java index 0d98cd4e1b6a..cf172c06a6d8 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/Immutable.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/Immutable.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/NotThreadSafe.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/NotThreadSafe.java index e8097e5683bb..0b43a2f2392a 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/NotThreadSafe.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/NotThreadSafe.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/ReviewBeforeRelease.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/ReviewBeforeRelease.java index 0cb16ed58688..8c14f26a8fea 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/ReviewBeforeRelease.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/ReviewBeforeRelease.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkInternalApi.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkInternalApi.java index da18811b4ba8..fe14c2146895 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkInternalApi.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkInternalApi.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ import java.lang.annotation.Target; /** - * Marker interface for 'internal' APIs that should not be used outside the core module. Breaking + * Marker interface for 'internal' APIs that should not be used outside the same module. Breaking * changes can and will be introduced to elements marked as {@link SdkInternalApi}. Users of the SDK * and the generated clients themselves should not depend on any packages, types, fields, * constructors, or methods with this annotation. diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkPreviewApi.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkPreviewApi.java new file mode 100644 index 000000000000..7fb713f735f8 --- /dev/null +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkPreviewApi.java @@ -0,0 +1,30 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Target; + +/** + * Marker interface for preview and experimental APIs. Breaking changes may be + * introduced to elements marked as {@link SdkPreviewApi}. Users of the SDK + * should assume that anything annotated as preview will change or break, and + * should not use them in production. + */ +@Target({ElementType.PACKAGE, ElementType.TYPE, ElementType.FIELD, ElementType.CONSTRUCTOR, ElementType.METHOD}) +@SdkProtectedApi +public @interface SdkPreviewApi { +} diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkProtectedApi.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkProtectedApi.java index 2f47fff57bdb..edf0580666fe 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkProtectedApi.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkProtectedApi.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
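The diff above also introduces a new @SdkPreviewApi marker for preview and experimental APIs whose contract may break between releases. A minimal sketch of how the marker would be applied; the class below is hypothetical and only illustrates the annotation targets allowed by the declaration:

```java
import software.amazon.awssdk.annotations.SdkPreviewApi;
import software.amazon.awssdk.annotations.SdkPublicApi;

/**
 * Hypothetical example type: anything carrying {@link SdkPreviewApi} may change
 * or be removed in a future release and should not be relied on in production.
 */
@SdkPreviewApi
@SdkPublicApi
public final class ExperimentalFeatureConfiguration {
    private ExperimentalFeatureConfiguration() {
    }
}
```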
diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkPublicApi.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkPublicApi.java index 560369ed6652..9a8104527f7a 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkPublicApi.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkPublicApi.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkTestInternalApi.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkTestInternalApi.java index de45d7345a08..7cc8d4dea5b7 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkTestInternalApi.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkTestInternalApi.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/ThreadSafe.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/ThreadSafe.java index 972f226428df..0df611a9c101 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/ThreadSafe.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/ThreadSafe.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/package-info.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/package-info.java index 18e533682f61..06933e2a9790 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/package-info.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/arns/pom.xml b/core/arns/pom.xml new file mode 100644 index 000000000000..ab8458465c76 --- /dev/null +++ b/core/arns/pom.xml @@ -0,0 +1,77 @@ + + + + + + core + software.amazon.awssdk + 2.15.62-SNAPSHOT + + 4.0.0 + + arns + + AWS Java SDK :: Arns + + The AWS SDK for Java - Arns module holds the classes that are related to AWS ARN + + https://aws.amazon.com/sdkforjava + + + software.amazon.awssdk + annotations + ${awsjavasdk.version} + + + software.amazon.awssdk + utils + ${awsjavasdk.version} + + + + junit + junit + test + + + org.assertj + assertj-core + test + + + software.amazon.awssdk + test-utils + test + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.arns + + + + + + + \ No newline at end of file diff --git a/core/arns/src/main/java/software/amazon/awssdk/arns/Arn.java b/core/arns/src/main/java/software/amazon/awssdk/arns/Arn.java new file mode 100644 index 000000000000..638c575f0081 --- /dev/null +++ b/core/arns/src/main/java/software/amazon/awssdk/arns/Arn.java @@ -0,0 +1,373 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.arns; + +import java.util.Objects; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * The Arns generated and recognized by this code are the Arns described here: + * + * https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + * + *

+ * The primary supported Arn format is: + * + * + * arn:<partition>:<service>:<region>:<account>:<resource> + * + *

+ * {@link #resourceAsString()} returns everything after the account section of the Arn + * as a single string. + * + *

+ * However, the following Arn formats are supported where the values are present and well + * formatted through {@link #resource()}: + * + *

+ * arn:<partition>:<service>:<region>:<account>:<resourcetype>/resource
+ * arn:<partition>:<service>:<region>:<account>:<resourcetype>/resource/qualifier
+ * arn:<partition>:<service>:<region>:<account>:<resourcetype>/resource:qualifier
+ * arn:<partition>:<service>:<region>:<account>:<resourcetype>:resource
+ * arn:<partition>:<service>:<region>:<account>:<resourcetype>:resource:qualifier
+ * 
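As a concrete illustration of the resource formats listed above, the following sketch (editorial, not part of this change set; the ARN values are invented) shows how the resource section is split into type, resource and qualifier for both the colon- and slash-delimited forms:

import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.arns.ArnResource;

public class ArnResourceFormatsExample {
    public static void main(String[] args) {
        // Colon-delimited: <resourcetype>:resource:qualifier
        ArnResource colon = Arn.fromString("arn:aws:s3:us-east-1:123456789012:bucket:foobar:1").resource();
        System.out.println(colon.resourceType()); // Optional[bucket]
        System.out.println(colon.resource());     // foobar
        System.out.println(colon.qualifier());    // Optional[1]

        // Slash-delimited: <resourcetype>/resource/qualifier is split the same way.
        ArnResource slash = Arn.fromString("arn:aws:s3:us-east-1:123456789012:bucket/foobar/1").resource();
        System.out.println(slash.resourceType()); // Optional[bucket]
        System.out.println(slash.qualifier());    // Optional[1]
    }
}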
+ * + * {@link #resource()} returns a {@link ArnResource} which has access + * to {@link ArnResource#resourceType()}, {@link ArnResource#resource()} and + * {@link ArnResource#qualifier()}. + * + *

+ * To parse an Arn from a string, use Arn.fromString(). To convert an Arn to its + * string representation, use Arn.toString(). + * + *

+ * For instance, for a string s containing a well-formed Arn, the + * following should always be true: + * + *

+ * Arn theArn = Arn.fromString(s);
+ * s.equals(theArn.toString());
+ * 
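A minimal, self-contained sketch of the round-trip property described above (the ARN string is an invented example; the accessors are the ones declared below in this class):

import software.amazon.awssdk.arns.Arn;

public class ArnRoundTripExample {
    public static void main(String[] args) {
        String s = "arn:aws:s3:us-east-1:123456789012:myresource";

        // Parse the string into its components.
        Arn theArn = Arn.fromString(s);
        System.out.println(theArn.partition());        // aws
        System.out.println(theArn.service());          // s3
        System.out.println(theArn.region());           // Optional[us-east-1]
        System.out.println(theArn.accountId());        // Optional[123456789012]
        System.out.println(theArn.resourceAsString()); // myresource

        // Converting back to a string yields the original input.
        System.out.println(s.equals(theArn.toString())); // true
    }
}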
+ * + * @see ArnResource + */ +@SdkPublicApi +public final class Arn implements ToCopyableBuilder { + + private final String partition; + private final String service; + private final String region; + private final String accountId; + private final String resource; + private final ArnResource arnResource; + + private Arn(DefaultBuilder builder) { + this.partition = Validate.paramNotBlank(builder.partition, "partition"); + this.service = Validate.paramNotBlank(builder.service, "service"); + this.region = builder.region; + this.accountId = builder.accountId; + this.resource = Validate.paramNotBlank(builder.resource, "resource"); + this.arnResource = ArnResource.fromString(resource); + } + + /** + * @return The partition that the resource is in. + */ + public String partition() { + return partition; + } + + /** + * @return The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). + */ + public String service() { + return service; + } + + /** + * @return The Region that the resource resides in. + */ + public Optional region() { + return Optional.ofNullable(region); + } + + /** + * @return The ID of the AWS account that owns the resource, without the hyphens. + */ + public Optional accountId() { + return Optional.ofNullable(accountId); + } + + /** + * @return {@link ArnResource} + */ + public ArnResource resource() { + return arnResource; + } + + /** + * @return the resource as string + */ + public String resourceAsString() { + return resource; + } + + /** + * @return a builder for {@link Arn}. + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Parses a given string into an {@link Arn}. The resource is accessible entirely as a + * string through {@link #resourceAsString()}. Where correctly formatted, a parsed + * resource containing resource type, resource and qualifier is available through + * {@link #resource()}. + * + * @param arn - A string containing an Arn. + * @return {@link Arn} - A modeled Arn. 
+ */ + public static Arn fromString(String arn) { + int arnColonIndex = arn.indexOf(':'); + if (arnColonIndex < 0 || !"arn".equals(arn.substring(0, arnColonIndex))) { + throw new IllegalArgumentException("Malformed ARN - doesn't start with 'arn:'"); + } + + int partitionColonIndex = arn.indexOf(':', arnColonIndex + 1); + if (partitionColonIndex < 0) { + throw new IllegalArgumentException("Malformed ARN - no AWS partition specified"); + } + String partition = arn.substring(arnColonIndex + 1, partitionColonIndex); + + int serviceColonIndex = arn.indexOf(':', partitionColonIndex + 1); + if (serviceColonIndex < 0) { + throw new IllegalArgumentException("Malformed ARN - no service specified"); + } + String service = arn.substring(partitionColonIndex + 1, serviceColonIndex); + + int regionColonIndex = arn.indexOf(':', serviceColonIndex + 1); + if (regionColonIndex < 0) { + throw new IllegalArgumentException("Malformed ARN - no AWS region partition specified"); + } + String region = arn.substring(serviceColonIndex + 1, regionColonIndex); + + int accountColonIndex = arn.indexOf(':', regionColonIndex + 1); + if (accountColonIndex < 0) { + throw new IllegalArgumentException("Malformed ARN - no AWS account specified"); + } + String accountId = arn.substring(regionColonIndex + 1, accountColonIndex); + + String resource = arn.substring(accountColonIndex + 1); + if (resource.isEmpty()) { + throw new IllegalArgumentException("Malformed ARN - no resource specified"); + } + + return Arn.builder() + .partition(partition) + .service(service) + .region(region) + .accountId(accountId) + .resource(resource) + .build(); + } + + @Override + public String toString() { + return "arn:" + + this.partition + + ":" + + this.service + + ":" + + region + + ":" + + this.accountId + + ":" + + this.resource; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Arn arn = (Arn) o; + + if (!Objects.equals(partition, arn.partition)) { + return false; + } + if (!Objects.equals(service, arn.service)) { + return false; + } + if (!Objects.equals(region, arn.region)) { + return false; + } + if (!Objects.equals(accountId, arn.accountId)) { + return false; + } + if (!Objects.equals(resource, arn.resource)) { + return false; + } + return Objects.equals(arnResource, arn.arnResource); + } + + @Override + public int hashCode() { + int result = partition.hashCode(); + result = 31 * result + service.hashCode(); + result = 31 * result + (region != null ? region.hashCode() : 0); + result = 31 * result + (accountId != null ? accountId.hashCode() : 0); + result = 31 * result + resource.hashCode(); + return result; + } + + @Override + public Builder toBuilder() { + return builder().accountId(accountId) + .partition(partition) + .region(region) + .resource(resource) + .service(service) + ; + } + + /** + * A builder for a {@link Arn}. See {@link #builder()}. + */ + public interface Builder extends CopyableBuilder { + + /** + * Define the partition that the resource is in. + * + * @param partition the partition that the resource is in + * @return Returns a reference to this builder + */ + Builder partition(String partition); + + /** + * Define the service name that identifies the AWS product + * + * @param service The service name that identifies the AWS product + * @return Returns a reference to this builder + */ + Builder service(String service); + + /** + * Define the Region that the resource resides in. 
+ * + * @param region The Region that the resource resides in. + * @return Returns a reference to this builder + */ + Builder region(String region); + + /** + * Define the ID of the AWS account that owns the resource, without the hyphens. + * + * @param accountId The ID of the AWS account that owns the resource, without the hyphens. + * @return Returns a reference to this builder + */ + Builder accountId(String accountId); + + /** + * Define the resource identifier. A resource identifier can be the name or ID of the resource + * or a resource path. + * + * @param resource resource identifier + * @return Returns a reference to this builder + */ + Builder resource(String resource); + + /** + * @return an instance of {@link Arn} that is created from the builder + */ + Arn build(); + } + + private static final class DefaultBuilder implements Builder { + private String partition; + private String service; + private String region; + private String accountId; + private String resource; + + private DefaultBuilder() { + } + + public void setPartition(String partition) { + this.partition = partition; + } + + @Override + public Builder partition(String partition) { + setPartition(partition); + return this; + } + + public void setService(String service) { + this.service = service; + } + + @Override + public Builder service(String service) { + setService(service); + return this; + } + + public void setRegion(String region) { + this.region = region; + } + + @Override + public Builder region(String region) { + setRegion(region); + return this; + } + + public void setAccountId(String accountId) { + this.accountId = accountId; + } + + @Override + public Builder accountId(String accountId) { + setAccountId(accountId); + return this; + } + + public void setResource(String resource) { + this.resource = resource; + } + + @Override + public Builder resource(String resource) { + setResource(resource); + return this; + } + + @Override + public Arn build() { + return new Arn(this); + } + } +} diff --git a/core/arns/src/main/java/software/amazon/awssdk/arns/ArnResource.java b/core/arns/src/main/java/software/amazon/awssdk/arns/ArnResource.java new file mode 100644 index 000000000000..b3af63262032 --- /dev/null +++ b/core/arns/src/main/java/software/amazon/awssdk/arns/ArnResource.java @@ -0,0 +1,236 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.arns; + +import java.util.Objects; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.StringUtils; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * An additional model within {@link Arn} that provides the Resource Type, Resource, and + * Resource Qualifier of an AWS Arn when those values are present and correctly formatted + * within an Arn. + * + *

+ * If {@link #resourceType} is not present, {@link #resource} will return the entire resource + * as a string the same as {@link Arn#resource()}. + * + * @see Arn + */ +@SdkPublicApi +public final class ArnResource implements ToCopyableBuilder { + + private final String resourceType; + private final String resource; + private final String qualifier; + + private ArnResource(DefaultBuilder builder) { + this.resourceType = builder.resourceType; + this.resource = Validate.paramNotBlank(builder.resource, "resource"); + this.qualifier = builder.qualifier; + } + + /** + * @return the optional resource type + */ + public Optional resourceType() { + return Optional.ofNullable(resourceType); + } + + /** + * @return the entire resource as a string + */ + public String resource() { + return resource; + } + + /** + * @return the optional resource qualifier + */ + public Optional qualifier() { + return Optional.ofNullable(qualifier); + } + + /** + * @return a builder for {@link ArnResource}. + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Parses a string containing either a resource, resource type and resource or + * resource type, resource and qualifier into an {@link ArnResource}. + * + *

+ * Supports fields separated by either ":" or "/". + * + *

+ * For legacy AWS Arns not following the resourceType:resource:qualifier pattern, + * the qualifier field will contain everything after the first two sections separated + * by either ":" or "/". + * + * @param resource - The resource string to parse. + * @return {@link ArnResource} + */ + public static ArnResource fromString(String resource) { + Character splitter = StringUtils.findFirstOccurrence(resource, ':', '/'); + + if (splitter == null) { + return ArnResource.builder().resource(resource).build(); + } + + int resourceTypeColonIndex = resource.indexOf(splitter); + + ArnResource.Builder builder = ArnResource.builder().resourceType(resource.substring(0, resourceTypeColonIndex)); + int resourceColonIndex = resource.indexOf(splitter, resourceTypeColonIndex); + int qualifierColonIndex = resource.indexOf(splitter, resourceColonIndex + 1); + if (qualifierColonIndex < 0) { + builder.resource(resource.substring(resourceTypeColonIndex + 1)); + } else { + builder.resource(resource.substring(resourceTypeColonIndex + 1, qualifierColonIndex)); + builder.qualifier(resource.substring(qualifierColonIndex + 1)); + } + + return builder.build(); + } + + @Override + public String toString() { + return this.resourceType + + ":" + + this.resource + + ":" + + this.qualifier; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ArnResource that = (ArnResource) o; + + if (!Objects.equals(resourceType, that.resourceType)) { + return false; + } + if (!Objects.equals(resource, that.resource)) { + return false; + } + return Objects.equals(qualifier, that.qualifier); + } + + @Override + public int hashCode() { + int result = resourceType != null ? resourceType.hashCode() : 0; + result = 31 * result + (resource != null ? resource.hashCode() : 0); + result = 31 * result + (qualifier != null ? qualifier.hashCode() : 0); + return result; + } + + @Override + public Builder toBuilder() { + return builder() + .resource(resource) + .resourceType(resourceType) + .qualifier(qualifier); + } + + + public interface Builder extends CopyableBuilder { + + /** + * Define the type of the resource. + * + * @param resourceType the partition that the resource is in + * @return Returns a reference to this builder + */ + Builder resourceType(String resourceType); + + /** + * Define the entire resource. + * + * @param resource the entire resource + * @return Returns a reference to this builder + */ + Builder resource(String resource); + + /** + * Define the qualifier of the resource. 
+ * + * @param qualifier the qualifier of the resource + * @return Returns a reference to this builder + */ + Builder qualifier(String qualifier); + + /** + * @return an instance of {@link ArnResource} that is created from the builder + */ + ArnResource build(); + } + + public static final class DefaultBuilder implements Builder { + private String resourceType; + private String resource; + private String qualifier; + + private DefaultBuilder() { + } + + public void setResourceType(String resourceType) { + this.resourceType = resourceType; + } + + @Override + public Builder resourceType(String resourceType) { + setResourceType(resourceType); + return this; + } + + public void setResource(String resource) { + this.resource = resource; + } + + @Override + public Builder resource(String resource) { + setResource(resource); + return this; + } + + public void setQualifier(String qualifier) { + this.qualifier = qualifier; + } + + @Override + public Builder qualifier(String qualifier) { + setQualifier(qualifier); + return this; + } + + @Override + public ArnResource build() { + return new ArnResource(this); + } + } +} \ No newline at end of file diff --git a/core/arns/src/test/java/software/amazon/awssdk/arns/ArnResourceTest.java b/core/arns/src/test/java/software/amazon/awssdk/arns/ArnResourceTest.java new file mode 100644 index 000000000000..e5fa6c0b6d8d --- /dev/null +++ b/core/arns/src/test/java/software/amazon/awssdk/arns/ArnResourceTest.java @@ -0,0 +1,67 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.arns; + + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.util.Optional; +import org.junit.Test; + +public class ArnResourceTest { + + @Test + public void toBuilder() { + ArnResource oneResource = ArnResource.fromString("bucket:foobar:1"); + ArnResource anotherResource = oneResource.toBuilder().build(); + assertThat(oneResource).isEqualTo(anotherResource); + assertThat(oneResource.hashCode()).isEqualTo(anotherResource.hashCode()); + } + + @Test + public void hashCodeEquals() { + ArnResource oneResource = ArnResource.fromString("bucket:foobar:1"); + ArnResource anotherResource = oneResource.toBuilder().qualifier("test").build(); + assertThat(oneResource).isNotEqualTo(anotherResource); + assertThat(oneResource.hashCode()).isNotEqualTo(anotherResource.hashCode()); + } + + @Test + public void arnResource_nullResource_shouldThrowException() { + assertThatThrownBy(() -> ArnResource.builder() + .build()).hasMessageContaining("resource must not be null."); + } + + @Test + public void arnResourceFromBuilder_shouldParseCorrectly() { + ArnResource arnResource = ArnResource.builder() + .resource("bucket:foobar:1") + .resourceType("foobar") + .qualifier("1").build(); + assertThat(arnResource.qualifier()).isEqualTo(Optional.of("1")); + assertThat(arnResource.resourceType()).isEqualTo(Optional.of("foobar")); + assertThat(arnResource.resource()).isEqualTo("bucket:foobar:1"); + } + + @Test + public void hashCodeEquals_minimalProperties() { + ArnResource arnResource = ArnResource.builder().resource("resource").build(); + ArnResource anotherResource = arnResource.toBuilder().build(); + assertThat(arnResource.equals(anotherResource)).isTrue(); + assertThat(arnResource.hashCode()).isEqualTo(anotherResource.hashCode()); + } +} diff --git a/core/arns/src/test/java/software/amazon/awssdk/arns/ArnTest.java b/core/arns/src/test/java/software/amazon/awssdk/arns/ArnTest.java new file mode 100644 index 000000000000..8e5b7061d22a --- /dev/null +++ b/core/arns/src/test/java/software/amazon/awssdk/arns/ArnTest.java @@ -0,0 +1,264 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.arns; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.util.Optional; +import org.junit.Test; + +public class ArnTest { + + @Test + public void arnWithBasicResource_ParsesCorrectly() { + String arnString = "arn:aws:s3:us-east-1:12345678910:myresource"; + Arn arn = Arn.fromString(arnString); + assertThat(arn.partition()).isEqualTo("aws"); + assertThat(arn.service()).isEqualTo("s3"); + assertThat(arn.region()).isEqualTo(Optional.of("us-east-1")); + assertThat(arn.accountId()).isEqualTo(Optional.of("12345678910")); + assertThat(arn.resourceAsString()).isEqualTo("myresource"); + System.out.println(arn.resource()); + } + + @Test + public void arnWithMinimalRequirementFromString() { + Arn arn = Arn.fromString("arn:aws:foobar:::myresource"); + assertThat(arn.partition()).isEqualTo("aws"); + assertThat(arn.service()).isEqualTo("foobar"); + assertThat(arn.resourceAsString()).isEqualTo("myresource"); + } + + @Test + public void arn_ParsesBackToString() { + String arnString = "arn:aws:s3:us-east-1:12345678910:myresource"; + Arn arn = Arn.fromString(arnString); + assertThat(arn.toString()).isEqualTo(arnString); + } + + @Test + public void arnWithQualifiedResource_ParsesBackToString() { + String arnString = "arn:aws:s3:us-east-1:12345678910:myresource:foobar:1"; + Arn arn = Arn.fromString(arnString); + assertThat(arn.toString()).isEqualTo(arnString); + assertThat(arn.resourceAsString()).isEqualTo("myresource:foobar:1"); + } + + @Test + public void arnWithResourceTypeAndResource_ParsesCorrectly() { + String arnString = "arn:aws:s3:us-east-1:12345678910:bucket:foobar"; + Arn arn = Arn.fromString(arnString); + assertThat(arn.partition()).isEqualTo("aws"); + assertThat(arn.service()).isEqualTo("s3"); + assertThat(arn.region()).isEqualTo(Optional.of("us-east-1")); + assertThat(arn.resourceAsString()).isEqualTo("bucket:foobar"); + + verifyArnResource(arn.resource()); + } + + private void verifyArnResource(ArnResource arnResource) { + assertThat(arnResource.resource()).isEqualTo("foobar"); + assertThat(arnResource.resourceType()).isPresent(); + assertThat(arnResource.resourceType().get()).isEqualTo("bucket"); + } + + @Test + public void arnWithResourceTypeAndResourceAndQualifier_ParsesCorrectly() { + String arnString = "arn:aws:s3:us-east-1:12345678910:bucket:foobar:1"; + Arn arn = Arn.fromString(arnString); + assertThat(arn.partition()).isEqualTo("aws"); + assertThat(arn.service()).isEqualTo("s3"); + assertThat(arn.region()).isEqualTo(Optional.of("us-east-1")); + assertThat(arn.resourceAsString()).isEqualTo("bucket:foobar:1"); + + + ArnResource arnResource = arn.resource(); + verifyArnResource(arnResource); + assertThat(arnResource.qualifier()).isPresent(); + assertThat(arnResource.qualifier().get()).isEqualTo("1"); + } + + @Test + public void arnWithResourceTypeAndResource_SlashSplitter_ParsesCorrectly() { + String arnString = "arn:aws:s3:us-east-1:12345678910:bucket/foobar"; + Arn arn = Arn.fromString(arnString); + assertThat(arn.partition()).isEqualTo("aws"); + assertThat(arn.service()).isEqualTo("s3"); + assertThat(arn.region()).isEqualTo(Optional.of("us-east-1")); + assertThat(arn.resourceAsString()).isEqualTo("bucket/foobar"); + verifyArnResource(arn.resource()); + } + + @Test + public void arnWithResourceTypeAndResourceAndQualifier_SlashSplitter_ParsesCorrectly() { + String arnString = "arn:aws:s3:us-east-1:12345678910:bucket/foobar/1"; + Arn arn = 
Arn.fromString(arnString); + assertThat(arn.partition()).isEqualTo("aws"); + assertThat(arn.service()).isEqualTo("s3"); + assertThat(arn.region()).isEqualTo(Optional.of("us-east-1")); + assertThat(arn.resourceAsString()).isEqualTo("bucket/foobar/1"); + verifyArnResource(arn.resource()); + assertThat(arn.resource().qualifier().get()).isEqualTo("1"); + } + + @Test + public void oneArnEqualsEquivalentArn() { + String arnString = "arn:aws:s3:us-east-1:12345678910:myresource:foobar"; + Arn arn1 = Arn.fromString(arnString); + Arn arn2 = Arn.fromString(arnString); + assertThat(arn1).isEqualTo(arn2); + assertThat(arn1.resource()).isEqualTo(arn2.resource()); + } + + @Test + public void arnFromBuilder_ParsesCorrectly() { + Arn arn = Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("bucket:foobar:1") + .build(); + + assertThat(arn.partition()).isEqualTo("aws"); + assertThat(arn.service()).isEqualTo("s3"); + assertThat(arn.region()).isEqualTo(Optional.of("us-east-1")); + assertThat(arn.accountId()).isEqualTo(Optional.of("123456789012")); + assertThat(arn.resourceAsString()).isEqualTo("bucket:foobar:1"); + verifyArnResource(arn.resource()); + assertThat(arn.resource().qualifier()).isPresent(); + assertThat(arn.resource().qualifier().get()).isEqualTo("1"); + } + + @Test + public void arnResourceWithColonAndSlash_ParsesOnFirstSplitter() { + String resourceWithColonAndSlash = "object:foobar/myobjectname:1"; + Arn arn = Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource(resourceWithColonAndSlash) + .build(); + assertThat(arn.partition()).isEqualTo("aws"); + assertThat(arn.service()).isEqualTo("s3"); + assertThat(arn.region()).isEqualTo(Optional.of("us-east-1")); + assertThat(arn.accountId()).isEqualTo(Optional.of("123456789012")); + assertThat(arn.resourceAsString()).isEqualTo(resourceWithColonAndSlash); + + assertThat(arn.resource().resource()).isEqualTo("foobar/myobjectname"); + assertThat(arn.resource().qualifier()).isEqualTo(Optional.of("1")); + assertThat(arn.resource().resourceType()).isEqualTo(Optional.of("object")); + } + + @Test + public void toBuilder() { + Arn oneArn = Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("bucket:foobar:1") + .build(); + + Arn anotherArn = oneArn.toBuilder().build(); + + assertThat(oneArn).isEqualTo(anotherArn); + assertThat(oneArn.hashCode()).isEqualTo(anotherArn.hashCode()); + } + + @Test + public void hashCodeEquals() { + Arn oneArn = Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("bucket:foobar:1") + .build(); + + Arn anotherArn = oneArn.toBuilder().region("somethingelse").build(); + assertThat(oneArn).isNotEqualTo(anotherArn); + assertThat(oneArn.hashCode()).isNotEqualTo(anotherArn.hashCode()); + } + + @Test + public void hashCodeEquals_minimalProperties() { + Arn arn = Arn.builder() + .partition("aws") + .service("foobar") + .resource("resource") + .build(); + Arn anotherArn = arn.toBuilder().build(); + assertThat(arn.hashCode()).isEqualTo(anotherArn.hashCode()); + assertThat(arn.equals(anotherArn)).isTrue(); + } + + @Test + public void arnWithoutPartition_ThrowsIllegalArgumentException() { + String arnString = "arn::s3:us-east-1:12345678910:myresource"; + assertThatThrownBy(() -> Arn.fromString(arnString)).hasMessageContaining("artition must not be blank or empty."); + } + + @Test + public void 
arnWithoutService_ThrowsIllegalArgumentException() { + String arnString = "arn:aws::us-east-1:12345678910:myresource"; + assertThatThrownBy(() -> Arn.fromString(arnString)).hasMessageContaining("service must not be blank or empty"); + } + + @Test + public void arnWithoutResource_ThrowsIllegalArgumentException() { + String arnString = "arn:aws:s3:us-east-1:12345678910:"; + assertThatThrownBy(() -> Arn.fromString(arnString)).hasMessageContaining("Malformed ARN"); + } + + @Test + public void invalidArn_ThrowsIllegalArgumentException() { + String arnString = "arn:aws:"; + assertThatThrownBy(() -> Arn.fromString(arnString)).hasMessageContaining("Malformed ARN"); + } + + @Test + public void arnDoesntStartWithArn_ThrowsIllegalArgumentException() { + String arnString = "fakearn:aws:"; + assertThatThrownBy(() -> Arn.fromString(arnString)).hasMessageContaining("Malformed ARN"); + } + + @Test + public void invalidArnWithoutPartition_ThrowsIllegalArgumentException() { + String arnString = "arn:"; + assertThatThrownBy(() -> Arn.fromString(arnString)).hasMessageContaining("Malformed ARN"); + } + + @Test + public void invalidArnWithoutService_ThrowsIllegalArgumentException() { + String arnString = "arn:aws:"; + assertThatThrownBy(() -> Arn.fromString(arnString)).hasMessageContaining("Malformed ARN"); + } + + @Test + public void invalidArnWithoutRegion_ThrowsIllegalArgumentException() { + String arnString = "arn:aws:s3:"; + assertThatThrownBy(() -> Arn.fromString(arnString)).hasMessageContaining("Malformed ARN"); + } + + @Test + public void invalidArnWithoutAccountId_ThrowsIllegalArgumentException() { + String arnString = "arn:aws:s3:us-east-1:"; + assertThatThrownBy(() -> Arn.fromString(arnString)).hasMessageContaining("Malformed ARN"); + } +} diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 28b810b3d4fa..1644404c3f07 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -1,4 +1,19 @@ + + @@ -7,7 +22,7 @@ software.amazon.awssdk core - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT auth diff --git a/core/auth/src/it/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderIntegrationTest.java b/core/auth/src/it/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderIntegrationTest.java index 189c3e514043..3647d0de88a3 100644 --- a/core/auth/src/it/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderIntegrationTest.java +++ b/core/auth/src/it/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AnonymousCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AnonymousCredentialsProvider.java index 29c1662dd998..b4d7d22f4580 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AnonymousCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AnonymousCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). 
* You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsBasicCredentials.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsBasicCredentials.java index 14f8de09c3ad..33b0fb03c3e3 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsBasicCredentials.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsBasicCredentials.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -49,7 +49,7 @@ public final class AwsBasicCredentials implements AwsCredentials { private final String secretAccessKey; /** - * Constructs a new credentials object, with the specified AWS access key, AWS secret key and AWS session token. + * Constructs a new credentials object, with the specified AWS access key and AWS secret key. * * @param accessKeyId The AWS access key, used to identify the user interacting with AWS. * @param secretAccessKey The AWS secret access key, used to authenticate the user interacting with AWS. @@ -69,7 +69,7 @@ private AwsBasicCredentials(String accessKeyId, String secretAccessKey, boolean } /** - * Constructs a new credentials object, with the specified AWS access key, AWS secret key and AWS session token. + * Constructs a new credentials object, with the specified AWS access key and AWS secret key. * * @param accessKeyId The AWS access key, used to identify the user interacting with AWS. * @param secretAccessKey The AWS secret access key, used to authenticate the user interacting with AWS. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentials.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentials.java index ecd24408750c..b907d77a892a 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentials.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentials.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProvider.java index 095f42741cff..0e8ddb4aad3b 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
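To make the corrected AwsBasicCredentials documentation above concrete, here is a brief, hedged sketch (key values are placeholders) of creating basic credentials, which carry only an access key and a secret key, and exposing them through a static provider; temporary credentials that also carry a session token would use AwsSessionCredentials instead:

import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;

public class BasicCredentialsExample {
    public static void main(String[] args) {
        // Basic credentials: access key + secret key, no session token.
        AwsBasicCredentials credentials = AwsBasicCredentials.create("AKIAEXAMPLEKEYID", "exampleSecretAccessKey");

        // A static provider always returns the same fixed credentials.
        AwsCredentialsProvider provider = StaticCredentialsProvider.create(credentials);
        System.out.println(provider.resolveCredentials().accessKeyId()); // AKIAEXAMPLEKEYID
    }
}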
diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProviderChain.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProviderChain.java index 12b58a726ca4..fcc62e51af42 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProviderChain.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProviderChain.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -160,7 +160,8 @@ private static final class BuilderImpl implements Builder { private Boolean reuseLastProviderEnabled = true; private List credentialsProviders = new ArrayList<>(); - private BuilderImpl() {} + private BuilderImpl() { + } @Override public Builder reuseLastProviderEnabled(Boolean reuseLastProviderEnabled) { diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsSessionCredentials.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsSessionCredentials.java index 94fca729c651..8acd9efd02b9 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsSessionCredentials.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsSessionCredentials.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ChildProfileCredentialsProviderFactory.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ChildProfileCredentialsProviderFactory.java index 2229ef8dc56e..620e32decfe2 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ChildProfileCredentialsProviderFactory.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ChildProfileCredentialsProviderFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsProvider.java index 1872e10cc133..1d2bdc249ab8 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -17,7 +17,6 @@ import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableSet; -import static java.util.stream.Collectors.joining; import java.io.IOException; import java.net.URI; @@ -138,7 +137,7 @@ private URI createGenericContainerUrl() { uri, SdkSystemSetting.AWS_CONTAINER_CREDENTIALS_FULL_URI .environmentVariable(), - ALLOWED_HOSTS.stream().collect(joining(",")))) + String.join(",", ALLOWED_HOSTS))) .build(); } return uri; diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/CredentialUtils.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/CredentialUtils.java index 09bcfcddd5fe..6e4879b59c2a 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/CredentialUtils.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/CredentialUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java index a8cbf236fbbb..764112223ae4 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.auth.credentials.internal.LazyAwsCredentialsProvider; +import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.utils.SdkAutoCloseable; import software.amazon.awssdk.utils.ToString; @@ -34,6 +35,7 @@ * @see SystemPropertyCredentialsProvider * @see EnvironmentVariableCredentialsProvider * @see ProfileCredentialsProvider + * @see WebIdentityTokenFileCredentialsProvider * @see ContainerCredentialsProvider * @see InstanceProfileCredentialsProvider */ @@ -70,7 +72,11 @@ private static LazyAwsCredentialsProvider createChain(Builder builder) { AwsCredentialsProvider[] credentialsProviders = new AwsCredentialsProvider[] { SystemPropertyCredentialsProvider.create(), EnvironmentVariableCredentialsProvider.create(), - ProfileCredentialsProvider.create(), + WebIdentityTokenFileCredentialsProvider.create(), + ProfileCredentialsProvider.builder() + .profileFile(builder.profileFile) + .profileName(builder.profileName) + .build(), ContainerCredentialsProvider.builder() .asyncCredentialUpdateEnabled(asyncCredentialUpdateEnabled) .build(), @@ -114,13 +120,26 @@ public String toString() { * Configuration that defines the {@link DefaultCredentialsProvider}'s behavior. */ public static final class Builder { + private ProfileFile profileFile; + private String profileName; private Boolean reuseLastProviderEnabled = true; private Boolean asyncCredentialUpdateEnabled = false; /** * Created with {@link #builder()}. 
*/ - private Builder() {} + private Builder() { + } + + public Builder profileFile(ProfileFile profileFile) { + this.profileFile = profileFile; + return this; + } + + public Builder profileName(String profileName) { + this.profileName = profileName; + return this; + } /** * Controls whether the provider should reuse the last successful credentials provider in the chain. Reusing the last diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/EnvironmentVariableCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/EnvironmentVariableCredentialsProvider.java index 3efbe3367ea1..18d566687718 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/EnvironmentVariableCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/EnvironmentVariableCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProvider.java index 6bad1959657f..f2e1aa03d600 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProvider.java index 7ddb2c6adf7c..3a704dbcb969 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
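The DefaultCredentialsProvider hunk above inserts the web identity token file provider into the default chain and lets callers override the profile file and profile name on the builder. A short usage sketch under those assumptions (the profile name is a placeholder):

import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;

public class DefaultChainExample {
    public static void main(String[] args) {
        // Resolve credentials through the default chain, but point the
        // profile-based provider at the "ci" profile instead of the default.
        DefaultCredentialsProvider provider = DefaultCredentialsProvider.builder()
                                                                        .profileName("ci")
                                                                        .build();

        AwsCredentials credentials = provider.resolveCredentials();
        System.out.println(credentials.accessKeyId());
    }
}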
@@ -17,9 +17,13 @@ import java.io.IOException; import java.net.URI; +import java.util.HashMap; +import java.util.Map; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.internal.util.UserAgentUtils; +import software.amazon.awssdk.regions.internal.util.EC2MetadataUtils; import software.amazon.awssdk.regions.util.HttpResourcesUtils; import software.amazon.awssdk.regions.util.ResourcesEndpointProvider; import software.amazon.awssdk.utils.ToString; @@ -33,9 +37,9 @@ */ @SdkPublicApi public final class InstanceProfileCredentialsProvider extends HttpCredentialsProvider { + private static final String EC2_METADATA_TOKEN_HEADER = "x-aws-ec2-metadata-token"; private static final String SECURITY_CREDENTIALS_RESOURCE = "/latest/meta-data/iam/security-credentials/"; - private final ResourcesEndpointProvider credentialsEndpointProvider = new InstanceProviderCredentialsEndpointProvider(); /** * @see #builder() @@ -62,7 +66,7 @@ public static InstanceProfileCredentialsProvider create() { @Override protected ResourcesEndpointProvider getCredentialsEndpointProvider() { - return credentialsEndpointProvider; + return new InstanceProviderCredentialsEndpointProvider(getToken()); } @Override @@ -75,13 +79,45 @@ public String toString() { return ToString.create("InstanceProfileCredentialsProvider"); } + private String getToken() { + return EC2MetadataUtils.getToken(); + } + + private static ResourcesEndpointProvider includeTokenHeader(ResourcesEndpointProvider provider, String token) { + return new ResourcesEndpointProvider() { + @Override + public URI endpoint() throws IOException { + return provider.endpoint(); + } + + @Override + public Map headers() { + Map headers = new HashMap<>(provider.headers()); + headers.put(EC2_METADATA_TOKEN_HEADER, token); + return headers; + } + }; + } + private static final class InstanceProviderCredentialsEndpointProvider implements ResourcesEndpointProvider { + private final String metadataToken; + + private InstanceProviderCredentialsEndpointProvider(String metadataToken) { + this.metadataToken = metadataToken; + } + @Override public URI endpoint() throws IOException { String host = SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.getStringValueOrThrow(); URI endpoint = URI.create(host + SECURITY_CREDENTIALS_RESOURCE); - String securityCredentialsList = HttpResourcesUtils.instance().readResource(endpoint); + ResourcesEndpointProvider endpointProvider = () -> endpoint; + + if (metadataToken != null) { + endpointProvider = includeTokenHeader(endpointProvider, metadataToken); + } + + String securityCredentialsList = HttpResourcesUtils.instance().readResource(endpointProvider); String[] securityCredentials = securityCredentialsList.trim().split("\n"); if (securityCredentials.length == 0) { @@ -90,8 +126,23 @@ public URI endpoint() throws IOException { return URI.create(host + SECURITY_CREDENTIALS_RESOURCE + securityCredentials[0]); } + + @Override + public Map headers() { + Map requestHeaders = new HashMap<>(); + requestHeaders.put("User-Agent", UserAgentUtils.getUserAgent()); + requestHeaders.put("Accept", "*/*"); + requestHeaders.put("Connection", "keep-alive"); + + if (metadataToken != null) { + requestHeaders.put(EC2_METADATA_TOKEN_HEADER, metadataToken); + } + + return requestHeaders; + } } + /** * A builder for creating a custom a {@link InstanceProfileCredentialsProvider}. 
*/ diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProvider.java index 2f1cee14bd55..fac87fbb0e69 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -53,7 +53,7 @@ * start to be refreshed. This allows the credentials to be refreshed *before* they are reported to expire. Default: 15 * seconds. *

  • ProcessOutputLimit - The maximum amount of data that can be returned by the external process before an exception is - * raised. Default: 1024 bytes.
  • + * raised. Default: 64000 bytes (64KB). * */ @SdkPublicApi @@ -222,12 +222,13 @@ public static class Builder { private Boolean asyncCredentialUpdateEnabled = false; private String command; private Duration credentialRefreshThreshold = Duration.ofSeconds(15); - private long processOutputLimit = 1024; + private long processOutputLimit = 64000; /** * @see #builder() */ - private Builder() {} + private Builder() { + } /** * Configure whether the provider should fetch credentials asynchronously in the background. If this is true, threads are @@ -264,7 +265,7 @@ public Builder credentialRefreshThreshold(Duration credentialRefreshThreshold) { * Configure the maximum amount of data that can be returned by the external process before an exception is * raised. * - *

    Default: 1024 bytes.

    + *

    Default: 64000 bytes (64KB).

    */ public Builder processOutputLimit(long outputByteLimit) { this.processOutputLimit = outputByteLimit; diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProvider.java index fe13174f2c31..1a4dd42e870d 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -21,7 +21,6 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; import software.amazon.awssdk.auth.credentials.internal.ProfileCredentialsUtils; -import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.profiles.ProfileFileSystemSetting; @@ -163,7 +162,7 @@ public interface Builder { /** * Define the name of the profile that should be used by this credentials provider. By default, the value in - * {@link SdkSystemSetting#AWS_PROFILE} is used. + * {@link ProfileFileSystemSetting#AWS_PROFILE} is used. */ Builder profileName(String profileName); diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProviderFactory.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProviderFactory.java new file mode 100644 index 000000000000..8d146280f106 --- /dev/null +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProviderFactory.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.credentials; + +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.profiles.Profile; + +/** + * A factory for {@link AwsCredentialsProvider}s, which can be used to create different credentials providers with + * different Profile properties. 
+ */ +@FunctionalInterface +@SdkProtectedApi +public interface ProfileCredentialsProviderFactory { + AwsCredentialsProvider create(Profile profile); +} diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/StaticCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/StaticCredentialsProvider.java index e52efdab2e83..aa67c4dd68b4 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/StaticCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/StaticCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/SystemPropertyCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/SystemPropertyCredentialsProvider.java index b4320ffb6580..135b6ce88565 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/SystemPropertyCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/SystemPropertyCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/WebIdentityTokenCredentialsProviderFactory.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/WebIdentityTokenCredentialsProviderFactory.java index 9faeb71c373e..0772b895f0a9 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/WebIdentityTokenCredentialsProviderFactory.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/WebIdentityTokenCredentialsProviderFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/WebIdentityTokenFileCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/WebIdentityTokenFileCredentialsProvider.java index ab439ada4998..168ecb3a1965 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/WebIdentityTokenFileCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/WebIdentityTokenFileCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
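The ProfileCredentialsProviderFactory interface introduced above exposes a single create(Profile) method. As a rough sketch of what an implementation could look like (the property names and fallback values are illustrative only; the real STS and SSO factories referenced elsewhere in this change do considerably more):

import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProviderFactory;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.profiles.Profile;

public class ExampleProfileCredentialsProviderFactory implements ProfileCredentialsProviderFactory {
    @Override
    public AwsCredentialsProvider create(Profile profile) {
        // Read static keys from the profile, falling back to placeholder values
        // so this sketch stays self-contained.
        String accessKeyId = profile.property("aws_access_key_id").orElse("AKIAEXAMPLEKEYID");
        String secretAccessKey = profile.property("aws_secret_access_key").orElse("exampleSecretAccessKey");
        return StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey));
    }
}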
@@ -156,7 +156,7 @@ public Builder webIdentityTokenFile(Path webIdentityTokenFile) { return this; } - public void setwebIdentityTokenFile(Path webIdentityTokenFile) { + public void setWebIdentityTokenFile(Path webIdentityTokenFile) { webIdentityTokenFile(webIdentityTokenFile); } diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ContainerCredentialsRetryPolicy.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ContainerCredentialsRetryPolicy.java index 352dd2aff1ce..448d19aee53c 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ContainerCredentialsRetryPolicy.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ContainerCredentialsRetryPolicy.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/CredentialSourceType.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/CredentialSourceType.java index 61c82a75bcc9..db3de2845d49 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/CredentialSourceType.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/CredentialSourceType.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProvider.java index 54260eca2e8a..6999e25af250 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -20,6 +20,7 @@ import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.utils.IoUtils; +import software.amazon.awssdk.utils.Lazy; import software.amazon.awssdk.utils.SdkAutoCloseable; import software.amazon.awssdk.utils.ToString; @@ -29,11 +30,10 @@ */ @SdkInternalApi public class LazyAwsCredentialsProvider implements AwsCredentialsProvider, SdkAutoCloseable { - private final Supplier delegateConstructor; - private volatile AwsCredentialsProvider delegate; + private final Lazy delegate; private LazyAwsCredentialsProvider(Supplier delegateConstructor) { - this.delegateConstructor = delegateConstructor; + this.delegate = new Lazy<>(delegateConstructor); } public static LazyAwsCredentialsProvider create(Supplier delegateConstructor) { @@ -42,14 +42,7 @@ public static LazyAwsCredentialsProvider create(Supplier @Override public AwsCredentials resolveCredentials() { - if (delegate == null) { - synchronized (this) { - if (delegate == null) { - delegate = delegateConstructor.get(); - } - } - } - return delegate.resolveCredentials(); + return delegate.getValue().resolveCredentials(); } @Override @@ -60,7 +53,6 @@ public void close() { @Override public String toString() { return ToString.builder("LazyAwsCredentialsProvider") - .add("delegateConstructor", delegateConstructor) .add("delegate", delegate) .build(); } diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java index d2646ec6a57a..202b0fbad000 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -35,8 +35,10 @@ import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider; import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider; import software.amazon.awssdk.auth.credentials.ProcessCredentialsProvider; +import software.amazon.awssdk.auth.credentials.ProfileCredentialsProviderFactory; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.auth.credentials.SystemPropertyCredentialsProvider; +import software.amazon.awssdk.core.internal.util.ClassLoaderHelper; import software.amazon.awssdk.profiles.Profile; import software.amazon.awssdk.profiles.ProfileProperty; import software.amazon.awssdk.utils.SdkAutoCloseable; @@ -49,6 +51,8 @@ public final class ProfileCredentialsUtils { private static final String STS_PROFILE_CREDENTIALS_PROVIDER_FACTORY = "software.amazon.awssdk.services.sts.internal.StsProfileCredentialsProviderFactory"; + private static final String SSO_PROFILE_CREDENTIALS_PROVIDER_FACTORY = + "software.amazon.awssdk.services.sso.auth.SsoProfileCredentialsProviderFactory"; private final Profile profile; @@ -94,19 +98,22 @@ public Optional credentialsProvider() { * @param children The child profiles that source credentials from this profile. 
*/ private Optional credentialsProvider(Set children) { + if (properties.containsKey(ProfileProperty.ROLE_ARN) && properties.containsKey(ProfileProperty.WEB_IDENTITY_TOKEN_FILE)) { + return Optional.ofNullable(roleAndWebIdentityTokenProfileCredentialsProvider()); + } + + if (properties.containsKey(ProfileProperty.SSO_ROLE_NAME) || properties.containsKey(ProfileProperty.SSO_ACCOUNT_ID) + || properties.containsKey(ProfileProperty.SSO_REGION) || properties.containsKey(ProfileProperty.SSO_START_URL)) { + return Optional.ofNullable(ssoProfileCredentialsProvider()); + } + if (properties.containsKey(ProfileProperty.ROLE_ARN)) { boolean hasSourceProfile = properties.containsKey(ProfileProperty.SOURCE_PROFILE); boolean hasCredentialSource = properties.containsKey(ProfileProperty.CREDENTIAL_SOURCE); - boolean hasWebIdentityTokenFile = properties.containsKey(ProfileProperty.WEB_IDENTITY_TOKEN_FILE); - boolean hasRoleArn = properties.containsKey(ProfileProperty.ROLE_ARN); Validate.validState(!(hasSourceProfile && hasCredentialSource), "Invalid profile file: profile has both %s and %s.", ProfileProperty.SOURCE_PROFILE, ProfileProperty.CREDENTIAL_SOURCE); - if (hasWebIdentityTokenFile && hasRoleArn) { - return Optional.ofNullable(roleAndWebIdentityTokenProfileCredentialsProvider()); - } - if (hasSourceProfile) { return Optional.ofNullable(roleAndSourceProfileBasedProfileCredentialsProvider(children)); } @@ -163,6 +170,17 @@ private AwsCredentialsProvider credentialProcessCredentialsProvider() { .build(); } + /** + * Create the SSO credentials provider based on the related profile properties. + */ + private AwsCredentialsProvider ssoProfileCredentialsProvider() { + requireProperties(ProfileProperty.SSO_ACCOUNT_ID, + ProfileProperty.SSO_REGION, + ProfileProperty.SSO_ROLE_NAME, + ProfileProperty.SSO_START_URL); + return ssoCredentialsProviderFactory().create(profile); + } + private AwsCredentialsProvider roleAndWebIdentityTokenProfileCredentialsProvider() { requireProperties(ProfileProperty.ROLE_ARN, ProfileProperty.WEB_IDENTITY_TOKEN_FILE); @@ -252,8 +270,8 @@ private IllegalStateException noSourceCredentialsException() { */ private ChildProfileCredentialsProviderFactory stsCredentialsProviderFactory() { try { - Class stsCredentialsProviderFactory = Class.forName(STS_PROFILE_CREDENTIALS_PROVIDER_FACTORY, true, - Thread.currentThread().getContextClassLoader()); + Class stsCredentialsProviderFactory = ClassLoaderHelper.loadClass(STS_PROFILE_CREDENTIALS_PROVIDER_FACTORY, + getClass()); return (ChildProfileCredentialsProviderFactory) stsCredentialsProviderFactory.getConstructor().newInstance(); } catch (ClassNotFoundException e) { throw new IllegalStateException("To use assumed roles in the '" + name + "' profile, the 'sts' service module must " @@ -262,4 +280,20 @@ private ChildProfileCredentialsProviderFactory stsCredentialsProviderFactory() { throw new IllegalStateException("Failed to create the '" + name + "' profile credentials provider.", e); } } + + /** + * Load the factory that can be used to create the SSO credentials provider, assuming it is on the classpath. 
+ */ + private ProfileCredentialsProviderFactory ssoCredentialsProviderFactory() { + try { + Class ssoProfileCredentialsProviderFactory = ClassLoaderHelper.loadClass(SSO_PROFILE_CREDENTIALS_PROVIDER_FACTORY, + getClass()); + return (ProfileCredentialsProviderFactory) ssoProfileCredentialsProviderFactory.getConstructor().newInstance(); + } catch (ClassNotFoundException e) { + throw new IllegalStateException("To use Sso related properties in the '" + name + "' profile, the 'sso' service " + + "module must be on the class path.", e); + } catch (NoSuchMethodException | InvocationTargetException | InstantiationException | IllegalAccessException e) { + throw new IllegalStateException("Failed to create the '" + name + "' profile credentials provider.", e); + } + } } diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/SystemSettingsCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/SystemSettingsCredentialsProvider.java index 74e84a210900..c84cc44c6237 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/SystemSettingsCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/SystemSettingsCredentialsProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityCredentialsUtils.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityCredentialsUtils.java index 9c2136ec4db0..44fe98d4bd20 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityCredentialsUtils.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityCredentialsUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -18,17 +18,21 @@ import java.lang.reflect.InvocationTargetException; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.WebIdentityTokenCredentialsProviderFactory; +import software.amazon.awssdk.core.internal.util.ClassLoaderHelper; +import software.amazon.awssdk.utils.Logger; /** * Utility class used to configure credential providers based on JWT web identity tokens. 
*/ @SdkInternalApi public final class WebIdentityCredentialsUtils { + private static final Logger log = Logger.loggerFor(WebIdentityCredentialsUtils.class); private static final String STS_WEB_IDENTITY_CREDENTIALS_PROVIDER_FACTORY = "software.amazon.awssdk.services.sts.internal.StsWebIdentityCredentialsProviderFactory"; - private WebIdentityCredentialsUtils() {} + private WebIdentityCredentialsUtils() { + } /** * Resolves the StsWebIdentityCredentialsProviderFactory from the Sts module if on the classpath to allow @@ -38,11 +42,13 @@ private WebIdentityCredentialsUtils() {} */ public static WebIdentityTokenCredentialsProviderFactory factory() { try { - Class stsCredentialsProviderFactory = Class.forName(STS_WEB_IDENTITY_CREDENTIALS_PROVIDER_FACTORY, true, - Thread.currentThread().getContextClassLoader()); + Class stsCredentialsProviderFactory = ClassLoaderHelper.loadClass(STS_WEB_IDENTITY_CREDENTIALS_PROVIDER_FACTORY, + WebIdentityCredentialsUtils.class); return (WebIdentityTokenCredentialsProviderFactory) stsCredentialsProviderFactory.getConstructor().newInstance(); } catch (ClassNotFoundException e) { - throw new IllegalStateException("To use web identity tokens, the 'sts' service module must be on the class path.", e); + String message = "To use web identity tokens, the 'sts' service module must be on the class path."; + log.warn(() -> message); + throw new IllegalStateException(message, e); } catch (NoSuchMethodException | InvocationTargetException | InstantiationException | IllegalAccessException e) { throw new IllegalStateException("Failed to create a web identity token credentials provider.", e); } diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityTokenCredentialProperties.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityTokenCredentialProperties.java index 06375802e356..0d6687edd5ac 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityTokenCredentialProperties.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityTokenCredentialProperties.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AsyncAws4Signer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AsyncAws4Signer.java new file mode 100644 index 000000000000..f8b4e68b97e1 --- /dev/null +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AsyncAws4Signer.java @@ -0,0 +1,77 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.auth.signer; + +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.auth.credentials.CredentialUtils; +import software.amazon.awssdk.auth.signer.internal.Aws4SignerRequestParams; +import software.amazon.awssdk.auth.signer.internal.BaseAws4Signer; +import software.amazon.awssdk.auth.signer.internal.DigestComputingSubscriber; +import software.amazon.awssdk.auth.signer.params.Aws4SignerParams; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.signer.AsyncSigner; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.CompletableFutureUtils; + +/** + * AWS Signature Version 4 signer that can include contents of an asynchronous request body into the signature + * calculation. + */ +@SdkPublicApi +public final class AsyncAws4Signer extends BaseAws4Signer implements AsyncSigner { + + @Override + public CompletableFuture sign(SdkHttpFullRequest request, + AsyncRequestBody requestBody, + ExecutionAttributes executionAttributes) { + Aws4SignerParams signingParams = extractSignerParams(Aws4SignerParams.builder(), executionAttributes).build(); + return signWithBody(request, requestBody, signingParams); + } + + public CompletableFuture signWithBody(SdkHttpFullRequest request, + AsyncRequestBody requestBody, + Aws4SignerParams signingParams) { + // anonymous credentials, don't sign + if (CredentialUtils.isAnonymous(signingParams.awsCredentials())) { + return CompletableFuture.completedFuture(request); + } + + DigestComputingSubscriber bodyDigester = DigestComputingSubscriber.forSha256(); + + requestBody.subscribe(bodyDigester); + + CompletableFuture digestBytes = bodyDigester.digestBytes(); + + CompletableFuture signedReqFuture = digestBytes.thenApply(bodyHash -> { + String digestHex = BinaryUtils.toHex(bodyHash); + + Aws4SignerRequestParams requestParams = new Aws4SignerRequestParams(signingParams); + + SdkHttpFullRequest.Builder builder = doSign(request, requestParams, signingParams, digestHex); + + return builder.build(); + }); + + return CompletableFutureUtils.forwardExceptionTo(signedReqFuture, digestBytes); + } + + public static AsyncAws4Signer create() { + return new AsyncAws4Signer(); + } +} diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/Aws4Signer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/Aws4Signer.java index 05934f34b9f3..bad5428c3a8c 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/Aws4Signer.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/Aws4Signer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
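The new AsyncAws4Signer above folds the SHA-256 digest of an asynchronous request body into the SigV4 calculation. A rough usage sketch follows, assuming the signWithBody overload shown in the diff; the endpoint, signing name, region and credentials are placeholders, and error handling is omitted.

    import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
    import software.amazon.awssdk.auth.signer.AsyncAws4Signer;
    import software.amazon.awssdk.auth.signer.params.Aws4SignerParams;
    import software.amazon.awssdk.core.async.AsyncRequestBody;
    import software.amazon.awssdk.http.SdkHttpFullRequest;
    import software.amazon.awssdk.http.SdkHttpMethod;
    import software.amazon.awssdk.regions.Region;

    public class AsyncSigningSketch {
        public static void main(String[] args) {
            SdkHttpFullRequest unsigned = SdkHttpFullRequest.builder()
                    .method(SdkHttpMethod.POST)
                    .protocol("https")
                    .host("example-service.us-east-1.amazonaws.com") // placeholder endpoint
                    .encodedPath("/")
                    .build();

            Aws4SignerParams params = Aws4SignerParams.builder()
                    .awsCredentials(AwsBasicCredentials.create("akid", "skid")) // placeholder credentials
                    .signingName("example-service")                             // placeholder signing name
                    .signingRegion(Region.US_EAST_1)
                    .build();

            // The body is streamed through DigestComputingSubscriber; the returned future
            // completes with a request whose Authorization header covers the body hash.
            SdkHttpFullRequest signed = AsyncAws4Signer.create()
                    .signWithBody(unsigned, AsyncRequestBody.fromString("{}"), params)
                    .join();

            System.out.println(signed.headers().get("Authorization"));
        }
    }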
diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/Aws4UnsignedPayloadSigner.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/Aws4UnsignedPayloadSigner.java index ffc9a3ec78e6..271f7a95457c 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/Aws4UnsignedPayloadSigner.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/Aws4UnsignedPayloadSigner.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AwsS3V4Signer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AwsS3V4Signer.java index 9d27859a473a..6dc3385d0ef5 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AwsS3V4Signer.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AwsS3V4Signer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AwsSignerExecutionAttribute.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AwsSignerExecutionAttribute.java index 5f20a9c92957..5b8fcfc026d0 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AwsSignerExecutionAttribute.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/AwsSignerExecutionAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/EventStreamAws4Signer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/EventStreamAws4Signer.java index 560057f54498..474046cf2202 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/EventStreamAws4Signer.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/EventStreamAws4Signer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -20,7 +20,8 @@ @SdkProtectedApi public final class EventStreamAws4Signer extends BaseEventStreamAsyncAws4Signer { - private EventStreamAws4Signer() {} + private EventStreamAws4Signer() { + } public static EventStreamAws4Signer create() { return new EventStreamAws4Signer(); diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/S3SignerExecutionAttribute.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/S3SignerExecutionAttribute.java index 391a58c9853c..69576d3d193f 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/S3SignerExecutionAttribute.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/S3SignerExecutionAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java index 3d841b58eea6..310b60db61ac 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,16 +15,16 @@ package software.amazon.awssdk.auth.signer.internal; -import static software.amazon.awssdk.utils.DateUtils.numberOfDaysSinceEpoch; import static software.amazon.awssdk.utils.StringUtils.lowerCase; import java.io.InputStream; import java.nio.charset.Charset; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.concurrent.TimeUnit; +import java.util.TreeMap; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; @@ -61,6 +61,15 @@ public abstract class AbstractAws4Signer h.equals("required")) .ifPresent(h -> mutableRequest.putHeader(SignerConstant.X_AMZ_CONTENT_SHA256, contentSha256)); - String canonicalRequest = createCanonicalRequest(mutableRequest, contentSha256, signingParams.doubleUrlEncode()); + Map> canonicalHeaders = canonicalizeSigningHeaders(mutableRequest.headers()); + String signedHeadersString = getSignedHeadersString(canonicalHeaders); + + String canonicalRequest = createCanonicalRequest(mutableRequest, + canonicalHeaders, + signedHeadersString, + contentSha256, + signingParams.doubleUrlEncode()); String stringToSign = createStringToSign(canonicalRequest, requestParams); @@ -85,7 +100,7 @@ protected SdkHttpFullRequest.Builder doSign(SdkHttpFullRequest request, byte[] signature = computeSignature(stringToSign, signingKey); mutableRequest.putHeader(SignerConstant.AUTHORIZATION, - buildAuthorizationHeader(signature, sanitizedCredentials, requestParams, mutableRequest)); + buildAuthorizationHeader(signature, sanitizedCredentials, requestParams, signedHeadersString)); processRequestPayload(mutableRequest, signature, 
signingKey, requestParams, signingParams); @@ -111,11 +126,16 @@ protected SdkHttpFullRequest.Builder doPresign(SdkHttpFullRequest request, } // Add the important parameters for v4 signing - addPreSignInformationToRequest(mutableRequest, sanitizedCredentials, requestParams, expirationInSeconds); + Map> canonicalizedHeaders = canonicalizeSigningHeaders(mutableRequest.headers()); + String signedHeadersString = getSignedHeadersString(canonicalizedHeaders); + + addPreSignInformationToRequest(mutableRequest, signedHeadersString, sanitizedCredentials, + requestParams, expirationInSeconds); String contentSha256 = calculateContentHashPresign(mutableRequest, signingParams); - String canonicalRequest = createCanonicalRequest(mutableRequest, contentSha256, signingParams.doubleUrlEncode()); + String canonicalRequest = createCanonicalRequest(mutableRequest, canonicalizedHeaders, signedHeadersString, + contentSha256, signingParams.doubleUrlEncode()); String stringToSign = createStringToSign(canonicalRequest, requestParams); @@ -160,24 +180,28 @@ protected abstract void processRequestPayload(SdkHttpFullRequest.Builder mutable * http://docs.aws.amazon * .com/general/latest/gr/sigv4-calculate-signature.html */ - protected byte[] deriveSigningKey(AwsCredentials credentials, Aws4SignerRequestParams signerRequestParams) { - - String cacheKey = computeSigningCacheKeyName(credentials, signerRequestParams); - long daysSinceEpochSigningDate = numberOfDaysSinceEpoch(signerRequestParams.getRequestSigningDateTimeMilli()); + protected final byte[] deriveSigningKey(AwsCredentials credentials, Aws4SignerRequestParams signerRequestParams) { + return deriveSigningKey(credentials, + Instant.ofEpochMilli(signerRequestParams.getRequestSigningDateTimeMilli()), + signerRequestParams.getRegionName(), + signerRequestParams.getServiceSigningName()); + } + protected final byte[] deriveSigningKey(AwsCredentials credentials, Instant signingInstant, String region, String service) { + String cacheKey = createSigningCacheKeyName(credentials, region, service); SignerKey signerKey = SIGNER_CACHE.get(cacheKey); - if (signerKey != null && daysSinceEpochSigningDate == signerKey.getNumberOfDaysSinceEpoch()) { + if (signerKey != null && signerKey.isValidForDate(signingInstant)) { return signerKey.getSigningKey(); } LOG.trace(() -> "Generating a new signing key as the signing key not available in the cache for the date: " + - TimeUnit.DAYS.toMillis(daysSinceEpochSigningDate)); + signingInstant.toEpochMilli()); byte[] signingKey = newSigningKey(credentials, - signerRequestParams.getFormattedRequestSigningDate(), - signerRequestParams.getRegionName(), - signerRequestParams.getServiceSigningName()); - SIGNER_CACHE.add(cacheKey, new SignerKey(daysSinceEpochSigningDate, signingKey)); + Aws4SignerUtils.formatDateStamp(signingInstant), + region, + service); + SIGNER_CACHE.add(cacheKey, new SignerKey(signingInstant, signingKey)); return signingKey; } @@ -188,9 +212,10 @@ protected byte[] deriveSigningKey(AwsCredentials credentials, Aws4SignerRequestP * generate the canonical request. 
*/ private String createCanonicalRequest(SdkHttpFullRequest.Builder request, + Map> canonicalHeaders, + String signedHeadersString, String contentSha256, boolean doubleUrlEncode) { - String canonicalRequest = request.method().toString() + SignerConstant.LINE_SEPARATOR + // This would optionally double url-encode the resource path @@ -198,9 +223,9 @@ private String createCanonicalRequest(SdkHttpFullRequest.Builder request, SignerConstant.LINE_SEPARATOR + getCanonicalizedQueryString(request.rawQueryParameters()) + SignerConstant.LINE_SEPARATOR + - getCanonicalizedHeaderString(request.headers()) + + getCanonicalizedHeaderString(canonicalHeaders) + SignerConstant.LINE_SEPARATOR + - getSignedHeadersString(request.headers()) + + signedHeadersString + SignerConstant.LINE_SEPARATOR + contentSha256; @@ -228,14 +253,10 @@ private String createStringToSign(String canonicalRequest, return stringToSign; } - - /** - * Computes the name to be used to reference the signing key in the cache. - */ - private String computeSigningCacheKeyName(AwsCredentials credentials, - Aws4SignerRequestParams signerRequestParams) { - return credentials.secretAccessKey() + "-" + signerRequestParams.getRegionName() + "-" + - signerRequestParams.getServiceSigningName(); + private String createSigningCacheKeyName(AwsCredentials credentials, + String regionName, + String serviceName) { + return credentials.secretAccessKey() + "-" + regionName + "-" + serviceName; } /** @@ -255,12 +276,11 @@ private byte[] computeSignature(String stringToSign, byte[] signingKey) { private String buildAuthorizationHeader(byte[] signature, AwsCredentials credentials, Aws4SignerRequestParams signerParams, - SdkHttpFullRequest.Builder mutableRequest) { + String signedHeadersString) { String signingCredentials = credentials.accessKeyId() + "/" + signerParams.getScope(); String credential = "Credential=" + signingCredentials; - String signerHeaders = "SignedHeaders=" + - getSignedHeadersString(mutableRequest.headers()); + String signerHeaders = "SignedHeaders=" + signedHeadersString; String signatureHeader = "Signature=" + BinaryUtils.toHex(signature); return SignerConstant.AWS4_SIGNING_ALGORITHM + " " + credential + ", " + signerHeaders + ", " + signatureHeader; @@ -270,6 +290,7 @@ private String buildAuthorizationHeader(byte[] signature, * Includes all the signing headers as request parameters for pre-signing. 
*/ private void addPreSignInformationToRequest(SdkHttpFullRequest.Builder mutableRequest, + String signedHeadersString, AwsCredentials sanitizedCredentials, Aws4SignerRequestParams signerParams, long expirationInSeconds) { @@ -278,34 +299,39 @@ private void addPreSignInformationToRequest(SdkHttpFullRequest.Builder mutableRe mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_ALGORITHM, SignerConstant.AWS4_SIGNING_ALGORITHM); mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_DATE, signerParams.getFormattedRequestSigningDateTime()); - mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_SIGNED_HEADER, - getSignedHeadersString(mutableRequest.headers())); - mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_EXPIRES, - Long.toString(expirationInSeconds)); + mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_SIGNED_HEADER, signedHeadersString); + mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_EXPIRES, Long.toString(expirationInSeconds)); mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_CREDENTIAL, signingCredentials); } + private Map> canonicalizeSigningHeaders(Map> headers) { + Map> result = new TreeMap<>(); - private String getCanonicalizedHeaderString(Map> headers) { - List sortedHeaders = new ArrayList<>(headers.keySet()); - sortedHeaders.sort(String.CASE_INSENSITIVE_ORDER); - - StringBuilder buffer = new StringBuilder(); - for (String header : sortedHeaders) { - if (shouldExcludeHeaderFromSigning(header)) { + for (Map.Entry> header : headers.entrySet()) { + String lowerCaseHeader = lowerCase(header.getKey()); + if (LIST_OF_HEADERS_TO_IGNORE_IN_LOWER_CASE.contains(lowerCaseHeader)) { continue; } - String key = lowerCase(header); - for (String headerValue : headers.get(header)) { - appendCompactedString(buffer, key); + result.computeIfAbsent(lowerCaseHeader, x -> new ArrayList<>()).addAll(header.getValue()); + } + + return result; + } + + private String getCanonicalizedHeaderString(Map> canonicalizedHeaders) { + StringBuilder buffer = new StringBuilder(); + + canonicalizedHeaders.forEach((headerName, headerValues) -> { + for (String headerValue : headerValues) { + appendCompactedString(buffer, headerName); buffer.append(":"); if (headerValue != null) { appendCompactedString(buffer, headerValue); } buffer.append("\n"); } - } + }); return buffer.toString(); } @@ -351,28 +377,17 @@ private boolean isWhiteSpace(final char ch) { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\u000b' || ch == '\r' || ch == '\f'; } - private String getSignedHeadersString(Map> headers) { - List sortedHeaders = new ArrayList<>(headers.keySet()); - sortedHeaders.sort(String.CASE_INSENSITIVE_ORDER); - + private String getSignedHeadersString(Map> canonicalizedHeaders) { StringBuilder buffer = new StringBuilder(); - for (String header : sortedHeaders) { - if (shouldExcludeHeaderFromSigning(header)) { - continue; - } + for (String header : canonicalizedHeaders.keySet()) { if (buffer.length() > 0) { buffer.append(";"); } - buffer.append(lowerCase(header)); + buffer.append(header); } - return buffer.toString(); } - private boolean shouldExcludeHeaderFromSigning(String header) { - return LIST_OF_HEADERS_TO_IGNORE_IN_LOWER_CASE.contains(lowerCase(header)); - } - private void addHostHeader(SdkHttpFullRequest.Builder mutableRequest) { // AWS4 requires that we sign the Host header so we // have to have it in the request by the time we sign. 
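The AbstractAws4Signer refactoring above computes the lower-cased, sorted header map once (canonicalizeSigningHeaders) and derives both the canonical header block and the SignedHeaders list from it, instead of sorting and filtering the raw headers twice. A standalone sketch of that idea, independent of the SDK's internal types and using a hypothetical ignore-list, is:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import java.util.TreeMap;

    public final class CanonicalHeadersSketch {
        // Hypothetical ignore list; the real signer keeps its own LIST_OF_HEADERS_TO_IGNORE_IN_LOWER_CASE.
        private static final Set<String> IGNORED = new HashSet<>(Arrays.asList("connection", "x-amzn-trace-id"));

        // Lower-case the names, drop ignored headers, and keep them sorted (TreeMap).
        static Map<String, List<String>> canonicalize(Map<String, List<String>> headers) {
            Map<String, List<String>> result = new TreeMap<>();
            headers.forEach((name, values) -> {
                String lower = name.toLowerCase();
                if (!IGNORED.contains(lower)) {
                    result.computeIfAbsent(lower, k -> new ArrayList<>()).addAll(values);
                }
            });
            return result;
        }

        // "host;x-amz-date" style SignedHeaders value, derived from the same canonical map.
        static String signedHeadersString(Map<String, List<String>> canonical) {
            return String.join(";", canonical.keySet());
        }
    }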
diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAwsSigner.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAwsSigner.java index 893705186fb2..aa7e31376207 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAwsSigner.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAwsSigner.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AsyncSigV4SubscriberAdapter.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AsyncSigV4SubscriberAdapter.java index 52351111d2eb..9cfa40f7b094 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AsyncSigV4SubscriberAdapter.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AsyncSigV4SubscriberAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerRequestParams.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerRequestParams.java index eb4ae5ae59cc..8c577e81cde5 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerRequestParams.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerRequestParams.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerUtils.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerUtils.java index 7b543198a5e3..2ca42ca7d3e5 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerUtils.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -46,6 +46,10 @@ public static String formatDateStamp(long timeMilli) { return DATE_FORMATTER.format(Instant.ofEpochMilli(timeMilli)); } + public static String formatDateStamp(Instant instant) { + return DATE_FORMATTER.format(instant); + } + /** * Returns a string representation of the given date time in * yyyyMMdd'T'HHmmss'Z' format. The date returned is in the UTC zone. 
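The new formatDateStamp(Instant) overload above produces the yyyyMMdd (UTC) date stamp used in the credential scope and in the signing-key cache entry. A minimal illustration of the same format, assuming only java.time:

    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.format.DateTimeFormatter;

    public class DateStampSketch {
        // Mirrors the signer's date-stamp format: yyyyMMdd in UTC.
        private static final DateTimeFormatter DATE_FORMATTER =
                DateTimeFormatter.ofPattern("yyyyMMdd").withZone(ZoneOffset.UTC);

        public static void main(String[] args) {
            Instant signingInstant = Instant.parse("2019-11-07T12:34:56Z");
            System.out.println(DATE_FORMATTER.format(signingInstant)); // prints 20191107
        }
    }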
diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AwsChunkedEncodingInputStream.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AwsChunkedEncodingInputStream.java index 8abfd4e2aa89..d11c1b14bcb8 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AwsChunkedEncodingInputStream.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AwsChunkedEncodingInputStream.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseAsyncAws4Signer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseAsyncAws4Signer.java index 3ab593570bf5..df25b40c41ee 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseAsyncAws4Signer.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseAsyncAws4Signer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -20,7 +20,6 @@ import java.util.regex.Pattern; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; -import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.signer.params.Aws4SignerParams; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.exception.SdkClientException; @@ -57,11 +56,8 @@ public AsyncRequestBody signAsyncRequestBody(SdkHttpFullRequest request, AsyncRe @SdkTestInternalApi protected final AsyncRequestBody signAsync(SdkHttpFullRequest request, AsyncRequestBody asyncRequestBody, Aws4SignerRequestParams requestParams, Aws4SignerParams signingParams) { - AwsCredentials sanitizedCredentials = sanitizeCredentials(signingParams.awsCredentials()); - byte[] signingKey = deriveSigningKey(sanitizedCredentials, requestParams); - String headerSignature = getHeaderSignature(request); - return transformRequestProvider(headerSignature, signingKey, requestParams, signingParams, asyncRequestBody); + return transformRequestProvider(headerSignature, requestParams, signingParams, asyncRequestBody); } /** @@ -70,7 +66,6 @@ protected final AsyncRequestBody signAsync(SdkHttpFullRequest request, AsyncRequ * Can be overriden by subclasses to provide specific signing method */ protected abstract AsyncRequestBody transformRequestProvider(String headerSignature, - byte[] signingKey, Aws4SignerRequestParams signerRequestParams, Aws4SignerParams signerParams, AsyncRequestBody asyncRequestBody); diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseAws4Signer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseAws4Signer.java index 1ac07719f21b..256b143ea845 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseAws4Signer.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseAws4Signer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 
Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseEventStreamAsyncAws4Signer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseEventStreamAsyncAws4Signer.java index d7b3cf179f3f..e50ac4e1450f 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseEventStreamAsyncAws4Signer.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BaseEventStreamAsyncAws4Signer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -20,7 +20,9 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.time.Instant; +import java.util.Arrays; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.Optional; import java.util.TreeMap; @@ -28,6 +30,7 @@ import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.signer.params.Aws4SignerParams; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.SdkPublisher; @@ -49,6 +52,8 @@ public abstract class BaseEventStreamAsyncAws4Signer extends BaseAsyncAws4Signer private static final String HTTP_CONTENT_SHA_256 = "STREAMING-AWS4-HMAC-SHA256-EVENTS"; private static final String EVENT_STREAM_PAYLOAD = "AWS4-HMAC-SHA256-PAYLOAD"; + private static final int PAYLOAD_TRUNCATE_LENGTH = 32; + protected BaseEventStreamAsyncAws4Signer() { } @@ -67,7 +72,6 @@ public SdkHttpFullRequest sign(SdkHttpFullRequest request, Aws4SignerParams sign @Override protected AsyncRequestBody transformRequestProvider(String headerSignature, - byte[] signingKey, Aws4SignerRequestParams signerRequestParams, Aws4SignerParams signerParams, AsyncRequestBody asyncRequestBody) { @@ -80,7 +84,8 @@ protected AsyncRequestBody transformRequestProvider(String headerSignature, * Map publisher with signing function */ Publisher publisherWithSignedFrame = - transformRequestBodyPublisher(publisherWithTrailingEmptyFrame, headerSignature, signingKey, signerRequestParams); + transformRequestBodyPublisher(publisherWithTrailingEmptyFrame, headerSignature, + signerParams.awsCredentials(), signerRequestParams); AsyncRequestBody transformedRequestBody = AsyncRequestBody.fromPublisher(publisherWithSignedFrame); @@ -105,16 +110,16 @@ private static Publisher appendEmptyFrame(Publisher publ } private Publisher transformRequestBodyPublisher(Publisher publisher, String headerSignature, - byte[] signingKey, Aws4SignerRequestParams signerRequestParams) { + AwsCredentials credentials, + Aws4SignerRequestParams signerRequestParams) { return SdkPublisher.adapt(publisher) - .map(getDataFrameSigner(headerSignature, signingKey, signerRequestParams)); + .map(getDataFrameSigner(headerSignature, credentials, signerRequestParams)); } - private Function getDataFrameSigner(String headerSignature, byte[] signingKey, + private Function 
getDataFrameSigner(String headerSignature, + AwsCredentials credentials, Aws4SignerRequestParams signerRequestParams) { return new Function() { - - final byte[] key = signingKey; final Aws4SignerRequestParams requestParams = signerRequestParams; /** @@ -129,14 +134,21 @@ public ByteBuffer apply(ByteBuffer byteBuffer) { */ Map nonSignatureHeaders = new HashMap<>(); Instant signingInstant = requestParams.getSigningClock().instant(); - String signingDate = Aws4SignerUtils.formatTimestamp(signingInstant); nonSignatureHeaders.put(EVENT_STREAM_DATE, HeaderValue.fromTimestamp(signingInstant)); + /** + * Derive Signing Key + */ + AwsCredentials sanitizedCredentials = sanitizeCredentials(credentials); + byte[] signingKey = deriveSigningKey(sanitizedCredentials, + signingInstant, + requestParams.getRegionName(), + requestParams.getServiceSigningName()); /** * Calculate rolling signature */ byte[] payload = byteBuffer.array(); - byte[] signatureBytes = signEventStream(priorSignature, key, signingDate, requestParams, + byte[] signatureBytes = signEventStream(priorSignature, signingKey, signingInstant, requestParams, nonSignatureHeaders, payload); priorSignature = BinaryUtils.toHex(signatureBytes); @@ -151,6 +163,13 @@ public ByteBuffer apply(ByteBuffer byteBuffer) { * Encode signed event to byte */ Message signedMessage = new Message(sortHeaders(headers), payload); + + if (LOG.isLoggingLevelEnabled("trace")) { + LOG.trace(() -> "Signed message: " + toDebugString(signedMessage, false)); + } else { + LOG.debug(() -> "Signed message: " + toDebugString(signedMessage, true)); + } + return signedMessage.toByteBuffer(); } }; @@ -162,7 +181,7 @@ public ByteBuffer apply(ByteBuffer byteBuffer) { * * @param priorSignature signature of previous frame (Header frame is the 0th frame) * @param signingKey derived signing key - * @param date siging date + * @param signingInstant the instant at which this message is being signed * @param requestParams request parameters * @param nonSignatureHeaders non-signature headers * @param payload event stream payload @@ -171,7 +190,7 @@ public ByteBuffer apply(ByteBuffer byteBuffer) { private byte[] signEventStream( String priorSignature, byte[] signingKey, - String date, + Instant signingInstant, Aws4SignerRequestParams requestParams, Map nonSignatureHeaders, byte[] payload) { @@ -180,9 +199,9 @@ private byte[] signEventStream( String stringToSign = EVENT_STREAM_PAYLOAD + SignerConstant.LINE_SEPARATOR + - date + + Aws4SignerUtils.formatTimestamp(signingInstant) + SignerConstant.LINE_SEPARATOR + - requestParams.getScope() + + computeScope(signingInstant, requestParams) + SignerConstant.LINE_SEPARATOR + priorSignature + SignerConstant.LINE_SEPARATOR + @@ -195,6 +214,13 @@ private byte[] signEventStream( SigningAlgorithm.HmacSHA256); } + private String computeScope(Instant signingInstant, Aws4SignerRequestParams requestParams) { + return Aws4SignerUtils.formatDateStamp(signingInstant) + "/" + + requestParams.getRegionName() + "/" + + requestParams.getServiceSigningName() + "/" + + SignerConstant.AWS4_TERMINATOR; + } + /** * Sort headers in alphabetic order, with exception that EVENT_STREAM_SIGNATURE header always at last * @@ -244,4 +270,45 @@ public Optional contentLength() { } } + static String toDebugString(Message m, boolean truncatePayload) { + StringBuilder sb = new StringBuilder("Message = {headers={"); + Map headers = m.getHeaders(); + + Iterator> headersIter = headers.entrySet().iterator(); + + while (headersIter.hasNext()) { + Map.Entry h = headersIter.next(); + + 
sb.append(h.getKey()).append("={").append(h.getValue().toString()).append("}"); + + if (headersIter.hasNext()) { + sb.append(", "); + } + } + + sb.append("}, payload="); + + byte[] payload = m.getPayload(); + byte[] payloadToLog; + + // We don't actually need to truncate if the payload length is already within the truncate limit + truncatePayload = truncatePayload && payload.length > PAYLOAD_TRUNCATE_LENGTH; + + if (truncatePayload) { + // Would be nice if BinaryUtils.toHex() could take an array index range instead so we don't need to copy + payloadToLog = Arrays.copyOf(payload, PAYLOAD_TRUNCATE_LENGTH); + } else { + payloadToLog = payload; + } + + sb.append(BinaryUtils.toHex(payloadToLog)); + + if (truncatePayload) { + sb.append("..."); + } + + sb.append("}"); + + return sb.toString(); + } } diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BoundedLinkedHashMap.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BoundedLinkedHashMap.java index 59c4aa01f94d..20d2d75bd070 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BoundedLinkedHashMap.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/BoundedLinkedHashMap.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/ChunkContentIterator.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/ChunkContentIterator.java index 527b826d0c33..0a40d7571812 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/ChunkContentIterator.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/ChunkContentIterator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/DecodedStreamBuffer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/DecodedStreamBuffer.java index 5161ce43ba21..9cb5b48283a8 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/DecodedStreamBuffer.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/DecodedStreamBuffer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/DigestComputingSubscriber.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/DigestComputingSubscriber.java new file mode 100644 index 000000000000..755dc4dac2ca --- /dev/null +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/DigestComputingSubscriber.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.signer.internal; + +import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.SdkClientException; + +@SdkInternalApi +public final class DigestComputingSubscriber implements Subscriber { + private final CompletableFuture digestBytes = new CompletableFuture<>(); + private final MessageDigest messageDigest; + private volatile boolean canceled = false; + private volatile Subscription subscription; + + public DigestComputingSubscriber(MessageDigest messageDigest) { + this.messageDigest = messageDigest; + + digestBytes.whenComplete((r, t) -> { + if (t instanceof CancellationException) { + synchronized (DigestComputingSubscriber.this) { + canceled = true; + if (subscription != null) { + subscription.cancel(); + } + } + } + }); + } + + @Override + public void onSubscribe(Subscription subscription) { + synchronized (this) { + if (!canceled) { + this.subscription = subscription; + subscription.request(Long.MAX_VALUE); + } else { + subscription.cancel(); + } + } + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + if (!canceled) { + messageDigest.update(byteBuffer); + } + } + + @Override + public void onError(Throwable throwable) { + digestBytes.completeExceptionally(throwable); + } + + @Override + public void onComplete() { + digestBytes.complete(messageDigest.digest()); + } + + public CompletableFuture digestBytes() { + return digestBytes; + } + + public static DigestComputingSubscriber forSha256() { + try { + return new DigestComputingSubscriber(MessageDigest.getInstance("SHA-256")); + } catch (NoSuchAlgorithmException e) { + throw SdkClientException.create("Unable to create SHA-256 computing subscriber", e); + } + } +} diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/FifoCache.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/FifoCache.java index 29838e0c4f0c..ccb3b108915d 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/FifoCache.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/FifoCache.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
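DigestComputingSubscriber, added above, streams a request body's ByteBuffers through a MessageDigest and exposes the result as a CompletableFuture. The class is @SdkInternalApi, so the following is only an illustrative sketch of how the async signer consumes it, using an in-memory AsyncRequestBody as the publisher:

    import java.util.concurrent.CompletableFuture;
    import software.amazon.awssdk.auth.signer.internal.DigestComputingSubscriber;
    import software.amazon.awssdk.core.async.AsyncRequestBody;
    import software.amazon.awssdk.utils.BinaryUtils;

    public class DigestSketch {
        public static void main(String[] args) {
            DigestComputingSubscriber subscriber = DigestComputingSubscriber.forSha256();

            // Any Publisher<ByteBuffer> works; AsyncRequestBody.fromString is just a convenient source.
            AsyncRequestBody.fromString("hello world").subscribe(subscriber);

            // Completes once the publisher signals onComplete; here the body is already in memory.
            CompletableFuture<byte[]> digest = subscriber.digestBytes();
            System.out.println(BinaryUtils.toHex(digest.join()));
        }
    }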
@@ -43,7 +43,7 @@ public FifoCache(final int maxSize) { throw new IllegalArgumentException("maxSize " + maxSize + " must be at least 1"); } - map = new BoundedLinkedHashMap(maxSize); + map = new BoundedLinkedHashMap<>(maxSize); ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); rlock = lock.readLock(); wlock = lock.writeLock(); diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SignerConstant.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SignerConstant.java index f211641d758a..591e80746204 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SignerConstant.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SignerConstant.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SignerKey.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SignerKey.java index ed2ef8cb1002..4dc7dcde2526 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SignerKey.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SignerKey.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,8 +15,10 @@ package software.amazon.awssdk.auth.signer.internal; +import java.time.Instant; import software.amazon.awssdk.annotations.Immutable; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.DateUtils; /** * Holds the signing key and the number of days since epoch for the date for @@ -26,29 +28,25 @@ @SdkInternalApi public final class SignerKey { - private final long numberOfDaysSinceEpoch; + private final long daysSinceEpoch; private final byte[] signingKey; - public SignerKey(long numberOfDaysSinceEpoch, byte[] signingKey) { - if (numberOfDaysSinceEpoch <= 0L) { + public SignerKey(Instant date, byte[] signingKey) { + if (date == null) { throw new IllegalArgumentException( - "Not able to cache signing key. Signing date to be cached is invalid"); + "Not able to cache signing key. Signing date to be cached is null"); } if (signingKey == null) { throw new IllegalArgumentException( "Not able to cache signing key. Signing Key to be cached are null"); } - this.numberOfDaysSinceEpoch = numberOfDaysSinceEpoch; + this.daysSinceEpoch = DateUtils.numberOfDaysSinceEpoch(date.toEpochMilli()); this.signingKey = signingKey.clone(); } - /** - * Returns the number of days since epoch for the date used for generating - * signing key.
- */ - public long getNumberOfDaysSinceEpoch() { - return numberOfDaysSinceEpoch; + public boolean isValidForDate(Instant other) { + return daysSinceEpoch == DateUtils.numberOfDaysSinceEpoch(other.toEpochMilli()); } /** diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SigningAlgorithm.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SigningAlgorithm.java index ddf424ef0990..388c1b38112f 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SigningAlgorithm.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/SigningAlgorithm.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/Aws4PresignerParams.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/Aws4PresignerParams.java index 80fb5c1f0baa..4a3fff21cd92 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/Aws4PresignerParams.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/Aws4PresignerParams.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/Aws4SignerParams.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/Aws4SignerParams.java index 40c0a7719fdd..457dc43ccc12 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/Aws4SignerParams.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/Aws4SignerParams.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/AwsS3V4SignerParams.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/AwsS3V4SignerParams.java index 40e56a10b5a6..9eeb5a5001a7 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/AwsS3V4SignerParams.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/params/AwsS3V4SignerParams.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
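The SignerKey change above replaces the raw days-since-epoch constructor argument with an Instant plus a daysSinceEpoch comparison, so a cached signing key is reused only while the UTC signing date is unchanged. A compact sketch of the same day-bucket check, using plain java.time instead of the SDK's DateUtils:

    import java.time.Duration;
    import java.time.Instant;

    public final class SigningKeyCacheEntrySketch {
        private final long daysSinceEpoch;
        private final byte[] signingKey;

        SigningKeyCacheEntrySketch(Instant date, byte[] signingKey) {
            // Same idea as DateUtils.numberOfDaysSinceEpoch: bucket the instant by UTC day.
            this.daysSinceEpoch = date.toEpochMilli() / Duration.ofDays(1).toMillis();
            this.signingKey = signingKey.clone();
        }

        // The cached key stays valid as long as the signing instant falls on the same UTC day.
        boolean isValidForDate(Instant other) {
            return daysSinceEpoch == other.toEpochMilli() / Duration.ofDays(1).toMillis();
        }
    }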
diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProviderChainTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProviderChainTest.java index cd33ba93b449..ea8b688e3161 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProviderChainTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/AwsCredentialsProviderChainTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsEndpointProviderTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsEndpointProviderTest.java index b4d163d23ad3..d7ccc4622a30 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsEndpointProviderTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsEndpointProviderTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsProviderTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsProviderTest.java index 5657e6988207..507b855d3831 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsProviderTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsProviderTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsRetryPolicyTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsRetryPolicyTest.java index b962e4366141..38b4b7c8d4fd 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsRetryPolicyTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ContainerCredentialsRetryPolicyTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/CredentialsEndpointRetryParametersTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/CredentialsEndpointRetryParametersTest.java index 67f59266c121..aad419e649ba 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/CredentialsEndpointRetryParametersTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/CredentialsEndpointRetryParametersTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/EC2MetadataServiceMock.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/EC2MetadataServiceMock.java index 99d0e7d7b140..495fff4f943d 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/EC2MetadataServiceMock.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/EC2MetadataServiceMock.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -26,6 +26,8 @@ import java.net.Socket; import java.util.List; import org.apache.commons.io.IOUtils; +import software.amazon.awssdk.auth.signer.internal.SignerTestUtils; +import software.amazon.awssdk.utils.StringUtils; /** * Mock server for imitating the Amazon EC2 Instance Metadata Service. Tests can @@ -99,6 +101,7 @@ private ServerSocket startServerSocket() { * and response with a predefined response file. */ private static class EC2MockMetadataServiceListenerThread extends Thread { + private static final String TOKEN_RESOURCE_PATH = "/latest/api/token"; private ServerSocket serverSocket; private final String credentialsResource; private String responseFileName; @@ -164,7 +167,13 @@ public void run() { .toString()); outputStream.write(httpResponse.getBytes()); - } else { + } else if (TOKEN_RESOURCE_PATH.equals(resourcePath)) { + httpResponse = "HTTP/1.1 404 Not Found\r\n" + + "Content-Length: 0\r\n" + + "\r\n"; + outputStream.write(httpResponse.getBytes()); + } + else { throw new RuntimeException("Unknown resource requested: " + resourcePath); } } catch (IOException e) { diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProviderTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProviderTest.java index 15d889eca9e2..3e16b3dd3f05 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProviderTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/HttpCredentialsProviderTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
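The EC2MetadataServiceMock change above teaches the mock server to answer the /latest/api/token resource, reflecting the session-token handshake that the instance-profile credentials provider performs: acquire a token with a PUT request, attach it to the credential requests, and fall back to the unauthenticated flow when the token resource answers 403, 404, or 405. The following is a rough standalone sketch of that flow using plain HttpURLConnection rather than the SDK's internal HTTP plumbing; the endpoint, paths, and header names mirror the constants used in the tests that follow, and error handling is deliberately simplified.

```java
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Scanner;

public class Imdsv2FlowSketch {
    private static final String ENDPOINT = "http://169.254.169.254";
    private static final String TOKEN_PATH = "/latest/api/token";
    private static final String CREDENTIALS_PATH = "/latest/meta-data/iam/security-credentials/";

    public static void main(String[] args) throws IOException {
        String token = fetchToken();                                      // null means: proceed without a token
        String profile = get(CREDENTIALS_PATH, token).trim();             // first request lists the instance profile name
        String credentialsJson = get(CREDENTIALS_PATH + profile, token);  // second request fetches its credentials document
        System.out.println(credentialsJson);
    }

    private static String fetchToken() throws IOException {
        HttpURLConnection conn = (HttpURLConnection) new URL(ENDPOINT + TOKEN_PATH).openConnection();
        conn.setRequestMethod("PUT");
        conn.setRequestProperty("x-aws-ec2-metadata-token-ttl-seconds", "21600");
        // 403, 404 and 405 indicate the token resource is unavailable; the provider then falls back.
        return conn.getResponseCode() == 200 ? read(conn.getInputStream()) : null;
    }

    private static String get(String path, String token) throws IOException {
        HttpURLConnection conn = (HttpURLConnection) new URL(ENDPOINT + path).openConnection();
        if (token != null) {
            conn.setRequestProperty("x-aws-ec2-metadata-token", token);
        }
        return read(conn.getInputStream());
    }

    private static String read(InputStream in) throws IOException {
        try (Scanner scanner = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
            return scanner.hasNext() ? scanner.next() : "";
        }
    }
}
```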
diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderTest.java new file mode 100644 index 000000000000..17b9b71a5b80 --- /dev/null +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/InstanceProfileCredentialsProviderTest.java @@ -0,0 +1,213 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.credentials; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.put; +import static com.github.tomakehurst.wiremock.client.WireMock.putRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static org.hamcrest.Matchers.instanceOf; +import com.github.tomakehurst.wiremock.client.WireMock; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.SocketTimeoutException; +import java.time.Duration; +import java.time.Instant; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.internal.util.UserAgentUtils; +import software.amazon.awssdk.utils.DateUtils; + +public class InstanceProfileCredentialsProviderTest { + private static final String TOKEN_RESOURCE_PATH = "/latest/api/token"; + private static final String CREDENTIALS_RESOURCE_PATH = "/latest/meta-data/iam/security-credentials/"; + private static final String STUB_CREDENTIALS = "{\"AccessKeyId\":\"ACCESS_KEY_ID\",\"SecretAccessKey\":\"SECRET_ACCESS_KEY\"," + + "\"Expiration\":\"" + DateUtils.formatIso8601Date(Instant.now().plus(Duration.ofDays(1))) + + "\"}"; + private static final String TOKEN_HEADER = "x-aws-ec2-metadata-token"; + private static final String EC2_METADATA_TOKEN_TTL_HEADER = "x-aws-ec2-metadata-token-ttl-seconds"; + + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Rule + public WireMockRule mockMetadataEndpoint = new WireMockRule(); + + @Before + public void methodSetup() { + System.setProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.property(), "http://localhost:" + mockMetadataEndpoint.port()); + } + + @AfterClass + public static void teardown() { + System.clearProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.property()); + } + + @Test + public void resolveCredentials_metadataLookupDisabled_throws() { + 
System.setProperty(SdkSystemSetting.AWS_EC2_METADATA_DISABLED.property(), "true"); + thrown.expect(SdkClientException.class); + thrown.expectMessage("Loading credentials from local endpoint is disabled"); + try { + InstanceProfileCredentialsProvider.builder().build().resolveCredentials(); + } finally { + System.clearProperty(SdkSystemSetting.AWS_EC2_METADATA_DISABLED.property()); + } + } + + @Test + public void resolveCredentials_requestsIncludeUserAgent() { + String stubToken = "some-token"; + stubFor(put(urlPathEqualTo(TOKEN_RESOURCE_PATH)).willReturn(aResponse().withBody(stubToken))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).willReturn(aResponse().withBody("some-profile"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).willReturn(aResponse().withBody(STUB_CREDENTIALS))); + + InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.builder().build(); + + provider.resolveCredentials(); + + String userAgentHeader = "User-Agent"; + String userAgent = UserAgentUtils.getUserAgent(); + WireMock.verify(putRequestedFor(urlPathEqualTo(TOKEN_RESOURCE_PATH)).withHeader(userAgentHeader, equalTo(userAgent))); + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).withHeader(userAgentHeader, equalTo(userAgent))); + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).withHeader(userAgentHeader, equalTo(userAgent))); + } + + @Test + public void resolveCredentials_queriesTokenResource() { + stubFor(put(urlPathEqualTo(TOKEN_RESOURCE_PATH)).willReturn(aResponse().withBody("some-token"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).willReturn(aResponse().withBody("some-profile"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).willReturn(aResponse().withBody(STUB_CREDENTIALS))); + + InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.builder().build(); + + provider.resolveCredentials(); + + WireMock.verify(putRequestedFor(urlPathEqualTo(TOKEN_RESOURCE_PATH)).withHeader(EC2_METADATA_TOKEN_TTL_HEADER, equalTo("21600"))); + } + + @Test + public void resolveCredentials_queriesTokenResource_includedInCredentialsRequests() { + String stubToken = "some-token"; + stubFor(put(urlPathEqualTo(TOKEN_RESOURCE_PATH)).willReturn(aResponse().withBody(stubToken))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).willReturn(aResponse().withBody("some-profile"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).willReturn(aResponse().withBody(STUB_CREDENTIALS))); + + InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.builder().build(); + + provider.resolveCredentials(); + + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).withHeader(TOKEN_HEADER, equalTo(stubToken))); + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).withHeader(TOKEN_HEADER, equalTo(stubToken))); + } + + @Test + public void resolveCredentials_queriesTokenResource_403Error_fallbackToInsecure() { + stubFor(put(urlPathEqualTo(TOKEN_RESOURCE_PATH)).willReturn(aResponse().withStatus(403).withBody("oops"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).willReturn(aResponse().withBody("some-profile"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).willReturn(aResponse().withBody(STUB_CREDENTIALS))); + + InstanceProfileCredentialsProvider provider = 
InstanceProfileCredentialsProvider.builder().build(); + + provider.resolveCredentials(); + + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH))); + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile"))); + } + + @Test + public void resolveCredentials_queriesTokenResource_404Error_fallbackToInsecure() { + stubFor(put(urlPathEqualTo(TOKEN_RESOURCE_PATH)).willReturn(aResponse().withStatus(404).withBody("oops"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).willReturn(aResponse().withBody("some-profile"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).willReturn(aResponse().withBody(STUB_CREDENTIALS))); + + InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.builder().build(); + + provider.resolveCredentials(); + + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH))); + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile"))); + } + + @Test + public void resolveCredentials_queriesTokenResource_405Error_fallbackToInsecure() { + stubFor(put(urlPathEqualTo(TOKEN_RESOURCE_PATH)).willReturn(aResponse().withStatus(405).withBody("oops"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).willReturn(aResponse().withBody("some-profile"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).willReturn(aResponse().withBody(STUB_CREDENTIALS))); + + InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.builder().build(); + + provider.resolveCredentials(); + + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH))); + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile"))); + } + + @Test + public void resolveCredentials_queriesTokenResource_400Error_throws() { + thrown.expect(SdkClientException.class); + thrown.expectMessage("token"); + + stubFor(put(urlPathEqualTo(TOKEN_RESOURCE_PATH)).willReturn(aResponse().withStatus(400).withBody("oops"))); + + InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.builder().build(); + + provider.resolveCredentials(); + } + + @Test + public void resolveCredentials_queriesTokenResource_socketTimeout_fallbackToInsecure() { + stubFor(put(urlPathEqualTo(TOKEN_RESOURCE_PATH)).willReturn(aResponse().withBody("some-token").withFixedDelay(Integer.MAX_VALUE))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH)).willReturn(aResponse().withBody("some-profile"))); + stubFor(get(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile")).willReturn(aResponse().withBody(STUB_CREDENTIALS))); + + InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.builder().build(); + + provider.resolveCredentials(); + + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH))); + WireMock.verify(getRequestedFor(urlPathEqualTo(CREDENTIALS_RESOURCE_PATH + "some-profile"))); + } + + @Test + public void resolveCredentials_endpointSettingEmpty_throws() { + thrown.expect(SdkClientException.class); + + System.setProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.property(), ""); + InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.builder().build(); + + provider.resolveCredentials(); + } + + @Test + public void resolveCredentials_endpointSettingHostNotExists_throws() { + thrown.expect(SdkClientException.class); + + 
System.setProperty(SdkSystemSetting.AWS_EC2_METADATA_SERVICE_ENDPOINT.property(), "some-host-that-does-not-exist"); + InstanceProfileCredentialsProvider provider = InstanceProfileCredentialsProvider.builder().build(); + + provider.resolveCredentials(); + } +} diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/NoopTestRequest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/NoopTestRequest.java index 3cfbacec90d9..ba1e9eb60163 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/NoopTestRequest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/NoopTestRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProviderTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProviderTest.java index 81fc65413d52..95dc28c091a5 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProviderTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ProcessCredentialsProviderTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -24,6 +24,8 @@ import java.io.UncheckedIOException; import java.time.Duration; import java.time.Instant; + +import org.assertj.core.api.Assertions; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -33,6 +35,9 @@ import software.amazon.awssdk.utils.Platform; public class ProcessCredentialsProviderTest { + + private final static String PROCESS_RESOURCE_PATH = "/resources/process/"; + private final static String RANDOM_SESSION_TOKEN = "RANDOM_TOKEN"; private static String scriptLocation; @BeforeClass @@ -98,8 +103,10 @@ public void resultsAreCached() { public void expirationBufferOverrideIsApplied() { ProcessCredentialsProvider credentialsProvider = ProcessCredentialsProvider.builder() - .command(scriptLocation + " accessKeyId secretAccessKey sessionToken " + - DateUtils.formatIso8601Date(Instant.now().plusSeconds(20))) + .command(String.format("%s accessKeyId secretAccessKey %s %s", + scriptLocation, + RANDOM_SESSION_TOKEN, + DateUtils.formatIso8601Date(Instant.now().plusSeconds(20)))) .credentialRefreshThreshold(Duration.ofSeconds(20)) .build(); @@ -132,10 +139,36 @@ public void processOutputLimitIsEnforced() { .resolveCredentials(); } + @Test + public void processOutputLimitDefaultPassesLargeInput() { + + String LONG_SESSION_TOKEN = "lYzvmByqdS1E69QQVEavDDHabQ2GuYKYABKRA4xLbAXpdnFtV030UH4" + + "bQoZWCDcfADFvBwBm3ixEFTYMjn5XQozpFV2QAsWHirCVcEJ5DC60KPCNBcDi4KLNJfbsp3r6kKTOmYOeqhEyiC4emDX33X2ppZsa5" + + "1iwr6ShIZPOUPmuR4WDglmWubgO2q5tZv48xA5idkcHEmtGdoL343sY24q4gMh21eeBnF6ikjZdfvZ0Mn86UQ8r05AD346rSwM5bFs" + + "t019ZkJIjLHD3HoKJ44EndRvSvQClXfJCmmQDH5INiXdFLLNm0dzT3ynbVIW5x1YYBWptyts4NUSy2eJ3dTPjYICpQVCkbuNVA7PqR" + + "ctUyE8lU7uvnrIVnx9xTgl34J6D9VJKHQkPuGvbtN6w4CVtXoPAQcE8tlkKyOQmIeqEahhaqLW15t692SI6hwBW0b8DxCQawX5ukt4" + + 
"f5gZoRFz3u8qHMSnm5oEnTgv7C5AAs0V680YvelFMNYvSoSbDnoThxfTIG9msj7WBh7iNa7mI8TXmvOegQtDWR011ZOo8dR3jnhWNH" + + "nSW4CRB7iSC5DMZ2y56dYS28XGBl01LYXF5ZTJJfLwQEhbRWSTdXIBJq07E0YxRu0SaLokA4uknOoicwXnD7LMCld4hFjuypYgWBuk" + + "3pC09CPA0MJjQNTTAvxGqDTqSWoXWDZRIMUWyGyz3FCkpPUjv4mIpVYt2bGl6cHsMBzVnpL6yXMCw2mNqJx8Rvi4gQaHH6LzvHbVKR" + + "w4kE53703DNOc8cA9Zc0efJa4NHOFxc4XmMOtjGW7vbWPp0CTVCJLG94ddSFJrimpamPM59bs12x2ih51EpOFR5ITIxJnd79HEkYDU" + + "xRIOuPIe4VpM01RnFN4g3ChDqmjQ03wQY9I8Mvh59u3MujggQfwAhCc84MAz0jVukoMfhAAhMNUPLuwRj0qpqr6B3DdKZ4KDFWF2Ga" + + "Iu1sEFlKvPdfF1uefbTj6YdjUciWu1UBH47VbIcTbvbwmUiu2javB21kOenyDoelK5GUM4u0uPeXIOOhtZsJb8kz88h1joWkaKr2fc" + + "jrIS08FM47Y4Z2Mi4zfwyN54L"; + + ProcessCredentialsProvider credentialsProvider = ProcessCredentialsProvider.builder() + .command(scriptLocation + " accessKeyId secretAccessKey " + LONG_SESSION_TOKEN) + .build(); + + AwsSessionCredentials sessionCredentials = (AwsSessionCredentials) credentialsProvider.resolveCredentials(); + + Assertions.assertThat(sessionCredentials.accessKeyId()).isEqualTo("accessKeyId"); + Assertions.assertThat(sessionCredentials.sessionToken()).isNotNull(); + } + public static String copyProcessCredentialsScript() { String scriptClasspathFilename = Platform.isWindows() ? "windows-credentials-script.bat" : "linux-credentials-script.sh"; - String scriptClasspathLocation = "/resources/process/" + scriptClasspathFilename; + String scriptClasspathLocation = PROCESS_RESOURCE_PATH + scriptClasspathFilename; InputStream scriptInputStream = null; OutputStream scriptOutputStream = null; diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProviderTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProviderTest.java index 3de935acb09b..8be07bb42bf0 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProviderTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProviderTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/StaticCredentialsProviderTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/StaticCredentialsProviderTest.java index feefc4b1ded7..19c99236eee6 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/StaticCredentialsProviderTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/StaticCredentialsProviderTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
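The ProcessCredentialsProviderTest changes above exercise a provider that shells out to an external program and parses its JSON output. Here is a minimal usage sketch, assuming /opt/bin/print-credentials.sh is a hypothetical script that emits a credential_process-style document; the builder methods are the same ones used in the tests.

```java
import java.time.Duration;
import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.credentials.ProcessCredentialsProvider;

public class ProcessCredentialsSketch {
    public static void main(String[] args) {
        ProcessCredentialsProvider provider =
            ProcessCredentialsProvider.builder()
                                      // Hypothetical script printing {"Version": 1, "AccessKeyId": ..., "SecretAccessKey": ...}
                                      .command("/opt/bin/print-credentials.sh")
                                      // Refresh cached results when they are within 20 seconds of expiring, as in the test above.
                                      .credentialRefreshThreshold(Duration.ofSeconds(20))
                                      .build();

        AwsCredentials credentials = provider.resolveCredentials();
        System.out.println(credentials.accessKeyId());
    }
}
```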
diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/AwsSessionCredentialsTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/AwsSessionCredentialsTest.java index e2f47fc1355c..de34421dee4b 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/AwsSessionCredentialsTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/AwsSessionCredentialsTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProviderTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProviderTest.java index f669402b1fac..e712ea6257ce 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProviderTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProviderTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.utils.SdkAutoCloseable; public class LazyAwsCredentialsProviderTest { @SuppressWarnings("unchecked") @@ -48,4 +49,30 @@ public void resolveCredentialsInvokesSupplierExactlyOnce() { Mockito.verify(credentialsConstructor, Mockito.times(1)).get(); Mockito.verify(credentials, Mockito.times(2)).resolveCredentials(); } + + @Test + public void delegatesClosesInitializerAndValue() { + CloseableSupplier initializer = Mockito.mock(CloseableSupplier.class); + CloseableCredentialsProvider value = Mockito.mock(CloseableCredentialsProvider.class); + + Mockito.when(initializer.get()).thenReturn(value); + + LazyAwsCredentialsProvider.create(initializer).close(); + + Mockito.verify(initializer).close(); + Mockito.verify(value).close(); + } + + @Test + public void delegatesClosesInitializerEvenIfGetFails() { + CloseableSupplier initializer = Mockito.mock(CloseableSupplier.class); + Mockito.when(initializer.get()).thenThrow(new RuntimeException()); + + LazyAwsCredentialsProvider.create(initializer).close(); + + Mockito.verify(initializer).close(); + } + + private interface CloseableSupplier extends Supplier, SdkAutoCloseable {} + private interface CloseableCredentialsProvider extends SdkAutoCloseable, AwsCredentialsProvider {} } diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtilsTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtilsTest.java index 9ab5e24161eb..eaf9a1f067e7 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtilsTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtilsTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/AbstractAws4SignerTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/AbstractAws4SignerTest.java index 6176784ec531..9b028c2444bf 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/AbstractAws4SignerTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/AbstractAws4SignerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4EventStreamSignerTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4EventStreamSignerTest.java index c77d9fe0512f..eedded3748f9 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4EventStreamSignerTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4EventStreamSignerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4SignerTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4SignerTest.java index e36a52f3b22e..ef2ee593a995 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4SignerTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/Aws4SignerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
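The new LazyAwsCredentialsProviderTest cases above pin down that closing the lazy wrapper also closes both the initializer and the provider it produced. A small sketch of the intended usage follows; LazyAwsCredentialsProvider is an SDK-internal class, and the profile name here is hypothetical.

```java
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.auth.credentials.internal.LazyAwsCredentialsProvider;

public class LazyProviderCloseSketch {
    public static void main(String[] args) {
        // The delegate provider is only constructed on first use; "my-profile" is a placeholder.
        LazyAwsCredentialsProvider lazy =
            LazyAwsCredentialsProvider.create(() -> ProfileCredentialsProvider.create("my-profile"));

        lazy.resolveCredentials();

        // Per the new tests, closing the wrapper also closes the supplier and the provider it created.
        lazy.close();
    }
}
```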
diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/EventStreamAws4SignerTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/EventStreamAws4SignerTest.java new file mode 100644 index 000000000000..6c4b8bbec0a2 --- /dev/null +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/EventStreamAws4SignerTest.java @@ -0,0 +1,105 @@ +package software.amazon.awssdk.auth.signer; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +import io.reactivex.Flowable; +import java.net.URI; +import java.nio.ByteBuffer; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.signer.internal.SignerTestUtils; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.regions.Region; +import software.amazon.eventstream.HeaderValue; +import software.amazon.eventstream.Message; +import software.amazon.eventstream.MessageDecoder; + +public class EventStreamAws4SignerTest { + /** + * Verify that when an event stream is open from one day to the next, the signature is properly signed for the day of the + * event. + */ + @Test + public void openStreamEventSignaturesCanRollOverBetweenDays() { + EventStreamAws4Signer signer = EventStreamAws4Signer.create(); + + Region region = Region.US_WEST_2; + AwsCredentials credentials = AwsBasicCredentials.create("a", "s"); + String signingName = "name"; + AdjustableClock clock = new AdjustableClock(); + clock.time = Instant.parse("2020-01-01T23:59:59Z"); + + SdkHttpFullRequest initialRequest = SdkHttpFullRequest.builder() + .uri(URI.create("http://localhost:8080")) + .method(SdkHttpMethod.GET) + .build(); + SdkHttpFullRequest signedRequest = SignerTestUtils.signRequest(signer, initialRequest, credentials, signingName, clock, + region.id()); + + ByteBuffer event = new Message(Collections.emptyMap(), "foo".getBytes(UTF_8)).toByteBuffer(); + + Callable lastEvent = () -> { + clock.time = Instant.parse("2020-01-02T00:00:00Z"); + return event; + }; + + AsyncRequestBody requestBody = AsyncRequestBody.fromPublisher(Flowable.concatArray(Flowable.just(event), + Flowable.fromCallable(lastEvent))); + + AsyncRequestBody signedBody = SignerTestUtils.signAsyncRequest(signer, signedRequest, requestBody, credentials, + signingName, clock, region.id()); + + List signedMessages = readMessages(signedBody); + assertThat(signedMessages.size()).isEqualTo(3); + + Map firstMessageHeaders = signedMessages.get(0).getHeaders(); + assertThat(firstMessageHeaders.get(":date").getTimestamp()).isEqualTo("2020-01-01T23:59:59Z"); + assertThat(Base64.getEncoder().encodeToString(firstMessageHeaders.get(":chunk-signature").getByteArray())) + .isEqualTo("EFt7ZU043r/TJE8U+1GxJXscmNxoqmIdGtUIl8wE9u0="); + + Map lastMessageHeaders = signedMessages.get(2).getHeaders(); + assertThat(lastMessageHeaders.get(":date").getTimestamp()).isEqualTo("2020-01-02T00:00:00Z"); + assertThat(Base64.getEncoder().encodeToString(lastMessageHeaders.get(":chunk-signature").getByteArray())) + 
.isEqualTo("UTRGo0D7BQytiVkH1VofR/8f3uFsM4V5QR1A8grr1+M="); + + } + + private List readMessages(AsyncRequestBody signedBody) { + MessageDecoder decoder = new MessageDecoder(); + Flowable.fromPublisher(signedBody).blockingForEach(x -> decoder.feed(x.array())); + return decoder.getDecodedMessages(); + } + + private static class AdjustableClock extends Clock { + private Instant time; + + @Override + public ZoneId getZone() { + return ZoneOffset.UTC; + } + + @Override + public Clock withZone(ZoneId zone) { + throw new UnsupportedOperationException(); + } + + @Override + public Instant instant() { + return time; + } + } + +} \ No newline at end of file diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/NonStreamingAsyncBodyAws4SignerTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/NonStreamingAsyncBodyAws4SignerTest.java new file mode 100644 index 000000000000..d112676e2ccf --- /dev/null +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/NonStreamingAsyncBodyAws4SignerTest.java @@ -0,0 +1,154 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.signer; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import io.reactivex.Flowable; +import java.io.ByteArrayInputStream; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import org.junit.Test; +import org.mockito.stubbing.Answer; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.signer.params.Aws4SignerParams; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.regions.Region; + +public class NonStreamingAsyncBodyAws4SignerTest { + @Test + public void test_sign_computesCorrectSignature() { + Aws4Signer aws4Signer = Aws4Signer.create(); + AsyncAws4Signer asyncAws4Signer = AsyncAws4Signer.create(); + + byte[] content = "Hello AWS!".getBytes(StandardCharsets.UTF_8); + ContentStreamProvider syncBody = () -> new ByteArrayInputStream(content); + AsyncRequestBody asyncBody = AsyncRequestBody.fromBytes(content); + + SdkHttpFullRequest httpRequest = SdkHttpFullRequest.builder() + .protocol("https") + .host("my-cool-aws-service.us-west-2.amazonaws.com") + .method(SdkHttpMethod.GET) + 
.putHeader("header1", "headerval1") + .contentStreamProvider(syncBody) + .build(); + + AwsCredentials credentials = AwsBasicCredentials.create("akid", "skid"); + + Aws4SignerParams signerParams = Aws4SignerParams.builder() + .awsCredentials(credentials) + .signingClockOverride(Clock.fixed(Instant.EPOCH, ZoneId.of("UTC"))) + .signingName("my-cool-aws-service") + .signingRegion(Region.US_WEST_2) + .build(); + + List syncSignature = aws4Signer.sign(httpRequest, signerParams).headers().get("Authorization"); + List asyncSignature = asyncAws4Signer.signWithBody(httpRequest, asyncBody, signerParams).join() + .headers().get("Authorization"); + + assertThat(asyncSignature).isEqualTo(syncSignature); + } + + @Test + public void test_sign_publisherThrows_exceptionPropagated() { + AsyncAws4Signer asyncAws4Signer = AsyncAws4Signer.create(); + + RuntimeException error = new RuntimeException("error"); + Flowable errorPublisher = Flowable.error(error); + AsyncRequestBody asyncBody = new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.of(42L); + } + + @Override + public void subscribe(Subscriber subscriber) { + errorPublisher.subscribe(subscriber); + } + }; + + SdkHttpFullRequest httpRequest = SdkHttpFullRequest.builder() + .protocol("https") + .host("my-cool-aws-service.us-west-2.amazonaws.com") + .method(SdkHttpMethod.GET) + .putHeader("header1", "headerval1") + .build(); + + AwsCredentials credentials = AwsBasicCredentials.create("akid", "skid"); + + Aws4SignerParams signerParams = Aws4SignerParams.builder() + .awsCredentials(credentials) + .signingClockOverride(Clock.fixed(Instant.EPOCH, ZoneId.of("UTC"))) + .signingName("my-cool-aws-service") + .signingRegion(Region.US_WEST_2) + .build(); + + assertThatThrownBy(asyncAws4Signer.signWithBody(httpRequest, asyncBody, signerParams)::join) + .hasCause(error); + } + + @Test + public void test_sign_futureCancelled_propagatedToPublisher() { + SdkHttpFullRequest httpRequest = SdkHttpFullRequest.builder() + .protocol("https") + .host("my-cool-aws-service.us-west-2.amazonaws.com") + .method(SdkHttpMethod.GET) + .putHeader("header1", "headerval1") + .build(); + + AwsCredentials credentials = AwsBasicCredentials.create("akid", "skid"); + + Aws4SignerParams signerParams = Aws4SignerParams.builder() + .awsCredentials(credentials) + .signingClockOverride(Clock.fixed(Instant.EPOCH, ZoneId.of("UTC"))) + .signingName("my-cool-aws-service") + .signingRegion(Region.US_WEST_2) + .build(); + + AsyncRequestBody mockRequestBody = mock(AsyncRequestBody.class); + Subscription mockSubscription = mock(Subscription.class); + doAnswer((Answer) invocationOnMock -> { + Subscriber subscriber = invocationOnMock.getArgumentAt(0, Subscriber.class); + subscriber.onSubscribe(mockSubscription); + return null; + }).when(mockRequestBody).subscribe(any(Subscriber.class)); + + AsyncAws4Signer asyncAws4Signer = AsyncAws4Signer.create(); + + CompletableFuture signedRequestFuture = asyncAws4Signer.signWithBody(httpRequest, + mockRequestBody, signerParams); + + signedRequestFuture.cancel(true); + + verify(mockSubscription).cancel(); + } +} diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerRequestParamsTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerRequestParamsTest.java index 58eaeb33b388..8b470001fbb0 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerRequestParamsTest.java +++ 
b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/Aws4SignerRequestParamsTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/BaseEventStreamAsyncAws4SignerTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/BaseEventStreamAsyncAws4SignerTest.java new file mode 100644 index 000000000000..6e78ea6f8f79 --- /dev/null +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/BaseEventStreamAsyncAws4SignerTest.java @@ -0,0 +1,89 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.signer.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Random; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.eventstream.HeaderValue; +import software.amazon.eventstream.Message; + +public class BaseEventStreamAsyncAws4SignerTest { + private static Map headers; + + @BeforeClass + public static void setup() { + headers = new LinkedHashMap<>(); + headers.put("header1", HeaderValue.fromInteger(42)); + headers.put("header2", HeaderValue.fromBoolean(false)); + headers.put("header3", HeaderValue.fromString("Hello world")); + } + + @Test + public void toDebugString_emptyPayload_generatesCorrectString() { + Message m = new Message(headers, new byte[0]); + + assertThat(BaseEventStreamAsyncAws4Signer.toDebugString(m, false)) + .isEqualTo("Message = {headers={header1={42}, header2={false}, header3={\"Hello world\"}}, payload=}"); + } + + @Test + public void toDebugString_noHeaders_emptyPayload_generatesCorrectString() { + Message m = new Message(new LinkedHashMap<>(), new byte[0]); + + assertThat(BaseEventStreamAsyncAws4Signer.toDebugString(m, false)) + .isEqualTo("Message = {headers={}, payload=}"); + } + + @Test + public void toDebugString_largePayload_truncate_generatesCorrectString() { + byte[] payload = new byte[128]; + new Random().nextBytes(payload); + Message m = new Message(headers, payload); + + byte[] first32 = Arrays.copyOf(payload, 32); + String expectedPayloadString = BinaryUtils.toHex(first32); + assertThat(BaseEventStreamAsyncAws4Signer.toDebugString(m, true)) + .isEqualTo("Message = {headers={header1={42}, header2={false}, header3={\"Hello world\"}}, payload=" + expectedPayloadString + "...}"); + } + + @Test + public void toDebugString_largePayload_noTruncate_generatesCorrectString() { + byte[] payload = new byte[128]; + new Random().nextBytes(payload); + Message m = new Message(headers, payload); + + String expectedPayloadString = 
BinaryUtils.toHex(payload); + assertThat(BaseEventStreamAsyncAws4Signer.toDebugString(m, false)) + .isEqualTo("Message = {headers={header1={42}, header2={false}, header3={\"Hello world\"}}, payload=" + expectedPayloadString + "}"); + } + + @Test + public void toDebugString_smallPayload_truncate_doesNotAddEllipsis() { + byte[] payload = new byte[8]; + new Random().nextBytes(payload); + Message m = new Message(headers, payload); + + String expectedPayloadString = BinaryUtils.toHex(payload); + assertThat(BaseEventStreamAsyncAws4Signer.toDebugString(m, true)) + .isEqualTo("Message = {headers={header1={42}, header2={false}, header3={\"Hello world\"}}, payload=" + expectedPayloadString + "}"); + } +} diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/DigestComputingSubscriberTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/DigestComputingSubscriberTest.java new file mode 100644 index 000000000000..c82dbec4c673 --- /dev/null +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/DigestComputingSubscriberTest.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.signer.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import io.reactivex.Flowable; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import org.junit.Test; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.utils.BinaryUtils; + +public class DigestComputingSubscriberTest { + + @Test + public void test_computesCorrectSha256() { + String testString = "AWS SDK for Java"; + String expectedDigest = "004c6bbd87e7fe70109b3bc23c8b1ab8f18a8bede0ed38c9233f6cdfd4f7b5d6"; + + DigestComputingSubscriber subscriber = DigestComputingSubscriber.forSha256(); + + Flowable publisher = Flowable.just(ByteBuffer.wrap(testString.getBytes(StandardCharsets.UTF_8))); + + publisher.subscribe(subscriber); + + String computedDigest = BinaryUtils.toHex(subscriber.digestBytes().join()); + + assertThat(computedDigest).isEqualTo(expectedDigest); + } + + @Test + public void test_futureCancelledBeforeSubscribe_cancelsSubscription() { + Subscription mockSubscription = mock(Subscription.class); + + DigestComputingSubscriber subscriber = DigestComputingSubscriber.forSha256(); + subscriber.digestBytes().cancel(true); + + subscriber.onSubscribe(mockSubscription); + + verify(mockSubscription).cancel(); + verify(mockSubscription, times(0)).request(anyLong()); + } + + @Test + public void test_publisherCallsOnError_errorPropagatedToFuture() { + Subscription mockSubscription = mock(Subscription.class); + + DigestComputingSubscriber subscriber = DigestComputingSubscriber.forSha256(); + 
subscriber.onSubscribe(mockSubscription); + + RuntimeException error = new RuntimeException("error"); + subscriber.onError(error); + + assertThatThrownBy(subscriber.digestBytes()::join).hasCause(error); + } +} diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/FifoCacheTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/FifoCacheTest.java index 244e56185729..7988bc17db3e 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/FifoCacheTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/FifoCacheTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/SignerKeyTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/SignerKeyTest.java new file mode 100644 index 000000000000..9100c163965c --- /dev/null +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/SignerKeyTest.java @@ -0,0 +1,50 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.signer.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import java.time.Instant; +import org.junit.Test; + +public class SignerKeyTest { + + @Test + public void isValidForDate_dayBefore_false() { + Instant signerDate = Instant.parse("2020-03-03T23:59:59Z"); + SignerKey key = new SignerKey(signerDate, new byte[0]); + Instant dayBefore = Instant.parse("2020-03-02T23:59:59Z"); + + assertThat(key.isValidForDate(dayBefore)).isFalse(); + } + + @Test + public void isValidForDate_sameDay_true() { + Instant signerDate = Instant.parse("2020-03-03T23:59:59Z"); + SignerKey key = new SignerKey(signerDate, new byte[0]); + Instant sameDay = Instant.parse("2020-03-03T01:02:03Z"); + + assertThat(key.isValidForDate(sameDay)).isTrue(); + } + + @Test + public void isValidForDate_dayAfter_false() { + Instant signerDate = Instant.parse("2020-03-03T23:59:59Z"); + SignerKey key = new SignerKey(signerDate, new byte[0]); + Instant dayAfter = Instant.parse("2020-03-04T00:00:00Z"); + + assertThat(key.isValidForDate(dayAfter)).isFalse(); + } +} diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/SignerTestUtils.java b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/SignerTestUtils.java index cb9bf935a674..9fc7be4e6fd0 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/SignerTestUtils.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/signer/internal/SignerTestUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/resources/jetty-logging.properties b/core/auth/src/test/resources/jetty-logging.properties index 0b7cafa3b426..6d7baed3e770 100644 --- a/core/auth/src/test/resources/jetty-logging.properties +++ b/core/auth/src/test/resources/jetty-logging.properties @@ -1,3 +1,18 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + # Set up logging implementation org.eclipse.jetty.util.log.class=org.eclipse.jetty.util.log.StdErrLog org.eclipse.jetty.LEVEL=INFO diff --git a/core/auth/src/test/resources/log4j.properties b/core/auth/src/test/resources/log4j.properties index 63c957f7a291..f59ce7e9b749 100644 --- a/core/auth/src/test/resources/log4j.properties +++ b/core/auth/src/test/resources/log4j.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/core/auth/src/test/resources/resources/process/linux-credentials-script.sh b/core/auth/src/test/resources/resources/process/linux-credentials-script.sh index 9d2b51a171cb..03c802ec3034 100644 --- a/core/auth/src/test/resources/resources/process/linux-credentials-script.sh +++ b/core/auth/src/test/resources/resources/process/linux-credentials-script.sh @@ -4,8 +4,12 @@ echo '"Version": 1,'; echo "\"AccessKeyId\": \"$1\","; echo "\"SecretAccessKey\": \"$2\""; if [[ $# -ge 3 ]]; then - echo ',' - echo "\"SessionToken\": \"$RANDOM\""; + echo ','; + if [[ "$3" = "RANDOM_TOKEN" ]]; then + echo "\"SessionToken\": \"$RANDOM\"" + else + echo "\"SessionToken\": \"$3\"" + fi; fi; if [[ $# -ge 4 ]]; then echo ',' diff --git a/core/auth/src/test/resources/resources/process/windows-credentials-script.bat b/core/auth/src/test/resources/resources/process/windows-credentials-script.bat index c8cabe4967aa..bb5ed72f070c 100644 --- a/core/auth/src/test/resources/resources/process/windows-credentials-script.bat +++ b/core/auth/src/test/resources/resources/process/windows-credentials-script.bat @@ -6,7 +6,11 @@ ECHO "AccessKeyId": "%1", ECHO "SecretAccessKey": "%2" IF NOT "%3"=="" ( ECHO , - ECHO "SessionToken": "%RANDOM" + IF "%3"=="RANDOM_TOKEN" ( + ECHO "SessionToken": "%RANDOM%" + ) ELSE ( + ECHO "SessionToken": "%3" + ) ) IF NOT "%4"=="" ( ECHO , diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index 7f1d11af901e..b79c768c2c5d 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -1,4 +1,19 @@ + + @@ -7,7 +22,7 @@ software.amazon.awssdk core - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT aws-core @@ -33,6 +48,11 @@ auth ${awsjavasdk.version} + + software.amazon.awssdk + profiles + ${awsjavasdk.version} + sdk-core software.amazon.awssdk @@ -43,6 +63,11 @@ http-client-spi ${awsjavasdk.version} + + 
software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + software.amazon.awssdk utils diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsExecutionAttribute.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsExecutionAttribute.java index a58e8485a991..4071e0bec0ff 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsExecutionAttribute.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsExecutionAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; +import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; @@ -33,5 +34,11 @@ public final class AwsExecutionAttribute extends SdkExecutionAttribute { */ public static final ExecutionAttribute AWS_REGION = new ExecutionAttribute<>("AwsRegion"); - private AwsExecutionAttribute() {} + /** + * The {@link AwsClientOption#ENDPOINT_PREFIX} for the client. + */ + public static final ExecutionAttribute ENDPOINT_PREFIX = new ExecutionAttribute<>("AwsEndpointPrefix"); + + private AwsExecutionAttribute() { + } } diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequest.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequest.java index 455f114d3a24..e993a66cbc2c 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequest.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java index 17cb2b02f41f..da8f7e7abd92 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponse.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponse.java index 61659aa0cb30..15f21e97457c 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponse.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponseMetadata.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponseMetadata.java index 9b61de5a9b05..f9e326f62317 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponseMetadata.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponseMetadata.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/DefaultAwsResponseMetadata.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/DefaultAwsResponseMetadata.java index 528792e54e2b..eaa8fa2fc2d5 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/DefaultAwsResponseMetadata.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/DefaultAwsResponseMetadata.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsAsyncClientBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsAsyncClientBuilder.java index 60d211345992..a318d2016c81 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsAsyncClientBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsAsyncClientBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsClientBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsClientBuilder.java index 1d086185633e..e8b22a8b28d7 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsClientBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsClientBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java index 98b1ae31d582..d775c9016cb8 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ package software.amazon.awssdk.awscore.client.builder; import java.net.URI; +import java.util.Collections; +import java.util.List; import java.util.Optional; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; @@ -23,20 +25,24 @@ import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.awscore.client.config.AwsAdvancedClientOption; import software.amazon.awssdk.awscore.client.config.AwsClientOption; -import software.amazon.awssdk.awscore.internal.EndpointUtils; +import software.amazon.awssdk.awscore.endpoint.DefaultServiceEndpointBuilder; +import software.amazon.awssdk.awscore.interceptor.HelpfulUnknownHostExceptionInterceptor; import software.amazon.awssdk.awscore.retry.AwsRetryPolicy; import software.amazon.awssdk.core.client.builder.SdkDefaultClientBuilder; import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.retry.RetryMode; +import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.regions.ServiceMetadata; -import software.amazon.awssdk.regions.providers.AwsRegionProvider; import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; -import software.amazon.awssdk.regions.providers.LazyAwsRegionProvider; import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.CollectionUtils; /** * An SDK-internal implementation of the methods in {@link AwsClientBuilder}, {@link AwsAsyncClientBuilder} and @@ -61,8 +67,6 @@ public abstract class AwsDefaultClientBuilder implements AwsClientBuilder { private static final String DEFAULT_ENDPOINT_PROTOCOL = "https"; - private static final AwsRegionProvider DEFAULT_REGION_PROVIDER = - new LazyAwsRegionProvider(DefaultAwsRegionProviderChain::new); protected AwsDefaultClientBuilder() { super(); @@ -108,13 +112,12 @@ protected AttributeMap serviceHttpConfig() { @Override protected final SdkClientConfiguration mergeChildDefaults(SdkClientConfiguration configuration) { SdkClientConfiguration config = mergeServiceDefaults(configuration); - return config.merge(c -> c.option(AwsClientOption.AWS_REGION, resolveRegion(config)) - .option(AwsAdvancedClientOption.ENABLE_DEFAULT_REGION_DETECTION, true) - 
.option(AwsClientOption.CREDENTIALS_PROVIDER, DefaultCredentialsProvider.create()) - .option(SdkClientOption.RETRY_POLICY, AwsRetryPolicy.defaultRetryPolicy()) + + return config.merge(c -> c.option(AwsAdvancedClientOption.ENABLE_DEFAULT_REGION_DETECTION, true) .option(SdkAdvancedClientOption.DISABLE_HOST_PREFIX_INJECTION, false) .option(AwsClientOption.SERVICE_SIGNING_NAME, signingName()) - .option(SdkClientOption.SERVICE_NAME, serviceName())); + .option(SdkClientOption.SERVICE_NAME, serviceName()) + .option(AwsClientOption.ENDPOINT_PREFIX, serviceEndpointPrefix())); } /** @@ -126,11 +129,19 @@ protected SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration con @Override protected final SdkClientConfiguration finalizeChildConfiguration(SdkClientConfiguration configuration) { - SdkClientConfiguration config = configuration.toBuilder() - .option(SdkClientOption.ENDPOINT, resolveEndpoint(configuration)) - .option(AwsClientOption.SIGNING_REGION, resolveSigningRegion(configuration)) - .build(); - return finalizeServiceConfiguration(config); + configuration = finalizeServiceConfiguration(configuration); + + configuration = configuration.toBuilder() + .option(AwsClientOption.AWS_REGION, resolveRegion(configuration)) + .build(); + + return configuration.toBuilder() + .option(AwsClientOption.CREDENTIALS_PROVIDER, resolveCredentials(configuration)) + .option(SdkClientOption.ENDPOINT, resolveEndpoint(configuration)) + .option(SdkClientOption.EXECUTION_INTERCEPTORS, addAwsInterceptors(configuration)) + .option(AwsClientOption.SIGNING_REGION, resolveSigningRegion(configuration)) + .option(SdkClientOption.RETRY_POLICY, resolveAwsRetryPolicy(configuration)) + .build(); } /** @@ -153,9 +164,15 @@ private Region resolveSigningRegion(SdkClientConfiguration config) { */ private URI resolveEndpoint(SdkClientConfiguration config) { return Optional.ofNullable(config.option(SdkClientOption.ENDPOINT)) - .orElseGet(() -> EndpointUtils.buildEndpoint(DEFAULT_ENDPOINT_PROTOCOL, - serviceEndpointPrefix(), - config.option(AwsClientOption.AWS_REGION))); + .orElseGet(() -> endpointFromConfig(config)); + } + + private URI endpointFromConfig(SdkClientConfiguration config) { + return new DefaultServiceEndpointBuilder(serviceEndpointPrefix(), DEFAULT_ENDPOINT_PROTOCOL) + .withRegion(config.option(AwsClientOption.AWS_REGION)) + .withProfileFile(config.option(SdkClientOption.PROFILE_FILE)) + .withProfileName(config.option(SdkClientOption.PROFILE_NAME)) + .getServiceEndpoint(); } /** @@ -175,7 +192,44 @@ private Region regionFromDefaultProvider(SdkClientConfiguration config) { if (defaultRegionDetectionEnabled != null && !defaultRegionDetectionEnabled) { throw new IllegalStateException("No region was configured, and use-region-provider-chain was disabled."); } - return DEFAULT_REGION_PROVIDER.getRegion(); + + ProfileFile profileFile = config.option(SdkClientOption.PROFILE_FILE); + String profileName = config.option(SdkClientOption.PROFILE_NAME); + return DefaultAwsRegionProviderChain.builder() + .profileFile(() -> profileFile) + .profileName(profileName) + .build() + .getRegion(); + } + + /** + * Resolve the credentials that should be used based on the customer's configuration. + */ + private AwsCredentialsProvider resolveCredentials(SdkClientConfiguration config) { + return config.option(AwsClientOption.CREDENTIALS_PROVIDER) != null + ? 
config.option(AwsClientOption.CREDENTIALS_PROVIDER) + : DefaultCredentialsProvider.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .build(); + } + + private RetryPolicy resolveAwsRetryPolicy(SdkClientConfiguration config) { + RetryPolicy policy = config.option(SdkClientOption.RETRY_POLICY); + + if (policy != null) { + if (policy.additionalRetryConditionsAllowed()) { + return AwsRetryPolicy.addRetryConditions(policy); + } else { + return policy; + } + } + + RetryMode retryMode = RetryMode.resolver() + .profileFile(() -> config.option(SdkClientOption.PROFILE_FILE)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .resolve(); + return AwsRetryPolicy.forRetryMode(retryMode); } @Override @@ -197,4 +251,14 @@ public final BuilderT credentialsProvider(AwsCredentialsProvider credentialsProv public final void setCredentialsProvider(AwsCredentialsProvider credentialsProvider) { credentialsProvider(credentialsProvider); } + + private List addAwsInterceptors(SdkClientConfiguration config) { + List interceptors = awsInterceptors(); + interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); + return interceptors; + } + + private List awsInterceptors() { + return Collections.singletonList(new HelpfulUnknownHostExceptionInterceptor()); + } } diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsSyncClientBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsSyncClientBuilder.java index 10aca94d429e..f4dbfdf4035f 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsSyncClientBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsSyncClientBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/config/AwsAdvancedClientOption.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/config/AwsAdvancedClientOption.java index 444460fbddfc..f004a74b5a9c 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/config/AwsAdvancedClientOption.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/config/AwsAdvancedClientOption.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/config/AwsClientOption.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/config/AwsClientOption.java index 3fba7909ad4e..44e6f69851c8 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/config/AwsClientOption.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/config/AwsClientOption.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -45,6 +45,14 @@ public final class AwsClientOption extends ClientOption { */ public static final AwsClientOption SERVICE_SIGNING_NAME = new AwsClientOption<>(String.class); + /** + * The first part of the URL in the DNS name for the service. Eg. in the endpoint "dynamodb.amazonaws.com", this is the + * "dynamodb". + * + * For standard services, this should match the "endpointPrefix" field in the AWS model. + */ + public static final AwsClientOption ENDPOINT_PREFIX = new AwsClientOption<>(String.class); + private AwsClientOption(Class valueClass) { super(valueClass); } diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsAsyncClientHandler.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsAsyncClientHandler.java index 8e8ddb510bf9..a731247f4f28 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsAsyncClientHandler.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsAsyncClientHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -28,6 +28,7 @@ import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.client.handler.SdkAsyncClientHandler; import software.amazon.awssdk.core.http.ExecutionContext; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; /** * Async client handler for AWS SDK clients. @@ -60,8 +61,8 @@ public Complet @Override protected ExecutionContext createExecutionContext( - ClientExecutionParams executionParams) { - return AwsClientHandlerUtils.createExecutionContext(executionParams, clientConfiguration); + ClientExecutionParams executionParams, ExecutionAttributes executionAttributes) { + return AwsClientHandlerUtils.createExecutionContext(executionParams, clientConfiguration, executionAttributes); } } diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java index add11ba27c8e..0147a7618e2b 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
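To make the AwsDefaultClientBuilder changes above concrete: when the customer supplies neither a credentials provider nor a retry policy, both are now resolved against the client's profile file and profile name. The following is a minimal, self-contained sketch of that resolution order using the same calls this diff introduces; ProfileFile.defaultProfileFile() and the "default" profile name are illustrative assumptions, not part of this change.

import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.awscore.retry.AwsRetryPolicy;
import software.amazon.awssdk.core.retry.RetryMode;
import software.amazon.awssdk.core.retry.RetryPolicy;
import software.amazon.awssdk.profiles.ProfileFile;

public class ProfileAwareDefaultsSketch {
    public static void main(String[] args) {
        ProfileFile profileFile = ProfileFile.defaultProfileFile(); // assumption: standard ~/.aws configuration
        String profileName = "default";                             // assumption: no explicit profile configured

        // Credentials fall back to a profile-aware DefaultCredentialsProvider when none was set on the client.
        AwsCredentialsProvider credentials = DefaultCredentialsProvider.builder()
                                                                       .profileFile(profileFile)
                                                                       .profileName(profileName)
                                                                       .build();

        // The retry policy is derived from the resolved RetryMode when the customer did not supply one.
        RetryMode retryMode = RetryMode.resolver()
                                       .profileFile(() -> profileFile)
                                       .profileName(profileName)
                                       .resolve();
        RetryPolicy retryPolicy = AwsRetryPolicy.forRetryMode(retryMode);

        System.out.println(credentials + " / " + retryPolicy);
    }
}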
@@ -20,6 +20,7 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.nio.ByteBuffer; +import java.time.Duration; import java.util.Map; import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkProtectedApi; @@ -30,6 +31,7 @@ import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.awscore.client.config.AwsAdvancedClientOption; import software.amazon.awssdk.awscore.client.config.AwsClientOption; +import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.SdkResponse; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; @@ -41,8 +43,10 @@ import software.amazon.awssdk.core.interceptor.InterceptorContext; import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.Validate; import software.amazon.eventstream.HeaderValue; @@ -57,7 +61,8 @@ private AwsClientHandlerUtils() { static ExecutionContext createExecutionContext( ClientExecutionParams executionParams, - SdkClientConfiguration clientConfig) { + SdkClientConfiguration clientConfig, + ExecutionAttributes executionAttributes) { SdkRequest originalRequest = executionParams.getInput(); AwsCredentialsProvider clientCredentials = clientConfig.option(AwsClientOption.CREDENTIALS_PROVIDER); @@ -67,21 +72,28 @@ static ExecutionContext .flatMap(AwsRequestOverrideConfiguration::credentialsProvider) .orElse(clientCredentials); + long credentialsResolveStart = System.nanoTime(); AwsCredentials credentials = credentialsProvider.resolveCredentials(); + Duration fetchDuration = Duration.ofNanos(System.nanoTime() - credentialsResolveStart); + MetricCollector metricCollector = resolveMetricCollector(executionParams); + metricCollector.reportMetric(CoreMetric.CREDENTIALS_FETCH_DURATION, fetchDuration); Validate.validState(credentials != null, "Credential providers must never return null."); - ExecutionAttributes executionAttributes = new ExecutionAttributes() + executionAttributes .putAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG, clientConfig.option(SdkClientOption.SERVICE_CONFIGURATION)) .putAttribute(AwsSignerExecutionAttribute.AWS_CREDENTIALS, credentials) .putAttribute(AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME, clientConfig.option(AwsClientOption.SERVICE_SIGNING_NAME)) .putAttribute(AwsExecutionAttribute.AWS_REGION, clientConfig.option(AwsClientOption.AWS_REGION)) + .putAttribute(AwsExecutionAttribute.ENDPOINT_PREFIX, clientConfig.option(AwsClientOption.ENDPOINT_PREFIX)) .putAttribute(AwsSignerExecutionAttribute.SIGNING_REGION, clientConfig.option(AwsClientOption.SIGNING_REGION)) .putAttribute(SdkInternalExecutionAttribute.IS_FULL_DUPLEX, executionParams.isFullDuplex()) .putAttribute(SdkExecutionAttribute.CLIENT_TYPE, clientConfig.option(SdkClientOption.CLIENT_TYPE)) .putAttribute(SdkExecutionAttribute.SERVICE_NAME, clientConfig.option(SdkClientOption.SERVICE_NAME)) - .putAttribute(SdkExecutionAttribute.OPERATION_NAME, executionParams.getOperationName()); + .putAttribute(SdkExecutionAttribute.OPERATION_NAME, executionParams.getOperationName()) + 
.putAttribute(SdkExecutionAttribute.ENDPOINT_OVERRIDDEN, + clientConfig.option(SdkClientOption.ENDPOINT_OVERRIDDEN)); ExecutionInterceptorChain executionInterceptorChain = new ExecutionInterceptorChain(clientConfig.option(SdkClientOption.EXECUTION_INTERCEPTORS)); @@ -94,6 +106,7 @@ static ExecutionContext .build()) .executionAttributes(executionAttributes) .signer(computeSigner(originalRequest, clientConfig)) + .metricCollector(metricCollector) .build(); } @@ -125,7 +138,15 @@ public static ByteBuffer encodeEventStreamRequestToByteBuffer(SdkHttpFullRequest private static Signer computeSigner(SdkRequest originalRequest, SdkClientConfiguration clientConfiguration) { return originalRequest.overrideConfiguration() - .flatMap(config -> config.signer()) + .flatMap(RequestOverrideConfiguration::signer) .orElse(clientConfiguration.option(AwsAdvancedClientOption.SIGNER)); } + + private static MetricCollector resolveMetricCollector(ClientExecutionParams params) { + MetricCollector metricCollector = params.getMetricCollector(); + if (metricCollector == null) { + metricCollector = MetricCollector.create("ApiCall"); + } + return metricCollector; + } } diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsSyncClientHandler.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsSyncClientHandler.java index dd6f96414613..3edc19686430 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsSyncClientHandler.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsSyncClientHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -64,13 +64,19 @@ public ReturnT @Override protected ExecutionContext createExecutionContext( - ClientExecutionParams executionParams) { - return AwsClientHandlerUtils.createExecutionContext(executionParams, clientConfiguration); + ClientExecutionParams executionParams, ExecutionAttributes executionAttributes) { + return AwsClientHandlerUtils.createExecutionContext(executionParams, clientConfiguration, executionAttributes); } private ClientExecutionParams addCrc32Validation( ClientExecutionParams executionParams) { - return executionParams.withResponseHandler(new Crc32ValidationResponseHandler<>(executionParams.getResponseHandler())); + if (executionParams.getCombinedResponseHandler() != null) { + return executionParams.withCombinedResponseHandler( + new Crc32ValidationResponseHandler<>(executionParams.getCombinedResponseHandler())); + } else { + return executionParams.withResponseHandler( + new Crc32ValidationResponseHandler<>(executionParams.getResponseHandler())); + } } /** diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/DefaultServiceEndpointBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/DefaultServiceEndpointBuilder.java index e4cafc3c0c3b..0320e3f7dfa2 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/DefaultServiceEndpointBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/DefaultServiceEndpointBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -17,8 +17,11 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.List; import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.regions.ServiceMetadata; import software.amazon.awssdk.utils.Validate; @@ -31,10 +34,12 @@ // TODO We may not need this anymore, we should default to AWS partition when resolving // a region we don't know about yet. public final class DefaultServiceEndpointBuilder { - private final String serviceName; private final String protocol; + private Region region; + private ProfileFile profileFile; + private String profileName; public DefaultServiceEndpointBuilder(String serviceName, String protocol) { this.serviceName = Validate.paramNotNull(serviceName, "serviceName"); @@ -49,12 +54,37 @@ public DefaultServiceEndpointBuilder withRegion(Region region) { return this; } + public DefaultServiceEndpointBuilder withProfileFile(ProfileFile profileFile) { + this.profileFile = profileFile; + return this; + } + + public DefaultServiceEndpointBuilder withProfileName(String profileName) { + this.profileName = profileName; + return this; + } + public URI getServiceEndpoint() { - ServiceMetadata serviceMetadata = ServiceMetadata.of(serviceName); - return withProtocol(serviceMetadata.endpointFor(region)); + ServiceMetadata serviceMetadata = ServiceMetadata.of(serviceName) + .reconfigure(c -> c.profileFile(() -> profileFile) + .profileName(profileName)); + URI endpoint = addProtocolToServiceEndpoint(serviceMetadata.endpointFor(region)); + + if (endpoint.getHost() == null) { + String error = "Configured region (" + region + ") resulted in an invalid URI: " + endpoint; + + List exampleRegions = serviceMetadata.regions(); + if (!exampleRegions.isEmpty()) { + error += " Valid region examples: " + exampleRegions; + } + + throw SdkClientException.create(error); + } + + return endpoint; } - private URI withProtocol(URI endpointWithoutProtocol) throws IllegalArgumentException { + private URI addProtocolToServiceEndpoint(URI endpointWithoutProtocol) throws IllegalArgumentException { try { return new URI(protocol + "://" + endpointWithoutProtocol); } catch (URISyntaxException e) { diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/DefaultEventStreamResponseHandlerBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/DefaultEventStreamResponseHandlerBuilder.java index eb18057ce9b9..c5342aabd402 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/DefaultEventStreamResponseHandlerBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/DefaultEventStreamResponseHandlerBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
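The reworked DefaultServiceEndpointBuilder above now threads the profile file and profile name into ServiceMetadata and fails with a descriptive SdkClientException (including example regions) when the resolved endpoint has no host. A rough usage sketch mirroring endpointFromConfig() from the builder change earlier in this diff; the "dynamodb" endpoint prefix, Region.US_EAST_1, and ProfileFile.defaultProfileFile() are illustrative values rather than part of the change.

import java.net.URI;
import software.amazon.awssdk.awscore.endpoint.DefaultServiceEndpointBuilder;
import software.amazon.awssdk.profiles.ProfileFile;
import software.amazon.awssdk.regions.Region;

public class EndpointResolutionSketch {
    public static void main(String[] args) {
        ProfileFile profileFile = ProfileFile.defaultProfileFile(); // assumption: standard profile location

        URI endpoint = new DefaultServiceEndpointBuilder("dynamodb", "https") // endpoint prefix + protocol
                .withRegion(Region.US_EAST_1)
                .withProfileFile(profileFile)
                .withProfileName("default")
                .getServiceEndpoint();

        System.out.println(endpoint); // expected to resolve to https://dynamodb.us-east-1.amazonaws.com
    }
}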
diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java index 174378f43377..4f4ec847504b 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ import static java.util.Collections.singletonList; import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZN_REQUEST_ID_HEADER; +import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZN_REQUEST_ID_HEADERS; import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZ_ID_2_HEADER; import static software.amazon.awssdk.utils.FunctionalUtils.runAndLogError; @@ -49,6 +50,7 @@ import software.amazon.awssdk.http.SdkCancellationException; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.http.SdkHttpUtils; import software.amazon.eventstream.Message; import software.amazon.eventstream.MessageDecoder; @@ -193,9 +195,10 @@ public CompletableFuture prepare() { @Override public void onResponse(SdkResponse response) { if (response != null && response.sdkHttpResponse() != null) { - this.requestId = response.sdkHttpResponse() - .firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER) - .orElse(null); + this.requestId = SdkHttpUtils.firstMatchingHeaderFromCollection(response.sdkHttpResponse().headers(), + X_AMZN_REQUEST_ID_HEADERS) + .orElse(null); + this.extendedRequestId = response.sdkHttpResponse() .firstMatchingHeader(X_AMZ_ID_2_HEADER) .orElse(null); @@ -391,7 +394,9 @@ public void onError(Throwable throwable) { @Override public void onComplete() { // Add the special on complete event to signal drainEvents to complete the subscriber - eventsToDeliver.add(ON_COMPLETE_EVENT); + synchronized (eventsToDeliver) { + eventsToDeliver.add(ON_COMPLETE_EVENT); + } drainEventsIfNotAlready(); transformFuture.complete(null); } diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamResponseHandler.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamResponseHandler.java index 77baff4cf6a6..093ab3420a75 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamResponseHandler.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamResponseHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
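The EventStreamAsyncResponseTransformer change above switches from a single request-id header to the X_AMZN_REQUEST_ID_HEADERS collection so that either common spelling of the header is picked up. Below is a small sketch of that lookup in isolation, assuming illustrative header values; the Arrays.asList of the two spellings merely stands in for the real constant in HttpResponseHandler.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import software.amazon.awssdk.utils.http.SdkHttpUtils;

public class RequestIdLookupSketch {
    public static void main(String[] args) {
        // Hypothetical response headers; the real code reads them from the SdkHttpResponse of the service reply.
        Map<String, List<String>> headers =
                Collections.singletonMap("x-amz-request-id", Collections.singletonList("EXAMPLE123"));

        // Stand-in for HttpResponseHandler.X_AMZN_REQUEST_ID_HEADERS.
        List<String> requestIdHeaders = Arrays.asList("x-amzn-RequestId", "x-amz-request-id");

        Optional<String> requestId = SdkHttpUtils.firstMatchingHeaderFromCollection(headers, requestIdHeaders);
        System.out.println(requestId.orElse("<none>"));
    }
}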
diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamResponseHandlerFromBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamResponseHandlerFromBuilder.java index ee1c48888afc..2cfe1bf6c8e4 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamResponseHandlerFromBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamResponseHandlerFromBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamTaggedUnionJsonMarshaller.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamTaggedUnionJsonMarshaller.java index cc1dae4628ed..be7fa785cd4d 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamTaggedUnionJsonMarshaller.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamTaggedUnionJsonMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamTaggedUnionPojoSupplier.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamTaggedUnionPojoSupplier.java index ead629f4e9b0..f780ae9ecb20 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamTaggedUnionPojoSupplier.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamTaggedUnionPojoSupplier.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/RestEventStreamAsyncResponseTransformer.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/RestEventStreamAsyncResponseTransformer.java index aecadf150b45..5e9f84cea3f2 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/RestEventStreamAsyncResponseTransformer.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/RestEventStreamAsyncResponseTransformer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -17,7 +17,6 @@ import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; - import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.SdkResponse; import software.amazon.awssdk.core.async.AsyncResponseTransformer; diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsErrorDetails.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsErrorDetails.java index 289d4a188238..8073839e45d6 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsErrorDetails.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsErrorDetails.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -204,7 +204,8 @@ protected static final class BuilderImpl implements Builder { private SdkHttpResponse sdkHttpResponse; private SdkBytes rawResponse; - private BuilderImpl() {} + private BuilderImpl() { + } private BuilderImpl(AwsErrorDetails awsErrorDetails) { this.errorMessage = awsErrorDetails.errorMessage(); diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsServiceException.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsServiceException.java index 05ffe3c1bf61..9f93a307baa2 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsServiceException.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsServiceException.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -65,7 +65,8 @@ public String getMessage() { return awsErrorDetails().errorMessage() + " (Service: " + awsErrorDetails().serviceName() + ", Status Code: " + statusCode() + - ", Request ID: " + requestId() + ")"; + ", Request ID: " + requestId() + + ", Extended Request ID: " + extendedRequestId() + ")"; } return super.getMessage(); @@ -164,6 +165,9 @@ public interface Builder extends SdkServiceException.Builder { @Override Builder requestId(String requestId); + @Override + Builder extendedRequestId(String extendedRequestId); + @Override Builder statusCode(int statusCode); @@ -232,6 +236,12 @@ public Builder requestId(String requestId) { return this; } + @Override + public Builder extendedRequestId(String extendedRequestId) { + this.extendedRequestId = extendedRequestId; + return this; + } + @Override public Builder statusCode(int statusCode) { this.statusCode = statusCode; diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/GlobalServiceExecutionInterceptor.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/GlobalServiceExecutionInterceptor.java index 244f85e73437..43f0f0566908 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/GlobalServiceExecutionInterceptor.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/GlobalServiceExecutionInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,41 +15,22 @@ package software.amazon.awssdk.awscore.interceptor; -import java.net.UnknownHostException; import software.amazon.awssdk.annotations.SdkProtectedApi; -import software.amazon.awssdk.awscore.AwsExecutionAttribute; -import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; /** - * An interceptor that can be used for global services that will tell the customer when they're using a global service that - * doesn't support non-global regions. + * A more specific version of {@link HelpfulUnknownHostExceptionInterceptor} that was used for older IAM clients. This can be + * removed if we ever drop backwards-compatibility with older IAM client versions, because newer IAM client versions do not + * depend on this interceptor. */ @SdkProtectedApi -public final class GlobalServiceExecutionInterceptor implements ExecutionInterceptor { - @Override - public void onExecutionFailure(Context.FailedExecution context, ExecutionAttributes executionAttributes) { - if (hasCause(context.exception(), UnknownHostException.class) && - !executionAttributes.getAttribute(AwsExecutionAttribute.AWS_REGION).isGlobalRegion()) { - throw SdkClientException.builder() - .message("This is a global service. Consider setting AWS_GLOBAL or another global " + - "region when creating your client.") - .cause(context.exception()) - .build(); - } - } - - private boolean hasCause(Throwable thrown, Class cause) { - if (thrown == null) { - return false; - } +public class GlobalServiceExecutionInterceptor implements ExecutionInterceptor { + private static final HelpfulUnknownHostExceptionInterceptor DELEGATE = new HelpfulUnknownHostExceptionInterceptor(); - if (cause.isAssignableFrom(thrown.getClass())) { - return true; - } - - return hasCause(thrown.getCause(), cause); + @Override + public Throwable modifyException(Context.FailedExecution context, ExecutionAttributes executionAttributes) { + return DELEGATE.modifyException(context, executionAttributes); } } diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/HelpfulUnknownHostExceptionInterceptor.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/HelpfulUnknownHostExceptionInterceptor.java new file mode 100644 index 000000000000..2572e1b79f32 --- /dev/null +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/HelpfulUnknownHostExceptionInterceptor.java @@ -0,0 +1,137 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.awscore.interceptor; + +import java.net.UnknownHostException; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.AwsExecutionAttribute; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.regions.PartitionMetadata; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.regions.RegionMetadata; +import software.amazon.awssdk.regions.ServiceMetadata; +import software.amazon.awssdk.regions.ServicePartitionMetadata; + +/** + * This interceptor will monitor for {@link UnknownHostException}s and provide the customer with additional information they can + * use to debug or fix the problem. + */ +@SdkInternalApi +public final class HelpfulUnknownHostExceptionInterceptor implements ExecutionInterceptor { + @Override + public Throwable modifyException(Context.FailedExecution context, ExecutionAttributes executionAttributes) { + if (!hasCause(context.exception(), UnknownHostException.class)) { + return context.exception(); + } + + StringBuilder error = new StringBuilder(); + error.append("Received an UnknownHostException when attempting to interact with a service. See cause for the " + + "exact endpoint that is failing to resolve. "); + + Optional globalRegionErrorDetails = getGlobalRegionErrorDetails(executionAttributes); + + if (globalRegionErrorDetails.isPresent()) { + error.append(globalRegionErrorDetails.get()); + } else { + error.append("If this is happening on an endpoint that previously worked, there may be a network connectivity " + + "issue or your DNS cache could be storing endpoints for too long."); + } + + return SdkClientException.builder().message(error.toString()).cause(context.exception()).build(); + } + + /** + * If the customer is interacting with a global service (one with a single endpoint/region for an entire partition), this + * will return error details that can instruct the customer on how to configure their client for success. 
+ */ + private Optional getGlobalRegionErrorDetails(ExecutionAttributes executionAttributes) { + Region clientRegion = clientRegion(executionAttributes); + if (clientRegion.isGlobalRegion()) { + return Optional.empty(); + } + + List globalPartitionsForService = globalPartitionsForService(executionAttributes); + if (globalPartitionsForService.isEmpty()) { + return Optional.empty(); + } + + String clientPartition = Optional.ofNullable(clientRegion.metadata()) + .map(RegionMetadata::partition) + .map(PartitionMetadata::id) + .orElse(null); + + Optional globalRegionForClientRegion = + globalPartitionsForService.stream() + .filter(p -> p.partition().id().equals(clientPartition)) + .findAny() + .flatMap(ServicePartitionMetadata::globalRegion); + + if (!globalRegionForClientRegion.isPresent()) { + String globalRegionsForThisService = globalPartitionsForService.stream() + .map(ServicePartitionMetadata::globalRegion) + .filter(Optional::isPresent) + .map(Optional::get) + .filter(Region::isGlobalRegion) + .map(Region::id) + .collect(Collectors.joining("/")); + + return Optional.of("This specific service may be a global service, in which case you should configure a global " + + "region like " + globalRegionsForThisService + " on the client."); + } + + Region globalRegion = globalRegionForClientRegion.get(); + + return Optional.of("This specific service is global in the same partition as the region configured on this client (" + + clientRegion + "). If this is the first time you're trying to talk to this service in this region, " + + "you should try configuring the global region on your client, instead: " + globalRegion); + } + + /** + * Retrieve the region configured on the client. + */ + private Region clientRegion(ExecutionAttributes executionAttributes) { + return executionAttributes.getAttribute(AwsExecutionAttribute.AWS_REGION); + } + + /** + * Retrieve all global partitions for the AWS service that we're interacting with. + */ + private List globalPartitionsForService(ExecutionAttributes executionAttributes) { + return ServiceMetadata.of(executionAttributes.getAttribute(AwsExecutionAttribute.ENDPOINT_PREFIX)) + .servicePartitions() + .stream() + .filter(sp -> sp.globalRegion().isPresent()) + .collect(Collectors.toList()); + } + + private boolean hasCause(Throwable thrown, Class cause) { + if (thrown == null) { + return false; + } + + if (cause.isAssignableFrom(thrown.getClass())) { + return true; + } + + return hasCause(thrown.getCause(), cause); + } +} diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsErrorCode.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsErrorCode.java index 5d632e4c125a..19589e4735bd 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsErrorCode.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsErrorCode.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
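For context on the HelpfulUnknownHostExceptionInterceptor added above: it uses ServiceMetadata.servicePartitions() to work out whether the service is partition-global and, if so, which global region to suggest in the error message. The sketch below performs the same metadata lookup in isolation; the "iam" endpoint prefix is only an illustrative choice of a global service and is not taken from this diff.

import java.util.Optional;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.regions.ServiceMetadata;
import software.amazon.awssdk.regions.ServicePartitionMetadata;

public class GlobalRegionLookupSketch {
    public static void main(String[] args) {
        // List the global regions that ServiceMetadata reports for a service,
        // the same lookup the interceptor performs when building its hint.
        ServiceMetadata.of("iam").servicePartitions().stream()
                       .map(ServicePartitionMetadata::globalRegion)
                       .filter(Optional::isPresent)
                       .map(Optional::get)
                       .map(Region::id)
                       .forEach(System.out::println); // e.g. aws-global for the aws partition
    }
}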
@@ -44,6 +44,8 @@ public final class AwsErrorCode { throttlingErrorCodes.add("BandwidthLimitExceeded"); throttlingErrorCodes.add("RequestThrottled"); throttlingErrorCodes.add("RequestThrottledException"); + throttlingErrorCodes.add("EC2ThrottledException"); + throttlingErrorCodes.add("TransactionInProgressException"); THROTTLING_ERROR_CODES = unmodifiableSet(throttlingErrorCodes); Set definiteClockSkewErrorCodes = new HashSet<>(3); @@ -60,6 +62,8 @@ public final class AwsErrorCode { Set retryableErrorCodes = new HashSet<>(1); retryableErrorCodes.add("PriorRequestNotComplete"); + retryableErrorCodes.add("RequestTimeout"); + retryableErrorCodes.add("RequestTimeoutException"); RETRYABLE_ERROR_CODES = unmodifiableSet(retryableErrorCodes); } diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsStatusCode.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsStatusCode.java index 7a49baee3e5b..eaf254ddb8ab 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsStatusCode.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsStatusCode.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/EndpointUtils.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/EndpointUtils.java deleted file mode 100644 index 6736b1740777..000000000000 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/EndpointUtils.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.awscore.internal; - -import java.net.URI; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.awscore.endpoint.DefaultServiceEndpointBuilder; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.utils.Validate; - -/** - * A collection of utility methods centered around generating service endpoints from various pieces of information. - */ -@SdkInternalApi -public final class EndpointUtils { - private EndpointUtils() {} - - /** - * Generate an endpoint from the provided endpoint protocol, url prefix, and region. - * - * @param protocol The protocol that should be used when communicating with AWS (usually http or https). - * @param serviceEndpointPrefix The endpoint prefix that should be used when communicating with AWS (usually the - * endpointPrefix in the service's model). - * @param region The AWS region that should be communicated with. - * @return The AWS endpoint to use for communication. 
- */ - public static URI buildEndpoint(String protocol, String serviceEndpointPrefix, Region region) { - Validate.paramNotNull(protocol, "protocol"); - Validate.paramNotNull(serviceEndpointPrefix, "serviceEndpointPrefix"); - Validate.paramNotNull(region, "region"); - return new DefaultServiceEndpointBuilder(serviceEndpointPrefix, protocol).withRegion(region).getServiceEndpoint(); - } -} diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/client/config/AwsClientOptionValidation.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/client/config/AwsClientOptionValidation.java index 2c9fc5e77478..5dce8619099a 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/client/config/AwsClientOptionValidation.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/client/config/AwsClientOptionValidation.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -27,7 +27,8 @@ */ @SdkInternalApi public final class AwsClientOptionValidation extends SdkClientOptionValidation { - private AwsClientOptionValidation() {} + private AwsClientOptionValidation() { + } public static void validateAsyncClientOptions(SdkClientConfiguration c) { validateClientOptions(c); diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/presigner/PresignRequest.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/presigner/PresignRequest.java new file mode 100644 index 000000000000..9e24358a47f7 --- /dev/null +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/presigner/PresignRequest.java @@ -0,0 +1,100 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.awscore.presigner; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.Validate; + +/** + * The base class for all presign requests. + */ +@SdkPublicApi +public abstract class PresignRequest { + private final Duration signatureDuration; + + protected PresignRequest(DefaultBuilder builder) { + this.signatureDuration = Validate.paramNotNull(builder.signatureDuration, "signatureDuration"); + } + + /** + * Retrieves the duration for which this presigned request should be valid. After this time has + * expired, attempting to use the presigned request will fail.  + */ + public Duration signatureDuration() { + return this.signatureDuration; + } + + /** + * The base interface for all presign request builders. + */ + @SdkPublicApi + public interface Builder { + /** + * Specifies the duration for which this presigned request should be valid. 
After this time has + * expired, attempting to use the presigned request will fail.  + */ + Builder signatureDuration(Duration signatureDuration); + + /** + * Build the presigned request, based on the configuration on this builder. + */ + PresignRequest build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + PresignRequest that = (PresignRequest) o; + + return signatureDuration.equals(that.signatureDuration); + } + + @Override + public int hashCode() { + return signatureDuration.hashCode(); + } + + @SdkProtectedApi + protected abstract static class DefaultBuilder> implements Builder { + private Duration signatureDuration; + + protected DefaultBuilder() { + } + + protected DefaultBuilder(PresignRequest request) { + this.signatureDuration = request.signatureDuration; + } + + @Override + public B signatureDuration(Duration signatureDuration) { + this.signatureDuration = signatureDuration; + return thisBuilder(); + } + + @SuppressWarnings("unchecked") + private B thisBuilder() { + return (B) this; + } + } +} diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/presigner/PresignedRequest.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/presigner/PresignedRequest.java new file mode 100644 index 000000000000..f3da23bf11f5 --- /dev/null +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/presigner/PresignedRequest.java @@ -0,0 +1,233 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.awscore.presigner; + +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.net.URL; +import java.time.Instant; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.utils.Validate; + + +/** + * The base class for all presigned requests. + *

    + * The {@link #isBrowserExecutable} method can be used to determine whether this request can be executed by a web browser. + */ +@SdkPublicApi +public abstract class PresignedRequest { + private final URL url; + private final Instant expiration; + private final boolean isBrowserExecutable; + private final Map> signedHeaders; + private final SdkBytes signedPayload; + private final SdkHttpRequest httpRequest; + + protected PresignedRequest(DefaultBuilder builder) { + this.expiration = Validate.notNull(builder.expiration, "expiration"); + this.isBrowserExecutable = Validate.notNull(builder.isBrowserExecutable, "isBrowserExecutable"); + this.signedHeaders = Validate.notEmpty(builder.signedHeaders, "signedHeaders"); + this.signedPayload = builder.signedPayload; + this.httpRequest = Validate.notNull(builder.httpRequest, "httpRequest"); + this.url = invokeSafely(httpRequest.getUri()::toURL); + } + + /** + * The URL that the presigned request will execute against. The {@link #isBrowserExecutable} method can be used to + * determine whether this request will work in a browser. + */ + public URL url() { + return url; + } + + /** + * The exact SERVICE time that the request will expire. After this time, attempting to execute the request + * will fail. + *

    + * This may differ from the local clock, based on the skew between the local and AWS service clocks. + */ + public Instant expiration() { + return expiration; + } + + /** + * Whether the url returned by the url method can be executed in a browser. + *

+     * This is true when the HTTP request method is GET and all data included in the signature will be sent by a standard web
+     * browser.
+     */
+    public boolean isBrowserExecutable() {
+        return isBrowserExecutable;
+    }
+
+    /**
+     * Returns the subset of headers that were signed, and MUST be included in the presigned request to prevent
+     * the request from failing.
+     */
+    public Map<String, List<String>> signedHeaders() {
+        return signedHeaders;
+    }
+
+    /**
+     * Returns the payload that was signed, or Optional.empty() if there is no signed payload with this request.
+     */
+    public Optional<SdkBytes> signedPayload() {
+        return Optional.ofNullable(signedPayload);
+    }
+
+    /**
+     * The entire SigV4 query-parameter signed request (minus the payload) that can be transmitted as-is to a
+     * service using any HTTP client that implements the SDK's HTTP client SPI.
+     * <p>
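For instance, a presigned GET can be executed with a plain HTTP client by combining url() and signedHeaders(); this sketch uses the JDK's HttpURLConnection rather than an SDK HTTP client:

```java
import java.io.InputStream;
import java.net.HttpURLConnection;
import software.amazon.awssdk.awscore.presigner.PresignedRequest;

final class PresignedRequestExecutor {
    // Executes a presigned GET with HttpURLConnection. Every signed header must be
    // sent exactly as returned by signedHeaders(), or the signature check will fail.
    static InputStream execute(PresignedRequest presigned) throws Exception {
        HttpURLConnection connection = (HttpURLConnection) presigned.url().openConnection();
        presigned.signedHeaders().forEach(
            (name, values) -> values.forEach(value -> connection.addRequestProperty(name, value)));
        return connection.getInputStream();
    }
}
```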

    + * This request includes signed AND unsigned headers. + */ + public SdkHttpRequest httpRequest() { + return httpRequest; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + PresignedRequest that = (PresignedRequest) o; + + if (isBrowserExecutable != that.isBrowserExecutable) { + return false; + } + if (!expiration.equals(that.expiration)) { + return false; + } + if (!signedHeaders.equals(that.signedHeaders)) { + return false; + } + if (signedPayload != null ? !signedPayload.equals(that.signedPayload) : that.signedPayload != null) { + return false; + } + return httpRequest.equals(that.httpRequest); + } + + @Override + public int hashCode() { + int result = expiration.hashCode(); + result = 31 * result + (isBrowserExecutable ? 1 : 0); + result = 31 * result + signedHeaders.hashCode(); + result = 31 * result + (signedPayload != null ? signedPayload.hashCode() : 0); + result = 31 * result + httpRequest.hashCode(); + return result; + } + + @SdkPublicApi + public interface Builder { + /** + * Configure the exact SERVICE time that the request will expire. After this time, attempting to execute the request + * will fail. + */ + Builder expiration(Instant expiration); + + /** + * Configure whether the url returned by the url method can be executed in a browser. + */ + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + /** + * Configure the subset of headers that were signed, and MUST be included in the presigned request to prevent + * the request from failing. + */ + Builder signedHeaders(Map> signedHeaders); + + /** + * Configure the payload that was signed. + */ + Builder signedPayload(SdkBytes signedPayload); + + /** + * Configure the entire SigV4 query-parameter signed request (minus the payload), that can be transmitted as-is to a + * service using any HTTP client that implement the SDK's HTTP client SPI. 
+ */ + Builder httpRequest(SdkHttpRequest httpRequest); + + PresignedRequest build(); + } + + + + @SdkProtectedApi + protected abstract static class DefaultBuilder> implements Builder { + private Instant expiration; + private Boolean isBrowserExecutable; + private Map> signedHeaders; + private SdkBytes signedPayload; + private SdkHttpRequest httpRequest; + + protected DefaultBuilder() { + } + + protected DefaultBuilder(PresignedRequest request) { + this.expiration = request.expiration; + this.isBrowserExecutable = request.isBrowserExecutable; + this.signedHeaders = request.signedHeaders; + this.signedPayload = request.signedPayload; + this.httpRequest = request.httpRequest; + } + + @Override + public B expiration(Instant expiration) { + this.expiration = expiration; + return thisBuilder(); + } + + @Override + public B isBrowserExecutable(Boolean isBrowserExecutable) { + this.isBrowserExecutable = isBrowserExecutable; + return thisBuilder(); + } + + @Override + public B signedHeaders(Map> signedHeaders) { + this.signedHeaders = signedHeaders; + return thisBuilder(); + } + + @Override + public B signedPayload(SdkBytes signedPayload) { + this.signedPayload = signedPayload; + return thisBuilder(); + } + + @Override + public B httpRequest(SdkHttpRequest httpRequest) { + this.httpRequest = httpRequest; + return thisBuilder(); + } + + @SuppressWarnings("unchecked") + private B thisBuilder() { + return (B) this; + } + } +} \ No newline at end of file diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/presigner/SdkPresigner.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/presigner/SdkPresigner.java new file mode 100644 index 000000000000..83d513174d8f --- /dev/null +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/presigner/SdkPresigner.java @@ -0,0 +1,70 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.awscore.presigner; + +import java.net.URI; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; +import software.amazon.awssdk.utils.SdkAutoCloseable; + +/** + * The base interface for all SDK presigners. + */ +@SdkPublicApi +public interface SdkPresigner extends SdkAutoCloseable { + /** + * Close this presigner, releasing any resources it might have acquired. It is recommended to invoke this method whenever + * the presigner is done being used, to prevent resource leaks. + *

    + * For example, some {@link AwsCredentialsProvider} implementations hold resources that could be released by this method. + */ + @Override + void close(); + + /** + * The base interface for all SDK presigner builders. + */ + @SdkPublicApi + interface Builder { + /** + * Configure the region that should be used for request signing. + *

    + * If this is not set, the {@link DefaultAwsRegionProviderChain} will be consulted to determine the region. + */ + Builder region(Region region); + + /** + * Configure the credentials that should be used for request signing. + *
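Putting the builder options together, a concrete presigner might be configured like this (again assuming the S3 module's S3Presigner as the concrete implementation; the region, profile name, and endpoint are placeholders):

```java
import java.net.URI;
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;

public class PresignerConfigExample {
    public static void main(String[] args) {
        // All three settings are optional: region and credentials fall back to the default
        // provider chains, and endpointOverride replaces the region-derived endpoint.
        S3Presigner presigner = S3Presigner.builder()
                                           .region(Region.US_WEST_2)
                                           .credentialsProvider(ProfileCredentialsProvider.create("my-profile"))
                                           .endpointOverride(URI.create("https://s3.us-west-2.amazonaws.com"))
                                           .build();

        // ... presign requests ...

        // Close when done so credential providers and other resources are released.
        presigner.close();
    }
}
```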

    + * If this is not set, the {@link DefaultCredentialsProvider} will be used. + */ + Builder credentialsProvider(AwsCredentialsProvider credentialsProvider); + + /** + * Configure an endpoint that should be used in the pre-signed requests. This will override the endpoint that is usually + * determined by the {@link #region(Region)}. + */ + Builder endpointOverride(URI endpointOverride); + + /** + * Build the presigner using the configuration on this builder. + */ + SdkPresigner build(); + } +} diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicy.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicy.java index 5b9fc17e6a88..4899a2601b02 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicy.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicy.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.awscore.internal.AwsErrorCode; import software.amazon.awssdk.awscore.retry.conditions.RetryOnErrorCodeCondition; +import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.retry.conditions.OrRetryCondition; import software.amazon.awssdk.core.retry.conditions.RetryCondition; @@ -31,12 +32,38 @@ public final class AwsRetryPolicy { private AwsRetryPolicy() { } + /** + * Retrieve the {@link RetryCondition#defaultRetryCondition()} with AWS-specific conditions added. + */ public static RetryCondition defaultRetryCondition() { - return OrRetryCondition.create(RetryCondition.defaultRetryCondition(), - RetryOnErrorCodeCondition.create(AwsErrorCode.RETRYABLE_ERROR_CODES)); + return OrRetryCondition.create(RetryCondition.defaultRetryCondition(), awsRetryCondition()); } + /** + * Retrieve the {@link RetryPolicy#defaultRetryPolicy()} with AWS-specific conditions added. + */ public static RetryPolicy defaultRetryPolicy() { - return RetryPolicy.defaultRetryPolicy().toBuilder().retryCondition(defaultRetryCondition()).build(); + return forRetryMode(RetryMode.defaultRetryMode()); + } + + /** + * Retrieve the {@link RetryPolicy#defaultRetryPolicy()} with AWS-specific conditions added. This uses the specified + * {@link RetryMode} when constructing the {@link RetryPolicy}. + */ + public static RetryPolicy forRetryMode(RetryMode retryMode) { + return addRetryConditions(RetryPolicy.forRetryMode(retryMode)); + } + + /** + * Update the provided {@link RetryPolicy} to add AWS-specific conditions. 
+ */ + public static RetryPolicy addRetryConditions(RetryPolicy condition) { + return condition.toBuilder() + .retryCondition(OrRetryCondition.create(condition.retryCondition(), awsRetryCondition())) + .build(); + } + + private static RetryOnErrorCodeCondition awsRetryCondition() { + return RetryOnErrorCodeCondition.create(AwsErrorCode.RETRYABLE_ERROR_CODES); } } diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/conditions/RetryOnErrorCodeCondition.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/conditions/RetryOnErrorCodeCondition.java index 8fd542c522e8..6673b13440f2 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/conditions/RetryOnErrorCodeCondition.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/conditions/RetryOnErrorCodeCondition.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/util/AwsHeader.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/util/AwsHeader.java index c4478847d5fb..bda478a7ea47 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/util/AwsHeader.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/util/AwsHeader.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/util/AwsHostNameUtils.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/util/AwsHostNameUtils.java index 16a5d6b8d930..eb44ac136c83 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/util/AwsHostNameUtils.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/util/AwsHostNameUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/builder/DefaultAwsClientBuilderTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/builder/DefaultAwsClientBuilderTest.java index 8c42c68a7f43..c32600c5f014 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/builder/DefaultAwsClientBuilderTest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/builder/DefaultAwsClientBuilderTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
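The AwsRetryPolicy helpers above can be combined with a client's override configuration; a rough sketch, assuming the STANDARD retry mode and the core ClientOverrideConfiguration builder:

```java
import software.amazon.awssdk.awscore.retry.AwsRetryPolicy;
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.core.retry.RetryMode;
import software.amazon.awssdk.core.retry.RetryPolicy;

public class RetryPolicyExample {
    public static void main(String[] args) {
        // Build the STANDARD-mode retry policy with the AWS-specific retryable error codes added.
        RetryPolicy awsStandardPolicy = AwsRetryPolicy.forRetryMode(RetryMode.STANDARD);

        // Equivalent: decorate an existing policy with the AWS-specific retry conditions.
        RetryPolicy decorated = AwsRetryPolicy.addRetryConditions(RetryPolicy.forRetryMode(RetryMode.STANDARD));

        // Either policy can then be passed to a client through its override configuration.
        ClientOverrideConfiguration overrides = ClientOverrideConfiguration.builder()
                                                                           .retryPolicy(awsStandardPolicy)
                                                                           .build();
    }
}
```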
@@ -42,6 +42,7 @@ import org.mockito.runners.MockitoJUnitRunner; import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; import software.amazon.awssdk.auth.signer.Aws4Signer; +import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/endpoint/DefaultServiceEndpointBuilderTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/endpoint/DefaultServiceEndpointBuilderTest.java index e8c13d978c79..90fa1f40034d 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/endpoint/DefaultServiceEndpointBuilderTest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/endpoint/DefaultServiceEndpointBuilderTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/http/NoopTestAwsRequest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/http/NoopTestAwsRequest.java index d55afe982d78..a87238aafa71 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/http/NoopTestAwsRequest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/http/NoopTestAwsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/HttpTestUtils.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/HttpTestUtils.java index 9fc12690cc00..e7ff085f290d 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/HttpTestUtils.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/HttpTestUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -21,8 +21,8 @@ import java.util.concurrent.Executors; import software.amazon.awssdk.core.client.config.SdkAdvancedAsyncClientOption; import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; -import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.internal.http.AmazonSyncHttpClient; import software.amazon.awssdk.core.internal.http.loader.DefaultSdkHttpClientBuilder; import software.amazon.awssdk.core.retry.RetryPolicy; diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/ValidSdkObjects.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/ValidSdkObjects.java index df11173862b5..ac00131437b5 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/ValidSdkObjects.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/ValidSdkObjects.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformerTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformerTest.java index 0995b0518872..d8ca1069a97b 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformerTest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/exception/AwsServiceExceptionTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/exception/AwsServiceExceptionTest.java index 11b8849f4d8b..b9e003a4c4e4 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/exception/AwsServiceExceptionTest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/exception/AwsServiceExceptionTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/interceptor/HelpfulUnknownHostExceptionInterceptorTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/interceptor/HelpfulUnknownHostExceptionInterceptorTest.java new file mode 100644 index 000000000000..471eedcff9b8 --- /dev/null +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/interceptor/HelpfulUnknownHostExceptionInterceptorTest.java @@ -0,0 +1,118 @@ +package software.amazon.awssdk.awscore.interceptor; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.IOException; +import java.net.UnknownHostException; +import org.junit.Test; +import org.mockito.Mockito; +import software.amazon.awssdk.awscore.AwsExecutionAttribute; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.InterceptorContext; +import software.amazon.awssdk.core.internal.interceptor.DefaultFailedExecutionContext; +import software.amazon.awssdk.regions.Region; + +public class HelpfulUnknownHostExceptionInterceptorTest { + private static final ExecutionInterceptor INTERCEPTOR = new HelpfulUnknownHostExceptionInterceptor(); + + @Test + public void modifyException_skipsNonUnknownHostExceptions() { + IOException exception = new IOException(); + assertThat(modifyException(exception)).isEqualTo(exception); + } + + @Test + public void modifyException_supportsNestedUnknownHostExceptions() { + Exception exception = new UnknownHostException(); + exception.initCause(new IOException()); + exception = new IllegalArgumentException(exception); + exception = new UnsupportedOperationException(exception); + + assertThat(modifyException(exception, Region.AWS_GLOBAL)).isInstanceOf(SdkClientException.class); + } + + @Test + public void modifyException_returnsGenericHelp_forGlobalRegions() { + UnknownHostException exception = new UnknownHostException(); + assertThat(modifyException(exception, Region.AWS_GLOBAL)) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("network"); + } + + @Test + public void modifyException_returnsGenericHelp_forUnknownServices() { + UnknownHostException exception = new UnknownHostException(); + assertThat(modifyException(exception, Region.US_EAST_1, "millems-hotdog-stand")) + .isInstanceOf(SdkClientException.class) + .satisfies(t -> doesNotHaveMessageContaining(t, "global")) + .hasMessageContaining("network"); + } + + @Test + public void modifyException_returnsGenericHelp_forUnknownServicesInUnknownRegions() { + UnknownHostException exception = new UnknownHostException(); + assertThat(modifyException(exception, Region.of("cn-north-99"), "millems-hotdog-stand")) + .isInstanceOf(SdkClientException.class) + .satisfies(t -> doesNotHaveMessageContaining(t, "global")) + .hasMessageContaining("network"); + } + + @Test + public void modifyException_returnsGenericHelp_forServicesRegionalizedInAllPartitions() { + UnknownHostException exception = new UnknownHostException(); + assertThat(modifyException(exception, Region.US_EAST_1, "dynamodb")) + .isInstanceOf(SdkClientException.class) + .satisfies(t -> doesNotHaveMessageContaining(t, "global")) + .hasMessageContaining("network"); + } + + @Test + public void modifyException_returnsGenericGlobalRegionHelp_forServicesGlobalInSomePartitionOtherThanTheClientPartition() { + UnknownHostException 
exception = new UnknownHostException(); + assertThat(modifyException(exception, Region.of("cn-north-99"), "iam")) + .isInstanceOf(SdkClientException.class) + .satisfies(t -> doesNotHaveMessageContaining(t, "network")) + .hasMessageContaining("aws-global") + .hasMessageContaining("aws-cn-global"); + } + + @Test + public void modifyException_returnsSpecificGlobalRegionHelp_forServicesGlobalInTheClientRegionPartition() { + UnknownHostException exception = new UnknownHostException(); + assertThat(modifyException(exception, Region.of("cn-north-1"), "iam")) + .isInstanceOf(SdkClientException.class) + .satisfies(t -> doesNotHaveMessageContaining(t, "aws-global")) + .hasMessageContaining("aws-cn-global"); + } + + private void doesNotHaveMessageContaining(Throwable throwable, String value) { + assertThat(throwable.getMessage()).doesNotContain(value); + } + + private Throwable modifyException(Throwable throwable) { + return modifyException(throwable, null); + } + + private Throwable modifyException(Throwable throwable, Region clientRegion) { + return modifyException(throwable, clientRegion, null); + } + + private Throwable modifyException(Throwable throwable, Region clientRegion, String serviceEndpointPrefix) { + SdkRequest sdkRequest = Mockito.mock(SdkRequest.class); + + DefaultFailedExecutionContext context = + DefaultFailedExecutionContext.builder() + .interceptorContext(InterceptorContext.builder().request(sdkRequest).build()) + .exception(throwable) + .build(); + + ExecutionAttributes executionAttributes = + new ExecutionAttributes().putAttribute(AwsExecutionAttribute.AWS_REGION, clientRegion) + .putAttribute(AwsExecutionAttribute.ENDPOINT_PREFIX, serviceEndpointPrefix); + + return INTERCEPTOR.modifyException(context, executionAttributes); + } +} \ No newline at end of file diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicyTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicyTest.java index e3f287ad11d0..002e93f81633 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicyTest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicyTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -102,6 +102,17 @@ public void doesNotRetryOnNonRetryableErrorCode() { assertFalse(shouldRetry(applyErrorCode("ValidationError"))); } + @Test + public void retriesOnEC2ThrottledException() { + AwsServiceException ex = AwsServiceException.builder() + .awsErrorDetails(AwsErrorDetails.builder() + .errorCode("EC2ThrottledException") + .build()) + .build(); + + assertTrue(shouldRetry(b -> b.exception(ex))); + } + private boolean shouldRetry(Consumer builder) { return defaultRetryCondition().shouldRetry(RetryPolicyContext.builder().applyMutation(builder).build()); } diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/retry/RetryOnErrorCodeConditionTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/retry/RetryOnErrorCodeConditionTest.java index e9e17b15dd44..bb72774c6511 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/retry/RetryOnErrorCodeConditionTest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/retry/RetryOnErrorCodeConditionTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/util/AwsHostNameUtilsTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/util/AwsHostNameUtilsTest.java index 265057f4445a..91a2cb1cfd90 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/util/AwsHostNameUtilsTest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/util/AwsHostNameUtilsTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml new file mode 100644 index 000000000000..b9d03e595fa1 --- /dev/null +++ b/core/metrics-spi/pom.xml @@ -0,0 +1,81 @@ + + + + core + software.amazon.awssdk + 2.15.62-SNAPSHOT + + 4.0.0 + + metrics-spi + AWS Java SDK :: Metrics SPI + This is the base module for SDK metrics feature. It contains the interfaces used for metrics feature + that are used by other modules in the library. + + + + + software.amazon.awssdk + annotations + ${awsjavasdk.version} + + + software.amazon.awssdk + utils + ${awsjavasdk.version} + + + software.amazon.awssdk + test-utils + ${awsjavasdk.version} + test + + + junit + junit + test + + + com.github.tomakehurst + wiremock + test + + + org.assertj + assertj-core + test + + + org.mockito + mockito-core + test + + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.metrics + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.8 + 1.8 + + + + + diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/LoggingMetricPublisher.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/LoggingMetricPublisher.java new file mode 100644 index 000000000000..004f41c63e81 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/LoggingMetricPublisher.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.Logger; + +/** + * An implementation of {@link MetricPublisher} that writes all published metrics to the logs at the INFO level under the + * {@code software.amazon.awssdk.metrics.LoggingMetricPublisher} namespace. + */ +@SdkPublicApi +public final class LoggingMetricPublisher implements MetricPublisher { + private static final Logger LOGGER = Logger.loggerFor(LoggingMetricPublisher.class); + + private LoggingMetricPublisher() { + } + + public static LoggingMetricPublisher create() { + return new LoggingMetricPublisher(); + } + + @Override + public void publish(MetricCollection metricCollection) { + LOGGER.info(() -> "Metrics published: " + metricCollection); + } + + @Override + public void close() { + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCategory.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCategory.java new file mode 100644 index 000000000000..f034c2184997 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCategory.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * A enum class representing the different types of metric categories in the SDK. + *

+ * A metric can be tagged with multiple categories. Clients can enable/disable metric collection
+ * at a {@link MetricCategory} level.
+ */
+@SdkPublicApi
+public enum MetricCategory {
+    /**
+     * Metrics collected by the core SDK are classified under this category.
+     */
+    CORE("Core"),
+
+    /**
+     * Metrics collected at the http client level are classified under this category.
+     */
+    HTTP_CLIENT("HttpClient"),
+
+    /**
+     * Metrics specified by the customer should be classified under this category.
+     */
+    CUSTOM("Custom"),
+
+    /**
+     * This is an umbrella category (provided for convenience) that records metrics belonging to every category
+     * defined in this enum. Clients who wish to collect a lot of SDK metrics data should use this.
+     * <p>

    + * Note: Enabling this option along with {@link MetricLevel#TRACE} is verbose and can be expensive based on the platform + * the metrics are uploaded to. Please make sure you need all this data before using this category. + */ + ALL("All"); + + private final String value; + + MetricCategory(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + /** + * Create a {@link MetricCategory} from the given String value. This method is case insensitive. + * + * @param value the value to create the {@link MetricCategory} from + * @return A {@link MetricCategory} if the given {@link #value} matches one of the enum values. + * Otherwise throws {@link IllegalArgumentException} + */ + public static MetricCategory fromString(String value) { + for (MetricCategory mc : MetricCategory.values()) { + if (mc.value.equalsIgnoreCase(value)) { + return mc; + } + } + + throw new IllegalArgumentException("MetricCategory cannot be created from value: " + value); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollection.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollection.java new file mode 100644 index 000000000000..5eb4a031de98 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollection.java @@ -0,0 +1,69 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import java.time.Instant; +import java.util.List; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * An immutable collection of metrics. + */ +@SdkPublicApi +public interface MetricCollection extends Iterable> { + /** + * @return The name of this metric collection. + */ + String name(); + + /** + * Return a stream of records in this collection. + */ + default Stream> stream() { + return StreamSupport.stream(spliterator(), false); + } + + /** + * Return all the values of the given metric. + * + * @param metric The metric. + * @param The type of the value. + * @return All of the values of this metric. + */ + List metricValues(SdkMetric metric); + + /** + * @return The child metric collections. + */ + List children(); + + /** + * Return all of the {@link #children()} with a specific name. + * + * @param name The name by which we will filter {@link #children()}. + * @return The child metric collections that have the provided name. + */ + default Stream childrenWithName(String name) { + return children().stream().filter(c -> c.name().equals(name)); + } + + /** + * @return The time at which this collection was created. 
+ */ + Instant creationTime(); +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollector.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollector.java new file mode 100644 index 000000000000..c4599ab37411 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollector.java @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.metrics.internal.DefaultMetricCollector; + +/** + * Used to collect metrics reported by the SDK. + */ +@NotThreadSafe +@SdkPublicApi +public interface MetricCollector { + /** + * @return The name of this collector. + */ + String name(); + + /** + * Report a metric. + */ + void reportMetric(SdkMetric metric, T data); + + /** + * Create a child of this metric collector. + * + * @param name The name of the child collector. + * @return The child collector. + */ + MetricCollector createChild(String name); + + /** + * Return the collected metrics. + *
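Taken together, the collector API can be exercised as below; QUEUE_WAIT_TIME is an illustrative application-defined metric, not one shipped with the SDK:

```java
import java.time.Duration;
import software.amazon.awssdk.metrics.MetricCategory;
import software.amazon.awssdk.metrics.MetricCollection;
import software.amazon.awssdk.metrics.MetricCollector;
import software.amazon.awssdk.metrics.MetricLevel;
import software.amazon.awssdk.metrics.SdkMetric;

public class MetricCollectorExample {
    // An illustrative application-defined metric; not a metric shipped with the SDK.
    private static final SdkMetric<Duration> QUEUE_WAIT_TIME =
        SdkMetric.create("QueueWaitTime", Duration.class, MetricLevel.INFO, MetricCategory.CUSTOM);

    public static void main(String[] args) {
        MetricCollector collector = MetricCollector.create("MyOperation");
        collector.reportMetric(QUEUE_WAIT_TIME, Duration.ofMillis(42));

        // Child collectors model sub-tasks; their records are nested under the parent.
        MetricCollector attempt = collector.createChild("Attempt");
        attempt.reportMetric(QUEUE_WAIT_TIME, Duration.ofMillis(7));

        // collect() freezes the collector and returns an immutable MetricCollection.
        MetricCollection collected = collector.collect();
        System.out.println(collected.metricValues(QUEUE_WAIT_TIME)); // values reported on the parent only
        collected.childrenWithName("Attempt")
                 .forEach(child -> System.out.println(child.metricValues(QUEUE_WAIT_TIME)));
    }
}
```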

    + * Calling {@code collect()} prevents further invocations of {@link #reportMetric(SdkMetric, Object)}. + * @return The collected metrics. + */ + MetricCollection collect(); + + static MetricCollector create(String name) { + return DefaultMetricCollector.create(name); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricLevel.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricLevel.java new file mode 100644 index 000000000000..5c87d9805a68 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricLevel.java @@ -0,0 +1,49 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * The {@code MetricLevel} associated with a {@link SdkMetric}, similar to log levels, defines the 'scenario' in which the metric + * is useful. This makes it easy to reduce the cost of metric publishing (e.g. by setting it to {@link #INFO}), and then increase + * it when additional data level is needed for debugging purposes (e.g. by setting it to {@link #TRACE}. + */ +@SdkPublicApi +public enum MetricLevel { + /** + * The metric level that includes every other metric level, as well as some highly-technical metrics that may only be useful + * in very specific performance or failure scenarios. + */ + TRACE, + + /** + * The "default" metric level that includes metrics that are useful for identifying why errors or performance issues + * are occurring within the SDK. This excludes technical metrics that are only useful in very specific performance or failure + * scenarios. + */ + INFO, + + /** + * Includes metrics that report when API call errors are occurring within the SDK. This does not include all + * of the information that may be generally useful when debugging why errors are occurring (e.g. latency). + */ + ERROR; + + public boolean includesLevel(MetricLevel level) { + return this.compareTo(level) <= 0; + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricPublisher.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricPublisher.java new file mode 100644 index 000000000000..78fd56616928 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricPublisher.java @@ -0,0 +1,67 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.utils.SdkAutoCloseable; + +/** + * Interface to report and publish the collected SDK metric events to external + * sources. + *

+ * Conceptually, a publisher receives a stream of {@link MetricCollection} objects
+ * over its lifetime through its {@link #publish(MetricCollection)} method.
+ * Implementations are then free to further aggregate these events into sets of
+ * metrics that are then published to some external system for further use.
+ * As long as a publisher is not closed, it can receive {@code
+ * MetricCollection} objects at any time. In addition, because the SDK makes use of
+ * multithreading, the publisher may be shared concurrently by
+ * multiple threads, so all implementations must be threadsafe.
+ * <p>
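A rough sketch of such an implementation, aggregating in memory only (a production publisher would typically queue collections and publish them asynchronously):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;
import software.amazon.awssdk.metrics.MetricCollection;
import software.amazon.awssdk.metrics.MetricPublisher;

// A minimal publisher that counts how many records were seen per metric name.
public final class CountingMetricPublisher implements MetricPublisher {
    private final Map<String, LongAdder> counts = new ConcurrentHashMap<>();

    @Override
    public void publish(MetricCollection metricCollection) {
        metricCollection.forEach(metricRecord ->
            counts.computeIfAbsent(metricRecord.metric().name(), n -> new LongAdder()).increment());
        metricCollection.children().forEach(this::publish); // walk nested collections too
    }

    @Override
    public void close() {
        counts.forEach((name, count) -> System.out.println(name + ": " + count.sum()));
    }
}
```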

    + * The SDK may invoke methods on the interface from multiple threads + * concurrently so implementations must be threadsafe. + */ +@ThreadSafe +@SdkPublicApi +public interface MetricPublisher extends SdkAutoCloseable { + /** + * Notify the publisher of new metric data. After this call returns, the + * caller can safely discard the given {@code metricCollection} instance if it + * no longer needs it. Implementations are strongly encouraged to complete + * any further aggregation and publishing of metrics in an asynchronous manner to + * avoid blocking the calling thread. + *
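A publisher such as the LoggingMetricPublisher above is typically attached through a client's override configuration; the addMetricPublisher hook used here lives outside this diff and is assumed:

```java
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.metrics.LoggingMetricPublisher;

public class MetricPublisherWiringExample {
    public static void main(String[] args) {
        // The SDK invokes publish(...) once per API call with that call's MetricCollection.
        ClientOverrideConfiguration overrides =
            ClientOverrideConfiguration.builder()
                                       .addMetricPublisher(LoggingMetricPublisher.create())
                                       .build();
        // 'overrides' can then be passed to any service client builder via overrideConfiguration(...).
    }
}
```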

    + * With the exception of a {@code null} {@code metricCollection}, all + * invocations of this method must return normally. This + * is to ensure that callers of the publisher can safely assume that even + * in situations where an error happens during publishing that it will not + * interrupt the calling thread. + * + * @param metricCollection The collection of metrics. + * @throws IllegalArgumentException If {@code metricCollection} is {@code null}. + */ + void publish(MetricCollection metricCollection); + + /** + * {@inheritDoc} + *

    + * Important: Implementations must block the calling thread until all + * pending metrics are published and any resources acquired have been freed. + */ + @Override + void close(); +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricRecord.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricRecord.java new file mode 100644 index 000000000000..2ec0cbcb5db2 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricRecord.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * A container associating a metric and its value. + */ +@SdkPublicApi +public interface MetricRecord { + /** + * @return The metric. + */ + SdkMetric metric(); + + /** + * @return The value of this metric. + */ + T value(); +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/NoOpMetricCollector.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/NoOpMetricCollector.java new file mode 100644 index 000000000000..8ebd377aa095 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/NoOpMetricCollector.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.metrics.internal.EmptyMetricCollection; + +/** + * A metric collector that doesn't do anything. 
+ */ +@SdkPublicApi +public final class NoOpMetricCollector implements MetricCollector { + private static final NoOpMetricCollector INSTANCE = new NoOpMetricCollector(); + + private NoOpMetricCollector() { + } + + @Override + public String name() { + return "NoOp"; + } + + @Override + public void reportMetric(SdkMetric metric, T data) { + } + + @Override + public MetricCollector createChild(String name) { + return INSTANCE; + } + + @Override + public MetricCollection collect() { + return EmptyMetricCollection.create(); + } + + public static NoOpMetricCollector create() { + return INSTANCE; + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/SdkMetric.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/SdkMetric.java new file mode 100644 index 000000000000..35e6861e4adb --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/SdkMetric.java @@ -0,0 +1,80 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import java.util.Set; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.metrics.internal.DefaultSdkMetric; + +/** + * A specific SDK metric. + * + * @param The type for values of this metric. + */ +@SdkPublicApi +public interface SdkMetric { + + /** + * @return The name of this metric. + */ + String name(); + + /** + * @return The categories of this metric. + */ + Set categories(); + + /** + * @return The level of this metric. + */ + MetricLevel level(); + + /** + * @return The class of the value associated with this metric. + */ + Class valueClass(); + + /** + * Create a new metric. + * + * @param name The name of this metric. + * @param clzz The class of the object containing the associated value for this metric. + * @param c1 A category associated with this metric. + * @param cn Additional categories associated with this metric. + * @param The type of the object containing the associated value for this metric. + * @return The created metric. + * + * @throws IllegalArgumentException If a metric of the same name has already been created. + */ + static SdkMetric create(String name, Class clzz, MetricLevel level, MetricCategory c1, MetricCategory... cn) { + return DefaultSdkMetric.create(name, clzz, level, c1, cn); + } + + /** + * Create a new metric. + * + * @param name The name of this metric. + * @param clzz The class of the object containing the associated value for this metric. + * @param categories The categories associated with this metric. + * @param The type of the object containing the associated value for this metric. + * @return The created metric. + * + * @throws IllegalArgumentException If a metric of the same name has already been created. 
+ */ + static SdkMetric create(String name, Class clzz, MetricLevel level, Set categories) { + return DefaultSdkMetric.create(name, clzz, level, categories); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollection.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollection.java new file mode 100644 index 000000000000..7047be072806 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollection.java @@ -0,0 +1,91 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.internal; + +import static java.util.stream.Collectors.toList; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.utils.ToString; + +@SdkInternalApi +public final class DefaultMetricCollection implements MetricCollection { + private final String name; + private final Map, List>> metrics; + private final List children; + private final Instant creationTime; + + public DefaultMetricCollection(String name, Map, + List>> metrics, List children) { + this.name = name; + this.metrics = new HashMap<>(metrics); + this.children = children != null ? 
Collections.unmodifiableList(new ArrayList<>(children)) : Collections.emptyList(); + this.creationTime = Instant.now(); + } + + @Override + public String name() { + return name; + } + + @SuppressWarnings("unchecked") + @Override + public List metricValues(SdkMetric metric) { + if (metrics.containsKey(metric)) { + List> metricRecords = metrics.get(metric); + List values = metricRecords.stream() + .map(MetricRecord::value) + .collect(toList()); + return (List) Collections.unmodifiableList(values); + } + return Collections.emptyList(); + } + + @Override + public List children() { + return children; + } + + @Override + public Instant creationTime() { + return creationTime; + } + + @Override + public Iterator> iterator() { + return metrics.values().stream() + .flatMap(List::stream) + .iterator(); + } + + @Override + public String toString() { + return ToString.builder("MetricCollection") + .add("name", name) + .add("metrics", metrics.values().stream().flatMap(List::stream).collect(toList())) + .add("children", children) + .build(); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollector.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollector.java new file mode 100644 index 000000000000..a63dc6ed85e1 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollector.java @@ -0,0 +1,83 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.internal; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +@SdkInternalApi +public final class DefaultMetricCollector implements MetricCollector { + private static final Logger log = Logger.loggerFor(DefaultMetricCollector.class); + private final String name; + private final Map, List>> metrics = new LinkedHashMap<>(); + private final List children = new ArrayList<>(); + + public DefaultMetricCollector(String name) { + this.name = name; + } + + @Override + public String name() { + return name; + } + + @Override + public synchronized void reportMetric(SdkMetric metric, T data) { + metrics.computeIfAbsent(metric, (m) -> new ArrayList<>()) + .add(new DefaultMetricRecord<>(metric, data)); + } + + @Override + public synchronized MetricCollector createChild(String name) { + MetricCollector child = new DefaultMetricCollector(name); + children.add(child); + return child; + } + + @Override + public synchronized MetricCollection collect() { + List collectedChildren = children.stream() + .map(MetricCollector::collect) + .collect(Collectors.toList()); + + DefaultMetricCollection metricRecords = new DefaultMetricCollection(name, metrics, collectedChildren); + + log.debug(() -> "Collected metrics records: " + metricRecords); + return metricRecords; + } + + public static MetricCollector create(String name) { + Validate.notEmpty(name, "name"); + return new DefaultMetricCollector(name); + } + + @Override + public String toString() { + return ToString.builder("DefaultMetricCollector") + .add("metrics", metrics).build(); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricRecord.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricRecord.java new file mode 100644 index 000000000000..801823c1e9b4 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricRecord.java @@ -0,0 +1,50 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.utils.ToString; + +@SdkInternalApi +public final class DefaultMetricRecord implements MetricRecord { + private final SdkMetric metric; + private final T value; + + public DefaultMetricRecord(SdkMetric metric, T value) { + this.metric = metric; + this.value = value; + } + + @Override + public SdkMetric metric() { + return metric; + } + + @Override + public T value() { + return value; + } + + @Override + public String toString() { + return ToString.builder("MetricRecord") + .add("metric", metric.name()) + .add("value", value) + .build(); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetric.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetric.java new file mode 100644 index 000000000000..461307f31e70 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetric.java @@ -0,0 +1,159 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.internal; + +import java.util.Collections; +import java.util.EnumSet; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +@SdkInternalApi +public final class DefaultSdkMetric extends AttributeMap.Key implements SdkMetric { + private static final ConcurrentHashMap, Boolean> SDK_METRICS = new ConcurrentHashMap<>(); + + private final String name; + private final Class clzz; + private final Set categories; + private final MetricLevel level; + + private DefaultSdkMetric(String name, Class clzz, MetricLevel level, Set categories) { + super(clzz); + this.name = Validate.notBlank(name, "name must not be blank"); + this.clzz = Validate.notNull(clzz, "clzz must not be null"); + this.level = Validate.notNull(level, "level must not be null"); + Validate.notEmpty(categories, "categories must not be empty"); + this.categories = EnumSet.copyOf(categories); + } + + /** + * @return The name of this event. + */ + @Override + public String name() { + return name; + } + + /** + * @return The categories of this event. 
+     */
+    @Override
+    public Set<MetricCategory> categories() {
+        return Collections.unmodifiableSet(categories);
+    }
+
+    @Override
+    public MetricLevel level() {
+        return level;
+    }
+
+    /**
+     * @return The class of the value associated with this event.
+     */
+    @Override
+    public Class<T> valueClass() {
+        return clzz;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        DefaultSdkMetric<?> that = (DefaultSdkMetric<?>) o;
+
+        return name.equals(that.name);
+    }
+
+    @Override
+    public int hashCode() {
+        return name.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return ToString.builder("DefaultMetric")
+                       .add("name", name)
+                       .add("categories", categories())
+                       .build();
+    }
+
+    /**
+     * Create a new metric.
+     *
+     * @param name The name of this metric.
+     * @param clzz The class of the object containing the associated value for this metric.
+     * @param c1 A category associated with this metric.
+     * @param cn Additional categories associated with this metric.
+     * @param <T> The type of the object containing the associated value for this metric.
+     * @return The created metric.
+     *
+     * @throws IllegalArgumentException If a metric of the same name has already been created.
+     */
+    public static <T> SdkMetric<T> create(String name, Class<T> clzz, MetricLevel level,
+                                          MetricCategory c1, MetricCategory... cn) {
+        Stream<MetricCategory> categoryStream = Stream.of(c1);
+        if (cn != null) {
+            categoryStream = Stream.concat(categoryStream, Stream.of(cn));
+        }
+        Set<MetricCategory> categories = categoryStream.collect(Collectors.toSet());
+        return create(name, clzz, level, categories);
+    }
+
+    /**
+     * Create a new metric.
+     *
+     * @param name The name of this metric.
+     * @param clzz The class of the object containing the associated value for this metric.
+     * @param categories The categories associated with this metric.
+     * @param <T> The type of the object containing the associated value for this metric.
+     * @return The created metric.
+     *
+     * @throws IllegalArgumentException If a metric of the same name has already been created.
+     */
+    public static <T> SdkMetric<T> create(String name, Class<T> clzz, MetricLevel level, Set<MetricCategory> categories) {
+        Validate.noNullElements(categories, "categories must not contain null elements");
+        SdkMetric<T> event = new DefaultSdkMetric<>(name, clzz, level, categories);
+        if (SDK_METRICS.putIfAbsent(event, Boolean.TRUE) != null) {
+            throw new IllegalArgumentException("Metric with name " + name + " has already been created");
+        }
+        return event;
+    }
+
+    @SdkTestInternalApi
+    static void clearDeclaredMetrics() {
+        SDK_METRICS.clear();
+    }
+
+    @SdkTestInternalApi
+    static Set<SdkMetric<?>> declaredEvents() {
+        return SDK_METRICS.keySet();
+    }
+}
diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/EmptyMetricCollection.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/EmptyMetricCollection.java
new file mode 100644
index 000000000000..e7fc23366d49
--- /dev/null
+++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/EmptyMetricCollection.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.metrics.internal;
+
+import java.time.Instant;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import software.amazon.awssdk.annotations.SdkInternalApi;
+import software.amazon.awssdk.metrics.MetricCollection;
+import software.amazon.awssdk.metrics.MetricRecord;
+import software.amazon.awssdk.metrics.SdkMetric;
+
+@SdkInternalApi
+public final class EmptyMetricCollection implements MetricCollection {
+    private final Instant creationTime = Instant.now();
+
+    @Override
+    public String name() {
+        return "NoOp";
+    }
+
+    @Override
+    public <T> List<T> metricValues(SdkMetric<T> metric) {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public List<MetricCollection> children() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public Instant creationTime() {
+        return creationTime;
+    }
+
+    @Override
+    public Iterator<MetricRecord<?>> iterator() {
+        return Collections.emptyIterator();
+    }
+
+    public static EmptyMetricCollection create() {
+        return new EmptyMetricCollection();
+    }
+}
diff --git a/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/MetricLevelTest.java b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/MetricLevelTest.java
new file mode 100644
index 000000000000..317538e32b16
--- /dev/null
+++ b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/MetricLevelTest.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.metrics; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.Test; + +public class MetricLevelTest { + @Test + public void allLevelsAreCorrect() { + assertThat(MetricLevel.TRACE.includesLevel(MetricLevel.TRACE)).isTrue(); + assertThat(MetricLevel.TRACE.includesLevel(MetricLevel.INFO)).isTrue(); + assertThat(MetricLevel.TRACE.includesLevel(MetricLevel.ERROR)).isTrue(); + } + + @Test + public void infoLevelsAreCorrect() { + assertThat(MetricLevel.INFO.includesLevel(MetricLevel.TRACE)).isFalse(); + assertThat(MetricLevel.INFO.includesLevel(MetricLevel.INFO)).isTrue(); + assertThat(MetricLevel.INFO.includesLevel(MetricLevel.ERROR)).isTrue(); + } + + @Test + public void errorLevelsAreCorrect() { + assertThat(MetricLevel.ERROR.includesLevel(MetricLevel.TRACE)).isFalse(); + assertThat(MetricLevel.ERROR.includesLevel(MetricLevel.INFO)).isFalse(); + assertThat(MetricLevel.ERROR.includesLevel(MetricLevel.ERROR)).isTrue(); + } +} \ No newline at end of file diff --git a/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectionTest.java b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectionTest.java new file mode 100644 index 000000000000..65d168b4e1a7 --- /dev/null +++ b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectionTest.java @@ -0,0 +1,70 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package software.amazon.awssdk.metrics.internal;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
+import org.junit.AfterClass;
+import org.junit.Test;
+import software.amazon.awssdk.metrics.MetricCategory;
+import software.amazon.awssdk.metrics.MetricLevel;
+import software.amazon.awssdk.metrics.MetricRecord;
+import software.amazon.awssdk.metrics.SdkMetric;
+
+public class DefaultMetricCollectionTest {
+    private static final SdkMetric<Integer> M1 = SdkMetric.create("m1", Integer.class, MetricLevel.INFO, MetricCategory.CORE);
+
+    @AfterClass
+    public static void teardown() {
+        DefaultSdkMetric.clearDeclaredMetrics();
+    }
+
+    @Test
+    public void testMetricValues_noValues_returnsEmptyList() {
+        DefaultMetricCollection foo = new DefaultMetricCollection("foo", Collections.emptyMap(), Collections.emptyList());
+        assertThat(foo.metricValues(M1)).isEmpty();
+    }
+
+    @Test
+    public void testChildren_noChildren_returnsEmptyList() {
+        DefaultMetricCollection foo = new DefaultMetricCollection("foo", Collections.emptyMap(), Collections.emptyList());
+        assertThat(foo.children()).isEmpty();
+    }
+
+    @Test
+    public void testIterator_iteratesOverAllValues() {
+        Integer[] values = {1, 2, 3};
+        Map<SdkMetric<?>, List<MetricRecord<?>>> recordMap = new HashMap<>();
+        List<MetricRecord<?>> records = Stream.of(values).map(v -> new DefaultMetricRecord<>(M1, v)).collect(Collectors.toList());
+        recordMap.put(M1, records);
+
+        DefaultMetricCollection collection = new DefaultMetricCollection("foo", recordMap, Collections.emptyList());
+        final Set<Integer> iteratorValues = StreamSupport.stream(collection.spliterator(), false)
+                                                         .map(MetricRecord::value)
+                                                         .map(Integer.class::cast)
+                                                         .collect(Collectors.toSet());
+
+        assertThat(iteratorValues).containsExactly(values);
+    }
+}
diff --git a/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectorTest.java b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectorTest.java
new file mode 100644
index 000000000000..d3f0682d6c8d
--- /dev/null
+++ b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectorTest.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.metrics.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import java.util.stream.Stream; +import org.junit.AfterClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; + +public class DefaultMetricCollectorTest { + private static final SdkMetric M1 = SdkMetric.create("m1", Integer.class, MetricLevel.INFO, MetricCategory.CORE); + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @AfterClass + public static void teardown() { + DefaultSdkMetric.clearDeclaredMetrics(); + } + + @Test + public void testName_returnsName() { + MetricCollector collector = MetricCollector.create("collector"); + assertThat(collector.name()).isEqualTo("collector"); + } + + @Test + public void testCreateChild_returnsChildWithCorrectName() { + MetricCollector parent = MetricCollector.create("parent"); + MetricCollector child = parent.createChild("child"); + + assertThat(child.name()).isEqualTo("child"); + } + + @Test + public void testCollect_allReportedMetricsInCollection() { + MetricCollector collector = MetricCollector.create("collector"); + Integer[] values = {1, 2, 3}; + Stream.of(values).forEach(v -> collector.reportMetric(M1, v)); + MetricCollection collect = collector.collect(); + assertThat(collect.metricValues(M1)).containsExactly(values); + } + + @Test + public void testCollect_returnedCollectionContainsAllChildren() { + MetricCollector parent = MetricCollector.create("parent"); + String[] childNames = {"c1", "c2", "c3" }; + Stream.of(childNames).forEach(parent::createChild); + MetricCollection collected = parent.collect(); + assertThat(collected.children().stream().map(MetricCollection::name)).containsExactly(childNames); + } +} diff --git a/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricRecordTest.java b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricRecordTest.java new file mode 100644 index 000000000000..a6a2fbbc18d6 --- /dev/null +++ b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricRecordTest.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import org.junit.Test; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.MetricRecord; + +/** + * Tests for {@link DefaultMetricRecord}. 
+ */ +public class DefaultSdkMetricRecordTest { + @Test + public void testGetters() { + SdkMetric event = SdkMetric.create("foo", Integer.class, MetricLevel.INFO, MetricCategory.CORE); + + MetricRecord record = new DefaultMetricRecord<>(event, 2); + + assertThat(record.metric()).isEqualTo(event); + assertThat(record.value()).isEqualTo(2); + } +} diff --git a/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricTest.java b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricTest.java new file mode 100644 index 000000000000..1fe8d4fbea1a --- /dev/null +++ b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricTest.java @@ -0,0 +1,136 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; + +public class DefaultSdkMetricTest { + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Before + public void methodSetup() { + DefaultSdkMetric.clearDeclaredMetrics(); + } + + @Test + public void testOf_variadicOverload_createdProperly() { + SdkMetric event = SdkMetric.create("event", Integer.class, MetricLevel.INFO, MetricCategory.CORE); + + assertThat(event.categories()).containsExactly(MetricCategory.CORE); + assertThat(event.name()).isEqualTo("event"); + assertThat(event.valueClass()).isEqualTo(Integer.class); + } + + @Test + public void testOf_setOverload_createdProperly() { + SdkMetric event = SdkMetric.create("event", Integer.class, MetricLevel.INFO, Stream.of(MetricCategory.CORE) + .collect(Collectors.toSet())); + + assertThat(event.categories()).containsExactly(MetricCategory.CORE); + assertThat(event.name()).isEqualTo("event"); + assertThat(event.valueClass()).isEqualTo(Integer.class); + } + + @Test + public void testOf_variadicOverload_c1Null_throws() { + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("must not contain null elements"); + SdkMetric.create("event", Integer.class, MetricLevel.INFO, (MetricCategory) null); + } + + @Test + public void testOf_variadicOverload_c1NotNull_cnNull_doesNotThrow() { + SdkMetric.create("event", Integer.class, MetricLevel.INFO, MetricCategory.CORE, null); + } + + @Test + public void testOf_variadicOverload_cnContainsNull_throws() { + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("must not contain null elements"); + SdkMetric.create("event", Integer.class, MetricLevel.INFO, MetricCategory.CORE, new MetricCategory[]{null }); + } + + @Test + public void testOf_setOverload_null_throws() { + 
thrown.expect(NullPointerException.class); + thrown.expectMessage("object is null"); + SdkMetric.create("event", Integer.class, MetricLevel.INFO, (Set) null); + } + + @Test + public void testOf_setOverload_nullElement_throws() { + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("categories must not contain null elements"); + SdkMetric.create("event", Integer.class, MetricLevel.INFO, Stream.of((MetricCategory) null).collect(Collectors.toSet())); + } + + @Test + public void testOf_namePreviouslyUsed_throws() { + String fooName = "metricEvent"; + + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage(fooName + " has already been created"); + + SdkMetric.create(fooName, Integer.class, MetricLevel.INFO, MetricCategory.CORE); + SdkMetric.create(fooName, Integer.class, MetricLevel.INFO, MetricCategory.CORE); + } + + @Test + public void testOf_namePreviouslyUsed_differentArgs_throws() { + String fooName = "metricEvent"; + + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage(fooName + " has already been created"); + + SdkMetric.create(fooName, Integer.class, MetricLevel.INFO, MetricCategory.CORE); + SdkMetric.create(fooName, Long.class, MetricLevel.INFO, MetricCategory.HTTP_CLIENT); + } + + @Test + public void testOf_namePreviouslyUsed_doesNotReplaceExisting() { + String fooName = "fooMetric"; + + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage(fooName + " has already been created"); + + SdkMetric.create(fooName, Integer.class, MetricLevel.INFO, MetricCategory.CORE); + try { + SdkMetric.create(fooName, Long.class, MetricLevel.INFO, MetricCategory.HTTP_CLIENT); + } finally { + SdkMetric fooMetric = DefaultSdkMetric.declaredEvents() + .stream() + .filter(e -> e.name().equals(fooName)) + .findFirst() + .get(); + + assertThat(fooMetric.name()).isEqualTo(fooName); + assertThat(fooMetric.valueClass()).isEqualTo(Integer.class); + assertThat(fooMetric.categories()).containsExactly(MetricCategory.CORE); + } + } +} diff --git a/core/pom.xml b/core/pom.xml index be761eefc8c0..3e011e0c271b 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -1,6 +1,6 @@ + protocols software.amazon.awssdk - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/AwsCborProtocolFactory.java b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/AwsCborProtocolFactory.java index 3a9c37bec1f5..86fc70e23fc2 100644 --- a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/AwsCborProtocolFactory.java +++ b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/AwsCborProtocolFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
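Taken together, the metrics-spi additions above form a small SPI: a metric is declared once with SdkMetric.create(...), values are reported into a MetricCollector, and collect() produces an immutable MetricCollection tree. A minimal usage sketch against that API; the metric name and reported values are illustrative only, not part of this change:

import software.amazon.awssdk.metrics.MetricCategory;
import software.amazon.awssdk.metrics.MetricCollection;
import software.amazon.awssdk.metrics.MetricCollector;
import software.amazon.awssdk.metrics.MetricLevel;
import software.amazon.awssdk.metrics.SdkMetric;

public class MetricsSpiUsageSketch {
    // Hypothetical metric: names must be unique per JVM, so real code declares these as shared constants.
    private static final SdkMetric<Integer> EXAMPLE_RETRY_COUNT =
            SdkMetric.create("ExampleRetryCount", Integer.class, MetricLevel.INFO, MetricCategory.CORE);

    public static void main(String[] args) {
        MetricCollector apiCall = MetricCollector.create("ApiCall");
        apiCall.reportMetric(EXAMPLE_RETRY_COUNT, 2);

        // Child collectors nest per-attempt metrics under the API-call collection.
        MetricCollector attempt = apiCall.createChild("ApiCallAttempt");
        attempt.reportMetric(EXAMPLE_RETRY_COUNT, 0);

        MetricCollection collected = apiCall.collect();
        System.out.println(collected.metricValues(EXAMPLE_RETRY_COUNT)); // [2]
        System.out.println(collected.children().get(0).name());          // ApiCallAttempt
    }
}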
diff --git a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/AwsStructuredCborFactory.java b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/AwsStructuredCborFactory.java index 76c48f341e3a..d9f9144d6bf2 100644 --- a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/AwsStructuredCborFactory.java +++ b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/AwsStructuredCborFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkCborGenerator.java b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkCborGenerator.java index 002c1bea2aa8..0520530a5103 100644 --- a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkCborGenerator.java +++ b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkCborGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkStructuredCborFactory.java b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkStructuredCborFactory.java index aca1307d771f..3e014774db63 100644 --- a/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkStructuredCborFactory.java +++ b/core/protocols/aws-cbor-protocol/src/main/java/software/amazon/awssdk/protocols/cbor/internal/SdkStructuredCborFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-cbor-protocol/src/test/java/software/amazon/awssdk/protocols/cbor/AwsCborProtocolFactoryTest.java b/core/protocols/aws-cbor-protocol/src/test/java/software/amazon/awssdk/protocols/cbor/AwsCborProtocolFactoryTest.java index 4dcbc1a27df3..6cc6cbaf2ccd 100644 --- a/core/protocols/aws-cbor-protocol/src/test/java/software/amazon/awssdk/protocols/cbor/AwsCborProtocolFactoryTest.java +++ b/core/protocols/aws-cbor-protocol/src/test/java/software/amazon/awssdk/protocols/cbor/AwsCborProtocolFactoryTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/protocols/aws-ion-protocol/pom.xml b/core/protocols/aws-ion-protocol/pom.xml index 2e713481e276..2729b6deb9c7 100644 --- a/core/protocols/aws-ion-protocol/pom.xml +++ b/core/protocols/aws-ion-protocol/pom.xml @@ -1,11 +1,26 @@ + + protocols software.amazon.awssdk - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/AwsIonProtocolFactory.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/AwsIonProtocolFactory.java index dff6a6b6f8e4..5e976913b51d 100644 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/AwsIonProtocolFactory.java +++ b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/AwsIonProtocolFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/AwsStructuredIonFactory.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/AwsStructuredIonFactory.java index a3e789b73444..85345e33245e 100644 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/AwsStructuredIonFactory.java +++ b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/AwsStructuredIonFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/CompositeErrorCodeParser.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/CompositeErrorCodeParser.java index 13b522378222..0d3bdb20aeb0 100644 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/CompositeErrorCodeParser.java +++ b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/CompositeErrorCodeParser.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonErrorCodeParser.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonErrorCodeParser.java index 50d59b5c8ede..64d91e3b080b 100644 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonErrorCodeParser.java +++ b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonErrorCodeParser.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonFactory.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonFactory.java index a1b1ab35a97e..7425516f5ee6 100644 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonFactory.java +++ b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonParser.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonParser.java index 09e204e35dc2..f93568dd43af 100644 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonParser.java +++ b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/IonParser.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkIonGenerator.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkIonGenerator.java index 1c92a263ab3a..4d758970d2d2 100644 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkIonGenerator.java +++ b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkIonGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkStructuredIonFactory.java b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkStructuredIonFactory.java index fb04ff63011c..639ea5e60bbe 100644 --- a/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkStructuredIonFactory.java +++ b/core/protocols/aws-ion-protocol/src/main/java/software/amazon/awssdk/protocols/ion/internal/SdkStructuredIonFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/AwsStructuredIonFactoryTest.java b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/AwsStructuredIonFactoryTest.java index d9599cd682b1..2b2fab841717 100644 --- a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/AwsStructuredIonFactoryTest.java +++ b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/AwsStructuredIonFactoryTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonFactoryTest.java b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonFactoryTest.java index edead0a92c31..31c9b82298a3 100644 --- a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonFactoryTest.java +++ b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonFactoryTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonParserTest.java b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonParserTest.java index 337b12ac1e02..ca829c900c83 100644 --- a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonParserTest.java +++ b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonParserTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonRoundtripTest.java b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonRoundtripTest.java index 895bf3f2bafa..ba5429205563 100644 --- a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonRoundtripTest.java +++ b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/IonRoundtripTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/ValidSdkObjects.java b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/ValidSdkObjects.java index 63ddcc3d30f2..eabb349822ba 100644 --- a/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/ValidSdkObjects.java +++ b/core/protocols/aws-ion-protocol/src/test/java/software/amazon/awssdk/protocols/ion/ValidSdkObjects.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index 15b17ae280a2..9c569678eafb 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -1,11 +1,26 @@ + + protocols software.amazon.awssdk - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocol.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocol.java index c0188854aa63..98f923f0f114 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocol.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocol.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocolFactory.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocolFactory.java index 072ea031301f..fe6217c5f431 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocolFactory.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocolFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocolMetadata.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocolMetadata.java index 130b2d8046f1..47ccb9491b80 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocolMetadata.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/AwsJsonProtocolMetadata.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). 
* You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java index 0e183ad8ffdf..1fd0a6b05670 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -31,6 +31,8 @@ import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.http.MetricCollectingHttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.MarshallLocation; import software.amazon.awssdk.core.traits.TimestampFormatTrait; import software.amazon.awssdk.http.SdkHttpFullRequest; @@ -102,11 +104,12 @@ public final HttpResponseHandler createResponseHandler(Js public final HttpResponseHandler createResponseHandler( JsonOperationMetadata operationMetadata, Function pojoSupplier) { - return new AwsJsonResponseHandler<>( - new JsonResponseHandler<>(protocolUnmarshaller, - pojoSupplier, - operationMetadata.hasStreamingSuccessResponse(), - operationMetadata.isPayloadJson())); + return timeUnmarshalling( + new AwsJsonResponseHandler<>( + new JsonResponseHandler<>(protocolUnmarshaller, + pojoSupplier, + operationMetadata.hasStreamingSuccessResponse(), + operationMetadata.isPayloadJson()))); } /** @@ -114,7 +117,7 @@ public final HttpResponseHandler createResponseHandler( */ public final HttpResponseHandler createErrorResponseHandler( JsonOperationMetadata errorResponseMetadata) { - return AwsJsonProtocolErrorUnmarshaller + return timeUnmarshalling(AwsJsonProtocolErrorUnmarshaller .builder() .jsonProtocolUnmarshaller(protocolUnmarshaller) .exceptions(modeledExceptions) @@ -122,7 +125,11 @@ public final HttpResponseHandler createErrorResponseHandler .errorMessageParser(AwsJsonErrorMessageParser.DEFAULT_ERROR_MESSAGE_PARSER) .jsonFactory(getSdkFactory().getJsonFactory()) .defaultExceptionSupplier(defaultServiceExceptionSupplier) - .build(); + .build()); + } + + private MetricCollectingHttpResponseHandler timeUnmarshalling(HttpResponseHandler delegate) { + return MetricCollectingHttpResponseHandler.create(CoreMetric.UNMARSHALLING_DURATION, delegate); } private StructuredJsonGenerator createGenerator(OperationInfo operationInfo) { diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsStructuredJsonFactory.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsStructuredJsonFactory.java index 63819e81d98a..a27196287e4c 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsStructuredJsonFactory.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsStructuredJsonFactory.java @@ 
-1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/DefaultJsonContentTypeResolver.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/DefaultJsonContentTypeResolver.java index 24731d9d2307..12b6eda2b838 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/DefaultJsonContentTypeResolver.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/DefaultJsonContentTypeResolver.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -32,6 +32,7 @@ public DefaultJsonContentTypeResolver(String prefix) { @Override public String resolveContentType(AwsJsonProtocolMetadata protocolMetadata) { + //Changing this to 'application/json' may break clients expecting 'application/x-amz-json-1.1' return prefix + protocolMetadata.protocolVersion(); } } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/ErrorCodeParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/ErrorCodeParser.java index 1143f2e20540..bb7fc7d487af 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/ErrorCodeParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/ErrorCodeParser.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContent.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContent.java index 0889183932f1..bb5edba9ee11 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContent.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContent.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
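For context on the comment added to DefaultJsonContentTypeResolver above: the resolved header value is simply the constructor-supplied prefix concatenated with the protocol version from AwsJsonProtocolMetadata. A sketch of that composition, assuming the AWS JSON prefix and version named in the comment:

class ContentTypeSketch {
    // Illustrative only: how the resolver assembles the Content-Type value.
    static String resolveContentType(String prefix, String protocolVersion) {
        return prefix + protocolVersion; // "application/x-amz-json-" + "1.1" -> "application/x-amz-json-1.1"
    }
}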
diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContentTypeResolver.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContentTypeResolver.java index 8d0ff1eb29bd..59e395af105b 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContentTypeResolver.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonContentTypeResolver.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonOperationMetadata.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonOperationMetadata.java index 87074578b86a..874528a2babe 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonOperationMetadata.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/JsonOperationMetadata.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/SdkJsonGenerator.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/SdkJsonGenerator.java index 559af86a646e..0e91bfbd019e 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/SdkJsonGenerator.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/SdkJsonGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonFactory.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonFactory.java index 2ef486ce682b..c88840d4d927 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonFactory.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonGenerator.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonGenerator.java index e411878f5f67..98b83699849c 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonGenerator.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/StructuredJsonGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -32,8 +32,6 @@ public interface StructuredJsonGenerator { */ StructuredJsonGenerator NO_OP = new StructuredJsonGenerator() { - private final byte[] emptyBytes = new byte[0]; - @Override public StructuredJsonGenerator writeStartArray() { return this; @@ -126,7 +124,7 @@ public StructuredJsonGenerator writeNumber(String number) { @Override public byte[] getBytes() { - return emptyBytes; + return null; } @Override diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/AwsStructuredPlainJsonFactory.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/AwsStructuredPlainJsonFactory.java index ec1efc9166d0..14d97b854627 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/AwsStructuredPlainJsonFactory.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/AwsStructuredPlainJsonFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/MarshallerUtil.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/MarshallerUtil.java new file mode 100644 index 000000000000..fc5625289e61 --- /dev/null +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/MarshallerUtil.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.json.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.protocol.MarshallLocation; + +@SdkInternalApi +public final class MarshallerUtil { + private MarshallerUtil() { + } + + /** + * @return true if the location is in the URI, false otherwise. 
+ */ + public static boolean locationInUri(MarshallLocation location) { + switch (location) { + case PATH: + case QUERY_PARAM: + return true; + default: + return false; + } + } +} diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParser.java index 1392ceba41fa..914376cd4db1 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParser.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkArrayNode.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkArrayNode.java index 6088da13dab3..2f2521946205 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkArrayNode.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkArrayNode.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkEmbeddedObject.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkEmbeddedObject.java index ea70f1beabe5..1d2d413df357 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkEmbeddedObject.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkEmbeddedObject.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkJsonNode.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkJsonNode.java index 366b5e6e50c2..83a93e7370b5 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkJsonNode.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkJsonNode.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
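MarshallerUtil.locationInUri(...), added above, is a small predicate over MarshallLocation. A sketch of a hypothetical caller; the surrounding helper is an assumption, not SDK code:

import software.amazon.awssdk.core.protocol.MarshallLocation;
import software.amazon.awssdk.protocols.json.internal.MarshallerUtil;

class MarshallerUtilSketch {
    // Hypothetical helper: members bound to the request URI (PATH or QUERY_PARAM) are not written to the body.
    static boolean marshallsIntoBody(MarshallLocation location) {
        return !MarshallerUtil.locationInUri(location);
    }
}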
diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkNullNode.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkNullNode.java index 40056340b43c..3d8739cd4345 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkNullNode.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkNullNode.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkObjectNode.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkObjectNode.java index e8cc7d2a8fad..59ab40887a02 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkObjectNode.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkObjectNode.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkScalarNode.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkScalarNode.java index b75aed0d9456..98c1eb862b7e 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkScalarNode.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/dom/SdkScalarNode.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/HeaderMarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/HeaderMarshaller.java index fee5eaa946b4..c8af60708ac0 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/HeaderMarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/HeaderMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshaller.java index facfa5e7ab17..7b6ba85abbc5 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -24,14 +24,11 @@ * * @param Type to marshall. */ +@FunctionalInterface @SdkInternalApi public interface JsonMarshaller extends Marshaller { - JsonMarshaller NULL = new JsonMarshaller() { - @Override - public void marshall(Void val, JsonMarshallerContext context, String paramName, SdkField sdkField) { - } - }; + JsonMarshaller NULL = (val, context, paramName, sdkField) -> { }; /** * Marshall the data into the request. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshallerContext.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshallerContext.java index 15c4f97da95a..3d4581ed4a58 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshallerContext.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshallerContext.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshallerRegistry.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshallerRegistry.java index 3c50226973c6..a95beb6e2760 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshallerRegistry.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonMarshallerRegistry.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
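With JsonMarshaller now marked @FunctionalInterface, simple marshallers can be written as lambdas in the same style as the NULL constant above. A minimal sketch (illustrative only, not part of this changeset; a hypothetical String marshaller mirroring the existing SimpleTypeJsonMarshaller pattern):

    import software.amazon.awssdk.protocols.json.internal.marshall.JsonMarshaller;

    final class ExampleJsonMarshallers {
        // Writes the optional field name, then the string value, via the context's JSON generator.
        static final JsonMarshaller<String> STRING_EXAMPLE = (val, context, paramName, sdkField) -> {
            if (paramName != null) {
                context.jsonGenerator().writeFieldName(paramName);
            }
            context.jsonGenerator().writeValue(val);
        };

        private ExampleJsonMarshallers() {
        }
    }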
diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java index a34806b90179..18adefe81d61 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,6 +15,7 @@ package software.amazon.awssdk.protocols.json.internal.marshall; +import static software.amazon.awssdk.core.internal.util.Mimetype.MIMETYPE_EVENT_STREAM; import static software.amazon.awssdk.http.Header.CONTENT_LENGTH; import static software.amazon.awssdk.http.Header.CONTENT_TYPE; @@ -59,6 +60,8 @@ public class JsonProtocolMarshaller implements ProtocolMarshaller new ByteArrayInputStream(content)); - if (content.length > 0) { - request.putHeader(CONTENT_LENGTH, Integer.toString(content.length)); + + if (content != null) { + request.contentStreamProvider(() -> new ByteArrayInputStream(content)); + if (content.length > 0) { + request.putHeader(CONTENT_LENGTH, Integer.toString(content.length)); + } } } // We skip setting the default content type if the request is streaming as // content-type is determined based on the body of the stream - if (!request.headers().containsKey(CONTENT_TYPE) && contentType != null && !hasStreamingInput) { - request.putHeader(CONTENT_TYPE, contentType); + // TODO: !request.headers().containsKey(CONTENT_TYPE) does not work because request is created from line 77 + // and not from the original request + if (!request.headers().containsKey(CONTENT_TYPE) && !hasEvent) { + if (hasEventStreamingInput) { + request.putHeader(CONTENT_TYPE, MIMETYPE_EVENT_STREAM); + } else if (contentType != null && !hasStreamingInput && request.contentStreamProvider() != null) { + request.putHeader(CONTENT_TYPE, contentType); + } } + return request.build(); } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshallerBuilder.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshallerBuilder.java index c8b32bfb9719..1a4446d4776a 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshallerBuilder.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshallerBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/NullAsEmptyBodyProtocolRequestMarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/NullAsEmptyBodyProtocolRequestMarshaller.java index 26fb8c7ab0c0..9b3c444ef1fa 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/NullAsEmptyBodyProtocolRequestMarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/NullAsEmptyBodyProtocolRequestMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/QueryParamMarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/QueryParamMarshaller.java index d857e4d4feee..3d491fb6f392 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/QueryParamMarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/QueryParamMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/SimpleTypeJsonMarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/SimpleTypeJsonMarshaller.java index c269e52e5101..756691904224 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/SimpleTypeJsonMarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/SimpleTypeJsonMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -93,34 +93,31 @@ public void marshall(Boolean val, StructuredJsonGenerator jsonGenerator, JsonMar } }; - public static final JsonMarshaller INSTANT = new JsonMarshaller() { - @Override - public void marshall(Instant val, JsonMarshallerContext context, String paramName, SdkField sdkField) { - StructuredJsonGenerator jsonGenerator = context.jsonGenerator(); - if (paramName != null) { - jsonGenerator.writeFieldName(paramName); - } - TimestampFormatTrait trait = sdkField.getTrait(TimestampFormatTrait.class); - if (trait != null) { - switch (trait.format()) { - case UNIX_TIMESTAMP: - jsonGenerator.writeNumber(DateUtils.formatUnixTimestampInstant(val)); - break; - case RFC_822: - jsonGenerator.writeValue(DateUtils.formatRfc1123Date(val)); - break; - case ISO_8601: - jsonGenerator.writeValue(DateUtils.formatIso8601Date(val)); - break; - default: - throw SdkClientException.create("Unrecognized timestamp format - " + trait.format()); - } - } else { - // Important to fallback to the jsonGenerator implementation as that may differ per wire format, - // irrespective of protocol. I.E. CBOR would default to unix timestamp as milliseconds while JSON - // will default to unix timestamp as seconds with millisecond decimal precision. - jsonGenerator.writeValue(val); + public static final JsonMarshaller INSTANT = (val, context, paramName, sdkField) -> { + StructuredJsonGenerator jsonGenerator = context.jsonGenerator(); + if (paramName != null) { + jsonGenerator.writeFieldName(paramName); + } + TimestampFormatTrait trait = sdkField.getTrait(TimestampFormatTrait.class); + if (trait != null) { + switch (trait.format()) { + case UNIX_TIMESTAMP: + jsonGenerator.writeNumber(DateUtils.formatUnixTimestampInstant(val)); + break; + case RFC_822: + jsonGenerator.writeValue(DateUtils.formatRfc1123Date(val)); + break; + case ISO_8601: + jsonGenerator.writeValue(DateUtils.formatIso8601Date(val)); + break; + default: + throw SdkClientException.create("Unrecognized timestamp format - " + trait.format()); } + } else { + // Important to fallback to the jsonGenerator implementation as that may differ per wire format, + // irrespective of protocol. I.E. CBOR would default to unix timestamp as milliseconds while JSON + // will default to unix timestamp as seconds with millisecond decimal precision. + jsonGenerator.writeValue(val); } }; diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/SimpleTypePathMarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/SimpleTypePathMarshaller.java index e3d4810a06a5..8b48a9ab1b61 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/SimpleTypePathMarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/SimpleTypePathMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
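The timestamp branches above delegate to DateUtils; for reference, a small standalone sketch (illustrative only; the sample Instant and expected outputs are assumptions) showing the wire format each helper produces for the same value:

    import java.time.Instant;
    import software.amazon.awssdk.utils.DateUtils;

    public class TimestampFormatDemo {
        public static void main(String[] args) {
            Instant instant = Instant.ofEpochMilli(1_571_875_200_123L);
            // UNIX_TIMESTAMP: seconds with millisecond decimal precision, e.g. 1571875200.123
            System.out.println(DateUtils.formatUnixTimestampInstant(instant));
            // RFC_822 / RFC 1123, e.g. Thu, 24 Oct 2019 00:00:00 GMT
            System.out.println(DateUtils.formatRfc1123Date(instant));
            // ISO_8601, e.g. 2019-10-24T00:00:00.123Z
            System.out.println(DateUtils.formatIso8601Date(instant));
        }
    }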
diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonErrorMessageParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonErrorMessageParser.java index f9941934d51f..954504d7d25f 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonErrorMessageParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonErrorMessageParser.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java index f9c883a391ae..ea832b762e8e 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -85,6 +85,7 @@ private AwsServiceException unmarshall(SdkHttpFullResponse response, ExecutionAt exception.message(errorMessage); exception.statusCode(statusCode(response, modeledExceptionMetadata)); exception.requestId(getRequestIdFromHeaders(response.headers())); + exception.extendedRequestId(getExtendedRequestIdFromHeaders(response.headers())); return exception.build(); } @@ -133,7 +134,11 @@ private AwsErrorDetails extractAwsErrorDetails(SdkHttpFullResponse response, } private String getRequestIdFromHeaders(Map> headers) { - return SdkHttpUtils.firstMatchingHeader(headers, X_AMZN_REQUEST_ID_HEADER).orElse(null); + return SdkHttpUtils.firstMatchingHeaderFromCollection(headers, X_AMZN_REQUEST_ID_HEADERS).orElse(null); + } + + private String getExtendedRequestIdFromHeaders(Map> headers) { + return SdkHttpUtils.firstMatchingHeader(headers, X_AMZ_ID_2_HEADER).orElse(null); } public static Builder builder() { diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonResponseHandler.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonResponseHandler.java index 41f4b04e086c..7569bf3f8e36 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonResponseHandler.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonResponseHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -27,6 +27,7 @@ import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.utils.http.SdkHttpUtils; @SdkInternalApi public final class AwsJsonResponseHandler implements HttpResponseHandler { @@ -57,7 +58,9 @@ public T handle(SdkHttpFullResponse response, ExecutionAttributes executionAttri private AwsResponseMetadata generateResponseMetadata(SdkHttpResponse response) { Map metadata = new HashMap<>(); - metadata.put(AWS_REQUEST_ID, response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null)); + metadata.put(AWS_REQUEST_ID, SdkHttpUtils.firstMatchingHeaderFromCollection(response.headers(), + X_AMZN_REQUEST_ID_HEADERS) + .orElse(null)); response.headers().forEach((key, value) -> metadata.put(key, value.get(0))); return DefaultAwsResponseMetadata.create(metadata); diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/ErrorMessageParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/ErrorMessageParser.java index ef1b164f44f2..9eab0e57cd36 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/ErrorMessageParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/ErrorMessageParser.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/HeaderUnmarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/HeaderUnmarshaller.java index 8c029f2f7cfb..f7dbbdf266ad 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/HeaderUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/HeaderUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonErrorCodeParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonErrorCodeParser.java index 706c44e3cab7..dcaf190e3565 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonErrorCodeParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonErrorCodeParser.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java index f3c06838bbed..7d1c564454d4 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -38,6 +38,7 @@ import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.protocols.core.StringToInstant; import software.amazon.awssdk.protocols.core.StringToValueConverter; +import software.amazon.awssdk.protocols.json.internal.MarshallerUtil; import software.amazon.awssdk.protocols.json.internal.dom.JsonDomParser; import software.amazon.awssdk.protocols.json.internal.dom.SdkJsonNode; import software.amazon.awssdk.utils.builder.Buildable; @@ -121,11 +122,11 @@ private static SdkPojo unmarshallStructured(JsonUnmarshallerContext context, Sdk if (jsonContent == null || jsonContent.isNull()) { return null; } - SdkField valueInfo = field.getTrait(MapTrait.class).valueFieldInfo(); + SdkField valueInfo = field.getTrait(MapTrait.class).valueFieldInfo(); Map map = new HashMap<>(); jsonContent.fields().forEach((fieldName, value) -> { JsonUnmarshaller unmarshaller = context.getUnmarshaller(valueInfo.location(), valueInfo.marshallingType()); - map.put(fieldName, unmarshaller.unmarshall(context, value, (SdkField) valueInfo)); + map.put(fieldName, unmarshaller.unmarshall(context, value, valueInfo)); }); return map; } @@ -137,10 +138,10 @@ private static List unmarshallList(JsonUnmarshallerContext context, SdkJsonNo return jsonContent.items() .stream() .map(item -> { - SdkField memberInfo = field.getTrait(ListTrait.class).memberFieldInfo(); + SdkField memberInfo = field.getTrait(ListTrait.class).memberFieldInfo(); JsonUnmarshaller unmarshaller = context.getUnmarshaller(memberInfo.location(), memberInfo.marshallingType()); - return unmarshaller.unmarshall(context, item, (SdkField) memberInfo); + return unmarshaller.unmarshall(context, item, memberInfo); }) .collect(Collectors.toList()); } @@ -163,8 +164,8 @@ public T unmarshall(JsonUnmarshallerContext context, public TypeT unmarshall(SdkPojo sdkPojo, SdkHttpFullResponse response) throws IOException { - if (hasPayloadMembers(sdkPojo) && !hasExplicitBlobPayloadMember(sdkPojo)) { - SdkJsonNode jsonNode = parser.parse(ReleasableInputStream.wrap(response.content().orElse(null)).disableClose()); + if (hasPayloadMembersOnUnmarshall(sdkPojo) && !hasExplicitBlobPayloadMember(sdkPojo) && response.content().isPresent()) { + SdkJsonNode jsonNode = parser.parse(ReleasableInputStream.wrap(response.content().get()).disableClose()); return unmarshall(sdkPojo, response, jsonNode); } else { return unmarshall(sdkPojo, response, null); @@ -181,10 
+182,11 @@ private static boolean isExplicitPayloadMember(SdkField f) { return f.containsTrait(PayloadTrait.class); } - private boolean hasPayloadMembers(SdkPojo sdkPojo) { + private boolean hasPayloadMembersOnUnmarshall(SdkPojo sdkPojo) { return sdkPojo.sdkFields() - .stream() - .anyMatch(f -> f.location() == MarshallLocation.PAYLOAD); + .stream() + .anyMatch(f -> f.location() == MarshallLocation.PAYLOAD + || MarshallerUtil.locationInUri(f.location())); } public TypeT unmarshall(SdkPojo sdkPojo, @@ -202,8 +204,9 @@ private static TypeT unmarshallStructured(SdkPojo sdkPoj SdkJsonNode jsonContent, JsonUnmarshallerContext context) { for (SdkField field : sdkPojo.sdkFields()) { - if (isExplicitPayloadMember(field) && field.marshallingType() == MarshallingType.SDK_BYTES) { - field.set(sdkPojo, SdkBytes.fromInputStream(context.response().content().orElse(null))); + if (isExplicitPayloadMember(field) && field.marshallingType() == MarshallingType.SDK_BYTES && + context.response().content().isPresent()) { + field.set(sdkPojo, SdkBytes.fromInputStream(context.response().content().get())); } else { SdkJsonNode jsonFieldContent = getSdkJsonNode(jsonContent, field); JsonUnmarshaller unmarshaller = context.getUnmarshaller(field.location(), field.marshallingType()); diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonResponseHandler.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonResponseHandler.java index ff044e2f8668..4af3e85d0717 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonResponseHandler.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonResponseHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -74,6 +74,9 @@ public T handle(SdkHttpFullResponse response, ExecutionAttributes executionAttri response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER) .orElse("not available")); + SdkStandardLogger.REQUEST_ID_LOGGER.debug(() -> X_AMZ_ID_2_HEADER + " : " + + response.firstMatchingHeader(X_AMZ_ID_2_HEADER) + .orElse("not available")); try { T result = unmarshaller.unmarshall(pojoSupplier.apply(response), response); diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshaller.java index 64ce4e65e599..b71e0adbbdac 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallerContext.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallerContext.java index d5a49627627e..69fea61aa9e1 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallerContext.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallerContext.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ import software.amazon.awssdk.core.protocol.MarshallLocation; import software.amazon.awssdk.core.protocol.MarshallingType; import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.protocols.json.internal.MarshallerUtil; /** * Dependencies needed by implementations of {@link JsonUnmarshaller}. @@ -51,6 +52,11 @@ public SdkHttpFullResponse response() { * @throws SdkClientException if no unmarshaller is found. */ public JsonUnmarshaller getUnmarshaller(MarshallLocation location, MarshallingType marshallingType) { + // A member being in the URI on a response is nonsensical; when a member is declared to be somewhere in the URI, + // it should be found in the payload on response + if (MarshallerUtil.locationInUri(location)) { + location = MarshallLocation.PAYLOAD; + } return unmarshallerRegistry.getUnmarshaller(location, marshallingType); } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallerRegistry.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallerRegistry.java index 9bbd66cae0e3..ff461163f5d3 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallerRegistry.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallerRegistry.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/SdkJsonErrorMessageParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/SdkJsonErrorMessageParser.java index c747f5991c64..02fac7fb163b 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/SdkJsonErrorMessageParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/SdkJsonErrorMessageParser.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/AwsJsonErrorMessageParserTest.java b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/AwsJsonErrorMessageParserTest.java index b454876d192a..073ba292b3ca 100644 --- a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/AwsJsonErrorMessageParserTest.java +++ b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/AwsJsonErrorMessageParserTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/JsonErrorCodeParserTest.java b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/JsonErrorCodeParserTest.java index df433959049a..9c4b94faee02 100644 --- a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/JsonErrorCodeParserTest.java +++ b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/JsonErrorCodeParserTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/SdkJsonGeneratorTest.java b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/SdkJsonGeneratorTest.java index 058e1cf01e02..7a81210c06fe 100644 --- a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/SdkJsonGeneratorTest.java +++ b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/SdkJsonGeneratorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/ValidSdkObjects.java b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/ValidSdkObjects.java index ba368c8e9a28..4fbb78221816 100644 --- a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/ValidSdkObjects.java +++ b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/ValidSdkObjects.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParserTest.java b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParserTest.java index 96aa0b4cf508..ab8ae765a369 100644 --- a/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParserTest.java +++ b/core/protocols/aws-json-protocol/src/test/java/software/amazon/awssdk/protocols/json/internal/dom/JsonDomParserTest.java @@ -1,3 +1,18 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + package software.amazon.awssdk.protocols.json.internal.dom; diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index e8b5779b89fc..f1dc4937ada8 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -1,11 +1,26 @@ + + protocols software.amazon.awssdk - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsEc2ProtocolFactory.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsEc2ProtocolFactory.java index 3c6e5536b426..072b696587dc 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsEc2ProtocolFactory.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsEc2ProtocolFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsQueryProtocolFactory.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsQueryProtocolFactory.java index e097f849d623..e7e791f555a9 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsQueryProtocolFactory.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsQueryProtocolFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -28,6 +28,8 @@ import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.http.MetricCollectingHttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.core.OperationInfo; @@ -47,20 +49,20 @@ public class AwsQueryProtocolFactory { private final SdkClientConfiguration clientConfiguration; private final List modeledExceptions; private final Supplier defaultServiceExceptionSupplier; - private final AwsXmlErrorProtocolUnmarshaller errorUnmarshaller; + private final MetricCollectingHttpResponseHandler errorUnmarshaller; AwsQueryProtocolFactory(Builder builder) { this.clientConfiguration = builder.clientConfiguration; this.modeledExceptions = unmodifiableList(builder.modeledExceptions); this.defaultServiceExceptionSupplier = builder.defaultServiceExceptionSupplier; - this.errorUnmarshaller = AwsXmlErrorProtocolUnmarshaller + this.errorUnmarshaller = timeUnmarshalling(AwsXmlErrorProtocolUnmarshaller .builder() .defaultExceptionSupplier(defaultServiceExceptionSupplier) .exceptions(modeledExceptions) // We don't set result wrapper since that's handled by the errorRootExtractor .errorUnmarshaller(QueryProtocolUnmarshaller.builder().build()) .errorRootExtractor(this::getErrorRoot) - .build(); + .build()); } /** @@ -86,10 +88,9 @@ public final ProtocolMarshaller createProtocolMarshaller( * @return New {@link HttpResponseHandler} for success responses. */ public final HttpResponseHandler createResponseHandler(Supplier pojoSupplier) { - return new AwsQueryResponseHandler<>(QueryProtocolUnmarshaller.builder() - .hasResultWrapper(!isEc2()) - .build(), - r -> pojoSupplier.get()); + return timeUnmarshalling(new AwsQueryResponseHandler<>(QueryProtocolUnmarshaller.builder() + .hasResultWrapper(!isEc2()) + .build(), r -> pojoSupplier.get())); } /** @@ -100,6 +101,10 @@ public final HttpResponseHandler createErrorResponseHandler return errorUnmarshaller; } + private MetricCollectingHttpResponseHandler timeUnmarshalling(HttpResponseHandler delegate) { + return MetricCollectingHttpResponseHandler.create(CoreMetric.UNMARSHALLING_DURATION, delegate); + } + /** * Extracts the element from the root XML document. Method is protected as EC2 has a slightly * different location. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/interceptor/QueryParametersToBodyInterceptor.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/interceptor/QueryParametersToBodyInterceptor.java new file mode 100644 index 000000000000..3c8b5f7663a9 --- /dev/null +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/interceptor/QueryParametersToBodyInterceptor.java @@ -0,0 +1,81 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.query.interceptor; + +import static java.util.Collections.singletonList; +import static software.amazon.awssdk.utils.StringUtils.lowerCase; + +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.utils.CollectionUtils; +import software.amazon.awssdk.utils.http.SdkHttpUtils; + +/** + * Modifies an HTTP request by moving query parameters to the body under the following conditions: + * - It is a POST request + * - There is no content stream provider + * - There are query parameters to transfer + *
<p>
    + * This interceptor is automatically inserted by codegen for services using Query Protocol + */ +@SdkProtectedApi +public final class QueryParametersToBodyInterceptor implements ExecutionInterceptor { + + private static final String DEFAULT_CONTENT_TYPE = "application/x-www-form-urlencoded; charset=" + + lowerCase(StandardCharsets.UTF_8.toString()); + + @Override + public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, + ExecutionAttributes executionAttributes) { + + SdkHttpRequest httpRequest = context.httpRequest(); + + if (!(httpRequest instanceof SdkHttpFullRequest)) { + return httpRequest; + } + + SdkHttpFullRequest httpFullRequest = (SdkHttpFullRequest) httpRequest; + if (shouldPutParamsInBody(httpFullRequest)) { + return changeQueryParametersToFormData(httpFullRequest); + } + return httpFullRequest; + } + + private boolean shouldPutParamsInBody(SdkHttpFullRequest input) { + return input.method() == SdkHttpMethod.POST && + !input.contentStreamProvider().isPresent() && + !CollectionUtils.isNullOrEmpty(input.rawQueryParameters()); + } + + private SdkHttpRequest changeQueryParametersToFormData(SdkHttpFullRequest input) { + byte[] params = SdkHttpUtils.encodeAndFlattenFormData(input.rawQueryParameters()).orElse("") + .getBytes(StandardCharsets.UTF_8); + + return input.toBuilder().clearQueryParameters() + .contentStreamProvider(() -> new ByteArrayInputStream(params)) + .putHeader("Content-Length", singletonList(String.valueOf(params.length))) + .putHeader("Content-Type", singletonList(DEFAULT_CONTENT_TYPE)) + .build(); + } + +} diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/ListQueryMarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/ListQueryMarshaller.java index d0d76d03cee8..3acd1e9d5288 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/ListQueryMarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/ListQueryMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/MapQueryMarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/MapQueryMarshaller.java index bb58cc5f98f7..81951313ed87 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/MapQueryMarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/MapQueryMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
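To make the interceptor's transformation concrete, a minimal sketch (illustrative only; the host and parameter names are invented) of the same form-data encoding it applies when moving a POST request's query parameters into the body:

    import java.nio.charset.StandardCharsets;
    import software.amazon.awssdk.http.SdkHttpFullRequest;
    import software.amazon.awssdk.http.SdkHttpMethod;
    import software.amazon.awssdk.utils.http.SdkHttpUtils;

    public class QueryParamsToBodyDemo {
        public static void main(String[] args) {
            SdkHttpFullRequest request = SdkHttpFullRequest.builder()
                    .method(SdkHttpMethod.POST)
                    .protocol("https")
                    .host("query.example.amazonaws.com")
                    .encodedPath("/")
                    .putRawQueryParameter("Action", "ListWidgets")
                    .putRawQueryParameter("Version", "2012-11-05")
                    .build();

            // Same utility the interceptor uses before clearing the query string.
            byte[] body = SdkHttpUtils.encodeAndFlattenFormData(request.rawQueryParameters())
                                      .orElse("")
                                      .getBytes(StandardCharsets.UTF_8);

            // Prints something like: Action=ListWidgets&Version=2012-11-05
            System.out.println(new String(body, StandardCharsets.UTF_8));
        }
    }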
diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshaller.java index bf6046a1c531..a6ee83b905b1 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ * * @param Type being marshalled. */ +@FunctionalInterface @SdkInternalApi public interface QueryMarshaller extends Marshaller { diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshallerContext.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshallerContext.java index 36e933968fdd..9a8babbdfdce 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshallerContext.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshallerContext.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshallerRegistry.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshallerRegistry.java index b5bbdcc49d1b..55e15d405d88 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshallerRegistry.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryMarshallerRegistry.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryProtocolMarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryProtocolMarshaller.java index e6298d62c093..518ed1685e54 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryProtocolMarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/QueryProtocolMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/SimpleTypeQueryMarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/SimpleTypeQueryMarshaller.java index ff2c4f506bba..98fcdb53ea9d 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/SimpleTypeQueryMarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/marshall/SimpleTypeQueryMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsQueryResponseHandler.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsQueryResponseHandler.java index 6d52c5471f6f..bf0c23ad6fb9 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsQueryResponseHandler.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsQueryResponseHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -32,6 +32,7 @@ import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Pair; +import software.amazon.awssdk.utils.http.SdkHttpUtils; /** * Response handler for AWS/Query services and Amazon EC2 which is a dialect of the Query protocol. @@ -84,7 +85,8 @@ private T unmarshallResponse(SdkHttpFullResponse response) throws Exception { private AwsResponseMetadata generateResponseMetadata(SdkHttpResponse response, Map metadata) { if (!metadata.containsKey(AWS_REQUEST_ID)) { metadata.put(AWS_REQUEST_ID, - response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null)); + SdkHttpUtils.firstMatchingHeaderFromCollection(response.headers(), X_AMZN_REQUEST_ID_HEADERS) + .orElse(null)); } response.headers().forEach((key, value) -> metadata.put(key, value.get(0))); diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsXmlErrorUnmarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsXmlErrorUnmarshaller.java new file mode 100644 index 000000000000..b94d2e3ca48c --- /dev/null +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsXmlErrorUnmarshaller.java @@ -0,0 +1,244 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.query.internal.unmarshall; + +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.time.Duration; +import java.util.List; +import java.util.Optional; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.AwsExecutionAttribute; +import software.amazon.awssdk.awscore.exception.AwsErrorDetails; +import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.protocols.core.ExceptionMetadata; +import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; +import software.amazon.awssdk.protocols.query.unmarshall.XmlErrorUnmarshaller; + +/** + * Unmarshalls an AWS XML exception from parsed XML. + */ +@SdkInternalApi +public final class AwsXmlErrorUnmarshaller { + private static final String X_AMZN_REQUEST_ID_HEADER = "x-amzn-RequestId"; + private static final String X_AMZ_ID_2_HEADER = "x-amz-id-2"; + + private final List exceptions; + private final Supplier defaultExceptionSupplier; + + private final XmlErrorUnmarshaller errorUnmarshaller; + + private AwsXmlErrorUnmarshaller(Builder builder) { + this.exceptions = builder.exceptions; + this.errorUnmarshaller = builder.errorUnmarshaller; + this.defaultExceptionSupplier = builder.defaultExceptionSupplier; + } + + /** + * @return New Builder instance. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Unmarshal an AWS XML exception + * @param documentRoot The parsed payload document + * @param errorRoot The specific element of the parsed payload document that contains the error to be marshalled + * or empty if it could not be located. + * @param documentBytes The raw bytes of the original payload document if they are available + * @param response The HTTP response object + * @param executionAttributes {@link ExecutionAttributes} for the current execution + * @return An {@link AwsServiceException} unmarshalled from the XML. 
+ */ + public AwsServiceException unmarshall(XmlElement documentRoot, + Optional errorRoot, + Optional documentBytes, + SdkHttpFullResponse response, + ExecutionAttributes executionAttributes) { + String errorCode = getErrorCode(errorRoot); + + AwsServiceException.Builder builder = errorRoot + .map(e -> invokeSafely(() -> unmarshallFromErrorCode(response, e, errorCode))) + .orElseGet(this::defaultException); + + AwsErrorDetails awsErrorDetails = + AwsErrorDetails.builder() + .errorCode(errorCode) + .errorMessage(builder.message()) + .rawResponse(documentBytes.orElse(null)) + .sdkHttpResponse(response) + .serviceName(executionAttributes.getAttribute(AwsExecutionAttribute.SERVICE_NAME)) + .build(); + + builder.requestId(getRequestId(response, documentRoot)) + .extendedRequestId(getExtendedRequestId(response)) + .statusCode(response.statusCode()) + .clockSkew(getClockSkew(executionAttributes)) + .awsErrorDetails(awsErrorDetails); + + return builder.build(); + } + + private Duration getClockSkew(ExecutionAttributes executionAttributes) { + Integer timeOffset = executionAttributes.getAttribute(SdkExecutionAttribute.TIME_OFFSET); + return timeOffset == null ? null : Duration.ofSeconds(timeOffset); + } + + /** + * @return Builder for the default service exception. Used when the error code doesn't match + * any known modeled exception or when we can't determine the error code. + */ + private AwsServiceException.Builder defaultException() { + return (AwsServiceException.Builder) defaultExceptionSupplier.get(); + } + + /** + * Unmarshalls the XML into the appropriate modeled exception based on the error code. If the error code + * is not present or does not match any known exception we unmarshall into the base service exception. + * + * @param errorRoot Root of element. Contains any modeled fields of the exception. + * @param errorCode Error code identifying the modeled exception. + * @return Unmarshalled exception builder. + */ + private AwsServiceException.Builder unmarshallFromErrorCode(SdkHttpFullResponse response, + XmlElement errorRoot, + String errorCode) { + SdkPojo sdkPojo = exceptions.stream() + .filter(e -> e.errorCode().equals(errorCode)) + .map(ExceptionMetadata::exceptionBuilderSupplier) + .findAny() + .orElse(defaultExceptionSupplier) + .get(); + + AwsServiceException.Builder builder = + ((AwsServiceException) errorUnmarshaller.unmarshall(sdkPojo, errorRoot, response)).toBuilder(); + builder.message(getMessage(errorRoot)); + return builder; + } + + /** + * Extracts the error code (used to identify the modeled exception) from the + * element. + * + * @param errorRoot Error element root. + * @return Error code or null if not present. + */ + private String getErrorCode(Optional errorRoot) { + return errorRoot.map(e -> e.getOptionalElementByName("Code") + .map(XmlElement::textContent) + .orElse(null)) + .orElse(null); + } + + /** + * Extracts the error message from the XML document. The message is in the + * element for all services. + * + * @param errorRoot Error element root. + * @return Error message or null if not present. + */ + private String getMessage(XmlElement errorRoot) { + return errorRoot.getOptionalElementByName("Message") + .map(XmlElement::textContent) + .orElse(null); + } + + /** + * Extracts the request ID from the XML document. Request ID is a top level element + * for all protocols, it may be RequestId or RequestID depending on the service. + * + * @param document Root XML document. + * @return Request ID string or null if not present. 
+ */ + private String getRequestId(SdkHttpFullResponse response, XmlElement document) { + XmlElement requestId = document.getOptionalElementByName("RequestId") + .orElse(document.getElementByName("RequestID")); + return requestId != null ? + requestId.textContent() : + response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null); + } + + /** + * Extracts the extended request ID from the response headers. + * + * @param response The HTTP response object. + * @return Extended Request ID string or null if not present. + */ + private String getExtendedRequestId(SdkHttpFullResponse response) { + return response.firstMatchingHeader(X_AMZ_ID_2_HEADER).orElse(null); + } + + /** + * Builder for {@link AwsXmlErrorUnmarshaller}. + */ + public static final class Builder { + + private List exceptions; + private Supplier defaultExceptionSupplier; + private XmlErrorUnmarshaller errorUnmarshaller; + + private Builder() { + } + + /** + * List of {@link ExceptionMetadata} to represent the modeled exceptions for the service. + * For AWS services the error type is a string representing the type of the modeled exception. + * + * @return This builder for method chaining. + */ + public Builder exceptions(List exceptions) { + this.exceptions = exceptions; + return this; + } + + /** + * Default exception type if "error code" does not match any known modeled exception. This is the generated + * base exception for the service (i.e. DynamoDbException). + * + * @return This builder for method chaining. + */ + public Builder defaultExceptionSupplier(Supplier defaultExceptionSupplier) { + this.defaultExceptionSupplier = defaultExceptionSupplier; + return this; + } + + /** + * The unmarshaller to use. The unmarshaller only unmarshalls any modeled fields of the exception, + * additional metadata is extracted by {@link AwsXmlErrorUnmarshaller}. + * + * @param errorUnmarshaller Error unmarshaller to use. + * @return This builder for method chaining. + */ + public Builder errorUnmarshaller(XmlErrorUnmarshaller errorUnmarshaller) { + this.errorUnmarshaller = errorUnmarshaller; + return this; + } + + /** + * @return New instance of {@link AwsXmlErrorUnmarshaller}. + */ + public AwsXmlErrorUnmarshaller build() { + return new AwsXmlErrorUnmarshaller(this); + } + } +} diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/ListQueryUnmarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/ListQueryUnmarshaller.java index a0a550fafcec..fdbaf536b063 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/ListQueryUnmarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/ListQueryUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
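For context, the sketch below shows a representative (entirely made-up) query/XML error payload and how its elements map onto the exception that AwsXmlErrorUnmarshaller builds: the Code element feeds AwsErrorDetails.errorCode(), Message becomes the exception message, and a top-level RequestId (or the x-amzn-RequestId header) becomes requestId().

    // Illustrative payload only; the values are invented.
    final class SampleQueryErrorPayload {
        static final String XML =
            "<ErrorResponse>"
            + "<Error>"
            + "<Type>Sender</Type>"
            + "<Code>ValidationError</Code>"                                   // AwsErrorDetails.errorCode()
            + "<Message>Value at 'name' failed a constraint</Message>"         // exception message
            + "</Error>"
            + "<RequestId>42d59b56-7407-4c4a-be0f-4c88daeea257</RequestId>"    // AwsServiceException.requestId()
            + "</ErrorResponse>";
    }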
diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/MapQueryUnmarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/MapQueryUnmarshaller.java index fb1c33522c4c..9f7b36e276d6 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/MapQueryUnmarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/MapQueryUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryProtocolUnmarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryProtocolUnmarshaller.java index 3442d5c6d920..8b4359bb8cdc 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryProtocolUnmarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryProtocolUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshaller.java index 1b99e23c424b..0fac5dfb2882 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshallerContext.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshallerContext.java index e29409b5082f..3b1c77955af9 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshallerContext.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshallerContext.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshallerRegistry.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshallerRegistry.java index a4ad50f5f53c..89fbbffaa163 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshallerRegistry.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/QueryUnmarshallerRegistry.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/SimpleTypeQueryUnmarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/SimpleTypeQueryUnmarshaller.java index 04dc56da9583..161649126bbe 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/SimpleTypeQueryUnmarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/SimpleTypeQueryUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/AwsXmlErrorProtocolUnmarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/AwsXmlErrorProtocolUnmarshaller.java index 873ee9965c5f..009f6a14195a 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/AwsXmlErrorProtocolUnmarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/AwsXmlErrorProtocolUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -15,25 +15,21 @@ package software.amazon.awssdk.protocols.query.unmarshall; -import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; - import java.io.IOException; -import java.time.Duration; import java.util.List; import java.util.Optional; import java.util.function.Function; import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkProtectedApi; -import software.amazon.awssdk.awscore.AwsExecutionAttribute; import software.amazon.awssdk.awscore.exception.AwsErrorDetails; import software.amazon.awssdk.awscore.exception.AwsServiceException; import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.protocols.core.ExceptionMetadata; +import software.amazon.awssdk.protocols.query.internal.unmarshall.AwsXmlErrorUnmarshaller; import software.amazon.awssdk.utils.Pair; /** @@ -86,17 +82,16 @@ @SdkProtectedApi public final class AwsXmlErrorProtocolUnmarshaller implements HttpResponseHandler { - private final List exceptions; - private final Supplier defaultExceptionSupplier; + private final AwsXmlErrorUnmarshaller awsXmlErrorUnmarshaller; private final Function> errorRootExtractor; - private final XmlErrorUnmarshaller errorUnmarshaller; - private AwsXmlErrorProtocolUnmarshaller(Builder builder) { - this.exceptions = builder.exceptions; this.errorRootExtractor = builder.errorRootExtractor; - this.errorUnmarshaller = builder.errorUnmarshaller; - this.defaultExceptionSupplier = builder.defaultExceptionSupplier; + this.awsXmlErrorUnmarshaller = AwsXmlErrorUnmarshaller.builder() + .defaultExceptionSupplier(builder.defaultExceptionSupplier) + .exceptions(builder.exceptions) + .errorUnmarshaller(builder.errorUnmarshaller) + .build(); } @Override @@ -104,32 +99,8 @@ public AwsServiceException handle(SdkHttpFullResponse response, ExecutionAttribu Pair xmlAndBytes = parseXml(response); XmlElement document = xmlAndBytes.left(); Optional errorRoot = errorRootExtractor.apply(document); - String errorCode = getErrorCode(errorRoot); - - AwsServiceException.Builder builder = errorRoot - .map(e -> invokeSafely(() -> unmarshallFromErrorCode(response, e, errorCode))) - .orElseGet(this::defaultException); - - AwsErrorDetails awsErrorDetails = - AwsErrorDetails.builder() - .errorCode(errorCode) - .errorMessage(builder.message()) - .rawResponse(xmlAndBytes.right()) - .sdkHttpResponse(response) - .serviceName(executionAttributes.getAttribute(AwsExecutionAttribute.SERVICE_NAME)) - .build(); - - builder.requestId(getRequestId(response, document)) - .statusCode(response.statusCode()) - .clockSkew(getClockSkew(executionAttributes)) - .awsErrorDetails(awsErrorDetails); - - return builder.build(); - } - - private Duration getClockSkew(ExecutionAttributes executionAttributes) { - Integer timeOffset = executionAttributes.getAttribute(SdkExecutionAttribute.TIME_OFFSET); - return timeOffset == null ? null : Duration.ofSeconds(timeOffset); + return awsXmlErrorUnmarshaller.unmarshall(document, errorRoot, Optional.of(xmlAndBytes.right()), response, + executionAttributes); } /** @@ -173,79 +144,6 @@ private SdkBytes emptyXmlBytes() { return SdkBytes.fromUtf8String(""); } - /** - * @return Builder for the default service exception. 
Used when the error code doesn't match - * any known modeled exception or when we can't determine the error code. - */ - private AwsServiceException.Builder defaultException() { - return (AwsServiceException.Builder) defaultExceptionSupplier.get(); - } - - /** - * Unmarshalls the XML into the appropriate modeled exception based on the error code. If the error code - * is not present or does not match any known exception we unmarshall into the base service exception. - * - * @param errorRoot Root of element. Contains any modeled fields of the exception. - * @param errorCode Error code identifying the modeled exception. - * @return Unmarshalled exception builder. - */ - private AwsServiceException.Builder unmarshallFromErrorCode(SdkHttpFullResponse response, - XmlElement errorRoot, - String errorCode) { - SdkPojo sdkPojo = exceptions.stream() - .filter(e -> e.errorCode().equals(errorCode)) - .map(ExceptionMetadata::exceptionBuilderSupplier) - .findAny() - .orElse(defaultExceptionSupplier) - .get(); - - AwsServiceException.Builder builder = - ((AwsServiceException) errorUnmarshaller.unmarshall(sdkPojo, errorRoot, response)).toBuilder(); - builder.message(getMessage(errorRoot)); - return builder; - } - - /** - * Extracts the error code (used to identify the modeled exception) from the - * element. - * - * @param errorRoot Error element root. - * @return Error code or null if not present. - */ - private String getErrorCode(Optional errorRoot) { - return errorRoot.map(e -> e.getOptionalElementByName("Code") - .map(XmlElement::textContent) - .orElse(null)) - .orElse(null); - } - - /** - * Extracts the error message from the XML document. The message is in the - * element for all services. - * - * @param errorRoot Error element root. - * @return Error message or null if not present. - */ - private String getMessage(XmlElement errorRoot) { - return errorRoot.getOptionalElementByName("Message") - .map(XmlElement::textContent) - .orElse(null); - } - - /** - * Extracts the request ID from the XML document. Request ID is a top level element - * for all protocols, it may be RequestId or RequestID depending on the service. - * - * @param document Root XML document. - * @return Request ID string or null if not present. - */ - private String getRequestId(SdkHttpFullResponse response, XmlElement document) { - XmlElement requestId = document.getOptionalElementByName("RequestId") - .orElse(document.getElementByName("RequestID")); - return requestId != null ? - requestId.textContent() : - response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null); - } /** * @return New Builder instance. diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlDomParser.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlDomParser.java index 25a56400ce17..59be4ec94c33 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlDomParser.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlDomParser.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
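After this refactor, the only protocol-specific piece left in AwsXmlErrorProtocolUnmarshaller is the errorRootExtractor it is built with. As a rough, assumed sketch (not code from this change), the standard query layout nests the error element under the document root, while S3-style responses put it at the root:

    // Assumed illustration; XmlElement, Function and Optional imports are implied.
    Function<XmlElement, Optional<XmlElement>> queryStyleExtractor =
        document -> document.getOptionalElementByName("Error");   // <ErrorResponse><Error>...</Error>...</ErrorResponse>
    Function<XmlElement, Optional<XmlElement>> s3StyleExtractor =
        document -> "Error".equals(document.elementName())
                    ? Optional.of(document)                       // <Error>...</Error> is the root element
                    : Optional.empty();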
@@ -16,9 +16,13 @@ package software.amazon.awssdk.protocols.query.unmarshall; import java.io.InputStream; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; import javax.xml.stream.XMLEventReader; import javax.xml.stream.XMLInputFactory; import javax.xml.stream.XMLStreamException; +import javax.xml.stream.events.Attribute; import javax.xml.stream.events.StartElement; import javax.xml.stream.events.XMLEvent; import software.amazon.awssdk.annotations.SdkProtectedApi; @@ -59,6 +63,11 @@ public static XmlElement parse(InputStream inputStream) { private static XmlElement parseElement(StartElement startElement, XMLEventReader reader) throws XMLStreamException { XmlElement.Builder elementBuilder = XmlElement.builder() .elementName(startElement.getName().getLocalPart()); + + if (startElement.getAttributes().hasNext()) { + parseAttributes(startElement, elementBuilder); + } + XMLEvent nextEvent; do { nextEvent = reader.nextEvent(); @@ -71,6 +80,21 @@ private static XmlElement parseElement(StartElement startElement, XMLEventReader return elementBuilder.build(); } + /** + * Parse the attributes of the element. + */ + @SuppressWarnings("unchecked") + private static void parseAttributes(StartElement startElement, XmlElement.Builder elementBuilder) { + Iterator iterator = startElement.getAttributes(); + Map attributes = new HashMap<>(); + iterator.forEachRemaining(a -> { + String key = a.getName().getPrefix() + ":" + a.getName().getLocalPart(); + attributes.put(key, a.getValue()); + }); + + elementBuilder.attributes(attributes); + } + /** * Reads all characters until the next end element event. * diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlElement.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlElement.java index 1fc044fd464f..ecb8a3ed1eff 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlElement.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlElement.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.exception.SdkClientException; @@ -35,12 +36,14 @@ public final class XmlElement { private final HashMap> childrenByElement; private final List children; private final String textContent; + private final Map attributes; private XmlElement(Builder builder) { this.elementName = builder.elementName; this.childrenByElement = new HashMap<>(builder.childrenByElement); this.children = Collections.unmodifiableList(new ArrayList<>(builder.children)); this.textContent = builder.textContent; + this.attributes = Collections.unmodifiableMap(new HashMap<>(builder.attributes)); } /** @@ -109,6 +112,20 @@ public String textContent() { return textContent; } + /** + * Retrieves an optional attribute by attribute name. 
+ */ + public Optional getOptionalAttributeByName(String attribute) { + return Optional.ofNullable(attributes.get(attribute)); + } + + /** + * Retrieves the attributes associated with the element + */ + public Map attributes() { + return attributes; + } + /** * @return New {@link Builder} instance. */ @@ -129,9 +146,10 @@ public static XmlElement empty() { public static final class Builder { private String elementName; - private final HashMap> childrenByElement = new HashMap<>(); + private final Map> childrenByElement = new HashMap<>(); private final List children = new ArrayList<>(); private String textContent = ""; + private Map attributes = new HashMap<>(); private Builder() { } @@ -153,6 +171,11 @@ public Builder textContent(String textContent) { return this; } + public Builder attributes(Map attributes) { + this.attributes = attributes; + return this; + } + public XmlElement build() { return new XmlElement(this); } diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlErrorUnmarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlErrorUnmarshaller.java index 4613e081dd5c..1c03c36a8970 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlErrorUnmarshaller.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/unmarshall/XmlErrorUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-query-protocol/src/test/java/software/amazon/awssdk/protocols/query/XmlDomParserTest.java b/core/protocols/aws-query-protocol/src/test/java/software/amazon/awssdk/protocols/query/XmlDomParserTest.java index 640cde5eacc6..16ea0957c23e 100644 --- a/core/protocols/aws-query-protocol/src/test/java/software/amazon/awssdk/protocols/query/XmlDomParserTest.java +++ b/core/protocols/aws-query-protocol/src/test/java/software/amazon/awssdk/protocols/query/XmlDomParserTest.java @@ -1,3 +1,18 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + package software.amazon.awssdk.protocols.query; @@ -33,6 +48,22 @@ public void simpleXmlDocument_ParsedCorrectly() { .isEqualTo("42"); } + @Test + public void xmlWithAttributes_ParsedCorrectly() { + String xml = "" + + "" + + " stringVal" + + ""; + XmlElement element = XmlDomParser.parse(new StringInputStream(xml)); + assertThat(element.elementName()).isEqualTo("Struct"); + assertThat(element.children()).hasSize(1); + assertThat(element.getElementsByName("stringMember")) + .hasSize(1); + assertThat(element.attributes()).hasSize(2); + assertThat(element.getOptionalAttributeByName("xsi:type").get()).isEqualTo("foo"); + assertThat(element.getOptionalAttributeByName("xsi:nil").get()).isEqualTo("bar"); + } + @Test public void multipleElementsWithSameName_ParsedCorrectly() { String xml = "" diff --git a/core/protocols/aws-query-protocol/src/test/java/software/amazon/awssdk/protocols/query/interceptor/QueryParametersToBodyInterceptorTest.java b/core/protocols/aws-query-protocol/src/test/java/software/amazon/awssdk/protocols/query/interceptor/QueryParametersToBodyInterceptorTest.java new file mode 100644 index 000000000000..2398ab806589 --- /dev/null +++ b/core/protocols/aws-query-protocol/src/test/java/software/amazon/awssdk/protocols/query/interceptor/QueryParametersToBodyInterceptorTest.java @@ -0,0 +1,154 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.protocols.query.interceptor; + +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.Protocol; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.utils.IoUtils; + +import java.io.ByteArrayInputStream; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.Optional; +import java.util.stream.Stream; + +import static java.util.Collections.singletonList; +import static org.assertj.core.api.Assertions.assertThat; + +public class QueryParametersToBodyInterceptorTest { + + public static final URI HTTP_LOCALHOST = URI.create("http://localhost:8080"); + + private QueryParametersToBodyInterceptor interceptor; + private ExecutionAttributes executionAttributes; + + private SdkHttpFullRequest.Builder requestBuilder; + + @Before + public void setup() { + + interceptor = new QueryParametersToBodyInterceptor(); + executionAttributes = new ExecutionAttributes(); + + requestBuilder = SdkHttpFullRequest.builder() + .protocol(Protocol.HTTPS.toString()) + .method(SdkHttpMethod.POST) + .putRawQueryParameter("key", singletonList("value")) + .uri(HTTP_LOCALHOST); + } + + @Test + public void postRequestsWithNoBodyHaveTheirParametersMovedToTheBody() throws Exception { + + SdkHttpFullRequest request = requestBuilder.build(); + + SdkHttpFullRequest output = (SdkHttpFullRequest) interceptor.modifyHttpRequest( + new HttpRequestOnlyContext(request, null), executionAttributes); + + assertThat(output.rawQueryParameters()).hasSize(0); + assertThat(output.headers()) + .containsKey("Content-Length") + .containsEntry("Content-Type", singletonList("application/x-www-form-urlencoded; charset=utf-8")); + assertThat(output.contentStreamProvider()).isNotEmpty(); + } + + @Test + public void nonPostRequestsWithNoBodyAreUnaltered() throws Exception { + Stream.of(SdkHttpMethod.values()) + .filter(m -> !m.equals(SdkHttpMethod.POST)) + .forEach(this::nonPostRequestsUnaltered); + } + + @Test + public void postWithContentIsUnaltered() throws Exception { + byte[] contentBytes = "hello".getBytes(StandardCharsets.UTF_8); + ContentStreamProvider contentProvider = () -> new ByteArrayInputStream(contentBytes); + + SdkHttpFullRequest request = requestBuilder.contentStreamProvider(contentProvider).build(); + + SdkHttpFullRequest output = (SdkHttpFullRequest) interceptor.modifyHttpRequest( + new HttpRequestOnlyContext(request, null), executionAttributes); + + assertThat(output.rawQueryParameters()).hasSize(1); + assertThat(output.headers()).hasSize(0); + assertThat(IoUtils.toByteArray(output.contentStreamProvider().get().newStream())).isEqualTo(contentBytes); + } + + @Test + public void onlyAlterRequestsIfParamsArePresent() throws Exception { + SdkHttpFullRequest request = requestBuilder.clearQueryParameters().build(); + + SdkHttpFullRequest output = (SdkHttpFullRequest) interceptor.modifyHttpRequest( + new HttpRequestOnlyContext(request, null), executionAttributes); + + assertThat(output.rawQueryParameters()).hasSize(0); + assertThat(output.headers()).hasSize(0); + assertThat(output.contentStreamProvider()).isEmpty(); + } + + 
private void nonPostRequestsUnaltered(SdkHttpMethod method) { + + SdkHttpFullRequest request = requestBuilder.method(method).build(); + + SdkHttpFullRequest output = (SdkHttpFullRequest) interceptor.modifyHttpRequest( + new HttpRequestOnlyContext(request, null), executionAttributes); + + assertThat(output.rawQueryParameters()).hasSize(1); + assertThat(output.headers()).hasSize(0); + assertThat(output.contentStreamProvider()).isEmpty(); + } + + public final class HttpRequestOnlyContext implements software.amazon.awssdk.core.interceptor.Context.ModifyHttpRequest { + + private final SdkHttpRequest request; + private final RequestBody requestBody; + + public HttpRequestOnlyContext(SdkHttpRequest request, + RequestBody requestBody) { + this.request = request; + this.requestBody = requestBody; + } + + @Override + public SdkRequest request() { + return null; + } + + @Override + public SdkHttpRequest httpRequest() { + return request; + } + + @Override + public Optional requestBody() { + return Optional.ofNullable(requestBody); + } + + @Override + public Optional asyncRequestBody() { + return Optional.empty(); + } + } +} diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index 48c05e6ca07c..7053188832de 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -1,11 +1,26 @@ + + protocols software.amazon.awssdk - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT 4.0.0 @@ -61,6 +76,25 @@ assertj-core test + + org.mockito + mockito-core + test + - + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.protocols.xml + + + + + + diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsS3ProtocolFactory.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsS3ProtocolFactory.java index 24390da1e211..448f994fff09 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsS3ProtocolFactory.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsS3ProtocolFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
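Returning to the XmlDomParser and XmlElement changes earlier in this diff, a small self-contained sketch of the new attribute API (the xmlns:xsi declaration is added here only so the snippet is well-formed on its own; namespace declarations are not reported as attributes):

    import software.amazon.awssdk.protocols.query.unmarshall.XmlDomParser;
    import software.amazon.awssdk.protocols.query.unmarshall.XmlElement;
    import software.amazon.awssdk.utils.StringInputStream;

    public class XmlAttributeParsingExample {
        public static void main(String[] args) {
            String xml = "<Struct xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\""
                         + " xsi:type=\"foo\" xsi:nil=\"bar\">"
                         + "<stringMember>stringVal</stringMember>"
                         + "</Struct>";
            XmlElement element = XmlDomParser.parse(new StringInputStream(xml));
            // Attribute keys are "prefix:localName", as assembled by parseAttributes.
            System.out.println(element.elementName());                                      // Struct
            System.out.println(element.getOptionalAttributeByName("xsi:type").orElse(""));  // foo
            System.out.println(element.attributes().size());                                // 2
        }
    }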
@@ -16,8 +16,15 @@ package software.amazon.awssdk.protocols.xml; import java.util.Optional; +import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.awscore.AwsResponse; +import software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; +import software.amazon.awssdk.protocols.xml.internal.unmarshall.AwsXmlPredicatedResponseHandler; +import software.amazon.awssdk.protocols.xml.internal.unmarshall.DecorateErrorFromResponseBodyUnmarshaller; /** * Factory to generate the various protocol handlers and generators to be used for communicating with @@ -25,7 +32,6 @@ */ @SdkProtectedApi public final class AwsS3ProtocolFactory extends AwsXmlProtocolFactory { - private AwsS3ProtocolFactory(Builder builder) { super(builder); } @@ -57,4 +63,21 @@ public AwsS3ProtocolFactory build() { return new AwsS3ProtocolFactory(this); } } + + @Override + public HttpResponseHandler> createCombinedResponseHandler( + Supplier pojoSupplier, XmlOperationMetadata staxOperationMetadata) { + + return createErrorCouldBeInBodyResponseHandler(pojoSupplier, staxOperationMetadata); + } + + private HttpResponseHandler> createErrorCouldBeInBodyResponseHandler( + Supplier pojoSupplier, XmlOperationMetadata staxOperationMetadata) { + + return new AwsXmlPredicatedResponseHandler<>(r -> pojoSupplier.get(), + createResponseTransformer(pojoSupplier), + createErrorTransformer(), + DecorateErrorFromResponseBodyUnmarshaller.of(this::getErrorRoot), + staxOperationMetadata.isHasStreamingSuccessResponse()); + } } diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsXmlProtocolFactory.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsXmlProtocolFactory.java index 0bf4377430b8..296ba2483e77 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsXmlProtocolFactory.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsXmlProtocolFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
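The predicated handler wired up above exists because some S3 operations can return an HTTP 200 whose body is actually an error document, so success cannot be decided from the status code alone. A simplified, assumed version of the decision that DecorateErrorFromResponseBodyUnmarshaller encapsulates:

    // Assumed simplification: 'httpResponse' is the SdkHttpFullResponse and 'parsedRootXml'
    // is the XmlElement parsed from the payload (possibly empty when there is no content).
    boolean treatAsError = !httpResponse.isSuccessful()
                           || "Error".equals(parsedRootXml.elementName());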
@@ -20,14 +20,19 @@ import java.util.ArrayList; import java.util.List; import java.util.Optional; +import java.util.function.Function; import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.awscore.AwsResponse; import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.Response; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.http.MetricCollectingHttpResponseHandler; +import software.amazon.awssdk.core.internal.http.CombinedResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.core.OperationInfo; @@ -37,7 +42,10 @@ import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; import software.amazon.awssdk.protocols.xml.internal.marshall.XmlGenerator; import software.amazon.awssdk.protocols.xml.internal.marshall.XmlProtocolMarshaller; +import software.amazon.awssdk.protocols.xml.internal.unmarshall.AwsXmlErrorTransformer; import software.amazon.awssdk.protocols.xml.internal.unmarshall.AwsXmlResponseHandler; +import software.amazon.awssdk.protocols.xml.internal.unmarshall.AwsXmlResponseTransformer; +import software.amazon.awssdk.protocols.xml.internal.unmarshall.AwsXmlUnmarshallingContext; import software.amazon.awssdk.protocols.xml.internal.unmarshall.XmlProtocolUnmarshaller; /** @@ -62,26 +70,29 @@ public class AwsXmlProtocolFactory { public static final OperationMetadataAttribute ROOT_MARSHALL_LOCATION_ATTRIBUTE = new OperationMetadataAttribute<>(String.class); + private static final XmlProtocolUnmarshaller XML_PROTOCOL_UNMARSHALLER = XmlProtocolUnmarshaller.create(); + private final List modeledExceptions; private final Supplier defaultServiceExceptionSupplier; - private final AwsXmlErrorProtocolUnmarshaller errorUnmarshaller; + private final HttpResponseHandler errorUnmarshaller; private final SdkClientConfiguration clientConfiguration; AwsXmlProtocolFactory(Builder builder) { this.modeledExceptions = unmodifiableList(builder.modeledExceptions); this.defaultServiceExceptionSupplier = builder.defaultServiceExceptionSupplier; this.clientConfiguration = builder.clientConfiguration; - this.errorUnmarshaller = AwsXmlErrorProtocolUnmarshaller - .builder() - .defaultExceptionSupplier(defaultServiceExceptionSupplier) - .exceptions(modeledExceptions) - .errorUnmarshaller(XmlProtocolUnmarshaller.builder().build()) - .errorRootExtractor(this::getErrorRoot) - .build(); + + this.errorUnmarshaller = timeUnmarshalling( + AwsXmlErrorProtocolUnmarshaller.builder() + .defaultExceptionSupplier(defaultServiceExceptionSupplier) + .exceptions(modeledExceptions) + .errorUnmarshaller(XML_PROTOCOL_UNMARSHALLER) + .errorRootExtractor(this::getErrorRoot) + .build()); } /** - * Creates an instance of {@link XmlProtocolMarshaller} to be used for marshalling the requess. + * Creates an instance of {@link XmlProtocolMarshaller} to be used for marshalling the request. 
* * @param operationInfo Info required to marshall the request */ @@ -95,15 +106,40 @@ public ProtocolMarshaller createProtocolMarshaller(Operation public HttpResponseHandler createResponseHandler(Supplier pojoSupplier, XmlOperationMetadata staxOperationMetadata) { - return new AwsXmlResponseHandler<>( - XmlProtocolUnmarshaller.builder().build(), r -> pojoSupplier.get(), - staxOperationMetadata.isHasStreamingSuccessResponse()); + return timeUnmarshalling(new AwsXmlResponseHandler<>(XML_PROTOCOL_UNMARSHALLER, r -> pojoSupplier.get(), + staxOperationMetadata.isHasStreamingSuccessResponse())); + } + + protected Function createResponseTransformer( + Supplier pojoSupplier) { + + return new AwsXmlResponseTransformer<>( + XML_PROTOCOL_UNMARSHALLER, r -> pojoSupplier.get()); + } + + protected Function createErrorTransformer() { + return AwsXmlErrorTransformer.builder() + .defaultExceptionSupplier(defaultServiceExceptionSupplier) + .exceptions(modeledExceptions) + .errorUnmarshaller(XML_PROTOCOL_UNMARSHALLER) + .build(); } public HttpResponseHandler createErrorResponseHandler() { return errorUnmarshaller; } + private MetricCollectingHttpResponseHandler timeUnmarshalling(HttpResponseHandler delegate) { + return MetricCollectingHttpResponseHandler.create(CoreMetric.UNMARSHALLING_DURATION, delegate); + } + + public HttpResponseHandler> createCombinedResponseHandler( + Supplier pojoSupplier, XmlOperationMetadata staxOperationMetadata) { + + return new CombinedResponseHandler<>(createResponseHandler(pojoSupplier, staxOperationMetadata), + createErrorResponseHandler()); + } + /** * Extracts the element from the root XML document. This method is protected as S3 has * a slightly different location. diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/XmlOperationMetadata.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/XmlOperationMetadata.java index b43d33f065e4..dfda61a4528e 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/XmlOperationMetadata.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/XmlOperationMetadata.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/HeaderMarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/HeaderMarshaller.java index b5fc8259cdab..8ad1c8264242 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/HeaderMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/HeaderMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
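The timeUnmarshalling wrapper above decorates each handler with MetricCollectingHttpResponseHandler so that time spent in handle() is reported as CoreMetric.UNMARSHALLING_DURATION. Conceptually, and as an assumed equivalent with the metric collector lookup omitted, the decoration amounts to:

    // 'delegate', 'response', 'executionAttributes' and 'metricCollector' are assumed to be in scope.
    long start = System.nanoTime();
    try {
        return delegate.handle(response, executionAttributes);
    } finally {
        metricCollector.reportMetric(CoreMetric.UNMARSHALLING_DURATION,
                                     Duration.ofNanos(System.nanoTime() - start));
    }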
diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/QueryParamMarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/QueryParamMarshaller.java index c72837240fc8..f7f905892af6 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/QueryParamMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/QueryParamMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/SimpleTypePathMarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/SimpleTypePathMarshaller.java index 2f8454be8e30..9e2620ee5e93 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/SimpleTypePathMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/SimpleTypePathMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlGenerator.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlGenerator.java index 6996355b6e51..3ce773f633f0 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlGenerator.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -16,6 +16,7 @@ package software.amazon.awssdk.protocols.xml.internal.marshall; import java.io.StringWriter; +import java.util.Map; import software.amazon.awssdk.annotations.SdkInternalApi; /** @@ -49,6 +50,16 @@ public void startElement(String element) { xmlWriter.startElement(element); } + /** + * Start to write the element + * + * @param element the element to write + * @param attributes the attributes + */ + public void startElement(String element, Map attributes) { + xmlWriter.startElement(element, attributes); + } + public void endElement() { xmlWriter.endElement(); } diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshaller.java index 458572480c09..e26bd4b38f19 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ * * @param Type to marshall. */ +@FunctionalInterface @SdkInternalApi public interface XmlMarshaller extends Marshaller { diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshallerContext.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshallerContext.java index 00d03de296d8..7e7b3bf45035 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshallerContext.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshallerContext.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshallerRegistry.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshallerRegistry.java index bc2fa3a7e346..adf8bef921ae 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshallerRegistry.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlMarshallerRegistry.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
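To make the new two-argument startElement concrete: given an attribute map, XmlGenerator delegates to XmlWriter (further below), which writes the attributes inline in the start tag. XmlPayloadMarshaller, the next file in this diff, builds that map from an XmlAttributesTrait before making the call. Illustrative values only:

    // 'xmlGenerator' is an assumed XmlGenerator instance; names and values are invented.
    Map<String, String> attributes = new LinkedHashMap<>();
    attributes.put("xsi:type", "foo");
    xmlGenerator.startElement("Struct", attributes);   // emits: <Struct xsi:type="foo">
    xmlGenerator.endElement();                         // emits: </Struct>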
diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlPayloadMarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlPayloadMarshaller.java index 8ac3c00c95d5..b7a78f3ea4ca 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlPayloadMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlPayloadMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -17,6 +17,8 @@ import java.math.BigDecimal; import java.time.Instant; +import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -26,6 +28,8 @@ import software.amazon.awssdk.core.protocol.MarshallLocation; import software.amazon.awssdk.core.traits.ListTrait; import software.amazon.awssdk.core.traits.MapTrait; +import software.amazon.awssdk.core.traits.XmlAttributeTrait; +import software.amazon.awssdk.core.traits.XmlAttributesTrait; import software.amazon.awssdk.core.util.SdkAutoConstructList; import software.amazon.awssdk.core.util.SdkAutoConstructMap; import software.amazon.awssdk.protocols.core.ValueToStringConverter; @@ -35,7 +39,7 @@ public class XmlPayloadMarshaller { public static final XmlMarshaller STRING = new BasePayloadMarshaller<>(ValueToStringConverter.FROM_STRING); - public static final XmlMarshaller INTEGER = new BasePayloadMarshaller<>(ValueToStringConverter.FROM_INTEGER); + public static final XmlMarshaller INTEGER = new BasePayloadMarshaller<>(ValueToStringConverter.FROM_INTEGER); public static final XmlMarshaller LONG = new BasePayloadMarshaller<>(ValueToStringConverter.FROM_LONG); @@ -164,7 +168,26 @@ public void marshall(T val, XmlMarshallerContext context, String paramName, SdkF return; } - context.xmlGenerator().startElement(paramName); + // Should ignore marshalling for xml attribute + if (isXmlAttribute(sdkField)) { + return; + } + + if (sdkField != null && sdkField.getOptionalTrait(XmlAttributesTrait.class).isPresent()) { + XmlAttributesTrait attributeTrait = sdkField.getTrait(XmlAttributesTrait.class); + Map attributes = attributeTrait.attributes() + .entrySet() + .stream() + .collect(LinkedHashMap::new, (m, e) -> m.put(e.getKey(), + e.getValue() + .attributeGetter() + .apply(val)), + HashMap::putAll); + context.xmlGenerator().startElement(paramName, attributes); + } else { + context.xmlGenerator().startElement(paramName); + } + marshall(val, context, paramName, sdkField, converter); context.xmlGenerator().endElement(); } @@ -177,6 +200,10 @@ void marshall(T val, XmlMarshallerContext context, String paramName, SdkField protected boolean shouldEmit(T val, String paramName) { return val != null && paramName != null; } + + private boolean isXmlAttribute(SdkField sdkField) { + return sdkField != null && sdkField.getOptionalTrait(XmlAttributeTrait.class).isPresent(); + } } } diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java 
b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java index 868ef9e54eee..10b07ea0d795 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlWriter.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlWriter.java index 39ee79c4a3fc..4ef99766def8 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlWriter.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlWriter.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ import java.io.Writer; import java.nio.ByteBuffer; import java.util.Date; +import java.util.Map; import java.util.Stack; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.exception.SdkClientException; @@ -86,6 +87,23 @@ XmlWriter startElement(String element) { return this; } + /** + * Starts writing an element together with its XML attributes. + * + * @param element the element to write + * @param attributes the XML attributes + * @return the XmlWriter + */ + XmlWriter startElement(String element, Map<String, String> attributes) { + append("<" + element); + for (Map.Entry<String, String> attribute : attributes.entrySet()) { + append(" " + attribute.getKey() + "=\"" + attribute.getValue() + "\""); + } + append(">"); + elementStack.push(element); + return this; + } + /** * Closes the last opened element at the current position in the in-progress * XML document. diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlErrorTransformer.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlErrorTransformer.java new file mode 100644 index 000000000000..d5a3a2f304c0 --- /dev/null +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlErrorTransformer.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.protocols.xml.internal.unmarshall; + +import java.util.List; +import java.util.Optional; +import java.util.function.Function; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.protocols.core.ExceptionMetadata; +import software.amazon.awssdk.protocols.query.internal.unmarshall.AwsXmlErrorUnmarshaller; +import software.amazon.awssdk.protocols.query.unmarshall.XmlErrorUnmarshaller; + +/** + * A transformer function that takes a parsed XML response and converts it into an {@link AwsServiceException}. Used + * as a component in the {@link AwsXmlPredicatedResponseHandler}. + */ +@SdkInternalApi +public final class AwsXmlErrorTransformer + implements Function { + + private final AwsXmlErrorUnmarshaller awsXmlErrorUnmarshaller; + + private AwsXmlErrorTransformer(Builder builder) { + this.awsXmlErrorUnmarshaller = AwsXmlErrorUnmarshaller.builder() + .defaultExceptionSupplier(builder.defaultExceptionSupplier) + .exceptions(builder.exceptions) + .errorUnmarshaller(builder.errorUnmarshaller) + .build(); + } + + @Override + public AwsServiceException apply(AwsXmlUnmarshallingContext context) { + return awsXmlErrorUnmarshaller.unmarshall(context.parsedRootXml(), + Optional.ofNullable(context.parsedErrorXml()), + Optional.empty(), + context.sdkHttpFullResponse(), + context.executionAttributes()); + } + + /** + * @return New Builder instance. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder for {@link AwsXmlErrorTransformer}. + */ + public static final class Builder { + + private List exceptions; + private Supplier defaultExceptionSupplier; + private XmlErrorUnmarshaller errorUnmarshaller; + + private Builder() { + } + + /** + * List of {@link ExceptionMetadata} to represent the modeled exceptions for the service. + * For AWS services the error type is a string representing the type of the modeled exception. + * + * @return This builder for method chaining. + */ + public Builder exceptions(List exceptions) { + this.exceptions = exceptions; + return this; + } + + /** + * Default exception type if "error code" does not match any known modeled exception. This is the generated + * base exception for the service (i.e. DynamoDbException). + * + * @return This builder for method chaining. + */ + public Builder defaultExceptionSupplier(Supplier defaultExceptionSupplier) { + this.defaultExceptionSupplier = defaultExceptionSupplier; + return this; + } + + /** + * The unmarshaller to use. The unmarshaller only unmarshalls any modeled fields of the exception, + * additional metadata is extracted by {@link AwsXmlErrorTransformer}. + * + * @param errorUnmarshaller Error unmarshaller to use. + * @return This builder for method chaining. + */ + public Builder errorUnmarshaller(XmlErrorUnmarshaller errorUnmarshaller) { + this.errorUnmarshaller = errorUnmarshaller; + return this; + } + + /** + * @return New instance of {@link AwsXmlErrorTransformer}. 
+ */ + public AwsXmlErrorTransformer build() { + return new AwsXmlErrorTransformer(this); + } + } +} diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlPredicatedResponseHandler.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlPredicatedResponseHandler.java new file mode 100644 index 000000000000..d3d255496669 --- /dev/null +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlPredicatedResponseHandler.java @@ -0,0 +1,180 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.xml.internal.unmarshall; + +import java.util.Optional; +import java.util.function.Function; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.SdkStandardLogger; +import software.amazon.awssdk.core.exception.RetryableException; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; +import software.amazon.awssdk.utils.IoUtils; + +/** + * Unmarshalls an HTTP response into either a successful response POJO, or into a (possibly modeled) exception based + * on a predicate that the unmarshalled response can be tested against. Returns a wrapper {@link Response} object which + * may contain either the unmarshalled success POJO, or the unmarshalled exception. + * + * @param Type of successful unmarshalled POJO. 
+ */ +@SdkInternalApi +public class AwsXmlPredicatedResponseHandler implements HttpResponseHandler> { + private static final Logger log = LoggerFactory.getLogger(AwsXmlPredicatedResponseHandler.class); + + private final Function pojoSupplier; + private final Function successResponseTransformer; + private final Function errorResponseTransformer; + private final Function decorateContextWithError; + private final boolean needsConnectionLeftOpen; + + /** + * Standard constructor + * @param pojoSupplier A method that supplies an empty builder of the correct type + * @param successResponseTransformer A function that can unmarshall a response object from parsed XML + * @param errorResponseTransformer A function that can unmarshall an exception object from parsed XML + * @param decorateContextWithError A function that determines if the response was an error or not + * @param needsConnectionLeftOpen true if the underlying connection should not be closed once parsed + */ + public AwsXmlPredicatedResponseHandler( + Function pojoSupplier, + Function successResponseTransformer, + Function errorResponseTransformer, + Function decorateContextWithError, + boolean needsConnectionLeftOpen) { + + this.pojoSupplier = pojoSupplier; + this.successResponseTransformer = successResponseTransformer; + this.errorResponseTransformer = errorResponseTransformer; + this.decorateContextWithError = decorateContextWithError; + this.needsConnectionLeftOpen = needsConnectionLeftOpen; + } + + /** + * Handle a response + * @param httpResponse The HTTP response object + * @param executionAttributes The attributes attached to this particular execution. + * @return A wrapped response object with the unmarshalled result in it. + */ + @Override + public Response handle(SdkHttpFullResponse httpResponse, ExecutionAttributes executionAttributes) { + boolean didRequestFail = true; + try { + Response response = handleResponse(httpResponse, executionAttributes); + didRequestFail = !response.isSuccess(); + return response; + } finally { + closeInputStreamIfNeeded(httpResponse, didRequestFail); + } + } + + private Response handleResponse(SdkHttpFullResponse httpResponse, + ExecutionAttributes executionAttributes) { + + AwsXmlUnmarshallingContext parsedResponse = parseResponse(httpResponse, executionAttributes); + parsedResponse = decorateContextWithError.apply(parsedResponse); + + if (parsedResponse.isResponseSuccess()) { + OutputT response = handleSuccessResponse(parsedResponse); + return Response.builder().httpResponse(httpResponse) + .response(response) + .isSuccess(true) + .build(); + } else { + return Response.builder().httpResponse(httpResponse) + .exception(handleErrorResponse(parsedResponse)) + .isSuccess(false) + .build(); + } + } + + private AwsXmlUnmarshallingContext parseResponse(SdkHttpFullResponse httpFullResponse, + ExecutionAttributes executionAttributes) { + XmlElement document = XmlResponseParserUtils.parse(pojoSupplier.apply(httpFullResponse), httpFullResponse); + + return AwsXmlUnmarshallingContext.builder() + .parsedXml(document) + .executionAttributes(executionAttributes) + .sdkHttpFullResponse(httpFullResponse) + .build(); + } + + /** + * Handles a successful response from a service call by unmarshalling the results using the + * specified response handler. + * + * @return The contents of the response, unmarshalled using the specified response handler. 
+ */ + private OutputT handleSuccessResponse(AwsXmlUnmarshallingContext parsedResponse) { + try { + SdkStandardLogger.REQUEST_LOGGER.debug(() -> "Received successful response: " + + parsedResponse.sdkHttpFullResponse().statusCode()); + return successResponseTransformer.apply(parsedResponse); + } catch (RetryableException e) { + throw e; + } catch (Exception e) { + if (e instanceof SdkException && ((SdkException) e).retryable()) { + throw (SdkException) e; + } + + String errorMessage = + "Unable to unmarshall response (" + e.getMessage() + "). Response Code: " + + parsedResponse.sdkHttpFullResponse().statusCode() + ", Response Text: " + + parsedResponse.sdkHttpFullResponse().statusText().orElse(null); + throw SdkClientException.builder().message(errorMessage).cause(e).build(); + } + } + + /** + * Responsible for handling an error response, including unmarshalling the error response + * into the most specific exception type possible, and throwing the exception. + */ + private SdkException handleErrorResponse(AwsXmlUnmarshallingContext parsedResponse) { + try { + SdkException exception = errorResponseTransformer.apply(parsedResponse); + exception.fillInStackTrace(); + SdkStandardLogger.REQUEST_LOGGER.debug(() -> "Received error response: " + exception); + return exception; + } catch (Exception e) { + String errorMessage = String.format("Unable to unmarshall error response (%s). " + + "Response Code: %d, Response Text: %s", e.getMessage(), + parsedResponse.sdkHttpFullResponse().statusCode(), + parsedResponse.sdkHttpFullResponse().statusText().orElse("null")); + throw SdkClientException.builder().message(errorMessage).cause(e).build(); + } + } + + /** + * Close the input stream if required. + */ + private void closeInputStreamIfNeeded(SdkHttpFullResponse httpResponse, + boolean didRequestFail) { + // Always close on failed requests. Close on successful requests unless it needs connection left open + if (didRequestFail || !needsConnectionLeftOpen) { + Optional.ofNullable(httpResponse) + .flatMap(SdkHttpFullResponse::content) // If no content, no need to close + .ifPresent(s -> IoUtils.closeQuietly(s, log)); + } + } +} diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseHandler.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseHandler.java index 39590e068a59..0a81b613928a 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseHandler.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -32,6 +32,7 @@ import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.http.SdkHttpUtils; /** * Response handler for REST-XML services (Cloudfront, Route53, and S3). 
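// Illustrative sketch (not part of the original diff) of how the AwsXmlPredicatedResponseHandler
// above might be composed from the other new components. GetWidgetResponse, errorTransformer,
// httpResponse and executionAttributes are illustrative assumptions; the "Error" element name
// mirrors the constant used by DecorateErrorFromResponseBodyUnmarshaller.
AwsXmlPredicatedResponseHandler<GetWidgetResponse> handler = new AwsXmlPredicatedResponseHandler<>(
    response -> GetWidgetResponse.builder(),                                   // pojoSupplier
    new AwsXmlResponseTransformer<>(XmlProtocolUnmarshaller.create(),
                                    response -> GetWidgetResponse.builder()),  // success transformer
    errorTransformer,                                                          // e.g. the AwsXmlErrorTransformer sketched earlier
    DecorateErrorFromResponseBodyUnmarshaller.of(
        root -> root.getOptionalElementByName("Error")),                       // success/error decorator
    false);                                                                    // do not leave the connection open
Response<GetWidgetResponse> result = handler.handle(httpResponse, executionAttributes);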
@@ -92,7 +93,7 @@ private T unmarshallResponse(SdkHttpFullResponse response) throws Exception { private AwsResponseMetadata generateResponseMetadata(SdkHttpResponse response) { Map metadata = new HashMap<>(); metadata.put(AWS_REQUEST_ID, - response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null)); + SdkHttpUtils.firstMatchingHeaderFromCollection(response.headers(), X_AMZN_REQUEST_ID_HEADERS).orElse(null)); response.headers().forEach((key, value) -> metadata.put(key, value.get(0))); return DefaultAwsResponseMetadata.create(metadata); diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseTransformer.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseTransformer.java new file mode 100644 index 000000000000..3a2affb06db3 --- /dev/null +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseTransformer.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.xml.internal.unmarshall; + +import static software.amazon.awssdk.awscore.util.AwsHeader.AWS_REQUEST_ID; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.AwsResponse; +import software.amazon.awssdk.awscore.AwsResponseMetadata; +import software.amazon.awssdk.awscore.DefaultAwsResponseMetadata; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.SdkStandardLogger; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; + +/** + * A transformer function that takes a parsed XML response and converts it into an {@link AwsResponse}. Used + * as a component in the {@link AwsXmlPredicatedResponseHandler}. 
+ */ +@SdkInternalApi +public final class AwsXmlResponseTransformer + implements Function { + + private static final String X_AMZN_REQUEST_ID_HEADER = "x-amzn-RequestId"; + + private final XmlProtocolUnmarshaller unmarshaller; + private final Function pojoSupplier; + + public AwsXmlResponseTransformer(XmlProtocolUnmarshaller unmarshaller, + Function pojoSupplier) { + this.unmarshaller = unmarshaller; + this.pojoSupplier = pojoSupplier; + } + + @Override + public T apply(AwsXmlUnmarshallingContext context) { + return unmarshallResponse(context.sdkHttpFullResponse(), context.parsedRootXml()); + } + + @SuppressWarnings("unchecked") + private T unmarshallResponse(SdkHttpFullResponse response, XmlElement parsedXml) { + SdkStandardLogger.REQUEST_LOGGER.trace(() -> "Unmarshalling parsed service response XML."); + T result = unmarshaller.unmarshall(pojoSupplier.apply(response), parsedXml, response); + SdkStandardLogger.REQUEST_LOGGER.trace(() -> "Done unmarshalling parsed service response."); + AwsResponseMetadata responseMetadata = generateResponseMetadata(response); + return (T) result.toBuilder().responseMetadata(responseMetadata).build(); + } + + /** + * Create the default {@link AwsResponseMetadata}. This might be wrapped by a service + * specific metadata object to provide modeled access to additional metadata. (See S3 and Kinesis). + */ + private AwsResponseMetadata generateResponseMetadata(SdkHttpResponse response) { + Map metadata = new HashMap<>(); + metadata.put(AWS_REQUEST_ID, + response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null)); + + response.headers().forEach((key, value) -> metadata.put(key, value.get(0))); + return DefaultAwsResponseMetadata.create(metadata); + } +} diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlUnmarshallingContext.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlUnmarshallingContext.java new file mode 100644 index 000000000000..730fca7daa88 --- /dev/null +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlUnmarshallingContext.java @@ -0,0 +1,168 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.xml.internal.unmarshall; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; + +/** + * A data class to hold all the context of an unmarshalling stage for the AWS XML protocol as orchestrated by + * {@link AwsXmlPredicatedResponseHandler}. 
+ */ +@SdkInternalApi +public class AwsXmlUnmarshallingContext { + private final SdkHttpFullResponse sdkHttpFullResponse; + private final XmlElement parsedXml; + private final ExecutionAttributes executionAttributes; + private final Boolean isResponseSuccess; + private final XmlElement parsedErrorXml; + + private AwsXmlUnmarshallingContext(Builder builder) { + this.sdkHttpFullResponse = builder.sdkHttpFullResponse; + this.parsedXml = builder.parsedXml; + this.executionAttributes = builder.executionAttributes; + this.isResponseSuccess = builder.isResponseSuccess; + this.parsedErrorXml = builder.parsedErrorXml; + } + + public static Builder builder() { + return new Builder(); + } + + /** + * The HTTP response. + */ + public SdkHttpFullResponse sdkHttpFullResponse() { + return sdkHttpFullResponse; + } + + /** + * The parsed XML of the body, or null if there was no body. + */ + public XmlElement parsedRootXml() { + return parsedXml; + } + + /** + * The {@link ExecutionAttributes} associated with this request. + */ + public ExecutionAttributes executionAttributes() { + return executionAttributes; + } + + /** + * true if the response indicates success; false if not; null if that has not been determined yet + */ + public Boolean isResponseSuccess() { + return isResponseSuccess; + } + + /** + * The parsed XML of just the error. null if not found or determined yet. + */ + public XmlElement parsedErrorXml() { + return parsedErrorXml; + } + + public Builder toBuilder() { + return builder().sdkHttpFullResponse(this.sdkHttpFullResponse) + .parsedXml(this.parsedXml) + .executionAttributes(this.executionAttributes) + .isResponseSuccess(this.isResponseSuccess) + .parsedErrorXml(this.parsedErrorXml); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AwsXmlUnmarshallingContext that = (AwsXmlUnmarshallingContext) o; + + if (sdkHttpFullResponse != null ? ! sdkHttpFullResponse.equals(that.sdkHttpFullResponse) : + that.sdkHttpFullResponse != null) { + return false; + } + if (parsedXml != null ? ! parsedXml.equals(that.parsedXml) : that.parsedXml != null) { + return false; + } + if (executionAttributes != null ? ! executionAttributes.equals(that.executionAttributes) : + that.executionAttributes != null) { + return false; + } + if (isResponseSuccess != null ? ! isResponseSuccess.equals(that.isResponseSuccess) : + that.isResponseSuccess != null) { + return false; + } + return parsedErrorXml != null ? parsedErrorXml.equals(that.parsedErrorXml) : that.parsedErrorXml == null; + } + + @Override + public int hashCode() { + int result = sdkHttpFullResponse != null ? sdkHttpFullResponse.hashCode() : 0; + result = 31 * result + (parsedXml != null ? parsedXml.hashCode() : 0); + result = 31 * result + (executionAttributes != null ? executionAttributes.hashCode() : 0); + result = 31 * result + (isResponseSuccess != null ? isResponseSuccess.hashCode() : 0); + result = 31 * result + (parsedErrorXml != null ? 
parsedErrorXml.hashCode() : 0); + return result; + } + + public static final class Builder { + private SdkHttpFullResponse sdkHttpFullResponse; + private XmlElement parsedXml; + private ExecutionAttributes executionAttributes; + private Boolean isResponseSuccess; + private XmlElement parsedErrorXml; + + private Builder() { + } + + public Builder sdkHttpFullResponse(SdkHttpFullResponse sdkHttpFullResponse) { + this.sdkHttpFullResponse = sdkHttpFullResponse; + return this; + } + + public Builder parsedXml(XmlElement parsedXml) { + this.parsedXml = parsedXml; + return this; + } + + public Builder executionAttributes(ExecutionAttributes executionAttributes) { + this.executionAttributes = executionAttributes; + return this; + } + + public Builder isResponseSuccess(Boolean isResponseSuccess) { + this.isResponseSuccess = isResponseSuccess; + return this; + } + + public Builder parsedErrorXml(XmlElement parsedErrorXml) { + this.parsedErrorXml = parsedErrorXml; + return this; + } + + public AwsXmlUnmarshallingContext build() { + return new AwsXmlUnmarshallingContext(this); + } + } +} diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/DecorateErrorFromResponseBodyUnmarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/DecorateErrorFromResponseBodyUnmarshaller.java new file mode 100644 index 000000000000..2d31826adefa --- /dev/null +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/DecorateErrorFromResponseBodyUnmarshaller.java @@ -0,0 +1,83 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.xml.internal.unmarshall; + +import java.util.Optional; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; + +/** + * A function that decorates a {@link AwsXmlUnmarshallingContext} that already contains the parsed XML of the + * response body with parsed error XML if the HTTP response status indicates failure or a serialized error is found + * in the XML body of a 'successful' response. This is a non-standard error handling behavior that is used by some + * non-streaming S3 operations. + */ +@SdkInternalApi +public class DecorateErrorFromResponseBodyUnmarshaller + implements Function { + + private static final String ERROR_IN_SUCCESS_BODY_ELEMENT_NAME = "Error"; + + private final Function> errorRootLocationFunction; + + private DecorateErrorFromResponseBodyUnmarshaller(Function> errorRootLocationFunction) { + this.errorRootLocationFunction = errorRootLocationFunction; + } + + /** + * Constructs a function that can be used to decorate a parsed error from a response if one is found. 
+ * @param errorRootFunction A function that can be used to locate the root of the serialized error in the XML + * body if the HTTP status code of the response indicates an error. This function is not + * applied for HTTP responses that indicate success, instead the root of the document + * will always be checked for an element tagged 'Error'. + * @return An unmarshalling function that will decorate the unmarshalling context with a parsed error if one is + * found in the response. + */ + public static DecorateErrorFromResponseBodyUnmarshaller of(Function> errorRootFunction) { + return new DecorateErrorFromResponseBodyUnmarshaller(errorRootFunction); + } + + @Override + public AwsXmlUnmarshallingContext apply(AwsXmlUnmarshallingContext context) { + Optional parsedRootXml = Optional.ofNullable(context.parsedRootXml()); + + if (!context.sdkHttpFullResponse().isSuccessful()) { + // Request was non-2xx, defer to protocol handler for error root + Optional parsedErrorXml = parsedRootXml.flatMap(errorRootLocationFunction); + return context.toBuilder().isResponseSuccess(false).parsedErrorXml(parsedErrorXml.orElse(null)).build(); + } + + // Check body to see if an error turned up there + Optional parsedErrorXml = parsedRootXml.isPresent() ? + getErrorRootFromSuccessBody(context.parsedRootXml()) : Optional.empty(); + + // Request had an HTTP success code, but an error was found in the body + return parsedErrorXml.map(xmlElement -> context.toBuilder() + .isResponseSuccess(false) + .parsedErrorXml(xmlElement) + .build()) + // Otherwise the response can be considered successful + .orElseGet(() -> context.toBuilder() + .isResponseSuccess(true) + .build()); + } + + private static Optional getErrorRootFromSuccessBody(XmlElement document) { + return ERROR_IN_SUCCESS_BODY_ELEMENT_NAME.equals(document.elementName()) ? + Optional.of(document) : Optional.empty(); + } +} diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/HeaderUnmarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/HeaderUnmarshaller.java index 10f51623fbf4..ac23dcad2220 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/HeaderUnmarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/HeaderUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlPayloadUnmarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlPayloadUnmarshaller.java index 4abda25bd29f..c26a77e5f773 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlPayloadUnmarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlPayloadUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
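// Illustrative sketch (not part of the original diff) of applying the
// DecorateErrorFromResponseBodyUnmarshaller defined above; the error-root locator and the
// parsedContext variable are illustrative assumptions.
DecorateErrorFromResponseBodyUnmarshaller errorDecorator =
    DecorateErrorFromResponseBodyUnmarshaller.of(root -> root.getOptionalElementByName("Error"));
AwsXmlUnmarshallingContext decorated = errorDecorator.apply(parsedContext);
if (Boolean.FALSE.equals(decorated.isResponseSuccess())) {
    // An error was detected even on an HTTP 2xx response, per the S3 behavior described above.
    XmlElement errorRoot = decorated.parsedErrorXml();
}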
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlProtocolUnmarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlProtocolUnmarshaller.java index de7141fa3c3e..9429422a1050 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlProtocolUnmarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlProtocolUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -29,10 +29,10 @@ import software.amazon.awssdk.core.protocol.MarshallingType; import software.amazon.awssdk.core.traits.PayloadTrait; import software.amazon.awssdk.core.traits.TimestampFormatTrait; +import software.amazon.awssdk.core.traits.XmlAttributeTrait; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.protocols.core.StringToInstant; import software.amazon.awssdk.protocols.core.StringToValueConverter; -import software.amazon.awssdk.protocols.query.unmarshall.XmlDomParser; import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; import software.amazon.awssdk.protocols.query.unmarshall.XmlErrorUnmarshaller; import software.amazon.awssdk.utils.CollectionUtils; @@ -49,12 +49,13 @@ public final class XmlProtocolUnmarshaller implements XmlErrorUnmarshaller { private XmlProtocolUnmarshaller() { } + public static XmlProtocolUnmarshaller create() { + return new XmlProtocolUnmarshaller(); + } + public TypeT unmarshall(SdkPojo sdkPojo, SdkHttpFullResponse response) { - - XmlElement document = hasPayloadMembers(sdkPojo) && response.content().isPresent() - ? XmlDomParser.parse(response.content().get()) : null; - + XmlElement document = XmlResponseParserUtils.parse(sdkPojo, response); return unmarshall(sdkPojo, document, response); } @@ -79,30 +80,43 @@ SdkPojo unmarshall(XmlUnmarshallerContext context, SdkPojo sdkPojo, XmlElement r XmlUnmarshaller unmarshaller = REGISTRY.getUnmarshaller(field.location(), field.marshallingType()); if (root != null && field.location() == MarshallLocation.PAYLOAD) { - List element = isExplicitPayloadMember(field) ? - singletonList(root) : - root.getElementsByName(field.unmarshallLocationName()); - if (!CollectionUtils.isNullOrEmpty(element)) { - Object unmarshalled = unmarshaller.unmarshall(context, element, (SdkField) field); - field.set(sdkPojo, unmarshalled); + if (!context.response().content().isPresent()) { + // This is a payload field, but the service sent no content. Do not populate this field (leave it null). + continue; + } + + if (isAttribute(field)) { + root.getOptionalAttributeByName(field.unmarshallLocationName()) + .ifPresent(e -> field.set(sdkPojo, e)); + } else { + List element = isExplicitPayloadMember(field) ? 
+ singletonList(root) : + root.getElementsByName(field.unmarshallLocationName()); + + if (!CollectionUtils.isNullOrEmpty(element)) { + Object unmarshalled = unmarshaller.unmarshall(context, element, (SdkField) field); + field.set(sdkPojo, unmarshalled); + } } } else { Object unmarshalled = unmarshaller.unmarshall(context, null, (SdkField) field); field.set(sdkPojo, unmarshalled); } } + + if (!(sdkPojo instanceof Buildable)) { + throw new RuntimeException("The sdkPojo passed to the unmarshaller is not buildable (must implement " + + "Buildable)"); + } return (SdkPojo) ((Buildable) sdkPojo).build(); } - private boolean isExplicitPayloadMember(SdkField field) { - return field.containsTrait(PayloadTrait.class); + private boolean isAttribute(SdkField field) { + return field.containsTrait(XmlAttributeTrait.class); } - private boolean hasPayloadMembers(SdkPojo sdkPojo) { - return sdkPojo.sdkFields().stream() - .filter(f -> f.location() == MarshallLocation.PAYLOAD) - .findAny() - .isPresent(); + private boolean isExplicitPayloadMember(SdkField field) { + return field.containsTrait(PayloadTrait.class); } private static Map getDefaultTimestampFormats() { @@ -139,27 +153,4 @@ private static XmlUnmarshallerRegistry createUnmarshallerRegistry() { .payloadUnmarshaller(MarshallingType.MAP, XmlPayloadUnmarshaller::unmarshallMap) .build(); } - - /** - * @return New {@link Builder} instance. - */ - public static Builder builder() { - return new Builder(); - } - - /** - * Builder for {@link XmlProtocolUnmarshaller}. - */ - public static final class Builder { - - private Builder() { - } - - /** - * @return New instance of {@link XmlProtocolUnmarshaller}. - */ - public XmlProtocolUnmarshaller build() { - return new XmlProtocolUnmarshaller(); - } - } } diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlResponseParserUtils.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlResponseParserUtils.java new file mode 100644 index 000000000000..97023b37709b --- /dev/null +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlResponseParserUtils.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocols.xml.internal.unmarshall; + +import static software.amazon.awssdk.http.Header.CONTENT_LENGTH; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.protocol.MarshallLocation; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.protocols.query.unmarshall.XmlDomParser; +import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; + +/** + * Static methods to assist with parsing the response of AWS XML requests. 
+ */ +@SdkInternalApi +public final class XmlResponseParserUtils { + private XmlResponseParserUtils() { + } + + /** + * Parse an XML response if one is expected and available. If we are not expecting a payload, but the HTTP response + * code shows an error then we will parse it anyway, as it should contain a serialized error. + * @param sdkPojo the SDK builder object associated with the final response + * @param response the HTTP response + * @return A parsed XML document or an empty XML document if no payload/contents were found in the response. + */ + public static XmlElement parse(SdkPojo sdkPojo, SdkHttpFullResponse response) { + + try { + Optional responseContent = response.content(); + + // In some cases the responseContent is present but empty, so when we are not expecting a body we should + // not attempt to parse it even if the body appears to be present. + if ((!response.isSuccessful() || hasPayloadMembers(sdkPojo)) && responseContent.isPresent() && + !contentLengthZero(response)) { + return XmlDomParser.parse(responseContent.get()); + } else { + return XmlElement.empty(); + } + } catch (RuntimeException e) { + if (response.isSuccessful()) { + throw e; + } + + return XmlElement.empty(); + } + } + + private static boolean hasPayloadMembers(SdkPojo sdkPojo) { + return sdkPojo.sdkFields().stream() + .anyMatch(f -> f.location() == MarshallLocation.PAYLOAD); + } + + private static boolean contentLengthZero(SdkHttpFullResponse response) { + return response.firstMatchingHeader(CONTENT_LENGTH).map(l -> Long.parseLong(l) == 0).orElse(false); + } +} diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshaller.java index 2aa1e01c0585..35f4ac37c1a3 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshallerContext.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshallerContext.java index cb453781767e..4bf9d4d9ce30 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshallerContext.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshallerContext.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
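// Illustrative sketch (not part of the original diff) of the parse-then-unmarshall flow that
// XmlResponseParserUtils above supports. GetWidgetResponse and httpResponse are illustrative
// assumptions; parse() returns an empty XmlElement when no usable body is present, so the
// unmarshaller can still populate header and status-code fields.
SdkPojo responseBuilder = GetWidgetResponse.builder();                           // hypothetical generated builder
XmlElement root = XmlResponseParserUtils.parse(responseBuilder, httpResponse);
GetWidgetResponse unmarshalled =
    XmlProtocolUnmarshaller.create().unmarshall(responseBuilder, root, httpResponse);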
diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshallerRegistry.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshallerRegistry.java index 043bcd9b700c..9b56d67bc4c0 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshallerRegistry.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlUnmarshallerRegistry.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/aws-xml-protocol/src/test/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlUnmarshallingContextTest.java b/core/protocols/aws-xml-protocol/src/test/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlUnmarshallingContextTest.java new file mode 100644 index 000000000000..207ec55788d8 --- /dev/null +++ b/core/protocols/aws-xml-protocol/src/test/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlUnmarshallingContextTest.java @@ -0,0 +1,134 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.protocols.xml.internal.unmarshall; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; + +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.internal.InternalCoreExecutionAttribute; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; + +@RunWith(MockitoJUnitRunner.class) +public class AwsXmlUnmarshallingContextTest { + private static final XmlElement XML_ELEMENT_1 = XmlElement.builder().elementName("one").build(); + private static final XmlElement XML_ELEMENT_2 = XmlElement.builder().elementName("two").build(); + private static final XmlElement XML_ERROR_ELEMENT_1 = XmlElement.builder().elementName("error-one").build(); + private static final XmlElement XML_ERROR_ELEMENT_2 = XmlElement.builder().elementName("error-two").build(); + private static final ExecutionAttributes EXECUTION_ATTRIBUTES_1 = + new ExecutionAttributes().putAttribute(InternalCoreExecutionAttribute.EXECUTION_ATTEMPT, 1); + private static final ExecutionAttributes EXECUTION_ATTRIBUTES_2 = + new ExecutionAttributes().putAttribute(InternalCoreExecutionAttribute.EXECUTION_ATTEMPT, 2); + + @Mock + private SdkHttpFullResponse mockSdkHttpFullResponse; + + private AwsXmlUnmarshallingContext minimal() { + return AwsXmlUnmarshallingContext.builder().build(); + } + + private AwsXmlUnmarshallingContext maximal() { + return AwsXmlUnmarshallingContext.builder() + .parsedXml(XML_ELEMENT_1) + .parsedErrorXml(XML_ERROR_ELEMENT_1) + .isResponseSuccess(true) + .sdkHttpFullResponse(mockSdkHttpFullResponse) + .executionAttributes(EXECUTION_ATTRIBUTES_1) + .build(); + } + + @Test + public void builder_minimal() { + AwsXmlUnmarshallingContext result = minimal(); + + assertThat(result.isResponseSuccess()).isNull(); + assertThat(result.sdkHttpFullResponse()).isNull(); + assertThat(result.parsedRootXml()).isNull(); + assertThat(result.executionAttributes()).isNull(); + assertThat(result.parsedErrorXml()).isNull(); + } + + @Test + public void builder_maximal() { + AwsXmlUnmarshallingContext result = maximal(); + + assertThat(result.isResponseSuccess()).isTrue(); + assertThat(result.sdkHttpFullResponse()).isEqualTo(mockSdkHttpFullResponse); + assertThat(result.parsedRootXml()).isEqualTo(XML_ELEMENT_1); + assertThat(result.executionAttributes()).isEqualTo(EXECUTION_ATTRIBUTES_1); + assertThat(result.parsedErrorXml()).isEqualTo(XML_ERROR_ELEMENT_1); + } + + @Test + public void toBuilder_maximal() { + assertThat(maximal().toBuilder().build()).isEqualTo(maximal()); + } + + @Test + public void toBuilder_minimal() { + assertThat(minimal().toBuilder().build()).isEqualTo(minimal()); + } + + @Test + public void equals_maximal_positive() { + assertThat(maximal()).isEqualTo(maximal()); + } + + @Test + public void equals_minimal() { + assertThat(minimal()).isEqualTo(minimal()); + } + + @Test + public void equals_maximal_negative() { + assertThat(maximal().toBuilder().isResponseSuccess(false).build()).isNotEqualTo(maximal()); + assertThat(maximal().toBuilder().sdkHttpFullResponse(mock(SdkHttpFullResponse.class)).build()).isNotEqualTo(maximal()); + assertThat(maximal().toBuilder().parsedXml(XML_ELEMENT_2).build()).isNotEqualTo(maximal()); + 
assertThat(maximal().toBuilder().parsedErrorXml(XML_ERROR_ELEMENT_2).build()).isNotEqualTo(maximal()); + assertThat(maximal().toBuilder().executionAttributes(EXECUTION_ATTRIBUTES_2).build()).isNotEqualTo(maximal()); + } + + @Test + public void hashcode_maximal_positive() { + assertThat(maximal().hashCode()).isEqualTo(maximal().hashCode()); + } + + @Test + public void hashcode_minimal_positive() { + assertThat(minimal().hashCode()).isEqualTo(minimal().hashCode()); + } + + @Test + public void hashcode_maximal_negative() { + assertThat(maximal().toBuilder().isResponseSuccess(false).build().hashCode()) + .isNotEqualTo(maximal().hashCode()); + assertThat(maximal().toBuilder().sdkHttpFullResponse(mock(SdkHttpFullResponse.class)).build().hashCode()) + .isNotEqualTo(maximal().hashCode()); + assertThat(maximal().toBuilder().parsedXml(XML_ELEMENT_2).build().hashCode()) + .isNotEqualTo(maximal().hashCode()); + assertThat(maximal().toBuilder().parsedErrorXml(XML_ERROR_ELEMENT_2).build().hashCode()) + .isNotEqualTo(maximal().hashCode()); + assertThat(maximal().toBuilder().executionAttributes(EXECUTION_ATTRIBUTES_2).build().hashCode()) + .isNotEqualTo(maximal().hashCode()); + } + +} \ No newline at end of file diff --git a/core/protocols/aws-xml-protocol/src/test/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/DecorateErrorFromResponseBodyUnmarshallerTest.java b/core/protocols/aws-xml-protocol/src/test/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/DecorateErrorFromResponseBodyUnmarshallerTest.java new file mode 100644 index 000000000000..e69c1e585e52 --- /dev/null +++ b/core/protocols/aws-xml-protocol/src/test/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/DecorateErrorFromResponseBodyUnmarshallerTest.java @@ -0,0 +1,175 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.protocols.xml.internal.unmarshall; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Optional; +import java.util.function.Function; + +import org.junit.Test; + +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.protocols.query.unmarshall.XmlElement; + +public class DecorateErrorFromResponseBodyUnmarshallerTest { + private static final Function> FAIL_TEST_ERROR_ROOT_LOCATOR = + ignored -> { throw new RuntimeException("This function should not have been called"); }; + + @Test + public void status200_noBody() { + DecorateErrorFromResponseBodyUnmarshaller decorateErrorFromResponseBodyUnmarshaller = + DecorateErrorFromResponseBodyUnmarshaller.of(FAIL_TEST_ERROR_ROOT_LOCATOR); + + SdkHttpFullResponse sdkHttpFullResponse = SdkHttpFullResponse.builder() + .statusCode(200) + .build(); + + AwsXmlUnmarshallingContext context = AwsXmlUnmarshallingContext.builder() + .sdkHttpFullResponse(sdkHttpFullResponse) + .build(); + + AwsXmlUnmarshallingContext result = decorateErrorFromResponseBodyUnmarshaller.apply(context); + + assertThat(result.isResponseSuccess()).isTrue(); + assertThat(result.parsedErrorXml()).isNull(); + } + + @Test + public void status200_bodyWithNoError() { + DecorateErrorFromResponseBodyUnmarshaller decorateErrorFromResponseBodyUnmarshaller = + DecorateErrorFromResponseBodyUnmarshaller.of(FAIL_TEST_ERROR_ROOT_LOCATOR); + + SdkHttpFullResponse sdkHttpFullResponse = SdkHttpFullResponse.builder() + .statusCode(200) + .build(); + + XmlElement parsedBody = XmlElement.builder() + .elementName("ValidResponse") + .build(); + + AwsXmlUnmarshallingContext context = AwsXmlUnmarshallingContext.builder() + .sdkHttpFullResponse(sdkHttpFullResponse) + .parsedXml(parsedBody) + .build(); + + AwsXmlUnmarshallingContext result = decorateErrorFromResponseBodyUnmarshaller.apply(context); + + assertThat(result.isResponseSuccess()).isTrue(); + assertThat(result.parsedErrorXml()).isNull(); + } + + @Test + public void status200_bodyWithError() { + DecorateErrorFromResponseBodyUnmarshaller decorateErrorFromResponseBodyUnmarshaller = + DecorateErrorFromResponseBodyUnmarshaller.of(FAIL_TEST_ERROR_ROOT_LOCATOR); + + SdkHttpFullResponse sdkHttpFullResponse = SdkHttpFullResponse.builder() + .statusCode(200) + .build(); + + XmlElement parsedError = XmlElement.builder() + .elementName("test-error") + .build(); + + XmlElement parsedBody = XmlElement.builder() + .elementName("Error") + .addChildElement(parsedError) + .build(); + + AwsXmlUnmarshallingContext context = AwsXmlUnmarshallingContext.builder() + .sdkHttpFullResponse(sdkHttpFullResponse) + .parsedXml(parsedBody) + .build(); + + AwsXmlUnmarshallingContext result = decorateErrorFromResponseBodyUnmarshaller.apply(context); + + assertThat(result.isResponseSuccess()).isFalse(); + assertThat(result.parsedErrorXml()).isSameAs(parsedBody); + } + + @Test + public void status500_noBody() { + DecorateErrorFromResponseBodyUnmarshaller decorateErrorFromResponseBodyUnmarshaller = + DecorateErrorFromResponseBodyUnmarshaller.of(xml -> xml.getOptionalElementByName("test-error")); + + SdkHttpFullResponse sdkHttpFullResponse = SdkHttpFullResponse.builder() + .statusCode(500) + .build(); + + AwsXmlUnmarshallingContext context = AwsXmlUnmarshallingContext.builder() + .sdkHttpFullResponse(sdkHttpFullResponse) + .build(); + + AwsXmlUnmarshallingContext result = decorateErrorFromResponseBodyUnmarshaller.apply(context); + + assertThat(result.isResponseSuccess()).isFalse(); + 
assertThat(result.parsedErrorXml()).isNull(); + } + + @Test + public void status500_bodyWithNoError() { + DecorateErrorFromResponseBodyUnmarshaller decorateErrorFromResponseBodyUnmarshaller = + DecorateErrorFromResponseBodyUnmarshaller.of(xml -> xml.getOptionalElementByName("test-error")); + + SdkHttpFullResponse sdkHttpFullResponse = SdkHttpFullResponse.builder() + .statusCode(500) + .build(); + + XmlElement parsedBody = XmlElement.builder() + .elementName("ValidResponse") + .build(); + + AwsXmlUnmarshallingContext context = AwsXmlUnmarshallingContext.builder() + .sdkHttpFullResponse(sdkHttpFullResponse) + .parsedXml(parsedBody) + .build(); + + AwsXmlUnmarshallingContext result = decorateErrorFromResponseBodyUnmarshaller.apply(context); + + assertThat(result.isResponseSuccess()).isFalse(); + assertThat(result.parsedErrorXml()).isNull(); + } + + @Test + public void status500_bodyWithError() { + DecorateErrorFromResponseBodyUnmarshaller decorateErrorFromResponseBodyUnmarshaller = + DecorateErrorFromResponseBodyUnmarshaller.of(xml -> xml.getOptionalElementByName("test-error")); + + SdkHttpFullResponse sdkHttpFullResponse = SdkHttpFullResponse.builder() + .statusCode(500) + .build(); + + XmlElement parsedError = XmlElement.builder() + .elementName("test-error") + .build(); + + XmlElement parsedBody = XmlElement.builder() + .elementName("Error") + .addChildElement(parsedError) + .build(); + + AwsXmlUnmarshallingContext context = AwsXmlUnmarshallingContext.builder() + .sdkHttpFullResponse(sdkHttpFullResponse) + .parsedXml(parsedBody) + .build(); + + AwsXmlUnmarshallingContext result = decorateErrorFromResponseBodyUnmarshaller.apply(context); + + assertThat(result.isResponseSuccess()).isFalse(); + assertThat(result.parsedErrorXml()).isSameAs(parsedError); + } +} \ No newline at end of file diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index 8bcf79114117..b1bcafa3fba4 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -1,11 +1,26 @@ + + core software.amazon.awssdk - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index c3910ecf49eb..5560ffaf50a1 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -1,11 +1,26 @@ + + protocols software.amazon.awssdk - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/AbstractMarshallingRegistry.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/AbstractMarshallingRegistry.java index e39367f57efc..6bd09d52a257 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/AbstractMarshallingRegistry.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/AbstractMarshallingRegistry.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ExceptionMetadata.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ExceptionMetadata.java index 7a3dab6aaa21..de298bc1ee4a 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ExceptionMetadata.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ExceptionMetadata.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/InstantToString.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/InstantToString.java index ca71665eab60..96eaa4164932 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/InstantToString.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/InstantToString.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/Marshaller.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/Marshaller.java index 11008876292b..a289dc7d2654 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/Marshaller.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/Marshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/OperationInfo.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/OperationInfo.java index d938481bf421..8b4dbf287e49 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/OperationInfo.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/OperationInfo.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -32,6 +32,8 @@ public final class OperationInfo { private final boolean hasExplicitPayloadMember; private final boolean hasPayloadMembers; private final boolean hasStreamingInput; + private final boolean hasEventStreamingInput; + private final boolean hasEvent; private final AttributeMap additionalMetadata; private OperationInfo(Builder builder) { @@ -43,6 +45,8 @@ private OperationInfo(Builder builder) { this.hasPayloadMembers = builder.hasPayloadMembers; this.hasStreamingInput = builder.hasStreamingInput; this.additionalMetadata = builder.additionalMetadata.build(); + this.hasEventStreamingInput = builder.hasEventStreamingInput; + this.hasEvent = builder.hasEvent; } /** @@ -98,6 +102,20 @@ public boolean hasStreamingInput() { return hasStreamingInput; } + /** + * @return True if the operation has event streaming input. + */ + public boolean hasEventStreamingInput() { + return hasEventStreamingInput; + } + + /** + * @return True if the operation has event. + */ + public boolean hasEvent() { + return hasEvent; + } + /** * Gets an unmodeled piece of metadata. Useful for protocol specific options. * @@ -128,6 +146,8 @@ public static final class Builder { private boolean hasExplicitPayloadMember; private boolean hasPayloadMembers; private boolean hasStreamingInput; + private boolean hasEventStreamingInput; + private boolean hasEvent; private AttributeMap.Builder additionalMetadata = AttributeMap.builder(); private Builder() { @@ -168,6 +188,16 @@ public Builder hasStreamingInput(boolean hasStreamingInput) { return this; } + public Builder hasEventStreamingInput(boolean hasEventStreamingInput) { + this.hasEventStreamingInput = hasEventStreamingInput; + return this; + } + + public Builder hasEvent(boolean hasEvent) { + this.hasEvent = hasEvent; + return this; + } + /** * Adds additional unmodeled metadata to the {@link OperationInfo}. Useful for communicating protocol * specific operation metadata. diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/OperationMetadataAttribute.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/OperationMetadataAttribute.java index 76fbe13e9bde..f77f084339b3 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/OperationMetadataAttribute.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/OperationMetadataAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/PathMarshaller.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/PathMarshaller.java index 3a1ce5e52c5b..ff98704e8c6b 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/PathMarshaller.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/PathMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
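// Illustrative sketch (not part of the original diff) of the new event-stream flags added to
// OperationInfo above; only the builder methods shown in this change are used.
OperationInfo operationInfo = OperationInfo.builder()
                                           .hasStreamingInput(false)
                                           .hasEventStreamingInput(true)  // request carries an event stream
                                           .hasEvent(true)                // operation models an event member
                                           .build();
boolean isEventStreaming = operationInfo.hasEventStreamingInput();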
diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ProtocolMarshaller.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ProtocolMarshaller.java index 66333fdfc220..7a769d47363c 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ProtocolMarshaller.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ProtocolMarshaller.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ProtocolUtils.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ProtocolUtils.java index d28a7251916f..cead56f1cf65 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ProtocolUtils.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ProtocolUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/StringToInstant.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/StringToInstant.java index fa41b4f7f3fc..a9888f076c1d 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/StringToInstant.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/StringToInstant.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/StringToValueConverter.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/StringToValueConverter.java index d38e8757f049..e1fe1d2ec73e 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/StringToValueConverter.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/StringToValueConverter.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ValueToStringConverter.java b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ValueToStringConverter.java index ce2192e7f206..8dc35eb35bdd 100644 --- a/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ValueToStringConverter.java +++ b/core/protocols/protocol-core/src/main/java/software/amazon/awssdk/protocols/core/ValueToStringConverter.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/GreedyPathMarshallerTest.java b/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/GreedyPathMarshallerTest.java index f74cb995c8f2..f53932516a5d 100644 --- a/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/GreedyPathMarshallerTest.java +++ b/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/GreedyPathMarshallerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/NonGreedyPathMarshallerTest.java b/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/NonGreedyPathMarshallerTest.java index c8de721e923c..0994009bb65d 100644 --- a/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/NonGreedyPathMarshallerTest.java +++ b/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/NonGreedyPathMarshallerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/ProtocolUtilsTest.java b/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/ProtocolUtilsTest.java index a29eeef08b78..ffb7c9048f4e 100644 --- a/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/ProtocolUtilsTest.java +++ b/core/protocols/protocol-core/src/test/java/software/amazon/awssdk/protocols/core/ProtocolUtilsTest.java @@ -1,3 +1,18 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + package software.amazon.awssdk.protocols.core; import static java.util.Collections.singletonList; diff --git a/core/regions/pom.xml b/core/regions/pom.xml index 286849e9237e..8b60f625fd40 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -1,6 +1,6 @@ + + + + + http-clients + software.amazon.awssdk + 2.15.62-SNAPSHOT + + 4.0.0 + + aws-crt-client + AWS Java SDK :: HTTP Clients :: AWS Common Runtime Client + jar + ${awsjavasdk.version}-PREVIEW + + + ${project.parent.version} + 1.8 + + + + + + software.amazon.awssdk + bom-internal + ${awsjavasdk.version} + pom + import + + + + + + + + software.amazon.awssdk.crt + aws-crt + ${awscrt.version} + + + + + software.amazon.awssdk + annotations + ${awsjavasdk.version} + + + software.amazon.awssdk + http-client-spi + ${awsjavasdk.version} + + + software.amazon.awssdk + utils + ${awsjavasdk.version} + + + + + com.github.tomakehurst + wiremock + test + + + org.apache.commons + commons-lang3 + test + + + junit + junit + test + + + org.mockito + mockito-core + test + + + org.assertj + assertj-core + test + + + org.reactivestreams + reactive-streams-tck + test + + + org.slf4j + slf4j-log4j12 + test + + + log4j + log4j + test + + + software.amazon.awssdk + http-client-tests + ${awsjavasdk.version} + test + + + software.amazon.awssdk + sdk-core + ${awsjavasdk.version} + test + + + software.amazon.awssdk + regions + ${awsjavasdk.version} + test + + + software.amazon.awssdk + s3 + ${awsjavasdk.version} + test + + + software.amazon.awssdk + kms + ${awsjavasdk.version} + test + + + software.amazon.awssdk + auth + ${awsjavasdk.version} + test + + + service-test-utils + software.amazon.awssdk + ${awsjavasdk.version} + test + + + commons-codec + commons-codec + ${commons-codec.verion} + test + + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + ${maven.surefire.version} + + + + junit + false + + + 1 + + + + org.apache.maven.surefire + surefire-junit47 + ${maven.surefire.version} + + + org.apache.maven.surefire + surefire-testng + ${maven.surefire.version} + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.http.crt + + + + + + + diff --git a/http-clients/aws-crt-client/src/it/java/software/amazon/awssdk/http/crt/AwsCrtClientCallingPatternIntegrationTest.java b/http-clients/aws-crt-client/src/it/java/software/amazon/awssdk/http/crt/AwsCrtClientCallingPatternIntegrationTest.java new file mode 100644 index 000000000000..4d489d6c0ab5 --- /dev/null +++ b/http-clients/aws-crt-client/src/it/java/software/amazon/awssdk/http/crt/AwsCrtClientCallingPatternIntegrationTest.java @@ -0,0 +1,208 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.crt; + +import static software.amazon.awssdk.testutils.service.AwsTestBase.CREDENTIALS_PROVIDER_CHAIN; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.Assert; +import org.junit.experimental.theories.DataPoints; +import org.junit.experimental.theories.FromDataPoints; +import org.junit.experimental.theories.Theories; +import org.junit.experimental.theories.Theory; +import org.junit.runner.RunWith; +import software.amazon.awssdk.crt.CrtResource; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.SdkHttpConfigurationOption; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kms.KmsAsyncClient; +import software.amazon.awssdk.services.kms.model.GenerateRandomRequest; +import software.amazon.awssdk.services.kms.model.GenerateRandomResponse; +import software.amazon.awssdk.utils.AttributeMap; + + +/** + * Test many possible different calling patterns that users might do, and make sure everything works. + */ +@RunWith(Theories.class) +public class AwsCrtClientCallingPatternIntegrationTest { + private final static String KEY_ALIAS = "alias/aws-sdk-java-v2-integ-test"; + private final static Region REGION = Region.US_EAST_1; + private final static int DEFAULT_KEY_SIZE = 32; + + // Success rate will currently never go above ~99% due to aws-c-http not detecting connection close headers, and KMS + // closing the connection after the 100th Request on a Http Connection. 
+ // Tracking Issue: https://github.com/awslabs/aws-c-http/issues/106 + private static double MINIMUM_SUCCESS_RATE = 0.95; + + private boolean testWithClient(KmsAsyncClient asyncKMSClient, int numberOfRequests) { + List> futures = new ArrayList<>(); + + for (int i = 0; i < numberOfRequests; i++) { + GenerateRandomRequest request = GenerateRandomRequest.builder().numberOfBytes(DEFAULT_KEY_SIZE).build(); + CompletableFuture future = asyncKMSClient.generateRandom(request); + futures.add(future); + } + + List failures = new ArrayList<>(); + int actualNumSucceeded = 0; + for (CompletableFuture f : futures) { + try { + GenerateRandomResponse resp = f.get(5, TimeUnit.MINUTES); + if (200 == resp.sdkHttpResponse().statusCode()) { + actualNumSucceeded += 1; + } + } catch (Exception e) { + failures.add(e); + } + } + + int minimumNumSucceeded = (int)(numberOfRequests * (MINIMUM_SUCCESS_RATE)); + boolean succeeded = true; + if (actualNumSucceeded < minimumNumSucceeded) { + System.err.println("Failure Metrics: numRequests=" + numberOfRequests + ", numSucceeded=" + actualNumSucceeded); + succeeded = false; + } + + if (!succeeded) { + for(Exception e: failures) { + System.err.println(e.getMessage()); + } + failures.get(0).printStackTrace(); + } + + return succeeded; + } + + private boolean testWithNewClient(int eventLoopSize, int numberOfRequests) { + + try (SdkAsyncHttpClient newAwsCrtHttpClient = AwsCrtAsyncHttpClient.builder() + .build()) { + try (KmsAsyncClient newAsyncKMSClient = KmsAsyncClient.builder() + .region(REGION) + .httpClient(newAwsCrtHttpClient) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build()) { + boolean succeeded = testWithClient(newAsyncKMSClient, numberOfRequests); + return succeeded; + } + } + } + + @DataPoints("EventLoop") + public static int[] eventLoopValues(){ + return new int[]{1, 4}; + } + + @DataPoints("ConnectionPool") + public static int[] connectionsValues(){ + /* Don't use 1 connection Pool of size 1, otherwise test takes too long */ + return new int[]{10, 100}; + } + + @DataPoints("NumRequests") + public static int[] requestValues(){ + return new int[]{1, 25, 250}; + } + + @DataPoints("ParallelClients") + public static int[] parallelClientValues(){ + return new int[]{1, 2, 8}; + } + + @DataPoints("SharedClient") + public static boolean[] sharedClientValue(){ + return new boolean[]{true, false}; + } + + @Theory + public void checkAllCombinations(@FromDataPoints("EventLoop") int eventLoopSize, + @FromDataPoints("ConnectionPool") int connectionPoolSize, + @FromDataPoints("NumRequests") int numberOfRequests, + @FromDataPoints("ParallelClients") int numberOfParallelClients, + @FromDataPoints("SharedClient") boolean useSharedClient) throws Exception { + + try { + + CrtResource.waitForNoResources(); + String testName = String.format("Testing with eventLoopSize %d, connectionPoolSize %d, numberOfRequests %d, " + + "numberOfParallelJavaClients %d, useSharedClient %b", eventLoopSize, connectionPoolSize, + numberOfRequests, numberOfParallelClients, useSharedClient); + System.out.println("\n" + testName); + + CountDownLatch latch = new CountDownLatch(numberOfParallelClients); + + AttributeMap attributes = AttributeMap.builder() + .put(SdkHttpConfigurationOption.MAX_CONNECTIONS, connectionPoolSize) + .build(); + + SdkAsyncHttpClient awsCrtHttpClient = AwsCrtAsyncHttpClient.builder() + .buildWithDefaults(attributes); + + KmsAsyncClient sharedAsyncKMSClient = KmsAsyncClient.builder() + .region(REGION) + .httpClient(awsCrtHttpClient) + 
.credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + + final AtomicBoolean failed = new AtomicBoolean(false); + + long start = System.currentTimeMillis(); + ExecutorService pool = Executors.newCachedThreadPool(); + for (int threads = 0; threads < numberOfParallelClients; threads++) { + pool.submit(() -> { + if (useSharedClient) { + if (!testWithClient(sharedAsyncKMSClient, numberOfRequests)) { + System.err.println("Failed: " + testName); + failed.set(true); + } + } else { + if (!testWithNewClient(eventLoopSize, numberOfRequests)) { + System.err.println("Failed: " + testName); + failed.set(true); + } + } + latch.countDown(); + }); + } + + latch.await(5, TimeUnit.MINUTES); + + sharedAsyncKMSClient.close(); + awsCrtHttpClient.close(); + Assert.assertFalse(failed.get()); + + CrtResource.waitForNoResources(); + + float numSeconds = (float) ((System.currentTimeMillis() - start) / 1000.0); + String timeElapsed = String.format("%.2f sec", numSeconds); + + System.out.println("Passed: " + testName + ", Time " + timeElapsed); + } catch (Exception e) { + System.err.println(e.getMessage()); + e.printStackTrace(); + } + } +} diff --git a/http-clients/aws-crt-client/src/it/java/software/amazon/awssdk/http/crt/AwsCrtClientKmsIntegrationTest.java b/http-clients/aws-crt-client/src/it/java/software/amazon/awssdk/http/crt/AwsCrtClientKmsIntegrationTest.java new file mode 100644 index 000000000000..fc7e36803a87 --- /dev/null +++ b/http-clients/aws-crt-client/src/it/java/software/amazon/awssdk/http/crt/AwsCrtClientKmsIntegrationTest.java @@ -0,0 +1,142 @@ +package software.amazon.awssdk.http.crt; + +import static software.amazon.awssdk.testutils.service.AwsTestBase.CREDENTIALS_PROVIDER_CHAIN; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.crt.CrtResource; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.crt.io.TlsCipherPreference; +import software.amazon.awssdk.crt.io.TlsContextOptions; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kms.KmsAsyncClient; +import software.amazon.awssdk.services.kms.model.CreateAliasRequest; +import software.amazon.awssdk.services.kms.model.CreateAliasResponse; +import software.amazon.awssdk.services.kms.model.CreateKeyRequest; +import software.amazon.awssdk.services.kms.model.CreateKeyResponse; +import software.amazon.awssdk.services.kms.model.DecryptRequest; +import software.amazon.awssdk.services.kms.model.DecryptResponse; +import software.amazon.awssdk.services.kms.model.DescribeKeyRequest; +import software.amazon.awssdk.services.kms.model.DescribeKeyResponse; +import software.amazon.awssdk.services.kms.model.EncryptRequest; +import software.amazon.awssdk.services.kms.model.EncryptResponse; + + +public class AwsCrtClientKmsIntegrationTest { + private static String KEY_ALIAS = "alias/aws-sdk-java-v2-integ-test"; + private static Region REGION = Region.US_EAST_1; + private static List awsCrtHttpClients = new ArrayList<>(); + private static EventLoopGroup eventLoopGroup; + private static HostResolver hostResolver; + + @Before + public void setup() { + CrtResource.waitForNoResources(); + + // Create an Http Client for each TLS Cipher Preference supported on the current platform + 
for (TlsCipherPreference pref: TlsCipherPreference.values()) { + if (!TlsContextOptions.isCipherPreferenceSupported(pref)) { + continue; + } + + int numThreads = 1; + eventLoopGroup = new EventLoopGroup(numThreads); + hostResolver = new HostResolver(eventLoopGroup); + + SdkAsyncHttpClient awsCrtHttpClient = AwsCrtAsyncHttpClient.builder() + .build(); + + awsCrtHttpClients.add(awsCrtHttpClient); + } + } + + + @After + public void tearDown() { + hostResolver.close(); + eventLoopGroup.close(); + CrtResource.waitForNoResources(); + } + + private boolean doesKeyExist(KmsAsyncClient kms, String keyAlias) { + try { + DescribeKeyRequest req = DescribeKeyRequest.builder().keyId(keyAlias).build(); + DescribeKeyResponse resp = kms.describeKey(req).get(); + Assert.assertEquals(200, resp.sdkHttpResponse().statusCode()); + return resp.sdkHttpResponse().isSuccessful(); + } catch (Exception e) { + return false; + } + } + + private void createKeyAlias(KmsAsyncClient kms, String keyId, String keyAlias) throws Exception { + CreateAliasRequest req = CreateAliasRequest.builder().aliasName(keyAlias).targetKeyId(keyId).build(); + CreateAliasResponse resp = kms.createAlias(req).get(); + Assert.assertEquals(200, resp.sdkHttpResponse().statusCode()); + } + + private String createKey(KmsAsyncClient kms) throws Exception { + CreateKeyRequest req = CreateKeyRequest.builder().build(); + CreateKeyResponse resp = kms.createKey(req).get(); + Assert.assertEquals(200, resp.sdkHttpResponse().statusCode()); + return resp.keyMetadata().keyId(); + } + + private void createKeyIfNotExists(KmsAsyncClient kms, String keyAlias) throws Exception { + if (!doesKeyExist(kms, keyAlias)) { + String keyId = createKey(kms); + createKeyAlias(kms, keyId, KEY_ALIAS); + } + } + + private SdkBytes encrypt(KmsAsyncClient kms, String keyId, String plaintext) throws Exception { + SdkBytes bytes = SdkBytes.fromUtf8String(plaintext); + EncryptRequest req = EncryptRequest.builder().keyId(keyId).plaintext(bytes).build(); + EncryptResponse resp = kms.encrypt(req).get(); + Assert.assertEquals(200, resp.sdkHttpResponse().statusCode()); + return resp.ciphertextBlob(); + } + + private String decrypt(KmsAsyncClient kms, SdkBytes ciphertext) throws Exception { + DecryptRequest req = DecryptRequest.builder().ciphertextBlob(ciphertext).build(); + DecryptResponse resp = kms.decrypt(req).get(); + Assert.assertEquals(200, resp.sdkHttpResponse().statusCode()); + return resp.plaintext().asUtf8String(); + } + + private void testEncryptDecryptWithKms(KmsAsyncClient kms) throws Exception { + createKeyIfNotExists(kms, KEY_ALIAS); + Assert.assertTrue(doesKeyExist(kms, KEY_ALIAS)); + Assert.assertFalse(doesKeyExist(kms, "alias/does-not-exist-" + UUID.randomUUID())); + + String secret = UUID.randomUUID().toString(); + SdkBytes cipherText = encrypt(kms, KEY_ALIAS, secret); + String plainText = decrypt(kms, cipherText); + + Assert.assertEquals(plainText, secret); + } + + @Test + public void testEncryptDecryptWithKms() throws Exception { + for (SdkAsyncHttpClient awsCrtHttpClient: awsCrtHttpClients) { + KmsAsyncClient kms = KmsAsyncClient.builder() + .region(REGION) + .httpClient(awsCrtHttpClient) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + + testEncryptDecryptWithKms(kms); + + kms.close(); + awsCrtHttpClient.close(); + } + } +} diff --git a/http-clients/aws-crt-client/src/it/java/software/amazon/awssdk/http/crt/AwsCrtClientS3IntegrationTest.java b/http-clients/aws-crt-client/src/it/java/software/amazon/awssdk/http/crt/AwsCrtClientS3IntegrationTest.java 
new file mode 100644 index 000000000000..03862022e26c --- /dev/null +++ b/http-clients/aws-crt-client/src/it/java/software/amazon/awssdk/http/crt/AwsCrtClientS3IntegrationTest.java @@ -0,0 +1,108 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import static org.apache.commons.codec.digest.DigestUtils.sha256Hex; +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.ResponseBytes; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.crt.CrtResource; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; + + +public class AwsCrtClientS3IntegrationTest { + /** + * The name of the bucket created, used, and deleted by these tests. 
+ */ + private static String BUCKET_NAME = "aws-crt-test-stuff"; + + private static String LARGE_FILE = "http_test_doc.txt"; + private static String SMALL_FILE = "random_32_byte.data"; + private static String LARGE_FILE_SHA256 = "C7FDB5314B9742467B16BD5EA2F8012190B5E2C44A005F7984F89AAB58219534"; + private static int NUM_REQUESTS = 1000; + + private static Region REGION = Region.US_EAST_1; + + private static SdkAsyncHttpClient crtClient; + + private static S3AsyncClient s3; + + @BeforeClass + public static void setup() { + CrtResource.waitForNoResources(); + + crtClient = AwsCrtAsyncHttpClient.create(); + + s3 = S3AsyncClient.builder() + .region(REGION) + .httpClient(crtClient) + .credentialsProvider(AnonymousCredentialsProvider.create()) // File is publicly readable + .build(); + } + + @AfterClass + public static void tearDown() { + s3.close(); + crtClient.close(); + CrtResource.waitForNoResources(); + } + + @Test + public void testDownloadFromS3() throws Exception { + GetObjectRequest s3Request = GetObjectRequest.builder() + .bucket(BUCKET_NAME) + .key(LARGE_FILE) + .build(); + + byte[] responseBody = s3.getObject(s3Request, AsyncResponseTransformer.toBytes()).get(120, TimeUnit.SECONDS).asByteArray(); + + assertThat(sha256Hex(responseBody).toUpperCase()).isEqualTo(LARGE_FILE_SHA256); + } + + @Test + public void testParallelDownloadFromS3() throws Exception { + List> > requestFutures = new ArrayList<>(); + + for (int i = 0; i < NUM_REQUESTS; i++) { + GetObjectRequest s3Request = GetObjectRequest.builder() + .bucket(BUCKET_NAME) + .key(SMALL_FILE) + .build(); + CompletableFuture> requestFuture = s3.getObject(s3Request, AsyncResponseTransformer.toBytes()); + requestFutures.add(requestFuture); + } + + for(CompletableFuture> f: requestFutures) { + f.join(); + Assert.assertEquals(32, f.get().asByteArray().length); + } + } + +} diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtAsyncHttpClient.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtAsyncHttpClient.java new file mode 100644 index 000000000000..ca20f32895e3 --- /dev/null +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtAsyncHttpClient.java @@ -0,0 +1,423 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.crt; + +import static software.amazon.awssdk.utils.Validate.paramNotNull; + +import java.net.URI; +import java.time.Duration; +import java.util.LinkedList; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.crt.CrtResource; +import software.amazon.awssdk.crt.http.HttpClientConnectionManager; +import software.amazon.awssdk.crt.http.HttpClientConnectionManagerOptions; +import software.amazon.awssdk.crt.http.HttpMonitoringOptions; +import software.amazon.awssdk.crt.http.HttpProxyOptions; +import software.amazon.awssdk.crt.io.ClientBootstrap; +import software.amazon.awssdk.crt.io.SocketOptions; +import software.amazon.awssdk.crt.io.TlsCipherPreference; +import software.amazon.awssdk.crt.io.TlsContext; +import software.amazon.awssdk.crt.io.TlsContextOptions; +import software.amazon.awssdk.http.SdkHttpConfigurationOption; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.crt.internal.CrtRequestContext; +import software.amazon.awssdk.http.crt.internal.CrtRequestExecutor; +import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.IoUtils; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * An implementation of {@link SdkAsyncHttpClient} that uses the AWS Common Runtime (CRT) Http Client to communicate with + * Http Web Services. This client is asynchronous and uses non-blocking IO. + * + *

    <p>This can be created via {@link #builder()}</p>
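 + *
 + * <p>
 + * For illustration, a minimal way to construct a client with this builder (the values shown are
 + * placeholders, not defaults; only builder methods declared on this class are used):
 + * </p>
 + *
 + * <pre>{@code
 + * SdkAsyncHttpClient crtClient = AwsCrtAsyncHttpClient.builder()
 + *         .maxConcurrency(50)                            // placeholder value
 + *         .connectionMaxIdleTime(Duration.ofSeconds(60)) // placeholder value
 + *         .build();
 + * }</pre>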
    + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPublicApi +@SdkPreviewApi +public final class AwsCrtAsyncHttpClient implements SdkAsyncHttpClient { + private static final Logger log = Logger.loggerFor(AwsCrtAsyncHttpClient.class); + + private static final String AWS_COMMON_RUNTIME = "AwsCommonRuntime"; + private static final int DEFAULT_STREAM_WINDOW_SIZE = 16 * 1024 * 1024; // 16 MB + + private final Map connectionPools = new ConcurrentHashMap<>(); + private final LinkedList ownedSubResources = new LinkedList<>(); + private final ClientBootstrap bootstrap; + private final SocketOptions socketOptions; + private final TlsContext tlsContext; + private final HttpProxyOptions proxyOptions; + private final HttpMonitoringOptions monitoringOptions; + private final long maxConnectionIdleInMilliseconds; + private final int readBufferSize; + private final int maxConnectionsPerEndpoint; + private boolean isClosed = false; + + private AwsCrtAsyncHttpClient(DefaultBuilder builder, AttributeMap config) { + int maxConns = config.get(SdkHttpConfigurationOption.MAX_CONNECTIONS); + + Validate.isPositive(maxConns, "maxConns"); + Validate.notNull(builder.cipherPreference, "cipherPreference"); + Validate.isPositive(builder.readBufferSize, "readBufferSize"); + + try (ClientBootstrap clientBootstrap = new ClientBootstrap(null, null); + SocketOptions clientSocketOptions = new SocketOptions(); + TlsContextOptions clientTlsContextOptions = TlsContextOptions.createDefaultClient() // NOSONAR + .withCipherPreference(builder.cipherPreference) + .withVerifyPeer(!config.get(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES)); + TlsContext clientTlsContext = new TlsContext(clientTlsContextOptions)) { + + this.bootstrap = registerOwnedResource(clientBootstrap); + this.socketOptions = registerOwnedResource(clientSocketOptions); + this.tlsContext = registerOwnedResource(clientTlsContext); + this.readBufferSize = builder.readBufferSize; + this.maxConnectionsPerEndpoint = maxConns; + this.monitoringOptions = revolveHttpMonitoringOptions(builder.connectionHealthChecksConfiguration); + this.maxConnectionIdleInMilliseconds = config.get(SdkHttpConfigurationOption.CONNECTION_MAX_IDLE_TIMEOUT).toMillis(); + this.proxyOptions = buildProxyOptions(builder.proxyConfiguration); + } + } + + private HttpMonitoringOptions revolveHttpMonitoringOptions(ConnectionHealthChecksConfiguration config) { + if (config == null) { + return null; + } + + HttpMonitoringOptions httpMonitoringOptions = new HttpMonitoringOptions(); + httpMonitoringOptions.setMinThroughputBytesPerSecond(config.minThroughputInBytesPerSecond()); + int seconds = (int) config.allowableThroughputFailureInterval().getSeconds(); + httpMonitoringOptions.setAllowableThroughputFailureIntervalSeconds(seconds); + return httpMonitoringOptions; + } + + private HttpProxyOptions buildProxyOptions(ProxyConfiguration proxyConfiguration) { + if (proxyConfiguration == null) { + return null; + } + + HttpProxyOptions clientProxyOptions = new HttpProxyOptions(); + + clientProxyOptions.setHost(proxyConfiguration.host()); + clientProxyOptions.setPort(proxyConfiguration.port()); + + if ("https".equalsIgnoreCase(proxyConfiguration.scheme())) { + clientProxyOptions.setTlsContext(tlsContext); + } + + if (proxyConfiguration.username() != null && proxyConfiguration.password() != null) { + clientProxyOptions.setAuthorizationUsername(proxyConfiguration.username()); + clientProxyOptions.setAuthorizationPassword(proxyConfiguration.password()); 
+ clientProxyOptions.setAuthorizationType(HttpProxyOptions.HttpProxyAuthorizationType.Basic); + } else { + clientProxyOptions.setAuthorizationType(HttpProxyOptions.HttpProxyAuthorizationType.None); + } + + return clientProxyOptions; + } + + /** + * Marks a Native CrtResource as owned by the current Java Object. + * + * @param subresource The Resource to own. + * @param The CrtResource Type + * @return The CrtResource passed in + */ + private T registerOwnedResource(T subresource) { + if (subresource != null) { + subresource.addRef(); + ownedSubResources.push(subresource); + } + return subresource; + } + + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Create a {@link AwsCrtAsyncHttpClient} client with the default configuration + * + * @return an {@link SdkAsyncHttpClient} + */ + public static SdkAsyncHttpClient create() { + return new DefaultBuilder().build(); + } + + @Override + public String clientName() { + return AWS_COMMON_RUNTIME; + } + + private HttpClientConnectionManager createConnectionPool(URI uri) { + log.debug(() -> "Creating ConnectionPool for: URI:" + uri + ", MaxConns: " + maxConnectionsPerEndpoint); + + HttpClientConnectionManagerOptions options = new HttpClientConnectionManagerOptions() + .withClientBootstrap(bootstrap) + .withSocketOptions(socketOptions) + .withTlsContext(tlsContext) + .withUri(uri) + .withWindowSize(readBufferSize) + .withMaxConnections(maxConnectionsPerEndpoint) + .withManualWindowManagement(true) + .withProxyOptions(proxyOptions) + .withMonitoringOptions(monitoringOptions) + .withMaxConnectionIdleInMilliseconds(maxConnectionIdleInMilliseconds); + + return HttpClientConnectionManager.create(options); + } + + /* + * Callers of this function MUST account for the addRef() on the pool before returning. + * Every execution path consuming the return value must guarantee an associated close(). + * Currently this function is only used by execute(), which guarantees a matching close + * via the try-with-resources block. + * + * This guarantees that a returned pool will not get closed (by closing the http client) during + * the time it takes to submit a request to the pool. Acquisition requests submitted to the pool will + * be properly failed if the http client is closed before the acquisition completes. + * + * This additional complexity means we only have to keep a lock for the scope of this function, as opposed to + * the scope of calling execute(). This function will almost always just be a hash lookup and the return of an + * existing pool. If we add all of execute() to the scope, we include, at minimum a JNI call to the native + * pool implementation. + */ + private HttpClientConnectionManager getOrCreateConnectionPool(URI uri) { + synchronized (this) { + if (isClosed) { + throw new IllegalStateException("Client is closed. 
No more requests can be made with this client."); + } + + HttpClientConnectionManager connPool = connectionPools.computeIfAbsent(uri, this::createConnectionPool); + connPool.addRef(); + return connPool; + } + } + + @Override + public CompletableFuture execute(AsyncExecuteRequest asyncRequest) { + + paramNotNull(asyncRequest, "asyncRequest"); + paramNotNull(asyncRequest.request(), "SdkHttpRequest"); + paramNotNull(asyncRequest.requestContentPublisher(), "RequestContentPublisher"); + paramNotNull(asyncRequest.responseHandler(), "ResponseHandler"); + + /* + * See the note on getOrCreateConnectionPool() + * + * In particular, this returns a ref-counted object and calling getOrCreateConnectionPool + * increments the ref count by one. We add a try-with-resources to release our ref + * once we have successfully submitted a request. In this way, we avoid a race condition + * when close/shutdown is called from another thread while this function is executing (ie. + * we have a pool and no one can destroy it underneath us until we've finished submitting the + * request) + */ + try (HttpClientConnectionManager crtConnPool = getOrCreateConnectionPool(asyncRequest.request().getUri())) { + CrtRequestContext context = CrtRequestContext.builder() + .crtConnPool(crtConnPool) + .readBufferSize(readBufferSize) + .request(asyncRequest) + .build(); + + return new CrtRequestExecutor().execute(context); + } + } + + @Override + public void close() { + synchronized (this) { + + if (isClosed) { + return; + } + + connectionPools.values().forEach(pool -> IoUtils.closeQuietly(pool, log.logger())); + ownedSubResources.forEach(r -> IoUtils.closeQuietly(r, log.logger())); + ownedSubResources.clear(); + + isClosed = true; + } + } + + /** + * Builder that allows configuration of the AWS CRT HTTP implementation. + */ + public interface Builder extends SdkAsyncHttpClient.Builder { + + /** + * The Maximum number of allowed concurrent requests. For HTTP/1.1 this is the same as max connections. + * @param maxConcurrency maximum concurrency per endpoint + * @return The builder of the method chaining. + */ + Builder maxConcurrency(int maxConcurrency); + + /** + * The AWS CRT TlsCipherPreference to use for this Client + * @param tlsCipherPreference The AWS Common Runtime TlsCipherPreference + * @return The builder of the method chaining. + */ + Builder tlsCipherPreference(TlsCipherPreference tlsCipherPreference); + + /** + * Configures the number of unread bytes that can be buffered in the + * client before we stop reading from the underlying TCP socket and wait for the Subscriber + * to read more data. + * + * @param readBufferSize The number of bytes that can be buffered + * @return The builder of the method chaining. + */ + Builder readBufferSize(int readBufferSize); + + /** + * Sets the http proxy configuration to use for this client. + * @param proxyConfiguration The http proxy configuration to use + * @return The builder of the method chaining. + */ + Builder proxyConfiguration(ProxyConfiguration proxyConfiguration); + + /** + * Sets the http proxy configuration to use for this client. + * + * @param proxyConfigurationBuilderConsumer The consumer of the proxy configuration builder object. + * @return the builder for method chaining. + */ + Builder proxyConfiguration(Consumer proxyConfigurationBuilderConsumer); + + /** + * Configure the health checks for for all connections established by this client. + * + *

    + * eg: you can set a throughput threshold for a connection to be considered healthy. + * If the connection falls below this threshold for a configurable amount of time, + * then the connection is considered unhealthy and will be shut down. + * + * @param healthChecksConfiguration The health checks config to use + * @return The builder of the method chaining. + */ + Builder connectionHealthChecksConfiguration(ConnectionHealthChecksConfiguration healthChecksConfiguration); + + /** + * A convenience method to configure the health checks for all connections established by this client. + * + *

    + * eg: you can set a throughput threshold for the a connection to be considered healthy. + * If the connection falls below this threshold for a configurable amount of time, + * then the connection is considered unhealthy and will be shut down. + * + * @param healthChecksConfigurationBuilder The health checks config builder to use + * @return The builder of the method chaining. + * @see #connectionHealthChecksConfiguration(ConnectionHealthChecksConfiguration) + */ + Builder connectionHealthChecksConfiguration(Consumer + healthChecksConfigurationBuilder); + + /** + * Configure the maximum amount of time that a connection should be allowed to remain open while idle. + */ + Builder connectionMaxIdleTime(Duration connectionMaxIdleTime); + } + + /** + * Factory that allows more advanced configuration of the AWS CRT HTTP implementation. Use {@link #builder()} to + * configure and construct an immutable instance of the factory. + */ + private static final class DefaultBuilder implements Builder { + private final AttributeMap.Builder standardOptions = AttributeMap.builder(); + private TlsCipherPreference cipherPreference = TlsCipherPreference.TLS_CIPHER_SYSTEM_DEFAULT; + private int readBufferSize = DEFAULT_STREAM_WINDOW_SIZE; + private ProxyConfiguration proxyConfiguration; + private ConnectionHealthChecksConfiguration connectionHealthChecksConfiguration; + + private DefaultBuilder() { + } + + @Override + public SdkAsyncHttpClient build() { + return new AwsCrtAsyncHttpClient(this, standardOptions.build() + .merge(SdkHttpConfigurationOption.GLOBAL_HTTP_DEFAULTS)); + } + + @Override + public SdkAsyncHttpClient buildWithDefaults(AttributeMap serviceDefaults) { + return new AwsCrtAsyncHttpClient(this, standardOptions.build() + .merge(serviceDefaults) + .merge(SdkHttpConfigurationOption.GLOBAL_HTTP_DEFAULTS)); + } + + @Override + public Builder maxConcurrency(int maxConcurrency) { + Validate.isPositive(maxConcurrency, "maxConcurrency"); + standardOptions.put(SdkHttpConfigurationOption.MAX_CONNECTIONS, maxConcurrency); + return this; + } + + @Override + public Builder tlsCipherPreference(TlsCipherPreference tlsCipherPreference) { + Validate.notNull(tlsCipherPreference, "cipherPreference"); + Validate.isTrue(TlsContextOptions.isCipherPreferenceSupported(tlsCipherPreference), + "TlsCipherPreference not supported on current Platform"); + this.cipherPreference = tlsCipherPreference; + return this; + } + + @Override + public Builder readBufferSize(int readBufferSize) { + Validate.isPositive(readBufferSize, "readBufferSize"); + this.readBufferSize = readBufferSize; + return this; + } + + @Override + public Builder proxyConfiguration(ProxyConfiguration proxyConfiguration) { + this.proxyConfiguration = proxyConfiguration; + return this; + } + + @Override + public Builder connectionHealthChecksConfiguration(ConnectionHealthChecksConfiguration monitoringOptions) { + this.connectionHealthChecksConfiguration = monitoringOptions; + return this; + } + + @Override + public Builder connectionHealthChecksConfiguration(Consumer + configurationBuilder) { + ConnectionHealthChecksConfiguration.Builder builder = ConnectionHealthChecksConfiguration.builder(); + configurationBuilder.accept(builder); + return connectionHealthChecksConfiguration(builder.build()); + } + + @Override + public Builder connectionMaxIdleTime(Duration connectionMaxIdleTime) { + standardOptions.put(SdkHttpConfigurationOption.CONNECTION_MAX_IDLE_TIMEOUT, connectionMaxIdleTime); + return this; + } + + @Override + public Builder 
proxyConfiguration(Consumer proxyConfigurationBuilderConsumer) { + ProxyConfiguration.Builder builder = ProxyConfiguration.builder(); + proxyConfigurationBuilderConsumer.accept(builder); + return proxyConfiguration(builder.build()); + } + } +} diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtSdkHttpService.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtSdkHttpService.java new file mode 100644 index 000000000000..cf0a609497b4 --- /dev/null +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtSdkHttpService.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpService; + +/** + * Service binding for the AWS common runtime HTTP client implementation. Allows SDK to pick this up automatically from the + * classpath. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPublicApi +@SdkPreviewApi +public class AwsCrtSdkHttpService implements SdkAsyncHttpService { + @Override + public SdkAsyncHttpClient.Builder createAsyncHttpClientFactory() { + return AwsCrtAsyncHttpClient.builder(); + } +} diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/ConnectionHealthChecksConfiguration.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/ConnectionHealthChecksConfiguration.java new file mode 100644 index 000000000000..f8b14366cdfa --- /dev/null +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/ConnectionHealthChecksConfiguration.java @@ -0,0 +1,118 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.Validate; + +/** + * Configuration that defines health checks for for all connections established by + * the{@link ConnectionHealthChecksConfiguration}. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. 
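 + *
 + * <p>
 + * For illustration, a minimal way to build this configuration (the values shown are placeholders,
 + * not recommendations; only methods declared on the builder below are used). The result can then be
 + * passed to {@code AwsCrtAsyncHttpClient.Builder#connectionHealthChecksConfiguration}.
 + * </p>
 + *
 + * <pre>{@code
 + * ConnectionHealthChecksConfiguration healthChecks = ConnectionHealthChecksConfiguration.builder()
 + *         .minThroughputInBytesPerSecond(32_000L)                    // placeholder threshold
 + *         .allowableThroughputFailureInterval(Duration.ofSeconds(3)) // placeholder interval
 + *         .build();
 + * }</pre>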
+ */ +@SdkPublicApi +@SdkPreviewApi +public final class ConnectionHealthChecksConfiguration { + private final long minThroughputInBytesPerSecond; + private final Duration allowableThroughputFailureInterval; + + private ConnectionHealthChecksConfiguration(DefaultConnectionHealthChecksConfigurationBuilder builder) { + this.minThroughputInBytesPerSecond = Validate.paramNotNull(builder.minThroughputInBytesPerSecond, + "minThroughputInBytesPerSecond"); + this.allowableThroughputFailureInterval = Validate.isPositive(builder.allowableThroughputFailureIntervalSeconds, + "allowableThroughputFailureIntervalSeconds"); + } + + /** + * @return the minimum amount of throughput, in bytes per second, for a connection to be considered healthy. + */ + public long minThroughputInBytesPerSecond() { + return minThroughputInBytesPerSecond; + } + + /** + * @return How long a connection is allowed to be unhealthy before getting shut down. + */ + public Duration allowableThroughputFailureInterval() { + return allowableThroughputFailureInterval; + } + + public static Builder builder() { + return new DefaultConnectionHealthChecksConfigurationBuilder(); + } + + /** + * A builder for {@link ConnectionHealthChecksConfiguration}. + * + *

    <p>All implementations of this interface are mutable and not thread safe.</p>
    + */ + public interface Builder { + + /** + * Sets a throughput threshold for connections. Throughput below this value will be considered unhealthy. + * + * @param minThroughputInBytesPerSecond minimum amount of throughput, in bytes per second, for a connection to be + * considered healthy. + * @return Builder + */ + Builder minThroughputInBytesPerSecond(Long minThroughputInBytesPerSecond); + + /** + * Sets how long a connection is allowed to be unhealthy before getting shut down. + * + *

    + * It only supports seconds precision + * + * @param allowableThroughputFailureIntervalSeconds How long a connection is allowed to be unhealthy + * before getting shut down. + * @return Builder + */ + Builder allowableThroughputFailureInterval(Duration allowableThroughputFailureIntervalSeconds); + + ConnectionHealthChecksConfiguration build(); + } + + /** + * An SDK-internal implementation of {@link Builder}. + */ + private static final class DefaultConnectionHealthChecksConfigurationBuilder implements Builder { + private Long minThroughputInBytesPerSecond; + private Duration allowableThroughputFailureIntervalSeconds; + + private DefaultConnectionHealthChecksConfigurationBuilder() { + } + + @Override + public Builder minThroughputInBytesPerSecond(Long minThroughputInBytesPerSecond) { + this.minThroughputInBytesPerSecond = minThroughputInBytesPerSecond; + return this; + } + + @Override + public Builder allowableThroughputFailureInterval(Duration allowableThroughputFailureIntervalSeconds) { + this.allowableThroughputFailureIntervalSeconds = allowableThroughputFailureIntervalSeconds; + return this; + } + + @Override + public ConnectionHealthChecksConfiguration build() { + return new ConnectionHealthChecksConfiguration(this); + } + } +} diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/ProxyConfiguration.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/ProxyConfiguration.java new file mode 100644 index 000000000000..ee5f4f836f01 --- /dev/null +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/ProxyConfiguration.java @@ -0,0 +1,240 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + + +/** + * Proxy configuration for {@link AwsCrtAsyncHttpClient}. This class is used to configure an HTTP proxy to be used by + * the {@link AwsCrtAsyncHttpClient}. + * + * @see AwsCrtAsyncHttpClient.Builder#proxyConfiguration(ProxyConfiguration) + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPublicApi +@SdkPreviewApi +public final class ProxyConfiguration implements ToCopyableBuilder { + private final String scheme; + private final String host; + private final int port; + + private final String username; + private final String password; + + private ProxyConfiguration(BuilderImpl builder) { + this.scheme = builder.scheme; + this.host = builder.host; + this.port = builder.port; + this.username = builder.username; + this.password = builder.password; + } + + /** + * @return The proxy scheme. + */ + public String scheme() { + return scheme; + } + + /** + * @return The proxy host. 
+ */ + public String host() { + return host; + } + + /** + * @return The proxy port. + */ + public int port() { + return port; + } + + /** + * @return Basic authentication username + */ + public String username() { + return username; + } + + /** + * @return Basic authentication password + */ + public String password() { + return password; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + ProxyConfiguration that = (ProxyConfiguration) o; + + if (port != that.port) { + return false; + } + + if (!Objects.equals(this.scheme, that.scheme)) { + return false; + } + + if (!Objects.equals(this.host, that.host)) { + return false; + } + + if (!Objects.equals(this.username, that.username)) { + return false; + } + + return Objects.equals(this.password, that.password); + } + + @Override + public int hashCode() { + int result = scheme != null ? scheme.hashCode() : 0; + result = 31 * result + (host != null ? host.hashCode() : 0); + result = 31 * result + port; + result = 31 * result + (username != null ? username.hashCode() : 0); + result = 31 * result + (password != null ? password.hashCode() : 0); + + return result; + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + /** + * Builder for {@link ProxyConfiguration}. + */ + public interface Builder extends CopyableBuilder { + + /** + * Set the hostname of the proxy. + * @param host The proxy host. + * @return This object for method chaining. + */ + Builder host(String host); + + /** + * Set the port that the proxy expects connections on. + * @param port The proxy port. + * @return This object for method chaining. + */ + Builder port(int port); + + /** + * The HTTP scheme to use for connecting to the proxy. Valid values are {@code http} and {@code https}. + *

    + * The client defaults to {@code http} if none is given. + * + * @param scheme The proxy scheme. + * @return This object for method chaining. + */ + Builder scheme(String scheme); + + /** + * The username to use for basic proxy authentication + *

    + * If not set, the client will not use basic authentication + * + * @param username The basic authentication username. + * @return This object for method chaining. + */ + Builder username(String username); + + /** + * The password to use for basic proxy authentication + *

    + * If not set, the client will not use basic authentication + * + * @param password The basic authentication password. + * @return This object for method chaining. + */ + Builder password(String password); + } + + private static final class BuilderImpl implements Builder { + private String scheme; + private String host; + private int port; + private String username; + private String password; + + private BuilderImpl() { + } + + private BuilderImpl(ProxyConfiguration proxyConfiguration) { + this.scheme = proxyConfiguration.scheme; + this.host = proxyConfiguration.host; + this.port = proxyConfiguration.port; + this.username = proxyConfiguration.username; + this.password = proxyConfiguration.password; + } + + @Override + public Builder scheme(String scheme) { + this.scheme = scheme; + return this; + } + + @Override + public Builder host(String host) { + this.host = host; + return this; + } + + @Override + public Builder port(int port) { + this.port = port; + return this; + } + + @Override + public Builder username(String username) { + this.username = username; + return this; + } + + @Override + public Builder password(String password) { + this.password = password; + return this; + } + + @Override + public ProxyConfiguration build() { + return new ProxyConfiguration(this); + } + } +} diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtAsyncHttpStreamAdapter.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtAsyncHttpStreamAdapter.java new file mode 100644 index 000000000000..86f51846a3c7 --- /dev/null +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtAsyncHttpStreamAdapter.java @@ -0,0 +1,137 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.crt.internal; + +import java.nio.ByteBuffer; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.crt.CRT; +import software.amazon.awssdk.crt.http.HttpClientConnection; +import software.amazon.awssdk.crt.http.HttpException; +import software.amazon.awssdk.crt.http.HttpHeader; +import software.amazon.awssdk.crt.http.HttpHeaderBlock; +import software.amazon.awssdk.crt.http.HttpRequestBodyStream; +import software.amazon.awssdk.crt.http.HttpStream; +import software.amazon.awssdk.crt.http.HttpStreamResponseHandler; +import software.amazon.awssdk.http.HttpStatusFamily; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * Implements the CrtHttpStreamHandler API and converts CRT callbacks into calls to SDK AsyncExecuteRequest methods + */ +@SdkInternalApi +public final class AwsCrtAsyncHttpStreamAdapter implements HttpStreamResponseHandler, HttpRequestBodyStream { + private static final Logger log = Logger.loggerFor(AwsCrtAsyncHttpStreamAdapter.class); + + private final HttpClientConnection connection; + private final CompletableFuture responseComplete; + private final AsyncExecuteRequest sdkRequest; + private final SdkHttpResponse.Builder respBuilder = SdkHttpResponse.builder(); + private final int windowSize; + private final AwsCrtRequestBodySubscriber requestBodySubscriber; + private AwsCrtResponseBodyPublisher respBodyPublisher = null; + + public AwsCrtAsyncHttpStreamAdapter(HttpClientConnection connection, CompletableFuture responseComplete, + AsyncExecuteRequest sdkRequest, int windowSize) { + this.connection = Validate.notNull(connection, "HttpConnection is null"); + this.responseComplete = Validate.notNull(responseComplete, "reqComplete Future is null"); + this.sdkRequest = Validate.notNull(sdkRequest, "AsyncExecuteRequest Future is null"); + this.windowSize = Validate.isPositive(windowSize, "windowSize is <= 0"); + this.requestBodySubscriber = new AwsCrtRequestBodySubscriber(windowSize); + + sdkRequest.requestContentPublisher().subscribe(requestBodySubscriber); + } + + private void initRespBodyPublisherIfNeeded(HttpStream stream) { + if (respBodyPublisher == null) { + respBodyPublisher = new AwsCrtResponseBodyPublisher(connection, stream, responseComplete, windowSize); + } + } + + @Override + public void onResponseHeaders(HttpStream stream, int responseStatusCode, int blockType, HttpHeader[] nextHeaders) { + initRespBodyPublisherIfNeeded(stream); + + for (HttpHeader h : nextHeaders) { + respBuilder.appendHeader(h.getName(), h.getValue()); + } + } + + @Override + public void onResponseHeadersDone(HttpStream stream, int headerType) { + if (headerType == HttpHeaderBlock.MAIN.getValue()) { + initRespBodyPublisherIfNeeded(stream); + + respBuilder.statusCode(stream.getResponseStatusCode()); + sdkRequest.responseHandler().onHeaders(respBuilder.build()); + sdkRequest.responseHandler().onStream(respBodyPublisher); + } + } + + @Override + public int onResponseBody(HttpStream stream, byte[] bodyBytesIn) { + initRespBodyPublisherIfNeeded(stream); + + respBodyPublisher.queueBuffer(bodyBytesIn); + respBodyPublisher.publishToSubscribers(); + + /* + * Intentionally zero. We manually manage the crt stream's window within the body publisher by updating with + * the exact amount we were able to push to the subcriber. 
+ * + * See the call to stream.incrementWindow() in AwsCrtResponseBodyPublisher. + */ + return 0; + } + + @Override + public void onResponseComplete(HttpStream stream, int errorCode) { + initRespBodyPublisherIfNeeded(stream); + + if (HttpStatusFamily.of(respBuilder.statusCode()) == HttpStatusFamily.SERVER_ERROR) { + connection.shutdown(); + } + + if (errorCode == CRT.AWS_CRT_SUCCESS) { + log.debug(() -> "Response Completed Successfully"); + respBodyPublisher.setQueueComplete(); + respBodyPublisher.publishToSubscribers(); + } else { + HttpException error = new HttpException(errorCode); + log.error(() -> "Response Encountered an Error.", error); + + // Invoke Error Callback on SdkAsyncHttpResponseHandler + try { + sdkRequest.responseHandler().onError(error); + } catch (Exception e) { + log.error(() -> String.format("SdkAsyncHttpResponseHandler %s threw an exception in onError: %s", + sdkRequest.responseHandler(), e)); + } + + // Invoke Error Callback on any Subscriber's of the Response Body + respBodyPublisher.setError(error); + respBodyPublisher.publishToSubscribers(); + } + } + + @Override + public boolean sendRequestBody(ByteBuffer bodyBytesOut) { + return requestBodySubscriber.transferRequestBody(bodyBytesOut); + } +} diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtRequestBodySubscriber.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtRequestBodySubscriber.java new file mode 100644 index 000000000000..877cb474dc3c --- /dev/null +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtRequestBodySubscriber.java @@ -0,0 +1,132 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.crt.internal; + +import static software.amazon.awssdk.crt.utils.ByteBufferUtils.transferData; + +import java.nio.ByteBuffer; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * Implements the Subscriber API to be be callable from AwsCrtAsyncHttpStreamAdapter.sendRequestBody() + */ +@SdkInternalApi +public final class AwsCrtRequestBodySubscriber implements Subscriber { + private static final Logger log = Logger.loggerFor(AwsCrtRequestBodySubscriber.class); + + private final int windowSize; + private final Queue queuedBuffers = new ConcurrentLinkedQueue<>(); + private final AtomicLong queuedByteCount = new AtomicLong(0); + private final AtomicBoolean isComplete = new AtomicBoolean(false); + private final AtomicReference error = new AtomicReference<>(null); + + private AtomicReference subscriptionRef = new AtomicReference<>(null); + + /** + * + * @param windowSize The number bytes to be queued before we stop proactively queuing data + */ + public AwsCrtRequestBodySubscriber(int windowSize) { + Validate.isPositive(windowSize, "windowSize is <= 0"); + this.windowSize = windowSize; + } + + protected void requestDataIfNecessary() { + Subscription subscription = subscriptionRef.get(); + if (subscription == null) { + log.error(() -> "Subscription is null"); + return; + } + if (queuedByteCount.get() < windowSize) { + subscription.request(1); + } + } + + @Override + public void onSubscribe(Subscription s) { + Validate.paramNotNull(s, "s"); + + boolean wasFirstSubscription = subscriptionRef.compareAndSet(null, s); + + if (!wasFirstSubscription) { + log.error(() -> "Only one Subscription supported!"); + s.cancel(); + return; + } + + requestDataIfNecessary(); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + Validate.paramNotNull(byteBuffer, "byteBuffer"); + queuedBuffers.add(byteBuffer); + queuedByteCount.addAndGet(byteBuffer.remaining()); + requestDataIfNecessary(); + } + + @Override + public void onError(Throwable t) { + log.error(() -> "onError() received an error: " + t.getMessage()); + error.compareAndSet(null, t); + } + + @Override + public void onComplete() { + log.debug(() -> "AwsCrtRequestBodySubscriber Completed"); + isComplete.set(true); + } + + /** + * Transfers any queued data from the Request Body subscriptionRef to the output buffer + * @param out The output ByteBuffer + * @return true if Request Body is completely transferred, false otherwise + */ + public synchronized boolean transferRequestBody(ByteBuffer out) { + if (error.get() != null) { + throw new RuntimeException(error.get()); + } + + while (out.remaining() > 0 && !queuedBuffers.isEmpty()) { + ByteBuffer nextBuffer = queuedBuffers.peek(); + int amtTransferred = transferData(nextBuffer, out); + queuedByteCount.addAndGet(-amtTransferred); + + if (nextBuffer.remaining() == 0) { + queuedBuffers.remove(); + } + } + + boolean endOfStream = isComplete.get() && queuedBuffers.isEmpty(); + + if (!endOfStream) { + requestDataIfNecessary(); + } else { + log.debug(() -> "End Of RequestBody reached"); + } + + return endOfStream; + } +} diff --git 
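The subscriber above applies backpressure purely through the size of its internal queue: it calls subscription.request(1) only while fewer than windowSize bytes are buffered, and the CRT native thread later drains the queue through transferRequestBody(). A minimal sketch of that bounded-buffer pattern follows; the names are hypothetical and the error bookkeeping and drain path of the real class are omitted.

import java.nio.ByteBuffer;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

// Hedged sketch (not part of this change set): the bounded-queue backpressure pattern
// used by AwsCrtRequestBodySubscriber, reduced to its essentials.
final class BoundedBufferingSubscriberSketch implements Subscriber<ByteBuffer> {
    private final int windowSize;
    private final Queue<ByteBuffer> queued = new ConcurrentLinkedQueue<>();
    private final AtomicLong queuedBytes = new AtomicLong();
    private volatile Subscription subscription;

    BoundedBufferingSubscriberSketch(int windowSize) {
        this.windowSize = windowSize;
    }

    @Override
    public void onSubscribe(Subscription s) {
        subscription = s;
        s.request(1); // prime the stream; further demand is issued only while under the window
    }

    @Override
    public void onNext(ByteBuffer buffer) {
        queued.add(buffer);
        // Ask for more upstream data only while the buffered byte count stays below windowSize;
        // a separate drain call (transferRequestBody in the real class) empties the queue.
        if (queuedBytes.addAndGet(buffer.remaining()) < windowSize) {
            subscription.request(1);
        }
    }

    @Override
    public void onError(Throwable t) {
        // The real class records the error and rethrows it on the next native drain call.
    }

    @Override
    public void onComplete() {
        // The real class marks end-of-stream so the drain can report completion to the CRT.
    }
}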
a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtResponseBodyPublisher.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtResponseBodyPublisher.java new file mode 100644 index 000000000000..a72ac8c7fb14 --- /dev/null +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtResponseBodyPublisher.java @@ -0,0 +1,333 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt.internal; + +import java.nio.ByteBuffer; +import java.util.Optional; +import java.util.Queue; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongUnaryOperator; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.crt.http.HttpClientConnection; +import software.amazon.awssdk.crt.http.HttpStream; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * Adapts an AWS Common Runtime Response Body stream from CrtHttpStreamHandler to a Publisher + */ +@SdkInternalApi +public final class AwsCrtResponseBodyPublisher implements Publisher { + private static final Logger log = Logger.loggerFor(AwsCrtResponseBodyPublisher.class); + private static final LongUnaryOperator DECREMENT_IF_GREATER_THAN_ZERO = x -> ((x > 0) ? (x - 1) : (x)); + + private final HttpClientConnection connection; + private final HttpStream stream; + private final CompletableFuture responseComplete; + private final AtomicLong outstandingRequests = new AtomicLong(0); + private final int windowSize; + private final AtomicBoolean isCancelled = new AtomicBoolean(false); + private final AtomicBoolean areNativeResourcesReleased = new AtomicBoolean(false); + private final AtomicBoolean isSubscriptionComplete = new AtomicBoolean(false); + private final AtomicBoolean queueComplete = new AtomicBoolean(false); + private final AtomicInteger mutualRecursionDepth = new AtomicInteger(0); + private final AtomicInteger queuedBytes = new AtomicInteger(0); + private final AtomicReference> subscriberRef = new AtomicReference<>(null); + private final Queue queuedBuffers = new ConcurrentLinkedQueue<>(); + private final AtomicReference error = new AtomicReference<>(null); + + /** + * Adapts a streaming AWS CRT Http Response Body to a Publisher + * @param stream The AWS CRT Http Stream for this Response + * @param windowSize The max allowed bytes to be queued. The sum of the sizes of all queued ByteBuffers should + * never exceed this value. 
+ */ + public AwsCrtResponseBodyPublisher(HttpClientConnection connection, HttpStream stream, + CompletableFuture responseComplete, int windowSize) { + this.connection = Validate.notNull(connection, "HttpConnection must not be null"); + this.stream = Validate.notNull(stream, "Stream must not be null"); + this.responseComplete = Validate.notNull(responseComplete, "ResponseComplete future must not be null"); + this.windowSize = Validate.isPositive(windowSize, "windowSize must be > 0"); + } + + /** + * Method for the users consuming the Http Response Body to register a subscriber. + * @param subscriber The Subscriber to register. + */ + @Override + public void subscribe(Subscriber subscriber) { + Validate.notNull(subscriber, "Subscriber must not be null"); + + boolean wasFirstSubscriber = subscriberRef.compareAndSet(null, subscriber); + + if (!wasFirstSubscriber) { + log.error(() -> "Only one subscriber allowed"); + + // onSubscribe must be called first before onError gets called, so give it a do-nothing Subscription + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + // This is a dummy implementation to allow the onError call + } + + @Override + public void cancel() { + // This is a dummy implementation to allow the onError call + } + }); + subscriber.onError(new IllegalStateException("Only one subscriber allowed")); + } else { + subscriber.onSubscribe(new AwsCrtResponseBodySubscription(this)); + } + } + + /** + * Adds a Buffer to the Queue to be published to any Subscribers + * @param buffer The Buffer to be queued. + */ + public void queueBuffer(byte[] buffer) { + Validate.notNull(buffer, "ByteBuffer must not be null"); + + if (isCancelled.get()) { + // Immediately open HttpStream's IO window so it doesn't see any IO Back-pressure. + // AFAIK there's no way to abort an in-progress HttpStream, only free it's memory by calling close() + stream.incrementWindow(buffer.length); + return; + } + + queuedBuffers.add(buffer); + int totalBytesQueued = queuedBytes.addAndGet(buffer.length); + + if (totalBytesQueued > windowSize) { + throw new IllegalStateException("Queued more than Window Size: queued=" + totalBytesQueued + + ", window=" + windowSize); + } + } + + /** + * Function called by Response Body Subscribers to request more Response Body buffers. + * @param n The number of buffers requested. + */ + protected void request(long n) { + Validate.inclusiveBetween(1, Long.MAX_VALUE, n, "request"); + + // Check for overflow of outstanding Requests, and clamp to LONG_MAX. + long outstandingReqs; + if (n > (Long.MAX_VALUE - outstandingRequests.get())) { + outstandingRequests.set(Long.MAX_VALUE); + outstandingReqs = Long.MAX_VALUE; + } else { + outstandingReqs = outstandingRequests.addAndGet(n); + } + + /* + * Since we buffer, in the case where the subscriber came in after the publication has already begun, + * go ahead and flush what we have. + */ + publishToSubscribers(); + + log.trace(() -> "Subscriber Requested more Buffers. Outstanding Requests: " + outstandingReqs); + } + + public void setError(Throwable t) { + log.error(() -> "Error processing Response Body", t); + error.compareAndSet(null, t); + } + + protected void setCancelled() { + isCancelled.set(true); + /** + * subscriberRef must set to null due to ReactiveStream Spec stating references to Subscribers must be deleted + * when onCancel() is called. 
+ */ + subscriberRef.set(null); + } + + private synchronized void releaseNativeResources() { + boolean alreadyReleased = areNativeResourcesReleased.getAndSet(true); + + if (!alreadyReleased) { + stream.close(); + connection.close(); + } + } + + /** + * Called when the final Buffer has been queued and no more data is expected. + */ + public void setQueueComplete() { + log.trace(() -> "Response Body Publisher queue marked as completed."); + queueComplete.set(true); + // We're done with the Native Resources, release them so they can be used by another request. + releaseNativeResources(); + } + + /** + * Completes the Subscription by calling either the .onError() or .onComplete() callbacks exactly once. + */ + protected void completeSubscriptionExactlyOnce() { + boolean alreadyComplete = isSubscriptionComplete.getAndSet(true); + + if (alreadyComplete) { + return; + } + + // Subscriber may have cancelled their subscription, in which case this may be null. + Optional> subscriber = Optional.ofNullable(subscriberRef.getAndSet(null)); + + Throwable throwable = error.get(); + + // We're done with the Native Resources, release them so they can be used by another request. + releaseNativeResources(); + + // Complete the Futures + if (throwable != null) { + log.error(() -> "Error before ResponseBodyPublisher could complete: " + throwable.getMessage()); + try { + subscriber.ifPresent(s -> s.onError(throwable)); + } catch (Exception e) { + log.warn(() -> "Failed to exceptionally complete subscriber future with: " + throwable.getMessage()); + } + responseComplete.completeExceptionally(throwable); + } else { + log.debug(() -> "ResponseBodyPublisher Completed Successfully"); + try { + subscriber.ifPresent(Subscriber::onComplete); + } catch (Exception e) { + log.warn(() -> "Failed to successfully complete subscriber future"); + } + responseComplete.complete(null); + } + } + + /** + * Publishes any queued data to any Subscribers if there is data queued and there is an outstanding Subscriber + * request for more data. Will also call onError() or onComplete() callbacks if needed. + * + * This method MUST be synchronized since it can be called simultaneously from both the Native EventLoop Thread and + * the User Thread. If this method wasn't synchronized, it'd be possible for each thread to dequeue a buffer by + * calling queuedBuffers.poll(), but then have the 2nd thread call subscriber.onNext(buffer) first, resulting in the + * subscriber seeing out-of-order data. To avoid this race condition, this method must be synchronized. 
+ */ + protected void publishToSubscribers() { + boolean shouldComplete = true; + synchronized (this) { + if (error.get() == null) { + if (isSubscriptionComplete.get() || isCancelled.get()) { + log.debug(() -> "Subscription already completed or cancelled, can't publish updates to Subscribers."); + return; + } + + if (mutualRecursionDepth.get() > 0) { + /** + * If our depth is > 0, then we already made a call to publishToSubscribers() further up the stack that + * will continue publishing to subscribers, and this call should return without completing work to avoid + * infinite recursive loop between: "subscription.request() -> subscriber.onNext() -> subscription.request()" + */ + return; + } + + int totalAmountTransferred = 0; + + while (outstandingRequests.get() > 0 && !queuedBuffers.isEmpty()) { + byte[] buffer = queuedBuffers.poll(); + outstandingRequests.getAndUpdate(DECREMENT_IF_GREATER_THAN_ZERO); + int amount = buffer.length; + publishWithoutMutualRecursion(subscriberRef.get(), ByteBuffer.wrap(buffer)); + totalAmountTransferred += amount; + } + + if (totalAmountTransferred > 0) { + queuedBytes.addAndGet(-totalAmountTransferred); + + // We may have released the Native HttpConnection and HttpStream if they completed before the Subscriber + // has finished reading the data. + if (!areNativeResourcesReleased.get()) { + // Open HttpStream's IO window so HttpStream can keep track of IO back-pressure + // This is why it is correct to return 0 from AwsCrtAsyncHttpStreamAdapter::onResponseBody + stream.incrementWindow(totalAmountTransferred); + } + } + + shouldComplete = queueComplete.get() && queuedBuffers.isEmpty(); + } else { + shouldComplete = true; + } + } + + // Check if Complete, consider no subscriber as a completion. + if (shouldComplete) { + completeSubscriptionExactlyOnce(); + } + } + + /** + * This method is used to avoid a StackOverflow due to the potential infinite loop between + * "subscription.request() -> subscriber.onNext() -> subscription.request()" calls. We only call subscriber.onNext() + * if the recursion depth is zero, otherwise we return up to the stack frame with depth zero and continue publishing + * from there. + * @param subscriber The Subscriber to publish to. + * @param buffer The buffer to publish to the subscriber. + */ + private synchronized void publishWithoutMutualRecursion(Subscriber subscriber, ByteBuffer buffer) { + try { + /** + * Need to keep track of recursion depth between .onNext() -> .request() calls + */ + int depth = mutualRecursionDepth.getAndIncrement(); + if (depth == 0) { + subscriber.onNext(buffer); + } + } finally { + mutualRecursionDepth.decrementAndGet(); + } + } + + static class AwsCrtResponseBodySubscription implements Subscription { + private final AwsCrtResponseBodyPublisher publisher; + + AwsCrtResponseBodySubscription(AwsCrtResponseBodyPublisher publisher) { + this.publisher = publisher; + } + + @Override + public void request(long n) { + if (n <= 0) { + // Reactive Stream Spec requires us to call onError() callback instead of throwing Exception here. 
+ publisher.setError(new IllegalArgumentException("Request is for <= 0 elements: " + n)); + publisher.publishToSubscribers(); + return; + } + + publisher.request(n); + publisher.publishToSubscribers(); + } + + @Override + public void cancel() { + publisher.setCancelled(); + } + } + +} diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/CrtRequestContext.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/CrtRequestContext.java new file mode 100644 index 000000000000..d43fd1c9fb19 --- /dev/null +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/CrtRequestContext.java @@ -0,0 +1,77 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.crt.http.HttpClientConnectionManager; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; + +@SdkInternalApi +public final class CrtRequestContext { + private final AsyncExecuteRequest request; + private final int readBufferSize; + private final HttpClientConnectionManager crtConnPool; + + private CrtRequestContext(Builder builder) { + this.request = builder.request; + this.readBufferSize = builder.readBufferSize; + this.crtConnPool = builder.crtConnPool; + } + + public static Builder builder() { + return new Builder(); + } + + public AsyncExecuteRequest sdkRequest() { + return request; + } + + public int readBufferSize() { + return readBufferSize; + } + + public HttpClientConnectionManager crtConnPool() { + return crtConnPool; + } + + public static class Builder { + private AsyncExecuteRequest request; + private int readBufferSize; + private HttpClientConnectionManager crtConnPool; + + private Builder() { + } + + public Builder request(AsyncExecuteRequest request) { + this.request = request; + return this; + } + + public Builder readBufferSize(int readBufferSize) { + this.readBufferSize = readBufferSize; + return this; + } + + public Builder crtConnPool(HttpClientConnectionManager crtConnPool) { + this.crtConnPool = crtConnPool; + return this; + } + + public CrtRequestContext build() { + return new CrtRequestContext(this); + } + } +} diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/CrtRequestExecutor.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/CrtRequestExecutor.java new file mode 100644 index 000000000000..fb6c269ca226 --- /dev/null +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/CrtRequestExecutor.java @@ -0,0 +1,166 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
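The publisher above couples Reactive Streams demand to the CRT's flow-control window: buffers are handed to the subscriber only while there is outstanding demand, and the native window is re-opened by exactly the number of bytes delivered, which is why the adapter's onResponseBody() returns 0. A minimal sketch of that drain loop follows; the names are hypothetical, and the demand-overflow clamping, re-entrancy guard, and error/completion paths of the real class are omitted.

import java.nio.ByteBuffer;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;
import org.reactivestreams.Subscriber;

// Hedged sketch (not part of this change set): the demand-driven drain loop implemented by
// AwsCrtResponseBodyPublisher.publishToSubscribers(). FlowWindow stands in for
// HttpStream.incrementWindow().
final class ResponseDrainSketch {
    interface FlowWindow {
        void increment(int bytes);
    }

    private final Queue<byte[]> queuedBuffers = new ConcurrentLinkedQueue<>();
    private final AtomicLong outstandingRequests = new AtomicLong();
    private final Subscriber<? super ByteBuffer> subscriber;
    private final FlowWindow window;

    ResponseDrainSketch(Subscriber<? super ByteBuffer> subscriber, FlowWindow window) {
        this.subscriber = subscriber;
        this.window = window;
    }

    void queue(byte[] buffer) {        // called from the CRT callback thread
        queuedBuffers.add(buffer);
        drain();
    }

    void request(long n) {             // called from Subscription.request(n)
        outstandingRequests.addAndGet(n);
        drain();
    }

    // Synchronized because the CRT event-loop thread and the subscriber thread both call it.
    private synchronized void drain() {
        int delivered = 0;
        while (outstandingRequests.get() > 0 && !queuedBuffers.isEmpty()) {
            byte[] next = queuedBuffers.poll();
            outstandingRequests.decrementAndGet();
            subscriber.onNext(ByteBuffer.wrap(next));
            delivered += next.length;
        }
        if (delivered > 0) {
            // Re-open the native flow-control window by exactly what the subscriber consumed;
            // this pairs with onResponseBody() returning 0 in the stream adapter.
            window.increment(delivered);
        }
    }
}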
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt.internal; + +import static software.amazon.awssdk.utils.CollectionUtils.isNullOrEmpty; +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.crt.CrtRuntimeException; +import software.amazon.awssdk.crt.http.HttpClientConnection; +import software.amazon.awssdk.crt.http.HttpHeader; +import software.amazon.awssdk.crt.http.HttpRequest; +import software.amazon.awssdk.http.Header; +import software.amazon.awssdk.http.SdkCancellationException; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.http.SdkHttpUtils; + +@SdkInternalApi +public final class CrtRequestExecutor { + private static final Logger log = Logger.loggerFor(CrtRequestExecutor.class); + + public CompletableFuture execute(CrtRequestContext executionContext) { + CompletableFuture requestFuture = createExecutionFuture(executionContext.sdkRequest()); + + // When a Connection is ready from the Connection Pool, schedule the Request on the connection + CompletableFuture httpClientConnectionCompletableFuture = + executionContext.crtConnPool().acquireConnection(); + + httpClientConnectionCompletableFuture.whenComplete((crtConn, throwable) -> { + AsyncExecuteRequest asyncRequest = executionContext.sdkRequest(); + // If we didn't get a connection for some reason, fail the request + if (throwable != null) { + handleFailure(new IOException("An exception occurred when acquiring connection", throwable), + requestFuture, + asyncRequest.responseHandler()); + return; + } + + AwsCrtAsyncHttpStreamAdapter crtToSdkAdapter = + new AwsCrtAsyncHttpStreamAdapter(crtConn, requestFuture, asyncRequest, executionContext.readBufferSize()); + HttpRequest crtRequest = toCrtRequest(asyncRequest, crtToSdkAdapter); + // Submit the Request on this Connection + invokeSafely(() -> { + try { + crtConn.makeRequest(crtRequest, crtToSdkAdapter).activate(); + } catch (IllegalStateException | CrtRuntimeException e) { + log.debug(() -> "An exception occurred when making the request", e); + handleFailure(new IOException("An exception occurred when making the request", e), + requestFuture, + asyncRequest.responseHandler()); + + } + }); + }); + + return requestFuture; + } + + /** + * Convenience method to create the execution future and set up the cancellation logic. + * + * @return The created execution future. 
+ */ + private CompletableFuture createExecutionFuture(AsyncExecuteRequest request) { + CompletableFuture future = new CompletableFuture<>(); + + future.whenComplete((r, t) -> { + if (t == null) { + return; + } + //TODO: Aborting request once it's supported in CRT + if (future.isCancelled()) { + request.responseHandler().onError(new SdkCancellationException("The request was cancelled")); + } + }); + + return future; + } + + private void handleFailure(Throwable cause, + CompletableFuture executeFuture, + SdkAsyncHttpResponseHandler responseHandler) { + try { + responseHandler.onError(cause); + } catch (Exception e) { + log.error(() -> String.format("SdkAsyncHttpResponseHandler %s throw an exception in onError", + responseHandler.toString()), e); + } + + executeFuture.completeExceptionally(cause); + } + + private static HttpRequest toCrtRequest(AsyncExecuteRequest asyncRequest, AwsCrtAsyncHttpStreamAdapter crtToSdkAdapter) { + URI uri = asyncRequest.request().getUri(); + SdkHttpRequest sdkRequest = asyncRequest.request(); + + String method = sdkRequest.method().name(); + String encodedPath = sdkRequest.encodedPath(); + if (encodedPath == null || encodedPath.length() == 0) { + encodedPath = "/"; + } + + String encodedQueryString = SdkHttpUtils.encodeAndFlattenQueryParameters(sdkRequest.rawQueryParameters()) + .map(value -> "?" + value) + .orElse(""); + + HttpHeader[] crtHeaderArray = asArray(createHttpHeaderList(uri, asyncRequest)); + + return new HttpRequest(method, encodedPath + encodedQueryString, crtHeaderArray, crtToSdkAdapter); + } + + private static HttpHeader[] asArray(List crtHeaderList) { + return crtHeaderList.toArray(new HttpHeader[0]); + } + + private static List createHttpHeaderList(URI uri, AsyncExecuteRequest asyncRequest) { + SdkHttpRequest sdkRequest = asyncRequest.request(); + // worst case we may add 3 more headers here + List crtHeaderList = new ArrayList<>(sdkRequest.headers().size() + 3); + + // Set Host Header if needed + if (isNullOrEmpty(sdkRequest.headers().get(Header.HOST))) { + crtHeaderList.add(new HttpHeader(Header.HOST, uri.getHost())); + } + + // Add Connection Keep Alive Header to reuse this Http Connection as long as possible + if (isNullOrEmpty(sdkRequest.headers().get(Header.CONNECTION))) { + crtHeaderList.add(new HttpHeader(Header.CONNECTION, Header.KEEP_ALIVE_VALUE)); + } + + // Set Content-Length if needed + Optional contentLength = asyncRequest.requestContentPublisher().contentLength(); + if (isNullOrEmpty(sdkRequest.headers().get(Header.CONTENT_LENGTH)) && contentLength.isPresent()) { + crtHeaderList.add(new HttpHeader(Header.CONTENT_LENGTH, Long.toString(contentLength.get()))); + } + + // Add the rest of the Headers + sdkRequest.headers().forEach((key, value) -> { + value.stream().map(val -> new HttpHeader(key, val)).forEach(crtHeaderList::add); + }); + + return crtHeaderList; + } +} diff --git a/http-clients/aws-crt-client/src/main/resources/META-INF/services/software.amazon.awssdk.http.async.SdkAsyncHttpService b/http-clients/aws-crt-client/src/main/resources/META-INF/services/software.amazon.awssdk.http.async.SdkAsyncHttpService new file mode 100644 index 000000000000..f0312a3b901d --- /dev/null +++ b/http-clients/aws-crt-client/src/main/resources/META-INF/services/software.amazon.awssdk.http.async.SdkAsyncHttpService @@ -0,0 +1,16 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. 
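The executor above is internal; callers reach it through the SdkAsyncHttpClient interface, as the tests below do. A hedged end-to-end sketch of a single request follows, reusing the EmptyPublisher and RecordingResponseHandler test helpers from this change against a placeholder endpoint.

import java.net.URI;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import software.amazon.awssdk.http.RecordingResponseHandler;
import software.amazon.awssdk.http.SdkHttpFullRequest;
import software.amazon.awssdk.http.SdkHttpMethod;
import software.amazon.awssdk.http.async.AsyncExecuteRequest;
import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient;
import software.amazon.awssdk.http.crt.EmptyPublisher;

// Hedged sketch (not part of this change set): issuing a single GET through the public
// SdkAsyncHttpClient API. The endpoint is a placeholder; EmptyPublisher and
// RecordingResponseHandler are the test-scoped helpers used elsewhere in this change.
public class CrtClientUsageSketch {
    public static void main(String[] args) throws Exception {
        try (SdkAsyncHttpClient client = AwsCrtAsyncHttpClient.create()) {
            SdkHttpFullRequest request = SdkHttpFullRequest.builder()
                    .uri(URI.create("http://localhost:8080")) // placeholder endpoint
                    .method(SdkHttpMethod.GET)
                    .encodedPath("/")
                    .build();

            CompletableFuture<Void> done = client.execute(AsyncExecuteRequest.builder()
                    .request(request)
                    .requestContentPublisher(new EmptyPublisher())
                    .responseHandler(new RecordingResponseHandler())
                    .build());

            done.get(60, TimeUnit.SECONDS); // wait for the request to finish, as the tests do
        }
    }
}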
+# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +software.amazon.awssdk.http.crt.AwsCrtSdkHttpService diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientSpiVerificationTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientSpiVerificationTest.java new file mode 100644 index 000000000000..4c7feb9a3929 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientSpiVerificationTest.java @@ -0,0 +1,281 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.binaryEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static java.util.Collections.emptyMap; +import static org.apache.commons.codec.digest.DigestUtils.sha256Hex; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.github.tomakehurst.wiremock.http.Fault; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.URI; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Random; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.crt.CrtResource; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.utils.Logger; + +public class AwsCrtHttpClientSpiVerificationTest { + private static final Logger log = 
Logger.loggerFor(AwsCrtHttpClientSpiVerificationTest.class); + private static final int TEST_BODY_LEN = 1024; + + @Rule + public WireMockRule mockServer = new WireMockRule(wireMockConfig() + .dynamicPort() + .dynamicHttpsPort()); + + private SdkAsyncHttpClient client; + + @Before + public void setup() throws Exception { + CrtResource.waitForNoResources(); + + client = AwsCrtAsyncHttpClient.builder() + .connectionHealthChecksConfiguration(b -> b.minThroughputInBytesPerSecond(4068L) + .allowableThroughputFailureInterval(Duration.ofSeconds(3))) + .build(); + } + + @After + public void tearDown() { + client.close(); + EventLoopGroup.closeStaticDefault(); + HostResolver.closeStaticDefault(); + CrtResource.waitForNoResources(); + } + + private byte[] generateRandomBody(int size) { + byte[] randomData = new byte[size]; + new Random().nextBytes(randomData); + return randomData; + } + + @Test + public void signalsErrorViaOnErrorAndFuture() throws InterruptedException, ExecutionException, TimeoutException { + stubFor(any(urlEqualTo("/")).willReturn(aResponse().withFault(Fault.RANDOM_DATA_THEN_CLOSE))); + + CompletableFuture errorSignaled = new CompletableFuture<>(); + + SdkAsyncHttpResponseHandler handler = new TestResponseHandler() { + @Override + public void onError(Throwable error) { + errorSignaled.complete(true); + } + }; + + SdkHttpRequest request = CrtHttpClientTestUtils.createRequest(URI.create("http://localhost:" + mockServer.port())); + + CompletableFuture executeFuture = client.execute(AsyncExecuteRequest.builder() + .request(request) + .responseHandler(handler) + .requestContentPublisher(new EmptyPublisher()) + .build()); + + assertThat(errorSignaled.get(1, TimeUnit.SECONDS)).isTrue(); + assertThatThrownBy(executeFuture::join).hasCauseInstanceOf(Exception.class); + + } + + @Test + public void callsOnStreamForEmptyResponseContent() throws Exception { + stubFor(any(urlEqualTo("/")).willReturn(aResponse().withStatus(204).withHeader("foo", "bar"))); + + CompletableFuture streamReceived = new CompletableFuture<>(); + AtomicReference response = new AtomicReference<>(null); + + SdkAsyncHttpResponseHandler handler = new TestResponseHandler() { + @Override + public void onHeaders(SdkHttpResponse headers) { + response.compareAndSet(null, headers); + } + @Override + public void onStream(Publisher stream) { + super.onStream(stream); + streamReceived.complete(true); + } + }; + + SdkHttpRequest request = CrtHttpClientTestUtils.createRequest(URI.create("http://localhost:" + mockServer.port())); + + CompletableFuture future = client.execute(AsyncExecuteRequest.builder() + .request(request) + .responseHandler(handler) + .requestContentPublisher(new EmptyPublisher()) + .build()); + + future.get(60, TimeUnit.SECONDS); + assertThat(streamReceived.get(1, TimeUnit.SECONDS)).isTrue(); + assertThat(response.get() != null).isTrue(); + assertThat(response.get().statusCode() == 204).isTrue(); + assertThat(response.get().headers().get("foo").isEmpty()).isFalse(); + } + + @Test + public void testGetRequest() throws Exception { + String path = "/testGetRequest"; + byte[] body = generateRandomBody(TEST_BODY_LEN); + String expectedBodyHash = sha256Hex(body).toUpperCase(); + stubFor(any(urlEqualTo(path)).willReturn(aResponse().withStatus(200) + .withHeader("Content-Length", Integer.toString(TEST_BODY_LEN)) + .withHeader("foo", "bar") + .withBody(body))); + + CompletableFuture streamReceived = new CompletableFuture<>(); + AtomicReference response = new AtomicReference<>(null); + Sha256BodySubscriber bodySha256Subscriber 
= new Sha256BodySubscriber(); + AtomicReference error = new AtomicReference<>(null); + + SdkAsyncHttpResponseHandler handler = new SdkAsyncHttpResponseHandler() { + @Override + public void onHeaders(SdkHttpResponse headers) { + response.compareAndSet(null, headers); + } + @Override + public void onStream(Publisher stream) { + stream.subscribe(bodySha256Subscriber); + streamReceived.complete(true); + } + + @Override + public void onError(Throwable t) { + error.compareAndSet(null, t); + } + }; + + URI uri = URI.create("http://localhost:" + mockServer.port()); + SdkHttpRequest request = CrtHttpClientTestUtils.createRequest(uri, path, null, SdkHttpMethod.GET, emptyMap()); + + CompletableFuture future = client.execute(AsyncExecuteRequest.builder() + .request(request) + .responseHandler(handler) + .requestContentPublisher(new EmptyPublisher()) + .build()); + + future.get(60, TimeUnit.SECONDS); + assertThat(error.get()).isNull(); + assertThat(streamReceived.get(1, TimeUnit.SECONDS)).isTrue(); + assertThat(bodySha256Subscriber.getFuture().get(60, TimeUnit.SECONDS)).isEqualTo(expectedBodyHash); + assertThat(response.get().statusCode()).isEqualTo(200); + assertThat(response.get().headers().get("foo").isEmpty()).isFalse(); + } + + + private void makePutRequest(String path, byte[] reqBody, int expectedStatus) throws Exception { + CompletableFuture streamReceived = new CompletableFuture<>(); + AtomicReference response = new AtomicReference<>(null); + AtomicReference error = new AtomicReference<>(null); + + Subscriber subscriber = CrtHttpClientTestUtils.createDummySubscriber(); + + SdkAsyncHttpResponseHandler handler = CrtHttpClientTestUtils.createTestResponseHandler(response, + streamReceived, error, subscriber); + + URI uri = URI.create("http://localhost:" + mockServer.port()); + SdkHttpRequest request = CrtHttpClientTestUtils.createRequest(uri, path, reqBody, SdkHttpMethod.PUT, emptyMap()); + + CompletableFuture future = client.execute(AsyncExecuteRequest.builder() + .request(request) + .responseHandler(handler) + .requestContentPublisher(new SdkTestHttpContentPublisher(reqBody)) + .build()); + future.get(60, TimeUnit.SECONDS); + assertThat(error.get()).isNull(); + assertThat(streamReceived.get(60, TimeUnit.SECONDS)).isTrue(); + assertThat(response.get().statusCode()).isEqualTo(expectedStatus); + } + + + @Test + public void testPutRequest() throws Exception { + String pathExpect200 = "/testPutRequest/return_200_on_exact_match"; + byte[] expectedBody = generateRandomBody(TEST_BODY_LEN); + stubFor(any(urlEqualTo(pathExpect200)).withRequestBody(binaryEqualTo(expectedBody)).willReturn(aResponse().withStatus(200))); + makePutRequest(pathExpect200, expectedBody, 200); + + String pathExpect404 = "/testPutRequest/return_404_always"; + byte[] randomBody = generateRandomBody(TEST_BODY_LEN); + stubFor(any(urlEqualTo(pathExpect404)).willReturn(aResponse().withStatus(404))); + makePutRequest(pathExpect404, randomBody, 404); + } + + + + private static class TestResponseHandler implements SdkAsyncHttpResponseHandler { + @Override + public void onHeaders(SdkHttpResponse headers) { + } + + @Override + public void onStream(Publisher stream) { + stream.subscribe(new DrainingSubscriber<>()); + } + + @Override + public void onError(Throwable error) { + } + } + + private static class DrainingSubscriber implements Subscriber { + private Subscription subscription; + + @Override + public void onSubscribe(Subscription subscription) { + this.subscription = subscription; + this.subscription.request(Long.MAX_VALUE); + } + + 
@Override + public void onNext(T t) { + this.subscription.request(1); + } + + @Override + public void onError(Throwable throwable) { + } + + @Override + public void onComplete() { + } + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientWireMockTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientWireMockTest.java new file mode 100644 index 000000000000..8758c8212aab --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientWireMockTest.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.http.HttpTestUtils.createProvider; +import static software.amazon.awssdk.http.crt.CrtHttpClientTestUtils.createRequest; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.URI; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.crt.CrtResource; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.RecordingNetworkTrafficListener; +import software.amazon.awssdk.http.RecordingResponseHandler; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.utils.Logger; + +public class AwsCrtHttpClientWireMockTest { + private static final Logger log = Logger.loggerFor(AwsCrtHttpClientWireMockTest.class); + private final RecordingNetworkTrafficListener wiremockTrafficListener = new RecordingNetworkTrafficListener(); + + @Rule + public WireMockRule mockServer = new WireMockRule(wireMockConfig() + .dynamicPort() + .dynamicHttpsPort() + .networkTrafficListener(wiremockTrafficListener)); + + @BeforeClass + public static void setup() { + System.setProperty("aws.crt.debugnative", "true"); + } + + @Before + public void methodSetup() { + wiremockTrafficListener.reset(); + } + + @After + public void tearDown() { + // Verify there is no resource leak. 
+ EventLoopGroup.closeStaticDefault(); + HostResolver.closeStaticDefault(); + CrtResource.waitForNoResources(); + } + + @Test + public void closeClient_reuse_throwException() throws Exception { + SdkAsyncHttpClient client = AwsCrtAsyncHttpClient.create(); + + client.close(); + assertThatThrownBy(() -> makeSimpleRequest(client)).hasMessageContaining("is closed"); + } + + @Test + public void sharedEventLoopGroup_closeOneClient_shouldNotAffectOtherClients() throws Exception { + try (SdkAsyncHttpClient client = AwsCrtAsyncHttpClient.create()) { + makeSimpleRequest(client); + } + + try (SdkAsyncHttpClient anotherClient = AwsCrtAsyncHttpClient.create()) { + makeSimpleRequest(anotherClient); + } + } + + /** + * Make a simple async request and wait for it to finish. + * + * @param client Client to make request with. + */ + private void makeSimpleRequest(SdkAsyncHttpClient client) throws Exception { + String body = randomAlphabetic(10); + URI uri = URI.create("http://localhost:" + mockServer.port()); + stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withBody(body))); + SdkHttpRequest request = createRequest(uri); + RecordingResponseHandler recorder = new RecordingResponseHandler(); + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); + recorder.completeFuture().get(5, TimeUnit.SECONDS); + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtRequestBodySubscriberReactiveStreamCompatTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtRequestBodySubscriberReactiveStreamCompatTest.java new file mode 100644 index 000000000000..d2b07542c85c --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtRequestBodySubscriberReactiveStreamCompatTest.java @@ -0,0 +1,66 @@ +package software.amazon.awssdk.http.crt; + +import java.nio.ByteBuffer; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import org.reactivestreams.tck.SubscriberWhiteboxVerification; +import org.reactivestreams.tck.TestEnvironment; +import software.amazon.awssdk.http.crt.internal.AwsCrtRequestBodySubscriber; + +public class AwsCrtRequestBodySubscriberReactiveStreamCompatTest extends SubscriberWhiteboxVerification { + private static final int DEFAULT_STREAM_WINDOW_SIZE = 16 * 1024 * 1024; // 16 MB Total Buffer size + + public AwsCrtRequestBodySubscriberReactiveStreamCompatTest() { + super(new TestEnvironment()); + } + + @Override + public Subscriber createSubscriber(WhiteboxSubscriberProbe probe) { + AwsCrtRequestBodySubscriber actualSubscriber = new AwsCrtRequestBodySubscriber(DEFAULT_STREAM_WINDOW_SIZE); + + // Pass Through calls to AwsCrtRequestBodySubscriber, but also register calls to the whitebox probe + Subscriber passthroughSubscriber = new Subscriber() { + @Override + public void onSubscribe(Subscription s) { + actualSubscriber.onSubscribe(s); + probe.registerOnSubscribe(new SubscriberPuppet() { + + @Override + public void triggerRequest(long elements) { + s.request(elements); + } + + @Override + public void signalCancel() { + s.cancel(); + } + }); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + actualSubscriber.onNext(byteBuffer); + probe.registerOnNext(byteBuffer); + } + + @Override + public void onError(Throwable t) { + actualSubscriber.onError(t); + probe.registerOnError(t); + } + + @Override + public void onComplete() { + actualSubscriber.onComplete(); + 
probe.registerOnComplete(); + } + }; + + return passthroughSubscriber; + } + + @Override + public ByteBuffer createElement(int element) { + return ByteBuffer.wrap(Integer.toString(element).getBytes()); + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtResponseBodyPublisherReactiveStreamCompatTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtResponseBodyPublisherReactiveStreamCompatTest.java new file mode 100644 index 000000000000..143f1e7b591b --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtResponseBodyPublisherReactiveStreamCompatTest.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import static org.mockito.Mockito.mock; + +import java.nio.ByteBuffer; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import org.reactivestreams.Publisher; +import org.reactivestreams.tck.PublisherVerification; +import org.reactivestreams.tck.TestEnvironment; +import software.amazon.awssdk.crt.http.HttpClientConnection; +import software.amazon.awssdk.crt.http.HttpStream; +import software.amazon.awssdk.http.crt.internal.AwsCrtResponseBodyPublisher; +import software.amazon.awssdk.utils.Logger; + +public class AwsCrtResponseBodyPublisherReactiveStreamCompatTest extends PublisherVerification { + private static final Logger log = Logger.loggerFor(AwsCrtResponseBodyPublisherReactiveStreamCompatTest.class); + + public AwsCrtResponseBodyPublisherReactiveStreamCompatTest() { + super(new TestEnvironment()); + } + + @Override + public Publisher createPublisher(long elements) { + HttpClientConnection connection = mock(HttpClientConnection.class); + HttpStream stream = mock(HttpStream.class); + AwsCrtResponseBodyPublisher bodyPublisher = new AwsCrtResponseBodyPublisher(connection, stream, new CompletableFuture<>(), Integer.MAX_VALUE); + + for (long i = 0; i < elements; i++) { + bodyPublisher.queueBuffer(UUID.randomUUID().toString().getBytes()); + } + + bodyPublisher.setQueueComplete(); + return bodyPublisher; + } + + // Some tests try to create INT_MAX elements, which causes OutOfMemory Exceptions. Lower the max allowed number of + // queued buffers to 1024. + @Override + public long maxElementsFromPublisher() { + return 1024; + } + + @Override + public Publisher createFailedPublisher() { + return null; + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ConnectionHealthChecksConfigurationTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ConnectionHealthChecksConfigurationTest.java new file mode 100644 index 000000000000..bc7eef8b9b14 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ConnectionHealthChecksConfigurationTest.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + + + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.time.Duration; +import org.junit.Test; + +public class ConnectionHealthChecksConfigurationTest { + + @Test + public void builder_allPropertiesSet() { + ConnectionHealthChecksConfiguration connectionHealthChecksConfiguration = + ConnectionHealthChecksConfiguration.builder() + .minThroughputInBytesPerSecond(123l) + .allowableThroughputFailureInterval(Duration.ofSeconds(1)) + .build(); + + assertThat(connectionHealthChecksConfiguration.minThroughputInBytesPerSecond()).isEqualTo(123); + assertThat(connectionHealthChecksConfiguration.allowableThroughputFailureInterval()).isEqualTo(Duration.ofSeconds(1)); + } + + @Test + public void builder_nullMinThroughputInBytesPerSecond_shouldThrowException() { + assertThatThrownBy(() -> + ConnectionHealthChecksConfiguration.builder() + .allowableThroughputFailureInterval(Duration.ofSeconds(1)) + .build()).hasMessageContaining("minThroughputInBytesPerSecond"); + } + + @Test + public void builder_nullAllowableThroughputFailureInterval() { + assertThatThrownBy(() -> + ConnectionHealthChecksConfiguration.builder() + .minThroughputInBytesPerSecond(1L) + .build()).hasMessageContaining("allowableThroughputFailureIntervalSeconds"); + } + + @Test + public void builder_negativeAllowableThroughputFailureInterval() { + assertThatThrownBy(() -> + ConnectionHealthChecksConfiguration.builder() + .minThroughputInBytesPerSecond(1L) + .allowableThroughputFailureInterval(Duration.ofSeconds(-1)) + .build()).hasMessageContaining("allowableThroughputFailureIntervalSeconds"); + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/CrtHttpClientTestUtils.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/CrtHttpClientTestUtils.java new file mode 100644 index 000000000000..d564afd596b8 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/CrtHttpClientTestUtils.java @@ -0,0 +1,87 @@ +package software.amazon.awssdk.http.crt; + +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; + +import java.net.URI; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.emptyMap; + +public class CrtHttpClientTestUtils { + + static Subscriber createDummySubscriber() { + return new Subscriber() { + @Override + public void onSubscribe(Subscription subscription) { + subscription.request(Long.MAX_VALUE); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + } + + 
@Override + public void onError(Throwable throwable) { + } + + @Override + public void onComplete() { + } + }; + } + + static SdkAsyncHttpResponseHandler createTestResponseHandler(AtomicReference response, + CompletableFuture streamReceived, + AtomicReference error, + Subscriber subscriber) { + return new SdkAsyncHttpResponseHandler() { + @Override + public void onHeaders(SdkHttpResponse headers) { + response.compareAndSet(null, headers); + } + @Override + public void onStream(Publisher stream) { + stream.subscribe(subscriber); + streamReceived.complete(true); + } + + @Override + public void onError(Throwable t) { + error.compareAndSet(null, t); + } + }; + } + + public static SdkHttpFullRequest createRequest(URI endpoint) { + return createRequest(endpoint, "/", null, SdkHttpMethod.GET, emptyMap()); + } + + static SdkHttpFullRequest createRequest(URI endpoint, + String resourcePath, + byte[] body, + SdkHttpMethod method, + Map params) { + + String contentLength = (body == null) ? null : String.valueOf(body.length); + return SdkHttpFullRequest.builder() + .uri(endpoint) + .method(method) + .encodedPath(resourcePath) + .applyMutation(b -> params.forEach(b::putRawQueryParameter)) + .applyMutation(b -> { + b.putHeader("Host", endpoint.getHost()); + if (contentLength != null) { + b.putHeader("Content-Length", contentLength); + } + }).build(); + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/EmptyPublisher.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/EmptyPublisher.java new file mode 100644 index 000000000000..1e85fc43cda6 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/EmptyPublisher.java @@ -0,0 +1,45 @@ +package software.amazon.awssdk.http.crt; + +import java.nio.ByteBuffer; +import java.util.Optional; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.async.SdkHttpContentPublisher; + +public class EmptyPublisher implements SdkHttpContentPublisher { + @Override + public void subscribe(Subscriber subscriber) { + subscriber.onSubscribe(new EmptySubscription(subscriber)); + } + + @Override + public Optional contentLength() { + return Optional.of(0L); + } + + private static class EmptySubscription implements Subscription { + private final Subscriber subscriber; + private volatile boolean done; + + EmptySubscription(Subscriber subscriber) { + this.subscriber = subscriber; + } + + @Override + public void request(long l) { + if (!done) { + done = true; + if (l <= 0) { + this.subscriber.onError(new IllegalArgumentException("Demand must be positive")); + } else { + this.subscriber.onComplete(); + } + } + } + + @Override + public void cancel() { + done = true; + } + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/H1ServerBehaviorTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/H1ServerBehaviorTest.java new file mode 100644 index 000000000000..84c4f9c194b6 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/H1ServerBehaviorTest.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; + +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.SdkAsyncHttpClientH1TestSuite; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.utils.AttributeMap; + +/** + * Testing the scenario where h1 server sends 5xx errors. + */ +public class H1ServerBehaviorTest extends SdkAsyncHttpClientH1TestSuite { + + @Override + protected SdkAsyncHttpClient setupClient() { + return AwsCrtAsyncHttpClient.builder() + .buildWithDefaults(AttributeMap.builder().put(TRUST_ALL_CERTIFICATES, true).build()); + } + +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyConfigurationTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyConfigurationTest.java new file mode 100644 index 000000000000..3f01c7a7774d --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyConfigurationTest.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import static org.assertj.core.api.Assertions.assertThat; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.Random; +import java.util.stream.Stream; +import org.junit.Test; + +/** + * Tests for {@link ProxyConfiguration}. 
+ */ +public class ProxyConfigurationTest { + private static final Random RNG = new Random(); + + @Test + public void build_setsAllProperties() { + verifyAllPropertiesSet(allPropertiesSetConfig()); + } + + @Test + public void toBuilder_roundTrip_producesExactCopy() { + ProxyConfiguration original = allPropertiesSetConfig(); + + ProxyConfiguration copy = original.toBuilder().build(); + + assertThat(copy).isEqualTo(original); + } + + @Test + public void toBuilderModified_doesNotModifySource() { + ProxyConfiguration original = allPropertiesSetConfig(); + + ProxyConfiguration modified = setAllPropertiesToRandomValues(original.toBuilder()).build(); + + assertThat(original).isNotEqualTo(modified); + } + + private ProxyConfiguration allPropertiesSetConfig() { + return setAllPropertiesToRandomValues(ProxyConfiguration.builder()).build(); + } + + private ProxyConfiguration.Builder setAllPropertiesToRandomValues(ProxyConfiguration.Builder builder) { + Stream.of(builder.getClass().getDeclaredMethods()) + .filter(m -> m.getParameterCount() == 1 && m.getReturnType().equals(ProxyConfiguration.Builder.class)) + .forEach(m -> { + try { + m.setAccessible(true); + setRandomValue(builder, m); + } catch (Exception e) { + throw new RuntimeException("Could not create random proxy config", e); + } + }); + return builder; + } + + private void setRandomValue(Object o, Method setter) throws InvocationTargetException, IllegalAccessException { + Class paramClass = setter.getParameterTypes()[0]; + + if (String.class.equals(paramClass)) { + setter.invoke(o, randomString()); + } else if (int.class.equals(paramClass)) { + setter.invoke(o, RNG.nextInt()); + } else { + throw new RuntimeException("Don't know how create random value for type " + paramClass); + } + } + + private void verifyAllPropertiesSet(ProxyConfiguration cfg) { + boolean hasNullProperty = Stream.of(cfg.getClass().getDeclaredMethods()) + .filter(m -> !m.getReturnType().equals(Void.class) && m.getParameterCount() == 0) + .anyMatch(m -> { + m.setAccessible(true); + try { + return m.invoke(cfg) == null; + } catch (Exception e) { + return true; + } + }); + + if (hasNullProperty) { + throw new RuntimeException("Given configuration has unset property"); + } + } + + private String randomString() { + String alpha = "abcdefghijklmnopqrstuwxyz"; + + StringBuilder sb = new StringBuilder(16); + for (int i = 0; i < 16; ++i) { + sb.append(alpha.charAt(RNG.nextInt(16))); + } + + return sb.toString(); + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyWireMockTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyWireMockTest.java new file mode 100644 index 000000000000..1487344b099e --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyWireMockTest.java @@ -0,0 +1,123 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + + +package software.amazon.awssdk.http.crt; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; +import static java.util.Collections.emptyMap; +import static org.assertj.core.api.Assertions.assertThat; +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import java.net.URI; +import java.nio.ByteBuffer; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.crt.CrtResource; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; + +/** + * Tests for HTTP proxy functionality in the CRT client. + */ +public class ProxyWireMockTest { + private SdkAsyncHttpClient client; + + private ProxyConfiguration proxyCfg; + + private WireMockServer mockProxy = new WireMockServer(new WireMockConfiguration() + .dynamicPort() + .dynamicHttpsPort() + .enableBrowserProxying(true)); // make the mock proxy actually forward (to the mock server for our test) + + private WireMockServer mockServer = new WireMockServer(new WireMockConfiguration() + .dynamicPort() + .dynamicHttpsPort()); + + + @Before + public void setup() { + mockProxy.start(); + mockServer.start(); + + mockServer.stubFor(get(urlMatching(".*")).willReturn(aResponse().withStatus(200).withBody("hello"))); + + proxyCfg = ProxyConfiguration.builder() + .host("localhost") + .port(mockProxy.port()) + .build(); + + client = AwsCrtAsyncHttpClient.builder() + .proxyConfiguration(proxyCfg) + .build(); + } + + @After + public void teardown() { + mockServer.stop(); + mockProxy.stop(); + client.close(); + EventLoopGroup.closeStaticDefault(); + HostResolver.closeStaticDefault(); + CrtResource.waitForNoResources(); + } + + /* + * Note the contrast between this test and the netty connect test. The CRT proxy implementation does not + * do a CONNECT call for requests using http, so by configuring the proxy mock to forward and the server mock + * to return success, we can actually create an end-to-end test. + * + * We have an outstanding request to change this behavior to match https (use a CONNECT call). Once that + * change happens, this test will break and need to be updated to be more like the netty one. 
+ */ + @Test + public void proxyConfigured_httpGet() throws Throwable { + + CompletableFuture streamReceived = new CompletableFuture<>(); + AtomicReference response = new AtomicReference<>(null); + AtomicReference error = new AtomicReference<>(null); + + Subscriber subscriber = CrtHttpClientTestUtils.createDummySubscriber(); + + SdkAsyncHttpResponseHandler handler = CrtHttpClientTestUtils.createTestResponseHandler(response, streamReceived, error, subscriber); + + URI uri = URI.create("http://localhost:" + mockServer.port()); + SdkHttpRequest request = CrtHttpClientTestUtils.createRequest(uri, "/server/test", null, SdkHttpMethod.GET, emptyMap()); + + CompletableFuture future = client.execute(AsyncExecuteRequest.builder() + .request(request) + .responseHandler(handler) + .requestContentPublisher(new EmptyPublisher()) + .build()); + future.get(60, TimeUnit.SECONDS); + assertThat(error.get()).isNull(); + assertThat(streamReceived.get(60, TimeUnit.SECONDS)).isTrue(); + assertThat(response.get().statusCode()).isEqualTo(200); + } + +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/SdkTestHttpContentPublisher.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/SdkTestHttpContentPublisher.java new file mode 100644 index 000000000000..3ad5f08ac0c0 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/SdkTestHttpContentPublisher.java @@ -0,0 +1,56 @@ +package software.amazon.awssdk.http.crt; + +import java.nio.ByteBuffer; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.async.SdkHttpContentPublisher; + +public class SdkTestHttpContentPublisher implements SdkHttpContentPublisher { + private final byte[] body; + private final AtomicReference> subscriber = new AtomicReference<>(null); + private final AtomicBoolean complete = new AtomicBoolean(false); + + public SdkTestHttpContentPublisher(byte[] body) { + this.body = body; + } + + @Override + public void subscribe(Subscriber s) { + boolean wasFirstSubscriber = subscriber.compareAndSet(null, s); + + SdkTestHttpContentPublisher publisher = this; + + if (wasFirstSubscriber) { + s.onSubscribe(new Subscription() { + @Override + public void request(long n) { + publisher.request(n); + } + + @Override + public void cancel() { + // Do nothing + } + }); + } else { + s.onError(new RuntimeException("Only allow one subscriber")); + } + } + + protected void request(long n) { + // Send the whole body if they request >0 ByteBuffers + if (n > 0 && !complete.get()) { + complete.set(true); + subscriber.get().onNext(ByteBuffer.wrap(body)); + subscriber.get().onComplete(); + } + } + + @Override + public Optional contentLength() { + return Optional.of((long)body.length); + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/Sha256BodySubscriber.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/Sha256BodySubscriber.java new file mode 100644 index 000000000000..508deffcb199 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/Sha256BodySubscriber.java @@ -0,0 +1,44 @@ +package software.amazon.awssdk.http.crt; + +import static org.apache.commons.codec.binary.Hex.encodeHexString; + +import java.nio.ByteBuffer; +import java.security.MessageDigest; +import 
java.security.NoSuchAlgorithmException; +import java.util.concurrent.CompletableFuture; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; + +public class Sha256BodySubscriber implements Subscriber { + private MessageDigest digest; + private CompletableFuture future; + + public Sha256BodySubscriber() throws NoSuchAlgorithmException { + digest = MessageDigest.getInstance("SHA-256"); + future = new CompletableFuture<>(); + } + + @Override + public void onSubscribe(Subscription s) { + s.request(Long.MAX_VALUE); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + digest.update(byteBuffer); + } + + @Override + public void onError(Throwable t) { + future.completeExceptionally(t); + } + + @Override + public void onComplete() { + future.complete(encodeHexString(digest.digest()).toUpperCase()); + } + + public CompletableFuture getFuture() { + return future; + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/internal/CrtRequestExecutorTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/internal/CrtRequestExecutorTest.java new file mode 100644 index 000000000000..3c10564d3811 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/internal/CrtRequestExecutorTest.java @@ -0,0 +1,164 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.crt.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.http.HttpTestUtils.createProvider; +import static software.amazon.awssdk.http.crt.CrtHttpClientTestUtils.createRequest; + +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.CompletableFuture; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.crt.CrtRuntimeException; +import software.amazon.awssdk.crt.http.HttpClientConnection; +import software.amazon.awssdk.crt.http.HttpClientConnectionManager; +import software.amazon.awssdk.crt.http.HttpRequest; +import software.amazon.awssdk.http.SdkCancellationException; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; + +@RunWith(MockitoJUnitRunner.class) +public class CrtRequestExecutorTest { + + private CrtRequestExecutor requestExecutor; + @Mock + private HttpClientConnectionManager connectionManager; + + @Mock + private SdkAsyncHttpResponseHandler responseHandler; + + @Mock + private HttpClientConnection httpClientConnection; + + @Before + public void setup() { + requestExecutor = new CrtRequestExecutor(); + } + + @After + public void teardown() { + Mockito.reset(connectionManager, responseHandler, httpClientConnection); + } + + @Test + public void acquireConnectionThrowException_shouldInvokeOnError() { + RuntimeException exception = new RuntimeException("error"); + CrtRequestContext context = CrtRequestContext.builder() + .crtConnPool(connectionManager) + .request(AsyncExecuteRequest.builder() + .responseHandler(responseHandler) + .build()) + .build(); + CompletableFuture completableFuture = new CompletableFuture<>(); + + Mockito.when(connectionManager.acquireConnection()).thenReturn(completableFuture); + completableFuture.completeExceptionally(exception); + + CompletableFuture executeFuture = requestExecutor.execute(context); + + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(Exception.class); + Mockito.verify(responseHandler).onError(argumentCaptor.capture()); + + Exception actualException = argumentCaptor.getValue(); + assertThat(actualException).hasMessageContaining("An exception occurred when acquiring connection"); + assertThat(actualException).hasCause(exception); + assertThat(executeFuture).hasFailedWithThrowableThat().hasCause(exception).isInstanceOf(IOException.class); + } + + @Test + public void makeRequestThrowException_shouldInvokeOnError() { + CrtRuntimeException exception = new CrtRuntimeException(""); + SdkHttpFullRequest request = createRequest(URI.create("http://localhost")); + CrtRequestContext context = CrtRequestContext.builder() + .readBufferSize(2000) + .crtConnPool(connectionManager) + .request(AsyncExecuteRequest.builder() + .request(request) + .requestContentPublisher(createProvider("")) + .responseHandler(responseHandler) + .build()) + .build(); + CompletableFuture completableFuture = new CompletableFuture<>(); + + Mockito.when(connectionManager.acquireConnection()).thenReturn(completableFuture); + completableFuture.complete(httpClientConnection); + + Mockito.when(httpClientConnection.makeRequest(Mockito.any(HttpRequest.class), 
Mockito.any(AwsCrtAsyncHttpStreamAdapter.class))) + .thenThrow(exception); + + CompletableFuture executeFuture = requestExecutor.execute(context); + + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(Exception.class); + Mockito.verify(responseHandler).onError(argumentCaptor.capture()); + + Exception actualException = argumentCaptor.getValue(); + assertThat(actualException).hasMessageContaining("An exception occurred when making the request"); + assertThat(actualException).hasCause(exception); + assertThat(executeFuture).hasFailedWithThrowableThat().hasCause(exception).isInstanceOf(IOException.class); + } + + @Test + public void makeRequest_success() { + SdkHttpFullRequest request = createRequest(URI.create("http://localhost")); + CrtRequestContext context = CrtRequestContext.builder() + .readBufferSize(2000) + .crtConnPool(connectionManager) + .request(AsyncExecuteRequest.builder() + .request(request) + .requestContentPublisher(createProvider("")) + .responseHandler(responseHandler) + .build()) + .build(); + CompletableFuture completableFuture = new CompletableFuture<>(); + Mockito.when(connectionManager.acquireConnection()).thenReturn(completableFuture); + completableFuture.complete(httpClientConnection); + + CompletableFuture executeFuture = requestExecutor.execute(context); + Mockito.verifyZeroInteractions(responseHandler); + } + + @Test + public void cancelRequest_shouldInvokeOnError() { + CrtRequestContext context = CrtRequestContext.builder() + .crtConnPool(connectionManager) + .request(AsyncExecuteRequest.builder() + .responseHandler(responseHandler) + .build()) + .build(); + CompletableFuture completableFuture = new CompletableFuture<>(); + + Mockito.when(connectionManager.acquireConnection()).thenReturn(completableFuture); + + CompletableFuture executeFuture = requestExecutor.execute(context); + executeFuture.cancel(true); + + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(Exception.class); + Mockito.verify(responseHandler).onError(argumentCaptor.capture()); + + Exception actualException = argumentCaptor.getValue(); + assertThat(actualException).hasMessageContaining("The request was cancelled"); + assertThat(actualException).isInstanceOf(SdkCancellationException.class); + } +} diff --git a/http-clients/aws-crt-client/src/test/resources/jetty-logging.properties b/http-clients/aws-crt-client/src/test/resources/jetty-logging.properties new file mode 100644 index 000000000000..4ee410e7fa92 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/resources/jetty-logging.properties @@ -0,0 +1,18 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +# Set up logging implementation +org.eclipse.jetty.util.log.class=org.eclipse.jetty.util.log.StdErrLog +org.eclipse.jetty.LEVEL=OFF diff --git a/http-clients/aws-crt-client/src/test/resources/log4j.properties b/http-clients/aws-crt-client/src/test/resources/log4j.properties new file mode 100644 index 000000000000..5a6e0a5388d9 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/resources/log4j.properties @@ -0,0 +1,24 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +log4j.rootLogger=WARN, A1 +log4j.appender.A1=org.apache.log4j.ConsoleAppender +log4j.appender.A1.layout=org.apache.log4j.PatternLayout + +# Print the date in ISO 8601 format +log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n + + + diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index df77cd847812..6a9b3ac99282 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -1,6 +1,6 @@ @@ -97,6 +102,12 @@ + + software.amazon.awssdk + http-client-tests + ${awsjavasdk.version} + test + com.github.tomakehurst wiremock @@ -112,6 +123,11 @@ junit test + + org.testng + testng + test + org.mockito mockito-core diff --git a/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedDefaultHttp2ConnectionDecoder.java b/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedDefaultHttp2ConnectionDecoder.java deleted file mode 100644 index 543d9ec4e1d7..000000000000 --- a/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedDefaultHttp2ConnectionDecoder.java +++ /dev/null @@ -1,676 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package io.netty.handler.codec.http2; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.http.HttpStatusClass; -import io.netty.handler.codec.http2.Http2Connection.Endpoint; -import io.netty.util.internal.UnstableApi; -import io.netty.util.internal.logging.InternalLogger; -import io.netty.util.internal.logging.InternalLoggerFactory; - -import java.util.List; - -import static io.netty.handler.codec.http.HttpStatusClass.INFORMATIONAL; -import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_PRIORITY_WEIGHT; -import static io.netty.handler.codec.http2.Http2Error.INTERNAL_ERROR; -import static io.netty.handler.codec.http2.Http2Error.PROTOCOL_ERROR; -import static io.netty.handler.codec.http2.Http2Error.STREAM_CLOSED; -import static io.netty.handler.codec.http2.Http2Exception.connectionError; -import static io.netty.handler.codec.http2.Http2Exception.streamError; -import static io.netty.handler.codec.http2.Http2PromisedRequestVerifier.ALWAYS_VERIFY; -import static io.netty.handler.codec.http2.Http2Stream.State.CLOSED; -import static io.netty.handler.codec.http2.Http2Stream.State.HALF_CLOSED_REMOTE; -import static io.netty.util.internal.ObjectUtil.checkNotNull; -import static java.lang.Integer.MAX_VALUE; -import static java.lang.Math.min; - -/** - * Provides the default implementation for processing inbound frame events and delegates to a - * {@link Http2FrameListener} - *

<p>
    - * This class will read HTTP/2 frames and delegate the events to a {@link Http2FrameListener}
    - * <p>
    - * This interface enforces inbound flow control functionality through - * {@link Http2LocalFlowController} - */ -@UnstableApi -public class ForkedDefaultHttp2ConnectionDecoder implements Http2ConnectionDecoder { - private static final InternalLogger logger = InternalLoggerFactory.getInstance(ForkedDefaultHttp2ConnectionDecoder.class); - private Http2FrameListener internalFrameListener = new PrefaceFrameListener(); - private final Http2Connection connection; - private Http2LifecycleManager lifecycleManager; - private final Http2ConnectionEncoder encoder; - private final Http2FrameReader frameReader; - private Http2FrameListener listener; - private final Http2PromisedRequestVerifier requestVerifier; - - public ForkedDefaultHttp2ConnectionDecoder(Http2Connection connection, - Http2ConnectionEncoder encoder, - Http2FrameReader frameReader) { - this(connection, encoder, frameReader, ALWAYS_VERIFY); - } - - public ForkedDefaultHttp2ConnectionDecoder(Http2Connection connection, - Http2ConnectionEncoder encoder, - Http2FrameReader frameReader, - Http2PromisedRequestVerifier requestVerifier) { - this.connection = checkNotNull(connection, "connection"); - this.frameReader = checkNotNull(frameReader, "frameReader"); - this.encoder = checkNotNull(encoder, "encoder"); - this.requestVerifier = checkNotNull(requestVerifier, "requestVerifier"); - if (connection.local().flowController() == null) { - connection.local().flowController(new DefaultHttp2LocalFlowController(connection)); - } - connection.local().flowController().frameWriter(encoder.frameWriter()); - } - - @Override - public void lifecycleManager(Http2LifecycleManager lifecycleManager) { - this.lifecycleManager = checkNotNull(lifecycleManager, "lifecycleManager"); - } - - @Override - public Http2Connection connection() { - return connection; - } - - @Override - public final Http2LocalFlowController flowController() { - return connection.local().flowController(); - } - - @Override - public void frameListener(Http2FrameListener listener) { - this.listener = checkNotNull(listener, "listener"); - } - - @Override - public Http2FrameListener frameListener() { - return listener; - } - - // Visible for testing - Http2FrameListener internalFrameListener() { - return internalFrameListener; - } - - @Override - public boolean prefaceReceived() { - return FrameReadListener.class == internalFrameListener.getClass(); - } - - @Override - public void decodeFrame(ChannelHandlerContext ctx, ByteBuf in, List out) throws Http2Exception { - frameReader.readFrame(ctx, in, internalFrameListener); - } - - @Override - public Http2Settings localSettings() { - Http2Settings settings = new Http2Settings(); - Http2FrameReader.Configuration config = frameReader.configuration(); - Http2HeadersDecoder.Configuration headersConfig = config.headersConfiguration(); - Http2FrameSizePolicy frameSizePolicy = config.frameSizePolicy(); - settings.initialWindowSize(flowController().initialWindowSize()); - settings.maxConcurrentStreams(connection.remote().maxActiveStreams()); - settings.headerTableSize(headersConfig.maxHeaderTableSize()); - settings.maxFrameSize(frameSizePolicy.maxFrameSize()); - settings.maxHeaderListSize(headersConfig.maxHeaderListSize()); - if (!connection.isServer()) { - // Only set the pushEnabled flag if this is a client endpoint. 
- settings.pushEnabled(connection.local().allowPushTo()); - } - return settings; - } - - @Override - public void close() { - frameReader.close(); - } - - /** - * Calculate the threshold in bytes which should trigger a {@code GO_AWAY} if a set of headers exceeds this amount. - * @param maxHeaderListSize - * SETTINGS_MAX_HEADER_LIST_SIZE for the local - * endpoint. - * @return the threshold in bytes which should trigger a {@code GO_AWAY} if a set of headers exceeds this amount. - */ - protected long calculateMaxHeaderListSizeGoAway(long maxHeaderListSize) { - return Http2CodecUtil.calculateMaxHeaderListSizeGoAway(maxHeaderListSize); - } - - private int unconsumedBytes(Http2Stream stream) { - return flowController().unconsumedBytes(stream); - } - - void onGoAwayRead0(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData) - throws Http2Exception { - if (connection.goAwayReceived() && connection.local().lastStreamKnownByPeer() < lastStreamId) { - throw connectionError(PROTOCOL_ERROR, "lastStreamId MUST NOT increase. Current value: %d new value: %d", - connection.local().lastStreamKnownByPeer(), lastStreamId); - } - listener.onGoAwayRead(ctx, lastStreamId, errorCode, debugData); - connection.goAwayReceived(lastStreamId, errorCode, debugData); - } - - void onUnknownFrame0(ChannelHandlerContext ctx, byte frameType, int streamId, Http2Flags flags, - ByteBuf payload) throws Http2Exception { - listener.onUnknownFrame(ctx, frameType, streamId, flags, payload); - } - - /** - * Handles all inbound frames from the network. - */ - private final class FrameReadListener implements Http2FrameListener { - @Override - public int onDataRead(final ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding, - boolean endOfStream) throws Http2Exception { - Http2Stream stream = connection.stream(streamId); - Http2LocalFlowController flowController = flowController(); - int bytesToReturn = data.readableBytes() + padding; - - boolean shouldIgnore; - try { - shouldIgnore = shouldIgnoreHeadersOrDataFrame(ctx, streamId, stream, "DATA"); - } catch (Http2Exception e) { - // Ignoring this frame. We still need to count the frame towards the connection flow control - // window, but we immediately mark all bytes as consumed. - flowController.receiveFlowControlledFrame(stream, data, padding, endOfStream); - flowController.consumeBytes(stream, bytesToReturn); - throw e; - } catch (Throwable t) { - throw connectionError(INTERNAL_ERROR, t, "Unhandled error on data stream id %d", streamId); - } - - if (shouldIgnore) { - // Ignoring this frame. We still need to count the frame towards the connection flow control - // window, but we immediately mark all bytes as consumed. - flowController.receiveFlowControlledFrame(stream, data, padding, endOfStream); - flowController.consumeBytes(stream, bytesToReturn); - - // Verify that the stream may have existed after we apply flow control. - verifyStreamMayHaveExisted(streamId); - - // All bytes have been consumed. 
- return bytesToReturn; - } - - Http2Exception error = null; - switch (stream.state()) { - case OPEN: - case HALF_CLOSED_LOCAL: - break; - case HALF_CLOSED_REMOTE: - case CLOSED: - error = streamError(stream.id(), STREAM_CLOSED, "Stream %d in unexpected state: %s", - stream.id(), stream.state()); - break; - default: - error = streamError(stream.id(), PROTOCOL_ERROR, - "Stream %d in unexpected state: %s", stream.id(), stream.state()); - break; - } - - int unconsumedBytes = unconsumedBytes(stream); - try { - flowController.receiveFlowControlledFrame(stream, data, padding, endOfStream); - // Update the unconsumed bytes after flow control is applied. - unconsumedBytes = unconsumedBytes(stream); - - // If the stream is in an invalid state to receive the frame, throw the error. - if (error != null) { - throw error; - } - - // Call back the application and retrieve the number of bytes that have been - // immediately processed. - bytesToReturn = listener.onDataRead(ctx, streamId, data, padding, endOfStream); - return bytesToReturn; - } catch (Http2Exception e) { - // If an exception happened during delivery, the listener may have returned part - // of the bytes before the error occurred. If that's the case, subtract that from - // the total processed bytes so that we don't return too many bytes. - int delta = unconsumedBytes - unconsumedBytes(stream); - bytesToReturn -= delta; - throw e; - } catch (RuntimeException e) { - // If an exception happened during delivery, the listener may have returned part - // of the bytes before the error occurred. If that's the case, subtract that from - // the total processed bytes so that we don't return too many bytes. - int delta = unconsumedBytes - unconsumedBytes(stream); - bytesToReturn -= delta; - throw e; - } finally { - // If appropriate, return the processed bytes to the flow controller. - flowController.consumeBytes(stream, bytesToReturn); - - if (endOfStream) { - lifecycleManager.closeStreamRemote(stream, ctx.newSucceededFuture()); - } - } - } - - @Override - public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, - boolean endOfStream) throws Http2Exception { - onHeadersRead(ctx, streamId, headers, 0, DEFAULT_PRIORITY_WEIGHT, false, padding, endOfStream); - } - - @Override - public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int streamDependency, - short weight, boolean exclusive, int padding, boolean endOfStream) throws Http2Exception { - Http2Stream stream = connection.stream(streamId); - boolean allowHalfClosedRemote = false; - if (stream == null && !connection.streamMayHaveExisted(streamId)) { - stream = connection.remote().createStream(streamId, endOfStream); - // Allow the state to be HALF_CLOSE_REMOTE if we're creating it in that state. - allowHalfClosedRemote = stream.state() == HALF_CLOSED_REMOTE; - } - - if (shouldIgnoreHeadersOrDataFrame(ctx, streamId, stream, "HEADERS")) { - return; - } - - boolean isInformational = !connection.isServer() && - HttpStatusClass.valueOf(headers.status()) == INFORMATIONAL; - if ((isInformational || !endOfStream) && stream.isHeadersReceived() || stream.isTrailersReceived()) { - throw streamError(streamId, PROTOCOL_ERROR, - "Stream %d received too many headers EOS: %s state: %s", - streamId, endOfStream, stream.state()); - } - - switch (stream.state()) { - case RESERVED_REMOTE: - stream.open(endOfStream); - break; - case OPEN: - case HALF_CLOSED_LOCAL: - // Allowed to receive headers in these states. 
- break; - case HALF_CLOSED_REMOTE: - if (!allowHalfClosedRemote) { - throw streamError(stream.id(), STREAM_CLOSED, "Stream %d in unexpected state: %s", - stream.id(), stream.state()); - } - break; - case CLOSED: - throw streamError(stream.id(), STREAM_CLOSED, "Stream %d in unexpected state: %s", - stream.id(), stream.state()); - default: - // Connection error. - throw connectionError(PROTOCOL_ERROR, "Stream %d in unexpected state: %s", stream.id(), - stream.state()); - } - - stream.headersReceived(isInformational); - encoder.flowController().updateDependencyTree(streamId, streamDependency, weight, exclusive); - - listener.onHeadersRead(ctx, streamId, headers, streamDependency, weight, exclusive, padding, endOfStream); - - // If the headers completes this stream, close it. - if (endOfStream) { - lifecycleManager.closeStreamRemote(stream, ctx.newSucceededFuture()); - } - } - - @Override - public void onPriorityRead(ChannelHandlerContext ctx, int streamId, int streamDependency, short weight, - boolean exclusive) throws Http2Exception { - encoder.flowController().updateDependencyTree(streamId, streamDependency, weight, exclusive); - - listener.onPriorityRead(ctx, streamId, streamDependency, weight, exclusive); - } - - @Override - public void onRstStreamRead(ChannelHandlerContext ctx, int streamId, long errorCode) throws Http2Exception { - Http2Stream stream = connection.stream(streamId); - if (stream == null) { - verifyStreamMayHaveExisted(streamId); - return; - } - - switch(stream.state()) { - case IDLE: - throw connectionError(PROTOCOL_ERROR, "RST_STREAM received for IDLE stream %d", streamId); - case CLOSED: - return; // RST_STREAM frames must be ignored for closed streams. - default: - break; - } - - listener.onRstStreamRead(ctx, streamId, errorCode); - - lifecycleManager.closeStream(stream, ctx.newSucceededFuture()); - } - - @Override - public void onSettingsAckRead(ChannelHandlerContext ctx) throws Http2Exception { - // Apply oldest outstanding local settings here. This is a synchronization point between endpoints. - Http2Settings settings = encoder.pollSentSettings(); - - if (settings != null) { - applyLocalSettings(settings); - } - - listener.onSettingsAckRead(ctx); - } - - /** - * Applies settings sent from the local endpoint. - *
<p>
    - * This method is only called after the local settings have been acknowledged from the remote endpoint. - */ - private void applyLocalSettings(Http2Settings settings) throws Http2Exception { - Boolean pushEnabled = settings.pushEnabled(); - Http2FrameReader.Configuration config = frameReader.configuration(); - Http2HeadersDecoder.Configuration headerConfig = config.headersConfiguration(); - Http2FrameSizePolicy frameSizePolicy = config.frameSizePolicy(); - if (pushEnabled != null) { - if (connection.isServer()) { - throw connectionError(PROTOCOL_ERROR, "Server sending SETTINGS frame with ENABLE_PUSH specified"); - } - connection.local().allowPushTo(pushEnabled); - } - - Long maxConcurrentStreams = settings.maxConcurrentStreams(); - if (maxConcurrentStreams != null) { - connection.remote().maxActiveStreams((int) min(maxConcurrentStreams, MAX_VALUE)); - } - - Long headerTableSize = settings.headerTableSize(); - if (headerTableSize != null) { - headerConfig.maxHeaderTableSize(headerTableSize); - } - - Long maxHeaderListSize = settings.maxHeaderListSize(); - if (maxHeaderListSize != null) { - headerConfig.maxHeaderListSize(maxHeaderListSize, calculateMaxHeaderListSizeGoAway(maxHeaderListSize)); - } - - Integer maxFrameSize = settings.maxFrameSize(); - if (maxFrameSize != null) { - frameSizePolicy.maxFrameSize(maxFrameSize); - } - - Integer initialWindowSize = settings.initialWindowSize(); - if (initialWindowSize != null) { - flowController().initialWindowSize(initialWindowSize); - } - } - - @Override - public void onSettingsRead(ChannelHandlerContext ctx, Http2Settings settings) throws Http2Exception { - // Acknowledge receipt of the settings. We should do this before we process the settings to ensure our - // remote peer applies these settings before any subsequent frames that we may send which depend upon these - // new settings. See https://github.com/netty/netty/issues/6520. - encoder.writeSettingsAck(ctx, ctx.newPromise()); - - encoder.remoteSettings(settings); - - listener.onSettingsRead(ctx, settings); - } - - @Override - public void onPingRead(ChannelHandlerContext ctx, long data) throws Http2Exception { - // Send an ack back to the remote client. - // Need to retain the buffer here since it will be released after the write completes. - encoder.writePing(ctx, true, data, ctx.newPromise()); - - listener.onPingRead(ctx, data); - } - - @Override - public void onPingAckRead(ChannelHandlerContext ctx, long data) throws Http2Exception { - listener.onPingAckRead(ctx, data); - } - - @Override - public void onPushPromiseRead(ChannelHandlerContext ctx, int streamId, int promisedStreamId, - Http2Headers headers, int padding) throws Http2Exception { - // A client cannot push. - if (connection().isServer()) { - throw connectionError(PROTOCOL_ERROR, "A client cannot push."); - } - - Http2Stream parentStream = connection.stream(streamId); - - if (shouldIgnoreHeadersOrDataFrame(ctx, streamId, parentStream, "PUSH_PROMISE")) { - return; - } - - if (parentStream == null) { - throw connectionError(PROTOCOL_ERROR, "Stream %d does not exist", streamId); - } - - switch (parentStream.state()) { - case OPEN: - case HALF_CLOSED_LOCAL: - // Allowed to receive push promise in these states. - break; - default: - // Connection error. 
- throw connectionError(PROTOCOL_ERROR, - "Stream %d in unexpected state for receiving push promise: %s", - parentStream.id(), parentStream.state()); - } - - if (!requestVerifier.isAuthoritative(ctx, headers)) { - throw streamError(promisedStreamId, PROTOCOL_ERROR, - "Promised request on stream %d for promised stream %d is not authoritative", - streamId, promisedStreamId); - } - if (!requestVerifier.isCacheable(headers)) { - throw streamError(promisedStreamId, PROTOCOL_ERROR, - "Promised request on stream %d for promised stream %d is not known to be cacheable", - streamId, promisedStreamId); - } - if (!requestVerifier.isSafe(headers)) { - throw streamError(promisedStreamId, PROTOCOL_ERROR, - "Promised request on stream %d for promised stream %d is not known to be safe", - streamId, promisedStreamId); - } - - // Reserve the push stream based with a priority based on the current stream's priority. - connection.remote().reservePushStream(promisedStreamId, parentStream); - - listener.onPushPromiseRead(ctx, streamId, promisedStreamId, headers, padding); - } - - @Override - public void onGoAwayRead(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData) - throws Http2Exception { - onGoAwayRead0(ctx, lastStreamId, errorCode, debugData); - } - - @Override - public void onWindowUpdateRead(ChannelHandlerContext ctx, int streamId, int windowSizeIncrement) - throws Http2Exception { - Http2Stream stream = connection.stream(streamId); - if (stream == null || stream.state() == CLOSED || streamCreatedAfterGoAwaySent(streamId)) { - // Ignore this frame. - verifyStreamMayHaveExisted(streamId); - return; - } - - // Update the outbound flow control window. - encoder.flowController().incrementWindowSize(stream, windowSizeIncrement); - - listener.onWindowUpdateRead(ctx, streamId, windowSizeIncrement); - } - - @Override - public void onUnknownFrame(ChannelHandlerContext ctx, byte frameType, int streamId, Http2Flags flags, - ByteBuf payload) throws Http2Exception { - onUnknownFrame0(ctx, frameType, streamId, flags, payload); - } - - /** - * Helper method to determine if a frame that has the semantics of headers or data should be ignored for the - * {@code stream} (which may be {@code null}) associated with {@code streamId}. - */ - private boolean shouldIgnoreHeadersOrDataFrame(ChannelHandlerContext ctx, int streamId, Http2Stream stream, - String frameName) throws Http2Exception { - if (stream == null) { - if (streamCreatedAfterGoAwaySent(streamId)) { - logger.info("{} ignoring {} frame for stream {}. Stream sent after GOAWAY sent", - ctx.channel(), frameName, streamId); - return true; - } - // If the stream could have existed in the past we assume this is a frame sent by the remote - // after a RST_STREAM has been sent. Since we don't retain metadata about streams that have been - // reset we can't know this for sure. - verifyStreamMayHaveExisted(streamId); - return true; - } else if (stream.isResetSent() || streamCreatedAfterGoAwaySent(streamId)) { - if (logger.isInfoEnabled()) { - logger.info("{} ignoring {} frame for stream {} {}", ctx.channel(), frameName, - stream.isResetSent() ? "RST_STREAM sent." : - ("Stream created after GOAWAY sent. Last known stream by peer " + - connection.remote().lastStreamKnownByPeer())); - } - return true; - } - return false; - } - - /** - * Helper method for determining whether or not to ignore inbound frames. A stream is considered to be created - * after a {@code GOAWAY} is sent if the following conditions hold: - *

<p/>
    - * <ul>
    - *     <li>A {@code GOAWAY} must have been sent by the local endpoint</li>
    - *     <li>The {@code streamId} must identify a legitimate stream id for the remote endpoint to be creating</li>
    - *     <li>{@code streamId} is greater than the Last Known Stream ID which was sent by the local endpoint
    - *     in the last {@code GOAWAY} frame</li>
    - * </ul>
    - */ - private boolean streamCreatedAfterGoAwaySent(int streamId) { - Endpoint remote = connection.remote(); - return connection.goAwaySent() && remote.isValidStreamId(streamId) && - streamId > remote.lastStreamKnownByPeer(); - } - - private void verifyStreamMayHaveExisted(int streamId) throws Http2Exception { - if (!connection.streamMayHaveExisted(streamId)) { - throw connectionError(PROTOCOL_ERROR, "Stream %d does not exist", streamId); - } - } - } - - private final class PrefaceFrameListener implements Http2FrameListener { - /** - * Verifies that the HTTP/2 connection preface has been received from the remote endpoint. - * It is possible that the current call to - * {@link Http2FrameReader#readFrame(ChannelHandlerContext, ByteBuf, Http2FrameListener)} will have multiple - * frames to dispatch. So it may be OK for this class to get legitimate frames for the first readFrame. - */ - private void verifyPrefaceReceived() throws Http2Exception { - if (!prefaceReceived()) { - throw connectionError(PROTOCOL_ERROR, "Received non-SETTINGS as first frame."); - } - } - - @Override - public int onDataRead(ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding, boolean endOfStream) - throws Http2Exception { - verifyPrefaceReceived(); - return internalFrameListener.onDataRead(ctx, streamId, data, padding, endOfStream); - } - - @Override - public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, - boolean endOfStream) throws Http2Exception { - verifyPrefaceReceived(); - internalFrameListener.onHeadersRead(ctx, streamId, headers, padding, endOfStream); - } - - @Override - public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int streamDependency, - short weight, boolean exclusive, int padding, boolean endOfStream) throws Http2Exception { - verifyPrefaceReceived(); - internalFrameListener.onHeadersRead(ctx, streamId, headers, streamDependency, weight, - exclusive, padding, endOfStream); - } - - @Override - public void onPriorityRead(ChannelHandlerContext ctx, int streamId, int streamDependency, short weight, - boolean exclusive) throws Http2Exception { - verifyPrefaceReceived(); - internalFrameListener.onPriorityRead(ctx, streamId, streamDependency, weight, exclusive); - } - - @Override - public void onRstStreamRead(ChannelHandlerContext ctx, int streamId, long errorCode) throws Http2Exception { - verifyPrefaceReceived(); - internalFrameListener.onRstStreamRead(ctx, streamId, errorCode); - } - - @Override - public void onSettingsAckRead(ChannelHandlerContext ctx) throws Http2Exception { - verifyPrefaceReceived(); - internalFrameListener.onSettingsAckRead(ctx); - } - - @Override - public void onSettingsRead(ChannelHandlerContext ctx, Http2Settings settings) throws Http2Exception { - // The first settings should change the internalFrameListener to the "real" listener - // that expects the preface to be verified. 
- if (!prefaceReceived()) { - internalFrameListener = new FrameReadListener(); - } - internalFrameListener.onSettingsRead(ctx, settings); - } - - @Override - public void onPingRead(ChannelHandlerContext ctx, long data) throws Http2Exception { - verifyPrefaceReceived(); - internalFrameListener.onPingRead(ctx, data); - } - - @Override - public void onPingAckRead(ChannelHandlerContext ctx, long data) throws Http2Exception { - verifyPrefaceReceived(); - internalFrameListener.onPingAckRead(ctx, data); - } - - @Override - public void onPushPromiseRead(ChannelHandlerContext ctx, int streamId, int promisedStreamId, - Http2Headers headers, int padding) throws Http2Exception { - verifyPrefaceReceived(); - internalFrameListener.onPushPromiseRead(ctx, streamId, promisedStreamId, headers, padding); - } - - @Override - public void onGoAwayRead(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData) - throws Http2Exception { - onGoAwayRead0(ctx, lastStreamId, errorCode, debugData); - } - - @Override - public void onWindowUpdateRead(ChannelHandlerContext ctx, int streamId, int windowSizeIncrement) - throws Http2Exception { - verifyPrefaceReceived(); - internalFrameListener.onWindowUpdateRead(ctx, streamId, windowSizeIncrement); - } - - @Override - public void onUnknownFrame(ChannelHandlerContext ctx, byte frameType, int streamId, Http2Flags flags, - ByteBuf payload) throws Http2Exception { - onUnknownFrame0(ctx, frameType, streamId, flags, payload); - } - } -} diff --git a/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedHttp2MultiplexCodec.java b/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedHttp2MultiplexCodec.java deleted file mode 100644 index e8ef174d609d..000000000000 --- a/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedHttp2MultiplexCodec.java +++ /dev/null @@ -1,1207 +0,0 @@ -/* - * Copyright 2016 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.http2; - -import static io.netty.handler.codec.http2.Http2CodecUtil.isStreamIdValid; -import static java.lang.Math.min; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.channel.Channel; -import io.netty.channel.ChannelConfig; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelId; -import io.netty.channel.ChannelMetadata; -import io.netty.channel.ChannelOutboundBuffer; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.ChannelProgressivePromise; -import io.netty.channel.ChannelPromise; -import io.netty.channel.DefaultChannelConfig; -import io.netty.channel.DefaultChannelPipeline; -import io.netty.channel.DefaultMaxMessagesRecvByteBufAllocator; -import io.netty.channel.EventLoop; -import io.netty.channel.MessageSizeEstimator; -import io.netty.channel.RecvByteBufAllocator; -import io.netty.channel.VoidChannelPromise; -import io.netty.channel.WriteBufferWaterMark; -import io.netty.util.DefaultAttributeMap; -import io.netty.util.ReferenceCountUtil; -import io.netty.util.ReferenceCounted; -import io.netty.util.internal.StringUtil; -import io.netty.util.internal.ThrowableUtil; -import io.netty.util.internal.UnstableApi; -import java.net.SocketAddress; -import java.nio.channels.ClosedChannelException; -import java.util.ArrayDeque; -import java.util.Queue; - -/** - * An HTTP/2 handler that creates child channels for each stream. - * - *

<p>When a new stream is created, a new {@link Channel} is created for it. Applications send and
    - * receive {@link Http2StreamFrame}s on the created channel. {@link ByteBuf}s cannot be processed by the channel;
    - * all writes that reach the head of the pipeline must be an instance of {@link Http2StreamFrame}. Writes that reach
    - * the head of the pipeline are processed directly by this handler and cannot be intercepted.
    - *
    - * <p>The child channel will be notified of user events that impact the stream, such as {@link
    - * Http2GoAwayFrame} and {@link Http2ResetFrame}, as soon as they occur. Although {@code
    - * Http2GoAwayFrame} and {@code Http2ResetFrame} signify that the remote is ignoring further
    - * communication, closing of the channel is delayed until any inbound queue is drained with {@link
    - * Channel#read()}, which follows the default behavior of channels in Netty. Applications are
    - * free to close the channel in response to such events if they don't have use for any queued
    - * messages. Any connection level events like {@link Http2SettingsFrame} and {@link Http2GoAwayFrame}
    - * will be processed internally and also propagated down the pipeline for other handlers to act on.
    - *
    - * <p>Outbound streams are supported via the {@link Http2StreamChannelBootstrap}.
    - *
    - * <p>{@link ChannelConfig#setMaxMessagesPerRead(int)} and {@link ChannelConfig#setAutoRead(boolean)} are supported.
    - *
    - * <h3>Reference Counting</h3>
    - *
    - * Some {@link Http2StreamFrame}s implement the {@link ReferenceCounted} interface, as they carry
    - * reference counted objects (e.g. {@link ByteBuf}s). The multiplex codec will call {@link ReferenceCounted#retain()}
    - * before propagating a reference counted object through the pipeline, and thus an application handler needs to release
    - * such an object after having consumed it. For more information on reference counting take a look at
    - * http://netty.io/wiki/reference-counted-objects.html
    - *
    - * <h3>Channel Events</h3>
    - *
    - * A child channel becomes active as soon as it is registered to an {@link EventLoop}. Therefore, an active channel
    - * does not map to an active HTTP/2 stream immediately. Only once a {@link Http2HeadersFrame} has been successfully sent
    - * or received, does the channel map to an active HTTP/2 stream. In case it is not possible to open a new HTTP/2 stream
    - * (i.e. due to the maximum number of active streams being exceeded), the child channel receives an exception
    - * indicating the cause and is closed immediately thereafter.
    - *
    - * <h3>Writability and Flow Control</h3>
    - * - * A child channel observes outbound/remote flow control via the channel's writability. A channel only becomes writable - * when it maps to an active HTTP/2 stream and the stream's flow control window is greater than zero. A child channel - * does not know about the connection-level flow control window. {@link ChannelHandler}s are free to ignore the - * channel's writability, in which case the excessive writes will be buffered by the parent channel. It's important to - * note that only {@link Http2DataFrame}s are subject to HTTP/2 flow control. - */ -@UnstableApi -public class ForkedHttp2MultiplexCodec extends Http2FrameCodec { - - private static final ChannelFutureListener CHILD_CHANNEL_REGISTRATION_LISTENER = new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - registerDone(future); - } - }; - - private static final ChannelMetadata METADATA = new ChannelMetadata(false, 16); - private static final ClosedChannelException CLOSED_CHANNEL_EXCEPTION = ThrowableUtil.unknownStackTrace( - new ClosedChannelException(), DefaultHttp2StreamChannel.Http2ChannelUnsafe.class, "write(...)"); - /** - * Number of bytes to consider non-payload messages. 9 is arbitrary, but also the minimum size of an HTTP/2 frame. - * Primarily is non-zero. - */ - private static final int MIN_HTTP2_FRAME_SIZE = 9; - - /** - * Returns the flow-control size for DATA frames, and 0 for all other frames. - */ - private static final class FlowControlledFrameSizeEstimator implements MessageSizeEstimator { - - static final FlowControlledFrameSizeEstimator INSTANCE = new FlowControlledFrameSizeEstimator(); - - static final MessageSizeEstimator.Handle HANDLE_INSTANCE = new MessageSizeEstimator.Handle() { - @Override - public int size(Object msg) { - return msg instanceof Http2DataFrame ? - // Guard against overflow. - (int) min(Integer.MAX_VALUE, ((Http2DataFrame) msg).initialFlowControlledBytes() + - (long) MIN_HTTP2_FRAME_SIZE) : MIN_HTTP2_FRAME_SIZE; - } - }; - - @Override - public Handle newHandle() { - return HANDLE_INSTANCE; - } - } - - private static final class Http2StreamChannelRecvByteBufAllocator extends DefaultMaxMessagesRecvByteBufAllocator { - - @Override - public MaxMessageHandle newHandle() { - return new MaxMessageHandle() { - @Override - public int guess() { - return 1024; - } - }; - } - } - - private final ChannelHandler inboundStreamHandler; - - private int initialOutboundStreamWindow = Http2CodecUtil.DEFAULT_WINDOW_SIZE; - private boolean parentReadInProgress; - private int idCount; - - // Linked-List for DefaultHttp2StreamChannel instances that need to be processed by channelReadComplete(...) - private DefaultHttp2StreamChannel head; - private DefaultHttp2StreamChannel tail; - - // Need to be volatile as accessed from within the DefaultHttp2StreamChannel in a multi-threaded fashion. - volatile ChannelHandlerContext ctx; - - ForkedHttp2MultiplexCodec(Http2ConnectionEncoder encoder, - Http2ConnectionDecoder decoder, - Http2Settings initialSettings, - ChannelHandler inboundStreamHandler) { - super(encoder, decoder, initialSettings); - this.inboundStreamHandler = inboundStreamHandler; - } - - private static void registerDone(ChannelFuture future) { - // Handle any errors that occurred on the local thread while registering. Even though - // failures can happen after this point, they will be handled by the channel by closing the - // childChannel. 
- if (!future.isSuccess()) { - Channel childChannel = future.channel(); - if (childChannel.isRegistered()) { - childChannel.close(); - } else { - childChannel.unsafe().closeForcibly(); - } - } - } - - @Override - public final void handlerAdded0(ChannelHandlerContext ctx) throws Exception { - if (ctx.executor() != ctx.channel().eventLoop()) { - throw new IllegalStateException("EventExecutor must be EventLoop of Channel"); - } - this.ctx = ctx; - } - - @Override - public final void handlerRemoved0(ChannelHandlerContext ctx) throws Exception { - super.handlerRemoved0(ctx); - - // Unlink the linked list to guard against GC nepotism. - DefaultHttp2StreamChannel ch = head; - while (ch != null) { - DefaultHttp2StreamChannel curr = ch; - ch = curr.next; - curr.next = null; - } - head = tail = null; - } - - @Override - Http2MultiplexCodecStream newStream() { - return new Http2MultiplexCodecStream(); - } - - @Override - final void onHttp2Frame(ChannelHandlerContext ctx, Http2Frame frame) { - if (frame instanceof Http2StreamFrame) { - Http2StreamFrame streamFrame = (Http2StreamFrame) frame; - onHttp2StreamFrame(((Http2MultiplexCodecStream) streamFrame.stream()).channel, streamFrame); - } else if (frame instanceof Http2GoAwayFrame) { - onHttp2GoAwayFrame(ctx, (Http2GoAwayFrame) frame); - // Allow other handlers to act on GOAWAY frame - ctx.fireChannelRead(frame); - } else if (frame instanceof Http2SettingsFrame) { - Http2Settings settings = ((Http2SettingsFrame) frame).settings(); - if (settings.initialWindowSize() != null) { - initialOutboundStreamWindow = settings.initialWindowSize(); - } - // Allow other handlers to act on SETTINGS frame - ctx.fireChannelRead(frame); - } else { - // Send any other frames down the pipeline - ctx.fireChannelRead(frame); - } - } - - @Override - final void onHttp2StreamStateChanged(ChannelHandlerContext ctx, Http2FrameStream stream) { - ForkedHttp2MultiplexCodec.Http2MultiplexCodecStream s = (ForkedHttp2MultiplexCodec.Http2MultiplexCodecStream) stream; - - switch (stream.state()) { - case HALF_CLOSED_REMOTE: - case OPEN: - if (s.channel != null) { - // ignore if child channel was already created. - break; - } - // fall-trough - ChannelFuture future = ctx.channel().eventLoop().register(new DefaultHttp2StreamChannel(s, false)); - if (future.isDone()) { - registerDone(future); - } else { - future.addListener(CHILD_CHANNEL_REGISTRATION_LISTENER); - } - break; - case CLOSED: - DefaultHttp2StreamChannel channel = s.channel; - if (channel != null) { - channel.streamClosed(); - } - break; - default: - // ignore for now - break; - } - } - - @Override - final void onHttp2StreamWritabilityChanged(ChannelHandlerContext ctx, Http2FrameStream stream, boolean writable) { - (((Http2MultiplexCodecStream) stream).channel).writabilityChanged(writable); - } - - // TODO: This is most likely not the best way to expose this, need to think more about it. 
- final Http2StreamChannel newOutboundStream() { - return new DefaultHttp2StreamChannel(newStream(), true); - } - - @Override - final void onHttp2FrameStreamException(ChannelHandlerContext ctx, Http2FrameStreamException cause) { - Http2FrameStream stream = cause.stream(); - DefaultHttp2StreamChannel childChannel = ((Http2MultiplexCodecStream) stream).channel; - - try { - childChannel.pipeline().fireExceptionCaught(cause.getCause()); - } finally { - childChannel.unsafe().closeForcibly(); - } - } - - private void onHttp2StreamFrame(DefaultHttp2StreamChannel childChannel, Http2StreamFrame frame) { - // Ignore window update frames - if (frame instanceof Http2WindowUpdateFrame) { - return; - } - switch (childChannel.fireChildRead(frame)) { - case READ_PROCESSED_BUT_STOP_READING: - childChannel.fireChildReadComplete(); - break; - case READ_PROCESSED_OK_TO_PROCESS_MORE: - addChildChannelToReadPendingQueue(childChannel); - break; - case READ_IGNORED_CHANNEL_INACTIVE: - case READ_QUEUED: - // nothing to do: - break; - default: - throw new Error(); - } - } - - final void addChildChannelToReadPendingQueue(DefaultHttp2StreamChannel childChannel) { - if (!childChannel.fireChannelReadPending) { - assert childChannel.next == null; - - if (tail == null) { - assert head == null; - tail = head = childChannel; - } else { - tail.next = childChannel; - tail = childChannel; - } - childChannel.fireChannelReadPending = true; - } - } - - private void onHttp2GoAwayFrame(ChannelHandlerContext ctx, final Http2GoAwayFrame goAwayFrame) { - try { - forEachActiveStream(new Http2FrameStreamVisitor() { - @Override - public boolean visit(Http2FrameStream stream) { - int streamId = stream.id(); - DefaultHttp2StreamChannel childChannel = ((Http2MultiplexCodecStream) stream).channel; - if (streamId > goAwayFrame.lastStreamId() && connection().local().isValidStreamId(streamId)) { - childChannel.pipeline().fireUserEventTriggered(goAwayFrame.retainedDuplicate()); - } - return true; - } - }); - } catch (Http2Exception e) { - ctx.fireExceptionCaught(e); - ctx.close(); - } - } - - /** - * Notifies any child streams of the read completion. - */ - @Override - public final void channelReadComplete(ChannelHandlerContext ctx) throws Exception { - parentReadInProgress = false; - onChannelReadComplete(ctx); - channelReadComplete0(ctx); - } - - @Override - public final void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - parentReadInProgress = true; - super.channelRead(ctx, msg); - } - - final void onChannelReadComplete(ChannelHandlerContext ctx) { - // If we have many child channel we can optimize for the case when multiple call flush() in - // channelReadComplete(...) callbacks and only do it once as otherwise we will end-up with multiple - // write calls on the socket which is expensive. - try { - DefaultHttp2StreamChannel current = head; - while (current != null) { - DefaultHttp2StreamChannel childChannel = current; - if (childChannel.fireChannelReadPending) { - // Clear early in case fireChildReadComplete() causes it to need to be re-processed - childChannel.fireChannelReadPending = false; - childChannel.fireChildReadComplete(); - } - childChannel.next = null; - current = current.next; - } - } finally { - tail = head = null; - - // We always flush as this is what Http2ConnectionHandler does for now. - flush0(ctx); - } - } - - // Allow to override for testing - void flush0(ChannelHandlerContext ctx) { - flush(ctx); - } - - /** - * Return bytes to flow control. - *
    - * Package private to allow to override for testing - * @param ctx The {@link ChannelHandlerContext} associated with the parent channel. - * @param stream The object representing the HTTP/2 stream. - * @param bytes The number of bytes to return to flow control. - * @return {@code true} if a frame has been written as a result of this method call. - * @throws Http2Exception If this operation violates the flow control limits. - */ - boolean onBytesConsumed(@SuppressWarnings("unused") ChannelHandlerContext ctx, - Http2FrameStream stream, int bytes) throws Http2Exception { - return consumeBytes(stream.id(), bytes); - } - - // Allow to extend for testing - static class Http2MultiplexCodecStream extends DefaultHttp2FrameStream { - DefaultHttp2StreamChannel channel; - } - - private enum ReadState { - READ_QUEUED, - READ_IGNORED_CHANNEL_INACTIVE, - READ_PROCESSED_BUT_STOP_READING, - READ_PROCESSED_OK_TO_PROCESS_MORE - } - - private boolean initialWritability(DefaultHttp2FrameStream stream) { - // If the stream id is not valid yet we will just mark the channel as writable as we will be notified - // about non-writability state as soon as the first Http2HeaderFrame is written (if needed). - // This should be good enough and simplify things a lot. - return !isStreamIdValid(stream.id()) || isWritable(stream); - } - - // TODO: Handle writability changes due writing from outside the eventloop. - private final class DefaultHttp2StreamChannel extends DefaultAttributeMap implements Http2StreamChannel { - private final Http2StreamChannelConfig config = new Http2StreamChannelConfig(this); - private final Http2ChannelUnsafe unsafe = new Http2ChannelUnsafe(); - private final ChannelId channelId; - private final ChannelPipeline pipeline; - private final DefaultHttp2FrameStream stream; - private final ChannelPromise closePromise; - private final boolean outbound; - - private volatile boolean registered; - // We start with the writability of the channel when creating the StreamChannel. - private volatile boolean writable; - - private boolean outboundClosed; - private boolean closePending; - private boolean readInProgress; - private Queue inboundBuffer; - - /** {@code true} after the first HEADERS frame has been written **/ - private boolean firstFrameWritten; - - /** {@code true} if a close without an error was initiated **/ - private boolean streamClosedWithoutError; - - // Keeps track of flush calls in channelReadComplete(...) and aggregate these. - private boolean inFireChannelReadComplete; - - boolean fireChannelReadPending; - - // Holds the reference to the next DefaultHttp2StreamChannel that should be processed in - // channelReadComplete(...) - DefaultHttp2StreamChannel next; - - DefaultHttp2StreamChannel(DefaultHttp2FrameStream stream, boolean outbound) { - this.stream = stream; - this.outbound = outbound; - writable = initialWritability(stream); - ((Http2MultiplexCodecStream) stream).channel = this; - pipeline = new DefaultChannelPipeline(this) { - @Override - protected void incrementPendingOutboundBytes(long size) { - // Do thing for now - } - - @Override - protected void decrementPendingOutboundBytes(long size) { - // Do thing for now - } - }; - closePromise = pipeline.newPromise(); - channelId = new Http2StreamChannelId(parent().id(), ++idCount); - } - - @Override - public Http2FrameStream stream() { - return stream; - } - - void streamClosed() { - streamClosedWithoutError = true; - if (readInProgress) { - // Just call closeForcibly() as this will take care of fireChannelInactive(). 
- unsafe().closeForcibly(); - } else { - closePending = true; - } - } - - @Override - public ChannelMetadata metadata() { - return METADATA; - } - - @Override - public ChannelConfig config() { - return config; - } - - @Override - public boolean isOpen() { - return !closePromise.isDone(); - } - - @Override - public boolean isActive() { - return isOpen(); - } - - @Override - public boolean isWritable() { - return writable; - } - - @Override - public ChannelId id() { - return channelId; - } - - @Override - public EventLoop eventLoop() { - return parent().eventLoop(); - } - - @Override - public Channel parent() { - return ctx.channel(); - } - - @Override - public boolean isRegistered() { - return registered; - } - - @Override - public SocketAddress localAddress() { - return parent().localAddress(); - } - - @Override - public SocketAddress remoteAddress() { - return parent().remoteAddress(); - } - - @Override - public ChannelFuture closeFuture() { - return closePromise; - } - - @Override - public long bytesBeforeUnwritable() { - // TODO: Do a proper impl - return config().getWriteBufferHighWaterMark(); - } - - @Override - public long bytesBeforeWritable() { - // TODO: Do a proper impl - return 0; - } - - @Override - public Unsafe unsafe() { - return unsafe; - } - - @Override - public ChannelPipeline pipeline() { - return pipeline; - } - - @Override - public ByteBufAllocator alloc() { - return config().getAllocator(); - } - - @Override - public Channel read() { - pipeline().read(); - return this; - } - - @Override - public Channel flush() { - pipeline().flush(); - return this; - } - - @Override - public ChannelFuture bind(SocketAddress localAddress) { - return pipeline().bind(localAddress); - } - - @Override - public ChannelFuture connect(SocketAddress remoteAddress) { - return pipeline().connect(remoteAddress); - } - - @Override - public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) { - return pipeline().connect(remoteAddress, localAddress); - } - - @Override - public ChannelFuture disconnect() { - return pipeline().disconnect(); - } - - @Override - public ChannelFuture close() { - return pipeline().close(); - } - - @Override - public ChannelFuture deregister() { - return pipeline().deregister(); - } - - @Override - public ChannelFuture bind(SocketAddress localAddress, ChannelPromise promise) { - return pipeline().bind(localAddress, promise); - } - - @Override - public ChannelFuture connect(SocketAddress remoteAddress, ChannelPromise promise) { - return pipeline().connect(remoteAddress, promise); - } - - @Override - public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) { - return pipeline().connect(remoteAddress, localAddress, promise); - } - - @Override - public ChannelFuture disconnect(ChannelPromise promise) { - return pipeline().disconnect(promise); - } - - @Override - public ChannelFuture close(ChannelPromise promise) { - return pipeline().close(promise); - } - - @Override - public ChannelFuture deregister(ChannelPromise promise) { - return pipeline().deregister(promise); - } - - @Override - public ChannelFuture write(Object msg) { - return pipeline().write(msg); - } - - @Override - public ChannelFuture write(Object msg, ChannelPromise promise) { - return pipeline().write(msg, promise); - } - - @Override - public ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) { - return pipeline().writeAndFlush(msg, promise); - } - - @Override - public ChannelFuture writeAndFlush(Object msg) { - return 
pipeline().writeAndFlush(msg); - } - - @Override - public ChannelPromise newPromise() { - return pipeline().newPromise(); - } - - @Override - public ChannelProgressivePromise newProgressivePromise() { - return pipeline().newProgressivePromise(); - } - - @Override - public ChannelFuture newSucceededFuture() { - return pipeline().newSucceededFuture(); - } - - @Override - public ChannelFuture newFailedFuture(Throwable cause) { - return pipeline().newFailedFuture(cause); - } - - @Override - public ChannelPromise voidPromise() { - return pipeline().voidPromise(); - } - - @Override - public int hashCode() { - return id().hashCode(); - } - - @Override - public boolean equals(Object o) { - return this == o; - } - - @Override - public int compareTo(Channel o) { - if (this == o) { - return 0; - } - - return id().compareTo(o.id()); - } - - @Override - public String toString() { - return parent().toString() + "(H2 - " + stream + ')'; - } - - void writabilityChanged(boolean writable) { - assert eventLoop().inEventLoop(); - if (writable != this.writable && isActive()) { - // Only notify if we received a state change. - this.writable = writable; - pipeline().fireChannelWritabilityChanged(); - } - } - - /** - * Receive a read message. This does not notify handlers unless a read is in progress on the - * channel. - */ - ReadState fireChildRead(Http2Frame frame) { - assert eventLoop().inEventLoop(); - if (!isActive()) { - ReferenceCountUtil.release(frame); - return ReadState.READ_IGNORED_CHANNEL_INACTIVE; - } - if (readInProgress && (inboundBuffer == null || inboundBuffer.isEmpty())) { - // Check for null because inboundBuffer doesn't support null; we want to be consistent - // for what values are supported. - RecvByteBufAllocator.ExtendedHandle allocHandle = unsafe.recvBufAllocHandle(); - unsafe.doRead0(frame, allocHandle); - return allocHandle.continueReading() ? 
- ReadState.READ_PROCESSED_OK_TO_PROCESS_MORE : ReadState.READ_PROCESSED_BUT_STOP_READING; - } else { - if (inboundBuffer == null) { - inboundBuffer = new ArrayDeque(4); - } - inboundBuffer.add(frame); - return ReadState.READ_QUEUED; - } - } - - void fireChildReadComplete() { - assert eventLoop().inEventLoop(); - try { - if (readInProgress) { - inFireChannelReadComplete = true; - readInProgress = false; - unsafe().recvBufAllocHandle().readComplete(); - pipeline().fireChannelReadComplete(); - } - } finally { - inFireChannelReadComplete = false; - } - } - - private final class Http2ChannelUnsafe implements Unsafe { - private final VoidChannelPromise unsafeVoidPromise = - new VoidChannelPromise(DefaultHttp2StreamChannel.this, false); - @SuppressWarnings("deprecation") - private RecvByteBufAllocator.ExtendedHandle recvHandle; - private boolean writeDoneAndNoFlush; - private boolean closeInitiated; - - @Override - public void connect(final SocketAddress remoteAddress, - SocketAddress localAddress, final ChannelPromise promise) { - if (!promise.setUncancellable()) { - return; - } - promise.setFailure(new UnsupportedOperationException()); - } - - @Override - public RecvByteBufAllocator.ExtendedHandle recvBufAllocHandle() { - if (recvHandle == null) { - recvHandle = (RecvByteBufAllocator.ExtendedHandle) config().getRecvByteBufAllocator().newHandle(); - } - return recvHandle; - } - - @Override - public SocketAddress localAddress() { - return parent().unsafe().localAddress(); - } - - @Override - public SocketAddress remoteAddress() { - return parent().unsafe().remoteAddress(); - } - - @Override - public void register(EventLoop eventLoop, ChannelPromise promise) { - if (!promise.setUncancellable()) { - return; - } - if (registered) { - throw new UnsupportedOperationException("Re-register is not supported"); - } - - registered = true; - - if (!outbound) { - // Add the handler to the pipeline now that we are registered. - pipeline().addLast(inboundStreamHandler); - } - - promise.setSuccess(); - - pipeline().fireChannelRegistered(); - if (isActive()) { - pipeline().fireChannelActive(); - } - } - - @Override - public void bind(SocketAddress localAddress, ChannelPromise promise) { - if (!promise.setUncancellable()) { - return; - } - promise.setFailure(new UnsupportedOperationException()); - } - - @Override - public void disconnect(ChannelPromise promise) { - close(promise); - } - - @Override - public void close(final ChannelPromise promise) { - if (!promise.setUncancellable()) { - return; - } - if (closeInitiated) { - if (closePromise.isDone()) { - // Closed already. - promise.setSuccess(); - } else if (!(promise instanceof VoidChannelPromise)) { // Only needed if no VoidChannelPromise. - // This means close() was called before so we just register a listener and return - closePromise.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - promise.setSuccess(); - } - }); - } - return; - } - closeInitiated = true; - - closePending = false; - fireChannelReadPending = false; - - // Only ever send a reset frame if the connection is still alive as otherwise it makes no sense at - // all anyway. 
- if (parent().isActive() && !streamClosedWithoutError && isStreamIdValid(stream().id())) { - Http2StreamFrame resetFrame = new DefaultHttp2ResetFrame(Http2Error.CANCEL).stream(stream()); - write(resetFrame, unsafe().voidPromise()); - flush(); - } - - if (inboundBuffer != null) { - for (;;) { - Object msg = inboundBuffer.poll(); - if (msg == null) { - break; - } - ReferenceCountUtil.release(msg); - } - } - - // The promise should be notified before we call fireChannelInactive(). - outboundClosed = true; - closePromise.setSuccess(); - promise.setSuccess(); - - pipeline().fireChannelInactive(); - if (isRegistered()) { - deregister(unsafe().voidPromise()); - } - } - - @Override - public void closeForcibly() { - close(unsafe().voidPromise()); - } - - @Override - public void deregister(ChannelPromise promise) { - if (!promise.setUncancellable()) { - return; - } - if (registered) { - registered = true; - promise.setSuccess(); - pipeline().fireChannelUnregistered(); - } else { - promise.setFailure(new IllegalStateException("Not registered")); - } - } - - @Override - public void beginRead() { - if (readInProgress || !isActive()) { - return; - } - readInProgress = true; - - RecvByteBufAllocator.Handle allocHandle = unsafe().recvBufAllocHandle(); - allocHandle.reset(config()); - if (inboundBuffer == null || inboundBuffer.isEmpty()) { - if (closePending) { - unsafe.closeForcibly(); - } - return; - } - - // We have already checked that the queue is not empty, so before this value is used it will always be - // set by allocHandle.continueReading(). - boolean continueReading; - do { - Object m = inboundBuffer.poll(); - if (m == null) { - continueReading = false; - break; - } - doRead0((Http2Frame) m, allocHandle); - } while (continueReading = allocHandle.continueReading()); - - if (continueReading && parentReadInProgress) { - // We don't know if more frames will be delivered in the parent channel's read loop, so add this - // channel to the channelReadComplete queue to be notified later. - addChildChannelToReadPendingQueue(DefaultHttp2StreamChannel.this); - } else { - // Reading data may result in frames being written (e.g. WINDOW_UPDATE, RST, etc..). If the parent - // channel is not currently reading we need to force a flush at the child channel, because we cannot - // rely upon flush occurring in channelReadComplete on the parent channel. - readInProgress = false; - allocHandle.readComplete(); - pipeline().fireChannelReadComplete(); - flush(); - if (closePending) { - unsafe.closeForcibly(); - } - } - } - - @SuppressWarnings("deprecation") - void doRead0(Http2Frame frame, RecvByteBufAllocator.Handle allocHandle) { - int numBytesToBeConsumed = 0; - if (frame instanceof Http2DataFrame) { - numBytesToBeConsumed = ((Http2DataFrame) frame).initialFlowControlledBytes(); - allocHandle.lastBytesRead(numBytesToBeConsumed); - } else { - allocHandle.lastBytesRead(MIN_HTTP2_FRAME_SIZE); - } - allocHandle.incMessagesRead(1); - pipeline().fireChannelRead(frame); - - if (numBytesToBeConsumed != 0) { - try { - writeDoneAndNoFlush |= onBytesConsumed(ctx, stream, numBytesToBeConsumed); - } catch (Http2Exception e) { - pipeline().fireExceptionCaught(e); - } - } - } - - @Override - public void write(Object msg, final ChannelPromise promise) { - // After this point its not possible to cancel a write anymore. 
- if (!promise.setUncancellable()) { - ReferenceCountUtil.release(msg); - return; - } - - if (!isActive() || - // Once the outbound side was closed we should not allow header / data frames - outboundClosed && (msg instanceof Http2HeadersFrame || msg instanceof Http2DataFrame)) { - ReferenceCountUtil.release(msg); - promise.setFailure(CLOSED_CHANNEL_EXCEPTION); - return; - } - - try { - if (msg instanceof Http2StreamFrame) { - Http2StreamFrame frame = validateStreamFrame((Http2StreamFrame) msg).stream(stream()); - if (!firstFrameWritten && !isStreamIdValid(stream().id())) { - if (!(frame instanceof Http2HeadersFrame)) { - ReferenceCountUtil.release(frame); - promise.setFailure( - new IllegalArgumentException("The first frame must be a headers frame. Was: " - + frame.name())); - return; - } - firstFrameWritten = true; - ChannelFuture future = write0(frame); - if (future.isDone()) { - firstWriteComplete(future, promise); - } else { - future.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - firstWriteComplete(future, promise); - } - }); - } - return; - } - } else { - String msgStr = msg.toString(); - ReferenceCountUtil.release(msg); - promise.setFailure(new IllegalArgumentException( - "Message must be an " + StringUtil.simpleClassName(Http2StreamFrame.class) + - ": " + msgStr)); - return; - } - - ChannelFuture future = write0(msg); - if (future.isDone()) { - writeComplete(future, promise); - } else { - future.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - writeComplete(future, promise); - } - }); - } - } catch (Throwable t) { - promise.tryFailure(t); - } finally { - writeDoneAndNoFlush = true; - } - } - - private void firstWriteComplete(ChannelFuture future, ChannelPromise promise) { - Throwable cause = future.cause(); - if (cause == null) { - // As we just finished our first write which made the stream-id valid we need to re-evaluate - // the writability of the channel. - writabilityChanged(ForkedHttp2MultiplexCodec.this.isWritable(stream)); - promise.setSuccess(); - } else { - promise.setFailure(wrapStreamClosedError(cause)); - // If the first write fails there is not much we can do, just close - closeForcibly(); - } - } - - private void writeComplete(ChannelFuture future, ChannelPromise promise) { - Throwable cause = future.cause(); - if (cause == null) { - promise.setSuccess(); - } else { - Throwable error = wrapStreamClosedError(cause); - promise.setFailure(error); - - if (error instanceof ClosedChannelException) { - if (config.isAutoClose()) { - // Close channel if needed. - closeForcibly(); - } else { - outboundClosed = true; - } - } - } - } - - private Throwable wrapStreamClosedError(Throwable cause) { - // If the error was caused by STREAM_CLOSED we should use a ClosedChannelException to better - // mimic other transports and make it easier to reason about what exceptions to expect. 
- if (cause instanceof Http2Exception && ((Http2Exception) cause).error() == Http2Error.STREAM_CLOSED) { - return new ClosedChannelException().initCause(cause); - } - return cause; - } - - private Http2StreamFrame validateStreamFrame(Http2StreamFrame frame) { - if (frame.stream() != null && frame.stream() != stream) { - String msgString = frame.toString(); - ReferenceCountUtil.release(frame); - throw new IllegalArgumentException( - "Stream " + frame.stream() + " must not be set on the frame: " + msgString); - } - return frame; - } - - private ChannelFuture write0(Object msg) { - ChannelPromise promise = ctx.newPromise(); - ForkedHttp2MultiplexCodec.this.write(ctx, msg, promise); - return promise; - } - - @Override - public void flush() { - if (!writeDoneAndNoFlush) { - // There is nothing to flush so this is a NOOP. - return; - } - try { - // If we are currently in the channelReadComplete(...) call we should just ignore the flush. - // We will ensure we trigger ctx.flush() after we processed all Channels later on and - // so aggregate the flushes. This is done as ctx.flush() is expensive when as it may trigger an - // write(...) or writev(...) operation on the socket. - if (!inFireChannelReadComplete) { - flush0(ctx); - } - } finally { - writeDoneAndNoFlush = false; - } - } - - @Override - public ChannelPromise voidPromise() { - return unsafeVoidPromise; - } - - @Override - public ChannelOutboundBuffer outboundBuffer() { - // Always return null as we not use the ChannelOutboundBuffer and not even support it. - return null; - } - } - - /** - * {@link ChannelConfig} so that the high and low writebuffer watermarks can reflect the outbound flow control - * window, without having to create a new {@link WriteBufferWaterMark} object whenever the flow control window - * changes. 
- */ - private final class Http2StreamChannelConfig extends DefaultChannelConfig { - - Http2StreamChannelConfig(Channel channel) { - super(channel); - setRecvByteBufAllocator(new Http2StreamChannelRecvByteBufAllocator()); - } - - @Override - public int getWriteBufferHighWaterMark() { - return min(parent().config().getWriteBufferHighWaterMark(), initialOutboundStreamWindow); - } - - @Override - public int getWriteBufferLowWaterMark() { - return min(parent().config().getWriteBufferLowWaterMark(), initialOutboundStreamWindow); - } - - @Override - public MessageSizeEstimator getMessageSizeEstimator() { - return FlowControlledFrameSizeEstimator.INSTANCE; - } - - @Override - public WriteBufferWaterMark getWriteBufferWaterMark() { - int mark = getWriteBufferHighWaterMark(); - return new WriteBufferWaterMark(mark, mark); - } - - @Override - public ChannelConfig setMessageSizeEstimator(MessageSizeEstimator estimator) { - throw new UnsupportedOperationException(); - } - - @Override - @Deprecated - public ChannelConfig setWriteBufferHighWaterMark(int writeBufferHighWaterMark) { - throw new UnsupportedOperationException(); - } - - @Override - @Deprecated - public ChannelConfig setWriteBufferLowWaterMark(int writeBufferLowWaterMark) { - throw new UnsupportedOperationException(); - } - - @Override - public ChannelConfig setWriteBufferWaterMark(WriteBufferWaterMark writeBufferWaterMark) { - throw new UnsupportedOperationException(); - } - - @Override - public ChannelConfig setRecvByteBufAllocator(RecvByteBufAllocator allocator) { - if (!(allocator.newHandle() instanceof RecvByteBufAllocator.ExtendedHandle)) { - throw new IllegalArgumentException("allocator.newHandle() must return an object of type: " + - RecvByteBufAllocator.ExtendedHandle.class); - } - super.setRecvByteBufAllocator(allocator); - return this; - } - } - } -} - diff --git a/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedHttp2MultiplexCodecBuilder.java b/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedHttp2MultiplexCodecBuilder.java deleted file mode 100644 index e853f6d7347f..000000000000 --- a/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedHttp2MultiplexCodecBuilder.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package io.netty.handler.codec.http2; - -import static io.netty.util.internal.ObjectUtil.checkNotNull; - -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerAdapter; - -public class ForkedHttp2MultiplexCodecBuilder - extends AbstractHttp2ConnectionHandlerBuilder { - - final ChannelHandler childHandler; - - ForkedHttp2MultiplexCodecBuilder(boolean server, ChannelHandler childHandler) { - server(server); - this.childHandler = checkSharable(checkNotNull(childHandler, "childHandler")); - } - - private static ChannelHandler checkSharable(ChannelHandler handler) { - if ((handler instanceof ChannelHandlerAdapter && !((ChannelHandlerAdapter) handler).isSharable()) && - !handler.getClass().isAnnotationPresent(ChannelHandler.Sharable.class)) { - throw new IllegalArgumentException("The handler must be Sharable"); - } - return handler; - } - - /** - * Creates a builder for a HTTP/2 client. - * - * @param childHandler the handler added to channels for remotely-created streams. It must be - * {@link ChannelHandler.Sharable}. - */ - public static ForkedHttp2MultiplexCodecBuilder forClient(ChannelHandler childHandler) { - return new ForkedHttp2MultiplexCodecBuilder(false, childHandler); - } - - /** - * Creates a builder for a HTTP/2 server. - * - * @param childHandler the handler added to channels for remotely-created streams. It must be - * {@link ChannelHandler.Sharable}. - */ - public static ForkedHttp2MultiplexCodecBuilder forServer(ChannelHandler childHandler) { - return new ForkedHttp2MultiplexCodecBuilder(true, childHandler); - } - - @Override - public Http2Settings initialSettings() { - return super.initialSettings(); - } - - @Override - public ForkedHttp2MultiplexCodecBuilder initialSettings(Http2Settings settings) { - return super.initialSettings(settings); - } - - @Override - public long gracefulShutdownTimeoutMillis() { - return super.gracefulShutdownTimeoutMillis(); - } - - @Override - public ForkedHttp2MultiplexCodecBuilder gracefulShutdownTimeoutMillis(long gracefulShutdownTimeoutMillis) { - return super.gracefulShutdownTimeoutMillis(gracefulShutdownTimeoutMillis); - } - - @Override - public boolean isServer() { - return super.isServer(); - } - - @Override - public int maxReservedStreams() { - return super.maxReservedStreams(); - } - - @Override - public ForkedHttp2MultiplexCodecBuilder maxReservedStreams(int maxReservedStreams) { - return super.maxReservedStreams(maxReservedStreams); - } - - @Override - public boolean isValidateHeaders() { - return super.isValidateHeaders(); - } - - @Override - public ForkedHttp2MultiplexCodecBuilder validateHeaders(boolean validateHeaders) { - return super.validateHeaders(validateHeaders); - } - - @Override - public Http2FrameLogger frameLogger() { - return super.frameLogger(); - } - - @Override - public ForkedHttp2MultiplexCodecBuilder frameLogger(Http2FrameLogger frameLogger) { - return super.frameLogger(frameLogger); - } - - @Override - public boolean encoderEnforceMaxConcurrentStreams() { - return super.encoderEnforceMaxConcurrentStreams(); - } - - @Override - public ForkedHttp2MultiplexCodecBuilder encoderEnforceMaxConcurrentStreams(boolean encoderEnforceMaxConcurrentStreams) { - return super.encoderEnforceMaxConcurrentStreams(encoderEnforceMaxConcurrentStreams); - } - - @Override - public Http2HeadersEncoder.SensitivityDetector headerSensitivityDetector() { - return super.headerSensitivityDetector(); - } - - @Override - public ForkedHttp2MultiplexCodecBuilder headerSensitivityDetector( - 
Http2HeadersEncoder.SensitivityDetector headerSensitivityDetector) { - return super.headerSensitivityDetector(headerSensitivityDetector); - } - - @Override - public ForkedHttp2MultiplexCodecBuilder encoderIgnoreMaxHeaderListSize(boolean ignoreMaxHeaderListSize) { - return super.encoderIgnoreMaxHeaderListSize(ignoreMaxHeaderListSize); - } - - @Override - public ForkedHttp2MultiplexCodecBuilder initialHuffmanDecodeCapacity(int initialHuffmanDecodeCapacity) { - return super.initialHuffmanDecodeCapacity(initialHuffmanDecodeCapacity); - } - - @Override - public ForkedHttp2MultiplexCodec build() { - Http2Connection connection = connection(); - if (connection == null) { - connection = new DefaultHttp2Connection(this.isServer(), this.maxReservedStreams()); - } - return this.buildFromConnection((Http2Connection) connection); - } - - private ForkedHttp2MultiplexCodec buildFromConnection(Http2Connection connection) { - Long maxHeaderListSize = initialSettings().maxHeaderListSize(); - Http2FrameReader reader = new DefaultHttp2FrameReader( - new DefaultHttp2HeadersDecoder(this.isValidateHeaders(), 8192L, 32)); - Http2FrameWriter writer = new DefaultHttp2FrameWriter(this.headerSensitivityDetector()); - if (frameLogger() != null) { - reader = new Http2InboundFrameLogger((Http2FrameReader) reader, frameLogger()); - writer = new Http2OutboundFrameLogger((Http2FrameWriter) writer, frameLogger()); - } - - Http2ConnectionEncoder encoder = new DefaultHttp2ConnectionEncoder(connection, (Http2FrameWriter) writer); - boolean encoderEnforceMaxConcurrentStreams = this.encoderEnforceMaxConcurrentStreams(); - if (encoderEnforceMaxConcurrentStreams) { - if (connection.isServer()) { - ((Http2ConnectionEncoder) encoder).close(); - ((Http2FrameReader) reader).close(); - throw new IllegalArgumentException("encoderEnforceMaxConcurrentStreams: " + encoderEnforceMaxConcurrentStreams + " not supported for server"); - } - - encoder = new StreamBufferingEncoder((Http2ConnectionEncoder) encoder); - } - - Http2ConnectionDecoder decoder = new ForkedDefaultHttp2ConnectionDecoder(connection, (Http2ConnectionEncoder) encoder, (Http2FrameReader) reader); - return this.buildFromCodec(decoder, (Http2ConnectionEncoder) encoder); - } - - private ForkedHttp2MultiplexCodec buildFromCodec(Http2ConnectionDecoder decoder, Http2ConnectionEncoder encoder) { - ForkedHttp2MultiplexCodec handler; - try { - handler = this.build(decoder, encoder, initialSettings()); - } catch (Throwable var5) { - encoder.close(); - decoder.close(); - throw new IllegalStateException("failed to build a Http2ConnectionHandler", var5); - } - - handler.gracefulShutdownTimeoutMillis(gracefulShutdownTimeoutMillis()); - if (handler.decoder().frameListener() == null) { - handler.decoder().frameListener(frameListener()); - } - - return handler; - } - - @Override - protected ForkedHttp2MultiplexCodec build( - Http2ConnectionDecoder decoder, Http2ConnectionEncoder encoder, Http2Settings initialSettings) { - return new ForkedHttp2MultiplexCodec(encoder, decoder, initialSettings, childHandler); - } -} diff --git a/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedHttp2StreamChannelBootstrap.java b/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedHttp2StreamChannelBootstrap.java deleted file mode 100644 index 746aede9fb69..000000000000 --- a/http-clients/netty-nio-client/src/main/java/io/netty/handler/codec/http2/ForkedHttp2StreamChannelBootstrap.java +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright 2017 The Netty Project 
- * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.http2; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelOption; -import io.netty.channel.ChannelPipeline; -import io.netty.util.AttributeKey; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.Promise; -import io.netty.util.internal.ObjectUtil; -import io.netty.util.internal.StringUtil; -import io.netty.util.internal.UnstableApi; -import io.netty.util.internal.logging.InternalLogger; -import io.netty.util.internal.logging.InternalLoggerFactory; -import java.nio.channels.ClosedChannelException; -import java.util.LinkedHashMap; -import java.util.Map; - -@UnstableApi -public final class ForkedHttp2StreamChannelBootstrap { - private static final InternalLogger logger = InternalLoggerFactory.getInstance(ForkedHttp2StreamChannelBootstrap.class); - - private final Map, Object> options = new LinkedHashMap, Object>(); - private final Map, Object> attrs = new LinkedHashMap, Object>(); - private final Channel channel; - private volatile ChannelHandler handler; - - public ForkedHttp2StreamChannelBootstrap(Channel channel) { - this.channel = ObjectUtil.checkNotNull(channel, "channel"); - } - - /** - * Allow to specify a {@link ChannelOption} which is used for the {@link Http2StreamChannel} instances once they got - * created. Use a value of {@code null} to remove a previous set {@link ChannelOption}. - */ - @SuppressWarnings("unchecked") - public ForkedHttp2StreamChannelBootstrap option(ChannelOption option, T value) { - if (option == null) { - throw new NullPointerException("option"); - } - if (value == null) { - synchronized (options) { - options.remove(option); - } - } else { - synchronized (options) { - options.put(option, value); - } - } - return this; - } - - /** - * Allow to specify an initial attribute of the newly created {@link Http2StreamChannel}. If the {@code value} is - * {@code null}, the attribute of the specified {@code key} is removed. - */ - @SuppressWarnings("unchecked") - public ForkedHttp2StreamChannelBootstrap attr(AttributeKey key, T value) { - if (key == null) { - throw new NullPointerException("key"); - } - if (value == null) { - synchronized (attrs) { - attrs.remove(key); - } - } else { - synchronized (attrs) { - attrs.put(key, value); - } - } - return this; - } - - /** - * the {@link ChannelHandler} to use for serving the requests. 
- */ - @SuppressWarnings("unchecked") - public ForkedHttp2StreamChannelBootstrap handler(ChannelHandler handler) { - this.handler = ObjectUtil.checkNotNull(handler, "handler"); - return this; - } - - public Future open() { - return open(channel.eventLoop().newPromise()); - } - - public Future open(final Promise promise) { - ChannelHandlerContext ctx = channel.pipeline().context(ForkedHttp2MultiplexCodec.class); - if (ctx == null) { - if (channel.isActive()) { - promise.setFailure(new IllegalStateException(StringUtil.simpleClassName(ForkedHttp2MultiplexCodec.class) + - " must be in the ChannelPipeline of Channel " + channel)); - } else { - promise.setFailure(new ClosedChannelException()); - } - } else { - EventExecutor executor = ctx.executor(); - if (executor.inEventLoop()) { - open0(ctx, promise); - } else { - executor.execute(new Runnable() { - @Override - public void run() { - open0(ctx, promise); - } - }); - } - } - return promise; - } - - public void open0(ChannelHandlerContext ctx, final Promise promise) { - assert ctx.executor().inEventLoop(); - Http2StreamChannel streamChannel = ((ForkedHttp2MultiplexCodec) ctx.handler()).newOutboundStream(); - try { - init(streamChannel); - } catch (Exception e) { - streamChannel.unsafe().closeForcibly(); - promise.setFailure(e); - return; - } - - ChannelFuture future = ctx.channel().eventLoop().register(streamChannel); - future.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (future.isSuccess()) { - promise.setSuccess(streamChannel); - } else if (future.isCancelled()) { - promise.cancel(false); - } else { - if (streamChannel.isRegistered()) { - streamChannel.close(); - } else { - streamChannel.unsafe().closeForcibly(); - } - - promise.setFailure(future.cause()); - } - } - }); - } - - @SuppressWarnings("unchecked") - private void init(Channel channel) throws Exception { - ChannelPipeline p = channel.pipeline(); - ChannelHandler handler = this.handler; - if (handler != null) { - p.addLast(handler); - } - synchronized (options) { - setChannelOptions(channel, options, logger); - } - - synchronized (attrs) { - for (Map.Entry, Object> e: attrs.entrySet()) { - channel.attr((AttributeKey) e.getKey()).set(e.getValue()); - } - } - } - - private static void setChannelOptions( - Channel channel, Map, Object> options, InternalLogger logger) { - for (Map.Entry, Object> e: options.entrySet()) { - setChannelOption(channel, e.getKey(), e.getValue(), logger); - } - } - - @SuppressWarnings("unchecked") - private static void setChannelOption( - Channel channel, ChannelOption option, Object value, InternalLogger logger) { - try { - if (!channel.config().setOption((ChannelOption) option, value)) { - logger.warn("Unknown channel option '{}' for channel '{}'", option, channel); - } - } catch (Throwable t) { - logger.warn( - "Failed to set channel option '{}' with value '{}' for channel '{}'", option, value, channel, t); - } - } -} - diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/Http2Configuration.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/Http2Configuration.java new file mode 100644 index 000000000000..93037270e310 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/Http2Configuration.java @@ -0,0 +1,181 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * Configuration specific to HTTP/2 connections. + */ +@SdkPublicApi +public final class Http2Configuration implements ToCopyableBuilder { + private final Long maxStreams; + private final Integer initialWindowSize; + private final Duration healthCheckPingPeriod; + + private Http2Configuration(DefaultBuilder builder) { + this.maxStreams = builder.maxStreams; + this.initialWindowSize = builder.initialWindowSize; + this.healthCheckPingPeriod = builder.healthCheckPingPeriod; + } + + /** + * @return The maximum number of streams to be created per HTTP/2 connection. + */ + public Long maxStreams() { + return maxStreams; + } + + /** + * @return The initial window size for an HTTP/2 stream. + */ + public Integer initialWindowSize() { + return initialWindowSize; + } + + /** + * @return The health check period for an HTTP/2 connection. + */ + public Duration healthCheckPingPeriod() { + return healthCheckPingPeriod; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + Http2Configuration that = (Http2Configuration) o; + + if (maxStreams != null ? !maxStreams.equals(that.maxStreams) : that.maxStreams != null) { + return false; + } + + return initialWindowSize != null ? initialWindowSize.equals(that.initialWindowSize) : that.initialWindowSize == null; + + } + + @Override + public int hashCode() { + int result = maxStreams != null ? maxStreams.hashCode() : 0; + result = 31 * result + (initialWindowSize != null ? initialWindowSize.hashCode() : 0); + return result; + } + + public static Builder builder() { + return new DefaultBuilder(); + } + + public interface Builder extends CopyableBuilder { + + /** + * Sets the max number of concurrent streams per connection. + * + *
    + * Note that this cannot exceed the value of the MAX_CONCURRENT_STREAMS setting returned by the service. If it + * does the service setting is used instead.
    + * + * @param maxStreams Max concurrent HTTP/2 streams per connection. + * @return This builder for method chaining. + */ + Builder maxStreams(Long maxStreams); + + /** + * Sets initial window size of a stream. This setting is only respected when the HTTP/2 protocol is used. + * + * See https://tools.ietf.org/html/rfc7540#section-6.5.2 + * for more information about this parameter. + * + * @param initialWindowSize The initial window size of a stream. + * @return This builder for method chaining. + */ + Builder initialWindowSize(Integer initialWindowSize); + + /** + * Sets the period that the Netty client will send {@code PING} frames to the remote endpoint to check the + * health of the connection. The default value is {@link + * software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration#HTTP2_CONNECTION_PING_TIMEOUT_SECONDS}. To + * disable this feature, set a duration of 0. + * + * @param healthCheckPingPeriod The ping period. + * @return This builder for method chaining. + */ + Builder healthCheckPingPeriod(Duration healthCheckPingPeriod); + } + + private static final class DefaultBuilder implements Builder { + private Long maxStreams; + private Integer initialWindowSize; + private Duration healthCheckPingPeriod; + + private DefaultBuilder() { + } + + private DefaultBuilder(Http2Configuration http2Configuration) { + this.maxStreams = http2Configuration.maxStreams; + this.initialWindowSize = http2Configuration.initialWindowSize; + this.healthCheckPingPeriod = http2Configuration.healthCheckPingPeriod; + } + + @Override + public Builder maxStreams(Long maxStreams) { + this.maxStreams = Validate.isPositiveOrNull(maxStreams, "maxStreams"); + return this; + } + + public void setMaxStreams(Long maxStreams) { + maxStreams(maxStreams); + } + + @Override + public Builder initialWindowSize(Integer initialWindowSize) { + this.initialWindowSize = Validate.isPositiveOrNull(initialWindowSize, "initialWindowSize"); + return this; + } + + public void setInitialWindowSize(Integer initialWindowSize) { + initialWindowSize(initialWindowSize); + } + + @Override + public Builder healthCheckPingPeriod(Duration healthCheckPingPeriod) { + this.healthCheckPingPeriod = healthCheckPingPeriod; + return this; + } + + public void setHealthCheckPingPeriod(Duration healthCheckPingPeriod) { + healthCheckPingPeriod(healthCheckPingPeriod); + } + + @Override + public Http2Configuration build() { + return new Http2Configuration(this); + } + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java index 3e0c564f47ce..a2068c882cba 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
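As a point of reference for the Http2Configuration type introduced above, the following is a minimal usage sketch showing how the new builder could be wired into NettyNioAsyncHttpClient. It is illustrative only: the limits and durations are placeholder values, and it uses only builder methods that appear in this change.

import java.time.Duration;
import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
import software.amazon.awssdk.http.nio.netty.Http2Configuration;
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;

public final class Http2ConfigurationSketch {
    public static void main(String[] args) {
        // HTTP/2-specific settings; all values here are illustrative placeholders.
        Http2Configuration http2 = Http2Configuration.builder()
                .maxStreams(100L)                              // concurrent streams per connection
                .initialWindowSize(1_048_576)                  // 1 MiB initial stream flow-control window
                .healthCheckPingPeriod(Duration.ofSeconds(60)) // PING-based connection health check
                .build();

        // The client builder also offers a Consumer-based http2Configuration overload.
        SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder()
                .http2Configuration(http2)
                .build();

        client.close();
    }
}

If maxHttp2Streams(Integer) is also set on the client builder, it takes precedence over Http2Configuration#maxStreams(), as noted in the builder javadoc further down in this diff.
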
@@ -15,15 +15,7 @@ package software.amazon.awssdk.http.nio.netty; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.CONNECTION_ACQUIRE_TIMEOUT; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.CONNECTION_MAX_IDLE_TIMEOUT; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.CONNECTION_TIMEOUT; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.CONNECTION_TIME_TO_LIVE; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.MAX_CONNECTIONS; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.MAX_PENDING_CONNECTION_ACQUIRES; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.READ_TIMEOUT; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.REAP_IDLE_CONNECTIONS; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.WRITE_TIMEOUT; +import static software.amazon.awssdk.http.HttpMetric.HTTP_CLIENT_NAME; import static software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration.EVENTLOOP_SHUTDOWN_FUTURE_TIMEOUT_SECONDS; import static software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration.EVENTLOOP_SHUTDOWN_QUIET_PERIOD_SECONDS; import static software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration.EVENTLOOP_SHUTDOWN_TIMEOUT_SECONDS; @@ -32,7 +24,6 @@ import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslProvider; import java.net.URI; @@ -41,6 +32,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.function.Consumer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.annotations.SdkPublicApi; @@ -48,6 +40,9 @@ import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.SdkHttpConfigurationOption; import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.SystemPropertyTlsKeyManagersProvider; +import software.amazon.awssdk.http.TlsKeyManagersProvider; +import software.amazon.awssdk.http.TlsTrustManagersProvider; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.internal.AwaitCloseChannelPoolMap; @@ -56,6 +51,7 @@ import software.amazon.awssdk.http.nio.netty.internal.NonManagedEventLoopGroup; import software.amazon.awssdk.http.nio.netty.internal.RequestContext; import software.amazon.awssdk.http.nio.netty.internal.SdkChannelOptions; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPoolMap; import software.amazon.awssdk.http.nio.netty.internal.SharedSdkEventLoopGroup; import software.amazon.awssdk.utils.AttributeMap; @@ -74,29 +70,45 @@ public final class NettyNioAsyncHttpClient implements SdkAsyncHttpClient { private static final Logger log = LoggerFactory.getLogger(NettyNioAsyncHttpClient.class); private static final long MAX_STREAMS_ALLOWED = 4294967295L; // unsigned 32-bit, 2^32 -1 + private static final int DEFAULT_INITIAL_WINDOW_SIZE = 1_048_576; // 1MiB + + // Override connection idle timeout for Netty http client to reduce the frequency of "server failed to complete the + // response error". 
see https://github.com/aws/aws-sdk-java-v2/issues/1122 + private static final AttributeMap NETTY_HTTP_DEFAULTS = + AttributeMap.builder() + .put(SdkHttpConfigurationOption.CONNECTION_MAX_IDLE_TIMEOUT, Duration.ofSeconds(5)) + .build(); private final SdkEventLoopGroup sdkEventLoopGroup; - private final SdkChannelPoolMap pools; + private final SdkChannelPoolMap pools; private final NettyConfiguration configuration; private NettyNioAsyncHttpClient(DefaultBuilder builder, AttributeMap serviceDefaultsMap) { this.configuration = new NettyConfiguration(serviceDefaultsMap); Protocol protocol = serviceDefaultsMap.get(SdkHttpConfigurationOption.PROTOCOL); - long maxStreams = builder.maxHttp2Streams == null ? MAX_STREAMS_ALLOWED : builder.maxHttp2Streams; this.sdkEventLoopGroup = eventLoopGroup(builder); + + Http2Configuration http2Configuration = builder.http2Configuration; + + long maxStreams = resolveMaxHttp2Streams(builder.maxHttp2Streams, http2Configuration); + int initialWindowSize = resolveInitialWindowSize(http2Configuration); + this.pools = AwaitCloseChannelPoolMap.builder() .sdkChannelOptions(builder.sdkChannelOptions) .configuration(configuration) .protocol(protocol) .maxStreams(maxStreams) + .initialWindowSize(initialWindowSize) + .healthCheckPingPeriod(resolveHealthCheckPingPeriod(http2Configuration)) .sdkEventLoopGroup(sdkEventLoopGroup) .sslProvider(resolveSslProvider(builder)) + .proxyConfiguration(builder.proxyConfiguration) .build(); } @SdkTestInternalApi NettyNioAsyncHttpClient(SdkEventLoopGroup sdkEventLoopGroup, - SdkChannelPoolMap pools, + SdkChannelPoolMap pools, NettyConfiguration configuration) { this.sdkEventLoopGroup = sdkEventLoopGroup; this.pools = pools; @@ -106,6 +118,7 @@ private NettyNioAsyncHttpClient(DefaultBuilder builder, AttributeMap serviceDefa @Override public CompletableFuture execute(AsyncExecuteRequest request) { RequestContext ctx = createRequestContext(request); + ctx.metricCollector().reportMetric(HTTP_CLIENT_NAME, clientName()); // TODO: Can't this be done in core? 
return new NettyRequestExecutor(ctx).execute(); } @@ -113,8 +126,17 @@ public static Builder builder() { return new DefaultBuilder(); } + /** + * Create a {@link NettyNioAsyncHttpClient} with the default properties + * + * @return an {@link NettyNioAsyncHttpClient} + */ + public static SdkAsyncHttpClient create() { + return new DefaultBuilder().build(); + } + private RequestContext createRequestContext(AsyncExecuteRequest request) { - ChannelPool pool = pools.get(poolKey(request.request())); + SdkChannelPool pool = pools.get(poolKey(request.request())); return new RequestContext(pool, sdkEventLoopGroup.eventLoopGroup(), request, configuration); } @@ -139,6 +161,32 @@ private SslProvider resolveSslProvider(DefaultBuilder builder) { return SslContext.defaultClientProvider(); } + private long resolveMaxHttp2Streams(Integer topLevelValue, Http2Configuration http2Configuration) { + if (topLevelValue != null) { + return topLevelValue; + } + + if (http2Configuration == null || http2Configuration.maxStreams() == null) { + return MAX_STREAMS_ALLOWED; + } + + return Math.min(http2Configuration.maxStreams(), MAX_STREAMS_ALLOWED); + } + + private int resolveInitialWindowSize(Http2Configuration http2Configuration) { + if (http2Configuration == null || http2Configuration.initialWindowSize() == null) { + return DEFAULT_INITIAL_WINDOW_SIZE; + } + return http2Configuration.initialWindowSize(); + } + + private Duration resolveHealthCheckPingPeriod(Http2Configuration http2Configuration) { + if (http2Configuration != null) { + return http2Configuration.healthCheckPingPeriod(); + } + return null; + } + private SdkEventLoopGroup nonManagedEventLoopGroup(SdkEventLoopGroup eventLoopGroup) { return SdkEventLoopGroup.create(new NonManagedEventLoopGroup(eventLoopGroup.eventLoopGroup()), eventLoopGroup.channelFactory()); @@ -171,6 +219,11 @@ public String clientName() { return CLIENT_NAME; } + @SdkTestInternalApi + NettyConfiguration configuration() { + return configuration; + } + /** * Builder that allows configuration of the Netty NIO HTTP implementation. Use {@link #builder()} to configure and construct * a Netty HTTP client. @@ -328,6 +381,9 @@ public interface Builder extends SdkAsyncHttpClient.Builder + * If no provider is configured, the client will default to {@link SystemPropertyTlsKeyManagersProvider}. To + * disable any automatic resolution via the system properties, use {@link TlsKeyManagersProvider#noneProvider()}. + * + * @param keyManagersProvider The {@code TlsKeyManagersProvider}. + * @return The builder for method chaining. + */ + Builder tlsKeyManagersProvider(TlsKeyManagersProvider keyManagersProvider); + + /** + * Configure the {@link TlsTrustManagersProvider} that will provide the {@link javax.net.ssl.TrustManager}s to use + * when constructing the SSL context. + * + * @param trustManagersProvider The {@code TlsKeyManagersProvider}. + * @return The builder for method chaining. + */ + Builder tlsTrustManagersProvider(TlsTrustManagersProvider trustManagersProvider); + + /** + * Set the HTTP/2 specific configuration for this client. + *
    + * Note:If {@link #maxHttp2Streams(Integer)} and {@link Http2Configuration#maxStreams()} are both set, + * the value set using {@link #maxHttp2Streams(Integer)} takes precedence. + * + * @param http2Configuration The HTTP/2 configuration object. + * @return the builder for method chaining. + */ + Builder http2Configuration(Http2Configuration http2Configuration); + + /** + * Set the HTTP/2 specific configuration for this client. + *
    + * Note:If {@link #maxHttp2Streams(Integer)} and {@link Http2Configuration#maxStreams()} are both set, + * the value set using {@link #maxHttp2Streams(Integer)} takes precedence. + * + * @param http2ConfigurationBuilderConsumer The consumer of the HTTP/2 configuration builder object. + * @return the builder for method chaining. + */ + Builder http2Configuration(Consumer http2ConfigurationBuilderConsumer); } /** @@ -350,7 +459,6 @@ public interface Builder extends SdkAsyncHttpClient.Builder http2ConfigurationBuilderConsumer) { + Http2Configuration.Builder builder = Http2Configuration.builder(); + http2ConfigurationBuilderConsumer.accept(builder); + return http2Configuration(builder.build()); + } + + public void setHttp2Configuration(Http2Configuration http2Configuration) { + http2Configuration(http2Configuration); + } + @Override public SdkAsyncHttpClient buildWithDefaults(AttributeMap serviceDefaults) { return new NettyNioAsyncHttpClient(this, standardOptions.build() .merge(serviceDefaults) + .merge(NETTY_HTTP_DEFAULTS) .merge(SdkHttpConfigurationOption.GLOBAL_HTTP_DEFAULTS)); } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettySdkAsyncHttpService.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettySdkAsyncHttpService.java index 82cb745f7c23..3824b34ed024 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettySdkAsyncHttpService.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettySdkAsyncHttpService.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/ProxyConfiguration.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/ProxyConfiguration.java new file mode 100644 index 000000000000..5ee2ac1cf977 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/ProxyConfiguration.java @@ -0,0 +1,207 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * Proxy configuration for {@link NettyNioAsyncHttpClient}. This class is used to configure an HTTP proxy to be used by + * the {@link NettyNioAsyncHttpClient}. 
+ * + * @see NettyNioAsyncHttpClient.Builder#proxyConfiguration(ProxyConfiguration) + */ +@SdkPublicApi +public final class ProxyConfiguration implements ToCopyableBuilder { + private final String scheme; + private final String host; + private final int port; + private final Set nonProxyHosts; + + private ProxyConfiguration(BuilderImpl builder) { + this.scheme = builder.scheme; + this.host = builder.host; + this.port = builder.port; + this.nonProxyHosts = Collections.unmodifiableSet(builder.nonProxyHosts); + } + + /** + * @return The proxy scheme. + */ + public String scheme() { + return scheme; + } + + /** + * @return The proxy host. + */ + public String host() { + return host; + } + + /** + * @return The proxy port. + */ + public int port() { + return port; + } + + /** + * @return The set of hosts that should not be proxied. + */ + public Set nonProxyHosts() { + return nonProxyHosts; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + ProxyConfiguration that = (ProxyConfiguration) o; + + if (port != that.port) { + return false; + } + + if (scheme != null ? !scheme.equals(that.scheme) : that.scheme != null) { + return false; + } + + if (host != null ? !host.equals(that.host) : that.host != null) { + return false; + } + + return nonProxyHosts.equals(that.nonProxyHosts); + + } + + @Override + public int hashCode() { + int result = scheme != null ? scheme.hashCode() : 0; + result = 31 * result + (host != null ? host.hashCode() : 0); + result = 31 * result + port; + result = 31 * result + nonProxyHosts.hashCode(); + return result; + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + /** + * Builder for {@link ProxyConfiguration}. + */ + public interface Builder extends CopyableBuilder { + + /** + * Set the hostname of the proxy. + * @param host The proxy host. + * @return This object for method chaining. + */ + Builder host(String host); + + /** + * Set the port that the proxy expects connections on. + * @param port The proxy port. + * @return This object for method chaining. + */ + Builder port(int port); + + /** + * The HTTP scheme to use for connecting to the proxy. Valid values are {@code http} and {@code https}. + *

    + * The client defaults to {@code http} if none is given. + * + * @param scheme The proxy scheme. + * @return This object for method chaining. + */ + Builder scheme(String scheme); + + /** + * Set the set of hosts that should not be proxied. Any request whose host portion matches any of the patterns + * given in the set will be sent to the remote host directly instead of through the proxy. + * + * @param nonProxyHosts The set of hosts that should not be proxied. + * @return This object for method chaining. + */ + Builder nonProxyHosts(Set nonProxyHosts); + } + + private static final class BuilderImpl implements Builder { + private String scheme; + private String host; + private int port; + private Set nonProxyHosts = Collections.emptySet(); + + private BuilderImpl() { + } + + private BuilderImpl(ProxyConfiguration proxyConfiguration) { + this.scheme = proxyConfiguration.scheme; + this.host = proxyConfiguration.host; + this.port = proxyConfiguration.port; + this.nonProxyHosts = new HashSet<>(proxyConfiguration.nonProxyHosts); + } + + @Override + public Builder scheme(String scheme) { + this.scheme = scheme; + return this; + } + + @Override + public Builder host(String host) { + this.host = host; + return this; + } + + @Override + public Builder port(int port) { + this.port = port; + return this; + } + + @Override + public Builder nonProxyHosts(Set nonProxyHosts) { + if (nonProxyHosts != null) { + this.nonProxyHosts = new HashSet<>(nonProxyHosts); + } else { + this.nonProxyHosts = Collections.emptySet(); + } + return this; + } + + @Override + public ProxyConfiguration build() { + return new ProxyConfiguration(this); + } + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java index df6bc1fa6db2..870853c4de69 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java index 9e8232729228..9cadab45bef4 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
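The ProxyConfiguration class added above is a plain builder-style option on NettyNioAsyncHttpClient, wired in through the proxyConfiguration(...) builder method referenced in its javadoc. A minimal usage sketch (the host, port, and patterns below are placeholders, not values from this change):

    import java.util.Arrays;
    import java.util.HashSet;
    import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
    import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
    import software.amazon.awssdk.http.nio.netty.ProxyConfiguration;

    public class ProxyConfigExample {
        public static void main(String[] args) {
            ProxyConfiguration proxy = ProxyConfiguration.builder()
                    .scheme("http")                            // "http" is also the documented default
                    .host("proxy.example.com")                 // placeholder proxy endpoint
                    .port(8080)
                    .nonProxyHosts(new HashSet<>(Arrays.asList("localhost", ".*\\.internal\\.example\\.com")))
                    .build();

            SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder()
                    .proxyConfiguration(proxy)
                    .build();

            client.close();
        }
    }

Because shouldUseProxyForHost (further down in AwaitCloseChannelPoolMap) evaluates each nonProxyHosts entry with String#matches, the entries are regular expressions rather than simple wildcards, so literal dots should be escaped as in the sketch.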
@@ -19,28 +19,26 @@ import io.netty.bootstrap.Bootstrap; import io.netty.channel.Channel; -import io.netty.channel.ChannelOption; import io.netty.channel.pool.ChannelPool; -import io.netty.handler.codec.http2.Http2SecurityUtil; +import io.netty.channel.pool.ChannelPoolHandler; import io.netty.handler.ssl.SslContext; -import io.netty.handler.ssl.SslContextBuilder; import io.netty.handler.ssl.SslProvider; -import io.netty.handler.ssl.SupportedCipherSuiteFilter; -import io.netty.handler.ssl.util.InsecureTrustManagerFactory; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.Promise; import java.net.URI; +import java.net.URISyntaxException; +import java.time.Duration; import java.util.Collection; +import java.util.Map; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; -import javax.net.ssl.SSLException; -import javax.net.ssl.TrustManagerFactory; +import java.util.function.Function; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.ProxyConfiguration; import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; import software.amazon.awssdk.http.nio.netty.internal.http2.HttpOrHttp2ChannelPool; import software.amazon.awssdk.utils.Logger; @@ -49,25 +47,67 @@ * Implementation of {@link SdkChannelPoolMap} that awaits channel pools to be closed upon closing. */ @SdkInternalApi -public final class AwaitCloseChannelPoolMap extends SdkChannelPoolMap { +public final class AwaitCloseChannelPoolMap extends SdkChannelPoolMap { private static final Logger log = Logger.loggerFor(AwaitCloseChannelPoolMap.class); - private final SdkChannelOptions sdkChannelOptions; - private final SdkEventLoopGroup sdkEventLoopGroup; + private static final ChannelPoolHandler NOOP_HANDLER = new ChannelPoolHandler() { + @Override + public void channelReleased(Channel ch) throws Exception { + } + + @Override + public void channelAcquired(Channel ch) throws Exception { + } + + @Override + public void channelCreated(Channel ch) throws Exception { + } + }; + + // IMPORTANT: If the default bootstrap provider is changed, ensure that the new implementation is compliant with + // DNS resolver testing in BootstrapProviderTest, specifically that no caching of hostname lookups is taking place. 
+ private static final Function DEFAULT_BOOTSTRAP_PROVIDER = + b -> new BootstrapProvider(b.sdkEventLoopGroup, b.configuration, b.sdkChannelOptions); + + private final Map shouldProxyForHostCache = new ConcurrentHashMap<>(); + + private final NettyConfiguration configuration; private final Protocol protocol; private final long maxStreams; + private final Duration healthCheckPingPeriod; + private final int initialWindowSize; private final SslProvider sslProvider; + private final ProxyConfiguration proxyConfiguration; + private final BootstrapProvider bootstrapProvider; + private final SslContextProvider sslContextProvider; - private AwaitCloseChannelPoolMap(Builder builder) { - this.sdkChannelOptions = builder.sdkChannelOptions; - this.sdkEventLoopGroup = builder.sdkEventLoopGroup; + private AwaitCloseChannelPoolMap(Builder builder, Function createBootStrapProvider) { this.configuration = builder.configuration; this.protocol = builder.protocol; this.maxStreams = builder.maxStreams; + this.healthCheckPingPeriod = builder.healthCheckPingPeriod; + this.initialWindowSize = builder.initialWindowSize; this.sslProvider = builder.sslProvider; + this.proxyConfiguration = builder.proxyConfiguration; + this.bootstrapProvider = createBootStrapProvider.apply(builder); + this.sslContextProvider = new SslContextProvider(configuration, protocol, sslProvider); + } + + private AwaitCloseChannelPoolMap(Builder builder) { + this(builder, DEFAULT_BOOTSTRAP_PROVIDER); + } + + @SdkTestInternalApi + AwaitCloseChannelPoolMap(Builder builder, + Map shouldProxyForHostCache, + BootstrapProvider bootstrapProvider) { + this(builder, bootstrapProvider == null ? DEFAULT_BOOTSTRAP_PROVIDER : b -> bootstrapProvider); + + if (shouldProxyForHostCache != null) { + this.shouldProxyForHostCache.putAll(shouldProxyForHostCache); + } } public static Builder builder() { @@ -76,24 +116,37 @@ public static Builder builder() { @Override protected SimpleChannelPoolAwareChannelPool newPool(URI key) { - SslContext sslContext = sslContext(key.getScheme()); - Bootstrap bootstrap = - new Bootstrap() - .group(sdkEventLoopGroup.eventLoopGroup()) - .channelFactory(sdkEventLoopGroup.channelFactory()) - .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, configuration.connectTimeoutMillis()) - // TODO run some performance tests with and without this. - .remoteAddress(key.getHost(), key.getPort()); - sdkChannelOptions.channelOptions().forEach(bootstrap::option); + SslContext sslContext = needSslContext(key) ? 
sslContextProvider.sslContext() : null; + + Bootstrap bootstrap = createBootstrap(key); AtomicReference channelPoolRef = new AtomicReference<>(); - ChannelPipelineInitializer handler = - new ChannelPipelineInitializer(protocol, sslContext, maxStreams, channelPoolRef, configuration, key); - BetterSimpleChannelPool simpleChannelPool = new BetterSimpleChannelPool(bootstrap, handler); + ChannelPipelineInitializer pipelineInitializer = new ChannelPipelineInitializer(protocol, + sslContext, + sslProvider, + maxStreams, + initialWindowSize, + healthCheckPingPeriod, + channelPoolRef, + configuration, + key); + + BetterSimpleChannelPool tcpChannelPool; + ChannelPool baseChannelPool; + if (shouldUseProxyForHost(key)) { + tcpChannelPool = new BetterSimpleChannelPool(bootstrap, NOOP_HANDLER); + baseChannelPool = new Http1TunnelConnectionPool(bootstrap.config().group().next(), tcpChannelPool, + sslContext, proxyAddress(key), key, pipelineInitializer); + } else { + tcpChannelPool = new BetterSimpleChannelPool(bootstrap, pipelineInitializer); + baseChannelPool = tcpChannelPool; + } + + SdkChannelPool wrappedPool = wrapBaseChannelPool(bootstrap, baseChannelPool); - channelPoolRef.set(wrapSimpleChannelPool(bootstrap, simpleChannelPool)); - return new SimpleChannelPoolAwareChannelPool(simpleChannelPool, channelPoolRef.get()); + channelPoolRef.set(wrappedPool); + return new SimpleChannelPoolAwareChannelPool(wrappedPool, tcpChannelPool); } @Override @@ -110,7 +163,7 @@ public void close() { try { CompletableFuture.allOf(channelPools.stream() - .map(pool -> pool.underlyingSimpleChannelPool.closeFuture()) + .map(pool -> pool.underlyingSimpleChannelPool().closeFuture()) .toArray(CompletableFuture[]::new)) .get(CHANNEL_POOL_CLOSE_TIMEOUT_SECONDS, TimeUnit.SECONDS); } catch (InterruptedException e) { @@ -121,93 +174,90 @@ public void close() { } } - private ChannelPool wrapSimpleChannelPool(Bootstrap bootstrap, ChannelPool channelPool) { - - // Wrap the channel pool such that the ChannelAttributeKey.CLOSE_ON_RELEASE flag is honored. - channelPool = new HonorCloseOnReleaseChannelPool(channelPool); - - // Wrap the channel pool such that HTTP 2 channels won't be released to the underlying pool while they're still in use. - channelPool = new HttpOrHttp2ChannelPool(channelPool, - bootstrap.config().group(), - configuration.maxConnections(), - configuration); + private Bootstrap createBootstrap(URI poolKey) { + String host = bootstrapHost(poolKey); + int port = bootstrapPort(poolKey); + return bootstrapProvider.createBootstrap(host, port); + } - // Wrap the channel pool such that we remove request-specific handlers with each request. - channelPool = new HandlerRemovingChannelPool(channelPool); + private boolean shouldUseProxyForHost(URI remoteAddr) { + if (proxyConfiguration == null) { + return false; + } - // Wrap the channel pool such that an individual channel can only be released to the underlying pool once. - channelPool = new ReleaseOnceChannelPool(channelPool); - // Wrap the channel pool to guarantee all channels checked out are healthy, and all unhealthy channels checked in are - // closed. 
- channelPool = new HealthCheckedChannelPool(bootstrap.config().group(), configuration, channelPool); + return shouldProxyForHostCache.computeIfAbsent(remoteAddr, (uri) -> + proxyConfiguration.nonProxyHosts().stream().noneMatch(h -> uri.getHost().matches(h)) + ); + } - // Wrap the channel pool such that if the Promise given to acquire(Promise) is done when the channel is acquired - // from the underlying pool, the channel is closed and released. - channelPool = new CancellableAcquireChannelPool(bootstrap.config().group().next(), channelPool); + private String bootstrapHost(URI remoteHost) { + if (shouldUseProxyForHost(remoteHost)) { + return proxyConfiguration.host(); + } + return remoteHost.getHost(); + } - return channelPool; + private int bootstrapPort(URI remoteHost) { + if (shouldUseProxyForHost(remoteHost)) { + return proxyConfiguration.port(); + } + return remoteHost.getPort(); } - private SslContext sslContext(String protocol) { - if (!protocol.equalsIgnoreCase("https")) { + private URI proxyAddress(URI remoteHost) { + if (!shouldUseProxyForHost(remoteHost)) { return null; } + + String scheme = proxyConfiguration.scheme(); + if (scheme == null) { + scheme = "http"; + } + try { - return SslContextBuilder.forClient() - .sslProvider(sslProvider) - .ciphers(Http2SecurityUtil.CIPHERS, SupportedCipherSuiteFilter.INSTANCE) - .trustManager(getTrustManager()) - .build(); - } catch (SSLException e) { - throw new RuntimeException(e); + return new URI(scheme, null, proxyConfiguration.host(), proxyConfiguration.port(), null, null, + null); + } catch (URISyntaxException e) { + throw new RuntimeException("Unable to construct proxy URI", e); } } - private TrustManagerFactory getTrustManager() { - return configuration.trustAllCertificates() ? InsecureTrustManagerFactory.INSTANCE : null; - } + private SdkChannelPool wrapBaseChannelPool(Bootstrap bootstrap, ChannelPool channelPool) { - static final class SimpleChannelPoolAwareChannelPool implements ChannelPool { - private final BetterSimpleChannelPool underlyingSimpleChannelPool; - private final ChannelPool actualChannelPool; + // Wrap the channel pool such that the ChannelAttributeKey.CLOSE_ON_RELEASE flag is honored. + channelPool = new HonorCloseOnReleaseChannelPool(channelPool); - private SimpleChannelPoolAwareChannelPool(BetterSimpleChannelPool underlyingSimpleChannelPool, - ChannelPool actualChannelPool) { - this.underlyingSimpleChannelPool = underlyingSimpleChannelPool; - this.actualChannelPool = actualChannelPool; - } + // Wrap the channel pool such that HTTP 2 channels won't be released to the underlying pool while they're still in use. + SdkChannelPool sdkChannelPool = new HttpOrHttp2ChannelPool(channelPool, + bootstrap.config().group(), + configuration.maxConnections(), + configuration); - @Override - public Future acquire() { - return actualChannelPool.acquire(); - } + // Wrap the channel pool such that we remove request-specific handlers with each request. + sdkChannelPool = new HandlerRemovingChannelPool(sdkChannelPool); - @Override - public Future acquire(Promise promise) { - return actualChannelPool.acquire(promise); - } + // Wrap the channel pool such that an individual channel can only be released to the underlying pool once. + sdkChannelPool = new ReleaseOnceChannelPool(sdkChannelPool); - @Override - public Future release(Channel channel) { - return actualChannelPool.release(channel); - } + // Wrap the channel pool to guarantee all channels checked out are healthy, and all unhealthy channels checked in are + // closed. 
+ sdkChannelPool = new HealthCheckedChannelPool(bootstrap.config().group(), configuration, sdkChannelPool); - @Override - public Future release(Channel channel, Promise promise) { - return actualChannelPool.release(channel, promise); - } + // Wrap the channel pool such that if the Promise given to acquire(Promise) is done when the channel is acquired + // from the underlying pool, the channel is closed and released. + sdkChannelPool = new CancellableAcquireChannelPool(bootstrap.config().group().next(), sdkChannelPool); - @Override - public void close() { - actualChannelPool.close(); - } + return sdkChannelPool; + } - @SdkTestInternalApi - BetterSimpleChannelPool underlyingSimpleChannelPool() { - return underlyingSimpleChannelPool; - } + private boolean needSslContext(URI targetAddress) { + URI proxyAddress = proxyAddress(targetAddress); + boolean needContext = targetAddress.getScheme().equalsIgnoreCase("https") + || proxyAddress != null && proxyAddress.getScheme().equalsIgnoreCase("https"); + + return needContext; } public static class Builder { @@ -217,7 +267,10 @@ public static class Builder { private NettyConfiguration configuration; private Protocol protocol; private long maxStreams; + private int initialWindowSize; + private Duration healthCheckPingPeriod; private SslProvider sslProvider; + private ProxyConfiguration proxyConfiguration; private Builder() { } @@ -247,11 +300,26 @@ public Builder maxStreams(long maxStreams) { return this; } + public Builder initialWindowSize(int initialWindowSize) { + this.initialWindowSize = initialWindowSize; + return this; + } + + public Builder healthCheckPingPeriod(Duration healthCheckPingPeriod) { + this.healthCheckPingPeriod = healthCheckPingPeriod; + return this; + } + public Builder sslProvider(SslProvider sslProvider) { this.sslProvider = sslProvider; return this; } + public Builder proxyConfiguration(ProxyConfiguration proxyConfiguration) { + this.proxyConfiguration = proxyConfiguration; + return this; + } + public AwaitCloseChannelPoolMap build() { return new AwaitCloseChannelPoolMap(this); } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/BetterSimpleChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/BetterSimpleChannelPool.java index 2770b3c1de67..3c1ae77d99ca 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/BetterSimpleChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/BetterSimpleChannelPool.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProvider.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProvider.java new file mode 100644 index 000000000000..03b18ccd1acb --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProvider.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import io.netty.bootstrap.Bootstrap; +import io.netty.channel.ChannelOption; +import java.net.InetSocketAddress; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; + +/** + * The primary purpose of this Bootstrap provider is to ensure that all Bootstraps created by it are 'unresolved' + * InetSocketAddress. This is to prevent Netty from caching the resolved address of a host and then re-using it in + * subsequent connection attempts, and instead deferring to the JVM to handle address resolution and caching. + */ +@SdkInternalApi +public class BootstrapProvider { + private final SdkEventLoopGroup sdkEventLoopGroup; + private final NettyConfiguration nettyConfiguration; + private final SdkChannelOptions sdkChannelOptions; + + + BootstrapProvider(SdkEventLoopGroup sdkEventLoopGroup, + NettyConfiguration nettyConfiguration, + SdkChannelOptions sdkChannelOptions) { + this.sdkEventLoopGroup = sdkEventLoopGroup; + this.nettyConfiguration = nettyConfiguration; + this.sdkChannelOptions = sdkChannelOptions; + } + + /** + * Creates a Bootstrap for a specific host and port with an unresolved InetSocketAddress as the remoteAddress. + * @param host The unresolved remote hostname + * @param port The remote port + * @return A newly created Bootstrap using the configuration this provider was initialized with, and having an + * unresolved remote address. + */ + public Bootstrap createBootstrap(String host, int port) { + Bootstrap bootstrap = + new Bootstrap() + .group(sdkEventLoopGroup.eventLoopGroup()) + .channelFactory(sdkEventLoopGroup.channelFactory()) + .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, nettyConfiguration.connectTimeoutMillis()) + .remoteAddress(InetSocketAddress.createUnresolved(host, port)); + sdkChannelOptions.channelOptions().forEach(bootstrap::option); + + return bootstrap; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPool.java index 2ed939ef7b51..c8fe1cd8b739 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPool.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
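The key detail in BootstrapProvider above is InetSocketAddress.createUnresolved(host, port): the Bootstrap carries only the hostname, so each new connection goes back through the JVM's resolver instead of reusing an address Netty resolved once and cached. A small JDK-only sketch of the difference (example.com is a placeholder host):

    import java.net.InetSocketAddress;

    public class UnresolvedAddressExample {
        public static void main(String[] args) {
            // Resolves the hostname eagerly, at construction time.
            InetSocketAddress eager = new InetSocketAddress("example.com", 443);

            // Carries only host + port; resolution is deferred until connect time.
            InetSocketAddress deferred = InetSocketAddress.createUnresolved("example.com", 443);

            System.out.println(eager.isUnresolved());    // false (unless the lookup failed)
            System.out.println(deferred.isUnresolved()); // true
        }
    }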
@@ -20,7 +20,9 @@ import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; +import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollector; /** * Simple decorator {@link ChannelPool} that attempts to complete the promise @@ -29,11 +31,11 @@ * is closed then released back to the delegate. */ @SdkInternalApi -public final class CancellableAcquireChannelPool implements ChannelPool { +public final class CancellableAcquireChannelPool implements SdkChannelPool { private final EventExecutor executor; - private final ChannelPool delegatePool; + private final SdkChannelPool delegatePool; - public CancellableAcquireChannelPool(EventExecutor executor, ChannelPool delegatePool) { + public CancellableAcquireChannelPool(EventExecutor executor, SdkChannelPool delegatePool) { this.executor = executor; this.delegatePool = delegatePool; } @@ -73,4 +75,9 @@ public Future release(Channel channel, Promise promise) { public void close() { delegatePool.close(); } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + return delegatePool.collectChannelPoolMetrics(metrics); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelAttributeKey.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelAttributeKey.java index 65468364a67c..27c180ac5870 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelAttributeKey.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelAttributeKey.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -16,13 +16,18 @@ package software.amazon.awssdk.http.nio.netty.internal; import io.netty.channel.Channel; +import io.netty.handler.codec.http.LastHttpContent; +import io.netty.handler.codec.http2.Http2Connection; +import io.netty.handler.codec.http2.Http2FrameStream; import io.netty.util.AttributeKey; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; import org.reactivestreams.Subscriber; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.Protocol; -import software.amazon.awssdk.http.nio.netty.internal.http2.MultiplexedChannelRecord; +import software.amazon.awssdk.http.nio.netty.internal.http2.Http2MultiplexedChannelPool; +import software.amazon.awssdk.http.nio.netty.internal.http2.PingTracker; +import software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils; /** * Keys for attributes attached via {@link io.netty.channel.Channel#attr(AttributeKey)}. @@ -33,48 +38,78 @@ public final class ChannelAttributeKey { /** * Future that when a protocol (http/1.1 or h2) has been selected. */ - public static final AttributeKey> PROTOCOL_FUTURE = AttributeKey.newInstance( + public static final AttributeKey> PROTOCOL_FUTURE = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.protocolFuture"); /** - * Reference to {@link MultiplexedChannelRecord} which stores information about leased streams for a multiplexed connection. 
+ * Reference to {@link Http2MultiplexedChannelPool} which stores information about leased streams for a multiplexed + * connection. */ - public static final AttributeKey CHANNEL_POOL_RECORD = AttributeKey.newInstance( - "aws.http.nio.netty.async.channelPoolRecord"); + public static final AttributeKey HTTP2_MULTIPLEXED_CHANNEL_POOL = + NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.http2MultiplexedChannelPool"); + + public static final AttributeKey PING_TRACKER = + NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.h2.pingTracker"); + + public static final AttributeKey HTTP2_CONNECTION = + NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.http2Connection"); + + public static final AttributeKey HTTP2_INITIAL_WINDOW_SIZE = + NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.http2InitialWindowSize"); /** * Value of the MAX_CONCURRENT_STREAMS from the server's SETTING frame. */ - public static final AttributeKey MAX_CONCURRENT_STREAMS = AttributeKey.newInstance( + public static final AttributeKey MAX_CONCURRENT_STREAMS = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.maxConcurrentStreams"); + /** + * The {@link Http2FrameStream} associated with this stream channel. This is added to stream channels when they are created, + * before they are fully initialized. + */ + public static final AttributeKey HTTP2_FRAME_STREAM = NettyUtils.getOrCreateAttributeKey( + "aws.http.nio.netty.async.http2FrameStream"); + + /** + * {@link AttributeKey} to keep track of whether we should close the connection after this request + * has completed. + */ + static final AttributeKey KEEP_ALIVE = NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.keepAlive"); + /** * Attribute key for {@link RequestContext}. */ - static final AttributeKey REQUEST_CONTEXT_KEY = AttributeKey.newInstance( + static final AttributeKey REQUEST_CONTEXT_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.requestContext"); - static final AttributeKey> SUBSCRIBER_KEY = AttributeKey.newInstance( + static final AttributeKey> SUBSCRIBER_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.subscriber"); - static final AttributeKey RESPONSE_COMPLETE_KEY = AttributeKey.newInstance( + static final AttributeKey RESPONSE_COMPLETE_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.responseComplete"); - static final AttributeKey> EXECUTE_FUTURE_KEY = AttributeKey.newInstance( + /** + * {@link AttributeKey} to keep track of whether we have received the {@link LastHttpContent}. + */ + static final AttributeKey LAST_HTTP_CONTENT_RECEIVED_KEY = NettyUtils.getOrCreateAttributeKey( + "aws.http.nio.netty.async.lastHttpContentReceived"); + + static final AttributeKey> EXECUTE_FUTURE_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.executeFuture"); - static final AttributeKey EXECUTION_ID_KEY = AttributeKey.newInstance( + static final AttributeKey EXECUTION_ID_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.executionId"); /** * Whether the channel is still in use */ - static final AttributeKey IN_USE = AttributeKey.newInstance("aws.http.nio.netty.async.inUse"); + static final AttributeKey IN_USE = NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.inUse"); /** * Whether the channel should be closed once it is released. 
*/ - static final AttributeKey CLOSE_ON_RELEASE = AttributeKey.newInstance("aws.http.nio.netty.async.closeOnRelease"); + static final AttributeKey CLOSE_ON_RELEASE = NettyUtils.getOrCreateAttributeKey( + "aws.http.nio.netty.async.closeOnRelease"); private ChannelAttributeKey() { } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializer.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializer.java index 399471aacd04..2893e6016971 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializer.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializer.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,29 +15,40 @@ package software.amazon.awssdk.http.nio.netty.internal; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_CONNECTION; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_INITIAL_WINDOW_SIZE; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; +import static software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration.HTTP2_CONNECTION_PING_TIMEOUT_SECONDS; +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.newSslHandler; +import static software.amazon.awssdk.utils.NumericUtils.saturatedCast; import static software.amazon.awssdk.utils.StringUtils.lowerCase; +import io.netty.buffer.UnpooledByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; import io.netty.channel.ChannelPipeline; import io.netty.channel.pool.AbstractChannelPoolHandler; import io.netty.channel.pool.ChannelPool; import io.netty.handler.codec.http.HttpClientCodec; -import io.netty.handler.codec.http2.ForkedHttp2MultiplexCodecBuilder; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; import io.netty.handler.codec.http2.Http2FrameLogger; +import io.netty.handler.codec.http2.Http2MultiplexHandler; import io.netty.handler.codec.http2.Http2Settings; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslHandler; +import io.netty.handler.ssl.SslProvider; import java.net.URI; +import java.time.Duration; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicReference; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLParameters; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.http2.Http2GoAwayEventListener; +import software.amazon.awssdk.http.nio.netty.internal.http2.Http2PingHandler; import software.amazon.awssdk.http.nio.netty.internal.http2.Http2SettingsFrameHandler; /** @@ -47,20 +58,29 @@ public final class ChannelPipelineInitializer extends AbstractChannelPoolHandler { private final Protocol protocol; private final SslContext sslCtx; + 
private final SslProvider sslProvider; private final long clientMaxStreams; + private final int clientInitialWindowSize; + private final Duration healthCheckPingPeriod; private final AtomicReference channelPoolRef; private final NettyConfiguration configuration; private final URI poolKey; public ChannelPipelineInitializer(Protocol protocol, SslContext sslCtx, + SslProvider sslProvider, long clientMaxStreams, + int clientInitialWindowSize, + Duration healthCheckPingPeriod, AtomicReference channelPoolRef, NettyConfiguration configuration, URI poolKey) { this.protocol = protocol; this.sslCtx = sslCtx; + this.sslProvider = sslProvider; this.clientMaxStreams = clientMaxStreams; + this.clientInitialWindowSize = clientInitialWindowSize; + this.healthCheckPingPeriod = healthCheckPingPeriod; this.channelPoolRef = channelPoolRef; this.configuration = configuration; this.poolKey = poolKey; @@ -72,13 +92,16 @@ public void channelCreated(Channel ch) { ChannelPipeline pipeline = ch.pipeline(); if (sslCtx != null) { - // Need to provide host and port to enable SNI - // https://github.com/netty/netty/issues/3801#issuecomment-104274440 - SslHandler sslHandler = sslCtx.newHandler(ch.alloc(), poolKey.getHost(), poolKey.getPort()); - configureSslEngine(sslHandler.engine()); + SslHandler sslHandler = newSslHandler(sslCtx, ch.alloc(), poolKey.getHost(), poolKey.getPort()); pipeline.addLast(sslHandler); pipeline.addLast(SslCloseCompletionEventHandler.getInstance()); + + // Use unpooled allocator to avoid increased heap memory usage from Netty 4.1.43. + // See https://github.com/netty/netty/issues/9768 + if (sslProvider == SslProvider.JDK) { + ch.config().setOption(ChannelOption.ALLOCATOR, UnpooledByteBufAllocator.DEFAULT); + } } if (protocol == Protocol.HTTP2) { @@ -96,35 +119,44 @@ public void channelCreated(Channel ch) { } pipeline.addLast(FutureCancelHandler.getInstance()); - pipeline.addLast(UnusedChannelExceptionHandler.getInstance()); - pipeline.addLast(new LoggingHandler(LogLevel.DEBUG)); - } - /** - * Enable HostName verification. - * - * See https://netty.io/4.0/api/io/netty/handler/ssl/SslContext.html#newHandler-io.netty.buffer.ByteBufAllocator-java.lang - * .String-int- - * - * @param sslEngine the sslEngine to configure - */ - private void configureSslEngine(SSLEngine sslEngine) { - SSLParameters sslParameters = sslEngine.getSSLParameters(); - sslParameters.setEndpointIdentificationAlgorithm("HTTPS"); - sslEngine.setSSLParameters(sslParameters); + // Only add it for h1 channel because it does not apply to + // h2 connection channel. It will be attached + // to stream channels when they are created. 
+ if (protocol == Protocol.HTTP1_1) { + pipeline.addLast(UnusedChannelExceptionHandler.getInstance()); + } + + pipeline.addLast(new LoggingHandler(LogLevel.DEBUG)); } private void configureHttp2(Channel ch, ChannelPipeline pipeline) { - ForkedHttp2MultiplexCodecBuilder codecBuilder = ForkedHttp2MultiplexCodecBuilder - .forClient(new NoOpChannelInitializer()) - .headerSensitivityDetector((name, value) -> lowerCase(name.toString()).equals("authorization")) - .initialSettings(Http2Settings.defaultSettings().initialWindowSize(1_048_576)); - - codecBuilder.frameLogger(new Http2FrameLogger(LogLevel.DEBUG)); - - pipeline.addLast(codecBuilder.build()); - + // Using Http2FrameCodecBuilder and Http2MultiplexHandler based on 4.1.37 release notes + // https://netty.io/news/2019/06/28/4-1-37-Final.html + Http2FrameCodec codec = + Http2FrameCodecBuilder.forClient() + .headerSensitivityDetector((name, value) -> lowerCase(name.toString()).equals("authorization")) + .initialSettings(Http2Settings.defaultSettings().initialWindowSize(clientInitialWindowSize)) + .frameLogger(new Http2FrameLogger(LogLevel.DEBUG)) + .build(); + + // Connection listeners have higher priority than handlers, in the eyes of the Http2FrameCodec. The Http2FrameCodec will + // close any connections when a GOAWAY is received, but we'd like to send a "GOAWAY happened" exception instead of just + // closing the connection. Because of this, we use a go-away listener instead of a handler, so that we can send the + // exception before the Http2FrameCodec closes the connection itself. + codec.connection().addListener(new Http2GoAwayEventListener(ch)); + + pipeline.addLast(codec); + ch.attr(HTTP2_CONNECTION).set(codec.connection()); + + ch.attr(HTTP2_INITIAL_WINDOW_SIZE).set(clientInitialWindowSize); + pipeline.addLast(new Http2MultiplexHandler(new NoOpChannelInitializer())); pipeline.addLast(new Http2SettingsFrameHandler(ch, clientMaxStreams, channelPoolRef)); + if (healthCheckPingPeriod == null) { + pipeline.addLast(new Http2PingHandler(HTTP2_CONNECTION_PING_TIMEOUT_SECONDS * 1_000)); + } else if (healthCheckPingPeriod.toMillis() > 0) { + pipeline.addLast(new Http2PingHandler(saturatedCast(healthCheckPingPeriod.toMillis()))); + } } private void configureHttp11(Channel ch, ChannelPipeline pipeline) { @@ -137,7 +169,6 @@ private static class NoOpChannelInitializer extends ChannelInitializer protected void initChannel(Channel ch) { } } - } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/DelegatingEventLoopGroup.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/DelegatingEventLoopGroup.java index 314c809d730b..ced76358c740 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/DelegatingEventLoopGroup.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/DelegatingEventLoopGroup.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
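configureHttp2 above now takes its initial window size and PING period from the values plumbed through AwaitCloseChannelPoolMap.Builder rather than hard-coding a 1 MiB window, and a non-null period of zero or less skips the Http2PingHandler entirely. A hedged sketch of how a caller might set these through the http2Configuration(...) consumer shown earlier; maxStreams() is referenced in the builder javadoc above, while initialWindowSize(...) and healthCheckPingPeriod(...) are assumed setter names inferred from the fields added to the pool-map builder:

    import java.time.Duration;
    import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
    import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;

    public class Http2ConfigExample {
        public static void main(String[] args) {
            SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder()
                    .http2Configuration(h2 -> h2
                            .maxStreams(100L)                              // cap concurrent streams per connection
                            .initialWindowSize(1_048_576)                  // assumed setter; the previously hard-coded value
                            .healthCheckPingPeriod(Duration.ofSeconds(5))) // assumed setter; non-positive disables PINGs
                    .build();

            client.close();
        }
    }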
diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandler.java index 1e89c3a7cb73..3e122537c438 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelledException.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelledException.java index a10643a48b5b..809c8a2fa940 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelledException.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelledException.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPool.java index 96a7bae7a959..680b594a8cd7 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPool.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
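The pools touched below (HandlerRemovingChannelPool, HealthCheckedChannelPool) and those above (CancellableAcquireChannelPool, HttpOrHttp2ChannelPool, IdleConnectionCountingChannelPool) now implement SdkChannelPool rather than Netty's ChannelPool and forward collectChannelPoolMetrics to their delegate. The interface itself is not included in this excerpt; judging from those call sites it is presumably along these lines (a sketch, not the actual definition):

    import io.netty.channel.pool.ChannelPool;
    import java.util.concurrent.CompletableFuture;
    import software.amazon.awssdk.annotations.SdkInternalApi;
    import software.amazon.awssdk.metrics.MetricCollector;

    /**
     * Assumed shape of the internal SdkChannelPool: a Netty ChannelPool that can also
     * report pool metrics (e.g. HttpMetric.AVAILABLE_CONCURRENCY) into a MetricCollector.
     */
    @SdkInternalApi
    public interface SdkChannelPool extends ChannelPool {
        CompletableFuture<Void> collectChannelPoolMetrics(MetricCollector metrics);
    }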
@@ -18,26 +18,28 @@ import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.IN_USE; import static software.amazon.awssdk.http.nio.netty.internal.utils.ChannelUtils.removeIfExists; -import com.typesafe.netty.http.HttpStreamsClientHandler; import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; -import io.netty.channel.pool.ChannelPool; import io.netty.handler.timeout.ReadTimeoutHandler; import io.netty.handler.timeout.WriteTimeoutHandler; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; +import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.nio.netty.internal.http2.FlushOnReadHandler; +import software.amazon.awssdk.http.nio.netty.internal.nrs.HttpStreamsClientHandler; +import software.amazon.awssdk.metrics.MetricCollector; /** * Removes any per request {@link ChannelHandler} from the pipeline prior to releasing * it to the pool. */ @SdkInternalApi -public class HandlerRemovingChannelPool implements ChannelPool { +public class HandlerRemovingChannelPool implements SdkChannelPool { - private final ChannelPool delegate; + private final SdkChannelPool delegate; - public HandlerRemovingChannelPool(ChannelPool delegate) { + public HandlerRemovingChannelPool(SdkChannelPool delegate) { this.delegate = delegate; } @@ -78,9 +80,16 @@ private void removePerRequestHandlers(Channel channel) { if (channel.isOpen() || channel.isRegistered()) { removeIfExists(channel.pipeline(), HttpStreamsClientHandler.class, + LastHttpContentHandler.class, + FlushOnReadHandler.class, ResponseHandler.class, ReadTimeoutHandler.class, WriteTimeoutHandler.class); } } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + return delegate.collectChannelPoolMetrics(metrics); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPool.java index 58945edef8b7..dd8ca3cf53fa 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPool.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,15 +15,19 @@ package software.amazon.awssdk.http.nio.netty.internal; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.KEEP_ALIVE; + import io.netty.channel.Channel; import io.netty.channel.EventLoopGroup; import io.netty.channel.pool.ChannelPool; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; import io.netty.util.concurrent.ScheduledFuture; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollector; /** * An implementation of {@link ChannelPool} that validates the health of its connections. 
@@ -38,14 +42,14 @@ * {@link NettyConfiguration#connectionAcquireTimeoutMillis()} timeout is reached. */ @SdkInternalApi -public class HealthCheckedChannelPool implements ChannelPool { +public class HealthCheckedChannelPool implements SdkChannelPool { private final EventLoopGroup eventLoopGroup; private final int acquireTimeoutMillis; - private final ChannelPool delegate; + private final SdkChannelPool delegate; public HealthCheckedChannelPool(EventLoopGroup eventLoopGroup, NettyConfiguration configuration, - ChannelPool delegate) { + SdkChannelPool delegate) { this.eventLoopGroup = eventLoopGroup; this.acquireTimeoutMillis = configuration.connectionAcquireTimeoutMillis(); this.delegate = delegate; @@ -159,6 +163,17 @@ private void closeIfUnhealthy(Channel channel) { * Determine whether the provided channel is 'healthy' enough to use. */ private boolean isHealthy(Channel channel) { + // There might be cases where the channel is not reusable but still active at the moment + // See https://github.com/aws/aws-sdk-java-v2/issues/1380 + if (channel.attr(KEEP_ALIVE).get() != null && !channel.attr(KEEP_ALIVE).get()) { + return false; + } + return channel.isActive(); } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + return delegate.collectChannelPoolMetrics(metrics); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HonorCloseOnReleaseChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HonorCloseOnReleaseChannelPool.java index 0d7169e8a173..6fea4f0ea068 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HonorCloseOnReleaseChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HonorCloseOnReleaseChannelPool.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java new file mode 100644 index 000000000000..0d9b3703eb33 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java @@ -0,0 +1,165 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.newSslHandler; + +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.EventLoop; +import io.netty.channel.pool.ChannelPool; +import io.netty.channel.pool.ChannelPoolHandler; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslHandler; +import io.netty.util.AttributeKey; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import java.net.URI; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.StringUtils; + +/** + * Connection pool that knows how to establish a tunnel using the HTTP CONNECT method. + */ +@SdkInternalApi +public class Http1TunnelConnectionPool implements ChannelPool { + static final AttributeKey TUNNEL_ESTABLISHED_KEY = NettyUtils.getOrCreateAttributeKey( + "aws.http.nio.netty.async.Http1TunnelConnectionPool.tunnelEstablished"); + + private static final Logger log = Logger.loggerFor(Http1TunnelConnectionPool.class); + + private final EventLoop eventLoop; + private final ChannelPool delegate; + private final SslContext sslContext; + private final URI proxyAddress; + private final URI remoteAddress; + private final ChannelPoolHandler handler; + private final InitHandlerSupplier initHandlerSupplier; + + public Http1TunnelConnectionPool(EventLoop eventLoop, ChannelPool delegate, SslContext sslContext, + URI proxyAddress, URI remoteAddress, ChannelPoolHandler handler) { + this(eventLoop, delegate, sslContext, proxyAddress, remoteAddress, handler, ProxyTunnelInitHandler::new); + + } + + @SdkTestInternalApi + Http1TunnelConnectionPool(EventLoop eventLoop, ChannelPool delegate, SslContext sslContext, + URI proxyAddress, URI remoteAddress, ChannelPoolHandler handler, + InitHandlerSupplier initHandlerSupplier) { + this.eventLoop = eventLoop; + this.delegate = delegate; + this.sslContext = sslContext; + this.proxyAddress = proxyAddress; + this.remoteAddress = remoteAddress; + this.handler = handler; + this.initHandlerSupplier = initHandlerSupplier; + } + + @Override + public Future acquire() { + return acquire(eventLoop.newPromise()); + } + + @Override + public Future acquire(Promise promise) { + delegate.acquire(eventLoop.newPromise()).addListener((Future f) -> { + if (f.isSuccess()) { + setupChannel(f.getNow(), promise); + } else { + promise.setFailure(f.cause()); + } + }); + return promise; + } + + @Override + public Future release(Channel channel) { + return release(channel, eventLoop.newPromise()); + } + + @Override + public Future release(Channel channel, Promise promise) { + return delegate.release(channel, promise); + } + + @Override + public void close() { + delegate.close(); + } + + private void setupChannel(Channel ch, Promise acquirePromise) { + if (isTunnelEstablished(ch)) { + log.debug(() -> String.format("Tunnel already established for %s", ch.id().asShortText())); + acquirePromise.setSuccess(ch); + return; + } + + log.debug(() -> String.format("Tunnel not yet established for channel %s. 
Establishing tunnel now.", + ch.id().asShortText())); + + Promise tunnelEstablishedPromise = eventLoop.newPromise(); + + SslHandler sslHandler = createSslHandlerIfNeeded(ch.alloc()); + if (sslHandler != null) { + ch.pipeline().addLast(sslHandler); + } + ch.pipeline().addLast(initHandlerSupplier.newInitHandler(delegate, remoteAddress, tunnelEstablishedPromise)); + tunnelEstablishedPromise.addListener((Future f) -> { + if (f.isSuccess()) { + Channel tunnel = f.getNow(); + handler.channelCreated(tunnel); + tunnel.attr(TUNNEL_ESTABLISHED_KEY).set(true); + acquirePromise.setSuccess(tunnel); + } else { + ch.close(); + delegate.release(ch); + + Throwable cause = f.cause(); + log.error(() -> String.format("Unable to establish tunnel for channel %s", ch.id().asShortText()), cause); + acquirePromise.setFailure(cause); + } + }); + } + + private SslHandler createSslHandlerIfNeeded(ByteBufAllocator alloc) { + if (sslContext == null) { + return null; + } + + String scheme = proxyAddress.getScheme(); + + if (!"https".equals(StringUtils.lowerCase(scheme))) { + return null; + } + + return newSslHandler(sslContext, alloc, proxyAddress.getHost(), proxyAddress.getPort()); + } + + private static boolean isTunnelEstablished(Channel ch) { + Boolean established = ch.attr(TUNNEL_ESTABLISHED_KEY).get(); + return Boolean.TRUE.equals(established); + } + + @SdkTestInternalApi + @FunctionalInterface + interface InitHandlerSupplier { + ChannelHandler newInitHandler(ChannelPool sourcePool, URI remoteAddress, Promise tunnelInitFuture); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPool.java new file mode 100644 index 000000000000..4b0f4571f1bc --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPool.java @@ -0,0 +1,235 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.doInEventLoop; + +import io.netty.channel.Channel; +import io.netty.channel.pool.ChannelPool; +import io.netty.util.AttributeKey; +import io.netty.util.concurrent.EventExecutor; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.Logger; + +/** + * A channel pool implementation that tracks the number of "idle" channels in an underlying channel pool. + * + *

    Specifically, this pool counts the number of channels acquired and then released from/to the underlying channel pool. It + * will monitor for the underlying channels to be closed, and will remove them from the "idle" count. + */ +@SdkInternalApi +public class IdleConnectionCountingChannelPool implements SdkChannelPool { + private static final Logger log = Logger.loggerFor(IdleConnectionCountingChannelPool.class); + + /** + * The idle channel state for a specific channel. This should only be accessed from the {@link #executor}. + */ + private static final AttributeKey CHANNEL_STATE = + NettyUtils.getOrCreateAttributeKey("IdleConnectionCountingChannelPool.CHANNEL_STATE"); + + /** + * The executor in which all updates to {@link #idleConnections} is performed. + */ + private final EventExecutor executor; + + /** + * The delegate pool to which all acquire and release calls are delegated. + */ + private final ChannelPool delegatePool; + + /** + * The number of idle connections in the underlying channel pool. This value is only valid if accessed from the + * {@link #executor}. + */ + private int idleConnections = 0; + + public IdleConnectionCountingChannelPool(EventExecutor executor, ChannelPool delegatePool) { + this.executor = executor; + this.delegatePool = delegatePool; + } + + @Override + public Future acquire() { + return acquire(executor.newPromise()); + } + + @Override + public Future acquire(Promise promise) { + Future acquirePromise = delegatePool.acquire(executor.newPromise()); + acquirePromise.addListener(f -> { + Throwable failure = acquirePromise.cause(); + if (failure != null) { + promise.setFailure(failure); + } else { + Channel channel = acquirePromise.getNow(); + channelAcquired(channel); + promise.setSuccess(channel); + } + }); + + return promise; + } + + @Override + public Future release(Channel channel) { + channelReleased(channel); + return delegatePool.release(channel); + } + + @Override + public Future release(Channel channel, Promise promise) { + channelReleased(channel); + return delegatePool.release(channel, promise); + } + + @Override + public void close() { + delegatePool.close(); + } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + CompletableFuture result = new CompletableFuture<>(); + doInEventLoop(executor, () -> { + metrics.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, idleConnections); + result.complete(null); + }); + return result; + } + + /** + * Add a listener to the provided channel that will update the idle channel count when the channel is closed. + */ + private void addUpdateIdleCountOnCloseListener(Channel channel) { + channel.closeFuture().addListener(f -> channelClosed(channel)); + } + + /** + * Invoked when a channel is acquired, marking it non-idle until it's closed or released. 
+ */ + private void channelAcquired(Channel channel) { + doInEventLoop(executor, () -> { + ChannelIdleState channelIdleState = getChannelIdleState(channel); + + if (channelIdleState == null) { + addUpdateIdleCountOnCloseListener(channel); + setChannelIdleState(channel, ChannelIdleState.NOT_IDLE); + } else { + switch (channelIdleState) { + case IDLE: + decrementIdleConnections(); + setChannelIdleState(channel, ChannelIdleState.NOT_IDLE); + break; + case CLOSED: + break; + case NOT_IDLE: + default: + log.warn(() -> "Failed to update idle connection count metric on acquire, because the channel (" + + channel + ") was in an unexpected state: " + channelIdleState); + } + } + }); + } + + /** + * Invoked when a channel is released, marking it idle until it's acquired. + */ + private void channelReleased(Channel channel) { + doInEventLoop(executor, () -> { + ChannelIdleState channelIdleState = getChannelIdleState(channel); + + if (channelIdleState == null) { + log.warn(() -> "Failed to update idle connection count metric on release, because the channel (" + channel + + ") was in an unexpected state: null"); + } else { + switch (channelIdleState) { + case NOT_IDLE: + incrementIdleConnections(); + setChannelIdleState(channel, ChannelIdleState.IDLE); + break; + case CLOSED: + break; + case IDLE: + default: + log.warn(() -> "Failed to update idle connection count metric on release, because the channel (" + + channel + ") was in an unexpected state: " + channelIdleState); + } + } + }); + } + + /** + * Invoked when a channel is closed, ensure it is marked as non-idle. + */ + private void channelClosed(Channel channel) { + doInEventLoop(executor, () -> { + ChannelIdleState channelIdleState = getChannelIdleState(channel); + setChannelIdleState(channel, ChannelIdleState.CLOSED); + + if (channelIdleState != null) { + switch (channelIdleState) { + case IDLE: + decrementIdleConnections(); + break; + case NOT_IDLE: + break; + default: + log.warn(() -> "Failed to update idle connection count metric on close, because the channel (" + channel + + ") was in an unexpected state: " + channelIdleState); + } + } + }); + } + + private ChannelIdleState getChannelIdleState(Channel channel) { + return channel.attr(CHANNEL_STATE).get(); + } + + private void setChannelIdleState(Channel channel, ChannelIdleState newState) { + channel.attr(CHANNEL_STATE).set(newState); + } + + /** + * Decrement the idle connection count. This must be invoked from the {@link #executor}. + */ + private void decrementIdleConnections() { + --idleConnections; + log.trace(() -> "Idle connection count decremented, now " + idleConnections); + } + + /** + * Increment the idle connection count. This must be invoked from the {@link #executor}. + */ + private void incrementIdleConnections() { + ++idleConnections; + log.trace(() -> "Idle connection count incremented, now " + idleConnections); + } + + /** + * The idle state of a channel. 
+ */ + private enum ChannelIdleState { + IDLE, + NOT_IDLE, + CLOSED + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionReaperHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionReaperHandler.java index 7d9ec1805975..f70fd291740e 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionReaperHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionReaperHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentHandler.java new file mode 100644 index 000000000000..4dd1cf822c0b --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentHandler.java @@ -0,0 +1,49 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.LAST_HTTP_CONTENT_RECEIVED_KEY; + +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.handler.codec.http.LastHttpContent; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Logger; + +/** + * Marks {@code ChannelAttributeKey.LAST_HTTP_CONTENT_RECEIVED_KEY} if {@link LastHttpContent} is received. 
+ */ +@SdkInternalApi +@ChannelHandler.Sharable +public final class LastHttpContentHandler extends ChannelInboundHandlerAdapter { + private static final LastHttpContentHandler INSTANCE = new LastHttpContentHandler(); + private static final Logger logger = Logger.loggerFor(LastHttpContent.class); + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + if (msg instanceof LastHttpContent) { + logger.debug(() -> "Received LastHttpContent " + ctx.channel()); + ctx.channel().attr(LAST_HTTP_CONTENT_RECEIVED_KEY).set(true); + } + + ctx.fireChannelRead(msg); + } + + public static LastHttpContentHandler create() { + return INSTANCE; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentSwallower.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentSwallower.java index e2c777db8616..60c6ab14a419 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentSwallower.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentSwallower.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyConfiguration.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyConfiguration.java index e8b1dc795a8b..f8abcf11329f 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyConfiguration.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyConfiguration.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
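
A rough way to see the flag-marking behaviour of LastHttpContentHandler in isolation is to drive it with Netty's EmbeddedChannel. The snippet below is an illustrative sketch, not part of this changeset, and assumes it is placed in the software.amazon.awssdk.http.nio.netty.internal package so the internal attribute key is accessible:

    package software.amazon.awssdk.http.nio.netty.internal;

    import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.LAST_HTTP_CONTENT_RECEIVED_KEY;

    import io.netty.channel.embedded.EmbeddedChannel;
    import io.netty.handler.codec.http.LastHttpContent;

    public class LastHttpContentHandlerSketch {
        public static void main(String[] args) {
            EmbeddedChannel channel = new EmbeddedChannel(LastHttpContentHandler.create());
            channel.attr(LAST_HTTP_CONTENT_RECEIVED_KEY).set(false);

            // Simulate the final chunk of a response body arriving on the channel.
            channel.writeInbound(LastHttpContent.EMPTY_LAST_CONTENT);

            // The handler flips the attribute and still forwards the message downstream.
            System.out.println(channel.attr(LAST_HTTP_CONTENT_RECEIVED_KEY).get()); // true
            channel.finishAndReleaseAll();
        }
    }
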
@@ -24,6 +24,8 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.SdkHttpConfigurationOption; +import software.amazon.awssdk.http.TlsKeyManagersProvider; +import software.amazon.awssdk.http.TlsTrustManagersProvider; import software.amazon.awssdk.utils.AttributeMap; /** @@ -36,6 +38,7 @@ public final class NettyConfiguration { public static final int EVENTLOOP_SHUTDOWN_QUIET_PERIOD_SECONDS = 2; public static final int EVENTLOOP_SHUTDOWN_TIMEOUT_SECONDS = 15; public static final int EVENTLOOP_SHUTDOWN_FUTURE_TIMEOUT_SECONDS = 16; + public static final int HTTP2_CONNECTION_PING_TIMEOUT_SECONDS = 5; private final AttributeMap configuration; @@ -63,10 +66,6 @@ public int maxPendingConnectionAcquires() { return configuration.get(MAX_PENDING_CONNECTION_ACQUIRES); } - public boolean trustAllCertificates() { - return configuration.get(TRUST_ALL_CERTIFICATES); - } - public int readTimeoutMillis() { return saturatedCast(configuration.get(SdkHttpConfigurationOption.READ_TIMEOUT).toMillis()); } @@ -86,4 +85,16 @@ public int connectionTtlMillis() { public boolean reapIdleConnections() { return configuration.get(SdkHttpConfigurationOption.REAP_IDLE_CONNECTIONS); } + + public TlsKeyManagersProvider tlsKeyManagersProvider() { + return configuration.get(SdkHttpConfigurationOption.TLS_KEY_MANAGERS_PROVIDER); + } + + public TlsTrustManagersProvider tlsTrustManagersProvider() { + return configuration.get(SdkHttpConfigurationOption.TLS_TRUST_MANAGERS_PROVIDER); + } + + public boolean trustAllCertificates() { + return configuration.get(TRUST_ALL_CERTIFICATES); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutor.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutor.java index 845c0ba2d829..9995b3f85660 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutor.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
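
The new NettyConfiguration accessors surface the TLS key and trust manager providers from the option map; SslContextProvider, further down in this diff, passes them to Netty's SslContextBuilder. A custom trust provider only needs to return an array of TrustManagers, so a file-backed implementation can stay small. The class below is an illustrative sketch (name and location are hypothetical, not part of this changeset):

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.KeyStore;
    import javax.net.ssl.TrustManager;
    import javax.net.ssl.TrustManagerFactory;
    import software.amazon.awssdk.http.TlsTrustManagersProvider;

    // Loads trust managers from a JKS/PKCS12 trust store on disk.
    public final class FileTrustStoreTrustManagersProvider implements TlsTrustManagersProvider {
        private final Path trustStorePath;
        private final char[] password;

        public FileTrustStoreTrustManagersProvider(Path trustStorePath, char[] password) {
            this.trustStorePath = trustStorePath;
            this.password = password;
        }

        @Override
        public TrustManager[] trustManagers() {
            try (InputStream in = Files.newInputStream(trustStorePath)) {
                KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
                trustStore.load(in, password);
                TrustManagerFactory factory =
                        TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
                factory.init(trustStore);
                return factory.getTrustManagers();
            } catch (Exception e) {
                throw new IllegalStateException("Unable to load trust store " + trustStorePath, e);
            }
        }
    }
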
@@ -15,16 +15,15 @@ package software.amazon.awssdk.http.nio.netty.internal; -import static software.amazon.awssdk.http.Protocol.HTTP1_1; -import static software.amazon.awssdk.http.Protocol.HTTP2; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.EXECUTE_FUTURE_KEY; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.EXECUTION_ID_KEY; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.IN_USE; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.KEEP_ALIVE; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.LAST_HTTP_CONTENT_RECEIVED_KEY; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.REQUEST_CONTEXT_KEY; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.RESPONSE_COMPLETE_KEY; +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.CLOSED_CHANNEL_MESSAGE; -import com.typesafe.netty.http.HttpStreamsClientHandler; -import com.typesafe.netty.http.StreamedHttpRequest; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.Channel; @@ -55,7 +54,6 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; - import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; @@ -63,19 +61,26 @@ import org.slf4j.LoggerFactory; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.http2.FlushOnReadHandler; +import software.amazon.awssdk.http.nio.netty.internal.http2.Http2StreamExceptionHandler; import software.amazon.awssdk.http.nio.netty.internal.http2.Http2ToHttpInboundAdapter; import software.amazon.awssdk.http.nio.netty.internal.http2.HttpToHttp2OutboundAdapter; +import software.amazon.awssdk.http.nio.netty.internal.nrs.HttpStreamsClientHandler; +import software.amazon.awssdk.http.nio.netty.internal.nrs.StreamedHttpRequest; import software.amazon.awssdk.http.nio.netty.internal.utils.ChannelUtils; +import software.amazon.awssdk.metrics.MetricCollector; @SdkInternalApi public final class NettyRequestExecutor { private static final Logger log = LoggerFactory.getLogger(NettyRequestExecutor.class); - private static final RequestAdapter REQUEST_ADAPTER = new RequestAdapter(); + private static final RequestAdapter REQUEST_ADAPTER_HTTP2 = new RequestAdapter(Protocol.HTTP2); + private static final RequestAdapter REQUEST_ADAPTER_HTTP1_1 = new RequestAdapter(Protocol.HTTP1_1); private static final AtomicLong EXECUTION_COUNTER = new AtomicLong(0L); private final long executionId = EXECUTION_COUNTER.incrementAndGet(); private final RequestContext context; private CompletableFuture executeFuture; private Channel channel; + private RequestAdapter requestAdapter; public NettyRequestExecutor(RequestContext context) { this.context = context; @@ -84,8 +89,8 @@ public NettyRequestExecutor(RequestContext context) { @SuppressWarnings("unchecked") public CompletableFuture execute() { Promise channelFuture = context.eventLoopGroup().next().newPromise(); + executeFuture = createExecutionFuture(channelFuture); context.channelPool().acquire(channelFuture); - executeFuture = createExecuteFuture(channelFuture); channelFuture.addListener((GenericFutureListener) this::makeRequestListener); return executeFuture; } @@ -97,10 +102,13 @@ 
public CompletableFuture execute() { * * @return The created execution future. */ - private CompletableFuture createExecuteFuture(Promise channelPromise) { - CompletableFuture future = new CompletableFuture<>(); + private CompletableFuture createExecutionFuture(Promise channelPromise) { + CompletableFuture metricsFuture = initiateMetricsCollection(); + CompletableFuture future = new CompletableFuture<>(); future.whenComplete((r, t) -> { + verifyMetricsWereCollected(metricsFuture); + if (t == null) { return; } @@ -128,6 +136,31 @@ private CompletableFuture createExecuteFuture(Promise channelProm return future; } + private CompletableFuture initiateMetricsCollection() { + MetricCollector metricCollector = context.metricCollector(); + if (!NettyRequestMetrics.metricsAreEnabled(metricCollector)) { + return null; + } + return context.channelPool().collectChannelPoolMetrics(metricCollector); + } + + private void verifyMetricsWereCollected(CompletableFuture metricsFuture) { + if (metricsFuture == null) { + return; + } + + if (!metricsFuture.isDone()) { + log.debug("HTTP request metric collection did not finish in time, so results may be incomplete."); + metricsFuture.cancel(false); + return; + } + + metricsFuture.exceptionally(t -> { + log.debug("HTTP request metric collection failed, so results may be incomplete.", t); + return null; + }); + } + private void makeRequestListener(Future channelFuture) { if (channelFuture.isSuccess()) { channel = channelFuture.getNow(); @@ -145,6 +178,7 @@ private void configureChannel() { channel.attr(EXECUTE_FUTURE_KEY).set(executeFuture); channel.attr(REQUEST_CONTEXT_KEY).set(context); channel.attr(RESPONSE_COMPLETE_KEY).set(false); + channel.attr(LAST_HTTP_CONTENT_RECEIVED_KEY).set(false); channel.attr(IN_USE).set(true); channel.config().setOption(ChannelOption.AUTO_READ, false); } @@ -152,16 +186,28 @@ private void configureChannel() { private boolean tryConfigurePipeline() { Protocol protocol = ChannelAttributeKey.getProtocolNow(channel); ChannelPipeline pipeline = channel.pipeline(); - if (HTTP2.equals(protocol)) { - pipeline.addLast(new Http2ToHttpInboundAdapter()); - pipeline.addLast(new HttpToHttp2OutboundAdapter()); - } else if (!HTTP1_1.equals(protocol)) { - String errorMsg = "Unknown protocol: " + protocol; - closeAndRelease(channel); - handleFailure(() -> errorMsg, new RuntimeException(errorMsg)); - return false; + + switch (protocol) { + case HTTP2: + pipeline.addLast(new Http2ToHttpInboundAdapter()); + pipeline.addLast(new HttpToHttp2OutboundAdapter()); + pipeline.addLast(Http2StreamExceptionHandler.create()); + requestAdapter = REQUEST_ADAPTER_HTTP2; + break; + case HTTP1_1: + requestAdapter = REQUEST_ADAPTER_HTTP1_1; + break; + default: + String errorMsg = "Unknown protocol: " + protocol; + closeAndRelease(channel); + handleFailure(() -> errorMsg, new RuntimeException(errorMsg)); + return false; } + pipeline.addLast(LastHttpContentHandler.create()); + if (Protocol.HTTP2.equals(protocol)) { + pipeline.addLast(FlushOnReadHandler.getInstance()); + } pipeline.addLast(new HttpStreamsClientHandler()); pipeline.addLast(ResponseHandler.getInstance()); @@ -179,7 +225,7 @@ private boolean tryConfigurePipeline() { } private void makeRequest() { - HttpRequest request = REQUEST_ADAPTER.adapt(context.executeRequest().request()); + HttpRequest request = requestAdapter.adapt(context.executeRequest().request()); writeRequest(request); } @@ -193,6 +239,8 @@ private void writeRequest(HttpRequest request) { // Done writing so remove the idle write timeout handler 
ChannelUtils.removeIfExists(channel.pipeline(), WriteTimeoutHandler.class); if (wireCall.isSuccess()) { + NettyRequestMetrics.publishHttp2StreamMetrics(context.metricCollector(), channel); + if (context.executeRequest().fullDuplex()) { return; } @@ -200,7 +248,6 @@ private void writeRequest(HttpRequest request) { channel.pipeline().addFirst(new ReadTimeoutHandler(context.configuration().readTimeoutMillis(), TimeUnit.MILLISECONDS)); channel.read(); - } else { // TODO: Are there cases where we can keep the channel open? closeAndRelease(channel); @@ -264,7 +311,7 @@ private Throwable decorateException(Throwable originalCause) { } else if (originalCause instanceof WriteTimeoutException) { return new IOException("Write timed out", originalCause); } else if (originalCause instanceof ClosedChannelException) { - return new IOException(getMessageForClosedChannel(), originalCause); + return new IOException(CLOSED_CHANNEL_MESSAGE, originalCause); } return originalCause; @@ -323,12 +370,6 @@ private String getMessageForTooManyAcquireOperationsError() { + "AWS, or by increasing the number of hosts sending requests."; } - private String getMessageForClosedChannel() { - return "The channel was closed. This may have been done by the client (e.g. because the request was aborted), " + - "by the service (e.g. because the request took too long or the client tried to write on a read-only socket), " + - "or by an intermediary party (e.g. because the channel was idle for too long)."; - } - /** * Close and release the channel back to the pool. * @@ -336,6 +377,7 @@ private String getMessageForClosedChannel() { */ private void closeAndRelease(Channel channel) { log.trace("closing and releasing channel {}", channel.id().asLongText()); + channel.attr(KEEP_ALIVE).set(false); channel.close(); context.channelPool().release(channel); } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestMetrics.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestMetrics.java new file mode 100644 index 000000000000..092b1c79d686 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestMetrics.java @@ -0,0 +1,76 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import io.netty.channel.Channel; +import io.netty.handler.codec.http2.Http2Connection; +import io.netty.handler.codec.http2.Http2Stream; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.Http2Metric; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.NoOpMetricCollector; + +/** + * Utilities for collecting and publishing request-level metrics. 
+ */ +@SdkInternalApi +public class NettyRequestMetrics { + private NettyRequestMetrics() { + } + + /** + * Determine whether metrics are enabled, based on the provided metric collector. + */ + public static boolean metricsAreEnabled(MetricCollector metricCollector) { + return metricCollector != null && !(metricCollector instanceof NoOpMetricCollector); + } + + /** + * Publish stream metrics for the provided stream channel to the provided collector. This should only be invoked after + * the stream has been initialized. If the stream is not initialized when this is invoked, an exception will be thrown. + */ + public static void publishHttp2StreamMetrics(MetricCollector metricCollector, Channel channel) { + if (!metricsAreEnabled(metricCollector)) { + return; + } + + getHttp2Connection(channel).ifPresent(http2Connection -> { + writeHttp2RequestMetrics(metricCollector, channel, http2Connection); + }); + } + + private static Optional getHttp2Connection(Channel channel) { + Channel parentChannel = channel.parent(); + if (parentChannel == null) { + return Optional.empty(); + } + + return Optional.ofNullable(parentChannel.attr(ChannelAttributeKey.HTTP2_CONNECTION).get()); + } + + private static void writeHttp2RequestMetrics(MetricCollector metricCollector, + Channel channel, + Http2Connection http2Connection) { + int streamId = channel.attr(ChannelAttributeKey.HTTP2_FRAME_STREAM).get().id(); + + Http2Stream stream = http2Connection.stream(streamId); + metricCollector.reportMetric(Http2Metric.LOCAL_STREAM_WINDOW_SIZE_IN_BYTES, + http2Connection.local().flowController().windowSize(stream)); + metricCollector.reportMetric(Http2Metric.REMOTE_STREAM_WINDOW_SIZE_IN_BYTES, + http2Connection.remote().flowController().windowSize(stream)); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NonManagedEventLoopGroup.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NonManagedEventLoopGroup.java index ba9ceb7fd8d7..518e8f7bbbce 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NonManagedEventLoopGroup.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NonManagedEventLoopGroup.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/OldConnectionReaperHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/OldConnectionReaperHandler.java index 9457396c75eb..1625a8b1132a 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/OldConnectionReaperHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/OldConnectionReaperHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
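
NettyRequestMetrics only reports values into whatever MetricCollector the request carries; creating the collector and reading the results back is the caller's job. The following standalone sketch shows that round trip, assuming the metrics-spi API (MetricCollector.create, MetricCollection.metricValues) that ships alongside this change; the values reported are made up for illustration:

    import java.util.List;
    import software.amazon.awssdk.http.Http2Metric;
    import software.amazon.awssdk.http.HttpMetric;
    import software.amazon.awssdk.metrics.MetricCollection;
    import software.amazon.awssdk.metrics.MetricCollector;

    public class MetricCollectorSketch {
        public static void main(String[] args) {
            MetricCollector collector = MetricCollector.create("ApiCallAttempt");

            // The same metrics the Netty client reports for a pooled HTTP/2 stream.
            collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 3);
            collector.reportMetric(Http2Metric.LOCAL_STREAM_WINDOW_SIZE_IN_BYTES, 65_535);
            collector.reportMetric(Http2Metric.REMOTE_STREAM_WINDOW_SIZE_IN_BYTES, 65_535);

            MetricCollection collected = collector.collect();
            List<Integer> concurrency = collected.metricValues(HttpMetric.AVAILABLE_CONCURRENCY);
            System.out.println("AVAILABLE_CONCURRENCY = " + concurrency); // [3]
        }
    }
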
diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/OneTimeReadTimeoutHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/OneTimeReadTimeoutHandler.java index caded88dcbd5..1bd9bf970249 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/OneTimeReadTimeoutHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/OneTimeReadTimeoutHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandler.java new file mode 100644 index 000000000000..c5500f7f6633 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandler.java @@ -0,0 +1,147 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.pool.ChannelPool; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.HttpClientCodec; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.util.concurrent.Promise; +import java.io.IOException; +import java.net.URI; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.utils.Logger; + +/** + * Handler that initializes the HTTP tunnel. 
+ */ +@SdkInternalApi +public final class ProxyTunnelInitHandler extends ChannelDuplexHandler { + public static final Logger log = Logger.loggerFor(ProxyTunnelInitHandler.class); + private final ChannelPool sourcePool; + private final URI remoteHost; + private final Promise initPromise; + private final Supplier httpCodecSupplier; + + public ProxyTunnelInitHandler(ChannelPool sourcePool, URI remoteHost, Promise initPromise) { + this(sourcePool, remoteHost, initPromise, HttpClientCodec::new); + } + + @SdkTestInternalApi + public ProxyTunnelInitHandler(ChannelPool sourcePool, URI remoteHost, Promise initPromise, + Supplier httpCodecSupplier) { + this.sourcePool = sourcePool; + this.remoteHost = remoteHost; + this.initPromise = initPromise; + this.httpCodecSupplier = httpCodecSupplier; + } + + @Override + public void handlerAdded(ChannelHandlerContext ctx) { + ChannelPipeline pipeline = ctx.pipeline(); + pipeline.addBefore(ctx.name(), null, httpCodecSupplier.get()); + HttpRequest connectRequest = connectRequest(); + ctx.channel().writeAndFlush(connectRequest).addListener(f -> { + if (!f.isSuccess()) { + handleConnectRequestFailure(ctx, f.cause()); + } + }); + } + + @Override + public void handlerRemoved(ChannelHandlerContext ctx) { + if (ctx.pipeline().get(HttpClientCodec.class) != null) { + ctx.pipeline().remove(HttpClientCodec.class); + } + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + if (msg instanceof HttpResponse) { + HttpResponse response = (HttpResponse) msg; + if (response.status().code() == 200) { + ctx.pipeline().remove(this); + // Note: we leave the SslHandler here (if we added it) + initPromise.setSuccess(ctx.channel()); + return; + } + } + + // Fail if we received any other type of message or we didn't get a 200 from the proxy + ctx.pipeline().remove(this); + ctx.close(); + sourcePool.release(ctx.channel()); + initPromise.setFailure(new IOException("Could not connect to proxy")); + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) { + if (!initPromise.isDone()) { + handleConnectRequestFailure(ctx, null); + } else { + log.debug(() -> "The proxy channel (" + ctx.channel().id() + ") is inactive"); + closeAndRelease(ctx); + } + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + if (!initPromise.isDone()) { + handleConnectRequestFailure(ctx, cause); + } else { + log.debug(() -> "An exception occurred on the proxy tunnel channel (" + ctx.channel().id() + "). " + + "The channel has been closed to prevent any ongoing issues.", cause); + closeAndRelease(ctx); + } + } + + private void handleConnectRequestFailure(ChannelHandlerContext ctx, Throwable cause) { + closeAndRelease(ctx); + String errorMsg = "Unable to send CONNECT request to proxy"; + IOException ioException = cause == null ? 
new IOException(errorMsg) : + new IOException(errorMsg, cause); + initPromise.setFailure(ioException); + } + + private void closeAndRelease(ChannelHandlerContext ctx) { + ctx.close(); + sourcePool.release(ctx.channel()); + } + + private HttpRequest connectRequest() { + String uri = getUri(); + HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.CONNECT, uri, + Unpooled.EMPTY_BUFFER, false); + request.headers().add(HttpHeaderNames.HOST, uri); + return request; + } + + private String getUri() { + return remoteHost.getHost() + ":" + remoteHost.getPort(); + } +} + diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ReleaseOnceChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ReleaseOnceChannelPool.java index 196329a61929..c4e80a1922e9 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ReleaseOnceChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ReleaseOnceChannelPool.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -23,9 +23,12 @@ import io.netty.util.concurrent.GenericFutureListener; import io.netty.util.concurrent.Promise; import io.netty.util.concurrent.SucceededFuture; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.nio.netty.internal.http2.Http2MultiplexedChannelPool; +import software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils; +import software.amazon.awssdk.metrics.MetricCollector; /** * Wrapper around a {@link ChannelPool} to protect it from having the same channel released twice. This can @@ -33,13 +36,14 @@ * mechanism to track leased connections. 
*/ @SdkInternalApi -public class ReleaseOnceChannelPool implements ChannelPool { +public class ReleaseOnceChannelPool implements SdkChannelPool { - private static final AttributeKey IS_RELEASED = AttributeKey.newInstance("isReleased"); + private static final AttributeKey IS_RELEASED = NettyUtils.getOrCreateAttributeKey( + "software.amazon.awssdk.http.nio.netty.internal.http2.ReleaseOnceChannelPool.isReleased"); - private final ChannelPool delegate; + private final SdkChannelPool delegate; - public ReleaseOnceChannelPool(ChannelPool delegate) { + public ReleaseOnceChannelPool(SdkChannelPool delegate) { this.delegate = delegate; } @@ -90,4 +94,9 @@ private boolean shouldRelease(Channel channel) { public void close() { delegate.close(); } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + return delegate.collectChannelPoolMetrics(metrics); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestAdapter.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestAdapter.java index 628184d8921a..c4fc63af918f 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestAdapter.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -21,23 +21,82 @@ import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.HttpConversionUtil.ExtensionHeaderNames; +import java.util.Collections; +import java.util.List; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.utils.StringUtils; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.http.SdkHttpUtils; @SdkInternalApi public final class RequestAdapter { + private static final String HOST = "Host"; + private static final List IGNORE_HEADERS = Collections.singletonList(HOST); + + private final Protocol protocol; + + public RequestAdapter(Protocol protocol) { + this.protocol = Validate.paramNotNull(protocol, "protocol"); + } + public HttpRequest adapt(SdkHttpRequest sdkRequest) { HttpMethod method = toNettyHttpMethod(sdkRequest.method()); HttpHeaders headers = new DefaultHttpHeaders(); - String uri = sdkRequest.getUri().toString(); + String uri = encodedPathAndQueryParams(sdkRequest); + // All requests start out as HTTP/1.1 objects, even if they will + // ultimately be sent over HTTP2. Conversion to H2 is handled at a + // later stage if necessary; see HttpToHttp2OutboundAdapter. 
DefaultHttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, method, uri, headers); - sdkRequest.headers().forEach(request.headers()::add); + addHeadersToRequest(request, sdkRequest); return request; } private static HttpMethod toNettyHttpMethod(SdkHttpMethod method) { return HttpMethod.valueOf(method.name()); } + + private static String encodedPathAndQueryParams(SdkHttpRequest sdkRequest) { + String encodedPath = sdkRequest.encodedPath(); + if (StringUtils.isBlank(encodedPath)) { + encodedPath = "/"; + } + + String encodedQueryParams = SdkHttpUtils.encodeAndFlattenQueryParameters(sdkRequest.rawQueryParameters()) + .map(queryParams -> "?" + queryParams) + .orElse(""); + + return encodedPath + encodedQueryParams; + } + + /** + * Configures the headers in the specified Netty HTTP request. + */ + private void addHeadersToRequest(DefaultHttpRequest httpRequest, SdkHttpRequest request) { + httpRequest.headers().add(HOST, getHostHeaderValue(request)); + + String scheme = request.getUri().getScheme(); + if (Protocol.HTTP2 == protocol && !StringUtils.isBlank(scheme)) { + httpRequest.headers().add(ExtensionHeaderNames.SCHEME.text(), scheme); + } + + // Copy over any other headers already in our request + request.headers().entrySet().stream() + /* + * Skip the Host header to avoid sending it twice, which will + * interfere with some signing schemes. + */ + .filter(e -> !IGNORE_HEADERS.contains(e.getKey())) + .forEach(e -> e.getValue().forEach(h -> httpRequest.headers().add(e.getKey(), h))); + } + + private String getHostHeaderValue(SdkHttpRequest request) { + return SdkHttpUtils.isUsingStandardPort(request.protocol(), request.port()) + ? request.host() + : request.host() + ":" + request.port(); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestContext.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestContext.java index e97bbe7f1728..9dc2e09f1d4e 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestContext.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestContext.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
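
To make the RequestAdapter changes concrete, here is a rough usage sketch (not from the changeset) for a request on a non-standard port: the request line carries only the encoded path and query string, and the Host header is added explicitly with the port appended:

    import io.netty.handler.codec.http.HttpRequest;
    import software.amazon.awssdk.http.Protocol;
    import software.amazon.awssdk.http.SdkHttpFullRequest;
    import software.amazon.awssdk.http.SdkHttpMethod;
    import software.amazon.awssdk.http.nio.netty.internal.RequestAdapter;

    public class RequestAdapterSketch {
        public static void main(String[] args) {
            SdkHttpFullRequest sdkRequest = SdkHttpFullRequest.builder()
                    .method(SdkHttpMethod.GET)
                    .protocol("http")
                    .host("localhost")
                    .port(8080)
                    .encodedPath("/object")
                    .putRawQueryParameter("list-type", "2")
                    .build();

            HttpRequest nettyRequest = new RequestAdapter(Protocol.HTTP1_1).adapt(sdkRequest);

            System.out.println(nettyRequest.uri());                 // /object?list-type=2
            System.out.println(nettyRequest.headers().get("Host")); // localhost:8080
        }
    }
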
@@ -16,20 +16,23 @@ package software.amazon.awssdk.http.nio.netty.internal; import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.NoOpMetricCollector; @SdkInternalApi public final class RequestContext { - private final ChannelPool channelPool; + private final SdkChannelPool channelPool; private final EventLoopGroup eventLoopGroup; private final AsyncExecuteRequest executeRequest; private final NettyConfiguration configuration; - public RequestContext(ChannelPool channelPool, + private final MetricCollector metricCollector; + + public RequestContext(SdkChannelPool channelPool, EventLoopGroup eventLoopGroup, AsyncExecuteRequest executeRequest, NettyConfiguration configuration) { @@ -37,9 +40,10 @@ public RequestContext(ChannelPool channelPool, this.eventLoopGroup = eventLoopGroup; this.executeRequest = executeRequest; this.configuration = configuration; + this.metricCollector = executeRequest.metricCollector().orElseGet(NoOpMetricCollector::create); } - public ChannelPool channelPool() { + public SdkChannelPool channelPool() { return channelPool; } @@ -64,4 +68,8 @@ public SdkAsyncHttpResponseHandler handler() { public NettyConfiguration configuration() { return configuration; } + + public MetricCollector metricCollector() { + return metricCollector; + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java index 541a30fc7a9d..566987d03370 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
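
RequestContext now resolves the request's MetricCollector up front, defaulting to a no-op collector when the caller supplied none; NettyRequestMetrics.metricsAreEnabled then treats that no-op collector as "metrics disabled" and skips all metric work. A minimal sketch of that defaulting rule (standalone, not part of the changeset):

    import java.util.Optional;
    import software.amazon.awssdk.metrics.MetricCollector;
    import software.amazon.awssdk.metrics.NoOpMetricCollector;

    public class MetricCollectorDefaultingSketch {
        static MetricCollector resolve(Optional<MetricCollector> fromRequest) {
            return fromRequest.orElseGet(NoOpMetricCollector::create);
        }

        public static void main(String[] args) {
            MetricCollector resolved = resolve(Optional.empty());
            // The Netty client treats a NoOpMetricCollector as "metrics disabled".
            System.out.println(resolved instanceof NoOpMetricCollector); // true
        }
    }
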
@@ -18,13 +18,14 @@ import static java.util.stream.Collectors.groupingBy; import static java.util.stream.Collectors.mapping; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.EXECUTE_FUTURE_KEY; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.KEEP_ALIVE; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.LAST_HTTP_CONTENT_RECEIVED_KEY; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.REQUEST_CONTEXT_KEY; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.RESPONSE_COMPLETE_KEY; import static software.amazon.awssdk.http.nio.netty.internal.utils.ExceptionHandlingUtils.tryCatch; import static software.amazon.awssdk.http.nio.netty.internal.utils.ExceptionHandlingUtils.tryCatchFinally; +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.CLOSED_CHANNEL_MESSAGE; -import com.typesafe.netty.http.HttpStreamsClientHandler; -import com.typesafe.netty.http.StreamedHttpResponse; import io.netty.buffer.ByteBuf; import io.netty.channel.Channel; import io.netty.channel.ChannelHandler.Sharable; @@ -38,7 +39,7 @@ import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.timeout.ReadTimeoutException; import io.netty.handler.timeout.WriteTimeoutException; -import io.netty.util.AttributeKey; +import io.netty.util.ReferenceCountUtil; import java.io.IOException; import java.nio.ByteBuffer; import java.util.List; @@ -53,12 +54,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.HttpStatusFamily; import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.SdkCancellationException; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; import software.amazon.awssdk.http.nio.netty.internal.http2.Http2ResetSendingSubscription; +import software.amazon.awssdk.http.nio.netty.internal.nrs.HttpStreamsClientHandler; +import software.amazon.awssdk.http.nio.netty.internal.nrs.StreamedHttpResponse; import software.amazon.awssdk.utils.FunctionalUtils.UnsafeRunnable; import software.amazon.awssdk.utils.async.DelegatingSubscription; @@ -66,12 +70,6 @@ @SdkInternalApi public class ResponseHandler extends SimpleChannelInboundHandler { - /** - * {@link AttributeKey} to keep track of whether we should close the connection after this request - * has completed. 
- */ - private static final AttributeKey KEEP_ALIVE = AttributeKey.newInstance("aws.http.nio.netty.async.keepAlive"); - private static final Logger log = LoggerFactory.getLogger(ResponseHandler.class); private static final ResponseHandler INSTANCE = new ResponseHandler(); @@ -90,7 +88,7 @@ protected void channelRead0(ChannelHandlerContext channelContext, HttpObject msg .statusCode(response.status().code()) .statusText(response.status().reasonPhrase()) .build(); - channelContext.channel().attr(KEEP_ALIVE).set(HttpUtil.isKeepAlive(response)); + channelContext.channel().attr(KEEP_ALIVE).set(shouldKeepAlive(response)); requestContext.handler().onHeaders(sdkResponse); } @@ -133,6 +131,13 @@ private static void finalizeResponse(RequestContext requestContext, ChannelHandl } } + private boolean shouldKeepAlive(HttpResponse response) { + if (HttpStatusFamily.of(response.status().code()) == HttpStatusFamily.SERVER_ERROR) { + return false; + } + return HttpUtil.isKeepAlive(response); + } + @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { RequestContext requestContext = ctx.channel().attr(REQUEST_CONTEXT_KEY).get(); @@ -144,12 +149,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E } @Override - public void channelInactive(ChannelHandlerContext handlerCtx) throws Exception { - notifyIfResponseNotCompleted(handlerCtx); - } - - @Override - public void channelUnregistered(ChannelHandlerContext handlerCtx) throws Exception { + public void channelInactive(ChannelHandlerContext handlerCtx) { notifyIfResponseNotCompleted(handlerCtx); } @@ -164,6 +164,7 @@ public static ResponseHandler getInstance() { */ private static void closeAndRelease(ChannelHandlerContext ctx) { Channel channel = ctx.channel(); + channel.attr(KEEP_ALIVE).set(false); RequestContext requestContext = channel.attr(REQUEST_CONTEXT_KEY).get(); ctx.close(); requestContext.channelPool().release(channel); @@ -252,6 +253,7 @@ private void onCancel() { public void onNext(HttpContent httpContent) { // isDone may be true if the subscriber cancelled if (isDone.get()) { + ReferenceCountUtil.release(httpContent); return; } @@ -391,9 +393,12 @@ private Throwable wrapException(Throwable originalCause) { private void notifyIfResponseNotCompleted(ChannelHandlerContext handlerCtx) { RequestContext requestCtx = handlerCtx.channel().attr(REQUEST_CONTEXT_KEY).get(); - boolean responseCompleted = handlerCtx.channel().attr(RESPONSE_COMPLETE_KEY).get(); - if (!responseCompleted) { - IOException err = new IOException("Server failed to send complete response"); + Boolean responseCompleted = handlerCtx.channel().attr(RESPONSE_COMPLETE_KEY).get(); + Boolean lastHttpContentReceived = handlerCtx.channel().attr(LAST_HTTP_CONTENT_RECEIVED_KEY).get(); + handlerCtx.channel().attr(KEEP_ALIVE).set(false); + + if (!Boolean.TRUE.equals(responseCompleted) && !Boolean.TRUE.equals(lastHttpContentReceived)) { + IOException err = new IOException("Server failed to send complete response. 
" + CLOSED_CHANNEL_MESSAGE); runAndLogError("Fail to execute SdkAsyncHttpResponseHandler#onError", () -> requestCtx.handler().onError(err)); executeFuture(handlerCtx).completeExceptionally(err); runAndLogError("Could not release channel", () -> closeAndRelease(handlerCtx)); diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelOptions.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelOptions.java index 9cca922b62bb..5c8f21e19e1b 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelOptions.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelOptions.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelPool.java new file mode 100644 index 000000000000..3238797eecde --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelPool.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import io.netty.channel.pool.ChannelPool; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollector; + +/** + * A {@link ChannelPool} implementation that allows a caller to asynchronously retrieve channel-pool related metrics via + * {@link #collectChannelPoolMetrics(MetricCollector)}. + */ +@SdkInternalApi +public interface SdkChannelPool extends ChannelPool { + /** + * Collect channel pool metrics into the provided {@link MetricCollector} collection, completing the returned future when + * all metric publishing is complete. + * + * @param metrics The collection to which all metrics should be added. + * @return A future that is completed when all metric publishing is complete. 
+ */ + CompletableFuture collectChannelPoolMetrics(MetricCollector metrics); +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelPoolMap.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelPoolMap.java index 102ccc11c685..9d1c0163044e 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelPoolMap.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelPoolMap.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SharedSdkEventLoopGroup.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SharedSdkEventLoopGroup.java index 9259f418f183..f316b77f3d8b 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SharedSdkEventLoopGroup.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SharedSdkEventLoopGroup.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SimpleChannelPoolAwareChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SimpleChannelPoolAwareChannelPool.java new file mode 100644 index 000000000000..30e6247d419e --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SimpleChannelPoolAwareChannelPool.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import io.netty.channel.Channel; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollector; + +@SdkInternalApi +final class SimpleChannelPoolAwareChannelPool implements SdkChannelPool { + private final SdkChannelPool delegate; + private final BetterSimpleChannelPool simpleChannelPool; + + SimpleChannelPoolAwareChannelPool(SdkChannelPool delegate, BetterSimpleChannelPool simpleChannelPool) { + this.delegate = delegate; + this.simpleChannelPool = simpleChannelPool; + } + + @Override + public Future acquire() { + return delegate.acquire(); + } + + @Override + public Future acquire(Promise promise) { + return delegate.acquire(promise); + } + + @Override + public Future release(Channel channel) { + return delegate.release(channel); + } + + @Override + public Future release(Channel channel, Promise promise) { + return delegate.release(channel, promise); + } + + @Override + public void close() { + delegate.close(); + } + + public BetterSimpleChannelPool underlyingSimpleChannelPool() { + return simpleChannelPool; + } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + return delegate.collectChannelPoolMetrics(metrics); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SslCloseCompletionEventHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SslCloseCompletionEventHandler.java index ce59eea73565..2a7f861fb661 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SslCloseCompletionEventHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SslCloseCompletionEventHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SslContextProvider.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SslContextProvider.java new file mode 100644 index 000000000000..61576850c04d --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SslContextProvider.java @@ -0,0 +1,106 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import io.netty.handler.codec.http2.Http2SecurityUtil; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslProvider; +import io.netty.handler.ssl.SupportedCipherSuiteFilter; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; +import java.util.List; +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SystemPropertyTlsKeyManagersProvider; +import software.amazon.awssdk.http.TlsTrustManagersProvider; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +@SdkInternalApi +public final class SslContextProvider { + private static final Logger log = Logger.loggerFor(SslContextProvider.class); + private final Protocol protocol; + private final SslProvider sslProvider; + private final TrustManagerFactory trustManagerFactory; + private final KeyManagerFactory keyManagerFactory; + + public SslContextProvider(NettyConfiguration configuration, Protocol protocol, SslProvider sslProvider) { + this.protocol = protocol; + this.sslProvider = sslProvider; + this.trustManagerFactory = getTrustManager(configuration); + this.keyManagerFactory = getKeyManager(configuration); + } + + public SslContext sslContext() { + try { + return SslContextBuilder.forClient() + .sslProvider(sslProvider) + .ciphers(getCiphers(), SupportedCipherSuiteFilter.INSTANCE) + .trustManager(trustManagerFactory) + .keyManager(keyManagerFactory) + .build(); + } catch (SSLException e) { + throw new RuntimeException(e); + } + } + + /** + * HTTP/2: per Rfc7540, there is a blocked list of cipher suites for HTTP/2, so setting + * the recommended cipher suites directly here + * + * HTTP/1.1: return null so that the default ciphers suites will be used + * https://github.com/netty/netty/blob/0dc246eb129796313b58c1dbdd674aa289f72cad/handler/src/main/java/io/netty/handler/ssl + * /SslUtils.java + */ + private List getCiphers() { + return protocol.equals(Protocol.HTTP2) ? Http2SecurityUtil.CIPHERS : null; + } + + private TrustManagerFactory getTrustManager(NettyConfiguration configuration) { + TlsTrustManagersProvider tlsTrustManagersProvider = configuration.tlsTrustManagersProvider(); + Validate.isTrue(tlsTrustManagersProvider == null || !configuration.trustAllCertificates(), + "A TlsTrustManagerProvider can't be provided if TrustAllCertificates is also set"); + + if (tlsTrustManagersProvider != null) { + return StaticTrustManagerFactory.create(tlsTrustManagersProvider.trustManagers()); + } + + if (configuration.trustAllCertificates()) { + log.warn(() -> "SSL Certificate verification is disabled. 
This is not a safe setting and should only be " + + "used for testing."); + return InsecureTrustManagerFactory.INSTANCE; + } + + // return null so that the system default trust manager will be used + return null; + } + + private KeyManagerFactory getKeyManager(NettyConfiguration configuration) { + if (configuration.tlsKeyManagersProvider() != null) { + KeyManager[] keyManagers = configuration.tlsKeyManagersProvider().keyManagers(); + if (keyManagers != null) { + return StaticKeyManagerFactory.create(keyManagers); + } + } + + KeyManager[] systemPropertyKeyManagers = SystemPropertyTlsKeyManagersProvider.create().keyManagers(); + return systemPropertyKeyManagers == null ? null : StaticKeyManagerFactory.create(systemPropertyKeyManagers); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactory.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactory.java new file mode 100644 index 000000000000..2aa2a2da9f71 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactory.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Factory that simply returns a statically provided set of {@link KeyManager}s. + */ +@SdkInternalApi +public final class StaticKeyManagerFactory extends KeyManagerFactory { + private StaticKeyManagerFactory(KeyManager[] keyManagers) { + super(new StaticKeyManagerFactorySpi(keyManagers), null, null); + } + + public static StaticKeyManagerFactory create(KeyManager[] keyManagers) { + return new StaticKeyManagerFactory(keyManagers); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactorySpi.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactorySpi.java new file mode 100644 index 000000000000..19c7b85cde6c --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactorySpi.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
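The SslContextProvider added above centralizes client TLS setup: HTTP/2 connections get the RFC 7540 compatible cipher list, trust managers come from the configured provider (or the insecure trust-all factory for testing), and key managers fall back to the system-property provider. A minimal sketch of the same SslContextBuilder flow, with the JDK provider and the insecure trust manager chosen purely for illustration:

```java
import io.netty.handler.codec.http2.Http2SecurityUtil;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.SslProvider;
import io.netty.handler.ssl.SupportedCipherSuiteFilter;
import io.netty.handler.ssl.util.InsecureTrustManagerFactory;
import javax.net.ssl.SSLException;

public final class Http2SslContextSketch {
    public static SslContext forTestingOnly() throws SSLException {
        return SslContextBuilder.forClient()
                                // RFC 7540 prohibits many cipher suites for HTTP/2, so the
                                // recommended HTTP/2 ciphers are passed explicitly.
                                .ciphers(Http2SecurityUtil.CIPHERS, SupportedCipherSuiteFilter.INSTANCE)
                                .sslProvider(SslProvider.JDK)
                                // Equivalent of trustAllCertificates(true); never use outside tests.
                                .trustManager(InsecureTrustManagerFactory.INSTANCE)
                                .build();
    }
}
```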
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import java.security.KeyStore; +import java.util.Arrays; +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactorySpi; +import javax.net.ssl.ManagerFactoryParameters; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + + +/** +* Factory SPI that simply returns a statically provided set of {@link KeyManager}s. +*/ +@SdkInternalApi +public final class StaticKeyManagerFactorySpi extends KeyManagerFactorySpi { + private final KeyManager[] keyManagers; + + public StaticKeyManagerFactorySpi(KeyManager[] keyManagers) { + Validate.paramNotNull(keyManagers, "keyManagers"); + this.keyManagers = Arrays.copyOf(keyManagers, keyManagers.length); + } + + @Override + protected void engineInit(KeyStore ks, char[] password) { + throw new UnsupportedOperationException("engineInit not supported by this KeyManagerFactory"); + } + + @Override + protected void engineInit(ManagerFactoryParameters spec) { + throw new UnsupportedOperationException("engineInit not supported by this KeyManagerFactory"); + } + + @Override + protected KeyManager[] engineGetKeyManagers() { + return keyManagers; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/StaticTrustManagerFactory.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/StaticTrustManagerFactory.java new file mode 100644 index 000000000000..2da51afcd594 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/StaticTrustManagerFactory.java @@ -0,0 +1,49 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
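StaticKeyManagerFactory and StaticKeyManagerFactorySpi exist because SslContextBuilder expects a KeyManagerFactory while the SDK's TlsKeyManagersProvider returns a raw KeyManager[] array. A hedged sketch of where such an array typically comes from, using a PKCS#12 store purely for illustration:

```java
import java.io.InputStream;
import java.security.KeyStore;
import javax.net.ssl.KeyManager;
import javax.net.ssl.KeyManagerFactory;

final class KeyManagerLoadingSketch {
    // Loads KeyManagers from a client key store; a static factory like the one above
    // would simply hand this array back from engineGetKeyManagers().
    static KeyManager[] load(InputStream pkcs12Stream, char[] password) throws Exception {
        KeyStore keyStore = KeyStore.getInstance("PKCS12");
        keyStore.load(pkcs12Stream, password);
        KeyManagerFactory factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        factory.init(keyStore, password);
        return factory.getKeyManagers();
    }
}
```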
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import io.netty.handler.ssl.util.SimpleTrustManagerFactory; +import java.security.KeyStore; +import javax.net.ssl.ManagerFactoryParameters; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public final class StaticTrustManagerFactory extends SimpleTrustManagerFactory { + private final TrustManager[] trustManagers; + + private StaticTrustManagerFactory(TrustManager[] trustManagers) { + this.trustManagers = trustManagers; + } + + @Override + protected void engineInit(KeyStore keyStore) { + } + + @Override + protected void engineInit(ManagerFactoryParameters managerFactoryParameters) { + } + + @Override + protected TrustManager[] engineGetTrustManagers() { + return trustManagers; + } + + public static TrustManagerFactory create(TrustManager[] trustManagers) { + return new StaticTrustManagerFactory(trustManagers); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/UnusedChannelExceptionHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/UnusedChannelExceptionHandler.java index 7f122a2bd5d5..b4559880ad0b 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/UnusedChannelExceptionHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/UnusedChannelExceptionHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -18,8 +18,8 @@ import static software.amazon.awssdk.http.nio.netty.internal.utils.ChannelUtils.getAttribute; import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerAdapter; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.handler.timeout.TimeoutException; import java.io.IOException; import java.util.Optional; @@ -36,7 +36,7 @@ */ @SdkInternalApi @ChannelHandler.Sharable -public final class UnusedChannelExceptionHandler extends ChannelHandlerAdapter { +public final class UnusedChannelExceptionHandler extends ChannelInboundHandlerAdapter { public static final UnusedChannelExceptionHandler INSTANCE = new UnusedChannelExceptionHandler(); private static final Logger log = Logger.loggerFor(UnusedChannelExceptionHandler.class); diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/FlushOnReadHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/FlushOnReadHandler.java new file mode 100644 index 000000000000..3cdd06c0c2a5 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/FlushOnReadHandler.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This is an HTTP/2 related workaround for an issue where a WINDOW_UPDATE is + * queued but not written to the socket, causing a read() on the channel to + * hang if the remote endpoint thinks our inbound window is 0. + */ +@SdkInternalApi +@ChannelHandler.Sharable +public final class FlushOnReadHandler extends ChannelOutboundHandlerAdapter { + private static final FlushOnReadHandler INSTANCE = new FlushOnReadHandler(); + + private FlushOnReadHandler() { + } + + @Override + public void read(ChannelHandlerContext ctx) { + //Note: order is important, we need to fire the read() event first + // since it's what triggers the WINDOW_UPDATE frame write + ctx.read(); + ctx.channel().parent().flush(); + } + + public static FlushOnReadHandler getInstance() { + return INSTANCE; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/GoAwayException.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/GoAwayException.java new file mode 100644 index 000000000000..bc9719b3ef38 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/GoAwayException.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import io.netty.buffer.ByteBuf; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Exception thrown when a GOAWAY frame is sent by the service. + */ +@SdkInternalApi +public class GoAwayException extends IOException { + private final String message; + + GoAwayException(long errorCode, ByteBuf debugData) { + this.message = String.format("GOAWAY received from service, requesting this stream be closed. 
" + + "Error Code = %d, Debug Data = %s", + errorCode, debugData.toString(StandardCharsets.UTF_8)); + } + + @Override + public String getMessage() { + return message; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ConnectionTerminatingException.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ConnectionTerminatingException.java new file mode 100644 index 000000000000..e241eb98c572 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ConnectionTerminatingException.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Exception indicating a connection is terminating + */ +@SdkInternalApi +final class Http2ConnectionTerminatingException extends RuntimeException { + + Http2ConnectionTerminatingException(String message) { + super(message); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2GoAwayEventListener.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2GoAwayEventListener.java new file mode 100644 index 000000000000..bb67346a25ea --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2GoAwayEventListener.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; +import io.netty.handler.codec.http2.Http2ConnectionAdapter; +import io.netty.handler.codec.http2.Http2GoAwayFrame; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; +import software.amazon.awssdk.utils.Logger; + +/** + * Handles {@link Http2GoAwayFrame}s sent on a connection. This will pass the frame along to the connection's + * {@link Http2MultiplexedChannelPool#handleGoAway(Channel, int, GoAwayException)}. 
+ */ +@SdkInternalApi +public final class Http2GoAwayEventListener extends Http2ConnectionAdapter { + private static final Logger log = Logger.loggerFor(Http2GoAwayEventListener.class); + + private final Channel parentChannel; + + public Http2GoAwayEventListener(Channel parentChannel) { + this.parentChannel = parentChannel; + } + + + @Override + public void onGoAwayReceived(int lastStreamId, long errorCode, ByteBuf debugData) { + Http2MultiplexedChannelPool channelPool = parentChannel.attr(ChannelAttributeKey.HTTP2_MULTIPLEXED_CHANNEL_POOL).get(); + GoAwayException exception = new GoAwayException(errorCode, debugData.retain()); + if (channelPool != null) { + channelPool.handleGoAway(parentChannel, lastStreamId, exception); + } else { + log.warn(() -> "GOAWAY received on a connection (" + parentChannel + ") not associated with any multiplexed " + + "channel pool."); + parentChannel.pipeline().fireExceptionCaught(exception); + } + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPool.java index 49b42087654b..d02bf5d96a09 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPool.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
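Because the listener above extends Http2ConnectionAdapter, it hooks GOAWAY at the HTTP/2 connection level rather than in the channel pipeline. A hedged sketch of how such a listener is typically registered when the parent channel's HTTP/2 codec is assembled (the builder wiring is illustrative):

```java
import io.netty.channel.Channel;
import io.netty.handler.codec.http2.DefaultHttp2Connection;
import io.netty.handler.codec.http2.Http2Connection;

final class GoAwayListenerWiringSketch {
    static Http2Connection newClientConnection(Channel parentChannel) {
        Http2Connection connection = new DefaultHttp2Connection(false); // false = client endpoint
        // onGoAwayReceived(...) now fires for every GOAWAY frame, letting the pool stop
        // allocating streams past lastStreamId on this connection.
        connection.addListener(new Http2GoAwayEventListener(parentChannel));
        return connection;
    }
}
```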
@@ -15,22 +15,50 @@ package software.amazon.awssdk.http.nio.netty.internal.http2; -import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.CHANNEL_POOL_RECORD; +import static java.util.stream.Collectors.toList; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_CONNECTION; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_INITIAL_WINDOW_SIZE; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_MULTIPLEXED_CHANNEL_POOL; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.MAX_CONCURRENT_STREAMS; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.doInEventLoop; import io.netty.channel.Channel; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandler.Sharable; +import io.netty.channel.ChannelHandlerContext; import io.netty.channel.EventLoop; +import io.netty.channel.EventLoopGroup; import io.netty.channel.pool.ChannelPool; +import io.netty.handler.codec.http2.Http2Connection; +import io.netty.handler.codec.http2.Http2Exception; +import io.netty.handler.codec.http2.Http2LocalFlowController; +import io.netty.handler.codec.http2.Http2Stream; import io.netty.handler.codec.http2.Http2StreamChannelBootstrap; -import io.netty.util.concurrent.DefaultPromise; +import io.netty.util.AttributeKey; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; +import io.netty.util.concurrent.PromiseCombiner; +import java.io.IOException; +import java.nio.channels.ClosedChannelException; +import java.time.Duration; import java.util.ArrayList; -import java.util.Collection; - +import java.util.List; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; import software.amazon.awssdk.http.nio.netty.internal.utils.BetterFixedChannelPool; +import software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; /** * {@link ChannelPool} implementation that handles multiplexed streams. Child channels are created @@ -45,152 +73,440 @@ *

    */ @SdkInternalApi -public class Http2MultiplexedChannelPool implements ChannelPool { +public class Http2MultiplexedChannelPool implements SdkChannelPool { + private static final Logger log = Logger.loggerFor(Http2MultiplexedChannelPool.class); + + /** + * Reference to the {@link MultiplexedChannelRecord} on a channel. + */ + private static final AttributeKey MULTIPLEXED_CHANNEL = NettyUtils.getOrCreateAttributeKey( + "software.amazon.awssdk.http.nio.netty.internal.http2.Http2MultiplexedChannelPool.MULTIPLEXED_CHANNEL"); + + /** + * Whether a parent channel has been released yet. This guards against double-releasing to the delegate connection pool. + */ + private static final AttributeKey RELEASED = NettyUtils.getOrCreateAttributeKey( + "software.amazon.awssdk.http.nio.netty.internal.http2.Http2MultiplexedChannelPool.RELEASED"); - private final EventLoop eventLoop; private final ChannelPool connectionPool; - private final long maxConcurrencyPerConnection; - private final ArrayList connections; - private boolean closed = false; + private final EventLoopGroup eventLoopGroup; + private final Set connections; + private final Duration idleConnectionTimeout; + + private AtomicBoolean closed = new AtomicBoolean(false); /** * @param connectionPool Connection pool for parent channels (i.e. the socket channel). - * @param eventLoop Event loop to run all tasks in. - * @param maxConcurrencyPerConnection Max concurrent streams per HTTP/2 connection. */ Http2MultiplexedChannelPool(ChannelPool connectionPool, - EventLoop eventLoop, - long maxConcurrencyPerConnection) { + EventLoopGroup eventLoopGroup, + Duration idleConnectionTimeout) { this.connectionPool = connectionPool; - this.eventLoop = eventLoop; - this.maxConcurrencyPerConnection = maxConcurrencyPerConnection; - // Customers that want an unbounded connection pool may set max concurrency to something like - // Long.MAX_VALUE so we just stick with the initial ArrayList capacity and grow from there. 
- this.connections = new ArrayList<>(); + this.eventLoopGroup = eventLoopGroup; + this.connections = ConcurrentHashMap.newKeySet(); + this.idleConnectionTimeout = idleConnectionTimeout; } @SdkTestInternalApi Http2MultiplexedChannelPool(ChannelPool connectionPool, - EventLoop eventLoop, - long maxConcurrencyPerConnection, - Collection connections) { - this.connectionPool = connectionPool; - this.eventLoop = eventLoop; - this.maxConcurrencyPerConnection = maxConcurrencyPerConnection; - this.connections = new ArrayList<>(connections); + EventLoopGroup eventLoopGroup, + Set connections, + Duration idleConnectionTimeout) { + this(connectionPool, eventLoopGroup, idleConnectionTimeout); + this.connections.addAll(connections); } @Override public Future acquire() { - return acquire(new DefaultPromise<>(eventLoop)); + return acquire(eventLoopGroup.next().newPromise()); } @Override public Future acquire(Promise promise) { - doInEventLoop(eventLoop, () -> acquire0(promise), promise); - return promise; - } - - private Future acquire0(Promise promise) { - if (closed) { - return promise.setFailure(new IllegalStateException("Channel pool is closed!")); + if (closed.get()) { + return promise.setFailure(new IOException("Channel pool is closed!")); } - for (MultiplexedChannelRecord connection : connections) { - if (connection.availableStreams() > 0) { - connection.acquire(promise); + for (MultiplexedChannelRecord multiplexedChannel : connections) { + if (acquireStreamOnInitializedConnection(multiplexedChannel, promise)) { return promise; } } - // No available streams, establish new connection and add it to list - connections.add(new MultiplexedChannelRecord(connectionPool.acquire(), - maxConcurrencyPerConnection, - this::releaseParentChannel) - .acquire(promise)); + + // No available streams on existing connections, establish new connection and add it to list + acquireStreamOnNewConnection(promise); return promise; } + private void acquireStreamOnNewConnection(Promise promise) { + Future newConnectionAcquire = connectionPool.acquire(); + + newConnectionAcquire.addListener(f -> { + if (!newConnectionAcquire.isSuccess()) { + promise.setFailure(newConnectionAcquire.cause()); + return; + } + + Channel parentChannel = newConnectionAcquire.getNow(); + try { + parentChannel.attr(HTTP2_MULTIPLEXED_CHANNEL_POOL).set(this); + + // When the protocol future is completed on the new connection, we're ready for new streams to be added to it. 
+ parentChannel.attr(PROTOCOL_FUTURE).get() + .thenAccept(protocol -> acquireStreamOnFreshConnection(promise, parentChannel, protocol)) + .exceptionally(throwable -> failAndCloseParent(promise, parentChannel, throwable)); + } catch (Throwable e) { + failAndCloseParent(promise, parentChannel, e); + } + }); + } + + private void acquireStreamOnFreshConnection(Promise promise, Channel parentChannel, Protocol protocol) { + try { + Long maxStreams = parentChannel.attr(MAX_CONCURRENT_STREAMS).get(); + + Validate.isTrue(protocol == Protocol.HTTP2, + "Protocol negotiated on connection (%s) was expected to be HTTP/2, but it " + + "was %s.", parentChannel, Protocol.HTTP1_1); + Validate.isTrue(maxStreams != null, + "HTTP/2 was negotiated on the connection (%s), but the maximum number of " + + "streams was not initialized.", parentChannel); + Validate.isTrue(maxStreams > 0, "Maximum streams were not positive on channel (%s).", parentChannel); + + MultiplexedChannelRecord multiplexedChannel = new MultiplexedChannelRecord(parentChannel, maxStreams, + idleConnectionTimeout); + parentChannel.attr(MULTIPLEXED_CHANNEL).set(multiplexedChannel); + + Promise streamPromise = parentChannel.eventLoop().newPromise(); + + if (!acquireStreamOnInitializedConnection(multiplexedChannel, streamPromise)) { + failAndCloseParent(promise, parentChannel, + new IOException("Connection was closed while creating a new stream.")); + return; + } + + streamPromise.addListener(f -> { + if (!streamPromise.isSuccess()) { + promise.setFailure(streamPromise.cause()); + return; + } + + Channel stream = streamPromise.getNow(); + cacheConnectionForFutureStreams(stream, multiplexedChannel, promise); + }); + } catch (Throwable e) { + failAndCloseParent(promise, parentChannel, e); + } + } + + private void cacheConnectionForFutureStreams(Channel stream, + MultiplexedChannelRecord multiplexedChannel, + Promise promise) { + Channel parentChannel = stream.parent(); + + // Before we cache the connection, make sure that exceptions on the connection will remove it from the cache. + parentChannel.pipeline().addLast(ReleaseOnExceptionHandler.INSTANCE); + connections.add(multiplexedChannel); + + if (closed.get()) { + // Whoops, we were closed while we were setting up. Make sure everything here is cleaned up properly. + failAndCloseParent(promise, parentChannel, + new IOException("Connection pool was closed while creating a new stream.")); + return; + } + + promise.setSuccess(stream); + } + /** - * Releases parent channel on failure and cleans up record from connections list. - * - * @param parentChannel Channel to release. May be null if no channel is established. - * @param record Record to cleanup. + * By default, connection window size is a constant value: + * connectionWindowSize = 65535 + (configureInitialWindowSize - 65535) * 2. + * See https://github.com/netty/netty/blob/5c458c9a98d4d3d0345e58495e017175156d624f/codec-http2/src/main/java/io/netty + * /handler/codec/http2/Http2FrameCodec.java#L255 + * We should expand connection window so that the window size proportional to the number of concurrent streams within the + * connection. + * Note that when {@code WINDOW_UPDATE} will be sent depends on the processedWindow in DefaultHttp2LocalFlowController. 
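For concreteness, the arithmetic behind the javadoc above, assuming an illustrative per-stream initial window of 1 MiB (not necessarily the SDK default):

```java
final class ConnectionWindowMath {
    static int defaultConnectionWindow(int configuredInitialWindow) {
        int http2DefaultWindow = 65_535;
        // Netty's Http2FrameCodec derives the connection window from the configured
        // per-stream window, independent of how many streams are eventually opened.
        return http2DefaultWindow + (configuredInitialWindow - http2DefaultWindow) * 2;
    }
    // defaultConnectionWindow(1_048_576) == 2_031_617, i.e. slightly less than two full
    // streams' worth, which is why tryExpandConnectionWindow() below grows the window by
    // initialWindowSize for each stream that is acquired.
}
```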
*/ - private void releaseParentChannel(Channel parentChannel, MultiplexedChannelRecord record) { - doInEventLoop(eventLoop, () -> releaseParentChannel0(parentChannel, record)); - } + private void tryExpandConnectionWindow(Channel parentChannel) { + doInEventLoop(parentChannel.eventLoop(), () -> { + Http2Connection http2Connection = parentChannel.attr(HTTP2_CONNECTION).get(); + Integer initialWindowSize = parentChannel.attr(HTTP2_INITIAL_WINDOW_SIZE).get(); - private void releaseParentChannel0(Channel parentChannel, MultiplexedChannelRecord record) { - if (parentChannel != null) { + Validate.notNull(http2Connection, "http2Connection should not be null on channel " + parentChannel); + Validate.notNull(http2Connection, "initialWindowSize should not be null on channel " + parentChannel); + + Http2Stream connectionStream = http2Connection.connectionStream(); + log.debug(() -> "Expanding connection window size for " + parentChannel + " by " + initialWindowSize); try { - parentChannel.close(); - } finally { - connectionPool.release(parentChannel); + Http2LocalFlowController localFlowController = http2Connection.local().flowController(); + localFlowController.incrementWindowSize(connectionStream, initialWindowSize); + + } catch (Http2Exception e) { + log.warn(() -> "Failed to increment windowSize of connection " + parentChannel, e); } + }); + } + + private Void failAndCloseParent(Promise promise, Channel parentChannel, Throwable exception) { + log.debug(() -> "Channel acquiring failed, closing connection " + parentChannel, exception); + promise.setFailure(exception); + closeAndReleaseParent(parentChannel); + return null; + } + + /** + * Acquire a stream on a connection that has already been initialized. This will return false if the connection cannot have + * any more streams allocated, and true if the stream can be allocated. + * + * This will NEVER complete the provided future when the return value is false. This will ALWAYS complete the provided + * future when the return value is true. + */ + private boolean acquireStreamOnInitializedConnection(MultiplexedChannelRecord channelRecord, Promise promise) { + Promise acquirePromise = channelRecord.getConnection().eventLoop().newPromise(); + + if (!channelRecord.acquireStream(acquirePromise)) { + return false; } - connections.remove(record); + + acquirePromise.addListener(f -> { + try { + if (!acquirePromise.isSuccess()) { + promise.setFailure(acquirePromise.cause()); + return; + } + + Channel channel = acquirePromise.getNow(); + channel.attr(HTTP2_MULTIPLEXED_CHANNEL_POOL).set(this); + channel.attr(MULTIPLEXED_CHANNEL).set(channelRecord); + promise.setSuccess(channel); + + tryExpandConnectionWindow(channel.parent()); + } catch (Exception e) { + promise.setFailure(e); + } + }); + + return true; } @Override public Future release(Channel childChannel) { - return release(childChannel, new DefaultPromise<>(eventLoop)); + return release(childChannel, childChannel.eventLoop().newPromise()); } @Override - public Future release(Channel channel, Promise promise) { - doInEventLoop(eventLoop, () -> release0(channel, promise), promise); - return promise; + public Future release(Channel childChannel, Promise promise) { + if (childChannel.parent() == null) { + // This isn't a child channel. Oddly enough, this is "expected" and is handled properly by the + // BetterFixedChannelPool AS LONG AS we return an IllegalArgumentException via the promise. 
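release() above distinguishes parent (socket) channels from HTTP/2 stream channels purely via Channel.parent(). A small sketch of that relationship using Netty's stream bootstrap; variable names are illustrative:

```java
import io.netty.channel.Channel;
import io.netty.handler.codec.http2.Http2StreamChannel;
import io.netty.handler.codec.http2.Http2StreamChannelBootstrap;

final class ParentChildSketch {
    static void demonstrate(Channel socketChannel) {
        Http2StreamChannel stream = new Http2StreamChannelBootstrap(socketChannel)
            .open()
            .syncUninterruptibly()
            .getNow();

        // A client socket channel has no parent; every multiplexed stream points back at it,
        // which is exactly the check release() uses to reject non-child channels.
        assert socketChannel.parent() == null;
        assert stream.parent() == socketChannel;
    }
}
```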
+ closeAndReleaseParent(childChannel); + return promise.setFailure(new IllegalArgumentException("Channel (" + childChannel + ") is not a child channel.")); + } + + Channel parentChannel = childChannel.parent(); + MultiplexedChannelRecord multiplexedChannel = parentChannel.attr(MULTIPLEXED_CHANNEL).get(); + if (multiplexedChannel == null) { + // This is a child channel, but there is no attached multiplexed channel, which there should be if it was from + // this pool. Close it and log an error. + Exception exception = new IOException("Channel (" + childChannel + ") is not associated with any channel records. " + + "It will be closed, but cannot be released within this pool."); + log.error(exception::getMessage); + childChannel.close(); + return promise.setFailure(exception); + } + + multiplexedChannel.closeAndReleaseChild(childChannel); + + if (multiplexedChannel.canBeClosedAndReleased()) { + // We just closed the last stream in a connection that has reached the end of its life. + return closeAndReleaseParent(parentChannel, null, promise); + } + + return promise.setSuccess(null); } - private void release0(Channel channel, Promise promise) { - if (channel.parent() == null) { - // This is the socket channel, close and release from underlying connection pool - try { - releaseParentChannel(channel); - } finally { - // This channel doesn't technically belong to this pool as it was never acquired directly - promise.setFailure(new IllegalArgumentException("Channel does not belong to this pool")); + private Future closeAndReleaseParent(Channel parentChannel) { + return closeAndReleaseParent(parentChannel, null, parentChannel.eventLoop().newPromise()); + } + + private Future closeAndReleaseParent(Channel parentChannel, Throwable cause) { + return closeAndReleaseParent(parentChannel, cause, parentChannel.eventLoop().newPromise()); + } + + private Future closeAndReleaseParent(Channel parentChannel, Throwable cause, Promise resultPromise) { + if (parentChannel.parent() != null) { + // This isn't a parent channel. Notify it that something is wrong. + Exception exception = new IOException("Channel (" + parentChannel + ") is not a parent channel. It will be closed, " + + "but cannot be released within this pool."); + log.error(exception::getMessage); + parentChannel.close(); + return resultPromise.setFailure(exception); + } + + MultiplexedChannelRecord multiplexedChannel = parentChannel.attr(MULTIPLEXED_CHANNEL).get(); + + // We may not have a multiplexed channel if the parent channel hasn't been fully initialized. 
+ if (multiplexedChannel != null) { + if (cause == null) { + multiplexedChannel.closeChildChannels(); + } else { + multiplexedChannel.closeChildChannels(cause); } - } else { - Channel parentChannel = channel.parent(); - MultiplexedChannelRecord channelRecord = parentChannel.attr(CHANNEL_POOL_RECORD).get(); - channelRecord.release(channel); - channel.close(); - promise.setSuccess(null); + connections.remove(multiplexedChannel); } - } - private void releaseParentChannel(Channel parentChannel) { - MultiplexedChannelRecord channelRecord = parentChannel.attr(CHANNEL_POOL_RECORD).get(); - connections.remove(channelRecord); parentChannel.close(); - connectionPool.release(parentChannel); + if (parentChannel.attr(RELEASED).getAndSet(Boolean.TRUE) == null) { + return connectionPool.release(parentChannel, resultPromise); + } + + return resultPromise.setSuccess(null); + } + + void handleGoAway(Channel parentChannel, int lastStreamId, GoAwayException exception) { + log.debug(() -> "Received GOAWAY on " + parentChannel + " with lastStreamId of " + lastStreamId); + try { + MultiplexedChannelRecord multiplexedChannel = parentChannel.attr(MULTIPLEXED_CHANNEL).get(); + + if (multiplexedChannel != null) { + multiplexedChannel.handleGoAway(lastStreamId, exception); + } else { + // If we don't have a multiplexed channel, the parent channel hasn't been fully initialized. Close it now. + closeAndReleaseParent(parentChannel, exception); + } + } catch (Exception e) { + log.error(() -> "Failed to handle GOAWAY frame on channel " + parentChannel, e); + } } @Override public void close() { - try { - setClosedFlag().await(); - for (MultiplexedChannelRecord c : connections) { - Future f = c.getConnectionFuture(); - f.await(); - if (f.isSuccess()) { - connectionPool.release(f.getNow()).await(); + if (closed.compareAndSet(false, true)) { + Future closeCompleteFuture = doClose(); + + try { + if (!closeCompleteFuture.await(10, TimeUnit.SECONDS)) { + throw new RuntimeException("Event loop didn't close after 10 seconds."); } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + + Throwable exception = closeCompleteFuture.cause(); + if (exception != null) { + throw new RuntimeException("Failed to close channel pool.", exception); } - connectionPool.close(); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - throw new RuntimeException(ie); } } - private Promise setClosedFlag() { - Promise closedFuture = eventLoop.newPromise(); - doInEventLoop(eventLoop, () -> { - closed = true; - closedFuture.setSuccess(null); + private Future doClose() { + EventLoop closeEventLoop = eventLoopGroup.next(); + Promise closeFinishedPromise = closeEventLoop.newPromise(); + + doInEventLoop(closeEventLoop, () -> { + Promise releaseAllChannelsPromise = closeEventLoop.newPromise(); + PromiseCombiner promiseCombiner = new PromiseCombiner(closeEventLoop); + + // Create a copy of the connections to remove while we close them, in case closing updates the original list. 
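The RELEASED attribute plus getAndSet gives closeAndReleaseParent() an at-most-once release back to the delegate pool, even when several failure paths race to clean up the same connection. The same guard in isolation, with an illustrative attribute name:

```java
import io.netty.channel.Channel;
import io.netty.channel.pool.ChannelPool;
import io.netty.util.AttributeKey;

final class ReleaseOnceSketch {
    private static final AttributeKey<Boolean> RELEASED = AttributeKey.valueOf("example.releasedOnce");

    static void releaseOnce(ChannelPool delegatePool, Channel parentChannel) {
        // getAndSet returns null only for the first caller, so the channel is handed back
        // to the delegate pool exactly once; later callers become no-ops.
        if (parentChannel.attr(RELEASED).getAndSet(Boolean.TRUE) == null) {
            delegatePool.release(parentChannel);
        }
    }
}
```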
+ List channelsToRemove = new ArrayList<>(connections); + for (MultiplexedChannelRecord channel : channelsToRemove) { + promiseCombiner.add(closeAndReleaseParent(channel.getConnection())); + } + promiseCombiner.finish(releaseAllChannelsPromise); + + releaseAllChannelsPromise.addListener(f -> { + connectionPool.close(); + closeFinishedPromise.setSuccess(null); + }); }); - return closedFuture; + + return closeFinishedPromise; + } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + CompletableFuture result = new CompletableFuture<>(); + + CompletableFuture summedMetrics = new CompletableFuture<>(); + + List> channelMetrics = + connections.stream() + .map(MultiplexedChannelRecord::getMetrics) + .collect(toList()); + + accumulateMetrics(summedMetrics, channelMetrics); + + summedMetrics.whenComplete((m, t) -> { + if (t != null) { + result.completeExceptionally(t); + } else { + try { + metrics.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, Math.toIntExact(m.getAvailableStreams())); + result.complete(null); + } catch (Exception e) { + result.completeExceptionally(e); + } + } + }); + + return result; + } + + private void accumulateMetrics(CompletableFuture result, + List> channelMetrics) { + accumulateMetrics(result, channelMetrics, new MultiplexedChannelRecord.Metrics(), 0); + } + + private void accumulateMetrics(CompletableFuture result, + List> channelMetrics, + MultiplexedChannelRecord.Metrics resultAccumulator, + int index) { + if (index >= channelMetrics.size()) { + result.complete(resultAccumulator); + return; + } + + channelMetrics.get(index).whenComplete((m, t) -> { + if (t != null) { + result.completeExceptionally(t); + } else { + resultAccumulator.add(m); + accumulateMetrics(result, channelMetrics, resultAccumulator, index + 1); + } + }); + } + + @Sharable + private static final class ReleaseOnExceptionHandler extends ChannelDuplexHandler { + private static final ReleaseOnExceptionHandler INSTANCE = new ReleaseOnExceptionHandler(); + + @Override + public void channelInactive(ChannelHandlerContext ctx) { + closeAndReleaseParent(ctx, new ClosedChannelException()); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + if (cause instanceof Http2ConnectionTerminatingException) { + closeConnectionToNewRequests(ctx, cause); + } else { + closeAndReleaseParent(ctx, cause); + } + } + + void closeConnectionToNewRequests(ChannelHandlerContext ctx, Throwable cause) { + MultiplexedChannelRecord multiplexedChannel = ctx.channel().attr(MULTIPLEXED_CHANNEL).get(); + if (multiplexedChannel != null) { + multiplexedChannel.closeToNewStreams(); + } else { + closeAndReleaseParent(ctx, cause); + } + } + + private void closeAndReleaseParent(ChannelHandlerContext ctx, Throwable cause) { + Http2MultiplexedChannelPool pool = ctx.channel().attr(HTTP2_MULTIPLEXED_CHANNEL_POOL).get(); + pool.closeAndReleaseParent(ctx.channel(), cause); + } } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2PingHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2PingHandler.java new file mode 100644 index 000000000000..bb9f760e2681 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2PingHandler.java @@ -0,0 +1,123 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
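doClose() above leans on Netty's PromiseCombiner to collapse one close-and-release future per connection into a single completion signal. A minimal sketch of that utility; note that it must be driven from the executor passed to its constructor:

```java
import io.netty.channel.Channel;
import io.netty.channel.EventLoop;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.Promise;
import io.netty.util.concurrent.PromiseCombiner;
import java.util.List;

final class PromiseCombinerSketch {
    // Call this from a task already running on the given event loop.
    static Future<Void> closeAll(EventLoop eventLoop, List<Channel> channels) {
        Promise<Void> allClosed = eventLoop.newPromise();
        PromiseCombiner combiner = new PromiseCombiner(eventLoop);
        for (Channel channel : channels) {
            combiner.add(channel.close()); // each ChannelFuture is tracked individually
        }
        combiner.finish(allClosed);        // completes once every close() completes,
        return allClosed;                  // failing if any individual close failed
    }
}
```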
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.handler.codec.http2.DefaultHttp2PingFrame; +import io.netty.handler.codec.http2.Http2PingFrame; +import io.netty.util.concurrent.ScheduledFuture; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * Attached to a {@link Channel} to periodically check the health of HTTP2 connections via PING frames. + * + * If a channel is found to be unhealthy, this will invoke {@link ChannelPipeline#fireExceptionCaught(Throwable)}. + */ +@SdkInternalApi +public class Http2PingHandler extends SimpleChannelInboundHandler { + private static final Logger log = Logger.loggerFor(Http2PingHandler.class); + private static final Http2PingFrame DEFAULT_PING_FRAME = new DefaultHttp2PingFrame(0); + + private final long pingTimeoutMillis; + + private ScheduledFuture periodicPing; + private long lastPingSendTime = 0; + private long lastPingAckTime = 0; + + public Http2PingHandler(int pingTimeoutMillis) { + this.pingTimeoutMillis = pingTimeoutMillis; + } + + @Override + public void handlerAdded(ChannelHandlerContext ctx) { + CompletableFuture protocolFuture = ctx.channel().attr(ChannelAttributeKey.PROTOCOL_FUTURE).get(); + Validate.validState(protocolFuture != null, "Protocol future must be initialized before handler is added."); + protocolFuture.thenAccept(p -> start(p, ctx)); + } + + private void start(Protocol protocol, ChannelHandlerContext ctx) { + if (protocol == Protocol.HTTP2 && periodicPing == null) { + periodicPing = ctx.channel() + .eventLoop() + .scheduleAtFixedRate(() -> doPeriodicPing(ctx.channel()), 0, pingTimeoutMillis, MILLISECONDS); + } + } + + @Override + public void handlerRemoved(ChannelHandlerContext ctx) { + stop(); + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) { + stop(); + ctx.fireChannelInactive(); + } + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2PingFrame frame) { + if (frame.ack()) { + log.debug(() -> "Received PING ACK from channel " + ctx.channel()); + lastPingAckTime = System.currentTimeMillis(); + } else { + ctx.fireChannelRead(frame); + } + } + + private void doPeriodicPing(Channel channel) { + if (lastPingAckTime <= lastPingSendTime - pingTimeoutMillis) { + long timeSinceLastPingSend = System.currentTimeMillis() - lastPingSendTime; + channelIsUnhealthy(channel, new PingFailedException("Server did not respond to PING after " + + timeSinceLastPingSend + "ms (limit: " + + pingTimeoutMillis + "ms)")); + } else { + sendPing(channel); + } + 
} + + private void sendPing(Channel channel) { + channel.writeAndFlush(DEFAULT_PING_FRAME).addListener(res -> { + if (!res.isSuccess()) { + log.debug(() -> "Failed to write and flush PING frame to connection", res.cause()); + channelIsUnhealthy(channel, new PingFailedException("Failed to send PING to the service", res.cause())); + } else { + lastPingSendTime = System.currentTimeMillis(); + } + }); + } + + private void channelIsUnhealthy(Channel channel, PingFailedException exception) { + stop(); + channel.pipeline().fireExceptionCaught(exception); + } + + private void stop() { + if (periodicPing != null) { + periodicPing.cancel(false); + periodicPing = null; + } + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ResetSendingSubscription.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ResetSendingSubscription.java index f50aea83d4d9..534d394373a1 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ResetSendingSubscription.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ResetSendingSubscription.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2SettingsFrameHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2SettingsFrameHandler.java index 3d45cae301db..fb243ad79e74 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2SettingsFrameHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2SettingsFrameHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
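Http2PingHandler above boils down to one idiom: schedule a PING on the channel's own event loop and treat missing or failed responses as a dead connection. A condensed, illustrative version of the scheduling and failure-signaling half (ACK bookkeeping omitted; the 5-second interval is an assumption, not the SDK's configured timeout):

```java
import io.netty.channel.Channel;
import io.netty.handler.codec.http2.DefaultHttp2PingFrame;
import io.netty.util.concurrent.ScheduledFuture;
import java.io.IOException;
import java.util.concurrent.TimeUnit;

final class PeriodicPingSketch {
    // Returns the scheduled task; cancel(false) it when the channel goes inactive or the
    // handler is removed, mirroring stop() above.
    static ScheduledFuture<?> start(Channel channel) {
        return channel.eventLoop().scheduleAtFixedRate(() ->
            channel.writeAndFlush(new DefaultHttp2PingFrame(0)).addListener(f -> {
                if (!f.isSuccess()) {
                    // Surfacing the failure lets the pool close and replace the connection.
                    channel.pipeline().fireExceptionCaught(
                        new IOException("Failed to write PING frame", f.cause()));
                }
            }), 0, 5, TimeUnit.SECONDS);
    }
}
```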
@@ -28,7 +28,6 @@ import java.util.concurrent.atomic.AtomicReference; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.Protocol; -import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; /** * Configure channel based on the {@link Http2SettingsFrame} received from server @@ -56,22 +55,19 @@ protected void channelRead0(ChannelHandlerContext ctx, Http2SettingsFrame msg) { @Override public void channelUnregistered(ChannelHandlerContext ctx) { if (!channel.attr(PROTOCOL_FUTURE).get().isDone()) { - channelError(new IOException("The channel was closed before the protocol could be determined."), channel); + channelError(new IOException("The channel was closed before the protocol could be determined."), channel, ctx); } } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - channelError(cause, channel); + channelError(cause, channel, ctx); } - private void channelError(Throwable cause, Channel ch) { + private void channelError(Throwable cause, Channel ch, ChannelHandlerContext ctx) { ch.attr(PROTOCOL_FUTURE).get().completeExceptionally(cause); - MultiplexedChannelRecord record = ch.attr(ChannelAttributeKey.CHANNEL_POOL_RECORD).get(); - // Deliver the exception to any child channels registered to this connection. - if (record != null) { - record.shutdownChildChannels(cause); - } + ctx.fireExceptionCaught(cause); + // Channel status may still be active at this point even if it's not so queue up the close so that status is // accurately updated ch.eventLoop().submit(() -> { diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandler.java new file mode 100644 index 000000000000..d1ff14628f0a --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandler.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.handler.timeout.TimeoutException; +import java.io.IOException; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Logger; + +/** + * Exception Handler for errors on the Http2 streams. 
+ */ +@ChannelHandler.Sharable +@SdkInternalApi +public final class Http2StreamExceptionHandler extends ChannelInboundHandlerAdapter { + private static final Logger log = Logger.loggerFor(Http2StreamExceptionHandler.class); + private static final Http2StreamExceptionHandler INSTANCE = new Http2StreamExceptionHandler(); + + private Http2StreamExceptionHandler() { + } + + public static Http2StreamExceptionHandler create() { + return INSTANCE; + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + if (isIoError(cause) && ctx.channel().parent() != null) { + Channel parent = ctx.channel().parent(); + log.debug(() -> "An I/O error occurred on an Http2 stream, notifying the connection channel " + parent); + parent.pipeline().fireExceptionCaught(new Http2ConnectionTerminatingException("An I/O error occurred on an " + + "associated Http2 " + + "stream " + ctx.channel())); + } + + ctx.fireExceptionCaught(cause); + } + + private boolean isIoError(Throwable cause) { + return cause instanceof TimeoutException || cause instanceof IOException; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ToHttpInboundAdapter.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ToHttpInboundAdapter.java index da27876688a0..b4c55c0e96eb 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ToHttpInboundAdapter.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ToHttpInboundAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -16,24 +16,24 @@ package software.amazon.awssdk.http.nio.netty.internal.http2; import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.DefaultHttpContent; import io.netty.handler.codec.http.DefaultLastHttpContent; import io.netty.handler.codec.http.HttpObject; +import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http2.Http2DataFrame; import io.netty.handler.codec.http2.Http2Error; import io.netty.handler.codec.http2.Http2Exception; import io.netty.handler.codec.http2.Http2Frame; -import io.netty.handler.codec.http2.Http2GoAwayFrame; import io.netty.handler.codec.http2.Http2HeadersFrame; import io.netty.handler.codec.http2.Http2ResetFrame; import io.netty.handler.codec.http2.HttpConversionUtil; import java.io.IOException; -import java.nio.charset.StandardCharsets; - import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.http.HttpStatusFamily; +import software.amazon.awssdk.utils.Logger; /** * Converts {@link Http2Frame}s to {@link HttpObject}s. 
Ignores the majority of {@link Http2Frame}s like PING @@ -41,9 +41,7 @@ */ @SdkInternalApi public class Http2ToHttpInboundAdapter extends SimpleChannelInboundHandler { - - public Http2ToHttpInboundAdapter() { - } + private static final Logger log = Logger.loggerFor(Http2ToHttpInboundAdapter.class); @Override protected void channelRead0(ChannelHandlerContext ctx, Http2Frame frame) throws Exception { @@ -54,8 +52,6 @@ protected void channelRead0(ChannelHandlerContext ctx, Http2Frame frame) throws ctx.channel().read(); } else if (frame instanceof Http2ResetFrame) { onRstStreamRead((Http2ResetFrame) frame, ctx); - } else if (frame instanceof Http2GoAwayFrame) { - onGoAwayRead((Http2GoAwayFrame) frame, ctx); } else { // TODO this is related to the inbound window update bug. Revisit ctx.channel().parent().read(); @@ -63,7 +59,22 @@ protected void channelRead0(ChannelHandlerContext ctx, Http2Frame frame) throws } private void onHeadersRead(Http2HeadersFrame headersFrame, ChannelHandlerContext ctx) throws Http2Exception { - ctx.fireChannelRead(HttpConversionUtil.toHttpResponse(headersFrame.stream().id(), headersFrame.headers(), true)); + + HttpResponse httpResponse = HttpConversionUtil.toHttpResponse(headersFrame.stream().id(), headersFrame.headers(), true); + ctx.fireChannelRead(httpResponse); + + if (HttpStatusFamily.of(httpResponse.status().code()) == HttpStatusFamily.SERVER_ERROR) { + fireConnectionExceptionForServerError(ctx); + } + } + + private void fireConnectionExceptionForServerError(ChannelHandlerContext ctx) { + if (ctx.channel().parent() != null) { + Channel parent = ctx.channel().parent(); + log.debug(() -> "A 5xx server error occurred on an Http2 stream, notifying the connection channel " + ctx.channel()); + parent.pipeline().fireExceptionCaught(new Http2ConnectionTerminatingException("A 5xx server error occurred on an " + + "Http2 stream " + ctx.channel())); + } } private void onDataRead(Http2DataFrame dataFrame, ChannelHandlerContext ctx) throws Http2Exception { @@ -76,38 +87,14 @@ private void onDataRead(Http2DataFrame dataFrame, ChannelHandlerContext ctx) thr } } - private void onGoAwayRead(Http2GoAwayFrame goAwayFrame, ChannelHandlerContext ctx) throws Http2Exception { - ctx.fireExceptionCaught(new GoawayException(goAwayFrame.errorCode(), goAwayFrame.content())); - } - private void onRstStreamRead(Http2ResetFrame resetFrame, ChannelHandlerContext ctx) throws Http2Exception { ctx.fireExceptionCaught(new Http2ResetException(resetFrame.errorCode())); } - public static class Http2ResetException extends IOException { + public static final class Http2ResetException extends IOException { Http2ResetException(long errorCode) { super(String.format("Connection reset. Error - %s(%d)", Http2Error.valueOf(errorCode).name(), errorCode)); } } - - /** - * Exception thrown when a GOAWAY frame is sent by the service. - */ - private static class GoawayException extends IOException { - - private final long errorCode; - private final byte[] debugData; - - GoawayException(long errorCode, ByteBuf debugData) { - this.errorCode = errorCode; - this.debugData = BinaryUtils.copyBytesFrom(debugData.nioBuffer()); - } - - @Override - public String getMessage() { - return String.format("GOAWAY received. 
Error Code = %d, Debug Data = %s", - errorCode, new String(debugData, StandardCharsets.UTF_8)); - } - } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPool.java index b4ae604804d2..e10a612689ec 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPool.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,7 +15,6 @@ package software.amazon.awssdk.http.nio.netty.internal.http2; -import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.MAX_CONCURRENT_STREAMS; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.doInEventLoop; @@ -26,10 +25,15 @@ import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GenericFutureListener; import io.netty.util.concurrent.Promise; +import java.time.Duration; +import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.IdleConnectionCountingChannelPool; import software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; import software.amazon.awssdk.http.nio.netty.internal.utils.BetterFixedChannelPool; +import software.amazon.awssdk.metrics.MetricCollector; /** * Channel pool that establishes an initial connection to determine protocol. Delegates @@ -37,14 +41,16 @@ * all connections will be negotiated with the same protocol. 
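The new onHeadersRead() logic above treats a 5xx response as a hint that the whole connection, not just the stream, may be unhealthy. A simplified fragment showing the status-family check it relies on; it assumes the same internal package, since the terminating exception's constructor is package-private:

```java
import io.netty.channel.Channel;
import software.amazon.awssdk.http.HttpStatusFamily;

final class ServerErrorSignalSketch {
    static void onResponseStatus(Channel streamChannel, int statusCode) {
        if (HttpStatusFamily.of(statusCode) == HttpStatusFamily.SERVER_ERROR && streamChannel.parent() != null) {
            // Raise the problem on the parent (connection) channel so the multiplexed pool stops
            // handing out new streams on it; streams already in flight are left to finish.
            streamChannel.parent().pipeline().fireExceptionCaught(
                new Http2ConnectionTerminatingException("5xx received on stream " + streamChannel));
        }
    }
}
```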
*/ @SdkInternalApi -public class HttpOrHttp2ChannelPool implements ChannelPool { +public class HttpOrHttp2ChannelPool implements SdkChannelPool { private final ChannelPool delegatePool; private final int maxConcurrency; + private final EventLoopGroup eventLoopGroup; private final EventLoop eventLoop; private final NettyConfiguration configuration; + private boolean protocolImplPromiseInitializationStarted = false; private Promise protocolImplPromise; - private ChannelPool protocolImpl; + private BetterFixedChannelPool protocolImpl; private boolean closed; public HttpOrHttp2ChannelPool(ChannelPool delegatePool, @@ -53,8 +59,10 @@ public HttpOrHttp2ChannelPool(ChannelPool delegatePool, NettyConfiguration configuration) { this.delegatePool = delegatePool; this.maxConcurrency = maxConcurrency; + this.eventLoopGroup = group; this.eventLoop = group.next(); this.configuration = configuration; + this.protocolImplPromise = eventLoop.newPromise(); } @Override @@ -78,7 +86,7 @@ private void acquire0(Promise promise) { protocolImpl.acquire(promise); return; } - if (protocolImplPromise == null) { + if (!protocolImplPromiseInitializationStarted) { initializeProtocol(); } protocolImplPromise.addListener((GenericFutureListener>) future -> { @@ -96,7 +104,7 @@ private void acquire0(Promise promise) { * for {@link #protocolImpl}. */ private void initializeProtocol() { - protocolImplPromise = eventLoop.newPromise(); + protocolImplPromiseInitializationStarted = true; delegatePool.acquire().addListener((GenericFutureListener>) future -> { if (future.isSuccess()) { Channel newChannel = future.getNow(); @@ -121,27 +129,37 @@ private void initializeProtocol() { private void failProtocolImplPromise(Throwable e) { doInEventLoop(eventLoop, () -> { protocolImplPromise.setFailure(e); - protocolImplPromise = null; + protocolImplPromise = eventLoop.newPromise(); + protocolImplPromiseInitializationStarted = false; }); } - void completeProtocolConfiguration(Channel newChannel, Protocol protocol) { + private void completeProtocolConfiguration(Channel newChannel, Protocol protocol) { doInEventLoop(eventLoop, () -> { if (closed) { - newChannel.close(); - delegatePool.release(newChannel); - protocolImplPromise.setFailure(new IllegalStateException("Pool closed")); + closeAndRelease(newChannel, new IllegalStateException("Pool closed")); } else { - protocolImplPromise.setSuccess(configureProtocol(newChannel, protocol)); + try { + protocolImplPromise.setSuccess(configureProtocol(newChannel, protocol)); + } catch (Throwable e) { + closeAndRelease(newChannel, e); + } } }); } + private void closeAndRelease(Channel newChannel, Throwable e) { + newChannel.close(); + delegatePool.release(newChannel); + protocolImplPromise.setFailure(e); + } + private ChannelPool configureProtocol(Channel newChannel, Protocol protocol) { if (Protocol.HTTP1_1 == protocol) { // For HTTP/1.1 we use a traditional channel pool without multiplexing + SdkChannelPool idleConnectionMetricChannelPool = new IdleConnectionCountingChannelPool(eventLoop, delegatePool); protocolImpl = BetterFixedChannelPool.builder() - .channelPool(delegatePool) + .channelPool(idleConnectionMetricChannelPool) .executor(eventLoop) .acquireTimeoutAction(BetterFixedChannelPool.AcquireTimeoutAction.FAIL) .acquireTimeoutMillis(configuration.connectionAcquireTimeoutMillis()) @@ -149,8 +167,9 @@ private ChannelPool configureProtocol(Channel newChannel, Protocol protocol) { .maxPendingAcquires(configuration.maxPendingConnectionAcquires()) .build(); } else { - ChannelPool h2Pool = new 
Http2MultiplexedChannelPool( - delegatePool, eventLoop, newChannel.attr(MAX_CONCURRENT_STREAMS).get()); + Duration idleConnectionTimeout = configuration.reapIdleConnections() + ? Duration.ofMillis(configuration.idleTimeoutMillis()) : null; + SdkChannelPool h2Pool = new Http2MultiplexedChannelPool(delegatePool, eventLoopGroup, idleConnectionTimeout); protocolImpl = BetterFixedChannelPool.builder() .channelPool(h2Pool) .executor(eventLoop) @@ -201,7 +220,7 @@ private void close0() { closed = true; if (protocolImpl != null) { protocolImpl.close(); - } else if (protocolImplPromise != null) { + } else if (protocolImplPromiseInitializationStarted) { protocolImplPromise.addListener((Future f) -> { if (f.isSuccess()) { f.getNow().close(); @@ -213,4 +232,23 @@ private void close0() { delegatePool.close(); } } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + CompletableFuture result = new CompletableFuture<>(); + protocolImplPromise.addListener(f -> { + if (!f.isSuccess()) { + result.completeExceptionally(f.cause()); + } else { + protocolImpl.collectChannelPoolMetrics(metrics).whenComplete((m, t) -> { + if (t != null) { + result.completeExceptionally(t); + } else { + result.complete(m); + } + }); + } + }); + return result; + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpToHttp2OutboundAdapter.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpToHttp2OutboundAdapter.java index 9294ae76d4c5..881f50f64624 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpToHttp2OutboundAdapter.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpToHttp2OutboundAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -71,7 +71,8 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) // Convert and write the headers. Http2Headers http2Headers = HttpConversionUtil.toHttp2Headers(httpMsg, false); endStream = msg instanceof FullHttpMessage && !((FullHttpMessage) msg).content().isReadable(); - ctx.write(new DefaultHttp2HeadersFrame(http2Headers), promiseAggregator); + ctx.write(new DefaultHttp2HeadersFrame(http2Headers), promiseAggregator.newPromise()); + } if (!endStream && msg instanceof HttpContent) { @@ -91,11 +92,13 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) ByteBuf content = ((HttpContent) msg).content(); endStream = isLastContent && trailers.isEmpty(); release = false; - ctx.write(new DefaultHttp2DataFrame(content, endStream), promiseAggregator); + ctx.write(new DefaultHttp2DataFrame(content, endStream), promiseAggregator.newPromise()); + if (!trailers.isEmpty()) { // Write trailing headers. 
- ctx.write(new DefaultHttp2HeadersFrame(http2Trailers, true), promiseAggregator); + ctx.write(new DefaultHttp2HeadersFrame(http2Trailers, true), promiseAggregator.newPromise()); + } ctx.flush(); } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecord.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecord.java index 6598ace1643a..f1535be6cf63 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecord.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecord.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,151 +15,331 @@ package software.amazon.awssdk.http.nio.netty.internal.http2; -import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.CHANNEL_POOL_RECORD; -import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; -import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.asyncPromiseNotifyingBiConsumer; import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.doInEventLoop; -import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.promiseNotifyingListener; -import static software.amazon.awssdk.utils.NumericUtils.saturatedCast; +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.warnIfNotInEventLoop; import io.netty.channel.Channel; import io.netty.channel.ChannelId; -import io.netty.handler.codec.http2.ForkedHttp2StreamChannelBootstrap; +import io.netty.channel.ChannelOutboundInvoker; +import io.netty.handler.codec.http2.Http2GoAwayFrame; import io.netty.handler.codec.http2.Http2StreamChannel; +import io.netty.handler.codec.http2.Http2StreamChannelBootstrap; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GenericFutureListener; import io.netty.util.concurrent.Promise; +import io.netty.util.concurrent.ScheduledFuture; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -import java.util.function.BiConsumer; - +import java.util.function.Consumer; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.annotations.SdkTestInternalApi; -import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; +import software.amazon.awssdk.http.nio.netty.internal.UnusedChannelExceptionHandler; +import software.amazon.awssdk.utils.Logger; /** * Contains a {@link Future} for the actual socket channel and tracks available * streams based on the MAX_CONCURRENT_STREAMS setting for the connection. 
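The stream accounting mentioned here, claiming a stream against the connection's MAX_CONCURRENT_STREAMS budget and returning it on release, can be shown in isolation. This is an illustrative sketch with invented names, not the SDK's code.

    import java.util.concurrent.atomic.AtomicLong;

    final class StreamBudget {
        private final long maxConcurrentStreams;
        private final AtomicLong available;

        StreamBudget(long maxConcurrentStreams) {
            this.maxConcurrentStreams = maxConcurrentStreams;
            this.available = new AtomicLong(maxConcurrentStreams);
        }

        /** Attempts to reserve one stream; returns false when the connection is saturated. */
        boolean claim() {
            while (true) {
                long current = available.get();
                if (current <= 0) {
                    return false;
                }
                if (available.compareAndSet(current, current - 1)) {
                    return true;
                }
            }
        }

        /** Returns a previously claimed stream to the budget, guarding against a double release. */
        void release() {
            if (available.incrementAndGet() > maxConcurrentStreams) {
                available.decrementAndGet();
            }
        }
    }
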
*/ @SdkInternalApi -public final class MultiplexedChannelRecord { +public class MultiplexedChannelRecord { + private static final Logger log = Logger.loggerFor(MultiplexedChannelRecord.class); - private final Future connectionFuture; - private final Map childChannels; - private final AtomicLong availableStreams; - private final BiConsumer channelReleaser; + private final Channel connection; + private final long maxConcurrencyPerConnection; + private final Long allowedIdleConnectionTimeMillis; - private volatile Channel connection; + private final AtomicLong availableChildChannels; + private volatile long lastReserveAttemptTimeMillis; - /** - * @param connectionFuture Future for parent socket channel. - * @param maxConcurrencyPerConnection Max streams allowed per connection. - * @param channelReleaser Method to release a channel and record on failure. - */ - MultiplexedChannelRecord(Future connectionFuture, - long maxConcurrencyPerConnection, - BiConsumer channelReleaser) { - this.connectionFuture = connectionFuture; - this.availableStreams = new AtomicLong(maxConcurrencyPerConnection); - this.childChannels = new ConcurrentHashMap<>(saturatedCast(maxConcurrencyPerConnection)); - this.channelReleaser = channelReleaser; - } - - @SdkTestInternalApi - MultiplexedChannelRecord(Future connectionFuture, - Channel connection, - long maxConcurrencyPerConnection, - BiConsumer channelReleaser) { - this.connectionFuture = connectionFuture; - this.childChannels = new ConcurrentHashMap<>(saturatedCast(maxConcurrencyPerConnection)); - this.availableStreams = new AtomicLong(maxConcurrencyPerConnection); - this.channelReleaser = channelReleaser; + // Only read or write in the connection.eventLoop() + private final Map childChannels = new HashMap<>(); + private ScheduledFuture closeIfIdleTask; + + // Only write in the connection.eventLoop() + private volatile RecordState state = RecordState.OPEN; + + private volatile int lastStreamId; + + MultiplexedChannelRecord(Channel connection, long maxConcurrencyPerConnection, Duration allowedIdleConnectionTime) { this.connection = connection; + this.maxConcurrencyPerConnection = maxConcurrencyPerConnection; + this.availableChildChannels = new AtomicLong(maxConcurrencyPerConnection); + this.allowedIdleConnectionTimeMillis = allowedIdleConnectionTime == null ? null : allowedIdleConnectionTime.toMillis(); } - MultiplexedChannelRecord acquire(Promise channelPromise) { - availableStreams.decrementAndGet(); - if (connection != null) { - createChildChannel(channelPromise, connection); - } else { - connectionFuture.addListener((GenericFutureListener>) future -> { - if (future.isSuccess()) { - connection = future.getNow(); - connection.attr(CHANNEL_POOL_RECORD).set(this); - createChildChannel(channelPromise, connection); + boolean acquireStream(Promise promise) { + if (claimStream()) { + releaseClaimOnFailure(promise); + acquireClaimedStream(promise); + return true; + } + return false; + } + + void acquireClaimedStream(Promise promise) { + doInEventLoop(connection.eventLoop(), () -> { + if (state != RecordState.OPEN) { + String message; + // GOAWAY + if (state == RecordState.CLOSED_TO_NEW) { + message = String.format("Connection %s received GOAWAY with Last Stream ID %d. 
Unable to open new " + + "streams on this connection.", connection, lastStreamId); } else { - channelPromise.setFailure(future.cause()); - channelReleaser.accept(connection, this); + message = String.format("Connection %s was closed while acquiring new stream.", connection); + } + log.warn(() -> message); + promise.setFailure(new IOException(message)); + return; + } + + Future streamFuture = new Http2StreamChannelBootstrap(connection).open(); + streamFuture.addListener((GenericFutureListener>) future -> { + warnIfNotInEventLoop(connection.eventLoop()); + + if (!future.isSuccess()) { + promise.setFailure(future.cause()); + return; + } + + Http2StreamChannel channel = future.getNow(); + channel.pipeline().addLast(UnusedChannelExceptionHandler.getInstance()); + channel.attr(ChannelAttributeKey.HTTP2_FRAME_STREAM).set(channel.stream()); + childChannels.put(channel.id(), channel); + promise.setSuccess(channel); + + if (closeIfIdleTask == null && allowedIdleConnectionTimeMillis != null) { + enableCloseIfIdleTask(); } }); + }, promise); + } + + private void enableCloseIfIdleTask() { + warnIfNotInEventLoop(connection.eventLoop()); + + // Don't poll more frequently than 1 second. Being overly-conservative is okay. Blowing up our CPU is not. + long taskFrequencyMillis = Math.max(allowedIdleConnectionTimeMillis, 1_000); + + closeIfIdleTask = connection.eventLoop().scheduleAtFixedRate(this::closeIfIdle, taskFrequencyMillis, taskFrequencyMillis, + TimeUnit.MILLISECONDS); + connection.closeFuture().addListener(f -> closeIfIdleTask.cancel(false)); + } + + private void releaseClaimOnFailure(Promise promise) { + try { + promise.addListener(f -> { + if (!promise.isSuccess()) { + releaseClaim(); + } + }); + } catch (Throwable e) { + releaseClaim(); + throw e; + } + } + + private void releaseClaim() { + if (availableChildChannels.incrementAndGet() > maxConcurrencyPerConnection) { + assert false; + log.warn(() -> "Child channel count was caught attempting to be increased over max concurrency. " + + "Please report this issue to the AWS SDK for Java team."); + availableChildChannels.decrementAndGet(); } - return this; } /** - * Delivers the exception to all registered child channels. - * - * @param t Exception to deliver. + * Handle a {@link Http2GoAwayFrame} on this connection, preventing new streams from being created on it, and closing any + * streams newer than the last-stream-id on the go-away frame. */ - public void shutdownChildChannels(Throwable t) { - for (Channel childChannel : childChannels.values()) { - childChannel.pipeline().fireExceptionCaught(t); - } + void handleGoAway(int lastStreamId, GoAwayException exception) { + doInEventLoop(connection.eventLoop(), () -> { + this.lastStreamId = lastStreamId; + + if (state == RecordState.CLOSED) { + return; + } + + if (state == RecordState.OPEN) { + state = RecordState.CLOSED_TO_NEW; + } + + // Create a copy of the children to close, because fireExceptionCaught may remove from the childChannels. + List childrenToClose = new ArrayList<>(childChannels.values()); + childrenToClose.stream() + .filter(cc -> cc.stream().id() > lastStreamId) + .forEach(cc -> cc.pipeline().fireExceptionCaught(exception)); + }); } /** - * Bootstraps a child stream channel from the parent socket channel. Done in parent channel event loop. - * - * @param channelPromise Promise to notify when channel is available. - * @param parentChannel Parent socket channel. + * Prevent new streams from being acquired from the existing connection. 
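The GOAWAY handling above amounts to a small state machine: streams at or below the frame's last-stream-id may finish, newer streams are failed, and no new streams are opened on the connection. A standalone sketch of that bookkeeping, using plain integers in place of stream channels and invented names, might look like this.

    import java.util.ArrayList;
    import java.util.List;

    final class GoAwayTracker {
        enum State { OPEN, CLOSED_TO_NEW, CLOSED }

        private State state = State.OPEN;
        private int lastStreamId = Integer.MAX_VALUE;

        /** Returns the ids of streams that must be failed because the peer never processed them. */
        synchronized List<Integer> onGoAway(int lastStreamIdFromFrame, List<Integer> activeStreamIds) {
            this.lastStreamId = lastStreamIdFromFrame;
            if (state == State.OPEN) {
                state = State.CLOSED_TO_NEW; // existing streams may finish, new ones may not start
            }
            List<Integer> toFail = new ArrayList<>();
            for (int id : activeStreamIds) {
                if (id > this.lastStreamId) {
                    toFail.add(id);
                }
            }
            return toFail;
        }

        synchronized boolean canOpenNewStream() {
            return state == State.OPEN;
        }
    }
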
*/ - private void createChildChannel(Promise channelPromise, Channel parentChannel) { - doInEventLoop(parentChannel.eventLoop(), - () -> createChildChannel0(channelPromise, parentChannel), - channelPromise); + void closeToNewStreams() { + doInEventLoop(connection.eventLoop(), () -> { + if (state == RecordState.OPEN) { + state = RecordState.CLOSED_TO_NEW; + } + }); } - private void createChildChannel0(Promise channelPromise, Channel parentChannel) { - // Once protocol future is notified then parent pipeline is configured and ready to go - parentChannel.attr(PROTOCOL_FUTURE).get() - .whenComplete(asyncPromiseNotifyingBiConsumer(bootstrapChildChannel(parentChannel), channelPromise)); + /** + * Close all registered child channels, and prohibit new streams from being created on this connection. + */ + void closeChildChannels() { + closeAndExecuteOnChildChannels(ChannelOutboundInvoker::close); } /** - * Bootstraps the child stream channel and notifies the Promise on success or failure. - * - * @param parentChannel Parent socket channel. - * @return BiConsumer that will bootstrap the child channel. + * Delivers the exception to all registered child channels, and prohibits new streams being created on this connection. */ - private BiConsumer> bootstrapChildChannel(Channel parentChannel) { - return (s, p) -> new ForkedHttp2StreamChannelBootstrap(parentChannel) - .open() - .addListener((GenericFutureListener>) future -> { - if (future.isSuccess()) { - Http2StreamChannel channel = future.getNow(); - childChannels.put(channel.id(), channel); - } else { - if (!connection.isActive()) { - channelReleaser.accept(connection, this); - } - availableStreams.incrementAndGet(); - } - }) - .addListener(promiseNotifyingListener(p)); + void closeChildChannels(Throwable t) { + closeAndExecuteOnChildChannels(ch -> ch.pipeline().fireExceptionCaught(decorateConnectionException(t))); + } + + private Throwable decorateConnectionException(Throwable t) { + String message = "An error occurred on the connection: " + t.getMessage(); + if (t instanceof IOException) { + return new IOException(message, t); + } + + return new Throwable(message, t); } - void release(Channel channel) { - availableStreams.incrementAndGet(); - childChannels.remove(channel.id()); + private void closeAndExecuteOnChildChannels(Consumer childChannelConsumer) { + doInEventLoop(connection.eventLoop(), () -> { + if (state == RecordState.CLOSED) { + return; + } + state = RecordState.CLOSED; + + // Create a copy of the children, because they may be modified by the consumer. + List childrenToClose = new ArrayList<>(childChannels.values()); + for (Channel childChannel : childrenToClose) { + childChannelConsumer.accept(childChannel); + } + }); } - public Future getConnectionFuture() { - return connectionFuture; + void closeAndReleaseChild(Channel childChannel) { + childChannel.close(); + doInEventLoop(connection.eventLoop(), () -> { + childChannels.remove(childChannel.id()); + releaseClaim(); + }); } - long availableStreams() { - return availableStreams.get(); + private void closeIfIdle() { + warnIfNotInEventLoop(connection.eventLoop()); + + // Don't close if we have child channels. + if (!childChannels.isEmpty()) { + return; + } + + // Don't close if there have been any reserves attempted since the idle connection time. 
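+        // The volatile timestamp is copied into a local so that the comparison below and the later debug log observe the same value.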
+ long nonVolatileLastReserveAttemptTimeMillis = lastReserveAttemptTimeMillis; + if (nonVolatileLastReserveAttemptTimeMillis > System.currentTimeMillis() - allowedIdleConnectionTimeMillis) { + return; + } + + // Cut off new streams from being acquired from this connection by setting the number of available channels to 0. + // This write may fail if a reservation has happened since we checked the lastReserveAttemptTime. + if (!availableChildChannels.compareAndSet(maxConcurrencyPerConnection, 0)) { + return; + } + + // If we've been closed, no need to shut down. + if (state != RecordState.OPEN) { + return; + } + + log.debug(() -> "Connection " + connection + " has been idle for " + + (System.currentTimeMillis() - nonVolatileLastReserveAttemptTimeMillis) + "ms and will be shut down."); + + // Mark ourselves as closed + state = RecordState.CLOSED; + + // Start the shutdown process by closing the connection (which should be noticed by the connection pool) + connection.close(); + } + + public Channel getConnection() { + return connection; } + private boolean claimStream() { + lastReserveAttemptTimeMillis = System.currentTimeMillis(); + for (int attempt = 0; attempt < 5; ++attempt) { + + if (state != RecordState.OPEN) { + return false; + } + + long currentlyAvailable = availableChildChannels.get(); + + if (currentlyAvailable <= 0) { + return false; + } + if (availableChildChannels.compareAndSet(currentlyAvailable, currentlyAvailable - 1)) { + return true; + } + } + + return false; + } + + boolean canBeClosedAndReleased() { + return state != RecordState.OPEN && availableChildChannels.get() == maxConcurrencyPerConnection; + } + + CompletableFuture getMetrics() { + CompletableFuture result = new CompletableFuture<>(); + doInEventLoop(connection.eventLoop(), () -> { + int streamCount = childChannels.size(); + result.complete(new Metrics().setAvailableStreams(maxConcurrencyPerConnection - streamCount)); + }); + return result; + } + + private enum RecordState { + /** + * The connection is open and new streams may be acquired from it, if they are available. + */ + OPEN, + + /** + * The connection is open, but new streams may not be acquired from it. This occurs when a connection is being + * shut down (e.g. after it has received a GOAWAY frame), but all streams haven't been closed yet. + */ + CLOSED_TO_NEW, + + /** + * The connection is closed and new streams may not be acquired from it. + */ + CLOSED + } + + public static class Metrics { + private long availableStreams = 0; + + public long getAvailableStreams() { + return availableStreams; + } + + public Metrics setAvailableStreams(long availableStreams) { + this.availableStreams = availableStreams; + return this; + } + + public void add(Metrics rhs) { + this.availableStreams += rhs.availableStreams; + } + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/PingFailedException.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/PingFailedException.java new file mode 100644 index 000000000000..80103860f327 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/PingFailedException.java @@ -0,0 +1,30 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import java.io.IOException; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public final class PingFailedException extends IOException { + PingFailedException(String msg) { + super(msg); + } + + PingFailedException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/PingTracker.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/PingTracker.java new file mode 100644 index 000000000000..414ec20d834f --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/PingTracker.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import io.netty.util.concurrent.ScheduledFuture; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Tracking the status after sending out the PING frame + */ +@SdkInternalApi +public final class PingTracker { + + private final Supplier> timerFutureSupplier; + private ScheduledFuture pingTimerFuture; + + PingTracker(Supplier> timerFutureSupplier) { + this.timerFutureSupplier = timerFutureSupplier; + } + + public void start() { + pingTimerFuture = timerFutureSupplier.get(); + } + + public void cancel() { + if (pingTimerFuture != null) { + pingTimerFuture.cancel(false); + } + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/CancelledSubscriber.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/CancelledSubscriber.java new file mode 100644 index 000000000000..b2edb8b60c93 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/CancelledSubscriber.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * A cancelled subscriber. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +@SdkInternalApi +public final class CancelledSubscriber implements Subscriber { + + @Override + public void onSubscribe(Subscription subscription) { + if (subscription == null) { + throw new NullPointerException("Null subscription"); + } else { + subscription.cancel(); + } + } + + @Override + public void onNext(T t) { + } + + @Override + public void onError(Throwable error) { + if (error == null) { + throw new NullPointerException("Null error published"); + } + } + + @Override + public void onComplete() { + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DefaultStreamedHttpRequest.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DefaultStreamedHttpRequest.java new file mode 100644 index 000000000000..7d9a82c9bed9 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DefaultStreamedHttpRequest.java @@ -0,0 +1,81 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpVersion; +import java.util.Objects; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * A default streamed HTTP request. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. 
+ */ +@SdkInternalApi +public class DefaultStreamedHttpRequest extends DefaultHttpRequest implements StreamedHttpRequest { + + private final Publisher stream; + + public DefaultStreamedHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri, Publisher stream) { + super(httpVersion, method, uri); + this.stream = stream; + } + + public DefaultStreamedHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri, boolean validateHeaders, + Publisher stream) { + super(httpVersion, method, uri, validateHeaders); + this.stream = stream; + } + + @Override + public void subscribe(Subscriber subscriber) { + stream.subscribe(subscriber); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + DefaultStreamedHttpRequest that = (DefaultStreamedHttpRequest) o; + + return Objects.equals(stream, that.stream); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + (stream != null ? stream.hashCode() : 0); + return result; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DefaultStreamedHttpResponse.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DefaultStreamedHttpResponse.java new file mode 100644 index 000000000000..dc6fc82cb69b --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DefaultStreamedHttpResponse.java @@ -0,0 +1,81 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.handler.codec.http.DefaultHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import java.util.Objects; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * A default streamed HTTP response. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. 
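To make the role of these streamed message types concrete, the sketch below builds a DefaultStreamedHttpRequest whose body arrives as a Publisher of HttpContent chunks and then consumes it by subscription. The single-chunk publisher is a deliberately naive stand-in for a real body source, and the example is illustrative only; these are internal SDK classes, not intended for application code.

    import io.netty.buffer.Unpooled;
    import io.netty.handler.codec.http.DefaultHttpContent;
    import io.netty.handler.codec.http.HttpContent;
    import io.netty.handler.codec.http.HttpMethod;
    import io.netty.handler.codec.http.HttpVersion;
    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.atomic.AtomicBoolean;
    import org.reactivestreams.Publisher;
    import org.reactivestreams.Subscriber;
    import org.reactivestreams.Subscription;
    import software.amazon.awssdk.http.nio.netty.internal.nrs.DefaultStreamedHttpRequest;
    import software.amazon.awssdk.http.nio.netty.internal.nrs.StreamedHttpRequest;

    final class StreamedRequestExample {
        public static void main(String[] args) {
            // Deliberately naive single-chunk body publisher; a real source would honour
            // backpressure across many chunks.
            Publisher<HttpContent> body = subscriber -> subscriber.onSubscribe(new Subscription() {
                private final AtomicBoolean emitted = new AtomicBoolean();

                @Override
                public void request(long n) {
                    if (n > 0 && emitted.compareAndSet(false, true)) {
                        subscriber.onNext(new DefaultHttpContent(
                                Unpooled.copiedBuffer("hello", StandardCharsets.UTF_8)));
                        subscriber.onComplete();
                    }
                }

                @Override
                public void cancel() {
                    emitted.set(true);
                }
            });

            StreamedHttpRequest request =
                    new DefaultStreamedHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload", body);
            request.headers().set("content-type", "text/plain");

            // The headers travel on the HttpRequest itself; the body is obtained by subscribing.
            request.subscribe(new Subscriber<HttpContent>() {
                @Override public void onSubscribe(Subscription s) { s.request(Long.MAX_VALUE); }
                @Override public void onNext(HttpContent chunk) { System.out.println(chunk.content().readableBytes() + " body bytes"); }
                @Override public void onError(Throwable t) { t.printStackTrace(); }
                @Override public void onComplete() { System.out.println("request body complete"); }
            });
        }
    }
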
+ */ +@SdkInternalApi +public class DefaultStreamedHttpResponse extends DefaultHttpResponse implements StreamedHttpResponse { + + private final Publisher stream; + + public DefaultStreamedHttpResponse(HttpVersion version, HttpResponseStatus status, Publisher stream) { + super(version, status); + this.stream = stream; + } + + public DefaultStreamedHttpResponse(HttpVersion version, HttpResponseStatus status, boolean validateHeaders, + Publisher stream) { + super(version, status, validateHeaders); + this.stream = stream; + } + + @Override + public void subscribe(Subscriber subscriber) { + stream.subscribe(subscriber); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + DefaultStreamedHttpResponse that = (DefaultStreamedHttpResponse) o; + + return Objects.equals(stream, that.stream); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + (stream != null ? stream.hashCode() : 0); + return result; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateHttpMessage.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateHttpMessage.java new file mode 100644 index 000000000000..5cd6d3b68687 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateHttpMessage.java @@ -0,0 +1,83 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.handler.codec.DecoderResult; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMessage; +import io.netty.handler.codec.http.HttpVersion; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. 
+ */ +@SdkInternalApi +class DelegateHttpMessage implements HttpMessage { + protected final HttpMessage message; + + DelegateHttpMessage(HttpMessage message) { + this.message = message; + } + + @Override + @Deprecated + public HttpVersion getProtocolVersion() { + return message.protocolVersion(); + } + + @Override + public HttpVersion protocolVersion() { + return message.protocolVersion(); + } + + @Override + public HttpMessage setProtocolVersion(HttpVersion version) { + message.setProtocolVersion(version); + return this; + } + + @Override + public HttpHeaders headers() { + return message.headers(); + } + + @Override + @Deprecated + public DecoderResult getDecoderResult() { + return message.decoderResult(); + } + + @Override + public DecoderResult decoderResult() { + return message.decoderResult(); + } + + @Override + public void setDecoderResult(DecoderResult result) { + message.setDecoderResult(result); + } + + @Override + public String toString() { + return this.getClass().getName() + "(" + message.toString() + ")"; + } + +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateHttpRequest.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateHttpRequest.java new file mode 100644 index 000000000000..213a68824ff7 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateHttpRequest.java @@ -0,0 +1,80 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpVersion; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. 
+ */ +@SdkInternalApi +class DelegateHttpRequest extends DelegateHttpMessage implements HttpRequest { + + protected final HttpRequest request; + + DelegateHttpRequest(HttpRequest request) { + super(request); + this.request = request; + } + + @Override + public HttpRequest setMethod(HttpMethod method) { + request.setMethod(method); + return this; + } + + @Override + public HttpRequest setUri(String uri) { + request.setUri(uri); + return this; + } + + @Override + @Deprecated + public HttpMethod getMethod() { + return request.method(); + } + + @Override + public HttpMethod method() { + return request.method(); + } + + @Override + @Deprecated + public String getUri() { + return request.uri(); + } + + @Override + public String uri() { + return request.uri(); + } + + @Override + public HttpRequest setProtocolVersion(HttpVersion version) { + super.setProtocolVersion(version); + return this; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateHttpResponse.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateHttpResponse.java new file mode 100644 index 000000000000..e446aeb6df52 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateHttpResponse.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. 
+ */ +@SdkInternalApi +class DelegateHttpResponse extends DelegateHttpMessage implements HttpResponse { + + protected final HttpResponse response; + + DelegateHttpResponse(HttpResponse response) { + super(response); + this.response = response; + } + + @Override + public HttpResponse setStatus(HttpResponseStatus status) { + response.setStatus(status); + return this; + } + + @Override + @Deprecated + public HttpResponseStatus getStatus() { + return response.status(); + } + + @Override + public HttpResponseStatus status() { + return response.status(); + } + + @Override + public HttpResponse setProtocolVersion(HttpVersion version) { + super.setProtocolVersion(version); + return this; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateStreamedHttpRequest.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateStreamedHttpRequest.java new file mode 100644 index 000000000000..c3c495c2e724 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateStreamedHttpRequest.java @@ -0,0 +1,46 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpRequest; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +@SdkInternalApi +final class DelegateStreamedHttpRequest extends DelegateHttpRequest implements StreamedHttpRequest { + + private final Publisher stream; + + DelegateStreamedHttpRequest(HttpRequest request, Publisher stream) { + super(request); + this.stream = stream; + } + + @Override + public void subscribe(Subscriber subscriber) { + stream.subscribe(subscriber); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateStreamedHttpResponse.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateStreamedHttpResponse.java new file mode 100644 index 000000000000..5a5612679205 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/DelegateStreamedHttpResponse.java @@ -0,0 +1,46 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpResponse; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +@SdkInternalApi +final class DelegateStreamedHttpResponse extends DelegateHttpResponse implements StreamedHttpResponse { + + private final Publisher stream; + + DelegateStreamedHttpResponse(HttpResponse response, Publisher stream) { + super(response); + this.stream = stream; + } + + @Override + public void subscribe(Subscriber subscriber) { + stream.subscribe(subscriber); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/EmptyHttpRequest.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/EmptyHttpRequest.java new file mode 100644 index 000000000000..08c1eb9649b7 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/EmptyHttpRequest.java @@ -0,0 +1,160 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.util.ReferenceCountUtil; +import io.netty.util.ReferenceCounted; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +@SdkInternalApi +class EmptyHttpRequest extends DelegateHttpRequest implements FullHttpRequest { + + EmptyHttpRequest(HttpRequest request) { + super(request); + } + + @Override + public FullHttpRequest setUri(String uri) { + super.setUri(uri); + return this; + } + + @Override + public FullHttpRequest setMethod(HttpMethod method) { + super.setMethod(method); + return this; + } + + @Override + public FullHttpRequest setProtocolVersion(HttpVersion version) { + super.setProtocolVersion(version); + return this; + } + + @Override + public FullHttpRequest copy() { + if (request instanceof FullHttpRequest) { + return new EmptyHttpRequest(((FullHttpRequest) request).copy()); + } else { + DefaultHttpRequest copy = new DefaultHttpRequest(protocolVersion(), method(), uri()); + copy.headers().set(headers()); + return new EmptyHttpRequest(copy); + } + } + + @Override + public FullHttpRequest retain(int increment) { + ReferenceCountUtil.retain(message, increment); + return this; + } + + @Override + public FullHttpRequest retain() { + ReferenceCountUtil.retain(message); + return this; + } + + @Override + public FullHttpRequest touch() { + if (request instanceof FullHttpRequest) { + return ((FullHttpRequest) request).touch(); + } else { + return this; + } + } + + @Override + public FullHttpRequest touch(Object o) { + if (request instanceof FullHttpRequest) { + return ((FullHttpRequest) request).touch(o); + } else { + return this; + } + } + + @Override + public HttpHeaders trailingHeaders() { + return new DefaultHttpHeaders(); + } + + @Override + public FullHttpRequest duplicate() { + if (request instanceof FullHttpRequest) { + return ((FullHttpRequest) request).duplicate(); + } else { + return this; + } + } + + @Override + public FullHttpRequest retainedDuplicate() { + if (request instanceof FullHttpRequest) { + return ((FullHttpRequest) request).retainedDuplicate(); + } else { + return this; + } + } + + @Override + public FullHttpRequest replace(ByteBuf byteBuf) { + if (message instanceof FullHttpRequest) { + return ((FullHttpRequest) request).replace(byteBuf); + } else { + return this; + } + } + + @Override + public ByteBuf content() { + return Unpooled.EMPTY_BUFFER; + } + + @Override + public int refCnt() { + if (message instanceof ReferenceCounted) { + return ((ReferenceCounted) message).refCnt(); + } else { + return 1; + } + } + + @Override + public boolean release() { + return 
ReferenceCountUtil.release(message); + } + + @Override + public boolean release(int decrement) { + return ReferenceCountUtil.release(message, decrement); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/EmptyHttpResponse.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/EmptyHttpResponse.java new file mode 100644 index 000000000000..b514b5639161 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/EmptyHttpResponse.java @@ -0,0 +1,154 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.DefaultHttpResponse; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.util.ReferenceCountUtil; +import io.netty.util.ReferenceCounted; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. 
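The Empty* wrappers above let a header-only message flow through code that expects a full message. The same idea, expressed with public Netty types rather than the SDK's internal wrappers, is simply a full response whose content buffer has nothing to read.

    import io.netty.handler.codec.http.DefaultFullHttpResponse;
    import io.netty.handler.codec.http.FullHttpResponse;
    import io.netty.handler.codec.http.HttpResponseStatus;
    import io.netty.handler.codec.http.HttpVersion;

    final class HeaderOnlyResponseExample {
        public static void main(String[] args) {
            // A 204 has no body; modelling it as a FullHttpResponse with zero readable bytes lets
            // downstream handlers treat "no content" and "some content" uniformly.
            FullHttpResponse headersOnly =
                    new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NO_CONTENT);
            System.out.println(headersOnly.content().readableBytes()); // prints 0
        }
    }
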
+ */ +@SdkInternalApi +class EmptyHttpResponse extends DelegateHttpResponse implements FullHttpResponse { + + EmptyHttpResponse(HttpResponse response) { + super(response); + } + + @Override + public FullHttpResponse setStatus(HttpResponseStatus status) { + super.setStatus(status); + return this; + } + + @Override + public FullHttpResponse setProtocolVersion(HttpVersion version) { + super.setProtocolVersion(version); + return this; + } + + @Override + public FullHttpResponse copy() { + if (response instanceof FullHttpResponse) { + return new EmptyHttpResponse(((FullHttpResponse) response).copy()); + } else { + DefaultHttpResponse copy = new DefaultHttpResponse(protocolVersion(), status()); + copy.headers().set(headers()); + return new EmptyHttpResponse(copy); + } + } + + @Override + public FullHttpResponse retain(int increment) { + ReferenceCountUtil.retain(message, increment); + return this; + } + + @Override + public FullHttpResponse retain() { + ReferenceCountUtil.retain(message); + return this; + } + + @Override + public FullHttpResponse touch() { + if (response instanceof FullHttpResponse) { + return ((FullHttpResponse) response).touch(); + } else { + return this; + } + } + + @Override + public FullHttpResponse touch(Object o) { + if (response instanceof FullHttpResponse) { + return ((FullHttpResponse) response).touch(o); + } else { + return this; + } + } + + @Override + public HttpHeaders trailingHeaders() { + return new DefaultHttpHeaders(); + } + + @Override + public FullHttpResponse duplicate() { + if (response instanceof FullHttpResponse) { + return ((FullHttpResponse) response).duplicate(); + } else { + return this; + } + } + + @Override + public FullHttpResponse retainedDuplicate() { + if (response instanceof FullHttpResponse) { + return ((FullHttpResponse) response).retainedDuplicate(); + } else { + return this; + } + } + + @Override + public FullHttpResponse replace(ByteBuf byteBuf) { + if (response instanceof FullHttpResponse) { + return ((FullHttpResponse) response).replace(byteBuf); + } else { + return this; + } + } + + @Override + public ByteBuf content() { + return Unpooled.EMPTY_BUFFER; + } + + @Override + public int refCnt() { + if (message instanceof ReferenceCounted) { + return ((ReferenceCounted) message).refCnt(); + } else { + return 1; + } + } + + @Override + public boolean release() { + return ReferenceCountUtil.release(message); + } + + @Override + public boolean release(int decrement) { + return ReferenceCountUtil.release(message, decrement); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerPublisher.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerPublisher.java new file mode 100644 index 000000000000..0a29dab186de --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerPublisher.java @@ -0,0 +1,510 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandler; +import io.netty.channel.ChannelPipeline; +import io.netty.util.ReferenceCountUtil; +import io.netty.util.concurrent.EventExecutor; +import io.netty.util.internal.TypeParameterMatcher; +import java.util.LinkedList; +import java.util.Queue; +import java.util.concurrent.atomic.AtomicBoolean; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Publisher for a Netty Handler. + * + * This publisher supports only one subscriber. + * + * All interactions with the subscriber are done from the handlers executor, hence, they provide the same happens before + * semantics that Netty provides. + * + * The handler publishes all messages that match the type as specified by the passed in class. Any non matching messages + * are forwarded to the next handler. + * + * The publisher will signal complete if it receives a channel inactive event. + * + * The publisher will release any messages that it drops (for example, messages that are buffered when the subscriber + * cancels), but other than that, it does not release any messages. It is up to the subscriber to release messages. + * + * If the subscriber cancels, the publisher will send a close event up the channel pipeline. + * + * All errors will short circuit the buffer, and cause publisher to immediately call the subscribers onError method, + * dropping the buffer. + * + * The publisher can be subscribed to or placed in a handler chain in any order. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + */ +@SdkInternalApi +public class HandlerPublisher extends ChannelDuplexHandler implements Publisher { + /** + * Used for buffering a completion signal. + */ + private static final Object COMPLETE = new Object() { + @Override + public String toString() { + return "COMPLETE"; + } + }; + + private final EventExecutor executor; + private final TypeParameterMatcher matcher; + + private final Queue buffer = new LinkedList<>(); + + /** + * Whether a subscriber has been provided. This is used to detect whether two subscribers are subscribing + * simultaneously. + */ + private final AtomicBoolean hasSubscriber = new AtomicBoolean(); + + private State state = HandlerPublisher.State.NO_SUBSCRIBER_OR_CONTEXT; + + private volatile Subscriber subscriber; + private ChannelHandlerContext ctx; + private long outstandingDemand = 0; + private Throwable noSubscriberError; + + /** + * Create a handler publisher. + * + * The supplied executor must be the same event loop as the event loop that this handler is eventually registered + * with, if not, an exception will be thrown when the handler is registered. + * + * @param executor The executor to execute asynchronous events from the subscriber on. + * @param subscriberMessageType The type of message this publisher accepts. 
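As a rough illustration of the publisher semantics described in the Javadoc above (a sketch only, not part of this change): the snippet below assumes Netty's EmbeddedChannel purely for brevity, and writes out the generic type parameter (HandlerPublisher<HttpContent>) even though the declarations rendered in this diff appear with raw types. It shows a single subscriber receiving inbound messages of the requested type while other messages flow to the next handler.

```java
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.http.DefaultHttpContent;
import io.netty.handler.codec.http.HttpContent;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

public final class HandlerPublisherSketch {
    public static void main(String[] args) {
        EmbeddedChannel channel = new EmbeddedChannel();

        // The publisher must be created with the channel's own event loop and only
        // publishes inbound messages of the requested type (HttpContent here).
        HandlerPublisher<HttpContent> publisher =
            new HandlerPublisher<>(channel.eventLoop(), HttpContent.class);
        channel.pipeline().addLast(publisher);

        publisher.subscribe(new Subscriber<HttpContent>() {
            @Override
            public void onSubscribe(Subscription s) {
                s.request(Long.MAX_VALUE); // unbounded demand, just for the sketch
            }

            @Override
            public void onNext(HttpContent content) {
                System.out.println("received " + content.content().readableBytes() + " bytes");
                content.release(); // the subscriber is responsible for releasing messages
            }

            @Override
            public void onError(Throwable t) {
                t.printStackTrace();
            }

            @Override
            public void onComplete() {
                System.out.println("channel inactive, stream completed");
            }
        });

        // Matching inbound messages are routed to the subscriber; anything else is
        // forwarded to the next handler in the pipeline.
        channel.writeInbound(new DefaultHttpContent(Unpooled.wrappedBuffer(new byte[] {1, 2, 3})));
        channel.finish();
    }
}
```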
+ */ + public HandlerPublisher(EventExecutor executor, Class subscriberMessageType) { + this.executor = executor; + this.matcher = TypeParameterMatcher.get(subscriberMessageType); + } + + /** + * Returns {@code true} if the given message should be handled. If {@code false} it will be passed to the next + * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. + * + * @param msg The message to check. + * @return True if the message should be accepted. + */ + protected boolean acceptInboundMessage(Object msg) throws Exception { + return matcher.match(msg); + } + + /** + * Override to handle when a subscriber cancels the subscription. + * + * By default, this method will simply close the channel. + */ + protected void cancelled() { + ctx.close(); + } + + /** + * Override to intercept when demand is requested. + * + * By default, a channel read is invoked. + */ + protected void requestDemand() { + ctx.read(); + } + + enum State { + /** + * Initial state. There's no subscriber, and no context. + */ + NO_SUBSCRIBER_OR_CONTEXT, + + /** + * A subscriber has been provided, but no context has been provided. + */ + NO_CONTEXT, + + /** + * A context has been provided, but no subscriber has been provided. + */ + NO_SUBSCRIBER, + + /** + * An error has been received, but there's no subscriber to receive it. + */ + NO_SUBSCRIBER_ERROR, + + /** + * There is no demand, and we have nothing buffered. + */ + IDLE, + + /** + * There is no demand, and we're buffering elements. + */ + BUFFERING, + + /** + * We have nothing buffered, but there is demand. + */ + DEMANDING, + + /** + * The stream is complete, however there are still elements buffered for which no demand has come from the subscriber. + */ + DRAINING, + + /** + * We're done, in the terminal state. + */ + DONE + } + + @Override + public void subscribe(final Subscriber subscriber) { + if (subscriber == null) { + throw new NullPointerException("Null subscriber"); + } + + if (!hasSubscriber.compareAndSet(false, true)) { + // Must call onSubscribe first. + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + } + + @Override + public void cancel() { + } + }); + subscriber.onError(new IllegalStateException("This publisher only supports one subscriber")); + } else { + executor.execute(new Runnable() { + @Override + public void run() { + provideSubscriber(subscriber); + } + }); + } + } + + private void provideSubscriber(Subscriber subscriber) { + this.subscriber = subscriber; + switch (state) { + case NO_SUBSCRIBER_OR_CONTEXT: + state = HandlerPublisher.State.NO_CONTEXT; + break; + case NO_SUBSCRIBER: + if (buffer.isEmpty()) { + state = HandlerPublisher.State.IDLE; + } else { + state = HandlerPublisher.State.BUFFERING; + } + subscriber.onSubscribe(new ChannelSubscription()); + break; + case DRAINING: + subscriber.onSubscribe(new ChannelSubscription()); + break; + case NO_SUBSCRIBER_ERROR: + cleanup(); + state = HandlerPublisher.State.DONE; + subscriber.onSubscribe(new ChannelSubscription()); + subscriber.onError(noSubscriberError); + break; + default: + // Do nothing + } + } + + @Override + public void handlerAdded(ChannelHandlerContext ctx) throws Exception { + // If the channel is not yet registered, then it's not safe to invoke any methods on it, eg read() or close() + // So don't provide the context until it is registered. 
+ if (ctx.channel().isRegistered()) { + provideChannelContext(ctx); + } + } + + @Override + public void channelRegistered(ChannelHandlerContext ctx) throws Exception { + provideChannelContext(ctx); + ctx.fireChannelRegistered(); + } + + private void provideChannelContext(ChannelHandlerContext ctx) { + switch (state) { + case NO_SUBSCRIBER_OR_CONTEXT: + verifyRegisteredWithRightExecutor(ctx); + this.ctx = ctx; + // It's set, we don't have a subscriber + state = HandlerPublisher.State.NO_SUBSCRIBER; + break; + case NO_CONTEXT: + verifyRegisteredWithRightExecutor(ctx); + this.ctx = ctx; + state = HandlerPublisher.State.IDLE; + subscriber.onSubscribe(new ChannelSubscription()); + break; + default: + // Ignore, this could be invoked twice by both handlerAdded and channelRegistered. + } + } + + private void verifyRegisteredWithRightExecutor(ChannelHandlerContext ctx) { + if (!executor.inEventLoop()) { + throw new IllegalArgumentException("Channel handler MUST be registered with the same EventExecutor that it is " + + "created with."); + } + } + + @Override + public void channelActive(ChannelHandlerContext ctx) throws Exception { + // If we subscribed before the channel was active, then our read would have been ignored. + if (state == HandlerPublisher.State.DEMANDING) { + requestDemand(); + } + ctx.fireChannelActive(); + } + + private void receivedDemand(long demand) { + switch (state) { + case BUFFERING: + case DRAINING: + if (addDemand(demand)) { + flushBuffer(); + } + break; + + case DEMANDING: + addDemand(demand); + break; + + case IDLE: + if (addDemand(demand)) { + // Important to change state to demanding before doing a read, in case we get a synchronous + // read back. + state = HandlerPublisher.State.DEMANDING; + requestDemand(); + } + break; + default: + + } + } + + private boolean addDemand(long demand) { + + if (demand <= 0) { + illegalDemand(); + return false; + } else { + if (outstandingDemand < Long.MAX_VALUE) { + outstandingDemand += demand; + if (outstandingDemand < 0) { + outstandingDemand = Long.MAX_VALUE; + } + } + return true; + } + } + + private void illegalDemand() { + cleanup(); + subscriber.onError(new IllegalArgumentException("Request for 0 or negative elements in violation of Section 3.9 " + + "of the Reactive Streams specification")); + ctx.close(); + state = HandlerPublisher.State.DONE; + } + + private void flushBuffer() { + while (!buffer.isEmpty() && (outstandingDemand > 0 || outstandingDemand == Long.MAX_VALUE)) { + publishMessage(buffer.remove()); + } + if (buffer.isEmpty()) { + if (outstandingDemand > 0) { + if (state == HandlerPublisher.State.BUFFERING) { + state = HandlerPublisher.State.DEMANDING; + } // otherwise we're draining + requestDemand(); + } else if (state == HandlerPublisher.State.BUFFERING) { + state = HandlerPublisher.State.IDLE; + } + } + } + + private void receivedCancel() { + switch (state) { + case BUFFERING: + case DEMANDING: + case IDLE: + cancelled(); + state = HandlerPublisher.State.DONE; + break; + case DRAINING: + state = HandlerPublisher.State.DONE; + break; + default: + // ignore + } + cleanup(); + subscriber = null; + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object message) throws Exception { + if (acceptInboundMessage(message)) { + switch (state) { + case IDLE: + buffer.add(message); + state = HandlerPublisher.State.BUFFERING; + break; + case NO_SUBSCRIBER: + case BUFFERING: + buffer.add(message); + break; + case DEMANDING: + publishMessage(message); + break; + case DRAINING: + case DONE: + 
ReferenceCountUtil.release(message); + break; + case NO_CONTEXT: + case NO_SUBSCRIBER_OR_CONTEXT: + throw new IllegalStateException("Message received before added to the channel context"); + default: + // Ignore + } + } else { + ctx.fireChannelRead(message); + } + } + + private void publishMessage(Object message) { + if (COMPLETE.equals(message)) { + subscriber.onComplete(); + state = HandlerPublisher.State.DONE; + } else { + @SuppressWarnings("unchecked") + T next = (T) message; + subscriber.onNext(next); + if (outstandingDemand < Long.MAX_VALUE) { + outstandingDemand--; + if (outstandingDemand == 0 && state != HandlerPublisher.State.DRAINING) { + if (buffer.isEmpty()) { + state = HandlerPublisher.State.IDLE; + } else { + state = HandlerPublisher.State.BUFFERING; + } + } + } + } + } + + @Override + public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { + if (state == HandlerPublisher.State.DEMANDING) { + requestDemand(); + } + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) throws Exception { + complete(); + } + + @Override + public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { + complete(); + } + + private void complete() { + switch (state) { + case NO_SUBSCRIBER: + case BUFFERING: + buffer.add(COMPLETE); + state = HandlerPublisher.State.DRAINING; + break; + case DEMANDING: + case IDLE: + subscriber.onComplete(); + state = HandlerPublisher.State.DONE; + break; + case NO_SUBSCRIBER_ERROR: + // Ignore, we're already going to complete the stream with an error + // when the subscriber subscribes. + break; + default: + // Ignore + } + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + switch (state) { + case NO_SUBSCRIBER: + noSubscriberError = cause; + state = HandlerPublisher.State.NO_SUBSCRIBER_ERROR; + cleanup(); + break; + case BUFFERING: + case DEMANDING: + case IDLE: + case DRAINING: + state = HandlerPublisher.State.DONE; + cleanup(); + subscriber.onError(cause); + break; + default: + // Ignore + } + } + + /** + * Release all elements from the buffer. + */ + private void cleanup() { + while (!buffer.isEmpty()) { + ReferenceCountUtil.release(buffer.remove()); + } + } + + private class ChannelSubscription implements Subscription { + @Override + public void request(final long demand) { + executor.execute(new Runnable() { + @Override + public void run() { + receivedDemand(demand); + } + }); + } + + @Override + public void cancel() { + executor.execute(new Runnable() { + @Override + public void run() { + receivedCancel(); + } + }); + } + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriber.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriber.java new file mode 100644 index 000000000000..81e2a648a145 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriber.java @@ -0,0 +1,306 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.util.concurrent.EventExecutor; +import java.util.concurrent.atomic.AtomicBoolean; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.nio.netty.internal.utils.OrderedWriteChannelHandlerContext; +import software.amazon.awssdk.utils.Validate; + +/** + * Subscriber that publishes received messages to the handler pipeline. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +@SdkInternalApi +public class HandlerSubscriber extends ChannelDuplexHandler implements Subscriber { + static final long DEFAULT_LOW_WATERMARK = 4; + static final long DEFAULT_HIGH_WATERMARK = 16; + + private final EventExecutor executor; + private final long demandLowWatermark; + private final long demandHighWatermark; + + private final AtomicBoolean hasSubscription = new AtomicBoolean(); + + private volatile Subscription subscription; + private volatile ChannelHandlerContext ctx; + + private State state = HandlerSubscriber.State.NO_SUBSCRIPTION_OR_CONTEXT; + private long outstandingDemand = 0; + private ChannelFuture lastWriteFuture; + + /** + * Create a new handler subscriber. + * + * The supplied executor must be the same event loop as the event loop that this handler is eventually registered + * with, if not, an exception will be thrown when the handler is registered. + * + * @param executor The executor to execute asynchronous events from the publisher on. + * @param demandLowWatermark The low watermark for demand. When demand drops below this, more will be requested. + * @param demandHighWatermark The high watermark for demand. This is the maximum that will be requested. + */ + public HandlerSubscriber(EventExecutor executor, long demandLowWatermark, long demandHighWatermark) { + this.executor = executor; + this.demandLowWatermark = demandLowWatermark; + this.demandHighWatermark = demandHighWatermark; + } + + /** + * Create a new handler subscriber with the default low and high watermarks. + * + * The supplied executor must be the same event loop as the event loop that this handler is eventually registered + * with, if not, an exception will be thrown when the handler is registered. + * + * @param executor The executor to execute asynchronous events from the publisher on. + * @see #HandlerSubscriber(EventExecutor, long, long) + */ + public HandlerSubscriber(EventExecutor executor) { + this(executor, DEFAULT_LOW_WATERMARK, DEFAULT_HIGH_WATERMARK); + } + + /** + * Override for custom error handling. By default, it closes the channel. + * + * @param error The error to handle. + */ + protected void error(Throwable error) { + doClose(); + } + + /** + * Override for custom completion handling. 
By default, it closes the channel. + */ + protected void complete() { + doClose(); + } + + enum State { + NO_SUBSCRIPTION_OR_CONTEXT, + NO_SUBSCRIPTION, + NO_CONTEXT, + INACTIVE, + RUNNING, + CANCELLED, + COMPLETE + } + + @Override + public void handlerAdded(ChannelHandlerContext ctx) throws Exception { + verifyRegisteredWithRightExecutor(ctx); + + // Ensure that writes to the context happen consecutively, even if they're performed from within the event loop. + // See https://github.com/netty/netty/issues/7783 + ctx = OrderedWriteChannelHandlerContext.wrap(ctx); + + switch (state) { + case NO_SUBSCRIPTION_OR_CONTEXT: + this.ctx = ctx; + // We were in no subscription or context, now we just don't have a subscription. + state = HandlerSubscriber.State.NO_SUBSCRIPTION; + break; + case NO_CONTEXT: + this.ctx = ctx; + // We were in no context, we're now fully initialised + maybeStart(); + break; + case COMPLETE: + // We are complete, close + state = HandlerSubscriber.State.COMPLETE; + ctx.close(); + break; + default: + throw new IllegalStateException("This handler must only be added to a pipeline once " + state); + } + } + + @Override + public void channelRegistered(ChannelHandlerContext ctx) throws Exception { + verifyRegisteredWithRightExecutor(ctx); + ctx.fireChannelRegistered(); + } + + private void verifyRegisteredWithRightExecutor(ChannelHandlerContext ctx) { + if (ctx.channel().isRegistered() && !executor.inEventLoop()) { + throw new IllegalArgumentException("Channel handler MUST be registered with the same EventExecutor that " + + "it is created with."); + } + } + + @Override + public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception { + maybeRequestMore(); + ctx.fireChannelWritabilityChanged(); + } + + @Override + public void channelActive(ChannelHandlerContext ctx) throws Exception { + if (state == HandlerSubscriber.State.INACTIVE) { + state = HandlerSubscriber.State.RUNNING; + maybeRequestMore(); + } + ctx.fireChannelActive(); + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) throws Exception { + cancel(); + ctx.fireChannelInactive(); + } + + @Override + public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { + cancel(); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + cancel(); + ctx.fireExceptionCaught(cause); + } + + private void cancel() { + switch (state) { + case NO_SUBSCRIPTION: + state = HandlerSubscriber.State.CANCELLED; + break; + case RUNNING: + case INACTIVE: + subscription.cancel(); + state = HandlerSubscriber.State.CANCELLED; + break; + default: + // ignore + } + } + + @Override + public void onSubscribe(final Subscription subscription) { + if (subscription == null) { + throw new NullPointerException("Null subscription"); + } else if (!hasSubscription.compareAndSet(false, true)) { + subscription.cancel(); + } else { + this.subscription = subscription; + executor.execute(new Runnable() { + @Override + public void run() { + provideSubscription(); + } + }); + } + } + + private void provideSubscription() { + switch (state) { + case NO_SUBSCRIPTION_OR_CONTEXT: + state = HandlerSubscriber.State.NO_CONTEXT; + break; + case NO_SUBSCRIPTION: + maybeStart(); + break; + case CANCELLED: + subscription.cancel(); + break; + default: + // ignore + } + } + + private void maybeStart() { + if (ctx.channel().isActive()) { + state = HandlerSubscriber.State.RUNNING; + maybeRequestMore(); + } else { + state = HandlerSubscriber.State.INACTIVE; + } + } + 
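For the subscriber side described in the class Javadoc above, here is a minimal, hedged sketch (not part of this change; the channel and body publisher are assumed to come from elsewhere, and the generic parameter is written out) showing how a HandlerSubscriber streams a Reactive Streams body into a channel with watermark-based demand.

```java
import io.netty.channel.Channel;
import io.netty.handler.codec.http.HttpContent;
import org.reactivestreams.Publisher;

final class BodyWriterSketch {
    // Attaches a HandlerSubscriber to the channel and streams the body into it.
    static void streamBodyToChannel(Channel channel, Publisher<HttpContent> body) {
        HandlerSubscriber<HttpContent> writer =
            new HandlerSubscriber<>(channel.eventLoop()); // defaults: request up to 16, refill at 4
        channel.pipeline().addLast("body-writer", writer);

        // Each onNext is written and flushed; demand is topped back up to the high
        // watermark whenever it falls to the low watermark and the channel is writable.
        body.subscribe(writer);
    }

    private BodyWriterSketch() {
    }
}
```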
+ @Override + public void onNext(T t) { + // Publish straight to the context. + Validate.notNull(t, "Event must not be null."); + lastWriteFuture = ctx.writeAndFlush(t); + lastWriteFuture.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + outstandingDemand--; + maybeRequestMore(); + } + }); + } + + @Override + public void onError(final Throwable error) { + if (error == null) { + throw new NullPointerException("Null error published"); + } + error(error); + } + + @Override + public void onComplete() { + if (lastWriteFuture == null) { + complete(); + } else { + lastWriteFuture.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture channelFuture) throws Exception { + complete(); + } + }); + } + } + + private void doClose() { + executor.execute(new Runnable() { + @Override + public void run() { + switch (state) { + case NO_SUBSCRIPTION: + case INACTIVE: + case RUNNING: + ctx.close(); + state = HandlerSubscriber.State.COMPLETE; + break; + default: + // ignore + } + } + }); + } + + private void maybeRequestMore() { + if (outstandingDemand <= demandLowWatermark && ctx.channel().isWritable()) { + long toRequest = demandHighWatermark - outstandingDemand; + + outstandingDemand = demandHighWatermark; + subscription.request(toRequest); + } + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HttpStreamsClientHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HttpStreamsClientHandler.java new file mode 100644 index 000000000000..ce24f9c565d9 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HttpStreamsClientHandler.java @@ -0,0 +1,186 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.LastHttpContent; +import io.netty.util.ReferenceCountUtil; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Handler that converts written {@link StreamedHttpRequest} messages into {@link HttpRequest} messages + * followed by {@link HttpContent} messages and reads {@link HttpResponse} messages followed by + * {@link HttpContent} messages and produces {@link StreamedHttpResponse} messages. 
+ * + * This allows request and response bodies to be handled using reactive streams. + * + * There are two types of messages that this handler accepts for writing, {@link StreamedHttpRequest} and + * {@link FullHttpRequest}. Writing any other messages may potentially lead to HTTP message mangling. + * + * There are two types of messages that this handler will send down the chain, {@link StreamedHttpResponse}, + * and {@link FullHttpResponse}. If {@link io.netty.channel.ChannelOption#AUTO_READ} is false for the channel, + * then any {@link StreamedHttpResponse} messages must be subscribed to consume the body, otherwise + * it's possible that no read will be done of the messages. + * + * As long as messages are returned in the order that they arrive, this handler implicitly supports HTTP + * pipelining. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +@SdkInternalApi +public class HttpStreamsClientHandler extends HttpStreamsHandler { + + private int inFlight = 0; + private int withServer = 0; + private ChannelPromise closeOnZeroInFlight = null; + private Subscriber awaiting100Continue; + private StreamedHttpMessage awaiting100ContinueMessage; + private boolean ignoreResponseBody = false; + + public HttpStreamsClientHandler() { + super(HttpResponse.class, HttpRequest.class); + } + + @Override + protected boolean hasBody(HttpResponse response) { + if (response.status().code() >= 100 && response.status().code() < 200) { + return false; + } + + if (response.status().equals(HttpResponseStatus.NO_CONTENT) || + response.status().equals(HttpResponseStatus.NOT_MODIFIED)) { + return false; + } + + if (HttpUtil.isTransferEncodingChunked(response)) { + return true; + } + + + if (HttpUtil.isContentLengthSet(response)) { + return HttpUtil.getContentLength(response) > 0; + } + + return true; + } + + @Override + public void close(ChannelHandlerContext ctx, ChannelPromise future) throws Exception { + if (inFlight == 0) { + ctx.close(future); + } else { + closeOnZeroInFlight = future; + } + } + + @Override + protected void consumedInMessage(ChannelHandlerContext ctx) { + inFlight--; + withServer--; + if (inFlight == 0 && closeOnZeroInFlight != null) { + ctx.close(closeOnZeroInFlight); + } + } + + @Override + protected void receivedOutMessage(ChannelHandlerContext ctx) { + inFlight++; + } + + @Override + protected void sentOutMessage(ChannelHandlerContext ctx) { + withServer++; + } + + @Override + protected HttpResponse createEmptyMessage(HttpResponse response) { + return new EmptyHttpResponse(response); + } + + @Override + protected HttpResponse createStreamedMessage(HttpResponse response, Publisher stream) { + return new DelegateStreamedHttpResponse(response, stream); + } + + @Override + protected void subscribeSubscriberToStream(StreamedHttpMessage msg, Subscriber subscriber) { + if (HttpUtil.is100ContinueExpected(msg)) { + awaiting100Continue = subscriber; + awaiting100ContinueMessage = msg; + } else { + super.subscribeSubscriberToStream(msg, subscriber); + } + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + + if (msg 
instanceof HttpResponse && awaiting100Continue != null && withServer == 0) { + HttpResponse response = (HttpResponse) msg; + if (response.status().equals(HttpResponseStatus.CONTINUE)) { + super.subscribeSubscriberToStream(awaiting100ContinueMessage, awaiting100Continue); + awaiting100Continue = null; + awaiting100ContinueMessage = null; + if (msg instanceof FullHttpResponse) { + ReferenceCountUtil.release(msg); + } else { + ignoreResponseBody = true; + } + } else { + awaiting100ContinueMessage.subscribe(new CancelledSubscriber()); + awaiting100ContinueMessage = null; + awaiting100Continue.onSubscribe(new NoOpSubscription()); + awaiting100Continue.onComplete(); + awaiting100Continue = null; + super.channelRead(ctx, msg); + } + } else if (ignoreResponseBody && msg instanceof HttpContent) { + + ReferenceCountUtil.release(msg); + if (msg instanceof LastHttpContent) { + ignoreResponseBody = false; + } + } else { + super.channelRead(ctx, msg); + } + } + + private static class NoOpSubscription implements Subscription { + @Override + public void request(long n) { + } + + @Override + public void cancel() { + } + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HttpStreamsHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HttpStreamsHandler.java new file mode 100644 index 000000000000..e2c643093cc2 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HttpStreamsHandler.java @@ -0,0 +1,391 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.handler.codec.http.FullHttpMessage; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpMessage; +import io.netty.handler.codec.http.LastHttpContent; +import io.netty.util.ReferenceCountUtil; +import java.util.LinkedList; +import java.util.Queue; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. 
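To make the client handler's role concrete, the following is a hedged sketch of the intended pipeline shape (handler names, ordering, and the configure method are illustrative and not taken from this change): the streams handler sits directly behind the HTTP codec and converts between streamed and chunked messages.

```java
import io.netty.channel.Channel;
import io.netty.handler.codec.http.HttpClientCodec;

final class ClientPipelineSketch {
    // The streams handler sits directly behind the HTTP codec.
    static void configure(Channel channel) {
        channel.pipeline().addLast("codec", new HttpClientCodec());
        channel.pipeline().addLast("http-streams", new HttpStreamsClientHandler());
        // A StreamedHttpRequest written to this channel is emitted as an HttpRequest
        // followed by HttpContent chunks; inbound responses surface either as
        // FullHttpResponse (no body) or as StreamedHttpResponse, whose body publisher
        // must be subscribed to in order to be read.
    }

    private ClientPipelineSketch() {
    }
}
```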
+ */ +@SdkInternalApi +abstract class HttpStreamsHandler extends ChannelDuplexHandler { + + private final Queue outgoing = new LinkedList<>(); + private final Class inClass; + private final Class outClass; + + /** + * The incoming message that is currently being streamed out to a subscriber. + * + * This is tracked so that if its subscriber cancels, we can go into a mode where we ignore the rest of the body. + * Since subscribers may cancel as many times as they like, including well after they've received all their content, + * we need to track what the current message that's being streamed out is so that we can ignore it if it's not + * currently being streamed out. + */ + private InT currentlyStreamedMessage; + + /** + * Ignore the remaining reads for the incoming message. + * + * This is used in conjunction with currentlyStreamedMessage, as well as in situations where we have received the + * full body, but still might be expecting a last http content message. + */ + private boolean ignoreBodyRead; + + /** + * Whether a LastHttpContent message needs to be written once the incoming publisher completes. + * + * Since the publisher may itself publish a LastHttpContent message, we need to track this fact, because if it + * doesn't, then we need to write one ourselves. + */ + private boolean sendLastHttpContent; + + HttpStreamsHandler(Class inClass, Class outClass) { + this.inClass = inClass; + this.outClass = outClass; + } + + /** + * Whether the given incoming message has a body. + */ + protected abstract boolean hasBody(InT in); + + /** + * Create an empty incoming message. This must be of type FullHttpMessage, and is invoked when we've determined + * that an incoming message can't have a body, so we send it on as a FullHttpMessage. + */ + protected abstract InT createEmptyMessage(InT in); + + /** + * Create a streamed incoming message with the given stream. + */ + protected abstract InT createStreamedMessage(InT in, Publisher stream); + + /** + * Invoked when an incoming message is first received. + * + * Overridden by sub classes for state tracking. + */ + protected void receivedInMessage(ChannelHandlerContext ctx) { + } + + /** + * Invoked when an incoming message is fully consumed. + * + * Overridden by sub classes for state tracking. + */ + protected void consumedInMessage(ChannelHandlerContext ctx) { + } + + /** + * Invoked when an outgoing message is first received. + * + * Overridden by sub classes for state tracking. + */ + protected void receivedOutMessage(ChannelHandlerContext ctx) { + } + + /** + * Invoked when an outgoing message is fully sent. + * + * Overridden by sub classes for state tracking. + */ + protected void sentOutMessage(ChannelHandlerContext ctx) { + } + + /** + * Subscribe the given subscriber to the given streamed message. + * + * Provided so that the client subclass can intercept this to hold off sending the body of an expect 100 continue + * request. + */ + protected void subscribeSubscriberToStream(StreamedHttpMessage msg, Subscriber subscriber) { + msg.subscribe(subscriber); + } + + /** + * Invoked every time a read of the incoming body is requested by the subscriber. + * + * Provided so that the server subclass can intercept this to send a 100 continue response. 
+ */ + protected void bodyRequested(ChannelHandlerContext ctx) { + } + + @Override + public void channelRead(final ChannelHandlerContext ctx, Object msg) throws Exception { + + if (inClass.isInstance(msg)) { + + receivedInMessage(ctx); + InT inMsg = inClass.cast(msg); + + if (inMsg instanceof FullHttpMessage) { + + // Forward as is + ctx.fireChannelRead(inMsg); + consumedInMessage(ctx); + + } else if (!hasBody(inMsg)) { + + // Wrap in empty message + ctx.fireChannelRead(createEmptyMessage(inMsg)); + consumedInMessage(ctx); + + // There will be a LastHttpContent message coming after this, ignore it + ignoreBodyRead = true; + + } else { + + currentlyStreamedMessage = inMsg; + // It has a body, stream it + HandlerPublisher publisher = new HandlerPublisher(ctx.executor(), HttpContent.class) { + @Override + protected void cancelled() { + if (ctx.executor().inEventLoop()) { + handleCancelled(ctx, inMsg); + } else { + ctx.executor().execute(new Runnable() { + @Override + public void run() { + handleCancelled(ctx, inMsg); + } + }); + } + } + + @Override + protected void requestDemand() { + bodyRequested(ctx); + super.requestDemand(); + } + }; + + ctx.channel().pipeline().addAfter(ctx.name(), ctx.name() + "-body-publisher", publisher); + ctx.fireChannelRead(createStreamedMessage(inMsg, publisher)); + } + } else if (msg instanceof HttpContent) { + handleReadHttpContent(ctx, (HttpContent) msg); + } + } + + private void handleCancelled(ChannelHandlerContext ctx, InT msg) { + if (currentlyStreamedMessage == msg) { + ignoreBodyRead = true; + // Need to do a read in case the subscriber ignored a read completed. + ctx.read(); + } + } + + private void handleReadHttpContent(ChannelHandlerContext ctx, HttpContent content) { + if (!ignoreBodyRead) { + if (content instanceof LastHttpContent) { + + if (content.content().readableBytes() > 0 || + !((LastHttpContent) content).trailingHeaders().isEmpty()) { + // It has data or trailing headers, send them + ctx.fireChannelRead(content); + } else { + ReferenceCountUtil.release(content); + } + + removeHandlerIfActive(ctx, ctx.name() + "-body-publisher"); + currentlyStreamedMessage = null; + consumedInMessage(ctx); + + } else { + ctx.fireChannelRead(content); + } + + } else { + ReferenceCountUtil.release(content); + if (content instanceof LastHttpContent) { + ignoreBodyRead = false; + if (currentlyStreamedMessage != null) { + removeHandlerIfActive(ctx, ctx.name() + "-body-publisher"); + } + currentlyStreamedMessage = null; + } + } + } + + @Override + public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { + if (ignoreBodyRead) { + ctx.read(); + } else { + ctx.fireChannelReadComplete(); + } + } + + @Override + public void write(final ChannelHandlerContext ctx, Object msg, final ChannelPromise promise) throws Exception { + if (outClass.isInstance(msg)) { + + Outgoing out = new Outgoing(outClass.cast(msg), promise); + receivedOutMessage(ctx); + + if (outgoing.isEmpty()) { + outgoing.add(out); + flushNext(ctx); + } else { + outgoing.add(out); + } + + } else if (msg instanceof LastHttpContent) { + + sendLastHttpContent = false; + ctx.write(msg, promise); + } else { + + ctx.write(msg, promise); + } + } + + protected void unbufferedWrite(final ChannelHandlerContext ctx, final Outgoing out) { + + if (out.message instanceof FullHttpMessage) { + // Forward as is + ctx.writeAndFlush(out.message, out.promise); + out.promise.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture channelFuture) throws Exception { + 
executeInEventLoop(ctx, new Runnable() { + @Override + public void run() { + sentOutMessage(ctx); + outgoing.remove(); + flushNext(ctx); + } + }); + } + }); + + } else if (out.message instanceof StreamedHttpMessage) { + + StreamedHttpMessage streamed = (StreamedHttpMessage) out.message; + HandlerSubscriber subscriber = new HandlerSubscriber(ctx.executor()) { + @Override + protected void error(Throwable error) { + out.promise.tryFailure(error); + ctx.close(); + } + + @Override + protected void complete() { + executeInEventLoop(ctx, new Runnable() { + @Override + public void run() { + completeBody(ctx); + } + }); + } + }; + + sendLastHttpContent = true; + + // DON'T pass the promise through, create a new promise instead. + ctx.writeAndFlush(out.message); + + ctx.pipeline().addAfter(ctx.name(), ctx.name() + "-body-subscriber", subscriber); + subscribeSubscriberToStream(streamed, subscriber); + } + + } + + private void completeBody(final ChannelHandlerContext ctx) { + removeHandlerIfActive(ctx, ctx.name() + "-body-subscriber"); + + if (sendLastHttpContent) { + ChannelPromise promise = outgoing.peek().promise; + ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT, promise).addListener( + new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture channelFuture) throws Exception { + executeInEventLoop(ctx, new Runnable() { + @Override + public void run() { + outgoing.remove(); + sentOutMessage(ctx); + flushNext(ctx); + } + }); + } + } + ); + } else { + outgoing.remove().promise.setSuccess(); + sentOutMessage(ctx); + flushNext(ctx); + } + } + + /** + * Most operations we want to do even if the channel is not active, because if it's not, then we want to encounter + * the error that occurs when that operation happens and so that it can be passed up to the user. However, removing + * handlers should only be done if the channel is active, because the error that is encountered when they aren't + * makes no sense to the user (NoSuchElementException). + */ + private void removeHandlerIfActive(ChannelHandlerContext ctx, String name) { + if (ctx.channel().isActive()) { + ctx.pipeline().remove(name); + } + } + + private void flushNext(ChannelHandlerContext ctx) { + if (!outgoing.isEmpty()) { + unbufferedWrite(ctx, outgoing.element()); + } else { + ctx.fireChannelWritabilityChanged(); + } + } + + private void executeInEventLoop(ChannelHandlerContext ctx, Runnable runnable) { + if (ctx.executor().inEventLoop()) { + runnable.run(); + } else { + ctx.executor().execute(runnable); + } + } + + class Outgoing { + final OutT message; + final ChannelPromise promise; + + Outgoing(OutT message, ChannelPromise promise) { + this.message = message; + this.promise = promise; + } + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/StreamedHttpMessage.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/StreamedHttpMessage.java new file mode 100644 index 000000000000..23230e3c67fa --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/StreamedHttpMessage.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpMessage; +import org.reactivestreams.Publisher; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Combines {@link HttpMessage} and {@link Publisher} into one + * message. So it represents an http message with a stream of {@link HttpContent} + * messages that can be subscribed to. + * + * Note that receivers of this message must consume the publisher, + * since the publisher will exert back pressure up the stream if not consumed. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +@SdkInternalApi +public interface StreamedHttpMessage extends HttpMessage, Publisher { +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/StreamedHttpRequest.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/StreamedHttpRequest.java new file mode 100644 index 000000000000..750715ead4ef --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/StreamedHttpRequest.java @@ -0,0 +1,35 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.handler.codec.http.HttpRequest; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Combines {@link HttpRequest} and {@link StreamedHttpMessage} into one + * message. So it represents an http request with a stream of + * {@link io.netty.handler.codec.http.HttpContent} messages that can be subscribed to. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. 
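Because a StreamedHttpMessage exerts back pressure until its body publisher is consumed, a receiver generally needs to subscribe even when it intends to discard the body. A minimal sketch of draining a streamed message (the helper class and method below are illustrative, not part of this change):

```java
import io.netty.handler.codec.http.HttpContent;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

final class DrainBodySketch {
    // Drains and releases the body so the stream cannot stall the channel.
    static void discardBody(StreamedHttpMessage message) {
        message.subscribe(new Subscriber<HttpContent>() {
            @Override
            public void onSubscribe(Subscription s) {
                s.request(Long.MAX_VALUE); // no back pressure needed when discarding
            }

            @Override
            public void onNext(HttpContent content) {
                content.release(); // release each chunk we are not keeping
            }

            @Override
            public void onError(Throwable t) {
                // surface the failure to whoever owns the message
            }

            @Override
            public void onComplete() {
                // body fully consumed
            }
        });
    }

    private DrainBodySketch() {
    }
}
```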
+ */ +@SdkInternalApi +public interface StreamedHttpRequest extends HttpRequest, StreamedHttpMessage { +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/StreamedHttpResponse.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/StreamedHttpResponse.java new file mode 100644 index 000000000000..6ce4aa900375 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/StreamedHttpResponse.java @@ -0,0 +1,35 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.handler.codec.http.HttpResponse; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Combines {@link HttpResponse} and {@link StreamedHttpMessage} into one + * message. So it represents an http response with a stream of + * {@link io.netty.handler.codec.http.HttpContent} messages that can be subscribed to. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +@SdkInternalApi +public interface StreamedHttpResponse extends HttpResponse, StreamedHttpMessage { +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/package-info.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/package-info.java new file mode 100644 index 000000000000..ba9b003b0d3c --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/nrs/package-info.java @@ -0,0 +1,24 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +/** + * This package contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +package software.amazon.awssdk.http.nio.netty.internal.nrs; diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPool.java index 50a0d3c23e82..dac4f081ec1b 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPool.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,6 +15,8 @@ package software.amazon.awssdk.http.nio.netty.internal.utils; +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.doInEventLoop; + import io.netty.channel.Channel; import io.netty.channel.pool.ChannelPool; import io.netty.util.concurrent.DefaultPromise; @@ -28,21 +30,25 @@ import java.nio.channels.ClosedChannelException; import java.util.ArrayDeque; import java.util.Queue; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; +import software.amazon.awssdk.metrics.MetricCollector; /** * {@link ChannelPool} implementation that takes another {@link ChannelPool} implementation and enforce a maximum * number of concurrent connections. 
*/ //TODO: Contribute me back to Netty -public class BetterFixedChannelPool implements ChannelPool { +public class BetterFixedChannelPool implements SdkChannelPool { private static final IllegalStateException FULL_EXCEPTION = ThrowableUtil.unknownStackTrace( new IllegalStateException("Too many outstanding acquire operations"), BetterFixedChannelPool.class, "acquire0(...)"); private static final TimeoutException TIMEOUT_EXCEPTION = ThrowableUtil.unknownStackTrace( - new TimeoutException("Acquire operation took longer then configured maximum time"), + new TimeoutException("Acquire operation took longer than configured maximum time"), BetterFixedChannelPool.class, "(...)"); static final IllegalStateException POOL_CLOSED_ON_RELEASE_EXCEPTION = ThrowableUtil.unknownStackTrace( new IllegalStateException("BetterFixedChannelPooled was closed"), @@ -66,11 +72,11 @@ public enum AcquireTimeoutAction { private final EventExecutor executor; private final long acquireTimeoutNanos; private final Runnable timeoutTask; - private final ChannelPool delegateChannelPool; + private final SdkChannelPool delegateChannelPool; // There is no need to worry about synchronization as everything that modified the queue or counts is done // by the above EventExecutor. - private final Queue pendingAcquireQueue = new ArrayDeque(); + private final Queue pendingAcquireQueue = new ArrayDeque<>(); private final int maxConnections; private final int maxPendingAcquires; private int acquiredChannelCount; @@ -137,12 +143,7 @@ public Future acquire(final Promise promise) { if (executor.inEventLoop()) { acquire0(promise); } else { - executor.execute(new Runnable() { - @Override - public void run() { - acquire0(promise); - } - }); + executor.execute(() -> acquire0(promise)); } } catch (Throwable cause) { promise.setFailure(cause); @@ -150,6 +151,22 @@ public void run() { return promise; } + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + CompletableFuture delegateMetricResult = delegateChannelPool.collectChannelPoolMetrics(metrics); + CompletableFuture result = new CompletableFuture<>(); + doInEventLoop(executor, () -> { + try { + metrics.reportMetric(HttpMetric.MAX_CONCURRENCY, this.maxConnections); + metrics.reportMetric(HttpMetric.PENDING_CONCURRENCY_ACQUIRES, this.pendingAcquireCount); + metrics.reportMetric(HttpMetric.LEASED_CONCURRENCY, this.acquiredChannelCount); + result.complete(null); + } catch (Throwable t) { + result.completeExceptionally(t); + } + }); + return CompletableFuture.allOf(result, delegateMetricResult); + } + private void acquire0(final Promise promise) { assert executor.inEventLoop(); @@ -348,12 +365,7 @@ public void close() { if (executor.inEventLoop()) { close0(); } else { - executor.submit(new Runnable() { - @Override - public void run() { - close0(); - } - }).awaitUninterruptibly(); + executor.submit(() -> close0()).awaitUninterruptibly(); } } @@ -376,12 +388,7 @@ private void close0() { // Ensure we dispatch this on another Thread as close0 will be called from the EventExecutor and we need // to ensure we will not block in a EventExecutor. 
- GlobalEventExecutor.INSTANCE.execute(new Runnable() { - @Override - public void run() { - delegateChannelPool.close(); - } - }); + GlobalEventExecutor.INSTANCE.execute(() -> delegateChannelPool.close()); } } @@ -391,7 +398,7 @@ public static Builder builder() { public static final class Builder { - private ChannelPool channelPool; + private SdkChannelPool channelPool; private EventExecutor executor; private AcquireTimeoutAction action; private long acquireTimeoutMillis; @@ -401,7 +408,7 @@ public static final class Builder { private Builder() { } - public Builder channelPool(ChannelPool channelPool) { + public Builder channelPool(SdkChannelPool channelPool) { this.channelPool = channelPool; return this; } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelUtils.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelUtils.java index 2b437e70630e..0901fa8e0590 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelUtils.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -26,7 +26,8 @@ @SdkInternalApi public final class ChannelUtils { - private ChannelUtils() {} + private ChannelUtils() { + } /** * Removes handlers of the given class types from the pipeline. diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/DelegatingChannelHandlerContext.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/DelegatingChannelHandlerContext.java new file mode 100644 index 000000000000..d1c5f43de607 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/DelegatingChannelHandlerContext.java @@ -0,0 +1,248 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.utils; + +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.ChannelProgressivePromise; +import io.netty.channel.ChannelPromise; +import io.netty.util.Attribute; +import io.netty.util.AttributeKey; +import io.netty.util.concurrent.EventExecutor; +import java.net.SocketAddress; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * An abstract implementation of {@link ChannelHandlerContext} that delegates to another + * context for non-overridden methods. + */ +@SdkInternalApi +public abstract class DelegatingChannelHandlerContext implements ChannelHandlerContext { + private final ChannelHandlerContext delegate; + + public DelegatingChannelHandlerContext(ChannelHandlerContext delegate) { + this.delegate = delegate; + } + + @Override + public Channel channel() { + return delegate.channel(); + } + + @Override + public EventExecutor executor() { + return delegate.executor(); + } + + @Override + public String name() { + return delegate.name(); + } + + @Override + public ChannelHandler handler() { + return delegate.handler(); + } + + @Override + public boolean isRemoved() { + return delegate.isRemoved(); + } + + @Override + public ChannelHandlerContext fireChannelRegistered() { + return delegate.fireChannelRegistered(); + } + + @Override + public ChannelHandlerContext fireChannelUnregistered() { + return delegate.fireChannelUnregistered(); + } + + @Override + public ChannelHandlerContext fireChannelActive() { + return delegate.fireChannelActive(); + } + + @Override + public ChannelHandlerContext fireChannelInactive() { + return delegate.fireChannelInactive(); + } + + @Override + public ChannelHandlerContext fireExceptionCaught(Throwable cause) { + return delegate.fireExceptionCaught(cause); + } + + @Override + public ChannelHandlerContext fireUserEventTriggered(Object evt) { + return delegate.fireUserEventTriggered(evt); + } + + @Override + public ChannelHandlerContext fireChannelRead(Object msg) { + return delegate.fireChannelRead(msg); + } + + @Override + public ChannelHandlerContext fireChannelReadComplete() { + return delegate.fireChannelReadComplete(); + } + + @Override + public ChannelHandlerContext fireChannelWritabilityChanged() { + return delegate.fireChannelWritabilityChanged(); + } + + @Override + public ChannelFuture bind(SocketAddress localAddress) { + return delegate.bind(localAddress); + } + + @Override + public ChannelFuture connect(SocketAddress remoteAddress) { + return delegate.connect(remoteAddress); + } + + @Override + public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) { + return delegate.connect(remoteAddress, localAddress); + } + + @Override + public ChannelFuture disconnect() { + return delegate.disconnect(); + } + + @Override + public ChannelFuture close() { + return delegate.close(); + } + + @Override + public ChannelFuture deregister() { + return delegate.deregister(); + } + + @Override + public ChannelFuture bind(SocketAddress localAddress, ChannelPromise promise) { + return delegate.bind(localAddress, promise); + } + + @Override + public ChannelFuture connect(SocketAddress remoteAddress, ChannelPromise promise) { + return delegate.connect(remoteAddress, promise); + } + + @Override + public ChannelFuture connect(SocketAddress remoteAddress, 
SocketAddress localAddress, ChannelPromise promise) { + return delegate.connect(remoteAddress, localAddress, promise); + } + + @Override + public ChannelFuture disconnect(ChannelPromise promise) { + return delegate.disconnect(promise); + } + + @Override + public ChannelFuture close(ChannelPromise promise) { + return delegate.close(promise); + } + + @Override + public ChannelFuture deregister(ChannelPromise promise) { + return delegate.deregister(promise); + } + + @Override + public ChannelHandlerContext read() { + return delegate.read(); + } + + @Override + public ChannelFuture write(Object msg) { + return delegate.write(msg); + } + + @Override + public ChannelFuture write(Object msg, ChannelPromise promise) { + return delegate.write(msg, promise); + } + + @Override + public ChannelHandlerContext flush() { + return delegate.flush(); + } + + @Override + public ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) { + return delegate.writeAndFlush(msg, promise); + } + + @Override + public ChannelFuture writeAndFlush(Object msg) { + return delegate.writeAndFlush(msg); + } + + @Override + public ChannelPromise newPromise() { + return delegate.newPromise(); + } + + @Override + public ChannelProgressivePromise newProgressivePromise() { + return delegate.newProgressivePromise(); + } + + @Override + public ChannelFuture newSucceededFuture() { + return delegate.newSucceededFuture(); + } + + @Override + public ChannelFuture newFailedFuture(Throwable cause) { + return delegate.newFailedFuture(cause); + } + + @Override + public ChannelPromise voidPromise() { + return delegate.voidPromise(); + } + + @Override + public ChannelPipeline pipeline() { + return delegate.pipeline(); + } + + @Override + public ByteBufAllocator alloc() { + return delegate.alloc(); + } + + @Override + public Attribute attr(AttributeKey key) { + return delegate.attr(key); + } + + @Override + public boolean hasAttr(AttributeKey key) { + return delegate.hasAttr(key); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ExceptionHandlingUtils.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ExceptionHandlingUtils.java index 74d53b02a338..edf117884672 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ExceptionHandlingUtils.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ExceptionHandlingUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -22,7 +22,8 @@ @SdkInternalApi public final class ExceptionHandlingUtils { - private ExceptionHandlingUtils() {} + private ExceptionHandlingUtils() { + } /** * Runs a task within try-catch block. 
All exceptions thrown from the execution diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java index 6fa8842ec503..0e5a69881161 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,6 +15,11 @@ package software.amazon.awssdk.http.nio.netty.internal.utils; +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.EventLoop; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslHandler; +import io.netty.util.AttributeKey; import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GenericFutureListener; @@ -23,16 +28,28 @@ import java.util.concurrent.CompletableFuture; import java.util.function.BiConsumer; import java.util.function.Function; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Logger; @SdkInternalApi public final class NettyUtils { - /** * Completed succeed future. */ public static final SucceededFuture SUCCEEDED_FUTURE = new SucceededFuture<>(null, null); + // TODO: add a link to the guide on how to diagnose this error here once it's available + public static final String CLOSED_CHANNEL_MESSAGE = "The channel was closed. This may have been done by the client (e.g. " + + "because the request was aborted), " + + "by the service (e.g. because there was a handshake error, the request " + + "took too long, or the client tried to write on a read-only socket), " + + "or by an intermediary party (e.g. because the channel was idle for too" + + " long)."; + + private static final Logger log = Logger.loggerFor(NettyUtils.class); + private NettyUtils() { } @@ -55,7 +72,7 @@ private NettyUtils() { } else { try { promise.setSuccess(successFunction.apply(success)); - } catch (Exception e) { + } catch (Throwable e) { promise.setFailure(e); } } @@ -82,7 +99,7 @@ private NettyUtils() { } else { try { successConsumer.accept(success, promise); - } catch (Exception e) { + } catch (Throwable e) { // If the successConsumer fails synchronously then we can notify the promise. If it fails asynchronously // it's up to the successConsumer to notify. 
promise.setFailure(e); @@ -132,9 +149,66 @@ public static void doInEventLoop(EventExecutor eventExecutor, Runnable runnable) */ public static void doInEventLoop(EventExecutor eventExecutor, Runnable runnable, Promise promise) { try { - doInEventLoop(eventExecutor, runnable); - } catch (Exception e) { + if (eventExecutor.inEventLoop()) { + runnable.run(); + } else { + eventExecutor.submit(() -> { + try { + runnable.run(); + } catch (Throwable e) { + promise.setFailure(e); + } + }); + } + } catch (Throwable e) { promise.setFailure(e); } } + + public static void warnIfNotInEventLoop(EventLoop loop) { + assert loop.inEventLoop(); + if (!loop.inEventLoop()) { + Exception exception = + new IllegalStateException("Execution is not in the expected event loop. Please report this issue to the " + + "AWS SDK for Java team on GitHub, because it could result in race conditions."); + log.warn(() -> "Execution is happening outside of the expected event loop.", exception); + } + } + + /** + * @return an {@code AttributeKey} for {@code attr}. This returns an existing instance if it was previously created. + */ + public static AttributeKey getOrCreateAttributeKey(String attr) { + if (AttributeKey.exists(attr)) { + return AttributeKey.valueOf(attr); + } + //CHECKSTYLE:OFF - This is the only place allowed to call AttributeKey.newInstance() + return AttributeKey.newInstance(attr); + //CHECKSTYLE:ON + } + + /** + * @return a new {@link SslHandler} with ssl engine configured + */ + public static SslHandler newSslHandler(SslContext sslContext, ByteBufAllocator alloc, String peerHost, int peerPort) { + // Need to provide host and port to enable SNI + // https://github.com/netty/netty/issues/3801#issuecomment-104274440 + SslHandler sslHandler = sslContext.newHandler(alloc, peerHost, peerPort); + configureSslEngine(sslHandler.engine()); + return sslHandler; + } + + /** + * Enable Hostname verification. + * + * See https://netty.io/4.0/api/io/netty/handler/ssl/SslContext.html#newHandler-io.netty.buffer.ByteBufAllocator-java.lang + * .String-int- + * + * @param sslEngine the sslEngine to configure + */ + private static void configureSslEngine(SSLEngine sslEngine) { + SSLParameters sslParameters = sslEngine.getSSLParameters(); + sslParameters.setEndpointIdentificationAlgorithm("HTTPS"); + sslEngine.setSSLParameters(sslParameters); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/OrderedWriteChannelHandlerContext.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/OrderedWriteChannelHandlerContext.java new file mode 100644 index 000000000000..996c06534c21 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/OrderedWriteChannelHandlerContext.java @@ -0,0 +1,83 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.utils; + +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * An implementation of {@link ChannelHandlerContext} that ensures all writes are performed in the order they are invoked. + * + * This works around https://github.com/netty/netty/issues/7783 where writes by an event loop 'skip ahead' of writes off of the + * event loop. + */ +@SdkInternalApi +public class OrderedWriteChannelHandlerContext extends DelegatingChannelHandlerContext { + private OrderedWriteChannelHandlerContext(ChannelHandlerContext delegate) { + super(delegate); + } + + public static ChannelHandlerContext wrap(ChannelHandlerContext ctx) { + return new OrderedWriteChannelHandlerContext(ctx); + } + + @Override + public ChannelFuture write(Object msg) { + return doInOrder(promise -> super.write(msg, promise)); + } + + @Override + public ChannelFuture write(Object msg, ChannelPromise promise) { + doInOrder(() -> super.write(msg, promise)); + return promise; + } + + @Override + public ChannelFuture writeAndFlush(Object msg) { + return doInOrder(promise -> super.writeAndFlush(msg, promise)); + } + + @Override + public ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) { + doInOrder(() -> super.writeAndFlush(msg, promise)); + return promise; + } + + private ChannelFuture doInOrder(Consumer task) { + ChannelPromise promise = newPromise(); + if (!channel().eventLoop().inEventLoop()) { + task.accept(promise); + } else { + // If we're in the event loop, queue a task to perform the write, so that it occurs after writes that were scheduled + // off of the event loop. + channel().eventLoop().execute(() -> task.accept(promise)); + } + return promise; + } + + private void doInOrder(Runnable task) { + if (!channel().eventLoop().inEventLoop()) { + task.run(); + } else { + // If we're in the event loop, queue a task to perform the write, so that it occurs after writes that were scheduled + // off of the event loop. + channel().eventLoop().execute(task); + } + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolver.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolver.java index e1302fe2f74e..1d80dad5850f 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolver.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolver.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
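The OrderedWriteChannelHandlerContext added above is consumed by wrapping an existing handler context. Below is a minimal sketch of how a handler might use it; the handler itself is hypothetical and not part of this change, only OrderedWriteChannelHandlerContext.wrap comes from the new class.

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import software.amazon.awssdk.http.nio.netty.internal.utils.OrderedWriteChannelHandlerContext;

public class OrderedWriteExampleHandler extends ChannelInboundHandlerAdapter {
    private ChannelHandlerContext orderedCtx;

    @Override
    public void handlerAdded(ChannelHandlerContext ctx) {
        // Wrap once; writes issued through the wrapped context from inside the event loop
        // are re-queued onto the loop so they cannot jump ahead of writes submitted from
        // other threads (the netty/netty#7783 workaround described in the class javadoc).
        this.orderedCtx = OrderedWriteChannelHandlerContext.wrap(ctx);
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        // Write back through the ordered context rather than through ctx directly.
        orderedCtx.writeAndFlush(msg);
    }
}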
diff --git a/http-clients/netty-nio-client/src/main/resources/META-INF/services/software.amazon.awssdk.http.async.SdkAsyncHttpService b/http-clients/netty-nio-client/src/main/resources/META-INF/services/software.amazon.awssdk.http.async.SdkAsyncHttpService index 22e47f11f670..c3e05ecd129a 100644 --- a/http-clients/netty-nio-client/src/main/resources/META-INF/services/software.amazon.awssdk.http.async.SdkAsyncHttpService +++ b/http-clients/netty-nio-client/src/main/resources/META-INF/services/software.amazon.awssdk.http.async.SdkAsyncHttpService @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ClientTlsAuthTestBase.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ClientTlsAuthTestBase.java new file mode 100644 index 000000000000..468aad992e47 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ClientTlsAuthTestBase.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +abstract class ClientTlsAuthTestBase { + protected static final String STORE_PASSWORD = "password"; + protected static final String CLIENT_STORE_TYPE = "pkcs12"; + protected static final String TEST_KEY_STORE = "/software/amazon/awssdk/http/netty/server-keystore"; + protected static final String CLIENT_KEY_STORE = "/software/amazon/awssdk/http/netty/client1.p12"; + + protected static Path tempDir; + protected static Path serverKeyStore; + protected static Path clientKeyStore; + + @BeforeClass + public static void setUp() throws IOException { + tempDir = Files.createTempDirectory(ClientTlsAuthTestBase.class.getSimpleName()); + copyCertsToTmpDir(); + } + + @AfterClass + public static void teardown() throws IOException { + Files.deleteIfExists(serverKeyStore); + Files.deleteIfExists(clientKeyStore); + Files.deleteIfExists(tempDir); + } + + private static void copyCertsToTmpDir() throws IOException { + InputStream sksStream = ClientTlsAuthTestBase.class.getResourceAsStream(TEST_KEY_STORE); + Path sks = copyToTmpDir(sksStream, "server-keystore"); + + InputStream cksStream = ClientTlsAuthTestBase.class.getResourceAsStream(CLIENT_KEY_STORE); + Path cks = copyToTmpDir(cksStream, "client1.p12"); + + serverKeyStore = sks; + clientKeyStore = cks; + } + + private static Path copyToTmpDir(InputStream srcStream, String name) throws IOException { + Path dst = tempDir.resolve(name); + Files.copy(srcStream, dst); + return dst; + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/EmptyPublisher.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/EmptyPublisher.java deleted file mode 100644 index 78af13d3869b..000000000000 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/EmptyPublisher.java +++ /dev/null @@ -1,45 +0,0 @@ -package software.amazon.awssdk.http.nio.netty; - -import java.nio.ByteBuffer; -import java.util.Optional; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import software.amazon.awssdk.http.async.SdkHttpContentPublisher; - -public class EmptyPublisher implements SdkHttpContentPublisher { - @Override - public void subscribe(Subscriber subscriber) { - subscriber.onSubscribe(new EmptySubscription(subscriber)); - } - - @Override - public Optional contentLength() { - return Optional.of(0L); - } - - private static class EmptySubscription implements Subscription { - private final Subscriber subscriber; - private volatile boolean done; - - EmptySubscription(Subscriber subscriber) { - this.subscriber = subscriber; - } - - @Override - public void request(long l) { - if (!done) { - done = true; - if (l <= 0) { - this.subscriber.onError(new IllegalArgumentException("Demand must be positive")); - } else { - this.subscriber.onComplete(); - } - } - } - - @Override - public void cancel() { - done = true; - } - } -} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/Http2ConfigurationTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/Http2ConfigurationTest.java new file mode 100644 index 000000000000..ec93d1455fb5 --- /dev/null +++ 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/Http2ConfigurationTest.java @@ -0,0 +1,91 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty; + +import static org.assertj.core.api.Assertions.assertThat; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class Http2ConfigurationTest { + @Rule + public ExpectedException expected = ExpectedException.none(); + + @Test + public void builder_returnsInstance() { + assertThat(Http2Configuration.builder()).isNotNull(); + } + + @Test + public void build_buildsCorrectConfig() { + long maxStreams = 1; + int initialWindowSize = 2; + + Http2Configuration config = Http2Configuration.builder() + .maxStreams(maxStreams) + .initialWindowSize(initialWindowSize) + .build(); + + assertThat(config.maxStreams()).isEqualTo(maxStreams); + assertThat(config.initialWindowSize()).isEqualTo(initialWindowSize); + } + + @Test + public void builder_toBuilder_roundTrip() { + Http2Configuration config1 = Http2Configuration.builder() + .maxStreams(7L) + .initialWindowSize(42) + .build(); + + Http2Configuration config2 = config1.toBuilder().build(); + + assertThat(config1).isEqualTo(config2); + } + + @Test + public void builder_maxStream_nullValue_doesNotThrow() { + Http2Configuration.builder().maxStreams(null); + } + + @Test + public void builder_maxStream_negative_throws() { + expected.expect(IllegalArgumentException.class); + Http2Configuration.builder().maxStreams(-1L); + } + + @Test + public void builder_maxStream_0_throws() { + expected.expect(IllegalArgumentException.class); + Http2Configuration.builder().maxStreams(0L); + } + + @Test + public void builder_initialWindowSize_nullValue_doesNotThrow() { + Http2Configuration.builder().initialWindowSize(null); + } + + @Test + public void builder_initialWindowSize_negative_throws() { + expected.expect(IllegalArgumentException.class); + Http2Configuration.builder().initialWindowSize(-1); + } + + @Test + public void builder_initialWindowSize_0_throws() { + expected.expect(IllegalArgumentException.class); + Http2Configuration.builder().initialWindowSize(0); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/Http2MetricsTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/Http2MetricsTest.java new file mode 100644 index 000000000000..fe79f1e51b34 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/Http2MetricsTest.java @@ -0,0 +1,213 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame; +import io.netty.handler.codec.http2.Http2DataFrame; +import io.netty.handler.codec.http2.Http2Frame; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2HeadersFrame; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.codec.http2.Http2StreamFrame; +import io.netty.util.ReferenceCountUtil; +import java.net.URI; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.http.EmptyPublisher; +import software.amazon.awssdk.http.Http2Metric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; + +public class Http2MetricsTest { + private static final int H2_DEFAULT_WINDOW_SIZE = 65535; + private static final int SERVER_MAX_CONCURRENT_STREAMS = 2; + private static final int SERVER_INITIAL_WINDOW_SIZE = 65535 * 2; + + private static final TestHttp2Server SERVER = new TestHttp2Server(); + + @BeforeClass + public static void setup() throws InterruptedException { + SERVER.start(); + } + + @AfterClass + public static void teardown() throws InterruptedException { + SERVER.stop(); + } + + @Test + public void maxClientStreamsLowerThanServerMaxStreamsReportClientMaxStreams() { + try (SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder() + .protocol(Protocol.HTTP2) + .maxConcurrency(10) + .http2Configuration(c -> c.maxStreams(1L) + .initialWindowSize(65535 * 3)) + .build()) { + MetricCollector metricCollector = MetricCollector.create("test"); + client.execute(createExecuteRequest(metricCollector)).join(); + MetricCollection metrics = metricCollector.collect(); + + assertThat(metrics.metricValues(HttpMetric.HTTP_CLIENT_NAME)).containsExactly("NettyNio"); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(10); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(0); + // The stream window doesn't get initialized with the connection + // initial 
setting and the update appears to be asynchronous so + // this may be the default window size just based on when the + // stream window was queried or if this is the first time the + // stream is used (i.e. not previously pooled) + assertThat(metrics.metricValues(Http2Metric.LOCAL_STREAM_WINDOW_SIZE_IN_BYTES).get(0)).isIn(H2_DEFAULT_WINDOW_SIZE, 65535 * 3); + assertThat(metrics.metricValues(Http2Metric.REMOTE_STREAM_WINDOW_SIZE_IN_BYTES)).containsExactly(SERVER_INITIAL_WINDOW_SIZE); + } + } + + @Test + public void maxClientStreamsHigherThanServerMaxStreamsReportServerMaxStreams() { + try (SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder() + .protocol(Protocol.HTTP2) + .maxConcurrency(10) + .http2Configuration(c -> c.maxStreams(3L) + .initialWindowSize(65535 * 3)) + .build()) { + MetricCollector metricCollector = MetricCollector.create("test"); + client.execute(createExecuteRequest(metricCollector)).join(); + MetricCollection metrics = metricCollector.collect(); + + assertThat(metrics.metricValues(HttpMetric.HTTP_CLIENT_NAME)).containsExactly("NettyNio"); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(10); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY).get(0)).isIn(0, 2, 3); + // The stream window doesn't get initialized with the connection + // initial setting and the update appears to be asynchronous so + // this may be the default window size just based on when the + // stream window was queried or if this is the first time the + // stream is used (i.e. not previously pooled) + assertThat(metrics.metricValues(Http2Metric.LOCAL_STREAM_WINDOW_SIZE_IN_BYTES).get(0)).isIn(H2_DEFAULT_WINDOW_SIZE, 65535 * 3); + assertThat(metrics.metricValues(Http2Metric.REMOTE_STREAM_WINDOW_SIZE_IN_BYTES)).containsExactly(SERVER_INITIAL_WINDOW_SIZE); + } + } + + private AsyncExecuteRequest createExecuteRequest(MetricCollector metricCollector) { + URI uri = URI.create("http://localhost:" + SERVER.port()); + SdkHttpRequest request = createRequest(uri); + return AsyncExecuteRequest.builder() + .request(request) + .requestContentPublisher(new EmptyPublisher()) + .responseHandler(new RecordingResponseHandler()) + .metricCollector(metricCollector) + .build(); + } + + private SdkHttpFullRequest createRequest(URI uri) { + return SdkHttpFullRequest.builder() + .uri(uri) + .method(SdkHttpMethod.GET) + .encodedPath("/") + .putHeader("Host", uri.getHost()) + .putHeader("Content-Length", "0") + .build(); + } + + private static final class TestHttp2Server extends ChannelInitializer { + private ServerBootstrap bootstrap; + private ServerSocketChannel channel; + + private TestHttp2Server() { + } + + public void start() throws InterruptedException { + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(new NioEventLoopGroup()) + .childHandler(this) + .localAddress(0) + .childOption(ChannelOption.SO_KEEPALIVE, true); + + channel = ((ServerSocketChannel) bootstrap.bind().await().channel()); + } + + public int port() { + return channel.localAddress().getPort(); + } + + public void stop() throws InterruptedException { + channel.close().await(); + } + + @Override + protected void initChannel(SocketChannel ch) { + Http2FrameCodec codec = Http2FrameCodecBuilder.forServer() + .initialSettings(new Http2Settings() + 
.maxConcurrentStreams(SERVER_MAX_CONCURRENT_STREAMS) + .initialWindowSize(SERVER_INITIAL_WINDOW_SIZE)) + .build(); + ch.pipeline().addLast(codec); + ch.pipeline().addLast(new SuccessfulHandler()); + } + } + + private static class SuccessfulHandler extends ChannelInboundHandlerAdapter { + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + if (!(msg instanceof Http2Frame)) { + ctx.fireChannelRead(msg); + return; + } + ReferenceCountUtil.release(msg); + + boolean isEnd = isEndFrame(msg); + if (isEnd) { + ctx.writeAndFlush(new DefaultHttp2HeadersFrame(new DefaultHttp2Headers().status("204"), true) + .stream(((Http2StreamFrame) msg).stream())); + } + } + + private boolean isEndFrame(Object msg) { + if (msg instanceof Http2HeadersFrame) { + return ((Http2HeadersFrame) msg).isEndStream(); + } + + if (msg instanceof Http2DataFrame) { + return ((Http2DataFrame) msg).isEndStream(); + } + + return false; + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java new file mode 100644 index 000000000000..8f1e486d3a07 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java @@ -0,0 +1,204 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathMatching; +import static org.hamcrest.core.IsInstanceOf.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import java.io.IOException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.http.EmptyPublisher; +import software.amazon.awssdk.http.FileStoreTlsKeyManagersProvider; +import software.amazon.awssdk.http.HttpTestUtils; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.TlsKeyManagersProvider; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.utils.AttributeMap; + +/** + * Tests to ensure that Netty layer can perform TLS client authentication. 
+ */ +public class NettyClientTlsAuthTest extends ClientTlsAuthTestBase { + private static final AttributeMap DEFAULTS = AttributeMap.builder() + .put(TRUST_ALL_CERTIFICATES, true) + .build(); + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + private static WireMockServer mockProxy; + private static ProxyConfiguration proxyCfg; + private static TlsKeyManagersProvider keyManagersProvider; + + private SdkAsyncHttpClient netty; + + @BeforeClass + public static void setUp() throws IOException { + ClientTlsAuthTestBase.setUp(); + + // Will be used by both client and server to trust the self-signed + // cert they present to each other + System.setProperty("javax.net.ssl.trustStore", serverKeyStore.toAbsolutePath().toString()); + System.setProperty("javax.net.ssl.trustStorePassword", STORE_PASSWORD); + System.setProperty("javax.net.ssl.trustStoreType", "jks"); + + mockProxy = new WireMockServer(new WireMockConfiguration() + .dynamicHttpsPort() + .needClientAuth(true) + .keystorePath(serverKeyStore.toAbsolutePath().toString()) + .keystorePassword(STORE_PASSWORD)); + + mockProxy.start(); + + mockProxy.stubFor(get(urlPathMatching(".*")).willReturn(aResponse().withStatus(200).withBody("hello"))); + + proxyCfg = ProxyConfiguration.builder() + .scheme("https") + .host("localhost") + .port(mockProxy.httpsPort()) + .build(); + + keyManagersProvider = FileStoreTlsKeyManagersProvider.create(clientKeyStore, CLIENT_STORE_TYPE, STORE_PASSWORD); + } + + @AfterClass + public static void teardown() throws IOException { + ClientTlsAuthTestBase.teardown(); + + mockProxy.stop(); + + System.clearProperty("javax.net.ssl.trustStore"); + System.clearProperty("javax.net.ssl.trustStorePassword"); + System.clearProperty("javax.net.ssl.trustStoreType"); + } + + @After + public void methodTeardown() { + if (netty != null) { + netty.close(); + } + netty = null; + } + + @Test + public void builderUsesProvidedKeyManagersProvider() { + TlsKeyManagersProvider mockKeyManagersProvider = mock(TlsKeyManagersProvider.class); + netty = NettyNioAsyncHttpClient.builder() + .proxyConfiguration(proxyCfg) + .tlsKeyManagersProvider(mockKeyManagersProvider) + .buildWithDefaults(DEFAULTS); + + try { + sendRequest(netty, new RecordingResponseHandler()); + } catch (Exception ignored) { + } + verify(mockKeyManagersProvider).keyManagers(); + } + + @Test + public void proxyRequest_ableToAuthenticate() { + thrown.expectCause(instanceOf(IOException.class)); + thrown.expectMessage("Could not connect to proxy"); + + netty = NettyNioAsyncHttpClient.builder() + .proxyConfiguration(proxyCfg) + .tlsKeyManagersProvider(keyManagersProvider) + .buildWithDefaults(DEFAULTS); + + sendRequest(netty, new RecordingResponseHandler()); + } + + @Test + public void proxyRequest_noKeyManagerGiven_notAbleToSendConnect() throws Throwable { + thrown.expectCause(instanceOf(IOException.class)); + thrown.expectMessage("Unable to send CONNECT request to proxy"); + + netty = NettyNioAsyncHttpClient.builder() + .proxyConfiguration(proxyCfg) + .buildWithDefaults(DEFAULTS); + + sendRequest(netty, new RecordingResponseHandler()); + } + + @Test + public void proxyRequest_keyStoreSystemPropertiesConfigured_ableToAuthenticate() throws Throwable { + thrown.expectCause(instanceOf(IOException.class)); + thrown.expectMessage("Could not connect to proxy"); + + System.setProperty("javax.net.ssl.keyStore", clientKeyStore.toAbsolutePath().toString()); + System.setProperty("javax.net.ssl.keyStoreType", CLIENT_STORE_TYPE); + 
System.setProperty("javax.net.ssl.keyStorePassword", STORE_PASSWORD); + + netty = NettyNioAsyncHttpClient.builder() + .proxyConfiguration(proxyCfg) + .buildWithDefaults(DEFAULTS); + + try { + sendRequest(netty, new RecordingResponseHandler()); + } finally { + System.clearProperty("javax.net.ssl.keyStore"); + System.clearProperty("javax.net.ssl.keyStoreType"); + System.clearProperty("javax.net.ssl.keyStorePassword"); + } + } + + @Test + public void nonProxy_noKeyManagerGiven_shouldThrowException() { + thrown.expectCause(instanceOf(IOException.class)); + thrown.expectMessage("The channel was closed"); + + netty = NettyNioAsyncHttpClient.builder() + .buildWithDefaults(DEFAULTS); + + HttpTestUtils.sendGetRequest(mockProxy.httpsPort(), netty).join(); + } + + private void sendRequest(SdkAsyncHttpClient client, SdkAsyncHttpResponseHandler responseHandler) { + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .request(testSdkRequest()) + .requestContentPublisher(new EmptyPublisher()) + .responseHandler(responseHandler) + .build(); + + client.execute(req).join(); + } + + private static SdkHttpFullRequest testSdkRequest() { + return SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .protocol("https") + .host("some-awesome-service.amazonaws.com") + .port(443) + .putHeader("host", "some-awesome-service.amazonaws.com") + .build(); + } + +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientDefaultWireMockTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientDefaultWireMockTest.java new file mode 100644 index 000000000000..559edc8f2af6 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientDefaultWireMockTest.java @@ -0,0 +1,257 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; +import static org.apache.commons.lang3.StringUtils.isBlank; +import static org.apache.commons.lang3.StringUtils.reverse; +import static org.assertj.core.api.Assertions.assertThat; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.io.IOException; +import java.net.URI; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; +import org.assertj.core.api.Condition; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.SdkHttpConfigurationOption; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkHttpContentPublisher; +import software.amazon.awssdk.utils.AttributeMap; + +public class NettyNioAsyncHttpClientDefaultWireMockTest { + + private final RecordingNetworkTrafficListener wiremockTrafficListener = new RecordingNetworkTrafficListener(); + + @Rule + public WireMockRule mockServer = new WireMockRule(wireMockConfig() + .dynamicPort() + .dynamicHttpsPort() + .networkTrafficListener(wiremockTrafficListener)); + + private static SdkAsyncHttpClient client = NettyNioAsyncHttpClient.create(); + + @Before + public void methodSetup() { + wiremockTrafficListener.reset(); + } + + @AfterClass + public static void tearDown() throws Exception { + client.close(); + } + + @Test + public void defaultThreadFactoryUsesHelpfulName() throws Exception { + // Make a request to ensure a thread is primed + makeSimpleRequest(client); + + String expectedPattern = "aws-java-sdk-NettyEventLoop-\\d+-\\d+"; + assertThat(Thread.getAllStackTraces().keySet()) + .areAtLeast(1, new Condition<>(t -> t.getName().matches(expectedPattern), + "Matches default thread pattern: `%s`", expectedPattern)); + } + + /** + * Make a simple async request and wait for it to fiish. 
+ * + * @param client Client to make request with. + */ + private void makeSimpleRequest(SdkAsyncHttpClient client) throws Exception { + String body = randomAlphabetic(10); + URI uri = URI.create("http://localhost:" + mockServer.port()); + stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withBody(body))); + SdkHttpRequest request = createRequest(uri); + RecordingResponseHandler recorder = new RecordingResponseHandler(); + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); + recorder.completeFuture.get(5, TimeUnit.SECONDS); + } + + @Test + public void canMakeBasicRequestOverHttp() throws Exception { + String smallBody = randomAlphabetic(10); + URI uri = URI.create("http://localhost:" + mockServer.port()); + + assertCanReceiveBasicRequest(uri, smallBody); + } + + @Test + public void canHandleLargerPayloadsOverHttp() throws Exception { + String largishBody = randomAlphabetic(25000); + + URI uri = URI.create("http://localhost:" + mockServer.port()); + + assertCanReceiveBasicRequest(uri, largishBody); + } + + @Test + public void canSendContentAndGetThatContentBack() throws Exception { + String body = randomAlphabetic(50); + stubFor(any(urlEqualTo("/echo?reversed=true")) + .withRequestBody(equalTo(body)) + .willReturn(aResponse().withBody(reverse(body)))); + URI uri = URI.create("http://localhost:" + mockServer.port()); + + SdkHttpRequest request = createRequest(uri, "/echo", body, SdkHttpMethod.POST, singletonMap("reversed", "true")); + + RecordingResponseHandler recorder = new RecordingResponseHandler(); + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider(body)).responseHandler(recorder).build()); + + recorder.completeFuture.get(5, TimeUnit.SECONDS); + + verify(1, postRequestedFor(urlEqualTo("/echo?reversed=true"))); + + assertThat(recorder.fullResponseAsString()).isEqualTo(reverse(body)); + } + + @Test + public void requestContentOnlyEqualToContentLengthHeaderFromProvider() throws InterruptedException, ExecutionException, TimeoutException, IOException { + final String content = randomAlphabetic(32); + final String streamContent = content + reverse(content); + stubFor(any(urlEqualTo("/echo?reversed=true")) + .withRequestBody(equalTo(content)) + .willReturn(aResponse().withBody(reverse(content)))); + URI uri = URI.create("http://localhost:" + mockServer.port()); + + SdkHttpFullRequest request = createRequest(uri, "/echo", streamContent, SdkHttpMethod.POST, singletonMap("reversed", "true")); + request = request.toBuilder().putHeader("Content-Length", Integer.toString(content.length())).build(); + RecordingResponseHandler recorder = new RecordingResponseHandler(); + + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider(streamContent)).responseHandler(recorder).build()); + + recorder.completeFuture.get(5, TimeUnit.SECONDS); + + // HTTP servers will stop processing the request as soon as it reads + // bytes equal to 'Content-Length' so we need to inspect the raw + // traffic to ensure that there wasn't anything after that. 
+ assertThat(wiremockTrafficListener.requests().toString()).endsWith(content); + } + + + private void assertCanReceiveBasicRequest(URI uri, String body) throws Exception { + stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withHeader("Some-Header", "With Value").withBody(body))); + + SdkHttpRequest request = createRequest(uri); + + RecordingResponseHandler recorder = new RecordingResponseHandler(); + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); + + recorder.completeFuture.get(5, TimeUnit.SECONDS); + + assertThat(recorder.responses).hasOnlyOneElementSatisfying( + headerResponse -> { + assertThat(headerResponse.headers()).containsKey("Some-Header"); + assertThat(headerResponse.statusCode()).isEqualTo(200); + }); + + assertThat(recorder.fullResponseAsString()).isEqualTo(body); + verify(1, getRequestedFor(urlMatching("/"))); + } + + private SdkHttpContentPublisher createProvider(String body) { + Stream chunks = splitStringBySize(body).stream() + .map(chunk -> ByteBuffer.wrap(chunk.getBytes(UTF_8))); + return new SdkHttpContentPublisher() { + + @Override + public Optional contentLength() { + return Optional.of(Long.valueOf(body.length())); + } + + @Override + public void subscribe(Subscriber s) { + s.onSubscribe(new Subscription() { + @Override + public void request(long n) { + chunks.forEach(s::onNext); + s.onComplete(); + } + + @Override + public void cancel() { + + } + }); + } + }; + } + + private SdkHttpFullRequest createRequest(URI uri) { + return createRequest(uri, "/", null, SdkHttpMethod.GET, emptyMap()); + } + + private SdkHttpFullRequest createRequest(URI uri, + String resourcePath, + String body, + SdkHttpMethod method, + Map params) { + String contentLength = body == null ? null : String.valueOf(body.getBytes(UTF_8).length); + return SdkHttpFullRequest.builder() + .uri(uri) + .method(method) + .encodedPath(resourcePath) + .applyMutation(b -> params.forEach(b::putRawQueryParameter)) + .applyMutation(b -> { + b.putHeader("Host", uri.getHost()); + if (contentLength != null) { + b.putHeader("Content-Length", contentLength); + } + }).build(); + } + + private static Collection splitStringBySize(String str) { + if (isBlank(str)) { + return Collections.emptyList(); + } + ArrayList split = new ArrayList<>(); + for (int i = 0; i <= str.length() / 1000; i++) { + split.add(str.substring(i * 1000, Math.min((i + 1) * 1000, str.length()))); + } + return split; + } + +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientSpiVerificationTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientSpiVerificationTest.java index 870e1b5f1931..a4e4047fde13 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientSpiVerificationTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientSpiVerificationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
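The wire-mock tests above all drive the client through the same AsyncExecuteRequest pattern. As a self-contained sketch of that pattern (the endpoint is a placeholder, and EmptyPublisher and RecordingResponseHandler are the test helpers referenced in this change, assumed to be on the classpath):

import java.net.URI;
import software.amazon.awssdk.http.EmptyPublisher;
import software.amazon.awssdk.http.SdkHttpFullRequest;
import software.amazon.awssdk.http.SdkHttpMethod;
import software.amazon.awssdk.http.async.AsyncExecuteRequest;
import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;

public class NettyClientUsageSketch {
    public static void main(String[] args) {
        try (SdkAsyncHttpClient client = NettyNioAsyncHttpClient.create()) {
            URI uri = URI.create("http://localhost:8080"); // placeholder endpoint
            SdkHttpFullRequest request = SdkHttpFullRequest.builder()
                                                           .uri(uri)
                                                           .method(SdkHttpMethod.GET)
                                                           .encodedPath("/")
                                                           .putHeader("Host", uri.getHost())
                                                           .build();
            AsyncExecuteRequest executeRequest = AsyncExecuteRequest.builder()
                                                                    .request(request)
                                                                    .requestContentPublisher(new EmptyPublisher())
                                                                    .responseHandler(new RecordingResponseHandler()) // test helper
                                                                    .build();
            // Completes when the response has been fully handled by the response handler.
            client.execute(executeRequest).join();
        }
    }
}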
@@ -41,6 +41,7 @@ import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.SdkHttpConfigurationOption; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java index 8f6585dddc37..843f0a0c4c33 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -41,23 +41,20 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; +import com.github.tomakehurst.wiremock.WireMockServer; import com.github.tomakehurst.wiremock.http.Fault; -import com.github.tomakehurst.wiremock.http.trafficlistener.WiremockNetworkTrafficListener; import com.github.tomakehurst.wiremock.junit.WireMockRule; import io.netty.channel.Channel; import io.netty.channel.ChannelFactory; import io.netty.channel.ChannelFuture; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.ssl.SslProvider; import io.netty.util.AttributeKey; import java.io.IOException; -import java.net.Socket; import java.net.URI; import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; import java.time.Duration; import java.util.ArrayList; import java.util.Collection; @@ -71,6 +68,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Stream; +import javax.net.ssl.TrustManagerFactory; import org.assertj.core.api.Condition; import org.junit.AfterClass; import org.junit.Before; @@ -82,6 +80,8 @@ import org.mockito.stubbing.Answer; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.HttpTestUtils; import software.amazon.awssdk.http.SdkHttpConfigurationOption; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; @@ -91,6 +91,8 @@ import software.amazon.awssdk.http.async.SdkHttpContentPublisher; import software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration; import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPoolMap; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; +import software.amazon.awssdk.metrics.MetricCollection; import software.amazon.awssdk.utils.AttributeMap; @RunWith(MockitoJUnitRunner.class) @@ -116,6 +118,32 @@ public static void tearDown() throws Exception { client.close(); } + @Test + public void defaultConnectionIdleTimeout() { + try (NettyNioAsyncHttpClient client = (NettyNioAsyncHttpClient) NettyNioAsyncHttpClient.builder().build()) { + 
assertThat(client.configuration().idleTimeoutMillis()).isEqualTo(5000); + } + } + + @Test + public void overrideConnectionIdleTimeout_shouldHonor() { + try (NettyNioAsyncHttpClient client = (NettyNioAsyncHttpClient) NettyNioAsyncHttpClient.builder() + .connectionMaxIdleTime(Duration.ofMillis(1000)) + .build()) { + assertThat(client.configuration().idleTimeoutMillis()).isEqualTo(1000); + } + } + + @Test + public void invalidMaxPendingConnectionAcquireConfig_shouldPropagateException() { + try (SdkAsyncHttpClient customClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(1) + .maxPendingConnectionAcquires(0) + .build()) { + assertThatThrownBy(() -> makeSimpleRequest(customClient)).hasMessageContaining("java.lang.IllegalArgumentException: maxPendingAcquires: 0 (expected: >= 1)"); + } + } + @Test public void customFactoryIsUsed() throws Exception { ThreadFactory threadFactory = spy(new CustomThreadFactory()); @@ -227,10 +255,10 @@ public void customChannelFactoryIsUsed() throws Exception { @Test public void closeClient_shouldCloseUnderlyingResources() { SdkEventLoopGroup eventLoopGroup = SdkEventLoopGroup.builder().build(); - ChannelPool channelPool = mock(ChannelPool.class); - SdkChannelPoolMap sdkChannelPoolMap = new SdkChannelPoolMap() { + SdkChannelPool channelPool = mock(SdkChannelPool.class); + SdkChannelPoolMap sdkChannelPoolMap = new SdkChannelPoolMap() { @Override - protected ChannelPool newPool(URI key) { + protected SdkChannelPool newPool(URI key) { return channelPool; } }; @@ -352,6 +380,29 @@ public void responseConnectionClosed_shouldCloseAndReleaseChannel() throws Excep eventLoopGroup.eventLoopGroup().shutdownGracefully().awaitUninterruptibly(); } + @Test + public void builderUsesProvidedTrustManagersProvider() throws Exception { + WireMockServer selfSignedServer = HttpTestUtils.createSelfSignedServer(); + + TrustManagerFactory managerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + managerFactory.init(HttpTestUtils.getSelfSignedKeyStore()); + + try (SdkAsyncHttpClient netty = NettyNioAsyncHttpClient.builder() + .tlsTrustManagersProvider(managerFactory::getTrustManagers) + .build()) { + selfSignedServer.start(); + URI uri = URI.create("https://localhost:" + selfSignedServer.httpsPort()); + + SdkHttpRequest request = createRequest(uri); + RecordingResponseHandler recorder = new RecordingResponseHandler(); + netty.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); + + recorder.completeFuture.get(5, TimeUnit.SECONDS); + } finally { + selfSignedServer.stop(); + } + } + /** * Make a simple async request and wait for it to fiish. * @@ -441,7 +492,7 @@ public void requestContentOnlyEqualToContentLengthHeaderFromProvider() throws In // HTTP servers will stop processing the request as soon as it reads // bytes equal to 'Content-Length' so we need to inspect the raw // traffic to ensure that there wasn't anything after that. 
- assertThat(wiremockTrafficListener.requests.toString()).endsWith(content); + assertThat(wiremockTrafficListener.requests().toString()).endsWith(content); } @Test @@ -586,7 +637,7 @@ public void testExceptionMessageChanged_WhenPendingAcquireQueueIsFull() throws E List> futures = new ArrayList<>(); for (int i = 0; i < 10; i++) { - futures.add(makeSimpleRequestAndReturnResponseHandler(customClient).completeFuture); + futures.add(makeSimpleRequestAndReturnResponseHandler(customClient, 1000).completeFuture); } assertThatThrownBy(() -> CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join()) @@ -609,7 +660,7 @@ public void testExceptionMessageChanged_WhenConnectionTimeoutErrorEncountered() List> futures = new ArrayList<>(); for (int i = 0; i < 2; i++) { - futures.add(makeSimpleRequestAndReturnResponseHandler(customClient).completeFuture); + futures.add(makeSimpleRequestAndReturnResponseHandler(customClient, 1000).completeFuture); } assertThatThrownBy(() -> CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join()) @@ -630,22 +681,102 @@ public void createNettyClient_ReadWriteTimeoutCanBeZero() throws Exception { customClient.close(); } + @Test + public void metricsAreCollectedWhenMaxPendingConnectionAcquisitionsAreExceeded() throws Exception { + SdkAsyncHttpClient customClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(1) + .maxPendingConnectionAcquires(1) + .build(); + + List handlers = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + handlers.add(makeSimpleRequestAndReturnResponseHandler(customClient, 1000)); + } + + for (RecordingResponseHandler handler : handlers) { + try { + handler.executionFuture.join(); + } catch (Exception e) { + // Ignored. + } + + MetricCollection metrics = handler.collector.collect(); + assertThat(metrics.metricValues(HttpMetric.HTTP_CLIENT_NAME)).containsExactly("NettyNio"); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(1); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES)).allSatisfy(a -> assertThat(a).isBetween(0, 9)); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY)).allSatisfy(a -> assertThat(a).isBetween(0, 1)); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).allSatisfy(a -> assertThat(a).isBetween(0, 1)); + } + + customClient.close(); + } + + @Test + public void metricsAreCollectedForSuccessfulCalls() throws Exception { + SdkAsyncHttpClient customClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(10) + .build(); + + RecordingResponseHandler handler = makeSimpleRequestAndReturnResponseHandler(customClient); + + handler.executionFuture.get(10, TimeUnit.SECONDS); + + Thread.sleep(5_000); + MetricCollection metrics = handler.collector.collect(); + assertThat(metrics.metricValues(HttpMetric.HTTP_CLIENT_NAME)).containsExactly("NettyNio"); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(10); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY).get(0)).isBetween(0, 1); + + customClient.close(); + } + + @Test + public void metricsAreCollectedForClosedClientCalls() throws Exception { + SdkAsyncHttpClient customClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(10) + .build(); + customClient.close(); + + RecordingResponseHandler handler = 
makeSimpleRequestAndReturnResponseHandler(customClient); + + try { + handler.executionFuture.get(10, TimeUnit.SECONDS); + } catch (Exception e) { + // Expected + } + + MetricCollection metrics = handler.collector.collect(); + assertThat(metrics.metricValues(HttpMetric.HTTP_CLIENT_NAME)).containsExactly("NettyNio"); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(10); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES)).containsExactly(0); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY)).containsExactly(0); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY).get(0)).isBetween(0, 1); + } + private void verifyChannelRelease(Channel channel) throws InterruptedException { Thread.sleep(1000); assertThat(channel.attr(AttributeKey.valueOf("channelPool")).get()).isNull(); } private RecordingResponseHandler makeSimpleRequestAndReturnResponseHandler(SdkAsyncHttpClient client) throws Exception { + return makeSimpleRequestAndReturnResponseHandler(client, null); + } + + private RecordingResponseHandler makeSimpleRequestAndReturnResponseHandler(SdkAsyncHttpClient client, Integer delayInMillis) + throws Exception { String body = randomAlphabetic(10); URI uri = URI.create("http://localhost:" + mockServer.port()); - stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withBody(body).withFixedDelay(1000))); + stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withBody(body).withFixedDelay(delayInMillis))); SdkHttpRequest request = createRequest(uri); RecordingResponseHandler recorder = new RecordingResponseHandler(); - client.execute(AsyncExecuteRequest.builder() - .request(request) - .requestContentPublisher(createProvider("")) - .responseHandler(recorder) - .build()); + recorder.executionFuture = client.execute(AsyncExecuteRequest.builder() + .request(request) + .requestContentPublisher(createProvider("")) + .responseHandler(recorder) + .metricCollector(recorder.collector) + .build()); return recorder; } @@ -654,33 +785,4 @@ private static AttributeMap mapWithTrustAllCerts() { .put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, true) .build(); } - - private static class RecordingNetworkTrafficListener implements WiremockNetworkTrafficListener { - private final StringBuilder requests = new StringBuilder(); - - - @Override - public void opened(Socket socket) { - - } - - @Override - public void incoming(Socket socket, ByteBuffer byteBuffer) { - requests.append(StandardCharsets.UTF_8.decode(byteBuffer)); - } - - @Override - public void outgoing(Socket socket, ByteBuffer byteBuffer) { - - } - - @Override - public void closed(Socket socket) { - - } - - public void reset() { - requests.setLength(0); - } - } } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyConfigurationTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyConfigurationTest.java new file mode 100644 index 000000000000..239754f22e21 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyConfigurationTest.java @@ -0,0 +1,132 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty; + +import static org.assertj.core.api.Assertions.assertThat; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.HashSet; +import java.util.Random; +import java.util.Set; +import java.util.stream.Stream; +import org.junit.Test; + +/** + * Tests for {@link ProxyConfiguration}. + */ +public class ProxyConfigurationTest { + private static final Random RNG = new Random(); + + @Test + public void build_setsAllProperties() { + verifyAllPropertiesSet(allPropertiesSetConfig()); + } + + @Test + public void toBuilder_roundTrip_producesExactCopy() { + ProxyConfiguration original = allPropertiesSetConfig(); + + ProxyConfiguration copy = original.toBuilder().build(); + + assertThat(copy).isEqualTo(original); + } + + @Test + public void setNonProxyHostsToNull_createsEmptySet() { + ProxyConfiguration cfg = ProxyConfiguration.builder() + .nonProxyHosts(null) + .build(); + + assertThat(cfg.nonProxyHosts()).isEmpty(); + } + + @Test + public void toBuilderModified_doesNotModifySource() { + ProxyConfiguration original = allPropertiesSetConfig(); + + ProxyConfiguration modified = setAllPropertiesToRandomValues(original.toBuilder()).build(); + + assertThat(original).isNotEqualTo(modified); + } + + private ProxyConfiguration allPropertiesSetConfig() { + return setAllPropertiesToRandomValues(ProxyConfiguration.builder()).build(); + } + + private ProxyConfiguration.Builder setAllPropertiesToRandomValues(ProxyConfiguration.Builder builder) { + Stream.of(builder.getClass().getDeclaredMethods()) + .filter(m -> m.getParameterCount() == 1 && m.getReturnType().equals(ProxyConfiguration.Builder.class)) + .forEach(m -> { + try { + m.setAccessible(true); + setRandomValue(builder, m); + } catch (Exception e) { + throw new RuntimeException("Could not create random proxy config", e); + } + }); + return builder; + } + + private void setRandomValue(Object o, Method setter) throws InvocationTargetException, IllegalAccessException { + Class paramClass = setter.getParameterTypes()[0]; + + if (String.class.equals(paramClass)) { + setter.invoke(o, randomString()); + } else if (int.class.equals(paramClass)) { + setter.invoke(o, RNG.nextInt()); + } else if (Set.class.isAssignableFrom(paramClass)) { + setter.invoke(o, randomSet()); + } else { + throw new RuntimeException("Don't know how to create random value for type " + paramClass); + } + } + + private void verifyAllPropertiesSet(ProxyConfiguration cfg) { + boolean hasNullProperty = Stream.of(cfg.getClass().getDeclaredMethods()) + .filter(m -> !m.getReturnType().equals(Void.class) && m.getParameterCount() == 0) + .anyMatch(m -> { + m.setAccessible(true); + try { + return m.invoke(cfg) == null; + } catch (Exception e) { + return true; + } + }); + + if (hasNullProperty) { + throw new RuntimeException("Given configuration has unset property"); + } + } + + private String randomString() { + String alpha = "abcdefghijklmnopqrstuwxyz"; + + StringBuilder sb = new StringBuilder(16); + for (int i = 0; i < 16; ++i) { + sb.append(alpha.charAt(RNG.nextInt(16))); + } + + return sb.toString(); + } + + private Set randomSet() { + Set ss = new HashSet<>(16); + for (int i = 0; i < 16; ++i) { + ss.add(randomString()); + } + return ss; + } +} diff --git
a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java new file mode 100644 index 000000000000..f797a760fdf7 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java @@ -0,0 +1,138 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + + +package software.amazon.awssdk.http.nio.netty; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import java.io.IOException; +import java.util.concurrent.CompletionException; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.http.EmptyPublisher; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; + +/** + * Tests for HTTP proxy functionality in the Netty client. 
+ */ +public class ProxyWireMockTest { + private static SdkAsyncHttpClient client; + + private static ProxyConfiguration proxyCfg; + + private static WireMockServer mockServer = new WireMockServer(new WireMockConfiguration() + .dynamicPort() + .dynamicHttpsPort()); + + private static WireMockServer mockProxy = new WireMockServer(new WireMockConfiguration() + .dynamicPort() + .dynamicHttpsPort()); + + @BeforeClass + public static void setup() { + mockProxy.start(); + mockServer.start(); + + mockServer.stubFor(get(urlPathEqualTo("/")).willReturn(aResponse().withStatus(200).withBody("hello"))); + + proxyCfg = ProxyConfiguration.builder() + .host("localhost") + .port(mockProxy.port()) + .build(); + } + + @AfterClass + public static void teardown() { + mockServer.stop(); + mockProxy.stop(); + } + + @After + public void methodTeardown() { + if (client != null) { + client.close(); + } + client = null; + } + + @Test(expected = IOException.class) + public void proxyConfigured_attemptsToConnect() throws Throwable { + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .request(testSdkRequest()) + .responseHandler(mock(SdkAsyncHttpResponseHandler.class)) + .build(); + + client = NettyNioAsyncHttpClient.builder() + .proxyConfiguration(proxyCfg) + .build(); + + try { + client.execute(req).join(); + } catch (CompletionException e) { + Throwable cause = e.getCause(); + // WireMock doesn't allow for mocking the CONNECT method so it will just return a 404, causing the client + // to throw an exception. + assertThat(e.getCause().getMessage()).isEqualTo("Could not connect to proxy"); + throw cause; + } + } + + @Test + public void proxyConfigured_hostInNonProxySet_doesNotConnect() { + RecordingResponseHandler responseHandler = new RecordingResponseHandler(); + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .request(testSdkRequest()) + .responseHandler(responseHandler) + .requestContentPublisher(new EmptyPublisher()) + .build(); + + ProxyConfiguration cfg = proxyCfg.toBuilder() + .nonProxyHosts(Stream.of("localhost").collect(Collectors.toSet())) + .build(); + + client = NettyNioAsyncHttpClient.builder() + .proxyConfiguration(cfg) + .build(); + + client.execute(req).join(); + + responseHandler.completeFuture.join(); + assertThat(responseHandler.fullResponseAsString()).isEqualTo("hello"); + } + + private SdkHttpFullRequest testSdkRequest() { + return SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .protocol("http") + .host("localhost") + .port(mockServer.port()) + .putHeader("host", "localhost") + .build(); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingNetworkTrafficListener.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingNetworkTrafficListener.java new file mode 100644 index 000000000000..d9ed5cad9f07 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingNetworkTrafficListener.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty; + +import com.github.tomakehurst.wiremock.http.trafficlistener.WiremockNetworkTrafficListener; +import java.net.Socket; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; + +/** + * Simple implementation of {@link WiremockNetworkTrafficListener} to record all requests received as a string for later + * verification. + */ +public class RecordingNetworkTrafficListener implements WiremockNetworkTrafficListener { + private final StringBuilder requests = new StringBuilder(); + + + @Override + public void opened(Socket socket) { + + } + + @Override + public void incoming(Socket socket, ByteBuffer byteBuffer) { + requests.append(StandardCharsets.UTF_8.decode(byteBuffer)); + } + + @Override + public void outgoing(Socket socket, ByteBuffer byteBuffer) { + + } + + @Override + public void closed(Socket socket) { + + } + + public void reset() { + requests.setLength(0); + } + + public StringBuilder requests() { + return requests; + } +} \ No newline at end of file diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingResponseHandler.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingResponseHandler.java index 483fce008ecd..bfbee3bc57b5 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingResponseHandler.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingResponseHandler.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -30,12 +30,15 @@ import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; import software.amazon.awssdk.http.async.SdkHttpResponseHandler; import software.amazon.awssdk.http.async.SimpleSubscriber; +import software.amazon.awssdk.metrics.MetricCollector; public final class RecordingResponseHandler implements SdkAsyncHttpResponseHandler { List responses = new ArrayList<>(); private StringBuilder bodyParts = new StringBuilder(); CompletableFuture completeFuture = new CompletableFuture<>(); + CompletableFuture executionFuture = null; + MetricCollector collector = MetricCollector.create("test"); @Override public void onHeaders(SdkHttpResponse response) { diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java index 61ea7420ad38..a3ae76469359 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/GoAwayTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/GoAwayTest.java new file mode 100644 index 000000000000..957dcaa7fc71 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/GoAwayTest.java @@ -0,0 +1,450 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.fault; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.DefaultHttp2FrameReader; +import io.netty.handler.codec.http2.DefaultHttp2FrameWriter; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.Http2FrameAdapter; +import io.netty.handler.codec.http2.Http2FrameListener; +import io.netty.handler.codec.http2.Http2FrameReader; +import io.netty.handler.codec.http2.Http2FrameWriter; +import io.netty.handler.codec.http2.Http2Headers; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.util.AttributeKey; +import io.reactivex.Flowable; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Test; +import org.reactivestreams.Publisher; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.http.EmptyPublisher; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; +import 
software.amazon.awssdk.http.nio.netty.internal.http2.GoAwayException; + +/** + * Tests to ensure that the client behaves as expected when it receives GOAWAY messages. + */ +public class GoAwayTest { + + private SdkAsyncHttpClient netty; + private SimpleEndpointDriver endpointDriver; + + @After + public void teardown() throws InterruptedException { + if (endpointDriver != null) { + endpointDriver.shutdown(); + } + endpointDriver = null; + + if (netty != null) { + netty.close(); + } + netty = null; + } + + @Test + public void goAwayCanCloseAllStreams() throws InterruptedException { + Set serverChannels = ConcurrentHashMap.newKeySet(); + + CountDownLatch allRequestsReceived = new CountDownLatch(2); + Supplier frameListenerSupplier = () -> new TestFrameListener() { + @Override + public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, boolean endStream) { + onHeadersReadDelegator(ctx, streamId); + } + + @Override + public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int streamDependency, short weight, boolean exclusive, int padding, boolean endStream) { + onHeadersReadDelegator(ctx, streamId); + } + + private void onHeadersReadDelegator(ChannelHandlerContext ctx, int streamId) { + serverChannels.add(ctx.channel().id().asShortText()); + + Http2Headers outboundHeaders = new DefaultHttp2Headers() + .status("200") + .add("content-type", "text/plain") + .addInt("content-length", 5); + + frameWriter().writeHeaders(ctx, streamId, outboundHeaders, 0, false, ctx.newPromise()); + ctx.flush(); + + allRequestsReceived.countDown(); + } + }; + + endpointDriver = new SimpleEndpointDriver(frameListenerSupplier); + endpointDriver.init(); + + netty = NettyNioAsyncHttpClient.builder() + .protocol(Protocol.HTTP2) + .build(); + + CompletableFuture request1 = sendGetRequest(); + CompletableFuture request2 = sendGetRequest(); + + allRequestsReceived.await(); + + endpointDriver.channels.forEach(ch -> { + if (serverChannels.contains(ch.id().asShortText())) { + endpointDriver.goAway(ch, 0); + } + }); + + assertThatThrownBy(() -> request1.join()) + .hasMessageContaining("GOAWAY received from service") + .hasCauseInstanceOf(GoAwayException.class); + + assertThatThrownBy(() -> request2.join()) + .hasMessageContaining("GOAWAY received from service") + .hasCauseInstanceOf(GoAwayException.class); + + assertThat(endpointDriver.currentConnectionCount.get()).isEqualTo(0); + } + + @Test + public void execute_goAwayReceived_existingChannelsNotReused() throws InterruptedException { + // Frame listener supplier for each connection + Supplier frameListenerSupplier = () -> new TestFrameListener() { + @Override + public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, boolean endStream) { + onHeadersReadDelegator(ctx, streamId); + } + + @Override + public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int streamDependency, short weight, boolean exclusive, int padding, boolean endStream) { + onHeadersReadDelegator(ctx, streamId); + } + + private void onHeadersReadDelegator(ChannelHandlerContext ctx, int streamId) { + frameWriter().writeHeaders(ctx, streamId, new DefaultHttp2Headers().add("content-length", "0").status("204"), 0, true, ctx.newPromise()); + ctx.flush(); + } + }; + + endpointDriver = new SimpleEndpointDriver(frameListenerSupplier); + endpointDriver.init(); + + netty = NettyNioAsyncHttpClient.builder() + 
.eventLoopGroup(SdkEventLoopGroup.builder().numberOfThreads(1).build()) + .protocol(Protocol.HTTP2) + .build(); + + sendGetRequest().join(); + + // Note: It's possible the initial request can cause the client to allocate more than 1 channel + int initialChannelNum = endpointDriver.channels.size(); + + // Send GOAWAY to all the currently open channels + endpointDriver.channels.forEach(ch -> endpointDriver.goAway(ch, 1)); + + // Need to give a chance for the streams to get closed + Thread.sleep(1000); + + // Since the existing channels are now invalid, this request should cause a new channel to be opened + sendGetRequest().join(); + + assertThat(endpointDriver.channels).hasSize(initialChannelNum + 1); + } + + // The client should not close streams that are less than the 'last stream + // ID' given in the GOAWAY frame since it means they were processed fully + @Test + public void execute_goAwayReceived_lastStreamId_lowerStreamsNotClosed() throws InterruptedException { + ConcurrentHashMap> channelToStreams = new ConcurrentHashMap<>(); + + CompletableFuture stream3Received = new CompletableFuture<>(); + CountDownLatch allRequestsReceived = new CountDownLatch(2); + byte[] getPayload = "go away!".getBytes(StandardCharsets.UTF_8); + Supplier frameListenerSupplier = () -> new TestFrameListener() { + @Override + public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, boolean endStream) { + onHeadersReadDelegator(ctx, streamId); + } + + @Override + public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int streamDependency, short weight, boolean exclusive, int padding, boolean endStream) { + onHeadersReadDelegator(ctx, streamId); + } + + private void onHeadersReadDelegator(ChannelHandlerContext ctx, int streamId) { + channelToStreams.computeIfAbsent(ctx.channel().id().asShortText(), (k) -> Collections.newSetFromMap(new ConcurrentHashMap<>())).add(streamId); + + if (streamId == 3) { + stream3Received.complete(null); + } + + if (streamId < 5) { + Http2Headers outboundHeaders = new DefaultHttp2Headers() + .status("200") + .add("content-type", "text/plain") + .addInt("content-length", getPayload.length); + + frameWriter().writeHeaders(ctx, streamId, outboundHeaders, 0, false, ctx.newPromise()); + ctx.flush(); + } + + allRequestsReceived.countDown(); + } + }; + + endpointDriver = new SimpleEndpointDriver(frameListenerSupplier); + endpointDriver.init(); + + netty = NettyNioAsyncHttpClient.builder() + .protocol(Protocol.HTTP2) + .build(); + + CompletableFuture stream3Cf = sendGetRequest();// stream ID 3 + + // Wait for the request to be received just to ensure that it is given ID 3 + stream3Received.join(); + + CompletableFuture stream5Cf = sendGetRequest();// stream ID 5 + + allRequestsReceived.await(10, TimeUnit.SECONDS); + + // send the GOAWAY first, specifying that everything after 3 is not processed + endpointDriver.channels.forEach(ch -> { + Set streams = channelToStreams.getOrDefault(ch.id().asShortText(), Collections.emptySet()); + if (streams.contains(3)) { + endpointDriver.goAway(ch, 3); + } + }); + + // now send the DATA for stream 3, which should still be valid + endpointDriver.channels.forEach(ch -> { + Set streams = channelToStreams.getOrDefault(ch.id().asShortText(), Collections.emptySet()); + if (streams.contains(3)) { + endpointDriver.data(ch, 3, getPayload); + } + }); + + waitForFuture(stream3Cf); + waitForFuture(stream5Cf); + + assertThat(stream3Cf.isCompletedExceptionally()).isFalse(); + 
assertThat(stream5Cf.isCompletedExceptionally()).isTrue(); + stream5Cf.exceptionally(e -> { + assertThat(e).isInstanceOf(IOException.class); + return null; + }); + } + + private CompletableFuture sendGetRequest() { + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .responseHandler(new SdkAsyncHttpResponseHandler() { + private SdkHttpResponse headers; + + @Override + public void onHeaders(SdkHttpResponse headers) { + this.headers = headers; + } + + @Override + public void onStream(Publisher stream) { + // Consume the stream in order to complete request + Flowable.fromPublisher(stream).subscribe(b -> {}, t -> {}); + } + + @Override + public void onError(Throwable error) { + } + }) + .request(SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .protocol("http") + .host("localhost") + .port(endpointDriver.port()) + .build()) + .requestContentPublisher(new EmptyPublisher()) + .build(); + + return netty.execute(req); + } + + private static void waitForFuture(CompletableFuture cf) { + try { + cf.get(2, TimeUnit.SECONDS); + } catch (ExecutionException | InterruptedException t) { + } catch (TimeoutException t) { + throw new RuntimeException("Future did not complete after 2 seconds.", t); + } + } + + // Minimal class to simulate an H2 endpoint + private static class SimpleEndpointDriver extends ChannelInitializer { + private List channels = new ArrayList<>(); + private final NioEventLoopGroup group = new NioEventLoopGroup(); + private final Supplier frameListenerSupplier; + private ServerBootstrap bootstrap; + private ServerSocketChannel serverSock; + private AtomicInteger currentConnectionCount = new AtomicInteger(0); + + public SimpleEndpointDriver(Supplier frameListenerSupplier) { + this.frameListenerSupplier = frameListenerSupplier; + } + + public void init() throws InterruptedException { + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(new NioEventLoopGroup()) + .childHandler(this) + .childOption(ChannelOption.SO_KEEPALIVE, true); + + serverSock = (ServerSocketChannel) bootstrap.bind(0).sync().channel(); + } + + public void shutdown() throws InterruptedException { + group.shutdownGracefully().await(); + } + + public int port() { + return serverSock.localAddress().getPort(); + } + + public void goAway(SocketChannel ch, int lastStreamId) { + ByteBuf b = ch.alloc().buffer(9 + 8); + + // Frame header + b.writeMedium(8); // Payload length + b.writeByte(0x7); // Type = GOAWAY + b.writeByte(0x0); // Flags + b.writeInt(0); // 0 = connection frame + + // GOAWAY payload + b.writeInt(lastStreamId); + b.writeInt(0); // Error code + + ch.writeAndFlush(b); + } + + public void data(SocketChannel ch, int streamId, byte[] payload) { + ByteBuf b = ch.alloc().buffer(9 + payload.length); + + // Header + b.writeMedium(payload.length); // Payload length + b.writeByte(0); // Type = DATA + b.writeByte(0x1); // 0x1 = EOF + b.writeInt(streamId); + + // Payload + b.writeBytes(payload); + + ch.writeAndFlush(b); + } + + @Override + protected void initChannel(SocketChannel ch) throws Exception { + channels.add(ch); + ch.pipeline().addLast(new Http2ConnHandler(this, frameListenerSupplier.get())); + } + + @Override + public void channelActive(ChannelHandlerContext ctx) throws Exception { + currentConnectionCount.incrementAndGet(); + super.channelActive(ctx); + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) throws Exception { + currentConnectionCount.decrementAndGet(); + super.channelInactive(ctx); + } + } + + private abstract class 
TestFrameListener extends Http2FrameAdapter { + private final Http2FrameWriter frameWriter = new DefaultHttp2FrameWriter(); + + protected final Http2FrameWriter frameWriter() { + return frameWriter; + } + + @Override + public void onSettingsRead(ChannelHandlerContext ctx, Http2Settings settings) { + frameWriter().writeSettings(ctx, new Http2Settings(), ctx.newPromise()); + frameWriter().writeSettingsAck(ctx, ctx.newPromise()); + ctx.flush(); + } + } + + private static class Http2ConnHandler extends ChannelDuplexHandler { + // Prior knowledge preface + private static final String PREFACE = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + private static final AttributeKey H2_ESTABLISHED = AttributeKey.newInstance("h2-etablished"); + + private final Http2FrameReader frameReader = new DefaultHttp2FrameReader(); + private final SimpleEndpointDriver simpleEndpointDriver; + private final Http2FrameListener frameListener; + + public Http2ConnHandler(SimpleEndpointDriver simpleEndpointDriver, Http2FrameListener frameListener) { + this.simpleEndpointDriver = simpleEndpointDriver; + this.frameListener = frameListener; + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + ByteBuf bb = (ByteBuf) msg; + if (!isH2Established(ctx.channel())) { + String prefaceString = bb.readCharSequence(24, StandardCharsets.UTF_8).toString(); + if (PREFACE.equals(prefaceString)) { + ctx.channel().attr(H2_ESTABLISHED).set(true); + } + } + frameReader.readFrame(ctx, bb, frameListener); + } + + private boolean isH2Established(Channel ch) { + return Boolean.TRUE.equals(ch.attr(H2_ESTABLISHED).get()); + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H1ServerErrorTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H1ServerErrorTest.java new file mode 100644 index 000000000000..4df586e5a923 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H1ServerErrorTest.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.fault; + +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; + +import software.amazon.awssdk.http.SdkAsyncHttpClientH1TestSuite; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; +import software.amazon.awssdk.utils.AttributeMap; + + +/** + * Testing the scenario where h1 server sends 5xx errors. 
+ */ +public class H1ServerErrorTest extends SdkAsyncHttpClientH1TestSuite { + + @Override + protected SdkAsyncHttpClient setupClient() { + return NettyNioAsyncHttpClient.builder() + .eventLoopGroup(SdkEventLoopGroup.builder().numberOfThreads(2).build()) + .protocol(Protocol.HTTP1_1) + .buildWithDefaults(AttributeMap.builder().put(TRUST_ALL_CERTIFICATES, true).build()); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H2ServerErrorTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H2ServerErrorTest.java new file mode 100644 index 000000000000..c4aed9966c19 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H2ServerErrorTest.java @@ -0,0 +1,192 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.fault; + +import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR; +import static io.netty.handler.codec.http.HttpResponseStatus.OK; +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; +import static software.amazon.awssdk.http.HttpTestUtils.sendGetRequest; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.DefaultHttp2DataFrame; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame; +import io.netty.handler.codec.http2.Http2DataFrame; +import io.netty.handler.codec.http2.Http2Frame; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2Headers; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.util.SelfSignedCertificate; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; +import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.Logger; + +/** + * Testing the scenario where h2 server sends 5xx 
errors. + */ +public class H2ServerErrorTest { + private static final Logger LOGGER = Logger.loggerFor(H2ServerErrorTest.class); + private SdkAsyncHttpClient netty; + private Server server; + + @Before + public void setup() throws Exception { + server = new Server(); + server.init(); + + netty = NettyNioAsyncHttpClient.builder() + .eventLoopGroup(SdkEventLoopGroup.builder().numberOfThreads(3).build()) + .protocol(Protocol.HTTP2) + .buildWithDefaults(AttributeMap.builder().put(TRUST_ALL_CERTIFICATES, true).build()); + } + + @After + public void teardown() throws InterruptedException { + if (server != null) { + server.shutdown(); + } + server = null; + + if (netty != null) { + netty.close(); + } + netty = null; + } + + @Test + public void serviceReturn500_newRequestShouldUseNewConnection() { + server.return500OnFirstRequest = true; + CompletableFuture firstRequest = sendGetRequest(server.port(), netty); + firstRequest.join(); + + sendGetRequest(server.port(), netty).join(); + assertThat(server.h2ConnectionCount.get()).isEqualTo(2); + } + + @Test + public void serviceReturn200_newRequestShouldReuseNewConnection() { + server.return500OnFirstRequest = false; + CompletableFuture firstRequest = sendGetRequest(server.port(), netty); + firstRequest.join(); + + sendGetRequest(server.port(), netty).join(); + assertThat(server.h2ConnectionCount.get()).isEqualTo(1); + } + + private static class Server extends ChannelInitializer { + private ServerBootstrap bootstrap; + private ServerSocketChannel serverSock; + private String[] channelIds = new String[5]; + private final NioEventLoopGroup group = new NioEventLoopGroup(); + private SslContext sslCtx; + private boolean return500OnFirstRequest; + private AtomicInteger h2ConnectionCount = new AtomicInteger(0); + + void init() throws Exception { + SelfSignedCertificate ssc = new SelfSignedCertificate(); + sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(); + + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(group) + .childHandler(this); + + serverSock = (ServerSocketChannel) bootstrap.bind(0).sync().channel(); + } + + @Override + protected void initChannel(Channel ch) { + channelIds[h2ConnectionCount.get()] = ch.id().asShortText(); + LOGGER.debug(() -> "init channel " + ch); + h2ConnectionCount.incrementAndGet(); + + ChannelPipeline pipeline = ch.pipeline(); + pipeline.addLast(sslCtx.newHandler(ch.alloc())); + + + Http2FrameCodec http2Codec = Http2FrameCodecBuilder.forServer() + .autoAckPingFrame(true) + .initialSettings(Http2Settings.defaultSettings().maxConcurrentStreams(1)) + .build(); + + Http2MultiplexHandler http2Handler = new Http2MultiplexHandler(new ChannelInitializer() { + @Override + protected void initChannel(Channel ch) throws Exception { + ch.pipeline().addLast(new MightReturn500StreamFrameHandler()); + } + }); + + pipeline.addLast(http2Codec); + pipeline.addLast(http2Handler); + } + + public void shutdown() throws InterruptedException { + group.shutdownGracefully().await(); + serverSock.close(); + } + + public int port() { + return serverSock.localAddress().getPort(); + } + + private class MightReturn500StreamFrameHandler extends SimpleChannelInboundHandler { + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2Frame frame) { + if (frame instanceof Http2DataFrame) { + DefaultHttp2DataFrame dataFrame = new DefaultHttp2DataFrame(true); + + // Return 500 if this is channel 1 + if (channelIds[0].equals(ctx.channel().parent().id().asShortText()) &&
return500OnFirstRequest) { + LOGGER.info(() -> "This is the first request, returning 500" + ctx.channel()); + Http2Headers headers = new DefaultHttp2Headers().status(INTERNAL_SERVER_ERROR.codeAsText()); + ctx.write(new DefaultHttp2HeadersFrame(headers, false)); + ctx.write(new DefaultHttp2DataFrame(true)); + ctx.flush(); + } else { + LOGGER.info(() -> "return empty data " + ctx.channel() + " frame " + frame.getClass()); + Http2Headers headers = new DefaultHttp2Headers().status(OK.codeAsText()); + ctx.write(new DefaultHttp2HeadersFrame(headers, false)); + ctx.write(dataFrame); + ctx.flush(); + } + } + } + } + } + +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/PingTimeoutTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/PingTimeoutTest.java new file mode 100644 index 000000000000..f88c5af2bfcd --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/PingTimeoutTest.java @@ -0,0 +1,257 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + + +package software.amazon.awssdk.http.nio.netty.fault; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.DefaultHttp2DataFrame; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame; +import io.netty.handler.codec.http2.Http2DataFrame; +import io.netty.handler.codec.http2.Http2Frame; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2FrameLogger; +import io.netty.handler.codec.http2.Http2Headers; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.util.SelfSignedCertificate; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import 
org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.http.EmptyPublisher; +import software.amazon.awssdk.http.nio.netty.Http2Configuration; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.internal.http2.PingFailedException; + +/** + * Testing the scenario where the server never acks PING + */ +public class PingTimeoutTest { + @Rule + public ExpectedException expected = ExpectedException.none(); + + private Server server; + private SdkAsyncHttpClient netty; + + @Before + public void methodSetup() throws Exception { + server = new Server(); + server.init(); + } + + @After + public void methodTeardown() throws InterruptedException { + server.shutdown(); + + if (netty != null) { + netty.close(); + } + + netty = null; + } + + @Test + public void pingHealthCheck_null_shouldThrowExceptionAfter5Sec() { + Instant a = Instant.now(); + assertThatThrownBy(() -> makeRequest(null).join()) + .hasMessageContaining("An error occurred on the connection") + .hasCauseInstanceOf(IOException.class) + .hasRootCauseInstanceOf(PingFailedException.class); + assertThat(Duration.between(a, Instant.now())).isBetween(Duration.ofSeconds(5), Duration.ofSeconds(7)); + } + + @Test + public void pingHealthCheck_10sec_shouldThrowExceptionAfter10Secs() { + Instant a = Instant.now(); + assertThatThrownBy(() -> makeRequest(Duration.ofSeconds(10)).join()).hasCauseInstanceOf(IOException.class) + .hasMessageContaining("An error occurred on the connection") + .hasRootCauseInstanceOf(PingFailedException.class); + assertThat(Duration.between(a, Instant.now())).isBetween(Duration.ofSeconds(10), Duration.ofSeconds(12)); + } + + @Test + public void pingHealthCheck_0_disabled_shouldNotThrowException() throws Exception { + expected.expect(TimeoutException.class); + CompletableFuture requestFuture = makeRequest(Duration.ofMillis(0)); + try { + requestFuture.get(8, TimeUnit.SECONDS); + } finally { + assertThat(requestFuture.isDone()).isFalse(); + } + } + + private CompletableFuture makeRequest(Duration healthCheckPingPeriod) { + netty = NettyNioAsyncHttpClient.builder() + .protocol(Protocol.HTTP2) + .http2Configuration(Http2Configuration.builder().healthCheckPingPeriod(healthCheckPingPeriod).build()) + .build(); + + SdkHttpFullRequest request = SdkHttpFullRequest.builder() + .protocol("http") + .host("localhost") + .port(server.port()) + .method(SdkHttpMethod.GET) + .build(); + + AsyncExecuteRequest executeRequest = AsyncExecuteRequest.builder() + .fullDuplex(false) + .request(request) + .requestContentPublisher(new EmptyPublisher()) + .responseHandler(new SdkAsyncHttpResponseHandler() { + @Override + public void onHeaders(SdkHttpResponse headers) { + } + + @Override + public void onStream(Publisher stream) { + stream.subscribe(new Subscriber() { + @Override + public void onSubscribe(Subscription s) { + s.request(Integer.MAX_VALUE); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + } + + @Override + public void onError(Throwable t) { + } + + @Override + public void 
onComplete() { + } + }); + } + + @Override + public void onError(Throwable error) { + } + }) + .build(); + + return netty.execute(executeRequest); + } + + private static class Server extends ChannelInitializer { + private ServerBootstrap bootstrap; + private ServerSocketChannel serverSock; + private String[] channelIds = new String[5]; + private final NioEventLoopGroup group = new NioEventLoopGroup(); + private SslContext sslCtx; + private AtomicInteger h2ConnectionCount = new AtomicInteger(0); + + void init() throws Exception { + SelfSignedCertificate ssc = new SelfSignedCertificate(); + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(group) + .childHandler(this); + + serverSock = (ServerSocketChannel) bootstrap.bind(0).sync().channel(); + } + + @Override + protected void initChannel(Channel ch) { + channelIds[h2ConnectionCount.get()] = ch.id().asShortText(); + ch.pipeline().addFirst(new LoggingHandler(LogLevel.DEBUG)); + h2ConnectionCount.incrementAndGet(); + + ChannelPipeline pipeline = ch.pipeline(); + + Http2FrameCodec http2Codec = Http2FrameCodecBuilder.forServer() + // simulate not sending goaway + .decoupleCloseAndGoAway(true) + .autoAckPingFrame(false) + .initialSettings(Http2Settings.defaultSettings().maxConcurrentStreams(2)) + .frameLogger(new Http2FrameLogger(LogLevel.DEBUG, "WIRE")) + .build(); + + Http2MultiplexHandler http2Handler = new Http2MultiplexHandler(new ChannelInitializer() { + @Override + protected void initChannel(Channel ch) { + ch.pipeline().addLast(new StreamHandler()); + } + }); + + pipeline.addLast(http2Codec); + pipeline.addLast(http2Handler); + } + + public void shutdown() throws InterruptedException { + group.shutdownGracefully().await(); + serverSock.close(); + } + + public int port() { + return serverSock.localAddress().getPort(); + } + } + + private static final class StreamHandler extends SimpleChannelInboundHandler { + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2Frame http2Frame) throws Exception { + if (http2Frame instanceof Http2DataFrame) { + Http2DataFrame dataFrame = (Http2DataFrame) http2Frame; + if (dataFrame.isEndStream()) { + Http2Headers headers = new DefaultHttp2Headers().status("200"); + ctx.writeAndFlush(new DefaultHttp2HeadersFrame(headers, false)); + ctx.executor().scheduleAtFixedRate(() -> { + DefaultHttp2DataFrame respData = new DefaultHttp2DataFrame(Unpooled.wrappedBuffer("hello".getBytes()), false); + ctx.writeAndFlush(respData); + }, 0, 2, TimeUnit.SECONDS); + } + } + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerCloseConnectionTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerCloseConnectionTest.java new file mode 100644 index 000000000000..cc6fbda166b5 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerCloseConnectionTest.java @@ -0,0 +1,243 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.fault; + +import static io.netty.handler.codec.http.HttpResponseStatus.OK; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.DefaultHttp2DataFrame; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame; +import io.netty.handler.codec.http2.Http2DataFrame; +import io.netty.handler.codec.http2.Http2Frame; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2FrameLogger; +import io.netty.handler.codec.http2.Http2Headers; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.util.SelfSignedCertificate; +import io.reactivex.Flowable; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.time.Duration; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.reactivestreams.Publisher; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.http.EmptyPublisher; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; +import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.Logger; + + +/** + * Testing the scenario where the connection gets inactive without GOAWAY frame + */ +public class ServerCloseConnectionTest { + private static final Logger LOGGER = Logger.loggerFor(ServerCloseConnectionTest.class); + private SdkAsyncHttpClient netty; + private Server server; + + @Before + public void setup() throws Exception { + server = new Server(); + server.init(); + + netty = NettyNioAsyncHttpClient.builder() + .readTimeout(Duration.ofMillis(500)) + .eventLoopGroup(SdkEventLoopGroup.builder().numberOfThreads(3).build()) + .protocol(Protocol.HTTP2) + .buildWithDefaults(AttributeMap.builder().put(TRUST_ALL_CERTIFICATES, true).build()); + } + + @After + public void teardown() throws InterruptedException { + if (server != null) { 
+ server.shutdown(); + } + server = null; + + if (netty != null) { + netty.close(); + } + netty = null; + } + + @Test + public void connectionGetsInactive_shouldNotReuse() { + server.ackPingOnFirstChannel = true; + // The first request picks up a bad channel and should fail. Channel 1 + CompletableFuture firstRequest = sendGetRequest(); + assertThatThrownBy(() -> firstRequest.join()) + .hasMessageContaining("An error occurred on the connection") + .hasCauseInstanceOf(IOException.class) + .hasRootCauseInstanceOf(ClosedChannelException.class); + + server.failOnFirstChannel = false; + + // The second request should establish a new connection instead of reusing the bad channel 2 + LOGGER.info(() -> "sending out the second request"); + sendGetRequest().join(); + + // should be 2 connections + assertThat(server.h2ConnectionCount.get()).isEqualTo(2); + } + + private CompletableFuture sendGetRequest() { + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .responseHandler(new SdkAsyncHttpResponseHandler() { + private SdkHttpResponse headers; + + @Override + public void onHeaders(SdkHttpResponse headers) { + this.headers = headers; + } + + @Override + public void onStream(Publisher stream) { + Flowable.fromPublisher(stream).forEach(b -> { + }); + } + + @Override + public void onError(Throwable error) { + } + }) + .request(SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .protocol("https") + .host("localhost") + .port(server.port()) + .build()) + .requestContentPublisher(new EmptyPublisher()) + .build(); + + return netty.execute(req); + } + + + private static class Server extends ChannelInitializer { + private ServerBootstrap bootstrap; + private ServerSocketChannel serverSock; + private String[] channelIds = new String[5]; + private final NioEventLoopGroup group = new NioEventLoopGroup(); + private SslContext sslCtx; + private AtomicInteger h2ConnectionCount = new AtomicInteger(0); + private boolean ackPingOnFirstChannel = false; + private boolean failOnFirstChannel = true; + + void init() throws Exception { + SelfSignedCertificate ssc = new SelfSignedCertificate(); + sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(); + + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(group) + .childHandler(this); + + serverSock = (ServerSocketChannel) bootstrap.bind(0).sync().channel(); + } + + @Override + protected void initChannel(Channel ch) { + channelIds[h2ConnectionCount.get()] = ch.id().asShortText(); + ch.pipeline().addFirst(new LoggingHandler(LogLevel.DEBUG)); + LOGGER.debug(() -> "init channel " + ch); + h2ConnectionCount.incrementAndGet(); + + ChannelPipeline pipeline = ch.pipeline(); + pipeline.addLast(sslCtx.newHandler(ch.alloc())); + + + Http2FrameCodec http2Codec = Http2FrameCodecBuilder.forServer() + // simulate not sending goaway + .decoupleCloseAndGoAway(true) + .initialSettings(Http2Settings.defaultSettings().maxConcurrentStreams(2)) + .frameLogger(new Http2FrameLogger(LogLevel.DEBUG, "WIRE")) + .build(); + + Http2MultiplexHandler http2Handler = new Http2MultiplexHandler(new ChannelInitializer() { + @Override + protected void initChannel(Channel ch) { + ch.pipeline().addLast(new MightCloseConnectionStreamFrameHandler()); + } + }); + + pipeline.addLast(http2Codec); + pipeline.addLast(http2Handler); + } + + public void shutdown() throws InterruptedException { + group.shutdownGracefully().await(); + serverSock.close(); + } + + public int port() { + return serverSock.localAddress().getPort(); + } + + private class 
MightCloseConnectionStreamFrameHandler extends SimpleChannelInboundHandler { + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2Frame frame) { + if (frame instanceof Http2DataFrame) { + // Not respond if this is channel 1 + if (channelIds[0].equals(ctx.channel().parent().id().asShortText()) && failOnFirstChannel) { + ctx.channel().parent().close(); + } else { + DefaultHttp2DataFrame dataFrame = new DefaultHttp2DataFrame(false); + try { + LOGGER.info(() -> "return empty data " + ctx.channel() + " frame " + frame.getClass()); + Http2Headers headers = new DefaultHttp2Headers().status(OK.codeAsText()); + ctx.write(dataFrame); + ctx.write(new DefaultHttp2HeadersFrame(headers, true)); + ctx.flush(); + } finally { + dataFrame.release(); + } + } + } + } + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerNotRespondingTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerNotRespondingTest.java new file mode 100644 index 000000000000..88eb36716106 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerNotRespondingTest.java @@ -0,0 +1,284 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.fault; + +import static io.netty.handler.codec.http.HttpResponseStatus.OK; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.DefaultHttp2DataFrame; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame; +import io.netty.handler.codec.http2.DefaultHttp2PingFrame; +import io.netty.handler.codec.http2.Http2DataFrame; +import io.netty.handler.codec.http2.Http2Frame; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2FrameLogger; +import io.netty.handler.codec.http2.Http2GoAwayFrame; +import io.netty.handler.codec.http2.Http2Headers; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.handler.codec.http2.Http2PingFrame; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.util.SelfSignedCertificate; +import io.netty.handler.timeout.ReadTimeoutException; +import io.reactivex.Flowable; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.reactivestreams.Publisher; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.http.EmptyPublisher; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; +import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.Logger; + + +/** + * Testing the scenario where the server fails to respond to periodic PING + */ +public class ServerNotRespondingTest { + private static final Logger LOGGER = Logger.loggerFor(ServerNotRespondingTest.class); + private SdkAsyncHttpClient netty; + private Server server; + + @Before + public void setup() throws Exception { + server = new Server(); + server.init(); + + netty = NettyNioAsyncHttpClient.builder() + .readTimeout(Duration.ofMillis(1000)) + .eventLoopGroup(SdkEventLoopGroup.builder().numberOfThreads(3).build()) + .http2Configuration(h -> h.healthCheckPingPeriod(Duration.ofMillis(200))) + .protocol(Protocol.HTTP2) + .buildWithDefaults(AttributeMap.builder().put(TRUST_ALL_CERTIFICATES, 
true).build()); + } + + @After + public void teardown() throws InterruptedException { + if (server != null) { + server.shutdown(); + } + server = null; + + if (netty != null) { + netty.close(); + } + netty = null; + } + + @Test + public void connectionNotAckPing_newRequestShouldUseNewConnection() throws InterruptedException { + server.ackPingOnFirstChannel = false; + server.notRespondOnFirstChannel = false; + CompletableFuture firstRequest = sendGetRequest(); + // First request should succeed + firstRequest.join(); + + // Wait for Ping to close the connection + Thread.sleep(200); + server.notRespondOnFirstChannel = false; + sendGetRequest().join(); + assertThat(server.h2ConnectionCount.get()).isEqualTo(2); + assertThat(server.closedByClientH2ConnectionCount.get()).isEqualTo(1); + } + + @Test + public void connectionNotRespond_newRequestShouldUseNewConnection() throws Exception { + server.ackPingOnFirstChannel = true; + server.notRespondOnFirstChannel = true; + + // The first request picks up a non-responding channel and should fail. Channel 1 + CompletableFuture firstRequest = sendGetRequest(); + + assertThatThrownBy(() -> firstRequest.join()).hasRootCauseInstanceOf(ReadTimeoutException.class); + + // The second request should pick up a new healthy channel - Channel 2 + sendGetRequest().join(); + + assertThat(server.h2ConnectionCount.get()).isEqualTo(2); + assertThat(server.closedByClientH2ConnectionCount.get()).isEqualTo(1); + } + + private CompletableFuture sendGetRequest() { + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .responseHandler(new SdkAsyncHttpResponseHandler() { + private SdkHttpResponse headers; + + @Override + public void onHeaders(SdkHttpResponse headers) { + this.headers = headers; + } + + @Override + public void onStream(Publisher stream) { + Flowable.fromPublisher(stream).forEach(b -> { + }); + } + + @Override + public void onError(Throwable error) { + } + }) + .request(SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .protocol("https") + .host("localhost") + .port(server.port()) + .build()) + .requestContentPublisher(new EmptyPublisher()) + .build(); + + return netty.execute(req); + } + + + private static class Server extends ChannelInitializer { + private ServerBootstrap bootstrap; + private ServerSocketChannel serverSock; + private String[] channelIds = new String[5]; + private final NioEventLoopGroup group = new NioEventLoopGroup(); + private SslContext sslCtx; + private AtomicInteger h2ConnectionCount = new AtomicInteger(0); + private AtomicInteger closedByClientH2ConnectionCount = new AtomicInteger(0); + private volatile boolean ackPingOnFirstChannel = false; + private volatile boolean notRespondOnFirstChannel = true; + + void init() throws Exception { + SelfSignedCertificate ssc = new SelfSignedCertificate(); + sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(); + + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(group) + .childHandler(this); + + serverSock = (ServerSocketChannel) bootstrap.bind(0).sync().channel(); + } + + @Override + protected void initChannel(Channel ch) { + channelIds[h2ConnectionCount.get()] = ch.id().asShortText(); + ch.pipeline().addFirst(new LoggingHandler(LogLevel.DEBUG)); + LOGGER.debug(() -> "init channel " + ch); + h2ConnectionCount.incrementAndGet(); + + ChannelPipeline pipeline = ch.pipeline(); + pipeline.addLast(sslCtx.newHandler(ch.alloc())); + + + Http2FrameCodec http2Codec = Http2FrameCodecBuilder.forServer() + //Disable auto ack ping + 
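// With auto-acking disabled, the MightNotRespondPingFrameHandler added below decides whether to ack the client's health-check PING +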
.autoAckPingFrame(false) + .initialSettings(Http2Settings.defaultSettings().maxConcurrentStreams(2)) + .frameLogger(new Http2FrameLogger(LogLevel.DEBUG, "WIRE")) + .build(); + + Http2MultiplexHandler http2Handler = new Http2MultiplexHandler(new ChannelInitializer() { + @Override + protected void initChannel(Channel ch) throws Exception { + ch.pipeline().addLast(new MightNotRespondStreamFrameHandler()); + } + }); + + pipeline.addLast(http2Codec); + pipeline.addLast(new MightNotRespondPingFrameHandler()); + pipeline.addLast(new VerifyGoAwayFrameHandler()); + pipeline.addLast(http2Handler); + } + + public void shutdown() throws InterruptedException { + group.shutdownGracefully().await(); + serverSock.close(); + } + + public int port() { + return serverSock.localAddress().getPort(); + } + + public final class MightNotRespondPingFrameHandler extends SimpleChannelInboundHandler { + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2PingFrame msg) { + if (channelIds[0].equals(ctx.channel().id().asShortText()) && !ackPingOnFirstChannel) { + // Not respond if this is the first request + LOGGER.info(() -> "yolo" + ctx.channel()); + } else { + ctx.writeAndFlush(new DefaultHttp2PingFrame(msg.content(), true)); + } + } + } + + + public final class VerifyGoAwayFrameHandler extends SimpleChannelInboundHandler { + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2GoAwayFrame msg) { + LOGGER.info(() -> "goaway" + ctx.channel()); + closedByClientH2ConnectionCount.incrementAndGet(); + msg.release(); + } + } + + private class MightNotRespondStreamFrameHandler extends SimpleChannelInboundHandler { + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2Frame frame) { + if (frame instanceof Http2DataFrame) { + // Not respond if this is channel 1 + if (channelIds[0].equals(ctx.channel().parent().id().asShortText()) && notRespondOnFirstChannel) { + LOGGER.info(() -> "This is the first request, not responding" + ctx.channel()); + } else { + DefaultHttp2DataFrame dataFrame = new DefaultHttp2DataFrame(false); + try { + LOGGER.info(() -> "return empty data " + ctx.channel() + " frame " + frame.getClass()); + Http2Headers headers = new DefaultHttp2Headers().status(OK.codeAsText()); + ctx.write(dataFrame); + ctx.write(new DefaultHttp2HeadersFrame(headers, true)); + ctx.flush(); + } finally { + dataFrame.release(); + } + } + } + } + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java index 1f46438bad30..e85e71a56f9d 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -16,51 +16,81 @@ package software.amazon.awssdk.http.nio.netty.internal; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static software.amazon.awssdk.http.SdkHttpConfigurationOption.GLOBAL_HTTP_DEFAULTS; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TLS_KEY_MANAGERS_PROVIDER; -import io.netty.handler.ssl.SslProvider; import java.net.URI; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; -import java.util.concurrent.ExecutionException; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; + import org.apache.commons.lang3.RandomStringUtils; -import org.junit.BeforeClass; +import org.junit.After; +import org.junit.Rule; import org.junit.Test; +import org.mockito.Mockito; + +import io.netty.channel.Channel; +import io.netty.channel.pool.ChannelPool; +import io.netty.handler.ssl.SslProvider; +import io.netty.util.concurrent.Future; import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.TlsKeyManagersProvider; +import software.amazon.awssdk.http.nio.netty.ProxyConfiguration; +import software.amazon.awssdk.http.nio.netty.RecordingNetworkTrafficListener; import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; -import software.amazon.awssdk.http.nio.netty.internal.AwaitCloseChannelPoolMap.SimpleChannelPoolAwareChannelPool; +import software.amazon.awssdk.utils.AttributeMap; public class AwaitCloseChannelPoolMapTest { - private static AwaitCloseChannelPoolMap channelPoolMap; + private final RecordingNetworkTrafficListener recorder = new RecordingNetworkTrafficListener(); + private AwaitCloseChannelPoolMap channelPoolMap; - @BeforeClass - public static void setup() { - channelPoolMap = AwaitCloseChannelPoolMap.builder() - .sdkChannelOptions(new SdkChannelOptions()) - .sdkEventLoopGroup(SdkEventLoopGroup.builder().build()) - .configuration(new NettyConfiguration(GLOBAL_HTTP_DEFAULTS)) - .protocol(Protocol.HTTP1_1) - .maxStreams(100) - .sslProvider(SslProvider.OPENSSL) - .build(); + @Rule + public WireMockRule mockProxy = new WireMockRule(wireMockConfig() + .dynamicPort() + .networkTrafficListener(recorder)); + + @After + public void methodTeardown() { + if (channelPoolMap != null) { + channelPoolMap.close(); + } + channelPoolMap = null; + + recorder.reset(); } @Test - public void close_underlyingPoolsShouldBeClosed() throws ExecutionException, InterruptedException { + public void close_underlyingPoolsShouldBeClosed() { + channelPoolMap = AwaitCloseChannelPoolMap.builder() + .sdkChannelOptions(new SdkChannelOptions()) + .sdkEventLoopGroup(SdkEventLoopGroup.builder().build()) + .configuration(new NettyConfiguration(GLOBAL_HTTP_DEFAULTS)) + .protocol(Protocol.HTTP1_1) + .maxStreams(100) + .sslProvider(SslProvider.OPENSSL) + .build(); int numberOfChannelPools = 5; List channelPools = new ArrayList<>(); for (int i = 0; i < numberOfChannelPools; i++) { channelPools.add( - channelPoolMap.get(URI.create("http://" + RandomStringUtils.randomAlphabetic(2) + i + "localhost:" + numberOfChannelPools))); + channelPoolMap.get(URI.create("http://" + RandomStringUtils.randomAlphabetic(2) + i + "localhost:" + numberOfChannelPools))); } assertThat(channelPoolMap.pools().size()).isEqualTo(numberOfChannelPools); - channelPoolMap.close(); 
channelPools.forEach(channelPool -> { assertThat(channelPool.underlyingSimpleChannelPool().closeFuture()).isDone(); @@ -68,4 +98,141 @@ public void close_underlyingPoolsShouldBeClosed() throws ExecutionException, Int }); } + @Test + public void get_callsInjectedBootstrapProviderCorrectly() { + BootstrapProvider bootstrapProvider = Mockito.spy( + new BootstrapProvider(SdkEventLoopGroup.builder().build(), + new NettyConfiguration(GLOBAL_HTTP_DEFAULTS), + new SdkChannelOptions())); + + URI targetUri = URI.create("https://some-awesome-service-1234.amazonaws.com:8080"); + + AwaitCloseChannelPoolMap.Builder builder = + AwaitCloseChannelPoolMap.builder() + .sdkChannelOptions(new SdkChannelOptions()) + .sdkEventLoopGroup(SdkEventLoopGroup.builder().build()) + .configuration(new NettyConfiguration(GLOBAL_HTTP_DEFAULTS)) + .protocol(Protocol.HTTP1_1) + .maxStreams(100) + .sslProvider(SslProvider.OPENSSL); + + channelPoolMap = new AwaitCloseChannelPoolMap(builder, null, bootstrapProvider); + channelPoolMap.get(targetUri); + + verify(bootstrapProvider).createBootstrap("some-awesome-service-1234.amazonaws.com", 8080); + } + + @Test + public void get_usingProxy_callsInjectedBootstrapProviderCorrectly() { + BootstrapProvider bootstrapProvider = Mockito.spy( + new BootstrapProvider(SdkEventLoopGroup.builder().build(), + new NettyConfiguration(GLOBAL_HTTP_DEFAULTS), + new SdkChannelOptions())); + + URI targetUri = URI.create("https://some-awesome-service-1234.amazonaws.com:8080"); + Map shouldProxyCache = new HashMap<>(); + shouldProxyCache.put(targetUri, true); + + ProxyConfiguration proxyConfiguration = + ProxyConfiguration.builder() + .host("localhost") + .port(mockProxy.port()) + .build(); + + AwaitCloseChannelPoolMap.Builder builder = + AwaitCloseChannelPoolMap.builder() + .proxyConfiguration(proxyConfiguration) + .sdkChannelOptions(new SdkChannelOptions()) + .sdkEventLoopGroup(SdkEventLoopGroup.builder().build()) + .configuration(new NettyConfiguration(GLOBAL_HTTP_DEFAULTS)) + .protocol(Protocol.HTTP1_1) + .maxStreams(100) + .sslProvider(SslProvider.OPENSSL); + + channelPoolMap = new AwaitCloseChannelPoolMap(builder, shouldProxyCache, bootstrapProvider); + channelPoolMap.get(targetUri); + + verify(bootstrapProvider).createBootstrap("localhost", mockProxy.port()); + } + + @Test + public void usingProxy_usesCachedValueWhenPresent() { + URI targetUri = URI.create("https://some-awesome-service-1234.amazonaws.com"); + + Map shouldProxyCache = new HashMap<>(); + shouldProxyCache.put(targetUri, true); + + ProxyConfiguration proxyConfiguration = ProxyConfiguration.builder() + .host("localhost") + .port(mockProxy.port()) + // Deliberately set the target host as a non-proxy host to see if it will check the cache first + .nonProxyHosts(Stream.of(targetUri.getHost()).collect(Collectors.toSet())) + .build(); + + AwaitCloseChannelPoolMap.Builder builder = AwaitCloseChannelPoolMap.builder() + .proxyConfiguration(proxyConfiguration) + .sdkChannelOptions(new SdkChannelOptions()) + .sdkEventLoopGroup(SdkEventLoopGroup.builder().build()) + .configuration(new NettyConfiguration(GLOBAL_HTTP_DEFAULTS)) + .protocol(Protocol.HTTP1_1) + .maxStreams(100) + .sslProvider(SslProvider.OPENSSL); + + channelPoolMap = new AwaitCloseChannelPoolMap(builder, shouldProxyCache, null); + + // The target host does not exist so acquiring a channel should fail unless we're configured to connect to + // the mock proxy host for this URI. 
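+ // A successful acquire therefore proves the cached shouldProxy decision took precedence over the nonProxyHosts setting.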
+ SimpleChannelPoolAwareChannelPool channelPool = channelPoolMap.newPool(targetUri); + Future channelFuture = channelPool.underlyingSimpleChannelPool().acquire().awaitUninterruptibly(); + assertThat(channelFuture.isSuccess()).isTrue(); + channelPool.release(channelFuture.getNow()).awaitUninterruptibly(); + } + + @Test + public void usingProxy_noSchemeGiven_defaultsToHttp() { + ProxyConfiguration proxyConfiguration = ProxyConfiguration.builder() + .host("localhost") + .port(mockProxy.port()) + .build(); + + channelPoolMap = AwaitCloseChannelPoolMap.builder() + .proxyConfiguration(proxyConfiguration) + .sdkChannelOptions(new SdkChannelOptions()) + .sdkEventLoopGroup(SdkEventLoopGroup.builder().build()) + .configuration(new NettyConfiguration(GLOBAL_HTTP_DEFAULTS)) + .protocol(Protocol.HTTP1_1) + .maxStreams(100) + .sslProvider(SslProvider.OPENSSL) + .build(); + + SimpleChannelPoolAwareChannelPool simpleChannelPoolAwareChannelPool = channelPoolMap.newPool( + URI.create("https://some-awesome-service:443")); + + simpleChannelPoolAwareChannelPool.acquire().awaitUninterruptibly(); + + String requests = recorder.requests().toString(); + + assertThat(requests).contains("CONNECT some-awesome-service:443"); + } + + @Test + public void usesProvidedKeyManagersProvider() { + TlsKeyManagersProvider provider = mock(TlsKeyManagersProvider.class); + + AttributeMap config = AttributeMap.builder() + .put(TLS_KEY_MANAGERS_PROVIDER, provider) + .build(); + + channelPoolMap = AwaitCloseChannelPoolMap.builder() + .sdkChannelOptions(new SdkChannelOptions()) + .sdkEventLoopGroup(SdkEventLoopGroup.builder().build()) + .protocol(Protocol.HTTP1_1) + .configuration(new NettyConfiguration(config.merge(GLOBAL_HTTP_DEFAULTS))) + .build(); + + ChannelPool channelPool = channelPoolMap.newPool(URI.create("https://localhost:" + mockProxy.port())); + channelPool.acquire().awaitUninterruptibly(); + verify(provider).keyManagers(); + } + } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java new file mode 100644 index 000000000000..dc3789a34aab --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.GLOBAL_HTTP_DEFAULTS; + +import java.net.InetSocketAddress; +import java.net.SocketAddress; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; + +import io.netty.bootstrap.Bootstrap; +import io.netty.resolver.AddressResolver; +import io.netty.resolver.AddressResolverGroup; +import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; + +@RunWith(MockitoJUnitRunner.class) +public class BootstrapProviderTest { + private final BootstrapProvider bootstrapProvider = + new BootstrapProvider(SdkEventLoopGroup.builder().build(), + new NettyConfiguration(GLOBAL_HTTP_DEFAULTS), + new SdkChannelOptions()); + + // IMPORTANT: This unit test asserts that the bootstrap provider creates bootstraps using 'unresolved + // InetSocketAddress'. If this test is replaced or removed, perhaps due to a different implementation of + // SocketAddress, a different test must be created that ensures that the hostname will be resolved on every + // connection attempt and not cached between connection attempts. + @Test + public void createBootstrap_usesUnresolvedInetSocketAddress() { + Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443); + + SocketAddress socketAddress = bootstrap.config().remoteAddress(); + + assertThat(socketAddress).isInstanceOf(InetSocketAddress.class); + InetSocketAddress inetSocketAddress = (InetSocketAddress)socketAddress; + + assertThat(inetSocketAddress.isUnresolved()).isTrue(); + } +} \ No newline at end of file diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPoolTest.java index 616a7b416ae1..16b320167cc0 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPoolTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -18,7 +18,6 @@ import io.netty.channel.Channel; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.Promise; @@ -47,7 +46,7 @@ public class CancellableAcquireChannelPoolTest { private EventExecutor eventExecutor; @Mock - private ChannelPool mockDelegatePool; + private SdkChannelPool mockDelegatePool; private Channel channel; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializerTest.java new file mode 100644 index 000000000000..12e0627bb1e4 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializerTest.java @@ -0,0 +1,75 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertThat; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.GLOBAL_HTTP_DEFAULTS; + +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelOption; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.channel.pool.ChannelPool; +import io.netty.handler.codec.http2.Http2SecurityUtil; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslProvider; +import io.netty.handler.ssl.SupportedCipherSuiteFilter; +import java.net.URI; +import java.time.Duration; +import java.util.concurrent.atomic.AtomicReference; +import javax.net.ssl.SSLException; +import org.junit.Test; +import software.amazon.awssdk.http.Protocol; + +public class ChannelPipelineInitializerTest { + + private ChannelPipelineInitializer pipelineInitializer; + + private URI targetUri; + + @Test + public void channelConfigOptionCheck() throws SSLException { + targetUri = URI.create("https://some-awesome-service-1234.amazonaws.com:8080"); + + SslContext sslContext = SslContextBuilder.forClient() + .sslProvider(SslProvider.JDK) + .ciphers(Http2SecurityUtil.CIPHERS, SupportedCipherSuiteFilter.INSTANCE) + .build(); + + AtomicReference channelPoolRef = new AtomicReference<>(); + + NettyConfiguration nettyConfiguration = new NettyConfiguration(GLOBAL_HTTP_DEFAULTS); + + pipelineInitializer = new ChannelPipelineInitializer(Protocol.HTTP1_1, + sslContext, + SslProvider.JDK, + 100, + 1024, + Duration.ZERO, + channelPoolRef, + nettyConfiguration, + targetUri); + + Channel channel = new EmbeddedChannel(); + + pipelineInitializer.channelCreated(channel); + + assertThat(channel.config().getOption(ChannelOption.ALLOCATOR), is(UnpooledByteBufAllocator.DEFAULT)); + + } +} 
\ No newline at end of file diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ConnectionReaperTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ConnectionReaperTest.java index d596f60a9433..0ce25a0f6ebb 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ConnectionReaperTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ConnectionReaperTest.java @@ -1,3 +1,18 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + package software.amazon.awssdk.http.nio.netty.internal; import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; @@ -28,7 +43,7 @@ import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; -import software.amazon.awssdk.http.nio.netty.EmptyPublisher; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.RecordingResponseHandler; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FullResponseContentPublisherTckTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FullResponseContentPublisherTckTest.java index f8650a4dcab9..962194450366 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FullResponseContentPublisherTckTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FullResponseContentPublisherTckTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandlerTest.java index 083a3036dbf2..b80cd8e809d0 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandlerTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandlerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -23,9 +23,7 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.DefaultEventLoopGroup; import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.util.DefaultAttributeMap; import java.io.IOException; import java.util.concurrent.CancellationException; @@ -53,7 +51,7 @@ public class FutureCancelHandlerTest { private Channel channel; @Mock - private ChannelPool channelPool; + private SdkChannelPool channelPool; private RequestContext requestContext; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPoolTest.java index f7a1922923ef..cdbf063c9adc 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPoolTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -21,11 +21,10 @@ import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.REQUEST_CONTEXT_KEY; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.RESPONSE_COMPLETE_KEY; -import com.typesafe.netty.http.HttpStreamsClientHandler; +import software.amazon.awssdk.http.nio.netty.internal.nrs.HttpStreamsClientHandler; import io.netty.channel.Channel; import io.netty.channel.ChannelPipeline; import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; import io.netty.handler.timeout.ReadTimeoutHandler; @@ -42,7 +41,7 @@ @RunWith(MockitoJUnitRunner.class) public class HandlerRemovingChannelPoolTest { @Mock - private ChannelPool channelPool; + private SdkChannelPool channelPool; @Mock private SdkAsyncHttpResponseHandler responseHandler; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPoolTest.java index 9a0ee5b3ad99..e610884e3eff 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPoolTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -18,14 +18,17 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.when; import static org.mockito.internal.verification.VerificationModeFactory.times; import static software.amazon.awssdk.http.SdkHttpConfigurationOption.CONNECTION_ACQUIRE_TIMEOUT; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.KEEP_ALIVE; import io.netty.channel.Channel; import io.netty.channel.EventLoop; import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; +import io.netty.util.Attribute; import io.netty.util.concurrent.DefaultPromise; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GlobalEventExecutor; @@ -49,9 +52,10 @@ public class HealthCheckedChannelPoolTest { private EventLoopGroup eventLoopGroup = Mockito.mock(EventLoopGroup.class); private EventLoop eventLoop = Mockito.mock(EventLoop.class); - private ChannelPool downstreamChannelPool = Mockito.mock(ChannelPool.class); + private SdkChannelPool downstreamChannelPool = Mockito.mock(SdkChannelPool.class); private List channels = new ArrayList<>(); private ScheduledFuture scheduledFuture = Mockito.mock(ScheduledFuture.class); + private Attribute attribute = mock(Attribute.class); private static final NettyConfiguration NETTY_CONFIGURATION = new NettyConfiguration(AttributeMap.builder() @@ -64,7 +68,7 @@ public class HealthCheckedChannelPoolTest { @Before public void reset() { - Mockito.reset(eventLoopGroup, eventLoop, downstreamChannelPool, scheduledFuture); + Mockito.reset(eventLoopGroup, eventLoop, downstreamChannelPool, scheduledFuture, attribute); channels.clear(); Mockito.when(eventLoopGroup.next()).thenReturn(eventLoop); @@ -104,6 +108,39 @@ public void acquireCanMakeManyCalls() throws Exception { Mockito.verify(downstreamChannelPool, Mockito.times(5)).acquire(any()); } + @Test + public void acquireActiveAndKeepAliveTrue_shouldAcquireOnce() throws Exception { + stubForIgnoredTimeout(); + stubAcquireActiveAndKeepAlive(); + + Future acquire = channelPool.acquire(); + + acquire.get(5, TimeUnit.SECONDS); + + assertThat(acquire.isDone()).isTrue(); + assertThat(acquire.isSuccess()).isTrue(); + assertThat(acquire.getNow()).isEqualTo(channels.get(0)); + + Mockito.verify(downstreamChannelPool, Mockito.times(1)).acquire(any()); + } + + + @Test + public void acquire_firstChannelKeepAliveFalse_shouldAcquireAnother() throws Exception { + stubForIgnoredTimeout(); + stubAcquireTwiceFirstTimeNotKeepAlive(); + + Future acquire = channelPool.acquire(); + + acquire.get(5, TimeUnit.SECONDS); + + assertThat(acquire.isDone()).isTrue(); + assertThat(acquire.isSuccess()).isTrue(); + assertThat(acquire.getNow()).isEqualTo(channels.get(1)); + + Mockito.verify(downstreamChannelPool, Mockito.times(2)).acquire(any()); + } + @Test public void badDownstreamAcquiresCausesException() throws Exception { stubForIgnoredTimeout(); @@ -154,6 +191,7 @@ public void slowAcquireTimesOut() throws Exception { public void releaseHealthyDoesNotClose() { Channel channel = Mockito.mock(Channel.class); Mockito.when(channel.isActive()).thenReturn(true); + stubKeepAliveAttribute(channel, null); channelPool.release(channel); @@ -165,7 +203,7 @@ public void releaseHealthyDoesNotClose() { public void releaseHealthyCloses() { Channel channel = Mockito.mock(Channel.class); 
Mockito.when(channel.isActive()).thenReturn(false); - + stubKeepAliveAttribute(channel, null); channelPool.release(channel); Mockito.verify(channel, times(1)).close(); @@ -179,6 +217,7 @@ public void stubAcquireHealthySequence(Boolean... acquireHealthySequence) { Promise promise = invocation.getArgumentAt(0, Promise.class); Channel channel = Mockito.mock(Channel.class); Mockito.when(channel.isActive()).thenReturn(shouldAcquireBeHealthy); + stubKeepAliveAttribute(channel, null); channels.add(channel); promise.setSuccess(channel); return promise; @@ -186,6 +225,26 @@ public void stubAcquireHealthySequence(Boolean... acquireHealthySequence) { } } + private void stubAcquireActiveAndKeepAlive() { + OngoingStubbing> stubbing = Mockito.when(downstreamChannelPool.acquire(any())); + stubbing = stubbing.thenAnswer(invocation -> { + Promise promise = invocation.getArgumentAt(0, Promise.class); + Channel channel = Mockito.mock(Channel.class); + Mockito.when(channel.isActive()).thenReturn(true); + + stubKeepAliveAttribute(channel, true); + + channels.add(channel); + promise.setSuccess(channel); + return promise; + }); + } + + private void stubKeepAliveAttribute(Channel channel, Boolean isKeepAlive) { + Mockito.when(channel.attr(KEEP_ALIVE)).thenReturn(attribute); + when(attribute.get()).thenReturn(isKeepAlive); + } + public void stubBadDownstreamAcquire() { Mockito.when(downstreamChannelPool.acquire(any())).thenAnswer(invocation -> { Promise promise = invocation.getArgumentAt(0, Promise.class); @@ -202,4 +261,27 @@ public void stubForIgnoredTimeout() { Mockito.when(eventLoopGroup.schedule(any(Runnable.class), anyLong(), any())) .thenAnswer(i -> scheduledFuture); } + + private void stubAcquireTwiceFirstTimeNotKeepAlive() { + OngoingStubbing> stubbing = Mockito.when(downstreamChannelPool.acquire(any())); + stubbing = stubbing.thenAnswer(invocation -> { + Promise promise = invocation.getArgumentAt(0, Promise.class); + Channel channel = Mockito.mock(Channel.class); + stubKeepAliveAttribute(channel, false); + Mockito.when(channel.isActive()).thenReturn(true); + channels.add(channel); + promise.setSuccess(channel); + return promise; + }); + + stubbing.thenAnswer(invocation -> { + Promise promise = invocation.getArgumentAt(0, Promise.class); + Channel channel = Mockito.mock(Channel.class); + Mockito.when(channel.isActive()).thenReturn(true); + channels.add(channel); + promise.setSuccess(channel); + stubKeepAliveAttribute(channel, true); + return promise; + }); + } } \ No newline at end of file diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HonorCloseOnReleaseChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HonorCloseOnReleaseChannelPoolTest.java index 9a53858d782a..71a0203794b2 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HonorCloseOnReleaseChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HonorCloseOnReleaseChannelPoolTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPoolTest.java new file mode 100644 index 000000000000..12100fcf8acc --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPoolTest.java @@ -0,0 +1,303 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.http.nio.netty.internal.Http1TunnelConnectionPool.TUNNEL_ESTABLISHED_KEY; +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelId; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.pool.ChannelPool; +import io.netty.channel.pool.ChannelPoolHandler; +import io.netty.handler.ssl.ApplicationProtocolNegotiator; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslHandler; +import io.netty.util.Attribute; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import java.io.IOException; +import java.net.URI; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; +import javax.net.ssl.SSLSessionContext; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; + +/** + * Unit tests for {@link Http1TunnelConnectionPool}. 
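+ * Covers skipping tunnel setup when the tunnel is already established, init-handler and SSL handler insertion, and delegation of release/close to the wrapped pool.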
+ */ +@RunWith(MockitoJUnitRunner.class) +public class Http1TunnelConnectionPoolTest { + private static final NioEventLoopGroup GROUP = new NioEventLoopGroup(1); + + private static final URI HTTP_PROXY_ADDRESS = URI.create("http://localhost:1234"); + + private static final URI HTTPS_PROXY_ADDRESS = URI.create("https://localhost:5678"); + + private static final URI REMOTE_ADDRESS = URI.create("https://s3.amazonaws.com:5678"); + + @Mock + private ChannelPool delegatePool; + + @Mock + private ChannelPoolHandler mockHandler; + + @Mock + public Channel mockChannel; + + @Mock + public ChannelPipeline mockPipeline; + + @Mock + public Attribute mockAttr; + + @Mock + public ChannelHandlerContext mockCtx; + + @Mock + public ChannelId mockId; + + @Before + public void methodSetup() { + Future channelFuture = GROUP.next().newSucceededFuture(mockChannel); + when(delegatePool.acquire(any(Promise.class))).thenReturn(channelFuture); + + when(mockCtx.channel()).thenReturn(mockChannel); + when(mockCtx.pipeline()).thenReturn(mockPipeline); + + when(mockChannel.attr(eq(TUNNEL_ESTABLISHED_KEY))).thenReturn(mockAttr); + when(mockChannel.id()).thenReturn(mockId); + when(mockChannel.pipeline()).thenReturn(mockPipeline); + } + + @AfterClass + public static void teardown() { + GROUP.shutdownGracefully().awaitUninterruptibly(); + } + + @Test + public void tunnelAlreadyEstablished_doesNotAddInitHandler() { + Http1TunnelConnectionPool tunnelPool = new Http1TunnelConnectionPool(GROUP.next(), delegatePool, null, + HTTP_PROXY_ADDRESS, REMOTE_ADDRESS, mockHandler); + + when(mockAttr.get()).thenReturn(true); + + tunnelPool.acquire().awaitUninterruptibly(); + + verify(mockPipeline, never()).addLast(anyObject()); + } + + @Test(timeout = 1000) + public void tunnelNotEstablished_addsInitHandler() throws InterruptedException { + Http1TunnelConnectionPool tunnelPool = new Http1TunnelConnectionPool(GROUP.next(), delegatePool, null, + HTTP_PROXY_ADDRESS, REMOTE_ADDRESS, mockHandler); + + when(mockAttr.get()).thenReturn(false); + + CountDownLatch latch = new CountDownLatch(1); + when(mockPipeline.addLast(any(ChannelHandler.class))).thenAnswer(i -> { + latch.countDown(); + return mockPipeline; + }); + tunnelPool.acquire(); + latch.await(); + verify(mockPipeline, times(1)).addLast(any(ProxyTunnelInitHandler.class)); + } + + @Test + public void tunnelInitFails_acquireFutureFails() { + Http1TunnelConnectionPool.InitHandlerSupplier supplier = (srcPool, remoteAddr, initFuture) -> { + initFuture.setFailure(new IOException("boom")); + return mock(ChannelHandler.class); + }; + + Http1TunnelConnectionPool tunnelPool = new Http1TunnelConnectionPool(GROUP.next(), delegatePool, null, + HTTP_PROXY_ADDRESS, REMOTE_ADDRESS, mockHandler, supplier); + + Future acquireFuture = tunnelPool.acquire(); + + assertThat(acquireFuture.awaitUninterruptibly().cause()).hasMessage("boom"); + } + + @Test + public void tunnelInitSucceeds_acquireFutureSucceeds() { + Http1TunnelConnectionPool.InitHandlerSupplier supplier = (srcPool, remoteAddr, initFuture) -> { + initFuture.setSuccess(mockChannel); + return mock(ChannelHandler.class); + }; + + Http1TunnelConnectionPool tunnelPool = new Http1TunnelConnectionPool(GROUP.next(), delegatePool, null, + HTTP_PROXY_ADDRESS, REMOTE_ADDRESS, mockHandler, supplier); + + Future acquireFuture = tunnelPool.acquire(); + + assertThat(acquireFuture.awaitUninterruptibly().getNow()).isEqualTo(mockChannel); + } + + @Test + public void acquireFromDelegatePoolFails_failsFuture() { + Http1TunnelConnectionPool tunnelPool = new 
Http1TunnelConnectionPool(GROUP.next(), delegatePool, null, + HTTP_PROXY_ADDRESS, REMOTE_ADDRESS, mockHandler); + + when(delegatePool.acquire(any(Promise.class))).thenReturn(GROUP.next().newFailedFuture(new IOException("boom"))); + + Future acquireFuture = tunnelPool.acquire(); + + assertThat(acquireFuture.awaitUninterruptibly().cause()).hasMessage("boom"); + } + + @Test + public void sslContextProvided_andProxyUsingHttps_addsSslHandler() { + SslHandler mockSslHandler = mock(SslHandler.class); + SSLEngine mockSslEngine = mock(SSLEngine.class); + when(mockSslHandler.engine()).thenReturn(mockSslEngine); + when(mockSslEngine.getSSLParameters()).thenReturn(mock(SSLParameters.class)); + TestSslContext mockSslCtx = new TestSslContext(mockSslHandler); + + Http1TunnelConnectionPool.InitHandlerSupplier supplier = (srcPool, remoteAddr, initFuture) -> { + initFuture.setSuccess(mockChannel); + return mock(ChannelHandler.class); + }; + + Http1TunnelConnectionPool tunnelPool = new Http1TunnelConnectionPool(GROUP.next(), delegatePool, mockSslCtx, + HTTPS_PROXY_ADDRESS, REMOTE_ADDRESS, mockHandler, supplier); + + tunnelPool.acquire().awaitUninterruptibly(); + + ArgumentCaptor handlersCaptor = ArgumentCaptor.forClass(ChannelHandler.class); + verify(mockPipeline, times(2)).addLast(handlersCaptor.capture()); + + assertThat(handlersCaptor.getAllValues().get(0)).isEqualTo(mockSslHandler); + } + + @Test + public void sslContextProvided_andProxyNotUsingHttps_doesNotAddSslHandler() { + SslHandler mockSslHandler = mock(SslHandler.class); + TestSslContext mockSslCtx = new TestSslContext(mockSslHandler); + + Http1TunnelConnectionPool.InitHandlerSupplier supplier = (srcPool, remoteAddr, initFuture) -> { + initFuture.setSuccess(mockChannel); + return mock(ChannelHandler.class); + }; + + Http1TunnelConnectionPool tunnelPool = new Http1TunnelConnectionPool(GROUP.next(), delegatePool, mockSslCtx, + HTTP_PROXY_ADDRESS, REMOTE_ADDRESS, mockHandler, supplier); + + tunnelPool.acquire().awaitUninterruptibly(); + + ArgumentCaptor handlersCaptor = ArgumentCaptor.forClass(ChannelHandler.class); + verify(mockPipeline).addLast(handlersCaptor.capture()); + + assertThat(handlersCaptor.getAllValues().get(0)).isNotInstanceOf(SslHandler.class); + } + + @Test + public void release_releasedToDelegatePool() { + Http1TunnelConnectionPool tunnelPool = new Http1TunnelConnectionPool(GROUP.next(), delegatePool, null, + HTTP_PROXY_ADDRESS, REMOTE_ADDRESS, mockHandler); + tunnelPool.release(mockChannel); + verify(delegatePool).release(eq(mockChannel), any(Promise.class)); + } + + @Test + public void release_withGivenPromise_releasedToDelegatePool() { + Http1TunnelConnectionPool tunnelPool = new Http1TunnelConnectionPool(GROUP.next(), delegatePool, null, + HTTP_PROXY_ADDRESS, REMOTE_ADDRESS, mockHandler); + Promise mockPromise = mock(Promise.class); + tunnelPool.release(mockChannel, mockPromise); + verify(delegatePool).release(eq(mockChannel), eq(mockPromise)); + } + + @Test + public void close_closesDelegatePool() { + Http1TunnelConnectionPool tunnelPool = new Http1TunnelConnectionPool(GROUP.next(), delegatePool, null, + HTTP_PROXY_ADDRESS, REMOTE_ADDRESS, mockHandler); + tunnelPool.close(); + verify(delegatePool).close(); + } + + private static class TestSslContext extends SslContext { + private final SslHandler handler; + + protected TestSslContext(SslHandler handler) { + this.handler = handler; + } + + @Override + public boolean isClient() { + return false; + } + + @Override + public List cipherSuites() { + return null; + } + + @Override + 
public long sessionCacheSize() { + return 0; + } + + @Override + public long sessionTimeout() { + return 0; + } + + @Override + public ApplicationProtocolNegotiator applicationProtocolNegotiator() { + return null; + } + + @Override + public SSLEngine newEngine(ByteBufAllocator alloc) { + return null; + } + + @Override + public SSLEngine newEngine(ByteBufAllocator alloc, String peerHost, int peerPort) { + return null; + } + + @Override + public SSLSessionContext sessionContext() { + return null; + } + + @Override + public SslHandler newHandler(ByteBufAllocator alloc, String host, int port, boolean startTls) { + return handler; + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPoolTest.java new file mode 100644 index 000000000000..a6d04b368be7 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPoolTest.java @@ -0,0 +1,210 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; + +import io.netty.channel.Channel; +import io.netty.channel.EventLoop; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.pool.ChannelPool; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.metrics.MetricCollector; + +public class IdleConnectionCountingChannelPoolTest { + private EventLoopGroup eventLoopGroup; + private ChannelPool delegatePool; + private IdleConnectionCountingChannelPool idleCountingPool; + + @Before + public void setup() { + delegatePool = mock(ChannelPool.class); + eventLoopGroup = new NioEventLoopGroup(4); + idleCountingPool = new IdleConnectionCountingChannelPool(eventLoopGroup.next(), delegatePool); + } + + @After + public void teardown() { + eventLoopGroup.shutdownGracefully(); + } + + @Test(timeout = 5_000) + public void acquiresAndReleasesOfNewChannelsIncreaseCount() throws InterruptedException { + stubDelegatePoolAcquires(createSuccessfulAcquire(), createSuccessfulAcquire()); + stubDelegatePoolReleasesForSuccess(); + + assertThat(getIdleConnectionCount()).isEqualTo(0); + + Channel firstChannel = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + Channel secondChannel = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + 
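// Releasing a channel back into the pool makes it idle, so the count should rise with each release +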
idleCountingPool.release(firstChannel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(1); + + idleCountingPool.release(secondChannel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(2); + } + + @Test(timeout = 5_000) + public void channelsClosedInTheDelegatePoolAreNotCounted() throws InterruptedException { + stubDelegatePoolAcquires(createSuccessfulAcquire()); + stubDelegatePoolReleasesForSuccess(); + + assertThat(getIdleConnectionCount()).isEqualTo(0); + + Channel channel = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + idleCountingPool.release(channel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(1); + + channel.close().await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + } + + @Test(timeout = 5_000) + public void channelsClosedWhenCheckedOutAreNotCounted() throws InterruptedException { + stubDelegatePoolAcquires(createSuccessfulAcquire()); + stubDelegatePoolReleasesForSuccess(); + + assertThat(getIdleConnectionCount()).isEqualTo(0); + + Channel channel = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + channel.close().await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + idleCountingPool.release(channel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + } + + @Test + public void checkingOutAnIdleChannelIsCountedCorrectly() throws InterruptedException { + Future successfulAcquire = createSuccessfulAcquire(); + stubDelegatePoolAcquires(successfulAcquire, successfulAcquire); + stubDelegatePoolReleasesForSuccess(); + + assertThat(getIdleConnectionCount()).isEqualTo(0); + + Channel channel1 = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + idleCountingPool.release(channel1).await(); + assertThat(getIdleConnectionCount()).isEqualTo(1); + + Channel channel2 = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + assertThat(channel1).isEqualTo(channel2); + } + + @Test + public void stochastic_rapidAcquireReleaseIsCalculatedCorrectly() throws InterruptedException { + Future successfulAcquire = createSuccessfulAcquire(); + Channel expectedChannel = successfulAcquire.getNow(); + stubDelegatePoolAcquires(successfulAcquire); + stubDelegatePoolReleasesForSuccess(); + + for (int i = 0; i < 1000; ++i) { + Channel channel = idleCountingPool.acquire().await().getNow(); + assertThat(channel).isEqualTo(expectedChannel); + assertThat(getIdleConnectionCount()).isEqualTo(0); + idleCountingPool.release(channel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(1); + } + } + + @Test + public void stochastic_rapidAcquireReleaseCloseIsCalculatedCorrectly() throws InterruptedException { + stubDelegatePoolAcquiresForSuccess(); + stubDelegatePoolReleasesForSuccess(); + + for (int i = 0; i < 1000; ++i) { + Channel channel = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + idleCountingPool.release(channel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(1); + channel.close().await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + } + } + + @Test + public void stochastic_rapidAcquireCloseReleaseIsCalculatedCorrectly() throws InterruptedException { + stubDelegatePoolAcquiresForSuccess(); + stubDelegatePoolReleasesForSuccess(); + + for (int i = 0; i < 1000; ++i) { + Channel channel = idleCountingPool.acquire().await().getNow(); + 
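// The channel is checked out at this point, so it must not be counted as idle +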
assertThat(getIdleConnectionCount()).isEqualTo(0); + channel.close().await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + idleCountingPool.release(channel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + } + } + + private int getIdleConnectionCount() { + MetricCollector metricCollector = MetricCollector.create("test"); + idleCountingPool.collectChannelPoolMetrics(metricCollector).join(); + return metricCollector.collect().metricValues(HttpMetric.AVAILABLE_CONCURRENCY).get(0); + } + + @SafeVarargs + private final void stubDelegatePoolAcquires(Future<Channel> result, Future<Channel>... extraResults) { + Mockito.when(delegatePool.acquire(any())).thenReturn(result, extraResults); + } + + private void stubDelegatePoolAcquiresForSuccess() { + Mockito.when(delegatePool.acquire(any())).thenAnswer(a -> createSuccessfulAcquire()); + } + + private void stubDelegatePoolReleasesForSuccess() { + Mockito.when(delegatePool.release(any())).thenAnswer((Answer<Future<Channel>>) invocation -> { + Channel channel = invocation.getArgumentAt(0, Channel.class); + Promise<Channel> result = channel.eventLoop().newPromise(); + return result.setSuccess(channel); + }); + } + + private Future<Channel> createSuccessfulAcquire() { + try { + EventLoop eventLoop = this.eventLoopGroup.next(); + + Promise<Channel> channelPromise = eventLoop.newPromise(); + MockChannel channel = new MockChannel(); + eventLoop.register(channel); + channelPromise.setSuccess(channel); + + return channelPromise; + } catch (Exception e) { + throw new Error(e); + } + } +} \ No newline at end of file diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentHandlerTest.java new file mode 100644 index 000000000000..33263a5911d8 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentHandlerTest.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.LAST_HTTP_CONTENT_RECEIVED_KEY; + +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.http.LastHttpContent; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + + +public class LastHttpContentHandlerTest { + + private MockChannel channel; + private ChannelHandlerContext handlerContext; + private LastHttpContentHandler contentHandler = LastHttpContentHandler.create(); + + @Before + public void setup() throws Exception { + channel = new MockChannel(); + channel.attr(LAST_HTTP_CONTENT_RECEIVED_KEY).set(false); + handlerContext = Mockito.mock(ChannelHandlerContext.class); + Mockito.when(handlerContext.channel()).thenReturn(channel); + } + + @After + public void cleanup() { + channel.close(); + } + + @Test + public void lastHttpContentReceived_shouldSetAttribute() { + LastHttpContent lastHttpContent = LastHttpContent.EMPTY_LAST_CONTENT; + contentHandler.channelRead(handlerContext, lastHttpContent); + + assertThat(channel.attr(LAST_HTTP_CONTENT_RECEIVED_KEY).get()).isTrue(); + } + + @Test + public void otherContentReceived_shouldNotSetAttribute() { + String content = "some content"; + contentHandler.channelRead(handlerContext, content); + + assertThat(channel.attr(LAST_HTTP_CONTENT_RECEIVED_KEY).get()).isFalse(); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentSwallowerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentSwallowerTest.java index c03787e8ebea..75e90ef19f11 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentSwallowerTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/LastHttpContentSwallowerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/MockChannel.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/MockChannel.java index 49dc9caa634b..37de10f36294 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/MockChannel.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/MockChannel.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutorTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutorTest.java index 97b570cec39c..c4a915991db0 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutorTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutorTest.java @@ -1,11 +1,32 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + package software.amazon.awssdk.http.nio.netty.internal; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import io.netty.channel.Channel; import io.netty.channel.EventLoop; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.util.concurrent.Promise; +import java.util.concurrent.CompletableFuture; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -14,17 +35,9 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.utils.AttributeMap; -import java.util.concurrent.CompletableFuture; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - public class NettyRequestExecutorTest { - private ChannelPool mockChannelPool; + private SdkChannelPool mockChannelPool; private EventLoopGroup eventLoopGroup; @@ -34,7 +47,7 @@ public class NettyRequestExecutorTest { @Before public void setup() { - mockChannelPool = mock(ChannelPool.class); + mockChannelPool = mock(SdkChannelPool.class); eventLoopGroup = new NioEventLoopGroup(); diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/OldConnectionReaperHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/OldConnectionReaperHandlerTest.java index 90332c9e74fa..e5e3f6f57210 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/OldConnectionReaperHandlerTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/OldConnectionReaperHandlerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/OneTimeReadTimeoutHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/OneTimeReadTimeoutHandlerTest.java index 469a60d4c9ac..b73355edb68d 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/OneTimeReadTimeoutHandlerTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/OneTimeReadTimeoutHandlerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandlerTest.java new file mode 100644 index 000000000000..22f534a85f37 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandlerTest.java @@ -0,0 +1,220 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.DefaultChannelPromise; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.pool.ChannelPool; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpResponse; +import io.netty.handler.codec.http.HttpClientCodec; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.ssl.SslCloseCompletionEvent; +import io.netty.handler.ssl.SslHandler; +import io.netty.util.concurrent.Promise; +import java.io.IOException; +import java.net.URI; +import java.util.function.Supplier; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; + +/** + * Unit tests for {@link ProxyTunnelInitHandler}. + */ +@RunWith(MockitoJUnitRunner.class) +public class ProxyTunnelInitHandlerTest { + private static final NioEventLoopGroup GROUP = new NioEventLoopGroup(1); + + private static final URI REMOTE_HOST = URI.create("https://s3.amazonaws.com:1234"); + + @Mock + private ChannelHandlerContext mockCtx; + + @Mock + private Channel mockChannel; + + @Mock + private ChannelPipeline mockPipeline; + + @Mock + private ChannelPool mockChannelPool; + + @Before + public void methodSetup() { + when(mockCtx.channel()).thenReturn(mockChannel); + when(mockCtx.pipeline()).thenReturn(mockPipeline); + when(mockChannel.pipeline()).thenReturn(mockPipeline); + when(mockChannel.writeAndFlush(anyObject())).thenReturn(new DefaultChannelPromise(mockChannel, GROUP.next())); + } + + @AfterClass + public static void teardown() { + GROUP.shutdownGracefully().awaitUninterruptibly(); + } + + @Test + public void addedToPipeline_addsCodec() { + HttpClientCodec codec = new HttpClientCodec(); + Supplier codecSupplier = () -> codec; + when(mockCtx.name()).thenReturn("foo"); + + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, null, codecSupplier); + handler.handlerAdded(mockCtx); + + verify(mockPipeline).addBefore(eq("foo"), eq(null), eq(codec)); + } + + @Test + public void successfulProxyResponse_completesFuture() { + Promise promise = GROUP.next().newPromise(); + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, promise); + successResponse(handler); + + assertThat(promise.awaitUninterruptibly().getNow()).isEqualTo(mockChannel); + } + + @Test + public void successfulProxyResponse_removesSelfAndCodec() { + Promise promise = GROUP.next().newPromise(); + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, promise); + successResponse(handler); + + verify(mockPipeline).remove(eq(handler)); + 
verify(mockPipeline).remove(any(HttpClientCodec.class)); + } + + @Test + public void successfulProxyResponse_doesNotRemoveSslHandler() { + SslHandler sslHandler = mock(SslHandler.class); + when(mockPipeline.get(eq(SslHandler.class))).thenReturn(sslHandler); + + Promise promise = GROUP.next().newPromise(); + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, promise); + successResponse(handler); + + verify(mockPipeline, never()).remove(eq(SslHandler.class)); + } + + @Test + public void unexpectedMessage_failsPromise() { + Promise promise = GROUP.next().newPromise(); + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, promise); + handler.channelRead(mockCtx, new Object()); + + assertThat(promise.awaitUninterruptibly().isSuccess()).isFalse(); + } + + @Test + public void unsuccessfulResponse_failsPromise() { + Promise promise = GROUP.next().newPromise(); + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, promise); + + DefaultHttpResponse resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.FORBIDDEN); + handler.channelRead(mockCtx, resp); + + assertThat(promise.awaitUninterruptibly().isSuccess()).isFalse(); + } + + @Test + public void requestWriteFails_failsPromise() { + DefaultChannelPromise writePromise = new DefaultChannelPromise(mockChannel, GROUP.next()); + writePromise.setFailure(new IOException("boom")); + when(mockChannel.writeAndFlush(anyObject())).thenReturn(writePromise); + + Promise promise = GROUP.next().newPromise(); + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, promise); + handler.handlerAdded(mockCtx); + + assertThat(promise.awaitUninterruptibly().isSuccess()).isFalse(); + } + + @Test + public void channelInactive_shouldFailPromise() throws Exception { + Promise promise = GROUP.next().newPromise(); + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, promise); + SslCloseCompletionEvent event = new SslCloseCompletionEvent(new RuntimeException("")); + handler.channelInactive(mockCtx); + + assertThat(promise.awaitUninterruptibly().isSuccess()).isFalse(); + verify(mockCtx).close(); + } + + @Test + public void unexpectedExceptionThrown_shouldFailPromise() throws Exception { + Promise promise = GROUP.next().newPromise(); + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, promise); + handler.exceptionCaught(mockCtx, new RuntimeException("exception")); + + assertThat(promise.awaitUninterruptibly().isSuccess()).isFalse(); + verify(mockCtx).close(); + } + + @Test + public void handlerRemoved_removesCodec() { + HttpClientCodec codec = new HttpClientCodec(); + when(mockPipeline.get(eq(HttpClientCodec.class))).thenReturn(codec); + + Promise promise = GROUP.next().newPromise(); + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, promise); + + handler.handlerRemoved(mockCtx); + + verify(mockPipeline).remove(eq(HttpClientCodec.class)); + } + + @Test + public void handledAdded_writesRequest() { + Promise promise = GROUP.next().newPromise(); + ProxyTunnelInitHandler handler = new ProxyTunnelInitHandler(mockChannelPool, REMOTE_HOST, promise); + handler.handlerAdded(mockCtx); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(HttpRequest.class); + verify(mockChannel).writeAndFlush(requestCaptor.capture()); + + String uri = REMOTE_HOST.getHost() + ":" + REMOTE_HOST.getPort(); + HttpRequest 
expectedRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.CONNECT, uri, + Unpooled.EMPTY_BUFFER, false); + expectedRequest.headers().add(HttpHeaderNames.HOST, uri); + + assertThat(requestCaptor.getValue()).isEqualTo(expectedRequest); + } + + private void successResponse(ProxyTunnelInitHandler handler) { + DefaultHttpResponse resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + handler.channelRead(mockCtx, resp); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java index 58d9f8f41062..62a639c727d2 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -20,27 +20,23 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.EXECUTE_FUTURE_KEY; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.REQUEST_CONTEXT_KEY; -import com.typesafe.netty.http.DefaultStreamedHttpResponse; -import com.typesafe.netty.http.StreamedHttpResponse; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.EmptyByteBuf; +import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; -import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.DefaultHttpContent; import io.netty.handler.codec.http.DefaultHttpResponse; import io.netty.handler.codec.http.EmptyHttpHeaders; import io.netty.handler.codec.http.HttpContent; -import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpVersion; -import io.netty.util.AttributeKey; import io.reactivex.Flowable; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; @@ -49,11 +45,14 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; +import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.http.nio.netty.internal.nrs.DefaultStreamedHttpResponse; +import software.amazon.awssdk.http.nio.netty.internal.nrs.StreamedHttpResponse; @RunWith(MockitoJUnitRunner.class) public class PublisherAdapterTest { @@ -64,7 +63,7 @@ public class PublisherAdapterTest { private MockChannel channel; @Mock - private ChannelPool 
channelPool; + private SdkChannelPool channelPool; @Mock private EventLoopGroup eventLoopGroup; @@ -159,6 +158,75 @@ public void errorOccurred_shouldInvokeResponseHandler() { verify(responseHandler).onError(exception); } + @Test + public void subscriptionCancelled_upstreamPublisherCallsOnNext_httpContentReleased() { + HttpContent firstContent = mock(HttpContent.class); + when(firstContent.content()).thenReturn(Unpooled.EMPTY_BUFFER); + + HttpContent[] contentToIgnore = new HttpContent[8]; + for (int i = 0; i < contentToIgnore.length; ++i) { + contentToIgnore[i] = mock(HttpContent.class); + when(contentToIgnore[i].content()).thenReturn(Unpooled.EMPTY_BUFFER); + } + + Publisher publisher = subscriber -> subscriber.onSubscribe(new Subscription() { + @Override + public void request(long l) { + // We ignore any cancel signal and just publish all the content + subscriber.onNext(firstContent); + + for (int i = 0; i < l && i < contentToIgnore.length; ++i) { + subscriber.onNext(contentToIgnore[i]); + } + } + + @Override + public void cancel() { + // no-op + } + }); + + DefaultStreamedHttpResponse streamedResponse = new DefaultStreamedHttpResponse(HttpVersion.HTTP_1_1, + HttpResponseStatus.OK, publisher); + + Subscriber subscriber = new Subscriber() { + private Subscription subscription; + + @Override + public void onSubscribe(Subscription subscription) { + this.subscription = subscription; + subscription.request(Long.MAX_VALUE); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + subscription.cancel(); + } + + @Override + public void onError(Throwable throwable) { + } + + @Override + public void onComplete() { + } + }; + + ResponseHandler.PublisherAdapter publisherAdapter = new ResponseHandler.PublisherAdapter(streamedResponse, ctx, + requestContext, executeFuture); + + publisherAdapter.subscribe(subscriber); + + // First one should be accessed as normal + verify(firstContent).content(); + verify(firstContent).release(); + + for (int i = 0; i < contentToIgnore.length; ++i) { + verify(contentToIgnore[i]).release(); + verifyNoMoreInteractions(contentToIgnore[i]); + } + } + static final class TestSubscriber implements Subscriber { private Subscription subscription; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/RequestAdapterTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/RequestAdapterTest.java new file mode 100644 index 000000000000..31e2aca3c9e7 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/RequestAdapterTest.java @@ -0,0 +1,196 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.HttpConversionUtil; +import java.net.URI; +import java.util.List; +import org.junit.Test; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; + +public class RequestAdapterTest { + + private final RequestAdapter h1Adapter = new RequestAdapter(Protocol.HTTP1_1); + private final RequestAdapter h2Adapter = new RequestAdapter(Protocol.HTTP2); + + @Test + public void adapt_h1Request_requestIsCorrect() { + SdkHttpRequest request = SdkHttpRequest.builder() + .uri(URI.create("http://localhost:12345/foo/bar/baz")) + .putRawQueryParameter("foo", "bar") + .putRawQueryParameter("bar", "baz") + .putHeader("header1", "header1val") + .putHeader("header2", "header2val") + .method(SdkHttpMethod.GET) + .build(); + + HttpRequest adapted = h1Adapter.adapt(request); + + assertThat(adapted.method()).isEqualTo(HttpMethod.valueOf("GET")); + assertThat(adapted.uri()).isEqualTo("/foo/bar/baz?foo=bar&bar=baz"); + assertThat(adapted.protocolVersion()).isEqualTo(HttpVersion.HTTP_1_1); + assertThat(adapted.headers().getAll("Host")).containsExactly("localhost:12345"); + assertThat(adapted.headers().getAll("header1")).containsExactly("header1val"); + assertThat(adapted.headers().getAll("header2")).containsExactly("header2val"); + } + + @Test + public void adapt_h2Request_addsSchemeExtension() { + SdkHttpRequest request = SdkHttpRequest.builder() + .uri(URI.create("http://localhost:12345/foo/bar/baz")) + .putRawQueryParameter("foo", "bar") + .putRawQueryParameter("bar", "baz") + .putHeader("header1", "header1val") + .putHeader("header2", "header2val") + .method(SdkHttpMethod.GET) + .build(); + + HttpRequest adapted = h2Adapter.adapt(request); + + assertThat(adapted.headers().getAll(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text())).containsExactly("http"); + } + + @Test + public void adapt_noPathContainsQueryParams() { + SdkHttpRequest request = SdkHttpRequest.builder() + .host("localhost:12345") + .protocol("http") + .putRawQueryParameter("foo", "bar") + .putRawQueryParameter("bar", "baz") + .putHeader("header1", "header1val") + .putHeader("header2", "header2val") + .method(SdkHttpMethod.GET) + .build(); + + HttpRequest adapted = h1Adapter.adapt(request); + + assertThat(adapted.method()).isEqualTo(HttpMethod.valueOf("GET")); + assertThat(adapted.uri()).isEqualTo("/?foo=bar&bar=baz"); + assertThat(adapted.protocolVersion()).isEqualTo(HttpVersion.HTTP_1_1); + assertThat(adapted.headers().getAll("Host")).containsExactly("localhost:12345"); + } + + @Test + public void adapt_hostHeaderSet() { + SdkHttpRequest sdkRequest = SdkHttpRequest.builder() + .uri(URI.create("http://localhost:12345/")) + .method(SdkHttpMethod.HEAD) + .build(); + HttpRequest result = h1Adapter.adapt(sdkRequest); + List hostHeaders = result.headers() + .getAll(HttpHeaderNames.HOST.toString()); + assertThat(hostHeaders).containsExactly("localhost:12345"); + } + + @Test + public void adapt_standardHttpsPort_omittedInHeader() { + SdkHttpRequest sdkRequest = SdkHttpRequest.builder() + 
.uri(URI.create("https://localhost:443/")) + .method(SdkHttpMethod.HEAD) + .build(); + HttpRequest result = h1Adapter.adapt(sdkRequest); + List hostHeaders = result.headers() + .getAll(HttpHeaderNames.HOST.toString()); + assertThat(hostHeaders).containsExactly("localhost"); + } + + @Test + public void adapt_containsQueryParamsRequiringEncoding() { + SdkHttpRequest request = SdkHttpRequest.builder() + .uri(URI.create("http://localhost:12345")) + .putRawQueryParameter("java", "☕") + .putRawQueryParameter("python", "\uD83D\uDC0D") + .method(SdkHttpMethod.GET) + .build(); + + HttpRequest adapted = h1Adapter.adapt(request); + + assertThat(adapted.uri()).isEqualTo("/?java=%E2%98%95&python=%F0%9F%90%8D"); + } + + @Test + public void adapt_pathEmpty_setToRoot() { + SdkHttpRequest request = SdkHttpRequest.builder() + .uri(URI.create("http://localhost:12345")) + .method(SdkHttpMethod.GET) + .build(); + + HttpRequest adapted = h1Adapter.adapt(request); + + assertThat(adapted.uri()).isEqualTo("/"); + } + + @Test + public void adapt_defaultPortUsed() { + SdkHttpRequest sdkRequest = SdkHttpRequest.builder() + .uri(URI.create("http://localhost:80/")) + .method(SdkHttpMethod.HEAD) + .build(); + HttpRequest result = h1Adapter.adapt(sdkRequest); + List hostHeaders = result.headers() + .getAll(HttpHeaderNames.HOST.toString()); + assertNotNull(hostHeaders); + assertEquals(1, hostHeaders.size()); + assertEquals("localhost", hostHeaders.get(0)); + + sdkRequest = SdkHttpRequest.builder() + .uri(URI.create("https://localhost:443/")) + .method(SdkHttpMethod.HEAD) + .build(); + result = h1Adapter.adapt(sdkRequest); + hostHeaders = result.headers() + .getAll(HttpHeaderNames.HOST.toString()); + assertNotNull(hostHeaders); + assertEquals(1, hostHeaders.size()); + assertEquals("localhost", hostHeaders.get(0)); + } + + @Test + public void adapt_nonStandardHttpPort() { + SdkHttpRequest sdkRequest = SdkHttpRequest.builder() + .uri(URI.create("http://localhost:8080/")) + .method(SdkHttpMethod.HEAD) + .build(); + HttpRequest result = h1Adapter.adapt(sdkRequest); + List hostHeaders = result.headers() + .getAll(HttpHeaderNames.HOST.toString()); + + assertThat(hostHeaders).containsExactly("localhost:8080"); + } + + @Test + public void adapt_nonStandardHttpsPort() { + SdkHttpRequest sdkRequest = SdkHttpRequest.builder() + .uri(URI.create("https://localhost:8443/")) + .method(SdkHttpMethod.HEAD) + .build(); + HttpRequest result = h1Adapter.adapt(sdkRequest); + List hostHeaders = result.headers() + .getAll(HttpHeaderNames.HOST.toString()); + + assertThat(hostHeaders).containsExactly("localhost:8443"); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ResponseCompletionTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ResponseCompletionTest.java new file mode 100644 index 000000000000..56601cf8bdb3 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ResponseCompletionTest.java @@ -0,0 +1,190 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION; +import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; +import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; +import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE; +import static io.netty.handler.codec.http.HttpHeaderValues.TEXT_PLAIN; +import static io.netty.handler.codec.http.HttpResponseStatus.OK; +import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpServerCodec; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.util.SelfSignedCertificate; +import io.reactivex.Flowable; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.After; +import org.junit.Test; +import org.reactivestreams.Publisher; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.http.EmptyPublisher; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; +import software.amazon.awssdk.utils.AttributeMap; + + +public class ResponseCompletionTest { + private SdkAsyncHttpClient netty; + private Server server; + + @After + public void teardown() throws InterruptedException { + if (server != null) { + server.shutdown(); + } + server = null; + + if (netty != null) { + netty.close(); + } + netty = null; + } + + @Test + public void connectionCloseAfterResponse_shouldNotReuseConnection() throws Exception { + server = new Server(); + server.init(); + + netty = NettyNioAsyncHttpClient.builder() + .eventLoopGroup(SdkEventLoopGroup.builder().numberOfThreads(2).build()) + .protocol(Protocol.HTTP1_1) + .buildWithDefaults(AttributeMap.builder().put(TRUST_ALL_CERTIFICATES, true).build()); + + sendGetRequest().join(); + sendGetRequest().join(); + + 
assertThat(server.channels.size()).isEqualTo(2); + } + + private CompletableFuture sendGetRequest() { + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .responseHandler(new SdkAsyncHttpResponseHandler() { + private SdkHttpResponse headers; + + @Override + public void onHeaders(SdkHttpResponse headers) { + this.headers = headers; + } + + @Override + public void onStream(Publisher stream) { + Flowable.fromPublisher(stream).forEach(b -> { + }); + } + + @Override + public void onError(Throwable error) { + } + }) + .request(SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .protocol("https") + .host("localhost") + .port(server.port()) + .build()) + .requestContentPublisher(new EmptyPublisher()) + .build(); + + return netty.execute(req); + } + + + private static class Server extends ChannelInitializer { + private static final byte[] CONTENT = RandomStringUtils.randomAscii(7000).getBytes(); + private ServerBootstrap bootstrap; + private ServerSocketChannel serverSock; + private List channels = new ArrayList<>(); + private final NioEventLoopGroup group = new NioEventLoopGroup(); + private SslContext sslCtx; + + public void init() throws Exception { + SelfSignedCertificate ssc = new SelfSignedCertificate(); + sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(); + + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .handler(new LoggingHandler(LogLevel.DEBUG)) + .group(group) + .childHandler(this); + + serverSock = (ServerSocketChannel) bootstrap.bind(0).sync().channel(); + } + + @Override + protected void initChannel(SocketChannel ch) throws Exception { + channels.add(ch); + ChannelPipeline pipeline = ch.pipeline(); + pipeline.addLast(sslCtx.newHandler(ch.alloc())); + pipeline.addLast(new HttpServerCodec()); + pipeline.addLast(new AlwaysCloseConnectionChannelHandler()); + } + + public void shutdown() throws InterruptedException { + group.shutdownGracefully().await(); + } + + public int port() { + return serverSock.localAddress().getPort(); + } + + private static class AlwaysCloseConnectionChannelHandler extends ChannelDuplexHandler { + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + if (msg instanceof HttpRequest) { + FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, OK, + Unpooled.wrappedBuffer(CONTENT)); + + response.headers() + .set(CONTENT_TYPE, TEXT_PLAIN) + .set(CONNECTION, CLOSE) + .setInt(CONTENT_LENGTH, response.content().readableBytes()); + ctx.writeAndFlush(response).addListener(i -> ctx.channel().close()); + } + } + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelOptionsTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelOptionsTest.java index 28f2b8c58d00..49fbf116b674 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelOptionsTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelOptionsTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SharedSdkEventLoopGroupTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SharedSdkEventLoopGroupTest.java index 62d58084394d..755630f1ee31 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SharedSdkEventLoopGroupTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SharedSdkEventLoopGroupTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SslCloseCompletionEventHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SslCloseCompletionEventHandlerTest.java index cb9d27620096..1e1dcea1c6a9 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SslCloseCompletionEventHandlerTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SslCloseCompletionEventHandlerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SslContextProviderTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SslContextProviderTest.java new file mode 100644 index 000000000000..6f8954261fd7 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/SslContextProviderTest.java @@ -0,0 +1,110 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TLS_KEY_MANAGERS_PROVIDER; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TLS_TRUST_MANAGERS_PROVIDER; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; + +import io.netty.handler.codec.http2.Http2SecurityUtil; +import io.netty.handler.ssl.SslProvider; +import javax.net.ssl.TrustManager; +import org.junit.Test; +import org.mockito.Mockito; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpConfigurationOption; +import software.amazon.awssdk.http.TlsKeyManagersProvider; +import software.amazon.awssdk.http.TlsTrustManagersProvider; +import software.amazon.awssdk.utils.AttributeMap; + +public class SslContextProviderTest { + + @Test + public void sslContext_h2WithJdk_h2CiphersShouldBeUsed() { + SslContextProvider sslContextProvider = new SslContextProvider(new NettyConfiguration(SdkHttpConfigurationOption.GLOBAL_HTTP_DEFAULTS), + Protocol.HTTP2, + SslProvider.JDK); + + assertThat(sslContextProvider.sslContext().cipherSuites()).isSubsetOf(Http2SecurityUtil.CIPHERS); + } + + @Test + public void sslContext_h2WithOpenSsl_h2CiphersShouldBeUsed() { + SslContextProvider sslContextProvider = new SslContextProvider(new NettyConfiguration(SdkHttpConfigurationOption.GLOBAL_HTTP_DEFAULTS), + Protocol.HTTP2, + SslProvider.OPENSSL); + + assertThat(sslContextProvider.sslContext().cipherSuites()).isSubsetOf(Http2SecurityUtil.CIPHERS); + } + + @Test + public void sslContext_h1_defaultCipherShouldBeUsed() { + SslContextProvider sslContextProvider = new SslContextProvider(new NettyConfiguration(SdkHttpConfigurationOption.GLOBAL_HTTP_DEFAULTS), + Protocol.HTTP1_1, + SslProvider.JDK); + + assertThat(sslContextProvider.sslContext().cipherSuites()).isNotIn(Http2SecurityUtil.CIPHERS); + } + + @Test + public void customizedKeyManagerPresent_shouldUseCustomized() { + TlsKeyManagersProvider mockProvider = Mockito.mock(TlsKeyManagersProvider.class); + SslContextProvider sslContextProvider = new SslContextProvider(new NettyConfiguration(AttributeMap.builder() + .put(TRUST_ALL_CERTIFICATES, false) + .put(TLS_KEY_MANAGERS_PROVIDER, mockProvider) + .build()), + Protocol.HTTP1_1, + SslProvider.JDK); + + sslContextProvider.sslContext(); + Mockito.verify(mockProvider).keyManagers(); + } + + @Test + public void customizedTrustManagerPresent_shouldUseCustomized() { + TlsTrustManagersProvider mockProvider = Mockito.mock(TlsTrustManagersProvider.class); + TrustManager mockTrustManager = Mockito.mock(TrustManager.class); + Mockito.when(mockProvider.trustManagers()).thenReturn(new TrustManager[] {mockTrustManager}); + SslContextProvider sslContextProvider = new SslContextProvider(new NettyConfiguration(AttributeMap.builder() + .put(TRUST_ALL_CERTIFICATES, false) + .put(TLS_TRUST_MANAGERS_PROVIDER, mockProvider) + .build()), + Protocol.HTTP1_1, + SslProvider.JDK); + + sslContextProvider.sslContext(); + Mockito.verify(mockProvider).trustManagers(); + } + + @Test + public void TlsTrustManagerAndTrustAllCertificates_shouldThrowException() { + TlsTrustManagersProvider mockProvider = Mockito.mock(TlsTrustManagersProvider.class); + assertThatThrownBy(() -> new SslContextProvider(new NettyConfiguration(AttributeMap.builder() + .put(TRUST_ALL_CERTIFICATES, true) + 
.put(TLS_TRUST_MANAGERS_PROVIDER, + mockProvider) + .build()), + Protocol.HTTP1_1, + SslProvider.JDK)).isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("A TlsTrustManagerProvider can't" + + " be provided if " + + "TrustAllCertificates is also " + + "set"); + + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactorySpiTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactorySpiTest.java new file mode 100644 index 000000000000..58ef2ec6307e --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactorySpiTest.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import java.util.Arrays; +import java.util.stream.IntStream; +import javax.net.ssl.KeyManager; +import org.junit.Test; + +/** + * Tests for {@link StaticKeyManagerFactorySpi}. + */ +public class StaticKeyManagerFactorySpiTest { + + @Test(expected = NullPointerException.class) + public void nullListInConstructor_throws() { + new StaticKeyManagerFactorySpi(null); + } + + @Test + public void constructorCreatesArrayCopy() { + KeyManager[] keyManagers = IntStream.range(0,8) + .mapToObj(i -> mock(KeyManager.class)) + .toArray(KeyManager[]::new); + + KeyManager[] arg = Arrays.copyOf(keyManagers, keyManagers.length); + StaticKeyManagerFactorySpi spi = new StaticKeyManagerFactorySpi(arg); + for (int i = 0; i < keyManagers.length; ++i) { + arg[i] = null; + } + + assertThat(spi.engineGetKeyManagers()).containsExactly(keyManagers); + } + + @Test + public void engineGetKeyManagers_returnsProvidedList() { + KeyManager[] keyManagers = IntStream.range(0,8) + .mapToObj(i -> mock(KeyManager.class)) + .toArray(KeyManager[]::new); + + StaticKeyManagerFactorySpi spi = new StaticKeyManagerFactorySpi(keyManagers); + + assertThat(spi.engineGetKeyManagers()).containsExactly(keyManagers); + } + + @Test(expected = UnsupportedOperationException.class) + public void engineInit_storeAndPasswords_throws() { + StaticKeyManagerFactorySpi staticKeyManagerFactorySpi = new StaticKeyManagerFactorySpi(new KeyManager[0]); + staticKeyManagerFactorySpi.engineInit(null, null); + } + + @Test(expected = UnsupportedOperationException.class) + public void engineInit_spec_throws() { + StaticKeyManagerFactorySpi staticKeyManagerFactorySpi = new StaticKeyManagerFactorySpi(new KeyManager[0]); + staticKeyManagerFactorySpi.engineInit(null); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactoryTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactoryTest.java new file mode 100644 index 
000000000000..4e79ad583d5e --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/StaticKeyManagerFactoryTest.java @@ -0,0 +1,41 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import java.util.stream.IntStream; +import javax.net.ssl.KeyManager; +import org.junit.Test; + +/** + * Tests for {@link StaticKeyManagerFactory}. + */ +public class StaticKeyManagerFactoryTest { + + @Test + public void createReturnFactoryWithCorrectKeyManagers() { + KeyManager[] keyManagers = IntStream.range(0,8) + .mapToObj(i -> mock(KeyManager.class)) + .toArray(KeyManager[]::new); + + StaticKeyManagerFactory staticKeyManagerFactory = StaticKeyManagerFactory.create(keyManagers); + + assertThat(staticKeyManagerFactory.getKeyManagers()) + .containsExactly(keyManagers); + } + +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/UnusedChannelExceptionHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/UnusedChannelExceptionHandlerTest.java index 1582af54cd97..3b87946e6c22 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/UnusedChannelExceptionHandlerTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/UnusedChannelExceptionHandlerTest.java @@ -1,3 +1,18 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + package software.amazon.awssdk.http.nio.netty.internal; import static org.assertj.core.api.Assertions.assertThat; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/FlushOnReadTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/FlushOnReadTest.java new file mode 100644 index 000000000000..aa7b2f545cc8 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/FlushOnReadTest.java @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class FlushOnReadTest { + + @Mock + private ChannelHandlerContext mockCtx; + + @Mock + private Channel mockChannel; + + @Mock + private Channel mockParentChannel; + + @Test + public void read_forwardsReadBeforeParentFlush() { + when(mockCtx.channel()).thenReturn(mockChannel); + when(mockChannel.parent()).thenReturn(mockParentChannel); + + FlushOnReadHandler handler = FlushOnReadHandler.getInstance(); + + handler.read(mockCtx); + + InOrder inOrder = Mockito.inOrder(mockCtx, mockParentChannel); + + inOrder.verify(mockCtx).read(); + inOrder.verify(mockParentChannel).flush(); + } + + @Test + public void getInstance_returnsSingleton() { + assertThat(FlushOnReadHandler.getInstance() == FlushOnReadHandler.getInstance()).isTrue(); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2GoAwayEventListenerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2GoAwayEventListenerTest.java new file mode 100644 index 000000000000..d642649dbea2 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2GoAwayEventListenerTest.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPipeline; +import io.netty.handler.codec.http2.DefaultHttp2GoAwayFrame; +import io.netty.util.Attribute; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; + +public class Http2GoAwayEventListenerTest { + private ChannelHandlerContext ctx; + private Channel channel; + private ChannelPipeline channelPipeline; + private Attribute<Http2MultiplexedChannelPool> attribute; + + @Before + public void setup() { + this.ctx = mock(ChannelHandlerContext.class); + this.channel = mock(Channel.class); + this.channelPipeline = mock(ChannelPipeline.class); + this.attribute = mock(Attribute.class); + + when(ctx.channel()).thenReturn(channel); + when(channel.pipeline()).thenReturn(channelPipeline); + when(channel.attr(ChannelAttributeKey.HTTP2_MULTIPLEXED_CHANNEL_POOL)).thenReturn(attribute); + } + + @Test + public void goAwayWithNoChannelPoolRecordRaisesNoExceptions() throws Exception { + when(attribute.get()).thenReturn(null); + new Http2GoAwayEventListener(channel).onGoAwayReceived(0, 0, Unpooled.EMPTY_BUFFER); + verify(channelPipeline).fireExceptionCaught(isA(GoAwayException.class)); + } + + @Test + public void goAwayWithChannelPoolRecordPassesAlongTheFrame() throws Exception { + Http2MultiplexedChannelPool record = mock(Http2MultiplexedChannelPool.class); + when(attribute.get()).thenReturn(record); + new Http2GoAwayEventListener(channel).onGoAwayReceived(0, 0, Unpooled.EMPTY_BUFFER); + verify(record).handleGoAway(eq(channel), eq(0), isA(GoAwayException.class)); + verifyNoMoreInteractions(record); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPoolTest.java index 753988ad5964..fe5ae0948dc1 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPoolTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License.
@@ -15,24 +15,41 @@ package software.amazon.awssdk.http.nio.netty.internal.http2; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_CONNECTION; +import static software.amazon.awssdk.http.nio.netty.internal.http2.utils.Http2TestUtils.newHttp2Channel; + import io.netty.channel.Channel; import io.netty.channel.EventLoopGroup; +import io.netty.channel.embedded.EmbeddedChannel; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.pool.ChannelPool; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http2.Http2Connection; +import io.netty.handler.codec.http2.Http2LocalFlowController; +import io.netty.handler.codec.http2.Http2Stream; import io.netty.util.concurrent.DefaultPromise; +import io.netty.util.concurrent.FailedFuture; +import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; +import java.io.IOException; +import java.util.Collections; +import java.util.concurrent.CompletableFuture; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; +import org.mockito.ArgumentCaptor; import org.mockito.InOrder; import org.mockito.Mockito; - -import java.util.Collections; -import java.util.concurrent.CompletableFuture; - -import static org.assertj.core.api.Assertions.assertThat; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; /** * Tests for {@link Http2MultiplexedChannelPool}. 
@@ -51,29 +68,40 @@ public static void teardown() { } @Test - public void closeWaitsForConnectionToBeReleasedBeforeClosingConnectionPool() throws InterruptedException { + public void failedConnectionAcquireNotifiesPromise() throws InterruptedException { + IOException exception = new IOException(); + ChannelPool connectionPool = mock(ChannelPool.class); + when(connectionPool.acquire()).thenReturn(new FailedFuture<>(loopGroup.next(), exception)); + + ChannelPool pool = new Http2MultiplexedChannelPool(connectionPool, loopGroup.next(), null); + + Future acquirePromise = pool.acquire().await(); + assertThat(acquirePromise.isSuccess()).isFalse(); + assertThat(acquirePromise.cause()).isEqualTo(exception); + } + + @Test + public void releaseParentChannelIfReleasingLastChildChannelOnGoAwayChannel() { SocketChannel channel = new NioSocketChannel(); try { loopGroup.register(channel).awaitUninterruptibly(); - Promise channelPromise = new DefaultPromise<>(loopGroup.next()); - channelPromise.setSuccess(channel); - ChannelPool connectionPool = Mockito.mock(ChannelPool.class); - Promise releasePromise = Mockito.spy(new DefaultPromise<>(loopGroup.next())); - Mockito.doCallRealMethod().when(releasePromise).await(); - releasePromise.setSuccess(null); - Mockito.when(connectionPool.release(Mockito.eq(channel))).thenReturn(releasePromise); + ChannelPool connectionPool = mock(ChannelPool.class); + ArgumentCaptor releasePromise = ArgumentCaptor.forClass(Promise.class); + when(connectionPool.release(eq(channel), releasePromise.capture())).thenAnswer(invocation -> { + Promise promise = releasePromise.getValue(); + promise.setSuccess(null); + return promise; + }); - MultiplexedChannelRecord record = new MultiplexedChannelRecord(channelPromise, - channel, - 8, - (ch, rec) -> {}); - Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, loopGroup.next(), 2, Collections.singletonList(record)); + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 8, null); + Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, loopGroup, + Collections.singleton(record), null); h2Pool.close(); - InOrder inOrder = Mockito.inOrder(connectionPool, releasePromise); - inOrder.verify(releasePromise).await(); + InOrder inOrder = Mockito.inOrder(connectionPool); + inOrder.verify(connectionPool).release(eq(channel), isA(Promise.class)); inOrder.verify(connectionPool).close(); } finally { channel.close().awaitUninterruptibly(); @@ -82,13 +110,67 @@ public void closeWaitsForConnectionToBeReleasedBeforeClosingConnectionPool() thr @Test public void acquireAfterCloseFails() throws InterruptedException { - ChannelPool connectionPool = Mockito.mock(ChannelPool.class); - - Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, loopGroup.next(), 2, Collections.emptyList()); + ChannelPool connectionPool = mock(ChannelPool.class); + Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, loopGroup.next(), null); h2Pool.close(); - assertThat(h2Pool.acquire().await().isSuccess()).isFalse(); + Future acquireResult = h2Pool.acquire().await(); + assertThat(acquireResult.isSuccess()).isFalse(); + assertThat(acquireResult.cause()).isInstanceOf(IOException.class); + } + + @Test + public void closeWaitsForConnectionToBeReleasedBeforeClosingConnectionPool() { + SocketChannel channel = new NioSocketChannel(); + try { + loopGroup.register(channel).awaitUninterruptibly(); + + ChannelPool connectionPool = mock(ChannelPool.class); + 
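// Stub release(channel, promise) to complete the captured promise right away, so close() can release the parent channel without blocking + 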
ArgumentCaptor releasePromise = ArgumentCaptor.forClass(Promise.class); + when(connectionPool.release(eq(channel), releasePromise.capture())).thenAnswer(invocation -> { + Promise promise = releasePromise.getValue(); + promise.setSuccess(null); + return promise; + }); + + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 8, null); + Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, loopGroup, + Collections.singleton(record), null); + + h2Pool.close(); + + InOrder inOrder = Mockito.inOrder(connectionPool); + inOrder.verify(connectionPool).release(eq(channel), isA(Promise.class)); + inOrder.verify(connectionPool).close(); + } finally { + channel.close().awaitUninterruptibly(); + } + } + + @Test + public void acquire_shouldAcquireAgainIfExistingNotReusable() throws Exception { + Channel channel = new EmbeddedChannel(); + + try { + ChannelPool connectionPool = Mockito.mock(ChannelPool.class); + + loopGroup.register(channel).awaitUninterruptibly(); + Promise channelPromise = new DefaultPromise<>(loopGroup.next()); + channelPromise.setSuccess(channel); + + Mockito.when(connectionPool.acquire()).thenReturn(channelPromise); + + Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, loopGroup, + Collections.emptySet(), null); + + h2Pool.acquire().awaitUninterruptibly(); + h2Pool.acquire().awaitUninterruptibly(); + + Mockito.verify(connectionPool, Mockito.times(2)).acquire(); + } finally { + channel.close(); + } } @Test(timeout = 5_000) @@ -99,17 +181,14 @@ public void interruptDuringClosePreservesFlag() throws InterruptedException { Promise channelPromise = new DefaultPromise<>(loopGroup.next()); channelPromise.setSuccess(channel); - ChannelPool connectionPool = Mockito.mock(ChannelPool.class); + ChannelPool connectionPool = mock(ChannelPool.class); Promise releasePromise = Mockito.spy(new DefaultPromise<>(loopGroup.next())); - Mockito.when(connectionPool.release(Mockito.eq(channel))).thenReturn(releasePromise); + when(connectionPool.release(eq(channel))).thenReturn(releasePromise); - MultiplexedChannelRecord record = new MultiplexedChannelRecord(channelPromise, - channel, - 8, - (ch, rec) -> { - }); - Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, loopGroup.next(), 2, Collections.singletonList(record)); + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 8, null); + Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, loopGroup, + Collections.singleton(record), null); CompletableFuture interrupteFlagPreserved = new CompletableFuture<>(); @@ -131,4 +210,141 @@ public void interruptDuringClosePreservesFlag() throws InterruptedException { channel.close().awaitUninterruptibly(); } } + + @Test + public void acquire_shouldExpandConnectionWindowSizeProportionally() { + int maxConcurrentStream = 3; + EmbeddedChannel channel = newHttp2Channel(); + channel.attr(ChannelAttributeKey.MAX_CONCURRENT_STREAMS).set((long) maxConcurrentStream); + + try { + ChannelPool connectionPool = Mockito.mock(ChannelPool.class); + + loopGroup.register(channel).awaitUninterruptibly(); + Promise channelPromise = new DefaultPromise<>(loopGroup.next()); + channelPromise.setSuccess(channel); + + Mockito.when(connectionPool.acquire()).thenReturn(channelPromise); + + Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, loopGroup, + Collections.emptySet(), null); + + Future acquire = h2Pool.acquire(); + 
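// Wait for the stream acquire to finish, then run the embedded channel's pending tasks so the connection window updates take effect + 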
acquire.awaitUninterruptibly(); + channel.runPendingTasks(); + + Http2Connection http2Connection = channel.attr(HTTP2_CONNECTION).get(); + Http2LocalFlowController flowController = + http2Connection.local().flowController(); + + Http2Stream connectionStream = http2Connection.stream(0); + + // 1_048_576 (configured initial window size), 65535 (HTTP/2 default initial window size) + // (1048576 - 65535) * 2 + 65535 = 2031617 + assertThat(flowController.windowSize(connectionStream)).isEqualTo(2031617); + + // 2031617 + 1048576 (configured initial window size) = 3080193 + assertThat(flowController.initialWindowSize(connectionStream)).isEqualTo(3080193); + + // acquire again + h2Pool.acquire().awaitUninterruptibly(); + channel.runPendingTasks(); + + // 3080193 + 1048576 (configured initial window size) = 4128769 + assertThat(flowController.initialWindowSize(connectionStream)).isEqualTo(4128769); + + Mockito.verify(connectionPool, Mockito.times(1)).acquire(); + } finally { + channel.close(); + } + } + + @Test + public void metricsShouldSumAllChildChannels() throws InterruptedException { + int maxConcurrentStream = 2; + EmbeddedChannel channel1 = newHttp2Channel(); + EmbeddedChannel channel2 = newHttp2Channel(); + channel1.attr(ChannelAttributeKey.MAX_CONCURRENT_STREAMS).set((long) maxConcurrentStream); + channel2.attr(ChannelAttributeKey.MAX_CONCURRENT_STREAMS).set((long) maxConcurrentStream); + + try { + ChannelPool connectionPool = Mockito.mock(ChannelPool.class); + + loopGroup.register(channel1).awaitUninterruptibly(); + loopGroup.register(channel2).awaitUninterruptibly(); + Promise channel1Promise = new DefaultPromise<>(loopGroup.next()); + Promise channel2Promise = new DefaultPromise<>(loopGroup.next()); + channel1Promise.setSuccess(channel1); + channel2Promise.setSuccess(channel2); + + Mockito.when(connectionPool.acquire()).thenReturn(channel1Promise, channel2Promise); + + Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, + Http2MultiplexedChannelPoolTest.loopGroup, + Collections.emptySet(), null); + MetricCollection metrics; + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(0); + + doAcquire(channel1, channel2, h2Pool); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(1); + + doAcquire(channel1, channel2, h2Pool); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(0); + + doAcquire(channel1, channel2, h2Pool); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(1); + + Channel lastAcquire = doAcquire(channel1, channel2, h2Pool); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(0); + + lastAcquire.close(); + h2Pool.release(lastAcquire).awaitUninterruptibly(); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(1); + + channel1.close(); + h2Pool.release(channel1); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(1); + + channel2.close(); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(0); + } finally { + channel1.close(); + channel2.close(); + } + } + + private 
Channel doAcquire(EmbeddedChannel channel1, EmbeddedChannel channel2, Http2MultiplexedChannelPool h2Pool) { + Future acquire = h2Pool.acquire(); + acquire.awaitUninterruptibly(); + runPendingTasks(channel1, channel2); + return acquire.getNow(); + } + + private void runPendingTasks(EmbeddedChannel channel1, EmbeddedChannel channel2) { + channel1.runPendingTasks(); + channel2.runPendingTasks(); + } + + private MetricCollection getMetrics(Http2MultiplexedChannelPool h2Pool) { + MetricCollector metricCollector = MetricCollector.create("test"); + h2Pool.collectChannelPoolMetrics(metricCollector); + return metricCollector.collect(); + } } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2PingHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2PingHandlerTest.java new file mode 100644 index 000000000000..2788337dacba --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2PingHandlerTest.java @@ -0,0 +1,242 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import static java.time.temporal.ChronoUnit.SECONDS; +import static org.assertj.core.api.Assertions.assertThat; + +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http2.DefaultHttp2PingFrame; +import io.netty.handler.codec.http2.Http2PingFrame; +import java.io.IOException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; + +public class Http2PingHandlerTest { + private static final int FAST_CHECKER_DURATION_MILLIS = 100; + + private Http2PingHandler fastChecker; + private Http2PingHandler slowChecker; + + @Before + public void setup() throws Exception { + this.fastChecker = new Http2PingHandler(FAST_CHECKER_DURATION_MILLIS); + this.slowChecker = new Http2PingHandler(30 * 1_000); + } + + @Test + public void register_withoutProtocol_Fails() { + EmbeddedChannel channel = new EmbeddedChannel(slowChecker); + assertThat(channel.pipeline().get(Http2PingHandler.class)).isNull(); + } + + @Test + public void register_withIncompleteProtocol_doesNotPing() { + EmbeddedChannel channel = createChannelWithoutProtocol(fastChecker); + channel.runPendingTasks(); + + DefaultHttp2PingFrame sentFrame = 
channel.readOutbound(); + + assertThat(sentFrame).isNull(); + } + + @Test + public void register_withHttp1Protocol_doesNotPing() { + EmbeddedChannel channel = createHttp1Channel(fastChecker); + channel.runPendingTasks(); + + DefaultHttp2PingFrame sentFrame = channel.readOutbound(); + + assertThat(sentFrame).isNull(); + } + + @Test + public void register_WithHttp2Protocol_pingsImmediately() { + EmbeddedChannel channel = createHttp2Channel(slowChecker); + channel.runPendingTasks(); + + DefaultHttp2PingFrame sentFrame = channel.readOutbound(); + + assertThat(sentFrame).isNotNull(); + assertThat(sentFrame.ack()).isFalse(); + } + + @Test + public void unregister_stopsRunning() throws InterruptedException { + EmbeddedChannel channel = createHttp2Channel(fastChecker); + channel.pipeline().remove(Http2PingHandler.class); + + // Flush out any tasks that happened before we closed + channel.runPendingTasks(); + + while (channel.readOutbound() != null) { + // Discard + } + + Thread.sleep(FAST_CHECKER_DURATION_MILLIS); + + DefaultHttp2PingFrame sentFrame = channel.readOutbound(); + + assertThat(sentFrame).isNull(); + } + + @Test + public void ignoredPingsResultInOneChannelException() throws InterruptedException { + PipelineExceptionCatcher catcher = new PipelineExceptionCatcher(); + EmbeddedChannel channel = createHttp2Channel(fastChecker, catcher); + + Thread.sleep(FAST_CHECKER_DURATION_MILLIS); + channel.runPendingTasks(); + + assertThat(catcher.caughtExceptions).hasSize(1); + assertThat(catcher.caughtExceptions.get(0)).isInstanceOf(IOException.class); + } + + @Test + public void respondedToPingsResultInNoAction() { + PipelineExceptionCatcher catcher = new PipelineExceptionCatcher(); + EmbeddedChannel channel = createHttp2Channel(fastChecker, catcher); + + channel.eventLoop().scheduleAtFixedRate(() -> channel.writeInbound(new DefaultHttp2PingFrame(0, true)), + 0, FAST_CHECKER_DURATION_MILLIS, TimeUnit.MILLISECONDS); + + Instant runEnd = Instant.now().plus(1, SECONDS); + while (Instant.now().isBefore(runEnd)) { + channel.runPendingTasks(); + } + + assertThat(catcher.caughtExceptions).isEmpty(); + } + + @Test + public void nonAckPingsResultInOneChannelException() { + PipelineExceptionCatcher catcher = new PipelineExceptionCatcher(); + EmbeddedChannel channel = createHttp2Channel(fastChecker, catcher); + + channel.eventLoop().scheduleAtFixedRate(() -> channel.writeInbound(new DefaultHttp2PingFrame(0, false)), + 0, FAST_CHECKER_DURATION_MILLIS, TimeUnit.MILLISECONDS); + + Instant runEnd = Instant.now().plus(1, SECONDS); + while (Instant.now().isBefore(runEnd)) { + channel.runPendingTasks(); + } + + assertThat(catcher.caughtExceptions).hasSize(1); + assertThat(catcher.caughtExceptions.get(0)).isInstanceOf(IOException.class); + } + + @Test + public void failedWriteResultsInOneChannelException() throws InterruptedException { + PipelineExceptionCatcher catcher = new PipelineExceptionCatcher(); + EmbeddedChannel channel = createHttp2Channel(fastChecker, catcher, new FailingWriter()); + channel.runPendingTasks(); + assertThat(catcher.caughtExceptions).hasSize(1); + assertThat(catcher.caughtExceptions.get(0)).isInstanceOf(IOException.class); + } + + @Test + public void ackPingsAreNotForwardedToOtherHandlers() throws InterruptedException { + PingReadCatcher catcher = new PingReadCatcher(); + EmbeddedChannel channel = createHttp2Channel(fastChecker, catcher); + channel.writeInbound(new DefaultHttp2PingFrame(0, true)); + + channel.runPendingTasks(); + + assertThat(catcher.caughtPings).isEmpty(); + } + + private 
static EmbeddedChannel createChannelWithoutProtocol(ChannelHandler... handlers) { + EmbeddedChannel channel = new EmbeddedChannel(); + channel.attr(ChannelAttributeKey.PROTOCOL_FUTURE).set(new CompletableFuture<>()); + channel.pipeline().addLast(handlers); + return channel; + } + + private static EmbeddedChannel createHttp1Channel(ChannelHandler... handlers) { + EmbeddedChannel channel = createChannelWithoutProtocol(handlers); + channel.attr(ChannelAttributeKey.PROTOCOL_FUTURE).get().complete(Protocol.HTTP1_1); + return channel; + } + + private static EmbeddedChannel createHttp2Channel(ChannelHandler... handlers) { + EmbeddedChannel channel = createChannelWithoutProtocol(handlers); + channel.attr(ChannelAttributeKey.PROTOCOL_FUTURE).get().complete(Protocol.HTTP2); + return channel; + } + + @Test + public void nonAckPingsAreForwardedToOtherHandlers() throws InterruptedException { + PingReadCatcher catcher = new PingReadCatcher(); + EmbeddedChannel channel = createHttp2Channel(fastChecker, catcher); + channel.writeInbound(new DefaultHttp2PingFrame(0, false)); + + channel.runPendingTasks(); + + assertThat(catcher.caughtPings).hasSize(1); + } + + @Test + public void channelInactive_shouldCancelTaskAndForwardToOtherHandlers() { + EmbeddedChannel channel = createHttp2Channel(fastChecker); + ChannelHandlerContext context = Mockito.mock(ChannelHandlerContext.class); + fastChecker.channelInactive(context); + Mockito.verify(context).fireChannelInactive(); + + channel.writeInbound(new DefaultHttp2PingFrame(0, false)); + assertThat(channel.runScheduledPendingTasks()).isEqualTo(-1L); + } + + private static final class PingReadCatcher extends SimpleChannelInboundHandler { + private final List caughtPings = Collections.synchronizedList(new ArrayList<>()); + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2PingFrame msg) { + caughtPings.add(msg); + } + } + + private static final class PipelineExceptionCatcher extends ChannelInboundHandlerAdapter { + private final List caughtExceptions = Collections.synchronizedList(new ArrayList<>()); + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + caughtExceptions.add(cause); + super.exceptionCaught(ctx, cause); + } + } + + private static final class FailingWriter extends ChannelOutboundHandlerAdapter { + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) { + promise.setFailure(new IOException("Failed!")); + } + } +} \ No newline at end of file diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2SettingsFrameHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2SettingsFrameHandlerTest.java index 8db3fa23b7c6..fafdf215685c 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2SettingsFrameHandlerTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2SettingsFrameHandlerTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -17,7 +17,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.CHANNEL_POOL_RECORD; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_MULTIPLEXED_CHANNEL_POOL; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.MAX_CONCURRENT_STREAMS; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; @@ -92,7 +92,7 @@ public void channelRead_useClientMaxStreams() { } @Test - public void exceptionCaught_shouldHandleErrorCloseChannel() throws InterruptedException { + public void exceptionCaught_shouldHandleErrorCloseChannel() throws Exception { Throwable cause = new Throwable(new RuntimeException("BOOM")); handler.exceptionCaught(context, cause); verifyChannelError(cause.getClass()); @@ -105,7 +105,7 @@ public void channelUnregistered_ProtocolFutureNotDone_ShouldRaiseError() throws } private void verifyChannelError(Class cause) throws InterruptedException { - channel.attr(CHANNEL_POOL_RECORD).set(null); + channel.attr(HTTP2_MULTIPLEXED_CHANNEL_POOL).set(null); channel.runAllPendingTasks(); diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandlerTest.java new file mode 100644 index 000000000000..ec1519a2b17e --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandlerTest.java @@ -0,0 +1,122 @@ + +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PING_TRACKER; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.handler.timeout.ReadTimeoutException; +import java.io.IOException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.http.nio.netty.internal.MockChannel; + +@RunWith(MockitoJUnitRunner.class) +public class Http2StreamExceptionHandlerTest { + + private static final NioEventLoopGroup GROUP = new NioEventLoopGroup(1); + private Http2StreamExceptionHandler handler; + + @Mock + private ChannelHandlerContext context; + + @Mock + private Channel mockParentChannel; + + private MockChannel embeddedParentChannel; + + @Mock + private Channel streamChannel; + + private TestVerifyExceptionHandler verifyExceptionHandler; + + + @Before + public void setup() throws Exception { + embeddedParentChannel = new MockChannel(); + verifyExceptionHandler = new TestVerifyExceptionHandler(); + embeddedParentChannel.pipeline().addLast(verifyExceptionHandler); + when(context.channel()).thenReturn(streamChannel); + handler = Http2StreamExceptionHandler.create(); + when(context.executor()).thenReturn(GROUP.next()); + } + + + @After + public void tearDown() { + embeddedParentChannel.close().awaitUninterruptibly(); + Mockito.reset(streamChannel, context, mockParentChannel); + } + + @AfterClass + public static void teardown() { + GROUP.shutdownGracefully().awaitUninterruptibly(); + } + + @Test + public void timeoutException_shouldFireExceptionAndPropagateException() { + when(streamChannel.parent()).thenReturn(embeddedParentChannel); + handler.exceptionCaught(context, ReadTimeoutException.INSTANCE); + + assertThat(verifyExceptionHandler.exceptionCaught).isExactlyInstanceOf(Http2ConnectionTerminatingException.class); + verify(context).fireExceptionCaught(ReadTimeoutException.INSTANCE); + } + + @Test + public void ioException_shouldFireExceptionAndPropagateException() { + IOException ioException = new IOException("yolo"); + when(streamChannel.parent()).thenReturn(embeddedParentChannel); + handler.exceptionCaught(context, ioException); + + assertThat(verifyExceptionHandler.exceptionCaught).isExactlyInstanceOf(Http2ConnectionTerminatingException.class); + verify(context).fireExceptionCaught(ioException); + } + + @Test + public void otherException_shouldJustPropagateException() { + when(streamChannel.parent()).thenReturn(embeddedParentChannel); + + RuntimeException otherException = new RuntimeException("test"); + handler.exceptionCaught(context, otherException); + + assertThat(embeddedParentChannel.attr(PING_TRACKER).get()).isNull(); + + verify(context).fireExceptionCaught(otherException); + assertThat(verifyExceptionHandler.exceptionCaught).isNull(); + } + + private static final class TestVerifyExceptionHandler extends ChannelInboundHandlerAdapter { + private Throwable exceptionCaught; + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + exceptionCaught = cause; + } 
+ } +} \ No newline at end of file diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPoolTest.java index a52ddb8a1922..170bce1e17f6 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPoolTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,6 +15,15 @@ package software.amazon.awssdk.http.nio.netty.internal.http2; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.CONNECTION_ACQUIRE_TIMEOUT; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.MAX_PENDING_CONNECTION_ACQUIRES; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.REAP_IDLE_CONNECTIONS; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; + import io.netty.channel.Channel; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; @@ -22,6 +31,8 @@ import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; +import java.time.Duration; +import java.util.concurrent.CompletableFuture; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -29,21 +40,14 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.MockChannel; import software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.utils.AttributeMap; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.CONNECTION_ACQUIRE_TIMEOUT; -import static software.amazon.awssdk.http.SdkHttpConfigurationOption.MAX_PENDING_CONNECTION_ACQUIRES; -import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; - /** * Tests for {@link HttpOrHttp2ChannelPool}. 
*/ @@ -74,6 +78,7 @@ public void methodSetup() { new NettyConfiguration(AttributeMap.builder() .put(CONNECTION_ACQUIRE_TIMEOUT, Duration.ofSeconds(1)) .put(MAX_PENDING_CONNECTION_ACQUIRES, 5) + .put(REAP_IDLE_CONNECTIONS, false) .build())); } @@ -82,6 +87,34 @@ public void protocolConfigNotStarted_closeSucceeds() { httpOrHttp2ChannelPool.close(); } + @Test(timeout = 5_000) + public void invalidProtocolConfig_shouldFailPromise() throws Exception { + HttpOrHttp2ChannelPool invalidChannelPool = new HttpOrHttp2ChannelPool(mockDelegatePool, + eventLoopGroup, + 4, + new NettyConfiguration(AttributeMap.builder() + .put(CONNECTION_ACQUIRE_TIMEOUT, Duration.ofSeconds(1)) + .put(MAX_PENDING_CONNECTION_ACQUIRES, 0) + .build())); + + Promise acquirePromise = eventLoopGroup.next().newPromise(); + when(mockDelegatePool.acquire()).thenReturn(acquirePromise); + + Thread.sleep(500); + + Channel channel = new MockChannel(); + eventLoopGroup.register(channel); + + channel.attr(PROTOCOL_FUTURE).set(CompletableFuture.completedFuture(Protocol.HTTP1_1)); + + acquirePromise.setSuccess(channel); + + Future p = invalidChannelPool.acquire(); + assertThat(p.await().cause().getMessage()).contains("maxPendingAcquires: 0 (expected: >= 1)"); + verify(mockDelegatePool).release(channel); + assertThat(channel.isOpen()).isFalse(); + } + @Test public void protocolConfigNotStarted_closeClosesDelegatePool() throws InterruptedException { httpOrHttp2ChannelPool.close(); @@ -174,4 +207,64 @@ public void protocolConfigComplete_poolClosed_closesDelegatePool() throws Interr channel.close(); } } + + @Test(timeout = 5_000) + public void incompleteProtocolFutureDelaysMetricsDelegationAndForwardsFailures() throws InterruptedException { + Promise acquirePromise = eventLoopGroup.next().newPromise(); + when(mockDelegatePool.acquire()).thenReturn(acquirePromise); + + // startConnection + httpOrHttp2ChannelPool.acquire(); + + // query for metrics before the config can complete (we haven't completed acquirePromise yet) + CompletableFuture metrics = httpOrHttp2ChannelPool.collectChannelPoolMetrics(MetricCollector.create("test")); + + Thread.sleep(500); + + assertThat(metrics.isDone()).isFalse(); + acquirePromise.setFailure(new RuntimeException("Some failure")); + + Thread.sleep(500); + + assertThat(metrics.isCompletedExceptionally()).isTrue(); + } + + @Test(timeout = 5_000) + public void incompleteProtocolFutureDelaysMetricsDelegationAndForwardsSuccessForHttp1() throws Exception { + incompleteProtocolFutureDelaysMetricsDelegationAndForwardsSuccessForProtocol(Protocol.HTTP1_1); + } + + @Test(timeout = 5_000) + public void incompleteProtocolFutureDelaysMetricsDelegationAndForwardsSuccessForHttp2() throws Exception { + incompleteProtocolFutureDelaysMetricsDelegationAndForwardsSuccessForProtocol(Protocol.HTTP2); + } + + public void incompleteProtocolFutureDelaysMetricsDelegationAndForwardsSuccessForProtocol(Protocol protocol) throws Exception { + Promise acquirePromise = eventLoopGroup.next().newPromise(); + when(mockDelegatePool.acquire()).thenReturn(acquirePromise); + + // startConnection + httpOrHttp2ChannelPool.acquire(); + + // query for metrics before the config can complete (we haven't completed acquirePromise yet) + MetricCollector metricCollector = MetricCollector.create("foo"); + CompletableFuture metricsFuture = httpOrHttp2ChannelPool.collectChannelPoolMetrics(metricCollector); + + Thread.sleep(500); + + assertThat(metricsFuture.isDone()).isFalse(); + + Channel channel = new MockChannel(); + eventLoopGroup.register(channel); + 
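// Completing the protocol future and the pending acquire lets the pool pick the protocol-specific delegate and unblocks the metrics future + 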
channel.attr(PROTOCOL_FUTURE).set(CompletableFuture.completedFuture(protocol)); + acquirePromise.setSuccess(channel); + + metricsFuture.join(); + MetricCollection metrics = metricCollector.collect(); + + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES).get(0)).isEqualTo(0); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY).get(0)).isEqualTo(4); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY).get(0)).isBetween(0, 1); + } } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpToHttp2OutboundAdapterTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpToHttp2OutboundAdapterTest.java new file mode 100644 index 000000000000..7e75717465f4 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpToHttp2OutboundAdapterTest.java @@ -0,0 +1,84 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.channel.DefaultChannelPromise; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.HttpConversionUtil; +import java.util.List; +import org.junit.AfterClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class HttpToHttp2OutboundAdapterTest { + private static final NioEventLoopGroup EVENT_LOOP_GROUP = new NioEventLoopGroup(1); + + @Mock + public ChannelHandlerContext ctx; + + @Mock + public Channel channel; + + @AfterClass + public static void classTeardown() { + EVENT_LOOP_GROUP.shutdownGracefully(); + } + + @Test + public void aggregatesWritePromises() { + when(ctx.executor()).thenReturn(EVENT_LOOP_GROUP.next()); + when(ctx.channel()).thenReturn(channel); + + HttpToHttp2OutboundAdapter adapter = new HttpToHttp2OutboundAdapter(); + ChannelPromise writePromise = new DefaultChannelPromise(channel, EVENT_LOOP_GROUP.next()); + + writeRequest(adapter, writePromise); + + ArgumentCaptor writePromiseCaptor = ArgumentCaptor.forClass(ChannelPromise.class); + verify(ctx, 
atLeastOnce()).write(any(Object.class), writePromiseCaptor.capture()); + + List writePromises = writePromiseCaptor.getAllValues(); + + assertThat(writePromise.isDone()).isFalse(); + + writePromises.forEach(ChannelPromise::setSuccess); + + assertThat(writePromise.isDone()).isTrue(); + } + + private void writeRequest(HttpToHttp2OutboundAdapter adapter, ChannelPromise promise) { + FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "/", Unpooled.wrappedBuffer(new byte[16])); + request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + adapter.write(ctx, request, promise); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecordTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecordTest.java new file mode 100644 index 000000000000..28a54eefc024 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecordTest.java @@ -0,0 +1,259 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.util.concurrent.DefaultPromise; +import io.netty.util.concurrent.Promise; +import java.io.IOException; +import java.time.Duration; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; +import software.amazon.awssdk.http.nio.netty.internal.MockChannel; +import software.amazon.awssdk.http.nio.netty.internal.UnusedChannelExceptionHandler; + +public class MultiplexedChannelRecordTest { + private EventLoopGroup loopGroup; + private MockChannel channel; + + @Before + public void setup() throws Exception { + loopGroup = new NioEventLoopGroup(4); + channel = new MockChannel(); + } + + @After + public void teardown() { + loopGroup.shutdownGracefully().awaitUninterruptibly(); + channel.close(); + } + + @Test + public void nullIdleTimeoutSeemsToDisableReaping() throws InterruptedException { + EmbeddedChannel channel = newHttp2Channel(); + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 1, 
null); + + Promise streamPromise = channel.eventLoop().newPromise(); + record.acquireStream(streamPromise); + + channel.runPendingTasks(); + + assertThat(streamPromise.isSuccess()).isTrue(); + assertThat(channel.isOpen()).isTrue(); + + record.closeAndReleaseChild(streamPromise.getNow()); + + assertThat(channel.isOpen()).isTrue(); + + Thread.sleep(1_000); + channel.runPendingTasks(); + + assertThat(channel.isOpen()).isTrue(); + } + + @Test + public void recordsWithoutReservedStreamsAreClosedAfterTimeout() throws InterruptedException { + int idleTimeoutMillis = 1000; + EmbeddedChannel channel = newHttp2Channel(); + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 1, Duration.ofMillis(idleTimeoutMillis)); + + Promise streamPromise = channel.eventLoop().newPromise(); + record.acquireStream(streamPromise); + + channel.runPendingTasks(); + + assertThat(streamPromise.isSuccess()).isTrue(); + assertThat(channel.isOpen()).isTrue(); + + record.closeAndReleaseChild(streamPromise.getNow()); + + assertThat(channel.isOpen()).isTrue(); + + Thread.sleep(idleTimeoutMillis * 2); + channel.runPendingTasks(); + + assertThat(channel.isOpen()).isFalse(); + } + + @Test + public void recordsWithReservedStreamsAreNotClosedAfterTimeout() throws InterruptedException { + int idleTimeoutMillis = 1000; + EmbeddedChannel channel = newHttp2Channel(); + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 2, Duration.ofMillis(idleTimeoutMillis)); + + Promise streamPromise = channel.eventLoop().newPromise(); + Promise streamPromise2 = channel.eventLoop().newPromise(); + record.acquireStream(streamPromise); + record.acquireStream(streamPromise2); + + channel.runPendingTasks(); + + assertThat(streamPromise.isSuccess()).isTrue(); + assertThat(streamPromise2.isSuccess()).isTrue(); + assertThat(channel.isOpen()).isTrue(); + + record.closeAndReleaseChild(streamPromise.getNow()); + + assertThat(channel.isOpen()).isTrue(); + + Thread.sleep(idleTimeoutMillis * 2); + channel.runPendingTasks(); + + assertThat(channel.isOpen()).isTrue(); + } + + @Test + public void acquireRequestResetsCloseTimer() throws InterruptedException { + int idleTimeoutMillis = 1000; + EmbeddedChannel channel = newHttp2Channel(); + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 2, Duration.ofMillis(idleTimeoutMillis)); + + for (int i = 0; i < 20; ++i) { + Thread.sleep(idleTimeoutMillis / 10); + channel.runPendingTasks(); + + Promise streamPromise = channel.eventLoop().newPromise(); + assertThat(record.acquireStream(streamPromise)).isTrue(); + channel.runPendingTasks(); + + assertThat(streamPromise.isSuccess()).isTrue(); + assertThat(channel.isOpen()).isTrue(); + + record.closeAndReleaseChild(streamPromise.getNow()); + channel.runPendingTasks(); + } + + assertThat(channel.isOpen()).isTrue(); + + Thread.sleep(idleTimeoutMillis * 2); + channel.runPendingTasks(); + + assertThat(channel.isOpen()).isFalse(); + } + + @Test + public void idleTimerDoesNotApplyBeforeFirstChannelIsCreated() throws InterruptedException { + int idleTimeoutMillis = 1000; + EmbeddedChannel channel = newHttp2Channel(); + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 2, Duration.ofMillis(idleTimeoutMillis)); + + Thread.sleep(idleTimeoutMillis * 2); + channel.runPendingTasks(); + + assertThat(channel.isOpen()).isTrue(); + } + + @Test + public void availableStream0_reusableShouldBeFalse() { + loopGroup.register(channel).awaitUninterruptibly(); + Promise channelPromise = new 
DefaultPromise<>(loopGroup.next()); + channelPromise.setSuccess(channel); + + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 0, Duration.ofSeconds(10)); + + assertThat(record.acquireStream(null)).isFalse(); + } + + @Test + public void acquireClaimedConnection_channelClosed_shouldThrowIOException() { + loopGroup.register(channel).awaitUninterruptibly(); + Promise channelPromise = new DefaultPromise<>(loopGroup.next()); + + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 1, Duration.ofSeconds(10)); + + record.closeChildChannels(); + + record.acquireClaimedStream(channelPromise); + + assertThatThrownBy(() -> channelPromise.get()).hasCauseInstanceOf(IOException.class); + } + + @Test + public void closeChildChannels_shouldDeliverException() throws ExecutionException, InterruptedException { + EmbeddedChannel channel = newHttp2Channel(); + loopGroup.register(channel).awaitUninterruptibly(); + Promise channelPromise = new DefaultPromise<>(loopGroup.next()); + channelPromise.setSuccess(channel); + + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 2, Duration.ofSeconds(10)); + + Promise streamPromise = channel.eventLoop().newPromise(); + record.acquireStream(streamPromise); + + channel.runPendingTasks(); + Channel childChannel = streamPromise.get(); + VerifyExceptionHandler verifyExceptionHandler = new VerifyExceptionHandler(); + childChannel.pipeline().addFirst(verifyExceptionHandler); + + IOException ioException = new IOException("foobar"); + record.closeChildChannels(ioException); + + assertThat(childChannel.pipeline().get(UnusedChannelExceptionHandler.class)).isNotNull(); + + assertThat(verifyExceptionHandler.exceptionCaught).hasStackTraceContaining("foobar") + .hasRootCauseInstanceOf(IOException.class); + + // should be closed by UnusedChannelExceptionHandler + assertThat(childChannel.isOpen()).isFalse(); + } + + @Test + public void closeToNewStreams_AcquireStreamShouldReturnFalse() { + MultiplexedChannelRecord record = new MultiplexedChannelRecord(channel, 2, Duration.ofSeconds(10)); + Promise streamPromise = channel.eventLoop().newPromise(); + assertThat(record.acquireStream(streamPromise)).isTrue(); + + record.closeToNewStreams(); + assertThat(record.acquireStream(streamPromise)).isFalse(); + } + + private static final class VerifyExceptionHandler extends ChannelInboundHandlerAdapter { + private Throwable exceptionCaught; + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + exceptionCaught = cause; + ctx.fireExceptionCaught(cause); + } + } + + private EmbeddedChannel newHttp2Channel() { + EmbeddedChannel channel = new EmbeddedChannel(Http2FrameCodecBuilder.forClient().build(), + new Http2MultiplexHandler(new NoOpHandler())); + channel.attr(ChannelAttributeKey.PROTOCOL_FUTURE).set(CompletableFuture.completedFuture(Protocol.HTTP2)); + return channel; + } + + private static class NoOpHandler extends ChannelInitializer { + @Override + protected void initChannel(Channel ch) { } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/ReadTimeoutTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/ReadTimeoutTest.java new file mode 100644 index 000000000000..f1d66aa9f726 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/ReadTimeoutTest.java @@ -0,0 +1,214 @@ +/* + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame; +import io.netty.handler.codec.http2.DefaultHttp2WindowUpdateFrame; +import io.netty.handler.codec.http2.Http2DataFrame; +import io.netty.handler.codec.http2.Http2Frame; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2HeadersFrame; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.util.ReferenceCountUtil; +import io.reactivex.Flowable; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Optional; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Test; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.http.async.SdkHttpContentPublisher; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; + +public class ReadTimeoutTest { + private static final int N_FRAMES = 10; + private TestH2Server testServer; + private SdkAsyncHttpClient netty; + + @After + public void methodTeardown() throws InterruptedException { + if (testServer != null) { + testServer.shutdown(); + } + testServer = null; + + if (netty != null) { + netty.close(); + } + netty = null; + } + + @Test + public void readTimeoutActivatedAfterRequestFullyWritten() throws InterruptedException { + testServer = new TestH2Server(StreamHandler::new); + testServer.init(); + + // Set a very short read timeout, shorter than it will take to transfer + // the body + netty = NettyNioAsyncHttpClient.builder() + .protocol(Protocol.HTTP2) + .readTimeout(Duration.ofMillis(500)) + .build(); + + SdkHttpFullRequest sdkRequest = SdkHttpFullRequest.builder() + .method(SdkHttpMethod.PUT) + .protocol("http") + .host("localhost") + .port(testServer.port()) + .build(); + + // at 10 frames, should take approximately 3-4 seconds for the server + // to receive given that it sleeps for 500ms between data frames and + // 
sleeps for most of them + byte[] data = new byte[16384 * N_FRAMES]; + + Publisher dataPublisher = Flowable.just(ByteBuffer.wrap(data)); + + AsyncExecuteRequest executeRequest = AsyncExecuteRequest.builder() + .request(sdkRequest) + .responseHandler(new SdkAsyncHttpResponseHandler() { + @Override + public void onHeaders(SdkHttpResponse headers) { + } + + @Override + public void onStream(Publisher stream) { + Flowable.fromPublisher(stream).forEach(s -> {}); + } + + @Override + public void onError(Throwable error) { + } + }) + .requestContentPublisher(new SdkHttpContentPublisher() { + @Override + public Optional contentLength() { + return Optional.of((long) data.length); + } + + @Override + public void subscribe(Subscriber s) { + dataPublisher.subscribe(s); + } + }) + .build(); + + netty.execute(executeRequest).join(); + } + + private static final class TestH2Server extends ChannelInitializer { + private final Supplier handlerSupplier; + + private ServerBootstrap bootstrap; + private ServerSocketChannel channel; + + private TestH2Server(Supplier handlerSupplier) { + this.handlerSupplier = handlerSupplier; + } + + public void init() throws InterruptedException { + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(new NioEventLoopGroup()) + .childHandler(this) + .localAddress(0) + .childOption(ChannelOption.SO_KEEPALIVE, true); + + channel = ((ServerSocketChannel) bootstrap.bind().await().channel()); + } + + public int port() { + return channel.localAddress().getPort(); + } + + public void shutdown() throws InterruptedException { + channel.close().await(); + } + + @Override + protected void initChannel(SocketChannel ch) { + Http2FrameCodec codec = Http2FrameCodecBuilder.forServer() + .autoAckPingFrame(true) + .initialSettings(new Http2Settings() + .initialWindowSize(16384) + .maxFrameSize(16384) + .maxConcurrentStreams(5)) + .build(); + + ch.pipeline().addLast(codec); + ch.pipeline().addLast(handlerSupplier.get()); + } + } + + private static class StreamHandler extends ChannelInboundHandlerAdapter { + private int sleeps = N_FRAMES - 3; + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + if (!(msg instanceof Http2Frame)) { + ctx.fireChannelRead(msg); + return; + } + + Http2Frame frame = (Http2Frame) msg; + if (frame instanceof Http2DataFrame) { + Http2DataFrame dataFrame = (Http2DataFrame) frame; + ReferenceCountUtil.release(frame); + if (dataFrame.isEndStream()) { + Http2HeadersFrame respHeaders = new DefaultHttp2HeadersFrame( + new DefaultHttp2Headers().status("204"), true) + .stream(dataFrame.stream()); + ctx.writeAndFlush(respHeaders); + } + + if (sleeps > 0) { + --sleeps; + // Simulate a server that's slow to read data. 
Since our + // window size is equal to the max frame size, the client + // shouldn't be able to send more data until we update our + // window + try { + Thread.sleep(500); + } catch (InterruptedException ie) { + } + } + ctx.writeAndFlush(new DefaultHttp2WindowUpdateFrame(dataFrame.initialFlowControlledBytes()) + .stream(dataFrame.stream())); + } + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/WindowSizeTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/WindowSizeTest.java new file mode 100644 index 000000000000..e33ddfcb6e17 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/WindowSizeTest.java @@ -0,0 +1,286 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal.http2; + +import static org.assertj.core.api.Assertions.assertThat; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame; +import io.netty.handler.codec.http2.Http2DataFrame; +import io.netty.handler.codec.http2.Http2Frame; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2HeadersFrame; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.codec.http2.Http2SettingsFrame; +import io.netty.util.ReferenceCountUtil; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.reactivestreams.Publisher; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.http.EmptyPublisher; +import software.amazon.awssdk.http.nio.netty.Http2Configuration; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; + +public class WindowSizeTest { + private 
static final int DEFAULT_INIT_WINDOW_SIZE = 1024 * 1024; + + private TestH2Server server; + private SdkAsyncHttpClient netty; + + @Rule + public ExpectedException expected = ExpectedException.none(); + + @After + public void methodTeardown() throws InterruptedException { + if (netty != null) { + netty.close(); + } + netty = null; + + if (server != null) { + server.shutdown(); + } + server = null; + } + + @Test + public void builderSetter_negativeValue_throws() { + expected.expect(IllegalArgumentException.class); + + NettyNioAsyncHttpClient.builder() + .http2Configuration(Http2Configuration.builder() + .initialWindowSize(-1) + .build()) + .build(); + } + + @Test + public void builderSetter_0Value_throws() { + expected.expect(IllegalArgumentException.class); + + NettyNioAsyncHttpClient.builder() + .http2Configuration(Http2Configuration.builder() + .initialWindowSize(0) + .build()) + .build(); + } + + @Test + public void builderSetter_explicitNullSet_usesDefaultValue() throws InterruptedException { + expectCorrectWindowSizeValueTest(null, DEFAULT_INIT_WINDOW_SIZE); + } + + @Test + public void execute_customWindowValue_valueSentInSettings() throws InterruptedException { + int windowSize = 128 * 1024 * 1024; + expectCorrectWindowSizeValueTest(windowSize, windowSize); + } + + @Test + public void execute_noExplicitValueSet_sendsDefaultValueInSettings() throws InterruptedException { + ConcurrentLinkedQueue receivedFrames = new ConcurrentLinkedQueue<>(); + + server = new TestH2Server(() -> new StreamHandler(receivedFrames)); + + server.init(); + + netty = NettyNioAsyncHttpClient.builder() + .protocol(Protocol.HTTP2) + .build(); + + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .requestContentPublisher(new EmptyPublisher()) + .request(SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .protocol("http") + .host("localhost") + .port(server.port()) + .build()) + .responseHandler(new SdkAsyncHttpResponseHandler() { + @Override + public void onHeaders(SdkHttpResponse headers) { + } + + @Override + public void onStream(Publisher stream) { + } + + @Override + public void onError(Throwable error) { + } + }) + .build(); + + netty.execute(req).join(); + + List receivedSettings = receivedFrames.stream() + .filter(f -> f instanceof Http2SettingsFrame) + .map(f -> (Http2SettingsFrame) f) + .map(Http2SettingsFrame::settings) + .collect(Collectors.toList()); + + assertThat(receivedSettings.size()).isGreaterThan(0); + for (Http2Settings s : receivedSettings) { + assertThat(s.initialWindowSize()).isEqualTo(DEFAULT_INIT_WINDOW_SIZE); + } + } + + private void expectCorrectWindowSizeValueTest(Integer builderSetterValue, int settingsFrameValue) throws InterruptedException { + ConcurrentLinkedQueue receivedFrames = new ConcurrentLinkedQueue<>(); + + server = new TestH2Server(() -> new StreamHandler(receivedFrames)); + + server.init(); + + netty = NettyNioAsyncHttpClient.builder() + .protocol(Protocol.HTTP2) + .http2Configuration(Http2Configuration.builder() + .initialWindowSize(builderSetterValue) + .build()) + .build(); + + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .requestContentPublisher(new EmptyPublisher()) + .request(SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .protocol("http") + .host("localhost") + .port(server.port()) + .build()) + .responseHandler(new SdkAsyncHttpResponseHandler() { + @Override + public void onHeaders(SdkHttpResponse headers) { + } + + @Override + public void onStream(Publisher stream) { + } + + @Override + public void onError(Throwable 
error) { + } + }) + .build(); + + netty.execute(req).join(); + + + List receivedSettings = receivedFrames.stream() + .filter(f -> f instanceof Http2SettingsFrame) + .map(f -> (Http2SettingsFrame) f) + .map(Http2SettingsFrame::settings) + .collect(Collectors.toList()); + + assertThat(receivedSettings.size()).isGreaterThan(0); + for (Http2Settings s : receivedSettings) { + assertThat(s.initialWindowSize()).isEqualTo(settingsFrameValue); + } + } + + private static final class TestH2Server extends ChannelInitializer { + private final Supplier handlerSupplier; + + private ServerBootstrap bootstrap; + private ServerSocketChannel channel; + + private TestH2Server(Supplier handlerSupplier) { + this.handlerSupplier = handlerSupplier; + } + + public void init() throws InterruptedException { + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(new NioEventLoopGroup()) + .childHandler(this) + .localAddress(0) + .childOption(ChannelOption.SO_KEEPALIVE, true); + + channel = ((ServerSocketChannel) bootstrap.bind().await().channel()); + } + + public int port() { + return channel.localAddress().getPort(); + } + + public void shutdown() throws InterruptedException { + channel.close().await(); + } + + @Override + protected void initChannel(SocketChannel ch) { + Http2FrameCodec codec = Http2FrameCodecBuilder.forServer() + .initialSettings(new Http2Settings() + .maxConcurrentStreams(5)) + .build(); + + ch.pipeline().addLast(codec); + ch.pipeline().addLast(handlerSupplier.get()); + } + } + + private static class StreamHandler extends ChannelInboundHandlerAdapter { + private final Queue receivedFrames; + + private StreamHandler(Queue receivedFrames) { + this.receivedFrames = receivedFrames; + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + if (!(msg instanceof Http2Frame)) { + ctx.fireChannelRead(msg); + return; + } + + Http2Frame frame = (Http2Frame) msg; + receivedFrames.add(frame); + if (frame instanceof Http2DataFrame) { + Http2DataFrame dataFrame = (Http2DataFrame) frame; + if (dataFrame.isEndStream()) { + Http2HeadersFrame respHeaders = new DefaultHttp2HeadersFrame( + new DefaultHttp2Headers().status("204"), true) + .stream(dataFrame.stream()); + ctx.writeAndFlush(respHeaders); + } + } + ReferenceCountUtil.release(frame); + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/utils/Http2TestUtils.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/utils/Http2TestUtils.java new file mode 100644 index 000000000000..b91650047453 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/utils/Http2TestUtils.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.http2.utils; + + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2FrameLogger; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.logging.LogLevel; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; + +public final class Http2TestUtils { + public static final int INITIAL_WINDOW_SIZE = 1_048_576; + + public static EmbeddedChannel newHttp2Channel() { + return newHttp2Channel(new NoOpHandler()); + } + + public static EmbeddedChannel newHttp2Channel(ChannelHandler channelHandler) { + Http2FrameCodec http2FrameCodec = Http2FrameCodecBuilder.forClient().initialSettings( + Http2Settings.defaultSettings().initialWindowSize(INITIAL_WINDOW_SIZE)) + .frameLogger(new Http2FrameLogger(LogLevel.DEBUG)).build(); + EmbeddedChannel channel = new EmbeddedChannel(http2FrameCodec, + new Http2MultiplexHandler(channelHandler)); + + channel.attr(ChannelAttributeKey.HTTP2_CONNECTION).set(http2FrameCodec.connection()); + channel.attr(ChannelAttributeKey.HTTP2_INITIAL_WINDOW_SIZE).set(INITIAL_WINDOW_SIZE); + channel.attr(ChannelAttributeKey.PROTOCOL_FUTURE).set(CompletableFuture.completedFuture(Protocol.HTTP2)); + return channel; + } + + private static class NoOpHandler extends ChannelInitializer { + @Override + protected void initChannel(Channel ch) { + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/ChannelPublisherTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/ChannelPublisherTest.java new file mode 100644 index 000000000000..7f8755545108 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/ChannelPublisherTest.java @@ -0,0 +1,184 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import io.netty.bootstrap.Bootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelOption; +import io.netty.channel.EventLoop; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.util.concurrent.DefaultPromise; +import io.netty.util.concurrent.Promise; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class ChannelPublisherTest { + + private EventLoopGroup group; + private Channel channel; + private Publisher publisher; + private SubscriberProbe subscriber; + + @Before + public void start() throws Exception { + group = new NioEventLoopGroup(); + EventLoop eventLoop = group.next(); + + HandlerPublisher handlerPublisher = new HandlerPublisher<>(eventLoop, Channel.class); + Bootstrap bootstrap = new Bootstrap(); + + bootstrap + .channel(NioServerSocketChannel.class) + .group(eventLoop) + .option(ChannelOption.AUTO_READ, false) + .handler(handlerPublisher) + .localAddress("127.0.0.1", 0); + + channel = bootstrap.bind().await().channel(); + this.publisher = handlerPublisher; + + subscriber = new SubscriberProbe<>(); + } + + @After + public void stop() throws Exception { + channel.unsafe().closeForcibly(); + group.shutdownGracefully(); + } + + @Test + public void test() throws Exception { + publisher.subscribe(subscriber); + Subscription sub = subscriber.takeSubscription(); + + // Try one cycle + sub.request(1); + Socket socket1 = connect(); + receiveConnection(); + readWriteData(socket1, 1); + + // Check back pressure + Socket socket2 = connect(); + subscriber.expectNoElements(); + + // Now request the next connection + sub.request(1); + receiveConnection(); + readWriteData(socket2, 2); + + // Close the channel + channel.close(); + subscriber.expectNoElements(); + subscriber.expectComplete(); + } + + private Socket connect() throws Exception { + InetSocketAddress address = (InetSocketAddress) channel.localAddress(); + return new Socket(address.getAddress(), address.getPort()); + } + + private void readWriteData(Socket socket, int data) throws Exception { + OutputStream os = socket.getOutputStream(); + os.write(data); + os.flush(); + InputStream is = socket.getInputStream(); + int received = is.read(); + socket.close(); + assertEquals(received, data); + } + + private void 
receiveConnection() throws Exception { + Channel channel = subscriber.take(); + channel.pipeline().addLast(new ChannelInboundHandlerAdapter() { + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + ctx.writeAndFlush(msg); + } + }); + group.register(channel); + } + + private class SubscriberProbe implements Subscriber { + final BlockingQueue subscriptions = new LinkedBlockingQueue<>(); + final BlockingQueue elements = new LinkedBlockingQueue<>(); + final Promise promise = new DefaultPromise<>(group.next()); + + public void onSubscribe(Subscription s) { + subscriptions.add(s); + } + + public void onNext(T t) { + elements.add(t); + } + + public void onError(Throwable t) { + promise.setFailure(t); + } + + public void onComplete() { + promise.setSuccess(null); + } + + Subscription takeSubscription() throws Exception { + Subscription sub = subscriptions.poll(100, TimeUnit.MILLISECONDS); + assertNotNull(sub); + return sub; + } + + T take() throws Exception { + T t = elements.poll(1000, TimeUnit.MILLISECONDS); + assertNotNull(t); + return t; + } + + void expectNoElements() throws Exception { + T t = elements.poll(100, TimeUnit.MILLISECONDS); + assertNull(t); + } + + void expectComplete() throws Exception { + promise.get(100, TimeUnit.MILLISECONDS); + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerPublisherVerificationTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerPublisherVerificationTest.java new file mode 100644 index 000000000000..06199aaa58ca --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerPublisherVerificationTest.java @@ -0,0 +1,159 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.DefaultEventLoopGroup; +import io.netty.channel.local.LocalChannel; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import org.reactivestreams.Publisher; +import org.reactivestreams.tck.PublisherVerification; +import org.reactivestreams.tck.TestEnvironment; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import software.amazon.awssdk.http.nio.netty.internal.nrs.util.BatchedProducer; +import software.amazon.awssdk.http.nio.netty.internal.nrs.util.ClosedLoopChannel; +import software.amazon.awssdk.http.nio.netty.internal.nrs.util.ScheduledBatchedProducer; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class HandlerPublisherVerificationTest extends PublisherVerification { + + private final int batchSize; + // The number of elements to publish initially, before the subscriber is received + private final int publishInitial; + // Whether we should use scheduled publishing (with a small delay) + private final boolean scheduled; + + private ScheduledExecutorService executor; + private DefaultEventLoopGroup eventLoop; + + @Factory(dataProvider = "data") + public HandlerPublisherVerificationTest(int batchSize, int publishInitial, boolean scheduled) { + super(new TestEnvironment(200)); + this.batchSize = batchSize; + this.publishInitial = publishInitial; + this.scheduled = scheduled; + } + + @DataProvider + public static Object[][] data() { + final int defaultBatchSize = 3; + final int defaultPublishInitial = 3; + final boolean defaultScheduled = false; + + return new Object[][] { + { defaultBatchSize, defaultPublishInitial, defaultScheduled }, + { 1, defaultPublishInitial, defaultScheduled }, + { defaultBatchSize, 0, defaultScheduled }, + { defaultBatchSize, defaultPublishInitial, true } + }; + } + + // I tried making this before/after class, but encountered a strange error where after 32 publishers were created, + // the following tests complained about the executor being shut down when I registered the channel. Though, it + // doesn't happen if you create 32 publishers in a single test. 
+ @BeforeMethod + public void startEventLoop() { + eventLoop = new DefaultEventLoopGroup(); + } + + @AfterMethod + public void stopEventLoop() { + eventLoop.shutdownGracefully(); + eventLoop = null; + } + + @BeforeClass + public void startExecutor() { + executor = Executors.newSingleThreadScheduledExecutor(); + } + + @AfterClass + public void stopExecutor() { + executor.shutdown(); + } + + @Override + public Publisher createPublisher(final long elements) { + final BatchedProducer out; + if (scheduled) { + out = new ScheduledBatchedProducer(elements, batchSize, publishInitial, executor, 5); + } else { + out = new BatchedProducer(elements, batchSize, publishInitial, executor); + } + + final ClosedLoopChannel channel = new ClosedLoopChannel(); + channel.config().setAutoRead(false); + ChannelFuture registered = eventLoop.register(channel); + + final HandlerPublisher publisher = new HandlerPublisher<>(registered.channel().eventLoop(), Long.class); + + registered.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + channel.pipeline().addLast("out", out); + channel.pipeline().addLast("publisher", publisher); + + for (long i = 0; i < publishInitial && i < elements; i++) { + channel.pipeline().fireChannelRead(i); + } + if (elements <= publishInitial) { + channel.pipeline().fireChannelInactive(); + } + } + }); + + return publisher; + } + + @Override + public Publisher createFailedPublisher() { + LocalChannel channel = new LocalChannel(); + eventLoop.register(channel); + HandlerPublisher publisher = new HandlerPublisher<>(channel.eventLoop(), Long.class); + channel.pipeline().addLast("publisher", publisher); + channel.pipeline().fireExceptionCaught(new RuntimeException("failed")); + + return publisher; + } + + @Override + public void stochastic_spec103_mustSignalOnMethodsSequentially() throws Throwable { + try { + super.stochastic_spec103_mustSignalOnMethodsSequentially(); + } catch (Throwable t) { + // CI is failing here, but maven doesn't tell us which parameters failed + System.out.println("Stochastic test failed with parameters batchSize=" + batchSize + + " publishInitial=" + publishInitial + " scheduled=" + scheduled); + throw t; + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriberBlackboxVerificationTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriberBlackboxVerificationTest.java new file mode 100644 index 000000000000..69346e06093c --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriberBlackboxVerificationTest.java @@ -0,0 +1,98 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandler; +import io.netty.channel.embedded.EmbeddedChannel; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import org.reactivestreams.tck.SubscriberBlackboxVerification; +import org.reactivestreams.tck.TestEnvironment; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class HandlerSubscriberBlackboxVerificationTest extends SubscriberBlackboxVerification<Long> { + + public HandlerSubscriberBlackboxVerificationTest() { + super(new TestEnvironment()); + } + + @Override + public Subscriber<Long> createSubscriber() { + // Embedded channel requires at least one handler when it's created, but HandlerSubscriber + // needs the channel's event loop in order to be created, so start with a dummy, then replace. + ChannelHandler dummy = new ChannelDuplexHandler(); + EmbeddedChannel channel = new EmbeddedChannel(dummy); + HandlerSubscriber<Long> subscriber = new HandlerSubscriber<>(channel.eventLoop(), 2, 4); + channel.pipeline().replace(dummy, "subscriber", subscriber); + + return new SubscriberWithChannel<>(channel, subscriber); + } + + @Override + public Long createElement(int element) { + return (long) element; + } + + @Override + public void triggerRequest(Subscriber<? super Long> subscriber) { + EmbeddedChannel channel = ((SubscriberWithChannel) subscriber).channel; + + channel.runPendingTasks(); + while (channel.readOutbound() != null) { + channel.runPendingTasks(); + } + channel.runPendingTasks(); + } + + /** + * Delegate subscriber that makes the embedded channel available so we can talk to it to trigger a request. + */ + private static class SubscriberWithChannel<T> implements Subscriber<T> { + final EmbeddedChannel channel; + final HandlerSubscriber<T> subscriber; + + public SubscriberWithChannel(EmbeddedChannel channel, HandlerSubscriber<T> subscriber) { + this.channel = channel; + this.subscriber = subscriber; + } + + public void onSubscribe(Subscription s) { + subscriber.onSubscribe(s); + } + + public void onNext(T t) { + subscriber.onNext(t); + } + + public void onError(Throwable t) { + subscriber.onError(t); + } + + public void onComplete() { + subscriber.onComplete(); + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriberTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriberTest.java new file mode 100644 index 000000000000..4b47365fce1f --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriberTest.java @@ -0,0 +1,220 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file.
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelPromise; +import io.netty.channel.DefaultChannelPromise; +import io.netty.channel.EventLoop; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.util.concurrent.AbstractEventExecutor; +import io.netty.util.concurrent.Future; +import io.netty.util.internal.ObjectUtil; +import java.util.ArrayDeque; +import java.util.Queue; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.reactivestreams.Subscription; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class HandlerSubscriberTest { + private EmbeddedChannel channel; + private CustomEmbeddedEventLoop eventLoop; + private HandlerSubscriber handler; + + @Before + public void setup() throws Exception { + channel = new CustomEmbeddedChannel(); + eventLoop = new CustomEmbeddedEventLoop(); + eventLoop.register(channel).syncUninterruptibly(); + + handler = new HandlerSubscriber<>(eventLoop); + channel.pipeline().addLast(handler); + } + + @After + public void teardown() { + channel.close(); + } + + /** + * Ensures that onNext invocations against the {@link HandlerSubscriber} do not order things based on which thread is calling + * onNext. 
+ */ + @Test + public void onNextWritesInProperOrderFromAnyThread() { + HttpContent front = emptyHttpRequest(); + HttpContent back = emptyHttpRequest(); + + handler.onSubscribe(doNothingSubscription()); + eventLoop.inEventLoop(false); + handler.onNext(front); + eventLoop.inEventLoop(true); + handler.onNext(back); + + eventLoop.runTasks(); + + Queue outboundMessages = channel.outboundMessages(); + + assertThat(outboundMessages).hasSize(2); + assertThat(outboundMessages.poll()).isSameAs(front); + assertThat(outboundMessages.poll()).isSameAs(back); + } + + private DefaultFullHttpRequest emptyHttpRequest() { + return new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "http://fake.com"); + } + + private Subscription doNothingSubscription() { + return new Subscription() { + @Override + public void request(long n) { } + + @Override + public void cancel() { } + }; + } + + private static class CustomEmbeddedChannel extends EmbeddedChannel { + private CustomEmbeddedChannel() { + super(false, false); + } + + @Override + protected boolean isCompatible(EventLoop loop) { + return loop instanceof CustomEmbeddedEventLoop; + } + } + + private static class CustomEmbeddedEventLoop extends AbstractEventExecutor implements EventLoop { + private final Queue tasks = new ArrayDeque<>(2); + private volatile boolean inEventLoop = true; + + @Override + public EventLoopGroup parent() { + return (EventLoopGroup) super.parent(); + } + + @Override + public EventLoop next() { + return (EventLoop) super.next(); + } + + @Override + public void execute(Runnable runnable) { + tasks.add(runnable); + } + + public void runTasks() { + for (;;) { + Runnable task = tasks.poll(); + if (task == null) { + break; + } + + task.run(); + } + } + + @Override + public Future shutdownGracefully(long quietPeriod, long timeout, TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public Future terminationFuture() { + throw new UnsupportedOperationException(); + } + + @Override + @Deprecated + public void shutdown() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isShuttingDown() { + return false; + } + + @Override + public boolean isShutdown() { + return false; + } + + @Override + public boolean isTerminated() { + return false; + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) { + return false; + } + + @Override + public ChannelFuture register(Channel channel) { + return register(new DefaultChannelPromise(channel, this)); + } + + @Override + public ChannelFuture register(ChannelPromise promise) { + ObjectUtil.checkNotNull(promise, "promise"); + promise.channel().unsafe().register(this, promise); + return promise; + } + + @Deprecated + @Override + public ChannelFuture register(Channel channel, ChannelPromise promise) { + channel.unsafe().register(this, promise); + return promise; + } + + public void inEventLoop(boolean inEventLoop) { + this.inEventLoop = inEventLoop; + } + + @Override + public boolean inEventLoop() { + return inEventLoop; + } + + @Override + public boolean inEventLoop(Thread thread) { + return inEventLoop; + } + } +} \ No newline at end of file diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriberWhiteboxVerificationTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriberWhiteboxVerificationTest.java new file mode 100644 index 000000000000..0a9b7d76aa09 --- /dev/null +++ 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/HandlerSubscriberWhiteboxVerificationTest.java @@ -0,0 +1,118 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs; + +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.DefaultEventLoopGroup; +import io.netty.util.concurrent.DefaultPromise; +import io.netty.util.concurrent.Promise; +import org.reactivestreams.Subscriber; +import org.reactivestreams.tck.SubscriberWhiteboxVerification; +import org.reactivestreams.tck.TestEnvironment; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import software.amazon.awssdk.http.nio.netty.internal.nrs.util.ClosedLoopChannel; +import software.amazon.awssdk.http.nio.netty.internal.nrs.util.ProbeHandler; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class HandlerSubscriberWhiteboxVerificationTest extends SubscriberWhiteboxVerification { + + private boolean workAroundIssue277; + + public HandlerSubscriberWhiteboxVerificationTest() { + super(new TestEnvironment()); + } + + private DefaultEventLoopGroup eventLoop; + + // I tried making this before/after class, but encountered a strange error where after 32 publishers were created, + // the following tests complained about the executor being shut down when I registered the channel. Though, it + // doesn't happen if you create 32 publishers in a single test. 
+ @BeforeMethod + public void startEventLoop() { + workAroundIssue277 = false; + eventLoop = new DefaultEventLoopGroup(); + } + + @AfterMethod + public void stopEventLoop() { + eventLoop.shutdownGracefully(); + eventLoop = null; + } + + @Override + public Subscriber createSubscriber(WhiteboxSubscriberProbe probe) { + final ClosedLoopChannel channel = new ClosedLoopChannel(); + channel.config().setAutoRead(false); + ChannelFuture registered = eventLoop.register(channel); + + final HandlerSubscriber subscriber = new HandlerSubscriber<>(registered.channel().eventLoop(), 2, 4); + final ProbeHandler probeHandler = new ProbeHandler<>(probe, Long.class); + final Promise handlersInPlace = new DefaultPromise<>(eventLoop.next()); + + registered.addListener(new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) throws Exception { + channel.pipeline().addLast("probe", probeHandler); + channel.pipeline().addLast("subscriber", subscriber); + handlersInPlace.setSuccess(null); + // Channel needs to be active before the subscriber starts responding to demand + channel.pipeline().fireChannelActive(); + } + }); + + if (workAroundIssue277) { + try { + // Wait for the pipeline to be setup, so we're ready to receive elements even if they aren't requested, + // because https://github.com/reactive-streams/reactive-streams-jvm/issues/277 + handlersInPlace.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + return probeHandler.wrap(subscriber); + } + + @Override + public void required_spec208_mustBePreparedToReceiveOnNextSignalsAfterHavingCalledSubscriptionCancel() throws Throwable { + // See https://github.com/reactive-streams/reactive-streams-jvm/issues/277 + workAroundIssue277 = true; + super.required_spec208_mustBePreparedToReceiveOnNextSignalsAfterHavingCalledSubscriptionCancel(); + } + + @Override + public void required_spec308_requestMustRegisterGivenNumberElementsToBeProduced() throws Throwable { + workAroundIssue277 = true; + super.required_spec308_requestMustRegisterGivenNumberElementsToBeProduced(); + } + + @Override + public Long createElement(int element) { + return (long) element; + } + +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/BatchedProducer.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/BatchedProducer.java new file mode 100644 index 000000000000..d341d8f850ce --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/BatchedProducer.java @@ -0,0 +1,79 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs.util; + +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import java.util.concurrent.Executor; + +/** + * A batched producer. + * + * Responds to read requests with batches of elements according to batch size. When eofOn is reached, it closes the + * channel. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class BatchedProducer extends ChannelOutboundHandlerAdapter { + + protected final long eofOn; + protected final int batchSize; + private final Executor executor; + protected long sequence; + + public BatchedProducer(long eofOn, int batchSize, long sequence, Executor executor) { + this.eofOn = eofOn; + this.batchSize = batchSize; + this.sequence = sequence; + this.executor = executor; + } + + private boolean cancelled = false; + + + @Override + public void read(final ChannelHandlerContext ctx) throws Exception { + if (cancelled) { + throw new IllegalStateException("Received demand after being cancelled"); + } + executor.execute(() -> { + for (int i = 0; i < batchSize && sequence != eofOn; i++) { + ctx.fireChannelRead(sequence++); + } + if (eofOn == sequence) { + ctx.fireChannelInactive(); + } else { + ctx.fireChannelReadComplete(); + } + }); + } + + @Override + public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception { + if (cancelled) { + throw new IllegalStateException("Cancelled twice"); + } + cancelled = true; + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/ClosedLoopChannel.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/ClosedLoopChannel.java new file mode 100644 index 000000000000..a687b95e59be --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/ClosedLoopChannel.java @@ -0,0 +1,131 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs.util; + +import io.netty.channel.AbstractChannel; +import io.netty.channel.ChannelConfig; +import io.netty.channel.ChannelMetadata; +import io.netty.channel.ChannelOutboundBuffer; +import io.netty.channel.ChannelPromise; +import io.netty.channel.DefaultChannelConfig; +import io.netty.channel.EventLoop; +import java.net.SocketAddress; + +/** + * A closed loop channel that sends no events and receives no events, for testing purposes. + * + * Any outgoing events that reach the channel will throw an exception. All events should be caught + * be inserting a handler that catches them and responds accordingly. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class ClosedLoopChannel extends AbstractChannel { + + private final ChannelConfig config = new DefaultChannelConfig(this); + private static final ChannelMetadata metadata = new ChannelMetadata(false); + + private volatile boolean open = true; + private volatile boolean active = true; + + public ClosedLoopChannel() { + super(null); + } + + public void setOpen(boolean open) { + this.open = open; + } + + public void setActive(boolean active) { + this.active = active; + } + + @Override + protected AbstractUnsafe newUnsafe() { + return new AbstractUnsafe() { + @Override + public void connect(SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + protected boolean isCompatible(EventLoop loop) { + return true; + } + + @Override + protected SocketAddress localAddress0() { + throw new UnsupportedOperationException(); + } + + @Override + protected SocketAddress remoteAddress0() { + throw new UnsupportedOperationException(); + } + + @Override + protected void doBind(SocketAddress localAddress) throws Exception { + throw new UnsupportedOperationException(); + } + + @Override + protected void doDisconnect() throws Exception { + throw new UnsupportedOperationException(); + } + + @Override + protected void doClose() throws Exception { + this.open = false; + } + + @Override + protected void doBeginRead() throws Exception { + throw new UnsupportedOperationException(); + } + + @Override + protected void doWrite(ChannelOutboundBuffer in) throws Exception { + throw new UnsupportedOperationException(); + } + + @Override + public ChannelConfig config() { + return config; + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public boolean isActive() { + return active; + } + + @Override + public ChannelMetadata metadata() { + return metadata; + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/Probe.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/Probe.java new file mode 100644 index 000000000000..172eef460216 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/Probe.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs.util; + +import java.util.Date; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class Probe { + + protected final String name; + protected final Long start; + + /** + * Create a new probe and log that it started. + */ + protected Probe(String name) { + this.name = name; + start = System.nanoTime(); + log("Probe created at " + new Date()); + } + + /** + * Create a new probe with the start time from another probe. + */ + protected Probe(String name, long start) { + this.name = name; + this.start = start; + } + + protected void log(String message) { + System.out.println(String.format("%10d %-5s %-15s %s", (System.nanoTime() - start) / 1000, name, Thread.currentThread().getName(), message)); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/ProbeHandler.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/ProbeHandler.java new file mode 100644 index 000000000000..492aae10bda4 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/ProbeHandler.java @@ -0,0 +1,128 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs.util; + +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import java.util.LinkedList; +import java.util.Queue; +import java.util.concurrent.atomic.AtomicInteger; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import org.reactivestreams.tck.SubscriberWhiteboxVerification; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class ProbeHandler extends ChannelDuplexHandler implements SubscriberWhiteboxVerification.SubscriberPuppet { + + private static final int NO_CONTEXT = 0; + private static final int RUN = 1; + private static final int CANCEL = 2; + + private final SubscriberWhiteboxVerification.WhiteboxSubscriberProbe probe; + private final Class clazz; + private final Queue queue = new LinkedList<>(); + private final AtomicInteger state = new AtomicInteger(NO_CONTEXT); + private volatile ChannelHandlerContext ctx; + // Netty doesn't provide a way to send errors out, so we capture whether it was an error or complete here + private volatile Throwable receivedError = null; + + public ProbeHandler(SubscriberWhiteboxVerification.WhiteboxSubscriberProbe probe, Class clazz) { + this.probe = probe; + this.clazz = clazz; + } + + @Override + public void handlerAdded(ChannelHandlerContext ctx) throws Exception { + this.ctx = ctx; + if (!state.compareAndSet(NO_CONTEXT, RUN)) { + ctx.fireChannelInactive(); + } + } + + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { + queue.add(new WriteEvent(clazz.cast(msg), promise)); + } + + @Override + public void close(ChannelHandlerContext ctx, ChannelPromise future) throws Exception { + if (receivedError == null) { + probe.registerOnComplete(); + } else { + probe.registerOnError(receivedError); + } + } + + @Override + public void flush(ChannelHandlerContext ctx) throws Exception { + while (!queue.isEmpty()) { + WriteEvent event = queue.remove(); + probe.registerOnNext(event.msg); + event.future.setSuccess(); + } + } + + @Override + public void triggerRequest(long elements) { + // No need, the channel automatically requests + } + + @Override + public void signalCancel() { + if (!state.compareAndSet(NO_CONTEXT, CANCEL)) { + ctx.fireChannelInactive(); + } + } + + private class WriteEvent { + final T msg; + final ChannelPromise future; + + private WriteEvent(T msg, ChannelPromise future) { + this.msg = msg; + this.future = future; + } + } + + public Subscriber wrap(final Subscriber subscriber) { + return new Subscriber() { + public void onSubscribe(Subscription s) { + probe.registerOnSubscribe(ProbeHandler.this); + subscriber.onSubscribe(s); + } + public void onNext(T t) { + subscriber.onNext(t); + } + public void onError(Throwable t) { + receivedError = t; + subscriber.onError(t); + } + public void onComplete() { + subscriber.onComplete(); + } + }; + } +} diff --git 
a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/PublisherProbe.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/PublisherProbe.java new file mode 100644 index 000000000000..22a008eb7bfd --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/PublisherProbe.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs.util; + +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class PublisherProbe extends Probe implements Publisher { + + private final Publisher publisher; + + public PublisherProbe(Publisher publisher, String name) { + super(name); + this.publisher = publisher; + } + + @Override + public void subscribe(Subscriber s) { + String sName = s == null ? "null" : s.getClass().getName(); + log("invoke subscribe with subscriber " + sName); + publisher.subscribe(new SubscriberProbe<>(s, name, start)); + log("finish subscribe"); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/ScheduledBatchedProducer.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/ScheduledBatchedProducer.java new file mode 100644 index 000000000000..f814cfce1e80 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/ScheduledBatchedProducer.java @@ -0,0 +1,66 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs.util; + +import io.netty.channel.ChannelHandlerContext; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * A batched producer. + * + * Responds to read requests with batches of elements according to batch size. When eofOn is reached, it closes the + * channel. + * + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. + */ +public class ScheduledBatchedProducer extends BatchedProducer { + + private final ScheduledExecutorService executor; + private final long delay; + + public ScheduledBatchedProducer(long eofOn, int batchSize, long sequence, ScheduledExecutorService executor, long delay) { + super(eofOn, batchSize, sequence, executor); + this.executor = executor; + this.delay = delay; + } + + protected boolean complete; + + @Override + public void read(final ChannelHandlerContext ctx) throws Exception { + executor.schedule(() -> { + for (int i = 0; i < batchSize && sequence != eofOn; i++) { + ctx.fireChannelRead(sequence++); + } + complete = eofOn == sequence; + executor.schedule(() -> { + if (complete) { + ctx.fireChannelInactive(); + } else { + ctx.fireChannelReadComplete(); + } + }, delay, TimeUnit.MILLISECONDS); + }, delay, TimeUnit.MILLISECONDS); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/SubscriberProbe.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/SubscriberProbe.java new file mode 100644 index 000000000000..8003bd9395ba --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/nrs/util/SubscriberProbe.java @@ -0,0 +1,88 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Original source licensed under the Apache License 2.0 by playframework. + */ + +package software.amazon.awssdk.http.nio.netty.internal.nrs.util; + +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; + +/** + * This class contains source imported from https://github.com/playframework/netty-reactive-streams, + * licensed under the Apache License 2.0, available at the time of the fork (1/31/2020) here: + * https://github.com/playframework/netty-reactive-streams/blob/master/LICENSE.txt + * + * All original source licensed under the Apache License 2.0 by playframework. All modifications are + * licensed under the Apache License 2.0 by Amazon Web Services. 
+ */ +public class SubscriberProbe extends Probe implements Subscriber { + + private final Subscriber subscriber; + + public SubscriberProbe(Subscriber subscriber, String name) { + super(name); + this.subscriber = subscriber; + } + + SubscriberProbe(Subscriber subscriber, String name, long start) { + super(name, start); + this.subscriber = subscriber; + } + + @Override + public void onSubscribe(final Subscription s) { + String sName = s == null ? "null" : s.getClass().getName(); + log("invoke onSubscribe with subscription " + sName); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + log("invoke request " + n); + s.request(n); + log("finish request"); + } + + @Override + public void cancel() { + log("invoke cancel"); + s.cancel(); + log("finish cancel"); + } + }); + log("finish onSubscribe"); + } + + @Override + public void onNext(T t) { + log("invoke onNext with message " + t); + subscriber.onNext(t); + log("finish onNext"); + } + + @Override + public void onError(Throwable t) { + String tName = t == null ? "null" : t.getClass().getName(); + log("invoke onError with " + tName); + subscriber.onError(t); + log("finish onError"); + } + + @Override + public void onComplete() { + log("invoke onComplete"); + subscriber.onComplete(); + log("finish onComplete"); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPoolTest.java new file mode 100644 index 000000000000..c429b2e7b882 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPoolTest.java @@ -0,0 +1,188 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.utils; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.mock; + +import io.netty.channel.Channel; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.mockito.Mockito; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.nio.netty.internal.MockChannel; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; +import software.amazon.awssdk.http.nio.netty.internal.utils.BetterFixedChannelPool.AcquireTimeoutAction; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.CompletableFutureUtils; + +public class BetterFixedChannelPoolTest { + private static EventLoopGroup eventLoopGroup; + + private BetterFixedChannelPool channelPool; + private SdkChannelPool delegatePool; + + @BeforeClass + public static void setupClass() { + eventLoopGroup = new NioEventLoopGroup(1); + } + + @AfterClass + public static void teardownClass() throws InterruptedException { + eventLoopGroup.shutdownGracefully().await(); + } + + @Before + public void setup() { + delegatePool = mock(SdkChannelPool.class); + + channelPool = BetterFixedChannelPool.builder() + .channelPool(delegatePool) + .maxConnections(2) + .maxPendingAcquires(2) + .acquireTimeoutAction(AcquireTimeoutAction.FAIL) + .acquireTimeoutMillis(10_000) + .executor(eventLoopGroup.next()) + .build(); + } + + @After + public void teardown() { + channelPool.close(); + } + + @Test + public void delegateChannelPoolMetricFailureIsReported() { + Throwable t = new Throwable(); + Mockito.when(delegatePool.collectChannelPoolMetrics(any())).thenReturn(CompletableFutureUtils.failedFuture(t)); + + CompletableFuture result = channelPool.collectChannelPoolMetrics(MetricCollector.create("test")); + waitForCompletion(result); + assertThat(result).hasFailedWithThrowableThat().isEqualTo(t); + } + + @Test(timeout = 5_000) + public void metricCollectionHasCorrectValuesAfterAcquiresAndReleases() throws Exception { + List> acquirePromises = Collections.synchronizedList(new ArrayList<>()); + Mockito.when(delegatePool.acquire(isA(Promise.class))).thenAnswer(i -> { + Promise promise = eventLoopGroup.next().newPromise(); + acquirePromises.add(promise); + return promise; + }); + + List> releasePromises = Collections.synchronizedList(new ArrayList<>()); + Mockito.when(delegatePool.release(isA(Channel.class), isA(Promise.class))).thenAnswer(i -> { + Promise promise = i.getArgumentAt(1, Promise.class); + releasePromises.add(promise); + return promise; + }); + + Mockito.when(delegatePool.collectChannelPoolMetrics(any())).thenReturn(CompletableFuture.completedFuture(null)); + + assertConnectionsCheckedOutAndPending(0, 0); + + channelPool.acquire(); + completePromise(acquirePromises, 0); + assertConnectionsCheckedOutAndPending(1, 0); + + 
channelPool.acquire(); + completePromise(acquirePromises, 1); + assertConnectionsCheckedOutAndPending(2, 0); + + channelPool.acquire(); + assertConnectionsCheckedOutAndPending(2, 1); + + channelPool.acquire(); + assertConnectionsCheckedOutAndPending(2, 2); + + Future f = channelPool.acquire(); + assertConnectionsCheckedOutAndPending(2, 2); + assertThat(f.isSuccess()).isFalse(); + assertThat(f.cause()).isInstanceOf(IllegalStateException.class); + + channelPool.release(acquirePromises.get(1).getNow()); + assertConnectionsCheckedOutAndPending(2, 2); + + completePromise(releasePromises, 0); + completePromise(acquirePromises, 2); + assertConnectionsCheckedOutAndPending(2, 1); + + channelPool.release(acquirePromises.get(2).getNow()); + completePromise(releasePromises, 1); + completePromise(acquirePromises, 3); + assertConnectionsCheckedOutAndPending(2, 0); + + channelPool.release(acquirePromises.get(0).getNow()); + completePromise(releasePromises, 2); + assertConnectionsCheckedOutAndPending(1, 0); + + channelPool.release(acquirePromises.get(3).getNow()); + completePromise(releasePromises, 3); + assertConnectionsCheckedOutAndPending(0, 0); + } + + private void completePromise(List> promises, int promiseIndex) throws Exception { + waitForPromise(promises, promiseIndex); + + MockChannel channel = new MockChannel(); + eventLoopGroup.next().register(channel); + promises.get(promiseIndex).setSuccess(channel); + } + + private void waitForPromise(List> promises, int promiseIndex) throws Exception { + while (promises.size() < promiseIndex + 1) { + Thread.sleep(1); + } + } + + private void assertConnectionsCheckedOutAndPending(int checkedOut, int pending) { + MetricCollector metricCollector = MetricCollector.create("foo"); + waitForCompletion(channelPool.collectChannelPoolMetrics(metricCollector)); + + MetricCollection metrics = metricCollector.collect(); + + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(2); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY)).containsExactly(checkedOut); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES)).containsExactly(pending); + } + + private void waitForCompletion(CompletableFuture future) { + try { + future.get(5, TimeUnit.SECONDS); + } catch (ExecutionException e) { + return; + } catch (InterruptedException | TimeoutException e) { + throw new Error(e); + } + } +} \ No newline at end of file diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelUtilsTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelUtilsTest.java index 42db5c4a7dcb..1bf5c0e509a5 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelUtilsTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelUtilsTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -16,7 +16,7 @@ package software.amazon.awssdk.http.nio.netty.internal.utils; import static org.assertj.core.api.Assertions.assertThat; -import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.CHANNEL_POOL_RECORD; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_MULTIPLEXED_CHANNEL_POOL; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.MAX_CONCURRENT_STREAMS; import io.netty.channel.Channel; @@ -37,7 +37,7 @@ public void testGetAttributes() throws Exception { channel = new MockChannel(); channel.attr(MAX_CONCURRENT_STREAMS).set(1L); assertThat(ChannelUtils.getAttribute(channel, MAX_CONCURRENT_STREAMS).get()).isEqualTo(1L); - assertThat(ChannelUtils.getAttribute(channel, CHANNEL_POOL_RECORD)).isNotPresent(); + assertThat(ChannelUtils.getAttribute(channel, HTTP2_MULTIPLEXED_CHANNEL_POOL)).isNotPresent(); } finally { Optional.ofNullable(channel).ifPresent(Channel::close); } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ExceptionHandlingUtilsTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ExceptionHandlingUtilsTest.java index dd9ec85a532b..9b94849de556 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ExceptionHandlingUtilsTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ExceptionHandlingUtilsTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtilsTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtilsTest.java new file mode 100644 index 000000000000..d476e6b37c2f --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtilsTest.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.utils; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.netty.channel.Channel; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslHandler; +import io.netty.util.AttributeKey; +import javax.net.ssl.SSLEngine; +import org.junit.Test; +import software.amazon.awssdk.http.nio.netty.internal.MockChannel; + +public class NettyUtilsTest { + @Test + public void testGetOrCreateAttributeKey_calledTwiceWithSameName_returnsSameInstance() { + String attr = "NettyUtilsTest.Foo"; + AttributeKey fooAttr = NettyUtils.getOrCreateAttributeKey(attr); + assertThat(NettyUtils.getOrCreateAttributeKey(attr)).isSameAs(fooAttr); + } + + @Test + public void newSslHandler_sslEngineShouldBeConfigured() throws Exception { + SslContext sslContext = SslContextBuilder.forClient().build(); + Channel channel = null; + try { + channel = new MockChannel(); + SslHandler sslHandler = NettyUtils.newSslHandler(sslContext, channel.alloc(), "localhost", 80); + SSLEngine engine = sslHandler.engine(); + assertThat(engine.getSSLParameters().getEndpointIdentificationAlgorithm()).isEqualTo("HTTPS"); + } finally { + if (channel != null) { + channel.close(); + } + } + + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolverTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolverTest.java index 24827ef211e8..2dbfefa4e46d 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolverTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolverTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/test/resources/jetty-logging.properties b/http-clients/netty-nio-client/src/test/resources/jetty-logging.properties index 9cfe41c4144e..4ee410e7fa92 100644 --- a/http-clients/netty-nio-client/src/test/resources/jetty-logging.properties +++ b/http-clients/netty-nio-client/src/test/resources/jetty-logging.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/http-clients/netty-nio-client/src/test/resources/log4j.properties b/http-clients/netty-nio-client/src/test/resources/log4j.properties index 0780928c7264..6fa311bc45f9 100644 --- a/http-clients/netty-nio-client/src/test/resources/log4j.properties +++ b/http-clients/netty-nio-client/src/test/resources/log4j.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # permissions and limitations under the License. 
# -log4j.rootLogger=ERROR, A1 +log4j.rootLogger=INFO, A1 log4j.appender.A1=org.apache.log4j.ConsoleAppender log4j.appender.A1.layout=org.apache.log4j.PatternLayout diff --git a/http-clients/netty-nio-client/src/test/resources/software/amazon/awssdk/http/netty/client1.p12 b/http-clients/netty-nio-client/src/test/resources/software/amazon/awssdk/http/netty/client1.p12 new file mode 100644 index 000000000000..a56e38c196b5 Binary files /dev/null and b/http-clients/netty-nio-client/src/test/resources/software/amazon/awssdk/http/netty/client1.p12 differ diff --git a/http-clients/netty-nio-client/src/test/resources/software/amazon/awssdk/http/netty/server-keystore b/http-clients/netty-nio-client/src/test/resources/software/amazon/awssdk/http/netty/server-keystore new file mode 100644 index 000000000000..55e8a7998c2d Binary files /dev/null and b/http-clients/netty-nio-client/src/test/resources/software/amazon/awssdk/http/netty/server-keystore differ diff --git a/http-clients/pom.xml b/http-clients/pom.xml index f05caf02db7e..f282b18ad2da 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -1,7 +1,7 @@ + 4.0.0 + + software.amazon.awssdk + metric-publishers + 2.15.62-SNAPSHOT + + + cloudwatch-metric-publisher + AWS Java SDK :: Metric Publishers :: CloudWatch + jar + + + 1.8 + + + + + software.amazon.awssdk + cloudwatch + ${awsjavasdk.version} + + + software.amazon.awssdk + annotations + ${awsjavasdk.version} + + + software.amazon.awssdk + sdk-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-core + ${awsjavasdk.version} + + + software.amazon.awssdk + http-client-spi + ${awsjavasdk.version} + + + diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisher.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisher.java new file mode 100644 index 000000000000..565ac92f55d7 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisher.java @@ -0,0 +1,581 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch; + +import static software.amazon.awssdk.metrics.publishers.cloudwatch.internal.CloudWatchMetricLogger.METRIC_LOGGER; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.MetricUploader; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.task.AggregateMetricsTask; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.task.UploadMetricsTasks; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.MetricCollectionAggregator; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; +import software.amazon.awssdk.utils.ThreadFactoryBuilder; + +/** + * An implementation of {@link MetricPublisher} that aggregates and uploads metrics to Amazon CloudWatch on a periodic basis. + * + *

    This publisher simplifies the process of uploading custom metrics to CloudWatch, and it can also be configured on AWS + * SDK clients directly to upload AWS SDK-specific metrics (e.g. request latencies, failure rates) to CloudWatch. + * + *

    Overview + * + *

    This publisher aggregates metric data in memory, and periodically uploads it to CloudWatch in a background thread. This + * minimizes the work necessary to upload metrics, allowing the caller to focus on collecting the data. + * + *

    The default settings of the metrics publisher are meant to minimize memory usage and CloudWatch cost, while still + * providing a useful amount of insight into the metric data. Care should be taken when overriding the default values on the + * publisher, because they can result in an associated increase in memory usage and CloudWatch cost. + * + *

    By default, all metrics are uploaded using summary statistics. This means that only count, maximum, minimum, sum and + * average data is available in CloudWatch. Metric details (e.g. p90, p99) can be enabled on a per-metric basis using + * {@link Builder#detailedMetrics(Collection)}. + * + *

    See {@link Builder} for the configuration values that are available for the publisher, and how they can be used to + * increase the functionality or decrease the cost of the publisher. + * + *

    Logging + * + * The CloudWatchMetricPublisher writes all aggregation and upload-related logs to the + * {@code software.amazon.awssdk.metrics.publishers.cloudwatch} namespace. To determine how many metrics are being uploaded + * successfully without checking the CloudWatch console, you can check for a "success" message at the DEBUG level. At the TRACE + * level, you can see exactly which metrics are being uploaded. + * + *
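// Illustrative sketch (not part of this diff): making the DEBUG/TRACE messages described above visible.
// This assumes the application logs through log4j 1.x, the same backend used by the test resources
// earlier in this diff; other backends such as logback or log4j2 have equivalent per-logger settings.
org.apache.log4j.Logger
    .getLogger("software.amazon.awssdk.metrics.publishers.cloudwatch")
    .setLevel(org.apache.log4j.Level.TRACE);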

    Configuring AWS SDK clients to upload client metrics + * + * TODO + * + *
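// Illustrative sketch (not part of this diff): one way an application could route AWS SDK client-side
// metrics through this publisher. The DynamoDbClient and the addMetricPublisher wiring on
// ClientOverrideConfiguration are assumptions based on the SDK v2 metrics APIs, not definitions
// introduced by this change.
MetricPublisher metricPublisher = CloudWatchMetricPublisher.create();

DynamoDbClient dynamoDb = DynamoDbClient.builder()
        .overrideConfiguration(c -> c.addMetricPublisher(metricPublisher))
        .build();

// ... make service calls; per-request metrics are aggregated and periodically uploaded to CloudWatch ...

dynamoDb.close();
metricPublisher.close(); // also closes the internally created CloudWatchAsyncClient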

    Uploading your own custom metrics + * + * Step 1: Define which metrics you wish to collect + * + *

    Metrics are described using the {@link SdkMetric#create} method. When you describe your metric, you specify + * the name that will appear in CloudWatch and the Java data-type of the metric. The metric should be described once for your + * entire application. + * + *

    Supported types: (1) {@link Number} types (e.g. {@link Integer}, {@link Double}, etc.), (2) {@link Duration}. + * + *

    + *     // In this and the following examples, we want to collect metrics about calls to a method we have defined: "myMethod"
    + *     public static final class MyMethodMetrics {
    + *         // The number of times "myMethod" has been called.
    + *         private static final SdkMetric<Integer> MY_METHOD_CALL_COUNT =
    + *                 SdkMetric.create("MyMethodCallCount", Integer.class, MetricLevel.INFO, MetricCategory.CUSTOM);
    + *
    + *         // The amount of time that "myMethod" took to execute.
    + *         private static final SdkMetric<Duration> MY_METHOD_LATENCY =
    + *                 SdkMetric.create("MyMethodLatency", Duration.class, MetricLevel.INFO, MetricCategory.CUSTOM);
    + *     }
    + * 
    + * + *

    Step 2: Create a {@code CloudWatchMetricPublisher} + * + *

    A {@code CloudWatchMetricPublisher} should be created once for your entire application, and be reused wherever it is + * needed. {@code CloudWatchMetricPublisher}s are thread-safe, so there should be no need to create multiple instances. Most + * people create and manage the publisher in their inversion-of-control (IoC) container (e.g. Spring/Dagger/Guice). + * + *

    Note: When your application is finished with the {@code CloudWatchMetricPublisher}, make sure to {@link #close()} it. Your + * inversion-of-control container may handle this for you on JVM shutdown. + * + *

    See {@link CloudWatchMetricPublisher.Builder} for all available configuration options. + * + *

    + *     // Create a CloudWatchMetricPublisher using a custom namespace.
    + *     MetricPublisher metricPublisher = CloudWatchMetricPublisher.builder()
    + *                                                                .namespace("MyApplication")
    + *                                                                .build();
    + * 
    + * + *

    Step 3: Collect and Publish Metrics + * + *

    Create and use a {@link MetricCollector} to collect data about your configured metrics. + * + *

    + *     // Call "myMethod" and collect metrics about the call.
    + *     Instant methodCallStartTime = Instant.now();
    + *     myMethod();
    + *     Duration methodCallDuration = Duration.between(methodCallStartTime, Instant.now());
    + *
    + *     // Write the metrics to the CloudWatchMetricPublisher.
    + *     MetricCollector metricCollector = MetricCollector.create("MyMethodCall");
    + *     metricCollector.reportMetric(MyMethodMetrics.MY_METHOD_CALL_COUNT, 1);
    + *     metricCollector.reportMetric(MyMethodMetrics.MY_METHOD_LATENCY, methodCallDuration);
    + *     MetricCollection metricCollection = metricCollector.collect();
    + *
    + *     metricPublisher.publish(metricCollection);
    + * 
    + * + *

    Warning: Make sure the {@link #close()} this publisher when it is done being used to release all resources it + * consumes. Failure to do so will result in possible thread or file descriptor leaks. + */ +@ThreadSafe +@Immutable +@SdkPublicApi +public final class CloudWatchMetricPublisher implements MetricPublisher { + /** + * The maximum queue size for the internal {@link #executor} that is used to aggregate metric data and upload it to + * CloudWatch. If this value is too high, memory is wasted. If this value is too low, metrics could be dropped. + * + * This value is not currently configurable, because it's unlikely that this is a value that customers should need to modify. + * If customers really need control over this value, we might consider letting them instead configure the + * {@link BlockingQueue} used on the executor. The value here depends on the type of {@code BlockingQueue} in use, and + * we should probably not indirectly couple people to the type of blocking queue we're using. + */ + private static final int MAXIMUM_TASK_QUEUE_SIZE = 128; + + private static final String DEFAULT_NAMESPACE = "AwsSdk/JavaSdk2"; + private static final int DEFAULT_MAXIMUM_CALLS_PER_UPLOAD = 10; + private static final Duration DEFAULT_UPLOAD_FREQUENCY = Duration.ofMinutes(1); + private static final Set> DEFAULT_DIMENSIONS = Stream.of(CoreMetric.SERVICE_ID, + CoreMetric.OPERATION_NAME) + .collect(Collectors.toSet()); + private static final Set DEFAULT_METRIC_CATEGORIES = Collections.singleton(MetricCategory.ALL); + private static final MetricLevel DEFAULT_METRIC_LEVEL = MetricLevel.INFO; + private static final Set> DEFAULT_DETAILED_METRICS = Collections.emptySet(); + + /** + * Whether {@link #close()} should call {@link CloudWatchAsyncClient#close()}. This is false when + * {@link Builder#cloudWatchClient(CloudWatchAsyncClient)} was specified, meaning the customer has to close the client + * themselves. + */ + private final boolean closeClientWithPublisher; + + /** + * The aggregator that takes {@link MetricCollection}s and converts them into {@link PutMetricDataRequest}s. This aggregator + * is *not* thread safe, so it should only ever be accessed from the {@link #executor}'s thread. + */ + private final MetricCollectionAggregator metricAggregator; + + /** + * The uploader that takes {@link PutMetricDataRequest}s and sends them to a {@link CloudWatchAsyncClient}. + */ + private final MetricUploader metricUploader; + + /** + * The executor that executes {@link AggregateMetricsTask}s and {@link UploadMetricsTasks}s. + */ + private final ExecutorService executor; + + /** + * A scheduled executor that periodically schedules a {@link UploadMetricsTasks} on the {@link #executor} thread. Note: this + * executor should never execute the flush task itself, because that needs access to the {@link #metricAggregator}, and the + * {@code metricAggregator} should only ever be accessed from the {@link #executor} thread. + */ + private final ScheduledExecutorService scheduledExecutor; + + /** + * The maximum number of {@link PutMetricDataRequest}s that should ever be executed as part of a single + * {@link UploadMetricsTasks}. 
+ */ + private final int maximumCallsPerUpload; + + private CloudWatchMetricPublisher(Builder builder) { + this.closeClientWithPublisher = resolveCloseClientWithPublisher(builder); + this.metricAggregator = new MetricCollectionAggregator(resolveNamespace(builder), + resolveDimensions(builder), + resolveMetricCategories(builder), + resolveMetricLevel(builder), + resolveDetailedMetrics(builder)); + this.metricUploader = new MetricUploader(resolveClient(builder)); + this.maximumCallsPerUpload = resolveMaximumCallsPerUpload(builder); + + ThreadFactory threadFactory = new ThreadFactoryBuilder().threadNamePrefix("cloud-watch-metric-publisher").build(); + this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory); + + // Do not increase above 1 thread: access to MetricCollectionAggregator is not thread safe. + this.executor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, + new ArrayBlockingQueue<>(MAXIMUM_TASK_QUEUE_SIZE), + threadFactory); + + long flushFrequencyInMillis = resolveUploadFrequency(builder).toMillis(); + this.scheduledExecutor.scheduleAtFixedRate(this::flushMetricsQuietly, + flushFrequencyInMillis, flushFrequencyInMillis, TimeUnit.MILLISECONDS); + } + + private Set resolveMetricCategories(Builder builder) { + return builder.metricCategories == null ? DEFAULT_METRIC_CATEGORIES : new HashSet<>(builder.metricCategories); + } + + private MetricLevel resolveMetricLevel(Builder builder) { + return builder.metricLevel == null ? DEFAULT_METRIC_LEVEL : builder.metricLevel; + } + + private Set> resolveDetailedMetrics(Builder builder) { + return builder.detailedMetrics == null ? DEFAULT_DETAILED_METRICS : new HashSet<>(builder.detailedMetrics); + } + + private Set> resolveDimensions(Builder builder) { + return builder.dimensions == null ? DEFAULT_DIMENSIONS : new HashSet<>(builder.dimensions); + } + + private boolean resolveCloseClientWithPublisher(Builder builder) { + return builder.client == null; + } + + private CloudWatchAsyncClient resolveClient(Builder builder) { + return builder.client == null ? CloudWatchAsyncClient.create() : builder.client; + } + + private Duration resolveUploadFrequency(Builder builder) { + return builder.uploadFrequency == null ? DEFAULT_UPLOAD_FREQUENCY : builder.uploadFrequency; + } + + private String resolveNamespace(Builder builder) { + return builder.namespace == null ? DEFAULT_NAMESPACE : builder.namespace; + } + + private int resolveMaximumCallsPerUpload(Builder builder) { + return builder.maximumCallsPerUpload == null ? DEFAULT_MAXIMUM_CALLS_PER_UPLOAD : builder.maximumCallsPerUpload; + } + + @Override + public void publish(MetricCollection metricCollection) { + try { + executor.submit(new AggregateMetricsTask(metricAggregator, metricCollection)); + } catch (RejectedExecutionException e) { + METRIC_LOGGER.warn(() -> "Some AWS SDK client-side metrics have been dropped because an internal executor did not " + + "accept them. This usually occurs because your publisher has been shut down or you have " + + "generated too many requests for the publisher to handle in a timely fashion.", e); + } + } + + /** + * Flush the metrics (via a {@link UploadMetricsTasks}). In the event that the {@link #executor} task queue is full, this + * this will retry automatically. + * + * This returns when the {@code UploadMetricsTask} has been submitted to the executor. The returned future is completed + * when the metrics upload to cloudwatch has started. The inner-most future is finally completed when the upload to cloudwatch + * has finished. 
+ */ + private Future> flushMetrics() throws InterruptedException { + while (!executor.isShutdown()) { + try { + return executor.submit(new UploadMetricsTasks(metricAggregator, metricUploader, maximumCallsPerUpload)); + } catch (RejectedExecutionException e) { + Thread.sleep(100); + } + } + + return CompletableFuture.completedFuture(CompletableFuture.completedFuture(null)); + } + + private void flushMetricsQuietly() { + try { + flushMetrics(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + METRIC_LOGGER.error(() -> "Interrupted during metric flushing.", e); + } + } + + @Override + public void close() { + try { + scheduledExecutor.shutdownNow(); + + Future> flushFuture = flushMetrics(); + executor.shutdown(); + + flushFuture.get(60, TimeUnit.SECONDS) // Wait for flush to start + .get(60, TimeUnit.SECONDS); // Wait for flush to finish + + if (!executor.awaitTermination(60, TimeUnit.SECONDS)) { + throw new TimeoutException("Internal executor did not shut down in 60 seconds."); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + METRIC_LOGGER.error(() -> "Interrupted during graceful metric publisher shutdown.", e); + } catch (ExecutionException e) { + METRIC_LOGGER.error(() -> "Failed during graceful metric publisher shutdown.", e); + } catch (TimeoutException e) { + METRIC_LOGGER.error(() -> "Timed out during graceful metric publisher shutdown.", e); + } finally { + runQuietly(scheduledExecutor::shutdownNow, "shutting down scheduled executor"); + runQuietly(executor::shutdownNow, "shutting down executor"); + runQuietly(() -> metricUploader.close(closeClientWithPublisher), "closing metric uploader"); + } + } + + private void runQuietly(Runnable runnable, String taskName) { + try { + runnable.run(); + } catch (Exception e) { + METRIC_LOGGER.warn(() -> "Failed while " + taskName + ".", e); + } + } + + /** + * Create a new {@link Builder} that can be used to create {@link CloudWatchMetricPublisher}s. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Create a {@link CloudWatchMetricPublisher} using all default values. + */ + public static CloudWatchMetricPublisher create() { + return builder().build(); + } + + /** + * Returns {@code true} when the internal executors for this publisher are shut down. + */ + boolean isShutdown() { + return scheduledExecutor.isShutdown() && executor.isShutdown(); + } + + /** + * Builder class to construct {@link CloudWatchMetricPublisher} instances. See the individual properties for which + * configuration settings are available. + */ + public static final class Builder { + private CloudWatchAsyncClient client; + private Duration uploadFrequency; + private String namespace; + private Integer maximumCallsPerUpload; + private Collection> dimensions; + private Collection metricCategories; + private MetricLevel metricLevel; + private Collection> detailedMetrics; + + private Builder() { + } + + /** + * Configure the {@link PutMetricDataRequest#namespace()} used for all put-metric-data calls from this publisher. + * + *

    If this is not specified, {@code AwsSdk/JavaSdk2} will be used. + */ + public Builder namespace(String namespace) { + this.namespace = namespace; + return this; + } + + /** + * Configure the {@link CloudWatchAsyncClient} instance that should be used to communicate with CloudWatch. + * + *

    If this is not specified, the {@code CloudWatchAsyncClient} will be created via + * {@link CloudWatchAsyncClient#create()} (and will be closed when {@link #close()} is invoked). + * + *

    If you specify a {@code CloudWatchAsyncClient} via this method, it will not be closed when this publisher + * is closed. You will need to manage the lifecycle of the client yourself. + */ + public Builder cloudWatchClient(CloudWatchAsyncClient client) { + this.client = client; + return this; + } + + /** + * Configure the frequency at which aggregated metrics are uploaded to CloudWatch and released from memory. + * + *

    If this is not specified, metrics will be uploaded once per minute. + * + *

    Smaller values will: (1) reduce the amount of memory used by the library (particularly when + * {@link #detailedMetrics(Collection)} are enabled), (2) increase the number of CloudWatch calls (and therefore + * increase CloudWatch usage cost). + * + *

    Larger values will: (1) increase the amount of memory used by the library (particularly when + * {@code detailedMetrics} are enabled), (2) increase the time it takes for metric data to appear in + * CloudWatch, (3) reduce the number of CloudWatch calls (and therefore decrease CloudWatch usage cost). + * + *

    Warning: When {@code detailedMetrics} are enabled, all unique metric values are stored in memory until they + * can be published to CloudWatch. A long {@code uploadFrequency} (that is, infrequent uploads) with multiple {@code detailedMetrics} enabled can + * quickly consume heap memory while the values wait to be published to CloudWatch. In memory-constrained environments, it + * is recommended to minimize the number of {@code detailedMetrics} configured on the publisher, or to upload metric data + * more frequently. As with all performance and resource concerns, profiling in a production-like environment is + * encouraged. + */ + public Builder uploadFrequency(Duration uploadFrequency) { + this.uploadFrequency = uploadFrequency; + return this; + } + + /** + * Configure the maximum number of {@link CloudWatchAsyncClient#putMetricData(PutMetricDataRequest)} calls that an + * individual "upload" event can make to CloudWatch. Any metrics that would exceed this limit are dropped during the + * upload, logging a warning on the {@code software.amazon.awssdk.metrics.publishers.cloudwatch} namespace. + * + *

    The SDK will always attempt to maximize the number of metrics per put-metric-data call, but uploads will be split + * into multiple put-metric-data calls if they include a lot of different metrics or if there are a lot of high-value- + * distribution {@link #detailedMetrics(Collection)} being monitored. + * + *

    This value, combined with the {@link #uploadFrequency(Duration)}, effectively provides a "hard cap" on the number of + * put-metric-data calls, to prevent unbounded cost in the event that too many metrics are enabled by the user. + * + *

    If this is not specified, put-metric-data calls will be capped at 10 per upload. + */ + public Builder maximumCallsPerUpload(Integer maximumCallsPerUpload) { + this.maximumCallsPerUpload = maximumCallsPerUpload; + return this; + } + + /** + * Configure the {@link SdkMetric}s that are used to define the {@link Dimension}s metrics are aggregated under. + * + *

    If this is not specified, {@link CoreMetric#SERVICE_ID} and {@link CoreMetric#OPERATION_NAME} are used, allowing + * you to compare metrics for different services and operations. + * + *

    Warning: Configuring the dimensions incorrectly can result in a large increase in the number of unique + * metrics and put-metric-data calls to CloudWatch, which have an associated monetary cost. Choose your + * metric dimensions carefully, and always evaluate the impact of modifying these values on your monthly usage costs. + * + *

    Example useful settings: + *

      + *
    • {@code CoreMetric.SERVICE_ID} and {@code CoreMetric.OPERATION_NAME} (default): Separate metrics by service and + * operation, so that you can compare latencies between AWS services and operations.
    • + *
    • {@code CoreMetric.SERVICE_ID}, {@code CoreMetric.OPERATION_NAME} and {@code CoreMetric.HOST_NAME}: Separate + * metrics by service, operation and host so that you can compare latencies across hosts in your fleet. Note: This should + * only be used when your fleet is relatively small. Large fleets result in a large number of unique metrics being + * generated.
    • + *
    • {@code CoreMetric.SERVICE_ID}, {@code CoreMetric.OPERATION_NAME} and {@code HttpMetric.HTTP_CLIENT_NAME}: Separate + * metrics by service, operation and HTTP client type so that you can compare latencies between different HTTP client + * implementations.
    • + *
    + */ + public Builder dimensions(Collection> dimensions) { + this.dimensions = new ArrayList<>(dimensions); + return this; + } + + /** + * @see #dimensions(SdkMetric[]) + */ + @SafeVarargs + public final Builder dimensions(SdkMetric... dimensions) { + return dimensions(Arrays.asList(dimensions)); + } + + /** + * Configure the {@link MetricCategory}s that should be uploaded to CloudWatch. + * + *

    If this is not specified, {@link MetricCategory#ALL} is used. + * + *

    All {@link SdkMetric}s are associated with at least one {@code MetricCategory}. This setting determines which + * categories of metrics are uploaded to CloudWatch. Any metrics {@link #publish(MetricCollection)}ed that do not fall under + * these configured categories are ignored. + * + *

    Note: If there are {@link #dimensions(Collection)} configured that do not fall under these {@code MetricCategory} + * values, the dimensions will NOT be ignored. In other words, the metric category configuration only affects which + * metrics are uploaded to CloudWatch, not which values can be used for {@code dimensions}. + */ + public Builder metricCategories(Collection metricCategories) { + this.metricCategories = new ArrayList<>(metricCategories); + return this; + } + + /** + * @see #metricCategories(Collection) + */ + public Builder metricCategories(MetricCategory... metricCategories) { + return metricCategories(Arrays.asList(metricCategories)); + } + + /** + * Configure the {@link MetricLevel} that should be uploaded to CloudWatch. + * + *

    If this is not specified, {@link MetricLevel#INFO} is used. + * + *

    All {@link SdkMetric}s are associated with one {@code MetricLevel}. This setting determines which levels of metrics are + * uploaded to CloudWatch. Any metrics {@link #publish(MetricCollection)}ed that do not meet this configured + * level are ignored. + * + *

    Note: If there are {@link #dimensions(Collection)} configured that do not fall under this {@code MetricLevel} + * setting, the dimensions will NOT be ignored. In other words, the metric level configuration only affects which + * metrics are uploaded to CloudWatch, not which values can be used for {@code dimensions}. + */ + public Builder metricLevel(MetricLevel metricLevel) { + this.metricLevel = metricLevel; + return this; + } + + /** + * Configure the set of metrics for which detailed values and counts are uploaded to CloudWatch, instead of summaries. + * + *

    By default, all metrics published to this publisher are summarized using {@link StatisticSet}s. This saves memory, + * because it allows the publisher to store a fixed amount of information in memory, no matter how many different metric + * values are published. The drawback is that metrics other than count, sum, average, maximum and minimum are not made + * available in CloudWatch. The {@code detailedMetrics} setting instructs the publisher to store and publish itemized + * {@link MetricDatum#values()} and {@link MetricDatum#counts()}, which enables other metrics like p90 and p99 to be + * queried in CloudWatch. + * + *
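// Illustrative sketch (not part of this diff): a publisher configured with detailed metrics and explicit
// dimensions using the builder options documented in this class. CoreMetric.API_CALL_DURATION is assumed
// to be an SDK-provided duration metric (it is referenced elsewhere in this change); the builder methods
// are the ones defined here.
MetricPublisher publisher =
    CloudWatchMetricPublisher.builder()
                             .namespace("MyApplication")
                             .uploadFrequency(Duration.ofMinutes(5))
                             .maximumCallsPerUpload(20)
                             .dimensions(CoreMetric.SERVICE_ID, CoreMetric.OPERATION_NAME)
                             .detailedMetrics(CoreMetric.API_CALL_DURATION)
                             .build();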

    Warning: When {@code detailedMetrics} are enabled, all unique metric values are stored in memory until they + * can be published to CloudWatch. A long {@code uploadFrequency} (that is, infrequent uploads) with multiple {@code detailedMetrics} enabled can + * quickly consume heap memory while the values wait to be published to CloudWatch. In memory-constrained environments, it + * is recommended to minimize the number of {@code detailedMetrics} configured on the publisher, or to upload metric data + * more frequently. As with all performance and resource concerns, profiling in a production-like environment is + * encouraged. + * + *

    In addition to additional heap memory usage, detailed metrics can result in more requests being sent to CloudWatch, + * which can also introduce additional usage cost. The {@link #maximumCallsPerUpload(Integer)} acts as a safeguard against + * too many calls being made, but if you configure multiple {@code detailedMetrics}, you may need to increase the + * {@code maximumCallsPerUpload} limit. + */ + public Builder detailedMetrics(Collection> detailedMetrics) { + this.detailedMetrics = new ArrayList<>(detailedMetrics); + return this; + } + + /** + * @see #detailedMetrics(Collection) + */ + public Builder detailedMetrics(SdkMetric... detailedMetrics) { + return detailedMetrics(Arrays.asList(detailedMetrics)); + } + + /** + * Build a {@link CloudWatchMetricPublisher} using the configuration currently configured on this publisher. + */ + public CloudWatchMetricPublisher build() { + return new CloudWatchMetricPublisher(this); + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/CloudWatchMetricLogger.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/CloudWatchMetricLogger.java new file mode 100644 index 000000000000..e161df64cb60 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/CloudWatchMetricLogger.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Logger; + +/** + * A holder for {@link #METRIC_LOGGER}. + */ +@SdkInternalApi +public class CloudWatchMetricLogger { + /** + * The logger via which all cloudwatch-metric-publisher logs are written. This allows customers to easily enable/disable logs + * written from this module. + */ + public static final Logger METRIC_LOGGER = Logger.loggerFor("software.amazon.awssdk.metrics.publishers.cloudwatch"); + + private CloudWatchMetricLogger() { + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploader.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploader.java new file mode 100644 index 000000000000..b77398989fa3 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploader.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal; + +import static software.amazon.awssdk.metrics.publishers.cloudwatch.internal.CloudWatchMetricLogger.METRIC_LOGGER; + +import java.util.List; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; + +/** + * Uploads {@link PutMetricDataRequest}s to a {@link CloudWatchAsyncClient}, logging whether it was successful or a failure to + * the {@link CloudWatchMetricLogger#METRIC_LOGGER}. + */ +@SdkInternalApi +public class MetricUploader { + private final CloudWatchAsyncClient cloudWatchClient; + + public MetricUploader(CloudWatchAsyncClient cloudWatchClient) { + this.cloudWatchClient = cloudWatchClient; + } + + /** + * Upload the provided list of requests to CloudWatch, completing the returned future when the uploads complete. Note: This + * will log a message if one of the provided requests fails. + */ + public CompletableFuture upload(List requests) { + CompletableFuture[] publishResults = startCalls(requests); + return CompletableFuture.allOf(publishResults).whenComplete((r, t) -> { + int numRequests = publishResults.length; + if (t != null) { + METRIC_LOGGER.warn(() -> "Failed while publishing some or all AWS SDK client-side metrics to CloudWatch.", t); + } else { + METRIC_LOGGER.debug(() -> "Successfully published " + numRequests + + " AWS SDK client-side metric requests to CloudWatch."); + } + }); + } + + private CompletableFuture[] startCalls(List requests) { + return requests.stream() + .peek(this::logRequest) + .map(cloudWatchClient::putMetricData) + .toArray(CompletableFuture[]::new); + } + + private void logRequest(PutMetricDataRequest putMetricDataRequest) { + METRIC_LOGGER.trace(() -> "Sending request to CloudWatch: " + putMetricDataRequest); + } + + public void close(boolean closeClient) { + if (closeClient) { + this.cloudWatchClient.close(); + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/AggregateMetricsTask.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/AggregateMetricsTask.java new file mode 100644 index 000000000000..f7c997795efb --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/AggregateMetricsTask.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.task; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.publishers.cloudwatch.CloudWatchMetricPublisher; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.MetricCollectionAggregator; + +/** + * A task that is executed on the {@link CloudWatchMetricPublisher}'s executor to add a {@link MetricCollection} to a + * {@link MetricCollectionAggregator}. + */ +@SdkInternalApi +public class AggregateMetricsTask implements Runnable { + private final MetricCollectionAggregator collectionAggregator; + private final MetricCollection metricCollection; + + public AggregateMetricsTask(MetricCollectionAggregator collectionAggregator, + MetricCollection metricCollection) { + this.collectionAggregator = collectionAggregator; + this.metricCollection = metricCollection; + } + + @Override + public void run() { + collectionAggregator.addCollection(metricCollection); + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasks.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasks.java new file mode 100644 index 000000000000..70081c158598 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasks.java @@ -0,0 +1,65 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.task; + +import static software.amazon.awssdk.metrics.publishers.cloudwatch.internal.CloudWatchMetricLogger.METRIC_LOGGER; + +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.publishers.cloudwatch.CloudWatchMetricPublisher; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.MetricUploader; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.MetricCollectionAggregator; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.utils.CompletableFutureUtils; + +/** + * A task that is executed on the {@link CloudWatchMetricPublisher}'s executor to collect requests from a + * {@link MetricCollectionAggregator} and write them to a {@link MetricUploader}. 
+ */ +@SdkInternalApi +public class UploadMetricsTasks implements Callable> { + private final MetricCollectionAggregator collectionAggregator; + private final MetricUploader uploader; + private int maximumRequestsPerFlush; + + public UploadMetricsTasks(MetricCollectionAggregator collectionAggregator, + MetricUploader uploader, + int maximumRequestsPerFlush) { + this.collectionAggregator = collectionAggregator; + this.uploader = uploader; + this.maximumRequestsPerFlush = maximumRequestsPerFlush; + } + + @Override + public CompletableFuture call() { + try { + List allRequests = collectionAggregator.getRequests(); + List requests = allRequests; + if (requests.size() > maximumRequestsPerFlush) { + METRIC_LOGGER.warn(() -> "Maximum AWS SDK client-side metric call count exceeded: " + allRequests.size() + + " > " + maximumRequestsPerFlush + ". Some metric requests will be dropped. This occurs " + + "when the caller has configured too many metrics or too unique of dimensions without " + + "an associated increase in the maximum-calls-per-upload configured on the publisher."); + requests = requests.subList(0, maximumRequestsPerFlush); + } + return uploader.upload(requests); + } catch (Throwable t) { + return CompletableFutureUtils.failedFuture(t); + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/DetailedMetricAggregator.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/DetailedMetricAggregator.java new file mode 100644 index 000000000000..4ec56a053750 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/DetailedMetricAggregator.java @@ -0,0 +1,87 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; + +/** + * An implementation of {@link MetricAggregator} that stores all values and counts for a given metric/dimension pair + * until they can be added to a {@link MetricDatum}. 
+ */ +@SdkInternalApi +class DetailedMetricAggregator implements MetricAggregator { + private final SdkMetric metric; + private final List dimensions; + private final StandardUnit unit; + + private final Map metricDetails = new HashMap<>(); + + DetailedMetricAggregator(MetricAggregatorKey key, StandardUnit unit) { + this.metric = key.metric(); + this.dimensions = key.dimensions(); + this.unit = unit; + } + + @Override + public SdkMetric metric() { + return metric; + } + + @Override + public List dimensions() { + return dimensions; + } + + @Override + public void addMetricValue(double value) { + metricDetails.computeIfAbsent(value, v -> new DetailedMetrics(value)).metricCount++; + } + + @Override + public StandardUnit unit() { + return unit; + } + + public Collection detailedMetrics() { + return Collections.unmodifiableCollection(metricDetails.values()); + } + + public static class DetailedMetrics { + private final double metricValue; + private int metricCount = 0; + + private DetailedMetrics(double metricValue) { + this.metricValue = metricValue; + } + + public double metricValue() { + return metricValue; + } + + public int metricCount() { + return metricCount; + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregator.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregator.java new file mode 100644 index 000000000000..9f088ad25c57 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregator.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import java.util.Collection; +import java.util.List; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.publishers.cloudwatch.CloudWatchMetricPublisher; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; + +/** + * Used by {@link MetricCollectionAggregator} to aggregate metrics in memory until they are ready to be added to a + * {@link MetricDatum}. + * + *
<p>
    This is either a {@link SummaryMetricAggregator} or a {@link DetailedMetricAggregator}, depending on the configured + * {@link CloudWatchMetricPublisher.Builder#detailedMetrics(Collection)} setting. + */ +@SdkInternalApi +interface MetricAggregator { + /** + * The metric that this aggregator is aggregating. For example, this may be aggregating {@link CoreMetric#API_CALL_DURATION} + * metric values. There may be multiple aggregators for a single type of metric, when their {@link #dimensions()} differ. + */ + SdkMetric metric(); + + /** + * The dimensions associated with the metric values that this aggregator is aggregating. For example, this may be aggregating + * "S3's putObject" metrics or "DynamoDb's listTables" metrics. The exact metric being aggregated is available via + * {@link #metric()}. + */ + List dimensions(); + + /** + * Get the unit of the {@link #metric()} when it is published to CloudWatch. + */ + StandardUnit unit(); + + /** + * Add the provided metric value to this aggregator. + */ + void addMetricValue(double value); + + /** + * Execute the provided consumer if this {@code MetricAggregator} is a {@link SummaryMetricAggregator}. + */ + default void ifSummary(Consumer summaryConsumer) { + if (this instanceof SummaryMetricAggregator) { + summaryConsumer.accept((SummaryMetricAggregator) this); + } + } + + /** + * Execute the provided consumer if this {@code MetricAggregator} is a {@link DetailedMetricAggregator}. + */ + default void ifDetailed(Consumer detailsConsumer) { + if (this instanceof DetailedMetricAggregator) { + detailsConsumer.accept((DetailedMetricAggregator) this); + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregatorKey.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregatorKey.java new file mode 100644 index 000000000000..5c07b7744065 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregatorKey.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; + +/** + * A pairing of {@link SdkMetric} and {@link Dimension}s that can be used as a key in a map. This uniquely identifies a specific + * {@link MetricAggregator}. 
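 *
 * <p>Illustrative only (the metric and dimension values are arbitrary): keys built from the same metric and the same
 * dimension list are equal and hash identically, so they resolve to the same aggregator when used as a map key.
 * <pre>{@code
 * List<Dimension> dims = Collections.singletonList(
 *     Dimension.builder().name("ServiceId").value("S3").build());
 * MetricAggregatorKey first = new MetricAggregatorKey(CoreMetric.API_CALL_DURATION, dims);
 * MetricAggregatorKey second = new MetricAggregatorKey(CoreMetric.API_CALL_DURATION, dims);
 * // first.equals(second) && first.hashCode() == second.hashCode()
 * }</pre>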
+ */ +@SdkInternalApi +class MetricAggregatorKey { + private final SdkMetric metric; + private final List dimensions; + + MetricAggregatorKey(SdkMetric metric, List dimensions) { + this.metric = metric; + this.dimensions = dimensions; + } + + public final SdkMetric metric() { + return this.metric; + } + + public final List dimensions() { + return this.dimensions; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + MetricAggregatorKey that = (MetricAggregatorKey) o; + + if (!metric.equals(that.metric)) { + return false; + } + return dimensions.equals(that.dimensions); + } + + @Override + public int hashCode() { + int result = metric.hashCode(); + result = 31 * result + dimensions.hashCode(); + return result; + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregator.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregator.java new file mode 100644 index 000000000000..9a00b2d8fa04 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregator.java @@ -0,0 +1,215 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.ApiName; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.DetailedMetricAggregator.DetailedMetrics; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; + +/** + * Aggregates {@link MetricCollection}s by: (1) the minute in which they occurred, and (2) the dimensions in the collection + * associated with that metric. Allows retrieving the aggregated values as a list of {@link PutMetricDataRequest}s. + * + *
<p>
    It would be too expensive to upload every {@code MetricCollection} as a unique {@code PutMetricDataRequest}, so this + * class aggregates the data so that multiple {@code MetricCollection}s can be placed in the same {@code PutMetricDataRequest}. + * + *
<p>
    Warning: This class is *not* thread-safe. + */ +@SdkInternalApi +@NotThreadSafe +public class MetricCollectionAggregator { + /** + * The maximum number of {@link MetricDatum}s allowed in {@link PutMetricDataRequest#metricData()}. This limit is imposed by + * CloudWatch. + */ + public static final int MAX_METRIC_DATA_PER_REQUEST = 20; + + /** + * The maximum number of unique {@link MetricDatum#values()} allowed in a single {@link PutMetricDataRequest}. This limit is + * not imposed directly by CloudWatch, but they do impose a 40KB limit for a single request. This value was determined by + * trial-and-error to roughly equate to a 40KB limit when we are also at the {@link #MAX_METRIC_DATA_PER_REQUEST}. + */ + public static final int MAX_VALUES_PER_REQUEST = 300; + + /** + * The API name to include in the user agent for all {@link PutMetricDataRequest}s generated by this aggregator. + */ + private static final ApiName API_NAME = ApiName.builder().name("hll").version("cw-mp").build(); + + /** + * The {@link PutMetricDataRequest#namespace()} that should be used for all {@link PutMetricDataRequest}s returned from + * {@link #getRequests()}. + */ + private final String namespace; + + /** + * The {@link TimeBucketedMetrics} that actually performs the data aggregation whenever + * {@link #addCollection(MetricCollection)} is called. + */ + private final TimeBucketedMetrics timeBucketedMetrics; + + public MetricCollectionAggregator(String namespace, + Set> dimensions, + Set metricCategories, + MetricLevel metricLevel, + Set> detailedMetrics) { + this.namespace = namespace; + this.timeBucketedMetrics = new TimeBucketedMetrics(dimensions, metricCategories, metricLevel, detailedMetrics); + } + + /** + * Add a collection to this aggregator. + */ + public void addCollection(MetricCollection collection) { + timeBucketedMetrics.addMetrics(collection); + } + + /** + * Get all {@link PutMetricDataRequest}s that can be generated from the data that was added via + * {@link #addCollection(MetricCollection)}. This method resets the state of this {@code MetricCollectionAggregator}. 
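     *
     * <p>Illustrative usage only; the CloudWatch client wiring is assumed. A flush effectively does:
     * <pre>{@code
     * aggregator.addCollection(someMetricCollection);
     * List<PutMetricDataRequest> requests = aggregator.getRequests(); // also clears the aggregated state
     * requests.forEach(cloudWatchAsyncClient::putMetricData);
     * }</pre>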
+ */ + public List getRequests() { + List requests = new ArrayList<>(); + + List requestMetricDatums = new ArrayList<>(); + ValuesInRequestCounter valuesInRequestCounter = new ValuesInRequestCounter(); + + Map> metrics = timeBucketedMetrics.timeBucketedMetrics(); + + for (Map.Entry> entry : metrics.entrySet()) { + Instant timeBucket = entry.getKey(); + for (MetricAggregator metric : entry.getValue()) { + if (requestMetricDatums.size() >= MAX_METRIC_DATA_PER_REQUEST) { + requests.add(newPutRequest(requestMetricDatums)); + requestMetricDatums.clear(); + } + + metric.ifSummary(summaryAggregator -> requestMetricDatums.add(summaryMetricDatum(timeBucket, summaryAggregator))); + + metric.ifDetailed(detailedAggregator -> { + int startIndex = 0; + Collection detailedMetrics = detailedAggregator.detailedMetrics(); + + while (startIndex < detailedMetrics.size()) { + if (valuesInRequestCounter.get() >= MAX_VALUES_PER_REQUEST) { + requests.add(newPutRequest(requestMetricDatums)); + requestMetricDatums.clear(); + valuesInRequestCounter.reset(); + } + + MetricDatum data = detailedMetricDatum(timeBucket, detailedAggregator, + startIndex, MAX_VALUES_PER_REQUEST - valuesInRequestCounter.get()); + int valuesAdded = data.values().size(); + startIndex += valuesAdded; + valuesInRequestCounter.add(valuesAdded); + requestMetricDatums.add(data); + } + }); + } + } + + if (!requestMetricDatums.isEmpty()) { + requests.add(newPutRequest(requestMetricDatums)); + } + + timeBucketedMetrics.reset(); + + return requests; + } + + private MetricDatum detailedMetricDatum(Instant timeBucket, + DetailedMetricAggregator metric, + int metricStartIndex, + int maxElements) { + List values = new ArrayList<>(); + List counts = new ArrayList<>(); + + Stream boundedMetrics = metric.detailedMetrics() + .stream() + .skip(metricStartIndex) + .limit(maxElements); + + boundedMetrics.forEach(detailedMetrics -> { + values.add(MetricValueNormalizer.normalize(detailedMetrics.metricValue())); + counts.add((double) detailedMetrics.metricCount()); + }); + + return MetricDatum.builder() + .timestamp(timeBucket) + .metricName(metric.metric().name()) + .dimensions(metric.dimensions()) + .unit(metric.unit()) + .values(values) + .counts(counts) + .build(); + } + + private MetricDatum summaryMetricDatum(Instant timeBucket, + SummaryMetricAggregator metric) { + StatisticSet stats = StatisticSet.builder() + .minimum(MetricValueNormalizer.normalize(metric.min())) + .maximum(MetricValueNormalizer.normalize(metric.max())) + .sum(MetricValueNormalizer.normalize(metric.sum())) + .sampleCount((double) metric.count()) + .build(); + return MetricDatum.builder() + .timestamp(timeBucket) + .metricName(metric.metric().name()) + .dimensions(metric.dimensions()) + .unit(metric.unit()) + .statisticValues(stats) + .build(); + } + + private PutMetricDataRequest newPutRequest(List metricData) { + return PutMetricDataRequest.builder() + .overrideConfiguration(r -> r.addApiName(API_NAME)) + .namespace(namespace) + .metricData(metricData) + .build(); + } + + private static class ValuesInRequestCounter { + private int valuesInRequest; + + private void add(int i) { + valuesInRequest += i; + } + + private int get() { + return valuesInRequest; + } + + private void reset() { + valuesInRequest = 0; + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricValueNormalizer.java 
b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricValueNormalizer.java new file mode 100644 index 000000000000..2767c39379a9 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricValueNormalizer.java @@ -0,0 +1,45 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +class MetricValueNormalizer { + /** + * Really small values (close to 0) result in CloudWatch failing with an "unsupported value" error. Make sure that we floor + * those values to 0 to prevent that error. + */ + private static final double ZERO_THRESHOLD = 0.0001; + + private MetricValueNormalizer() { + } + + /** + * Normalizes a metric value so that it won't upset CloudWatch when it is uploaded. + */ + public static double normalize(double value) { + if (value > ZERO_THRESHOLD) { + return value; + } + + if (value < -ZERO_THRESHOLD) { + return value; + } + + return 0; + } +} \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/SummaryMetricAggregator.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/SummaryMetricAggregator.java new file mode 100644 index 000000000000..3da4b87ed8f0 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/SummaryMetricAggregator.java @@ -0,0 +1,84 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; + +/** + * An implementation of {@link MetricAggregator} that stores summary statistics for a given metric/dimension pair until the + * summary can be added to a {@link MetricDatum}. + */ +@SdkInternalApi +class SummaryMetricAggregator implements MetricAggregator { + private final SdkMetric metric; + private final List dimensions; + private final StandardUnit unit; + + private double min = Double.MAX_VALUE; + private double max = Double.MIN_VALUE; + private double sum = 0; + private int count = 0; + + SummaryMetricAggregator(MetricAggregatorKey key, StandardUnit unit) { + this.metric = key.metric(); + this.dimensions = key.dimensions(); + this.unit = unit; + } + + @Override + public SdkMetric metric() { + return metric; + } + + @Override + public List dimensions() { + return dimensions; + } + + @Override + public void addMetricValue(double value) { + min = Double.min(value, min); + max = Double.max(value, max); + sum += value; + ++count; + } + + @Override + public StandardUnit unit() { + return unit; + } + + public double min() { + return min; + } + + public double max() { + return max; + } + + public double sum() { + return sum; + } + + public int count() { + return count; + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/TimeBucketedMetrics.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/TimeBucketedMetrics.java new file mode 100644 index 000000000000..949f16a01504 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/TimeBucketedMetrics.java @@ -0,0 +1,226 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import static java.time.temporal.ChronoUnit.MINUTES; + +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; + +/** + * "Buckets" metrics by the minute in which they were collected. This allows all metric data for a given 1-minute period to be + * aggregated under a specific {@link MetricAggregator}. + */ +@SdkInternalApi +class TimeBucketedMetrics { + /** + * A map from "the minute during which a metric value happened" to "the dimension and metric associated with the metric + * values" to "the aggregator for the metric values that occurred within that minute and for that dimension/metric". + */ + private final Map> timeBucketedMetrics = new HashMap<>(); + + /** + * The dimensions that should be used for aggregating metrics that occur within a given minute. These are optional values. + * The dimensions will be used if a {@link MetricCollection} includes them, but if it does not, it will be aggregated with + * whatever dimensions (if any) are available. + */ + private final Set> dimensions; + + /** + * The set of metrics for which {@link DetailedMetricAggregator}s should be used for aggregation. All other metrics will use + * a {@link SummaryMetricAggregator}. + */ + private final Set> detailedMetrics; + + /** + * The metric categories for which we should aggregate values. Any categories outside of this set will have their values + * ignored/dropped. + */ + private final Set metricCategories; + + /** + * The metric levels for which we should aggregate values. Any categories at a more "verbose" level than this one will have + * their values ignored/dropped. + */ + private final MetricLevel metricLevel; + + /** + * True, when the {@link #metricCategories} contains {@link MetricCategory#ALL}. + */ + private final boolean metricCategoriesContainsAll; + + + + TimeBucketedMetrics(Set> dimensions, + Set metricCategories, + MetricLevel metricLevel, + Set> detailedMetrics) { + this.dimensions = dimensions; + this.detailedMetrics = detailedMetrics; + this.metricCategories = metricCategories; + this.metricLevel = metricLevel; + this.metricCategoriesContainsAll = metricCategories.contains(MetricCategory.ALL); + } + + /** + * Add the provided collection to the proper bucket, based on the metric collection's time. + */ + public void addMetrics(MetricCollection metrics) { + Instant bucket = getBucket(metrics); + addMetricsToBucket(metrics, bucket); + } + + /** + * Reset this bucket, clearing all stored values. + */ + public void reset() { + timeBucketedMetrics.clear(); + } + + /** + * Retrieve all values in this collection. The map key is the minute in which the metric values were collected, and the + * map value are all of the metrics that were aggregated during that minute. 
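     *
     * <p>Illustrative only: buckets are formed by truncating each collection's creation time to the minute, so two
     * collections created at 12:00:01Z and 12:00:59Z share a single bucket.
     * <pre>{@code
     * Instant.parse("2020-01-01T12:00:59Z").truncatedTo(ChronoUnit.MINUTES); // 2020-01-01T12:00:00Z
     * }</pre>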
+ */ + public Map> timeBucketedMetrics() { + return timeBucketedMetrics.entrySet() + .stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue().values())); + } + + private Instant getBucket(MetricCollection metrics) { + return metrics.creationTime().truncatedTo(MINUTES); + } + + private void addMetricsToBucket(MetricCollection metrics, Instant bucketId) { + aggregateMetrics(metrics, timeBucketedMetrics.computeIfAbsent(bucketId, i -> new HashMap<>())); + } + + private void aggregateMetrics(MetricCollection metrics, Map bucket) { + List dimensions = dimensions(metrics); + extractAllMetrics(metrics).forEach(metricRecord -> { + MetricAggregatorKey aggregatorKey = new MetricAggregatorKey(metricRecord.metric(), dimensions); + valueFor(metricRecord).ifPresent(metricValue -> { + bucket.computeIfAbsent(aggregatorKey, m -> newAggregator(aggregatorKey)) + .addMetricValue(MetricValueNormalizer.normalize(metricValue)); + }); + }); + } + + private List dimensions(MetricCollection metricCollection) { + List result = new ArrayList<>(); + for (MetricRecord metricRecord : metricCollection) { + if (dimensions.contains(metricRecord.metric())) { + result.add(Dimension.builder() + .name(metricRecord.metric().name()) + .value((String) metricRecord.value()) + .build()); + } + } + + // Sort the dimensions to make sure that the order in the input metric collection doesn't affect the result. + // We use descending order just so that "ServiceName" is before "OperationName" when we use the default dimensions. + result.sort(Comparator.comparing(Dimension::name).reversed()); + return result; + } + + private List> extractAllMetrics(MetricCollection metrics) { + List> result = new ArrayList<>(); + extractAllMetrics(metrics, result); + return result; + } + + private void extractAllMetrics(MetricCollection metrics, List> extractedMetrics) { + for (MetricRecord metric : metrics) { + extractedMetrics.add(metric); + } + metrics.children().forEach(child -> extractAllMetrics(child, extractedMetrics)); + } + + private MetricAggregator newAggregator(MetricAggregatorKey aggregatorKey) { + SdkMetric metric = aggregatorKey.metric(); + StandardUnit metricUnit = unitFor(metric); + if (detailedMetrics.contains(metric)) { + return new DetailedMetricAggregator(aggregatorKey, metricUnit); + } else { + return new SummaryMetricAggregator(aggregatorKey, metricUnit); + } + } + + private StandardUnit unitFor(SdkMetric metric) { + Class metricType = metric.valueClass(); + + if (Duration.class.isAssignableFrom(metricType)) { + return StandardUnit.MILLISECONDS; + } + + return StandardUnit.NONE; + } + + private Optional valueFor(MetricRecord metricRecord) { + if (!shouldReport(metricRecord)) { + return Optional.empty(); + } + + Class metricType = metricRecord.metric().valueClass(); + + if (Duration.class.isAssignableFrom(metricType)) { + Duration durationMetricValue = (Duration) metricRecord.value(); + long millis = durationMetricValue.toMillis(); + return Optional.of((double) millis); + } else if (Number.class.isAssignableFrom(metricType)) { + Number numberMetricValue = (Number) metricRecord.value(); + return Optional.of(numberMetricValue.doubleValue()); + } else if (Boolean.class.isAssignableFrom(metricType)) { + Boolean booleanMetricValue = (Boolean) metricRecord.value(); + return Optional.of(booleanMetricValue ? 
1.0 : 0.0); + } + + return Optional.empty(); + } + + private boolean shouldReport(MetricRecord metricRecord) { + return isSupportedCategory(metricRecord) && isSupportedLevel(metricRecord); + } + + private boolean isSupportedCategory(MetricRecord metricRecord) { + return metricCategoriesContainsAll || + metricRecord.metric() + .categories() + .stream() + .anyMatch(metricCategories::contains); + } + + private boolean isSupportedLevel(MetricRecord metricRecord) { + return metricLevel.includesLevel(metricRecord.metric().level()); + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisherTest.java b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisherTest.java new file mode 100644 index 000000000000..d82aa1cc0ed0 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisherTest.java @@ -0,0 +1,298 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.never; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.MetricCollectionAggregator; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataResponse; + +public class CloudWatchMetricPublisherTest { + private CloudWatchAsyncClient cloudWatch; + + private CloudWatchMetricPublisher.Builder publisherBuilder; + + @Before + public void setup() { + cloudWatch = Mockito.mock(CloudWatchAsyncClient.class); + publisherBuilder = CloudWatchMetricPublisher.builder() + .cloudWatchClient(cloudWatch) + .uploadFrequency(Duration.ofMinutes(60)); + + 
Mockito.when(cloudWatch.putMetricData(any(PutMetricDataRequest.class))) + .thenReturn(CompletableFuture.completedFuture(PutMetricDataResponse.builder().build())); + } + + @Test + public void noMetricsNoCalls() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.build()) { + publisher.publish(MetricCollector.create("test").collect()); + } + assertNoPutMetricCalls(); + } + + @Test + public void interruptedShutdownStillTerminates() { + CloudWatchMetricPublisher publisher = publisherBuilder.build(); + Thread.currentThread().interrupt(); + publisher.close(); + assertThat(publisher.isShutdown()).isTrue(); + assertThat(Thread.interrupted()).isTrue(); // Clear interrupt flag + } + + @Test + public void closeDoesNotCloseConfiguredClient() { + CloudWatchMetricPublisher.builder().cloudWatchClient(cloudWatch).build().close(); + Mockito.verify(cloudWatch, never()).close(); + } + + @Test(timeout = 10_000) + public void closeWaitsForUploadToComplete() throws InterruptedException { + CountDownLatch cloudwatchPutCalledLatch = new CountDownLatch(1); + CompletableFuture result = new CompletableFuture<>(); + + CloudWatchAsyncClient cloudWatch = Mockito.mock(CloudWatchAsyncClient.class); + try (CloudWatchMetricPublisher publisher = CloudWatchMetricPublisher.builder() + .cloudWatchClient(cloudWatch) + .uploadFrequency(Duration.ofMinutes(60)) + .build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + + Mockito.when(cloudWatch.putMetricData(any(PutMetricDataRequest.class))).thenAnswer(x -> { + cloudwatchPutCalledLatch.countDown(); + return result; + }); + + publisher.publish(MetricCollector.create("test").collect()); + + Thread closeThread = new Thread(publisher::close); + + assertThat(publisher.isShutdown()).isFalse(); + + closeThread.start(); + + // Wait until cloudwatch is called + cloudwatchPutCalledLatch.await(); + + // Wait to make sure the close thread seems to be waiting for the cloudwatch call to complete + Thread.sleep(1_000); + + assertThat(closeThread.isAlive()).isTrue(); + + // Complete the cloudwatch call + result.complete(null); + + // Make sure the close thread finishes + closeThread.join(5_000); + assertThat(closeThread.isAlive()).isFalse(); + } + } + + @Test + public void defaultNamespaceIsCorrect() { + try (CloudWatchMetricPublisher publisher = CloudWatchMetricPublisher.builder() + .cloudWatchClient(cloudWatch) + .build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + PutMetricDataRequest call = getPutMetricCall(); + assertThat(call.namespace()).isEqualTo("AwsSdk/JavaSdk2"); + } + + @Test + public void defaultDimensionsIsCorrect() { + try (CloudWatchMetricPublisher publisher = CloudWatchMetricPublisher.builder() + .cloudWatchClient(cloudWatch) + .build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + PutMetricDataRequest call = getPutMetricCall(); + assertThat(call.metricData().get(0).dimensions()) + .containsExactlyInAnyOrder(Dimension.builder() + .name(CoreMetric.SERVICE_ID.name()) + .value("ServiceId") + .build(), + 
Dimension.builder() + .name(CoreMetric.OPERATION_NAME.name()) + .value("OperationName") + .build()); + } + + @Test + public void namespaceSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.namespace("namespace").build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + assertThat(getPutMetricCall().namespace()).isEqualTo("namespace"); + } + + @Test + public void dimensionsSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.dimensions(CoreMetric.SERVICE_ID).build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + PutMetricDataRequest call = getPutMetricCall(); + assertThat(call.metricData().get(0).dimensions()).containsExactly(Dimension.builder() + .name(CoreMetric.SERVICE_ID.name()) + .value("ServiceId") + .build()); + } + + @Test + public void metricCategoriesSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.metricCategories(MetricCategory.HTTP_CLIENT).build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.API_CALL_SUCCESSFUL, true); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + PutMetricDataRequest call = getPutMetricCall(); + MetricDatum metric = call.metricData().get(0); + assertThat(call.metricData()).hasSize(1); + assertThat(metric.dimensions()).containsExactly(Dimension.builder() + .name(CoreMetric.SERVICE_ID.name()) + .value("ServiceId") + .build()); + assertThat(metric.metricName()).isEqualTo(HttpMetric.AVAILABLE_CONCURRENCY.name()); + } + + @Test + public void metricLevelSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.metricLevel(MetricLevel.INFO).build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.API_CALL_SUCCESSFUL, true); + collector.reportMetric(HttpMetric.HTTP_STATUS_CODE, 404); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + PutMetricDataRequest call = getPutMetricCall(); + MetricDatum metric = call.metricData().get(0); + assertThat(call.metricData()).hasSize(1); + assertThat(metric.dimensions()).containsExactly(Dimension.builder() + .name(CoreMetric.SERVICE_ID.name()) + .value("ServiceId") + .build()); + assertThat(metric.metricName()).isEqualTo(CoreMetric.API_CALL_SUCCESSFUL.name()); + } + + @Test + public void maximumCallsPerPublishSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.maximumCallsPerUpload(1) + .detailedMetrics(HttpMetric.AVAILABLE_CONCURRENCY) + .build()) { + for (int i = 0; i < MetricCollectionAggregator.MAX_VALUES_PER_REQUEST + 1; ++i) { + MetricCollector collector = newCollector(); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, i); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + } + + assertThat(getPutMetricCalls()).hasSize(1); + } + + @Test + public void detailedMetricsSettingIsHonored() { + try 
(CloudWatchMetricPublisher publisher = publisherBuilder.detailedMetrics(HttpMetric.AVAILABLE_CONCURRENCY).build()) { + for (int i = 0; i < 10; ++i) { + MetricCollector collector = newCollector(); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 10); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, i); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + } + + PutMetricDataRequest call = getPutMetricCall(); + MetricDatum concurrencyMetric = getDatum(call, HttpMetric.MAX_CONCURRENCY); + MetricDatum availableConcurrency = getDatum(call, HttpMetric.AVAILABLE_CONCURRENCY); + + assertThat(concurrencyMetric.values()).isEmpty(); + assertThat(concurrencyMetric.counts()).isEmpty(); + assertThat(concurrencyMetric.statisticValues()).isNotNull(); + + assertThat(availableConcurrency.values()).isNotEmpty(); + assertThat(availableConcurrency.counts()).isNotEmpty(); + assertThat(availableConcurrency.statisticValues()).isNull(); + } + + private MetricDatum getDatum(PutMetricDataRequest call, SdkMetric metric) { + return call.metricData().stream().filter(m -> m.metricName().equals(metric.name())).findAny().get(); + } + + private PutMetricDataRequest getPutMetricCall() { + List calls = getPutMetricCalls(); + assertThat(calls).hasSize(1); + return calls.get(0); + } + + private List getPutMetricCalls() { + ArgumentCaptor captor = ArgumentCaptor.forClass(PutMetricDataRequest.class); + Mockito.verify(cloudWatch).putMetricData(captor.capture()); + return captor.getAllValues(); + } + + private void assertNoPutMetricCalls() { + Mockito.verify(cloudWatch, never()).putMetricData(any(PutMetricDataRequest.class)); + } + + private MetricCollector newCollector() { + return MetricCollector.create("test"); + } +} \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/FixedTimeMetricCollection.java b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/FixedTimeMetricCollection.java new file mode 100644 index 000000000000..3df2fd44c276 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/FixedTimeMetricCollection.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch; + +import java.time.Instant; +import java.util.Iterator; +import java.util.List; +import java.util.stream.Collectors; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; + +/** + * An implementation of {@link MetricCollection} that sets a static time for the {@link #creationTime()}. This makes it easier + * to test aggregation behavior, because the times can be fixed instead of regenerated each time the {@code MetricCollection} is + * created. 
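 *
 * <p>Illustrative usage only (the metric and value are arbitrary):
 * <pre>{@code
 * MetricCollector collector = MetricCollector.create("test");
 * collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 10);
 * MetricCollection fixed = new FixedTimeMetricCollection(collector.collect(), Instant.EPOCH);
 * // fixed.creationTime() is always Instant.EPOCH, regardless of when collect() ran
 * }</pre>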
+ */ +public class FixedTimeMetricCollection implements MetricCollection { + private final MetricCollection delegate; + private final Instant creationTime; + + public FixedTimeMetricCollection(MetricCollection delegate) { + this(delegate, Instant.EPOCH); + } + + public FixedTimeMetricCollection(MetricCollection delegate, + Instant creationTime) { + this.delegate = delegate; + this.creationTime = creationTime; + } + + @Override + public String name() { + return delegate.name(); + } + + @Override + public List metricValues(SdkMetric metric) { + return delegate.metricValues(metric); + } + + @Override + public List children() { + return delegate.children() + .stream() + .map(c -> new FixedTimeMetricCollection(c, creationTime)) + .collect(Collectors.toList()); + } + + @Override + public Instant creationTime() { + return creationTime; + } + + @Override + public Iterator> iterator() { + return delegate.iterator(); + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploaderTest.java b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploaderTest.java new file mode 100644 index 000000000000..daaec59916c9 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploaderTest.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataResponse; + +public class MetricUploaderTest { + private List> putMetricDataResponseFutures = new ArrayList<>(); + + private CloudWatchAsyncClient client; + + private MetricUploader uploader; + + @Before + public void setUp() { + client = Mockito.mock(CloudWatchAsyncClient.class); + uploader = new MetricUploader(client); + + Mockito.when(client.putMetricData(any(PutMetricDataRequest.class))).thenAnswer(p -> { + CompletableFuture result = new CompletableFuture<>(); + putMetricDataResponseFutures.add(result); + return result; + }); + } + + @Test + public void uploadSuccessIsPropagated() { + CompletableFuture uploadFuture = uploader.upload(Arrays.asList(PutMetricDataRequest.builder().build(), + PutMetricDataRequest.builder().build())); + + assertThat(putMetricDataResponseFutures).hasSize(2); + assertThat(uploadFuture).isNotCompleted(); + + putMetricDataResponseFutures.get(0).complete(PutMetricDataResponse.builder().build()); + + assertThat(uploadFuture).isNotCompleted(); + + putMetricDataResponseFutures.get(1).complete(PutMetricDataResponse.builder().build()); + + assertThat(uploadFuture).isCompleted(); + } + + @Test + public void uploadFailureIsPropagated() { + CompletableFuture uploadFuture = uploader.upload(Arrays.asList(PutMetricDataRequest.builder().build(), + PutMetricDataRequest.builder().build())); + + assertThat(putMetricDataResponseFutures).hasSize(2); + assertThat(uploadFuture).isNotCompleted(); + + putMetricDataResponseFutures.get(0).completeExceptionally(new Throwable()); + putMetricDataResponseFutures.get(1).complete(PutMetricDataResponse.builder().build()); + + assertThat(uploadFuture).isCompletedExceptionally(); + } + + @Test + public void closeFalseDoesNotCloseClient() { + uploader.close(false); + Mockito.verify(client, never()).close(); + } + + @Test + public void closeTrueClosesClient() { + uploader.close(true); + Mockito.verify(client, times(1)).close(); + } +} \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasksTest.java b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasksTest.java new file mode 100644 index 000000000000..619802355503 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasksTest.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.task; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.MetricUploader; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.MetricCollectionAggregator; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; + +public class UploadMetricsTasksTest { + private MetricCollectionAggregator aggregator; + private MetricUploader uploader; + private UploadMetricsTasks task; + + @Before + public void setUp() { + aggregator = Mockito.mock(MetricCollectionAggregator.class); + uploader = Mockito.mock(MetricUploader.class); + task = new UploadMetricsTasks(aggregator, uploader, 2); + } + + + @Test + public void extraTasksAboveMaximumAreDropped() { + List requests = Arrays.asList(PutMetricDataRequest.builder().build(), + PutMetricDataRequest.builder().build(), + PutMetricDataRequest.builder().build()); + Mockito.when(aggregator.getRequests()).thenReturn(requests); + task.call(); + + + ArgumentCaptor captor = ArgumentCaptor.forClass(List.class); + Mockito.verify(uploader).upload(captor.capture()); + List uploadedRequests = captor.getValue(); + + assertThat(uploadedRequests).hasSize(2); + assertThat(uploadedRequests).containsOnlyElementsOf(requests); + } +} \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregatorTest.java b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregatorTest.java new file mode 100644 index 000000000000..e2d537853811 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregatorTest.java @@ -0,0 +1,485 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import static java.time.temporal.ChronoUnit.HOURS; +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import java.time.Instant; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.junit.Assert; +import org.junit.Test; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.publishers.cloudwatch.FixedTimeMetricCollection; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; + +public class MetricCollectionAggregatorTest { + private static final String DEFAULT_NAMESPACE = "namespace"; + private static final Set> DEFAULT_DIMENSIONS = Stream.of(CoreMetric.SERVICE_ID, CoreMetric.OPERATION_NAME) + .collect(Collectors.toSet()); + private static final MetricLevel DEFAULT_METRIC_LEVEL = MetricLevel.INFO; + private static final Set DEFAULT_CATEGORIES = Collections.singleton(MetricCategory.HTTP_CLIENT); + private static final Set> DEFAULT_DETAILED_METRICS = Collections.emptySet(); + + @Test + public void maximumRequestsIsHonored() { + List requests; + + requests = aggregatorWithUniqueMetricsAdded(MetricCollectionAggregator.MAX_METRIC_DATA_PER_REQUEST).getRequests(); + assertThat(requests).hasOnlyOneElementSatisfying(request -> { + assertThat(request.metricData()).hasSize(MetricCollectionAggregator.MAX_METRIC_DATA_PER_REQUEST); + }); + + requests = aggregatorWithUniqueMetricsAdded(MetricCollectionAggregator.MAX_METRIC_DATA_PER_REQUEST + 1).getRequests(); + assertThat(requests).hasSize(2); + assertThat(requests.get(0).metricData()).hasSize(MetricCollectionAggregator.MAX_METRIC_DATA_PER_REQUEST); + assertThat(requests.get(1).metricData()).hasSize(1); + } + + @Test + public void maximumMetricValuesIsHonored() { + List requests; + + requests = aggregatorWithUniqueValuesAdded(HttpMetric.MAX_CONCURRENCY, + MetricCollectionAggregator.MAX_VALUES_PER_REQUEST).getRequests(); + assertThat(requests).hasSize(1); + validateValuesCount(requests.get(0), MetricCollectionAggregator.MAX_VALUES_PER_REQUEST); + + requests = aggregatorWithUniqueValuesAdded(HttpMetric.MAX_CONCURRENCY, + MetricCollectionAggregator.MAX_VALUES_PER_REQUEST + 1).getRequests(); + assertThat(requests).hasSize(2); + validateValuesCount(requests.get(0), MetricCollectionAggregator.MAX_VALUES_PER_REQUEST); + validateValuesCount(requests.get(1), 1); + } + + private void validateValuesCount(PutMetricDataRequest request, int valuesExpected) { + assertThat(request.metricData().stream().flatMap(m -> m.values().stream())) + .hasSize(valuesExpected); + } + + @Test + public void smallValuesAreNormalizedToZeroWithSummaryMetrics() { + // Really small values (close to 0) result in CloudWatch failing with an "unsupported value" error. Make sure that we + // floor those values to 0 to prevent that error. 
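        // Illustrative expectation, with the 0.0001 threshold taken from MetricValueNormalizer: normalize(1E-10) and
        // normalize(-1E-10) both return 0.0, while a value such as 0.001 is above the threshold and passes through
        // unchanged.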
+ + MetricCollectionAggregator aggregator = defaultAggregator(); + + MetricCollector collector = collector(); + SdkMetric metric = someMetric(Double.class); + collector.reportMetric(metric, -1E-10); + collector.reportMetric(metric, 1E-10); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.metricData()).hasOnlyOneElementSatisfying(metricData -> { + StatisticSet stats = metricData.statisticValues(); + assertThat(stats.minimum()).isEqualTo(0.0); + assertThat(stats.maximum()).isEqualTo(0.0); + assertThat(stats.sum()).isEqualTo(0.0); + assertThat(stats.sampleCount()).isEqualTo(2.0); + }); + }); + } + + @Test + public void smallValuesAreNormalizedToZeroWithDetailedMetrics() { + // Really small values (close to 0) result in CloudWatch failing with an "unsupported value" error. Make sure that we + // floor those values to 0 to prevent that error. + + SdkMetric metric = someMetric(Double.class); + MetricCollectionAggregator aggregator = aggregatorWithCustomDetailedMetrics(metric); + + MetricCollector collector = collector(); + collector.reportMetric(metric, -1E-10); + collector.reportMetric(metric, 1E-10); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.metricData()).hasOnlyOneElementSatisfying(metricData -> { + assertThat(metricData.values()).hasOnlyOneElementSatisfying(metricValue -> { + assertThat(metricValue).isEqualTo(0.0); + }); + assertThat(metricData.counts()).hasOnlyOneElementSatisfying(metricCount -> { + assertThat(metricCount).isEqualTo(2.0); + }); + }); + }); + } + + @Test + public void dimensionOrderInCollectionDoesNotMatter() { + MetricCollectionAggregator aggregator = defaultAggregator(); + + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + aggregator.addCollection(collectToFixedTime(collector)); + + collector = collector(); + collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 2); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.metricData()).hasSize(1); + }); + } + + @Test + public void metricsAreAggregatedByDimensionMetricAndTime() { + MetricCollectionAggregator aggregator = defaultAggregator(); + + MetricCollector collector = collector(); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + aggregator.addCollection(collectToFixedTimeBucket(collector, 0)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 2); + aggregator.addCollection(collectToFixedTimeBucket(collector, 0)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 3); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 4); + aggregator.addCollection(collectToFixedTimeBucket(collector, 0)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + 
collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 5); + aggregator.addCollection(collectToFixedTimeBucket(collector, 1)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.namespace()).isEqualTo(DEFAULT_NAMESPACE); + assertThat(request.metricData()).hasSize(5).allSatisfy(data -> { + assertThat(data.values()).isEmpty(); + assertThat(data.counts()).isEmpty(); + if (data.dimensions().isEmpty()) { + assertThat(data.metricName()).isEqualTo(HttpMetric.MAX_CONCURRENCY.name()); + assertThat(data.statisticValues().sampleCount()).isEqualTo(1); + assertThat(data.statisticValues().sum()).isEqualTo(1); + } else if (data.dimensions().size() == 1) { + assertThat(data.metricName()).isEqualTo(HttpMetric.MAX_CONCURRENCY.name()); + assertThat(data.statisticValues().sampleCount()).isEqualTo(1); + assertThat(data.statisticValues().sum()).isEqualTo(2); + } else { + assertThat(data.dimensions().size()).isEqualTo(2); + if (data.timestamp().equals(Instant.EPOCH)) { + // Time bucket 0 + if (data.metricName().equals(HttpMetric.MAX_CONCURRENCY.name())) { + assertThat(data.statisticValues().sampleCount()).isEqualTo(1); + assertThat(data.statisticValues().sum()).isEqualTo(3); + } else { + assertThat(data.metricName()).isEqualTo(HttpMetric.AVAILABLE_CONCURRENCY.name()); + assertThat(data.statisticValues().sampleCount()).isEqualTo(1); + assertThat(data.statisticValues().sum()).isEqualTo(4); + } + } else { + // Time bucket 1 + assertThat(data.metricName()).isEqualTo(HttpMetric.MAX_CONCURRENCY.name()); + assertThat(data.statisticValues().sampleCount()).isEqualTo(1); + assertThat(data.statisticValues().sum()).isEqualTo(5); + } + } + }); + }); + } + + @Test + public void metricSummariesAreCorrectWithValuesInSameCollector() { + MetricCollectionAggregator aggregator = defaultAggregator(); + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 2); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 3); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.namespace()).isEqualTo(DEFAULT_NAMESPACE); + assertThat(request.metricData()).hasOnlyOneElementSatisfying(metricData -> { + assertThat(metricData.dimensions()).hasOnlyOneElementSatisfying(dimension -> { + assertThat(dimension.name()).isEqualTo(CoreMetric.SERVICE_ID.name()); + assertThat(dimension.value()).isEqualTo("ServiceId"); + }); + assertThat(metricData.values()).isEmpty(); + assertThat(metricData.counts()).isEmpty(); + assertThat(metricData.statisticValues()).isEqualTo(StatisticSet.builder() + .minimum(1.0) + .maximum(4.0) + .sum(14.0) + .sampleCount(5.0) + .build()); + }); + }); + } + + @Test + public void metricSummariesAreCorrectWithValuesInDifferentCollector() { + MetricCollectionAggregator aggregator = defaultAggregator(); + + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 2); + aggregator.addCollection(collectToFixedTime(collector)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + 
collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + aggregator.addCollection(collectToFixedTime(collector)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + aggregator.addCollection(collectToFixedTime(collector)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + aggregator.addCollection(collectToFixedTime(collector)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 3); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.namespace()).isEqualTo(DEFAULT_NAMESPACE); + assertThat(request.metricData()).hasOnlyOneElementSatisfying(metricData -> { + assertThat(metricData.dimensions()).hasOnlyOneElementSatisfying(dimension -> { + assertThat(dimension.name()).isEqualTo(CoreMetric.SERVICE_ID.name()); + assertThat(dimension.value()).isEqualTo("ServiceId"); + }); + assertThat(metricData.values()).isEmpty(); + assertThat(metricData.counts()).isEmpty(); + assertThat(metricData.statisticValues()).isEqualTo(StatisticSet.builder() + .minimum(1.0) + .maximum(4.0) + .sum(14.0) + .sampleCount(5.0) + .build()); + }); + }); + } + + @Test + public void detailedMetricsAreCorrect() { + MetricCollectionAggregator aggregator = aggregatorWithCustomDetailedMetrics(HttpMetric.MAX_CONCURRENCY); + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 2); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 3); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.namespace()).isEqualTo(DEFAULT_NAMESPACE); + assertThat(request.metricData()).hasOnlyOneElementSatisfying(metricData -> { + assertThat(metricData.dimensions()).hasOnlyOneElementSatisfying(dimension -> { + assertThat(dimension.name()).isEqualTo(CoreMetric.SERVICE_ID.name()); + assertThat(dimension.value()).isEqualTo("ServiceId"); + }); + + assertThat(metricData.values()).hasSize(4); + assertThat(metricData.statisticValues()).isNull(); + for (int i = 0; i < metricData.values().size(); i++) { + Double value = metricData.values().get(i); + Double count = metricData.counts().get(i); + switch (value.toString()) { + case "1.0": + case "2.0": + case "3.0": + assertThat(count).isEqualTo(1.0); + break; + case "4.0": + assertThat(count).isEqualTo(2.0); + break; + default: + Assert.fail(); + } + } + }); + }); + } + + @Test + public void metricsFromOtherCategoriesAreIgnored() { + MetricCollectionAggregator aggregator = defaultAggregator(); + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.HTTP_STATUS_CODE, 404); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).isEmpty(); + } + + @Test + public void getRequestsResetsState() { + MetricCollectionAggregator aggregator = defaultAggregator(); + MetricCollector collector = collector(); + 
collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasSize(1); + assertThat(aggregator.getRequests()).isEmpty(); + } + + @Test + public void numberTypesAreTransformedCorrectly() { + SdkMetric metric = someMetric(CustomNumber.class); + assertThat(transformMetricValueUsingAggregator(metric, new CustomNumber(-1000.5))).isEqualTo(-1000.5); + assertThat(transformMetricValueUsingAggregator(metric, new CustomNumber(0))).isEqualTo(0); + assertThat(transformMetricValueUsingAggregator(metric, new CustomNumber(1000.5))).isEqualTo(1000.5); + } + + @Test + public void durationsAreTransformedCorrectly() { + SdkMetric metric = someMetric(Duration.class); + assertThat(transformMetricValueUsingAggregator(metric, Duration.ofSeconds(-10))).isEqualTo(-10_000); + assertThat(transformMetricValueUsingAggregator(metric, Duration.ofSeconds(0))).isEqualTo(0); + assertThat(transformMetricValueUsingAggregator(metric, Duration.ofSeconds(10))).isEqualTo(10_000); + } + + @Test + public void booleansAreTransformedCorrectly() { + SdkMetric metric = someMetric(Boolean.class); + assertThat(transformMetricValueUsingAggregator(metric, false)).isEqualTo(0.0); + assertThat(transformMetricValueUsingAggregator(metric, true)).isEqualTo(1.0); + } + + private Double transformMetricValueUsingAggregator(SdkMetric metric, T input) { + MetricCollectionAggregator aggregator = aggregatorWithCustomDetailedMetrics(metric); + MetricCollector collector = collector(); + collector.reportMetric(metric, input); + aggregator.addCollection(collectToFixedTime(collector)); + + return aggregator.getRequests().get(0).metricData().get(0).values().get(0); + } + + private MetricCollectionAggregator aggregatorWithUniqueValuesAdded(SdkMetric metric, int numValues) { + MetricCollectionAggregator aggregator = aggregatorWithCustomDetailedMetrics(metric); + for (int i = 0; i < numValues; i++) { + MetricCollector collector = collector(); + collector.reportMetric(metric, i); + aggregator.addCollection(collectToFixedTime(collector)); + } + return aggregator; + } + + private MetricCollectionAggregator aggregatorWithUniqueMetricsAdded(int numMetrics) { + MetricCollectionAggregator aggregator = defaultAggregator(); + MetricCollector collector = collector(); + for (int i = 0; i < numMetrics; i++) { + collector.reportMetric(someMetric(), 0); + } + aggregator.addCollection(collectToFixedTime(collector)); + return aggregator; + } + + private MetricCollectionAggregator defaultAggregator() { + return new MetricCollectionAggregator(DEFAULT_NAMESPACE, + DEFAULT_DIMENSIONS, + DEFAULT_CATEGORIES, + DEFAULT_METRIC_LEVEL, + DEFAULT_DETAILED_METRICS); + } + + private MetricCollectionAggregator aggregatorWithCustomDetailedMetrics(SdkMetric... 
detailedMetrics) { + return new MetricCollectionAggregator(DEFAULT_NAMESPACE, + DEFAULT_DIMENSIONS, + DEFAULT_CATEGORIES, + DEFAULT_METRIC_LEVEL, + Stream.of(detailedMetrics).collect(Collectors.toSet())); + } + + private MetricCollector collector() { + return MetricCollector.create("test"); + } + + private SdkMetric someMetric() { + return someMetric(Integer.class); + } + + private SdkMetric someMetric(Class clazz) { + return SdkMetric.create(getClass().getSimpleName() + UUID.randomUUID().toString(), + clazz, + MetricLevel.INFO, + MetricCategory.HTTP_CLIENT); + } + + private MetricCollection collectToFixedTime(MetricCollector collector) { + return new FixedTimeMetricCollection(collector.collect()); + } + + private MetricCollection collectToFixedTimeBucket(MetricCollector collector, int timeBucket) { + // Make sure collectors in different "time buckets" are in a different minute than other collectors. We also offset the + // hour by a few seconds, to make sure the metric collection aggregator is actually ignoring the "seconds" portion of + // the collection time. + Instant metricTime = Instant.EPOCH.plus(timeBucket, HOURS) + .plusSeconds(Math.max(59, timeBucket)); + return new FixedTimeMetricCollection(collector.collect(), metricTime); + } + + private static class CustomNumber extends Number { + private final double value; + + public CustomNumber(double value) { + this.value = value; + } + + @Override + public int intValue() { + throw new UnsupportedOperationException(); + } + + @Override + public long longValue() { + throw new UnsupportedOperationException(); + } + + @Override + public float floatValue() { + throw new UnsupportedOperationException(); + } + + @Override + public double doubleValue() { + return value; + } + } +} \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/resources/log4j.properties b/metric-publishers/cloudwatch-metric-publisher/src/test/resources/log4j.properties new file mode 100644 index 000000000000..6fa311bc45f9 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/resources/log4j.properties @@ -0,0 +1,35 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
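// Illustrative sketch (not part of this change set): how a MetricCollection of the kind the
// MetricCollectionAggregatorTest above feeds into the aggregator is typically produced. The
// "DynamoDb"/"GetItem" strings and the concurrency value are arbitrary example data; the
// collector and metric APIs are the ones exercised by the test.
import software.amazon.awssdk.core.metrics.CoreMetric;
import software.amazon.awssdk.http.HttpMetric;
import software.amazon.awssdk.metrics.MetricCollection;
import software.amazon.awssdk.metrics.MetricCollector;

public final class MetricCollectionSketch {
    public static MetricCollection sampleCollection() {
        MetricCollector collector = MetricCollector.create("ApiCall");
        // Dimension values used by the aggregator (see DEFAULT_DIMENSIONS in the test above).
        collector.reportMetric(CoreMetric.SERVICE_ID, "DynamoDb");
        collector.reportMetric(CoreMetric.OPERATION_NAME, "GetItem");
        // A value metric in the HTTP_CLIENT category, which the default test configuration collects.
        collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 50);
        return collector.collect();
    }
}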
+# + +log4j.rootLogger=INFO, A1 +log4j.appender.A1=org.apache.log4j.ConsoleAppender +log4j.appender.A1.layout=org.apache.log4j.PatternLayout + +# Print the date in ISO 8601 format +log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n + +# Adjust to see more / less logging +#log4j.logger.com.amazonaws.ec2=DEBUG + +# HttpClient 3 Wire Logging +#log4j.logger.httpclient.wire=DEBUG + +# HttpClient 4 Wire Logging +# log4j.logger.org.apache.http.wire=INFO +# log4j.logger.org.apache.http=DEBUG +# log4j.logger.org.apache.http.wire=DEBUG +# log4j.logger.software.amazonaws.awssdk=DEBUG + + diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml new file mode 100644 index 000000000000..505dec472c00 --- /dev/null +++ b/metric-publishers/pom.xml @@ -0,0 +1,101 @@ + + + 4.0.0 + + software.amazon.awssdk + aws-sdk-java-pom + 2.15.62-SNAPSHOT + + + metric-publishers + AWS Java SDK :: Metric Publishers + pom + + + cloudwatch-metric-publisher + + + + + + software.amazon.awssdk + bom-internal + ${awsjavasdk.version} + pom + import + + + + + + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + + + software.amazon.awssdk + utils + ${awsjavasdk.version} + + + junit + junit + test + + + log4j + log4j + test + + + org.slf4j + slf4j-log4j12 + test + + + io.reactivex.rxjava2 + rxjava + test + + + org.assertj + assertj-core + test + + + software.amazon.awssdk + test-utils + ${awsjavasdk.version} + test + + + org.hamcrest + hamcrest-all + test + + + wiremock + com.github.tomakehurst + test + + + mockito-core + org.mockito + test + + + diff --git a/pom.xml b/pom.xml index dfd883549abd..b45c5930c767 100644 --- a/pom.xml +++ b/pom.xml @@ -1,6 +1,6 @@ - - 4.1.33.Final + + 4.1.53.Final 3.3 1.3 UTF-8 - 3.1.11 - 2.0.0 + 4.1.4 + 2.0.4 2.3.24-incubating - 1.11.1 + 1.13.0 3.10.0 3.5.101 2.1.9 1.10 1.21 + 0.9.0 4.12 5.4.2 1.3 1.10.19 + 2.28.2 3.8.0 - 26.0-jre + 28.2-jre 1.1 + 7.1.0 2.3 - 2.0.20.Final + 2.0.34.Final + 1.11.477 + 1.0.392 - 2.21.0 - 3.8.0 - 2.17 - 2.22.0 + 2.22.2 + 3.8.1 + 3.1.1 + 2.22.2 3.1.1 3.0.1 + yyyy 3.1.1 1.6 - 8.18 - 0.8.2 + 8.38 + 0.8.5 1.6.8 1.6.0 2.8.2 3.0.0 + 0.14.4 2.4.0 @@ -146,7 +155,7 @@ 1.8.2 1.8 - 4.5.9 + 4.5.13 4.4.11 @@ -155,11 +164,6 @@ 1.0.2 - - .. 
- jacoco - ${basedir}/target/jacoco.exec - ${skipTests} @@ -172,7 +176,7 @@ - aws-sdk-java-${project.artifactId}-${project.version} + aws-sdk-java-${project.artifactId}-${awsjavasdk.version} @@ -285,6 +289,25 @@ + + add-license-notice + generate-sources + + add-resource + + + + + ${maven.multiModuleProjectDirectory} + + LICENSE.txt + NOTICE.txt + + META-INF + + + + @@ -407,6 +430,11 @@ com.fasterxml.jackson.core:* org.slf4j:slf4j-api + + + com.typesafe.netty:* + software.amazon.awssdk:aws-sdk-java + @@ -420,9 +448,6 @@ org.jacoco jacoco-maven-plugin ${jacoco-maven-plugin.version} - - ${sonar.jacoco.reportPath} - default-prepare-agent @@ -464,10 +489,87 @@ ${spotbugs.version} + + + com.github.siom79.japicmp + japicmp-maven-plugin + ${japicmp-maven-plugin.version} + + + + ${project.groupId} + ${project.artifactId} + ${awsjavasdk.previous.version} + jar + + + + + ${project.build.directory}/aws-sdk-java-${project.artifactId}-${project.version}.${project.packaging} + + + + true + + *.internal.* + + + codegen-lite-maven-plugin + codegen-maven-plugin + codegen + codegen-lite + .*tests* + .*test* + protocol-tests-core + tests-coverage-reporting + aws-sdk-java + archetype-lambda + sdk-benchmarks + bundle + + aws-crt-client + + true + true + true + + + METHOD_NEW_DEFAULT + true + true + + + METHOD_ADDED_TO_INTERFACE + true + true + + + + + + + verify + + cmp + + + + + + + sonar-generate + + true + true + true + true + + + publishing @@ -493,7 +595,7 @@ true sonatype-nexus-staging - https://oss.sonatype.org + https://aws.oss.sonatype.org @@ -517,6 +619,7 @@ true true true + true @@ -532,6 +635,7 @@ true true true + true @@ -580,6 +684,7 @@ true true true + true @@ -622,6 +727,7 @@ true true true + true @@ -668,10 +774,10 @@ false true true - AWS SDK for Java - ${project.version} + AWS SDK for Java - ${awsjavasdk.version} UTF-8 UTF-8 - AWS SDK for Java API Reference - ${project.version} + AWS SDK for Java API Reference - ${awsjavasdk.version} AWS SDK for Java :*.codegen:software.amazon.awssdk.services.protocol* @@ -699,6 +805,10 @@ Resource Groups Tagging API software.amazon.awssdk.services.resourcegroupstaggingapi* + + S3 Control + software.amazon.awssdk.services.s3control* + S3 software.amazon.awssdk.services.s3* @@ -1047,7 +1157,9 @@ software.amazon.awssdk* - +

    AWS SDK for Java API Reference - ${project.version}]]>
    +
    + Copyright © ${maven.build.timestamp} Amazon Web Services, Inc. All Rights Reserved.]]> diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index 15cd58aae45b..fcbeb1fdb92f 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services-custom + 2.15.62-SNAPSHOT + + dynamodb-enhanced + AWS Java SDK :: DynamoDB :: Enhanced Client + https://aws.amazon.com/sdkforjava + + + + dynamodblocal + AWS DynamoDB Local Release Repository + https://s3-us-west-2.amazonaws.com/dynamodb-local/release + + + + + ${project.parent.version} + 1.8 + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + copy + test-compile + + copy-dependencies + + + test + so,dll,dylib + ${project.build.directory}/native-libs + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.enhanced.dynamodb + + + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + ${project.build.directory}/native-libs + + + + + + + + + + software.amazon.awssdk + aws-core + ${awsjavasdk.version} + + + software.amazon.awssdk + http-client-spi + ${awsjavasdk.version} + + + software.amazon.awssdk + sdk-core + ${awsjavasdk.version} + + + software.amazon.awssdk + auth + ${awsjavasdk.version} + + + software.amazon.awssdk + regions + ${awsjavasdk.version} + + + software.amazon.awssdk + dynamodb + ${awsjavasdk.version} + + + software.amazon.awssdk + annotations + ${awsjavasdk.version} + + + software.amazon.awssdk + utils + ${awsjavasdk.version} + + + junit + junit + test + + + org.hamcrest + hamcrest-all + test + + + org.assertj + assertj-core + test + + + org.mockito + mockito-core + ${mockito2.version} + test + + + com.github.tomakehurst + wiremock + test + + + com.amazonaws + DynamoDBLocal + test + + + com.almworks.sqlite4java + sqlite4java + test + + + com.almworks.sqlite4java + sqlite4java-win32-x86 + dll + test + + + com.almworks.sqlite4java + sqlite4java-win32-x64 + dll + test + + + com.almworks.sqlite4java + libsqlite4java-osx + dylib + test + + + com.almworks.sqlite4java + libsqlite4java-linux-i386 + so + test + + + com.almworks.sqlite4java + libsqlite4java-linux-amd64 + so + test + + + diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverter.java new file mode 100644 index 000000000000..de36eab8c66f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverter.java @@ -0,0 +1,82 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.time.Instant; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsStringAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringAttributeConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * Converts between a specific Java type and an {@link AttributeValue}. + * + *

 + * Examples:
 + * • The {@link StringAttributeConverter} converts a {@link String} into a DynamoDB string
 + *   ({@link software.amazon.awssdk.services.dynamodb.model.AttributeValue#s()}).
 + * • The {@link InstantAsStringAttributeConverter} converts an {@link Instant} into a DynamoDB string
 + *   ({@link software.amazon.awssdk.services.dynamodb.model.AttributeValue#s()}).
    + */ +@SdkPublicApi +@ThreadSafe +public interface AttributeConverter { + /** + * Convert the provided Java object into an {@link AttributeValue}. This will raise a {@link RuntimeException} if the + * conversion fails, or the input is null. + * + *

    + * Example: + *

    +     * {@code
    +     * InstantAsStringAttributeConverter converter = InstantAsStringAttributeConverter.create();
    +     * assertEquals(converter.transformFrom(Instant.EPOCH),
    +     *              EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z").toAttributeValue());
    +     * }
    +     * 
    + */ + AttributeValue transformFrom(T input); + + /** + * Convert the provided {@link AttributeValue} into a Java object. This will raise a {@link RuntimeException} if the + * conversion fails, or the input is null. + * + *

    + *

    +     * Example:
    +     * {@code
    +     * InstantAsStringAttributeConverter converter = InstantAsStringAttributeConverter.create();
    +     * assertEquals(converter.transformTo(EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z").toAttributeValue()),
    +     *              Instant.EPOCH);
    +     * }
    +     * 
    + */ + T transformTo(AttributeValue input); + + /** + * The type supported by this converter. + */ + EnhancedType type(); + + /** + * The {@link AttributeValueType} that a converter stores and reads values + * from DynamoDB via the {@link AttributeValue} class. + */ + AttributeValueType attributeValueType(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverterProvider.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverterProvider.java new file mode 100644 index 000000000000..a20608535895 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverterProvider.java @@ -0,0 +1,45 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterProviderResolver; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * Interface for determining the {@link AttributeConverter} to use for + * converting a given {@link EnhancedType}. + */ +@SdkPublicApi +public interface AttributeConverterProvider { + + /** + * Finds a {@link AttributeConverter} for converting an object with a type + * specified by a {@link EnhancedType} to a {@link AttributeValue} and back. + * + * @param enhancedType The type of the object to be converted + * @return {@link AttributeConverter} for converting the given type. + */ + AttributeConverter converterFor(EnhancedType enhancedType); + + /** + * Returns a default implementation of AttributeConverterProvider with all + * standard Java type converters included. + */ + static AttributeConverterProvider defaultProvider() { + return ConverterProviderResolver.defaultConverterProvider(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeValueType.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeValueType.java new file mode 100644 index 000000000000..13e6bdb4d174 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeValueType.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
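// Illustrative sketch (not part of this change set): a custom AttributeConverter implementing the
// contract documented above for java.util.Currency. The class name CurrencyAttributeConverter is
// hypothetical; the SDK types used (EnhancedType, AttributeValueType, AttributeValue) are the ones
// introduced in this module.
import java.util.Currency;
import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter;
import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType;
import software.amazon.awssdk.enhanced.dynamodb.EnhancedType;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

public final class CurrencyAttributeConverter implements AttributeConverter<Currency> {
    @Override
    public AttributeValue transformFrom(Currency input) {
        // Store the ISO 4217 currency code as a DynamoDB string attribute.
        return AttributeValue.builder().s(input.getCurrencyCode()).build();
    }

    @Override
    public Currency transformTo(AttributeValue input) {
        return Currency.getInstance(input.s());
    }

    @Override
    public EnhancedType<Currency> type() {
        return EnhancedType.of(Currency.class);
    }

    @Override
    public AttributeValueType attributeValueType() {
        return AttributeValueType.S;
    }
}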
+ */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; + +@SdkPublicApi +public enum AttributeValueType { + B(ScalarAttributeType.B), // binary + BOOL, // boolean + BS, // binary set + L, // list + M, // documentMap + N(ScalarAttributeType.N), // number + NS, // number set + S(ScalarAttributeType.S), // string + SS, // string set + NULL; // null + + private final ScalarAttributeType scalarAttributeType; + + AttributeValueType() { + this.scalarAttributeType = null; + } + + AttributeValueType(ScalarAttributeType scalarAttributeType) { + this.scalarAttributeType = scalarAttributeType; + } + + public ScalarAttributeType scalarAttributeType() { + return scalarAttributeType; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java new file mode 100644 index 000000000000..34a3f159750c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java @@ -0,0 +1,258 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
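// Illustrative sketch (not part of this change set): the scalarAttributeType() mapping defined by the
// AttributeValueType enum above. Only the scalar members (B, N, S) carry a ScalarAttributeType; the
// remaining members return null.
import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType;
import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType;

final class AttributeValueTypeSketch {
    static void demo() {
        assert AttributeValueType.S.scalarAttributeType() == ScalarAttributeType.S;
        assert AttributeValueType.N.scalarAttributeType() == ScalarAttributeType.N;
        assert AttributeValueType.B.scalarAttributeType() == ScalarAttributeType.B;
        assert AttributeValueType.BOOL.scalarAttributeType() == null; // non-scalar types have no mapping
    }
}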
+ */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.AtomicBooleanAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.AtomicIntegerAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.AtomicLongAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.BigDecimalAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.BigIntegerAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.BooleanAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ByteArrayAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ByteAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.CharSequenceAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.CharacterArrayAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.CharacterAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DocumentAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DoubleAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DurationAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnumAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.FloatAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsStringAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.IntegerAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ListAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalDateAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalDateTimeAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalTimeAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LongAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.MapAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.MonthDayAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.OffsetDateTimeAsStringAttributeConverter; +import 
software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.OptionalDoubleAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.OptionalIntAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.OptionalLongAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.PeriodAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.SdkBytesAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.SetAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ShortAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringBufferAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringBuilderAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.UriAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.UrlAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.UuidAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ZoneIdAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ZoneOffsetAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ZonedDateTimeAsStringAttributeConverter; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * This class is the default attribute converter provider in the DDB Enhanced library. When instantiated + * using the constructor {@link #DefaultAttributeConverterProvider()} or the {@link #create()} method, it's loaded + * with the currently supported attribute converters in the library. + *

    + * Given an input, the method {@link #converterFor(EnhancedType)} will identify a converter that can convert the + * specific Java type and invoke it. If a converter cannot be found, it will invoke a "parent" converter, + * which would be expected to be able to convert the value (or throw an exception). + */ +@SdkPublicApi +@ThreadSafe +@Immutable +public final class DefaultAttributeConverterProvider implements AttributeConverterProvider { + private static final Logger log = Logger.loggerFor(DefaultAttributeConverterProvider.class); + + private final ConcurrentHashMap, AttributeConverter> converterCache = + new ConcurrentHashMap<>(); + + private DefaultAttributeConverterProvider(Builder builder) { + // Converters are used in the REVERSE order of how they were added to the builder. + for (int i = builder.converters.size() - 1; i >= 0; i--) { + AttributeConverter converter = builder.converters.get(i); + converterCache.put(converter.type(), converter); + + if (converter instanceof PrimitiveConverter) { + PrimitiveConverter primitiveConverter = (PrimitiveConverter) converter; + converterCache.put(primitiveConverter.primitiveType(), converter); + } + } + } + + /** + * Returns an attribute converter provider with all default converters set. + */ + public DefaultAttributeConverterProvider() { + this(getDefaultBuilder()); + } + + /** + * Returns an attribute converter provider with all default converters set. + */ + public static DefaultAttributeConverterProvider create() { + return getDefaultBuilder().build(); + } + + + /** + * Equivalent to {@code builder(EnhancedType.of(Object.class))}. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Find a converter that matches the provided type. If one cannot be found, throw an exception. + */ + @Override + public AttributeConverter converterFor(EnhancedType type) { + return findConverter(type).orElseThrow(() -> new IllegalStateException("Converter not found for " + type)); + } + + /** + * Find a converter that matches the provided type. If one cannot be found, return empty. + */ + @SuppressWarnings("unchecked") + private Optional> findConverter(EnhancedType type) { + log.debug(() -> "Loading converter for " + type + "."); + + AttributeConverter converter = (AttributeConverter) converterCache.get(type); + if (converter != null) { + return Optional.of(converter); + } + + if (type.rawClass().isAssignableFrom(Map.class)) { + converter = createMapConverter(type); + } else if (type.rawClass().isAssignableFrom(Set.class)) { + converter = createSetConverter(type); + } else if (type.rawClass().isAssignableFrom(List.class)) { + EnhancedType innerType = (EnhancedType) type.rawClassParameters().get(0); + AttributeConverter innerConverter = findConverter(innerType) + .orElseThrow(() -> new IllegalStateException("Converter not found for " + type)); + return Optional.of((AttributeConverter) ListAttributeConverter.create(innerConverter)); + } else if (type.rawClass().isEnum()) { + return Optional.of(EnumAttributeConverter.create(((EnhancedType) type).rawClass())); + } + + if (type.tableSchema().isPresent()) { + converter = DocumentAttributeConverter.create(type.tableSchema().get(), type); + } + + if (converter != null && shouldCache(type.rawClass())) { + this.converterCache.put(type, converter); + } + + return Optional.ofNullable(converter); + } + + private boolean shouldCache(Class type) { + // Do not cache anonymous classes, to prevent memory leaks. 
+ return !type.isAnonymousClass(); + } + + @SuppressWarnings("unchecked") + private AttributeConverter createMapConverter(EnhancedType type) { + EnhancedType keyType = type.rawClassParameters().get(0); + EnhancedType valueType = (EnhancedType) type.rawClassParameters().get(1); + + StringConverter keyConverter = StringConverterProvider.defaultProvider().converterFor(keyType); + AttributeConverter valueConverter = findConverter(valueType) + .orElseThrow(() -> new IllegalStateException("Converter not found for " + type)); + + return (AttributeConverter) MapAttributeConverter.mapConverter(keyConverter, valueConverter); + } + + @SuppressWarnings("unchecked") + private AttributeConverter createSetConverter(EnhancedType type) { + EnhancedType innerType = (EnhancedType) type.rawClassParameters().get(0); + AttributeConverter innerConverter = findConverter(innerType) + .orElseThrow(() -> new IllegalStateException("Converter not found for " + type)); + + return (AttributeConverter) SetAttributeConverter.setConverter(innerConverter); + } + + private static Builder getDefaultBuilder() { + return DefaultAttributeConverterProvider.builder() + .addConverter(AtomicBooleanAttributeConverter.create()) + .addConverter(AtomicIntegerAttributeConverter.create()) + .addConverter(AtomicLongAttributeConverter.create()) + .addConverter(BigDecimalAttributeConverter.create()) + .addConverter(BigIntegerAttributeConverter.create()) + .addConverter(BooleanAttributeConverter.create()) + .addConverter(ByteArrayAttributeConverter.create()) + .addConverter(ByteAttributeConverter.create()) + .addConverter(CharacterArrayAttributeConverter.create()) + .addConverter(CharacterAttributeConverter.create()) + .addConverter(CharSequenceAttributeConverter.create()) + .addConverter(DoubleAttributeConverter.create()) + .addConverter(DurationAttributeConverter.create()) + .addConverter(FloatAttributeConverter.create()) + .addConverter(InstantAsStringAttributeConverter.create()) + .addConverter(IntegerAttributeConverter.create()) + .addConverter(LocalDateAttributeConverter.create()) + .addConverter(LocalDateTimeAttributeConverter.create()) + .addConverter(LocalTimeAttributeConverter.create()) + .addConverter(LongAttributeConverter.create()) + .addConverter(MonthDayAttributeConverter.create()) + .addConverter(OffsetDateTimeAsStringAttributeConverter.create()) + .addConverter(OptionalDoubleAttributeConverter.create()) + .addConverter(OptionalIntAttributeConverter.create()) + .addConverter(OptionalLongAttributeConverter.create()) + .addConverter(PeriodAttributeConverter.create()) + .addConverter(SdkBytesAttributeConverter.create()) + .addConverter(ShortAttributeConverter.create()) + .addConverter(StringAttributeConverter.create()) + .addConverter(StringBufferAttributeConverter.create()) + .addConverter(StringBuilderAttributeConverter.create()) + .addConverter(UriAttributeConverter.create()) + .addConverter(UrlAttributeConverter.create()) + .addConverter(UuidAttributeConverter.create()) + .addConverter(ZonedDateTimeAsStringAttributeConverter.create()) + .addConverter(ZoneIdAttributeConverter.create()) + .addConverter(ZoneOffsetAttributeConverter.create()); + } + + /** + * A builder for configuring and creating {@link DefaultAttributeConverterProvider}s. 
+ */ + public static class Builder { + private List> converters = new ArrayList<>(); + + private Builder() { + } + + public Builder addConverter(AttributeConverter converter) { + Validate.paramNotNull(converter, "converter"); + this.converters.add(converter); + return this; + } + + public DefaultAttributeConverterProvider build() { + return new DefaultAttributeConverterProvider(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Document.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Document.java new file mode 100644 index 000000000000..db7c6493156a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Document.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * A document representing a table item in the form of a map containing attributes and values. + *
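// Illustrative sketch (not part of this change set): obtaining converters from the default provider
// and assembling a DefaultAttributeConverterProvider by hand via the Builder above. The
// CurrencyAttributeConverter reference is the hypothetical converter sketched earlier.
import java.time.Instant;
import java.util.Currency;
import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter;
import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider;
import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider;
import software.amazon.awssdk.enhanced.dynamodb.EnhancedType;

final class ConverterProviderSketch {
    static void demo() {
        // All standard Java type converters, e.g. the Instant converter registered in getDefaultBuilder().
        AttributeConverterProvider defaults = AttributeConverterProvider.defaultProvider();
        AttributeConverter<Instant> instantConverter = defaults.converterFor(EnhancedType.of(Instant.class));

        // A hand-built provider; converterFor() throws IllegalStateException for unregistered types.
        DefaultAttributeConverterProvider currencyOnly =
            DefaultAttributeConverterProvider.builder()
                                             .addConverter(new CurrencyAttributeConverter()) // hypothetical, from the earlier sketch
                                             .build();
        AttributeConverter<Currency> currencyConverter =
            currencyOnly.converterFor(EnhancedType.of(Currency.class));
    }
}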

    + * Use the {@link #getItem(MappedTableResource)} method to transform the collection of attributes into a typed item. + */ +@SdkPublicApi +public interface Document { + + /** + * Get the table item associated with the table schema in the mapped table resource. + * + * @param mappedTableResource the mapped table resource this item was retrieved from + * @param the type of items in the mapped table resource + * @return the item constructed from the document + */ + T getItem(MappedTableResource mappedTableResource); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbAsyncIndex.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbAsyncIndex.java new file mode 100644 index 000000000000..a4c2bce7d9b8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbAsyncIndex.java @@ -0,0 +1,241 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; + +/** + * Asynchronous interface for running commands against an object that is linked to a specific DynamoDb secondary index + * and knows how to map records from the table that index is linked to into a modelled object. + *

    + * By default, all command methods throw an {@link UnsupportedOperationException} to prevent interface extensions from breaking + * implementing classes. + * + * @param The type of the modelled object. + */ +@SdkPublicApi +public interface DynamoDbAsyncIndex { + + /** + * Executes a query against a secondary index using a {@link QueryConditional} expression to retrieve a list of + * items matching the given conditions. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a query call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. Results are sorted by sort key value in + * ascending order by default; this behavior can be overridden in the {@link QueryEnhancedRequest}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link QueryEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API Query operation. Consult the Query documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * QueryConditional queryConditional = QueryConditional.keyEqualTo(Key.builder().partitionValue("id-value").build());
    +     * SdkPublisher> publisher = mappedIndex.query(QueryEnhancedRequest.builder()
    +     *                                                                              .queryConditional(queryConditional)
    +     *                                                                              .build());
    +     * }
    +     * 
    + * + * @param request A {@link QueryEnhancedRequest} defining the query conditions and how + * to handle the results. + * @return a publisher {@link SdkPublisher} with paginated results (see {@link Page}). + */ + default SdkPublisher> query(QueryEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Executes a query against a secondary index using a {@link QueryConditional} expression to retrieve a list of + * items matching the given conditions. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a query call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. Results are sorted by sort key value in + * ascending order by default; this behavior can be overridden in the {@link QueryEnhancedRequest}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link QueryEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API Query operation. Consult the Query documentation for + * further details and constraints. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link QueryEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * SdkPublisher> publisher =
    +     *     mappedIndex.query(r -> r.queryConditional(QueryConditional.keyEqualTo(k -> k.partitionValue("id-value"))));
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link QueryEnhancedRequest} defining the query conditions and how to + * handle the results. + * @return a publisher {@link SdkPublisher} with paginated results (see {@link Page}). + */ + default SdkPublisher> query(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Executes a query against the secondary index of the table using a {@link QueryConditional} expression to retrieve + * a list of items matching the given conditions. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a query call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. Results are sorted by sort key value in + * ascending order. + *

    + * This operation calls the low-level DynamoDB API Query operation. Consult the Query documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * SdkPublisher> results =
    +     *     mappedIndex.query(QueryConditional.keyEqualTo(Key.builder().partitionValue("id-value").build()));
    +     * }
    +     * 
    + * + * @param queryConditional A {@link QueryConditional} defining the matching criteria for records to be queried. + * @return a publisher {@link SdkPublisher} with paginated results (see {@link Page}). + */ + default SdkPublisher> query(QueryConditional queryConditional) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table against a secondary index and retrieves all items. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a scan call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link ScanEnhancedRequest}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * SdkPublisher> publisher = mappedTable.scan(ScanEnhancedRequest.builder().consistentRead(true).build());
    +     * }
    +     * 
    + * + * @param request A {@link ScanEnhancedRequest} defining how to handle the results. + * @return a publisher {@link SdkPublisher} with paginated results (see {@link Page}). + */ + default SdkPublisher> scan(ScanEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table against a secondary index and retrieves all items. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a scan call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link ScanEnhancedRequest}. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link ScanEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * SdkPublisher> publisher = mappedTable.scan(r -> r.limit(5));
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link ScanEnhancedRequest} defining the query conditions and how to + * handle the results. + * @return a publisher {@link SdkPublisher} with paginated results (see {@link Page}). + */ + default SdkPublisher> scan(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table against a secondary index and retrieves all items using default settings. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a scan call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * SdkPublisher> publisher = mappedTable.scan();
    +     * }
    +     * 
    + * + * @return a publisher {@link SdkPublisher} with paginated results (see {@link Page}). + */ + default SdkPublisher> scan() { + throw new UnsupportedOperationException(); + } + + /** + * Gets the {@link DynamoDbEnhancedClientExtension} associated with this mapped resource. + * @return The {@link DynamoDbEnhancedClientExtension} associated with this mapped resource. + */ + DynamoDbEnhancedClientExtension mapperExtension(); + + /** + * Gets the {@link TableSchema} object that this mapped table was built with. + * @return The {@link TableSchema} object for this mapped table. + */ + TableSchema tableSchema(); + + /** + * Gets the physical table name that operations performed by this object will be executed against. + * @return The physical table name. + */ + String tableName(); + + /** + * Gets the physical secondary index name that operations performed by this object will be executed against. + * @return The physical secondary index name. + */ + String indexName(); + + /** + * Creates a {@link Key} object from a modelled item. This key can be used in query conditionals and get + * operations to locate a specific record. + * @param item The item to extract the key fields from. + * @return A key that has been initialized with the index values extracted from the modelled object. + */ + Key keyFrom(T item); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbAsyncTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbAsyncTable.java new file mode 100644 index 000000000000..eb5a4bf15169 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbAsyncTable.java @@ -0,0 +1,670 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
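// Illustrative sketch (not part of this change set): a typical use of the DynamoDbAsyncIndex defined
// above. The index name and partition value are supplied by the caller; obtaining the mapped table
// itself (e.g. via DynamoDbEnhancedAsyncClient#table) is assumed to have happened elsewhere.
import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncIndex;
import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable;
import software.amazon.awssdk.enhanced.dynamodb.Key;
import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional;

final class AsyncIndexQuerySketch {
    // Queries the named secondary index for items whose index partition key equals partitionValue and
    // prints every item; each page retrieved triggers one low-level Query call against the index.
    static <T> void queryIndex(DynamoDbAsyncTable<T> table, String indexName, String partitionValue) {
        DynamoDbAsyncIndex<T> index = table.index(indexName);
        QueryConditional queryConditional =
            QueryConditional.keyEqualTo(Key.builder().partitionValue(partitionValue).build());

        index.query(queryConditional)
             .subscribe(page -> page.items().forEach(System.out::println));
    }
}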
+ */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.PagePublisher; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; + +/** + * Asynchronous interface for running commands against an object that is linked to a specific DynamoDb table resource + * and therefore knows how to map records from that table into a modelled object. + *

    + * By default, all command methods throw an {@link UnsupportedOperationException} to prevent interface extensions from breaking + * implementing classes. + * + * @param The type of the modelled object. + */ +@SdkPublicApi +public interface DynamoDbAsyncTable extends MappedTableResource { + /** + * Returns a mapped index that can be used to execute commands against a secondary index belonging to the table + * being mapped by this object. Note that only a subset of the commands that work against a table will work + * against a secondary index. + * + * @param indexName The name of the secondary index to build the command interface for. + * @return An {@link DynamoDbAsyncIndex} object that can be used to execute database commands against. + */ + DynamoDbAsyncIndex index(String indexName); + + /** + * Creates a new table in DynamoDb with the name and schema already defined for this DynamoDbTable + * together with additional parameters specified in the supplied request object, {@link CreateTableEnhancedRequest}. + *

    + * Use {@link DynamoDbEnhancedClient#table(String, TableSchema)} to define the mapped table resource. + *

    + * This operation calls the low-level DynamoDB API CreateTable operation. Note that this is an asynchronous operation and that + * the table may not immediately be available for writes and reads. + * Consult the CreateTable documentation for further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * ProvisionedThroughput provisionedThroughput = ProvisionedThroughput.builder()
    +     *                                                                    .readCapacityUnits(50L)
    +     *                                                                    .writeCapacityUnits(50L)
    +     *                                                                    .build();
    +     * mappedTable.createTable(CreateTableEnhancedRequest.builder()
    +     *                                                   .provisionedThroughput(provisionedThroughput)
    +     *                                                   .build())
    +     *            .join();
    +     * }
    +     * 
    + * + * @param request A {@link CreateTableEnhancedRequest} containing optional parameters for table creation. + * @return a {@link CompletableFuture} of {@link Void}. + */ + default CompletableFuture createTable(CreateTableEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Creates a new table in DynamoDb with the name and schema already defined for this DynamoDbTable + * together with additional parameters specified in the supplied request object, {@link CreateTableEnhancedRequest}. + *

    + * Use {@link DynamoDbEnhancedClient#table(String, TableSchema)} to define the mapped table resource. + *

    + * This operation calls the low-level DynamoDB API CreateTable operation. Note that this is an asynchronous operation and that + * the table may not immediately be available for writes and reads. + * Consult the CreateTable documentation for further details and constraints. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link CreateTableEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * ProvisionedThroughput provisionedThroughput = ProvisionedThroughput.builder()
    +     *                                                                    .readCapacityUnits(50L)
    +     *                                                                    .writeCapacityUnits(50L)
    +     *                                                                    .build();
    +     * mappedTable.createTable(r -> r.provisionedThroughput(provisionedThroughput)).join();
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link CreateTableEnhancedRequest.Builder} containing optional parameters + * for table creation. + * @return a {@link CompletableFuture} of {@link Void}. + */ + default CompletableFuture createTable(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Creates a new table in DynamoDb with the name and schema already defined for this DynamoDbTable. + *

    + * Use {@link DynamoDbEnhancedClient#table(String, TableSchema)} to define the mapped table resource. + *

    + * This operation calls the low-level DynamoDB API CreateTable operation. Note that this is an asynchronous operation and that + * the table may not immediately be available for writes and reads. Currently, there is no mechanism supported within this + * library to wait for/check the status of a created table. You must provide this functionality yourself. + * Consult the CreateTable documentation for further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * mappedTable.createTable().join();
    +     * }
    +     * 
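+     *
+     * A hedged follow-up sketch (not part of the original Javadoc): because this library does not wait for the new table
+     * to become active, callers could poll the low-level client themselves. The variable name {@code dynamoDbAsyncClient}
+     * below is an assumption for whatever {@link DynamoDbAsyncClient} the enhanced client was built with.
+     *
+     * {@code
+     *
+     * mappedTable.createTable().join();
+     * DescribeTableResponse response =
+     *     dynamoDbAsyncClient.describeTable(r -> r.tableName(mappedTable.tableName())).join();
+     * // response.table().tableStatus() can then be checked, repeating describeTable until it reports ACTIVE.
+     * }
+     *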
    + * + * @return a {@link CompletableFuture} of {@link Void}. + */ + default CompletableFuture createTable() { + throw new UnsupportedOperationException(); + } + + /** + * Deletes a single item from the mapped table using a supplied primary {@link Key}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link DeleteItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API DeleteItem operation. Consult the DeleteItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
+     * MyItem previouslyPersistedItem = mappedTable.deleteItem(DeleteItemEnhancedRequest.builder().key(key).build()).join();
    +     * }
    +     * 
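+     *
+     * A hedged follow-up sketch, assuming the request builder also accepts a condition {@link Expression};
+     * the attribute name {@code id} is illustrative only:
+     *
+     * {@code
+     *
+     * Expression condition = Expression.builder().expression("attribute_exists(id)").build();
+     * MyItem previouslyPersistedItem =
+     *     mappedTable.deleteItem(DeleteItemEnhancedRequest.builder()
+     *                                                     .key(key)
+     *                                                     .conditionExpression(condition)
+     *                                                     .build()).join();
+     * }
+     *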
    + * + * @param request A {@link DeleteItemEnhancedRequest} with key and optional directives for deleting an item from the table. + * @return a {@link CompletableFuture} of the item that was persisted in the database before it was deleted. + */ + default CompletableFuture deleteItem(DeleteItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Deletes a single item from the mapped table using a supplied primary {@link Key}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link DeleteItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API DeleteItem operation. Consult the DeleteItem documentation for + * further details and constraints. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link DeleteItemEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
+     * MyItem previouslyPersistedItem = mappedTable.deleteItem(r -> r.key(key)).join();
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link DeleteItemEnhancedRequest} with key and + * optional directives for deleting an item from the table. + * @return a {@link CompletableFuture} of the item that was persisted in the database before it was deleted. + */ + default CompletableFuture deleteItem(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Deletes a single item from the mapped table using a supplied primary {@link Key}. + *

    + * This operation calls the low-level DynamoDB API DeleteItem operation. Consult the DeleteItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
+     * MyItem previouslyPersistedItem = mappedTable.deleteItem(key).join();
    +     * }
    +     * 
    + * + * @param key A {@link Key} that will be used to match a specific record to delete from the database table. + * @return a {@link CompletableFuture} of the item that was persisted in the database before it was deleted. + */ + default CompletableFuture deleteItem(Key key) { + throw new UnsupportedOperationException(); + } + + /** + * Deletes a single item from the mapped table using just the key of a supplied modelled 'key item' object. + *

    + * This operation calls the low-level DynamoDB API DeleteItem operation. Consult the DeleteItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem previouslyPersistedItem = mappedTable.deleteItem(keyItem).join();
    +     * }
    +     * 
    + * + * @param keyItem A modelled item with the primary key fields set that will be used to match a specific record to + * delete from the database table. + * @return a {@link CompletableFuture} of the item that was persisted in the database before it was deleted. + */ + default CompletableFuture deleteItem(T keyItem) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves a single item from the mapped table using a supplied primary {@link Key}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link GetItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API GetItem operation. Consult the GetItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.getItem(GetItemEnhancedRequest.builder().key(key).build()).join();
    +     * }
    +     * 
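+     *
+     * A hedged variation, assuming the request builder also exposes a consistent-read flag:
+     *
+     * {@code
+     *
+     * MyItem item = mappedTable.getItem(GetItemEnhancedRequest.builder()
+     *                                                         .key(key)
+     *                                                         .consistentRead(true)
+     *                                                         .build()).join();
+     * }
+     *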
+ * + * @param request A {@link GetItemEnhancedRequest} with key and optional directives for retrieving an item from the table. + * @return a {@link CompletableFuture} of the retrieved item. + */ + default CompletableFuture<T> getItem(GetItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves a single item from the mapped table using a supplied primary {@link Key}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link GetItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API GetItem operation. Consult the GetItem documentation for + * further details and constraints. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link GetItemEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.getItem(r -> r.key(key)).join();
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link GetItemEnhancedRequest.Builder} with key and optional directives + * for retrieving an item from the table. + * @return a {@link CompletableFuture} of the retrieved item + */ + default CompletableFuture getItem(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves a single item from the mapped table using a supplied primary {@link Key}. + *

    + * This operation calls the low-level DynamoDB API GetItem operation. Consult the GetItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.getItem(key).join();
    +     * }
    +     * 
    + * + * @param key A {@link Key} that will be used to match a specific record to retrieve from the database table. + * @return a {@link CompletableFuture} of the retrieved item + */ + default CompletableFuture getItem(Key key) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves a single item from the mapped table using just the key of a supplied modelled 'key item'. + *

    + * This operation calls the low-level DynamoDB API GetItem operation. Consult the GetItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.getItem(keyItem).join();
    +     * }
    +     * 
    + * + * @param keyItem A modelled item with the primary key fields set that will be used to match a specific record to + * retrieve from the database table. + * @return a {@link CompletableFuture} of the retrieved item + */ + default CompletableFuture getItem(T keyItem) { + throw new UnsupportedOperationException(); + } + + /** + * Executes a query against the primary index of the table using a {@link QueryConditional} expression to retrieve a list of + * items matching the given conditions. + *

    + * The return type is a custom publisher that can be subscribed to request a stream of {@link Page}s or + * a stream of items across all pages. Results are sorted by sort key value in + * ascending order by default; this behavior can be overridden in the {@link QueryEnhancedRequest}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link QueryEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API Query operation. Consult the Query documentation + * {@link DynamoDbAsyncClient#queryPaginator} for further details and constraints. + *

    + * Example: + *

    + * 1) Subscribing to {@link Page}s + *

    +     * {@code
    +     *
    +     * QueryConditional queryConditional = QueryConditional.keyEqualTo(Key.builder().partitionValue("id-value").build());
+     * PagePublisher<MyItem> publisher = mappedTable.query(QueryEnhancedRequest.builder()
    +     *                                                                         .queryConditional(queryConditional)
    +     *                                                                         .build());
    +     * publisher.subscribe(page -> page.items().forEach(item -> System.out.println(item)));
    +     * }
    +     * 
    + *

    + * 2) Subscribing to items across all pages + *

    +     * {@code
    +     *
    +     * QueryConditional queryConditional = QueryConditional.keyEqualTo(Key.builder().partitionValue("id-value").build());
+     * PagePublisher<MyItem> publisher = mappedTable.query(QueryEnhancedRequest.builder()
    +     *                                                                         .queryConditional(queryConditional)
+     *                                                                         .build());
    +     * publisher.items().subscribe(item -> System.out.println(item));
    +     * }
    +     * 
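+     *
+     * A hedged variation on the request, assuming descending sort order can be requested through the builder's
+     * scanIndexForward setting:
+     *
+     * {@code
+     *
+     * PagePublisher<MyItem> descending = mappedTable.query(QueryEnhancedRequest.builder()
+     *                                                                          .queryConditional(queryConditional)
+     *                                                                          .scanIndexForward(false)
+     *                                                                          .build());
+     * }
+     *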
    + * + * @see #query(Consumer) + * @see #query(QueryConditional) + * @see DynamoDbAsyncClient#queryPaginator + * @param request A {@link QueryEnhancedRequest} defining the query conditions and how + * to handle the results. + * @return a publisher {@link PagePublisher} with paginated results (see {@link Page}). + */ + default PagePublisher query(QueryEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Executes a query against the primary index of the table using a {@link QueryConditional} expression to retrieve a list of + * items matching the given conditions. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link QueryEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
+     * PagePublisher<MyItem> publisher =
    +     *     mappedTable.query(r -> r.queryConditional(QueryConditional.keyEqualTo(k -> k.partitionValue("id-value"))));
    +     * }
    +     * 
    + * + * @see #query(QueryEnhancedRequest) + * @see #query(QueryConditional) + * @see DynamoDbAsyncClient#queryPaginator + * @param requestConsumer A {@link Consumer} of {@link QueryEnhancedRequest} defining the query conditions and how to + * handle the results. + * @return a publisher {@link PagePublisher} with paginated results (see {@link Page}). + */ + default PagePublisher query(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Executes a query against the primary index of the table using a {@link QueryConditional} expression to retrieve a + * list of items matching the given conditions. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a query call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. Results are sorted by sort key value in + * ascending order. + *

    + * This operation calls the low-level DynamoDB API Query operation. Consult the Query documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
+     * PagePublisher<MyItem> results =
    +     *     mappedTable.query(QueryConditional.keyEqualTo(Key.builder().partitionValue("id-value").build()));
    +     * }
    +     * 
    + * + * @see #query(QueryEnhancedRequest) + * @see #query(Consumer) + * @see DynamoDbAsyncClient#queryPaginator + * @param queryConditional A {@link QueryConditional} defining the matching criteria for records to be queried. + * @return a publisher {@link PagePublisher} with paginated results (see {@link Page}). + */ + default PagePublisher query(QueryConditional queryConditional) { + throw new UnsupportedOperationException(); + } + + /** + * Puts a single item in the mapped table. If the table contains an item with the same primary key, it will be replaced with + * this item. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link PutItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API PutItem operation. Consult the PutItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * mappedTable.putItem(PutItemEnhancedRequest.builder(MyItem.class).item(item).build()).join();
    +     * }
    +     * 
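+     *
+     * A hedged follow-up sketch of a conditional put that only succeeds when no item with the same key already exists,
+     * assuming the request builder accepts a condition {@link Expression}; the attribute name {@code id} is illustrative:
+     *
+     * {@code
+     *
+     * Expression condition = Expression.builder().expression("attribute_not_exists(id)").build();
+     * mappedTable.putItem(PutItemEnhancedRequest.builder(MyItem.class)
+     *                                           .item(item)
+     *                                           .conditionExpression(condition)
+     *                                           .build()).join();
+     * }
+     *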
    + * + * @param request A {@link PutItemEnhancedRequest} that includes the item to enter into + * the table, its class and optional directives. + * @return a {@link CompletableFuture} that returns no results which will complete when the operation is done. + */ + default CompletableFuture putItem(PutItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Puts a single item in the mapped table. If the table contains an item with the same primary key, it will be replaced with + * this item. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link PutItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API PutItem operation. Consult the PutItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * mappedTable.putItem(r -> r.item(item)).join();
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link PutItemEnhancedRequest.Builder} that includes the item + * to enter into the table, its class and optional directives. + * @return a {@link CompletableFuture} that returns no results which will complete when the operation is done. + */ + default CompletableFuture putItem(Consumer> requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Puts a single item in the mapped table. If the table contains an item with the same primary key, it will be + * replaced with this item. + *

    + * This operation calls the low-level DynamoDB API PutItem operation. Consult the PutItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
+     * mappedTable.putItem(item).join();
    +     * }
    +     * 
    + * + * @param item the modelled item to be inserted into or overwritten in the database table. + * @return a {@link CompletableFuture} that returns no results which will complete when the operation is done. + */ + default CompletableFuture putItem(T item) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table and retrieves all items. + *

    + * The return type is a custom publisher that can be subscribed to request a stream of {@link Page}s or + * a stream of flattened items across all pages. Each time a result page is retrieved, a scan call is made + * to DynamoDb to get those entries. If no matches are found, the resulting iterator will contain an empty page. + * + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link ScanEnhancedRequest}. + *

    + * Example: + *

    + * 1) Subscribing to {@link Page}s + *

    +     * {@code
    +     *
+     * PagePublisher<MyItem> publisher = mappedTable.scan(ScanEnhancedRequest.builder().consistentRead(true).build());
    +     * publisher.subscribe(page -> page.items().forEach(item -> System.out.println(item)));
    +     * }
    +     * 
    + * + *

    + * 2) Subscribing to items across all pages. + *

    +     * {@code
    +     *
+     * PagePublisher<MyItem> publisher = mappedTable.scan(ScanEnhancedRequest.builder().consistentRead(true).build());
    +     * publisher.items().subscribe(item -> System.out.println(item));
    +     * }
    +     * 
    + * + * @see #scan(Consumer) + * @see #scan() + * @see DynamoDbAsyncClient#scanPaginator + * @param request A {@link ScanEnhancedRequest} defining how to handle the results. + * @return a publisher {@link PagePublisher} with paginated results (see {@link Page}). + */ + default PagePublisher scan(ScanEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table and retrieves all items. + *

    + * Example: + *

    +     * {@code
    +     *
+     * PagePublisher<MyItem> publisher = mappedTable.scan(r -> r.limit(5));
    +     * }
    +     * 
    + * + * @see #scan(ScanEnhancedRequest) + * @see #scan() + * @see DynamoDbAsyncClient#scanPaginator + * @param requestConsumer A {@link Consumer} of {@link ScanEnhancedRequest} defining the query conditions and how to + * handle the results. + * @return a publisher {@link PagePublisher} with paginated results (see {@link Page}). + */ + default PagePublisher scan(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table and retrieves all items using default settings. + * + * Example: + *
    +     * {@code
    +     *
+     * PagePublisher<MyItem> publisher = mappedTable.scan();
    +     * }
    +     * 
    + * @see #scan(ScanEnhancedRequest) + * @see #scan(Consumer) + * @see DynamoDbAsyncClient#scanPaginator + * @return a publisher {@link PagePublisher} with paginated results (see {@link Page}). + */ + default PagePublisher scan() { + throw new UnsupportedOperationException(); + } + + /** + * Updates an item in the mapped table, or adds it if it doesn't exist. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link UpdateItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API UpdateItem operation. Consult the UpdateItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
+     * MyItem updatedItem = mappedTable.updateItem(UpdateItemEnhancedRequest.builder(MyItem.class).item(item).build()).join();
    +     * }
    +     * 
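+     *
+     * A hedged variation, assuming the builder's ignoreNulls flag can be used so that null attributes on the supplied
+     * item are left untouched rather than removed:
+     *
+     * {@code
+     *
+     * MyItem updatedItem = mappedTable.updateItem(UpdateItemEnhancedRequest.builder(MyItem.class)
+     *                                                                      .item(item)
+     *                                                                      .ignoreNulls(true)
+     *                                                                      .build()).join();
+     * }
+     *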
    + * + * @param request A {@link UpdateItemEnhancedRequest} that includes the item to be updated, + * its class and optional directives. + * @return a {@link CompletableFuture} of the updated item + */ + default CompletableFuture updateItem(UpdateItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Updates an item in the mapped table, or adds it if it doesn't exist. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link UpdateItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API UpdateItem operation. Consult the UpdateItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
+     * MyItem updatedItem = mappedTable.updateItem(r -> r.item(item)).join();
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link UpdateItemEnhancedRequest.Builder} that includes the item + * to be updated, its class and optional directives. + * @return a {@link CompletableFuture} of the updated item + */ + default CompletableFuture updateItem(Consumer> requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Updates an item in the mapped table, or adds it if it doesn't exist. + *

    + * This operation calls the low-level DynamoDB API UpdateItem operation. Consult the UpdateItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
+     * MyItem updatedItem = mappedTable.updateItem(item).join();
    +     * }
    +     * 
    + * + * @param item the modelled item to be inserted into or updated in the database table. + * @return a {@link CompletableFuture} of the updated item + */ + default CompletableFuture updateItem(T item) { + throw new UnsupportedOperationException(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedAsyncClient.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedAsyncClient.java new file mode 100644 index 000000000000..e7cb9ff746b7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedAsyncClient.java @@ -0,0 +1,463 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPage; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPagePublisher; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteResult; +import software.amazon.awssdk.enhanced.dynamodb.model.ConditionCheck; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactGetItemsEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactWriteItemsEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; + +/** + * Asynchronous interface for running commands against a DynamoDb database. + *

+ * By default, all command methods throw an {@link UnsupportedOperationException} to prevent interface extensions from breaking + * implementing classes. + */ +@SdkPublicApi +public interface DynamoDbEnhancedAsyncClient extends DynamoDbEnhancedResource { + + /** + * Returns a mapped table that can be used to execute commands that work with mapped items against that table. + * + * @param tableName The name of the physical table persisted by DynamoDb. + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @return A {@link DynamoDbAsyncTable} object that can be used to execute table operations against. + * @param <T> The modelled object type being mapped to this table. + */ + <T> DynamoDbAsyncTable<T> table(String tableName, TableSchema<T> tableSchema); + + /** + * Retrieves items from one or more tables by their primary keys, see {@link Key}. BatchGetItem is a composite operation + * where the request contains one batch of {@link GetItemEnhancedRequest} per targeted table. + * The operation makes several calls to the database; each time you iterate over the result to retrieve a page, + * a call is made for the items on that page. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link BatchGetItemEnhancedRequest}. + *

+ * Partial results. A single call to DynamoDb has restrictions on how much data can be retrieved. + * If those limits are exceeded, the call yields a partial result. This may also be the case if + * provisioned throughput is exceeded or there is an internal DynamoDb processing failure. The operation automatically + * retries any unprocessed keys returned from DynamoDb in subsequent calls for pages. + *

    + * This operation calls the low-level {@link DynamoDbAsyncClient#batchGetItemPaginator} operation. Consult the + * BatchGetItem documentation for further details and constraints as well as current limits of data retrieval. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * BatchGetResultPagePublisher publisher = enhancedClient.batchGetItem(
    +     *     BatchGetItemEnhancedRequest.builder()
    +     *                                .readBatches(ReadBatch.builder(FirstItem.class)
    +     *                                                      .mappedTableResource(firstItemTable)
    +     *                                                      .addGetItem(GetItemEnhancedRequest.builder().key(key1).build())
    +     *                                                      .addGetItem(GetItemEnhancedRequest.builder().key(key2).build())
    +     *                                                      .build(),
    +     *                                             ReadBatch.builder(SecondItem.class)
    +     *                                                      .mappedTableResource(secondItemTable)
    +     *                                                      .addGetItem(GetItemEnhancedRequest.builder().key(key3).build())
    +     *                                                      .build())
    +     *                                .build());
    +     * }
    +     * 
    + * + *

    + * The returned {@link BatchGetResultPagePublisher} can be subscribed to request a stream of {@link BatchGetResultPage}s + * or a stream of flattened results belonging to the supplied table across all pages. + * + *

    + * 1) Subscribing to {@link BatchGetResultPage}s + *

    +     * {@code
    +     * publisher.subscribe(page -> {
    +     *     page.resultsForTable(firstItemTable).forEach(item -> System.out.println(item));
    +     *     page.resultsForTable(secondItemTable).forEach(item -> System.out.println(item));
    +     * });
    +     * }
    +     * 
    + * + *

    + * 2) Subscribing to results across all pages + *

    +     * {@code
    +     * publisher.resultsForTable(firstItemTable).subscribe(item -> System.out.println(item));
    +     * publisher.resultsForTable(secondItemTable).subscribe(item -> System.out.println(item));
    +     * }
    +     * 
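+     *
+     * 3) A hedged sketch of handling failures on the future returned by the subscription; {@code LOG} is an assumed
+     * logger, not part of this API:
+     *
+     * {@code
+     * publisher.subscribe(page -> page.resultsForTable(firstItemTable).forEach(item -> System.out.println(item)))
+     *          .exceptionally(error -> { LOG.error("batch get failed", error); return null; });
+     * }
+     *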
    + * @see #batchGetItem(Consumer) + * @see DynamoDbAsyncClient#batchGetItemPaginator + * @param request A {@link BatchGetItemEnhancedRequest} containing keys grouped by tables. + * @return a publisher {@link SdkPublisher} with paginated results of type {@link BatchGetResultPage}. + */ + default BatchGetResultPagePublisher batchGetItem(BatchGetItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves items from one or more tables by their primary keys, see {@link Key}. BatchGetItem is a composite operation + * where the request contains one batch of {@link GetItemEnhancedRequest} per targeted table. + * The operation makes several calls to the database; each time you iterate over the result to retrieve a page, + * a call is made for the items on that page. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link BatchGetItemEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * BatchGetResultPagePublisher batchResults = enhancedClient.batchGetItem(r -> r.addReadBatches(
    +     *     ReadBatch.builder(FirstItem.class)
    +     *              .mappedTableResource(firstItemTable)
    +     *              .addGetItem(i -> i.key(key1))
    +     *              .addGetItem(i -> i.key(key2))
    +     *              .build(),
    +     *     ReadBatch.builder(SecondItem.class)
    +     *              .mappedTableResource(secondItemTable)
    +     *              .addGetItem(i -> i.key(key3))
    +     *              .build()));
    +     * }
    +     * 
    + * + * @see #batchGetItem(BatchGetItemEnhancedRequest) + * @see DynamoDbAsyncClient#batchGetItem + * @param requestConsumer a {@link Consumer} of {@link BatchGetItemEnhancedRequest.Builder} containing keys grouped by tables. + * @return a publisher {@link SdkPublisher} with paginated results of type {@link BatchGetResultPage}. + */ + default BatchGetResultPagePublisher batchGetItem(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Puts and/or deletes multiple items in one or more tables. BatchWriteItem is a composite operation where the request + * contains one batch of (a mix of) {@link PutItemEnhancedRequest} and {@link DeleteItemEnhancedRequest} per targeted table. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link BatchWriteItemEnhancedRequest}. + *

    + * Note: BatchWriteItem cannot update items. Instead, use the individual updateItem operation + * {@link DynamoDbAsyncTable#updateItem(UpdateItemEnhancedRequest)}. + *

    + * Partial updates
Each delete or put call is atomic, but the operation as a whole is not. + * If individual operations fail due to exceeded provisioned throughput or an internal DynamoDb processing failure, + * the failed requests can be retrieved through the result, see {@link BatchWriteResult}. + *

    + * There are some conditions that cause the whole batch operation to fail. These include non-existing tables, erroneously + * defined primary key attributes, attempting to put and delete the same item as well as referring more than once to the same + * hash and range (sort) key. + *

    + * This operation calls the low-level DynamoDB API BatchWriteItem operation. Consult the BatchWriteItem documentation for + * further details and constraints, current limits of data to write and/or delete, how to handle partial updates and retries + * and under which conditions the operation will fail. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * BatchWriteResult batchResult = enhancedClient.batchWriteItem(
    +     *     BatchWriteItemEnhancedRequest.builder()
    +     *                                  .writeBatches(WriteBatch.builder(FirstItem.class)
    +     *                                                          .mappedTableResource(firstItemTable)
    +     *                                                          .addPutItem(PutItemEnhancedRequest.builder().item(item1).build())
    +     *                                                          .addDeleteItem(DeleteItemEnhancedRequest.builder()
    +     *                                                                                                  .key(key2)
    +     *                                                                                                  .build())
    +     *                                                          .build(),
    +     *                                                WriteBatch.builder(SecondItem.class)
    +     *                                                          .mappedTableResource(secondItemTable)
    +     *                                                          .addPutItem(PutItemEnhancedRequest.builder().item(item3).build())
    +     *                                                          .build())
    +     *                                  .build()).join();
    +     * }
    +     * 
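+     *
+     * A hedged follow-up sketch, assuming unprocessed requests can be read back from the result per table:
+     *
+     * {@code
+     *
+     * List<FirstItem> unprocessedPuts = batchResult.unprocessedPutItemsForTable(firstItemTable);
+     * List<Key> unprocessedDeletes = batchResult.unprocessedDeleteItemsForTable(firstItemTable);
+     * }
+     *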
    + * + * @param request A {@link BatchWriteItemEnhancedRequest} containing keys and items grouped by tables. + * @return a {@link CompletableFuture} of {@link BatchWriteResult}, containing any unprocessed requests. + */ + default CompletableFuture batchWriteItem(BatchWriteItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Puts and/or deletes multiple items in one or more tables. BatchWriteItem is a composite operation where the request + * contains one batch of (a mix of) {@link PutItemEnhancedRequest} and {@link DeleteItemEnhancedRequest} per targeted table. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link BatchWriteItemEnhancedRequest}. + *

+ * Note: BatchWriteItem cannot update items. Instead, use the individual updateItem operation + * {@link DynamoDbAsyncTable#updateItem(UpdateItemEnhancedRequest)}. + *

    + * Partial updates
Each delete or put call is atomic, but the operation as a whole is not. + * If individual operations fail due to exceeded provisioned throughput or an internal DynamoDb processing failure, + * the failed requests can be retrieved through the result, see {@link BatchWriteResult}. + *

    + * There are some conditions that cause the whole batch operation to fail. These include non-existing tables, erroneously + * defined primary key attributes, attempting to put and delete the same item as well as referring more than once to the same + * hash and range (sort) key. + *

    + * This operation calls the low-level DynamoDB API BatchWriteItem operation. Consult the BatchWriteItem documentation for + * further details and constraints, current limits of data to write and/or delete, how to handle partial updates and retries + * and under which conditions the operation will fail. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link BatchWriteItemEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * BatchWriteResult batchResult = enhancedClient.batchWriteItem(r -> r.writeBatches(
    +     *     WriteBatch.builder(FirstItem.class)
    +     *               .mappedTableResource(firstItemTable)
    +     *               .addPutItem(i -> i.item(item1))
    +     *               .addDeleteItem(i -> i.key(key2))
    +     *               .build(),
    +     *     WriteBatch.builder(SecondItem.class)
    +     *               .mappedTableResource(secondItemTable)
    +     *               .addPutItem(i -> i.item(item3))
    +     *               .build())).join();
    +     * }
    +     * 
    + * + * @param requestConsumer a {@link Consumer} of {@link BatchWriteItemEnhancedRequest} containing keys and items grouped by + * tables. + * @return a {@link CompletableFuture} of {@link BatchWriteResult}, containing any unprocessed requests. + */ + default CompletableFuture batchWriteItem(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves multiple items from one or more tables in a single atomic transaction. TransactGetItem is a composite operation + * where the request contains a set of up to 25 get requests, each containing a table reference and a + * {@link GetItemEnhancedRequest}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link TransactGetItemsEnhancedRequest}. + *

    + * DynamoDb will reject a call to TransactGetItems if the call exceeds limits such as provisioned throughput or allowed size + * of items, if the request contains errors or if there are conflicting operations accessing the same item, for instance + * updating and reading at the same time. + *

    + * This operation calls the low-level DynamoDB API TransactGetItems operation. Consult the TransactGetItems documentation for + * further details and constraints. + *

    + * Examples: + *

    +     * {@code
    +     *
+     * List<Document> results = enhancedClient.transactGetItems(
    +     *            TransactGetItemsEnhancedRequest.builder()
    +     *                                           .addGetItem(firstItemTable, GetItemEnhancedRequest.builder().key(key1).build())
    +     *                                           .addGetItem(firstItemTable, GetItemEnhancedRequest.builder().key(key2).build())
    +     *                                           .addGetItem(firstItemTable, GetItemEnhancedRequest.builder().key(key3).build())
    +     *                                           .addGetItem(secondItemTable, GetItemEnhancedRequest.builder().key(key4).build())
    +     *                                           .build()).join();
    +     * }
    +     * 
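+     *
+     * A hedged follow-up sketch, assuming each returned {@link Document} can be unpacked against the table it was
+     * read from:
+     *
+     * {@code
+     *
+     * FirstItem firstItem = results.get(0).getItem(firstItemTable);
+     * }
+     *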
    + * + * @param request A {@link TransactGetItemsEnhancedRequest} containing keys with table references. + * @return a {@link CompletableFuture} containing a list of {@link Document} with the results. + */ + default CompletableFuture> transactGetItems(TransactGetItemsEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves multiple items from one or more tables in a single atomic transaction. TransactGetItem is a composite operation + * where the request contains a set of up to 25 get requests, each containing a table reference and a + * {@link GetItemEnhancedRequest}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link TransactGetItemsEnhancedRequest}. + *

    + * DynamoDb will reject a call to TransactGetItems if the call exceeds limits such as provisioned throughput or allowed size + * of items, if the request contains errors or if there are conflicting operations accessing the same item, for instance + * updating and reading at the same time. + *

    + * This operation calls the low-level DynamoDB API TransactGetItems operation. Consult the TransactGetItems documentation for + * further details and constraints. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link TransactGetItemsEnhancedRequest#builder()}. + *

    + * Examples: + *

    +     * {@code
    +     *
+     * List<Document> results = enhancedClient.transactGetItems(
    +     *     r -> r.addGetItem(firstItemTable, i -> i.key(k -> k.partitionValue(0)))
    +     *           .addGetItem(firstItemTable, i -> i.key(k -> k.partitionValue(1)))
    +     *           .addGetItem(firstItemTable, i -> i.key(k -> k.partitionValue(2)))
    +     *           .addGetItem(secondItemTable, i -> i.key(k -> k.partitionValue(0)))).join();
    +     * }
    +     * 
+ * + * @param requestConsumer a {@link Consumer} of {@link TransactGetItemsEnhancedRequest.Builder} containing keys with table references. + * @return a {@link CompletableFuture} containing a list of {@link Document} with the results. + */ + default CompletableFuture<List<Document>> transactGetItems( + Consumer<TransactGetItemsEnhancedRequest.Builder> requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Writes and/or modifies multiple items from one or more tables in a single atomic transaction. TransactWriteItems is a + * composite operation where the request contains a set of up to 25 action requests, each containing a table reference and + * one of the following requests: + *
+     * <ul>
+     *     <li>Condition check of item - {@link ConditionCheck}</li>
+     *     <li>Delete item - {@link DeleteItemEnhancedRequest}</li>
+     *     <li>Put item - {@link PutItemEnhancedRequest}</li>
+     *     <li>Update item - {@link UpdateItemEnhancedRequest}</li>
+     * </ul>

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link TransactWriteItemsEnhancedRequest}. + *

    + * DynamoDb will reject a call to TransactWriteItems if the call exceeds limits such as provisioned throughput or allowed size + * of items, if the request contains errors or if there are conflicting operations accessing the same item. If the request + * contains condition checks that aren't met, this will also cause rejection. + *

    + * This operation calls the low-level DynamoDB API TransactWriteItems operation. Consult the TransactWriteItems documentation + * for further details and constraints, current limits of data to write and/or delete and under which conditions the operation + * will fail. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * enhancedClient.transactWriteItems(
    +     *     TransactWriteItemsEnhancedRequest.builder()
    +     *                                      .addPutItem(firstItemTable, PutItemEnhancedRequest.builder().item(item1).build())
    +     *                                      .addDeleteItem(firstItemTable, DeleteItemEnhancedRequest.builder().key(key2).build())
    +     *                                      .addConditionCheck(firstItemTable,
    +     *                                                         ConditionCheck.builder()
    +     *                                                                       .key(key3)
    +     *                                                                       .conditionExpression(conditionExpression)
    +     *                                                                       .build())
    +     *                                      .addUpdateItem(secondItemTable,
    +     *                                                     UpdateItemEnhancedRequest.builder().item(item4).build())
    +     *                                      .build()).join();
    +     * }
    +     * 
+ * + * @param request A {@link TransactWriteItemsEnhancedRequest} containing keys and items grouped by tables. + * @return a {@link CompletableFuture} of {@link Void}. + */ + default CompletableFuture<Void> transactWriteItems(TransactWriteItemsEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Writes and/or modifies multiple items from one or more tables in a single atomic transaction. TransactWriteItems is a + * composite operation where the request contains a set of up to 25 action requests, each containing a table reference and + * one of the following requests: + *
+     * <ul>
+     *     <li>Condition check of item - {@link ConditionCheck}</li>
+     *     <li>Delete item - {@link DeleteItemEnhancedRequest}</li>
+     *     <li>Put item - {@link PutItemEnhancedRequest}</li>
+     *     <li>Update item - {@link UpdateItemEnhancedRequest}</li>
+     * </ul>

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link TransactWriteItemsEnhancedRequest}. + *

    + * DynamoDb will reject a call to TransactWriteItems if the call exceeds limits such as provisioned throughput or allowed size + * of items, if the request contains errors or if there are conflicting operations accessing the same item. If the request + * contains condition checks that aren't met, this will also cause rejection. + *

    + * This operation calls the low-level DynamoDB API TransactWriteItems operation. Consult the TransactWriteItems documentation + * for further details and constraints, current limits of data to write and/or delete and under which conditions the operation + * will fail. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link TransactWriteItemsEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * enhancedClient.transactWriteItems(r -> r.addPutItem(firstItemTable, i -> i.item(item1))
    +     *                                         .addDeleteItem(firstItemTable, i -> i.key(k -> k.partitionValue(2)))
    +     *                                         .addConditionCheck(firstItemTable, i -> i.key(key3)
    +     *                                                                                  .conditionExpression(conditionExpression))
    +     *                                         .addUpdateItem(secondItemTable, i -> i.item(item4))).join();
    +     * }
    +     * 
    + * + * @param requestConsumer a {@link Consumer} of {@link TransactWriteItemsEnhancedRequest} containing keys and items grouped by + * tables. + * @return a {@link CompletableFuture} of {@link Void}. + */ + default CompletableFuture transactWriteItems(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Creates a default builder for {@link DynamoDbEnhancedAsyncClient}. + */ + static DynamoDbEnhancedAsyncClient.Builder builder() { + return DefaultDynamoDbEnhancedAsyncClient.builder(); + } + + /** + * Creates a {@link DynamoDbEnhancedClient} with a default {@link DynamoDbAsyncClient} + */ + static DynamoDbEnhancedAsyncClient create() { + return builder().build(); + } + + /** + * The builder definition for a {@link DynamoDbEnhancedAsyncClient}. + */ + interface Builder extends DynamoDbEnhancedResource.Builder { + /** + * The regular low-level SDK client to use with the enhanced client. + * @param dynamoDbClient an initialized {@link DynamoDbAsyncClient} + */ + Builder dynamoDbClient(DynamoDbAsyncClient dynamoDbClient); + + @Override + Builder extensions(DynamoDbEnhancedClientExtension... dynamoDbEnhancedClientExtensions); + + @Override + Builder extensions(List dynamoDbEnhancedClientExtensions); + + /** + * Builds an enhanced client based on the settings supplied to this builder + * @return An initialized {@link DynamoDbEnhancedAsyncClient} + */ + DynamoDbEnhancedAsyncClient build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedClient.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedClient.java new file mode 100644 index 000000000000..fe9dd66d09b4 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedClient.java @@ -0,0 +1,465 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.List; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPage; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPageIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteResult; +import software.amazon.awssdk.enhanced.dynamodb.model.ConditionCheck; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactGetItemsEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactWriteItemsEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.BatchGetItemRequest; + +/** + * Synchronous interface for running commands against a DynamoDb database. + *

    + * By default, all command methods throw an {@link UnsupportedOperationException} to prevent interface extensions from breaking + * implementing classes. + */ +@SdkPublicApi +public interface DynamoDbEnhancedClient extends DynamoDbEnhancedResource { + + /** + * Returns a mapped table that can be used to execute commands that work with mapped items against that table. + * + * @param tableName The name of the physical table persisted by DynamoDb. + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @return A {@link DynamoDbTable} object that can be used to execute table operations against. + * @param The modelled object type being mapped to this table. + */ + DynamoDbTable table(String tableName, TableSchema tableSchema); + + /** + * Retrieves items from one or more tables by their primary keys, see {@link Key}. BatchGetItem is a composite operation + * where the request contains one batch of {@link GetItemEnhancedRequest} per targeted table. + * The operation makes several calls to the database; each time you iterate over the result to retrieve a page, + * a call is made for the items on that page. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link BatchGetItemEnhancedRequest}. + *

+ * Partial results. A single call to DynamoDb has restrictions on how much data can be retrieved. + * If those limits are exceeded, the call yields a partial result. This may also be the case if + * provisioned throughput is exceeded or there is an internal DynamoDb processing failure. The operation automatically + * retries any unprocessed keys returned from DynamoDb in subsequent calls for pages. + *

    + * This operation calls the low-level {@link DynamoDbClient#batchGetItemPaginator} operation. Consult the BatchGetItem + * documentation for further details and constraints as well as current limits of data retrieval. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * BatchGetResultPageIterable batchResults = enhancedClient.batchGetItem(
    +     *            BatchGetItemEnhancedRequest.builder()
    +     *                                       .readBatches(ReadBatch.builder(FirstItem.class)
    +     *                                                             .mappedTableResource(firstItemTable)
    +     *                                                             .addGetItem(GetItemEnhancedRequest.builder().key(key1).build())
    +     *                                                             .addGetItem(GetItemEnhancedRequest.builder().key(key2).build())
    +     *                                                             .build(),
    +     *                                                    ReadBatch.builder(SecondItem.class)
    +     *                                                             .mappedTableResource(secondItemTable)
    +     *                                                             .addGetItem(GetItemEnhancedRequest.builder().key(key3).build())
    +     *                                                             .build())
    +     *                                       .build());
    +     * }
    +     * 
    + * + *

    + * The result can be accessed either through iterable {@link BatchGetResultPage}s or flattened results belonging to the + * supplied table across all pages. + * + *

    + * 1) Iterating through pages + *

    +     * {@code
    +     * batchResults.forEach(page -> {
    +     *     page.resultsForTable(firstItemTable).forEach(item -> System.out.println(item));
    +     *     page.resultsForTable(secondItemTable).forEach(item -> System.out.println(item));
    +     * });
    +     * }
    +     * 
    + * + *

    + * 2) Iterating through results across all pages + *

    +     * {@code
+     * batchResults.resultsForTable(firstItemTable).forEach(item -> System.out.println(item));
+     * batchResults.resultsForTable(secondItemTable).forEach(item -> System.out.println(item));
    +     * }
    +     * 
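+     *
+     * 3) A hedged sketch of collecting the flattened results for one table into a list, assuming the iterable exposes
+     * the SDK's usual {@code stream()} support:
+     *
+     * {@code
+     * List<FirstItem> allFirstItems = batchResults.resultsForTable(firstItemTable)
+     *                                             .stream()
+     *                                             .collect(Collectors.toList());
+     * }
+     *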
    + * + * @param request A {@link BatchGetItemEnhancedRequest} containing keys grouped by tables. + * @return an iterator of type {@link SdkIterable} with paginated results of type {@link BatchGetResultPage}. + * @see #batchGetItem(Consumer) + * @see DynamoDbClient#batchGetItemPaginator + */ + default BatchGetResultPageIterable batchGetItem(BatchGetItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves items from one or more tables by their primary keys, see {@link Key}. BatchGetItem is a composite operation + * where the request contains one batch of {@link GetItemEnhancedRequest} per targeted table. + * The operation makes several calls to the database; each time you iterate over the result to retrieve a page, + * a call is made for the items on that page. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link BatchGetItemEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * BatchGetResultPageIterable batchResults = enhancedClient.batchGetItem(r -> r.addReadBatches(
    +     *     ReadBatch.builder(FirstItem.class)
    +     *              .mappedTableResource(firstItemTable)
    +     *              .addGetItem(i -> i.key(key1))
    +     *              .addGetItem(i -> i.key(key2))
    +     *              .build(),
    +     *     ReadBatch.builder(SecondItem.class)
    +     *              .mappedTableResource(secondItemTable)
    +     *              .addGetItem(i -> i.key(key3))
    +     *              .build()));
    +     * }
    +     * 
    + * + * @param requestConsumer a {@link Consumer} of {@link BatchGetItemEnhancedRequest.Builder} containing keys grouped by tables. + * @return an iterator of type {@link SdkIterable} with paginated results of type {@link BatchGetResultPage}. + * @see #batchGetItem(BatchGetItemEnhancedRequest) + * @see DynamoDbClient#batchGetItemPaginator(BatchGetItemRequest) + */ + default BatchGetResultPageIterable batchGetItem(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Puts and/or deletes multiple items in one or more tables. BatchWriteItem is a composite operation where the request + * contains one batch of (a mix of) {@link PutItemEnhancedRequest} and {@link DeleteItemEnhancedRequest} per targeted table. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link BatchWriteItemEnhancedRequest}. + *

    + * Note: BatchWriteItem cannot update items. Instead, use the individual updateItem operation + * {@link DynamoDbTable#updateItem(UpdateItemEnhancedRequest)}. + *

+ * Partial updates: Each delete or put call is atomic, but the operation as a whole is not. If individual operations
+ * fail due to exceeded provisioned throughput or internal DynamoDb processing failures, the failed requests can be
+ * retrieved through the result; see {@link BatchWriteResult}.
+ *

    + * There are some conditions that cause the whole batch operation to fail. These include non-existing tables, erroneously + * defined primary key attributes, attempting to put and delete the same item as well as referring more than once to the same + * hash and range (sort) key. + *

    + * This operation calls the low-level DynamoDB API BatchWriteItem operation. Consult the BatchWriteItem documentation for + * further details and constraints, current limits of data to write and/or delete, how to handle partial updates and retries + * and under which conditions the operation will fail. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * BatchWriteResult batchResult = enhancedClient.batchWriteItem(
    +     *     BatchWriteItemEnhancedRequest.builder()
    +     *                                  .writeBatches(WriteBatch.builder(FirstItem.class)
    +     *                                                          .mappedTableResource(firstItemTable)
    +     *                                                          .addPutItem(PutItemEnhancedRequest.builder().item(item1).build())
    +     *                                                          .addDeleteItem(DeleteItemEnhancedRequest.builder()
    +     *                                                                                                  .key(key2)
    +     *                                                                                                  .build())
    +     *                                                          .build(),
    +     *                                                WriteBatch.builder(SecondItem.class)
    +     *                                                          .mappedTableResource(secondItemTable)
    +     *                                                          .addPutItem(PutItemEnhancedRequest.builder().item(item3).build())
    +     *                                                          .build())
    +     *                                  .build());
    +     * }
    +     * 
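As described above, puts or deletes that DynamoDb leaves unprocessed are reported through the returned {@link BatchWriteResult} rather than thrown. A hedged follow-up sketch using the batchResult and firstItemTable names from the example; the one-by-one retry is illustrative only and omits backoff:

    // Hedged sketch: drain requests the service reported back as unprocessed.
    List<FirstItem> unprocessedPuts = batchResult.unprocessedPutItemsForTable(firstItemTable);
    List<Key> unprocessedDeletes = batchResult.unprocessedDeleteItemsForTable(firstItemTable);
    unprocessedPuts.forEach(firstItemTable::putItem);        // re-submit each put individually (no backoff)
    unprocessedDeletes.forEach(firstItemTable::deleteItem);  // uses the deleteItem(Key) overload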
    + * + * @param request A {@link BatchWriteItemEnhancedRequest} containing keys and items grouped by tables. + * @return a {@link BatchWriteResult} containing any unprocessed requests. + */ + default BatchWriteResult batchWriteItem(BatchWriteItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Puts and/or deletes multiple items in one or more tables. BatchWriteItem is a composite operation where the request + * contains one batch of (a mix of) {@link PutItemEnhancedRequest} and {@link DeleteItemEnhancedRequest} per targeted table. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link BatchWriteItemEnhancedRequest}. + *

    + * Note: BatchWriteItem cannot update items. Instead, use the individual updateItem operation + * {@link DynamoDbTable#updateItem(UpdateItemEnhancedRequest)}. + *

+ * Partial updates: Each delete or put call is atomic, but the operation as a whole is not. If individual operations
+ * fail due to exceeded provisioned throughput or internal DynamoDb processing failures, the failed requests can be
+ * retrieved through the result; see {@link BatchWriteResult}.
+ *

    + * There are some conditions that cause the whole batch operation to fail. These include non-existing tables, erroneously + * defined primary key attributes, attempting to put and delete the same item as well as referring more than once to the same + * hash and range (sort) key. + *

    + * This operation calls the low-level DynamoDB API BatchWriteItem operation. Consult the BatchWriteItem documentation for + * further details and constraints, current limits of data to write and/or delete, how to handle partial updates and retries + * and under which conditions the operation will fail. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link BatchWriteItemEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * BatchWriteResult batchResult = enhancedClient.batchWriteItem(r -> r.writeBatches(
    +     *     WriteBatch.builder(FirstItem.class)
    +     *               .mappedTableResource(firstItemTable)
    +     *               .addPutItem(i -> i.item(item1))
    +     *               .addDeleteItem(i -> i.key(key2))
    +     *               .build(),
    +     *     WriteBatch.builder(SecondItem.class)
    +     *               .mappedTableResource(secondItemTable)
    +     *               .addPutItem(i -> i.item(item3))
    +     *               .build()));
    +     * }
    +     * 
+ * + * @param requestConsumer a {@link Consumer} of {@link BatchWriteItemEnhancedRequest} containing keys and items grouped by + * tables. + * @return a {@link BatchWriteResult} containing any unprocessed requests. + */ + default BatchWriteResult batchWriteItem(Consumer<BatchWriteItemEnhancedRequest.Builder> requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves multiple items from one or more tables in a single atomic transaction. TransactGetItem is a composite operation + * where the request contains a set of up to 25 get requests, each containing a table reference and a + * {@link GetItemEnhancedRequest}. The list of results corresponds to the ordering of the request definitions; for example + * the third addGetItem() call on the request builder will match the third result (index 2) in the result list. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link TransactGetItemsEnhancedRequest}. + *

    + * DynamoDb will reject a call to TransactGetItems if the call exceeds limits such as provisioned throughput or allowed size + * of items, if the request contains errors or if there are conflicting operations accessing the same item, for instance + * updating and reading at the same time. + *

    + * This operation calls the low-level DynamoDB API TransactGetItems operation. Consult the TransactGetItems documentation for + * further details and constraints. + *

    + * Examples: + *

    +     * {@code
    +     *
+     * List<Document> results = enhancedClient.transactGetItems(
    +     *            TransactGetItemsEnhancedRequest.builder()
    +     *                                           .addGetItem(firstItemTable, GetItemEnhancedRequest.builder().key(key1).build())
    +     *                                           .addGetItem(firstItemTable, GetItemEnhancedRequest.builder().key(key2).build())
    +     *                                           .addGetItem(firstItemTable, GetItemEnhancedRequest.builder().key(key3).build())
    +     *                                           .addGetItem(secondItemTable, GetItemEnhancedRequest.builder().key(key4).build())
    +     *                                           .build());
    +     * MyItem item = results.get(3).getItem(secondItemTable);
    +     * }
    +     * 
+ * + * @param request A {@link TransactGetItemsEnhancedRequest} containing keys with table references. + * @return a list of {@link Document} with the results. + */ + default List<Document> transactGetItems(TransactGetItemsEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves multiple items from one or more tables in a single atomic transaction. TransactGetItem is a composite operation + * where the request contains a set of up to 25 get requests, each containing a table reference and a + * {@link GetItemEnhancedRequest}. The list of results corresponds to the ordering of the request definitions; for example + * the third addGetItem() call on the request builder will match the third result (index 2) in the result list. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link TransactGetItemsEnhancedRequest}. + *

    + * DynamoDb will reject a call to TransactGetItems if the call exceeds limits such as provisioned throughput or allowed size + * of items, if the request contains errors or if there are conflicting operations accessing the same item, for instance + * updating and reading at the same time. + *

    + * This operation calls the low-level DynamoDB API TransactGetItems operation. Consult the TransactGetItems documentation for + * further details and constraints. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link TransactGetItemsEnhancedRequest#builder()}. + *

    + * Examples: + *

    +     * {@code
    +     *
+     * List<Document> results = enhancedClient.transactGetItems(
    +     *     r -> r.addGetItem(firstItemTable, i -> i.key(k -> k.partitionValue(0)))
    +     *           .addGetItem(firstItemTable, i -> i.key(k -> k.partitionValue(1)))
    +     *           .addGetItem(firstItemTable, i -> i.key(k -> k.partitionValue(2)))
    +     *           .addGetItem(secondItemTable, i -> i.key(k -> k.partitionValue(0))));
    +     * MyItem item = results.get(3).getItem(secondItemTable);
    +     * }
    +     * 
+ * + * @param requestConsumer a {@link Consumer} of {@link TransactGetItemsEnhancedRequest} containing keys with table references. + * @return a list of {@link Document} with the results. + */ + default List<Document> transactGetItems(Consumer<TransactGetItemsEnhancedRequest.Builder> requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Writes and/or modifies multiple items from one or more tables in a single atomic transaction. TransactWriteItem is a + * composite operation where the request contains a set of up to 25 action requests, each containing a table reference and + * one of the following requests: + *
+ * <ul>
+ *     <li>Condition check of item - {@link ConditionCheck}</li>
+ *     <li>Delete item - {@link DeleteItemEnhancedRequest}</li>
+ *     <li>Put item - {@link PutItemEnhancedRequest}</li>
+ *     <li>Update item - {@link UpdateItemEnhancedRequest}</li>
+ * </ul>
+ *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link TransactWriteItemsEnhancedRequest}. + *

    + * DynamoDb will reject a call to TransactWriteItems if the call exceeds limits such as provisioned throughput or allowed size + * of items, if the request contains errors or if there are conflicting operations accessing the same item. If the request + * contains condition checks that aren't met, this will also cause rejection. + *

    + * This operation calls the low-level DynamoDB API TransactWriteItems operation. Consult the TransactWriteItems documentation + * for further details and constraints, current limits of data to write and/or delete and under which conditions the operation + * will fail. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * result = enhancedClient.transactWriteItems(
    +     *     TransactWriteItemsEnhancedRequest.builder()
    +     *                                      .addPutItem(firstItemTable, PutItemEnhancedRequest.builder().item(item1).build())
    +     *                                      .addDeleteItem(firstItemTable, DeleteItemEnhancedRequest.builder().key(key2).build())
    +     *                                      .addConditionCheck(firstItemTable,
    +     *                                                         ConditionCheck.builder()
    +     *                                                                       .key(key3)
    +     *                                                                       .conditionExpression(conditionExpression)
    +     *                                                                       .build())
    +     *                                      .addUpdateItem(secondItemTable,
    +     *                                                     UpdateItemEnhancedRequest.builder().item(item4).build())
    +     *                                      .build());
    +     * }
    +     * 
+ * + * @param request A {@link TransactWriteItemsEnhancedRequest} containing keys and items grouped by tables. + */ + default Void transactWriteItems(TransactWriteItemsEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Writes and/or modifies multiple items from one or more tables in a single atomic transaction. TransactWriteItem is a + * composite operation where the request contains a set of up to 25 action requests, each containing a table reference and + * one of the following requests: + *
+ * <ul>
+ *     <li>Condition check of item - {@link ConditionCheck}</li>
+ *     <li>Delete item - {@link DeleteItemEnhancedRequest}</li>
+ *     <li>Put item - {@link PutItemEnhancedRequest}</li>
+ *     <li>Update item - {@link UpdateItemEnhancedRequest}</li>
+ * </ul>
+ *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link TransactWriteItemsEnhancedRequest}. + *

    + * DynamoDb will reject a call to TransactWriteItems if the call exceeds limits such as provisioned throughput or allowed size + * of items, if the request contains errors or if there are conflicting operations accessing the same item. If the request + * contains condition checks that aren't met, this will also cause rejection. + *

    + * This operation calls the low-level DynamoDB API TransactWriteItems operation. Consult the TransactWriteItems documentation + * for further details and constraints, current limits of data to write and/or delete and under which conditions the operation + * will fail. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link TransactWriteItemsEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * result = enhancedClient.transactWriteItems(r -> r.addPutItem(firstItemTable, i -> i.item(item1))
    +     *                                                  .addDeleteItem(firstItemTable, i -> i.key(k -> k.partitionValue(2)))
    +     *                                                  .addConditionCheck(firstItemTable,
    +     *                                                                 i -> i.key(key3).conditionExpression(conditionExpression))
    +     *                                                  .addUpdateItem(secondItemTable, i -> i.item(item4)));
    +     * }
    +     * 
    + * + * @param requestConsumer a {@link Consumer} of {@link TransactWriteItemsEnhancedRequest} containing keys and items grouped + * by tables. + */ + default Void transactWriteItems(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Creates a default builder for {@link DynamoDbEnhancedClient}. + */ + static Builder builder() { + return DefaultDynamoDbEnhancedClient.builder(); + } + + /** + * Creates a {@link DynamoDbEnhancedClient} with a default {@link DynamoDbClient} + */ + static DynamoDbEnhancedClient create() { + return builder().build(); + } + + /** + * The builder definition for a {@link DynamoDbEnhancedClient}. + */ + interface Builder extends DynamoDbEnhancedResource.Builder { + /** + * The regular low-level SDK client to use with the enhanced client. + * @param dynamoDbClient an initialized {@link DynamoDbClient} + */ + Builder dynamoDbClient(DynamoDbClient dynamoDbClient); + + @Override + Builder extensions(DynamoDbEnhancedClientExtension... dynamoDbEnhancedClientExtensions); + + @Override + Builder extensions(List dynamoDbEnhancedClientExtensions); + + /** + * Builds an enhanced client based on the settings supplied to this builder + * @return An initialized {@link DynamoDbEnhancedClient} + */ + DynamoDbEnhancedClient build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedClientExtension.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedClientExtension.java new file mode 100644 index 000000000000..13fbd52f16a2 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedClientExtension.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; + +/** + * Interface for extending the DynamoDb Enhanced client. Two hooks are provided, one that is called just before a record + * is written to the database, and one called just after a record is read from the database. This gives the extension the + * opportunity to act as an invisible layer between the application and the database and transform the data accordingly. + *

    + * Multiple extensions can be used with the enhanced client, but the order in which they are loaded is important. For + * instance one extension may overwrite the value of an attribute that another extension then includes in a checksum + * calculation. + */ +@SdkPublicApi +public interface DynamoDbEnhancedClientExtension { + /** + * This hook is called just before an operation is going to write data to the database. The extension that + * implements this method can choose to transform the item itself, or add a condition to the write operation + * or both. + * + * @param context The {@link DynamoDbExtensionContext.BeforeWrite} context containing the state of the execution. + * @return A {@link WriteModification} object that can alter the behavior of the write operation. + */ + default WriteModification beforeWrite(DynamoDbExtensionContext.BeforeWrite context) { + return WriteModification.builder().build(); + } + + /** + * This hook is called just after an operation that has read data from the database. The extension that + * implements this method can choose to transform the item, and then it is the transformed item that will be + * mapped back to the application instead of the item that was actually read from the database. + * + * @param context The {@link DynamoDbExtensionContext.AfterRead} context containing the state of the execution. + * @return A {@link ReadModification} object that can alter the results of a read operation. + */ + default ReadModification afterRead(DynamoDbExtensionContext.AfterRead context) { + return ReadModification.builder().build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedResource.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedResource.java new file mode 100644 index 000000000000..0b63d1172139 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedResource.java @@ -0,0 +1,54 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.List; +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * Shared interface components for {@link DynamoDbEnhancedClient} and {@link DynamoDbEnhancedAsyncClient}. Any common + * methods implemented by both of those classes or their builders are declared here. + */ +@SdkPublicApi +public interface DynamoDbEnhancedResource { + /** + * Shared interface components for the builders of {@link DynamoDbEnhancedClient} and + * {@link DynamoDbEnhancedAsyncClient} + */ + interface Builder { + /** + * Specifies the extensions to load with the enhanced client. The extensions will be loaded in the strict order + * they are supplied here. 
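To make the hook mechanism and the strict-order loading described here concrete, a hedged sketch of a custom extension and of registering it. The LastModifiedExtension class, the 'lastModified' attribute and the surrounding wiring are illustrative assumptions, not part of this changeset; the extension API calls themselves are those defined in the files above.

    import java.time.Instant;
    import java.util.HashMap;
    import java.util.Map;
    import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient;
    import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension;
    import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext;
    import software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension;
    import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification;
    import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    /** Illustrative extension: stamps a 'lastModified' attribute on every record before it is written. */
    public final class LastModifiedExtension implements DynamoDbEnhancedClientExtension {
        @Override
        public WriteModification beforeWrite(DynamoDbExtensionContext.BeforeWrite context) {
            Map<String, AttributeValue> transformed = new HashMap<>(context.items());
            transformed.put("lastModified", AttributeValue.builder().s(Instant.now().toString()).build());
            return WriteModification.builder().transformedItem(transformed).build();
        }
        // afterRead is not overridden, so the default no-op ReadModification applies to reads.

        /** Registration: supplying extensions replaces the defaults, so VersionedRecordExtension is re-added first. */
        public static DynamoDbEnhancedClient newEnhancedClient() {
            return DynamoDbEnhancedClient.builder()
                                         .dynamoDbClient(DynamoDbClient.create())
                                         .extensions(VersionedRecordExtension.builder().build(),
                                                     new LastModifiedExtension())
                                         .build();
        }
    }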
Calling this method will override any bundled extensions that are loaded by default, + * namely the {@link software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension}, so this + * extension must be included in the supplied list otherwise it will not be loaded. Providing an empty list here + * will cause no extensions to get loaded, effectively dropping the default ones. + * + * @param dynamoDbEnhancedClientExtensions a list of extensions to load with the enhanced client + */ + Builder extensions(DynamoDbEnhancedClientExtension... dynamoDbEnhancedClientExtensions); + + /** + * Specifies the extensions to load with the enhanced client. The extensions will be loaded in the strict order + * they are supplied here. Calling this method will override any bundled extensions that are loaded by default, + * namely the {@link software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension}, so this + * extension must be included in the supplied list otherwise it will not be loaded. Providing an empty list here + * will cause no extensions to get loaded, effectively dropping the default ones. + * + * @param dynamoDbEnhancedClientExtensions a list of extensions to load with the enhanced client + */ + Builder extensions(List dynamoDbEnhancedClientExtensions); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbExtensionContext.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbExtensionContext.java new file mode 100644 index 000000000000..960386d8406f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbExtensionContext.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.Map; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A wrapper for the immutable context objects that are visible to the {@link DynamoDbEnhancedClientExtension}s. + */ +@SdkPublicApi +public final class DynamoDbExtensionContext { + private DynamoDbExtensionContext() { + } + + @SdkPublicApi + public interface Context { + /** + * @return The {@link AttributeValue} map of the items that is about to be written or has just been read. + */ + Map items(); + + /** + * @return The context under which the operation to be modified is taking place. + */ + OperationContext operationContext(); + + /** + * @return A {@link TableMetadata} object describing the structure of the modelled table. + */ + TableMetadata tableMetadata(); + } + + /** + * The state of the execution when the {@link DynamoDbEnhancedClientExtension#beforeWrite} method is invoked. 
+ */ + @SdkPublicApi + public interface BeforeWrite extends Context { + } + + /** + * The state of the execution when the {@link DynamoDbEnhancedClientExtension#afterRead} method is invoked. + */ + @SdkPublicApi + public interface AfterRead extends Context { + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbIndex.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbIndex.java new file mode 100644 index 000000000000..a9725ae1557c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbIndex.java @@ -0,0 +1,241 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; + +/** + * Synchronous interface for running commands against an object that is linked to a specific DynamoDb secondary index + * and knows how to map records from the table that index is linked to into a modelled object. + *

    + * By default, all command methods throw an {@link UnsupportedOperationException} to prevent interface extensions from breaking + * implementing classes. + * + * @param The type of the modelled object. + */ +@SdkPublicApi +public interface DynamoDbIndex { + + /** + * Executes a query against a secondary index using a {@link QueryConditional} expression to retrieve a list of + * items matching the given conditions. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a query call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. Results are sorted by sort key value in + * ascending order by default; this behavior can be overridden in the {@link QueryEnhancedRequest}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link QueryEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API Query operation. Consult the Query documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * QueryConditional queryConditional = QueryConditional.keyEqualTo(Key.builder().partitionValue("id-value").build());
+     * Iterator<Page<MyItem>> results = mappedIndex.query(QueryEnhancedRequest.builder()
    +     *                                                                        .queryConditional(queryConditional)
    +     *                                                                        .build());
    +     * }
    +     * 
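The mappedIndex referenced in the example above has to be obtained from a mapped table first. A hedged sketch of that wiring; the table name "my_table", the index name "my_gsi" and the bean-mapped MyItem class are illustrative assumptions:

    // Hedged sketch: obtaining a DynamoDbIndex from the enhanced client and a mapped table.
    DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.create();
    DynamoDbTable<MyItem> mappedTable =
        enhancedClient.table("my_table", TableSchema.fromBean(MyItem.class)); // MyItem: annotated bean (assumption)
    DynamoDbIndex<MyItem> mappedIndex = mappedTable.index("my_gsi");          // a GSI defined on the table (assumption)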
    + * + * @param request A {@link QueryEnhancedRequest} defining the query conditions and how + * to handle the results. + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default SdkIterable> query(QueryEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Executes a query against a secondary index using a {@link QueryConditional} expression to retrieve a list of + * items matching the given conditions. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a query call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. Results are sorted by sort key value in + * ascending order by default; this behavior can be overridden in the {@link QueryEnhancedRequest}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link QueryEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API Query operation. Consult the Query documentation for + * further details and constraints. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link QueryEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
+     * Iterator<Page<MyItem>> results =
    +     *     mappedIndex.query(r -> r.queryConditional(QueryConditional.keyEqualTo(k -> k.partitionValue("id-value"))));
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link QueryEnhancedRequest} defining the query conditions and how to + * handle the results. + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default SdkIterable> query(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Executes a query against the secondary index of the table using a {@link QueryConditional} expression to retrieve + * a list of items matching the given conditions. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a query call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. Results are sorted by sort key value in + * ascending order. + *

    + * This operation calls the low-level DynamoDB API Query operation. Consult the Query documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
+     * Iterator<Page<MyItem>> results =
    +     *     mappedIndex.query(QueryConditional.keyEqualTo(Key.builder().partitionValue("id-value").build()));
    +     * }
    +     * 
    + * + * @param queryConditional A {@link QueryConditional} defining the matching criteria for records to be queried. + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default SdkIterable> query(QueryConditional queryConditional) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table against a secondary index and retrieves all items. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a scan call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link ScanEnhancedRequest}. + *

    + * Example: + *

    +     * {@code
    +     *
+     * Iterator<Page<MyItem>> results = mappedTable.scan(ScanEnhancedRequest.builder().consistentRead(true).build());
    +     * }
    +     * 
    + * + * @param request A {@link ScanEnhancedRequest} defining how to handle the results. + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default SdkIterable> scan(ScanEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table against a secondary index and retrieves all items. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a scan call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link ScanEnhancedRequest}. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link ScanEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
+     * Iterator<Page<MyItem>> results = mappedTable.scan(r -> r.limit(5));
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link ScanEnhancedRequest} defining the query conditions and how to + * handle the results. + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default SdkIterable> scan(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table against a secondary index and retrieves all items using default settings. + *

    + * The result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a scan call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. + *

    + * Example: + *

    +     * {@code
    +     *
+     * Iterator<Page<MyItem>> results = mappedTable.scan();
    +     * }
    +     * 
    + * + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default SdkIterable> scan() { + throw new UnsupportedOperationException(); + } + + /** + * Gets the {@link DynamoDbEnhancedClientExtension} associated with this mapped resource. + * @return The {@link DynamoDbEnhancedClientExtension} associated with this mapped resource. + */ + DynamoDbEnhancedClientExtension mapperExtension(); + + /** + * Gets the {@link TableSchema} object that this mapped table was built with. + * @return The {@link TableSchema} object for this mapped table. + */ + TableSchema tableSchema(); + + /** + * Gets the physical table name that operations performed by this object will be executed against. + * @return The physical table name. + */ + String tableName(); + + /** + * Gets the physical secondary index name that operations performed by this object will be executed against. + * @return The physical secondary index name. + */ + String indexName(); + + /** + * Creates a {@link Key} object from a modelled item. This key can be used in query conditionals and get + * operations to locate a specific record. + * @param item The item to extract the key fields from. + * @return A key that has been initialized with the index values extracted from the modelled object. + */ + Key keyFrom(T item); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbTable.java new file mode 100644 index 000000000000..1a1765cbdeba --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbTable.java @@ -0,0 +1,656 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * Synchronous interface for running commands against an object that is linked to a specific DynamoDb table resource + * and therefore knows how to map records from that table into a modelled object. + *

    + * By default, all command methods throw an {@link UnsupportedOperationException} to prevent interface extensions from + * breaking implementing classes. + *

    + * @param The type of the modelled object. + */ +@SdkPublicApi +public interface DynamoDbTable extends MappedTableResource { + /** + * Returns a mapped index that can be used to execute commands against a secondary index belonging to the table + * being mapped by this object. Note that only a subset of the commands that work against a table will work + * against a secondary index. + * + * @param indexName The name of the secondary index to build the command interface for. + * @return A {@link DynamoDbIndex} object that can be used to execute database commands against. + */ + DynamoDbIndex index(String indexName); + + /** + * Creates a new table in DynamoDb with the name and schema already defined for this DynamoDbTable + * together with additional parameters specified in the supplied request object, {@link CreateTableEnhancedRequest}. + *

    + * Use {@link DynamoDbEnhancedClient#table(String, TableSchema)} to define the mapped table resource. + *

    + * This operation calls the low-level DynamoDB API CreateTable operation. Note that this is an asynchronous + * operation and that the table may not immediately be available for writes and reads. Currently, there is no + * mechanism supported within this library to wait for/check the status of a created table. You must provide this + * functionality yourself. Consult the CreateTable documentation for further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * ProvisionedThroughput provisionedThroughput = ProvisionedThroughput.builder()
    +     *                                                                    .readCapacityUnits(50L)
    +     *                                                                    .writeCapacityUnits(50L)
    +     *                                                                    .build();
    +     * mappedTable.createTable(CreateTableEnhancedRequest.builder()
    +     *                                                   .provisionedThroughput(provisionedThroughput)
    +     *                                                   .build());
    +     * }
    +     * 
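Since the paragraph above notes that the library does not wait for the new table to become usable, callers need their own readiness check. A hedged sketch that polls the low-level client; the one-second interval and the absence of a timeout are illustrative simplifications:

    // Hedged sketch: poll DescribeTable until the newly created table reports ACTIVE.
    static void waitForTableActive(DynamoDbClient dynamoDbClient, String tableName) throws InterruptedException {
        String status;
        do {
            Thread.sleep(1_000); // fixed 1s poll; production code should add a timeout and backoff
            status = dynamoDbClient.describeTable(r -> r.tableName(tableName)).table().tableStatusAsString();
        } while (!"ACTIVE".equals(status));
    }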
    + * + * @param request A {@link CreateTableEnhancedRequest} containing optional parameters for table creation. + */ + default void createTable(CreateTableEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Creates a new table in DynamoDb with the name and schema already defined for this DynamoDbTable + * together with additional parameters specified in the supplied request object, {@link CreateTableEnhancedRequest}. + *

    + * Use {@link DynamoDbEnhancedClient#table(String, TableSchema)} to define the mapped table resource. + *

    + * This operation calls the low-level DynamoDB API CreateTable operation. Note that this is an asynchronous + * operation and that the table may not immediately be available for writes and reads. Currently, there is no + * mechanism supported within this library to wait for/check the status of a created table. You must provide this + * functionality yourself. Consult the CreateTable documentation for further details and constraints. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to + * create one manually via {@link CreateTableEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * ProvisionedThroughput provisionedThroughput = ProvisionedThroughput.builder()
    +     *                                                                    .readCapacityUnits(50L)
    +     *                                                                    .writeCapacityUnits(50L)
    +     *                                                                    .build();
    +     * mappedTable.createTable(r -> r.provisionedThroughput(provisionedThroughput));
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link CreateTableEnhancedRequest.Builder} containing optional + * parameters for table creation. + */ + default void createTable(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Creates a new table in DynamoDb with the name and schema already defined for this DynamoDbTable. + *

    + * Use {@link DynamoDbEnhancedClient#table(String, TableSchema)} to define the mapped table resource. + *

    + * This operation calls the low-level DynamoDB API CreateTable operation. Note that this is an asynchronous + * operation and that the table may not immediately be available for writes and reads. Currently, there is no + * mechanism supported within this library to wait for/check the status of a created table. You must provide this + * functionality yourself. Consult the CreateTable documentation for further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * mappedTable.createTable();
    +     * }
    +     * 
    + * + */ + default void createTable() { + throw new UnsupportedOperationException(); + } + + /** + * Deletes a single item from the mapped table using a supplied primary {@link Key}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link DeleteItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API DeleteItem operation. Consult the DeleteItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem previouslyPersistedItem = mappedTable.delete(DeleteItemEnhancedRequest.builder().key(key).build());
    +     * }
    +     * 
    + * + * @param request A {@link DeleteItemEnhancedRequest} with key and optional directives for deleting an item from the + * table. + * @return The item that was persisted in the database before it was deleted. + */ + default T deleteItem(DeleteItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Deletes a single item from the mapped table using a supplied primary {@link Key}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link DeleteItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API DeleteItem operation. Consult the DeleteItem documentation for + * further details and constraints. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to + * create one manually via {@link DeleteItemEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem previouslyPersistedItem = mappedTable.delete(r -> r.key(key));
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link DeleteItemEnhancedRequest} with key and + * optional directives for deleting an item from the table. + * @return The item that was persisted in the database before it was deleted. + */ + default T deleteItem(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Deletes a single item from the mapped table using a supplied primary {@link Key}. + *

    + * This operation calls the low-level DynamoDB API DeleteItem operation. Consult the DeleteItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem previouslyPersistedItem = mappedTable.delete(key);
    +     * }
    +     * 
    + * + * @param key A {@link Key} that will be used to match a specific record to delete from the database table. + * @return The item that was persisted in the database before it was deleted. + */ + default T deleteItem(Key key) { + throw new UnsupportedOperationException(); + } + + /** + * Deletes a single item from the mapped table using just the key of a supplied modelled 'key item' object. + *

    + * This operation calls the low-level DynamoDB API DeleteItem operation. Consult the DeleteItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem previouslyPersistedItem = mappedTable.deleteItem(keyItem);
    +     * }
    +     * 
    + * + * @param keyItem A modelled item with the primary key fields set that will be used to match a specific record to + * delete from the database table. + * @return The item that was persisted in the database before it was deleted. + */ + default T deleteItem(T keyItem) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves a single item from the mapped table using a supplied primary {@link Key}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link GetItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API GetItem operation. Consult the GetItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.getItem(GetItemEnhancedRequest.builder().key(key).build());
    +     * }
    +     * 
    + * + * @param request A {@link GetItemEnhancedRequest} with key and optional directives for retrieving an item from the + * table. + * @return The retrieved item + */ + default T getItem(GetItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves a single item from the mapped table using a supplied primary {@link Key}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link GetItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API GetItem operation. Consult the GetItem documentation for + * further details and constraints. + *

    + * Note: This is a convenience method that creates an instance of the request builder avoiding the need to + * create one manually via {@link GetItemEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.getItem(r -> r.key(key));
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link GetItemEnhancedRequest.Builder} with key and optional + * directives for retrieving an item from the table. + * @return The retrieved item + */ + default T getItem(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves a single item from the mapped table using a supplied primary {@link Key}. + *

    + * This operation calls the low-level DynamoDB API GetItem operation. Consult the GetItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.getItem(key);
    +     * }
    +     * 
    + * + * @param key A {@link Key} that will be used to match a specific record to retrieve from the database table. + * @return The retrieved item + */ + default T getItem(Key key) { + throw new UnsupportedOperationException(); + } + + /** + * Retrieves a single item from the mapped table using just the key of a supplied modelled 'key item'. + *

    + * This operation calls the low-level DynamoDB API GetItem operation. Consult the GetItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.getItem(keyItem);
    +     * }
    +     * 
    + * + * @param keyItem A modelled item with the primary key fields set that will be used to match a specific record to + * retrieve from the database table. + * @return The retrieved item + */ + default T getItem(T keyItem) { + throw new UnsupportedOperationException(); + } + + /** + * Executes a query against the primary index of the table using a {@link QueryConditional} expression to retrieve a + * list of items matching the given conditions. + *

    + * The result can be accessed either through iterable {@link Page}s or {@link Page#items()} directly. If you are iterating + * the pages, the result is accessed through iterable pages (see {@link Page}) in an interactive way; each time a + * result page is retrieved, a query call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. Results are sorted by sort key value in ascending order by default; + * this behavior can be overridden in the {@link QueryEnhancedRequest}. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link QueryEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API Query operation. Consult the Query documentation for + * further details and constraints. + *

    + * Example: + *

    + * 1) Iterating through pages + * + *

    +     * {@code
    +     * QueryConditional queryConditional = QueryConditional.keyEqualTo(Key.builder().partitionValue("id-value").build());
+     * PageIterable<MyItem> results = table.query(QueryEnhancedRequest.builder()
    +     *                                                                        .queryConditional(queryConditional)
    +     *                                                                        .build());
+     * results.stream().forEach(p -> p.items().forEach(item -> System.out.println(item)));
    +     * }
    +     * 
    + * + * 2) Iterating through items + * + *
    +     * {@code
    +     * results.items().stream().forEach(item -> System.out.println(item));
    +     * }
    +     * 
    + * + * @see #query(QueryConditional) + * @see #query(Consumer) + * @see DynamoDbClient#queryPaginator + * @param request A {@link QueryEnhancedRequest} defining the query conditions and how + * to handle the results. + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default PageIterable query(QueryEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link QueryEnhancedRequest#builder()}. + *

    + * Example: + *

    +     * {@code
    +     *
+     * PageIterable<MyItem> results =
    +     *     mappedTable.query(r -> r.queryConditional(QueryConditional.keyEqualTo(k -> k.partitionValue("id-value"))));
    +     * }
    +     * 
    + * @see #query(QueryEnhancedRequest) + * @see #query(QueryConditional) + * @param requestConsumer A {@link Consumer} of {@link QueryEnhancedRequest} defining the query conditions and how to + * handle the results. + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default PageIterable query(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Executes a query against the primary index of the table using a {@link QueryConditional} expression to retrieve a + * list of items matching the given conditions. + *

    + * Example: + *

    +     * {@code
    +     *
+     * PageIterable<MyItem> results =
    +     *     mappedTable.query(QueryConditional.keyEqualTo(Key.builder().partitionValue("id-value").build()));
    +     * }
    +     * 
    + * + * @see #query(QueryEnhancedRequest) + * @see #query(Consumer) + * @see DynamoDbClient#queryPaginator + * @param queryConditional A {@link QueryConditional} defining the matching criteria for records to be queried. + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default PageIterable query(QueryConditional queryConditional) { + throw new UnsupportedOperationException(); + } + + /** + * Puts a single item in the mapped table. If the table contains an item with the same primary key, it will be + * replaced with this item. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link PutItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API PutItem operation. Consult the PutItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * mappedTable.putItem(PutItemEnhancedRequest.builder(MyItem.class).item(item).build());
    +     * }
    +     * 
    + * + * @param request A {@link PutItemEnhancedRequest} that includes the item to enter into + * the table, its class and optional directives. + */ + default void putItem(PutItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Puts a single item in the mapped table. If the table contains an item with the same primary key, it will be + * replaced with this item. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link PutItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API PutItem operation. Consult the PutItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * mappedTable.putItem(r -> r.item(item));
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link PutItemEnhancedRequest.Builder} that includes the item + * to enter into the table, its class and optional directives. + */ + default void putItem(Consumer> requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Puts a single item in the mapped table. If the table contains an item with the same primary key, it will be + * replaced with this item. + *

    + * This operation calls the low-level DynamoDB API PutItem operation. Consult the PutItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * mappedTable.putItem(item);
    +     * }
    +     * 
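For context, a minimal end-to-end sketch of how such a mapped table might be obtained and used; MyItem is a hypothetical bean mapped for the enhanced client, "my_table" is an assumed pre-existing table, and imports are omitted:

    DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder()
                                                                  .dynamoDbClient(DynamoDbClient.create())
                                                                  .build();
    DynamoDbTable<MyItem> mappedTable = enhancedClient.table("my_table", TableSchema.fromBean(MyItem.class));

    MyItem item = new MyItem();
    item.setId("id-value");        // hypothetical partition key property
    mappedTable.putItem(item);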
    + * + * @param item the modelled item to be inserted into or overwritten in the database table. + */ + default void putItem(T item) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table and retrieves all items. + *

+ * The result can be accessed either through iterable {@link Page}s or items across all pages directly. Each time a + * result page is retrieved, a scan call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link ScanEnhancedRequest}. + *

    + * Example: + *

    + * 1) Iterating through pages + *

    +     * {@code
    +     *
    +     * PageIterable results = mappedTable.scan(ScanEnhancedRequest.builder().consistentRead(true).build());
    +     * results.stream().forEach(p -> p.items().forEach(item -> System.out.println(item)))
    +     * }
    +     * 
    + * + *

    + * 2) Iterating through items + *

    +     * {@code
    +     *
    +     * PageIterable results = mappedTable.scan(ScanEnhancedRequest.builder().consistentRead(true).build());
    +     * results.items().stream().forEach(item -> System.out.println(item));
    +     * }
    +     * 
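A hedged sketch of a filtered scan using the Expression type from this module; the "value" attribute and the ":min_value" token are assumptions:

    ScanEnhancedRequest request = ScanEnhancedRequest.builder()
        .consistentRead(true)
        .filterExpression(Expression.builder()
                                    .expression("#v >= :min_value")
                                    .putExpressionName("#v", "value")                                     // hypothetical attribute
                                    .putExpressionValue(":min_value", AttributeValue.builder().n("3").build())
                                    .build())
        .build();

    mappedTable.scan(request).items().forEach(System.out::println);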
    + * + * @see #scan(Consumer) + * @see #scan() + * @see DynamoDbClient#scanPaginator + * @param request A {@link ScanEnhancedRequest} defining how to handle the results. + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default PageIterable scan(ScanEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * This is a convenience method that creates an instance of the request builder avoiding the need to create one + * manually via {@link ScanEnhancedRequest#builder()}. + * + *

    + * Example: + *

    +     * {@code
    +     *
    +     * PageIterable results = mappedTable.scan(r -> r.limit(5));
    +     * }
    +     * 
    + * + * @see #scan(ScanEnhancedRequest) + * @see #scan() + * @param requestConsumer A {@link Consumer} of {@link ScanEnhancedRequest} defining the query conditions and how to + * handle the results. + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default PageIterable scan(Consumer requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Scans the table and retrieves all items using default settings. + *

+ * The result can be accessed either through iterable {@link Page}s or items across all pages directly. Each time a + * result page is retrieved, a scan call is made to DynamoDb to get those entries. If no matches are found, + * the resulting iterator will contain an empty page. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * PageIterable results = mappedTable.scan();
    +     * }
    +     * 
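As a small sketch building on the items view shown above, all records can be collected into a list in one pass. This materializes every item, so it is only reasonable for small tables; MyItem is hypothetical:

    List<MyItem> allItems = mappedTable.scan()
                                       .items()
                                       .stream()
                                       .collect(Collectors.toList());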
    + * + * @see #scan(ScanEnhancedRequest) + * @see #scan(Consumer) + * @see DynamoDbClient#scanPaginator + * @return an iterator of type {@link SdkIterable} with paginated results (see {@link Page}). + */ + default PageIterable scan() { + throw new UnsupportedOperationException(); + } + + /** + * Updates an item in the mapped table, or adds it if it doesn't exist. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link UpdateItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API UpdateItem operation. Consult the UpdateItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.updateItem(UpdateItemEnhancedRequest.builder(MyItem.class).item(item).build());
    +     * }
    +     * 
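A hedged sketch showing the ignoreNulls flag on the same request builder, which leaves attributes that are null on the bean untouched instead of removing them from the stored record; MyItem is hypothetical:

    MyItem updated = mappedTable.updateItem(UpdateItemEnhancedRequest.builder(MyItem.class)
                                                                     .item(item)
                                                                     .ignoreNulls(true)
                                                                     .build());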
    + * + * @param request A {@link UpdateItemEnhancedRequest} that includes the item to be updated, + * its class and optional directives. + * @return The updated item + */ + default T updateItem(UpdateItemEnhancedRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Updates an item in the mapped table, or adds it if it doesn't exist. + *

    + * The additional configuration parameters that the enhanced client supports are defined + * in the {@link UpdateItemEnhancedRequest}. + *

    + * This operation calls the low-level DynamoDB API UpdateItem operation. Consult the UpdateItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.updateItem(r -> r.item(item));
    +     * }
    +     * 
    + * + * @param requestConsumer A {@link Consumer} of {@link UpdateItemEnhancedRequest.Builder} that includes the item + * to be updated, its class and optional directives. + * @return The updated item + */ + default T updateItem(Consumer> requestConsumer) { + throw new UnsupportedOperationException(); + } + + /** + * Updates an item in the mapped table, or adds it if it doesn't exist. + *

    + * This operation calls the low-level DynamoDB API UpdateItem operation. Consult the UpdateItem documentation for + * further details and constraints. + *

    + * Example: + *

    +     * {@code
    +     *
    +     * MyItem item = mappedTable.updateItem(item);
    +     * }
    +     * 
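A hedged read-modify-write sketch; it assumes the table interface's getItem(Key) operation (defined elsewhere, not shown in this hunk) and a hypothetical counter property on MyItem:

    MyItem current = mappedTable.getItem(Key.builder().partitionValue("id-value").build());
    current.setCounter(current.getCounter() + 1);     // hypothetical attribute
    MyItem persisted = mappedTable.updateItem(current);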
    + * + * @param item the modelled item to be inserted into or updated in the database table. + * @return The updated item + */ + default T updateItem(T item) { + throw new UnsupportedOperationException(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedType.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedType.java new file mode 100644 index 000000000000..c5761e5358f9 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedType.java @@ -0,0 +1,558 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import static java.util.stream.Collectors.toList; + +import java.lang.reflect.GenericArrayType; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.lang.reflect.TypeVariable; +import java.lang.reflect.WildcardType; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Deque; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.NavigableSet; +import java.util.Optional; +import java.util.Set; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.concurrent.ConcurrentMap; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.DefaultParameterizedType; +import software.amazon.awssdk.utils.Validate; + +/** + * Similar to {@link Class}, this represents a specific raw class type. Unlike {@code Class}, this allows representing type + * parameters that would usually be erased. + * + * @see #EnhancedType() + * @see #of(Class) + * @see #listOf(Class) + * @see #mapOf(Class, Class) + */ +@SdkPublicApi +@ThreadSafe +@Immutable +public class EnhancedType { + private final boolean isWildcard; + private final Class rawClass; + private final List> rawClassParameters; + private final TableSchema tableSchema; + + /** + * Create a type token, capturing the generic type arguments of the token as {@link Class}es. + * + *

    + * This must be called from an anonymous subclass. For example, + * {@code new EnhancedType>(){}} (note the extra {}) for a {@code EnhancedType>}. + */ + protected EnhancedType() { + this(null); + } + + private EnhancedType(Type type) { + if (type == null) { + type = captureGenericTypeArguments(); + } + + + if (type instanceof WildcardType) { + this.isWildcard = true; + this.rawClass = null; + this.rawClassParameters = null; + this.tableSchema = null; + } else { + this.isWildcard = false; + this.rawClass = validateAndConvert(type); + this.rawClassParameters = loadTypeParameters(type); + this.tableSchema = null; + } + } + + private EnhancedType(Class rawClass, List> rawClassParameters, TableSchema tableSchema) { + // This is only used internally, so we can make sure this cast is safe via testing. + this.rawClass = (Class) rawClass; + this.isWildcard = false; + this.rawClassParameters = rawClassParameters; + this.tableSchema = tableSchema; + } + + /** + * Create a type token for the provided non-parameterized class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType of(Class type) { + return new EnhancedType<>(type); + } + + /** + * Create a type token for the provided non-parameterized class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
+ */ + public static EnhancedType of(Type type) { + return new EnhancedType<>(type); + } + + /** + * Create a type token for an optional, with the provided value type class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> optionalOf(Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(Optional.class, valueType)); + } + + /** + * Create a type token for a list, with the provided value type class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> listOf(Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(List.class, valueType)); + } + + /** + * Create a type token for a list, with the provided value type class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> listOf(EnhancedType valueType) { + return new EnhancedType<>(List.class, Arrays.asList(valueType), null); + } + + /** + * Create a type token for a set, with the provided value type class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> setOf(Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(Set.class, valueType)); + } + + /** + * Create a type token for a set, with the provided value type class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> setOf(EnhancedType valueType) { + return new EnhancedType<>(Set.class, Arrays.asList(valueType), null); + } + + /** + * Create a type token for a sorted set, with the provided value type class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> sortedSetOf(Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(SortedSet.class, valueType)); + } + + /** + * Create a type token for a sorted set, with the provided value type class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> sortedSetOf(EnhancedType valueType) { + return new EnhancedType<>(SortedSet.class, Arrays.asList(valueType), null); + } + + /** + * Create a type token for a deque, with the provided value type class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> dequeOf(Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(Deque.class, valueType)); + } + + /** + * Create a type token for a deque, with the provided value type token. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> dequeOf(EnhancedType valueType) { + return new EnhancedType<>(Deque.class, Arrays.asList(valueType), null); + } + + /** + * Create a type token for a navigable set, with the provided value type class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> navigableSetOf(Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(NavigableSet.class, valueType)); + } + + /** + * Create a type token for a navigable set, with the provided value type token. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> navigableSetOf(EnhancedType valueType) { + return new EnhancedType<>(NavigableSet.class, Arrays.asList(valueType), null); + } + + /** + * Create a type token for a collection, with the provided value type class. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> collectionOf(Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(Collection.class, valueType)); + } + + /** + * Create a type token for a collection, with the provided value type token. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided type is null.
    + */ + public static EnhancedType> collectionOf(EnhancedType valueType) { + return new EnhancedType<>(Collection.class, Arrays.asList(valueType), null); + } + + /** + * Create a type token for a map, with the provided key and value type classes. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided types are null.
    + */ + public static EnhancedType> mapOf(Class keyType, Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(Map.class, keyType, valueType)); + } + + /** + * Create a type token for a map, with the provided key and value type classes. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided types are null.
    + */ + public static EnhancedType> mapOf(EnhancedType keyType, EnhancedType valueType) { + return new EnhancedType<>(Map.class, Arrays.asList(keyType, valueType), null); + } + + /** + * Create a type token for a sorted map, with the provided key and value type classes. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided types are null.
    + */ + public static EnhancedType> sortedMapOf(Class keyType, Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(SortedMap.class, keyType, valueType)); + } + + /** + * Create a type token for a sorted map, with the provided key and value type classes. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided types are null.
    + */ + public static EnhancedType> sortedMapOf(EnhancedType keyType, + EnhancedType valueType) { + return new EnhancedType<>(SortedMap.class, Arrays.asList(keyType, valueType), null); + } + + /** + * Create a type token for a concurrent map, with the provided key and value type classes. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided types are null.
    + */ + public static EnhancedType> concurrentMapOf(Class keyType, Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(ConcurrentMap.class, keyType, valueType)); + } + + /** + * Create a type token for a concurrent map, with the provided key and value type classes. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided types are null.
    + */ + public static EnhancedType> concurrentMapOf(EnhancedType keyType, + EnhancedType valueType) { + return new EnhancedType<>(ConcurrentMap.class, Arrays.asList(keyType, valueType), null); + } + + /** + * Create a type token for a navigable map, with the provided key and value type classes. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided types are null.
    + */ + public static EnhancedType> navigableMapOf(Class keyType, Class valueType) { + return new EnhancedType<>(DefaultParameterizedType.parameterizedType(NavigableMap.class, keyType, valueType)); + } + + /** + * Create a type token for a navigable map, with the provided key and value type classes. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}: + *

+ *     1. If the provided types are null.
    + */ + public static EnhancedType> navigableMapOf(EnhancedType keyType, + EnhancedType valueType) { + return new EnhancedType<>(NavigableMap.class, Arrays.asList(keyType, valueType), null); + } + + /** + * Create a type token that represents a document that is specified by the provided {@link TableSchema}. + * + * @param documentClass The Class representing the modeled document. + * @param documentTableSchema A TableSchema that describes the properties of the document. + * @return a new {@link EnhancedType} representing the provided document. + */ + public static EnhancedType documentOf(Class documentClass, TableSchema documentTableSchema) { + return new EnhancedType<>(documentClass, null, documentTableSchema); + } + + private static Type validateIsSupportedType(Type type) { + Validate.validState(type != null, "Type must not be null."); + Validate.validState(!(type instanceof GenericArrayType), + "Array type %s is not supported. Use java.util.List instead of arrays.", type); + Validate.validState(!(type instanceof TypeVariable), "Type variable type %s is not supported.", type); + + if (type instanceof WildcardType) { + WildcardType wildcardType = (WildcardType) type; + Validate.validState(wildcardType.getUpperBounds().length == 1 && wildcardType.getUpperBounds()[0] == Object.class, + "Non-Object wildcard type upper bounds are not supported."); + Validate.validState(wildcardType.getLowerBounds().length == 0, + "Wildcard type lower bounds are not supported."); + } + + return type; + } + + /** + * Returns whether or not the type this {@link EnhancedType} was created with is a wildcard type. + */ + public boolean isWildcard() { + return isWildcard; + } + + /** + * Retrieve the {@link Class} object that this type token represents. + * + * e.g. For {@code EnhancedType}, this would return {@code String.class}. + */ + public Class rawClass() { + Validate.isTrue(!isWildcard, "A wildcard type is not expected here."); + return rawClass; + } + + /** + * Retrieve the {@link TableSchema} for a modeled document. This is used for + * converting nested documents within a schema. + */ + public Optional> tableSchema() { + return Optional.ofNullable(tableSchema); + } + + /** + * Retrieve the {@link Class} objects of any type parameters for the class that this type token represents. + * + *

+ * e.g. For {@code EnhancedType<List<String>>}, this would return {@code String.class}, and {@link #rawClass()} would + * return {@code List.class}. + * + *

    + * If there are no type parameters, this will return an empty list. + */ + public List> rawClassParameters() { + Validate.isTrue(!isWildcard, "A wildcard type is not expected here."); + return rawClassParameters; + } + + private Type captureGenericTypeArguments() { + Type superclass = getClass().getGenericSuperclass(); + + ParameterizedType parameterizedSuperclass = + Validate.isInstanceOf(ParameterizedType.class, superclass, "%s isn't parameterized", superclass); + + return parameterizedSuperclass.getActualTypeArguments()[0]; + } + + private Class validateAndConvert(Type type) { + validateIsSupportedType(type); + + if (type instanceof Class) { + return (Class) type; + } else if (type instanceof ParameterizedType) { + ParameterizedType parameterizedType = (ParameterizedType) type; + return validateAndConvert(parameterizedType.getRawType()); + } else { + throw new IllegalStateException("Unsupported type: " + type); + } + } + + private List> loadTypeParameters(Type type) { + if (!(type instanceof ParameterizedType)) { + return Collections.emptyList(); + } + + ParameterizedType parameterizedType = (ParameterizedType) type; + + return Collections.unmodifiableList( + Arrays.stream(parameterizedType.getActualTypeArguments()) + .peek(t -> Validate.validState(t != null, "Invalid type argument.")) + .map(EnhancedType::new) + .collect(toList())); + } + + private StringBuilder innerToString() { + StringBuilder result = new StringBuilder(); + result.append(rawClass.getTypeName()); + + if (null != rawClassParameters && !rawClassParameters.isEmpty()) { + result.append("<"); + result.append(rawClassParameters.stream().map(EnhancedType::innerToString).collect(Collectors.joining(", "))); + result.append(">"); + } + + return result; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof EnhancedType)) { + return false; + } + + EnhancedType enhancedType = (EnhancedType) o; + + if (isWildcard != enhancedType.isWildcard) { + return false; + } + if (!rawClass.equals(enhancedType.rawClass)) { + return false; + } + if (rawClassParameters != null ? !rawClassParameters.equals(enhancedType.rawClassParameters) : + enhancedType.rawClassParameters != null) { + return false; + } + + return tableSchema != null ? tableSchema.equals(enhancedType.tableSchema) : enhancedType.tableSchema == null; + } + + @Override + public int hashCode() { + int result = (isWildcard ? 1 : 0); + result = 31 * result + rawClass.hashCode(); + result = 31 * result + (rawClassParameters != null ? rawClassParameters.hashCode() : 0); + result = 31 * result + (tableSchema != null ? tableSchema.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "EnhancedType(" + innerToString() + ")"; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Expression.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Expression.java new file mode 100644 index 000000000000..42f3561744fa --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Expression.java @@ -0,0 +1,286 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * High-level representation of a DynamoDB 'expression' that can be used in various situations where the API requires + * or accepts an expression. In addition various convenience methods are provided to help manipulate expressions. + *

    + * At a minimum, an expression must contain a string that is the expression itself. + *

    + * Optionally, attribute names can be substituted with tokens using the '#name_token' syntax; also attribute values can + * be substituted with tokens using the ':value_token' syntax. If tokens are used in the expression then the values or + * names associated with those tokens must be explicitly added to the expressionValues and expressionNames maps + * respectively that are also stored on this object. + *

    + * Example:- + * {@code + * Expression myExpression = Expression.builder() + * .expression("#a = :b") + * .putExpressionName("#a", "myAttribute") + * .putExpressionValue(":b", myAttributeValue) + * .build(); + * } + */ +@SdkPublicApi +public final class Expression { + private final String expression; + private final Map expressionValues; + private final Map expressionNames; + + private Expression(String expression, + Map expressionValues, + Map expressionNames) { + this.expression = expression; + this.expressionValues = expressionValues; + this.expressionNames = expressionNames; + } + + /** + * Constructs a new expression builder. + * @return a new expression builder. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Coalesces two complete expressions into a single expression. The expression string will be joined using the + * supplied join token, and the ExpressionNames and ExpressionValues maps will be merged. + * @param expression1 The first expression to coalesce + * @param expression2 The second expression to coalesce + * @param joinToken The join token to be used to join the expression strings (e.g.: 'AND', 'OR') + * @return The coalesced expression + * @throws IllegalArgumentException if a conflict occurs when merging ExpressionNames or ExpressionValues + */ + public static Expression join(Expression expression1, Expression expression2, String joinToken) { + if (expression1 == null) { + return expression2; + } + + if (expression2 == null) { + return expression1; + } + + return Expression.builder() + .expression(joinExpressions(expression1.expression, expression2.expression, joinToken)) + .expressionValues(joinValues(expression1.expressionValues(), + expression2.expressionValues())) + .expressionNames(joinNames(expression1.expressionNames(), + expression2.expressionNames())) + .build(); + } + + /** + * Coalesces two expression strings into a single expression string. The expression string will be joined using the + * supplied join token. + * @param expression1 The first expression string to coalesce + * @param expression2 The second expression string to coalesce + * @param joinToken The join token to be used to join the expression strings (e.g.: 'AND', 'OR) + * @return The coalesced expression + */ + public static String joinExpressions(String expression1, String expression2, String joinToken) { + if (expression1 == null) { + return expression2; + } + + if (expression2 == null) { + return expression1; + } + + return "(" + expression1 + ")" + joinToken + "(" + expression2 + ")"; + } + + /** + * Coalesces two ExpressionValues maps into a single ExpressionValues map. The ExpressionValues map is an optional + * component of an expression. + * @param expressionValues1 The first ExpressionValues map + * @param expressionValues2 The second ExpressionValues map + * @return The coalesced ExpressionValues map + * @throws IllegalArgumentException if a conflict occurs when merging ExpressionValues + */ + public static Map joinValues(Map expressionValues1, + Map expressionValues2) { + if (expressionValues1 == null) { + return expressionValues2; + } + + if (expressionValues2 == null) { + return expressionValues1; + } + + Map result = new HashMap<>(expressionValues1); + expressionValues2.forEach((key, value) -> { + AttributeValue oldValue = result.put(key, value); + + if (oldValue != null && !oldValue.equals(value)) { + throw new IllegalArgumentException( + String.format("Attempt to coalesce two expressions with conflicting expression values. 
" + + "Expression value key = '%s'", key)); + } + }); + + return Collections.unmodifiableMap(result); + } + + /** + * Coalesces two ExpressionNames maps into a single ExpressionNames map. The ExpressionNames map is an optional + * component of an expression. + * @param expressionNames1 The first ExpressionNames map + * @param expressionNames2 The second ExpressionNames map + * @return The coalesced ExpressionNames map + * @throws IllegalArgumentException if a conflict occurs when merging ExpressionNames + */ + public static Map joinNames(Map expressionNames1, + Map expressionNames2) { + if (expressionNames1 == null) { + return expressionNames2; + } + + if (expressionNames2 == null) { + return expressionNames1; + } + + Map result = new HashMap<>(expressionNames1); + expressionNames2.forEach((key, value) -> { + String oldValue = result.put(key, value); + + if (oldValue != null && !oldValue.equals(value)) { + throw new IllegalArgumentException( + String.format("Attempt to coalesce two expressions with conflicting expression names. " + + "Expression name key = '%s'", key)); + } + }); + + return Collections.unmodifiableMap(result); + } + + public String expression() { + return expression; + } + + public Map expressionValues() { + return expressionValues; + } + + public Map expressionNames() { + return expressionNames; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Expression that = (Expression) o; + + if (expression != null ? ! expression.equals(that.expression) : that.expression != null) { + return false; + } + if (expressionValues != null ? ! expressionValues.equals(that.expressionValues) : + that.expressionValues != null) { + return false; + } + return expressionNames != null ? expressionNames.equals(that.expressionNames) : that.expressionNames == null; + } + + @Override + public int hashCode() { + int result = expression != null ? expression.hashCode() : 0; + result = 31 * result + (expressionValues != null ? expressionValues.hashCode() : 0); + result = 31 * result + (expressionNames != null ? expressionNames.hashCode() : 0); + return result; + } + + /** + * A builder for {@link Expression} + */ + public static final class Builder { + private String expression; + private Map expressionValues; + private Map expressionNames; + + private Builder() { + } + + /** + * The expression string + */ + public Builder expression(String expression) { + this.expression = expression; + return this; + } + + /** + * The optional 'expression values' token map + */ + public Builder expressionValues(Map expressionValues) { + this.expressionValues = expressionValues == null ? null : new HashMap<>(expressionValues); + return this; + } + + /** + * Adds a single element to the optional 'expression values' token map + */ + public Builder putExpressionValue(String key, AttributeValue value) { + if (this.expressionValues == null) { + this.expressionValues = new HashMap<>(); + } + + this.expressionValues.put(key, value); + return this; + } + + /** + * The optional 'expression names' token map + */ + public Builder expressionNames(Map expressionNames) { + this.expressionNames = expressionNames == null ? 
null : new HashMap<>(expressionNames); + return this; + } + + /** + * Adds a single element to the optional 'expression names' token map + */ + public Builder putExpressionName(String key, String value) { + if (this.expressionNames == null) { + this.expressionNames = new HashMap<>(); + } + + this.expressionNames.put(key, value); + return this; + } + + /** + * Builds an {@link Expression} based on the values stored in this builder + */ + public Expression build() { + return new Expression(expression, + expressionValues == null ? null : Collections.unmodifiableMap(expressionValues), + expressionNames == null ? null : Collections.unmodifiableMap(expressionNames)); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/IndexMetadata.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/IndexMetadata.java new file mode 100644 index 000000000000..f38058102e14 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/IndexMetadata.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * A metadata class that stores information about an index + */ +@SdkPublicApi +public interface IndexMetadata { + /** + * The name of the index + */ + String name(); + + /** + * The partition key for the index; if there is one. + */ + Optional partitionKey(); + + /** + * The sort key for the index; if there is one. + */ + Optional sortKey(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Key.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Key.java new file mode 100644 index 000000000000..a3406787a42b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Key.java @@ -0,0 +1,225 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.nullAttributeValue; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.Validate; + +/** + * An object that represents a key that can be used to either identify a specific record or form part of a query + * conditional. Keys are literal and hence not typed, and can be re-used in commands for different modelled types if + * the literal values are to be the same. + *

    + * A key will always have a single partition key value associated with it, and optionally will have a sort key value. + * The names of the keys themselves are not part of this object. + */ +@SdkPublicApi +public final class Key { + private final AttributeValue partitionValue; + private final AttributeValue sortValue; + + private Key(Builder builder) { + Validate.isTrue(builder.partitionValue != null && !builder.partitionValue.equals(nullAttributeValue()), + "partitionValue should not be null"); + this.partitionValue = builder.partitionValue; + this.sortValue = builder.sortValue; + } + + /** + * Returns a new builder that can be used to construct an instance of this class. + * @return A newly initialized {@link Builder} object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Return a map of the key elements that can be passed directly to DynamoDb. + * @param tableSchema A tableschema to determine the key attribute names from. + * @param index The name of the index to use when determining the key attribute names. + * @return A map of attribute names to {@link AttributeValue}. + */ + public Map keyMap(TableSchema tableSchema, String index) { + Map keyMap = new HashMap<>(); + keyMap.put(tableSchema.tableMetadata().indexPartitionKey(index), partitionValue); + + if (sortValue != null) { + keyMap.put(tableSchema.tableMetadata().indexSortKey(index).orElseThrow( + () -> new IllegalArgumentException("A sort key value was supplied for an index that does not support " + + "one. Index: " + index)), sortValue); + } + + return Collections.unmodifiableMap(keyMap); + } + + /** + * Get the literal value of the partition key stored in this object. + * @return An {@link AttributeValue} representing the literal value of the partition key. + */ + public AttributeValue partitionKeyValue() { + return partitionValue; + } + + /** + * Get the literal value of the sort key stored in this object if available. + * @return An optional {@link AttributeValue} representing the literal value of the sort key, or empty if there + * is no sort key value in this Key. + */ + public Optional sortKeyValue() { + return Optional.ofNullable(sortValue); + } + + /** + * Return a map of the key elements that form the primary key of a table that can be passed directly to DynamoDb. + * @param tableSchema A tableschema to determine the key attribute names from. + * @return A map of attribute names to {@link AttributeValue}. + */ + public Map primaryKeyMap(TableSchema tableSchema) { + return keyMap(tableSchema, TableMetadata.primaryIndexName()); + } + + /** + * Converts an existing key into a builder object that can be used to modify its values and then create a new key. + * @return A {@link Builder} initialized with the values of this key. + */ + public Builder toBuilder() { + return new Builder().partitionValue(this.partitionValue).sortValue(this.sortValue); + } + + /** + * Builder for {@link Key} + */ + public static final class Builder { + private AttributeValue partitionValue; + private AttributeValue sortValue; + + private Builder() { + } + + /** + * Value to be used for the partition key + * @param partitionValue partition key value + */ + public Builder partitionValue(AttributeValue partitionValue) { + this.partitionValue = partitionValue; + return this; + } + + /** + * String value to be used for the partition key. The string will be converted into an AttributeValue of type S. 
+ * @param partitionValue partition key value + */ + public Builder partitionValue(String partitionValue) { + this.partitionValue = AttributeValues.stringValue(partitionValue); + return this; + } + + /** + * Numeric value to be used for the partition key. The number will be converted into an AttributeValue of type N. + * @param partitionValue partition key value + */ + public Builder partitionValue(Number partitionValue) { + this.partitionValue = AttributeValues.numberValue(partitionValue); + return this; + } + + /** + * Binary value to be used for the partition key. The input will be converted into an AttributeValue of type B. + * @param partitionValue the bytes to be used for the binary key value. + */ + public Builder partitionValue(SdkBytes partitionValue) { + this.partitionValue = AttributeValues.binaryValue(partitionValue); + return this; + } + + /** + * Value to be used for the sort key + * @param sortValue sort key value + */ + public Builder sortValue(AttributeValue sortValue) { + this.sortValue = sortValue; + return this; + } + + /** + * String value to be used for the sort key. The string will be converted into an AttributeValue of type S. + * @param sortValue sort key value + */ + public Builder sortValue(String sortValue) { + this.sortValue = AttributeValues.stringValue(sortValue); + return this; + } + + /** + * Numeric value to be used for the sort key. The number will be converted into an AttributeValue of type N. + * @param sortValue sort key value + */ + public Builder sortValue(Number sortValue) { + this.sortValue = AttributeValues.numberValue(sortValue); + return this; + } + + /** + * Binary value to be used for the sort key. The input will be converted into an AttributeValue of type B. + * @param sortValue the bytes to be used for the binary key value. + */ + public Builder sortValue(SdkBytes sortValue) { + this.sortValue = AttributeValues.binaryValue(sortValue); + return this; + } + + /** + * Construct a {@link Key} from this builder. + */ + public Key build() { + return new Key(this); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Key key = (Key) o; + + if (partitionValue != null ? ! partitionValue.equals(key.partitionValue) : + key.partitionValue != null) { + return false; + } + return sortValue != null ? sortValue.equals(key.sortValue) : key.sortValue == null; + } + + @Override + public int hashCode() { + int result = partitionValue != null ? partitionValue.hashCode() : 0; + result = 31 * result + (sortValue != null ? sortValue.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/KeyAttributeMetadata.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/KeyAttributeMetadata.java new file mode 100644 index 000000000000..b5b0826d30b8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/KeyAttributeMetadata.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * A metadata class that stores information about a key attribute + */ +@SdkPublicApi +public interface KeyAttributeMetadata { + /** + * The name of the key attribute + */ + String name(); + + /** + * The DynamoDB type of the key attribute + */ + AttributeValueType attributeValueType(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/MappedTableResource.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/MappedTableResource.java new file mode 100644 index 000000000000..528589e8fa85 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/MappedTableResource.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * Interface for a resource object that is part of either a {@link DynamoDbTable} or {@link DynamoDbAsyncTable}. This + * part of the interface is common between both of those higher order interfaces and has methods to access the + * metadata associated with the mapped entity, such as the schema and the table name, but knows nothing about how to + * actually execute operations against it. + * + * @param The type of the modelled object. + */ +@SdkPublicApi +public interface MappedTableResource { + /** + * Gets the {@link DynamoDbEnhancedClientExtension} associated with this mapped resource. + * @return The {@link DynamoDbEnhancedClientExtension} associated with this mapped resource. + */ + DynamoDbEnhancedClientExtension mapperExtension(); + + /** + * Gets the {@link TableSchema} object that this mapped table was built with. + * @return The {@link TableSchema} object for this mapped table. + */ + TableSchema tableSchema(); + + /** + * Gets the physical table name that operations performed by this object will be executed against. + * @return The physical table name. + */ + String tableName(); + + /** + * Creates a {@link Key} object from a modelled item. This key can be used in query conditionals and get + * operations to locate a specific record. + * @param item The item to extract the key fields from. + * @return A key that has been initialized with the index values extracted from the modelled object. 
+ */ + Key keyFrom(T item); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/NestedAttributeName.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/NestedAttributeName.java new file mode 100644 index 000000000000..21404c4b47c8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/NestedAttributeName.java @@ -0,0 +1,235 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.Validate; + +/** + * High-level representation of a DynamoDB 'NestedAttributeName' that can be used in various situations where the API requires + * or accepts an Nested Attribute Name. + * Simple Attribute Name can be represented by passing just the name of the attribute. + * Nested Attributes are represented by List of String where each index of list corresponds to Nesting level Names. + *

While using attributeToProject in {@link software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest} + * and {@link software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest} we need a way to represent nested attributes. + * The normal DOT(.) separator is not recognized as a nesting-level separator in a DynamoDB request, + * so we need to use NestedAttributeName, + * which can represent nested attributes. + *

Example: NestedAttributeName.create("foo") corresponds to a NestedAttributeName with an elements list + * containing the single element "foo", which represents the simple attribute name "foo" without nesting. + *

    NestedAttributeName.create("foo", "bar") corresponds to a NestedAttributeName with elements list "foo", "bar" + * respresenting nested attribute name "foo.bar". + */ +@SdkPublicApi +public final class NestedAttributeName { + + private final List elements; + + private NestedAttributeName(List nestedAttributeNames) { + Validate.validState(nestedAttributeNames != null, "nestedAttributeNames must not be null."); + Validate.notEmpty(nestedAttributeNames, "nestedAttributeNames must not be empty"); + Validate.noNullElements(nestedAttributeNames, "nestedAttributeNames must not contain null values"); + this.elements = Collections.unmodifiableList(nestedAttributeNames); + } + + /** + * Creates a NestedAttributeName with a single element, which is effectively just a simple attribute name without nesting. + *

    + * Example:create("foo") will create NestedAttributeName corresponding to Attribute foo. + * + * @param element Attribute Name. Single String represents just a simple attribute name without nesting. + * @return NestedAttributeName with attribute name as specified element. + */ + public static NestedAttributeName create(String element) { + return new Builder().addElement(element).build(); + } + + /** + * Creates a NestedAttributeName from a list of elements that compose the full path of the nested attribute. + *

    + * Example:create("foo", "bar") will create NestedAttributeName which represents foo.bar nested attribute. + * + * @param elements Nested Attribute Names. Each of strings in varargs represent the nested attribute name + * at subsequent levels. + * @return NestedAttributeName with Nested attribute name set as specified in elements var args. + */ + public static NestedAttributeName create(String... elements) { + return new Builder().elements(elements).build(); + } + + /** + * Creates a NestedAttributeName from a list of elements that compose the full path of the nested attribute. + *

    + * Example:create(Arrays.asList("foo", "bar")) will create NestedAttributeName + * which represents foo.bar nested attribute. + * + * @param elements List of Nested Attribute Names. Each of strings in List represent the nested attribute name + * at subsequent levels. + * @return NestedAttributeName with Nested attribute name set as specified in elements Collections. + */ + public static NestedAttributeName create(List elements) { + return new Builder().elements(elements).build(); + } + + /** + * Create a builder that can be used to create a {@link NestedAttributeName}. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets elements of NestedAttributeName in the form of List. Each element in the list corresponds + * to the subsequent Nested Attribute name. + * + * @return List of nested attributes, each entry in the list represent one level of nesting. + * Example, A Two level Attribute name foo.bar will be represented as ["foo", "bar"] + */ + public List elements() { + return elements; + } + + /** + * Returns a builder initialized with all existing values on the request object. + */ + public Builder toBuilder() { + return builder().elements(elements); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + NestedAttributeName that = (NestedAttributeName) o; + + return elements != null + ? elements.equals(that.elements) : that.elements == null; + } + + @Override + public int hashCode() { + return elements != null ? elements.hashCode() : 0; + } + + /** + * A builder for {@link NestedAttributeName}. + */ + public static class Builder { + private List elements = null; + + private Builder() { + + } + + /** + * Adds a single element of NestedAttributeName. + * Subsequent calls to this method can add attribute Names at subsequent nesting levels. + *

    + * Example:builder().addElement("foo").addElement("bar") will add elements in NestedAttributeName + * which represent a Nested Attribute Name foo.bar + * + * @param element Attribute Name. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addElement(String element) { + if (elements == null) { + elements = new ArrayList<>(); + } + elements.add(element); + return this; + } + + /** + * Adds a single element of NestedAttributeName. + * Subsequent calls to this method will append the new elements to the end of the existing chain of elements + * creating new levels of nesting. + *

    + * Example:builder().addElements("foo","bar") will add elements in NestedAttributeName + * which represent a Nested Attribute Name foo.bar + * + * @param elements Nested Attribute Names. Each of strings in varargs represent the nested attribute name + * at subsequent levels. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addElements(String... elements) { + if (this.elements == null) { + this.elements = new ArrayList<>(); + } + this.elements.addAll(Arrays.asList(elements)); + return this; + } + + /** + * Adds a List of elements to NestedAttributeName. + * Subsequent calls to this method will append the new elements to the end of the existing chain of elements + * creating new levels of nesting. + *

    + * Example:builder().addElements(Arrays.asList("foo","bar")) will add elements in NestedAttributeName + * to represent a Nested Attribute Name foo.bar + * + * @param elements List of Strings where each string corresponds to subsequent nesting attribute name. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addElements(List elements) { + if (this.elements == null) { + this.elements = new ArrayList<>(); + } + this.elements.addAll(elements); + return this; + } + + /** + * Set elements of NestedAttributeName with list of Strings. Will overwrite any existing elements stored by this builder. + *

    + * Example:builder().elements("foo","bar") will set the elements in NestedAttributeName + * to represent a nested attribute name of 'foo.bar' + * + * @param elements a list of strings that correspond to the elements in a nested attribute name. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder elements(String... elements) { + this.elements = new ArrayList<>(Arrays.asList(elements)); + return this; + } + + /** + * Sets the elements that compose a nested attribute name. Will overwrite any existing elements stored by this builder. + *

    + * Example:builder().elements(Arrays.asList("foo","bar")) will add elements in NestedAttributeName + * which represent a Nested Attribute Name foo.bar + * + * @param elements a list of strings that correspond to the elements in a nested attribute name. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder elements(List elements) { + this.elements = new ArrayList<>(elements); + return this; + } + + + public NestedAttributeName build() { + return new NestedAttributeName(elements); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/OperationContext.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/OperationContext.java new file mode 100644 index 000000000000..f0e48375bd8e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/OperationContext.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * A context object that is associated with a specific operation and identifies the resources that the operation is + * meant to operate on. + *

+ * This context is passed to and can be read by extension hooks (see {@link DynamoDbEnhancedClientExtension}).
+ */
+@SdkPublicApi
+public interface OperationContext {
+    /**
+     * The name of the table being operated on.
+     */
+    String tableName();
+
+    /**
+     * The name of the index within the table being operated on. If it is the primary index, then this value will be
+     * set to the constant {@link TableMetadata#primaryIndexName()}.
+     */
+    String indexName();
+}
diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableMetadata.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableMetadata.java
new file mode 100644
index 000000000000..a7249a5a7bfa
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableMetadata.java
@@ -0,0 +1,155 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Optional;
+import software.amazon.awssdk.annotations.SdkPublicApi;
+import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType;
+
+/**
+ * Interface for an object that stores structural information about a DynamoDb table.
+ */
+@SdkPublicApi
+public interface TableMetadata {
+    /**
+     * Returns the attribute name of the partition key for an index.
+     *
+     * @param indexName The name of the index.
+     * @return The attribute name representing the partition key for this index.
+     * @throws IllegalArgumentException if the index does not exist in the metadata or does not have a partition key
+     * associated with it.
+     */
+    String indexPartitionKey(String indexName);
+
+    /**
+     * Returns the attribute name of the sort key for an index.
+     *
+     * @param indexName The name of the index.
+     * @return Optional of the attribute name representing the sort key for this index; empty if the index does not
+     * have a sort key.
+     */
+    Optional<String> indexSortKey(String indexName);
+
+    /**
+     * Returns a custom metadata object. These objects are used by extensions to the library, therefore the type of
+     * object stored is flexible and does not need to be known by the interface.
+     *
+     * @param key A unique key for the metadata object. This namespace is shared by all extensions, so it is
+     * recommended best practice to qualify it with the name of your extension.
+     * @param objectClass The Java class that the object will be cast to before returning. An exception will be
+     * thrown if the stored object cannot be cast to this class.
+     * @param <T> The flexible type for the object being returned. The compiler will typically infer this.
+     * @return An optional containing the custom metadata object, or empty if the object was not found.
+     */
+    <T> Optional<T> customMetadataObject(String key, Class<? extends T> objectClass);
+
+    /**
+     * Returns all the names of attributes associated with the keys of a specified index.
+     *
+     * @param indexName The name of the index.
+ * @return A collection of all key attribute names for that index. + */ + Collection indexKeys(String indexName); + + /** + * Returns all the names of attributes associated with any index (primary or secondary) known for this table. + * Additionally any additional attributes that are deemed to be 'key-like' in how they should be treated will + * also be returned. An example of a 'key-like' attribute that is not actually a key is one tagged as a 'version' + * attribute when using the versioned record extension. + * + * @return A collection of all key attribute names for the table. + * + * @deprecated Use {@link #keyAttributes()} instead. + */ + @Deprecated + Collection allKeys(); + + /** + * Returns metadata about all the known indices for this table. + * @return A collection of {@link IndexMetadata} containing information about the indices. + */ + Collection indices(); + + /** + * Returns all custom metadata for this table. These entries are used by extensions to the library, therefore the + * value type of each metadata object stored in the map is not known and is provided as {@link Object}. + *

    + * This method should not be used to inspect individual custom metadata objects, instead use + * {@link TableMetadata#customMetadataObject(String, Class)} ()} as that will perform a type-safety check on the + * retrieved object. + * @return A map of all the custom metadata for this table. + */ + Map customMetadata(); + + /** + * Returns metadata about all the known 'key' attributes for this table, such as primary and secondary index keys, + * or any other attribute that forms part of the structure of the table. + * @return A collection of {@link KeyAttributeMetadata} containing information about the keys. + */ + Collection keyAttributes(); + + /** + * Returns the DynamoDb scalar attribute type associated with a key attribute if one is applicable. + * @param keyAttribute The key attribute name to return the scalar attribute type of. + * @return Optional {@link ScalarAttributeType} of the attribute, or empty if attribute is a non-scalar type. + * @throws IllegalArgumentException if the keyAttribute is not found. + */ + Optional scalarAttributeType(String keyAttribute); + + /** + * Returns the attribute name used as the primary partition key for the table. + * + * @return The primary partition key attribute name. + * @throws IllegalArgumentException if the primary partition key is not known. + */ + default String primaryPartitionKey() { + return indexPartitionKey(primaryIndexName()); + } + + /** + * Returns the attribute name used as the primary sort key for the table. + * + * @return An optional of the primary sort key attribute name; empty if this key is not known. + */ + default Optional primarySortKey() { + return indexSortKey(primaryIndexName()); + } + + /** + * Returns the names of the attributes that make up the primary key for the table. + * + * @return A collection of attribute names that make up the primary key for the table. + */ + default Collection primaryKeys() { + return indexKeys(primaryIndexName()); + } + + /** + * Returns an arbitrary constant that should be used as the primary index name. This pattern creates a + * common abstraction and simplifies the implementation of operations that also work on secondary indices such as + * scan() and query(). + * + * @return An arbitrary constant that internally represents the primary index name. + */ + static String primaryIndexName() { + // Must include illegal symbols that cannot be used by a real index. + // This value is arbitrary and ephemeral but could end up being serialized with TableMetadata through the + // actions of a client, so it should not be altered unless absolutely necessary. + return "$PRIMARY_INDEX"; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java new file mode 100644 index 000000000000..6eb77a389fba --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java @@ -0,0 +1,202 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.ImmutableTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticImmutableTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * Interface for a mapper that is capable of mapping a modelled Java object into a map of {@link AttributeValue} that is + * understood by the DynamoDb low-level SDK and back again. This object is also expected to know about the + * structure of the table it is modelling, which is stored in a {@link TableMetadata} object. + * + * @param The type of model object that is being mapped to records in the DynamoDb table. + */ +@SdkPublicApi +public interface TableSchema { + /** + * Returns a builder for the {@link StaticTableSchema} implementation of this interface which allows all attributes, + * tags and table structure to be directly declared in the builder. + * + * @param itemClass The class of the item this {@link TableSchema} will map records to. + * @param The type of the item this {@link TableSchema} will map records to. + * @return A newly initialized {@link StaticTableSchema.Builder}. + */ + static StaticTableSchema.Builder builder(Class itemClass) { + return StaticTableSchema.builder(itemClass); + } + + /** + * Returns a builder for the {@link StaticImmutableTableSchema} implementation of this interface which allows all + * attributes, tags and table structure to be directly declared in the builder. + * + * @param immutableItemClass The class of the immutable item this {@link TableSchema} will map records to. + * @param immutableBuilderClass The class that can be used to construct immutable items this {@link TableSchema} + * maps records to. + * @param The type of the immutable item this {@link TableSchema} will map records to. + * @param The type of the builder used by this {@link TableSchema} to construct immutable items with. + * @return A newly initialized {@link StaticImmutableTableSchema.Builder} + */ + static StaticImmutableTableSchema.Builder builder(Class immutableItemClass, + Class immutableBuilderClass) { + return StaticImmutableTableSchema.builder(immutableItemClass, immutableBuilderClass); + } + + /** + * Scans a bean class that has been annotated with DynamoDb bean annotations and then returns a + * {@link BeanTableSchema} implementation of this interface that can map records to and from items of that bean + * class. + * + * Creating a {@link BeanTableSchema} is a moderately expensive operation, and should be performed sparingly. This is + * usually done once at application startup. + * + * @param beanClass The bean class this {@link TableSchema} will map records to. + * @param The type of the item this {@link TableSchema} will map records to. + * @return An initialized {@link BeanTableSchema}. 
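+ *
+ * A minimal illustrative sketch of typical usage (the {@code Customer} bean and the table name "customers" are
+ * assumed examples, not part of this API):
+ * {@code
+ *     TableSchema<Customer> customerSchema = TableSchema.fromBean(Customer.class);
+ *     DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.create();
+ *     DynamoDbTable<Customer> customerTable = enhancedClient.table("customers", customerSchema);
+ * }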
+ */ + static BeanTableSchema fromBean(Class beanClass) { + return BeanTableSchema.create(beanClass); + } + + /** + * Scans an immutable class that has been annotated with DynamoDb immutable annotations and then returns a + * {@link ImmutableTableSchema} implementation of this interface that can map records to and from items of that + * immutable class. + * + * Creating a {@link ImmutableTableSchema} is a moderately expensive operation, and should be performed sparingly. This is + * usually done once at application startup. + * + * @param immutableClass The immutable class this {@link TableSchema} will map records to. + * @param The type of the item this {@link TableSchema} will map records to. + * @return An initialized {@link ImmutableTableSchema}. + */ + static ImmutableTableSchema fromImmutableClass(Class immutableClass) { + return ImmutableTableSchema.create(immutableClass); + } + + /** + * Scans a class that has been annotated with DynamoDb enhanced client annotations and then returns an appropriate + * {@link TableSchema} implementation that can map records to and from items of that class. Currently supported + * top level annotations (see documentation on those classes for more information on how to use them): + *

    + * {@link software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean}
+ * {@link software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable}
+ *
+ * This is a moderately expensive operation, and should be performed sparingly. This is usually done once at
+ * application startup.
+ *
+ * @param annotatedClass A class that has been annotated with DynamoDb enhanced client annotations.
+ * @param <T> The type of the item this {@link TableSchema} will map records to.
+ * @return An initialized {@link TableSchema}.
+ */
+ static <T> TableSchema<T> fromClass(Class<T> annotatedClass) {
+     if (annotatedClass.getAnnotation(DynamoDbImmutable.class) != null) {
+         return fromImmutableClass(annotatedClass);
+     }
+
+     if (annotatedClass.getAnnotation(DynamoDbBean.class) != null) {
+         return fromBean(annotatedClass);
+     }
+
+     throw new IllegalArgumentException("Class does not appear to be a valid DynamoDb annotated class. [class = "
+                                        + "\"" + annotatedClass + "\"]");
+ }
+
+ /**
+ * Takes a raw DynamoDb SDK representation of a record in a table and maps it to a Java object. A new object is
+ * created to fulfil this operation.
+ *
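+ * For example (illustrative only; the {@code customerSchema} and the attribute values are assumed):
+ * {@code
+ *     Map<String, AttributeValue> attributeMap = new HashMap<>();
+ *     attributeMap.put("id", AttributeValue.builder().s("id123").build());
+ *     Customer customer = customerSchema.mapToItem(attributeMap);
+ * }
+ *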

    + * If attributes are missing from the map, that will not cause an error, however if attributes are found in the + * map which the mapper does not know how to map, an exception will be thrown. + * + * @param attributeMap A map of String to {@link AttributeValue} that contains all the raw attributes to map. + * @return A new instance of a Java object with all the attributes mapped onto it. + * @throws IllegalArgumentException if any attributes in the map could not be mapped onto the new model object. + */ + T mapToItem(Map attributeMap); + + /** + * Takes a modelled object and converts it into a raw map of {@link AttributeValue} that the DynamoDb low-level + * SDK can work with. + * + * @param item The modelled Java object to convert into a map of attributes. + * @param ignoreNulls If set to true; any null values in the Java object will not be added to the output map. + * If set to false; null values in the Java object will be added as {@link AttributeValue} of + * type 'nul' to the output map. + * @return A map of String to {@link AttributeValue} representing all the modelled attributes in the model object. + */ + Map itemToMap(T item, boolean ignoreNulls); + + /** + * Takes a modelled object and extracts a specific set of attributes which are then returned as a map of + * {@link AttributeValue} that the DynamoDb low-level SDK can work with. This method is typically used to extract + * just the key attributes of a modelled item and will not ignore nulls on the modelled object. + * + * @param item The modelled Java object to extract the map of attributes from. + * @param attributes A collection of attribute names to extract into the output map. + * @return A map of String to {@link AttributeValue} representing the requested modelled attributes in the model + * object. + */ + Map itemToMap(T item, Collection attributes); + + /** + * Returns a single attribute value from the modelled object. + * + * @param item The modelled Java object to extract the attribute from. + * @param attributeName The attribute name describing which attribute to extract. + * @return A single {@link AttributeValue} representing the requested modelled attribute in the model object or + * null if the attribute has not been set with a value in the modelled object. + */ + AttributeValue attributeValue(T item, String attributeName); + + /** + * Returns the object that describes the structure of the table being modelled by the mapper. This includes + * information such as the table name, index keys and attribute tags. + * @return A {@link TableMetadata} object that contains structural information about the table being modelled. + */ + TableMetadata tableMetadata(); + + /** + * Returns the {@link EnhancedType} that represents the 'Type' of the Java object this table schema object maps to + * and from. + * @return The {@link EnhancedType} of the modelled item this TableSchema maps to. + */ + EnhancedType itemType(); + + /** + * Returns a complete list of attribute names that are mapped by this {@link TableSchema} + */ + List attributeNames(); + + /** + * A boolean value that represents whether this {@link TableSchema} is abstract which means that it cannot be used + * to directly create records as it is lacking required structural elements to map to a table, such as a primary + * key, but can be referred to and embedded by other schemata. 
+ * + * @return true if it is abstract, and therefore cannot be used directly to create records but can be referred to + * by other schemata, and false if it is concrete and may be used to map records directly. + */ + boolean isAbstract(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/ReadModification.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/ReadModification.java new file mode 100644 index 000000000000..cc2e1920c559 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/ReadModification.java @@ -0,0 +1,76 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.extensions; + +import java.util.Map; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * Simple object for storing a modification to a read operation. If a transformedItem is supplied then this item will + * be completely substituted in place of the item that was actually read. + */ +@SdkPublicApi +public final class ReadModification { + private final Map transformedItem; + + private ReadModification(Map transformedItem) { + this.transformedItem = transformedItem; + } + + public static Builder builder() { + return new Builder(); + } + + public Map transformedItem() { + return transformedItem; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ReadModification that = (ReadModification) o; + + return transformedItem != null ? transformedItem.equals(that.transformedItem) : that.transformedItem == null; + } + + @Override + public int hashCode() { + return transformedItem != null ? transformedItem.hashCode() : 0; + } + + public static final class Builder { + private Map transformedItem; + + private Builder() { + } + + public Builder transformedItem(Map transformedItem) { + this.transformedItem = transformedItem; + return this; + } + + public ReadModification build() { + return new ReadModification(transformedItem); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtension.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtension.java new file mode 100644 index 000000000000..11bdce42beec --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtension.java @@ -0,0 +1,146 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.extensions; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.isNullAttributeValue; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.function.Consumer; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTag; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableMetadata; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * This extension implements optimistic locking on record writes by means of a 'record version number' that is used + * to automatically track each revision of the record as it is modified. + *

+ * This extension is loaded by default when you instantiate a
+ * {@link software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient}, so unless you are using a custom extension
+ * there is no need to specify it explicitly.
+ *
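+ * For illustration, explicitly registering the extension on a custom-configured client might look like the sketch
+ * below (construction of {@code dynamoDbClient} is assumed):
+ * {@code
+ *     DynamoDbEnhancedClient enhancedClient =
+ *         DynamoDbEnhancedClient.builder()
+ *                               .dynamoDbClient(dynamoDbClient)
+ *                               .extensions(VersionedRecordExtension.builder().build())
+ *                               .build();
+ * }
+ *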

+ * To utilize versioned record locking, first create an attribute in your model that will be used to store the record
+ * version number. This attribute must be a whole-number numeric type (such as {@code Long} or {@code Integer}), and
+ * you need to tag it as the version attribute. If you are using the
+ * {@link software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema}, use the
+ * {@link software.amazon.awssdk.enhanced.dynamodb.extensions.annotations.DynamoDbVersionAttribute} annotation; if you
+ * are using the {@link software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema}, use the
+ * {@link AttributeTags#versionAttribute()} static attribute tag.
+ *
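+ * A short sketch of a bean using the annotation (the {@code Customer} bean is an assumed example; its key
+ * attributes and other properties are omitted for brevity):
+ * {@code
+ *     @DynamoDbBean
+ *     public class Customer {
+ *         private Long version;
+ *
+ *         @DynamoDbVersionAttribute
+ *         public Long getVersion() { return version; }
+ *         public void setVersion(Long version) { this.version = version; }
+ *     }
+ * }
+ *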

    + * Then, whenever a record is written the write operation will only succeed if the version number of the record has not + * been modified since it was last read by the application. Every time a new version of the record is successfully + * written to the database, the record version number will be automatically incremented. + */ +@SdkPublicApi +public final class VersionedRecordExtension implements DynamoDbEnhancedClientExtension { + private static final Function EXPRESSION_KEY_MAPPER = key -> ":old_" + key + "_value"; + private static final String CUSTOM_METADATA_KEY = "VersionedRecordExtension:VersionAttribute"; + private static final VersionAttribute VERSION_ATTRIBUTE = new VersionAttribute(); + + private VersionedRecordExtension() { + } + + public static Builder builder() { + return new Builder(); + } + + public static final class AttributeTags { + private AttributeTags() { + } + + public static StaticAttributeTag versionAttribute() { + return VERSION_ATTRIBUTE; + } + } + + private static class VersionAttribute implements StaticAttributeTag { + @Override + public Consumer modifyMetadata(String attributeName, + AttributeValueType attributeValueType) { + if (!AttributeValueType.N.equals(attributeValueType)) { + throw new IllegalArgumentException(String.format( + "Attribute '%s' of type %s is not a suitable type to be used as a version attribute. Only type 'N' " + + "is supported.", attributeName, attributeValueType.name())); + } + + return metadata -> metadata.addCustomMetadataObject(CUSTOM_METADATA_KEY, attributeName) + .markAttributeAsKey(attributeName, attributeValueType); + } + } + + @Override + public WriteModification beforeWrite(DynamoDbExtensionContext.BeforeWrite context) { + Optional versionAttributeKey = context.tableMetadata() + .customMetadataObject(CUSTOM_METADATA_KEY, String.class); + + if (!versionAttributeKey.isPresent()) { + return WriteModification.builder().build(); + } + + Map itemToTransform = new HashMap<>(context.items()); + AttributeValue newVersionValue; + Expression condition; + Optional existingVersionValue = + Optional.ofNullable(itemToTransform.get(versionAttributeKey.get())); + + if (!existingVersionValue.isPresent() || isNullAttributeValue(existingVersionValue.get())) { + // First version of the record + newVersionValue = AttributeValue.builder().n("1").build(); + condition = Expression.builder() + .expression(String.format("attribute_not_exists(%s)", versionAttributeKey.get())) + .build(); + } else { + // Existing record, increment version + if (existingVersionValue.get().n() == null) { + // In this case a non-null version attribute is present, but it's not an N + throw new IllegalArgumentException("Version attribute appears to be the wrong type. 
N is required."); + } + + int existingVersion = Integer.parseInt(existingVersionValue.get().n()); + String existingVersionValueKey = EXPRESSION_KEY_MAPPER.apply(versionAttributeKey.get()); + newVersionValue = AttributeValue.builder().n(Integer.toString(existingVersion + 1)).build(); + condition = Expression.builder() + .expression(String.format("%s = %s", versionAttributeKey.get(), + existingVersionValueKey)) + .expressionValues(Collections.singletonMap(existingVersionValueKey, + existingVersionValue.get())) + .build(); + } + + itemToTransform.put(versionAttributeKey.get(), newVersionValue); + + return WriteModification.builder() + .transformedItem(Collections.unmodifiableMap(itemToTransform)) + .additionalConditionalExpression(condition) + .build(); + } + + public static final class Builder { + private Builder() { + } + + public VersionedRecordExtension build() { + return new VersionedRecordExtension(); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/WriteModification.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/WriteModification.java new file mode 100644 index 000000000000..85297f0352b6 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/WriteModification.java @@ -0,0 +1,101 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.extensions; + +import java.util.Map; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * Simple object for storing a modification to a write operation. + *

    + * If a transformedItem is supplied then this item will be completely substituted in place of the item that was + * previously going to be written. + *
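+ * For example (illustrative only; {@code transformedItemMap} is an assumed, pre-built attribute map), an extension
+ * could substitute a transformed item like this:
+ * {@code
+ *     WriteModification modification = WriteModification.builder()
+ *                                                        .transformedItem(transformedItemMap)
+ *                                                        .build();
+ * }
+ *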

    + * If an additionalConditionalExpression is supplied then this condition will be coalesced with any other conditions + * and added as a parameter to the write operation. + */ +@SdkPublicApi +public final class WriteModification { + private final Map transformedItem; + private final Expression additionalConditionalExpression; + + private WriteModification(Map transformedItem, Expression additionalConditionalExpression) { + this.transformedItem = transformedItem; + this.additionalConditionalExpression = additionalConditionalExpression; + } + + public static Builder builder() { + return new Builder(); + } + + public Map transformedItem() { + return transformedItem; + } + + public Expression additionalConditionalExpression() { + return additionalConditionalExpression; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + WriteModification that = (WriteModification) o; + + if (transformedItem != null ? ! transformedItem.equals(that.transformedItem) : that.transformedItem != null) { + return false; + } + return additionalConditionalExpression != null ? + additionalConditionalExpression.equals(that.additionalConditionalExpression) : + that.additionalConditionalExpression == null; + } + + @Override + public int hashCode() { + int result = transformedItem != null ? transformedItem.hashCode() : 0; + result = 31 * result + (additionalConditionalExpression != null ? additionalConditionalExpression.hashCode() : 0); + return result; + } + + public static final class Builder { + private Map transformedItem; + private Expression additionalConditionalExpression; + + private Builder() { + } + + public Builder transformedItem(Map transformedItem) { + this.transformedItem = transformedItem; + return this; + } + + public Builder additionalConditionalExpression(Expression additionalConditionalExpression) { + this.additionalConditionalExpression = additionalConditionalExpression; + return this; + } + + public WriteModification build() { + return new WriteModification(transformedItem, additionalConditionalExpression); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/annotations/DynamoDbVersionAttribute.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/annotations/DynamoDbVersionAttribute.java new file mode 100644 index 000000000000..21f3beeeb446 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/annotations/DynamoDbVersionAttribute.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.extensions.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.VersionRecordAttributeTags; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.BeanTableSchemaAttributeTag; + +/** + * Denotes this attribute as recording the version record number to be used for optimistic locking. Every time a record + * with this attribute is written to the database it will be incremented and a condition added to the request to check + * for an exact match of the old version. + */ +@SdkPublicApi +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@BeanTableSchemaAttributeTag(VersionRecordAttributeTags.class) +public @interface DynamoDbVersionAttribute { +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/ApplyUserAgentInterceptor.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/ApplyUserAgentInterceptor.java new file mode 100644 index 000000000000..a0b5183f35f0 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/ApplyUserAgentInterceptor.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.core.ApiName; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.services.dynamodb.model.DynamoDbRequest; + +/** + * Apply dynamodb enhanced client specific user agent to the request + */ +@SdkInternalApi +public final class ApplyUserAgentInterceptor implements ExecutionInterceptor { + private static final ApiName API_NAME = + ApiName.builder().version("ddb-enh").name("hll").build(); + private static final Consumer USER_AGENT_APPLIER = + b -> b.addApiName(API_NAME); + + @Override + public SdkRequest modifyRequest(Context.ModifyRequest context, ExecutionAttributes executionAttributes) { + if (!(context.request() instanceof DynamoDbRequest)) { + // should never happen + return context.request(); + } + + DynamoDbRequest request = (DynamoDbRequest) context.request(); + AwsRequestOverrideConfiguration overrideConfiguration = + request.overrideConfiguration().map(c -> c.toBuilder() + .applyMutation(USER_AGENT_APPLIER) + .build()) + .orElse((AwsRequestOverrideConfiguration.builder() + .applyMutation(USER_AGENT_APPLIER) + .build())); + + return request.toBuilder().overrideConfiguration(overrideConfiguration).build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/AttributeValues.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/AttributeValues.java new file mode 100644 index 000000000000..7915da9e886f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/AttributeValues.java @@ -0,0 +1,70 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * This static helper class contains some literal {@link AttributeValue} constants and converters. Primarily these + * will be used if constructing a literal key object or for use in a custom filter expression. 
Eg: + * + * {@code Key myKey = Key.create(stringValue("id123"), numberValue(4.23)); + * Expression filterExpression = Expression.of("id = :filter_id", singletonMap(":filter_id", stringValue("id123")); } + */ +@SdkInternalApi +public final class AttributeValues { + private static final AttributeValue NULL_ATTRIBUTE_VALUE = AttributeValue.builder().nul(true).build(); + + private AttributeValues() { + } + + /** + * The constant that represents a 'null' in a DynamoDb record. + * @return An {@link AttributeValue} of type NUL that represents 'null'. + */ + public static AttributeValue nullAttributeValue() { + return NULL_ATTRIBUTE_VALUE; + } + + /** + * Creates a literal string {@link AttributeValue}. + * @param value A string to create the literal from. + * @return An {@link AttributeValue} of type S that represents the string literal. + */ + public static AttributeValue stringValue(String value) { + return AttributeValue.builder().s(value).build(); + } + + /** + * Creates a literal numeric {@link AttributeValue} from any type of Java number. + * @param value A number to create the literal from. + * @return An {@link AttributeValue} of type n that represents the numeric literal. + */ + public static AttributeValue numberValue(Number value) { + return AttributeValue.builder().n(value.toString()).build(); + } + + /** + * Creates a literal binary {@link AttributeValue} from raw bytes. + * @param value bytes to create the literal from. + * @return An {@link AttributeValue} of type B that represents the binary literal. + */ + public static AttributeValue binaryValue(SdkBytes value) { + return AttributeValue.builder().b(value).build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocument.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocument.java new file mode 100644 index 000000000000..fe2fcae88502 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocument.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.readAndTransformSingleItem; + +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.Document; +import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@SdkInternalApi +public final class DefaultDocument implements Document { + private final Map itemMap; + + private DefaultDocument(Map itemMap) { + this.itemMap = itemMap; + } + + public static DefaultDocument create(Map itemMap) { + return new DefaultDocument(itemMap); + } + + public T getItem(MappedTableResource mappedTableResource) { + return readAndTransformSingleItem(itemMap, + mappedTableResource.tableSchema(), + DefaultOperationContext.create(mappedTableResource.tableName()), + mappedTableResource.mapperExtension()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DefaultDocument that = (DefaultDocument) o; + + return itemMap != null ? itemMap.equals(that.itemMap) : that.itemMap == null; + } + + @Override + public int hashCode() { + return itemMap != null ? itemMap.hashCode() : 0; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtils.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtils.java new file mode 100644 index 000000000000..c1d4c387a42b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtils.java @@ -0,0 +1,171 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@SdkInternalApi +public final class EnhancedClientUtils { + private EnhancedClientUtils() { + + } + + /** There is a divergence in what constitutes an acceptable attribute name versus a token used in expression + * names or values. Since the mapper translates one to the other, it is necessary to scrub out all these + * 'illegal' characters before adding them to expression values or expression names. + * + * @param key A key that may contain non alpha-numeric characters acceptable to a DynamoDb attribute name. + * @return A key that has all these characters scrubbed and overwritten with an underscore. + */ + public static String cleanAttributeName(String key) { + boolean somethingChanged = false; + char[] chars = key.toCharArray(); + + for (int i = 0; i < chars.length; ++i) { + if (chars[i] == '*' + || chars[i] == '.' + || chars[i] == '-' + || chars[i] == '#' + || chars[i] == ':') { + chars[i] = '_'; + somethingChanged = true; + } + } + + return somethingChanged ? 
new String(chars) : key; + } + + public static T readAndTransformSingleItem(Map itemMap, + TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension) { + if (itemMap == null || itemMap.isEmpty()) { + return null; + } + + if (dynamoDbEnhancedClientExtension != null) { + ReadModification readModification = dynamoDbEnhancedClientExtension.afterRead( + DefaultDynamoDbExtensionContext.builder() + .items(itemMap) + .operationContext(operationContext) + .tableMetadata(tableSchema.tableMetadata()) + .build()); + if (readModification != null && readModification.transformedItem() != null) { + return tableSchema.mapToItem(readModification.transformedItem()); + } + } + + return tableSchema.mapToItem(itemMap); + } + + public static Page readAndTransformPaginatedItems( + ResponseT response, + TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension, + Function>> getItems, + Function> getLastEvaluatedKey) { + + if (getLastEvaluatedKey.apply(response) == null || getLastEvaluatedKey.apply(response).isEmpty()) { + // Last page + return Page.create(getItems.apply(response) + .stream() + .map(itemMap -> readAndTransformSingleItem(itemMap, + tableSchema, + operationContext, + dynamoDbEnhancedClientExtension)) + .collect(Collectors.toList())); + } else { + // More pages to come; add the lastEvaluatedKey + return Page.create(getItems.apply(response) + .stream() + .map(itemMap -> readAndTransformSingleItem(itemMap, + tableSchema, + operationContext, + dynamoDbEnhancedClientExtension)) + .collect(Collectors.toList()), + getLastEvaluatedKey.apply(response)); + } + } + + public static Key createKeyFromItem(T item, TableSchema tableSchema, String indexName) { + String partitionKeyName = tableSchema.tableMetadata().indexPartitionKey(indexName); + Optional sortKeyName = tableSchema.tableMetadata().indexSortKey(indexName); + AttributeValue partitionKeyValue = tableSchema.attributeValue(item, partitionKeyName); + Optional sortKeyValue = sortKeyName.map(key -> tableSchema.attributeValue(item, key)); + + return sortKeyValue.map( + attributeValue -> Key.builder() + .partitionValue(partitionKeyValue) + .sortValue(attributeValue) + .build()) + .orElseGet( + () -> Key.builder() + .partitionValue(partitionKeyValue).build()); + } + + public static Key createKeyFromMap(Map itemMap, + TableSchema tableSchema, + String indexName) { + String partitionKeyName = tableSchema.tableMetadata().indexPartitionKey(indexName); + Optional sortKeyName = tableSchema.tableMetadata().indexSortKey(indexName); + AttributeValue partitionKeyValue = itemMap.get(partitionKeyName); + Optional sortKeyValue = sortKeyName.map(itemMap::get); + + return sortKeyValue.map( + attributeValue -> Key.builder() + .partitionValue(partitionKeyValue) + .sortValue(attributeValue) + .build()) + .orElseGet( + () -> Key.builder() + .partitionValue(partitionKeyValue).build()); + } + + public static List getItemsFromSupplier(List> itemSupplierList) { + if (itemSupplierList == null || itemSupplierList.isEmpty()) { + return null; + } + return Collections.unmodifiableList(itemSupplierList.stream() + .map(Supplier::get) + .collect(Collectors.toList())); + } + + /** + * A helper method to test if an {@link AttributeValue} is a 'null' constant. This will not test if the + * AttributeValue object is null itself, and in fact will throw a NullPointerException if you pass in null. 
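+ * For example, {@code isNullAttributeValue(AttributeValues.nullAttributeValue())} returns true, while
+ * {@code isNullAttributeValue(AttributeValue.builder().s("foo").build())} returns false.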
+ * @param attributeValue An {@link AttributeValue} to test for null. + * @return true if the supplied AttributeValue represents a null value, or false if it does not. + */ + public static boolean isNullAttributeValue(AttributeValue attributeValue) { + return attributeValue.nul() != null && attributeValue.nul(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/ProjectionExpressionConvertor.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/ProjectionExpressionConvertor.java new file mode 100644 index 000000000000..f5d03ac9fdd7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/ProjectionExpressionConvertor.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.cleanAttributeName; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.function.UnaryOperator; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.NestedAttributeName; + +/** + * Wrapper method to get Projection Expression Name map and Projection Expressions from NestedAttributeNames. 
+ */ +@SdkInternalApi +public class ProjectionExpressionConvertor { + + private static final String AMZN_MAPPED = "#AMZN_MAPPED_"; + private static final UnaryOperator PROJECTION_EXPRESSION_KEY_MAPPER = k -> AMZN_MAPPED + cleanAttributeName(k); + private final List nestedAttributeNames; + + private ProjectionExpressionConvertor(List nestedAttributeNames) { + this.nestedAttributeNames = nestedAttributeNames; + } + + public static ProjectionExpressionConvertor create(List nestedAttributeNames) { + return new ProjectionExpressionConvertor(nestedAttributeNames); + } + + private static Optional> convertToExpressionNameMap(NestedAttributeName attributeName) { + List nestedAttributeNames = attributeName.elements(); + if (nestedAttributeNames != null) { + Map resultNameMap = new LinkedHashMap<>(); + nestedAttributeNames.stream().forEach(nestedAttribute -> + resultNameMap.put(PROJECTION_EXPRESSION_KEY_MAPPER.apply(nestedAttribute), nestedAttribute)); + return Optional.of(resultNameMap); + } + return Optional.empty(); + } + + private static Optional convertToNameExpression(NestedAttributeName nestedAttributeName) { + + String name = nestedAttributeName.elements().stream().findFirst().orElse(null); + + List nestedAttributes = null; + if (nestedAttributeName.elements().size() > 1) { + nestedAttributes = nestedAttributeName.elements().subList(1, nestedAttributeName.elements().size()); + } + if (name != null) { + List hashSeparatedNestedStringList = + new ArrayList<>(Arrays.asList(PROJECTION_EXPRESSION_KEY_MAPPER.apply(name))); + if (nestedAttributes != null) { + nestedAttributes.stream().forEach(hashSeparatedNestedStringList::add); + } + return Optional.of(String.join(".".concat(AMZN_MAPPED), hashSeparatedNestedStringList)); + } + return Optional.empty(); + } + + public List nestedAttributeNames() { + return nestedAttributeNames; + } + + public Map convertToExpressionMap() { + Map attributeNameMap = new LinkedHashMap<>(); + if (this.nestedAttributeNames() != null) { + this.nestedAttributeNames().stream().forEach(attribs -> convertToExpressionNameMap(attribs) + .ifPresent(attributeNameMap::putAll)); + } + return attributeNameMap; + } + + public Optional convertToProjectionExpression() { + if (nestedAttributeNames != null) { + List expressionList = new ArrayList<>(); + this.nestedAttributeNames().stream().filter(Objects::nonNull) + .filter(item -> item.elements() != null && !item.elements().isEmpty()) + .forEach(attributeName -> convertToNameExpression(attributeName) + .ifPresent(expressionList::add)); + String joinedExpression = String.join(",", expressionList.stream() + .distinct().collect(Collectors.toList())); + return Optional.ofNullable(joinedExpression.isEmpty() ? null : joinedExpression); + } + return Optional.empty(); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TransformIterable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TransformIterable.java new file mode 100644 index 000000000000..3c58365daf05 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TransformIterable.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import java.util.Iterator; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; + +// TODO: Consider moving to SDK core +@SdkInternalApi +public class TransformIterable implements SdkIterable { + private final Iterable wrappedIterable; + private final Function transformFunction; + + private TransformIterable(Iterable wrappedIterable, Function transformFunction) { + this.wrappedIterable = wrappedIterable; + this.transformFunction = transformFunction; + } + + public static TransformIterable of(SdkIterable iterable, Function transformFunction) { + return new TransformIterable<>(iterable, transformFunction); + } + + @Override + public Iterator iterator() { + return TransformIterator.create(wrappedIterable.iterator(), transformFunction); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TransformIterator.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TransformIterator.java new file mode 100644 index 000000000000..ebe726d1bd9e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TransformIterator.java @@ -0,0 +1,46 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import java.util.Iterator; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; + +// TODO: Consider moving to SDK core +@SdkInternalApi +public class TransformIterator implements Iterator { + private final Iterator wrappedIterator; + private final Function transformFunction; + + private TransformIterator(Iterator wrappedIterator, Function transformFunction) { + this.wrappedIterator = wrappedIterator; + this.transformFunction = transformFunction; + } + + public static TransformIterator create(Iterator iterator, Function transformFunction) { + return new TransformIterator<>(iterator, transformFunction); + } + + @Override + public boolean hasNext() { + return wrappedIterator.hasNext(); + } + + @Override + public R next() { + return transformFunction.apply(wrappedIterator.next()); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncIndex.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncIndex.java new file mode 100644 index 000000000000..34fa7a36d83e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncIndex.java @@ -0,0 +1,156 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromItem; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncIndex; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.PaginatedIndexOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.QueryOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.ScanOperation; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; + +@SdkInternalApi +public final class DefaultDynamoDbAsyncIndex implements DynamoDbAsyncIndex { + private final DynamoDbAsyncClient dynamoDbClient; + private final DynamoDbEnhancedClientExtension extension; + private final TableSchema tableSchema; + private final String tableName; + private final String indexName; + + DefaultDynamoDbAsyncIndex(DynamoDbAsyncClient dynamoDbClient, + DynamoDbEnhancedClientExtension extension, + TableSchema tableSchema, + String tableName, + String indexName) { + this.dynamoDbClient = dynamoDbClient; + this.extension = extension; + this.tableSchema = tableSchema; + this.tableName = tableName; + this.indexName = indexName; + } + + @Override + public SdkPublisher> query(QueryEnhancedRequest request) { + PaginatedIndexOperation operation = QueryOperation.create(request); + return operation.executeOnSecondaryIndexAsync(tableSchema, tableName, indexName, extension, dynamoDbClient); + } + + @Override + public SdkPublisher> query(Consumer requestConsumer) { + QueryEnhancedRequest.Builder builder = QueryEnhancedRequest.builder(); + requestConsumer.accept(builder); + return query(builder.build()); + } + + @Override + public SdkPublisher> query(QueryConditional queryConditional) { + return query(r -> r.queryConditional(queryConditional)); + } + + @Override + public SdkPublisher> scan(ScanEnhancedRequest request) { + PaginatedIndexOperation operation = ScanOperation.create(request); + return operation.executeOnSecondaryIndexAsync(tableSchema, tableName, indexName, extension, dynamoDbClient); + } + + @Override + public SdkPublisher> scan(Consumer requestConsumer) { + ScanEnhancedRequest.Builder builder = ScanEnhancedRequest.builder(); + requestConsumer.accept(builder); + return scan(builder.build()); + } + + @Override + public SdkPublisher> scan() { + return scan(ScanEnhancedRequest.builder().build()); + } + + @Override + public DynamoDbEnhancedClientExtension mapperExtension() { + return this.extension; + } + + @Override + public TableSchema tableSchema() { + return tableSchema; + } + + public DynamoDbAsyncClient dynamoDbClient() { + return dynamoDbClient; + } + + public String tableName() { + return tableName; + } + + public String indexName() { + return indexName; + } + + @Override + public Key keyFrom(T item) { + return createKeyFromItem(item, tableSchema, indexName); + 
} + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DefaultDynamoDbAsyncIndex that = (DefaultDynamoDbAsyncIndex) o; + + if (dynamoDbClient != null ? ! dynamoDbClient.equals(that.dynamoDbClient) + : that.dynamoDbClient != null) { + + return false; + } + if (extension != null ? ! extension.equals(that.extension) : that.extension != null) { + return false; + } + if (tableSchema != null ? ! tableSchema.equals(that.tableSchema) : that.tableSchema != null) { + return false; + } + if (tableName != null ? ! tableName.equals(that.tableName) : that.tableName != null) { + return false; + } + return indexName != null ? indexName.equals(that.indexName) : that.indexName == null; + } + + @Override + public int hashCode() { + int result = dynamoDbClient != null ? dynamoDbClient.hashCode() : 0; + result = 31 * result + (extension != null ? extension.hashCode() : 0); + result = 31 * result + (tableSchema != null ? tableSchema.hashCode() : 0); + result = 31 * result + (tableName != null ? tableName.hashCode() : 0); + result = 31 * result + (indexName != null ? indexName.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java new file mode 100644 index 000000000000..b983c3639276 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java @@ -0,0 +1,268 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
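A hedged sketch of consuming the SdkPublisher of pages that the async index above returns. The Customer class, the "customers" table name, the "customers_by_region" index name, and the assumption that SdkPublisher.subscribe(Consumer) returns a CompletableFuture that completes once the stream is drained are illustrative, not taken from this diff; the relevant imports come from the packages listed above.

static CompletableFuture<Void> printRegionIndex(DynamoDbEnhancedAsyncClient enhancedClient,
                                                TableSchema<Customer> customerSchema) {
    DynamoDbAsyncTable<Customer> table = enhancedClient.table("customers", customerSchema);
    DynamoDbAsyncIndex<Customer> index = table.index("customers_by_region"); // assumed GSI name

    SdkPublisher<Page<Customer>> pages = index.scan();                        // paginated, non-blocking
    return pages.subscribe(page -> page.items().forEach(System.out::println));
}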
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromItem; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.CreateTableOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.GetItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.PaginatedTableOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.PutItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.QueryOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.ScanOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TableOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.UpdateItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.PagePublisher; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; + +@SdkInternalApi +public final class DefaultDynamoDbAsyncTable implements DynamoDbAsyncTable { + private final DynamoDbAsyncClient dynamoDbClient; + private final DynamoDbEnhancedClientExtension extension; + private final TableSchema tableSchema; + private final String tableName; + + DefaultDynamoDbAsyncTable(DynamoDbAsyncClient dynamoDbClient, + DynamoDbEnhancedClientExtension extension, + TableSchema tableSchema, + String tableName) { + this.dynamoDbClient = dynamoDbClient; + this.extension = extension; + this.tableSchema = tableSchema; + this.tableName = tableName; + } + + @Override + public DynamoDbEnhancedClientExtension mapperExtension() { + return this.extension; + } + + @Override + public TableSchema tableSchema() { + return this.tableSchema; + } + + public DynamoDbAsyncClient dynamoDbClient() { + return dynamoDbClient; + } + + public String tableName() { + return tableName; + } + + @Override + public DefaultDynamoDbAsyncIndex index(String indexName) { + // Force a check for the existence of the index + tableSchema.tableMetadata().indexPartitionKey(indexName); + + return new DefaultDynamoDbAsyncIndex<>(dynamoDbClient, extension, tableSchema, tableName, indexName); + } + + @Override + public CompletableFuture createTable(CreateTableEnhancedRequest request) { + TableOperation operation = 
CreateTableOperation.create(request); + return operation.executeOnPrimaryIndexAsync(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public CompletableFuture createTable(Consumer requestConsumer) { + CreateTableEnhancedRequest.Builder builder = CreateTableEnhancedRequest.builder(); + requestConsumer.accept(builder); + return createTable(builder.build()); + } + + + @Override + public CompletableFuture createTable() { + return createTable(CreateTableEnhancedRequest.builder().build()); + } + + @Override + public CompletableFuture deleteItem(DeleteItemEnhancedRequest request) { + TableOperation operation = DeleteItemOperation.create(request); + return operation.executeOnPrimaryIndexAsync(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public CompletableFuture deleteItem(Consumer requestConsumer) { + DeleteItemEnhancedRequest.Builder builder = DeleteItemEnhancedRequest.builder(); + requestConsumer.accept(builder); + return deleteItem(builder.build()); + } + + @Override + public CompletableFuture deleteItem(Key key) { + return deleteItem(r -> r.key(key)); + } + + @Override + public CompletableFuture deleteItem(T keyItem) { + return deleteItem(keyFrom(keyItem)); + } + + @Override + public CompletableFuture getItem(GetItemEnhancedRequest request) { + TableOperation operation = GetItemOperation.create(request); + return operation.executeOnPrimaryIndexAsync(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public CompletableFuture getItem(Consumer requestConsumer) { + GetItemEnhancedRequest.Builder builder = GetItemEnhancedRequest.builder(); + requestConsumer.accept(builder); + return getItem(builder.build()); + } + + @Override + public CompletableFuture getItem(Key key) { + return getItem(r -> r.key(key)); + } + + @Override + public CompletableFuture getItem(T keyItem) { + return getItem(keyFrom(keyItem)); + } + + @Override + public PagePublisher query(QueryEnhancedRequest request) { + PaginatedTableOperation operation = QueryOperation.create(request); + return operation.executeOnPrimaryIndexAsync(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public PagePublisher query(Consumer requestConsumer) { + QueryEnhancedRequest.Builder builder = QueryEnhancedRequest.builder(); + requestConsumer.accept(builder); + return query(builder.build()); + } + + @Override + public PagePublisher query(QueryConditional queryConditional) { + return query(r -> r.queryConditional(queryConditional)); + } + + @Override + public CompletableFuture putItem(PutItemEnhancedRequest request) { + TableOperation operation = PutItemOperation.create(request); + return operation.executeOnPrimaryIndexAsync(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public CompletableFuture putItem(Consumer> requestConsumer) { + PutItemEnhancedRequest.Builder builder = + PutItemEnhancedRequest.builder(this.tableSchema.itemType().rawClass()); + requestConsumer.accept(builder); + return putItem(builder.build()); + } + + @Override + public CompletableFuture putItem(T item) { + return putItem(r -> r.item(item)); + } + + @Override + public PagePublisher scan(ScanEnhancedRequest request) { + PaginatedTableOperation operation = ScanOperation.create(request); + return operation.executeOnPrimaryIndexAsync(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public PagePublisher scan(Consumer requestConsumer) { + ScanEnhancedRequest.Builder builder = ScanEnhancedRequest.builder(); + requestConsumer.accept(builder); + return 
scan(builder.build()); + } + + @Override + public PagePublisher scan() { + return scan(ScanEnhancedRequest.builder().build()); + } + + @Override + public CompletableFuture updateItem(UpdateItemEnhancedRequest request) { + TableOperation operation = UpdateItemOperation.create(request); + return operation.executeOnPrimaryIndexAsync(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public CompletableFuture updateItem(Consumer> requestConsumer) { + UpdateItemEnhancedRequest.Builder builder = + UpdateItemEnhancedRequest.builder(this.tableSchema.itemType().rawClass()); + requestConsumer.accept(builder); + return updateItem(builder.build()); + } + + @Override + public CompletableFuture updateItem(T item) { + return updateItem(r -> r.item(item)); + } + + @Override + public Key keyFrom(T item) { + return createKeyFromItem(item, tableSchema, TableMetadata.primaryIndexName()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DefaultDynamoDbAsyncTable that = (DefaultDynamoDbAsyncTable) o; + + if (dynamoDbClient != null ? ! dynamoDbClient.equals(that.dynamoDbClient) + : that.dynamoDbClient != null) { + + return false; + } + if (extension != null ? ! extension.equals(that.extension) : that.extension != null) { + return false; + } + if (tableSchema != null ? ! tableSchema.equals(that.tableSchema) : that.tableSchema != null) { + return false; + } + return tableName != null ? tableName.equals(that.tableName) : that.tableName == null; + } + + @Override + public int hashCode() { + int result = dynamoDbClient != null ? dynamoDbClient.hashCode() : 0; + result = 31 * result + (extension != null ? extension.hashCode() : 0); + result = 31 * result + (tableSchema != null ? tableSchema.hashCode() : 0); + result = 31 * result + (tableName != null ? tableName.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedAsyncClient.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedAsyncClient.java new file mode 100644 index 000000000000..0e8090718407 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedAsyncClient.java @@ -0,0 +1,184 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
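A short illustrative sketch, not part of the commit, of chaining the CompletableFuture-based operations defined on the async table above; the Customer class and the helper method are assumptions, while putItem(T) and getItem(T keyItem) are the methods shown in this file.

static CompletableFuture<Customer> saveAndReload(DynamoDbAsyncTable<Customer> table, Customer customer) {
    return table.putItem(customer)                                  // CompletableFuture<Void>
                .thenCompose(ignored -> table.getItem(customer));   // key is derived from the item via keyFrom
}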
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.Document; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.BatchGetItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.BatchWriteItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactGetItemsOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactWriteItemsOperation; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPagePublisher; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteResult; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactGetItemsEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactWriteItemsEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; + +@SdkInternalApi +public final class DefaultDynamoDbEnhancedAsyncClient implements DynamoDbEnhancedAsyncClient { + private final DynamoDbAsyncClient dynamoDbClient; + private final DynamoDbEnhancedClientExtension extension; + + private DefaultDynamoDbEnhancedAsyncClient(Builder builder) { + this.dynamoDbClient = builder.dynamoDbClient == null ? 
DynamoDbAsyncClient.create() : builder.dynamoDbClient; + this.extension = ExtensionResolver.resolveExtensions(builder.dynamoDbEnhancedClientExtensions); + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public DefaultDynamoDbAsyncTable table(String tableName, TableSchema tableSchema) { + return new DefaultDynamoDbAsyncTable<>(dynamoDbClient, extension, tableSchema, tableName); + } + + @Override + public BatchGetResultPagePublisher batchGetItem(BatchGetItemEnhancedRequest request) { + BatchGetItemOperation operation = BatchGetItemOperation.create(request); + return BatchGetResultPagePublisher.create(operation.executeAsync(dynamoDbClient, extension)); + } + + @Override + public BatchGetResultPagePublisher batchGetItem( + Consumer requestConsumer) { + + BatchGetItemEnhancedRequest.Builder builder = BatchGetItemEnhancedRequest.builder(); + requestConsumer.accept(builder); + return batchGetItem(builder.build()); + } + + @Override + public CompletableFuture batchWriteItem(BatchWriteItemEnhancedRequest request) { + BatchWriteItemOperation operation = BatchWriteItemOperation.create(request); + return operation.executeAsync(dynamoDbClient, extension); + } + + @Override + public CompletableFuture batchWriteItem( + Consumer requestConsumer) { + + BatchWriteItemEnhancedRequest.Builder builder = BatchWriteItemEnhancedRequest.builder(); + requestConsumer.accept(builder); + return batchWriteItem(builder.build()); + } + + @Override + public CompletableFuture> transactGetItems(TransactGetItemsEnhancedRequest request) { + TransactGetItemsOperation operation = TransactGetItemsOperation.create(request); + return operation.executeAsync(dynamoDbClient, extension); + } + + @Override + public CompletableFuture> transactGetItems( + Consumer requestConsumer) { + TransactGetItemsEnhancedRequest.Builder builder = TransactGetItemsEnhancedRequest.builder(); + requestConsumer.accept(builder); + return transactGetItems(builder.build()); + } + + @Override + public CompletableFuture transactWriteItems(TransactWriteItemsEnhancedRequest request) { + TransactWriteItemsOperation operation = TransactWriteItemsOperation.create(request); + return operation.executeAsync(dynamoDbClient, extension); + } + + @Override + public CompletableFuture transactWriteItems( + Consumer requestConsumer) { + + TransactWriteItemsEnhancedRequest.Builder builder = TransactWriteItemsEnhancedRequest.builder(); + requestConsumer.accept(builder); + return transactWriteItems(builder.build()); + } + + public DynamoDbAsyncClient dynamoDbAsyncClient() { + return dynamoDbClient; + } + + public DynamoDbEnhancedClientExtension mapperExtension() { + return extension; + } + + public Builder toBuilder() { + return builder().dynamoDbClient(this.dynamoDbClient).extensions(this.extension); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DefaultDynamoDbEnhancedAsyncClient that = (DefaultDynamoDbEnhancedAsyncClient) o; + + if (dynamoDbClient != null ? ! dynamoDbClient.equals(that.dynamoDbClient) + : that.dynamoDbClient != null) { + + return false; + } + return extension != null ? extension.equals(that.extension) : that.extension == null; + } + + @Override + public int hashCode() { + int result = dynamoDbClient != null ? dynamoDbClient.hashCode() : 0; + result = 31 * result + (extension != null ? 
extension.hashCode() : 0); + return result; + } + + public static final class Builder implements DynamoDbEnhancedAsyncClient.Builder { + private DynamoDbAsyncClient dynamoDbClient; + private List dynamoDbEnhancedClientExtensions = + new ArrayList<>(ExtensionResolver.defaultExtensions()); + + @Override + public DefaultDynamoDbEnhancedAsyncClient build() { + return new DefaultDynamoDbEnhancedAsyncClient(this); + } + + @Override + public Builder dynamoDbClient(DynamoDbAsyncClient dynamoDbClient) { + this.dynamoDbClient = dynamoDbClient; + return this; + } + + @Override + public Builder extensions(DynamoDbEnhancedClientExtension... dynamoDbEnhancedClientExtensions) { + this.dynamoDbEnhancedClientExtensions = Arrays.asList(dynamoDbEnhancedClientExtensions); + return this; + } + + @Override + public Builder extensions(List dynamoDbEnhancedClientExtensions) { + this.dynamoDbEnhancedClientExtensions = new ArrayList<>(dynamoDbEnhancedClientExtensions); + return this; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedClient.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedClient.java new file mode 100644 index 000000000000..c68bccef057f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedClient.java @@ -0,0 +1,179 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
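Constructing the async enhanced client could look roughly like the sketch below. DynamoDbEnhancedAsyncClient.builder() as the public entry point is an assumption here (this diff only shows the internal DefaultDynamoDbEnhancedAsyncClient.builder() whose Builder implements DynamoDbEnhancedAsyncClient.Builder); omitting dynamoDbClient(...) would fall back to DynamoDbAsyncClient.create(), as the constructor above shows.

DynamoDbAsyncClient lowLevelClient = DynamoDbAsyncClient.create();

DynamoDbEnhancedAsyncClient enhancedClient =
    DynamoDbEnhancedAsyncClient.builder()              // assumed public factory matching the internal Builder
                               .dynamoDbClient(lowLevelClient)
                               .build();               // extensions default to the VersionedRecordExtension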
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.Document; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.BatchGetItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.BatchWriteItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactGetItemsOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactWriteItemsOperation; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPageIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteResult; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactGetItemsEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactWriteItemsEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +@SdkInternalApi +public final class DefaultDynamoDbEnhancedClient implements DynamoDbEnhancedClient { + private final DynamoDbClient dynamoDbClient; + private final DynamoDbEnhancedClientExtension extension; + + private DefaultDynamoDbEnhancedClient(Builder builder) { + this.dynamoDbClient = builder.dynamoDbClient == null ? 
DynamoDbClient.create() : builder.dynamoDbClient; + this.extension = ExtensionResolver.resolveExtensions(builder.dynamoDbEnhancedClientExtensions); + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public DefaultDynamoDbTable table(String tableName, TableSchema tableSchema) { + return new DefaultDynamoDbTable<>(dynamoDbClient, extension, tableSchema, tableName); + } + + @Override + public BatchGetResultPageIterable batchGetItem(BatchGetItemEnhancedRequest request) { + BatchGetItemOperation operation = BatchGetItemOperation.create(request); + return BatchGetResultPageIterable.create(operation.execute(dynamoDbClient, extension)); + } + + @Override + public BatchGetResultPageIterable batchGetItem(Consumer requestConsumer) { + BatchGetItemEnhancedRequest.Builder builder = BatchGetItemEnhancedRequest.builder(); + requestConsumer.accept(builder); + return batchGetItem(builder.build()); + } + + @Override + public BatchWriteResult batchWriteItem(BatchWriteItemEnhancedRequest request) { + BatchWriteItemOperation operation = BatchWriteItemOperation.create(request); + return operation.execute(dynamoDbClient, extension); + } + + @Override + public BatchWriteResult batchWriteItem(Consumer requestConsumer) { + BatchWriteItemEnhancedRequest.Builder builder = BatchWriteItemEnhancedRequest.builder(); + requestConsumer.accept(builder); + return batchWriteItem(builder.build()); + } + + @Override + public List transactGetItems(TransactGetItemsEnhancedRequest request) { + TransactGetItemsOperation operation = TransactGetItemsOperation.create(request); + return operation.execute(dynamoDbClient, extension); + } + + @Override + public List transactGetItems( + Consumer requestConsumer) { + + TransactGetItemsEnhancedRequest.Builder builder = TransactGetItemsEnhancedRequest.builder(); + requestConsumer.accept(builder); + return transactGetItems(builder.build()); + } + + @Override + public Void transactWriteItems(TransactWriteItemsEnhancedRequest request) { + TransactWriteItemsOperation operation = TransactWriteItemsOperation.create(request); + return operation.execute(dynamoDbClient, extension); + } + + @Override + public Void transactWriteItems(Consumer requestConsumer) { + TransactWriteItemsEnhancedRequest.Builder builder = TransactWriteItemsEnhancedRequest.builder(); + requestConsumer.accept(builder); + return transactWriteItems(builder.build()); + } + + public DynamoDbClient dynamoDbClient() { + return dynamoDbClient; + } + + public DynamoDbEnhancedClientExtension mapperExtension() { + return extension; + } + + public Builder toBuilder() { + return builder().dynamoDbClient(this.dynamoDbClient).extensions(this.extension); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DefaultDynamoDbEnhancedClient that = (DefaultDynamoDbEnhancedClient) o; + + if (dynamoDbClient != null ? ! dynamoDbClient.equals(that.dynamoDbClient) : that.dynamoDbClient != null) { + return false; + } + return extension != null ? + extension.equals(that.extension) : + that.extension == null; + } + + @Override + public int hashCode() { + int result = dynamoDbClient != null ? dynamoDbClient.hashCode() : 0; + result = 31 * result + (extension != null ? 
+ extension.hashCode() : 0); + return result; + } + + public static final class Builder implements DynamoDbEnhancedClient.Builder { + private DynamoDbClient dynamoDbClient; + private List dynamoDbEnhancedClientExtensions = + new ArrayList<>(ExtensionResolver.defaultExtensions()); + + @Override + public DefaultDynamoDbEnhancedClient build() { + return new DefaultDynamoDbEnhancedClient(this); + } + + @Override + public Builder dynamoDbClient(DynamoDbClient dynamoDbClient) { + this.dynamoDbClient = dynamoDbClient; + return this; + } + + @Override + public Builder extensions(DynamoDbEnhancedClientExtension... dynamoDbEnhancedClientExtensions) { + this.dynamoDbEnhancedClientExtensions = Arrays.asList(dynamoDbEnhancedClientExtensions); + return this; + } + + @Override + public Builder extensions(List dynamoDbEnhancedClientExtensions) { + this.dynamoDbEnhancedClientExtensions = new ArrayList<>(dynamoDbEnhancedClientExtensions); + return this; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbIndex.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbIndex.java new file mode 100644 index 000000000000..cb4b7be7619d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbIndex.java @@ -0,0 +1,154 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromItem; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbIndex; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.PaginatedIndexOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.QueryOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.ScanOperation; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +@SdkInternalApi +public class DefaultDynamoDbIndex implements DynamoDbIndex { + private final DynamoDbClient dynamoDbClient; + private final DynamoDbEnhancedClientExtension extension; + private final TableSchema tableSchema; + private final String tableName; + private final String indexName; + + DefaultDynamoDbIndex(DynamoDbClient dynamoDbClient, + DynamoDbEnhancedClientExtension extension, + TableSchema tableSchema, + String tableName, + String indexName) { + this.dynamoDbClient = dynamoDbClient; + this.extension = extension; + this.tableSchema = tableSchema; + this.tableName = tableName; + this.indexName = indexName; + } + + @Override + public SdkIterable> query(QueryEnhancedRequest request) { + PaginatedIndexOperation operation = QueryOperation.create(request); + return operation.executeOnSecondaryIndex(tableSchema, tableName, indexName, extension, dynamoDbClient); + } + + @Override + public SdkIterable> query(Consumer requestConsumer) { + QueryEnhancedRequest.Builder builder = QueryEnhancedRequest.builder(); + requestConsumer.accept(builder); + return query(builder.build()); + } + + @Override + public SdkIterable> query(QueryConditional queryConditional) { + return query(r -> r.queryConditional(queryConditional)); + } + + @Override + public SdkIterable> scan(ScanEnhancedRequest request) { + PaginatedIndexOperation operation = ScanOperation.create(request); + return operation.executeOnSecondaryIndex(tableSchema, tableName, indexName, extension, dynamoDbClient); + } + + @Override + public SdkIterable> scan(Consumer requestConsumer) { + ScanEnhancedRequest.Builder builder = ScanEnhancedRequest.builder(); + requestConsumer.accept(builder); + return scan(builder.build()); + } + + @Override + public SdkIterable> scan() { + return scan(ScanEnhancedRequest.builder().build()); + } + + @Override + public DynamoDbEnhancedClientExtension mapperExtension() { + return this.extension; + } + + @Override + public TableSchema tableSchema() { + return tableSchema; + } + + public DynamoDbClient dynamoDbClient() { + return dynamoDbClient; + } + + public String tableName() { + return tableName; + } + + public String indexName() { + return indexName; + } + + @Override + public Key keyFrom(T item) { + return createKeyFromItem(item, tableSchema, indexName); + } + + @Override + public boolean equals(Object o) { 
+ if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DefaultDynamoDbIndex that = (DefaultDynamoDbIndex) o; + + if (dynamoDbClient != null ? ! dynamoDbClient.equals(that.dynamoDbClient) : that.dynamoDbClient != null) { + return false; + } + if (extension != null ? ! extension.equals(that.extension) : that.extension != null) { + return false; + } + if (tableSchema != null ? ! tableSchema.equals(that.tableSchema) : that.tableSchema != null) { + return false; + } + if (tableName != null ? ! tableName.equals(that.tableName) : that.tableName != null) { + return false; + } + return indexName != null ? indexName.equals(that.indexName) : that.indexName == null; + } + + @Override + public int hashCode() { + int result = dynamoDbClient != null ? dynamoDbClient.hashCode() : 0; + result = 31 * result + (extension != null ? extension.hashCode() : 0); + result = 31 * result + (tableSchema != null ? tableSchema.hashCode() : 0); + result = 31 * result + (tableName != null ? tableName.hashCode() : 0); + result = 31 * result + (indexName != null ? indexName.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java new file mode 100644 index 000000000000..d30327b1e8d4 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java @@ -0,0 +1,272 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromItem; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.CreateTableOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.GetItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.PaginatedTableOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.PutItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.QueryOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.ScanOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TableOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.UpdateItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +@SdkInternalApi +public class DefaultDynamoDbTable implements DynamoDbTable { + private final DynamoDbClient dynamoDbClient; + private final DynamoDbEnhancedClientExtension extension; + private final TableSchema tableSchema; + private final String tableName; + + DefaultDynamoDbTable(DynamoDbClient dynamoDbClient, + DynamoDbEnhancedClientExtension extension, + TableSchema tableSchema, + String tableName) { + this.dynamoDbClient = dynamoDbClient; + this.extension = extension; + this.tableSchema = tableSchema; + this.tableName = tableName; + } + + @Override + public DynamoDbEnhancedClientExtension mapperExtension() { + return this.extension; + } + + @Override + public TableSchema tableSchema() { + return this.tableSchema; + } + + public DynamoDbClient dynamoDbClient() { + return dynamoDbClient; + } + + public String tableName() { + return tableName; + } + + @Override + public DefaultDynamoDbIndex index(String indexName) { + // Force a check for the existence of the index + tableSchema.tableMetadata().indexPartitionKey(indexName); + + return new DefaultDynamoDbIndex<>(dynamoDbClient, + extension, + tableSchema, + tableName, + indexName); + } + + @Override + public void createTable(CreateTableEnhancedRequest request) { + TableOperation operation = CreateTableOperation.create(request); + operation.executeOnPrimaryIndex(tableSchema, tableName, extension, dynamoDbClient); + 
} + + @Override + public void createTable(Consumer requestConsumer) { + CreateTableEnhancedRequest.Builder builder = CreateTableEnhancedRequest.builder(); + requestConsumer.accept(builder); + createTable(builder.build()); + } + + @Override + public void createTable() { + createTable(CreateTableEnhancedRequest.builder().build()); + } + + @Override + public T deleteItem(DeleteItemEnhancedRequest request) { + TableOperation operation = DeleteItemOperation.create(request); + return operation.executeOnPrimaryIndex(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public T deleteItem(Consumer requestConsumer) { + DeleteItemEnhancedRequest.Builder builder = DeleteItemEnhancedRequest.builder(); + requestConsumer.accept(builder); + return deleteItem(builder.build()); + } + + @Override + public T deleteItem(Key key) { + return deleteItem(r -> r.key(key)); + } + + @Override + public T deleteItem(T keyItem) { + return deleteItem(keyFrom(keyItem)); + } + + @Override + public T getItem(GetItemEnhancedRequest request) { + TableOperation operation = GetItemOperation.create(request); + return operation.executeOnPrimaryIndex(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public T getItem(Consumer requestConsumer) { + GetItemEnhancedRequest.Builder builder = GetItemEnhancedRequest.builder(); + requestConsumer.accept(builder); + return getItem(builder.build()); + } + + @Override + public T getItem(Key key) { + return getItem(r -> r.key(key)); + } + + @Override + public T getItem(T keyItem) { + return getItem(keyFrom(keyItem)); + } + + @Override + public PageIterable query(QueryEnhancedRequest request) { + PaginatedTableOperation operation = QueryOperation.create(request); + return operation.executeOnPrimaryIndex(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public PageIterable query(Consumer requestConsumer) { + QueryEnhancedRequest.Builder builder = QueryEnhancedRequest.builder(); + requestConsumer.accept(builder); + return query(builder.build()); + } + + @Override + public PageIterable query(QueryConditional queryConditional) { + return query(r -> r.queryConditional(queryConditional)); + } + + @Override + public void putItem(PutItemEnhancedRequest request) { + TableOperation operation = PutItemOperation.create(request); + operation.executeOnPrimaryIndex(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public void putItem(Consumer> requestConsumer) { + PutItemEnhancedRequest.Builder builder = + PutItemEnhancedRequest.builder(this.tableSchema.itemType().rawClass()); + requestConsumer.accept(builder); + putItem(builder.build()); + } + + @Override + public void putItem(T item) { + putItem(r -> r.item(item)); + } + + @Override + public PageIterable scan(ScanEnhancedRequest request) { + PaginatedTableOperation operation = ScanOperation.create(request); + return operation.executeOnPrimaryIndex(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public PageIterable scan(Consumer requestConsumer) { + ScanEnhancedRequest.Builder builder = ScanEnhancedRequest.builder(); + requestConsumer.accept(builder); + return scan(builder.build()); + } + + @Override + public PageIterable scan() { + return scan(ScanEnhancedRequest.builder().build()); + } + + @Override + public T updateItem(UpdateItemEnhancedRequest request) { + TableOperation operation = UpdateItemOperation.create(request); + return operation.executeOnPrimaryIndex(tableSchema, tableName, extension, dynamoDbClient); + } + + @Override + public T 
updateItem(Consumer> requestConsumer) { + UpdateItemEnhancedRequest.Builder builder = + UpdateItemEnhancedRequest.builder(this.tableSchema.itemType().rawClass()); + requestConsumer.accept(builder); + return updateItem(builder.build()); + } + + @Override + public T updateItem(T item) { + return updateItem(r -> r.item(item)); + } + + @Override + public Key keyFrom(T item) { + return createKeyFromItem(item, tableSchema, TableMetadata.primaryIndexName()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DefaultDynamoDbTable that = (DefaultDynamoDbTable) o; + + if (dynamoDbClient != null ? ! dynamoDbClient.equals(that.dynamoDbClient) : that.dynamoDbClient != null) { + return false; + } + if (extension != null ? + !extension.equals(that.extension) : + that.extension != null) { + + return false; + } + if (tableSchema != null ? ! tableSchema.equals(that.tableSchema) : that.tableSchema != null) { + return false; + } + return tableName != null ? tableName.equals(that.tableName) : that.tableName == null; + } + + @Override + public int hashCode() { + int result = dynamoDbClient != null ? dynamoDbClient.hashCode() : 0; + result = 31 * result + (extension != null ? + extension.hashCode() : 0); + result = 31 * result + (tableSchema != null ? tableSchema.hashCode() : 0); + result = 31 * result + (tableName != null ? tableName.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/ExtensionResolver.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/ExtensionResolver.java new file mode 100644 index 000000000000..4ca2fc5a4884 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/ExtensionResolver.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.ChainExtension; + +/** + * Static module to assist with the initialization of an extension for a DynamoDB Enhanced Client based on supplied + * configuration. 
+ */
+@SdkInternalApi
+public final class ExtensionResolver {
+    private static final DynamoDbEnhancedClientExtension DEFAULT_VERSIONED_RECORD_EXTENSION =
+        VersionedRecordExtension.builder().build();
+    private static final List<DynamoDbEnhancedClientExtension> DEFAULT_EXTENSIONS =
+        Collections.singletonList(DEFAULT_VERSIONED_RECORD_EXTENSION);
+
+    private ExtensionResolver() {
+    }
+
+    /**
+     * Static provider for the default extensions that are bundled with the DynamoDB Enhanced Client. Currently this is
+     * just the {@link software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension}.
+     *
+     * These extensions will be used by default unless overridden in the enhanced client builder.
+     */
+    public static List<DynamoDbEnhancedClientExtension> defaultExtensions() {
+        return DEFAULT_EXTENSIONS;
+    }
+
+    /**
+     * Resolves a list of extensions into a single extension. If the list is a singleton, it will just return that
+     * extension; otherwise it will combine them with the
+     * {@link software.amazon.awssdk.enhanced.dynamodb.internal.extensions.ChainExtension} meta-extension using the
+     * order provided in the list.
+     *
+     * @param extensions A list of extensions to be combined in strict order
+     * @return A single extension that combines all the supplied extensions, or null if no extensions were provided
+     */
+    public static DynamoDbEnhancedClientExtension resolveExtensions(List<DynamoDbEnhancedClientExtension> extensions) {
+        if (extensions == null || extensions.isEmpty()) {
+            return null;
+        }
+
+        if (extensions.size() == 1) {
+            return extensions.get(0);
+        }
+
+        return ChainExtension.create(extensions);
+    }
+}
diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/BeginsWithConditional.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/BeginsWithConditional.java
new file mode 100644
index 000000000000..3f63556ef4c4
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/BeginsWithConditional.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
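To make the resolution rules above concrete, a short illustrative sketch (not part of the commit), using only the methods defined in ExtensionResolver:

List<DynamoDbEnhancedClientExtension> defaults = ExtensionResolver.defaultExtensions();

DynamoDbEnhancedClientExtension none = ExtensionResolver.resolveExtensions(Collections.emptyList());
// none == null: no extensions were provided

DynamoDbEnhancedClientExtension single = ExtensionResolver.resolveExtensions(defaults);
// single is the VersionedRecordExtension instance itself: a singleton list is unwrapped

DynamoDbEnhancedClientExtension chained =
    ExtensionResolver.resolveExtensions(Arrays.asList(single, single));
// two or more extensions are combined into a ChainExtension, preserving list order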
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.conditional; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.nullAttributeValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.cleanAttributeName; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.UnaryOperator; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@SdkInternalApi +public class BeginsWithConditional implements QueryConditional { + private static final UnaryOperator EXPRESSION_KEY_MAPPER = + k -> "#AMZN_MAPPED_" + cleanAttributeName(k); + private static final UnaryOperator EXPRESSION_VALUE_KEY_MAPPER = + k -> ":AMZN_MAPPED_" + cleanAttributeName(k); + + private final Key key; + + public BeginsWithConditional(Key key) { + this.key = key; + } + + @Override + public Expression expression(TableSchema tableSchema, String indexName) { + QueryConditionalKeyValues queryConditionalKeyValues = QueryConditionalKeyValues.from(key, tableSchema, indexName); + + if (queryConditionalKeyValues.sortValue().equals(nullAttributeValue())) { + throw new IllegalArgumentException("Attempt to query using a 'beginsWith' condition operator against a " + + "null sort key."); + } + + if (queryConditionalKeyValues.sortValue().n() != null) { + throw new IllegalArgumentException("Attempt to query using a 'beginsWith' condition operator against " + + "a numeric sort key."); + } + + String partitionKeyToken = EXPRESSION_KEY_MAPPER.apply(queryConditionalKeyValues.partitionKey()); + String partitionValueToken = EXPRESSION_VALUE_KEY_MAPPER.apply(queryConditionalKeyValues.partitionKey()); + String sortKeyToken = EXPRESSION_KEY_MAPPER.apply(queryConditionalKeyValues.sortKey()); + String sortValueToken = EXPRESSION_VALUE_KEY_MAPPER.apply(queryConditionalKeyValues.sortKey()); + + String queryExpression = String.format("%s = %s AND begins_with ( %s, %s )", + partitionKeyToken, + partitionValueToken, + sortKeyToken, + sortValueToken); + Map expressionAttributeValues = new HashMap<>(); + expressionAttributeValues.put(partitionValueToken, queryConditionalKeyValues.partitionValue()); + expressionAttributeValues.put(sortValueToken, queryConditionalKeyValues.sortValue()); + Map expressionAttributeNames = new HashMap<>(); + expressionAttributeNames.put(partitionKeyToken, queryConditionalKeyValues.partitionKey()); + expressionAttributeNames.put(sortKeyToken, queryConditionalKeyValues.sortKey()); + + return Expression.builder() + .expression(queryExpression) + .expressionValues(Collections.unmodifiableMap(expressionAttributeValues)) + .expressionNames(expressionAttributeNames) + .build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BeginsWithConditional that = (BeginsWithConditional) o; + + return key != null ? key.equals(that.key) : that.key == null; + } + + @Override + public int hashCode() { + return key != null ? 
key.hashCode() : 0; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/BetweenConditional.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/BetweenConditional.java new file mode 100644 index 000000000000..33d27f69a962 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/BetweenConditional.java @@ -0,0 +1,110 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.conditional; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.nullAttributeValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.cleanAttributeName; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.UnaryOperator; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@SdkInternalApi +public class BetweenConditional implements QueryConditional { + private static final UnaryOperator EXPRESSION_KEY_MAPPER = + k -> "#AMZN_MAPPED_" + cleanAttributeName(k); + private static final UnaryOperator EXPRESSION_VALUE_KEY_MAPPER = + k -> ":AMZN_MAPPED_" + cleanAttributeName(k); + private static final UnaryOperator EXPRESSION_OTHER_VALUE_KEY_MAPPER = + k -> ":AMZN_MAPPED_" + cleanAttributeName(k) + "2"; + + private final Key key1; + private final Key key2; + + public BetweenConditional(Key key1, Key key2) { + this.key1 = key1; + this.key2 = key2; + } + + @Override + public Expression expression(TableSchema tableSchema, String indexName) { + QueryConditionalKeyValues queryConditionalKeyValues1 = QueryConditionalKeyValues.from(key1, tableSchema, indexName); + QueryConditionalKeyValues queryConditionalKeyValues2 = QueryConditionalKeyValues.from(key2, tableSchema, indexName); + + if (queryConditionalKeyValues1.sortValue().equals(nullAttributeValue()) || + queryConditionalKeyValues2.sortValue().equals(nullAttributeValue())) { + throw new IllegalArgumentException("Attempt to query using a 'between' condition operator where one " + + "of the items has a null sort key."); + } + + String partitionKeyToken = EXPRESSION_KEY_MAPPER.apply(queryConditionalKeyValues1.partitionKey()); + String partitionValueToken = EXPRESSION_VALUE_KEY_MAPPER.apply(queryConditionalKeyValues1.partitionKey()); + String sortKeyToken = EXPRESSION_KEY_MAPPER.apply(queryConditionalKeyValues1.sortKey()); + String sortKeyValueToken1 = EXPRESSION_VALUE_KEY_MAPPER.apply(queryConditionalKeyValues1.sortKey()); + String 
sortKeyValueToken2 = EXPRESSION_OTHER_VALUE_KEY_MAPPER.apply(queryConditionalKeyValues2.sortKey()); + + String queryExpression = String.format("%s = %s AND %s BETWEEN %s AND %s", + partitionKeyToken, + partitionValueToken, + sortKeyToken, + sortKeyValueToken1, + sortKeyValueToken2); + Map expressionAttributeValues = new HashMap<>(); + expressionAttributeValues.put(partitionValueToken, queryConditionalKeyValues1.partitionValue()); + expressionAttributeValues.put(sortKeyValueToken1, queryConditionalKeyValues1.sortValue()); + expressionAttributeValues.put(sortKeyValueToken2, queryConditionalKeyValues2.sortValue()); + Map expressionAttributeNames = new HashMap<>(); + expressionAttributeNames.put(partitionKeyToken, queryConditionalKeyValues1.partitionKey()); + expressionAttributeNames.put(sortKeyToken, queryConditionalKeyValues1.sortKey()); + + return Expression.builder() + .expression(queryExpression) + .expressionValues(Collections.unmodifiableMap(expressionAttributeValues)) + .expressionNames(expressionAttributeNames) + .build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BetweenConditional that = (BetweenConditional) o; + + if (key1 != null ? ! key1.equals(that.key1) : that.key1 != null) { + return false; + } + return key2 != null ? key2.equals(that.key2) : that.key2 == null; + } + + @Override + public int hashCode() { + int result = key1 != null ? key1.hashCode() : 0; + result = 31 * result + (key2 != null ? key2.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/EqualToConditional.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/EqualToConditional.java new file mode 100644 index 000000000000..90e9c808b103 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/EqualToConditional.java @@ -0,0 +1,144 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
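Based on the format string above, a BetweenConditional built from two keys sharing a partition value renders an expression like the one in the comments below. The "id" and "createdAt" attribute names, the two Key instances, and the schema variable are illustrative assumptions; the token shapes follow directly from the key mappers defined in this class.

QueryConditional between = new BetweenConditional(lowerKey, upperKey);
Expression expression = between.expression(customerSchema, TableMetadata.primaryIndexName());

// For a partition key named "id" and a sort key named "createdAt", the rendered condition reads:
//   #AMZN_MAPPED_id = :AMZN_MAPPED_id
//       AND #AMZN_MAPPED_createdAt BETWEEN :AMZN_MAPPED_createdAt AND :AMZN_MAPPED_createdAt2
// with the mapped attribute names and values carried in the expression's name and value maps.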
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.conditional; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.nullAttributeValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.cleanAttributeName; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.isNullAttributeValue; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.function.UnaryOperator; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@SdkInternalApi +public class EqualToConditional implements QueryConditional { + private static final UnaryOperator EXPRESSION_KEY_MAPPER = + k -> "#AMZN_MAPPED_" + cleanAttributeName(k); + private static final UnaryOperator EXPRESSION_VALUE_KEY_MAPPER = + k -> ":AMZN_MAPPED_" + cleanAttributeName(k); + + private final Key key; + + public EqualToConditional(Key key) { + this.key = key; + } + + @Override + public Expression expression(TableSchema tableSchema, String indexName) { + String partitionKey = tableSchema.tableMetadata().indexPartitionKey(indexName); + AttributeValue partitionValue = key.partitionKeyValue(); + + if (partitionValue == null || partitionValue.equals(nullAttributeValue())) { + throw new IllegalArgumentException("Partition key must be a valid scalar value to execute a query " + + "against. The provided partition key was set to null."); + } + + Optional sortKeyValue = key.sortKeyValue(); + + if (sortKeyValue.isPresent()) { + Optional sortKey = tableSchema.tableMetadata().indexSortKey(indexName); + + if (!sortKey.isPresent()) { + throw new IllegalArgumentException("A sort key was supplied as part of a query conditional " + + "against an index that does not support a sort key. 
Index: " + + indexName); + } + + return partitionAndSortExpression(partitionKey, + sortKey.get(), + partitionValue, + sortKeyValue.get()); + } else { + return partitionOnlyExpression(partitionKey, partitionValue); + } + } + + private Expression partitionOnlyExpression(String partitionKey, + AttributeValue partitionValue) { + + String partitionKeyToken = EXPRESSION_KEY_MAPPER.apply(partitionKey); + String partitionKeyValueToken = EXPRESSION_VALUE_KEY_MAPPER.apply(partitionKey); + String queryExpression = String.format("%s = %s", partitionKeyToken, partitionKeyValueToken); + + return Expression.builder() + .expression(queryExpression) + .expressionNames(Collections.singletonMap(partitionKeyToken, partitionKey)) + .expressionValues(Collections.singletonMap(partitionKeyValueToken, partitionValue)) + .build(); + } + + private Expression partitionAndSortExpression(String partitionKey, + String sortKey, + AttributeValue partitionValue, + AttributeValue sortKeyValue) { + + + // When a sort key is explicitly provided as null treat as partition only expression + if (isNullAttributeValue(sortKeyValue)) { + return partitionOnlyExpression(partitionKey, partitionValue); + } + + String partitionKeyToken = EXPRESSION_KEY_MAPPER.apply(partitionKey); + String partitionKeyValueToken = EXPRESSION_VALUE_KEY_MAPPER.apply(partitionKey); + String sortKeyToken = EXPRESSION_KEY_MAPPER.apply(sortKey); + String sortKeyValueToken = EXPRESSION_VALUE_KEY_MAPPER.apply(sortKey); + + String queryExpression = String.format("%s = %s AND %s = %s", + partitionKeyToken, + partitionKeyValueToken, + sortKeyToken, + sortKeyValueToken); + Map expressionAttributeValues = new HashMap<>(); + expressionAttributeValues.put(partitionKeyValueToken, partitionValue); + expressionAttributeValues.put(sortKeyValueToken, sortKeyValue); + Map expressionAttributeNames = new HashMap<>(); + expressionAttributeNames.put(partitionKeyToken, partitionKey); + expressionAttributeNames.put(sortKeyToken, sortKey); + + return Expression.builder() + .expression(queryExpression) + .expressionValues(expressionAttributeValues) + .expressionNames(expressionAttributeNames) + .build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + EqualToConditional that = (EqualToConditional) o; + + return key != null ? key.equals(that.key) : that.key == null; + } + + @Override + public int hashCode() { + return key != null ? key.hashCode() : 0; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/QueryConditionalKeyValues.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/QueryConditionalKeyValues.java new file mode 100644 index 000000000000..342eeaad15e7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/QueryConditionalKeyValues.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.conditional; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * Internal helper class to act as a struct to store specific key values that are used throughout various + * {@link software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional} implementations. + */ +@SdkInternalApi +class QueryConditionalKeyValues { + private final String partitionKey; + private final AttributeValue partitionValue; + private final String sortKey; + private final AttributeValue sortValue; + + private QueryConditionalKeyValues(String partitionKey, + AttributeValue partitionValue, + String sortKey, + AttributeValue sortValue) { + this.partitionKey = partitionKey; + this.partitionValue = partitionValue; + this.sortKey = sortKey; + this.sortValue = sortValue; + } + + static QueryConditionalKeyValues from(Key key, TableSchema tableSchema, String indexName) { + String partitionKey = tableSchema.tableMetadata().indexPartitionKey(indexName); + AttributeValue partitionValue = key.partitionKeyValue(); + String sortKey = tableSchema.tableMetadata().indexSortKey(indexName).orElseThrow( + () -> new IllegalArgumentException("A query conditional requires a sort key to be present on the table " + + "or index being queried, yet none have been defined in the " + + "model")); + AttributeValue sortValue = + key.sortKeyValue().orElseThrow( + () -> new IllegalArgumentException("A query conditional requires a sort key to compare with, " + + "however one was not provided.")); + + return new QueryConditionalKeyValues(partitionKey, partitionValue, sortKey, sortValue); + } + + String partitionKey() { + return partitionKey; + } + + AttributeValue partitionValue() { + return partitionValue; + } + + String sortKey() { + return sortKey; + } + + AttributeValue sortValue() { + return sortValue; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/SingleKeyItemConditional.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/SingleKeyItemConditional.java new file mode 100644 index 000000000000..d8d85f73e0d9 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/conditional/SingleKeyItemConditional.java @@ -0,0 +1,110 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.conditional; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.nullAttributeValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.cleanAttributeName; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.UnaryOperator; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A {@link QueryConditional} implementation that matches values from a specific key using a supplied operator for the + * sort key value comparison. The partition key value will always have an equivalence comparison applied. + *
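     + *
     + * <p>
     + * Illustrative sketch only (the {@code key}, {@code tableSchema} and {@code indexName} below are
     + * placeholders, not part of this change): a conditional that treats the sort key value as an
     + * exclusive lower bound could be built and evaluated as follows:
     + * <pre>{@code
     + * QueryConditional conditional = new SingleKeyItemConditional(key, ">");
     + * Expression expression = conditional.expression(tableSchema, indexName);
     + * }</pre>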

    + * This class is used by higher-level (more specific) {@link QueryConditional} implementations such as + * {@link QueryConditional#sortGreaterThan(Key)} to reduce code duplication. + */ +@SdkInternalApi +public class SingleKeyItemConditional implements QueryConditional { + private static final UnaryOperator EXPRESSION_KEY_MAPPER = + k -> "#AMZN_MAPPED_" + cleanAttributeName(k); + private static final UnaryOperator EXPRESSION_VALUE_KEY_MAPPER = + k -> ":AMZN_MAPPED_" + cleanAttributeName(k); + + private final Key key; + private final String operator; + + public SingleKeyItemConditional(Key key, String operator) { + this.key = key; + this.operator = operator; + } + + @Override + public Expression expression(TableSchema tableSchema, String indexName) { + QueryConditionalKeyValues queryConditionalKeyValues = QueryConditionalKeyValues.from(key, tableSchema, indexName); + + if (queryConditionalKeyValues.sortValue().equals(nullAttributeValue())) { + throw new IllegalArgumentException("Attempt to query using a relative condition operator against a " + + "null sort key."); + } + + String partitionKeyToken = EXPRESSION_KEY_MAPPER.apply(queryConditionalKeyValues.partitionKey()); + String partitionValueToken = EXPRESSION_VALUE_KEY_MAPPER.apply(queryConditionalKeyValues.partitionKey()); + String sortKeyToken = EXPRESSION_KEY_MAPPER.apply(queryConditionalKeyValues.sortKey()); + String sortValueToken = EXPRESSION_VALUE_KEY_MAPPER.apply(queryConditionalKeyValues.sortKey()); + + String queryExpression = String.format("%s = %s AND %s %s %s", + partitionKeyToken, + partitionValueToken, + sortKeyToken, + operator, + sortValueToken); + Map expressionAttributeValues = new HashMap<>(); + expressionAttributeValues.put(partitionValueToken, queryConditionalKeyValues.partitionValue()); + expressionAttributeValues.put(sortValueToken, queryConditionalKeyValues.sortValue()); + Map expressionAttributeNames = new HashMap<>(); + expressionAttributeNames.put(partitionKeyToken, queryConditionalKeyValues.partitionKey()); + expressionAttributeNames.put(sortKeyToken, queryConditionalKeyValues.sortKey()); + + return Expression.builder() + .expression(queryExpression) + .expressionValues(expressionAttributeValues) + .expressionNames(expressionAttributeNames) + .build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SingleKeyItemConditional that = (SingleKeyItemConditional) o; + + if (key != null ? ! key.equals(that.key) : that.key != null) { + return false; + } + return operator != null ? operator.equals(that.operator) : that.operator == null; + } + + @Override + public int hashCode() { + int result = key != null ? key.hashCode() : 0; + result = 31 * result + (operator != null ? operator.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ChainConverterProvider.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ChainConverterProvider.java new file mode 100644 index 000000000000..a455051adb5f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ChainConverterProvider.java @@ -0,0 +1,70 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; + +/** + * A {@link AttributeConverterProvider} that allows multiple providers to be chained in a specified order + * to act as a single composite provider. When searching for an attribute converter for a type, + * the providers will be called in forward/ascending order, attempting to find a converter from the + * first provider, then the second, and so on, until a match is found or the operation fails. + */ +@SdkInternalApi +public final class ChainConverterProvider implements AttributeConverterProvider { + private final List providerChain; + + private ChainConverterProvider(List providers) { + this.providerChain = new ArrayList<>(providers); + } + + /** + * Construct a new instance of {@link ChainConverterProvider}. + * @param providers A list of {@link AttributeConverterProvider} to chain together. + * @return A constructed {@link ChainConverterProvider} object. + */ + public static ChainConverterProvider create(AttributeConverterProvider... providers) { + return new ChainConverterProvider(Arrays.asList(providers)); + } + + /** + * Construct a new instance of {@link ChainConverterProvider}. + * @param providers A list of {@link AttributeConverterProvider} to chain together. + * @return A constructed {@link ChainConverterProvider} object. + */ + public static ChainConverterProvider create(List providers) { + return new ChainConverterProvider(providers); + } + + public List chainedProviders() { + return Collections.unmodifiableList(this.providerChain); + } + + @Override + public AttributeConverter converterFor(EnhancedType enhancedType) { + return this.providerChain.stream() + .filter(provider -> provider.converterFor(enhancedType) != null) + .map(p -> p.converterFor(enhancedType)) + .findFirst().orElse(null); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterProviderResolver.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterProviderResolver.java new file mode 100644 index 000000000000..7f3cdf99ffe5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterProviderResolver.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider; + +/** + * Static module to assist with the initialization of attribute converter providers for a StaticTableSchema. + */ +@SdkInternalApi +public final class ConverterProviderResolver { + + private static final AttributeConverterProvider DEFAULT_ATTRIBUTE_CONVERTER = + DefaultAttributeConverterProvider.create(); + + private ConverterProviderResolver() { + } + + /** + * Static provider for the default attribute converters that are bundled with the DynamoDB Enhanced Client. + * This provider will be used by default unless overridden in the static table schema builder or using bean + * annotations. + */ + public static AttributeConverterProvider defaultConverterProvider() { + return DEFAULT_ATTRIBUTE_CONVERTER; + } + + /** + * Resolves a list of attribute converter providers into a single provider. If the list is a singleton, + * it will just return that provider, otherwise it will combine them into a + * {@link ChainConverterProvider} using the order provided in the list. + * + * @param providers A list of providers to be combined in strict order + * @return A single provider that combines all the supplied providers or null if no providers were supplied + */ + public static AttributeConverterProvider resolveProviders(List providers) { + if (providers == null || providers.isEmpty()) { + return null; + } + + if (providers.size() == 1) { + return providers.get(0); + } + + return ChainConverterProvider.create(providers); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterUtils.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterUtils.java new file mode 100644 index 000000000000..13e68b25cc01 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterUtils.java @@ -0,0 +1,80 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DoubleAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.FloatAttributeConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.Validate; + +/** + * Internal utilities that are used by some {@link AttributeConverter}s in the aid + * of converting to an {@link AttributeValue} and vice-versa. + */ +@SdkInternalApi +public class ConverterUtils { + private ConverterUtils() { + } + + /** + * Validates that a given Double input is a valid double supported by {@link DoubleAttributeConverter}. + * @param input + */ + public static void validateDouble(Double input) { + Validate.isTrue(!Double.isNaN(input), "NaN is not supported by the default converters."); + Validate.isTrue(Double.isFinite(input), "Infinite numbers are not supported by the default converters."); + } + + /** + * Validates that a given Float input is a valid double supported by {@link FloatAttributeConverter}. + * @param input + */ + public static void validateFloat(Float input) { + Validate.isTrue(!Float.isNaN(input), "NaN is not supported by the default converters."); + Validate.isTrue(Float.isFinite(input), "Infinite numbers are not supported by the default converters."); + } + + public static String padLeft(int paddingAmount, int valueToPad) { + String value = Integer.toString(valueToPad); + int padding = paddingAmount - value.length(); + StringBuilder result = new StringBuilder(paddingAmount); + for (int i = 0; i < padding; i++) { + result.append('0'); + } + result.append(value); + return result.toString(); + } + + public static String[] splitNumberOnDecimal(String valueToSplit) { + int i = valueToSplit.indexOf('.'); + if (i == -1) { + return new String[] { valueToSplit, "0" }; + } else { + // Ends with '.' is not supported. + return new String[] { valueToSplit.substring(0, i), valueToSplit.substring(i + 1) }; + } + } + + public static LocalDateTime convertFromLocalDate(LocalDate localDate) { + return LocalDateTime.of(localDate, LocalTime.MIDNIGHT); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/PrimitiveConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/PrimitiveConverter.java new file mode 100644 index 000000000000..3a8d9e623580 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/PrimitiveConverter.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; + +/** + * Interface for {@link StringConverter} and {@link AttributeConverter} implementations + * that support boxed and primitive types. + */ +@SdkInternalApi +@ThreadSafe +public interface PrimitiveConverter { + /** + * The type supported by this converter. + */ + EnhancedType primitiveType(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/StringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/StringConverter.java new file mode 100644 index 000000000000..a7e76943adb9 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/StringConverter.java @@ -0,0 +1,46 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; + +/** + * Converts a specific Java type to/from a {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public interface StringConverter { + /** + * Convert the provided object into a string. + */ + default String toString(T object) { + return object.toString(); + } + + /** + * Convert the provided string into an object. + */ + T fromString(String string); + + /** + * The type supported by this converter. + */ + EnhancedType type(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/StringConverterProvider.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/StringConverterProvider.java new file mode 100644 index 000000000000..d3aa6a675b68 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/StringConverterProvider.java @@ -0,0 +1,32 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.DefaultStringConverterProvider; + +/** + * Interface for providing string converters for Java objects. + */ +@SdkInternalApi +public interface StringConverterProvider { + StringConverter converterFor(EnhancedType enhancedType); + + static StringConverterProvider defaultProvider() { + return DefaultStringConverterProvider.create(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/TypeConvertingVisitor.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/TypeConvertingVisitor.java new file mode 100644 index 000000000000..7b1fa332b03c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/TypeConvertingVisitor.java @@ -0,0 +1,194 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.Validate; + +/** + * A visitor across all possible types of a {@link EnhancedAttributeValue}. + * + *
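     + *
     + * <p>
     + * A minimal sketch of a possible subclass (illustrative only, not part of this change) that accepts
     + * both string and number attributes and returns them as a {@code String}:
     + * <pre>{@code
     + * class StringConvertingVisitor extends TypeConvertingVisitor<String> {
     + *     StringConvertingVisitor() {
     + *         super(String.class);
     + *     }
     + *
     + *     public String convertString(String value) {
     + *         return value;
     + *     }
     + *
     + *     public String convertNumber(String value) {
     + *         return value;
     + *     }
     + * }
     + * }</pre>
     + * Any attribute type without an overridden method falls through to {@link #defaultConvert}, which throws.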

    + * This is useful in {@link AttributeConverter} implementations, without having to write a switch statement on the + * {@link EnhancedAttributeValue#type()}. + * + * @see EnhancedAttributeValue#convert(TypeConvertingVisitor) + */ +@SdkInternalApi +public abstract class TypeConvertingVisitor { + protected final Class targetType; + private final Class converterClass; + + /** + * Called by subclasses to provide enhanced logging when a specific type isn't handled. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}:
    + * <ol>
    + *     <li>If the provided type is null.</li>
    + * </ol>
    + * + * @param targetType The type to which this visitor is converting. + */ + protected TypeConvertingVisitor(Class targetType) { + this(targetType, null); + } + + /** + * Called by subclasses to provide enhanced logging when a specific type isn't handled. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}:
    + * <ol>
    + *     <li>If the provided type is null.</li>
    + * </ol>
    + * + * @param targetType The type to which this visitor is converting. + * @param converterClass The converter implementation that is creating this visitor. This may be null. + */ + protected TypeConvertingVisitor(Class targetType, + Class converterClass) { + Validate.paramNotNull(targetType, "targetType"); + this.targetType = targetType; + this.converterClass = converterClass; + } + + /** + * Convert the provided value into the target type. + * + *

    + * Reasons this call may fail with a {@link RuntimeException}:
    + * <ol>
    + *     <li>If the value cannot be converted by this visitor.</li>
    + * </ol>
    + */ + public final T convert(EnhancedAttributeValue value) { + switch (value.type()) { + case NULL: return convertNull(); + case M: return convertMap(value.asMap()); + case S: return convertString(value.asString()); + case N: return convertNumber(value.asNumber()); + case B: return convertBytes(value.asBytes()); + case BOOL: return convertBoolean(value.asBoolean()); + case SS: return convertSetOfStrings(value.asSetOfStrings()); + case NS: return convertSetOfNumbers(value.asSetOfNumbers()); + case BS: return convertSetOfBytes(value.asSetOfBytes()); + case L: return convertListOfAttributeValues(value.asListOfAttributeValues()); + default: throw new IllegalStateException("Unsupported type: " + value.type()); + } + } + + /** + * Invoked when visiting an attribute in which {@link EnhancedAttributeValue#isNull()} is true. + */ + public T convertNull() { + return null; + } + + /** + * Invoked when visiting an attribute in which {@link EnhancedAttributeValue#isMap()} is true. The provided value is the + * underlying value of the {@link EnhancedAttributeValue} being converted. + */ + public T convertMap(Map value) { + return defaultConvert(AttributeValueType.M, value); + } + + /** + * Invoked when visiting an attribute in which {@link EnhancedAttributeValue#isString()} is true. The provided value is the + * underlying value of the {@link EnhancedAttributeValue} being converted. + */ + public T convertString(String value) { + return defaultConvert(AttributeValueType.S, value); + } + + /** + * Invoked when visiting an attribute in which {@link EnhancedAttributeValue#isNumber()} is true. The provided value is the + * underlying value of the {@link EnhancedAttributeValue} being converted. + */ + public T convertNumber(String value) { + return defaultConvert(AttributeValueType.N, value); + } + + /** + * Invoked when visiting an attribute in which {@link EnhancedAttributeValue#isBytes()} is true. The provided value is the + * underlying value of the {@link EnhancedAttributeValue} being converted. + */ + public T convertBytes(SdkBytes value) { + return defaultConvert(AttributeValueType.B, value); + } + + /** + * Invoked when visiting an attribute in which {@link EnhancedAttributeValue#isBoolean()} is true. The provided value is the + * underlying value of the {@link EnhancedAttributeValue} being converted. + */ + public T convertBoolean(Boolean value) { + return defaultConvert(AttributeValueType.BOOL, value); + } + + /** + * Invoked when visiting an attribute in which {@link EnhancedAttributeValue#isSetOfStrings()} is true. The provided value is + * the underlying value of the {@link EnhancedAttributeValue} being converted. + */ + public T convertSetOfStrings(List value) { + return defaultConvert(AttributeValueType.SS, value); + } + + /** + * Invoked when visiting an attribute in which {@link EnhancedAttributeValue#isSetOfNumbers()} is true. The provided value is + * the underlying value of the {@link EnhancedAttributeValue} being converted. + */ + public T convertSetOfNumbers(List value) { + return defaultConvert(AttributeValueType.NS, value); + } + + /** + * Invoked when visiting an attribute in which {@link EnhancedAttributeValue#isSetOfBytes()} is true. The provided value is + * the underlying value of the {@link EnhancedAttributeValue} being converted. + */ + public T convertSetOfBytes(List value) { + return defaultConvert(AttributeValueType.BS, value); + } + + /** + * Invoked when visiting an attribute in which {@link EnhancedAttributeValue#isListOfAttributeValues()} is true. 
The provided + * value is the underlying value of the {@link EnhancedAttributeValue} being converted. + */ + public T convertListOfAttributeValues(List value) { + return defaultConvert(AttributeValueType.L, value); + } + + /** + * This is invoked by default if a different "convert" method is not overridden. By default, this throws an exception. + * + * @param type The type that wasn't handled by another "convert" method. + * @param value The value that wasn't handled by another "convert" method. + */ + public T defaultConvert(AttributeValueType type, Object value) { + if (converterClass != null) { + throw new IllegalStateException(converterClass.getTypeName() + " cannot convert an attribute of type " + type + + " into the requested type " + targetType); + } + + throw new IllegalStateException("Cannot convert attribute of type " + type + " into a " + targetType); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicBooleanAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicBooleanAttributeConverter.java new file mode 100644 index 000000000000..fa58f8d5cb7d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicBooleanAttributeConverter.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.util.concurrent.atomic.AtomicBoolean; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link AtomicBoolean} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a boolean. + * + *

    + * This supports reading every boolean value supported by DynamoDB, making it fully compatible with custom converters as + * well as internal converters (e.g. {@link BooleanAttributeConverter}). + * + *
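     + *
     + * <p>
     + * Round-trip sketch (illustrative only, not part of this change):
     + * <pre>{@code
     + * AtomicBooleanAttributeConverter converter = AtomicBooleanAttributeConverter.create();
     + * AttributeValue attributeValue = converter.transformFrom(new AtomicBoolean(true));
     + * AtomicBoolean roundTripped = converter.transformTo(attributeValue);
     + * }</pre>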

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class AtomicBooleanAttributeConverter implements AttributeConverter { + private static final BooleanAttributeConverter BOOLEAN_CONVERTER = BooleanAttributeConverter.create(); + + private AtomicBooleanAttributeConverter() { + } + + @Override + public EnhancedType type() { + return EnhancedType.of(AtomicBoolean.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.BOOL; + } + + public static AtomicBooleanAttributeConverter create() { + return new AtomicBooleanAttributeConverter(); + } + + @Override + public AttributeValue transformFrom(AtomicBoolean input) { + return AttributeValue.builder().bool(input.get()).build(); + } + + @Override + public AtomicBoolean transformTo(AttributeValue input) { + return new AtomicBoolean(BOOLEAN_CONVERTER.transformTo(input)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicIntegerAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicIntegerAttributeConverter.java new file mode 100644 index 000000000000..c3f4948403ac --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicIntegerAttributeConverter.java @@ -0,0 +1,102 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.util.concurrent.atomic.AtomicInteger; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.AtomicIntegerStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link AtomicInteger} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports reading numbers between {@link Integer#MIN_VALUE} and {@link Integer#MAX_VALUE} from DynamoDB. For smaller + * numbers, consider using {@link ShortAttributeConverter}. For larger numbers, consider using {@link LongAttributeConverter} + * or {@link BigIntegerAttributeConverter}. Numbers outside of the supported range will cause a {@link NumberFormatException} + * on conversion. + * + *

    + * This does not support reading decimal numbers. For decimal numbers, consider using {@link FloatAttributeConverter}, + * {@link DoubleAttributeConverter} or {@link BigDecimalAttributeConverter}. Decimal numbers will cause a + * {@link NumberFormatException} on conversion. + * + *
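     + *
     + * <p>
     + * Reading sketch (illustrative only, not part of this change), converting a DynamoDB number attribute
     + * into an {@link AtomicInteger}:
     + * <pre>{@code
     + * AtomicIntegerAttributeConverter converter = AtomicIntegerAttributeConverter.create();
     + * AtomicInteger value = converter.transformTo(AttributeValue.builder().n("42").build());
     + * }</pre>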

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class AtomicIntegerAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + private static final AtomicIntegerStringConverter STRING_CONVERTER = AtomicIntegerStringConverter.create(); + + private AtomicIntegerAttributeConverter() { + } + + @Override + public EnhancedType type() { + return EnhancedType.of(AtomicInteger.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + public static AtomicIntegerAttributeConverter create() { + return new AtomicIntegerAttributeConverter(); + } + + @Override + public AttributeValue transformFrom(AtomicInteger input) { + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public AtomicInteger transformTo(AttributeValue input) { + if (input.n() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(AtomicInteger.class, AtomicIntegerAttributeConverter.class); + } + + @Override + public AtomicInteger convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public AtomicInteger convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicLongAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicLongAttributeConverter.java new file mode 100644 index 000000000000..427a97ca7071 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicLongAttributeConverter.java @@ -0,0 +1,102 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.util.concurrent.atomic.AtomicLong; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.AtomicLongStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link AtomicLong} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports reading numbers between {@link Long#MIN_VALUE} and {@link Long#MAX_VALUE} from DynamoDB. For smaller + * numbers, consider using {@link ShortAttributeConverter} or {@link IntegerAttributeConverter}. For larger numbers, consider + * using {@link BigIntegerAttributeConverter}. Numbers outside of the supported range will cause a {@link NumberFormatException} + * on conversion. + * + *

    + * This does not support reading decimal numbers. For decimal numbers, consider using {@link FloatAttributeConverter}, + * {@link DoubleAttributeConverter} or {@link BigDecimalAttributeConverter}. Decimal numbers will cause a + * {@link NumberFormatException} on conversion. + * + *

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class AtomicLongAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + private static final AtomicLongStringConverter STRING_CONVERTER = AtomicLongStringConverter.create(); + + private AtomicLongAttributeConverter() { + } + + @Override + public EnhancedType type() { + return EnhancedType.of(AtomicLong.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + public static AtomicLongAttributeConverter create() { + return new AtomicLongAttributeConverter(); + } + + @Override + public AttributeValue transformFrom(AtomicLong input) { + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public AtomicLong transformTo(AttributeValue input) { + if (input.n() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(AtomicLong.class, AtomicLongAttributeConverter.class); + } + + @Override + public AtomicLong convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public AtomicLong convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BigDecimalAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BigDecimalAttributeConverter.java new file mode 100644 index 000000000000..b730c114ae5d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BigDecimalAttributeConverter.java @@ -0,0 +1,98 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.math.BigDecimal; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.BigDecimalStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link BigDecimal} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports perfect precision with the full range of numbers that can be stored in DynamoDB. For less precision or + * smaller values, consider using {@link FloatAttributeConverter} or {@link DoubleAttributeConverter}. + * + *

    + * If values are known to be whole numbers, it is recommended to use a perfect-precision whole number representation like those + * provided by {@link ShortAttributeConverter}, {@link IntegerAttributeConverter} or {@link BigIntegerAttributeConverter}. + * + *
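     + *
     + * <p>
     + * Round-trip sketch (illustrative only, not part of this change), showing the decimal value being
     + * written as a DynamoDB number and read back:
     + * <pre>{@code
     + * BigDecimalAttributeConverter converter = BigDecimalAttributeConverter.create();
     + * AttributeValue attributeValue = converter.transformFrom(new BigDecimal("3.14159265358979323846"));
     + * BigDecimal roundTripped = converter.transformTo(attributeValue);
     + * }</pre>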

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class BigDecimalAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + private static final BigDecimalStringConverter STRING_CONVERTER = BigDecimalStringConverter.create(); + + private BigDecimalAttributeConverter() { + } + + public static BigDecimalAttributeConverter create() { + return new BigDecimalAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(BigDecimal.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + @Override + public AttributeValue transformFrom(BigDecimal input) { + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public BigDecimal transformTo(AttributeValue input) { + if (input.n() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(BigDecimal.class, BigDecimalAttributeConverter.class); + } + + @Override + public BigDecimal convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public BigDecimal convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BigIntegerAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BigIntegerAttributeConverter.java new file mode 100644 index 000000000000..a5c8c52e0ba2 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BigIntegerAttributeConverter.java @@ -0,0 +1,99 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.math.BigInteger; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.BigIntegerStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link BigInteger} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports reading the full range of integers supported by DynamoDB. For smaller numbers, consider using + * {@link ShortAttributeConverter}, {@link IntegerAttributeConverter} or {@link LongAttributeConverter}. + * + *

    + * This does not support reading decimal numbers. For decimal numbers, consider using {@link FloatAttributeConverter}, + * {@link DoubleAttributeConverter} or {@link BigDecimalAttributeConverter}. Decimal numbers will cause a + * {@link NumberFormatException} on conversion. + * + *

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class BigIntegerAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + private static final BigIntegerStringConverter STRING_CONVERTER = BigIntegerStringConverter.create(); + + private BigIntegerAttributeConverter() { + } + + public static BigIntegerAttributeConverter create() { + return new BigIntegerAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(BigInteger.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + @Override + public AttributeValue transformFrom(BigInteger input) { + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public BigInteger transformTo(AttributeValue input) { + if (input.n() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(BigInteger.class, BigIntegerAttributeConverter.class); + } + + @Override + public BigInteger convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public BigInteger convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BooleanAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BooleanAttributeConverter.java new file mode 100644 index 000000000000..e9e8a0a1ebe1 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BooleanAttributeConverter.java @@ -0,0 +1,109 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.util.concurrent.atomic.AtomicBoolean; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.BooleanStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link AtomicBoolean} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a boolean. + * + *

    + * This supports reading every boolean value supported by DynamoDB, making it fully compatible with custom converters as well + * as internal converters (e.g. {@link AtomicBooleanAttributeConverter}). + * + *
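As an illustrative sketch of the behaviour described above (not part of the patch itself), a round trip through this converter, including the "0"/"1" number coercion handled by its visitor, might look like:

    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.BooleanAttributeConverter;
    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    BooleanAttributeConverter converter = BooleanAttributeConverter.create();

    // Writes use the native DynamoDB BOOL type.
    AttributeValue stored = converter.transformFrom(true);                               // {"BOOL": true}

    // Reads accept BOOL values as well as the numbers "0" and "1".
    Boolean fromBool = converter.transformTo(stored);                                    // true
    Boolean fromNumber = converter.transformTo(AttributeValue.builder().n("1").build()); // true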

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class BooleanAttributeConverter implements AttributeConverter, PrimitiveConverter { + private static final Visitor VISITOR = new Visitor(); + private static final BooleanStringConverter STRING_CONVERTER = BooleanStringConverter.create(); + + private BooleanAttributeConverter() { + } + + public static BooleanAttributeConverter create() { + return new BooleanAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Boolean.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.BOOL; + } + + @Override + public AttributeValue transformFrom(Boolean input) { + return AttributeValue.builder().bool(input).build(); + } + + @Override + public Boolean transformTo(AttributeValue input) { + if (input.bool() != null) { + return EnhancedAttributeValue.fromBoolean(input.bool()).convert(VISITOR); + } + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(boolean.class); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(Boolean.class, BooleanAttributeConverter.class); + } + + @Override + public Boolean convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public Boolean convertNumber(String value) { + switch (value) { + case "0": return false; + case "1": return true; + default: throw new IllegalArgumentException("Number could not be converted to boolean: " + value); + } + } + + @Override + public Boolean convertBoolean(Boolean value) { + return value; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ByteArrayAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ByteArrayAttributeConverter.java new file mode 100644 index 000000000000..2cf110a6814d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ByteArrayAttributeConverter.java @@ -0,0 +1,88 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@code byte[]} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a binary blob. + * + *

    + * This supports reading every byte value supported by DynamoDB, making it fully compatible with custom converters as + * well as internal converters (e.g. {@link SdkBytesAttributeConverter}). + * + *
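For illustration (a sketch, not part of the patch), storing a byte array produces a binary (B) attribute value and reads back unchanged:

    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ByteArrayAttributeConverter;
    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    ByteArrayAttributeConverter converter = ByteArrayAttributeConverter.create();

    byte[] payload = {1, 2, 3};
    AttributeValue stored = converter.transformFrom(payload);   // {"B": "AQID"} (base64 of the bytes)
    byte[] roundTripped = converter.transformTo(stored);        // {1, 2, 3}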

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class ByteArrayAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + + private ByteArrayAttributeConverter() { + } + + public static ByteArrayAttributeConverter create() { + return new ByteArrayAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(byte[].class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.B; + } + + @Override + public AttributeValue transformFrom(byte[] input) { + return AttributeValue.builder().b(SdkBytes.fromByteArray(input)).build(); + } + + @Override + public byte[] transformTo(AttributeValue input) { + if (input.b() != null) { + return EnhancedAttributeValue.fromBytes(input.b()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(byte[].class, ByteArrayAttributeConverter.class); + } + + @Override + public byte[] convertBytes(SdkBytes value) { + return value.asByteArray(); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ByteAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ByteAttributeConverter.java new file mode 100644 index 000000000000..d867b53fd732 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ByteAttributeConverter.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.ByteStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Byte} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a single byte. + * + *

    + * This only supports reading a single byte from DynamoDB. Any binary data greater than 1 byte will cause a RuntimeException + * during conversion. + * + *
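A minimal usage sketch (illustrative only): the byte is written as a DynamoDB number and parsed back via Byte.parseByte:

    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ByteAttributeConverter;
    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    ByteAttributeConverter converter = ByteAttributeConverter.create();

    AttributeValue stored = converter.transformFrom((byte) 42);  // {"N": "42"}
    Byte roundTripped = converter.transformTo(stored);           // 42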

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class ByteAttributeConverter implements AttributeConverter, PrimitiveConverter { + private static final ByteStringConverter STRING_CONVERTER = ByteStringConverter.create(); + private static final Visitor VISITOR = new Visitor(); + + private ByteAttributeConverter() { + } + + public static ByteAttributeConverter create() { + return new ByteAttributeConverter(); + } + + @Override + public AttributeValue transformFrom(Byte input) { + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public Byte transformTo(AttributeValue input) { + if (input.b() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Byte.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(byte.class); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(Byte.class, ByteAttributeConverter.class); + } + + @Override + public Byte convertNumber(String number) { + return Byte.parseByte(number); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/CharSequenceAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/CharSequenceAttributeConverter.java new file mode 100644 index 000000000000..d6016b3c9b57 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/CharSequenceAttributeConverter.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.CharSequenceStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link CharSequence} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a string. + * + *

    + * This supports reading every string value supported by DynamoDB, making it fully compatible with custom converters as + * well as internal converters (e.g. {@link StringAttributeConverter}). + * + *
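As a sketch (not part of the patch), any CharSequence implementation, such as a StringBuilder, is stored as a plain string attribute:

    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.CharSequenceAttributeConverter;
    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    CharSequenceAttributeConverter converter = CharSequenceAttributeConverter.create();

    AttributeValue stored = converter.transformFrom(new StringBuilder("hello"));  // {"S": "hello"}
    CharSequence roundTripped = converter.transformTo(stored);                    // "hello"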

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class CharSequenceAttributeConverter implements AttributeConverter { + private static final CharSequenceStringConverter CHAR_SEQUENCE_STRING_CONVERTER = CharSequenceStringConverter.create(); + private static final StringAttributeConverter STRING_ATTRIBUTE_CONVERTER = StringAttributeConverter.create(); + + private CharSequenceAttributeConverter() { + } + + public static CharSequenceAttributeConverter create() { + return new CharSequenceAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(CharSequence.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(CharSequence input) { + return AttributeValue.builder().s(CHAR_SEQUENCE_STRING_CONVERTER.toString(input)).build(); + } + + @Override + public CharSequence transformTo(AttributeValue input) { + String string = STRING_ATTRIBUTE_CONVERTER.transformTo(input); + return CHAR_SEQUENCE_STRING_CONVERTER.fromString(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/CharacterArrayAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/CharacterArrayAttributeConverter.java new file mode 100644 index 000000000000..a1965d793148 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/CharacterArrayAttributeConverter.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.CharacterArrayStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@code char[]} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a string. + * + *

    + * This supports reading every string value supported by DynamoDB, making it fully compatible with custom converters as + * well as internal converters (e.g. {@link StringAttributeConverter}). + * + *

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class CharacterArrayAttributeConverter implements AttributeConverter { + private static final CharacterArrayStringConverter CHAR_ARRAY_STRING_CONVERTER = CharacterArrayStringConverter.create(); + private static final StringAttributeConverter STRING_ATTRIBUTE_CONVERTER = StringAttributeConverter.create(); + + private CharacterArrayAttributeConverter() { + } + + public static CharacterArrayAttributeConverter create() { + return new CharacterArrayAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(char[].class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(char[] input) { + return AttributeValue.builder().s(CHAR_ARRAY_STRING_CONVERTER.toString(input)).build(); + } + + @Override + public char[] transformTo(AttributeValue input) { + return CHAR_ARRAY_STRING_CONVERTER.fromString(STRING_ATTRIBUTE_CONVERTER.transformTo(input)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/CharacterAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/CharacterAttributeConverter.java new file mode 100644 index 000000000000..f1d243683d71 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/CharacterAttributeConverter.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.CharacterStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Character} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a single-character string. + * + *

    + * This only supports reading a single character from DynamoDB. Any string longer than 1 character will cause a RuntimeException + * during conversion. + * + *
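An illustrative sketch of the single-character constraint described above (not part of the patch):

    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.CharacterAttributeConverter;
    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    CharacterAttributeConverter converter = CharacterAttributeConverter.create();

    AttributeValue stored = converter.transformFrom('a');    // {"S": "a"}
    Character roundTripped = converter.transformTo(stored);  // 'a'

    // Per the Javadoc above, a multi-character string cannot be read back as a Character:
    // converter.transformTo(AttributeValue.builder().s("ab").build());  // expected to throw a RuntimeException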

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class CharacterAttributeConverter implements AttributeConverter, PrimitiveConverter { + private static final Visitor VISITOR = new Visitor(); + private static final CharacterStringConverter STRING_CONVERTER = CharacterStringConverter.create(); + + private CharacterAttributeConverter() { + } + + public static CharacterAttributeConverter create() { + return new CharacterAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Character.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(Character input) { + return AttributeValue.builder().s(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public Character transformTo(AttributeValue input) { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(char.class); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(Character.class, CharacterAttributeConverter.class); + } + + @Override + public Character convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/DocumentAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/DocumentAttributeConverter.java new file mode 100644 index 000000000000..de7335471efb --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/DocumentAttributeConverter.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * {@link AttributeConverter} for converting nested table schemas + */ +@SdkInternalApi +public class DocumentAttributeConverter implements AttributeConverter { + + private final TableSchema tableSchema; + private final EnhancedType enhancedType; + + private DocumentAttributeConverter(TableSchema tableSchema, + EnhancedType enhancedType) { + this.tableSchema = tableSchema; + this.enhancedType = enhancedType; + } + + public static DocumentAttributeConverter create(TableSchema tableSchema, + EnhancedType enhancedType) { + return new DocumentAttributeConverter(tableSchema, enhancedType); + } + + @Override + public AttributeValue transformFrom(T input) { + return AttributeValue.builder().m(tableSchema.itemToMap(input, false)).build(); + } + + @Override + public T transformTo(AttributeValue input) { + return tableSchema.mapToItem(input.m()); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.M; + } + + @Override + public EnhancedType type() { + return enhancedType; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/DoubleAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/DoubleAttributeConverter.java new file mode 100644 index 000000000000..4eb2b2d91682 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/DoubleAttributeConverter.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.DoubleStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Double} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports converting numbers stored in DynamoDB into a double-precision floating point number, within the range + * {@link Double#MIN_VALUE}, {@link Double#MAX_VALUE}. For less precision or smaller values, consider using + * {@link FloatAttributeConverter}. For greater precision or larger values, consider using {@link BigDecimalAttributeConverter}. + * + *

    + * If values are known to be whole numbers, it is recommended to use a perfect-precision whole number representation like those + * provided by {@link ShortAttributeConverter}, {@link IntegerAttributeConverter} or {@link BigIntegerAttributeConverter}. + * + *
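A short usage sketch (illustrative only), storing a double as a DynamoDB number:

    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DoubleAttributeConverter;
    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    DoubleAttributeConverter converter = DoubleAttributeConverter.create();

    AttributeValue stored = converter.transformFrom(3.14);   // {"N": "3.14"}
    Double roundTripped = converter.transformTo(stored);     // 3.14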

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class DoubleAttributeConverter implements AttributeConverter, PrimitiveConverter { + private static final Visitor VISITOR = new Visitor(); + private static final DoubleStringConverter STRING_CONVERTER = DoubleStringConverter.create(); + + private DoubleAttributeConverter() { + } + + public static DoubleAttributeConverter create() { + return new DoubleAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Double.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + @Override + public AttributeValue transformFrom(Double input) { + ConverterUtils.validateDouble(input); + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public Double transformTo(AttributeValue input) { + Double result; + if (input.n() != null) { + result = EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } else { + result = EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + ConverterUtils.validateDouble(result); + return result; + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(double.class); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(Double.class, DoubleAttributeConverter.class); + } + + @Override + public Double convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public Double convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/DurationAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/DurationAttributeConverter.java new file mode 100644 index 000000000000..f312dc72aacd --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/DurationAttributeConverter.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils.padLeft; + +import java.time.Duration; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Duration} and {@link AttributeValue}. + * + *

    + * This stores and reads values in DynamoDB as a number, so that they can be sorted numerically as part of a sort key. + * + *

    + * Durations are stored in the format "[-]X[.YYYYYYYYY]", where X is the number of seconds in the duration, and Y is the number of + * nanoseconds in the duration, left padded with zeroes to a length of 9. The Y and decimal point may be excluded for durations + * that are of whole seconds. The duration may be preceded by a - to indicate a negative duration. + * + *

+ * Examples:
+ * <ul>
+ *     <li>{@code Duration.ofDays(1)} is stored as {@code ItemAttributeValueMapper.fromNumber("86400")}</li>
+ *     <li>{@code Duration.ofSeconds(9)} is stored as {@code ItemAttributeValueMapper.fromNumber("9")}</li>
+ *     <li>{@code Duration.ofSeconds(-9)} is stored as {@code ItemAttributeValueMapper.fromNumber("-9")}</li>
+ *     <li>{@code Duration.ofNanos(1_234_567_890)} is stored as {@code ItemAttributeValueMapper.fromNumber("1.234567890")}</li>
+ *     <li>{@code Duration.ofMillis(1)} is stored as {@code ItemAttributeValueMapper.fromNumber("0.001000000")}</li>
+ *     <li>{@code Duration.ofNanos(1)} is stored as {@code ItemAttributeValueMapper.fromNumber("0.000000001")}</li>
+ *     <li>{@code Duration.ofNanos(-1)} is stored as {@code ItemAttributeValueMapper.fromNumber("-0.000000001")}</li>
+ * </ul>
+ *
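As a usage sketch of the format listed above (illustrative, not part of the patch):

    import java.time.Duration;
    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DurationAttributeConverter;
    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    DurationAttributeConverter converter = DurationAttributeConverter.create();

    AttributeValue wholeSeconds = converter.transformFrom(Duration.ofDays(1));     // {"N": "86400"}
    AttributeValue fractional = converter.transformFrom(Duration.ofMillis(1500));  // {"N": "1.500000000"}

    Duration roundTripped = converter.transformTo(fractional);                     // PT1.5S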

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class DurationAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + + private DurationAttributeConverter() { + } + + public static DurationAttributeConverter create() { + return new DurationAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Duration.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + @Override + public AttributeValue transformFrom(Duration input) { + return AttributeValue.builder() + .n(input.getSeconds() + + (input.getNano() == 0 ? "" : "." + padLeft(9, input.getNano()))) + .build(); + } + + @Override + public Duration transformTo(AttributeValue input) { + if (input.n() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(Duration.class, DurationAttributeConverter.class); + } + + @Override + public Duration convertNumber(String value) { + String[] splitOnDecimal = ConverterUtils.splitNumberOnDecimal(value); + + long seconds = Long.parseLong(splitOnDecimal[0]); + int nanoAdjustment = Integer.parseInt(splitOnDecimal[1]); + + if (seconds < 0) { + nanoAdjustment = -nanoAdjustment; + } + + return Duration.ofSeconds(seconds, nanoAdjustment); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnhancedAttributeValue.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnhancedAttributeValue.java new file mode 100644 index 000000000000..4996058a4ef7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnhancedAttributeValue.java @@ -0,0 +1,845 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.util.SdkAutoConstructMap; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +/** + * A simpler, and more user-friendly version of the generated {@link AttributeValue}. + * + *

    + * This is a union type of the types exposed by DynamoDB, exactly as they're exposed by DynamoDB. + * + *

    + * An instance of {@link EnhancedAttributeValue} represents exactly one DynamoDB type, like String (s), Number (n) or Bytes (b). + * This type can be determined with the {@link #type()} method or the {@code is*} methods like {@link #isString()} or + * {@link #isNumber()}. Once the type is known, the value can be extracted with {@code as*} methods like {@link #asString()} + * or {@link #asNumber()}. + * + *
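For illustration (a sketch that is not part of the patch), the type()/is*/as* flow might be used like this:

    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue;
    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    // Wrap a generated AttributeValue and inspect which DynamoDB type it holds.
    EnhancedAttributeValue value =
            EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().n("42").build());

    if (value.isNumber()) {
        String number = value.asNumber();   // "42" (DynamoDB numbers are surfaced as strings)
    }

    // Convert back to the generated representation when talking to the low-level client.
    AttributeValue generated = value.toAttributeValue();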

    + * When converting an {@link EnhancedAttributeValue} into a concrete Java type, it can be tedious to use the {@link #type()} or + * {@code is*} methods. For this reason, a {@link #convert(TypeConvertingVisitor)} method is provided that exposes a polymorphic + * way of converting a value into another type. + * + *

    + * An instance of {@link EnhancedAttributeValue} is created with the {@code from*} methods, like {@link #fromString(String)} or + * {@link #fromNumber(String)}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class EnhancedAttributeValue { + private final AttributeValueType type; + private final boolean isNull; + private final Map mapValue; + private final String stringValue; + private final String numberValue; + private final SdkBytes bytesValue; + private final Boolean booleanValue; + private final List setOfStringsValue; + private final List setOfNumbersValue; + private final List setOfBytesValue; + private final List listOfAttributeValuesValue; + + private EnhancedAttributeValue(InternalBuilder builder) { + this.type = builder.type; + this.isNull = builder.isNull; + this.stringValue = builder.stringValue; + this.numberValue = builder.numberValue; + this.bytesValue = builder.bytesValue; + this.booleanValue = builder.booleanValue; + this.mapValue = builder.mapValue == null ? null + : Collections.unmodifiableMap(builder.mapValue); + this.setOfStringsValue = builder.setOfStringsValue == null + ? null : Collections.unmodifiableList(builder.setOfStringsValue); + this.setOfNumbersValue = builder.setOfNumbersValue == null + ? null : Collections.unmodifiableList(builder.setOfNumbersValue); + this.setOfBytesValue = builder.setOfBytesValue == null + ? null : Collections.unmodifiableList(builder.setOfBytesValue); + this.listOfAttributeValuesValue = builder.listOfAttributeValuesValue == null + ? null : Collections.unmodifiableList(builder.listOfAttributeValuesValue); + } + + /** + * Create an {@link EnhancedAttributeValue} for the null DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().nul(true).build())} + * + *

    + * This call should never fail with an {@link Exception}. + */ + public static EnhancedAttributeValue nullValue() { + return new InternalBuilder().isNull().build(); + } + + /** + * Create an {@link EnhancedAttributeValue} for a map (m) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().m(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided map is null or has null keys. + */ + public static EnhancedAttributeValue fromMap(Map mapValue) { + Validate.paramNotNull(mapValue, "mapValue"); + Validate.noNullElements(mapValue.keySet(), "Map must not have null keys."); + return new InternalBuilder().mapValue(mapValue).build(); + } + + /** + * Create an {@link EnhancedAttributeValue} for a string (s) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().s(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null. Use {@link #nullValue()} for + * null values. + */ + public static EnhancedAttributeValue fromString(String stringValue) { + Validate.paramNotNull(stringValue, "stringValue"); + return new InternalBuilder().stringValue(stringValue).build(); + } + + /** + * Create an {@link EnhancedAttributeValue} for a number (n) DynamoDB type. + * + *

    + * This is a String, because it matches the underlying DynamoDB representation. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().n(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null. Use {@link #nullValue()} for + * null values. + */ + public static EnhancedAttributeValue fromNumber(String numberValue) { + Validate.paramNotNull(numberValue, "numberValue"); + return new InternalBuilder().numberValue(numberValue).build(); + } + + /** + * Create an {@link EnhancedAttributeValue} for a bytes (b) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().b(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null. Use {@link #nullValue()} for + * null values. + */ + public static EnhancedAttributeValue fromBytes(SdkBytes bytesValue) { + Validate.paramNotNull(bytesValue, "bytesValue"); + return new InternalBuilder().bytesValue(bytesValue).build(); + } + + + /** + * Create an {@link EnhancedAttributeValue} for a boolean (bool) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().bool(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null. Use {@link #nullValue()} for + * null values. + */ + public static EnhancedAttributeValue fromBoolean(Boolean booleanValue) { + Validate.paramNotNull(booleanValue, "booleanValue"); + return new InternalBuilder().booleanValue(booleanValue).build(); + } + + /** + * Create an {@link EnhancedAttributeValue} for a set-of-strings (ss) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().ss(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null or contains a null value. Use + * {@link #fromListOfAttributeValues(List)} for null values. This will not validate that there are no + * duplicate values. + */ + public static EnhancedAttributeValue fromSetOfStrings(String... setOfStringsValue) { + Validate.paramNotNull(setOfStringsValue, "setOfStringsValue"); + return fromSetOfStrings(Arrays.asList(setOfStringsValue)); + } + + /** + * Create an {@link EnhancedAttributeValue} for a set-of-strings (ss) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().ss(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null or contains a null value. Use + * {@link #fromListOfAttributeValues(List)} for null values. This will not validate that there are no + * duplicate values. + */ + public static EnhancedAttributeValue fromSetOfStrings(List setOfStringsValue) { + Validate.paramNotNull(setOfStringsValue, "setOfStringsValue"); + Validate.noNullElements(setOfStringsValue, "Set must not have null values."); + return new InternalBuilder().setOfStringsValue(setOfStringsValue).build(); + } + + /** + * Create an {@link EnhancedAttributeValue} for a set-of-numbers (ns) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().ns(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null or contains a null value. Use + * {@link #fromListOfAttributeValues(List)} for null values. This will not validate that there are no + * duplicate values. + */ + public static EnhancedAttributeValue fromSetOfNumbers(String... setOfNumbersValue) { + Validate.paramNotNull(setOfNumbersValue, "setOfNumbersValue"); + return fromSetOfNumbers(Arrays.asList(setOfNumbersValue)); + } + + /** + * Create an {@link EnhancedAttributeValue} for a set-of-numbers (ns) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().ns(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null or contains a null value. Use + * {@link #fromListOfAttributeValues(List)} for null values. This will not validate that there are no + * duplicate values. + */ + public static EnhancedAttributeValue fromSetOfNumbers(List setOfNumbersValue) { + Validate.paramNotNull(setOfNumbersValue, "setOfNumbersValue"); + Validate.noNullElements(setOfNumbersValue, "Set must not have null values."); + return new InternalBuilder().setOfNumbersValue(setOfNumbersValue).build(); + } + + /** + * Create an {@link EnhancedAttributeValue} for a set-of-bytes (bs) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().bs(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null or contains a null value. Use + * {@link #fromListOfAttributeValues(List)} for null values. This will not validate that there are no + * duplicate values. + */ + public static EnhancedAttributeValue fromSetOfBytes(SdkBytes... setOfBytesValue) { + Validate.paramNotNull(setOfBytesValue, "setOfBytesValue"); + return fromSetOfBytes(Arrays.asList(setOfBytesValue)); + } + + /** + * Create an {@link EnhancedAttributeValue} for a set-of-bytes (bs) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().bs(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null or contains a null value. Use + * {@link #fromListOfAttributeValues(List)} for null values. This will not validate that there are no + * duplicate values. + */ + public static EnhancedAttributeValue fromSetOfBytes(List setOfBytesValue) { + Validate.paramNotNull(setOfBytesValue, "setOfBytesValue"); + Validate.noNullElements(setOfBytesValue, "Set must not have null values."); + return new InternalBuilder().setOfBytesValue(setOfBytesValue).build(); + } + + /** + * Create an {@link EnhancedAttributeValue} for a list-of-attributes (l) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().l(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null or contains a null value. Use + * {@link #nullValue()} for null values. + */ + public static EnhancedAttributeValue fromListOfAttributeValues(AttributeValue... listOfAttributeValuesValue) { + Validate.paramNotNull(listOfAttributeValuesValue, "listOfAttributeValuesValue"); + return fromListOfAttributeValues(Arrays.asList(listOfAttributeValuesValue)); + } + + /** + * Create an {@link EnhancedAttributeValue} for a list-of-attributes (l) DynamoDB type. + * + *

    + * Equivalent to: {@code EnhancedAttributeValue.fromGeneratedAttributeValue(AttributeValue.builder().l(...).build())} + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null or contains a null value. Use + * {@link #nullValue()} for null values. + */ + public static EnhancedAttributeValue fromListOfAttributeValues(List listOfAttributeValuesValue) { + Validate.paramNotNull(listOfAttributeValuesValue, "listOfAttributeValuesValue"); + Validate.noNullElements(listOfAttributeValuesValue, "List must not have null values."); + return new InternalBuilder().listOfAttributeValuesValue(listOfAttributeValuesValue).build(); + } + + /** + * Create an {@link EnhancedAttributeValue} from a generated {@link AttributeValue}. + * + *

    + * This call will fail with a {@link RuntimeException} if the provided value is null ({@link AttributeValue#nul()} is okay). + */ + public static EnhancedAttributeValue fromAttributeValue(AttributeValue attributeValue) { + Validate.notNull(attributeValue, "Generated attribute value must not contain null values. " + + "Use AttributeValue#nul() instead."); + if (attributeValue.s() != null) { + return EnhancedAttributeValue.fromString(attributeValue.s()); + } + if (attributeValue.n() != null) { + return EnhancedAttributeValue.fromNumber(attributeValue.n()); + } + if (attributeValue.bool() != null) { + return EnhancedAttributeValue.fromBoolean(attributeValue.bool()); + } + if (Boolean.TRUE.equals(attributeValue.nul())) { + return EnhancedAttributeValue.nullValue(); + } + if (attributeValue.b() != null) { + return EnhancedAttributeValue.fromBytes(attributeValue.b()); + } + if (attributeValue.hasM()) { + return EnhancedAttributeValue.fromMap(attributeValue.m()); + } + if (attributeValue.hasL()) { + return EnhancedAttributeValue.fromListOfAttributeValues(attributeValue.l()); + } + if (attributeValue.hasBs()) { + return EnhancedAttributeValue.fromSetOfBytes(attributeValue.bs()); + } + if (attributeValue.hasSs()) { + return EnhancedAttributeValue.fromSetOfStrings(attributeValue.ss()); + } + if (attributeValue.hasNs()) { + return EnhancedAttributeValue.fromSetOfNumbers(attributeValue.ns()); + } + + throw new IllegalStateException("Unable to convert attribute value: " + attributeValue); + } + + /** + * Retrieve the underlying DynamoDB type of this value, such as String (s) or Number (n). + * + *

    + * This call should never fail with an {@link Exception}. + */ + public AttributeValueType type() { + return type; + } + + /** + * Apply the provided visitor to this item attribute value, converting it into a specific type. This is useful in + * {@link AttributeConverter} implementations, without having to write a switch statement on the {@link #type()}. + * + *

+ * Reasons this call may fail with a {@link RuntimeException}:
+ * <ol>
+ *     <li>If the provided visitor is null.</li>
+ *     <li>If the value cannot be converted by this visitor.</li>
+ * </ol>
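As a sketch of how convert(TypeConvertingVisitor) can be used (the IntegerVisitor class here is hypothetical, and it assumes the single-argument TypeConvertingVisitor constructor and convert* overrides seen elsewhere in this patch):

    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor;
    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue;

    // Converts string (s) and number (n) values to Integer; other DynamoDB types are left to the
    // superclass, which is expected to reject them with a RuntimeException.
    final class IntegerVisitor extends TypeConvertingVisitor<Integer> {
        IntegerVisitor() {
            super(Integer.class);
        }

        @Override
        public Integer convertString(String value) {
            return Integer.parseInt(value);
        }

        @Override
        public Integer convertNumber(String value) {
            return Integer.parseInt(value);
        }
    }

    Integer result = EnhancedAttributeValue.fromNumber("7").convert(new IntegerVisitor());  // 7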

    + * This call should never fail with an {@link Exception}. + */ + public boolean isMap() { + return mapValue != null; + } + + /** + * Returns true if the underlying DynamoDB type of this value is a String (s). + * + *

    + * This call should never fail with an {@link Exception}. + */ + public boolean isString() { + return stringValue != null; + } + + /** + * Returns true if the underlying DynamoDB type of this value is a Number (n). + * + *

    + * This call should never fail with an {@link Exception}. + */ + public boolean isNumber() { + return numberValue != null; + } + + /** + * Returns true if the underlying DynamoDB type of this value is Bytes (b). + * + *

    + * This call should never fail with an {@link Exception}. + */ + public boolean isBytes() { + return bytesValue != null; + } + + /** + * Returns true if the underlying DynamoDB type of this value is a Boolean (bool). + * + *

    + * This call should never fail with an {@link Exception}. + */ + public boolean isBoolean() { + return booleanValue != null; + } + + /** + * Returns true if the underlying DynamoDB type of this value is a Set of Strings (ss). + * + *

    + * This call should never fail with an {@link Exception}. + */ + public boolean isSetOfStrings() { + return setOfStringsValue != null; + } + + /** + * Returns true if the underlying DynamoDB type of this value is a Set of Numbers (ns). + * + *

    + * This call should never fail with an {@link Exception}. + */ + public boolean isSetOfNumbers() { + return setOfNumbersValue != null; + } + + /** + * Returns true if the underlying DynamoDB type of this value is a Set of Bytes (bs). + * + *

    + * This call should never fail with an {@link Exception}. + */ + public boolean isSetOfBytes() { + return setOfBytesValue != null; + } + + /** + * Returns true if the underlying DynamoDB type of this value is a List of AttributeValues (l). + * + *

    + * This call should never fail with an {@link Exception}. + */ + public boolean isListOfAttributeValues() { + return listOfAttributeValuesValue != null; + } + + /** + * Returns true if the underlying DynamoDB type of this value is Null (null). + * + *

    + * This call should never fail with an {@link Exception}. + */ + public boolean isNull() { + return isNull; + } + + /** + * Retrieve this value as a map. + * + *

    + * This call will fail with a {@link RuntimeException} if {@link #isMap()} is false. + */ + public Map asMap() { + Validate.isTrue(isMap(), "Value is not a map."); + return mapValue; + } + + /** + * Retrieve this value as a string. + * + *

    + * This call will fail with a {@link RuntimeException} if {@link #isString()} is false. + */ + public String asString() { + Validate.isTrue(isString(), "Value is not a string."); + return stringValue; + } + + /** + * Retrieve this value as a number. + * + * Note: This returns a {@code String} (instead of a {@code Number}), because that's the generated type from + * DynamoDB: {@link AttributeValue#n()}. + * + *

    + * This call will fail with a {@link RuntimeException} if {@link #isNumber()} is false. + */ + public String asNumber() { + Validate.isTrue(isNumber(), "Value is not a number."); + return numberValue; + } + + /** + * Retrieve this value as bytes. + * + *

    + * This call will fail with a {@link RuntimeException} if {@link #isBytes()} is false. + */ + public SdkBytes asBytes() { + Validate.isTrue(isBytes(), "Value is not bytes."); + return bytesValue; + } + + /** + * Retrieve this value as a boolean. + * + *

    + * This call will fail with a {@link RuntimeException} if {@link #isBoolean()} is false. + */ + public Boolean asBoolean() { + Validate.isTrue(isBoolean(), "Value is not a boolean."); + return booleanValue; + } + + /** + * Retrieve this value as a set of strings. + * + *

    + * Note: This returns a {@code List} (instead of a {@code Set}), because that's the generated type from + * DynamoDB: {@link AttributeValue#ss()}. + * + *

    + * This call will fail with a {@link RuntimeException} if {@link #isSetOfStrings()} is false. + */ + public List asSetOfStrings() { + Validate.isTrue(isSetOfStrings(), "Value is not a list of strings."); + return setOfStringsValue; + } + + /** + * Retrieve this value as a set of numbers. + * + *

    + * Note: This returns a {@code List} (instead of a {@code Set}), because that's the generated type from + * DynamoDB: {@link AttributeValue#ns()}. + * + *

    + * This call will fail with a {@link RuntimeException} if {@link #isSetOfNumbers()} is false. + */ + public List asSetOfNumbers() { + Validate.isTrue(isSetOfNumbers(), "Value is not a list of numbers."); + return setOfNumbersValue; + } + + /** + * Retrieve this value as a set of bytes. + * + *

    + * Note: This returns a {@code List} (instead of a {@code Set}), because that's the generated type from + * DynamoDB: {@link AttributeValue#bs()}. + * + *

    + * This call will fail with a {@link RuntimeException} if {@link #isSetOfBytes()} is false. + */ + public List asSetOfBytes() { + Validate.isTrue(isSetOfBytes(), "Value is not a list of bytes."); + return setOfBytesValue; + } + + /** + * Retrieve this value as a list of attribute values. + * + *

    + * This call will fail with a {@link RuntimeException} if {@link #isListOfAttributeValues()} is false. + */ + public List asListOfAttributeValues() { + Validate.isTrue(isListOfAttributeValues(), "Value is not a list of attribute values."); + return listOfAttributeValuesValue; + } + + /** + * Convert this {@link EnhancedAttributeValue} into a generated {@code Map}. + * + *

    + * This call will fail with a {@link RuntimeException} if {@link #isMap()} is false. + */ + public Map toAttributeValueMap() { + Validate.validState(isMap(), "Cannot convert an attribute value of type %s to a generated item. Must be %s.", + type(), AttributeValueType.M); + + AttributeValue generatedAttributeValue = toAttributeValue(); + + Validate.validState(generatedAttributeValue.m() != null && !(generatedAttributeValue.m() instanceof SdkAutoConstructMap), + "Map EnhancedAttributeValue was not converted into a Map AttributeValue."); + return generatedAttributeValue.m(); + } + + /** + * Convert this {@link EnhancedAttributeValue} into a generated {@link AttributeValue}. + * + *

    + * This call should never fail with an {@link Exception}. + */ + public AttributeValue toAttributeValue() { + return convert(ToGeneratedAttributeValueVisitor.INSTANCE); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + EnhancedAttributeValue that = (EnhancedAttributeValue) o; + + if (isNull != that.isNull) { + return false; + } + if (type != that.type) { + return false; + } + if (mapValue != null ? !mapValue.equals(that.mapValue) : that.mapValue != null) { + return false; + } + if (stringValue != null ? !stringValue.equals(that.stringValue) : that.stringValue != null) { + return false; + } + if (numberValue != null ? !numberValue.equals(that.numberValue) : that.numberValue != null) { + return false; + } + if (bytesValue != null ? !bytesValue.equals(that.bytesValue) : that.bytesValue != null) { + return false; + } + if (booleanValue != null ? !booleanValue.equals(that.booleanValue) : that.booleanValue != null) { + return false; + } + if (setOfStringsValue != null ? !setOfStringsValue.equals(that.setOfStringsValue) : that.setOfStringsValue != null) { + return false; + } + if (setOfNumbersValue != null ? !setOfNumbersValue.equals(that.setOfNumbersValue) : that.setOfNumbersValue != null) { + return false; + } + if (setOfBytesValue != null ? !setOfBytesValue.equals(that.setOfBytesValue) : that.setOfBytesValue != null) { + return false; + } + return listOfAttributeValuesValue != null ? listOfAttributeValuesValue.equals(that.listOfAttributeValuesValue) + : that.listOfAttributeValuesValue == null; + } + + @Override + public int hashCode() { + int result = type.hashCode(); + result = 31 * result + (isNull ? 1 : 0); + result = 31 * result + (mapValue != null ? mapValue.hashCode() : 0); + result = 31 * result + (stringValue != null ? stringValue.hashCode() : 0); + result = 31 * result + (numberValue != null ? numberValue.hashCode() : 0); + result = 31 * result + (bytesValue != null ? bytesValue.hashCode() : 0); + result = 31 * result + (booleanValue != null ? booleanValue.hashCode() : 0); + result = 31 * result + (setOfStringsValue != null ? setOfStringsValue.hashCode() : 0); + result = 31 * result + (setOfNumbersValue != null ? setOfNumbersValue.hashCode() : 0); + result = 31 * result + (setOfBytesValue != null ? setOfBytesValue.hashCode() : 0); + result = 31 * result + (listOfAttributeValuesValue != null ? 
listOfAttributeValuesValue.hashCode() : 0); + return result; + } + + @Override + public String toString() { + Object value = convert(ToStringVisitor.INSTANCE); + return ToString.builder("EnhancedAttributeValue") + .add("type", type) + .add("value", value) + .build(); + } + + private static class ToGeneratedAttributeValueVisitor extends TypeConvertingVisitor { + private static final ToGeneratedAttributeValueVisitor INSTANCE = new ToGeneratedAttributeValueVisitor(); + + private ToGeneratedAttributeValueVisitor() { + super(AttributeValue.class); + } + + @Override + public AttributeValue convertNull() { + return AttributeValue.builder().nul(true).build(); + } + + @Override + public AttributeValue convertMap(Map value) { + return AttributeValue.builder().m(value).build(); + } + + @Override + public AttributeValue convertString(String value) { + return AttributeValue.builder().s(value).build(); + } + + @Override + public AttributeValue convertNumber(String value) { + return AttributeValue.builder().n(value).build(); + } + + @Override + public AttributeValue convertBytes(SdkBytes value) { + return AttributeValue.builder().b(value).build(); + } + + @Override + public AttributeValue convertBoolean(Boolean value) { + return AttributeValue.builder().bool(value).build(); + } + + @Override + public AttributeValue convertSetOfStrings(List value) { + return AttributeValue.builder().ss(value).build(); + } + + @Override + public AttributeValue convertSetOfNumbers(List value) { + return AttributeValue.builder().ns(value).build(); + } + + @Override + public AttributeValue convertSetOfBytes(List value) { + return AttributeValue.builder().bs(value).build(); + } + + @Override + public AttributeValue convertListOfAttributeValues(List value) { + return AttributeValue.builder().l(value).build(); + } + } + + private static class ToStringVisitor extends TypeConvertingVisitor { + private static final ToStringVisitor INSTANCE = new ToStringVisitor(); + + private ToStringVisitor() { + super(Object.class); + } + + @Override + public Object convertNull() { + return "null"; + } + + @Override + public Object defaultConvert(AttributeValueType type, Object value) { + return value; + } + } + + private static class InternalBuilder { + private AttributeValueType type; + private boolean isNull = false; + private Map mapValue; + private String stringValue; + private String numberValue; + private SdkBytes bytesValue; + private Boolean booleanValue; + private List setOfStringsValue; + private List setOfNumbersValue; + private List setOfBytesValue; + private List listOfAttributeValuesValue; + + public InternalBuilder isNull() { + this.type = AttributeValueType.NULL; + this.isNull = true; + return this; + } + + private InternalBuilder mapValue(Map mapValue) { + this.type = AttributeValueType.M; + this.mapValue = mapValue; + return this; + } + + private InternalBuilder stringValue(String stringValue) { + this.type = AttributeValueType.S; + this.stringValue = stringValue; + return this; + } + + private InternalBuilder numberValue(String numberValue) { + this.type = AttributeValueType.N; + this.numberValue = numberValue; + return this; + } + + private InternalBuilder bytesValue(SdkBytes bytesValue) { + this.type = AttributeValueType.B; + this.bytesValue = bytesValue; + return this; + } + + private InternalBuilder booleanValue(Boolean booleanValue) { + this.type = AttributeValueType.BOOL; + this.booleanValue = booleanValue; + return this; + } + + private InternalBuilder setOfStringsValue(List setOfStringsValue) { + this.type = 
AttributeValueType.SS; + this.setOfStringsValue = setOfStringsValue; + return this; + } + + private InternalBuilder setOfNumbersValue(List setOfNumbersValue) { + this.type = AttributeValueType.NS; + this.setOfNumbersValue = setOfNumbersValue; + return this; + } + + private InternalBuilder setOfBytesValue(List setOfBytesValue) { + this.type = AttributeValueType.BS; + this.setOfBytesValue = setOfBytesValue; + return this; + } + + private InternalBuilder listOfAttributeValuesValue(List listOfAttributeValuesValue) { + this.type = AttributeValueType.L; + this.listOfAttributeValuesValue = listOfAttributeValuesValue; + return this; + } + + private EnhancedAttributeValue build() { + return new EnhancedAttributeValue(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java new file mode 100644 index 000000000000..18395a82656b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java @@ -0,0 +1,85 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.Validate; + +/** + * A converter between an {@link Enum} and {@link AttributeValue}. + * + *
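For orientation before the converters that follow, a minimal sketch of how the EnhancedAttributeValue helpers that just ended are typically used (illustrative only, not part of this change; it assumes the fromString/fromNumber/fromAttributeValue factories defined earlier in that class):

import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

final class EnhancedAttributeValueSketch {
    public static void main(String[] args) {
        // Wrap a raw String or number and lower it to the generated AttributeValue type.
        AttributeValue name = EnhancedAttributeValue.fromString("jdoe").toAttributeValue(); // S = "jdoe"
        AttributeValue age = EnhancedAttributeValue.fromNumber("42").toAttributeValue();    // N = "42"

        // Wrap an existing AttributeValue again; the converters in this package then apply a
        // TypeConvertingVisitor to it via convert(...).
        EnhancedAttributeValue wrapped = EnhancedAttributeValue.fromAttributeValue(age);
        System.out.println(wrapped); // toString() goes through the ToStringVisitor defined above
    }
}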

    + * This stores values in DynamoDB as a string. + * + *

    + * This can be created via {@link #create(Class)}. + */ +@SdkInternalApi +public class EnumAttributeConverter> implements AttributeConverter { + + private final Class enumClass; + private final Map enumValueMap; + + private EnumAttributeConverter(Class enumClass) { + this.enumClass = enumClass; + + Map mutableEnumValueMap = new LinkedHashMap<>(); + Arrays.stream(enumClass.getEnumConstants()) + .forEach(enumConstant -> mutableEnumValueMap.put(enumConstant.toString(), enumConstant)); + + this.enumValueMap = Collections.unmodifiableMap(mutableEnumValueMap); + } + + public static > EnumAttributeConverter create(Class enumClass) { + return new EnumAttributeConverter<>(enumClass); + } + + @Override + public AttributeValue transformFrom(T input) { + return AttributeValue.builder().s(input.toString()).build(); + } + + @Override + public T transformTo(AttributeValue input) { + Validate.isTrue(input.s() != null, "Cannot convert non-string value to enum."); + T returnValue = enumValueMap.get(input.s()); + + if (returnValue == null) { + throw new IllegalArgumentException(String.format("Unable to convert string value '%s' to enum type '%s'", + input.s(), enumClass)); + } + + return returnValue; + } + + @Override + public EnhancedType type() { + return EnhancedType.of(enumClass); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/FloatAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/FloatAttributeConverter.java new file mode 100644 index 000000000000..ab83f06d2d7f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/FloatAttributeConverter.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.FloatStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Float} and {@link AttributeValue}. + * + *
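A minimal usage sketch of the EnumAttributeConverter shown above (the {@code Color} enum is hypothetical and only for illustration):

import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnumAttributeConverter;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

final class EnumConverterSketch {
    // Hypothetical enum, not part of this change.
    enum Color { RED, GREEN, BLUE }

    public static void main(String[] args) {
        EnumAttributeConverter<Color> converter = EnumAttributeConverter.create(Color.class);

        AttributeValue stored = converter.transformFrom(Color.GREEN); // S = "GREEN" (the constant's toString())
        Color roundTripped = converter.transformTo(stored);           // back to Color.GREEN

        // A string that matches no constant's toString() raises IllegalArgumentException, per transformTo above.
    }
}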

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports converting numbers stored in DynamoDB into a single-precision floating point number, within the range + * {@link Float#MIN_VALUE}, {@link Float#MAX_VALUE}. For more precision or larger values, consider using + * {@link DoubleAttributeConverter} or {@link BigDecimalAttributeConverter}. + * + *

    + * If values are known to be whole numbers, it is recommended to use a perfect-precision whole number representation like those + * provided by {@link ShortAttributeConverter}, {@link IntegerAttributeConverter} or {@link BigIntegerAttributeConverter}. + * + *

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class FloatAttributeConverter implements AttributeConverter, PrimitiveConverter { + private static final Visitor VISITOR = new Visitor(); + private static final FloatStringConverter STRING_CONVERTER = FloatStringConverter.create(); + + private FloatAttributeConverter() { + } + + public static FloatAttributeConverter create() { + return new FloatAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Float.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + @Override + public AttributeValue transformFrom(Float input) { + ConverterUtils.validateFloat(input); + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public Float transformTo(AttributeValue input) { + Float result; + if (input.n() != null) { + result = EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } else { + result = EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + ConverterUtils.validateFloat(result); + return result; + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(float.class); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(Float.class, FloatAttributeConverter.class); + } + + @Override + public Float convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public Float convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/InstantAsStringAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/InstantAsStringAttributeConverter.java new file mode 100644 index 000000000000..abd2332ffa63 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/InstantAsStringAttributeConverter.java @@ -0,0 +1,117 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.Instant; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Instant} and {@link AttributeValue}. 
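A short sketch of the FloatAttributeConverter defined above (illustrative usage, not part of this change):

import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.FloatAttributeConverter;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

final class FloatConverterSketch {
    public static void main(String[] args) {
        FloatAttributeConverter converter = FloatAttributeConverter.create();

        AttributeValue stored = converter.transformFrom(3.14f); // N = "3.14"
        Float parsed = converter.transformTo(stored);           // 3.14f

        // Both directions run the value through ConverterUtils.validateFloat, as shown in the class above.
    }
}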
+ * + *

    + * This stores values in DynamoDB as a string. + * + *

    + * Values are stored in ISO-8601 format, with nanosecond precision and a time zone of UTC. + * + *

    + * Examples: + *

      + *
+ * <ul>
+ *     <li>{@code Instant.EPOCH.plusSeconds(1)} is stored as an AttributeValue with the String "1970-01-01T00:00:01Z"</li>
+ *     <li>{@code Instant.EPOCH.minusSeconds(1)} is stored as an AttributeValue with the String "1969-12-31T23:59:59Z"</li>
+ *     <li>{@code Instant.EPOCH.plusMillis(1)} is stored as an AttributeValue with the String "1970-01-01T00:00:00.001Z"</li>
+ *     <li>{@code Instant.EPOCH.minusMillis(1)} is stored as an AttributeValue with the String "1969-12-31T23:59:59.999Z"</li>
+ *     <li>{@code Instant.EPOCH.plusNanos(1)} is stored as an AttributeValue with the String "1970-01-01T00:00:00.000000001Z"</li>
+ *     <li>{@code Instant.EPOCH.minusNanos(1)} is stored as an AttributeValue with the String "1969-12-31T23:59:59.999999999Z"</li>
+ * </ul>
+ * See {@link Instant} for more details on the serialization format.
+ *

+ * This converter can read any values written by itself, values with a zero offset written by
+ * {@link OffsetDateTimeAsStringAttributeConverter}, and values with a zero offset and no named time zone written by
+ * {@link ZoneOffsetAttributeConverter}. Offset and zoned times will be automatically converted to the
+ * equivalent {@link Instant}.
+ *

    + * This serialization is lexicographically orderable when the year is not negative. + *

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class InstantAsStringAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + + private InstantAsStringAttributeConverter() { + } + + public static InstantAsStringAttributeConverter create() { + return new InstantAsStringAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Instant.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(Instant input) { + return AttributeValue.builder().s(input.toString()).build(); + } + + @Override + public Instant transformTo(AttributeValue input) { + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } + + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(Instant.class, InstantAsStringAttributeConverter.class); + } + + @Override + public Instant convertString(String value) { + return Instant.parse(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/IntegerAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/IntegerAttributeConverter.java new file mode 100644 index 000000000000..6b1ffd0247db --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/IntegerAttributeConverter.java @@ -0,0 +1,106 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.Instant; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.IntegerStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Integer} and {@link AttributeValue}. + * + *
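A minimal sketch of the InstantAsStringAttributeConverter defined above (illustrative usage, not part of this change):

import java.time.Instant;
import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsStringAttributeConverter;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

final class InstantConverterSketch {
    public static void main(String[] args) {
        InstantAsStringAttributeConverter converter = InstantAsStringAttributeConverter.create();

        AttributeValue stored = converter.transformFrom(Instant.EPOCH.plusSeconds(1));
        // stored.s() == "1970-01-01T00:00:01Z"

        Instant parsed = converter.transformTo(stored); // back to the same Instant

        // Malformed strings surface as IllegalArgumentException: transformTo wraps the
        // RuntimeException thrown by Instant.parse, as shown in the class above.
    }
}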

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports reading numbers between {@link Integer#MIN_VALUE} and {@link Integer#MAX_VALUE} from DynamoDB. For smaller + * numbers, consider using {@link ShortAttributeConverter}. For larger numbers, consider using {@link LongAttributeConverter} + * or {@link BigIntegerAttributeConverter}. Numbers outside of the supported range will cause a {@link NumberFormatException} + * on conversion. + * + *

    + * This does not support reading decimal numbers. For decimal numbers, consider using {@link FloatAttributeConverter}, + * {@link DoubleAttributeConverter} or {@link BigDecimalAttributeConverter}. Decimal numbers will cause a + * {@link NumberFormatException} on conversion. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class IntegerAttributeConverter implements AttributeConverter, PrimitiveConverter { + public static final IntegerStringConverter INTEGER_STRING_CONVERTER = IntegerStringConverter.create(); + + private IntegerAttributeConverter() { + } + + public static IntegerAttributeConverter create() { + return new IntegerAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Integer.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + @Override + public AttributeValue transformFrom(Integer input) { + return AttributeValue.builder().n(INTEGER_STRING_CONVERTER.toString(input)).build(); + } + + @Override + public Integer transformTo(AttributeValue input) { + if (input.n() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(Visitor.INSTANCE); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(Visitor.INSTANCE); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(int.class); + } + + private static final class Visitor extends TypeConvertingVisitor { + private static final Visitor INSTANCE = new Visitor(); + + private Visitor() { + super(Instant.class, IntegerAttributeConverter.class); + } + + @Override + public Integer convertString(String value) { + return INTEGER_STRING_CONVERTER.fromString(value); + } + + @Override + public Integer convertNumber(String value) { + return INTEGER_STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ListAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ListAttributeConverter.java new file mode 100644 index 000000000000..40984c049334 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ListAttributeConverter.java @@ -0,0 +1,216 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import static java.util.stream.Collectors.toList; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between a specific {@link Collection} type and {@link EnhancedAttributeValue}. + * + *

    + * This stores values in DynamoDB as a list of attribute values. This uses a configured {@link AttributeConverter} to convert + * the collection contents to an attribute value. + * + *

    + * This supports reading a list of attribute values. This uses a configured {@link AttributeConverter} to convert + * the collection contents. + * + *

    + * A builder is exposed to allow defining how the collection and element types are created and converted: + *

    + * + * {@literal AttributeConverter> listConverter = + * CollectionAttributeConverter.builder(EnhancedType.listOf(Integer.class)) + * .collectionConstructor(ArrayList::new) + * .elementConverter(IntegerAttributeConverter.create()) + * .build()} + * + * + *

    + * For frequently-used types, static methods are exposed to reduce the amount of boilerplate involved in creation: + *

    + * + * {@literal AttributeConverter> listConverter = + * CollectionAttributeConverter.listConverter(IntegerAttributeConverter.create());} + * + *

    + * + * {@literal AttributeConverter> collectionConverer = + * CollectionAttributeConverter.collectionConverter(IntegerAttributeConverter.create());} + * + *

    + * + * {@literal AttributeConverter> setConverter = + * CollectionAttributeConverter.setConverter(IntegerAttributeConverter.create());} + * + *

    + * + * {@literal AttributeConverter> sortedSetConverter = + * CollectionAttributeConverter.sortedSetConverter(IntegerAttributeConverter.create());} + * + * + * @see MapAttributeConverter + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class ListAttributeConverter> implements AttributeConverter { + private final Delegate delegate; + + private ListAttributeConverter(Delegate delegate) { + this.delegate = delegate; + } + + public static ListAttributeConverter> create(AttributeConverter elementConverter) { + return builder(EnhancedType.listOf(elementConverter.type())) + .collectionConstructor(ArrayList::new) + .elementConverter(elementConverter) + .build(); + } + + public static , U> ListAttributeConverter.Builder builder(EnhancedType collectionType) { + return new Builder<>(collectionType); + } + + @Override + public EnhancedType type() { + return delegate.type(); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.L; + } + + @Override + public AttributeValue transformFrom(T input) { + return delegate.transformFrom(input); + } + + @Override + public T transformTo(AttributeValue input) { + return delegate.transformTo(input); + } + + private static final class Delegate, U> implements AttributeConverter { + private final EnhancedType type; + private final Supplier collectionConstructor; + private final AttributeConverter elementConverter; + + private Delegate(Builder builder) { + this.type = builder.collectionType; + this.collectionConstructor = builder.collectionConstructor; + this.elementConverter = builder.elementConverter; + } + + @Override + public EnhancedType type() { + return type; + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.L; + } + + @Override + public AttributeValue transformFrom(T input) { + return EnhancedAttributeValue.fromListOfAttributeValues(input.stream() + .map(elementConverter::transformFrom) + .collect(toList())) + .toAttributeValue(); + } + + @Override + public T transformTo(AttributeValue input) { + return EnhancedAttributeValue.fromAttributeValue(input) + .convert(new TypeConvertingVisitor(type.rawClass(), ListAttributeConverter.class) { + @Override + public T convertSetOfStrings(List value) { + return convertCollection(value, v -> AttributeValue.builder().s(v).build()); + } + + @Override + public T convertSetOfNumbers(List value) { + return convertCollection(value, v -> AttributeValue.builder().n(v).build()); + } + + @Override + public T convertSetOfBytes(List value) { + return convertCollection(value, v -> AttributeValue.builder().b(v).build()); + } + + @Override + public T convertListOfAttributeValues(List value) { + return convertCollection(value, Function.identity()); + } + + private T convertCollection(Collection collection, + Function transformFrom) { + Collection result = (Collection) collectionConstructor.get(); + + collection.stream() + .map(transformFrom) + .map(elementConverter::transformTo) + .forEach(result::add); + + // This is a safe cast - We know the values we added to the list + // match the type that the customer requested. 
+ return (T) result; + } + }); + } + } + + public static final class Builder, U> { + private final EnhancedType collectionType; + private Supplier collectionConstructor; + private AttributeConverter elementConverter; + + private Builder(EnhancedType collectionType) { + this.collectionType = collectionType; + } + + public Builder collectionConstructor(Supplier collectionConstructor) { + this.collectionConstructor = collectionConstructor; + return this; + } + + public Builder elementConverter(AttributeConverter elementConverter) { + this.elementConverter = elementConverter; + return this; + } + + public ListAttributeConverter build() { + return new ListAttributeConverter<>(new Delegate<>(this)); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateAttributeConverter.java new file mode 100644 index 000000000000..0966c933aeff --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateAttributeConverter.java @@ -0,0 +1,114 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.Year; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link LocalDate} and {@link AttributeValue}. + * + *
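A minimal sketch of the ListAttributeConverter just shown, using its {@code create} factory (generics are reconstructed here as an assumption, since the diff text strips them):

import java.util.Arrays;
import java.util.List;
import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter;
import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.IntegerAttributeConverter;
import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ListAttributeConverter;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

final class ListConverterSketch {
    public static void main(String[] args) {
        // create(...) wires an ArrayList constructor and the element converter, per the factory above.
        AttributeConverter<List<Integer>> converter =
            ListAttributeConverter.create(IntegerAttributeConverter.create());

        AttributeValue stored = converter.transformFrom(Arrays.asList(1, 2, 3));
        // stored.l() is a list of number AttributeValues: N="1", N="2", N="3"

        List<Integer> parsed = converter.transformTo(stored);
    }
}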

    + * This stores and reads values in DynamoDB as a String. + * + *

    + * LocalDates are stored in the official {@link LocalDate} format "[-]YYYY-MM-DD", where: + *

      + *
+ * <ol>
+ *     <li>Y is a year between {@link Year#MIN_VALUE} and {@link Year#MAX_VALUE} (prefixed with - if it is negative)</li>
+ *     <li>M is a 2-character, zero-prefixed month between 01 and 12</li>
+ *     <li>D is a 2-character, zero-prefixed day between 01 and 31</li>
+ * </ol>
+ * See {@link LocalDate} for more details on the serialization format.
+ *

    + * This is unidirectional format-compatible with the {@link LocalDateTimeAttributeConverter}, allowing values + * stored as {@link LocalDate} to be retrieved as {@link LocalDateTime}s. + * + *

    + * This serialization is lexicographically orderable when the year is not negative. + *

    + * + * Examples: + *

      + *
+ * <ul>
+ *     <li>{@code LocalDate.of(1988, 5, 21)} is stored as an AttributeValue with the String "1988-05-21"</li>
+ *     <li>{@code LocalDate.of(0, 1, 1)} is stored as an AttributeValue with the String "0000-01-01"</li>
+ * </ul>
+ *

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class LocalDateAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + + private LocalDateAttributeConverter() { + } + + public static LocalDateAttributeConverter create() { + return new LocalDateAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(LocalDate.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(LocalDate input) { + return AttributeValue.builder().s(input.toString()).build(); + } + + @Override + public LocalDate transformTo(AttributeValue input) { + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } + + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(LocalDate.class, InstantAsStringAttributeConverter.class); + } + + @Override + public LocalDate convertString(String value) { + return LocalDate.parse(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateTimeAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateTimeAttributeConverter.java new file mode 100644 index 000000000000..c33ab2e05578 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateTimeAttributeConverter.java @@ -0,0 +1,134 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.Year; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link LocalDateTime} and {@link AttributeValue}. + * + *

    + * This stores and reads values in DynamoDB as a string. + * + *

    + * Values are stored with nanosecond precision. + * + *

    + * LocalDateTimes are stored in the official {@link LocalDateTime} format "[-]YYYY-MM-DDTHH:II:SS[.NNNNNNNNN]", where: + *

      + *
+ * <ol>
+ *     <li>Y is a year between {@link Year#MIN_VALUE} and {@link Year#MAX_VALUE} (prefixed with - if it is negative)</li>
+ *     <li>M is a 2-character, zero-prefixed month between 01 and 12</li>
+ *     <li>D is a 2-character, zero-prefixed day between 01 and 31</li>
+ *     <li>H is a 2-character, zero-prefixed hour between 00 and 23</li>
+ *     <li>I is a 2-character, zero-prefixed minute between 00 and 59</li>
+ *     <li>S is a 2-character, zero-prefixed second between 00 and 59</li>
+ *     <li>N is a 9-character, zero-prefixed nanosecond between 000,000,000 and 999,999,999.
+ *     The . and N may be excluded if N is 0.</li>
+ * </ol>
+ * See {@link LocalDateTime} for more details on the serialization format.
+ *

    + * This is format-compatible with the {@link LocalDateAttributeConverter}, allowing values stored as {@link LocalDate} to be + * retrieved as {@link LocalDateTime}s. The time associated with a value stored as a {@link LocalDate} is the + * beginning of the day (midnight). + * + *

    + * This serialization is lexicographically orderable when the year is not negative. + *

    + * + * Examples: + *
      + *
+ * <ul>
+ *     <li>{@code LocalDateTime.of(1988, 5, 21, 0, 0, 0)} is stored as an AttributeValue with the String "1988-05-21T00:00"</li>
+ *     <li>{@code LocalDateTime.of(-1988, 5, 21, 0, 0, 0)} is stored as an AttributeValue with the String "-1988-05-21T00:00"</li>
+ *     <li>{@code LocalDateTime.of(1988, 5, 21, 0, 0, 0).plusSeconds(1)} is stored as an AttributeValue with the String "1988-05-21T00:00:01"</li>
+ *     <li>{@code LocalDateTime.of(1988, 5, 21, 0, 0, 0).minusSeconds(1)} is stored as an AttributeValue with the String "1988-05-20T23:59:59"</li>
+ *     <li>{@code LocalDateTime.of(1988, 5, 21, 0, 0, 0).plusNanos(1)} is stored as an AttributeValue with the String "1988-05-21T00:00:00.000000001"</li>
+ *     <li>{@code LocalDateTime.of(1988, 5, 21, 0, 0, 0).minusNanos(1)} is stored as an AttributeValue with the String "1988-05-20T23:59:59.999999999"</li>
+ * </ul>
+ *

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class LocalDateTimeAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + + public static LocalDateTimeAttributeConverter create() { + return new LocalDateTimeAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(LocalDateTime.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(LocalDateTime input) { + return AttributeValue.builder().s(input.toString()).build(); + } + + @Override + public LocalDateTime transformTo(AttributeValue input) { + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } + + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(LocalDateTime.class, InstantAsStringAttributeConverter.class); + } + + @Override + public LocalDateTime convertString(String value) { + if (value.contains("T")) { // AttributeValue.S in LocalDateTime format + return LocalDateTime.parse(value); + } else { // AttributeValue.S in LocalDate format + return ConverterUtils.convertFromLocalDate(LocalDate.parse(value)); + } + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalTimeAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalTimeAttributeConverter.java new file mode 100644 index 000000000000..4e52cefdf056 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalTimeAttributeConverter.java @@ -0,0 +1,110 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.DateTimeException; +import java.time.LocalTime; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link LocalTime} and {@link AttributeValue}. + * + *
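A minimal sketch of the LocalDateTimeAttributeConverter defined above, showing the LocalDate cross-read described in its Javadoc (illustrative only, not part of this change):

import java.time.LocalDateTime;
import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalDateTimeAttributeConverter;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

final class LocalDateTimeConverterSketch {
    public static void main(String[] args) {
        LocalDateTimeAttributeConverter converter = LocalDateTimeAttributeConverter.create();

        // A value written by LocalDateTimeAttributeConverter itself.
        LocalDateTime a = converter.transformTo(AttributeValue.builder().s("1988-05-21T00:00:01").build());

        // A value written in LocalDate format (no 'T' in the string) is read as the start of that
        // day, per the convertString branch shown above.
        LocalDateTime b = converter.transformTo(AttributeValue.builder().s("1988-05-21").build());
        // b == LocalDateTime.of(1988, 5, 21, 0, 0)
    }
}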

    + * This stores and reads values in DynamoDB as a String. + * + *

    + * LocalTimes are stored in the official {@link LocalTime} format "HH:II:SS[.NNNNNNNNN]", where: + *

      + *
+ * <ol>
+ *     <li>H is a 2-character, zero-prefixed hour between 00 and 23</li>
+ *     <li>I is a 2-character, zero-prefixed minute between 00 and 59</li>
+ *     <li>S is a 2-character, zero-prefixed second between 00 and 59</li>
+ *     <li>N is a 9-character, zero-prefixed nanosecond between 000,000,000 and 999,999,999.
+ *     The . and N may be excluded if N is 0.</li>
+ * </ol>
+ * See {@link LocalTime} for more details on the serialization format.
+ *

    + * This serialization is lexicographically orderable. + *

    + * + * Examples: + *

      + *
+ * <ul>
+ *     <li>{@code LocalTime.of(5, 30, 0)} is stored as an AttributeValue with the String "05:30"</li>
+ *     <li>{@code LocalTime.of(5, 30, 0, 1)} is stored as an AttributeValue with the String "05:30:00.000000001"</li>
+ * </ul>
+ *

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class LocalTimeAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + + private LocalTimeAttributeConverter() { + } + + public static LocalTimeAttributeConverter create() { + return new LocalTimeAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(LocalTime.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(LocalTime input) { + return AttributeValue.builder().s(input.toString()).build(); + } + + @Override + public LocalTime transformTo(AttributeValue input) { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(LocalTime.class, InstantAsStringAttributeConverter.class); + } + + @Override + public LocalTime convertString(String value) { + try { + return LocalTime.parse(value); + } catch (DateTimeException e) { + throw new IllegalArgumentException(e); + } + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LongAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LongAttributeConverter.java new file mode 100644 index 000000000000..2494e64f9bea --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LongAttributeConverter.java @@ -0,0 +1,104 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.LongStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Long} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports reading numbers between {@link Long#MIN_VALUE} and {@link Long#MAX_VALUE} from DynamoDB. For smaller + * numbers, consider using {@link ShortAttributeConverter} or {@link IntegerAttributeConverter}. For larger numbers, consider + * using {@link BigIntegerAttributeConverter}. Numbers outside of the supported range will cause a {@link NumberFormatException} + * on conversion. + * + *

    + * This does not support reading decimal numbers. For decimal numbers, consider using {@link FloatAttributeConverter}, + * {@link DoubleAttributeConverter} or {@link BigDecimalAttributeConverter}. Decimal numbers will cause a + * {@link NumberFormatException} on conversion. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class LongAttributeConverter implements AttributeConverter, PrimitiveConverter { + private static final Visitor VISITOR = new Visitor(); + private static final LongStringConverter STRING_CONVERTER = LongStringConverter.create(); + + private LongAttributeConverter() { + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Long.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + public static LongAttributeConverter create() { + return new LongAttributeConverter(); + } + + @Override + public AttributeValue transformFrom(Long input) { + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public Long transformTo(AttributeValue input) { + if (input.n() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(long.class); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(Long.class, LongAttributeConverter.class); + } + + @Override + public Long convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public Long convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MapAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MapAttributeConverter.java new file mode 100644 index 000000000000..54a683acbcd9 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MapAttributeConverter.java @@ -0,0 +1,214 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.NavigableMap; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between a specific {@link Map} type and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a map from string to attribute value. This uses a configured {@link StringAttributeConverter} + * to convert the map keys to a string, and a configured {@link AttributeConverter} to convert the map values to an attribute + * value. + * + *

    + * This supports reading maps from DynamoDB. This uses a configured {@link StringAttributeConverter} to convert the map keys, and + * a configured {@link AttributeConverter} to convert the map values. + * + *

    + * A builder is exposed to allow defining how the map, key and value types are created and converted: + *

    + * + * {@literal AttributeConverter> mapConverter = + * MapAttributeConverter.builder(EnhancedType.mapOf(Integer.class, String.class)) + * .mapConstructor(HashMap::new) + * .keyConverter(MonthDayStringConverter.create()) + * .valueConverter(StringAttributeConverter.create()) + * .build();} + * + * + *

    + * For frequently-used types, static methods are exposed to reduce the amount of boilerplate involved in creation: + * + * {@literal AttributeConverter> mapConverter = + * MapAttributeConverter.mapConverter(MonthDayStringConverter.create(), + * StringAttributeConverter.create());} + * + *

    + * + * {@literal AttributeConverter> sortedMapConverter = + * MapAttributeConverter.sortedMapConverter(MonthDayStringConverter.create(), + * StringAttributeConverter.create());} + * + * + * @see MapAttributeConverter + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class MapAttributeConverter> implements AttributeConverter { + private final Delegate delegate; + + private MapAttributeConverter(Delegate delegate) { + this.delegate = delegate; + } + + public static MapAttributeConverter> mapConverter(StringConverter keyConverter, + AttributeConverter valueConverter) { + return builder(EnhancedType.mapOf(keyConverter.type(), valueConverter.type())) + .mapConstructor(LinkedHashMap::new) + .keyConverter(keyConverter) + .valueConverter(valueConverter) + .build(); + } + + public static MapAttributeConverter> concurrentMapConverter(StringConverter keyConverter, + AttributeConverter valueConverter) { + return builder(EnhancedType.concurrentMapOf(keyConverter.type(), valueConverter.type())) + .mapConstructor(ConcurrentHashMap::new) + .keyConverter(keyConverter) + .valueConverter(valueConverter) + .build(); + } + + public static MapAttributeConverter> sortedMapConverter(StringConverter keyConverter, + AttributeConverter valueConverter) { + return builder(EnhancedType.sortedMapOf(keyConverter.type(), valueConverter.type())) + .mapConstructor(TreeMap::new) + .keyConverter(keyConverter) + .valueConverter(valueConverter) + .build(); + } + + public static MapAttributeConverter> navigableMapConverter(StringConverter keyConverter, + AttributeConverter valueConverter) { + return builder(EnhancedType.navigableMapOf(keyConverter.type(), valueConverter.type())) + .mapConstructor(TreeMap::new) + .keyConverter(keyConverter) + .valueConverter(valueConverter) + .build(); + } + + public static , K, V> Builder builder(EnhancedType mapType) { + return new Builder<>(mapType); + } + + @Override + public EnhancedType type() { + return delegate.type(); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.M; + } + + @Override + public AttributeValue transformFrom(T input) { + return delegate.toAttributeValue(input).toAttributeValue(); + } + + @Override + public T transformTo(AttributeValue input) { + return delegate.fromAttributeValue(input); + } + + private static final class Delegate, K, V> { + private final EnhancedType type; + private final Supplier mapConstructor; + private final StringConverter keyConverter; + private final AttributeConverter valueConverter; + + private Delegate(Builder builder) { + this.type = builder.mapType; + this.mapConstructor = builder.mapConstructor; + this.keyConverter = builder.keyConverter; + this.valueConverter = builder.valueConverter; + } + + public EnhancedType type() { + return type; + } + + public EnhancedAttributeValue toAttributeValue(T input) { + Map result = new LinkedHashMap<>(); + input.forEach((k, v) -> result.put(keyConverter.toString(k), valueConverter.transformFrom(v))); + return EnhancedAttributeValue.fromMap(result); + } + + public T fromAttributeValue(AttributeValue input) { + return EnhancedAttributeValue.fromAttributeValue(input) + .convert(new TypeConvertingVisitor(Map.class, MapAttributeConverter.class) { + @Override + public T convertMap(Map value) { + T result = mapConstructor.get(); + value.forEach((k, v) -> + result.put(keyConverter.fromString(k), + valueConverter.transformTo(v))); + return result; + } + }); + } + } + + public static final class Builder, K, V> { + private final EnhancedType mapType; + + 
private StringConverter keyConverter; + private AttributeConverter valueConverter; + private Supplier mapConstructor; + + private Builder(EnhancedType mapType) { + this.mapType = mapType; + } + + public Builder mapConstructor(Supplier mapConstructor) { + this.mapConstructor = (Supplier) mapConstructor; + return this; + } + + public Builder keyConverter(StringConverter keyConverter) { + this.keyConverter = keyConverter; + return this; + } + + public Builder valueConverter(AttributeConverter valueConverter) { + this.valueConverter = valueConverter; + return this; + } + + public MapAttributeConverter build() { + return new MapAttributeConverter<>(new Delegate<>(this)); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MonthDayAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MonthDayAttributeConverter.java new file mode 100644 index 000000000000..dc85eaf69bc0 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MonthDayAttributeConverter.java @@ -0,0 +1,106 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.MonthDay; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link MonthDay} and {@link AttributeValue}. + * + *
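A minimal sketch of the MapAttributeConverter just shown, using its {@code mapConverter} factory. The key and value converters mirror the Javadoc example above; their package locations and generics are assumptions, since the diff text strips them:

import java.time.MonthDay;
import java.util.LinkedHashMap;
import java.util.Map;
import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.MapAttributeConverter;
import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringAttributeConverter;
import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.MonthDayStringConverter;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

final class MapConverterSketch {
    public static void main(String[] args) {
        // Keys go through a StringConverter, values through an AttributeConverter, per the Delegate above.
        MapAttributeConverter<Map<MonthDay, String>> converter =
            MapAttributeConverter.mapConverter(MonthDayStringConverter.create(),
                                               StringAttributeConverter.create());

        Map<MonthDay, String> holidays = new LinkedHashMap<>();
        holidays.put(MonthDay.of(12, 25), "Christmas");

        AttributeValue stored = converter.transformFrom(holidays); // M = { "--12-25" -> S="Christmas" }
        Map<MonthDay, String> parsed = converter.transformTo(stored);
    }
}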

    + * This stores and reads values in DynamoDB as a String. + * + *

    + * MonthDays are stored in the official {@link MonthDay} format "--MM-DD", where: + *

      + *
+ * <ol>
+ *     <li>M is a 2-character, zero-prefixed month between 01 and 12</li>
+ *     <li>D is a 2-character, zero-prefixed day between 01 and 31</li>
+ * </ol>
+ * See {@link MonthDay} for more details on the serialization format.
+ *

    + * This serialization is lexicographically orderable. + *

    + * + * Examples: + *

      + *
+ * <ul>
+ *     <li>{@code MonthDay.of(5, 21)} is stored as an AttributeValue with the String "--05-21"</li>
+ *     <li>{@code MonthDay.of(12, 1)} is stored as an AttributeValue with the String "--12-01"</li>
+ * </ul>
+ *

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class MonthDayAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + + private MonthDayAttributeConverter() { + } + + public static MonthDayAttributeConverter create() { + return new MonthDayAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(MonthDay.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(MonthDay input) { + return AttributeValue.builder().s(input.toString()).build(); + } + + @Override + public MonthDay transformTo(AttributeValue input) { + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(MonthDay.class, MonthDayAttributeConverter.class); + } + + @Override + public MonthDay convertString(String value) { + return MonthDay.parse(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OffsetDateTimeAsStringAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OffsetDateTimeAsStringAttributeConverter.java new file mode 100644 index 000000000000..0fbddcc412be --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OffsetDateTimeAsStringAttributeConverter.java @@ -0,0 +1,120 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.OffsetDateTime; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link OffsetDateTime} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a string. + * + *

+ * Values are stored in ISO-8601 format, with nanosecond precision. If the offset has seconds then they will also be
+ * included, even though this is not part of the ISO-8601 standard. For full ISO-8601 compliance, ensure your
+ * {@code OffsetDateTime}s do not have offsets at the precision level of seconds.
+ *
+ * <p>
+ * Examples:
+ * <ul>
+ *     <li>{@code OffsetDateTime.MIN} is stored as
+ *     an AttributeValue with the String {@code "-999999999-01-01T00:00+18:00"}</li>
+ *     <li>{@code OffsetDateTime.MAX} is stored as
+ *     an AttributeValue with the String {@code "+999999999-12-31T23:59:59.999999999-18:00"}</li>
+ *     <li>{@code Instant.EPOCH.atOffset(ZoneOffset.UTC).plusSeconds(1)} is stored as
+ *     an AttributeValue with the String {@code "1970-01-01T00:00:01Z"}</li>
+ *     <li>{@code Instant.EPOCH.atOffset(ZoneOffset.UTC).minusSeconds(1)} is stored as
+ *     an AttributeValue with the String {@code "1969-12-31T23:59:59Z"}</li>
+ *     <li>{@code Instant.EPOCH.atOffset(ZoneOffset.UTC).plusMillis(1)} is stored as
+ *     an AttributeValue with the String {@code "1970-01-01T00:00:00.001Z"}</li>
+ *     <li>{@code Instant.EPOCH.atOffset(ZoneOffset.UTC).minusMillis(1)} is stored as
+ *     an AttributeValue with the String {@code "1969-12-31T23:59:59.999Z"}</li>
+ *     <li>{@code Instant.EPOCH.atOffset(ZoneOffset.UTC).plusNanos(1)} is stored as
+ *     an AttributeValue with the String {@code "1970-01-01T00:00:00.000000001Z"}</li>
+ *     <li>{@code Instant.EPOCH.atOffset(ZoneOffset.UTC).minusNanos(1)} is stored as
+ *     an AttributeValue with the String {@code "1969-12-31T23:59:59.999999999Z"}</li>
+ * </ul>
+ * See {@link OffsetDateTime} for more details on the serialization format.
+ *

+ * This converter can read any values written by itself or {@link InstantAsStringAttributeConverter},
+ * and values without a time zone name written by {@link ZonedDateTimeAsStringAttributeConverter}.
+ * Values written by {@code Instant} converters are treated as if they are in the UTC time zone
+ * (and an offset of 0 seconds will be returned).
+ *

    + * This serialization is lexicographically orderable when the year is not negative. + *
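+ * <p>
+ * A minimal round-trip sketch (the variable names and the serialized value shown in the comment are illustrative):
+ * <pre>{@code
+ * OffsetDateTimeAsStringAttributeConverter converter = OffsetDateTimeAsStringAttributeConverter.create();
+ * AttributeValue attributeValue =
+ *     converter.transformFrom(OffsetDateTime.of(2019, 10, 24, 0, 0, 0, 0, ZoneOffset.UTC)); // S: "2019-10-24T00:00Z"
+ * OffsetDateTime roundTripped = converter.transformTo(attributeValue);
+ * }</pre>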

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class OffsetDateTimeAsStringAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + + public static OffsetDateTimeAsStringAttributeConverter create() { + return new OffsetDateTimeAsStringAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(OffsetDateTime.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(OffsetDateTime input) { + return AttributeValue.builder().s(input.toString()).build(); + } + + @Override + public OffsetDateTime transformTo(AttributeValue input) { + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } + + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(OffsetDateTime.class, InstantAsStringAttributeConverter.class); + } + + @Override + public OffsetDateTime convertString(String value) { + return OffsetDateTime.parse(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeConverter.java new file mode 100644 index 000000000000..da6550acfaec --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeConverter.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.util.Optional; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Optional} and {@link EnhancedAttributeValue}. 
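+ *
+ * <p>
+ * A short sketch of wrapping an element converter (the variable names are illustrative):
+ * <pre>{@code
+ * OptionalAttributeConverter<String> converter = OptionalAttributeConverter.create(StringAttributeConverter.create());
+ * AttributeValue present = converter.transformFrom(Optional.of("foo")); // stored as the delegate's value, S: "foo"
+ * AttributeValue absent = converter.transformFrom(Optional.empty());    // stored as a DynamoDB NULL
+ * }</pre>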
+ */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class OptionalAttributeConverter implements AttributeConverter> { + private final AttributeConverter delegate; + + private OptionalAttributeConverter(AttributeConverter delegate) { + this.delegate = delegate; + + } + + public static OptionalAttributeConverter create(AttributeConverter delegate) { + return new OptionalAttributeConverter(delegate); + } + + @Override + public EnhancedType> type() { + return EnhancedType.optionalOf(delegate.type().rawClass()); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(Optional input) { + if (!input.isPresent()) { + return AttributeValues.nullAttributeValue(); + } + + return delegate.transformFrom(input.get()); + } + + @SuppressWarnings("unchecked") + @Override + public Optional transformTo(AttributeValue input) { + Optional result; + if (Boolean.TRUE.equals(input.nul())) { + // This is safe - An Optional.empty() can be used for any Optional subtype. + result = Optional.empty(); + } else { + result = (Optional) Optional.ofNullable(delegate.transformTo(input)); + } + + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalDoubleAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalDoubleAttributeConverter.java new file mode 100644 index 000000000000..be0db01b1c7d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalDoubleAttributeConverter.java @@ -0,0 +1,120 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.OptionalDouble; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.OptionalDoubleStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link OptionalDouble} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports converting numbers stored in DynamoDB into a double-precision floating point number, within the range + * {@link Double#MIN_VALUE}, {@link Double#MAX_VALUE}. Null values are converted to {@code OptionalDouble.empty()}. For less + * precision or smaller values, consider using {@link OptionalAttributeConverter} along with a {@link Float} type. + * For greater precision or larger values, consider using {@link OptionalAttributeConverter} along with a + * {@link BigDecimal} type. + * + *

    + * If values are known to be whole numbers, it is recommended to use a perfect-precision whole number representation like those + * provided by {@link OptionalIntAttributeConverter}, {@link OptionalLongAttributeConverter}, or a + * {@link OptionalAttributeConverter} along with a {@link BigInteger} type. + * + *
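+ * <p>
+ * For example, a round-trip might look like the following sketch (variable names are illustrative):
+ * <pre>{@code
+ * OptionalDoubleAttributeConverter converter = OptionalDoubleAttributeConverter.create();
+ * AttributeValue attributeValue = converter.transformFrom(OptionalDouble.of(2.5)); // N: "2.5"
+ * OptionalDouble roundTripped = converter.transformTo(attributeValue);             // OptionalDouble.of(2.5)
+ * }</pre>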

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class OptionalDoubleAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + private static final OptionalDoubleStringConverter STRING_CONVERTER = OptionalDoubleStringConverter.create(); + + private OptionalDoubleAttributeConverter() { + } + + public static OptionalDoubleAttributeConverter create() { + return new OptionalDoubleAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(OptionalDouble.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + @Override + public AttributeValue transformFrom(OptionalDouble input) { + if (input.isPresent()) { + ConverterUtils.validateDouble(input.getAsDouble()); + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } else { + return AttributeValues.nullAttributeValue(); + } + } + + @Override + public OptionalDouble transformTo(AttributeValue input) { + OptionalDouble result; + if (input.n() != null) { + result = EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } else { + result = EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + result.ifPresent(ConverterUtils::validateDouble); + return result; + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(OptionalDouble.class, OptionalDoubleAttributeConverter.class); + } + + @Override + public OptionalDouble convertNull() { + return OptionalDouble.empty(); + } + + @Override + public OptionalDouble convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public OptionalDouble convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalIntAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalIntAttributeConverter.java new file mode 100644 index 000000000000..e433ea106264 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalIntAttributeConverter.java @@ -0,0 +1,114 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.OptionalInt; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.OptionalIntStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link OptionalInt} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports reading numbers between {@link Integer#MIN_VALUE} and {@link Integer#MAX_VALUE} from DynamoDB. Null values are + * converted to {@code OptionalInt.empty()}. For larger numbers, consider using the {@link OptionalLongAttributeConverter} or + * the {@link OptionalAttributeConverter} along with a {@link BigInteger}. For shorter numbers, consider using the + * {@link OptionalAttributeConverter} along with a {@link Short} type. + * + *

    + * This does not support reading decimal numbers. For decimal numbers, consider using {@link OptionalDoubleAttributeConverter}, + * or the {@link OptionalAttributeConverter} with a {@link Float} or {@link BigDecimal}. Decimal numbers will cause a + * {@link NumberFormatException} on conversion. + * + *
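+ * <p>
+ * A small usage sketch (variable names are illustrative); a DynamoDB NULL reads back as an empty value:
+ * <pre>{@code
+ * OptionalIntAttributeConverter converter = OptionalIntAttributeConverter.create();
+ * AttributeValue attributeValue = converter.transformFrom(OptionalInt.of(42));     // N: "42"
+ * OptionalInt empty = converter.transformTo(AttributeValues.nullAttributeValue()); // OptionalInt.empty()
+ * }</pre>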

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class OptionalIntAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + private static final OptionalIntStringConverter STRING_CONVERTER = OptionalIntStringConverter.create(); + + private OptionalIntAttributeConverter() { + } + + @Override + public EnhancedType type() { + return EnhancedType.of(OptionalInt.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + public static OptionalIntAttributeConverter create() { + return new OptionalIntAttributeConverter(); + } + + @Override + public AttributeValue transformFrom(OptionalInt input) { + if (input.isPresent()) { + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } else { + return AttributeValues.nullAttributeValue(); + } + } + + @Override + public OptionalInt transformTo(AttributeValue input) { + if (input.n() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(OptionalInt.class, OptionalIntAttributeConverter.class); + } + + @Override + public OptionalInt convertNull() { + return OptionalInt.empty(); + } + + @Override + public OptionalInt convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public OptionalInt convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalLongAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalLongAttributeConverter.java new file mode 100644 index 000000000000..c7f3adbbcfd5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalLongAttributeConverter.java @@ -0,0 +1,114 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.OptionalLong; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.OptionalLongStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link OptionalLong} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports reading numbers between {@link Long#MIN_VALUE} and {@link Long#MAX_VALUE} from DynamoDB. Null values are + * converted to {@code OptionalLong.empty()}. For larger numbers, consider using the {@link OptionalAttributeConverter} + * along with a {@link BigInteger}. For shorter numbers, consider using the {@link OptionalIntAttributeConverter} or + * {@link OptionalAttributeConverter} along with a {@link Short} type. + * + *

    + * This does not support reading decimal numbers. For decimal numbers, consider using {@link OptionalDoubleAttributeConverter}, + * or the {@link OptionalAttributeConverter} with a {@link Float} or {@link BigDecimal}. Decimal numbers will cause a + * {@link NumberFormatException} on conversion. + * + *
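+ * <p>
+ * A brief round-trip sketch (variable names are illustrative):
+ * <pre>{@code
+ * OptionalLongAttributeConverter converter = OptionalLongAttributeConverter.create();
+ * AttributeValue attributeValue = converter.transformFrom(OptionalLong.of(9_000_000_000L)); // N: "9000000000"
+ * OptionalLong roundTripped = converter.transformTo(attributeValue);
+ * }</pre>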

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class OptionalLongAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + private static final OptionalLongStringConverter STRING_CONVERTER = OptionalLongStringConverter.create(); + + private OptionalLongAttributeConverter() { + } + + @Override + public EnhancedType type() { + return EnhancedType.of(OptionalLong.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + public static OptionalLongAttributeConverter create() { + return new OptionalLongAttributeConverter(); + } + + @Override + public AttributeValue transformFrom(OptionalLong input) { + if (input.isPresent()) { + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } else { + return AttributeValues.nullAttributeValue(); + } + } + + @Override + public OptionalLong transformTo(AttributeValue input) { + if (input.n() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(OptionalLong.class, OptionalLongAttributeConverter.class); + } + + @Override + public OptionalLong convertNull() { + return OptionalLong.empty(); + } + + @Override + public OptionalLong convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public OptionalLong convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/PeriodAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/PeriodAttributeConverter.java new file mode 100644 index 000000000000..afdcc14afca0 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/PeriodAttributeConverter.java @@ -0,0 +1,87 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.Period; +import java.time.format.DateTimeParseException; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.PeriodStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Period} and {@link AttributeValue}. + * + *

    + * This stores and reads values in DynamoDB as a string, according to the format of {@link Period#parse(CharSequence)} and + * {@link Period#toString()}. + *
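+ *
+ * <p>
+ * For example (variable names are illustrative):
+ * <pre>{@code
+ * PeriodAttributeConverter converter = PeriodAttributeConverter.create();
+ * AttributeValue attributeValue = converter.transformFrom(Period.of(1, 2, 3)); // S: "P1Y2M3D"
+ * Period roundTripped = converter.transformTo(attributeValue);
+ * }</pre>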

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class PeriodAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + private static final PeriodStringConverter STRING_CONVERTER = PeriodStringConverter.create(); + + private PeriodAttributeConverter() { + } + + public static PeriodAttributeConverter create() { + return new PeriodAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Period.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(Period input) { + return AttributeValue.builder().s(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public Period transformTo(AttributeValue input) { + try { + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (DateTimeParseException e) { + throw new IllegalArgumentException(e); + } + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(Period.class, PeriodAttributeConverter.class); + } + + @Override + public Period convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/SdkBytesAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/SdkBytesAttributeConverter.java new file mode 100644 index 000000000000..dfaad4e9620d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/SdkBytesAttributeConverter.java @@ -0,0 +1,84 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link SdkBytes} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a binary blob. + * + *

    + * This supports reading every byte value supported by DynamoDB, making it fully compatible with custom converters as + * well as internal converters (e.g. {@link ByteArrayAttributeConverter}). + * + *
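+ * <p>
+ * A minimal sketch of storing and reading raw bytes (variable names are illustrative):
+ * <pre>{@code
+ * SdkBytesAttributeConverter converter = SdkBytesAttributeConverter.create();
+ * AttributeValue attributeValue = converter.transformFrom(SdkBytes.fromUtf8String("foo")); // B: bytes of "foo"
+ * SdkBytes roundTripped = converter.transformTo(attributeValue);
+ * }</pre>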

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class SdkBytesAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + + private SdkBytesAttributeConverter() { + } + + @Override + public EnhancedType type() { + return EnhancedType.of(SdkBytes.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.B; + } + + public static SdkBytesAttributeConverter create() { + return new SdkBytesAttributeConverter(); + } + + @Override + public AttributeValue transformFrom(SdkBytes input) { + return AttributeValue.builder().b(input).build(); + } + + @Override + public SdkBytes transformTo(AttributeValue input) { + return EnhancedAttributeValue.fromBytes(input.b()).convert(VISITOR); + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(SdkBytes.class, SdkBytesAttributeConverter.class); + } + + @Override + public SdkBytes convertBytes(SdkBytes value) { + return value; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/SetAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/SetAttributeConverter.java new file mode 100644 index 000000000000..0346c59b5bf1 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/SetAttributeConverter.java @@ -0,0 +1,273 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import static java.util.stream.Collectors.toList; + +import java.util.Collection; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.Validate; + + +/** + * A converter between a specific {@link Collection} type and {@link EnhancedAttributeValue}. + * + *

    + * This stores values in DynamoDB as a list of attribute values. This uses a configured {@link AttributeConverter} to convert + * the collection contents to an attribute value. + * + *

    + * This supports reading a list of attribute values. This uses a configured {@link AttributeConverter} to convert + * the collection contents. + * + *

    + * A builder is exposed to allow defining how the collection and element types are created and converted: + * + * {@literal AttributeConverter> listConverter = + * CollectionAttributeConverter.builder(EnhancedType.listOf(Integer.class)) + * .collectionConstructor(ArrayList::new) + * .elementConverter(IntegerAttributeConverter.create()) + * .build()} + * + * + *

    + * For frequently-used types, static methods are exposed to reduce the amount of boilerplate involved in creation: + * + * {@literal AttributeConverter> listConverter = + * CollectionAttributeConverter.listConverter(IntegerAttributeConverter.create());} + * + *

+ * + * {@literal AttributeConverter> collectionConverter = + * CollectionAttributeConverter.collectionConverter(IntegerAttributeConverter.create());} + *

    + * + * {@literal AttributeConverter> setConverter = + * CollectionAttributeConverter.setConverter(IntegerAttributeConverter.create());} + * + *

    + * + * {@literal AttributeConverter> sortedSetConverter = + * CollectionAttributeConverter.sortedSetConverter(IntegerAttributeConverter.create());} + * + * + * @see MapAttributeConverter + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class SetAttributeConverter> implements AttributeConverter { + private final Delegate delegate; + + private SetAttributeConverter(Delegate delegate) { + this.delegate = delegate; + } + + public static SetAttributeConverter> setConverter(AttributeConverter elementConverter) { + return builder(EnhancedType.setOf(elementConverter.type())) + .collectionConstructor(LinkedHashSet::new) + .elementConverter(elementConverter) + .build(); + } + + public static , U> SetAttributeConverter.Builder builder(EnhancedType collectionType) { + return new Builder<>(collectionType); + } + + @Override + public EnhancedType type() { + return delegate.type(); + } + + @Override + public AttributeValueType attributeValueType() { + return delegate.attributeValueType(); + } + + @Override + public AttributeValue transformFrom(T input) { + return delegate.transformFrom(input); + } + + @Override + public T transformTo(AttributeValue input) { + return delegate.transformTo(input); + } + + private static final class Delegate, U> implements AttributeConverter { + private final EnhancedType type; + private final Supplier collectionConstructor; + private final AttributeConverter elementConverter; + private final AttributeValueType attributeValueType; + + private Delegate(Builder builder) { + this.type = builder.collectionType; + this.collectionConstructor = builder.collectionConstructor; + this.elementConverter = builder.elementConverter; + this.attributeValueType = attributeValueTypeForSet(this.elementConverter); + } + + @Override + public EnhancedType type() { + return type; + } + + @Override + public AttributeValueType attributeValueType() { + return attributeValueType; + } + + @Override + public AttributeValue transformFrom(T input) { + return flatten(input.stream() + .map(elementConverter::transformFrom) + .collect(toList())); + } + + @Override + public T transformTo(AttributeValue input) { + return EnhancedAttributeValue.fromAttributeValue(input) + .convert(new TypeConvertingVisitor(type.rawClass(), SetAttributeConverter.class) { + @Override + public T convertSetOfStrings(List value) { + return convertCollection(value, v -> AttributeValue.builder().s(v).build()); + } + + @Override + public T convertSetOfNumbers(List value) { + return convertCollection(value, v -> AttributeValue.builder().n(v).build()); + } + + @Override + public T convertSetOfBytes(List value) { + return convertCollection(value, v -> AttributeValue.builder().b(v).build()); + } + + @Override + public T convertListOfAttributeValues(List value) { + return convertCollection(value, Function.identity()); + } + + private T convertCollection(Collection collection, + Function transformFrom) { + Collection result = (Collection) collectionConstructor.get(); + + collection.stream() + .map(transformFrom) + .map(elementConverter::transformTo) + .forEach(result::add); + + // This is a safe cast - We know the values we added to the list + // match the type that the customer requested. 
+ return (T) result; + } + }); + } + + private AttributeValueType attributeValueTypeForSet(AttributeConverter innerType) { + switch (innerType.attributeValueType()) { + case N: + return AttributeValueType.NS; + case S: + return AttributeValueType.SS; + case B: + return AttributeValueType.BS; + default: + throw new IllegalArgumentException( + String.format("SetAttributeConverter cannot be created with a parameterized type of '%s'. " + + "Supported parameterized types must convert to B, S or N DynamoDB " + + "AttributeValues.", innerType.type().rawClass())); + } + } + + /** + * Takes a list of {@link AttributeValue}s and flattens into a resulting + * single {@link AttributeValue} set of the corresponding type. + */ + public AttributeValue flatten(List listOfAttributeValues) { + Validate.paramNotNull(listOfAttributeValues, "listOfAttributeValues"); + Validate.noNullElements(listOfAttributeValues, "List must not have null values."); + + switch (attributeValueType) { + case NS: + return AttributeValue.builder() + .ns(listOfAttributeValues.stream() + .peek(av -> Validate.isTrue(av.n() != null, + "Attribute value must be N.")) + .map(AttributeValue::n) + .collect(Collectors.toList())) + .build(); + case SS: + return AttributeValue.builder() + .ss(listOfAttributeValues.stream() + .peek(av -> Validate.isTrue(av.s() != null, + "Attribute value must be S.")) + .map(AttributeValue::s) + .collect(Collectors.toList())) + .build(); + case BS: + return AttributeValue.builder() + .bs(listOfAttributeValues.stream() + .peek(av -> Validate.isTrue(av.b() != null, + "Attribute value must be B.")) + .map(AttributeValue::b) + .collect(Collectors.toList())) + .build(); + default: + throw new IllegalStateException("Unsupported set attribute value type: " + attributeValueType); + } + } + } + + public static final class Builder, U> { + private final EnhancedType collectionType; + private Supplier collectionConstructor; + private AttributeConverter elementConverter; + + private Builder(EnhancedType collectionType) { + this.collectionType = collectionType; + } + + public Builder collectionConstructor(Supplier collectionConstructor) { + this.collectionConstructor = collectionConstructor; + return this; + } + + public Builder elementConverter(AttributeConverter elementConverter) { + this.elementConverter = elementConverter; + return this; + } + + public SetAttributeConverter build() { + return new SetAttributeConverter<>(new Delegate<>(this)); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ShortAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ShortAttributeConverter.java new file mode 100644 index 000000000000..9bb112650d37 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ShortAttributeConverter.java @@ -0,0 +1,101 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.ShortStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link Short} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a number. + * + *

    + * This supports reading numbers between {@link Short#MIN_VALUE} and {@link Short#MAX_VALUE} from DynamoDB. For larger numbers, + * consider using {@link IntegerAttributeConverter}, {@link LongAttributeConverter} or {@link BigIntegerAttributeConverter}. + * Numbers outside of the supported range will cause a {@link NumberFormatException} on conversion. + * + *
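+ * <p>
+ * For example, a simple round-trip might look like this sketch (variable names are illustrative):
+ * <pre>{@code
+ * ShortAttributeConverter converter = ShortAttributeConverter.create();
+ * AttributeValue attributeValue = converter.transformFrom((short) 42); // N: "42"
+ * Short roundTripped = converter.transformTo(attributeValue);
+ * }</pre>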

    + * This does not support reading decimal numbers. For decimal numbers, consider using {@link FloatAttributeConverter}, + * {@link DoubleAttributeConverter} or {@link BigDecimalAttributeConverter}. Decimal numbers will cause a + * {@link NumberFormatException} on conversion. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class ShortAttributeConverter implements AttributeConverter, PrimitiveConverter { + public static final ShortStringConverter STRING_CONVERTER = ShortStringConverter.create(); + + public static ShortAttributeConverter create() { + return new ShortAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Short.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + + @Override + public AttributeValue transformFrom(Short input) { + return AttributeValue.builder().n(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public Short transformTo(AttributeValue input) { + if (input.n() != null) { + return EnhancedAttributeValue.fromNumber(input.n()).convert(Visitor.INSTANCE); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(Visitor.INSTANCE); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(short.class); + } + + private static final class Visitor extends TypeConvertingVisitor { + private static final Visitor INSTANCE = new Visitor(); + + private Visitor() { + super(Short.class, ShortAttributeConverter.class); + } + + @Override + public Short convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + + @Override + public Short convertNumber(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/StringAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/StringAttributeConverter.java new file mode 100644 index 000000000000..542d8e84ec84 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/StringAttributeConverter.java @@ -0,0 +1,144 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import static java.util.stream.Collectors.toList; + +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.function.BinaryOperator; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.BooleanStringConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.ByteArrayStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link String} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a string. + * + *
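+ * <p>
+ * A small sketch of reading different attribute types back as strings (variable names are illustrative):
+ * <pre>{@code
+ * StringAttributeConverter converter = StringAttributeConverter.create();
+ * String fromString = converter.transformTo(AttributeValue.builder().s("foo").build()); // "foo"
+ * String fromNumber = converter.transformTo(AttributeValue.builder().n("42").build());  // "42"
+ * }</pre>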

    + * This supports reading any DynamoDB attribute type into a string type, so it is very useful for logging information stored in + * DynamoDB. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class StringAttributeConverter implements AttributeConverter { + public static StringAttributeConverter create() { + return new StringAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(String.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(String input) { + return AttributeValue.builder().s(input).build(); + } + + @Override + public String transformTo(AttributeValue input) { + return Visitor.toString(input); + } + + private static final class Visitor extends TypeConvertingVisitor { + private static final Visitor INSTANCE = new Visitor(); + + private Visitor() { + super(String.class, StringAttributeConverter.class); + } + + @Override + public String convertString(String value) { + return value; + } + + @Override + public String convertNumber(String value) { + return value; + } + + @Override + public String convertBytes(SdkBytes value) { + return ByteArrayStringConverter.create().toString(value.asByteArray()); + } + + @Override + public String convertBoolean(Boolean value) { + return BooleanStringConverter.create().toString(value); + } + + @Override + public String convertSetOfStrings(List value) { + return value.toString(); + } + + @Override + public String convertSetOfNumbers(List value) { + return value.toString(); + } + + @Override + public String convertSetOfBytes(List value) { + return value.stream() + .map(this::convertBytes) + .collect(Collectors.joining(",", "[", "]")); + } + + @Override + public String convertMap(Map value) { + BinaryOperator throwingMerger = (l, r) -> { + // Should not happen: we're converting from map. + throw new IllegalStateException(); + }; + + return value.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, i -> toString(i.getValue()), + throwingMerger, LinkedHashMap::new)) + .toString(); + } + + @Override + public String convertListOfAttributeValues(List value) { + return value.stream() + .map(Visitor::toString) + .collect(toList()) + .toString(); + } + + public static String toString(AttributeValue attributeValue) { + return EnhancedAttributeValue.fromAttributeValue(attributeValue).convert(Visitor.INSTANCE); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/StringBufferAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/StringBufferAttributeConverter.java new file mode 100644 index 000000000000..8696f2ff0b69 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/StringBufferAttributeConverter.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link StringBuffer} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a string. + * + *

    + * This supports reading any DynamoDB attribute type into a string buffer. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class StringBufferAttributeConverter implements AttributeConverter { + public static final StringAttributeConverter STRING_CONVERTER = StringAttributeConverter.create(); + + public static StringBufferAttributeConverter create() { + return new StringBufferAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(StringBuffer.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(StringBuffer input) { + return STRING_CONVERTER.transformFrom(input.toString()); + } + + @Override + public StringBuffer transformTo(AttributeValue input) { + return new StringBuffer(STRING_CONVERTER.transformTo(input)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/StringBuilderAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/StringBuilderAttributeConverter.java new file mode 100644 index 000000000000..6ab3ae48fc35 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/StringBuilderAttributeConverter.java @@ -0,0 +1,65 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + + +/** + * A converter between {@link StringBuffer} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a string. + * + *

    + * This supports reading any DynamoDB attribute type into a string builder. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class StringBuilderAttributeConverter implements AttributeConverter { + public static final StringAttributeConverter STRING_CONVERTER = StringAttributeConverter.create(); + + public static StringBuilderAttributeConverter create() { + return new StringBuilderAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(StringBuilder.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(StringBuilder input) { + return STRING_CONVERTER.transformFrom(input.toString()); + } + + @Override + public StringBuilder transformTo(AttributeValue input) { + return new StringBuilder(STRING_CONVERTER.transformTo(input)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/UriAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/UriAttributeConverter.java new file mode 100644 index 000000000000..cefe7bd5af07 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/UriAttributeConverter.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.net.URI; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.UriStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link URI} and {@link AttributeValue}. + * + *

    + * This stores and reads values in DynamoDB as a string, according to the format of {@link URI#create(String)} and + * {@link URI#toString()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class UriAttributeConverter implements AttributeConverter { + public static final UriStringConverter STRING_CONVERTER = UriStringConverter.create(); + + public static UriAttributeConverter create() { + return new UriAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(URI.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(URI input) { + return AttributeValue.builder().s(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public URI transformTo(AttributeValue input) { + return EnhancedAttributeValue.fromAttributeValue(input).convert(Visitor.INSTANCE); + } + + private static final class Visitor extends TypeConvertingVisitor { + private static final Visitor INSTANCE = new Visitor(); + + private Visitor() { + super(URI.class, UriAttributeConverter.class); + } + + @Override + public URI convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/UrlAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/UrlAttributeConverter.java new file mode 100644 index 000000000000..03f56ce3f94c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/UrlAttributeConverter.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.net.URL; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.UrlStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link URL} and {@link AttributeValue}. + * + *

    + * This stores and reads values in DynamoDB as a string, according to the format of {@link URL#URL(String)} and + * {@link URL#toString()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class UrlAttributeConverter implements AttributeConverter { + public static final UrlStringConverter STRING_CONVERTER = UrlStringConverter.create(); + + public static UrlAttributeConverter create() { + return new UrlAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(URL.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(URL input) { + return AttributeValue.builder().s(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public URL transformTo(AttributeValue input) { + return EnhancedAttributeValue.fromAttributeValue(input).convert(Visitor.INSTANCE); + } + + private static final class Visitor extends TypeConvertingVisitor { + private static final Visitor INSTANCE = new Visitor(); + + private Visitor() { + super(URL.class, UrlAttributeConverter.class); + } + + @Override + public URL convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/UuidAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/UuidAttributeConverter.java new file mode 100644 index 000000000000..b35f093510c3 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/UuidAttributeConverter.java @@ -0,0 +1,77 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.util.UUID; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.UuidStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link UUID} and {@link AttributeValue}. + * + *

    + * This supports storing and reading values in DynamoDB as a string. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class UuidAttributeConverter implements AttributeConverter { + public static final UuidStringConverter STRING_CONVERTER = UuidStringConverter.create(); + + public static UuidAttributeConverter create() { + return new UuidAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(UUID.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(UUID input) { + return AttributeValue.builder().s(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public UUID transformTo(AttributeValue input) { + return EnhancedAttributeValue.fromAttributeValue(input).convert(Visitor.INSTANCE); + } + + private static final class Visitor extends TypeConvertingVisitor { + private static final Visitor INSTANCE = new Visitor(); + + private Visitor() { + super(UUID.class, UuidAttributeConverter.class); + } + + @Override + public UUID convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZoneIdAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZoneIdAttributeConverter.java new file mode 100644 index 000000000000..604bba52f730 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZoneIdAttributeConverter.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.ZoneId; +import java.time.zone.ZoneRulesException; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.ZoneIdStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link ZoneId} and {@link AttributeValue}. + * + *

    + * This stores and reads values in DynamoDB as a string using {@link ZoneId#toString()} and {@link ZoneId#of(String)}. + * + *
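[Editor's note: a minimal round-trip sketch of the storage format described above. It assumes the internal converter API exactly as shown in this diff; the class is marked @SdkInternalApi, so this is illustration rather than a supported public entry point.]

    ZoneIdAttributeConverter converter = ZoneIdAttributeConverter.create();
    AttributeValue stored = converter.transformFrom(ZoneId.of("Europe/Paris")); // S attribute "Europe/Paris"
    ZoneId parsed = converter.transformTo(stored);                              // ZoneId.of("Europe/Paris")
    // An unknown region ID surfaces as IllegalArgumentException (wrapping ZoneRulesException), per transformTo below.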

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class ZoneIdAttributeConverter implements AttributeConverter { + public static final ZoneIdStringConverter STRING_CONVERTER = ZoneIdStringConverter.create(); + + public static ZoneIdAttributeConverter create() { + return new ZoneIdAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(ZoneId.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(ZoneId input) { + return AttributeValue.builder().s(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public ZoneId transformTo(AttributeValue input) { + try { + return STRING_CONVERTER.fromString(input.s()); + } catch (ZoneRulesException e) { + throw new IllegalArgumentException(e); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZoneOffsetAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZoneOffsetAttributeConverter.java new file mode 100644 index 000000000000..f03dd698fea8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZoneOffsetAttributeConverter.java @@ -0,0 +1,85 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.DateTimeException; +import java.time.ZoneOffset; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.ZoneOffsetStringConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link ZoneOffset} and {@link AttributeValue}. + * + *

    + * This stores and reads values in DynamoDB as a string using {@link ZoneOffset#toString()} and {@link ZoneOffset#of(String)}. + * + *
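[Editor's note: the offset converter follows the same shape; a short sketch, again assuming the internal API as shown in this diff.]

    ZoneOffsetAttributeConverter converter = ZoneOffsetAttributeConverter.create();
    converter.transformFrom(ZoneOffset.ofHoursMinutes(5, 30)).s(); // "+05:30"
    converter.transformFrom(ZoneOffset.UTC).s();                   // "Z"
    ZoneOffset parsed = converter.transformTo(AttributeValue.builder().s("+01:00").build());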

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class ZoneOffsetAttributeConverter implements AttributeConverter { + public static final ZoneOffsetStringConverter STRING_CONVERTER = ZoneOffsetStringConverter.create(); + + public static ZoneOffsetAttributeConverter create() { + return new ZoneOffsetAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(ZoneOffset.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(ZoneOffset input) { + return AttributeValue.builder().s(STRING_CONVERTER.toString(input)).build(); + } + + @Override + public ZoneOffset transformTo(AttributeValue input) { + try { + return EnhancedAttributeValue.fromAttributeValue(input).convert(Visitor.INSTANCE); + } catch (DateTimeException e) { + throw new IllegalArgumentException(e); + } + } + + private static final class Visitor extends TypeConvertingVisitor { + private static final Visitor INSTANCE = new Visitor(); + + private Visitor() { + super(ZoneOffset.class, ZoneOffsetAttributeConverter.class); + } + + @Override + public ZoneOffset convertString(String value) { + return STRING_CONVERTER.fromString(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZonedDateTimeAsStringAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZonedDateTimeAsStringAttributeConverter.java new file mode 100644 index 000000000000..5f0a7a386c73 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZonedDateTimeAsStringAttributeConverter.java @@ -0,0 +1,114 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZonedDateTime; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A converter between {@link ZonedDateTime} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a string. + * + *

    + * Values are stored in an ISO-8601-like format, with the non-offset zone IDs being added at the end of the string in square + * brackets. If the zone ID offset has seconds, then they will also be included, even though this is not part of the ISO-8601 + * standard. For full ISO-8601 compliance, it is better to use {@link OffsetDateTime}s (without second-level precision in their + * offsets) or {@link Instant}s, assuming the time zone information is not strictly required. + * + *

    + * Examples: + *

    + * <ul>
    + *     <li>{@code Instant.EPOCH.atZone(ZoneId.of("Europe/Paris"))} is stored as
    + *     an AttributeValue with the String "1970-01-01T01:00+01:00[Europe/Paris]"</li>
    + *     <li>{@code OffsetDateTime.MIN.toZonedDateTime()} is stored as
    + *     an AttributeValue with the String "-999999999-01-01T00:00+18:00"</li>
    + *     <li>{@code OffsetDateTime.MAX.toZonedDateTime()} is stored as
    + *     an AttributeValue with the String "+999999999-12-31T23:59:59.999999999-18:00"</li>
    + *     <li>{@code Instant.EPOCH.atZone(ZoneOffset.UTC)} is stored as
    + *     an AttributeValue with the String "1970-01-01T00:00Z"</li>
    + * </ul>
    + * See {@link OffsetDateTime} for more details on the serialization format. + *

    + * This converter can read any values written by itself, {@link InstantAsStringAttributeConverter}, + * or {@link OffsetDateTimeAsStringAttributeConverter}. Values written by + * {@code Instant} converters are treated as if they are in the UTC time zone. + * + *

    + * This serialization is lexicographically orderable when the year is not negative. + *
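[Editor's note: a sketch of the behavior described above, reusing the Javadoc's own example values and assuming the internal API as shown in this diff.]

    ZonedDateTimeAsStringAttributeConverter converter = ZonedDateTimeAsStringAttributeConverter.create();
    AttributeValue paris = converter.transformFrom(Instant.EPOCH.atZone(ZoneId.of("Europe/Paris")));
    // paris.s() is "1970-01-01T01:00+01:00[Europe/Paris]"
    ZonedDateTime roundTripped = converter.transformTo(paris);
    // A value written by the Instant converter is read back as UTC:
    ZonedDateTime utc = converter.transformTo(AttributeValue.builder().s("1970-01-01T00:00:00Z").build());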

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public final class ZonedDateTimeAsStringAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + + public static ZonedDateTimeAsStringAttributeConverter create() { + return new ZonedDateTimeAsStringAttributeConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(ZonedDateTime.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + + @Override + public AttributeValue transformFrom(ZonedDateTime input) { + return AttributeValue.builder().s(input.toString()).build(); + } + + @Override + public ZonedDateTime transformTo(AttributeValue input) { + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } + + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(ZonedDateTime.class, InstantAsStringAttributeConverter.class); + } + + @Override + public ZonedDateTime convertString(String value) { + return ZonedDateTime.parse(value); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/AtomicBooleanStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/AtomicBooleanStringConverter.java new file mode 100644 index 000000000000..bfef11069f7e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/AtomicBooleanStringConverter.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.util.concurrent.atomic.AtomicBoolean; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link AtomicBoolean} and {@link String}. + * + *

    + * This converts values using {@link BooleanStringConverter}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class AtomicBooleanStringConverter implements StringConverter { + private static BooleanStringConverter BOOLEAN_CONVERTER = BooleanStringConverter.create(); + + private AtomicBooleanStringConverter() { + } + + public static AtomicBooleanStringConverter create() { + return new AtomicBooleanStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(AtomicBoolean.class); + } + + @Override + public String toString(AtomicBoolean object) { + return BOOLEAN_CONVERTER.toString(object.get()); + } + + @Override + public AtomicBoolean fromString(String string) { + return new AtomicBoolean(BOOLEAN_CONVERTER.fromString(string)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/AtomicIntegerStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/AtomicIntegerStringConverter.java new file mode 100644 index 000000000000..f679f472a3c5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/AtomicIntegerStringConverter.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.util.concurrent.atomic.AtomicInteger; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link AtomicInteger} and {@link String}. + * + *

    + * This converts values using {@link IntegerStringConverter}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class AtomicIntegerStringConverter implements StringConverter { + private static IntegerStringConverter INTEGER_CONVERTER = IntegerStringConverter.create(); + + private AtomicIntegerStringConverter() { + } + + public static AtomicIntegerStringConverter create() { + return new AtomicIntegerStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(AtomicInteger.class); + } + + @Override + public String toString(AtomicInteger object) { + return INTEGER_CONVERTER.toString(object.get()); + } + + @Override + public AtomicInteger fromString(String string) { + return new AtomicInteger(INTEGER_CONVERTER.fromString(string)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/AtomicLongStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/AtomicLongStringConverter.java new file mode 100644 index 000000000000..7d91e86efece --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/AtomicLongStringConverter.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.util.concurrent.atomic.AtomicLong; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link AtomicLong} and {@link String}. + * + *

    + * This converts values using {@link LongStringConverter}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class AtomicLongStringConverter implements StringConverter { + private static LongStringConverter LONG_CONVERTER = LongStringConverter.create(); + + private AtomicLongStringConverter() { + } + + public static AtomicLongStringConverter create() { + return new AtomicLongStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(AtomicLong.class); + } + + @Override + public String toString(AtomicLong object) { + return LONG_CONVERTER.toString(object.get()); + } + + @Override + public AtomicLong fromString(String string) { + return new AtomicLong(LONG_CONVERTER.fromString(string)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/BigDecimalStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/BigDecimalStringConverter.java new file mode 100644 index 000000000000..918867602002 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/BigDecimalStringConverter.java @@ -0,0 +1,51 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.math.BigDecimal; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link BigDecimal} and {@link String}. + * + *

    + * This converts values using {@link BigDecimal#toString()} and {@link BigDecimal#BigDecimal(String)}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class BigDecimalStringConverter implements StringConverter { + private BigDecimalStringConverter() { + } + + public static BigDecimalStringConverter create() { + return new BigDecimalStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(BigDecimal.class); + } + + @Override + public BigDecimal fromString(String string) { + return new BigDecimal(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/BigIntegerStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/BigIntegerStringConverter.java new file mode 100644 index 000000000000..ea362c9033cc --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/BigIntegerStringConverter.java @@ -0,0 +1,51 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.math.BigInteger; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link BigInteger} and {@link String}. + * + *

    + * This converts values using {@link BigInteger#toString()} and {@link BigInteger#BigInteger(String)}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class BigIntegerStringConverter implements StringConverter<BigInteger> { + private BigIntegerStringConverter() { + } + + public static BigIntegerStringConverter create() { + return new BigIntegerStringConverter(); + } + + @Override + public EnhancedType<BigInteger> type() { + return EnhancedType.of(BigInteger.class); + } + + @Override + public BigInteger fromString(String string) { + return new BigInteger(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/BooleanStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/BooleanStringConverter.java new file mode 100644 index 000000000000..f0c555020778 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/BooleanStringConverter.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Boolean} and {@link String}. + * + *

    + * This converts values to strings using {@link Boolean#toString()}, and converts the literal string values "true" and "false" + * back to a boolean. Any other string value will result in an exception. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class BooleanStringConverter implements StringConverter<Boolean>, PrimitiveConverter<Boolean> { + private BooleanStringConverter() { + } + + public static BooleanStringConverter create() { + return new BooleanStringConverter(); + } + + @Override + public EnhancedType<Boolean> type() { + return EnhancedType.of(Boolean.class); + } + + @Override + public EnhancedType<Boolean> primitiveType() { + return EnhancedType.of(boolean.class); + } + + @Override + public Boolean fromString(String string) { + switch (string) { + case "true": return true; + case "false": return false; + default: throw new IllegalArgumentException("Boolean string was not 'true' or 'false': " + string); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ByteArrayStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ByteArrayStringConverter.java new file mode 100644 index 000000000000..1a9ac6bcb287 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ByteArrayStringConverter.java @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; +import software.amazon.awssdk.utils.BinaryUtils; + +/** + * A converter between {@code byte[]} and {@link String}. + * + *

    + * This converts byte arrays to Base64-encoded strings. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class ByteArrayStringConverter implements StringConverter<byte[]> { + private ByteArrayStringConverter() { + } + + public static ByteArrayStringConverter create() { + return new ByteArrayStringConverter(); + } + + @Override + public EnhancedType<byte[]> type() { + return EnhancedType.of(byte[].class); + } + + @Override + public String toString(byte[] object) { + return BinaryUtils.toBase64(object); + } + + @Override + public byte[] fromString(String string) { + return BinaryUtils.fromBase64(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ByteStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ByteStringConverter.java new file mode 100644 index 000000000000..64406ee1dff1 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ByteStringConverter.java @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Byte} and {@link String}. + * + *

    + * This converts values using {@link Byte#toString()} and {@link Byte#valueOf(String)}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class ByteStringConverter implements StringConverter, PrimitiveConverter { + private ByteStringConverter() { + } + + public static ByteStringConverter create() { + return new ByteStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Byte.class); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(byte.class); + } + + @Override + public Byte fromString(String string) { + return Byte.valueOf(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/CharSequenceStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/CharSequenceStringConverter.java new file mode 100644 index 000000000000..d6fb5efa40bd --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/CharSequenceStringConverter.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link CharSequence} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class CharSequenceStringConverter implements StringConverter { + private CharSequenceStringConverter() { + } + + public static CharSequenceStringConverter create() { + return new CharSequenceStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(CharSequence.class); + } + + @Override + public CharSequence fromString(String string) { + return string; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/CharacterArrayStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/CharacterArrayStringConverter.java new file mode 100644 index 000000000000..420a346e0cd9 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/CharacterArrayStringConverter.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@code char[]} and {@link String}. + * + *

    + * This converts values using {@link String#String(char[])} and {@link String#toCharArray()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class CharacterArrayStringConverter implements StringConverter { + private CharacterArrayStringConverter() { + } + + public static CharacterArrayStringConverter create() { + return new CharacterArrayStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(char[].class); + } + + @Override + public String toString(char[] object) { + return new String(object); + } + + @Override + public char[] fromString(String string) { + return string.toCharArray(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/CharacterStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/CharacterStringConverter.java new file mode 100644 index 000000000000..b7965c522933 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/CharacterStringConverter.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; +import software.amazon.awssdk.utils.Validate; + +/** + * A converter between {@link Character} and {@link String}. + * + *

    + * This converts values using {@link Character#toString()} and {@link String#charAt(int)}. If the string value is longer + * than 1 character, an exception will be raised. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class CharacterStringConverter implements StringConverter, PrimitiveConverter { + private CharacterStringConverter() { + } + + public static CharacterStringConverter create() { + return new CharacterStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Character.class); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(char.class); + } + + @Override + public Character fromString(String string) { + Validate.isTrue(string.length() == 1, "Character string was not of length 1: %s", string); + return string.charAt(0); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/DefaultStringConverterProvider.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/DefaultStringConverterProvider.java new file mode 100644 index 000000000000..b4303f557c30 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/DefaultStringConverterProvider.java @@ -0,0 +1,192 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverterProvider; +import software.amazon.awssdk.utils.Validate; + +/** + *

    + * Included converters: + *

    + * <ul>
    + *     <li>{@link AtomicIntegerStringConverter}</li>
    + *     <li>{@link AtomicLongStringConverter}</li>
    + *     <li>{@link BigDecimalStringConverter}</li>
    + *     <li>{@link BigIntegerStringConverter}</li>
    + *     <li>{@link DoubleStringConverter}</li>
    + *     <li>{@link DurationStringConverter}</li>
    + *     <li>{@link FloatStringConverter}</li>
    + *     <li>{@link InstantStringConverter}</li>
    + *     <li>{@link IntegerStringConverter}</li>
    + *     <li>{@link LocalDateStringConverter}</li>
    + *     <li>{@link LocalDateTimeStringConverter}</li>
    + *     <li>{@link LocalTimeStringConverter}</li>
    + *     <li>{@link LongStringConverter}</li>
    + *     <li>{@link MonthDayStringConverter}</li>
    + *     <li>{@link OptionalDoubleStringConverter}</li>
    + *     <li>{@link OptionalIntStringConverter}</li>
    + *     <li>{@link OptionalLongStringConverter}</li>
    + *     <li>{@link ShortStringConverter}</li>
    + *     <li>{@link CharacterArrayStringConverter}</li>
    + *     <li>{@link CharacterStringConverter}</li>
    + *     <li>{@link CharSequenceStringConverter}</li>
    + *     <li>{@link OffsetDateTimeStringConverter}</li>
    + *     <li>{@link PeriodStringConverter}</li>
    + *     <li>{@link StringStringConverter}</li>
    + *     <li>{@link StringBufferStringConverter}</li>
    + *     <li>{@link StringBuilderStringConverter}</li>
    + *     <li>{@link UriStringConverter}</li>
    + *     <li>{@link UrlStringConverter}</li>
    + *     <li>{@link UuidStringConverter}</li>
    + *     <li>{@link ZonedDateTimeStringConverter}</li>
    + *     <li>{@link ZoneIdStringConverter}</li>
    + *     <li>{@link ZoneOffsetStringConverter}</li>
    + *     <li>{@link ByteArrayStringConverter}</li>
    + *     <li>{@link ByteStringConverter}</li>
    + *     <li>{@link SdkBytesStringConverter}</li>
    + *     <li>{@link AtomicBooleanStringConverter}</li>
    + *     <li>{@link BooleanStringConverter}</li>
    + * </ul>
    + * + *
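[Editor's note: a usage sketch for this provider. The generic signatures are elided in the rendering above, so this assumes converterFor(EnhancedType.of(X.class)) returns a StringConverter for X, as the cast in the implementation below suggests.]

    StringConverterProvider provider = DefaultStringConverterProvider.create();
    StringConverter<Instant> instantConverter = provider.converterFor(EnhancedType.of(Instant.class));
    Instant parsed = instantConverter.fromString("2019-10-24T00:00:00Z");
    // Unregistered types fail fast:
    // provider.converterFor(EnhancedType.of(Thread.class)) -> IllegalArgumentException "No string converter exists for ..."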

    + * This can be created via {@link #create()}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class DefaultStringConverterProvider implements StringConverterProvider { + + private final ConcurrentHashMap, StringConverter> converterCache = + new ConcurrentHashMap<>(); + + private DefaultStringConverterProvider(Builder builder) { + // Converters are used in the REVERSE order of how they were added to the builder. + for (int i = builder.converters.size() - 1; i >= 0; i--) { + StringConverter converter = builder.converters.get(i); + converterCache.put(converter.type(), converter); + + if (converter instanceof PrimitiveConverter) { + PrimitiveConverter primitiveConverter = (PrimitiveConverter) converter; + converterCache.put(primitiveConverter.primitiveType(), converter); + } + } + } + + /** + * Create a builder for a {@link DefaultStringConverterProvider}. + */ + public static Builder builder() { + return new Builder(); + } + + public static DefaultStringConverterProvider create() { + return DefaultStringConverterProvider.builder() + .addConverter(ByteArrayStringConverter.create()) + .addConverter(CharacterArrayStringConverter.create()) + .addConverter(BooleanStringConverter.create()) + .addConverter(ShortStringConverter.create()) + .addConverter(IntegerStringConverter.create()) + .addConverter(LongStringConverter.create()) + .addConverter(FloatStringConverter.create()) + .addConverter(DoubleStringConverter.create()) + .addConverter(CharacterStringConverter.create()) + .addConverter(ByteStringConverter.create()) + .addConverter(StringStringConverter.create()) + .addConverter(CharSequenceStringConverter.create()) + .addConverter(StringBufferStringConverter.create()) + .addConverter(StringBuilderStringConverter.create()) + .addConverter(BigIntegerStringConverter.create()) + .addConverter(BigDecimalStringConverter.create()) + .addConverter(AtomicLongStringConverter.create()) + .addConverter(AtomicIntegerStringConverter.create()) + .addConverter(AtomicBooleanStringConverter.create()) + .addConverter(OptionalIntStringConverter.create()) + .addConverter(OptionalLongStringConverter.create()) + .addConverter(OptionalDoubleStringConverter.create()) + .addConverter(InstantStringConverter.create()) + .addConverter(DurationStringConverter.create()) + .addConverter(LocalDateStringConverter.create()) + .addConverter(LocalTimeStringConverter.create()) + .addConverter(LocalDateTimeStringConverter.create()) + .addConverter(OffsetTimeStringConverter.create()) + .addConverter(OffsetDateTimeStringConverter.create()) + .addConverter(ZonedDateTimeStringConverter.create()) + .addConverter(YearStringConverter.create()) + .addConverter(YearMonthStringConverter.create()) + .addConverter(MonthDayStringConverter.create()) + .addConverter(PeriodStringConverter.create()) + .addConverter(ZoneOffsetStringConverter.create()) + .addConverter(ZoneIdStringConverter.create()) + .addConverter(UuidStringConverter.create()) + .addConverter(UrlStringConverter.create()) + .addConverter(UriStringConverter.create()) + .build(); + } + + @Override + public StringConverter converterFor(EnhancedType enhancedType) { + @SuppressWarnings("unchecked") // We initialized correctly, so this is safe. + StringConverter converter = (StringConverter) converterCache.get(enhancedType); + + if (converter == null) { + throw new IllegalArgumentException("No string converter exists for " + enhancedType.rawClass()); + } + + return converter; + } + + /** + * A builder for configuring and creating {@link DefaultStringConverterProvider}s. 
+ */ + public static class Builder { + private List> converters = new ArrayList<>(); + + private Builder() { + } + + public Builder addConverters(Collection> converters) { + Validate.paramNotNull(converters, "converters"); + Validate.noNullElements(converters, "Converters must not contain null members."); + this.converters.addAll(converters); + return this; + } + + public Builder addConverter(StringConverter converter) { + Validate.paramNotNull(converter, "converter"); + this.converters.add(converter); + return this; + } + + public Builder clearConverters() { + this.converters.clear(); + return this; + } + + public DefaultStringConverterProvider build() { + return new DefaultStringConverterProvider(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/DoubleStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/DoubleStringConverter.java new file mode 100644 index 000000000000..f6be14dd54aa --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/DoubleStringConverter.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Double} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class DoubleStringConverter implements StringConverter, PrimitiveConverter { + private DoubleStringConverter() { + } + + public static DoubleStringConverter create() { + return new DoubleStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Double.class); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(double.class); + } + + @Override + public Double fromString(String string) { + return Double.valueOf(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/DurationStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/DurationStringConverter.java new file mode 100644 index 000000000000..4e1ff544e91b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/DurationStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.Duration; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Duration} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class DurationStringConverter implements StringConverter { + private DurationStringConverter() { + } + + public static DurationStringConverter create() { + return new DurationStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Duration.class); + } + + @Override + public Duration fromString(String string) { + return Duration.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/FloatStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/FloatStringConverter.java new file mode 100644 index 000000000000..9f2582729f4c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/FloatStringConverter.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Float} and {@link String}. 
+ */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class FloatStringConverter implements StringConverter, PrimitiveConverter { + private FloatStringConverter() { + } + + public static FloatStringConverter create() { + return new FloatStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Float.class); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(float.class); + } + + @Override + public Float fromString(String string) { + return Float.valueOf(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/InstantStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/InstantStringConverter.java new file mode 100644 index 000000000000..7d039e223c9c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/InstantStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.Instant; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Instant} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class InstantStringConverter implements StringConverter { + private InstantStringConverter() { + } + + public static InstantStringConverter create() { + return new InstantStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Instant.class); + } + + @Override + public Instant fromString(String string) { + return Instant.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/IntegerStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/IntegerStringConverter.java new file mode 100644 index 000000000000..9c5b6cd8327f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/IntegerStringConverter.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Integer} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class IntegerStringConverter implements StringConverter, PrimitiveConverter { + private IntegerStringConverter() { + } + + public static IntegerStringConverter create() { + return new IntegerStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Integer.class); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(int.class); + } + + @Override + public Integer fromString(String string) { + return Integer.valueOf(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LocalDateStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LocalDateStringConverter.java new file mode 100644 index 000000000000..11153080d796 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LocalDateStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.LocalDate; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link LocalDate} and {@link String}. 
+ */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class LocalDateStringConverter implements StringConverter { + private LocalDateStringConverter() { + } + + public static LocalDateStringConverter create() { + return new LocalDateStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(LocalDate.class); + } + + @Override + public LocalDate fromString(String string) { + return LocalDate.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LocalDateTimeStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LocalDateTimeStringConverter.java new file mode 100644 index 000000000000..f1272e52b086 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LocalDateTimeStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.LocalDateTime; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link LocalDateTime} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class LocalDateTimeStringConverter implements StringConverter { + private LocalDateTimeStringConverter() { + } + + public static LocalDateTimeStringConverter create() { + return new LocalDateTimeStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(LocalDateTime.class); + } + + @Override + public LocalDateTime fromString(String string) { + return LocalDateTime.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LocalTimeStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LocalTimeStringConverter.java new file mode 100644 index 000000000000..08d93ae04aaf --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LocalTimeStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.LocalTime; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link LocalTime} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class LocalTimeStringConverter implements StringConverter { + private LocalTimeStringConverter() { + } + + public static LocalTimeStringConverter create() { + return new LocalTimeStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(LocalTime.class); + } + + @Override + public LocalTime fromString(String string) { + return LocalTime.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LongStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LongStringConverter.java new file mode 100644 index 000000000000..e7c70c4fcb10 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/LongStringConverter.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Long} and {@link String}. 
+ */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class LongStringConverter implements StringConverter, PrimitiveConverter { + private LongStringConverter() { + } + + public static LongStringConverter create() { + return new LongStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Long.class); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(long.class); + } + + @Override + public Long fromString(String string) { + return Long.valueOf(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/MonthDayStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/MonthDayStringConverter.java new file mode 100644 index 000000000000..a7a3c4f00eff --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/MonthDayStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.MonthDay; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link MonthDay} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class MonthDayStringConverter implements StringConverter { + private MonthDayStringConverter() { + } + + public static MonthDayStringConverter create() { + return new MonthDayStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(MonthDay.class); + } + + @Override + public MonthDay fromString(String string) { + return MonthDay.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OffsetDateTimeStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OffsetDateTimeStringConverter.java new file mode 100644 index 000000000000..7863a9d454dc --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OffsetDateTimeStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
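Float, Integer, Long and Short additionally implement PrimitiveConverter, exposing both the boxed and the unboxed EnhancedType so a single instance can be registered under both keys. A hedged sketch of that distinction (the type parameters, not visible in this rendering, are assumed to be <Long>):

    import software.amazon.awssdk.enhanced.dynamodb.EnhancedType;
    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.LongStringConverter;

    public class LongConverterTypes {
        public static void main(String[] args) {
            LongStringConverter converter = LongStringConverter.create();

            // The same converter instance answers for java.lang.Long and long,
            // so a converter registry can map both type keys to one object.
            EnhancedType<Long> boxed = converter.type();              // EnhancedType.of(Long.class)
            EnhancedType<Long> primitive = converter.primitiveType(); // EnhancedType.of(long.class)

            System.out.println(boxed);
            System.out.println(primitive);
        }
    }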
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.OffsetDateTime; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link OffsetDateTime} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class OffsetDateTimeStringConverter implements StringConverter { + private OffsetDateTimeStringConverter() { + } + + public static OffsetDateTimeStringConverter create() { + return new OffsetDateTimeStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(OffsetDateTime.class); + } + + @Override + public OffsetDateTime fromString(String string) { + return OffsetDateTime.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OffsetTimeStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OffsetTimeStringConverter.java new file mode 100644 index 000000000000..f40ca19f9d1c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OffsetTimeStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.OffsetTime; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link OffsetTime} and {@link String}. 
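Each java.time converter delegates to the type's own ISO-8601 parse method, so the stored string is whatever the type's toString() produces and the round trip is lossless. A small sketch, again assuming the interface's default toString(T):

    import java.time.Instant;
    import java.time.LocalDateTime;
    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.InstantStringConverter;
    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.LocalDateTimeStringConverter;

    public class TimeConverterRoundTrip {
        public static void main(String[] args) {
            InstantStringConverter instants = InstantStringConverter.create();
            LocalDateTimeStringConverter localDateTimes = LocalDateTimeStringConverter.create();

            // Instant.toString() and Instant.parse() are symmetric ISO-8601 forms.
            Instant original = Instant.parse("2019-10-24T10:15:30Z");
            String stored = instants.toString(original);
            System.out.println(instants.fromString(stored).equals(original)); // true

            // The same pattern applies to the Local*/Offset*/Zoned* converters.
            LocalDateTime local = localDateTimes.fromString("2019-10-24T10:15:30");
            System.out.println(localDateTimes.toString(local)); // 2019-10-24T10:15:30
        }
    }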
+ */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class OffsetTimeStringConverter implements StringConverter { + private OffsetTimeStringConverter() { + } + + public static OffsetTimeStringConverter create() { + return new OffsetTimeStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(OffsetTime.class); + } + + @Override + public OffsetTime fromString(String string) { + return OffsetTime.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OptionalDoubleStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OptionalDoubleStringConverter.java new file mode 100644 index 000000000000..d3443f76ce09 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OptionalDoubleStringConverter.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.util.OptionalDouble; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link OptionalDouble} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class OptionalDoubleStringConverter implements StringConverter { + private static DoubleStringConverter DOUBLE_CONVERTER = DoubleStringConverter.create(); + + private OptionalDoubleStringConverter() { + } + + public static OptionalDoubleStringConverter create() { + return new OptionalDoubleStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(OptionalDouble.class); + } + + @Override + public String toString(OptionalDouble object) { + if (!object.isPresent()) { + return null; + } + return DOUBLE_CONVERTER.toString(object.getAsDouble()); + } + + @Override + public OptionalDouble fromString(String string) { + if (string == null) { + return OptionalDouble.empty(); + } + return OptionalDouble.of(DOUBLE_CONVERTER.fromString(string)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OptionalIntStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OptionalIntStringConverter.java new file mode 100644 index 000000000000..024560daf6f9 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OptionalIntStringConverter.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.util.OptionalInt; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link OptionalInt} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class OptionalIntStringConverter implements StringConverter { + private static IntegerStringConverter INTEGER_CONVERTER = IntegerStringConverter.create(); + + private OptionalIntStringConverter() { + } + + public static OptionalIntStringConverter create() { + return new OptionalIntStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(OptionalInt.class); + } + + @Override + public String toString(OptionalInt object) { + if (!object.isPresent()) { + return null; + } + return INTEGER_CONVERTER.toString(object.getAsInt()); + } + + @Override + public OptionalInt fromString(String string) { + if (string == null) { + return OptionalInt.empty(); + } + return OptionalInt.of(INTEGER_CONVERTER.fromString(string)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OptionalLongStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OptionalLongStringConverter.java new file mode 100644 index 000000000000..0c396c042d3c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/OptionalLongStringConverter.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.util.OptionalLong; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link OptionalLong} and {@link String}. 
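The OptionalInt/OptionalLong/OptionalDouble converters are the only ones in this group that override toString as well: an empty optional maps to null, a null string maps back to an empty optional, and present values are delegated to the corresponding numeric converter. A sketch of that contract:

    import java.util.OptionalInt;
    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.OptionalIntStringConverter;

    public class OptionalIntRoundTrip {
        public static void main(String[] args) {
            OptionalIntStringConverter converter = OptionalIntStringConverter.create();

            // Present values delegate to IntegerStringConverter.
            System.out.println(converter.toString(OptionalInt.of(7)));   // "7"
            System.out.println(converter.fromString("7"));               // OptionalInt[7]

            // Absence is null on the string side and empty() on the optional side.
            System.out.println(converter.toString(OptionalInt.empty())); // null
            System.out.println(converter.fromString(null));              // OptionalInt.empty
        }
    }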
+ */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class OptionalLongStringConverter implements StringConverter { + private static LongStringConverter LONG_CONVERTER = LongStringConverter.create(); + + private OptionalLongStringConverter() { + } + + public static OptionalLongStringConverter create() { + return new OptionalLongStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(OptionalLong.class); + } + + @Override + public String toString(OptionalLong object) { + if (!object.isPresent()) { + return null; + } + return LONG_CONVERTER.toString(object.getAsLong()); + } + + @Override + public OptionalLong fromString(String string) { + if (string == null) { + return OptionalLong.empty(); + } + return OptionalLong.of(LONG_CONVERTER.fromString(string)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/PeriodStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/PeriodStringConverter.java new file mode 100644 index 000000000000..3c5f1cf7ec5b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/PeriodStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.Period; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Period} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class PeriodStringConverter implements StringConverter { + private PeriodStringConverter() { + } + + public static PeriodStringConverter create() { + return new PeriodStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Period.class); + } + + @Override + public Period fromString(String string) { + return Period.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/SdkBytesStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/SdkBytesStringConverter.java new file mode 100644 index 000000000000..e4261242ac86 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/SdkBytesStringConverter.java @@ -0,0 +1,54 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; +import software.amazon.awssdk.utils.BinaryUtils; + +/** + * A converter between {@link SdkBytes} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class SdkBytesStringConverter implements StringConverter { + private SdkBytesStringConverter() { + } + + public static SdkBytesStringConverter create() { + return new SdkBytesStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(SdkBytes.class); + } + + @Override + public String toString(SdkBytes object) { + return BinaryUtils.toBase64(object.asByteArray()); + } + + @Override + public SdkBytes fromString(String string) { + return SdkBytes.fromByteArray(BinaryUtils.fromBase64(string)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ShortStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ShortStringConverter.java new file mode 100644 index 000000000000..8f77c394a67a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ShortStringConverter.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Short} and {@link String}. 
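SdkBytesStringConverter is the one converter here that changes representation rather than reusing a parse method: the payload is Base64-encoded via BinaryUtils on the way to a string and decoded on the way back. A round-trip sketch:

    import software.amazon.awssdk.core.SdkBytes;
    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.SdkBytesStringConverter;

    public class SdkBytesRoundTrip {
        public static void main(String[] args) {
            SdkBytesStringConverter converter = SdkBytesStringConverter.create();

            SdkBytes original = SdkBytes.fromUtf8String("hello");

            // toString() Base64-encodes the raw bytes via BinaryUtils.toBase64(...).
            String encoded = converter.toString(original);   // "aGVsbG8="
            SdkBytes decoded = converter.fromString(encoded);

            System.out.println(encoded);
            System.out.println(decoded.asUtf8String());      // "hello"
        }
    }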
+ */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class ShortStringConverter implements StringConverter, PrimitiveConverter { + private ShortStringConverter() { + } + + public static ShortStringConverter create() { + return new ShortStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Short.class); + } + + @Override + public EnhancedType primitiveType() { + return EnhancedType.of(short.class); + } + + @Override + public Short fromString(String string) { + return Short.valueOf(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/StringBufferStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/StringBufferStringConverter.java new file mode 100644 index 000000000000..2cdb6208b7a3 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/StringBufferStringConverter.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link StringBuffer} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class StringBufferStringConverter implements StringConverter { + private StringBufferStringConverter() { + } + + public static StringBufferStringConverter create() { + return new StringBufferStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(StringBuffer.class); + } + + @Override + public StringBuffer fromString(String string) { + return new StringBuffer(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/StringBuilderStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/StringBuilderStringConverter.java new file mode 100644 index 000000000000..22f4f962830d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/StringBuilderStringConverter.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link StringBuilder} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class StringBuilderStringConverter implements StringConverter { + private StringBuilderStringConverter() { + } + + public static StringBuilderStringConverter create() { + return new StringBuilderStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(StringBuilder.class); + } + + @Override + public StringBuilder fromString(String string) { + return new StringBuilder(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/StringStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/StringStringConverter.java new file mode 100644 index 000000000000..9b054aed332b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/StringStringConverter.java @@ -0,0 +1,52 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link String} and {@link String}. 
+ */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class StringStringConverter implements StringConverter { + private StringStringConverter() { + } + + public static StringStringConverter create() { + return new StringStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(String.class); + } + + @Override + public String toString(String object) { + return object; + } + + @Override + public String fromString(String string) { + return string; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/UriStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/UriStringConverter.java new file mode 100644 index 000000000000..e54751d56274 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/UriStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.net.URI; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link URI} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class UriStringConverter implements StringConverter { + private UriStringConverter() { + } + + public static UriStringConverter create() { + return new UriStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(URI.class); + } + + @Override + public URI fromString(String string) { + return URI.create(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/UrlStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/UrlStringConverter.java new file mode 100644 index 000000000000..cbfd3c61e726 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/UrlStringConverter.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.net.MalformedURLException; +import java.net.URL; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link URL} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class UrlStringConverter implements StringConverter { + private UrlStringConverter() { + } + + public static UrlStringConverter create() { + return new UrlStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(URL.class); + } + + @Override + public URL fromString(String string) { + try { + return new URL(string); + } catch (MalformedURLException e) { + throw new IllegalArgumentException("URL format was incorrect: " + string, e); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/UuidStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/UuidStringConverter.java new file mode 100644 index 000000000000..6161bae316b4 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/UuidStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.util.UUID; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link UUID} and {@link String}. 
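UriStringConverter and UuidStringConverter lean on factory methods that already throw unchecked exceptions for bad input, whereas URL's constructor throws the checked MalformedURLException, which UrlStringConverter rewraps as IllegalArgumentException. A sketch of the observable behaviour:

    import java.net.URL;
    import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.UrlStringConverter;

    public class UrlConverterErrors {
        public static void main(String[] args) {
            UrlStringConverter converter = UrlStringConverter.create();

            URL ok = converter.fromString("https://aws.amazon.com/sdk-for-java/");
            System.out.println(ok.getHost()); // aws.amazon.com

            try {
                converter.fromString("not a url");
            } catch (IllegalArgumentException e) {
                // The checked MalformedURLException is rewrapped so callers see the
                // same unchecked exception type the other converters throw.
                System.out.println(e.getMessage()); // URL format was incorrect: not a url
            }
        }
    }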
+ */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class UuidStringConverter implements StringConverter { + private UuidStringConverter() { + } + + public static UuidStringConverter create() { + return new UuidStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(UUID.class); + } + + @Override + public UUID fromString(String string) { + return UUID.fromString(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/YearMonthStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/YearMonthStringConverter.java new file mode 100644 index 000000000000..c74272868c8d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/YearMonthStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.YearMonth; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link YearMonth} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class YearMonthStringConverter implements StringConverter { + private YearMonthStringConverter() { + } + + public static YearMonthStringConverter create() { + return new YearMonthStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(YearMonth.class); + } + + @Override + public YearMonth fromString(String string) { + return YearMonth.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/YearStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/YearStringConverter.java new file mode 100644 index 000000000000..4c1e462bce2f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/YearStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.Year; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link Year} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class YearStringConverter implements StringConverter { + private YearStringConverter() { + } + + public static YearStringConverter create() { + return new YearStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Year.class); + } + + @Override + public Year fromString(String string) { + return Year.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ZoneIdStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ZoneIdStringConverter.java new file mode 100644 index 000000000000..15ea887334b3 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ZoneIdStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.ZoneId; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link ZoneId} and {@link String}. 
+ */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class ZoneIdStringConverter implements StringConverter { + private ZoneIdStringConverter() { + } + + public static ZoneIdStringConverter create() { + return new ZoneIdStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(ZoneId.class); + } + + @Override + public ZoneId fromString(String string) { + return ZoneId.of(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ZoneOffsetStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ZoneOffsetStringConverter.java new file mode 100644 index 000000000000..f098283ab7d3 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ZoneOffsetStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.ZoneOffset; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link ZoneOffset} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class ZoneOffsetStringConverter implements StringConverter { + private ZoneOffsetStringConverter() { + } + + public static ZoneOffsetStringConverter create() { + return new ZoneOffsetStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(ZoneOffset.class); + } + + @Override + public ZoneOffset fromString(String string) { + return ZoneOffset.of(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ZonedDateTimeStringConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ZonedDateTimeStringConverter.java new file mode 100644 index 000000000000..3874402b5d0b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/string/ZonedDateTimeStringConverter.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.string; + +import java.time.ZonedDateTime; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; + +/** + * A converter between {@link ZonedDateTime} and {@link String}. + */ +@SdkInternalApi +@ThreadSafe +@Immutable +public class ZonedDateTimeStringConverter implements StringConverter { + private ZonedDateTimeStringConverter() { + } + + public static ZonedDateTimeStringConverter create() { + return new ZonedDateTimeStringConverter(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(ZonedDateTime.class); + } + + @Override + public ZonedDateTime fromString(String string) { + return ZonedDateTime.parse(string); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/ChainExtension.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/ChainExtension.java new file mode 100644 index 000000000000..0dcc81dd5fb9 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/ChainExtension.java @@ -0,0 +1,166 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.extensions; + +import java.util.ArrayDeque; +import java.util.Arrays; +import java.util.Deque; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * A meta-extension that allows multiple extensions to be chained in a specified order to act as a single composite + * extension. The order in which extensions will be used depends on the operation, for write operations they will be + * called in forward order, for read operations they will be called in reverse order. For example :- + * + *

+ * If you create a chain of three extensions:
+ * ChainMapperExtension.create(extension1, extension2, extension3);
+ *
+ * When performing any kind of write operation (eg: PutItem, UpdateItem) the beforeWrite() method will be called in
+ * forward order:
+ *
+ * {@literal extension1 -> extension2 -> extension3}
+ *
+ * So the output of extension1 will be passed into extension2, and then the output of extension2 into extension3 and
+ * so on. For operations that read (eg: GetItem, UpdateItem) the afterRead() method will be called in reverse order:
+ *
+ * {@literal extension3 -> extension2 -> extension1}
+ *
    + * This is designed to create a layered pattern when dealing with multiple extensions. One thing to note is that + * UpdateItem acts as both a write operation and a read operation so the chain will be called both ways within a + * single operation. + */ +@SdkInternalApi +public final class ChainExtension implements DynamoDbEnhancedClientExtension { + private final Deque extensionChain; + + private ChainExtension(List extensions) { + this.extensionChain = new ArrayDeque<>(extensions); + } + + /** + * Construct a new instance of {@link ChainExtension}. + * @param extensions A list of {@link DynamoDbEnhancedClientExtension} to chain together. + * @return A constructed {@link ChainExtension} object. + */ + public static ChainExtension create(DynamoDbEnhancedClientExtension... extensions) { + return new ChainExtension(Arrays.asList(extensions)); + } + + /** + * Construct a new instance of {@link ChainExtension}. + * @param extensions A list of {@link DynamoDbEnhancedClientExtension} to chain together. + * @return A constructed {@link ChainExtension} object. + */ + public static ChainExtension create(List extensions) { + return new ChainExtension(extensions); + } + + /** + * Implementation of the {@link DynamoDbEnhancedClientExtension} interface that will call all the chained extensions + * in forward order, passing the results of each one to the next and coalescing the results into a single modification. + * Multiple conditional statements will be separated by the string " AND ". Expression values will be coalesced + * unless they conflict in which case an exception will be thrown. + * + * @param context A {@link DynamoDbExtensionContext.BeforeWrite} context + * @return A single {@link WriteModification} representing the coalesced results of all the chained extensions. + */ + @Override + public WriteModification beforeWrite(DynamoDbExtensionContext.BeforeWrite context) { + Map transformedItem = null; + Expression conditionalExpression = null; + + for (DynamoDbEnhancedClientExtension extension : this.extensionChain) { + Map itemToTransform = transformedItem == null ? context.items() : transformedItem; + + DynamoDbExtensionContext.BeforeWrite beforeWrite = + DefaultDynamoDbExtensionContext.builder() + .items(itemToTransform) + .operationContext(context.operationContext()) + .tableMetadata(context.tableMetadata()) + .build(); + + WriteModification writeModification = extension.beforeWrite(beforeWrite); + + if (writeModification.transformedItem() != null) { + transformedItem = writeModification.transformedItem(); + } + + if (writeModification.additionalConditionalExpression() != null) { + if (conditionalExpression == null) { + conditionalExpression = writeModification.additionalConditionalExpression(); + } else { + conditionalExpression = + Expression.join(conditionalExpression, + writeModification.additionalConditionalExpression(), + " AND "); + } + } + } + + return WriteModification.builder() + .transformedItem(transformedItem) + .additionalConditionalExpression(conditionalExpression) + .build(); + } + + /** + * Implementation of the {@link DynamoDbEnhancedClientExtension} interface that will call all the chained extensions + * in reverse order, passing the results of each one to the next and coalescing the results into a single modification. + * + * @param context A {@link DynamoDbExtensionContext.AfterRead} context + * @return A single {@link ReadModification} representing the final transformation of all the chained extensions. 
+ */ + @Override + public ReadModification afterRead(DynamoDbExtensionContext.AfterRead context) { + Map transformedItem = null; + + Iterator iterator = extensionChain.descendingIterator(); + + while (iterator.hasNext()) { + Map itemToTransform = + transformedItem == null ? context.items() : transformedItem; + + DynamoDbExtensionContext.AfterRead afterRead = + DefaultDynamoDbExtensionContext.builder().items(itemToTransform) + .operationContext(context.operationContext()) + .tableMetadata(context.tableMetadata()) + .build(); + + ReadModification readModification = iterator.next().afterRead(afterRead); + + if (readModification.transformedItem() != null) { + transformedItem = readModification.transformedItem(); + } + } + + return ReadModification.builder() + .transformedItem(transformedItem) + .build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/DefaultDynamoDbExtensionContext.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/DefaultDynamoDbExtensionContext.java new file mode 100644 index 000000000000..f89d9a22ae7e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/DefaultDynamoDbExtensionContext.java @@ -0,0 +1,114 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.extensions; + +import java.util.Map; +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * An SDK-internal implementation of {@link DynamoDbExtensionContext.BeforeWrite} and + * {@link DynamoDbExtensionContext.AfterRead}. 
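A sketch of how ChainExtension might be assembled; extensionA/B/C are hypothetical DynamoDbEnhancedClientExtension implementations standing in for real ones and are not part of this change:

    import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension;
    import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.ChainExtension;

    public class ChainExtensionSketch {

        static DynamoDbEnhancedClientExtension chain(DynamoDbEnhancedClientExtension extensionA,
                                                     DynamoDbEnhancedClientExtension extensionB,
                                                     DynamoDbEnhancedClientExtension extensionC) {
            // beforeWrite() runs A -> B -> C, each extension seeing the previously
            // transformed item; conditional expressions are joined with " AND ".
            // afterRead() runs C -> B -> A via the descending iterator.
            return ChainExtension.create(extensionA, extensionB, extensionC);
        }
    }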
+ */ +@SdkInternalApi +public final class DefaultDynamoDbExtensionContext implements DynamoDbExtensionContext.BeforeWrite, + DynamoDbExtensionContext.AfterRead { + private final Map items; + private final OperationContext operationContext; + private final TableMetadata tableMetadata; + + private DefaultDynamoDbExtensionContext(Builder builder) { + this.items = builder.items; + this.operationContext = builder.operationContext; + this.tableMetadata = builder.tableMetadata; + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public Map items() { + return items; + } + + @Override + public OperationContext operationContext() { + return operationContext; + } + + @Override + public TableMetadata tableMetadata() { + return tableMetadata; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DefaultDynamoDbExtensionContext that = (DefaultDynamoDbExtensionContext) o; + + if (!Objects.equals(items, that.items)) { + return false; + } + if (!Objects.equals(operationContext, that.operationContext)) { + return false; + } + return Objects.equals(tableMetadata, that.tableMetadata); + } + + @Override + public int hashCode() { + int result = items != null ? items.hashCode() : 0; + result = 31 * result + (operationContext != null ? operationContext.hashCode() : 0); + result = 31 * result + (tableMetadata != null ? tableMetadata.hashCode() : 0); + return result; + } + + public static final class Builder { + private Map items; + private OperationContext operationContext; + private TableMetadata tableMetadata; + + public Builder items(Map item) { + this.items = item; + return this; + } + + public Builder operationContext(OperationContext operationContext) { + this.operationContext = operationContext; + return this; + } + + public Builder tableMetadata(TableMetadata tableMetadata) { + this.tableMetadata = tableMetadata; + return this; + } + + public DefaultDynamoDbExtensionContext build() { + return new DefaultDynamoDbExtensionContext(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/VersionRecordAttributeTags.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/VersionRecordAttributeTags.java new file mode 100644 index 000000000000..e1c2d527866b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/VersionRecordAttributeTags.java @@ -0,0 +1,31 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
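As an editorial aside (not part of this change set), the condition-coalescing rule described in ChainExtension.beforeWrite() above can be illustrated with a small sketch. It assumes only the Expression builder and the Expression.join(...) call already used in this diff, and that Expression.join is publicly accessible; the condition strings are invented:

import software.amazon.awssdk.enhanced.dynamodb.Expression;

public class ConditionCoalescingSketch {
    public static void main(String[] args) {
        // Two extensions each contribute an additional conditional expression...
        Expression fromFirstExtension = Expression.builder()
                                                  .expression("attribute_not_exists(version)")
                                                  .build();
        Expression fromSecondExtension = Expression.builder()
                                                   .expression("attribute_exists(id)")
                                                   .build();

        // ...and the chain keeps the first one, joining every later one with " AND ",
        // exactly as beforeWrite() does via Expression.join(...).
        Expression combined = Expression.join(fromFirstExtension, fromSecondExtension, " AND ");
        System.out.println(combined.expression());
    }
}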
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.extensions; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension; +import software.amazon.awssdk.enhanced.dynamodb.extensions.annotations.DynamoDbVersionAttribute; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTag; + +@SdkInternalApi +public final class VersionRecordAttributeTags { + private VersionRecordAttributeTags() { + } + + public static StaticAttributeTag attributeTagFor(DynamoDbVersionAttribute annotation) { + return VersionedRecordExtension.AttributeTags.versionAttribute(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutableInfo.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutableInfo.java new file mode 100644 index 000000000000..d64155345fa6 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutableInfo.java @@ -0,0 +1,98 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.immutable; + +import java.lang.reflect.Method; +import java.util.Collection; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public class ImmutableInfo { + private final Class immutableClass; + private final Class builderClass; + private final Method staticBuilderMethod; + private final Method buildMethod; + private final Collection propertyDescriptors; + + private ImmutableInfo(Builder b) { + this.immutableClass = b.immutableClass; + this.builderClass = b.builderClass; + this.staticBuilderMethod = b.staticBuilderMethod; + this.buildMethod = b.buildMethod; + this.propertyDescriptors = b.propertyDescriptors; + } + + public Class immutableClass() { + return immutableClass; + } + + public Class builderClass() { + return builderClass; + } + + public Optional staticBuilderMethod() { + return Optional.ofNullable(staticBuilderMethod); + } + + public Method buildMethod() { + return buildMethod; + } + + public Collection propertyDescriptors() { + return propertyDescriptors; + } + + public static Builder builder(Class immutableClass) { + return new Builder<>(immutableClass); + } + + public static final class Builder { + private final Class immutableClass; + private Class builderClass; + private Method staticBuilderMethod; + private Method buildMethod; + private Collection propertyDescriptors; + + private Builder(Class immutableClass) { + this.immutableClass = immutableClass; + } + + public Builder builderClass(Class builderClass) { + this.builderClass = builderClass; + return this; + } + + public Builder staticBuilderMethod(Method builderMethod) { + this.staticBuilderMethod = builderMethod; + return this; + } + + public Builder buildMethod(Method buildMethod) { 
+ this.buildMethod = buildMethod; + return this; + } + + public Builder propertyDescriptors(Collection propertyDescriptors) { + this.propertyDescriptors = propertyDescriptors; + return this; + } + + public ImmutableInfo build() { + return new ImmutableInfo<>(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutableIntrospector.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutableIntrospector.java new file mode 100644 index 000000000000..af7059469247 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutableIntrospector.java @@ -0,0 +1,250 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.immutable; + +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbIgnore; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; + +@SdkInternalApi +public class ImmutableIntrospector { + private static final String BUILD_METHOD = "build"; + private static final String BUILDER_METHOD = "builder"; + private static final String GET_PREFIX = "get"; + private static final String IS_PREFIX = "is"; + private static final String SET_PREFIX = "set"; + + private static volatile ImmutableIntrospector INSTANCE = null; + + // Methods from Object are commonly overridden and confuse the mapper, automatically exclude any method with a name + // that matches a method defined on Object. 
+ private final Set namesToExclude; + + private ImmutableIntrospector() { + this.namesToExclude = Collections.unmodifiableSet(Arrays.stream(Object.class.getMethods()) + .map(Method::getName) + .collect(Collectors.toSet())); + } + + public static ImmutableInfo getImmutableInfo(Class immutableClass) { + if (INSTANCE == null) { + synchronized (ImmutableIntrospector.class) { + if (INSTANCE == null) { + INSTANCE = new ImmutableIntrospector(); + } + } + } + + return INSTANCE.introspect(immutableClass); + } + + private ImmutableInfo introspect(Class immutableClass) { + Class builderClass = validateAndGetBuilderClass(immutableClass); + Optional staticBuilderMethod = findStaticBuilderMethod(immutableClass, builderClass); + List getters = filterAndCollectGetterMethods(immutableClass.getMethods()); + Map indexedBuilderMethods = filterAndIndexBuilderMethods(builderClass.getMethods()); + Method buildMethod = extractBuildMethod(indexedBuilderMethods, immutableClass) + .orElseThrow( + () -> new IllegalArgumentException( + "An immutable builder class must have a public method named 'build()' that takes no arguments " + + "and returns an instance of the immutable class it builds")); + + List propertyDescriptors = + getters.stream() + .map(getter -> { + validateGetter(getter); + String propertyName = normalizeGetterName(getter); + + Method setter = extractSetterMethod(propertyName, indexedBuilderMethods, getter, builderClass) + .orElseThrow( + () -> generateExceptionForMethod( + getter, + "A method was found on the immutable class that does not appear to have a " + + "matching setter on the builder class.")); + + return ImmutablePropertyDescriptor.create(propertyName, getter, setter); + }).collect(Collectors.toList()); + + if (!indexedBuilderMethods.isEmpty()) { + throw generateExceptionForMethod(indexedBuilderMethods.values().iterator().next(), + "A method was found on the immutable class builder that does not appear " + + "to have a matching getter on the immutable class."); + } + + return ImmutableInfo.builder(immutableClass) + .builderClass(builderClass) + .staticBuilderMethod(staticBuilderMethod.orElse(null)) + .buildMethod(buildMethod) + .propertyDescriptors(propertyDescriptors) + .build(); + } + + private boolean isMappableMethod(Method method) { + return method.getDeclaringClass() != Object.class + && method.getAnnotation(DynamoDbIgnore.class) == null + && !method.isSynthetic() + && !method.isBridge() + && !Modifier.isStatic(method.getModifiers()) + && !namesToExclude.contains(method.getName()); + } + + private Optional findStaticBuilderMethod(Class immutableClass, Class builderClass) { + try { + Method method = immutableClass.getMethod(BUILDER_METHOD); + + if (Modifier.isStatic(method.getModifiers()) && method.getReturnType().isAssignableFrom(builderClass)) { + return Optional.of(method); + } + } catch (NoSuchMethodException ignored) { + // no-op + } + + return Optional.empty(); + } + + private IllegalArgumentException generateExceptionForMethod(Method getter, String message) { + return new IllegalArgumentException( + message + " Use the @DynamoDbIgnore annotation on the method if you do not want it to be included in the " + + "TableSchema introspection. 
[Method = \"" + getter + "\"]"); + } + + private Class validateAndGetBuilderClass(Class immutableClass) { + DynamoDbImmutable dynamoDbImmutable = immutableClass.getAnnotation(DynamoDbImmutable.class); + + if (dynamoDbImmutable == null) { + throw new IllegalArgumentException("A DynamoDb immutable class must be annotated with @DynamoDbImmutable"); + } + + return dynamoDbImmutable.builder(); + } + + private void validateGetter(Method getter) { + if (getter.getReturnType() == void.class || getter.getReturnType() == Void.class) { + throw generateExceptionForMethod(getter, "A method was found on the immutable class that does not appear " + + "to be a valid getter due to the return type being void."); + } + + if (getter.getParameterCount() != 0) { + throw generateExceptionForMethod(getter, "A method was found on the immutable class that does not appear " + + "to be a valid getter due to it having one or more parameters."); + } + } + + private List filterAndCollectGetterMethods(Method[] rawMethods) { + return Arrays.stream(rawMethods) + .filter(this::isMappableMethod) + .collect(Collectors.toList()); + } + + private Map filterAndIndexBuilderMethods(Method[] rawMethods) { + return Arrays.stream(rawMethods) + .filter(this::isMappableMethod) + .collect(Collectors.toMap(this::normalizeSetterName, m -> m)); + } + + private String normalizeSetterName(Method setter) { + String setterName = setter.getName(); + + if (setterName.length() > 3 + && Character.isUpperCase(setterName.charAt(3)) + && setterName.startsWith(SET_PREFIX)) { + + return Character.toLowerCase(setterName.charAt(3)) + setterName.substring(4); + } + + return setterName; + } + + private String normalizeGetterName(Method getter) { + String getterName = getter.getName(); + + if (getterName.length() > 2 + && Character.isUpperCase(getterName.charAt(2)) + && getterName.startsWith(IS_PREFIX) + && isMethodBoolean(getter)) { + + return Character.toLowerCase(getterName.charAt(2)) + getterName.substring(3); + } + + if (getterName.length() > 3 + && Character.isUpperCase(getterName.charAt(3)) + && getterName.startsWith(GET_PREFIX)) { + + return Character.toLowerCase(getterName.charAt(3)) + getterName.substring(4); + } + + return getterName; + } + + private boolean isMethodBoolean(Method method) { + return method.getReturnType() == boolean.class || method.getReturnType() == Boolean.class; + } + + private Optional extractBuildMethod(Map indexedBuilderMethods, Class immutableClass) { + Method buildMethod = indexedBuilderMethods.get(BUILD_METHOD); + + if (buildMethod == null + || buildMethod.getParameterCount() != 0 + || !immutableClass.equals(buildMethod.getReturnType())) { + + return Optional.empty(); + } + + indexedBuilderMethods.remove(BUILD_METHOD); + return Optional.of(buildMethod); + } + + private Optional extractSetterMethod(String propertyName, + Map indexedBuilderMethods, + Method getterMethod, + Class builderClass) { + Method setterMethod = indexedBuilderMethods.get(propertyName); + + if (setterMethod == null + || !setterHasValidSignature(setterMethod, getterMethod.getReturnType(), builderClass)) { + return Optional.empty(); + } + + indexedBuilderMethods.remove(propertyName); + return Optional.of(setterMethod); + } + + private boolean setterHasValidSignature(Method setterMethod, Class expectedType, Class builderClass) { + return setterHasValidParameterSignature(setterMethod, expectedType) + && setterHasValidReturnType(setterMethod, builderClass); + } + + private boolean setterHasValidParameterSignature(Method setterMethod, Class expectedType) { 
+ return setterMethod.getParameterCount() == 1 && expectedType.equals(setterMethod.getParameterTypes()[0]); + } + + private boolean setterHasValidReturnType(Method setterMethod, Class builderClass) { + if (setterMethod.getReturnType() == void.class || setterMethod.getReturnType() == Void.class) { + return true; + } + + return setterMethod.getReturnType().isAssignableFrom(builderClass); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutablePropertyDescriptor.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutablePropertyDescriptor.java new file mode 100644 index 000000000000..fa49da849f99 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutablePropertyDescriptor.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.immutable; + +import java.lang.reflect.Method; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public final class ImmutablePropertyDescriptor { + private final String name; + private final Method getter; + private final Method setter; + + private ImmutablePropertyDescriptor(String name, Method getter, Method setter) { + this.name = name; + this.getter = getter; + this.setter = setter; + } + + public static ImmutablePropertyDescriptor create(String name, Method getter, Method setter) { + return new ImmutablePropertyDescriptor(name, getter, setter); + } + + public String name() { + return name; + } + + public Method getter() { + return getter; + } + + public Method setter() { + return setter; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/AttributeType.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/AttributeType.java new file mode 100644 index 000000000000..3ffdd3c669f8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/AttributeType.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@SdkInternalApi +public interface AttributeType { + AttributeValue objectToAttributeValue(T object); + + T attributeValueToObject(AttributeValue attributeValue); + + AttributeValueType attributeValueType(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/BeanAttributeGetter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/BeanAttributeGetter.java new file mode 100644 index 000000000000..a4df014a64a8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/BeanAttributeGetter.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.lang.reflect.Method; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + +@FunctionalInterface +@SdkInternalApi +@SuppressWarnings("unchecked") +public interface BeanAttributeGetter extends Function { + static BeanAttributeGetter create(Class beanClass, Method getter) { + Validate.isTrue(getter.getParameterCount() == 0, + "%s.%s has parameters, despite being named like a getter.", + beanClass, getter.getName()); + + return LambdaToMethodBridgeBuilder.create(BeanAttributeGetter.class) + .lambdaMethodName("apply") + .runtimeLambdaSignature(Object.class, Object.class) + .compileTimeLambdaSignature(getter.getReturnType(), beanClass) + .targetMethod(getter) + .build(); + + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/BeanAttributeSetter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/BeanAttributeSetter.java new file mode 100644 index 000000000000..fe0de2744fe1 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/BeanAttributeSetter.java @@ -0,0 +1,43 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.lang.reflect.Method; +import java.util.function.BiConsumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.internal.ReflectionUtils; + +@FunctionalInterface +@SdkInternalApi +public interface BeanAttributeSetter extends BiConsumer { + @SuppressWarnings("unchecked") + static BeanAttributeSetter create(Class beanClass, Method setter) { + Validate.isTrue(setter.getParameterCount() == 1, + "%s.%s doesn't have just 1 parameter, despite being named like a setter.", + beanClass, setter.getName()); + + Class setterInputClass = setter.getParameters()[0].getType(); + Class boxedInputClass = ReflectionUtils.getWrappedClass(setterInputClass); + + return LambdaToMethodBridgeBuilder.create(BeanAttributeSetter.class) + .lambdaMethodName("accept") + .runtimeLambdaSignature(void.class, Object.class, Object.class) + .compileTimeLambdaSignature(void.class, beanClass, boxedInputClass) + .targetMethod(setter) + .build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/BeanTableSchemaAttributeTags.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/BeanTableSchemaAttributeTags.java new file mode 100644 index 000000000000..0d19520badaf --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/BeanTableSchemaAttributeTags.java @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.util.Arrays; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTag; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.BeanTableSchemaAttributeTag; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbSecondaryPartitionKey; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbSecondarySortKey; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbSortKey; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbUpdateBehavior; + +/** + * Static provider class for core {@link BeanTableSchema} attribute tags. Each of the implemented annotations has a + * corresponding reference to this class in a + * {@link BeanTableSchemaAttributeTag} + * meta-annotation. 
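For orientation (illustrative only, not part of the diff): the tag factory methods that follow are reached through the BeanTableSchemaAttributeTag meta-annotations when a bean is introspected. A minimal, hypothetical bean using two of the annotations imported above might look like this; the Order class and index name are made up:

import software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbSecondaryPartitionKey;

@DynamoDbBean
public class Order {
    private String id;
    private String customer;

    @DynamoDbPartitionKey
    public String getId() { return id; }
    public void setId(String id) { this.id = id; }

    @DynamoDbSecondaryPartitionKey(indexNames = "customer-index")
    public String getCustomer() { return customer; }
    public void setCustomer(String customer) { this.customer = customer; }
}

Building the schema with BeanTableSchema.create(Order.class) would then route each annotation to the corresponding attributeTagFor(...) method below.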
+ */ +@SdkInternalApi +public final class BeanTableSchemaAttributeTags { + private BeanTableSchemaAttributeTags() { + } + + public static StaticAttributeTag attributeTagFor(DynamoDbPartitionKey annotation) { + return StaticAttributeTags.primaryPartitionKey(); + } + + public static StaticAttributeTag attributeTagFor(DynamoDbSortKey annotation) { + return StaticAttributeTags.primarySortKey(); + } + + public static StaticAttributeTag attributeTagFor(DynamoDbSecondaryPartitionKey annotation) { + return StaticAttributeTags.secondaryPartitionKey(Arrays.asList(annotation.indexNames())); + } + + public static StaticAttributeTag attributeTagFor(DynamoDbSecondarySortKey annotation) { + return StaticAttributeTags.secondarySortKey(Arrays.asList(annotation.indexNames())); + } + + public static StaticAttributeTag attributeTagFor(DynamoDbUpdateBehavior annotation) { + return StaticAttributeTags.updateBehavior(annotation.value()); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/DefaultParameterizedType.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/DefaultParameterizedType.java new file mode 100644 index 000000000000..ba0a1c74d7e4 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/DefaultParameterizedType.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.utils.Validate; + +/** + * An implementation of {@link ParameterizedType} that guarantees its raw type is always a {@link Class}. + */ +@SdkInternalApi +@ThreadSafe +public final class DefaultParameterizedType implements ParameterizedType { + private final Class rawType; + private final Type[] arguments; + + private DefaultParameterizedType(Class rawType, Type... arguments) { + Validate.notEmpty(arguments, "Arguments must not be empty."); + Validate.noNullElements(arguments, "Arguments cannot contain null values."); + this.rawType = Validate.paramNotNull(rawType, "rawType"); + this.arguments = arguments; + + } + + public static ParameterizedType parameterizedType(Class rawType, Type... 
arguments) { + return new DefaultParameterizedType(rawType, arguments); + } + + @Override + public Class getRawType() { + return rawType; + } + + @Override + public Type[] getActualTypeArguments() { + return arguments.clone(); + } + + @Override + public Type getOwnerType() { + return null; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/LambdaToMethodBridgeBuilder.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/LambdaToMethodBridgeBuilder.java new file mode 100644 index 000000000000..9c48447ebf95 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/LambdaToMethodBridgeBuilder.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.lang.invoke.LambdaMetafactory; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Either; + +@SdkInternalApi +public class LambdaToMethodBridgeBuilder { + private static final MethodHandles.Lookup LOOKUP = MethodHandles.lookup(); + + private final Class lambdaType; + private String lambdaMethodName; + private Class postEraseLambdaReturnType; + private Class[] postEraseLambdaParameters; + private Class preEraseLambdaReturnType; + private Class[] preEraseLambdaParameters; + private Either> targetMethod; + + private LambdaToMethodBridgeBuilder(Class lambdaType) { + this.lambdaType = lambdaType; + } + + public static LambdaToMethodBridgeBuilder create(Class lambdaType) { + return new LambdaToMethodBridgeBuilder<>(lambdaType); + } + + public LambdaToMethodBridgeBuilder lambdaMethodName(String lambdaMethodName) { + this.lambdaMethodName = lambdaMethodName; + return this; + } + + public LambdaToMethodBridgeBuilder runtimeLambdaSignature(Class returnType, Class... parameters) { + this.postEraseLambdaReturnType = returnType; + this.postEraseLambdaParameters = parameters.clone(); + return this; + } + + public LambdaToMethodBridgeBuilder compileTimeLambdaSignature(Class returnType, Class... 
parameters) { + this.preEraseLambdaReturnType = returnType; + this.preEraseLambdaParameters = parameters.clone(); + return this; + } + + public LambdaToMethodBridgeBuilder targetMethod(Method method) { + this.targetMethod = Either.left(method); + return this; + } + + public LambdaToMethodBridgeBuilder targetMethod(Constructor method) { + this.targetMethod = Either.right(method); + return this; + } + + public T build() { + try { + MethodHandle targetMethodHandle = targetMethod.map( + m -> invokeSafely(() -> LOOKUP.unreflect(m)), + c -> invokeSafely(() -> LOOKUP.unreflectConstructor(c))); + + return lambdaType.cast( + LambdaMetafactory.metafactory(LOOKUP, + lambdaMethodName, + MethodType.methodType(lambdaType), + MethodType.methodType(postEraseLambdaReturnType, postEraseLambdaParameters), + targetMethodHandle, + MethodType.methodType(preEraseLambdaReturnType, preEraseLambdaParameters)) + .getTarget() + .invoke()); + } catch (Throwable e) { + throw new IllegalArgumentException("Failed to generate method handle.", e); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/MetaTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/MetaTableSchema.java new file mode 100644 index 000000000000..1f32b9177403 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/MetaTableSchema.java @@ -0,0 +1,108 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * An implementation of {@link TableSchema} that can be instantiated as an uninitialized reference and then lazily + * initialized later with a concrete {@link TableSchema} at which point it will behave as the real object. + *

    + * This allows an immutable {@link TableSchema} to be declared and used in a self-referential recursive way within its + * builder/definition path. Any attempt to use the {@link MetaTableSchema} as a concrete {@link TableSchema} before + * calling {@link #initialize(TableSchema)} will cause an exception to be thrown. + */ +@SdkInternalApi +public class MetaTableSchema implements TableSchema { + private TableSchema concreteTableSchema; + + private MetaTableSchema() { + } + + public static MetaTableSchema create(Class itemClass) { + return new MetaTableSchema<>(); + } + + @Override + public T mapToItem(Map attributeMap) { + return concreteTableSchema().mapToItem(attributeMap); + } + + @Override + public Map itemToMap(T item, boolean ignoreNulls) { + return concreteTableSchema().itemToMap(item, ignoreNulls); + } + + @Override + public Map itemToMap(T item, Collection attributes) { + return concreteTableSchema().itemToMap(item, attributes); + } + + @Override + public AttributeValue attributeValue(T item, String attributeName) { + return concreteTableSchema().attributeValue(item, attributeName); + } + + @Override + public TableMetadata tableMetadata() { + return concreteTableSchema().tableMetadata(); + } + + @Override + public EnhancedType itemType() { + return concreteTableSchema().itemType(); + } + + @Override + public List attributeNames() { + return concreteTableSchema().attributeNames(); + } + + @Override + public boolean isAbstract() { + return concreteTableSchema().isAbstract(); + } + + public void initialize(TableSchema realTableSchema) { + if (this.concreteTableSchema != null) { + throw new IllegalStateException("A MetaTableSchema can only be initialized with a concrete TableSchema " + + "instance once."); + } + + this.concreteTableSchema = realTableSchema; + } + + public TableSchema concreteTableSchema() { + if (this.concreteTableSchema == null) { + throw new IllegalStateException("A MetaTableSchema must be initialized with a concrete TableSchema " + + "instance by calling 'initialize' before it can be used as a " + + "TableSchema itself"); + } + + return this.concreteTableSchema; + } + + public boolean isInitialized() { + return this.concreteTableSchema != null; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/MetaTableSchemaCache.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/MetaTableSchemaCache.java new file mode 100644 index 000000000000..a02236c787ac --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/MetaTableSchemaCache.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
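As an illustrative aside (not part of this change), the initialize-once contract of MetaTableSchema shown above can be sketched as follows; the Customer bean is invented and MetaTableSchema's generic signature is assumed:

import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.MetaTableSchema;
import software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;

public class MetaTableSchemaSketch {
    @DynamoDbBean
    public static class Customer {
        private String id;

        @DynamoDbPartitionKey
        public String getId() { return id; }
        public void setId(String id) { this.id = id; }
    }

    public static void main(String[] args) {
        MetaTableSchema<Customer> lazyRef = MetaTableSchema.create(Customer.class);
        System.out.println(lazyRef.isInitialized());   // false; using it as a TableSchema now would throw

        TableSchema<Customer> concrete = BeanTableSchema.create(Customer.class);
        lazyRef.initialize(concrete);                  // allowed exactly once

        System.out.println(lazyRef.attributeNames());  // now delegates to the concrete schema
    }
}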
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * A cache that can store lazily initialized MetaTableSchema objects used by the TableSchema creation classes to + * facilitate self-referencing recursive builds. + */ +@SdkInternalApi +@SuppressWarnings("unchecked") +public class MetaTableSchemaCache { + private final Map, MetaTableSchema> cacheMap = new HashMap<>(); + + public MetaTableSchema getOrCreate(Class mappedClass) { + return (MetaTableSchema) cacheMap().computeIfAbsent( + mappedClass, ignored -> MetaTableSchema.create(mappedClass)); + } + + public Optional> get(Class mappedClass) { + return Optional.ofNullable((MetaTableSchema) cacheMap().get(mappedClass)); + } + + private Map, MetaTableSchema> cacheMap() { + return this.cacheMap; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/ObjectConstructor.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/ObjectConstructor.java new file mode 100644 index 000000000000..0c6cab50a1f7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/ObjectConstructor.java @@ -0,0 +1,39 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.lang.reflect.Constructor; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + +@FunctionalInterface +@SdkInternalApi +@SuppressWarnings("unchecked") +public interface ObjectConstructor extends Supplier { + static ObjectConstructor create(Class beanClass, Constructor noArgsConstructor) { + Validate.isTrue(noArgsConstructor.getParameterCount() == 0, + "%s has no default constructor.", + beanClass); + + return LambdaToMethodBridgeBuilder.create(ObjectConstructor.class) + .lambdaMethodName("get") + .runtimeLambdaSignature(Object.class) + .compileTimeLambdaSignature(beanClass) + .targetMethod(noArgsConstructor) + .build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/ObjectGetterMethod.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/ObjectGetterMethod.java new file mode 100644 index 000000000000..d4e60a0b12c7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/ObjectGetterMethod.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.lang.reflect.Method; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@FunctionalInterface +@SdkInternalApi +@SuppressWarnings("unchecked") +public interface ObjectGetterMethod extends Function { + static ObjectGetterMethod create(Class beanClass, Method buildMethod) { + return LambdaToMethodBridgeBuilder.create(ObjectGetterMethod.class) + .lambdaMethodName("apply") + .runtimeLambdaSignature(Object.class, Object.class) + .compileTimeLambdaSignature(buildMethod.getReturnType(), beanClass) + .targetMethod(buildMethod) + .build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/ResolvedImmutableAttribute.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/ResolvedImmutableAttribute.java new file mode 100644 index 000000000000..7ec6a03a1a19 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/ResolvedImmutableAttribute.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
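As a standalone illustration (not part of this change), the bridging trick used by LambdaToMethodBridgeBuilder and the functional interfaces above (BeanAttributeGetter, ObjectConstructor, ObjectGetterMethod) is plain LambdaMetafactory: an erased runtime signature plus a typed compile-time signature wrapped around a reflected Method. This sketch uses only JDK classes:

import java.lang.invoke.LambdaMetafactory;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.reflect.Method;
import java.util.function.Function;

public class BridgeSketch {
    public static void main(String[] args) throws Throwable {
        MethodHandles.Lookup lookup = MethodHandles.lookup();
        Method getter = String.class.getMethod("toUpperCase");

        // Erased ("runtime") signature: (Object) -> Object; typed ("compile-time") signature: (String) -> String.
        Function<String, String> bridged = (Function<String, String>)
            LambdaMetafactory.metafactory(lookup,
                                          "apply",
                                          MethodType.methodType(Function.class),
                                          MethodType.methodType(Object.class, Object.class),
                                          lookup.unreflect(getter),
                                          MethodType.methodType(String.class, String.class))
                             .getTarget()
                             .invoke();

        System.out.println(bridged.apply("chain")); // prints CHAIN
    }
}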
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.nullAttributeValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.isNullAttributeValue; + +import java.util.function.BiConsumer; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.mapper.ImmutableAttribute; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableMetadata; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@SdkInternalApi +public final class ResolvedImmutableAttribute { + private final String attributeName; + private final Function getAttributeMethod; + private final BiConsumer updateBuilderMethod; + private final StaticTableMetadata tableMetadata; + + private ResolvedImmutableAttribute(String attributeName, + Function getAttributeMethod, + BiConsumer updateBuilderMethod, + StaticTableMetadata tableMetadata) { + this.attributeName = attributeName; + this.getAttributeMethod = getAttributeMethod; + this.updateBuilderMethod = updateBuilderMethod; + this.tableMetadata = tableMetadata; + } + + public static ResolvedImmutableAttribute create(ImmutableAttribute immutableAttribute, + AttributeType attributeType) { + Function getAttributeValueWithTransform = item -> { + R value = immutableAttribute.getter().apply(item); + return value == null ? nullAttributeValue() : attributeType.objectToAttributeValue(value); + }; + + // When setting a value on the java object, do not explicitly set nulls as this can cause an NPE to be thrown + // if the target attribute type is a primitive. + BiConsumer updateBuilderWithTransform = + (builder, attributeValue) -> { + // If the attributeValue is null, do not attempt to marshal + if (isNullAttributeValue(attributeValue)) { + return; + } + + R value = attributeType.attributeValueToObject(attributeValue); + + if (value != null) { + immutableAttribute.setter().accept(builder, value); + } + }; + + StaticTableMetadata.Builder tableMetadataBuilder = StaticTableMetadata.builder(); + immutableAttribute.tags().forEach( + tag -> tag.modifyMetadata(immutableAttribute.name(), attributeType.attributeValueType()) + .accept(tableMetadataBuilder)); + + return new ResolvedImmutableAttribute<>(immutableAttribute.name(), + getAttributeValueWithTransform, + updateBuilderWithTransform, + tableMetadataBuilder.build()); + } + + public ResolvedImmutableAttribute transform( + Function transformItem, + Function transformBuilder) { + + return new ResolvedImmutableAttribute<>( + attributeName, + item -> { + T otherItem = transformItem.apply(item); + + // If the containing object is null don't attempt to read attributes from it + return otherItem == null ? 
+ nullAttributeValue() : getAttributeMethod.apply(otherItem); + }, + (item, value) -> updateBuilderMethod.accept(transformBuilder.apply(item), value), + tableMetadata); + } + + public String attributeName() { + return attributeName; + } + + public Function attributeGetterMethod() { + return getAttributeMethod; + } + + public BiConsumer updateItemMethod() { + return updateBuilderMethod; + } + + public StaticTableMetadata tableMetadata() { + return tableMetadata; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticAttributeType.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticAttributeType.java new file mode 100644 index 000000000000..c3b7ae5be3bb --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticAttributeType.java @@ -0,0 +1,50 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@SdkInternalApi +public final class StaticAttributeType implements AttributeType { + private final AttributeConverter attributeConverter; + private final AttributeValueType attributeValueType; + + private StaticAttributeType(AttributeConverter attributeConverter) { + this.attributeConverter = attributeConverter; + this.attributeValueType = attributeConverter.attributeValueType(); + } + + public static AttributeType create( + AttributeConverter attributeConverter) { + + return new StaticAttributeType<>(attributeConverter); + } + + public AttributeValue objectToAttributeValue(T object) { + return this.attributeConverter.transformFrom(object); + } + + public T attributeValueToObject(AttributeValue attributeValue) { + return this.attributeConverter.transformTo(attributeValue); + } + + public AttributeValueType attributeValueType() { + return attributeValueType; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticGetterMethod.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticGetterMethod.java new file mode 100644 index 000000000000..37965942e09f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticGetterMethod.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.lang.reflect.Method; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@FunctionalInterface +@SdkInternalApi +@SuppressWarnings("unchecked") +public interface StaticGetterMethod extends Supplier { + static StaticGetterMethod create(Method buildMethod) { + return LambdaToMethodBridgeBuilder.create(StaticGetterMethod.class) + .lambdaMethodName("get") + .runtimeLambdaSignature(Object.class) + .compileTimeLambdaSignature(buildMethod.getReturnType()) + .targetMethod(buildMethod) + .build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticIndexMetadata.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticIndexMetadata.java new file mode 100644 index 000000000000..fe54ab78b2c7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticIndexMetadata.java @@ -0,0 +1,115 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.IndexMetadata; +import software.amazon.awssdk.enhanced.dynamodb.KeyAttributeMetadata; + +@SdkInternalApi +public class StaticIndexMetadata implements IndexMetadata { + private final String name; + private final KeyAttributeMetadata partitionKey; + private final KeyAttributeMetadata sortKey; + + private StaticIndexMetadata(Builder b) { + this.name = b.name; + this.partitionKey = b.partitionKey; + this.sortKey = b.sortKey; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builderFrom(IndexMetadata index) { + return index == null ? 
builder() : builder().name(index.name()) + .partitionKey(index.partitionKey().orElse(null)) + .sortKey(index.sortKey().orElse(null)); + } + + @Override + public String name() { + return this.name; + } + + @Override + public Optional partitionKey() { + return Optional.ofNullable(this.partitionKey); + } + + @Override + public Optional sortKey() { + return Optional.ofNullable(this.sortKey); + } + + public static class Builder { + private String name; + private KeyAttributeMetadata partitionKey; + private KeyAttributeMetadata sortKey; + + private Builder() { + } + + public Builder name(String name) { + this.name = name; + return this; + } + + public Builder partitionKey(KeyAttributeMetadata partitionKey) { + this.partitionKey = partitionKey; + return this; + } + + public Builder sortKey(KeyAttributeMetadata sortKey) { + this.sortKey = sortKey; + return this; + } + + public StaticIndexMetadata build() { + return new StaticIndexMetadata(this); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + StaticIndexMetadata that = (StaticIndexMetadata) o; + + if (name != null ? !name.equals(that.name) : that.name != null) { + return false; + } + if (partitionKey != null ? !partitionKey.equals(that.partitionKey) : that.partitionKey != null) { + return false; + } + return sortKey != null ? sortKey.equals(that.sortKey) : that.sortKey == null; + } + + @Override + public int hashCode() { + int result = name != null ? name.hashCode() : 0; + result = 31 * result + (partitionKey != null ? partitionKey.hashCode() : 0); + result = 31 * result + (sortKey != null ? sortKey.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticKeyAttributeMetadata.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticKeyAttributeMetadata.java new file mode 100644 index 000000000000..05af635cbce0 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/StaticKeyAttributeMetadata.java @@ -0,0 +1,69 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.KeyAttributeMetadata; + +@SdkInternalApi +public class StaticKeyAttributeMetadata implements KeyAttributeMetadata { + private final String name; + private final AttributeValueType attributeValueType; + + private StaticKeyAttributeMetadata(String name, AttributeValueType attributeValueType) { + this.name = name; + this.attributeValueType = attributeValueType; + } + + public static StaticKeyAttributeMetadata create(String name, AttributeValueType attributeValueType) { + return new StaticKeyAttributeMetadata(name, attributeValueType); + } + + @Override + public String name() { + return this.name; + } + + @Override + public AttributeValueType attributeValueType() { + return this.attributeValueType; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + StaticKeyAttributeMetadata staticKey = (StaticKeyAttributeMetadata) o; + + if (name != null ? !name.equals(staticKey.name) : staticKey.name != null) { + return false; + } + return attributeValueType == staticKey.attributeValueType; + } + + @Override + public int hashCode() { + int result = name != null ? name.hashCode() : 0; + result = 31 * result + (attributeValueType != null ? attributeValueType.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/UpdateBehaviorTag.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/UpdateBehaviorTag.java new file mode 100644 index 000000000000..4b948a154d5c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/UpdateBehaviorTag.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
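Purely for illustration (not part of the diff), the two metadata value classes above compose like this; the index and attribute names are invented:

import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType;
import software.amazon.awssdk.enhanced.dynamodb.IndexMetadata;
import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.StaticIndexMetadata;
import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.StaticKeyAttributeMetadata;

public class IndexMetadataSketch {
    public static void main(String[] args) {
        IndexMetadata index = StaticIndexMetadata.builder()
                                                 .name("orders-by-customer")
                                                 .partitionKey(StaticKeyAttributeMetadata.create("customer", AttributeValueType.S))
                                                 .sortKey(StaticKeyAttributeMetadata.create("orderDate", AttributeValueType.S))
                                                 .build();

        System.out.println(index.partitionKey().isPresent()); // true
    }
}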
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.mapper; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTag; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.mapper.UpdateBehavior; + +@SdkInternalApi +public class UpdateBehaviorTag implements StaticAttributeTag { + private static final String CUSTOM_METADATA_KEY_PREFIX = "UpdateBehavior:"; + private static final UpdateBehavior DEFAULT_UPDATE_BEHAVIOR = UpdateBehavior.WRITE_ALWAYS; + private static final UpdateBehaviorTag WRITE_ALWAYS_TAG = new UpdateBehaviorTag(UpdateBehavior.WRITE_ALWAYS); + private static final UpdateBehaviorTag WRITE_IF_NOT_EXISTS_TAG = + new UpdateBehaviorTag(UpdateBehavior.WRITE_IF_NOT_EXISTS); + + private final UpdateBehavior updateBehavior; + + private UpdateBehaviorTag(UpdateBehavior updateBehavior) { + this.updateBehavior = updateBehavior; + } + + public static UpdateBehaviorTag fromUpdateBehavior(UpdateBehavior updateBehavior) { + switch (updateBehavior) { + case WRITE_ALWAYS: + return WRITE_ALWAYS_TAG; + case WRITE_IF_NOT_EXISTS: + return WRITE_IF_NOT_EXISTS_TAG; + default: + throw new IllegalArgumentException("Update behavior '" + updateBehavior + "' not supported"); + } + } + + public static UpdateBehavior resolveForAttribute(String attributeName, TableMetadata tableMetadata) { + String metadataKey = CUSTOM_METADATA_KEY_PREFIX + attributeName; + return tableMetadata.customMetadataObject(metadataKey, UpdateBehavior.class).orElse(DEFAULT_UPDATE_BEHAVIOR); + } + + @Override + public Consumer modifyMetadata(String attributeName, + AttributeValueType attributeValueType) { + return metadata -> + metadata.addCustomMetadataObject(CUSTOM_METADATA_KEY_PREFIX + attributeName, this.updateBehavior); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchGetItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchGetItemOperation.java new file mode 100644 index 000000000000..620e529ffd64 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchGetItemOperation.java @@ -0,0 +1,124 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPage; +import software.amazon.awssdk.enhanced.dynamodb.model.ReadBatch; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.BatchGetItemRequest; +import software.amazon.awssdk.services.dynamodb.model.BatchGetItemResponse; +import software.amazon.awssdk.services.dynamodb.model.KeysAndAttributes; + +@SdkInternalApi +public class BatchGetItemOperation + implements PaginatedDatabaseOperation { + + private final BatchGetItemEnhancedRequest request; + + private BatchGetItemOperation(BatchGetItemEnhancedRequest request) { + this.request = request; + } + + public static BatchGetItemOperation create(BatchGetItemEnhancedRequest request) { + return new BatchGetItemOperation(request); + } + + @Override + public BatchGetItemRequest generateRequest(DynamoDbEnhancedClientExtension extension) { + Map requestItems = new HashMap<>(); + request.readBatches().forEach(readBatch -> addReadRequestsToMap(readBatch, requestItems)); + + return BatchGetItemRequest.builder() + .requestItems(Collections.unmodifiableMap(requestItems)) + .build(); + } + + @Override + public BatchGetResultPage transformResponse(BatchGetItemResponse response, + DynamoDbEnhancedClientExtension extension) { + return BatchGetResultPage.builder().batchGetItemResponse(response).mapperExtension(extension).build(); + } + + @Override + public Function> serviceCall(DynamoDbClient dynamoDbClient) { + return dynamoDbClient::batchGetItemPaginator; + } + + @Override + public Function> asyncServiceCall( + DynamoDbAsyncClient dynamoDbAsyncClient) { + + return dynamoDbAsyncClient::batchGetItemPaginator; + } + + private void addReadRequestsToMap(ReadBatch readBatch, Map readRequestMap) { + + KeysAndAttributes newKeysAndAttributes = readBatch.keysAndAttributes(); + KeysAndAttributes existingKeysAndAttributes = readRequestMap.get(readBatch.tableName()); + + if (existingKeysAndAttributes == null) { + readRequestMap.put(readBatch.tableName(), newKeysAndAttributes); + return; + } + + KeysAndAttributes mergedKeysAndAttributes = + mergeKeysAndAttributes(existingKeysAndAttributes, newKeysAndAttributes); + readRequestMap.put(readBatch.tableName(), mergedKeysAndAttributes); + } + + private static KeysAndAttributes mergeKeysAndAttributes(KeysAndAttributes first, KeysAndAttributes second) { + if (!compareNullableBooleans(first.consistentRead(), second.consistentRead())) { + throw new IllegalArgumentException("All batchable read requests for the same table must have the " + + "same 'consistentRead' setting."); + } + + Boolean consistentRead = first.consistentRead() == null ? 
second.consistentRead() : first.consistentRead(); + List> keys = + Stream.concat(first.keys().stream(), second.keys().stream()).collect(Collectors.toList()); + + return KeysAndAttributes.builder() + .keys(keys) + .consistentRead(consistentRead) + .build(); + } + + private static boolean compareNullableBooleans(Boolean one, Boolean two) { + if (one == null && two == null) { + return true; + } + + if (one != null) { + return one.equals(two); + } else { + return false; + } + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchWriteItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchWriteItemOperation.java new file mode 100644 index 000000000000..6b8e0b4ed12a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchWriteItemOperation.java @@ -0,0 +1,86 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteResult; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemRequest; +import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemResponse; +import software.amazon.awssdk.services.dynamodb.model.WriteRequest; +import software.amazon.awssdk.utils.CollectionUtils; + +@SdkInternalApi +public class BatchWriteItemOperation + implements DatabaseOperation { + + private final BatchWriteItemEnhancedRequest request; + + private BatchWriteItemOperation(BatchWriteItemEnhancedRequest request) { + this.request = request; + } + + public static BatchWriteItemOperation create(BatchWriteItemEnhancedRequest request) { + return new BatchWriteItemOperation(request); + } + + @Override + public BatchWriteItemRequest generateRequest(DynamoDbEnhancedClientExtension extension) { + Map> allRequestItems = new HashMap<>(); + + request.writeBatches().forEach(writeBatch -> { + Collection writeRequestsForTable = allRequestItems.computeIfAbsent( + writeBatch.tableName(), + ignored -> new ArrayList<>()); + writeRequestsForTable.addAll(writeBatch.writeRequests()); + }); + + return BatchWriteItemRequest.builder() + .requestItems( + 
Collections.unmodifiableMap(CollectionUtils.deepCopyMap(allRequestItems))) + .build(); + } + + @Override + public BatchWriteResult transformResponse(BatchWriteItemResponse response, + DynamoDbEnhancedClientExtension extension) { + return BatchWriteResult.builder().unprocessedRequests(response.unprocessedItems()).build(); + } + + @Override + public Function serviceCall(DynamoDbClient dynamoDbClient) { + return dynamoDbClient::batchWriteItem; + } + + @Override + public Function> asyncServiceCall( + DynamoDbAsyncClient dynamoDbAsyncClient) { + + return dynamoDbAsyncClient::batchWriteItem; + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchableReadOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchableReadOperation.java new file mode 100644 index 000000000000..f523157f4404 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchableReadOperation.java @@ -0,0 +1,26 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.Key; + +@SdkInternalApi +public interface BatchableReadOperation { + Boolean consistentRead(); + + Key key(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchableWriteOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchableWriteOperation.java new file mode 100644 index 000000000000..e57520d11645 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchableWriteOperation.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.model.WriteRequest; + +@SdkInternalApi +public interface BatchableWriteOperation { + WriteRequest generateWriteRequest(TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension extension); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperation.java new file mode 100644 index 000000000000..df24b62a392e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperation.java @@ -0,0 +1,143 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncIndex; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbIndex; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + + +/** + * Common interface for a single operation that can be executed in a synchronous or non-blocking asynchronous fashion + * against a mapped database table. These operations can be made against either the primary index of a table or a + * secondary index, although some implementations of this interface do not support secondary indices and will throw + * an exception when executed against one. Conceptually an operation maps 1:1 with an actual DynamoDb call. + *
<p>
    + * This interface is extended by {@link TableOperation} and {@link IndexOperation} which contain implementations of + * the behavior to actually execute the operation in the context of a table or secondary index and are used by + * {@link DynamoDbTable} or {@link DynamoDbAsyncTable} and {@link DynamoDbIndex} or {@link DynamoDbAsyncIndex} + * respectively. By sharing this common interface operations are able to re-use code regardless of whether they are + * executed in the context of a primary or secondary index or whether they are being executed in a synchronous or + * non-blocking asynchronous fashion. + * + * @param The modelled object that this table maps records to. + * @param The type of the request object for the DynamoDb call in the low level {@link DynamoDbClient} or + * {@link DynamoDbAsyncClient}. + * @param The type of the response object for the DynamoDb call in the low level {@link DynamoDbClient} + * or {@link DynamoDbAsyncClient}. + * @param The type of the mapped result object that will be returned by the execution of this operation. + */ +@SdkInternalApi +public interface CommonOperation { + /** + * This method generates the request that needs to be sent to a low level {@link DynamoDbClient}. + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param context An object containing the context, or target, of the command execution. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request of this operation. A null + * value here will result in no modifications. + * @return A request that can be used as an argument to a {@link DynamoDbClient} call to perform the operation. + */ + RequestT generateRequest(TableSchema tableSchema, OperationContext context, + DynamoDbEnhancedClientExtension extension); + + /** + * Provides a function for making the low level synchronous SDK call to DynamoDb. + * @param dynamoDbClient A low level {@link DynamoDbClient} to make the call against. + * @return A function that calls DynamoDb with a provided request object and returns the response object. + */ + Function serviceCall(DynamoDbClient dynamoDbClient); + + /** + * Provides a function for making the low level non-blocking asynchronous SDK call to DynamoDb. + * @param dynamoDbAsyncClient A low level {@link DynamoDbAsyncClient} to make the call against. + * @return A function that calls DynamoDb with a provided request object and returns the response object. + */ + Function> asyncServiceCall(DynamoDbAsyncClient dynamoDbAsyncClient); + + /** + * Takes the response object returned by the actual DynamoDb call and maps it into a higher level abstracted + * result object. + * @param response The response object returned by the DynamoDb call for this operation. + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param context An object containing the context, or target, of the command execution. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the result of this operation. A null + * value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + ResultT transformResponse(ResponseT response, + TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension extension); + + /** + * Default implementation of a complete synchronous execution of this operation against either the primary or a + * secondary index. 
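+ * <p>
+ * A minimal usage sketch of this default implementation, assuming a concrete operation such as
+ * {@link GetItemOperation}, and where {@code Customer}, {@code getItemEnhancedRequest},
+ * {@code customerTableSchema} and {@code dynamoDbClient} are hypothetical names for objects created elsewhere:
+ * <pre>{@code
+ * GetItemOperation<Customer> operation = GetItemOperation.create(getItemEnhancedRequest);
+ * OperationContext context = DefaultOperationContext.create("customers_table");
+ * Customer customer = operation.execute(customerTableSchema, context, null, dynamoDbClient);
+ * }</pre>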
+ * It performs three steps: + * 1) Call generateRequest() to get the request object. + * 2) Call getServiceCall() and call it using the request object generated in the previous step. + * 3) Call transformResponse() to convert the response object returned in the previous step to a high level result. + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param context An object containing the context, or target, of the command execution. + * @param dynamoDbClient A {@link DynamoDbClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + default ResultT execute(TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension extension, + DynamoDbClient dynamoDbClient) { + RequestT request = generateRequest(tableSchema, context, extension); + ResponseT response = serviceCall(dynamoDbClient).apply(request); + return transformResponse(response, tableSchema, context, extension); + } + + /** + * Default implementation of a complete non-blocking asynchronous execution of this operation against either the + * primary or a secondary index. + * It performs three steps: + * 1) Call generateRequest() to get the request object. + * 2) Call getServiceCall() and call it using the request object generated in the previous step. + * 3) Wraps the {@link CompletableFuture} returned by the SDK in a new one that calls transformResponse() to + * convert the response object returned in the previous step to a high level result. + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param context An object containing the context, or target, of the command execution. + * @param dynamoDbAsyncClient A {@link DynamoDbAsyncClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A {@link CompletableFuture} of the high level result object as specified by the implementation of this + * operation. + */ + default CompletableFuture executeAsync(TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension extension, + DynamoDbAsyncClient dynamoDbAsyncClient) { + RequestT request = generateRequest(tableSchema, context, extension); + CompletableFuture response = asyncServiceCall(dynamoDbAsyncClient).apply(request); + return response.thenApply(r -> transformResponse(r, tableSchema, context, extension)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java new file mode 100644 index 000000000000..36b7b7e2117b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java @@ -0,0 +1,184 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; +import software.amazon.awssdk.services.dynamodb.model.BillingMode; +import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; +import software.amazon.awssdk.services.dynamodb.model.CreateTableResponse; +import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; +import software.amazon.awssdk.services.dynamodb.model.KeyType; + +@SdkInternalApi +public class CreateTableOperation implements TableOperation { + + private final CreateTableEnhancedRequest request; + + private CreateTableOperation(CreateTableEnhancedRequest request) { + this.request = request; + } + + public static CreateTableOperation create(CreateTableEnhancedRequest request) { + return new CreateTableOperation<>(request); + } + + @Override + public CreateTableRequest generateRequest(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + if (!TableMetadata.primaryIndexName().equals(operationContext.indexName())) { + throw new IllegalArgumentException("PutItem cannot be executed against a secondary index."); + } + + String primaryPartitionKey = tableSchema.tableMetadata().primaryPartitionKey(); + Optional primarySortKey = tableSchema.tableMetadata().primarySortKey(); + Set dedupedIndexKeys = new HashSet<>(); + dedupedIndexKeys.add(primaryPartitionKey); + primarySortKey.ifPresent(dedupedIndexKeys::add); + List sdkGlobalSecondaryIndices = null; + List sdkLocalSecondaryIndices = null; + + if (this.request.globalSecondaryIndices() != null) { + sdkGlobalSecondaryIndices = + this.request.globalSecondaryIndices().stream().map(gsi -> { + String indexPartitionKey = tableSchema.tableMetadata().indexPartitionKey(gsi.indexName()); + Optional indexSortKey = tableSchema.tableMetadata().indexSortKey(gsi.indexName()); + dedupedIndexKeys.add(indexPartitionKey); + indexSortKey.ifPresent(dedupedIndexKeys::add); + + return software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex + .builder() + .indexName(gsi.indexName()) + .keySchema(generateKeySchema(indexPartitionKey, indexSortKey.orElse(null))) + .projection(gsi.projection()) + .provisionedThroughput(gsi.provisionedThroughput()) + .build(); + 
}).collect(Collectors.toList()); + } + + if (this.request.localSecondaryIndices() != null) { + sdkLocalSecondaryIndices = + this.request.localSecondaryIndices().stream().map(lsi -> { + Optional indexSortKey = tableSchema.tableMetadata().indexSortKey(lsi.indexName()); + indexSortKey.ifPresent(dedupedIndexKeys::add); + + if (!primaryPartitionKey.equals( + tableSchema.tableMetadata().indexPartitionKey(lsi.indexName()))) { + throw new IllegalArgumentException("Attempt to create a local secondary index with a partition " + + "key that is not the primary partition key. Index name: " + + lsi.indexName()); + } + + return software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex + .builder() + .indexName(lsi.indexName()) + .keySchema(generateKeySchema(primaryPartitionKey, indexSortKey.orElse(null))) + .projection(lsi.projection()) + .build(); + }).collect(Collectors.toList()); + } + + List attributeDefinitions = + dedupedIndexKeys.stream() + .map(attribute -> + AttributeDefinition.builder() + .attributeName(attribute) + .attributeType(tableSchema + .tableMetadata().scalarAttributeType(attribute) + .orElseThrow(() -> + new IllegalArgumentException( + "Could not map the key attribute '" + attribute + + "' to a valid scalar type."))) + .build()) + .collect(Collectors.toList()); + + BillingMode billingMode = this.request.provisionedThroughput() == null ? + BillingMode.PAY_PER_REQUEST : + BillingMode.PROVISIONED; + + return CreateTableRequest.builder() + .tableName(operationContext.tableName()) + .keySchema(generateKeySchema(primaryPartitionKey, primarySortKey.orElse(null))) + .globalSecondaryIndexes(sdkGlobalSecondaryIndices) + .localSecondaryIndexes(sdkLocalSecondaryIndices) + .attributeDefinitions(attributeDefinitions) + .billingMode(billingMode) + .provisionedThroughput(this.request.provisionedThroughput()) + .build(); + } + + @Override + public Function serviceCall(DynamoDbClient dynamoDbClient) { + return dynamoDbClient::createTable; + } + + @Override + public Function> asyncServiceCall( + DynamoDbAsyncClient dynamoDbAsyncClient) { + + return dynamoDbAsyncClient::createTable; + } + + @Override + public Void transformResponse(CreateTableResponse response, + TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + // This operation does not return results + return null; + } + + private static Collection generateKeySchema(String partitionKey, String sortKey) { + if (sortKey == null) { + return generateKeySchema(partitionKey); + } + + return Collections.unmodifiableList(Arrays.asList(KeySchemaElement.builder() + .attributeName(partitionKey) + .keyType(KeyType.HASH) + .build(), + KeySchemaElement.builder() + .attributeName(sortKey) + .keyType(KeyType.RANGE) + .build())); + } + + private static Collection generateKeySchema(String partitionKey) { + return Collections.singletonList(KeySchemaElement.builder() + .attributeName(partitionKey) + .keyType(KeyType.HASH) + .build()); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DatabaseOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DatabaseOperation.java new file mode 100644 index 000000000000..be6616efe577 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DatabaseOperation.java @@ -0,0 +1,106 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * Interface for a single operation that can be executed against a mapped database. These operations do not operate + * on a specific table or index, and may reference multiple tables and indexes (eg: batch operations). Conceptually an + * operation maps 1:1 with an actual DynamoDb call. + * + * @param The type of the request object for the DynamoDb call in the low level {@link DynamoDbClient}. + * @param The type of the response object for the DynamoDb call in the low level {@link DynamoDbClient}. + * @param The type of the mapped result object that will be returned by the execution of this operation. + */ +@SdkInternalApi +public interface DatabaseOperation { + /** + * This method generates the request that needs to be sent to a low level {@link DynamoDbClient}. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request of this operation. A null + * value here will result in no modifications. + * @return A request that can be used as an argument to a {@link DynamoDbClient} call to perform the operation. + */ + RequestT generateRequest(DynamoDbEnhancedClientExtension extension); + + /** + * Provides a function for making the low level synchronous SDK call to DynamoDb. + * @param dynamoDbClient A low level {@link DynamoDbClient} to make the call against. + * @return A function that calls DynamoDb with a provided request object and returns the response object. + */ + Function serviceCall(DynamoDbClient dynamoDbClient); + + /** + * Provides a function for making the low level non-blocking asynchronous SDK call to DynamoDb. + * @param dynamoDbAsyncClient A low level {@link DynamoDbAsyncClient} to make the call against. + * @return A function that calls DynamoDb with a provided request object and returns a {@link CompletableFuture} + * for the response object. + */ + Function> asyncServiceCall(DynamoDbAsyncClient dynamoDbAsyncClient); + + /** + * Takes the response object returned by the actual DynamoDb call and maps it into a higher level abstracted + * result object. + * @param response The response object returned by the DynamoDb call for this operation. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the result of this operation. A null + * value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + ResultT transformResponse(ResponseT response, DynamoDbEnhancedClientExtension extension); + + /** + * Default implementation of a complete synchronous execution of this operation. 
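+ * <p>
+ * A minimal sketch, assuming {@code batchWriteItemEnhancedRequest} and {@code dynamoDbClient} are hypothetical
+ * instances built elsewhere; passing a null extension simply means no extension is applied:
+ * <pre>{@code
+ * BatchWriteItemOperation operation = BatchWriteItemOperation.create(batchWriteItemEnhancedRequest);
+ * BatchWriteResult result = operation.execute(dynamoDbClient, null);
+ * }</pre>
+ *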
It performs three steps: + * 1) Call generateRequest() to get the request object. + * 2) Call getServiceCall() and call it using the request object generated in the previous step. + * 3) Call transformResponse() to convert the response object returned in the previous step to a high level result. + * + * @param dynamoDbClient A {@link DynamoDbClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + default ResultT execute(DynamoDbClient dynamoDbClient, DynamoDbEnhancedClientExtension extension) { + RequestT request = generateRequest(extension); + ResponseT response = serviceCall(dynamoDbClient).apply(request); + return transformResponse(response, extension); + } + + /** + * Default implementation of a complete non-blocking asynchronous execution of this operation. It performs three + * steps: + * 1) Call generateRequest() to get the request object. + * 2) Call getServiceCall() and call it using the request object generated in the previous step. + * 3) Wraps the {@link CompletableFuture} returned by the SDK in a new one that calls transformResponse() to + * convert the response object returned in the previous step to a high level result. + * + * @param dynamoDbAsyncClient A {@link DynamoDbAsyncClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + default CompletableFuture executeAsync(DynamoDbAsyncClient dynamoDbAsyncClient, + DynamoDbEnhancedClientExtension extension) { + + RequestT request = generateRequest(extension); + CompletableFuture response = asyncServiceCall(dynamoDbAsyncClient).apply(request); + return response.thenApply(r -> transformResponse(r, extension)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContext.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContext.java new file mode 100644 index 000000000000..254616f21f70 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContext.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; + +@SdkInternalApi +public class DefaultOperationContext implements OperationContext { + private final String tableName; + private final String indexName; + + private DefaultOperationContext(String tableName, String indexName) { + this.tableName = tableName; + this.indexName = indexName; + } + + public static DefaultOperationContext create(String tableName, String indexName) { + return new DefaultOperationContext(tableName, indexName); + } + + public static DefaultOperationContext create(String tableName) { + return new DefaultOperationContext(tableName, TableMetadata.primaryIndexName()); + } + + @Override + public String tableName() { + return tableName; + } + + @Override + public String indexName() { + return indexName; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DefaultOperationContext that = (DefaultOperationContext) o; + + if (tableName != null ? ! tableName.equals(that.tableName) : that.tableName != null) { + return false; + } + return indexName != null ? indexName.equals(that.indexName) : that.indexName == null; + } + + @Override + public int hashCode() { + int result = tableName != null ? tableName.hashCode() : 0; + result = 31 * result + (indexName != null ? indexName.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperation.java new file mode 100644 index 000000000000..1fd5726b4284 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperation.java @@ -0,0 +1,143 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.Delete; +import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; +import software.amazon.awssdk.services.dynamodb.model.DeleteItemResponse; +import software.amazon.awssdk.services.dynamodb.model.DeleteRequest; +import software.amazon.awssdk.services.dynamodb.model.ReturnValue; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; +import software.amazon.awssdk.services.dynamodb.model.WriteRequest; + +@SdkInternalApi +public class DeleteItemOperation + implements TableOperation, + TransactableWriteOperation, + BatchableWriteOperation { + + private final DeleteItemEnhancedRequest request; + + private DeleteItemOperation(DeleteItemEnhancedRequest request) { + this.request = request; + } + + public static DeleteItemOperation create(DeleteItemEnhancedRequest request) { + return new DeleteItemOperation<>(request); + } + + @Override + public DeleteItemRequest generateRequest(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + + if (!TableMetadata.primaryIndexName().equals(operationContext.indexName())) { + throw new IllegalArgumentException("DeleteItem cannot be executed against a secondary index."); + } + + DeleteItemRequest.Builder requestBuilder = + DeleteItemRequest.builder() + .tableName(operationContext.tableName()) + .key(this.request.key().keyMap(tableSchema, operationContext.indexName())) + .returnValues(ReturnValue.ALL_OLD); + + requestBuilder = addExpressionsIfExist(requestBuilder); + + return requestBuilder.build(); + } + + @Override + public T transformResponse(DeleteItemResponse response, + TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + return EnhancedClientUtils.readAndTransformSingleItem(response.attributes(), tableSchema, operationContext, extension); + } + + @Override + public Function serviceCall(DynamoDbClient dynamoDbClient) { + return dynamoDbClient::deleteItem; + } + + @Override + public Function> asyncServiceCall( + DynamoDbAsyncClient dynamoDbAsyncClient) { + + return dynamoDbAsyncClient::deleteItem; + } + + @Override + public WriteRequest generateWriteRequest(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + DeleteItemRequest deleteItemRequest = generateRequest(tableSchema, operationContext, extension); + + return WriteRequest.builder() + .deleteRequest(DeleteRequest.builder().key(deleteItemRequest.key()).build()) + .build(); + } + + @Override + public TransactWriteItem generateTransactWriteItem(TableSchema tableSchema, + 
OperationContext operationContext, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension) { + DeleteItemRequest deleteItemRequest = generateRequest(tableSchema, operationContext, dynamoDbEnhancedClientExtension); + + Delete delete = Delete.builder() + .key(deleteItemRequest.key()) + .tableName(deleteItemRequest.tableName()) + .conditionExpression(deleteItemRequest.conditionExpression()) + .expressionAttributeValues(deleteItemRequest.expressionAttributeValues()) + .expressionAttributeNames(deleteItemRequest.expressionAttributeNames()) + .build(); + + return TransactWriteItem.builder() + .delete(delete) + .build(); + } + + private DeleteItemRequest.Builder addExpressionsIfExist(DeleteItemRequest.Builder requestBuilder) { + if (this.request.conditionExpression() != null) { + requestBuilder = requestBuilder.conditionExpression(this.request.conditionExpression().expression()); + Map expressionNames = this.request.conditionExpression().expressionNames(); + Map expressionValues = this.request.conditionExpression().expressionValues(); + + // Avoiding adding empty collections that the low level SDK will propagate to DynamoDb where it causes error. + if (expressionNames != null && !expressionNames.isEmpty()) { + requestBuilder = requestBuilder.expressionAttributeNames(expressionNames); + } + + if (expressionValues != null && !expressionValues.isEmpty()) { + requestBuilder = requestBuilder.expressionAttributeValues(expressionValues); + } + } + return requestBuilder; + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperation.java new file mode 100644 index 000000000000..e04fb6bba453 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperation.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils; +import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.Get; +import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; +import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; +import software.amazon.awssdk.services.dynamodb.model.TransactGetItem; + +@SdkInternalApi +public class GetItemOperation implements TableOperation, + BatchableReadOperation, + TransactableReadOperation { + + private final GetItemEnhancedRequest request; + + private GetItemOperation(GetItemEnhancedRequest request) { + this.request = request; + } + + public static GetItemOperation create(GetItemEnhancedRequest request) { + return new GetItemOperation<>(request); + } + + @Override + public Boolean consistentRead() { + return this.request.consistentRead(); + } + + @Override + public Key key() { + return this.request.key(); + } + + @Override + public GetItemRequest generateRequest(TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension extension) { + if (!TableMetadata.primaryIndexName().equals(context.indexName())) { + throw new IllegalArgumentException("GetItem cannot be executed against a secondary index."); + } + + return GetItemRequest.builder() + .tableName(context.tableName()) + .key(this.request.key().keyMap(tableSchema, context.indexName())) + .consistentRead(this.request.consistentRead()) + .build(); + } + + @Override + public T transformResponse(GetItemResponse response, + TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension extension) { + return EnhancedClientUtils.readAndTransformSingleItem(response.item(), tableSchema, context, extension); + } + + @Override + public Function serviceCall(DynamoDbClient dynamoDbClient) { + return dynamoDbClient::getItem; + } + + @Override + public Function> asyncServiceCall( + DynamoDbAsyncClient dynamoDbAsyncClient) { + + return dynamoDbAsyncClient::getItem; + } + + @Override + public TransactGetItem generateTransactGetItem(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension) { + return TransactGetItem.builder() + .get(Get.builder() + .tableName(operationContext.tableName()) + .key(this.request.key().keyMap(tableSchema, operationContext.indexName())) + .build()) + .build(); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperation.java new file mode 100644 index 000000000000..8fa6e0b4eff2 --- /dev/null +++ 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperation.java @@ -0,0 +1,88 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * Interface for a single operation that can be executed against a secondary index of a mapped database table. + * Conceptually an operation maps 1:1 with an actual DynamoDb call. + *
<p>
    + * A concrete implementation of this interface should also implement {@link TableOperation} with the same types if + * the operation supports being executed against both the primary index and secondary indices. + * + * @param The modelled object that this table maps records to. + * @param The type of the request object for the DynamoDb call in the low level {@link DynamoDbClient}. + * @param The type of the response object for the DynamoDb call in the low level {@link DynamoDbClient}. + * @param The type of the mapped result object that will be returned by the execution of this operation. + */ +@SdkInternalApi +public interface IndexOperation + extends CommonOperation { + /** + * Default implementation of a complete synchronous execution of this operation against a secondary index. It will + * construct a context based on the given table name and secondary index name and then call execute() on the + * {@link CommonOperation} interface to perform the operation. + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param tableName The physical name of the table that contains the secondary index to execute the operation + * against. + * @param indexName The physical name of the secondary index to execute the operation against. + * @param dynamoDbClient A {@link DynamoDbClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + default ResultT executeOnSecondaryIndex(TableSchema tableSchema, + String tableName, + String indexName, + DynamoDbEnhancedClientExtension extension, + DynamoDbClient dynamoDbClient) { + OperationContext context = + DefaultOperationContext.create(tableName, indexName); + return execute(tableSchema, context, extension, dynamoDbClient); + } + + /** + * Default implementation of a complete non-blocking asynchronous execution of this operation against a secondary + * index. It will construct a context based on the given table name and secondary index name and then call + * executeAsync() on the {@link CommonOperation} interface to perform the operation. + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param tableName The physical name of the table that contains the secondary index to execute the operation + * against. + * @param indexName The physical name of the secondary index to execute the operation against. + * @param dynamoDbAsyncClient A {@link DynamoDbAsyncClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. 
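+ * <p>
+ * A minimal sketch, assuming {@code indexOperation} is some {@code IndexOperation<Customer, ?, ?, Customer>}
+ * implementation (its construction is elided here) and that {@code customerTableSchema} and
+ * {@code dynamoDbAsyncClient} are hypothetical instances created elsewhere:
+ * <pre>{@code
+ * CompletableFuture<Customer> result = indexOperation.executeOnSecondaryIndexAsync(
+ *         customerTableSchema, "customers_table", "customers_by_email", null, dynamoDbAsyncClient);
+ * }</pre>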
+ */ + default CompletableFuture executeOnSecondaryIndexAsync(TableSchema tableSchema, + String tableName, + String indexName, + DynamoDbEnhancedClientExtension extension, + DynamoDbAsyncClient dynamoDbAsyncClient) { + OperationContext context = + DefaultOperationContext.create(tableName, indexName); + return executeAsync(tableSchema, context, extension, dynamoDbAsyncClient); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedDatabaseOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedDatabaseOperation.java new file mode 100644 index 000000000000..36bf8dbd80f3 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedDatabaseOperation.java @@ -0,0 +1,113 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.internal.TransformIterable; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * Interface for an operation that can be executed against a mapped database and is expected to return a paginated + * list of results. These operations do not operate on a specific table or index, and may reference multiple tables + * and indexes (eg: batch operations). Typically, each page of results that is served will automatically perform an + * additional service call to DynamoDb to retrieve the next set of results. + * + * @param The type of the request object for the DynamoDb call in the low level {@link DynamoDbClient}. + * @param The type of the response object for the DynamoDb call in the low level {@link DynamoDbClient}. + * @param The type of the mapped result object that will be returned by the execution of this operation. + */ +@SdkInternalApi +public interface PaginatedDatabaseOperation { + /** + * This method generates the request that needs to be sent to a low level {@link DynamoDbClient}. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request of this operation. A null value + * here will result in no modifications. + * @return A request that can be used as an argument to a {@link DynamoDbClient} call to perform the operation. + */ + RequestT generateRequest(DynamoDbEnhancedClientExtension extension); + + /** + * Provides a function for making the low level synchronous paginated SDK call to DynamoDb. 
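+ * For example, {@link BatchGetItemOperation} implements this by returning a method reference to the paginated
+ * low level call (a sketch of one possible shape, not a requirement):
+ * <pre>{@code
+ * return dynamoDbClient::batchGetItemPaginator;
+ * }</pre>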
+ * @param dynamoDbClient A low level {@link DynamoDbClient} to make the call against. + * @return A function that calls DynamoDb with a provided request object and returns the response object. + */ + Function> serviceCall(DynamoDbClient dynamoDbClient); + + /** + * Provides a function for making the low level non-blocking asynchronous paginated SDK call to DynamoDb. + * @param dynamoDbAsyncClient A low level {@link DynamoDbAsyncClient} to make the call against. + * @return A function that calls DynamoDb with a provided request object and returns the response object. + */ + Function> asyncServiceCall(DynamoDbAsyncClient dynamoDbAsyncClient); + + /** + * Takes the response object returned by the actual DynamoDb call and maps it into a higher level abstracted + * result object. + * @param response The response object returned by the DynamoDb call for this operation. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the result of this operation. A null value + * here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + ResultT transformResponse(ResponseT response, DynamoDbEnhancedClientExtension extension); + + /** + * Default implementation of a complete synchronous execution of this operation against a database. + * It performs three steps: + * 1) Call generateRequest() to get the request object. + * 2) Call getServiceCall() and call it using the request object generated in the previous step. + * 3) Wraps the {@link SdkIterable} that was returned by the previous step with a transformation that turns each + * object returned to a high level result. + * + * @param dynamoDbClient A {@link DynamoDbClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this operation. A + * null value here will result in no modifications. + * @return An {@link SdkIterable} that will iteratively return pages of high level result objects as specified by + * the implementation of this operation. + */ + default SdkIterable execute(DynamoDbClient dynamoDbClient, DynamoDbEnhancedClientExtension extension) { + RequestT request = generateRequest(extension); + SdkIterable response = serviceCall(dynamoDbClient).apply(request); + return TransformIterable.of(response, r -> transformResponse(r, extension)); + } + + /** + * Default implementation of a complete non-blocking asynchronous execution of this operation against a database. + * It performs three steps: + * 1) Call generateRequest() to get the request object. + * 2) Call getServiceCall() and call it using the request object generated in the previous step. + * 3) Wraps the {@link CompletableFuture} returned by the SDK in a new one that calls transformResponse() to + * convert the response object returned in the previous step to a high level result. + * + * @param dynamoDbAsyncClient A {@link DynamoDbAsyncClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this operation. A + * null value here will result in no modifications. + * @return An {@link SdkPublisher} that will publish pages of the high level result object as specified by the + * implementation of this operation. 
+ */ + default SdkPublisher executeAsync(DynamoDbAsyncClient dynamoDbAsyncClient, + DynamoDbEnhancedClientExtension extension) { + + RequestT request = generateRequest(extension); + SdkPublisher response = asyncServiceCall(dynamoDbAsyncClient).apply(request); + return response.map(r -> transformResponse(r, extension)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedIndexOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedIndexOperation.java new file mode 100644 index 000000000000..ac516e9f1fc4 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedIndexOperation.java @@ -0,0 +1,88 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * Interface for an operation that can be executed against a secondary index of a mapped database table and is + * expected to return a paginated list of results. Typically, each page of results that is served will automatically + * perform an additional service call to DynamoDb to retrieve the next set of results. + *

    + * A concrete implementation of this interface should also implement {@link PaginatedTableOperation} with the same + * types if the operation supports being executed against both the primary index and secondary indices. + * + * @param The modelled object that this table maps records to. + * @param The type of the request object for the DynamoDb call in the low level {@link DynamoDbClient}. + * @param The type of the response object for the DynamoDb call in the low level {@link DynamoDbClient}. + */ +@SdkInternalApi +public interface PaginatedIndexOperation + extends PaginatedOperation { + /** + * Default implementation of a complete synchronous execution of this operation against a secondary index. It will + * construct a context based on the given table name and secondary index name and then call execute() on the + * {@link PaginatedOperation} interface to perform the operation. + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param tableName The physical name of the table that contains the secondary index to execute the operation + * against. + * @param indexName The physical name of the secondary index to execute the operation against. + * @param dynamoDbClient A {@link DynamoDbClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + default PageIterable executeOnSecondaryIndex(TableSchema tableSchema, + String tableName, + String indexName, + DynamoDbEnhancedClientExtension extension, + DynamoDbClient dynamoDbClient) { + OperationContext context = DefaultOperationContext.create(tableName, indexName); + return execute(tableSchema, context, extension, dynamoDbClient); + } + + /** + * Default implementation of a complete non-blocking asynchronous execution of this operation against a secondary + * index. It will construct a context based on the given table name and secondary index name and then call + * executeAsync() on the {@link PaginatedOperation} interface to perform the operation. + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param tableName The physical name of the table that contains the secondary index to execute the operation + * against. + * @param indexName The physical name of the secondary index to execute the operation against. + * @param dynamoDbAsyncClient A {@link DynamoDbAsyncClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. 
+ */ + default SdkPublisher> executeOnSecondaryIndexAsync(TableSchema tableSchema, + String tableName, + String indexName, + DynamoDbEnhancedClientExtension extension, + DynamoDbAsyncClient dynamoDbAsyncClient) { + OperationContext context = DefaultOperationContext.create(tableName, indexName); + return executeAsync(tableSchema, context, extension, dynamoDbAsyncClient); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedOperation.java new file mode 100644 index 000000000000..09a64ae9f9f0 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedOperation.java @@ -0,0 +1,160 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncIndex; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbIndex; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.TransformIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.PagePublisher; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * Common interface for an operation that can be executed in a synchronous or non-blocking asynchronous fashion + * against a mapped database table and is expected to return a paginated list of results. These operations can be made + * against either the primary index of a table or a secondary index, although some implementations of this interface + * do not support secondary indices and will throw an exception when executed against one. Typically, each page of + * results that is served will automatically perform an additional service call to DynamoDb to retrieve the next set + * of results. + *

    + * This interface is extended by {@link PaginatedTableOperation} and {@link PaginatedIndexOperation} which contain + * implementations of the behavior to actually execute the operation in the context of a table or secondary index and + * are used by {@link DynamoDbTable} or {@link DynamoDbAsyncTable} and {@link DynamoDbIndex} or {@link DynamoDbAsyncIndex} + * respectively. By sharing this common interface operations are able to re-use code regardless of whether they are + * executed in the context of a primary or secondary index or whether they are being executed in a synchronous or + * non-blocking asynchronous fashion. + * + * @param The modelled object that this table maps records to. + * @param The type of the request object for the DynamoDb call in the low level {@link DynamoDbClient} or + * {@link DynamoDbAsyncClient}. + * @param The type of the response object for the DynamoDb call in the low level {@link DynamoDbClient} + * or {@link DynamoDbAsyncClient}. + */ +@SdkInternalApi +public interface PaginatedOperation { + /** + * This method generates the request that needs to be sent to a low level {@link DynamoDbClient}. + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param context An object containing the context, or target, of the command execution. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request of this operation. A null + * value here will result in no modifications. + * @return A request that can be used as an argument to a {@link DynamoDbClient} call to perform the operation. + */ + RequestT generateRequest(TableSchema tableSchema, OperationContext context, + DynamoDbEnhancedClientExtension extension); + + /** + * Provides a function for making the low level synchronous SDK call to DynamoDb. + * @param dynamoDbClient A low level {@link DynamoDbClient} to make the call against. + * @return A function that calls a paginated DynamoDb operation with a provided request object and returns the + * response object. + */ + Function> serviceCall(DynamoDbClient dynamoDbClient); + + /** + * Provides a function for making the low level non-blocking asynchronous SDK call to DynamoDb. + * @param dynamoDbAsyncClient A low level {@link DynamoDbAsyncClient} to make the call against. + * @return A function that calls a paginated DynamoDb operation with a provided request object and returns the + * response object. + */ + Function> asyncServiceCall(DynamoDbAsyncClient dynamoDbAsyncClient); + + /** + * Takes the response object returned by the actual DynamoDb call and maps it into a higher level abstracted + * result object. + * @param response The response object returned by the DynamoDb call for this operation. + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param context An object containing the context, or target, of the command execution. + * @param dynamoDbEnhancedClientExtension A {@link DynamoDbEnhancedClientExtension} that may modify the result of + * this operation. A null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + Page transformResponse(ResponseT response, + TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension); + + /** + * Default implementation of a complete synchronous execution of this operation against either the primary or a + * secondary index. + *

+ * It performs three steps:
+ * <ol>
+ *     <li>Call {@link #generateRequest} to get the request object.</li>
+ *     <li>Call {@link #serviceCall} and call it using the request object generated in the previous step.</li>
+ *     <li>Wraps the {@link SdkIterable} that was returned by the previous step with a transformation that turns each
+ *     object returned to a high level result.</li>
+ * </ol>
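
The three steps just listed lend themselves to a compact, self-contained sketch: generate a request, obtain an iterable of raw pages from a stand-in service call, and lazily map each page to a high level result as it is iterated. All names here (PaginatedExecuteSketch, transformPages, the sample pages) are invented; this is not the SDK's TransformIterable.

import java.util.Iterator;
import java.util.List;
import java.util.function.Function;

// Self-contained sketch of the generateRequest -> serviceCall -> per-page transform pipeline.
final class PaginatedExecuteSketch {

    // Lazily transforms each raw page into a high level result as the caller iterates.
    static <R, P> Iterable<P> transformPages(Iterable<R> rawPages, Function<R, P> transform) {
        return () -> new Iterator<P>() {
            private final Iterator<R> delegate = rawPages.iterator();

            @Override
            public boolean hasNext() {
                return delegate.hasNext();
            }

            @Override
            public P next() {
                return transform.apply(delegate.next());
            }
        };
    }

    public static void main(String[] args) {
        String request = "query(customers)";                  // 1) generateRequest()
        List<String> rawPages = List.of("page-1", "page-2");  // 2) serviceCall().apply(request)
        for (String result : transformPages(rawPages, p -> "Page[" + p + " of " + request + "]")) {
            System.out.println(result);                       // 3) per-page transformation
        }
    }
}
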
    + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param context An object containing the context, or target, of the command execution. + * @param dynamoDbClient A {@link DynamoDbClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + default PageIterable execute(TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension extension, + DynamoDbClient dynamoDbClient) { + RequestT request = generateRequest(tableSchema, context, extension); + SdkIterable response = serviceCall(dynamoDbClient).apply(request); + + SdkIterable> pageIterables = + TransformIterable.of(response, r -> transformResponse(r, tableSchema, context, extension)); + return PageIterable.create(pageIterables); + } + + /** + * Default implementation of a complete non-blocking asynchronous execution of this operation against either the + * primary or a secondary index. + *

+ * It performs three steps:
+ * <ol>
+ *     <li>Call {@link #generateRequest} to get the request object.</li>
+ *     <li>Call {@link #asyncServiceCall} and call it using the request object generated in the previous step.</li>
+ *     <li>Wraps the {@link SdkPublisher} returned by the SDK in a new one that calls transformResponse() to
+ *     convert the response objects published to a high level result.</li>
+ * </ol>
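
In the same spirit, the asynchronous path can be sketched with the JDK's Flow API alone: a publisher emits raw pages and the subscriber transforms each one as it arrives, loosely mirroring the per-page map step described above. SubmissionPublisher and the page strings are stand-ins; no SDK publisher types are involved.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Flow;
import java.util.concurrent.SubmissionPublisher;

// Self-contained sketch: transform each published "page" as it arrives, then complete.
final class AsyncPageMapSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch done = new CountDownLatch(1);
        try (SubmissionPublisher<String> publisher = new SubmissionPublisher<>()) {
            publisher.subscribe(new Flow.Subscriber<String>() {
                private Flow.Subscription subscription;

                @Override
                public void onSubscribe(Flow.Subscription s) {
                    subscription = s;
                    s.request(1);
                }

                @Override
                public void onNext(String rawPage) {
                    // Stand-in for the transformResponse step applied to each page.
                    System.out.println("Result(" + rawPage + ")");
                    subscription.request(1);
                }

                @Override
                public void onError(Throwable t) {
                    done.countDown();
                }

                @Override
                public void onComplete() {
                    done.countDown();
                }
            });
            publisher.submit("page-1");
            publisher.submit("page-2");
        } // closing the publisher signals onComplete once buffered pages are delivered
        done.await();
    }
}
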
    + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param context An object containing the context, or target, of the command execution. + * @param dynamoDbAsyncClient A {@link DynamoDbAsyncClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return An {@link SdkPublisher} that will publish pages of the high level result object as specified by the + * implementation of this operation. + */ + default PagePublisher executeAsync(TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension extension, + DynamoDbAsyncClient dynamoDbAsyncClient) { + RequestT request = generateRequest(tableSchema, context, extension); + SdkPublisher response = asyncServiceCall(dynamoDbAsyncClient).apply(request); + return PagePublisher.create(response.map(r -> transformResponse(r, tableSchema, context, extension))); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedTableOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedTableOperation.java new file mode 100644 index 000000000000..b8da44df3d3b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PaginatedTableOperation.java @@ -0,0 +1,85 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.PagePublisher; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * Interface for an operation that can be executed against a mapped database table and is expected to return a + * paginated list of results. These operations will be executed against the primary index of the table. Typically, + * each page of results that is served will automatically perform an additional service call to DynamoDb to retrieve + * the next set of results. + *

    + * A concrete implementation of this interface should also implement {@link PaginatedIndexOperation} with the same + * types if the operation supports being executed against both the primary index and secondary indices. + * + * @param The modelled object that this table maps records to. + * @param The type of the request object for the DynamoDb call in the low level {@link DynamoDbClient}. + * @param The type of the response object for the DynamoDb call in the low level {@link DynamoDbClient}. + */ +@SdkInternalApi +public interface PaginatedTableOperation + extends PaginatedOperation { + /** + * Default implementation of a complete synchronous execution of this operation against the primary index. It will + * construct a context based on the given table name and then call execute() on the {@link PaginatedOperation} + * interface to perform the operation. + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param tableName The physical name of the table to execute the operation against. + * @param dynamoDbClient A {@link DynamoDbClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this operation. A + * null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + default PageIterable executeOnPrimaryIndex(TableSchema tableSchema, + String tableName, + DynamoDbEnhancedClientExtension extension, + DynamoDbClient dynamoDbClient) { + + OperationContext context = DefaultOperationContext.create(tableName, TableMetadata.primaryIndexName()); + return execute(tableSchema, context, extension, dynamoDbClient); + } + + /** + * Default implementation of a complete non-blocking asynchronous execution of this operation against the primary + * index. It will construct a context based on the given table name and then call executeAsync() on the + * {@link PaginatedOperation} interface to perform the operation. + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param tableName The physical name of the table to execute the operation against. + * @param dynamoDbAsyncClient A {@link DynamoDbAsyncClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this operation. A + * null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + default PagePublisher executeOnPrimaryIndexAsync(TableSchema tableSchema, + String tableName, + DynamoDbEnhancedClientExtension extension, + DynamoDbAsyncClient dynamoDbAsyncClient) { + + OperationContext context = DefaultOperationContext.create(tableName, TableMetadata.primaryIndexName()); + return executeAsync(tableSchema, context, extension, dynamoDbAsyncClient); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperation.java new file mode 100644 index 000000000000..3fa0bbbd5f2f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperation.java @@ -0,0 +1,181 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.Put; +import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; +import software.amazon.awssdk.services.dynamodb.model.PutItemResponse; +import software.amazon.awssdk.services.dynamodb.model.PutRequest; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; +import software.amazon.awssdk.services.dynamodb.model.WriteRequest; + +@SdkInternalApi +public class PutItemOperation + implements BatchableWriteOperation, + TransactableWriteOperation, + TableOperation { + + private final PutItemEnhancedRequest request; + + private PutItemOperation(PutItemEnhancedRequest request) { + this.request = request; + } + + public static PutItemOperation create(PutItemEnhancedRequest request) { + return new PutItemOperation<>(request); + } + + @Override + public PutItemRequest generateRequest(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + + if (!TableMetadata.primaryIndexName().equals(operationContext.indexName())) { + throw new IllegalArgumentException("PutItem cannot be executed against a secondary index."); + } + + TableMetadata tableMetadata = tableSchema.tableMetadata(); + + // Fail fast if required primary partition key does not exist and avoid the call to DynamoDb + tableMetadata.primaryPartitionKey(); + + boolean alwaysIgnoreNulls = true; + Map itemMap = tableSchema.itemToMap(this.request.item(), alwaysIgnoreNulls); + + WriteModification transformation = + extension != null ? 
extension.beforeWrite( + DefaultDynamoDbExtensionContext.builder() + .items(itemMap) + .operationContext(operationContext) + .tableMetadata(tableMetadata) + .build()) + : null; + + if (transformation != null && transformation.transformedItem() != null) { + itemMap = transformation.transformedItem(); + } + + PutItemRequest.Builder requestBuilder = PutItemRequest.builder() + .tableName(operationContext.tableName()) + .item(itemMap); + + requestBuilder = addExpressionsIfExist(requestBuilder, transformation); + + return requestBuilder.build(); + } + + @Override + public Void transformResponse(PutItemResponse response, + TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + // No results are returned by this operation + return null; + } + + @Override + public Function serviceCall(DynamoDbClient dynamoDbClient) { + return dynamoDbClient::putItem; + } + + @Override + public Function> asyncServiceCall( + DynamoDbAsyncClient dynamoDbAsyncClient) { + + return dynamoDbAsyncClient::putItem; + } + + @Override + public WriteRequest generateWriteRequest(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + + PutItemRequest putItemRequest = generateRequest(tableSchema, operationContext, extension); + + if (putItemRequest.conditionExpression() != null) { + throw new IllegalArgumentException("A mapper extension inserted a conditionExpression in a PutItem " + + "request as part of a BatchWriteItemRequest. This is not supported by " + + "DynamoDb. An extension known to do this is the " + + "VersionedRecordExtension which is loaded by default unless overridden. " + + "To fix this use a table schema that does not " + + "have a versioned attribute in it or do not load the offending extension."); + } + + return WriteRequest.builder().putRequest(PutRequest.builder().item(putItemRequest.item()).build()).build(); + } + + @Override + public TransactWriteItem generateTransactWriteItem(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension) { + PutItemRequest putItemRequest = generateRequest(tableSchema, operationContext, dynamoDbEnhancedClientExtension); + + Put put = Put.builder() + .item(putItemRequest.item()) + .tableName(putItemRequest.tableName()) + .conditionExpression(putItemRequest.conditionExpression()) + .expressionAttributeValues(putItemRequest.expressionAttributeValues()) + .expressionAttributeNames(putItemRequest.expressionAttributeNames()) + .build(); + + return TransactWriteItem.builder() + .put(put) + .build(); + } + + private PutItemRequest.Builder addExpressionsIfExist(PutItemRequest.Builder requestBuilder, + WriteModification transformation) { + Expression mergedConditionExpression; + + if (transformation != null && transformation.additionalConditionalExpression() != null) { + mergedConditionExpression = Expression.join(this.request.conditionExpression(), + transformation.additionalConditionalExpression(), " AND "); + } else { + mergedConditionExpression = this.request.conditionExpression(); + } + + if (mergedConditionExpression != null) { + requestBuilder = requestBuilder.conditionExpression(mergedConditionExpression.expression()); + + // Avoiding adding empty collections that the low level SDK will propagate to DynamoDb where it causes error. 
+ if (mergedConditionExpression.expressionValues() != null && !mergedConditionExpression.expressionValues().isEmpty()) { + requestBuilder = requestBuilder.expressionAttributeValues(mergedConditionExpression.expressionValues()); + + } + + if (mergedConditionExpression.expressionNames() != null && !mergedConditionExpression.expressionNames().isEmpty()) { + requestBuilder = requestBuilder.expressionAttributeNames(mergedConditionExpression.expressionNames()); + } + } + return requestBuilder; + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperation.java new file mode 100644 index 000000000000..01a98fb9bb7e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperation.java @@ -0,0 +1,119 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.Map; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils; +import software.amazon.awssdk.enhanced.dynamodb.internal.ProjectionExpressionConvertor; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.QueryRequest; +import software.amazon.awssdk.services.dynamodb.model.QueryResponse; + +@SdkInternalApi +public class QueryOperation implements PaginatedTableOperation, + PaginatedIndexOperation { + + private final QueryEnhancedRequest request; + + private QueryOperation(QueryEnhancedRequest request) { + this.request = request; + } + + public static QueryOperation create(QueryEnhancedRequest request) { + return new QueryOperation<>(request); + } + + @Override + public QueryRequest generateRequest(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + Expression queryExpression = this.request.queryConditional().expression(tableSchema, operationContext.indexName()); + Map expressionValues = 
queryExpression.expressionValues(); + Map expressionNames = queryExpression.expressionNames(); + + if (this.request.filterExpression() != null) { + expressionValues = Expression.joinValues(expressionValues, this.request.filterExpression().expressionValues()); + expressionNames = Expression.joinNames(expressionNames, this.request.filterExpression().expressionNames()); + } + + ProjectionExpressionConvertor attributeToProject = + ProjectionExpressionConvertor.create(this.request.nestedAttributesToProject()); + Map projectionNameMap = attributeToProject.convertToExpressionMap(); + if (!projectionNameMap.isEmpty()) { + expressionNames = Expression.joinNames(expressionNames, projectionNameMap); + } + String projectionExpression = attributeToProject.convertToProjectionExpression().orElse(null); + + QueryRequest.Builder queryRequest = QueryRequest.builder() + .tableName(operationContext.tableName()) + .keyConditionExpression(queryExpression.expression()) + .expressionAttributeValues(expressionValues) + .expressionAttributeNames(expressionNames) + .scanIndexForward(this.request.scanIndexForward()) + .limit(this.request.limit()) + .exclusiveStartKey(this.request.exclusiveStartKey()) + .consistentRead(this.request.consistentRead()) + .projectionExpression(projectionExpression); + + if (!TableMetadata.primaryIndexName().equals(operationContext.indexName())) { + queryRequest = queryRequest.indexName(operationContext.indexName()); + } + + if (this.request.filterExpression() != null) { + queryRequest = queryRequest.filterExpression(this.request.filterExpression().expression()); + } + + return queryRequest.build(); + } + + @Override + public Function> serviceCall(DynamoDbClient dynamoDbClient) { + return dynamoDbClient::queryPaginator; + } + + @Override + public Function> asyncServiceCall(DynamoDbAsyncClient dynamoDbAsyncClient) { + return dynamoDbAsyncClient::queryPaginator; + } + + @Override + public Page transformResponse(QueryResponse response, + TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension) { + + return EnhancedClientUtils.readAndTransformPaginatedItems(response, + tableSchema, + context, + dynamoDbEnhancedClientExtension, + QueryResponse::items, + QueryResponse::lastEvaluatedKey); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperation.java new file mode 100644 index 000000000000..9d6243290cc7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperation.java @@ -0,0 +1,117 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.Map; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils; +import software.amazon.awssdk.enhanced.dynamodb.internal.ProjectionExpressionConvertor; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.ScanRequest; +import software.amazon.awssdk.services.dynamodb.model.ScanResponse; + +@SdkInternalApi +public class ScanOperation implements PaginatedTableOperation, + PaginatedIndexOperation { + + + private final ScanEnhancedRequest request; + + private ScanOperation(ScanEnhancedRequest request) { + this.request = request; + } + + public static ScanOperation create(ScanEnhancedRequest request) { + return new ScanOperation<>(request); + } + + @Override + public ScanRequest generateRequest(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + Map expressionValues = null; + Map expressionNames = null; + + if (this.request.filterExpression() != null) { + expressionValues = this.request.filterExpression().expressionValues(); + expressionNames = this.request.filterExpression().expressionNames(); + } + + ProjectionExpressionConvertor attributeToProject = + ProjectionExpressionConvertor.create(this.request.nestedAttributesToProject()); + Map projectionNameMap = attributeToProject.convertToExpressionMap(); + if (!projectionNameMap.isEmpty()) { + expressionNames = Expression.joinNames(expressionNames, projectionNameMap); + } + String projectionExpression = attributeToProject.convertToProjectionExpression().orElse(null); + + ScanRequest.Builder scanRequest = ScanRequest.builder() + .tableName(operationContext.tableName()) + .limit(this.request.limit()) + .exclusiveStartKey(this.request.exclusiveStartKey()) + .consistentRead(this.request.consistentRead()) + .expressionAttributeValues(expressionValues) + .expressionAttributeNames(expressionNames) + .projectionExpression(projectionExpression); + + if (!TableMetadata.primaryIndexName().equals(operationContext.indexName())) { + scanRequest = scanRequest.indexName(operationContext.indexName()); + } + + if (this.request.filterExpression() != null) { + scanRequest = scanRequest.filterExpression(this.request.filterExpression().expression()); + } + + return scanRequest.build(); + } + + @Override + public Page transformResponse(ScanResponse response, + TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension) { + + return EnhancedClientUtils.readAndTransformPaginatedItems(response, + tableSchema, + context, + dynamoDbEnhancedClientExtension, + 
ScanResponse::items, + ScanResponse::lastEvaluatedKey); + } + + @Override + public Function> serviceCall(DynamoDbClient dynamoDbClient) { + return dynamoDbClient::scanPaginator; + } + + @Override + public Function> asyncServiceCall(DynamoDbAsyncClient dynamoDbAsyncClient) { + return dynamoDbAsyncClient::scanPaginator; + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperation.java new file mode 100644 index 000000000000..f1f98121100c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperation.java @@ -0,0 +1,83 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * Interface for a single operation that can be executed against a mapped database table. These operations will be + * executed against the primary index of the table. Conceptually an operation maps 1:1 with an actual DynamoDb call. + *

    + * A concrete implementation of this interface should also implement {@link IndexOperation} with the same types if + * the operation supports being executed against both the primary index and secondary indices. + * + * @param The modelled object that this table maps records to. + * @param The type of the request object for the DynamoDb call in the low level {@link DynamoDbClient}. + * @param The type of the response object for the DynamoDb call in the low level {@link DynamoDbClient}. + * @param The type of the mapped result object that will be returned by the execution of this operation. + */ +@SdkInternalApi +public interface TableOperation + extends CommonOperation { + /** + * Default implementation of a complete synchronous execution of this operation against the primary index. It will + * construct a context based on the given table name and then call execute() on the {@link CommonOperation} interface to + * perform the operation. + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param tableName The physical name of the table to execute the operation against. + * @param dynamoDbClient A {@link DynamoDbClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A high level result object as specified by the implementation of this operation. + */ + default ResultT executeOnPrimaryIndex(TableSchema tableSchema, + String tableName, + DynamoDbEnhancedClientExtension extension, + DynamoDbClient dynamoDbClient) { + OperationContext context = DefaultOperationContext.create(tableName, TableMetadata.primaryIndexName()); + return execute(tableSchema, context, extension, dynamoDbClient); + } + + /** + * Default implementation of a complete non-blocking asynchronous execution of this operation against the primary + * index. It will construct a context based on the given table name and then call executeAsync() on the + * {@link CommonOperation} interface to perform the operation. + * + * @param tableSchema A {@link TableSchema} that maps the table to a modelled object. + * @param tableName The physical name of the table to execute the operation against. + * @param dynamoDbAsyncClient A {@link DynamoDbAsyncClient} to make the call against. + * @param extension A {@link DynamoDbEnhancedClientExtension} that may modify the request or result of this + * operation. A null value here will result in no modifications. + * @return A {@link CompletableFuture} of the high level result object as specified by the implementation of this + * operation. 
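
The executeOnPrimaryIndex and executeOnPrimaryIndexAsync defaults follow the same template used across these operation interfaces: the interface's default method owns the fixed flow while an implementation supplies only the request, the service call, and the response transform. A minimal stand-alone sketch of that shape, with invented names rather than SDK types:

import java.util.function.Function;

// Stand-alone sketch of the template pattern: the default method fixes the flow,
// implementations supply the pieces. None of these types are SDK classes.
interface OperationSketch<RequestT, ResponseT, ResultT> {
    RequestT generateRequest();

    Function<RequestT, ResponseT> serviceCall();

    ResultT transformResponse(ResponseT response);

    // Analogous in shape to executeOnPrimaryIndex: fixed steps, variable pieces.
    default ResultT execute() {
        RequestT request = generateRequest();
        ResponseT response = serviceCall().apply(request);
        return transformResponse(response);
    }
}

final class UppercaseLengthOperation implements OperationSketch<String, String, Integer> {
    @Override
    public String generateRequest() {
        return "hello";
    }

    @Override
    public Function<String, String> serviceCall() {
        return String::toUpperCase;
    }

    @Override
    public Integer transformResponse(String response) {
        return response.length();
    }

    public static void main(String[] args) {
        System.out.println(new UppercaseLengthOperation().execute()); // prints 5
    }
}
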
+ */ + default CompletableFuture executeOnPrimaryIndexAsync(TableSchema tableSchema, + String tableName, + DynamoDbEnhancedClientExtension extension, + DynamoDbAsyncClient dynamoDbAsyncClient) { + + OperationContext context = DefaultOperationContext.create(tableName, TableMetadata.primaryIndexName()); + return executeAsync(tableSchema, context, extension, dynamoDbAsyncClient); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactGetItemsOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactGetItemsOperation.java new file mode 100644 index 000000000000..a1d1caf424b7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactGetItemsOperation.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.Document; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.internal.DefaultDocument; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactGetItemsEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.TransactGetItemsRequest; +import software.amazon.awssdk.services.dynamodb.model.TransactGetItemsResponse; + +@SdkInternalApi +public class TransactGetItemsOperation + implements DatabaseOperation> { + + private TransactGetItemsEnhancedRequest request; + + private TransactGetItemsOperation(TransactGetItemsEnhancedRequest request) { + this.request = request; + } + + public static TransactGetItemsOperation create(TransactGetItemsEnhancedRequest request) { + return new TransactGetItemsOperation(request); + } + + @Override + public TransactGetItemsRequest generateRequest(DynamoDbEnhancedClientExtension extension) { + return TransactGetItemsRequest.builder() + .transactItems(this.request.transactGetItems()) + .build(); + } + + @Override + public Function serviceCall(DynamoDbClient dynamoDbClient) { + return dynamoDbClient::transactGetItems; + } + + @Override + public Function> asyncServiceCall( + DynamoDbAsyncClient dynamoDbAsyncClient) { + + return dynamoDbAsyncClient::transactGetItems; + } + + @Override + public List transformResponse(TransactGetItemsResponse response, + DynamoDbEnhancedClientExtension extension) { + return response.responses() + .stream() + .map(r -> r == null ? 
null : DefaultDocument.create(r.item())) + .collect(Collectors.toList()); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactWriteItemsOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactWriteItemsOperation.java new file mode 100644 index 000000000000..45a01f773ed9 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactWriteItemsOperation.java @@ -0,0 +1,69 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactWriteItemsEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItemsRequest; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItemsResponse; + +@SdkInternalApi +public class TransactWriteItemsOperation + implements DatabaseOperation { + + private TransactWriteItemsEnhancedRequest request; + + private TransactWriteItemsOperation(TransactWriteItemsEnhancedRequest request) { + this.request = request; + } + + public static TransactWriteItemsOperation create(TransactWriteItemsEnhancedRequest request) { + return new TransactWriteItemsOperation(request); + } + + @Override + public TransactWriteItemsRequest generateRequest(DynamoDbEnhancedClientExtension extension) { + return TransactWriteItemsRequest.builder() + .transactItems(this.request.transactWriteItems()) + .clientRequestToken(this.request.clientRequestToken()) + .build(); + } + + @Override + public Void transformResponse(TransactWriteItemsResponse response, DynamoDbEnhancedClientExtension extension) { + return null; // this operation does not return results + } + + @Override + public Function serviceCall( + DynamoDbClient dynamoDbClient) { + + return dynamoDbClient::transactWriteItems; + } + + @Override + public Function> asyncServiceCall( + DynamoDbAsyncClient dynamoDbAsyncClient) { + + return dynamoDbAsyncClient::transactWriteItems; + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableReadOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableReadOperation.java new file mode 100644 index 000000000000..616e73fab2b1 --- /dev/null +++ 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableReadOperation.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.model.TransactGetItem; + +@SdkInternalApi +public interface TransactableReadOperation { + TransactGetItem generateTransactGetItem(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableWriteOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableWriteOperation.java new file mode 100644 index 000000000000..1f88e8044e00 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableWriteOperation.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; + +@SdkInternalApi +public interface TransactableWriteOperation { + TransactWriteItem generateTransactWriteItem(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java new file mode 100644 index 000000000000..1e34cb470a29 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java @@ -0,0 +1,273 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.isNullAttributeValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.readAndTransformSingleItem; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; +import software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.UpdateBehaviorTag; +import software.amazon.awssdk.enhanced.dynamodb.mapper.UpdateBehavior; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.ReturnValue; +import 
software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; +import software.amazon.awssdk.services.dynamodb.model.Update; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; + +@SdkInternalApi +public class UpdateItemOperation + implements TableOperation, + TransactableWriteOperation { + + private static final Function EXPRESSION_VALUE_KEY_MAPPER = + key -> ":AMZN_MAPPED_" + EnhancedClientUtils.cleanAttributeName(key); + + private static final Function EXPRESSION_KEY_MAPPER = + key -> "#AMZN_MAPPED_" + EnhancedClientUtils.cleanAttributeName(key); + + private static final Function CONDITIONAL_UPDATE_MAPPER = + key -> "if_not_exists(" + EXPRESSION_KEY_MAPPER.apply(key) + ", " + + EXPRESSION_VALUE_KEY_MAPPER.apply(key) + ")"; + + private final UpdateItemEnhancedRequest request; + + private UpdateItemOperation(UpdateItemEnhancedRequest request) { + this.request = request; + } + + public static UpdateItemOperation create(UpdateItemEnhancedRequest request) { + return new UpdateItemOperation<>(request); + } + + @Override + public UpdateItemRequest generateRequest(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + if (!TableMetadata.primaryIndexName().equals(operationContext.indexName())) { + throw new IllegalArgumentException("UpdateItem cannot be executed against a secondary index."); + } + + Map itemMap = tableSchema.itemToMap(this.request.item(), + Boolean.TRUE.equals(this.request.ignoreNulls())); + TableMetadata tableMetadata = tableSchema.tableMetadata(); + + WriteModification transformation = + extension != null ? extension.beforeWrite(DefaultDynamoDbExtensionContext.builder() + .items(itemMap) + .operationContext(operationContext) + .tableMetadata(tableMetadata) + .build()) : null; + + if (transformation != null && transformation.transformedItem() != null) { + itemMap = transformation.transformedItem(); + } + + Collection primaryKeys = tableSchema.tableMetadata().primaryKeys(); + + Map keyAttributeValues = itemMap.entrySet().stream() + .filter(entry -> primaryKeys.contains(entry.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + UpdateItemRequest.Builder requestBuilder = UpdateItemRequest.builder() + .tableName(operationContext.tableName()) + .key(keyAttributeValues) + .returnValues(ReturnValue.ALL_NEW); + + Map filteredAttributeValues = itemMap.entrySet().stream() + .filter(entry -> !primaryKeys.contains(entry.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + requestBuilder = addExpressionsIfExist(transformation, filteredAttributeValues, requestBuilder, tableMetadata); + + return requestBuilder.build(); + } + + @Override + public T transformResponse(UpdateItemResponse response, + TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension extension) { + try { + return readAndTransformSingleItem(response.attributes(), tableSchema, operationContext, extension); + } catch (RuntimeException e) { + // With a partial update it's possible to update the record into a state that the mapper can no longer + // read or validate. This is more likely to happen with signed and encrypted records that undergo partial + // updates (that practice is discouraged for this reason). + throw new IllegalStateException("Unable to read the new item returned by UpdateItem after the update " + + "occurred. 
Rollbacks are not supported by this operation, therefore the " + + "record may no longer be readable using this model.", e); + } + } + + @Override + public Function serviceCall(DynamoDbClient dynamoDbClient) { + return dynamoDbClient::updateItem; + } + + @Override + public Function> asyncServiceCall( + DynamoDbAsyncClient dynamoDbAsyncClient) { + + return dynamoDbAsyncClient::updateItem; + } + + @Override + public TransactWriteItem generateTransactWriteItem(TableSchema tableSchema, OperationContext operationContext, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension) { + UpdateItemRequest updateItemRequest = generateRequest(tableSchema, operationContext, dynamoDbEnhancedClientExtension); + + Update update = Update.builder() + .key(updateItemRequest.key()) + .tableName(updateItemRequest.tableName()) + .updateExpression(updateItemRequest.updateExpression()) + .conditionExpression(updateItemRequest.conditionExpression()) + .expressionAttributeValues(updateItemRequest.expressionAttributeValues()) + .expressionAttributeNames(updateItemRequest.expressionAttributeNames()) + .build(); + + return TransactWriteItem.builder() + .update(update) + .build(); + } + + private static Expression generateUpdateExpression(Map attributeValuesToUpdate, + TableMetadata tableMetadata) { + // Sort the updates into 'SET' or 'REMOVE' based on null value + List updateSetActions = new ArrayList<>(); + List updateRemoveActions = new ArrayList<>(); + + attributeValuesToUpdate.forEach((key, value) -> { + if (!isNullAttributeValue(value)) { + UpdateBehavior updateBehavior = UpdateBehaviorTag.resolveForAttribute(key, tableMetadata); + updateSetActions.add(EXPRESSION_KEY_MAPPER.apply(key) + " = " + + updateExpressionMapperForBehavior(updateBehavior).apply(key)); + } else { + updateRemoveActions.add(EXPRESSION_KEY_MAPPER.apply(key)); + } + }); + + // Combine the expressions + List updateActions = new ArrayList<>(); + + if (!updateSetActions.isEmpty()) { + updateActions.add("SET " + String.join(", ", updateSetActions)); + } + + if (!updateRemoveActions.isEmpty()) { + updateActions.add("REMOVE " + String.join(", ", updateRemoveActions)); + } + + String updateExpression = String.join(" ", updateActions); + + Map expressionAttributeValues = + attributeValuesToUpdate.entrySet() + .stream() + .filter(entry -> !isNullAttributeValue(entry.getValue())) + .collect(Collectors.toMap( + entry -> EXPRESSION_VALUE_KEY_MAPPER.apply(entry.getKey()), + Map.Entry::getValue)); + + Map expressionAttributeNames = + attributeValuesToUpdate.keySet() + .stream() + .collect(Collectors.toMap(EXPRESSION_KEY_MAPPER, key -> key)); + + return Expression.builder() + .expression(updateExpression) + .expressionValues(Collections.unmodifiableMap(expressionAttributeValues)) + .expressionNames(expressionAttributeNames) + .build(); + } + + private static Function updateExpressionMapperForBehavior(UpdateBehavior updateBehavior) { + switch (updateBehavior) { + case WRITE_ALWAYS: + return EXPRESSION_VALUE_KEY_MAPPER; + case WRITE_IF_NOT_EXISTS: + return CONDITIONAL_UPDATE_MAPPER; + default: + throw new IllegalArgumentException("Unsupported update behavior '" + updateBehavior + "'"); + } + } + + private UpdateItemRequest.Builder addExpressionsIfExist(WriteModification transformation, + Map filteredAttributeValues, + UpdateItemRequest.Builder requestBuilder, + TableMetadata tableMetadata) { + Map expressionNames = null; + Map expressionValues = null; + String conditionExpressionString = null; + + /* Add update expression for transformed non-key 
attributes if applicable */ + if (!filteredAttributeValues.isEmpty()) { + Expression fullUpdateExpression = generateUpdateExpression(filteredAttributeValues, tableMetadata); + expressionNames = fullUpdateExpression.expressionNames(); + expressionValues = fullUpdateExpression.expressionValues(); + requestBuilder = requestBuilder.updateExpression(fullUpdateExpression.expression()); + } + + /* Merge in conditional expression from extension WriteModification if applicable */ + if (transformation != null && transformation.additionalConditionalExpression() != null) { + expressionNames = + Expression.joinNames(expressionNames, + transformation.additionalConditionalExpression().expressionNames()); + expressionValues = + Expression.joinValues(expressionValues, + transformation.additionalConditionalExpression().expressionValues()); + conditionExpressionString = transformation.additionalConditionalExpression().expression(); + } + + /* Merge in conditional expression from specified 'conditionExpression' if applicable */ + if (this.request.conditionExpression() != null) { + expressionNames = Expression.joinNames(expressionNames, this.request.conditionExpression().expressionNames()); + expressionValues = Expression.joinValues(expressionValues, this.request.conditionExpression().expressionValues()); + conditionExpressionString = Expression.joinExpressions(conditionExpressionString, + this.request.conditionExpression().expression(), " AND "); + } + + // Avoiding adding empty collections that the low level SDK will propagate to DynamoDb where it causes error. + if (expressionNames != null && !expressionNames.isEmpty()) { + requestBuilder = requestBuilder.expressionAttributeNames(expressionNames); + } + + if (expressionValues != null && !expressionValues.isEmpty()) { + requestBuilder = requestBuilder.expressionAttributeValues(expressionValues); + } + + return requestBuilder.conditionExpression(conditionExpressionString); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchema.java new file mode 100644 index 000000000000..d4d65cb4deb0 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchema.java @@ -0,0 +1,382 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
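To illustrate the update-expression generation above, a hedged usage sketch; Customer (with an 'accountId' key plus 'name' and 'email' attributes) and the DynamoDbTable named customerTable are assumptions, not part of this change:

    // Sketch only; Customer and customerTable are hypothetical.
    Customer patch = new Customer();
    patch.setAccountId("account-123");   // key attribute: always copied into the request key
    patch.setName("Jo");                 // non-null value: rendered into the SET clause
    patch.setEmail(null);                // null value with ignoreNulls(false): rendered into the REMOVE clause

    Customer updated = customerTable.updateItem(r -> r.item(patch).ignoreNulls(false));

    // The generated low-level update expression is roughly:
    //   SET #AMZN_MAPPED_name = :AMZN_MAPPED_name REMOVE #AMZN_MAPPED_email
    // and if 'name' were tagged with UpdateBehavior.WRITE_IF_NOT_EXISTS the SET clause would instead read:
    //   SET #AMZN_MAPPED_name = if_not_exists(#AMZN_MAPPED_name, :AMZN_MAPPED_name)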
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import java.beans.BeanInfo; +import java.beans.IntrospectionException; +import java.beans.Introspector; +import java.beans.PropertyDescriptor; +import java.lang.annotation.Annotation; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.BeanAttributeGetter; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.BeanAttributeSetter; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.MetaTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.MetaTableSchemaCache; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.ObjectConstructor; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.BeanTableSchemaAttributeTag; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbAttribute; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbConvertedBy; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbFlatten; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbIgnore; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; + +/** + * Implementation of {@link TableSchema} that builds a table schema based on properties and annotations of a bean + * class. Example: + *

    + * 
    + * {@literal @}DynamoDbBean
    + * public class Customer {
    + *     private String accountId;
    + *     private int subId;            // primitive types are supported
    + *     private String name;
    + *     private Instant createdDate;
    + *
    + *     {@literal @}DynamoDbPartitionKey
    + *     public String getAccountId() { return this.accountId; }
    + *     public void setAccountId(String accountId) { this.accountId = accountId; }
    + *
    + *     {@literal @}DynamoDbSortKey
    + *     public int getSubId() { return this.subId; }
    + *     public void setSubId(int subId) { this.subId = subId; }
    + *
    + *     // Defines a GSI (customers_by_name) with a partition key of 'name'
    + *     {@literal @}DynamoDbSecondaryPartitionKey(indexNames = "customers_by_name")
    + *     public String getName() { return this.name; }
    + *     public void setName(String name) { this.name = name; }
    + *
    + *     // Defines an LSI (customers_by_date) with a sort key of 'createdDate' and also declares the
    + *     // same attribute as a sort key for the GSI named 'customers_by_name'
    + *     {@literal @}DynamoDbSecondarySortKey(indexNames = {"customers_by_date", "customers_by_name"})
    + *     public Instant getCreatedDate() { return this.createdDate; }
    + *     public void setCreatedDate(Instant createdDate) { this.createdDate = createdDate; }
    + * }
    + *
    + * 
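A hedged usage sketch for the bean above; the DynamoDbEnhancedClient named enhancedClient and the table name 'customers' are assumptions, not part of this change:

    // Usage sketch only; 'enhancedClient' and the table name are assumed.
    BeanTableSchema<Customer> customerSchema = BeanTableSchema.create(Customer.class);
    DynamoDbTable<Customer> customerTable = enhancedClient.table("customers", customerSchema);

    Customer customer = new Customer();
    customer.setAccountId("account-123");
    customer.setSubId(1);
    customer.setName("Jane Doe");
    customer.setCreatedDate(Instant.now());
    customerTable.putItem(customer);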
    + * + * Creating an {@link BeanTableSchema} is a moderately expensive operation, and should be performed sparingly. This is + * usually done once at application startup. + * + * @param The type of object that this {@link TableSchema} maps to. + */ +@SdkPublicApi +public final class BeanTableSchema extends WrappedTableSchema> { + private static final String ATTRIBUTE_TAG_STATIC_SUPPLIER_NAME = "attributeTagFor"; + + private BeanTableSchema(StaticTableSchema staticTableSchema) { + super(staticTableSchema); + } + + /** + * Scans a bean class and builds a {@link BeanTableSchema} from it that can be used with the + * {@link DynamoDbEnhancedClient}. + * + * Creating an {@link BeanTableSchema} is a moderately expensive operation, and should be performed sparingly. This is + * usually done once at application startup. + * + * @param beanClass The bean class to build the table schema from. + * @param The bean class type. + * @return An initialized {@link BeanTableSchema} + */ + public static BeanTableSchema create(Class beanClass) { + return create(beanClass, new MetaTableSchemaCache()); + } + + private static BeanTableSchema create(Class beanClass, MetaTableSchemaCache metaTableSchemaCache) { + // Fetch or create a new reference to this yet-to-be-created TableSchema in the cache + MetaTableSchema metaTableSchema = metaTableSchemaCache.getOrCreate(beanClass); + + BeanTableSchema newTableSchema = + new BeanTableSchema<>(createStaticTableSchema(beanClass, metaTableSchemaCache)); + metaTableSchema.initialize(newTableSchema); + return newTableSchema; + } + + // Called when creating an immutable TableSchema recursively. Utilizes the MetaTableSchema cache to stop infinite + // recursion + static TableSchema recursiveCreate(Class beanClass, MetaTableSchemaCache metaTableSchemaCache) { + Optional> metaTableSchema = metaTableSchemaCache.get(beanClass); + + // If we get a cache hit... 
+ if (metaTableSchema.isPresent()) { + // Either: use the cached concrete TableSchema if we have one + if (metaTableSchema.get().isInitialized()) { + return metaTableSchema.get().concreteTableSchema(); + } + + // Or: return the uninitialized MetaTableSchema as this must be a recursive reference and it will be + // initialized later as the chain completes + return metaTableSchema.get(); + } + + // Otherwise: cache doesn't know about this class; create a new one from scratch + return create(beanClass); + + } + + private static StaticTableSchema createStaticTableSchema(Class beanClass, + MetaTableSchemaCache metaTableSchemaCache) { + DynamoDbBean dynamoDbBean = beanClass.getAnnotation(DynamoDbBean.class); + + if (dynamoDbBean == null) { + throw new IllegalArgumentException("A DynamoDb bean class must be annotated with @DynamoDbBean"); + } + + BeanInfo beanInfo; + + try { + beanInfo = Introspector.getBeanInfo(beanClass); + } catch (IntrospectionException e) { + throw new IllegalArgumentException(e); + } + + Supplier newObjectSupplier = newObjectSupplierForClass(beanClass); + + StaticTableSchema.Builder builder = StaticTableSchema.builder(beanClass) + .newItemSupplier(newObjectSupplier); + + builder.attributeConverterProviders(createConverterProvidersFromAnnotation(dynamoDbBean)); + + List> attributes = new ArrayList<>(); + + Arrays.stream(beanInfo.getPropertyDescriptors()) + .filter(BeanTableSchema::isMappableProperty) + .forEach(propertyDescriptor -> { + DynamoDbFlatten dynamoDbFlatten = getPropertyAnnotation(propertyDescriptor, DynamoDbFlatten.class); + + if (dynamoDbFlatten != null) { + builder.flatten(TableSchema.fromClass(propertyDescriptor.getReadMethod().getReturnType()), + getterForProperty(propertyDescriptor, beanClass), + setterForProperty(propertyDescriptor, beanClass)); + } else { + StaticAttribute.Builder attributeBuilder = + staticAttributeBuilder(propertyDescriptor, beanClass, metaTableSchemaCache); + + Optional attributeConverter = + createAttributeConverterFromAnnotation(propertyDescriptor); + attributeConverter.ifPresent(attributeBuilder::attributeConverter); + + addTagsToAttribute(attributeBuilder, propertyDescriptor); + attributes.add(attributeBuilder.build()); + } + }); + + builder.attributes(attributes); + + return builder.build(); + } + + private static List createConverterProvidersFromAnnotation(DynamoDbBean dynamoDbBean) { + Class[] providerClasses = dynamoDbBean.converterProviders(); + + return Arrays.stream(providerClasses) + .map(c -> (AttributeConverterProvider) newObjectSupplierForClass(c).get()) + .collect(Collectors.toList()); + } + + private static StaticAttribute.Builder staticAttributeBuilder(PropertyDescriptor propertyDescriptor, + Class beanClass, + MetaTableSchemaCache metaTableSchemaCache) { + + Type propertyType = propertyDescriptor.getReadMethod().getGenericReturnType(); + EnhancedType propertyTypeToken = convertTypeToEnhancedType(propertyType, metaTableSchemaCache); + return StaticAttribute.builder(beanClass, propertyTypeToken) + .name(attributeNameForProperty(propertyDescriptor)) + .getter(getterForProperty(propertyDescriptor, beanClass)) + .setter(setterForProperty(propertyDescriptor, beanClass)); + } + + /** + * Converts a {@link Type} to an {@link EnhancedType}. 
Usually {@link EnhancedType#of} is capable of doing this all + * by itself, but for the BeanTableSchema we want to detect if a parameterized class is being passed without a + * converter that is actually another annotated class in which case we want to capture its schema and add it to the + * EnhancedType. Unfortunately this means we have to duplicate some of the recursive Type parsing that + * EnhancedClient otherwise does all by itself. + */ + @SuppressWarnings("unchecked") + private static EnhancedType convertTypeToEnhancedType(Type type, MetaTableSchemaCache metaTableSchemaCache) { + Class clazz = null; + + if (type instanceof ParameterizedType) { + ParameterizedType parameterizedType = (ParameterizedType) type; + Type rawType = parameterizedType.getRawType(); + + if (List.class.equals(rawType)) { + return EnhancedType.listOf(convertTypeToEnhancedType(parameterizedType.getActualTypeArguments()[0], + metaTableSchemaCache)); + } + + if (Map.class.equals(rawType)) { + return EnhancedType.mapOf(EnhancedType.of(parameterizedType.getActualTypeArguments()[0]), + convertTypeToEnhancedType(parameterizedType.getActualTypeArguments()[1], + metaTableSchemaCache)); + } + + if (rawType instanceof Class) { + clazz = (Class) rawType; + } + } else if (type instanceof Class) { + clazz = (Class) type; + } + + if (clazz != null) { + if (clazz.getAnnotation(DynamoDbImmutable.class) != null) { + return EnhancedType.documentOf( + (Class) clazz, + (TableSchema) ImmutableTableSchema.recursiveCreate(clazz, metaTableSchemaCache)); + } else if (clazz.getAnnotation(DynamoDbBean.class) != null) { + return EnhancedType.documentOf( + (Class) clazz, + (TableSchema) BeanTableSchema.recursiveCreate(clazz, metaTableSchemaCache)); + } + } + + return EnhancedType.of(type); + } + + private static Optional createAttributeConverterFromAnnotation( + PropertyDescriptor propertyDescriptor) { + DynamoDbConvertedBy attributeConverterBean = + getPropertyAnnotation(propertyDescriptor, DynamoDbConvertedBy.class); + Optional> optionalClass = Optional.ofNullable(attributeConverterBean) + .map(DynamoDbConvertedBy::value); + return optionalClass.map(clazz -> (AttributeConverter) newObjectSupplierForClass(clazz).get()); + } + + /** + * This method scans all the annotations on a property and looks for a meta-annotation of + * {@link BeanTableSchemaAttributeTag}. If the meta-annotation is found, it attempts to create + * an annotation tag based on a standard named static method + * of the class that tag has been annotated with passing in the original property annotation as an argument. 
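As an illustration of that contract, a hypothetical custom annotation; @PartitionKeyAlias and PartitionKeyAliasTag are invented names for this sketch and simply reuse the primary-partition-key tag from StaticAttributeTags:

    // Hypothetical sketch of the meta-annotation contract described above.
    @Target(ElementType.METHOD)
    @Retention(RetentionPolicy.RUNTIME)
    @BeanTableSchemaAttributeTag(PartitionKeyAliasTag.class)
    public @interface PartitionKeyAlias {
    }

    public final class PartitionKeyAliasTag {
        // BeanTableSchema locates this static method reflectively and invokes it with the original annotation.
        public static StaticAttributeTag attributeTagFor(PartitionKeyAlias annotation) {
            return StaticAttributeTags.primaryPartitionKey();
        }
    }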
+ */ + private static void addTagsToAttribute(StaticAttribute.Builder attributeBuilder, + PropertyDescriptor propertyDescriptor) { + + propertyAnnotations(propertyDescriptor).forEach(annotation -> { + BeanTableSchemaAttributeTag beanTableSchemaAttributeTag = + annotation.annotationType().getAnnotation(BeanTableSchemaAttributeTag.class); + + if (beanTableSchemaAttributeTag != null) { + Class tagClass = beanTableSchemaAttributeTag.value(); + + Method tagMethod; + try { + tagMethod = tagClass.getDeclaredMethod(ATTRIBUTE_TAG_STATIC_SUPPLIER_NAME, + annotation.annotationType()); + } catch (NoSuchMethodException e) { + throw new RuntimeException( + String.format("Could not find a static method named '%s' on class '%s' that returns " + + "an AttributeTag for annotation '%s'", ATTRIBUTE_TAG_STATIC_SUPPLIER_NAME, + tagClass, annotation.annotationType()), e); + } + + if (!Modifier.isStatic(tagMethod.getModifiers())) { + throw new RuntimeException( + String.format("Could not find a static method named '%s' on class '%s' that returns " + + "an AttributeTag for annotation '%s'", ATTRIBUTE_TAG_STATIC_SUPPLIER_NAME, + tagClass, annotation.annotationType())); + } + + StaticAttributeTag staticAttributeTag; + try { + staticAttributeTag = (StaticAttributeTag) tagMethod.invoke(null, annotation); + } catch (IllegalAccessException | InvocationTargetException e) { + throw new RuntimeException( + String.format("Could not invoke method to create AttributeTag for annotation '%s' on class " + + "'%s'.", annotation.annotationType(), tagClass), e); + } + + attributeBuilder.addTag(staticAttributeTag); + } + }); + } + + private static Supplier newObjectSupplierForClass(Class clazz) { + try { + return ObjectConstructor.create(clazz, clazz.getConstructor()); + } catch (NoSuchMethodException e) { + throw new IllegalArgumentException( + String.format("Class '%s' appears to have no default constructor thus cannot be used with the " + + "BeanTableSchema", clazz), e); + } + } + + private static Function getterForProperty(PropertyDescriptor propertyDescriptor, Class beanClass) { + Method readMethod = propertyDescriptor.getReadMethod(); + return BeanAttributeGetter.create(beanClass, readMethod); + } + + private static BiConsumer setterForProperty(PropertyDescriptor propertyDescriptor, + Class beanClass) { + Method writeMethod = propertyDescriptor.getWriteMethod(); + return BeanAttributeSetter.create(beanClass, writeMethod); + } + + private static String attributeNameForProperty(PropertyDescriptor propertyDescriptor) { + DynamoDbAttribute dynamoDbAttribute = getPropertyAnnotation(propertyDescriptor, DynamoDbAttribute.class); + if (dynamoDbAttribute != null) { + return dynamoDbAttribute.value(); + } + + return propertyDescriptor.getName(); + } + + private static boolean isMappableProperty(PropertyDescriptor propertyDescriptor) { + return propertyDescriptor.getReadMethod() != null + && propertyDescriptor.getWriteMethod() != null + && getPropertyAnnotation(propertyDescriptor, DynamoDbIgnore.class) == null; + } + + private static R getPropertyAnnotation(PropertyDescriptor propertyDescriptor, + Class annotationType) { + R getterAnnotation = propertyDescriptor.getReadMethod().getAnnotation(annotationType); + R setterAnnotation = propertyDescriptor.getWriteMethod().getAnnotation(annotationType); + + if (getterAnnotation != null) { + return getterAnnotation; + } + + return setterAnnotation; + } + + private static List propertyAnnotations(PropertyDescriptor propertyDescriptor) { + return 
Stream.concat(Arrays.stream(propertyDescriptor.getReadMethod().getAnnotations()), + Arrays.stream(propertyDescriptor.getWriteMethod().getAnnotations())) + .collect(Collectors.toList()); + } +} + diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java new file mode 100644 index 000000000000..dff524336bcf --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java @@ -0,0 +1,256 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.BiConsumer; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.ResolvedImmutableAttribute; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.StaticAttributeType; +import software.amazon.awssdk.utils.Validate; + +/** + * A class that represents an attribute on an mapped immutable item. A {@link StaticImmutableTableSchema} composes + * multiple attributes that map to a common immutable item class. + *

    + * The recommended way to use this class is by calling + * {@link software.amazon.awssdk.enhanced.dynamodb.TableSchema#builder(Class, Class)}. + * Example: + * {@code + * TableSchema.builder(Customer.class, Customer.Builder.class) + * .addAttribute(String.class, + * a -> a.name("customer_name").getter(Customer::name).setter(Customer.Builder::name)) + * // ... + * .build(); + * } + *

    + * It's also possible to construct this class on its own using the static builder. Example: + * {@code + * ImmutableAttribute customerNameAttribute = + * ImmutableAttribute.builder(Customer.class, Customer.Builder.class, String.class) + * .name("customer_name") + * .getter(Customer::name) + * .setter(Customer.Builder::name) + * .build(); + * } + * @param the class of the immutable item this attribute maps into. + * @param the class of the builder for the immutable item this attribute maps into. + * @param the class that the value of this attribute converts to. + */ +@SdkPublicApi +public final class ImmutableAttribute { + private final String name; + private final Function getter; + private final BiConsumer setter; + private final Collection tags; + private final EnhancedType type; + private final AttributeConverter attributeConverter; + + private ImmutableAttribute(Builder builder) { + this.name = Validate.paramNotNull(builder.name, "name"); + this.getter = Validate.paramNotNull(builder.getter, "getter"); + this.setter = Validate.paramNotNull(builder.setter, "setter"); + this.tags = builder.tags == null ? Collections.emptyList() : Collections.unmodifiableCollection(builder.tags); + this.type = Validate.paramNotNull(builder.type, "type"); + this.attributeConverter = builder.attributeConverter; + } + + /** + * Constructs a new builder for this class using supplied types. + * @param itemClass The class of the immutable item that this attribute composes. + * @param builderClass The class of the builder for the immutable item that this attribute composes. + * @param attributeType A {@link EnhancedType} that represents the type of the value this attribute stores. + * @return A new typed builder for an attribute. + */ + public static Builder builder(Class itemClass, + Class builderClass, + EnhancedType attributeType) { + return new Builder<>(attributeType); + } + + /** + * Constructs a new builder for this class using supplied types. + * @param itemClass The class of the item that this attribute composes. + * @param builderClass The class of the builder for the immutable item that this attribute composes. + * @param attributeClass A class that represents the type of the value this attribute stores. + * @return A new typed builder for an attribute. + */ + public static Builder builder(Class itemClass, + Class builderClass, + Class attributeClass) { + return new Builder<>(EnhancedType.of(attributeClass)); + } + + /** + * The name of this attribute + */ + public String name() { + return this.name; + } + + /** + * A function that can get the value of this attribute from a modelled immutable item it composes. + */ + public Function getter() { + return this.getter; + } + + /** + * A function that can set the value of this attribute on a builder for the immutable modelled item it composes. + */ + public BiConsumer setter() { + return this.setter; + } + + /** + * A collection of {@link StaticAttributeTag} associated with this attribute. + */ + public Collection tags() { + return this.tags; + } + + /** + * A {@link EnhancedType} that represents the type of the value this attribute stores. + */ + public EnhancedType type() { + return this.type; + } + + /** + * A custom {@link AttributeConverter} that will be used to convert this attribute. + * If no custom converter was provided, the value will be null. 
+ * @see Builder#attributeConverter + */ + public AttributeConverter attributeConverter() { + return this.attributeConverter; + } + + /** + * Converts an instance of this class to a {@link Builder} that can be used to modify and reconstruct it. + */ + public Builder toBuilder() { + return new Builder(this.type).name(this.name) + .getter(this.getter) + .setter(this.setter) + .tags(this.tags) + .attributeConverter(this.attributeConverter); + } + + + ResolvedImmutableAttribute resolve(AttributeConverterProvider attributeConverterProvider) { + return ResolvedImmutableAttribute.create(this, + StaticAttributeType.create(converterFrom(attributeConverterProvider))); + } + + private AttributeConverter converterFrom(AttributeConverterProvider attributeConverterProvider) { + return (attributeConverter != null) ? attributeConverter : attributeConverterProvider.converterFor(type); + } + + /** + * A typed builder for {@link ImmutableAttribute}. + * @param the class of the item this attribute maps into. + * @param the class that the value of this attribute converts to. + */ + public static final class Builder { + private final EnhancedType type; + private String name; + private Function getter; + private BiConsumer setter; + private List tags; + private AttributeConverter attributeConverter; + + private Builder(EnhancedType type) { + this.type = type; + } + + /** + * The name of this attribute + */ + public Builder name(String name) { + this.name = name; + return this; + } + + /** + * A function that can get the value of this attribute from a modelled item it composes. + */ + public Builder getter(Function getter) { + this.getter = getter; + return this; + } + + /** + * A function that can set the value of this attribute on a modelled item it composes. + */ + public Builder setter(BiConsumer setter) { + this.setter = setter; + return this; + } + + /** + * A collection of {@link StaticAttributeTag} associated with this attribute. Overwrites any existing tags. + */ + public Builder tags(Collection tags) { + this.tags = new ArrayList<>(tags); + return this; + } + + /** + * A collection of {@link StaticAttributeTag} associated with this attribute. Overwrites any existing tags. + */ + public Builder tags(StaticAttributeTag... tags) { + this.tags = Arrays.asList(tags); + return this; + } + + /** + * Associates a single {@link StaticAttributeTag} with this attribute. Adds to any existing tags. + */ + public Builder addTag(StaticAttributeTag tag) { + if (this.tags == null) { + this.tags = new ArrayList<>(); + } + + this.tags.add(tag); + return this; + } + + /** + * An {@link AttributeConverter} for the attribute type ({@link EnhancedType}), that can convert this attribute. + * It takes precedence over any converter for this type provided by the table schema + * {@link AttributeConverterProvider}. + */ + public Builder attributeConverter(AttributeConverter attributeConverter) { + this.attributeConverter = attributeConverter; + return this; + } + + /** + * Builds a {@link StaticAttributeTag} from the values stored in this builder. 
+ */ + public ImmutableAttribute build() { + return new ImmutableAttribute<>(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableTableSchema.java new file mode 100644 index 000000000000..c791f4f7f0f9 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableTableSchema.java @@ -0,0 +1,391 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import java.lang.annotation.Annotation; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.immutable.ImmutableInfo; +import software.amazon.awssdk.enhanced.dynamodb.internal.immutable.ImmutableIntrospector; +import software.amazon.awssdk.enhanced.dynamodb.internal.immutable.ImmutablePropertyDescriptor; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.BeanAttributeGetter; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.BeanAttributeSetter; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.MetaTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.MetaTableSchemaCache; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.ObjectConstructor; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.ObjectGetterMethod; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.StaticGetterMethod; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.BeanTableSchemaAttributeTag; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbAttribute; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbConvertedBy; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbFlatten; +import 
software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; + +/** + * Implementation of {@link TableSchema} that builds a table schema based on properties and annotations of an immutable + * class with an associated builder class. Example: + *

    + * 
    + * {@literal @}DynamoDbImmutable(builder = Customer.Builder.class)
    + * public class Customer {
    + *     {@literal @}DynamoDbPartitionKey
    + *     public String accountId() { ... }
    + *
    + *     {@literal @}DynamoDbSortKey
    + *     public int subId() { ... }
    + *
    + *     // Defines a GSI (customers_by_name) with a partition key of 'name'
    + *     {@literal @}DynamoDbSecondaryPartitionKey(indexNames = "customers_by_name")
    + *     public String name() { ... }
    + *
    + *     // Defines an LSI (customers_by_date) with a sort key of 'createdDate' and also declares the
    + *     // same attribute as a sort key for the GSI named 'customers_by_name'
    + *     {@literal @}DynamoDbSecondarySortKey(indexNames = {"customers_by_date", "customers_by_name"})
    + *     public Instant createdDate() { ... }
    + *
    + *     // Not required to be an inner-class, but builders often are
    + *     public static final class Builder {
    + *         public Builder accountId(String accountId) { ... };
    + *         public Builder subId(int subId) { ... };
    + *         public Builder name(String name) { ... };
    + *         public Builder createdDate(Instant createdDate) { ... };
    + *
    + *         public Customer build() { ... };
    + *     }
    + * }
    + * 
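A hedged usage sketch for the immutable class above; enhancedClient, the table name 'customers', and an accessible no-argument Customer.Builder constructor are assumptions, not part of this change:

    // Usage sketch only; 'enhancedClient' and the table name are assumed.
    ImmutableTableSchema<Customer> customerSchema = ImmutableTableSchema.create(Customer.class);
    DynamoDbTable<Customer> customerTable = enhancedClient.table("customers", customerSchema);

    Customer customer = new Customer.Builder()
            .accountId("account-123")
            .subId(1)
            .name("Jane Doe")
            .createdDate(Instant.now())
            .build();
    customerTable.putItem(customer);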
    + * + * Creating an {@link ImmutableTableSchema} is a moderately expensive operation, and should be performed sparingly. This is + * usually done once at application startup. + * + * @param The type of object that this {@link TableSchema} maps to. + */ +@SdkPublicApi +public final class ImmutableTableSchema extends WrappedTableSchema> { + private static final String ATTRIBUTE_TAG_STATIC_SUPPLIER_NAME = "attributeTagFor"; + + private ImmutableTableSchema(StaticImmutableTableSchema wrappedTableSchema) { + super(wrappedTableSchema); + } + + /** + * Scans an immutable class and builds an {@link ImmutableTableSchema} from it that can be used with the + * {@link DynamoDbEnhancedClient}. + * + * Creating an {@link ImmutableTableSchema} is a moderately expensive operation, and should be performed sparingly. This is + * usually done once at application startup. + * + * @param immutableClass The annotated immutable class to build the table schema from. + * @param The immutable class type. + * @return An initialized {@link ImmutableTableSchema} + */ + public static ImmutableTableSchema create(Class immutableClass) { + return create(immutableClass, new MetaTableSchemaCache()); + } + + private static ImmutableTableSchema create(Class immutableClass, + MetaTableSchemaCache metaTableSchemaCache) { + // Fetch or create a new reference to this yet-to-be-created TableSchema in the cache + MetaTableSchema metaTableSchema = metaTableSchemaCache.getOrCreate(immutableClass); + + ImmutableTableSchema newTableSchema = + new ImmutableTableSchema<>(createStaticImmutableTableSchema(immutableClass, metaTableSchemaCache)); + metaTableSchema.initialize(newTableSchema); + return newTableSchema; + } + + // Called when creating an immutable TableSchema recursively. Utilizes the MetaTableSchema cache to stop infinite + // recursion + static TableSchema recursiveCreate(Class immutableClass, MetaTableSchemaCache metaTableSchemaCache) { + Optional> metaTableSchema = metaTableSchemaCache.get(immutableClass); + + // If we get a cache hit... 
+ if (metaTableSchema.isPresent()) { + // Either: use the cached concrete TableSchema if we have one + if (metaTableSchema.get().isInitialized()) { + return metaTableSchema.get().concreteTableSchema(); + } + + // Or: return the uninitialized MetaTableSchema as this must be a recursive reference and it will be + // initialized later as the chain completes + return metaTableSchema.get(); + } + + // Otherwise: cache doesn't know about this class; create a new one from scratch + return create(immutableClass, metaTableSchemaCache); + + } + + private static StaticImmutableTableSchema createStaticImmutableTableSchema( + Class immutableClass, MetaTableSchemaCache metaTableSchemaCache) { + ImmutableInfo immutableInfo = ImmutableIntrospector.getImmutableInfo(immutableClass); + Class builderClass = immutableInfo.builderClass(); + return createStaticImmutableTableSchema(immutableClass, builderClass, immutableInfo, metaTableSchemaCache); + } + + private static StaticImmutableTableSchema createStaticImmutableTableSchema( + Class immutableClass, + Class builderClass, + ImmutableInfo immutableInfo, + MetaTableSchemaCache metaTableSchemaCache) { + + Supplier newBuilderSupplier = newObjectSupplier(immutableInfo, builderClass); + Function buildFunction = ObjectGetterMethod.create(builderClass, immutableInfo.buildMethod()); + + StaticImmutableTableSchema.Builder builder = + StaticImmutableTableSchema.builder(immutableClass, builderClass) + .newItemBuilder(newBuilderSupplier, buildFunction); + + builder.attributeConverterProviders( + createConverterProvidersFromAnnotation(immutableClass.getAnnotation(DynamoDbImmutable.class))); + + List> attributes = new ArrayList<>(); + + immutableInfo.propertyDescriptors() + .forEach(propertyDescriptor -> { + DynamoDbFlatten dynamoDbFlatten = getPropertyAnnotation(propertyDescriptor, DynamoDbFlatten.class); + + if (dynamoDbFlatten != null) { + builder.flatten(TableSchema.fromClass(propertyDescriptor.getter().getReturnType()), + getterForProperty(propertyDescriptor, immutableClass), + setterForProperty(propertyDescriptor, builderClass)); + } else { + ImmutableAttribute.Builder attributeBuilder = + immutableAttributeBuilder(propertyDescriptor, + immutableClass, + builderClass, + metaTableSchemaCache); + + Optional attributeConverter = + createAttributeConverterFromAnnotation(propertyDescriptor); + attributeConverter.ifPresent(attributeBuilder::attributeConverter); + + addTagsToAttribute(attributeBuilder, propertyDescriptor); + attributes.add(attributeBuilder.build()); + } + }); + + builder.attributes(attributes); + + return builder.build(); + } + + private static List createConverterProvidersFromAnnotation( + DynamoDbImmutable dynamoDbImmutable) { + + Class[] providerClasses = dynamoDbImmutable.converterProviders(); + + return Arrays.stream(providerClasses) + .map(c -> (AttributeConverterProvider) newObjectSupplierForClass(c).get()) + .collect(Collectors.toList()); + } + + private static ImmutableAttribute.Builder immutableAttributeBuilder( + ImmutablePropertyDescriptor propertyDescriptor, + Class immutableClass, Class builderClass, + MetaTableSchemaCache metaTableSchemaCache) { + + Type propertyType = propertyDescriptor.getter().getGenericReturnType(); + EnhancedType propertyTypeToken = convertTypeToEnhancedType(propertyType, metaTableSchemaCache); + return ImmutableAttribute.builder(immutableClass, builderClass, propertyTypeToken) + .name(attributeNameForProperty(propertyDescriptor)) + .getter(getterForProperty(propertyDescriptor, immutableClass)) + 
.setter(setterForProperty(propertyDescriptor, builderClass)); + } + + /** + * Converts a {@link Type} to an {@link EnhancedType}. Usually {@link EnhancedType#of} is capable of doing this all + * by itself, but for the ImmutableTableSchema we want to detect if a parameterized class is being passed without a + * converter that is actually another annotated class in which case we want to capture its schema and add it to the + * EnhancedType. Unfortunately this means we have to duplicate some of the recursive Type parsing that + * EnhancedClient otherwise does all by itself. + */ + @SuppressWarnings("unchecked") + private static EnhancedType convertTypeToEnhancedType(Type type, MetaTableSchemaCache metaTableSchemaCache) { + Class clazz = null; + + if (type instanceof ParameterizedType) { + ParameterizedType parameterizedType = (ParameterizedType) type; + Type rawType = parameterizedType.getRawType(); + + if (List.class.equals(rawType)) { + return EnhancedType.listOf(convertTypeToEnhancedType(parameterizedType.getActualTypeArguments()[0], + metaTableSchemaCache)); + } + + if (Map.class.equals(rawType)) { + return EnhancedType.mapOf(EnhancedType.of(parameterizedType.getActualTypeArguments()[0]), + convertTypeToEnhancedType(parameterizedType.getActualTypeArguments()[1], + metaTableSchemaCache)); + } + + if (rawType instanceof Class) { + clazz = (Class) rawType; + } + } else if (type instanceof Class) { + clazz = (Class) type; + } + + if (clazz != null) { + if (clazz.getAnnotation(DynamoDbImmutable.class) != null) { + return EnhancedType.documentOf( + (Class) clazz, + (TableSchema) ImmutableTableSchema.recursiveCreate(clazz, metaTableSchemaCache)); + } else if (clazz.getAnnotation(DynamoDbBean.class) != null) { + return EnhancedType.documentOf( + (Class) clazz, + (TableSchema) BeanTableSchema.recursiveCreate(clazz, metaTableSchemaCache)); + } + } + + return EnhancedType.of(type); + } + + private static Optional createAttributeConverterFromAnnotation( + ImmutablePropertyDescriptor propertyDescriptor) { + DynamoDbConvertedBy attributeConverterBean = + getPropertyAnnotation(propertyDescriptor, DynamoDbConvertedBy.class); + Optional> optionalClass = Optional.ofNullable(attributeConverterBean) + .map(DynamoDbConvertedBy::value); + return optionalClass.map(clazz -> (AttributeConverter) newObjectSupplierForClass(clazz).get()); + } + + /** + * This method scans all the annotations on a property and looks for a meta-annotation of + * {@link BeanTableSchemaAttributeTag}. If the meta-annotation is found, it attempts to create + * an annotation tag based on a standard named static method + * of the class that tag has been annotated with passing in the original property annotation as an argument. 
+ */ + private static void addTagsToAttribute(ImmutableAttribute.Builder attributeBuilder, + ImmutablePropertyDescriptor propertyDescriptor) { + + propertyAnnotations(propertyDescriptor).forEach(annotation -> { + BeanTableSchemaAttributeTag beanTableSchemaAttributeTag = + annotation.annotationType().getAnnotation(BeanTableSchemaAttributeTag.class); + + if (beanTableSchemaAttributeTag != null) { + Class tagClass = beanTableSchemaAttributeTag.value(); + + Method tagMethod; + try { + tagMethod = tagClass.getDeclaredMethod(ATTRIBUTE_TAG_STATIC_SUPPLIER_NAME, + annotation.annotationType()); + } catch (NoSuchMethodException e) { + throw new RuntimeException( + String.format("Could not find a static method named '%s' on class '%s' that returns " + + "an AttributeTag for annotation '%s'", ATTRIBUTE_TAG_STATIC_SUPPLIER_NAME, + tagClass, annotation.annotationType()), e); + } + + if (!Modifier.isStatic(tagMethod.getModifiers())) { + throw new RuntimeException( + String.format("Could not find a static method named '%s' on class '%s' that returns " + + "an AttributeTag for annotation '%s'", ATTRIBUTE_TAG_STATIC_SUPPLIER_NAME, + tagClass, annotation.annotationType())); + } + + StaticAttributeTag staticAttributeTag; + try { + staticAttributeTag = (StaticAttributeTag) tagMethod.invoke(null, annotation); + } catch (IllegalAccessException | InvocationTargetException e) { + throw new RuntimeException( + String.format("Could not invoke method to create AttributeTag for annotation '%s' on class " + + "'%s'.", annotation.annotationType(), tagClass), e); + } + + attributeBuilder.addTag(staticAttributeTag); + } + }); + } + + private static Supplier newObjectSupplier(ImmutableInfo immutableInfo, Class builderClass) { + if (immutableInfo.staticBuilderMethod().isPresent()) { + return StaticGetterMethod.create(immutableInfo.staticBuilderMethod().get()); + } + + return newObjectSupplierForClass(builderClass); + } + + private static Supplier newObjectSupplierForClass(Class clazz) { + try { + return ObjectConstructor.create(clazz, clazz.getConstructor()); + } catch (NoSuchMethodException e) { + throw new IllegalArgumentException( + String.format("Builder class '%s' appears to have no default constructor thus cannot be used with " + + "the ImmutableTableSchema", clazz), e); + } + } + + private static Function getterForProperty(ImmutablePropertyDescriptor propertyDescriptor, + Class immutableClass) { + Method readMethod = propertyDescriptor.getter(); + return BeanAttributeGetter.create(immutableClass, readMethod); + } + + private static BiConsumer setterForProperty(ImmutablePropertyDescriptor propertyDescriptor, + Class builderClass) { + Method writeMethod = propertyDescriptor.setter(); + return BeanAttributeSetter.create(builderClass, writeMethod); + } + + private static String attributeNameForProperty(ImmutablePropertyDescriptor propertyDescriptor) { + DynamoDbAttribute dynamoDbAttribute = getPropertyAnnotation(propertyDescriptor, DynamoDbAttribute.class); + if (dynamoDbAttribute != null) { + return dynamoDbAttribute.value(); + } + + return propertyDescriptor.name(); + } + + private static R getPropertyAnnotation(ImmutablePropertyDescriptor propertyDescriptor, + Class annotationType) { + R getterAnnotation = propertyDescriptor.getter().getAnnotation(annotationType); + R setterAnnotation = propertyDescriptor.setter().getAnnotation(annotationType); + + if (getterAnnotation != null) { + return getterAnnotation; + } + + return setterAnnotation; + } + + private static List propertyAnnotations(ImmutablePropertyDescriptor 
propertyDescriptor) { + return Stream.concat(Arrays.stream(propertyDescriptor.getter().getAnnotations()), + Arrays.stream(propertyDescriptor.setter().getAnnotations())) + .collect(Collectors.toList()); + } +} + diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java new file mode 100644 index 000000000000..68f9efca58c3 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java @@ -0,0 +1,218 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import java.util.Collection; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; + +/** + * A class that represents an attribute that can be read from and written to an mapped item. A {@link StaticTableSchema} + * composes multiple attributes that map to a common item class. + *

    + * The recommended way to use this class is by calling {@link StaticTableSchema.Builder#addAttribute(Class, Consumer)}. + * Example: + *

    {@code
    + * StaticTableSchema.builder()
    + *                  .addAttribute(String.class,
    + *                                a -> a.name("customer_name").getter(Customer::getName).setter(Customer::setName))
    + *                  // ...
    + *                  .build();
    + * }
    + *

    + * It's also possible to construct this class on its own using the static builder. Example: + *

    {@code
    + * StaticAttribute customerNameAttribute =
    + *     StaticAttribute.builder(Customer.class, String.class)
    + *                    .name("customer_name")
    + *                    .getter(Customer::getName)
    + *                    .setter(Customer::setName)
    + *                    .build();
    + * }
    + * 
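A slightly fuller hedged sketch combining both forms with a key tag; Customer here is a hypothetical POJO with conventional getters and setters, and the partition-key tag comes from StaticAttributeTags (added later in this change):

    // Sketch only; Customer and its accessors are hypothetical.
    StaticTableSchema<Customer> customerSchema =
        StaticTableSchema.builder(Customer.class)
                         .newItemSupplier(Customer::new)
                         .addAttribute(String.class, a -> a.name("account_id")
                                                           .getter(Customer::getAccountId)
                                                           .setter(Customer::setAccountId)
                                                           .tags(StaticAttributeTags.primaryPartitionKey()))
                         .addAttribute(String.class, a -> a.name("customer_name")
                                                           .getter(Customer::getName)
                                                           .setter(Customer::setName))
                         .build();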
    + * @param the class of the item this attribute maps into. + * @param the class that the value of this attribute converts to. + */ +@SdkPublicApi +public final class StaticAttribute { + private final ImmutableAttribute delegateAttribute; + + private StaticAttribute(Builder builder) { + this.delegateAttribute = builder.delegateBuilder.build(); + } + + /** + * Constructs a new builder for this class using supplied types. + * @param itemClass The class of the item that this attribute composes. + * @param attributeType A {@link EnhancedType} that represents the type of the value this attribute stores. + * @return A new typed builder for an attribute. + */ + public static Builder builder(Class itemClass, EnhancedType attributeType) { + return new Builder<>(itemClass, attributeType); + } + + /** + * Constructs a new builder for this class using supplied types. + * @param itemClass The class of the item that this attribute composes. + * @param attributeClass A class that represents the type of the value this attribute stores. + * @return A new typed builder for an attribute. + */ + public static Builder builder(Class itemClass, Class attributeClass) { + return new Builder<>(itemClass, EnhancedType.of(attributeClass)); + } + + /** + * The name of this attribute + */ + public String name() { + return this.delegateAttribute.name(); + } + + /** + * A function that can get the value of this attribute from a modelled item it composes. + */ + public Function getter() { + return this.delegateAttribute.getter(); + } + + /** + * A function that can set the value of this attribute on a modelled item it composes. + */ + public BiConsumer setter() { + return this.delegateAttribute.setter(); + } + + /** + * A collection of {@link StaticAttributeTag} associated with this attribute. + */ + public Collection tags() { + return this.delegateAttribute.tags(); + } + + /** + * A {@link EnhancedType} that represents the type of the value this attribute stores. + */ + public EnhancedType type() { + return this.delegateAttribute.type(); + } + + /** + * A custom {@link AttributeConverter} that will be used to convert this attribute. + * If no custom converter was provided, the value will be null. + * @see Builder#attributeConverter + */ + public AttributeConverter attributeConverter() { + return this.delegateAttribute.attributeConverter(); + } + + /** + * Converts an instance of this class to a {@link Builder} that can be used to modify and reconstruct it. + */ + public Builder toBuilder() { + return new Builder<>(this.delegateAttribute.toBuilder()); + } + + ImmutableAttribute toImmutableAttribute() { + return this.delegateAttribute; + } + + /** + * A typed builder for {@link StaticAttribute}. + * @param the class of the item this attribute maps into. + * @param the class that the value of this attribute converts to. + */ + public static final class Builder { + private final ImmutableAttribute.Builder delegateBuilder; + + private Builder(Class itemClass, EnhancedType type) { + this.delegateBuilder = ImmutableAttribute.builder(itemClass, itemClass, type); + } + + private Builder(ImmutableAttribute.Builder delegateBuilder) { + this.delegateBuilder = delegateBuilder; + } + + /** + * The name of this attribute + */ + public Builder name(String name) { + this.delegateBuilder.name(name); + return this; + } + + /** + * A function that can get the value of this attribute from a modelled item it composes. 
+ */ + public Builder getter(Function getter) { + this.delegateBuilder.getter(getter); + return this; + } + + /** + * A function that can set the value of this attribute on a modelled item it composes. + */ + public Builder setter(BiConsumer setter) { + this.delegateBuilder.setter(setter); + return this; + } + + /** + * A collection of {@link StaticAttributeTag} associated with this attribute. Overwrites any existing tags. + */ + public Builder tags(Collection tags) { + this.delegateBuilder.tags(tags); + return this; + } + + /** + * A collection of {@link StaticAttributeTag} associated with this attribute. Overwrites any existing tags. + */ + public Builder tags(StaticAttributeTag... tags) { + this.delegateBuilder.tags(tags); + return this; + } + + /** + * Associates a single {@link StaticAttributeTag} with this attribute. Adds to any existing tags. + */ + public Builder addTag(StaticAttributeTag tag) { + this.delegateBuilder.addTag(tag); + return this; + } + + /** + * An {@link AttributeConverter} for the attribute type ({@link EnhancedType}), that can convert this attribute. + * It takes precedence over any converter for this type provided by the table schema + * {@link AttributeConverterProvider}. + */ + public Builder attributeConverter(AttributeConverter attributeConverter) { + this.delegateBuilder.attributeConverter(attributeConverter); + return this; + } + + /** + * Builds a {@link StaticAttributeTag} from the values stored in this builder. + */ + public StaticAttribute build() { + return new StaticAttribute<>(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTag.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTag.java new file mode 100644 index 000000000000..d4a3ec9a922c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTag.java @@ -0,0 +1,46 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; + +/** + * Interface for a tag that can be applied to any {@link StaticAttribute}. When a tagged attribute is added to a + * {@link software.amazon.awssdk.enhanced.dynamodb.TableSchema}, the table metadata stored on the schema will be updated + * by calling the {@link #modifyMetadata(String, AttributeValueType)} method for every tag associated with the + * attribute. + *
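+ * Because this interface has a single abstract method, a custom tag can also be written as a lambda; a minimal,
+ * hypothetical sketch that records the tagged attribute name as custom metadata via
+ * {@link StaticTableMetadata.Builder#addCustomMetadataObject(String, Object)}:
+ * {@code
+ * StaticAttributeTag auditedAttribute = (attributeName, attributeValueType) ->
+ *     metadataBuilder -> metadataBuilder.addCustomMetadataObject("audited:" + attributeName, Boolean.TRUE);
+ * }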

    + * Common implementations of this interface that can be used to declare indices in your schema can be found in + * {@link StaticAttributeTags}. + */ +@SdkPublicApi +public interface StaticAttributeTag { + /** + * A function that modifies an existing {@link StaticTableSchema.Builder} when this tag is applied to a specific + * attribute. This will be used by the {@link StaticTableSchema} to capture all the metadata associated with + * tagged attributes when constructing the table schema. + * + * @param attributeName The name of the attribute this tag has been applied to. + * @param attributeValueType The type of the attribute this tag has been applied to. This can be used for + * validation, for instance if you have an attribute tag that should only be associated + * with a string. + * @return a consumer that modifies an existing {@link StaticTableSchema.Builder}. + */ + Consumer modifyMetadata(String attributeName, + AttributeValueType attributeValueType); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTags.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTags.java new file mode 100644 index 000000000000..6bd6255a4caf --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTags.java @@ -0,0 +1,159 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import java.util.Collection; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.UpdateBehaviorTag; + +/** + * Common implementations of {@link StaticAttributeTag}. These tags can be used to mark your attributes as primary or + * secondary keys in your {@link StaticTableSchema} definitions. + */ +@SdkPublicApi +public final class StaticAttributeTags { + private static final StaticAttributeTag PRIMARY_PARTITION_KEY_SINGLETON = + new KeyAttributeTag((tableMetadataBuilder, attribute) -> + tableMetadataBuilder.addIndexPartitionKey(TableMetadata.primaryIndexName(), + attribute.getAttributeName(), + attribute.getAttributeValueType())); + private static final StaticAttributeTag PRIMARY_SORT_KEY_SINGLETON = + new KeyAttributeTag((tableMetadataBuilder, attribute) -> + tableMetadataBuilder.addIndexSortKey(TableMetadata.primaryIndexName(), + attribute.getAttributeName(), + attribute.getAttributeValueType())); + + private StaticAttributeTags() { + } + + /** + * Marks an attribute as being the primary partition key of the table it participates in. Only one attribute can + * be marked this way in a given table schema. 
+ */ + public static StaticAttributeTag primaryPartitionKey() { + return PRIMARY_PARTITION_KEY_SINGLETON; + } + + /** + * Marks an attribute as being the primary sort key of the table it participates in. Only one attribute can be + * marked this way in a given table schema. + */ + public static StaticAttributeTag primarySortKey() { + return PRIMARY_SORT_KEY_SINGLETON; + } + + /** + * Marks an attribute as being a partition key for a secondary index. + * @param indexName The name of the index this key participates in. + */ + public static StaticAttributeTag secondaryPartitionKey(String indexName) { + return new KeyAttributeTag((tableMetadataBuilder, attribute) -> + tableMetadataBuilder.addIndexPartitionKey(indexName, + attribute.getAttributeName(), + attribute.getAttributeValueType())); + } + + /** + * Marks an attribute as being a partition key for multiple secondary indices. + * @param indexNames The names of the indices this key participates in. + */ + public static StaticAttributeTag secondaryPartitionKey(Collection indexNames) { + return new KeyAttributeTag( + (tableMetadataBuilder, attribute) -> + indexNames.forEach( + indexName -> tableMetadataBuilder.addIndexPartitionKey(indexName, + attribute.getAttributeName(), + attribute.getAttributeValueType()))); + } + + /** + * Marks an attribute as being a sort key for a secondary index. + * @param indexName The name of the index this key participates in. + */ + public static StaticAttributeTag secondarySortKey(String indexName) { + return new KeyAttributeTag((tableMetadataBuilder, attribute) -> + tableMetadataBuilder.addIndexSortKey(indexName, + attribute.getAttributeName(), + attribute.getAttributeValueType())); + } + + /** + * Marks an attribute as being a sort key for multiple secondary indices. + * @param indexNames The names of the indices this key participates in. + */ + public static StaticAttributeTag secondarySortKey(Collection indexNames) { + return new KeyAttributeTag( + (tableMetadataBuilder, attribute) -> + indexNames.forEach( + indexName -> tableMetadataBuilder.addIndexSortKey(indexName, + attribute.getAttributeName(), + attribute.getAttributeValueType()))); + } + + /** + * Specifies the behavior when this attribute is updated as part of an 'update' operation such as UpdateItem. See + * documentation of {@link UpdateBehavior} for details on the different behaviors supported and the default + * behavior. 
+ * @param updateBehavior The {@link UpdateBehavior} to be applied to this attribute + */ + public static StaticAttributeTag updateBehavior(UpdateBehavior updateBehavior) { + return UpdateBehaviorTag.fromUpdateBehavior(updateBehavior); + } + + private static class KeyAttributeTag implements StaticAttributeTag { + private final BiConsumer tableMetadataKeySetter; + + private KeyAttributeTag(BiConsumer tableMetadataKeySetter) { + this.tableMetadataKeySetter = tableMetadataKeySetter; + } + + @Override + public Consumer modifyMetadata(String attributeName, + AttributeValueType attributeValueType) { + return metadata -> { + if (attributeValueType.scalarAttributeType() == null) { + throw new IllegalArgumentException( + String.format("Attribute '%s' of type %s is not a suitable type to be used as a key.", + attributeName, attributeValueType.name())); + } + + tableMetadataKeySetter.accept(metadata, new AttributeAndType(attributeName, attributeValueType)); + }; + } + } + + private static class AttributeAndType { + private final String attributeName; + private final AttributeValueType attributeValueType; + + private AttributeAndType(String attributeName, AttributeValueType attributeValueType) { + this.attributeName = attributeName; + this.attributeValueType = attributeValueType; + } + + private String getAttributeName() { + return attributeName; + } + + private AttributeValueType getAttributeValueType() { + return attributeValueType; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java new file mode 100644 index 000000000000..896b4901b59b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java @@ -0,0 +1,572 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import static java.util.Collections.unmodifiableMap; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.isNullAttributeValue; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterProviderResolver; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.ResolvedImmutableAttribute; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * Implementation of {@link TableSchema} that builds a schema for immutable data objects based on directly declared + * attributes. Just like {@link StaticTableSchema} which is the equivalent implementation for mutable objects, this is + * the most direct, and thus fastest, implementation of {@link TableSchema}. + *

    + * Example using a fictional 'Customer' immutable data item class that has an inner builder class named 'Builder':- + * {@code + * static final TableSchema CUSTOMER_TABLE_SCHEMA = + * StaticImmutableTableSchema.builder(Customer.class, Customer.Builder.class) + * .newItemBuilder(Customer::builder, Customer.Builder::build) + * .addAttribute(String.class, a -> a.name("account_id") + * .getter(Customer::accountId) + * .setter(Customer.Builder::accountId) + * .tags(primaryPartitionKey())) + * .addAttribute(Integer.class, a -> a.name("sub_id") + * .getter(Customer::subId) + * .setter(Customer.Builder::subId) + * .tags(primarySortKey())) + * .addAttribute(String.class, a -> a.name("name") + * .getter(Customer::name) + * .setter(Customer.Builder::name) + * .tags(secondaryPartitionKey("customers_by_name"))) + * .addAttribute(Instant.class, a -> a.name("created_date") + * .getter(Customer::createdDate) + * .setter(Customer.Builder::createdDate) + * .tags(secondarySortKey("customers_by_date"), + * secondarySortKey("customers_by_name"))) + * .build(); + * } + */ +@SdkPublicApi +public final class StaticImmutableTableSchema implements TableSchema { + private final List> attributeMappers; + private final Supplier newBuilderSupplier; + private final Function buildItemFunction; + private final Map> indexedMappers; + private final StaticTableMetadata tableMetadata; + private final EnhancedType itemType; + private final AttributeConverterProvider attributeConverterProvider; + private final Map> indexedFlattenedMappers; + private final List attributeNames; + + private static class FlattenedMapper { + private final Function otherItemGetter; + private final BiConsumer otherItemSetter; + private final TableSchema otherItemTableSchema; + + private FlattenedMapper(Function otherItemGetter, + BiConsumer otherItemSetter, + TableSchema otherItemTableSchema) { + this.otherItemGetter = otherItemGetter; + this.otherItemSetter = otherItemSetter; + this.otherItemTableSchema = otherItemTableSchema; + + + } + + public TableSchema getOtherItemTableSchema() { + return otherItemTableSchema; + } + + private B mapToItem(B thisBuilder, + Supplier thisBuilderConstructor, + Map attributeValues) { + T1 otherItem = this.otherItemTableSchema.mapToItem(attributeValues); + + if (otherItem != null) { + if (thisBuilder == null) { + thisBuilder = thisBuilderConstructor.get(); + } + + this.otherItemSetter.accept(thisBuilder, otherItem); + } + + return thisBuilder; + } + + private Map itemToMap(T item, boolean ignoreNulls) { + T1 otherItem = this.otherItemGetter.apply(item); + + if (otherItem == null) { + return Collections.emptyMap(); + } + + return this.otherItemTableSchema.itemToMap(otherItem, ignoreNulls); + } + + private AttributeValue attributeValue(T item, String attributeName) { + T1 otherItem = this.otherItemGetter.apply(item); + + if (otherItem == null) { + return null; + } + + AttributeValue attributeValue = this.otherItemTableSchema.attributeValue(otherItem, attributeName); + return isNullAttributeValue(attributeValue) ? null : attributeValue; + } + } + + private StaticImmutableTableSchema(Builder builder) { + StaticTableMetadata.Builder tableMetadataBuilder = StaticTableMetadata.builder(); + + this.attributeConverterProvider = + ConverterProviderResolver.resolveProviders(builder.attributeConverterProviders); + + // Resolve declared attributes and find converters for them + Stream> attributesStream = builder.attributes == null ? 
+ Stream.empty() : builder.attributes.stream().map(a -> a.resolve(this.attributeConverterProvider)); + + // Merge resolved declared attributes + List> mutableAttributeMappers = new ArrayList<>(); + Map> mutableIndexedMappers = new HashMap<>(); + Set mutableAttributeNames = new LinkedHashSet<>(); + Stream.concat(attributesStream, builder.additionalAttributes.stream()).forEach( + resolvedAttribute -> { + String attributeName = resolvedAttribute.attributeName(); + + if (mutableAttributeNames.contains(attributeName)) { + throw new IllegalArgumentException( + "Attempt to add an attribute to a mapper that already has one with the same name. " + + "[Attribute name: " + attributeName + "]"); + } + + mutableAttributeNames.add(attributeName); + mutableAttributeMappers.add(resolvedAttribute); + mutableIndexedMappers.put(attributeName, resolvedAttribute); + + // Merge in metadata associated with attribute + tableMetadataBuilder.mergeWith(resolvedAttribute.tableMetadata()); + } + ); + + Map> mutableFlattenedMappers = new HashMap<>(); + builder.flattenedMappers.forEach( + flattenedMapper -> { + flattenedMapper.otherItemTableSchema.attributeNames().forEach( + attributeName -> { + if (mutableAttributeNames.contains(attributeName)) { + throw new IllegalArgumentException( + "Attempt to add an attribute to a mapper that already has one with the same name. " + + "[Attribute name: " + attributeName + "]"); + } + + mutableAttributeNames.add(attributeName); + mutableFlattenedMappers.put(attributeName, flattenedMapper); + } + ); + + tableMetadataBuilder.mergeWith(flattenedMapper.getOtherItemTableSchema().tableMetadata()); + } + ); + + // Apply table-tags to table metadata + if (builder.tags != null) { + builder.tags.forEach(staticTableTag -> staticTableTag.modifyMetadata().accept(tableMetadataBuilder)); + } + + this.attributeMappers = Collections.unmodifiableList(mutableAttributeMappers); + this.indexedMappers = Collections.unmodifiableMap(mutableIndexedMappers); + this.attributeNames = Collections.unmodifiableList(new ArrayList<>(mutableAttributeNames)); + this.indexedFlattenedMappers = Collections.unmodifiableMap(mutableFlattenedMappers); + this.newBuilderSupplier = builder.newBuilderSupplier; + this.buildItemFunction = builder.buildItemFunction; + this.tableMetadata = tableMetadataBuilder.build(); + this.itemType = EnhancedType.of(builder.itemClass); + } + + /** + * Creates a builder for a {@link StaticImmutableTableSchema} typed to specific immutable data item class. + * @param itemClass The immutable data item class object that the {@link StaticImmutableTableSchema} is to map to. + * @param builderClass The builder class object that can be used to construct instances of the immutable data item. + * @return A newly initialized builder + */ + public static Builder builder(Class itemClass, Class builderClass) { + return new Builder<>(itemClass, builderClass); + } + + /** + * Builder for a {@link StaticImmutableTableSchema} + * @param The immutable data item class object that the {@link StaticImmutableTableSchema} is to map to. + * @param The builder class object that can be used to construct instances of the immutable data item. 
+ */ + public static final class Builder { + private final Class itemClass; + private final Class builderClass; + private final List> additionalAttributes = new ArrayList<>(); + private final List> flattenedMappers = new ArrayList<>(); + + private List> attributes; + private Supplier newBuilderSupplier; + private Function buildItemFunction; + private List tags; + private List attributeConverterProviders = + Collections.singletonList(ConverterProviderResolver.defaultConverterProvider()); + + private Builder(Class itemClass, Class builderClass) { + this.itemClass = itemClass; + this.builderClass = builderClass; + } + + /** + * Methods used to construct a new instance of the immutable data object. + * @param newBuilderMethod A method to create a new builder for the immutable data object. + * @param buildMethod A method on the builder to build a new instance of the immutable data object. + */ + public Builder newItemBuilder(Supplier newBuilderMethod, Function buildMethod) { + this.newBuilderSupplier = newBuilderMethod; + this.buildItemFunction = buildMethod; + return this; + } + + /** + * A list of attributes that can be mapped between the data item object and the database record that are to + * be associated with the schema. Will overwrite any existing attributes. + */ + @SafeVarargs + public final Builder attributes(ImmutableAttribute... immutableAttributes) { + this.attributes = Arrays.asList(immutableAttributes); + return this; + } + + /** + * A list of attributes that can be mapped between the data item object and the database record that are to + * be associated with the schema. Will overwrite any existing attributes. + */ + public Builder attributes(Collection> immutableAttributes) { + this.attributes = new ArrayList<>(immutableAttributes); + return this; + } + + /** + * Adds a single attribute to the table schema that can be mapped between the data item object and the database + * record. + */ + public Builder addAttribute(EnhancedType attributeType, + Consumer> immutableAttribute) { + + ImmutableAttribute.Builder builder = + ImmutableAttribute.builder(itemClass, builderClass, attributeType); + immutableAttribute.accept(builder); + return addAttribute(builder.build()); + } + + /** + * Adds a single attribute to the table schema that can be mapped between the data item object and the database + * record. + */ + public Builder addAttribute(Class attributeClass, + Consumer> immutableAttribute) { + return addAttribute(EnhancedType.of(attributeClass), immutableAttribute); + } + + /** + * Adds a single attribute to the table schema that can be mapped between the data item object and the database + * record. + */ + public Builder addAttribute(ImmutableAttribute immutableAttribute) { + if (this.attributes == null) { + this.attributes = new ArrayList<>(); + } + + this.attributes.add(immutableAttribute); + return this; + } + + /** + * Associate one or more {@link StaticTableTag} with this schema. See documentation on the tags themselves to + * understand what each one does. This method will overwrite any existing table tags. + */ + public Builder tags(StaticTableTag... staticTableTags) { + this.tags = Arrays.asList(staticTableTags); + return this; + } + + /** + * Associate one or more {@link StaticTableTag} with this schema. See documentation on the tags themselves to + * understand what each one does. This method will overwrite any existing table tags. 
+ */ + public Builder tags(Collection staticTableTags) { + this.tags = new ArrayList<>(staticTableTags); + return this; + } + + /** + * Associates a {@link StaticTableTag} with this schema. See documentation on the tags themselves to understand + * what each one does. This method will add the tag to the list of existing table tags. + */ + public Builder addTag(StaticTableTag staticTableTag) { + if (this.tags == null) { + this.tags = new ArrayList<>(); + } + + this.tags.add(staticTableTag); + return this; + } + + /** + * Flattens all the attributes defined in another {@link TableSchema} into the database record this schema + * maps to. Functions to get and set an object that the flattened schema maps to is required. + */ + public Builder flatten(TableSchema otherTableSchema, + Function otherItemGetter, + BiConsumer otherItemSetter) { + if (otherTableSchema.isAbstract()) { + throw new IllegalArgumentException("Cannot flatten an abstract TableSchema. You must supply a concrete " + + "TableSchema that is able to create items"); + } + + FlattenedMapper flattenedMapper = + new FlattenedMapper<>(otherItemGetter, otherItemSetter, otherTableSchema); + this.flattenedMappers.add(flattenedMapper); + return this; + } + + /** + * Extends the {@link StaticImmutableTableSchema} of a super-class, effectively rolling all the attributes modelled by + * the super-class into the {@link StaticImmutableTableSchema} of the sub-class. The extended immutable table schema + * must be using a builder class that is also a super-class of the builder being used for the current immutable + * table schema. + */ + public Builder extend(StaticImmutableTableSchema superTableSchema) { + Stream> attributeStream = + upcastingTransformForAttributes(superTableSchema.attributeMappers); + attributeStream.forEach(this.additionalAttributes::add); + return this; + } + + /** + * Specifies the {@link AttributeConverterProvider}s to use with the table schema. + * The list of attribute converter providers must provide {@link AttributeConverter}s for all types used + * in the schema. The attribute converter providers will be loaded in the strict order they are supplied here. + *

+ * Calling this method will override the default attribute converter provider + * {@link DefaultAttributeConverterProvider}, which provides standard converters for most primitive + * and common Java types, so that provider must be included in the supplied list if it is to be + * used. Providing an empty list here will cause no providers to get loaded. + *

    + * Adding one custom attribute converter provider and using the default as fallback: + * {@code + * builder.attributeConverterProviders(customAttributeConverter, AttributeConverterProvider.defaultProvider()) + * } + * + * @param attributeConverterProviders a list of attribute converter providers to use with the table schema + */ + public Builder attributeConverterProviders(AttributeConverterProvider... attributeConverterProviders) { + this.attributeConverterProviders = Arrays.asList(attributeConverterProviders); + return this; + } + + /** + * Specifies the {@link AttributeConverterProvider}s to use with the table schema. + * The list of attribute converter providers must provide {@link AttributeConverter}s for all types used + * in the schema. The attribute converter providers will be loaded in the strict order they are supplied here. + *

+ * Calling this method will override the default attribute converter provider + * {@link DefaultAttributeConverterProvider}, which provides standard converters + * for most primitive and common Java types, so that provider must be included in the supplied list if it is to be + * used. Providing an empty list here will cause no providers to get loaded. + *

    + * Adding one custom attribute converter provider and using the default as fallback: + * {@code + * List providers = new ArrayList<>( + * customAttributeConverter, + * AttributeConverterProvider.defaultProvider()); + * builder.attributeConverterProviders(providers); + * } + * + * @param attributeConverterProviders a list of attribute converter providers to use with the table schema + */ + public Builder attributeConverterProviders(List attributeConverterProviders) { + this.attributeConverterProviders = new ArrayList<>(attributeConverterProviders); + return this; + } + + + /** + * Builds a {@link StaticImmutableTableSchema} based on the values this builder has been configured with + */ + public StaticImmutableTableSchema build() { + return new StaticImmutableTableSchema<>(this); + } + + private static Stream> + upcastingTransformForAttributes(Collection> superAttributes) { + + return superAttributes.stream().map(attribute -> attribute.transform(x -> x, x -> x)); + } + } + + @Override + public StaticTableMetadata tableMetadata() { + return tableMetadata; + } + + @Override + public T mapToItem(Map attributeMap) { + // Lazily instantiate the builder once we have an attribute to write + B builder = null; + Map, Map> flattenedAttributeValuesMap = new LinkedHashMap<>(); + + for (Map.Entry entry : attributeMap.entrySet()) { + String key = entry.getKey(); + AttributeValue value = entry.getValue(); + + if (!isNullAttributeValue(value)) { + ResolvedImmutableAttribute attributeMapper = indexedMappers.get(key); + + if (attributeMapper != null) { + if (builder == null) { + builder = constructNewBuilder(); + } + + attributeMapper.updateItemMethod().accept(builder, value); + } else { + FlattenedMapper flattenedMapper = this.indexedFlattenedMappers.get(key); + + if (flattenedMapper != null) { + Map flattenedAttributeValues = + flattenedAttributeValuesMap.get(flattenedMapper); + + if (flattenedAttributeValues == null) { + flattenedAttributeValues = new HashMap<>(); + } + + flattenedAttributeValues.put(key, value); + flattenedAttributeValuesMap.put(flattenedMapper, flattenedAttributeValues); + } + } + } + } + + for (Map.Entry, Map> entry : + flattenedAttributeValuesMap.entrySet()) { + builder = entry.getKey().mapToItem(builder, this::constructNewBuilder, entry.getValue()); + } + + return builder == null ? 
null : buildItemFunction.apply(builder); + } + + @Override + public Map itemToMap(T item, boolean ignoreNulls) { + Map attributeValueMap = new HashMap<>(); + + attributeMappers.forEach(attributeMapper -> { + String attributeKey = attributeMapper.attributeName(); + AttributeValue attributeValue = attributeMapper.attributeGetterMethod().apply(item); + + if (!ignoreNulls || !isNullAttributeValue(attributeValue)) { + attributeValueMap.put(attributeKey, attributeValue); + } + }); + + indexedFlattenedMappers.forEach((name, flattenedMapper) -> { + attributeValueMap.putAll(flattenedMapper.itemToMap(item, ignoreNulls)); + }); + + return unmodifiableMap(attributeValueMap); + } + + @Override + public Map itemToMap(T item, Collection attributes) { + Map attributeValueMap = new HashMap<>(); + + attributes.forEach(key -> { + AttributeValue attributeValue = attributeValue(item, key); + + if (attributeValue == null || !isNullAttributeValue(attributeValue)) { + attributeValueMap.put(key, attributeValue); + } + }); + + return unmodifiableMap(attributeValueMap); + } + + @Override + public AttributeValue attributeValue(T item, String key) { + ResolvedImmutableAttribute attributeMapper = indexedMappers.get(key); + + if (attributeMapper == null) { + FlattenedMapper flattenedMapper = indexedFlattenedMappers.get(key); + + if (flattenedMapper == null) { + throw new IllegalArgumentException(String.format("TableSchema does not know how to retrieve requested " + + "attribute '%s' from mapped object.", key)); + } + + return flattenedMapper.attributeValue(item, key); + } + + AttributeValue attributeValue = attributeMapper.attributeGetterMethod().apply(item); + + return isNullAttributeValue(attributeValue) ? null : attributeValue; + } + + @Override + public EnhancedType itemType() { + return this.itemType; + } + + @Override + public List attributeNames() { + return this.attributeNames; + } + + @Override + public boolean isAbstract() { + return this.buildItemFunction == null; + } + + /** + * The table schema {@link AttributeConverterProvider}. + * @see Builder#attributeConverterProvider + */ + public AttributeConverterProvider attributeConverterProvider() { + return this.attributeConverterProvider; + } + + private B constructNewBuilder() { + if (newBuilderSupplier == null) { + throw new UnsupportedOperationException("An abstract TableSchema cannot be used to map a database record " + + "to a concrete object. Add a 'newItemBuilder' to the " + + "TableSchema to give it the ability to create mapped objects."); + } + + return newBuilderSupplier.get(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableMetadata.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableMetadata.java new file mode 100644 index 000000000000..e531be13d1e3 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableMetadata.java @@ -0,0 +1,319 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.IndexMetadata; +import software.amazon.awssdk.enhanced.dynamodb.KeyAttributeMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.StaticIndexMetadata; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.StaticKeyAttributeMetadata; +import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; + +/** + * Implementation of {@link TableMetadata} that can be constructed directly using literal values for metadata objects. + * This implementation is used by {@link StaticTableSchema} and associated interfaces such as {@link StaticAttributeTag} + * and {@link StaticTableTag} which permit manipulation of the table metadata. + */ +@SdkPublicApi +public final class StaticTableMetadata implements TableMetadata { + private final Map customMetadata; + private final Map indexByNameMap; + private final Map keyAttributes; + + private StaticTableMetadata(Builder builder) { + this.customMetadata = Collections.unmodifiableMap(builder.customMetadata); + this.indexByNameMap = Collections.unmodifiableMap(builder.indexByNameMap); + this.keyAttributes = Collections.unmodifiableMap(builder.keyAttributes); + } + + /** + * Create a new builder for this class + * @return A newly initialized {@link Builder} for building a {@link StaticTableMetadata} object. + */ + public static Builder builder() { + return new Builder(); + } + + @Override + public Optional customMetadataObject(String key, Class objectClass) { + Object genericObject = customMetadata.get(key); + + if (genericObject == null) { + return Optional.empty(); + } + + if (!objectClass.isAssignableFrom(genericObject.getClass())) { + throw new IllegalArgumentException("Attempt to retrieve a custom metadata object as a type that is not " + + "assignable for that object. Custom metadata key: " + key + "; " + + "requested object class: " + objectClass.getCanonicalName() + "; " + + "found object class: " + genericObject.getClass().getCanonicalName()); + } + + return Optional.of(objectClass.cast(genericObject)); + } + + @Override + public String indexPartitionKey(String indexName) { + IndexMetadata index = getIndex(indexName); + + if (!index.partitionKey().isPresent()) { + if (!TableMetadata.primaryIndexName().equals(indexName) && index.sortKey().isPresent()) { + // Local secondary index, use primary partition key + return primaryPartitionKey(); + } + + throw new IllegalArgumentException("Attempt to execute an operation against an index that requires a " + + "partition key without assigning a partition key to that index. 
" + + "Index name: " + indexName); + } + + return index.partitionKey().get().name(); + } + + @Override + public Optional indexSortKey(String indexName) { + IndexMetadata index = getIndex(indexName); + + return index.sortKey().map(KeyAttributeMetadata::name); + } + + @Override + public Collection indexKeys(String indexName) { + IndexMetadata index = getIndex(indexName); + + if (index.sortKey().isPresent()) { + if (!TableMetadata.primaryIndexName().equals(indexName) && !index.partitionKey().isPresent()) { + // Local secondary index, use primary index for partition key + return Collections.unmodifiableList(Arrays.asList(primaryPartitionKey(), index.sortKey().get().name())); + } + return Collections.unmodifiableList(Arrays.asList(index.partitionKey().get().name(), index.sortKey().get().name())); + } else { + return Collections.singletonList(index.partitionKey().get().name()); + } + } + + @Override + public Collection allKeys() { + return this.keyAttributes.keySet(); + } + + @Override + public Collection indices() { + return indexByNameMap.values(); + } + + @Override + public Map customMetadata() { + return this.customMetadata; + } + + @Override + public Collection keyAttributes() { + return this.keyAttributes.values(); + } + + private IndexMetadata getIndex(String indexName) { + IndexMetadata index = indexByNameMap.get(indexName); + + if (index == null) { + if (TableMetadata.primaryIndexName().equals(indexName)) { + throw new IllegalArgumentException("Attempt to execute an operation that requires a primary index " + + "without defining any primary key attributes in the table " + + "metadata."); + } else { + throw new IllegalArgumentException("Attempt to execute an operation that requires a secondary index " + + "without defining the index attributes in the table metadata. " + + "Index name: " + indexName); + } + } + + return index; + } + + @Override + public Optional scalarAttributeType(String keyAttribute) { + KeyAttributeMetadata key = this.keyAttributes.get(keyAttribute); + + if (key == null) { + throw new IllegalArgumentException("Key attribute '" + keyAttribute + "' not found in table metadata."); + } + + return Optional.ofNullable(key.attributeValueType().scalarAttributeType()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + StaticTableMetadata that = (StaticTableMetadata) o; + + if (customMetadata != null ? ! customMetadata.equals(that.customMetadata) : that.customMetadata != null) { + return false; + } + if (indexByNameMap != null ? ! indexByNameMap.equals(that.indexByNameMap) : that.indexByNameMap != null) { + return false; + } + return keyAttributes != null ? keyAttributes.equals(that.keyAttributes) : that.keyAttributes == null; + } + + @Override + public int hashCode() { + int result = customMetadata != null ? customMetadata.hashCode() : 0; + result = 31 * result + (indexByNameMap != null ? indexByNameMap.hashCode() : 0); + result = 31 * result + (keyAttributes != null ? keyAttributes.hashCode() : 0); + return result; + } + + /** + * Builder for {@link StaticTableMetadata} + */ + public static class Builder { + private final Map customMetadata = new LinkedHashMap<>(); + private final Map indexByNameMap = new LinkedHashMap<>(); + private final Map keyAttributes = new LinkedHashMap<>(); + + private Builder() { + } + + /** + * Builds an immutable instance of {@link StaticTableMetadata} from the values supplied to the builder. 
+ */ + public StaticTableMetadata build() { + return new StaticTableMetadata(this); + } + + /** + * Adds a single custom object to the metadata, keyed by a string. Attempting to add a metadata object with a + * key that matches one that has already been added will cause an exception to be thrown. + * @param key a string key that will be used to retrieve the custom metadata + * @param object an object that will be stored in the custom metadata map + * @throws IllegalArgumentException if the custom metadata map already contains an entry with the same key + */ + public Builder addCustomMetadataObject(String key, Object object) { + if (customMetadata.containsKey(key)) { + throw new IllegalArgumentException("Attempt to set a custom metadata object that has already been set. " + + "Custom metadata object key: " + key); + } + + customMetadata.put(key, object); + return this; + } + + /** + * Adds information about a partition key associated with a specific index. + * @param indexName the name of the index to associate the partition key with + * @param attributeName the name of the attribute that represents the partition key + * @param attributeValueType the {@link AttributeValueType} of the partition key + * @throws IllegalArgumentException if a partition key has already been defined for this index + */ + public Builder addIndexPartitionKey(String indexName, String attributeName, AttributeValueType attributeValueType) { + IndexMetadata index = indexByNameMap.get(indexName); + + if (index != null && index.partitionKey().isPresent()) { + throw new IllegalArgumentException("Attempt to set an index partition key that conflicts with an " + + "existing index partition key of the same name and index. Index " + + "name: " + indexName + "; attribute name: " + attributeName); + } + + KeyAttributeMetadata partitionKey = StaticKeyAttributeMetadata.create(attributeName, attributeValueType); + indexByNameMap.put(indexName, + StaticIndexMetadata.builderFrom(index).name(indexName).partitionKey(partitionKey).build()); + markAttributeAsKey(attributeName, attributeValueType); + return this; + } + + /** + * Adds information about a sort key associated with a specific index. + * @param indexName the name of the index to associate the sort key with + * @param attributeName the name of the attribute that represents the sort key + * @param attributeValueType the {@link AttributeValueType} of the sort key + * @throws IllegalArgumentException if a sort key has already been defined for this index + */ + public Builder addIndexSortKey(String indexName, String attributeName, AttributeValueType attributeValueType) { + IndexMetadata index = indexByNameMap.get(indexName); + + if (index != null && index.sortKey().isPresent()) { + throw new IllegalArgumentException("Attempt to set an index sort key that conflicts with an existing" + + " index sort key of the same name and index. Index name: " + + indexName + "; attribute name: " + attributeName); + } + + KeyAttributeMetadata sortKey = StaticKeyAttributeMetadata.create(attributeName, attributeValueType); + indexByNameMap.put(indexName, + StaticIndexMetadata.builderFrom(index).name(indexName).sortKey(sortKey).build()); + markAttributeAsKey(attributeName, attributeValueType); + return this; + } + + /** + * Declares a 'key-like' attribute that is not an actual DynamoDB key. These pseudo-keys can then be recognized + * by extensions and treated appropriately, often being protected from manipulations as those would alter the + * meaning of the record. 
One example usage of this is a 'versioned record attribute': although the version is + * not part of the primary key of the record, it effectively serves as such. + * @param attributeName the name of the attribute to mark as a pseudo-key + * @param attributeValueType the {@link AttributeValueType} of the pseudo-key + */ + public Builder markAttributeAsKey(String attributeName, AttributeValueType attributeValueType) { + KeyAttributeMetadata existing = keyAttributes.get(attributeName); + + if (existing != null && !existing.attributeValueType().equals(attributeValueType)) { + throw new IllegalArgumentException("Attempt to mark an attribute as a key with a different " + + "AttributeValueType than one that has already been recorded."); + } + + if (existing == null) { + keyAttributes.put(attributeName, StaticKeyAttributeMetadata.create(attributeName, attributeValueType)); + } + + return this; + } + + /** + * Package-private method to merge the contents of a constructed {@link TableMetadata} into this builder. + */ + Builder mergeWith(TableMetadata other) { + other.indices().forEach( + index -> { + index.partitionKey().ifPresent( + partitionKey -> addIndexPartitionKey(index.name(), + partitionKey.name(), + partitionKey.attributeValueType())); + + index.sortKey().ifPresent( + sortKey -> addIndexSortKey(index.name(), sortKey.name(), sortKey.attributeValueType()) + ); + }); + + other.customMetadata().forEach(this::addCustomMetadataObject); + other.keyAttributes().forEach(keyAttribute -> markAttributeAsKey(keyAttribute.name(), + keyAttribute.attributeValueType())); + return this; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java new file mode 100644 index 000000000000..7dfdcd1f5023 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java @@ -0,0 +1,272 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; + +/** + * Implementation of {@link TableSchema} that builds a schema based on directly declared attributes and methods to + * get and set those attributes. Just like {@link StaticImmutableTableSchema} which is the equivalent implementation for + * immutable objects, this is the most direct, and thus fastest, implementation of {@link TableSchema}. + *

    + * Example using a fictional 'Customer' data item class:- + *

    {@code
+ * static final TableSchema<Customer> CUSTOMER_TABLE_SCHEMA =
    + *      StaticTableSchema.builder(Customer.class)
    + *        .newItemSupplier(Customer::new)
    + *        .addAttribute(String.class, a -> a.name("account_id")
    + *                                          .getter(Customer::getAccountId)
    + *                                          .setter(Customer::setAccountId)
    + *                                          .tags(primaryPartitionKey()))
    + *        .addAttribute(Integer.class, a -> a.name("sub_id")
    + *                                           .getter(Customer::getSubId)
    + *                                           .setter(Customer::setSubId)
    + *                                           .tags(primarySortKey()))
    + *        .addAttribute(String.class, a -> a.name("name")
    + *                                          .getter(Customer::getName)
    + *                                          .setter(Customer::setName)
    + *                                          .tags(secondaryPartitionKey("customers_by_name")))
    + *        .addAttribute(Instant.class, a -> a.name("created_date")
    + *                                           .getter(Customer::getCreatedDate)
    + *                                           .setter(Customer::setCreatedDate)
    + *                                           .tags(secondarySortKey("customers_by_date"),
    + *                                                 secondarySortKey("customers_by_name")))
    + *        .build();
    + * }
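+ *
+ * Once built, the schema converts between modelled objects and their DynamoDB representation; a minimal sketch
+ * (reusing the fictional 'Customer' class and schema above):
+ * {@code
+ * Customer customer = new Customer();
+ * customer.setAccountId("a-1");
+ * customer.setSubId(1);
+ * customer.setName("Alice");
+ * Map<String, AttributeValue> itemMap = CUSTOMER_TABLE_SCHEMA.itemToMap(customer, true);
+ * Customer roundTripped = CUSTOMER_TABLE_SCHEMA.mapToItem(itemMap);
+ * }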
    + */ +@SdkPublicApi +public final class StaticTableSchema extends WrappedTableSchema> { + private StaticTableSchema(Builder builder) { + super(builder.delegateBuilder.build()); + } + + /** + * Creates a builder for a {@link StaticTableSchema} typed to specific data item class. + * @param itemClass The data item class object that the {@link StaticTableSchema} is to map to. + * @return A newly initialized builder + */ + public static Builder builder(Class itemClass) { + return new Builder<>(itemClass); + } + + /** + * Builder for a {@link StaticTableSchema} + * @param The data item type that the {@link StaticTableSchema} this builder will build is to map to. + */ + public static final class Builder { + private final StaticImmutableTableSchema.Builder delegateBuilder; + private final Class itemClass; + + private Builder(Class itemClass) { + this.delegateBuilder = StaticImmutableTableSchema.builder(itemClass, itemClass); + this.itemClass = itemClass; + } + + /** + * A function that can be used to create new instances of the data item class. + */ + public Builder newItemSupplier(Supplier newItemSupplier) { + this.delegateBuilder.newItemBuilder(newItemSupplier, Function.identity()); + return this; + } + + /** + * A list of attributes that can be mapped between the data item object and the database record that are to + * be associated with the schema. Will overwrite any existing attributes. + */ + @SafeVarargs + public final Builder attributes(StaticAttribute... staticAttributes) { + this.delegateBuilder.attributes(Arrays.stream(staticAttributes) + .map(StaticAttribute::toImmutableAttribute) + .collect(Collectors.toList())); + + return this; + } + + /** + * A list of attributes that can be mapped between the data item object and the database record that are to + * be associated with the schema. Will overwrite any existing attributes. + */ + public Builder attributes(Collection> staticAttributes) { + this.delegateBuilder.attributes(staticAttributes.stream() + .map(StaticAttribute::toImmutableAttribute) + .collect(Collectors.toList())); + return this; + } + + /** + * Adds a single attribute to the table schema that can be mapped between the data item object and the database + * record. + */ + public Builder addAttribute(EnhancedType attributeType, + Consumer> staticAttribute) { + StaticAttribute.Builder builder = StaticAttribute.builder(itemClass, attributeType); + staticAttribute.accept(builder); + this.delegateBuilder.addAttribute(builder.build().toImmutableAttribute()); + return this; + } + + /** + * Adds a single attribute to the table schema that can be mapped between the data item object and the database + * record. + */ + public Builder addAttribute(Class attributeClass, + Consumer> staticAttribute) { + StaticAttribute.Builder builder = StaticAttribute.builder(itemClass, attributeClass); + staticAttribute.accept(builder); + this.delegateBuilder.addAttribute(builder.build().toImmutableAttribute()); + return this; + } + + /** + * Adds a single attribute to the table schema that can be mapped between the data item object and the database + * record. + */ + public Builder addAttribute(StaticAttribute staticAttribute) { + this.delegateBuilder.addAttribute(staticAttribute.toImmutableAttribute()); + return this; + } + + /** + * Flattens all the attributes defined in another {@link StaticTableSchema} into the database record this schema + * maps to. Functions to get and set an object that the flattened schema maps to is required. 
+ */ + public Builder flatten(TableSchema otherTableSchema, + Function otherItemGetter, + BiConsumer otherItemSetter) { + this.delegateBuilder.flatten(otherTableSchema, otherItemGetter, otherItemSetter); + return this; + } + + /** + * Extends the {@link StaticTableSchema} of a super-class, effectively rolling all the attributes modelled by + * the super-class into the {@link StaticTableSchema} of the sub-class. + */ + public Builder extend(StaticTableSchema superTableSchema) { + this.delegateBuilder.extend(superTableSchema.toImmutableTableSchema()); + return this; + } + + /** + * Associate one or more {@link StaticTableTag} with this schema. See documentation on the tags themselves to + * understand what each one does. This method will overwrite any existing table tags. + */ + public Builder tags(StaticTableTag... staticTableTags) { + this.delegateBuilder.tags(staticTableTags); + return this; + } + + /** + * Associate one or more {@link StaticTableTag} with this schema. See documentation on the tags themselves to + * understand what each one does. This method will overwrite any existing table tags. + */ + public Builder tags(Collection staticTableTags) { + this.delegateBuilder.tags(staticTableTags); + return this; + } + + /** + * Associates a {@link StaticTableTag} with this schema. See documentation on the tags themselves to understand + * what each one does. This method will add the tag to the list of existing table tags. + */ + public Builder addTag(StaticTableTag staticTableTag) { + this.delegateBuilder.addTag(staticTableTag); + return this; + } + + /** + * Specifies the {@link AttributeConverterProvider}s to use with the table schema. + * The list of attribute converter providers must provide {@link AttributeConverter}s for all types used + * in the schema. The attribute converter providers will be loaded in the strict order they are supplied here. + *

+ * Calling this method will override the default attribute converter provider + * {@link DefaultAttributeConverterProvider}, which provides standard converters for most primitive + * and common Java types, so that provider must be included in the supplied list if it is to be + * used. Providing an empty list here will cause no providers to get loaded. + *

    + * Adding one custom attribute converter provider and using the default as fallback: + * {@code + * builder.attributeConverterProviders(customAttributeConverter, AttributeConverterProvider.defaultProvider()) + * } + * + * @param attributeConverterProviders a list of attribute converter providers to use with the table schema + */ + public Builder attributeConverterProviders(AttributeConverterProvider... attributeConverterProviders) { + this.delegateBuilder.attributeConverterProviders(attributeConverterProviders); + return this; + } + + /** + * Specifies the {@link AttributeConverterProvider}s to use with the table schema. + * The list of attribute converter providers must provide {@link AttributeConverter}s for all types used + * in the schema. The attribute converter providers will be loaded in the strict order they are supplied here. + *

+ * Calling this method will override the default attribute converter provider + * {@link DefaultAttributeConverterProvider}, which provides standard converters + * for most primitive and common Java types, so that provider must be included in the supplied list if it is to be + * used. Providing an empty list here will cause no providers to get loaded. + *

    + * Adding one custom attribute converter provider and using the default as fallback: + * {@code + * List providers = new ArrayList<>( + * customAttributeConverter, + * AttributeConverterProvider.defaultProvider()); + * builder.attributeConverterProviders(providers); + * } + * + * @param attributeConverterProviders a list of attribute converter providers to use with the table schema + */ + public Builder attributeConverterProviders(List attributeConverterProviders) { + this.delegateBuilder.attributeConverterProviders(attributeConverterProviders); + return this; + } + + + /** + * Builds a {@link StaticTableSchema} based on the values this builder has been configured with + */ + public StaticTableSchema build() { + return new StaticTableSchema<>(this); + } + + } + + private StaticImmutableTableSchema toImmutableTableSchema() { + return delegateTableSchema(); + } + + /** + * The table schema {@link AttributeConverterProvider}. + * @see Builder#attributeConverterProvider + */ + public AttributeConverterProvider attributeConverterProvider() { + return delegateTableSchema().attributeConverterProvider(); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableTag.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableTag.java new file mode 100644 index 000000000000..f8657e0649d8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableTag.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * Interface for a tag that can be applied to any {@link StaticTableSchema}. When the table schema is instantiated, + * the table metadata stored on the schema will be updated by calling the {@link #modifyMetadata()} method for every tag + * associated with the table. + */ +@SdkPublicApi +public interface StaticTableTag { + /** + * A function that modifies an existing {@link StaticTableSchema.Builder} when this tag is applied to a table. This + * will be used by the {@link StaticTableSchema} to capture all the metadata associated with tags that have been + * applied to the table. + * + * @return a consumer that modifies an existing {@link StaticTableSchema.Builder}. + */ + Consumer modifyMetadata(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/UpdateBehavior.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/UpdateBehavior.java new file mode 100644 index 000000000000..f4d78deb7d49 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/UpdateBehavior.java @@ -0,0 +1,46 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * Update behaviors that can be applied to individual attributes. This behavior will only apply to 'update' operations + * such as UpdateItem, and not 'put' operations such as PutItem. + *

    + * If an update behavior is not specified for an attribute, the default behavior of {@link #WRITE_ALWAYS} will be + * applied. + */ +@SdkPublicApi +public enum UpdateBehavior { + /** + * Always overwrite with the new value if one is provided, or remove any existing value if a null value is + * provided and 'ignoreNulls' is set to false. + *

    + * This is the default behavior applied to all attributes unless otherwise specified. + */ + WRITE_ALWAYS, + + /** + * Write the new value if there is no existing value in the persisted record or a new record is being written, + * otherwise leave the existing value. + *

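+ * For illustration only, this behavior can be selected on a mapped bean attribute through the
+ * DynamoDbUpdateBehavior annotation (the getter below is a hypothetical bean property):
+ * <pre>
+ * {@code
+ * @DynamoDbUpdateBehavior(UpdateBehavior.WRITE_IF_NOT_EXISTS)
+ * public Instant getCreatedDate() { return createdDate; }
+ * }
+ * </pre>
+ *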
    + * IMPORTANT: If a null value is provided and 'ignoreNulls' is set to false, the attribute + * will always be removed from the persisted record as DynamoDb does not support conditional removal with this + * method. + */ + WRITE_IF_NOT_EXISTS +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/WrappedTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/WrappedTableSchema.java new file mode 100644 index 000000000000..5b0a0f91744b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/WrappedTableSchema.java @@ -0,0 +1,91 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * Base class for any {@link TableSchema} implementation that wraps and acts as a different {@link TableSchema} + * implementation. + * @param The parameterized type of the {@link TableSchema} being proxied. + * @param The actual type of the {@link TableSchema} being proxied. + */ +@SdkPublicApi +public abstract class WrappedTableSchema> implements TableSchema { + private final R delegateTableSchema; + + /** + * Standard constructor. + * @param delegateTableSchema An instance of {@link TableSchema} to be wrapped and proxied by this class. + */ + protected WrappedTableSchema(R delegateTableSchema) { + this.delegateTableSchema = delegateTableSchema; + } + + /** + * The delegate table schema that is wrapped and proxied by this class. 
+ */ + protected R delegateTableSchema() { + return this.delegateTableSchema; + } + + @Override + public T mapToItem(Map attributeMap) { + return this.delegateTableSchema.mapToItem(attributeMap); + } + + @Override + public Map itemToMap(T item, boolean ignoreNulls) { + return this.delegateTableSchema.itemToMap(item, ignoreNulls); + } + + @Override + public Map itemToMap(T item, Collection attributes) { + return this.delegateTableSchema.itemToMap(item, attributes); + } + + @Override + public AttributeValue attributeValue(T item, String attributeName) { + return this.delegateTableSchema.attributeValue(item, attributeName); + } + + @Override + public TableMetadata tableMetadata() { + return this.delegateTableSchema.tableMetadata(); + } + + @Override + public EnhancedType itemType() { + return this.delegateTableSchema.itemType(); + } + + @Override + public List attributeNames() { + return this.delegateTableSchema.attributeNames(); + } + + @Override + public boolean isAbstract() { + return this.delegateTableSchema.isAbstract(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/BeanTableSchemaAttributeTag.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/BeanTableSchemaAttributeTag.java new file mode 100644 index 000000000000..ed09ae10e6ba --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/BeanTableSchemaAttributeTag.java @@ -0,0 +1,45 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.BeanTableSchemaAttributeTags; +import software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTag; + +/** + * This meta-annotation is not used directly in DynamoDb beans, it is used to annotate other annotations that are + * used with DynamoDb beans. You should use this meta-annotation if you are creating new annotations for the + * BeanTableSchema. + * + * Meta-annotation for BeanTableSchema annotations that are used to assign an {@link StaticAttributeTag} to a property on the + * bean. When an annotation that is annotated with this meta-annotation is found on a property being scanned by the + * {@link BeanTableSchema} then a static method + * named 'attributeTagFor' will be invoked passing in a single argument which is the property annotation itself. + * + * See {@link BeanTableSchemaAttributeTags} for an example of how to implement the {@link StaticAttributeTag} suppliers for + * bean mapper annotations. 
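+ *
+ * For illustration only, a custom annotation wired into the bean mapper this way might look like the following
+ * sketch (the annotation and the tag-supplier class below are hypothetical and not part of the SDK):
+ * <pre>
+ * {@code
+ * @BeanTableSchemaAttributeTag(CustomAttributeTags.class)
+ * @Target({ElementType.METHOD})
+ * @Retention(RetentionPolicy.RUNTIME)
+ * public @interface CustomPartitionKey {
+ * }
+ *
+ * public class CustomAttributeTags {
+ *     // Looked up reflectively by BeanTableSchema when it finds @CustomPartitionKey on a bean property
+ *     public static StaticAttributeTag attributeTagFor(CustomPartitionKey annotation) {
+ *         return StaticAttributeTags.primaryPartitionKey();
+ *     }
+ * }
+ * }
+ * </pre>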
+ */ +@Target({ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@SdkPublicApi +public @interface BeanTableSchemaAttributeTag { + Class value(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbAttribute.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbAttribute.java new file mode 100644 index 000000000000..50d78b3f962f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbAttribute.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * Used to explicitly designate a field or getter or setter to participate as an attribute in the mapped database + * object with a custom name. A string value must be specified to specify a different name for the attribute than the + * mapper would automatically infer using a naming strategy. + */ +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@SdkPublicApi +public @interface DynamoDbAttribute { + /** + * The attribute name that this property should map to in the DynamoDb record. The value is case sensitive. + */ + String value(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java new file mode 100644 index 000000000000..cf84af9013e5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema; + +/** + * Class level annotation that identifies this class as being a DynamoDb mappable entity. Any class used to initialize + * a {@link BeanTableSchema} must have this annotation. If a class is used as an attribute type within another + * annotated DynamoDb class, either as a document or flattened with the {@link DynamoDbFlatten} annotation, it will also + * require this annotation to work automatically without an explicit {@link AttributeConverter}. + *

    + * Attribute Converter Providers
    + * Using {@link AttributeConverterProvider}s is optional and, if used, the supplied provider supersedes the default + * converter provided by the table schema. + *

    + * Note: + *

+ * <ul>
+ *     <li>The converter(s) must provide {@link AttributeConverter}s for all types used in the schema.</li>
+ *     <li>The table schema DefaultAttributeConverterProvider provides standard converters for most primitive
+ *     and common Java types. Use custom AttributeConverterProviders when you have specific needs for type conversion
+ *     that the defaults do not cover.</li>
+ *     <li>If you provide a list of attribute converter providers, you can add DefaultAttributeConverterProvider
+ *     to the end of the list to fall back on the defaults.</li>
+ *     <li>Providing an empty list {} will cause no providers to get loaded.</li>
+ * </ul>
+ *
+ * Example using attribute converter providers with one custom provider and the default provider:
+ * <pre>
+ * {@code
+ * @DynamoDbBean(converterProviders = {CustomAttributeConverter.class, DefaultAttributeConverterProvider.class})
+ * }
+ * </pre>
+ *
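+ * A fuller sketch of an annotated bean (the class, attribute and converter provider names below are hypothetical):
+ * <pre>
+ * {@code
+ * @DynamoDbBean(converterProviders = {CustomAttributeConverter.class, DefaultAttributeConverterProvider.class})
+ * public class Customer {
+ *     private String id;
+ *
+ *     @DynamoDbPartitionKey
+ *     public String getId() { return id; }
+ *     public void setId(String id) { this.id = id; }
+ * }
+ * }
+ * </pre>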
    + */ +@Target({ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@SdkPublicApi +public @interface DynamoDbBean { + Class[] converterProviders() + default { DefaultAttributeConverterProvider.class }; +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbConvertedBy.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbConvertedBy.java new file mode 100644 index 000000000000..8c4d15e739b3 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbConvertedBy.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; + +/** + * Associates a custom {@link AttributeConverter} with this attribute. This annotation is optional and takes + * precedence over any converter for this type provided by the table schema {@link AttributeConverterProvider} + * if it exists. Use custom AttributeConverterProvider when you have specific needs for type conversion + * that the defaults do not cover. + */ +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@SdkPublicApi +public @interface DynamoDbConvertedBy { + Class value(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbFlatten.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbFlatten.java new file mode 100644 index 000000000000..006142b721e6 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbFlatten.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * This annotation is used to flatten all the attributes of a separate DynamoDb bean that is stored in the current bean + * object and add them as top level attributes to the record that is read and written to the database. The target bean + * to flatten must be specified as part of this annotation. + */ +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@SdkPublicApi +public @interface DynamoDbFlatten { + /** + * @deprecated This is no longer used, the class type of the attribute will be used instead. + */ + @Deprecated + Class dynamoDbBeanClass() default Object.class; +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbIgnore.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbIgnore.java new file mode 100644 index 000000000000..79c34616cce2 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbIgnore.java @@ -0,0 +1,31 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * Opts this attribute out of participating in the table schema. It will be completely ignored by the mapper. + */ +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@SdkPublicApi +public @interface DynamoDbIgnore { +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbImmutable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbImmutable.java new file mode 100644 index 000000000000..7dc79af6fa77 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbImmutable.java @@ -0,0 +1,67 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.mapper.ImmutableTableSchema; + +/** + * Class level annotation that identifies this class as being a DynamoDb mappable entity. Any class used to initialize + * a {@link ImmutableTableSchema} must have this annotation. If a class is used as an attribute type within another + * annotated DynamoDb class, either as a document or flattened with the {@link DynamoDbFlatten} annotation, it will also + * require this annotation to work automatically without an explicit {@link AttributeConverter}. + *

    + * Attribute Converter Providers
    + * Using {@link AttributeConverterProvider}s is optional and, if used, the supplied provider supersedes the default + * converter provided by the table schema. + *

    + * Note: + *

+ * <ul>
+ *     <li>The converter(s) must provide {@link AttributeConverter}s for all types used in the schema.</li>
+ *     <li>The table schema DefaultAttributeConverterProvider provides standard converters for most primitive
+ *     and common Java types. Use custom AttributeConverterProviders when you have specific needs for type conversion
+ *     that the defaults do not cover.</li>
+ *     <li>If you provide a list of attribute converter providers, you can add DefaultAttributeConverterProvider
+ *     to the end of the list to fall back on the defaults.</li>
+ *     <li>Providing an empty list {} will cause no providers to get loaded.</li>
+ * </ul>
+ *
+ * Example using attribute converter providers with one custom provider and the default provider:
+ * <pre>
+ * {@code
+ * @DynamoDbImmutable(converterProviders = {CustomAttributeConverter.class, DefaultAttributeConverterProvider.class})
+ * }
+ * </pre>
+ *
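+ * A fuller sketch of an annotated immutable class and its builder (the class and attribute names below are
+ * hypothetical; getter and builder method naming must follow the rules documented on {@link ImmutableTableSchema}):
+ * <pre>
+ * {@code
+ * @DynamoDbImmutable(builder = Customer.Builder.class)
+ * public class Customer {
+ *     private final String id;
+ *
+ *     private Customer(Builder b) { this.id = b.id; }
+ *
+ *     @DynamoDbPartitionKey
+ *     public String id() { return id; }
+ *
+ *     public static final class Builder {
+ *         private String id;
+ *         public Builder id(String id) { this.id = id; return this; }
+ *         public Customer build() { return new Customer(this); }
+ *     }
+ * }
+ * }
+ * </pre>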
    + */ +@Target({ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@SdkPublicApi +public @interface DynamoDbImmutable { + Class[] converterProviders() + default { DefaultAttributeConverterProvider.class }; + + /** + * The builder class that can be used to construct instances of the annotated immutable class + */ + Class builder(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbPartitionKey.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbPartitionKey.java new file mode 100644 index 000000000000..a9c1e59c5561 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbPartitionKey.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.BeanTableSchemaAttributeTags; + +/** + * Denotes this attribute as being the primary partition key of the DynamoDb table. This attribute must map to a + * DynamoDb scalar type (string, number or binary) to be valid. Every mapped table schema must have exactly one of these. + */ +@SdkPublicApi +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@BeanTableSchemaAttributeTag(BeanTableSchemaAttributeTags.class) +public @interface DynamoDbPartitionKey { +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbSecondaryPartitionKey.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbSecondaryPartitionKey.java new file mode 100644 index 000000000000..01f285f4d47d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbSecondaryPartitionKey.java @@ -0,0 +1,39 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.BeanTableSchemaAttributeTags; + +/** + * Denotes a partition key for a global secondary index. You must also specify at least one index name, although this + * name is only referenced internally by the enhanced client to disambiguate the index and does not actually need to + * match the real name of the index. + */ +@SdkPublicApi +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@BeanTableSchemaAttributeTag(BeanTableSchemaAttributeTags.class) +public @interface DynamoDbSecondaryPartitionKey { + /** + * The names of one or more global secondary indices that this partition key should participate in. + */ + String[] indexNames(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbSecondarySortKey.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbSecondarySortKey.java new file mode 100644 index 000000000000..a39b982b4980 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbSecondarySortKey.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.BeanTableSchemaAttributeTags; + +/** + * Denotes an optional sort key for a global or local secondary index. You must also specify the index name which in the + * case of a global secondary index must match the index name supplied with the secondary partition key for the same + * index. This name is only referenced internally by the enhanced client to disambiguate the index and does not actually + * need to match the real name of the index. + */ +@SdkPublicApi +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@BeanTableSchemaAttributeTag(BeanTableSchemaAttributeTags.class) +public @interface DynamoDbSecondarySortKey { + /** + * The names of one or more local or global secondary indices that this sort key should participate in. 
+ */ + String[] indexNames(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbSortKey.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbSortKey.java new file mode 100644 index 000000000000..de64ab181deb --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbSortKey.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.BeanTableSchemaAttributeTags; + +/** + * Denotes this attribute as being the optional primary sort key of the DynamoDb table. This attribute must map to a + * DynamoDb scalar type (string, number or binary) to be valid. + */ +@SdkPublicApi +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@BeanTableSchemaAttributeTag(BeanTableSchemaAttributeTags.class) +public @interface DynamoDbSortKey { +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbUpdateBehavior.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbUpdateBehavior.java new file mode 100644 index 000000000000..fa161446c1a4 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbUpdateBehavior.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.BeanTableSchemaAttributeTags; +import software.amazon.awssdk.enhanced.dynamodb.mapper.UpdateBehavior; + +/** + * Specifies the behavior when this attribute is updated as part of an 'update' operation such as UpdateItem. See + * documentation of {@link UpdateBehavior} for details on the different behaviors supported and the default behavior. + */ +@SdkPublicApi +@Target({ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@BeanTableSchemaAttributeTag(BeanTableSchemaAttributeTags.class) +public @interface DynamoDbUpdateBehavior { + UpdateBehavior value(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetItemEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetItemEnhancedRequest.java new file mode 100644 index 000000000000..0ae91348ef90 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetItemEnhancedRequest.java @@ -0,0 +1,136 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; + +/** + * Defines parameters used for the batchGetItem() operation (such as + * {@link DynamoDbEnhancedClient#batchGetItem(BatchGetItemEnhancedRequest)}). + *

    + * A request contains references to keys and tables organized into one {@link ReadBatch} object per queried table. + */ +@SdkPublicApi +public final class BatchGetItemEnhancedRequest { + + private final List readBatches; + + private BatchGetItemEnhancedRequest(Builder builder) { + this.readBatches = getListIfExist(builder.readBatches); + } + + /** + * Creates a newly initialized builder for a request object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns a builder initialized with all existing values on the request object. + */ + public Builder toBuilder() { + return new Builder().readBatches(readBatches); + } + + /** + * Returns the collection of {@link ReadBatch} in this request object. + */ + public Collection readBatches() { + return readBatches; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BatchGetItemEnhancedRequest that = (BatchGetItemEnhancedRequest) o; + + return readBatches != null ? readBatches.equals(that.readBatches) : that.readBatches == null; + } + + @Override + public int hashCode() { + return readBatches != null ? readBatches.hashCode() : 0; + } + + private static List getListIfExist(List readBatches) { + return readBatches != null ? Collections.unmodifiableList(readBatches) : null; + } + + /** + * A builder that is used to create a request with the desired parameters. + */ + public static final class Builder { + private List readBatches; + + private Builder() { + } + + /** + * Sets a collection of read batches to use in the batchGetItem operation. + * + * @param readBatches the collection of read batches + * @return a builder of this type + */ + public Builder readBatches(Collection readBatches) { + this.readBatches = readBatches != null ? new ArrayList<>(readBatches) : null; + return this; + } + + /** + * Sets one or more read batches to use in the batchGetItem operation. + * + * @param readBatches one or more {@link ReadBatch}, separated by comma. + * @return a builder of this type + */ + public Builder readBatches(ReadBatch... readBatches) { + this.readBatches = Arrays.asList(readBatches); + return this; + } + + /** + * Adds a read batch to the collection of batches on this builder. + * If this is the first batch, the method creates a new list. + * + * @param readBatch a single read batch + * @return a builder of this type + */ + public Builder addReadBatch(ReadBatch readBatch) { + if (readBatches == null) { + readBatches = new ArrayList<>(); + } + readBatches.add(readBatch); + return this; + } + + public BatchGetItemEnhancedRequest build() { + return new BatchGetItemEnhancedRequest(this); + } + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPage.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPage.java new file mode 100644 index 000000000000..1a2668b5c660 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPage.java @@ -0,0 +1,115 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static java.util.Collections.emptyList; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.readAndTransformSingleItem; + +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.BatchGetItemResponse; + +/** + * Defines one result page with retrieved items in the result of a batchGetItem() operation, such as + * {@link DynamoDbEnhancedClient#batchGetItem(BatchGetItemEnhancedRequest)}. + *

    + * Use the {@link #resultsForTable(MappedTableResource)} method once for each table present in the request + * to retrieve items from that table in the page. + */ +@SdkPublicApi +public final class BatchGetResultPage { + private final BatchGetItemResponse batchGetItemResponse; + private final DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension; + + private BatchGetResultPage(Builder builder) { + this.batchGetItemResponse = builder.batchGetItemResponse; + this.dynamoDbEnhancedClientExtension = builder.dynamoDbEnhancedClientExtension; + } + + /** + * Creates a newly initialized builder for a result object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Retrieve all items on this result page belonging to the supplied table. Call this method once for each table present in the + * batch request. + * + * @param mappedTable the table to retrieve items for + * @param the type of the table items + * @return a list of items + */ + public List resultsForTable(MappedTableResource mappedTable) { + List> results = + batchGetItemResponse.responses() + .getOrDefault(mappedTable.tableName(), emptyList()); + + return results.stream() + .map(itemMap -> readAndTransformSingleItem(itemMap, + mappedTable.tableSchema(), + DefaultOperationContext.create(mappedTable.tableName()), + dynamoDbEnhancedClientExtension)) + .collect(Collectors.toList()); + } + + /** + * A builder that is used to create a result object with the desired parameters. + */ + public static final class Builder { + + private BatchGetItemResponse batchGetItemResponse; + private DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension; + + private Builder() { + } + + /** + * Adds a response to the result object. Required. + * + * @param batchGetItemResponse + * @return a builder of this type + */ + public Builder batchGetItemResponse(BatchGetItemResponse batchGetItemResponse) { + this.batchGetItemResponse = batchGetItemResponse; + return this; + } + + /** + * Adds a mapper extension that can be used to modify the values read from the database. + * @see DynamoDbEnhancedClientExtension + * + * @param dynamoDbEnhancedClientExtension the supplied mapper extension + * @return a builder of this type + */ + public Builder mapperExtension(DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension) { + this.dynamoDbEnhancedClientExtension = dynamoDbEnhancedClientExtension; + return this; + } + + public BatchGetResultPage build() { + return new BatchGetResultPage(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPageIterable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPageIterable.java new file mode 100644 index 000000000000..5a68e3fba8b5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPageIterable.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.pagination.sync.PaginatedItemsIterable; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; + +/** + * Defines the result of {@link DynamoDbEnhancedClient#batchGetItem} operation. + * + *

    + * The result can be accessed either through iterable {@link BatchGetResultPage}s or flattened items + * across all pages via {@link #resultsForTable} + * + *

    + * Example: + *

    + * 1) Iterating through pages + * + *

    + * {@code
    + * batchResults.forEach(page -> {
    + *     page.resultsForTable(firstItemTable).forEach(item -> System.out.println(item));
    + *     page.resultsForTable(secondItemTable).forEach(item -> System.out.println(item));
    + * });
    + * }
    + * 
    + * + * 2) Iterating through items across all pages + * + *
    + * {@code
    + * results.resultsForTable(firstItemTable).forEach(item -> System.out.println(item));
    + * results.resultsForTable(secondItemTable).forEach(item -> System.out.println(item));
    + * }
    + * 
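+ * In both examples above, {@code batchResults} and {@code results} are assumed to have been obtained from a call
+ * along these lines (the table resources, item class and key value are hypothetical):
+ * <pre>
+ * {@code
+ * BatchGetResultPageIterable batchResults = enhancedClient.batchGetItem(
+ *     BatchGetItemEnhancedRequest.builder()
+ *                                .readBatches(ReadBatch.builder(FirstItem.class)
+ *                                                      .mappedTableResource(firstItemTable)
+ *                                                      .addGetItem(Key.builder().partitionValue("id-1").build())
+ *                                                      .build())
+ *                                .build());
+ * }
+ * </pre>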
    + */ +@SdkPublicApi +public interface BatchGetResultPageIterable extends SdkIterable { + + static BatchGetResultPageIterable create(SdkIterable pageIterable) { + return pageIterable::iterator; + } + + /** + * Retrieve all items belonging to the supplied table across all pages. + * + * @param mappedTable the table to retrieve items for + * @param the type of the table items + * @return iterable items + */ + default SdkIterable resultsForTable(MappedTableResource mappedTable) { + return PaginatedItemsIterable.builder() + .pagesIterable(this) + .itemIteratorFunction(page -> page.resultsForTable(mappedTable).iterator()) + .build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPagePublisher.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPagePublisher.java new file mode 100644 index 000000000000..9ce93dbe4118 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPagePublisher.java @@ -0,0 +1,75 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; + +/** + * Defines the result of {@link DynamoDbEnhancedAsyncClient#batchGetItem} operation. + * + *

    + * You can either subscribe to the {@link BatchGetResultPage}s or flattened items across all pages via + * {@link #resultsForTable(MappedTableResource)}. + * + * Example: + *

    + * 1) Subscribing to {@link BatchGetResultPage}s + *

    + * {@code
    + * batchGetResultPagePublisher.subscribe(page -> {
    + *     page.resultsForTable(firstItemTable).forEach(item -> System.out.println(item));
    + *     page.resultsForTable(secondItemTable).forEach(item -> System.out.println(item));
    + * });
    + * }
    + * 
    + * + *

    + * 2) Subscribing to results across all pages. + *

    + * {@code
    + * batchGetResultPagePublisher.resultsForTable(firstItemTable).subscribe(item -> System.out.println(item));
    + * batchGetResultPagePublisher.resultsForTable(secondItemTable).subscribe(item -> System.out.println(item));
    + * }
    + * 
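+ * For illustration, the publisher in the examples above would typically be obtained from the asynchronous client;
+ * the request is built exactly as for the synchronous batchGetItem call (names below are hypothetical):
+ * <pre>
+ * {@code
+ * BatchGetResultPagePublisher publisher = enhancedAsyncClient.batchGetItem(request);
+ * publisher.resultsForTable(firstItemTable)
+ *          .subscribe(item -> System.out.println(item))
+ *          .join();   // SdkPublisher.subscribe(Consumer) returns a CompletableFuture that completes when done
+ * }
+ * </pre>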
    + */ +@SdkPublicApi +public interface BatchGetResultPagePublisher extends SdkPublisher { + + /** + * Creates a flattened items publisher with the underlying page publisher. + */ + static BatchGetResultPagePublisher create(SdkPublisher publisher) { + return publisher::subscribe; + } + + /** + * Returns a publisher that can be used to request a stream of results belonging to the supplied table across all pages. + * + *

    + * This method is useful if you are interested in subscribing to the items in all response pages + * instead of the top level pages. + * + * @param mappedTable the table to retrieve items for + * @param the type of the table items + * @return a {@link SdkPublisher} + */ + default SdkPublisher resultsForTable(MappedTableResource mappedTable) { + return this.flatMapIterable(p -> p.resultsForTable(mappedTable)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteItemEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteItemEnhancedRequest.java new file mode 100644 index 000000000000..9f882d87e771 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteItemEnhancedRequest.java @@ -0,0 +1,137 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; + +/** + * Defines parameters used for the batchWriteItem() operation (such as + * {@link DynamoDbEnhancedClient#batchWriteItem(BatchWriteItemEnhancedRequest)}). + *

    + * A request contains references to keys for delete actions and items for put actions, + * organized into one {@link WriteBatch} object per accessed table. + */ +@SdkPublicApi +public final class BatchWriteItemEnhancedRequest { + + private final List writeBatches; + + private BatchWriteItemEnhancedRequest(Builder builder) { + this.writeBatches = getListIfExist(builder.writeBatches); + } + + /** + * Creates a newly initialized builder for a request object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns a builder initialized with all existing values on the request object. + */ + public Builder toBuilder() { + return new Builder().writeBatches(writeBatches); + } + + /** + * Returns the collection of {@link WriteBatch} in this request object. + */ + public Collection writeBatches() { + return writeBatches; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BatchWriteItemEnhancedRequest that = (BatchWriteItemEnhancedRequest) o; + + return writeBatches != null ? writeBatches.equals(that.writeBatches) : that.writeBatches == null; + } + + @Override + public int hashCode() { + return writeBatches != null ? writeBatches.hashCode() : 0; + } + + private static List getListIfExist(List writeBatches) { + return writeBatches != null ? Collections.unmodifiableList(writeBatches) : null; + } + + /** + * A builder that is used to create a request with the desired parameters. + */ + public static final class Builder { + private List writeBatches; + + private Builder() { + } + + /** + * Sets a collection of write batches to use in the batchWriteItem operation. + * + * @param writeBatches the collection of write batches + * @return a builder of this type + */ + public Builder writeBatches(Collection writeBatches) { + this.writeBatches = writeBatches != null ? new ArrayList<>(writeBatches) : null; + return this; + } + + /** + * Sets one or more write batches to use in the batchWriteItem operation. + * + * @param writeBatches one or more {@link WriteBatch}, separated by comma. + * @return a builder of this type + */ + public Builder writeBatches(WriteBatch... writeBatches) { + this.writeBatches = Arrays.asList(writeBatches); + return this; + } + + /** + * Adds a write batch to the collection of batches on this builder. + * If this is the first batch, the method creates a new list. + * + * @param writeBatch a single write batch + * @return a builder of this type + */ + public Builder addWriteBatch(WriteBatch writeBatch) { + if (writeBatches == null) { + writeBatches = new ArrayList<>(); + } + writeBatches.add(writeBatch); + return this; + } + + public BatchWriteItemEnhancedRequest build() { + return new BatchWriteItemEnhancedRequest(this); + } + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteResult.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteResult.java new file mode 100644 index 000000000000..118fdf4733a4 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteResult.java @@ -0,0 +1,137 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromMap; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.readAndTransformSingleItem; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; +import software.amazon.awssdk.services.dynamodb.model.DeleteRequest; +import software.amazon.awssdk.services.dynamodb.model.PutRequest; +import software.amazon.awssdk.services.dynamodb.model.WriteRequest; + +/** + * Defines the result of the batchWriteItem() operation, such as + * {@link DynamoDbEnhancedClient#batchWriteItem(BatchWriteItemEnhancedRequest)}. The result describes any unprocessed items + * after the operation completes. + *

+ * <ul>
+ *     <li>Use the {@link #unprocessedPutItemsForTable(MappedTableResource)} method once for each table present in the request
+ *     to get any unprocessed items from a put action on that table.</li>
+ *     <li>Use the {@link #unprocessedDeleteItemsForTable(MappedTableResource)} method once for each table present in the request
+ *     to get any unprocessed items from a delete action on that table.</li>
+ * </ul>
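+ *
+ * An illustrative sketch of inspecting the result (the table, item type and request below are hypothetical):
+ * <pre>
+ * {@code
+ * BatchWriteResult result = enhancedClient.batchWriteItem(request);
+ * List<Customer> unprocessedPuts = result.unprocessedPutItemsForTable(customerTable);
+ * List<Key> unprocessedDeletes = result.unprocessedDeleteItemsForTable(customerTable);
+ * // Anything returned here was not written by the batch call and can be retried in a follow-up request.
+ * }
+ * </pre>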
    + * + */ +@SdkPublicApi +public final class BatchWriteResult { + private final Map> unprocessedRequests; + + private BatchWriteResult(Builder builder) { + this.unprocessedRequests = Collections.unmodifiableMap(builder.unprocessedRequests); + } + + /** + * Creates a newly initialized builder for a request object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Retrieve any unprocessed put action items belonging to the supplied table from the result . + * Call this method once for each table present in the batch request. + * + * @param mappedTable the table to retrieve unprocessed items for + * @param the type of the table items + * @return a list of items + */ + public List unprocessedPutItemsForTable(MappedTableResource mappedTable) { + List writeRequests = + unprocessedRequests.getOrDefault(mappedTable.tableName(), + Collections.emptyList()); + + return writeRequests.stream() + .filter(writeRequest -> writeRequest.putRequest() != null) + .map(WriteRequest::putRequest) + .map(PutRequest::item) + .map(item -> readAndTransformSingleItem(item, + mappedTable.tableSchema(), + DefaultOperationContext.create(mappedTable.tableName()), + mappedTable.mapperExtension())) + .collect(Collectors.toList()); + } + + /** + * Retrieve any unprocessed delete action keys belonging to the supplied table from the result. + * Call this method once for each table present in the batch request. + * + * @param mappedTable the table to retrieve unprocessed items for. + * @return a list of keys that were not processed as part of the batch request. + */ + public List unprocessedDeleteItemsForTable(MappedTableResource mappedTable) { + List writeRequests = + unprocessedRequests.getOrDefault(mappedTable.tableName(), + Collections.emptyList()); + + return writeRequests.stream() + .filter(writeRequest -> writeRequest.deleteRequest() != null) + .map(WriteRequest::deleteRequest) + .map(DeleteRequest::key) + .map(itemMap -> createKeyFromMap(itemMap, + mappedTable.tableSchema(), + TableMetadata.primaryIndexName())) + .collect(Collectors.toList()); + } + + /** + * A builder that is used to create a result with the desired parameters. + */ + public static final class Builder { + private Map> unprocessedRequests; + + private Builder() { + } + + /** + * Add a map of unprocessed requests to this result object. + * + * @param unprocessedRequests the map of table to write request representing the unprocessed requests + * @return a builder of this type + */ + public Builder unprocessedRequests(Map> unprocessedRequests) { + this.unprocessedRequests = + unprocessedRequests.entrySet() + .stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + entry -> Collections.unmodifiableList(entry.getValue()))); + return this; + } + + public BatchWriteResult build() { + return new BatchWriteResult(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ConditionCheck.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ConditionCheck.java new file mode 100644 index 000000000000..879d31bea2c5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ConditionCheck.java @@ -0,0 +1,176 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactableWriteOperation; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; + +/** + * Use ConditionCheck as a part of the composite operation transactGetItems (for example + * {@link DynamoDbEnhancedClient#transactGetItems(TransactGetItemsEnhancedRequest)}) to determine + * if the other actions that are part of the same transaction should take effect. + *
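+ * Example: a minimal sketch of building a condition check to include in a transactional request; the key value
+ * and the condition expression are hypothetical placeholders:
+ * {@code
+ * ConditionCheck conditionCheck = ConditionCheck.builder()
+ *     .key(k -> k.partitionValue("id-123"))
+ *     .conditionExpression(Expression.builder()
+ *                                    .expression("attribute_exists(id)")
+ *                                    .build())
+ *     .build();
+ * }
+ *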

    + * A valid ConditionCheck object should contain a reference to the primary key of the table that finds items with a matching key, + * together with a condition (of type {@link Expression}) to evaluate the primary key. + * + * @param The type of the modelled object. + */ +@SdkPublicApi +public final class ConditionCheck implements TransactableWriteOperation { + private final Key key; + private final Expression conditionExpression; + + private ConditionCheck(Key key, Expression conditionExpression) { + this.key = key; + this.conditionExpression = conditionExpression; + } + + /** + * Creates a newly initialized builder for this object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns a builder initialized with all existing values on the object. + */ + public Builder toBuilder() { + return new Builder().key(key).conditionExpression(conditionExpression); + } + + @Override + public TransactWriteItem generateTransactWriteItem(TableSchema tableSchema, + OperationContext operationContext, + DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension) { + software.amazon.awssdk.services.dynamodb.model.ConditionCheck conditionCheck = + software.amazon.awssdk.services.dynamodb.model.ConditionCheck + .builder() + .tableName(operationContext.tableName()) + .key(key.keyMap(tableSchema, operationContext.indexName())) + .conditionExpression(conditionExpression.expression()) + .expressionAttributeNames(conditionExpression.expressionNames()) + .expressionAttributeValues(conditionExpression.expressionValues()) + .build(); + + return TransactWriteItem.builder() + .conditionCheck(conditionCheck) + .build(); + } + + /** + * Returns the primary {@link Key} that the condition is valid for, or null if it doesn't exist. + */ + public Key key() { + return key; + } + + /** + * Returns the condition {@link Expression} set on this object, or null if it doesn't exist. + */ + public Expression conditionExpression() { + return conditionExpression; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ConditionCheck that = (ConditionCheck) o; + + if (key != null ? ! key.equals(that.key) : that.key != null) { + return false; + } + return conditionExpression != null ? conditionExpression.equals(that.conditionExpression) : + that.conditionExpression == null; + } + + @Override + public int hashCode() { + int result = key != null ? key.hashCode() : 0; + result = 31 * result + (conditionExpression != null ? conditionExpression.hashCode() : 0); + return result; + } + + /** + * A builder that is used to create a condition check with the desired parameters. + *

    + * A valid builder must define both a {@link Key} and an {@link Expression}. + */ + public static final class Builder { + private Key key; + private Expression conditionExpression; + + private Builder() { + } + + /** + * Sets the primary {@link Key} that will be used together with the condition expression. + * + * @param key the primary key to use in the operation. + * @return a builder of this type + */ + public Builder key(Key key) { + this.key = key; + return this; + } + + /** + * Sets the primary {@link Key} that will be used together with the condition expression + * on the builder by accepting a consumer of {@link Key.Builder}. + * + * @param keyConsumer a {@link Consumer} of {@link Key} + * @return a builder of this type + */ + public Builder key(Consumer keyConsumer) { + Key.Builder builder = Key.builder(); + keyConsumer.accept(builder); + return key(builder.build()); + } + + /** + * Defines a logical expression on the attributes of table items that match the supplied primary key value(s). + * If the expression evaluates to true, the transaction operation succeeds. If the expression evaluates to false, + * the transaction will not succeed. + *

    + * See {@link Expression} for condition syntax and examples. + * + * @param conditionExpression a condition written as an {@link Expression} + * @return a builder of this type + */ + public Builder conditionExpression(Expression conditionExpression) { + this.conditionExpression = conditionExpression; + return this; + } + + public ConditionCheck build() { + return new ConditionCheck<>(key, conditionExpression); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/CreateTableEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/CreateTableEnhancedRequest.java new file mode 100644 index 000000000000..dec594772557 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/CreateTableEnhancedRequest.java @@ -0,0 +1,179 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.Arrays; +import java.util.Collection; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; + +/** + * Defines parameters used to create a DynamoDb table using the createTable() operation (such as + * {@link DynamoDbTable#createTable(CreateTableEnhancedRequest)} or + * {@link DynamoDbAsyncTable#createTable(CreateTableEnhancedRequest)}). + *
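+ * Example: a minimal sketch of creating a table with provisioned throughput and one global secondary index; the
+ * {@code customerTable} handle, index name and capacity values are hypothetical placeholders:
+ * {@code
+ * customerTable.createTable(CreateTableEnhancedRequest.builder()
+ *     .provisionedThroughput(ProvisionedThroughput.builder()
+ *                                                 .readCapacityUnits(5L)
+ *                                                 .writeCapacityUnits(5L)
+ *                                                 .build())
+ *     .globalSecondaryIndices(EnhancedGlobalSecondaryIndex.builder()
+ *                                                         .indexName("customers_by_email")
+ *                                                         .projection(p -> p.projectionType(ProjectionType.ALL))
+ *                                                         .provisionedThroughput(t -> t.readCapacityUnits(5L)
+ *                                                                                      .writeCapacityUnits(5L))
+ *                                                         .build())
+ *     .build());
+ * }
+ *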

    + * All parameters are optional. + */ +@SdkPublicApi +public final class CreateTableEnhancedRequest { + private final ProvisionedThroughput provisionedThroughput; + private final Collection localSecondaryIndices; + private final Collection globalSecondaryIndices; + + private CreateTableEnhancedRequest(Builder builder) { + this.provisionedThroughput = builder.provisionedThroughput; + this.localSecondaryIndices = builder.localSecondaryIndices; + this.globalSecondaryIndices = builder.globalSecondaryIndices; + } + + /** + * Creates a newly initialized builder for a request object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns a builder initialized with all existing values on the request object. + */ + public Builder toBuilder() { + return builder().provisionedThroughput(provisionedThroughput) + .localSecondaryIndices(localSecondaryIndices) + .globalSecondaryIndices(globalSecondaryIndices); + } + + /** + * Returns the provisioned throughput value set on this request object, or null if it has not been set. + */ + public ProvisionedThroughput provisionedThroughput() { + return provisionedThroughput; + } + + /** + * Returns the local secondary index set on this request object, or null if it has not been set. + */ + public Collection localSecondaryIndices() { + return localSecondaryIndices; + } + + /** + * Returns the global secondary index set on this request object, or null if it has not been set. + */ + public Collection globalSecondaryIndices() { + return globalSecondaryIndices; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CreateTableEnhancedRequest that = (CreateTableEnhancedRequest) o; + + if (provisionedThroughput != null ? ! provisionedThroughput.equals(that.provisionedThroughput) : + that.provisionedThroughput != null) { + return false; + } + if (localSecondaryIndices != null ? ! localSecondaryIndices.equals(that.localSecondaryIndices) : + that.localSecondaryIndices != null) { + return false; + } + return globalSecondaryIndices != null ? globalSecondaryIndices.equals(that.globalSecondaryIndices) : + that.globalSecondaryIndices == null; + } + + @Override + public int hashCode() { + int result = provisionedThroughput != null ? provisionedThroughput.hashCode() : 0; + result = 31 * result + (localSecondaryIndices != null ? localSecondaryIndices.hashCode() : 0); + result = 31 * result + (globalSecondaryIndices != null ? globalSecondaryIndices.hashCode() : 0); + return result; + } + + /** + * A builder that is used to create a request with the desired parameters. + */ + public static final class Builder { + private ProvisionedThroughput provisionedThroughput; + private Collection localSecondaryIndices; + private Collection globalSecondaryIndices; + + private Builder() { + } + + /** + * Sets the provisioned throughput for this table. Use this parameter to set the table's + * read and write capacity units. + *

    + * See the DynamoDb documentation for more information on default throughput values. + */ + public Builder provisionedThroughput(ProvisionedThroughput provisionedThroughput) { + this.provisionedThroughput = provisionedThroughput; + return this; + } + + /** + * Defines a local secondary index for this table. + *

    + * See {@link EnhancedLocalSecondaryIndex} for more information on creating and using a local secondary index. + */ + public Builder localSecondaryIndices(Collection localSecondaryIndices) { + this.localSecondaryIndices = localSecondaryIndices; + return this; + } + + /** + * Defines a local secondary index for this table. + *

    + * See {@link EnhancedLocalSecondaryIndex} for more information on creating and using a local secondary index. + */ + public Builder localSecondaryIndices(EnhancedLocalSecondaryIndex... localSecondaryIndices) { + this.localSecondaryIndices = Arrays.asList(localSecondaryIndices); + return this; + } + + /** + * Defines a global secondary index for this table. + *

    + * See {@link EnhancedGlobalSecondaryIndex} for more information on creating and using a global secondary index. + */ + public Builder globalSecondaryIndices(Collection globalSecondaryIndices) { + this.globalSecondaryIndices = globalSecondaryIndices; + return this; + } + + /** + * Defines a global secondary index for this table. + *

    + * See {@link EnhancedGlobalSecondaryIndex} for more information on creating and using a global secondary index. + */ + public Builder globalSecondaryIndices(EnhancedGlobalSecondaryIndex... globalSecondaryIndices) { + this.globalSecondaryIndices = Arrays.asList(globalSecondaryIndices); + return this; + } + + public CreateTableEnhancedRequest build() { + return new CreateTableEnhancedRequest(this); + } + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/DeleteItemEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/DeleteItemEnhancedRequest.java new file mode 100644 index 000000000000..ae9f873cda3e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/DeleteItemEnhancedRequest.java @@ -0,0 +1,144 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; + +/** + * Defines parameters used to remove an item from a DynamoDb table using the deleteItem() operation (such as + * {@link DynamoDbTable#deleteItem(DeleteItemEnhancedRequest)} or + * {@link DynamoDbAsyncTable#deleteItem(DeleteItemEnhancedRequest)}). + *
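+ * Example: a minimal sketch of a conditional delete; the {@code customerTable} handle, key value and condition
+ * are hypothetical placeholders:
+ * {@code
+ * customerTable.deleteItem(DeleteItemEnhancedRequest.builder()
+ *     .key(k -> k.partitionValue("id-123"))
+ *     .conditionExpression(Expression.builder()
+ *                                    .expression("attribute_exists(id)")
+ *                                    .build())
+ *     .build());
+ * }
+ *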

    + * A valid request object must contain a primary {@link Key} to reference the item to delete. + */ +@SdkPublicApi +public final class DeleteItemEnhancedRequest { + + private final Key key; + private final Expression conditionExpression; + + private DeleteItemEnhancedRequest(Builder builder) { + this.key = builder.key; + this.conditionExpression = builder.conditionExpression; + } + + /** + * Creates a newly initialized builder for a request object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns a builder initialized with all existing values on the request object. + */ + public Builder toBuilder() { + return builder().key(key).conditionExpression(conditionExpression); + } + + /** + * Returns the primary {@link Key} for the item to delete. + */ + public Key key() { + return key; + } + + /** + * Returns the condition {@link Expression} set on this request object, or null if it doesn't exist. + */ + public Expression conditionExpression() { + return conditionExpression; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DeleteItemEnhancedRequest that = (DeleteItemEnhancedRequest) o; + + return key != null ? key.equals(that.key) : that.key == null; + } + + @Override + public int hashCode() { + return key != null ? key.hashCode() : 0; + } + + /** + * A builder that is used to create a request with the desired parameters. + *

    + * Note: A valid request builder must define a {@link Key}. + */ + public static final class Builder { + private Key key; + private Expression conditionExpression; + + private Builder() { + } + + /** + * Sets the primary {@link Key} that will be used to match the item to delete. + * + * @param key the primary key to use in the request. + * @return a builder of this type + */ + public Builder key(Key key) { + this.key = key; + return this; + } + + /** + * Sets the primary {@link Key} that will be used to match the item to delete + * on the builder by accepting a consumer of {@link Key.Builder}. + * + * @param keyConsumer a {@link Consumer} of {@link Key} + * @return a builder of this type + */ + public Builder key(Consumer keyConsumer) { + Key.Builder builder = Key.builder(); + keyConsumer.accept(builder); + return key(builder.build()); + } + + /** + * Defines a logical expression on an item's attribute values which, if evaluating to true, + * will allow the delete operation to succeed. If evaluating to false, the operation will not succeed. + *

    + * See {@link Expression} for condition syntax and examples. + * + * @param conditionExpression a condition written as an {@link Expression} + * @return a builder of this type + */ + public Builder conditionExpression(Expression conditionExpression) { + this.conditionExpression = conditionExpression; + return this; + } + + public DeleteItemEnhancedRequest build() { + return new DeleteItemEnhancedRequest(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/EnhancedGlobalSecondaryIndex.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/EnhancedGlobalSecondaryIndex.java new file mode 100644 index 000000000000..869c223ef252 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/EnhancedGlobalSecondaryIndex.java @@ -0,0 +1,168 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.services.dynamodb.model.Projection; +import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; +import software.amazon.awssdk.utils.Validate; + +/** + * Enhanced model representation of a 'global secondary index' of a DynamoDb table. This is optionally used with the + * 'createTable' operation in the enhanced client. + */ +@SdkPublicApi +public final class EnhancedGlobalSecondaryIndex { + private final String indexName; + private final Projection projection; + private final ProvisionedThroughput provisionedThroughput; + + private EnhancedGlobalSecondaryIndex(Builder builder) { + this.indexName = Validate.paramNotBlank(builder.indexName, "indexName"); + this.projection = builder.projection; + this.provisionedThroughput = builder.provisionedThroughput; + } + + /** + * Creates a newly initialized builder for an {@link EnhancedLocalSecondaryIndex} + * @return A new builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a builder initialized with the attributes of an existing {@link EnhancedLocalSecondaryIndex} + * @return A new builder + */ + public Builder toBuilder() { + return builder().indexName(indexName) + .projection(projection) + .provisionedThroughput(provisionedThroughput); + } + + /** + * The name of the global secondary index + */ + public String indexName() { + return indexName; + } + + /** + * The attribute projection setting for this global secondary index. + */ + public Projection projection() { + return projection; + } + + /** + * The provisioned throughput setting for this global secondary index. 
+ */ + public ProvisionedThroughput provisionedThroughput() { + return provisionedThroughput; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + EnhancedGlobalSecondaryIndex that = (EnhancedGlobalSecondaryIndex) o; + + if (indexName != null ? ! indexName.equals(that.indexName) : that.indexName != null) { + return false; + } + if (projection != null ? ! projection.equals(that.projection) : that.projection != null) { + return false; + } + return provisionedThroughput != null ? provisionedThroughput.equals(that.provisionedThroughput) : + that.provisionedThroughput == null; + } + + @Override + public int hashCode() { + int result = indexName != null ? indexName.hashCode() : 0; + result = 31 * result + (projection != null ? projection.hashCode() : 0); + result = 31 * result + (provisionedThroughput != null ? provisionedThroughput.hashCode() : 0); + return result; + } + + /** + * A builder for {@link EnhancedGlobalSecondaryIndex} + */ + public static final class Builder { + private String indexName; + private Projection projection; + private ProvisionedThroughput provisionedThroughput; + + private Builder() { + } + + /** + * The name of the global secondary index + */ + public Builder indexName(String indexName) { + this.indexName = indexName; + return this; + } + + /** + * The attribute projection setting for this global secondary index. + */ + public Builder projection(Projection projection) { + this.projection = projection; + return this; + } + + /** + * The attribute projection setting for this global secondary index. + */ + public Builder projection(Consumer projection) { + Projection.Builder builder = Projection.builder(); + projection.accept(builder); + return projection(builder.build()); + } + + /** + * The provisioned throughput setting for this global secondary index. + */ + public Builder provisionedThroughput(ProvisionedThroughput provisionedThroughput) { + this.provisionedThroughput = provisionedThroughput; + return this; + } + + /** + * The provisioned throughput setting for this global secondary index. + */ + public Builder provisionedThroughput(Consumer provisionedThroughput) { + ProvisionedThroughput.Builder builder = ProvisionedThroughput.builder(); + provisionedThroughput.accept(builder); + return provisionedThroughput(builder.build()); + } + + /** + * Builds a {@link EnhancedGlobalSecondaryIndex} based on the values stored in this builder + */ + public EnhancedGlobalSecondaryIndex build() { + return new EnhancedGlobalSecondaryIndex(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/EnhancedLocalSecondaryIndex.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/EnhancedLocalSecondaryIndex.java new file mode 100644 index 000000000000..7512ac9c4300 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/EnhancedLocalSecondaryIndex.java @@ -0,0 +1,128 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.services.dynamodb.model.Projection; +import software.amazon.awssdk.utils.Validate; + +/** + * Enhanced model representation of a 'local secondary index' of a DynamoDb table. This is optionally used with the + * 'createTable' operation in the enhanced client. + */ +@SdkPublicApi +public final class EnhancedLocalSecondaryIndex { + private final String indexName; + private final Projection projection; + + private EnhancedLocalSecondaryIndex(Builder builder) { + this.indexName = Validate.paramNotBlank(builder.indexName, "indexName"); + this.projection = builder.projection; + } + + public static EnhancedLocalSecondaryIndex create(String indexName, + Projection projection) { + + return builder().indexName(indexName).projection(projection).build(); + } + + public static Builder builder() { + return new Builder(); + } + + public Builder toBuilder() { + return builder().indexName(indexName).projection(projection); + } + + /** + * The name of this local secondary index + */ + public String indexName() { + return indexName; + } + + /** + * The attribute projection setting for this local secondary index. + */ + public Projection projection() { + return projection; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + EnhancedLocalSecondaryIndex that = (EnhancedLocalSecondaryIndex) o; + + if (indexName != null ? ! indexName.equals(that.indexName) : that.indexName != null) { + return false; + } + return projection != null ? projection.equals(that.projection) : that.projection == null; + } + + @Override + public int hashCode() { + int result = indexName != null ? indexName.hashCode() : 0; + result = 31 * result + (projection != null ? projection.hashCode() : 0); + return result; + } + + /** + * A builder for {@link EnhancedLocalSecondaryIndex} + */ + public static final class Builder { + private String indexName; + private Projection projection; + + private Builder() { + } + + /** + * The name of this local secondary index + */ + public Builder indexName(String indexName) { + this.indexName = indexName; + return this; + } + + /** + * The attribute projection setting for this local secondary index. + */ + public Builder projection(Projection projection) { + this.projection = projection; + return this; + } + + /** + * The attribute projection setting for this local secondary index. 
+ */ + public Builder projection(Consumer projection) { + Projection.Builder builder = Projection.builder(); + projection.accept(builder); + return projection(builder.build()); + } + + public EnhancedLocalSecondaryIndex build() { + return new EnhancedLocalSecondaryIndex(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/GetItemEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/GetItemEnhancedRequest.java new file mode 100644 index 000000000000..7025bb35eb3a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/GetItemEnhancedRequest.java @@ -0,0 +1,148 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Key; + +/** + * Defines parameters used to retrieve an item from a DynamoDb table using the getItem() operation (such as + * {@link DynamoDbTable#getItem(GetItemEnhancedRequest)} or {@link DynamoDbAsyncTable#getItem(GetItemEnhancedRequest)}). + *
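+ * Example: a minimal sketch of a strongly consistent read; the {@code Customer} class, {@code customerTable}
+ * handle and key value are hypothetical placeholders:
+ * {@code
+ * Customer customer = customerTable.getItem(GetItemEnhancedRequest.builder()
+ *     .key(k -> k.partitionValue("id-123"))
+ *     .consistentRead(true)
+ *     .build());
+ * }
+ *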

    + * A valid request object must contain a primary {@link Key} to reference the item to get. + */ +@SdkPublicApi +public final class GetItemEnhancedRequest { + + private final Key key; + private final Boolean consistentRead; + + private GetItemEnhancedRequest(Builder builder) { + this.key = builder.key; + this.consistentRead = builder.consistentRead; + } + + /** + * All requests must be constructed using a Builder. + * @return a builder of this type + */ + public static Builder builder() { + return new Builder(); + } + + /** + * @return a builder with all existing values set + */ + public Builder toBuilder() { + return builder().key(key).consistentRead(consistentRead); + } + + /** + * @return whether or not this request will use consistent read + */ + public Boolean consistentRead() { + return this.consistentRead; + } + + /** + * Returns the primary {@link Key} for the item to get. + */ + public Key key() { + return this.key; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + GetItemEnhancedRequest getItem = (GetItemEnhancedRequest) o; + + if (key != null ? ! key.equals(getItem.key) : getItem.key != null) { + return false; + } + return consistentRead != null ? consistentRead.equals(getItem.consistentRead) : getItem.consistentRead == null; + } + + @Override + public int hashCode() { + int result = key != null ? key.hashCode() : 0; + result = 31 * result + (consistentRead != null ? consistentRead.hashCode() : 0); + return result; + } + + /** + * A builder that is used to create a request with the desired parameters. + *

    + * Note: A valid request builder must define a {@link Key}. + */ + public static final class Builder { + private Key key; + private Boolean consistentRead; + + private Builder() { + } + + /** + * Determines the read consistency model: If set to true, the operation uses strongly consistent reads; otherwise, + * the operation uses eventually consistent reads. + *

    + * By default, the value of this property is set to false. + * + * @param consistentRead sets consistency model of the operation to use strong consistency + * @return a builder of this type + */ + public Builder consistentRead(Boolean consistentRead) { + this.consistentRead = consistentRead; + return this; + } + + /** + * Sets the primary {@link Key} that will be used to match the item to retrieve. + * + * @param key the primary key to use in the request. + * @return a builder of this type + */ + public Builder key(Key key) { + this.key = key; + return this; + } + + /** + * Sets the primary {@link Key} that will be used to match the item to retrieve + * by accepting a consumer of {@link Key.Builder}. + * + * @param keyConsumer a {@link Consumer} of {@link Key} + * @return a builder of this type + */ + public Builder key(Consumer keyConsumer) { + Key.Builder builder = Key.builder(); + keyConsumer.accept(builder); + return key(builder.build()); + } + + public GetItemEnhancedRequest build() { + return new GetItemEnhancedRequest(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/Page.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/Page.java new file mode 100644 index 000000000000..d5248cfdd903 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/Page.java @@ -0,0 +1,101 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +/** + * An immutable object that holds a page of queried or scanned results from DynamoDb. + *
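+ * Example: a minimal sketch of consuming the first page of a scan and inspecting the pagination key; the
+ * {@code Customer} class and {@code customerTable} handle are hypothetical, and the scan call is assumed to
+ * return an iterable of pages:
+ * {@code
+ * Page<Customer> firstPage = customerTable.scan().iterator().next();
+ * firstPage.items().forEach(item -> System.out.println(item));
+ * Map<String, AttributeValue> lastKey = firstPage.lastEvaluatedKey(); // null once the final page has been read
+ * }
+ *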

    + * Contains a reference to the last evaluated key for the current page; see {@link #lastEvaluatedKey()} for more information. + * @param The modelled type of the object that has been read. + */ +@SdkPublicApi +public final class Page { + private final List items; + private final Map lastEvaluatedKey; + + private Page(List items, Map lastEvaluatedKey) { + this.items = items; + this.lastEvaluatedKey = lastEvaluatedKey; + } + + /** + * Static constructor for this object. + * @param items A list of items to store for the page. + * @param lastEvaluatedKey A 'lastEvaluatedKey' to store for the page. + * @param The modelled type of the object that has been read. + * @return A newly constructed {@link Page} object. + */ + public static Page create(List items, Map lastEvaluatedKey) { + return new Page<>(items, lastEvaluatedKey); + } + + /** + * Static constructor for this object that sets a null 'lastEvaluatedKey' which indicates this is the final page + * of results. + * @param items A list of items to store for the page. + * @param The modelled type of the object that has been read. + * @return A newly constructed {@link Page} object. + */ + public static Page create(List items) { + return new Page<>(items, null); + } + + /** + * Returns a page of mapped objects that represent records from a database query or scan. + * @return A list of mapped objects. + */ + public List items() { + return items; + } + + /** + * Returns the 'lastEvaluatedKey' that DynamoDb returned from the last page query or scan. This key can be used + * to continue the query or scan if passed into a request. + * @return The 'lastEvaluatedKey' from the last query or scan operation or null if the no more pages are available. + */ + public Map lastEvaluatedKey() { + return lastEvaluatedKey; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Page page = (Page) o; + + if (items != null ? ! items.equals(page.items) : page.items != null) { + return false; + } + return lastEvaluatedKey != null ? lastEvaluatedKey.equals(page.lastEvaluatedKey) : page.lastEvaluatedKey == null; + } + + @Override + public int hashCode() { + int result = items != null ? items.hashCode() : 0; + result = 31 * result + (lastEvaluatedKey != null ? lastEvaluatedKey.hashCode() : 0); + return result; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PageIterable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PageIterable.java new file mode 100644 index 000000000000..ffa7b8519526 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PageIterable.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.pagination.sync.PaginatedItemsIterable; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; + +/** + * Page iterable represents the result from paginated operations such as scan and query. + * + *

    + * The result can be accessed either through iterable {@link Page}s or flattened items across all pages via + * {@link #items()} + * + *

    + * Example: + *

    + * 1) Iterating through pages + * + *

    + * {@code
    + * PageIterable results = table.scan();
+ * results.stream().forEach(p -> p.items().forEach(item -> System.out.println(item)));
    + * }
    + * 
    + * + * 2) Iterating through items + * + *
    + * {@code
    + * PageIterable results = table.scan();
    + * results.items().stream().forEach(item -> System.out.println(item));
    + * }
    + * 
    + * @param The modelled type of the object in a page. + */ +@SdkPublicApi +public interface PageIterable extends SdkIterable> { + + static PageIterable create(SdkIterable> pageIterable) { + return pageIterable::iterator; + } + + /** + * Returns an iterable to iterate through the paginated {@link Page#items()} across all response pages. + * + *

    + * This method is useful if you are interested in iterating over the items in the response pages + * instead of the top level pages. + */ + default SdkIterable items() { + return PaginatedItemsIterable., T>builder() + .pagesIterable(this) + .itemIteratorFunction(page -> page.items().iterator()) + .build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PagePublisher.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PagePublisher.java new file mode 100644 index 000000000000..f4f5f85fbf2c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PagePublisher.java @@ -0,0 +1,69 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.async.SdkPublisher; + +/** + * Represents the result from paginated operations such as scan and query. + *

+ * You can either subscribe to the {@link Page}s or to the flattened items across all pages via {@link #items()}. + * + * Example: + *

    + * 1) Subscribing to {@link Page}s + *

    + * {@code
    + *
    + * PagePublisher publisher = mappedTable.scan();
    + * publisher.subscribe(page -> page.items().forEach(item -> System.out.println(item)));
    + * }
    + * 
    + * + *

    + * 2) Subscribing to items across all pages. + *

    + * {@code
    + *
+ * PagePublisher publisher = mappedTable.scan();
    + * publisher.items().subscribe(item -> System.out.println(item));
    + * }
    + * 
    + * + * @param The modelled type of the object in a page. + */ +@SdkPublicApi +public interface PagePublisher extends SdkPublisher> { + + /** + * Creates a flattened items publisher with the underlying page publisher. + */ + static PagePublisher create(SdkPublisher> publisher) { + return publisher::subscribe; + } + + /** + * Returns a publisher that can be used to request a stream of items across all pages. + * + *

    + * This method is useful if you are interested in subscribing the items in the response pages + * instead of the top level pages. + */ + default SdkPublisher items() { + return this.flatMapIterable(Page::items); + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PutItemEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PutItemEnhancedRequest.java new file mode 100644 index 000000000000..a8b1e4208ca4 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PutItemEnhancedRequest.java @@ -0,0 +1,133 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; + +/** + * Defines parameters used to write an item to a DynamoDb table using the putItem() operation (such as + * {@link DynamoDbTable#putItem(PutItemEnhancedRequest)} or {@link DynamoDbAsyncTable#putItem(PutItemEnhancedRequest)}). + *
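+ * Example: a minimal sketch of a conditional put; the {@code Customer} bean, {@code customerTable} handle and
+ * condition are hypothetical placeholders:
+ * {@code
+ * Customer customer = new Customer();
+ * customer.setId("id-123");
+ * customerTable.putItem(PutItemEnhancedRequest.builder(Customer.class)
+ *     .item(customer)
+ *     .conditionExpression(Expression.builder()
+ *                                    .expression("attribute_not_exists(id)")
+ *                                    .build())
+ *     .build());
+ * }
+ *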

    + * A valid request object must contain the item that should be written to the table. + * @param The type of the modelled object. + */ +@SdkPublicApi +public final class PutItemEnhancedRequest { + + private final T item; + private final Expression conditionExpression; + + private PutItemEnhancedRequest(Builder builder) { + this.item = builder.item; + this.conditionExpression = builder.conditionExpression; + } + + /** + * Creates a newly initialized builder for the request object. + * + * @param itemClass the class that items in this table map to + * @param The type of the modelled object, corresponding to itemClass + * @return a PutItemEnhancedRequest builder + */ + public static Builder builder(Class itemClass) { + return new Builder<>(); + } + + /** + * Returns a builder initialized with all existing values on the request object. + */ + public Builder toBuilder() { + return new Builder().item(item).conditionExpression(conditionExpression); + } + + /** + * Returns the item for this put operation request. + */ + public T item() { + return item; + } + + /** + * Returns the condition {@link Expression} set on this request object, or null if it doesn't exist. + */ + public Expression conditionExpression() { + return conditionExpression; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + PutItemEnhancedRequest putItem = (PutItemEnhancedRequest) o; + + return item != null ? item.equals(putItem.item) : putItem.item == null; + } + + @Override + public int hashCode() { + return item != null ? item.hashCode() : 0; + } + + /** + * A builder that is used to create a request with the desired parameters. + *

    + * Note: A valid request builder must define an item. + */ + public static final class Builder { + private T item; + private Expression conditionExpression; + + private Builder() { + } + + /** + * Sets the item to write to DynamoDb. Required. + * + * @param item the item to write + * @return a builder of this type + */ + public Builder item(T item) { + this.item = item; + return this; + } + + /** + * Defines a logical expression on an item's attribute values which, if evaluating to true, + * will allow the put operation to succeed. If evaluating to false, the operation will not succeed. + *

    + * See {@link Expression} for condition syntax and examples. + * + * @param conditionExpression a condition written as an {@link Expression} + * @return a builder of this type + */ + public Builder conditionExpression(Expression conditionExpression) { + this.conditionExpression = conditionExpression; + return this; + } + + public PutItemEnhancedRequest build() { + return new PutItemEnhancedRequest<>(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryConditional.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryConditional.java new file mode 100644 index 000000000000..a877244406a5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryConditional.java @@ -0,0 +1,186 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.conditional.BeginsWithConditional; +import software.amazon.awssdk.enhanced.dynamodb.internal.conditional.BetweenConditional; +import software.amazon.awssdk.enhanced.dynamodb.internal.conditional.EqualToConditional; +import software.amazon.awssdk.enhanced.dynamodb.internal.conditional.SingleKeyItemConditional; + +/** + * An interface for a literal conditional that can be used in an enhanced DynamoDB query. Contains convenient static + * methods that can be used to construct the most common conditional statements. Query conditionals are not linked to + * any specific table or schema and can be re-used in different contexts. + *

    + * Example: + *

    + * {@code
    + * QueryConditional sortValueGreaterThanFour = QueryConditional.sortGreaterThan(k -> k.partitionValue(10).sortValue(4));
    + * }
    + * 
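+ * Another sketch, matching a partition value exactly while restricting the sort key to a range; the key values
+ * are hypothetical placeholders:
+ * {@code
+ * QueryConditional sortValueBetweenTwoAndSix = QueryConditional.sortBetween(k -> k.partitionValue(10).sortValue(2),
+ *                                                                           k -> k.partitionValue(10).sortValue(6));
+ * }
+ *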
    + */ +@SdkPublicApi +public interface QueryConditional { + /** + * Creates a {@link QueryConditional} that matches when the key of an index is equal to a specific value. + * @param key the literal key used to compare the value of the index against + */ + static QueryConditional keyEqualTo(Key key) { + return new EqualToConditional(key); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is equal to a specific value. + * @param keyConsumer 'builder consumer' for the literal key used to compare the value of the index against + */ + static QueryConditional keyEqualTo(Consumer keyConsumer) { + Key.Builder builder = Key.builder(); + keyConsumer.accept(builder); + return keyEqualTo(builder.build()); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is greater than a specific value. + * @param key the literal key used to compare the value of the index against + */ + static QueryConditional sortGreaterThan(Key key) { + return new SingleKeyItemConditional(key, ">"); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is greater than a specific value. + * @param keyConsumer 'builder consumer' for the literal key used to compare the value of the index against + */ + static QueryConditional sortGreaterThan(Consumer keyConsumer) { + Key.Builder builder = Key.builder(); + keyConsumer.accept(builder); + return sortGreaterThan(builder.build()); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is greater than or equal to a specific + * value. + * @param key the literal key used to compare the value of the index against + */ + static QueryConditional sortGreaterThanOrEqualTo(Key key) { + return new SingleKeyItemConditional(key, ">="); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is greater than or equal to a specific + * value. + * @param keyConsumer 'builder consumer' for the literal key used to compare the value of the index against + */ + static QueryConditional sortGreaterThanOrEqualTo(Consumer keyConsumer) { + Key.Builder builder = Key.builder(); + keyConsumer.accept(builder); + return sortGreaterThanOrEqualTo(builder.build()); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is less than a specific value. + * @param key the literal key used to compare the value of the index against + */ + static QueryConditional sortLessThan(Key key) { + return new SingleKeyItemConditional(key, "<"); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is less than a specific value. + * @param keyConsumer 'builder consumer' for the literal key used to compare the value of the index against + */ + static QueryConditional sortLessThan(Consumer keyConsumer) { + Key.Builder builder = Key.builder(); + keyConsumer.accept(builder); + return sortLessThan(builder.build()); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is less than or equal to a specific + * value. + * @param key the literal key used to compare the value of the index against + */ + static QueryConditional sortLessThanOrEqualTo(Key key) { + return new SingleKeyItemConditional(key, "<="); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is less than or equal to a specific + * value. 
+ * @param keyConsumer 'builder consumer' for the literal key used to compare the value of the index against + */ + static QueryConditional sortLessThanOrEqualTo(Consumer keyConsumer) { + Key.Builder builder = Key.builder(); + keyConsumer.accept(builder); + return sortLessThanOrEqualTo(builder.build()); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is between two specific values. + * @param keyFrom the literal key used to compare the start of the range to compare the value of the index against + * @param keyTo the literal key used to compare the end of the range to compare the value of the index against + */ + static QueryConditional sortBetween(Key keyFrom, Key keyTo) { + return new BetweenConditional(keyFrom, keyTo); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index is between two specific values. + * @param keyFromConsumer 'builder consumer' for the literal key used to compare the start of the range to compare + * the value of the index against + * @param keyToConsumer 'builder consumer' for the literal key used to compare the end of the range to compare the + * value of the index against + */ + static QueryConditional sortBetween(Consumer keyFromConsumer, Consumer keyToConsumer) { + Key.Builder builderFrom = Key.builder(); + Key.Builder builderTo = Key.builder(); + keyFromConsumer.accept(builderFrom); + keyToConsumer.accept(builderTo); + return sortBetween(builderFrom.build(), builderTo.build()); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index begins with a specific value. + * @param key the literal key used to compare the start of the value of the index against + */ + static QueryConditional sortBeginsWith(Key key) { + return new BeginsWithConditional(key); + } + + /** + * Creates a {@link QueryConditional} that matches when the key of an index begins with a specific value. + * @param keyConsumer 'builder consumer' the literal key used to compare the start of the value of the index + * against + */ + static QueryConditional sortBeginsWith(Consumer keyConsumer) { + Key.Builder builder = Key.builder(); + keyConsumer.accept(builder); + return sortBeginsWith(builder.build()); + } + + /** + * Generates a conditional {@link Expression} based on specific context that is supplied as arguments. + * @param tableSchema A {@link TableSchema} that this expression will be used with + * @param indexName The specific index name of the index this expression will be used with + * @return A specific {@link Expression} that can be used as part of a query request + */ + Expression expression(TableSchema tableSchema, String indexName); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequest.java new file mode 100644 index 000000000000..d1727f6e9122 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequest.java @@ -0,0 +1,442 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncIndex; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.NestedAttributeName; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.Validate; + +/** + * Defines parameters used to when querying a DynamoDb table or index using the query() operation (such as + * {@link DynamoDbTable#query(QueryEnhancedRequest)} or {@link DynamoDbAsyncIndex#query(QueryEnhancedRequest)}). + *

    + * A valid request object must contain a {@link QueryConditional} condition specifying how DynamoDb + * should match items in the table. + *
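+ * Example: a minimal sketch of querying a mapped table; the {@code Customer} class, {@code customerTable} handle
+ * and key value are hypothetical, and the query call is assumed to return an iterable of pages:
+ * {@code
+ * PageIterable<Customer> results = customerTable.query(QueryEnhancedRequest.builder()
+ *     .queryConditional(QueryConditional.keyEqualTo(k -> k.partitionValue("id-123")))
+ *     .limit(10)
+ *     .build());
+ * results.items().forEach(item -> System.out.println(item));
+ * }
+ *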

    + * All other parameters are optional. + */ +@SdkPublicApi +public final class QueryEnhancedRequest { + + private final QueryConditional queryConditional; + private final Map exclusiveStartKey; + private final Boolean scanIndexForward; + private final Integer limit; + private final Boolean consistentRead; + private final Expression filterExpression; + private final List attributesToProject; + + private QueryEnhancedRequest(Builder builder) { + this.queryConditional = builder.queryConditional; + this.exclusiveStartKey = builder.exclusiveStartKey; + this.scanIndexForward = builder.scanIndexForward; + this.limit = builder.limit; + this.consistentRead = builder.consistentRead; + this.filterExpression = builder.filterExpression; + this.attributesToProject = builder.attributesToProject != null + ? Collections.unmodifiableList(builder.attributesToProject) + : null; + } + + /** + * Creates a newly initialized builder for a request object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns a builder initialized with all existing values on the request object. + */ + public Builder toBuilder() { + return builder().queryConditional(queryConditional) + .exclusiveStartKey(exclusiveStartKey) + .scanIndexForward(scanIndexForward) + .limit(limit) + .consistentRead(consistentRead) + .filterExpression(filterExpression) + .addNestedAttributesToProject(attributesToProject); + } + + /** + * Returns the matching condition of the query. + */ + public QueryConditional queryConditional() { + return queryConditional; + } + + /** + * Returns the value of the exclusive start key set on this request object, or null if it doesn't exist. + */ + public Map exclusiveStartKey() { + return exclusiveStartKey; + } + + /** + * Returns the value of scan index forward, meaning an ascending result sort order, or true if it + * has not been set. + */ + public Boolean scanIndexForward() { + return scanIndexForward; + } + + /** + * Returns the value of limit set on this request object, or null if it doesn't exist. + */ + public Integer limit() { + return limit; + } + + /** + * Returns the value of consistent read, or false if it has not been set. + */ + public Boolean consistentRead() { + return consistentRead; + } + + /** + * Returns the return result filter {@link Expression} set on this request object, or null if it doesn't exist. + */ + public Expression filterExpression() { + return filterExpression; + } + + /** + * Returns the list of projected attributes on this request object, or an null if no projection is specified. + * This is the single list which has Nested and Non Nested attributes to project. + * The Nested Attributes are represented using DOT separator in this List. + * Example : foo.bar is represented as "foo.bar" which is indistinguishable from a non-nested attribute + * with the name "foo.bar". + * Use {@link #nestedAttributesToProject} if you have a use-case that requires discrimination between these two cases. + */ + public List attributesToProject() { + return attributesToProject != null ? attributesToProject.stream() + .map(item -> String.join(".", item.elements())).collect(Collectors.toList()) : null; + } + + /** + * Returns the list of projected attribute names, in the form of {@link NestedAttributeName} objects, + * for this request object, or null if no projection is specified. + * Refer {@link NestedAttributeName} . 
+ */ + public List nestedAttributesToProject() { + return attributesToProject; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + QueryEnhancedRequest query = (QueryEnhancedRequest) o; + + if (queryConditional != null ? ! queryConditional.equals(query.queryConditional) : + query.queryConditional != null) { + return false; + } + if (exclusiveStartKey != null ? ! exclusiveStartKey.equals(query.exclusiveStartKey) : + query.exclusiveStartKey != null) { + return false; + } + if (scanIndexForward != null ? ! scanIndexForward.equals(query.scanIndexForward) : + query.scanIndexForward != null) { + return false; + } + if (limit != null ? ! limit.equals(query.limit) : query.limit != null) { + return false; + } + if (consistentRead != null ? ! consistentRead.equals(query.consistentRead) : query.consistentRead != null) { + return false; + } + if (attributesToProject != null + ? !attributesToProject.equals(query.attributesToProject) : query.attributesToProject != null) { + return false; + } + return filterExpression != null ? filterExpression.equals(query.filterExpression) : query.filterExpression == null; + } + + @Override + public int hashCode() { + int result = queryConditional != null ? queryConditional.hashCode() : 0; + result = 31 * result + (exclusiveStartKey != null ? exclusiveStartKey.hashCode() : 0); + result = 31 * result + (scanIndexForward != null ? scanIndexForward.hashCode() : 0); + result = 31 * result + (limit != null ? limit.hashCode() : 0); + result = 31 * result + (consistentRead != null ? consistentRead.hashCode() : 0); + result = 31 * result + (filterExpression != null ? filterExpression.hashCode() : 0); + result = 31 * result + (attributesToProject != null ? attributesToProject.hashCode() : 0); + return result; + } + + /** + * A builder that is used to create a request with the desired parameters. + *

+ * A valid builder must set the {@link #queryConditional} parameter. Other parameters are optional.
+ */
+    public static final class Builder {
+        private QueryConditional queryConditional;
+        private Map<String, AttributeValue> exclusiveStartKey;
+        private Boolean scanIndexForward;
+        private Integer limit;
+        private Boolean consistentRead;
+        private Expression filterExpression;
+        private List<NestedAttributeName> attributesToProject;
+
+        private Builder() {
+        }
+
+        /**
+         * Determines the matching conditions for this query request. See {@link QueryConditional} for examples
+         * and constraints. Required.
+         *
+         * @param queryConditional the query conditions
+         * @return a builder of this type
+         */
+        public Builder queryConditional(QueryConditional queryConditional) {
+            this.queryConditional = queryConditional;
+            return this;
+        }
+
+        /**
+         * Results are sorted by sort key in ascending order if {@link #scanIndexForward} is true. If it is false, the
+         * order is descending. The default value is true.
+         *
+         * @param scanIndexForward the sort order
+         * @return a builder of this type
+         */
+        public Builder scanIndexForward(Boolean scanIndexForward) {
+            this.scanIndexForward = scanIndexForward;
+            return this;
+        }
+
+        /**
+         * The primary key of the first item that this operation will evaluate. By default, the operation will evaluate
+         * the whole dataset. If used, normally this parameter is populated with the value that was returned for
+         * {@link Page#lastEvaluatedKey()} in the previous operation.
+         *
+         * @param exclusiveStartKey the primary key value where DynamoDb should start to evaluate items
+         * @return a builder of this type
+         */
+        public Builder exclusiveStartKey(Map<String, AttributeValue> exclusiveStartKey) {
+            this.exclusiveStartKey = exclusiveStartKey != null ? new HashMap<>(exclusiveStartKey) : null;
+            return this;
+        }
+
+        /**
+         * Sets a limit on how many items to evaluate in the query. If not set, the operation uses
+         * the maximum values allowed.
+         *

+         * Note: The limit does not refer to the number of items to return, but how many items
+         * the database should evaluate while executing the query. Use limit together with {@link Page#lastEvaluatedKey()}
+         * and {@link #exclusiveStartKey} in subsequent query calls to evaluate limit items per call.
+         *
+         * @param limit the maximum number of items to evaluate
+         * @return a builder of this type
+         */
+        public Builder limit(Integer limit) {
+            this.limit = limit;
+            return this;
+        }
+
+        /**
+         * Determines the read consistency model: If set to true, the operation uses strongly consistent reads; otherwise,
+         * the operation uses eventually consistent reads.
+         *

    + * By default, the value of this property is set to false. + * + * @param consistentRead sets consistency model of the operation to use strong consistency + * @return a builder of this type + */ + public Builder consistentRead(Boolean consistentRead) { + this.consistentRead = consistentRead; + return this; + } + + /** + * Refines the query results by applying the filter expression on the results returned + * from the query and discards items that do not match. See {@link Expression} for examples + * and constraints. + *
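+         * For instance, a minimal sketch (the attribute name "price", the value, and the 'queryRequestBuilder'
+         * variable are assumptions for illustration; the Expression builder methods shown are assumed to match your
+         * SDK version):
+         * <pre>{@code
+         * // Keep only items whose price is below the supplied threshold.
+         * Expression filter = Expression.builder()
+         *                               .expression("price < :max_price")
+         *                               .expressionValues(Collections.singletonMap(":max_price",
+         *                                                                          AttributeValue.builder().n("100").build()))
+         *                               .build();
+         *
+         * // 'queryRequestBuilder' is a hypothetical QueryEnhancedRequest.Builder.
+         * queryRequestBuilder.filterExpression(filter);
+         * }</pre>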

    + * Note: Using the filter expression does not reduce the cost of the query, since it is applied + * after the database has found matching items. + * + * @param filterExpression an expression that filters results of evaluating the query + * @return a builder of this type + */ + public Builder filterExpression(Expression filterExpression) { + this.filterExpression = filterExpression; + return this; + } + + /** + *

    + * Sets a collection of the attribute names to be retrieved from the database. These attributes can include + * scalars, sets, or elements of a JSON document. + *

    + *

+         * If no attribute names are specified, then all attributes will be returned. If any of the requested attributes
+         * are not found, they will not appear in the result.
+         * If any attributes are nested, the addNestedAttributesToProject API should be used instead.
+         *

    + *

    + * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *
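+         * A short, hypothetical sketch of the difference between plain and nested projections follows. The attribute
+         * names and the 'builder' variable are assumptions, and the multi-element NestedAttributeName.create overload
+         * is assumed to be available in your SDK version:
+         * <pre>{@code
+         * // Projects a top-level attribute literally named "foo.bar".
+         * builder.attributesToProject("foo.bar");
+         *
+         * // Projects the attribute "bar" nested inside the attribute "foo".
+         * builder.addNestedAttributesToProject(NestedAttributeName.create("foo", "bar"));
+         * }</pre>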

+         *
+         * @param attributesToProject A collection of the attribute names to be retrieved from the database.
+         * @return Returns a reference to this object so that method calls can be chained together.
+         */
+        public Builder attributesToProject(Collection<String> attributesToProject) {
+            if (this.attributesToProject != null) {
+                this.attributesToProject.clear();
+            }
+            if (attributesToProject != null) {
+                addNestedAttributesToProject(new ArrayList<>(attributesToProject).stream()
+                                                 .map(NestedAttributeName::create).collect(Collectors.toList()));
+            }
+            return this;
+        }
+
+        /**
+         *

    + * Sets one or more attribute names to be retrieved from the database. These attributes can include + * scalars, sets, or elements of a JSON document. + *

    + *

+         * If no attribute names are specified, then all attributes will be returned. If any of the requested attributes
+         * are not found, they will not appear in the result.
+         * If any attributes are nested, the addNestedAttributesToProject API should be used instead.
+         *

    + *

    + * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *

    + * + * @param attributesToProject One or more attributes names to be retrieved from the database. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder attributesToProject(String... attributesToProject) { + return attributesToProject(Arrays.asList(attributesToProject)); + } + + /** + *

+         * Adds a single attribute name to be retrieved from the database. This attribute can include
+         * scalars, sets, or elements of a JSON document.
+         * If the attribute is nested, the addNestedAttributesToProject API should be used instead.
+         *

    + * + * @param attributeToProject An additional single attribute name to be retrieved from the database. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addAttributeToProject(String attributeToProject) { + if (attributeToProject != null) { + addNestedAttributesToProject(NestedAttributeName.create(attributeToProject)); + } + return this; + } + + /** + *

    + * Adds a collection of the NestedAttributeNames to be retrieved from the database. These attributes can include + * scalars, sets, or elements of a JSON document. + * This method takes arguments in form of NestedAttributeName which supports representing nested attributes. + * The NestedAttributeNames is specially created for projecting Nested Attribute names. + * The DOT characters are not recognized as nesting separator by DDB thus for Enhanced request NestedAttributeNames + * should be created to project Nested Attribute name at various levels. + * This method will add new attributes to project to the existing list of attributes to project stored by this builder. + * + * @param nestedAttributeNames A collection of the attributes names to be retrieved from the database. + * Nested levels of Attributes can be added using NestedAttributeName class. + * Refer {@link NestedAttributeName}. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addNestedAttributesToProject(Collection nestedAttributeNames) { + if (nestedAttributeNames != null) { + Validate.noNullElements(nestedAttributeNames, + "nestedAttributeNames list must not contain null elements"); + if (attributesToProject == null) { + this.attributesToProject = new ArrayList<>(nestedAttributeNames); + } else { + this.attributesToProject.addAll(nestedAttributeNames); + } + } + return this; + } + + /** + *

+         * Adds one or more attribute names to be retrieved from the database. These attributes can include
+         * scalars, sets, or elements of a JSON document.
+         * This method takes arguments in the form of {@link NestedAttributeName}, which supports representing
+         * nested attributes. DynamoDb does not treat the DOT character as a nesting separator, so for enhanced
+         * requests a NestedAttributeName should be created for each level of nesting that needs to be projected.
+         * This method adds the given attributes to the list of attributes to project already stored by this builder.
+         *
+         * @param nestedAttributeNames One or more attribute names to be retrieved from the database.
+         *                             Nested levels of attributes can be added using the NestedAttributeName class.
+         *                             Refer {@link NestedAttributeName}.
+         * @return Returns a reference to this object so that method calls can be chained together.
+         */
+        public Builder addNestedAttributesToProject(NestedAttributeName... nestedAttributeNames) {
+            return addNestedAttributesToProject(Arrays.asList(nestedAttributeNames));
+        }
+
+        /**
+         *

    + * Adds a single NestedAttributeName to be retrieved from the database. This attribute can include + * scalars, sets, or elements of a JSON document. + * This method takes arguments in form of NestedAttributeName which supports representing nested attributes. + * This method will add new attributes to project to the existing list of attributes to project stored by this builder. + *

    + * + * @param nestedAttributeName An additional single attribute name to be retrieved from the database. + * Refer {@link NestedAttributeName}. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addNestedAttributeToProject(NestedAttributeName nestedAttributeName) { + if (nestedAttributeName != null) { + addNestedAttributesToProject(Arrays.asList(nestedAttributeName)); + } + return this; + } + + public QueryEnhancedRequest build() { + return new QueryEnhancedRequest(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ReadBatch.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ReadBatch.java new file mode 100644 index 000000000000..172af1548913 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ReadBatch.java @@ -0,0 +1,250 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.KeysAndAttributes; + +/** + * Defines a collection of primary keys for items in a table, stored as {@link KeysAndAttributes}, and + * used for the batchGetItem() operation (such as + * {@link DynamoDbEnhancedClient#batchGetItem(BatchGetItemEnhancedRequest)}) as part of a + * {@link BatchGetItemEnhancedRequest}. + *
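+ * A hypothetical sketch (the item class, table, and key value are assumptions for the example):
+ * <pre>{@code
+ * // 'customerTable' is a hypothetical MappedTableResource<Customer> obtained from the enhanced client.
+ * ReadBatch batch = ReadBatch.builder(Customer.class)
+ *                            .mappedTableResource(customerTable)
+ *                            .addGetItem(Key.builder().partitionValue("id-value").build())
+ *                            .build();
+ * }</pre>
+ *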

    + * A valid request object should contain one or more primary keys. + */ +@SdkPublicApi +public final class ReadBatch { + private final String tableName; + private final KeysAndAttributes keysAndAttributes; + + private ReadBatch(BuilderImpl builder) { + this.tableName = builder.mappedTableResource != null ? builder.mappedTableResource.tableName() : null; + this.keysAndAttributes = generateKeysAndAttributes(builder.requests, builder.mappedTableResource); + } + + /** + * Creates a newly initialized builder for a read batch. + * + * @param itemClass the class that items in this table map to + * @param The type of the modelled object, corresponding to itemClass + * @return a ReadBatch builder + */ + public static Builder builder(Class itemClass) { + return new BuilderImpl<>(); + } + + /** + * Returns the table name associated with this batch. + */ + public String tableName() { + return tableName; + } + + /** + * Returns the collection of keys and attributes, see {@link KeysAndAttributes}, in this read batch. + */ + public KeysAndAttributes keysAndAttributes() { + return keysAndAttributes; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ReadBatch readBatch = (ReadBatch) o; + + if (tableName != null ? !tableName.equals(readBatch.tableName) : + readBatch.tableName != null) { + + return false; + } + return keysAndAttributes != null ? + keysAndAttributes.equals(readBatch.keysAndAttributes) : + readBatch.keysAndAttributes == null; + } + + @Override + public int hashCode() { + int result = tableName != null ? tableName.hashCode() : 0; + result = 31 * result + (keysAndAttributes != null ? keysAndAttributes.hashCode() : 0); + return result; + } + + /** + * A builder that is used to create a request with the desired parameters. + *

    + * A valid builder must define a {@link MappedTableResource} and add at least one + * {@link GetItemEnhancedRequest}. + * + * @param the type that items in this table map to + */ + public interface Builder { + + /** + * Sets the mapped table resource (table) that the items in this read batch should come from. + * + * @param mappedTableResource the table reference + * @return a builder of this type + */ + Builder mappedTableResource(MappedTableResource mappedTableResource); + + /** + * Adds a {@link GetItemEnhancedRequest} with a primary {@link Key} to the builder. + * + * @param request A {@link GetItemEnhancedRequest} + * @return a builder of this type + */ + Builder addGetItem(GetItemEnhancedRequest request); + + /** + * Adds a {@link GetItemEnhancedRequest} with a primary {@link Key} to the builder by accepting a consumer of + * {@link GetItemEnhancedRequest.Builder}. + * + * @param requestConsumer a {@link Consumer} of {@link GetItemEnhancedRequest} + * @return a builder of this type + */ + Builder addGetItem(Consumer requestConsumer); + + /** + * Adds a GetItem request with a primary {@link Key} to the builder. + * + * @param key A {@link Key} to match the record retrieved from the database. + * @return a builder of this type + */ + Builder addGetItem(Key key); + + /** + * Adds a GetItem request to the builder. + * + * @param keyItem an item that will have its key fields used to match a record to retrieve from the database. + * @return a builder of this type + */ + Builder addGetItem(T keyItem); + + ReadBatch build(); + } + + private static KeysAndAttributes generateKeysAndAttributes(List readRequests, + MappedTableResource mappedTableResource) { + if (readRequests == null || readRequests.isEmpty()) { + return null; + } + + Boolean firstRecordConsistentRead = validateAndGetConsistentRead(readRequests); + + List> keys = + readRequests.stream() + .map(GetItemEnhancedRequest::key) + .map(key -> key.keyMap(mappedTableResource.tableSchema(), TableMetadata.primaryIndexName())) + .collect(Collectors.toList()); + + return KeysAndAttributes.builder() + .keys(keys) + .consistentRead(firstRecordConsistentRead) + .build(); + + } + + private static Boolean validateAndGetConsistentRead(List readRequests) { + Boolean firstRecordConsistentRead = null; + boolean isFirstRecord = true; + + for (GetItemEnhancedRequest request : readRequests) { + if (isFirstRecord) { + isFirstRecord = false; + firstRecordConsistentRead = request.consistentRead(); + } else { + if (!compareNullableBooleans(firstRecordConsistentRead, request.consistentRead())) { + throw new IllegalArgumentException("All batchable read requests for the same " + + "table must have the same 'consistentRead' " + + "setting."); + } + } + } + return firstRecordConsistentRead; + } + + private static boolean compareNullableBooleans(Boolean one, Boolean two) { + if (one == null && two == null) { + return true; + } + + if (one != null) { + return one.equals(two); + } else { + return false; + } + } + + private static final class BuilderImpl implements Builder { + private MappedTableResource mappedTableResource; + private List requests = new ArrayList<>(); + + private BuilderImpl() { + } + + @Override + public Builder mappedTableResource(MappedTableResource mappedTableResource) { + this.mappedTableResource = mappedTableResource; + return this; + } + + @Override + public Builder addGetItem(GetItemEnhancedRequest request) { + requests.add(request); + return this; + } + + @Override + public Builder addGetItem(Consumer requestConsumer) { + 
GetItemEnhancedRequest.Builder builder = GetItemEnhancedRequest.builder();
+            requestConsumer.accept(builder);
+            return addGetItem(builder.build());
+        }
+
+        @Override
+        public Builder addGetItem(Key key) {
+            return addGetItem(r -> r.key(key));
+        }
+
+        @Override
+        public Builder addGetItem(T keyItem) {
+            return addGetItem(this.mappedTableResource.keyFrom(keyItem));
+        }
+
+        @Override
+        public ReadBatch build() {
+            return new ReadBatch(this);
+        }
+
+    }
+}
diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequest.java
new file mode 100644
index 000000000000..bc66597422a3
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequest.java
@@ -0,0 +1,387 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb.model;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import software.amazon.awssdk.annotations.SdkPublicApi;
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable;
+import software.amazon.awssdk.enhanced.dynamodb.Expression;
+import software.amazon.awssdk.enhanced.dynamodb.NestedAttributeName;
+import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
+import software.amazon.awssdk.utils.Validate;
+
+
+/**
+ * Defines parameters used when scanning a DynamoDb table or index using the scan() operation (such as
+ * {@link DynamoDbTable#scan(ScanEnhancedRequest)}).
+ *
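+ * A brief, hypothetical sketch follows (the 'customerTable' shown is assumed to have been created elsewhere from a
+ * DynamoDbEnhancedClient and a TableSchema for the item class):
+ * <pre>{@code
+ * ScanEnhancedRequest request = ScanEnhancedRequest.builder()
+ *                                                  .consistentRead(true)
+ *                                                  .limit(50)
+ *                                                  .build();
+ *
+ * // 'customerTable' is a hypothetical DynamoDbTable<Customer>.
+ * customerTable.scan(request);
+ * }</pre>
+ *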

    + * All parameters are optional. + */ +@SdkPublicApi +public final class ScanEnhancedRequest { + + private final Map exclusiveStartKey; + private final Integer limit; + private final Boolean consistentRead; + private final Expression filterExpression; + private final List attributesToProject; + + private ScanEnhancedRequest(Builder builder) { + this.exclusiveStartKey = builder.exclusiveStartKey; + this.limit = builder.limit; + this.consistentRead = builder.consistentRead; + this.filterExpression = builder.filterExpression; + this.attributesToProject = builder.attributesToProject != null + ? Collections.unmodifiableList(builder.attributesToProject) + : null; + } + + /** + * Creates a newly initialized builder for a request object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns a builder initialized with all existing values on the request object. + */ + public Builder toBuilder() { + return builder().exclusiveStartKey(exclusiveStartKey) + .limit(limit) + .consistentRead(consistentRead) + .filterExpression(filterExpression) + .addNestedAttributesToProject(attributesToProject); + } + + /** + * Returns the value of the exclusive start key set on this request object, or null if it doesn't exist. + */ + public Map exclusiveStartKey() { + return exclusiveStartKey; + } + + /** + * Returns the value of limit set on this request object, or null if it doesn't exist. + */ + public Integer limit() { + return limit; + } + + /** + * Returns the value of consistent read, or false if it has not been set. + */ + public Boolean consistentRead() { + return consistentRead; + } + + /** + * Returns the return result filter {@link Expression} set on this request object, or null if it doesn't exist. + */ + public Expression filterExpression() { + return filterExpression; + } + + /** + * Returns the list of projected attributes on this request object, or an null if no projection is specified. + * This is the single list which has Nested and Non Nested attributes to project. + * The Nested Attributes are represented using DOT separator in this List. + * Example : foo.bar is represented as "foo.bar" which is indistinguishable from a non-nested attribute + * with the name "foo.bar". + * Use {@link #nestedAttributesToProject} if you have a use-case that requires discrimination between these two cases. + */ + public List attributesToProject() { + return attributesToProject != null ? + attributesToProject.stream().map(item -> String.join(".", item.elements())) + .collect(Collectors.toList()) : null; + } + + /** + * Returns the list of projected attribute names, in the form of {@link NestedAttributeName} objects, + * for this request object, or null if no projection is specified. + * Refer {@link NestedAttributeName} + */ + public List nestedAttributesToProject() { + return attributesToProject; + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ScanEnhancedRequest scan = (ScanEnhancedRequest) o; + + if (exclusiveStartKey != null ? ! exclusiveStartKey.equals(scan.exclusiveStartKey) : + scan.exclusiveStartKey != null) { + return false; + } + if (limit != null ? ! limit.equals(scan.limit) : scan.limit != null) { + return false; + } + if (consistentRead != null ? ! consistentRead.equals(scan.consistentRead) : scan.consistentRead != null) { + return false; + } + if (attributesToProject != null + ? 
!attributesToProject.equals(scan.attributesToProject) : scan.attributesToProject != null) { + return false; + } + return filterExpression != null ? filterExpression.equals(scan.filterExpression) : scan.filterExpression == null; + } + + @Override + public int hashCode() { + int result = exclusiveStartKey != null ? exclusiveStartKey.hashCode() : 0; + result = 31 * result + (limit != null ? limit.hashCode() : 0); + result = 31 * result + (consistentRead != null ? consistentRead.hashCode() : 0); + result = 31 * result + (filterExpression != null ? filterExpression.hashCode() : 0); + result = 31 * result + (attributesToProject != null ? attributesToProject.hashCode() : 0); + return result; + } + + /** + * A builder that is used to create a request with the desired parameters. + */ + public static final class Builder { + private Map exclusiveStartKey; + private Integer limit; + private Boolean consistentRead; + private Expression filterExpression; + private List attributesToProject; + + private Builder() { + } + + /** + * The primary key of the first item that this operation will evaluate. By default, the operation will evaluate + * the whole dataset. If used, normally this parameter is populated with the value that was returned for + * {@link Page#lastEvaluatedKey()} in the previous operation. + * + * @param exclusiveStartKey the primary key value where DynamoDb should start to evaluate items + * @return a builder of this type + */ + public Builder exclusiveStartKey(Map exclusiveStartKey) { + this.exclusiveStartKey = exclusiveStartKey != null ? new HashMap<>(exclusiveStartKey) : null; + return this; + } + + /** + * Sets a limit on how many items to evaluate in the scan. If not set, the operation uses + * the maximum values allowed. + *

+         * Note: The limit does not refer to the number of items to return, but how many items
+         * the database should evaluate while executing the scan. Use limit together with {@link Page#lastEvaluatedKey()}
+         * and {@link #exclusiveStartKey} in subsequent scan calls to evaluate limit items per call.
+         *
+         * @param limit the maximum number of items to evaluate
+         * @return a builder of this type
+         */
+        public Builder limit(Integer limit) {
+            this.limit = limit;
+            return this;
+        }
+
+        /**
+         * Determines the read consistency model: If set to true, the operation uses strongly consistent reads; otherwise,
+         * the operation uses eventually consistent reads.
+         *

    + * By default, the value of this property is set to false. + * + * @param consistentRead sets consistency model of the operation to use strong consistency if true + * @return a builder of this type + */ + public Builder consistentRead(Boolean consistentRead) { + this.consistentRead = consistentRead; + return this; + } + + /** + * Refines the scan results by applying the filter expression on the results returned + * from the scan and discards items that do not match. See {@link Expression} for examples + * and constraints. + *

    + * Note: Using the filter expression does not reduce the cost of the scan, since it is applied + * after the database has found matching items. + * + * @param filterExpression an expression that filters results of evaluating the scan + * @return a builder of this type + */ + public Builder filterExpression(Expression filterExpression) { + this.filterExpression = filterExpression; + return this; + } + + /** + *

    + * Sets a collection of the attribute names to be retrieved from the database. These attributes can include + * scalars, sets, or elements of a JSON document. + *

    + *

    + * If no attribute names are specified, then all attributes will be returned. If any of the requested attributes + * are not found, they will not appear in the result. + *

    + *

    + * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *
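+         * For example (the attribute names and the 'builder' variable are assumptions for illustration):
+         * <pre>{@code
+         * // Only the "id" and "name" attributes of each item will be returned by the scan.
+         * builder.attributesToProject(Arrays.asList("id", "name"));
+         * }</pre>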

+         *
+         * @param attributesToProject A collection of the attribute names to be retrieved from the database.
+         * @return Returns a reference to this object so that method calls can be chained together.
+         */
+        public Builder attributesToProject(Collection<String> attributesToProject) {
+            if (this.attributesToProject != null) {
+                this.attributesToProject.clear();
+            }
+            if (attributesToProject != null) {
+                addNestedAttributesToProject(new ArrayList<>(attributesToProject).stream()
+                                                 .map(NestedAttributeName::create).collect(Collectors.toList()));
+            }
+            return this;
+        }
+
+        /**
+         *

    + * Sets one or more attribute names to be retrieved from the database. These attributes can include + * scalars, sets, or elements of a JSON document. + *

    + *

    + * If no attribute names are specified, then all attributes will be returned. If any of the requested attributes + * are not found, they will not appear in the result. + *

    + *

    + * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *

    + * + * @param attributesToProject One or more attributes names to be retrieved from the database. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder attributesToProject(String... attributesToProject) { + return attributesToProject(Arrays.asList(attributesToProject)); + } + + /** + *

    + * Adds a single attribute name to be retrieved from the database. This attribute can include + * scalars, sets, or elements of a JSON document. + *

    + *

    + * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *

    + * + * @param attributeToProject An additional single attribute name to be retrieved from the database. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addAttributeToProject(String attributeToProject) { + if (attributeToProject != null) { + addNestedAttributesToProject(NestedAttributeName.create(attributeToProject)); + } + return this; + } + + /** + *

    + * Adds a collection of the NestedAttributeNames to be retrieved from the database. These attributes can include + * scalars, sets, or elements of a JSON document. + * This method takes arguments in form of NestedAttributeName which supports representing nested attributes. + * The NestedAttributeNames is specially created for projecting Nested Attribute names. + * The DOT characters are not recognized as nesting separator by DDB thus for Enhanced request NestedAttributeNames + * should be created to project Nested Attribute name at various levels. + * This method will add new attributes to project to the existing list of attributes to project stored by this builder. + * + * @param nestedAttributeNames A collection of the attributes names to be retrieved from the database. + * Nested levels of Attributes can be added using NestedAttributeName class. + * Refer {@link NestedAttributeName}. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addNestedAttributesToProject(Collection nestedAttributeNames) { + if (nestedAttributeNames != null) { + Validate.noNullElements(nestedAttributeNames, + "nestedAttributeNames list must not contain null elements"); + if (attributesToProject == null) { + this.attributesToProject = new ArrayList<>(nestedAttributeNames); + } else { + this.attributesToProject.addAll(nestedAttributeNames); + } + } + return this; + } + + /** + *

+         * Adds one or more attribute names to be retrieved from the database. These attributes can include
+         * scalars, sets, or elements of a JSON document.
+         * This method takes arguments in the form of {@link NestedAttributeName}, which supports representing
+         * nested attributes. DynamoDb does not treat the DOT character as a nesting separator, so for enhanced
+         * requests a NestedAttributeName should be created for each level of nesting that needs to be projected.
+         * This method adds the given attributes to the list of attributes to project already stored by this builder.
+         *
+         * @param nestedAttributeNames One or more attribute names to be retrieved from the database.
+         *                             Nested levels of attributes can be added using the NestedAttributeName class.
+         *                             Refer {@link NestedAttributeName}.
+         * @return Returns a reference to this object so that method calls can be chained together.
+         */
+        public Builder addNestedAttributesToProject(NestedAttributeName... nestedAttributeNames) {
+            addNestedAttributesToProject(Arrays.asList(nestedAttributeNames));
+            return this;
+        }
+
+        /**
+         *

    + * Adds a single NestedAttributeName to be retrieved from the database. This attribute can include + * scalars, sets, or elements of a JSON document. + * This method takes arguments in form of NestedAttributeName which supports representing nested attributes. + *

    + *

    + * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *

    + * + * @param nestedAttributeName An additional single attribute name to be retrieved from the database. + * Refer {@link NestedAttributeName}. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addNestedAttributeToProject(NestedAttributeName nestedAttributeName) { + if (nestedAttributeName != null) { + addNestedAttributesToProject(Arrays.asList(nestedAttributeName)); + } + return this; + } + + public ScanEnhancedRequest build() { + return new ScanEnhancedRequest(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactGetItemsEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactGetItemsEnhancedRequest.java new file mode 100644 index 000000000000..a8e2f22bbde2 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactGetItemsEnhancedRequest.java @@ -0,0 +1,144 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.getItemsFromSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.GetItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactableReadOperation; +import software.amazon.awssdk.services.dynamodb.model.TransactGetItem; + +/** + * Defines parameters used for the transaction operation transactGetItems() (such as + * {@link DynamoDbEnhancedClient#transactGetItems(TransactGetItemsEnhancedRequest)}). + *

+ * A request contains references to the primary keys for the items this operation will search for.
+ * It's populated with one or more {@link GetItemEnhancedRequest}, each associated with the table where the item is located.
+ * On initialization, these requests are transformed into {@link TransactGetItem} and stored in the request.
+ */
+@SdkPublicApi
+public final class TransactGetItemsEnhancedRequest {
+
+    private final List<TransactGetItem> transactGetItems;
+
+    private TransactGetItemsEnhancedRequest(Builder builder) {
+        this.transactGetItems = getItemsFromSupplier(builder.itemSupplierList);
+    }
+
+    /**
+     * Creates a newly initialized builder for a request object.
+     */
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    /**
+     * Returns the list of {@link TransactGetItem} that represents all lookup keys in the request.
+     */
+    public List<TransactGetItem> transactGetItems() {
+        return transactGetItems;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        TransactGetItemsEnhancedRequest that = (TransactGetItemsEnhancedRequest) o;
+
+        return transactGetItems != null ? transactGetItems.equals(that.transactGetItems) : that.transactGetItems == null;
+    }
+
+    @Override
+    public int hashCode() {
+        return transactGetItems != null ? transactGetItems.hashCode() : 0;
+    }
+
+    /**
+     * A builder that is used to create a transaction object with the desired parameters.
+     *
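+     * A hypothetical sketch of assembling a request across two tables (the tables and key values are assumptions
+     * for the example):
+     * <pre>{@code
+     * TransactGetItemsEnhancedRequest request =
+     *     TransactGetItemsEnhancedRequest.builder()
+     *                                    .addGetItem(customerTable, Key.builder().partitionValue("customer-1").build())
+     *                                    .addGetItem(orderTable, Key.builder().partitionValue("order-1").build())
+     *                                    .build();
+     * }</pre>
+     *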

    + * A valid builder should contain at least one {@link GetItemEnhancedRequest} added through addGetItem(). + */ + public static final class Builder { + private List> itemSupplierList = new ArrayList<>(); + + private Builder() { + } + + /** + * Adds a primary lookup key and it's associated table to the transaction. + * + * @param mappedTableResource the table where the key is located + * @param request A {@link GetItemEnhancedRequest} + * @return a builder of this type + */ + public Builder addGetItem(MappedTableResource mappedTableResource, GetItemEnhancedRequest request) { + itemSupplierList.add(() -> generateTransactWriteItem(mappedTableResource, GetItemOperation.create(request))); + return this; + } + + /** + * Adds a primary lookup key and it's associated table to the transaction. + * + * @param mappedTableResource the table where the key is located + * @param key the primary key of an item to retrieve as part of the transaction + * @return a builder of this type + */ + public Builder addGetItem(MappedTableResource mappedTableResource, Key key) { + return addGetItem(mappedTableResource, GetItemEnhancedRequest.builder().key(key).build()); + } + + /** + * Adds a primary lookup key and it's associated table to the transaction. + * + * @param mappedTableResource the table where the key is located + * @param keyItem an item that will have its key fields used to match a record to retrieve from the database + * @param the type of modelled objects in the table + * @return a builder of this type + */ + public Builder addGetItem(MappedTableResource mappedTableResource, + T keyItem) { + return addGetItem(mappedTableResource, mappedTableResource.keyFrom(keyItem)); + } + + /** + * Builds a {@link TransactGetItemsEnhancedRequest} from the values stored in this builder. + */ + public TransactGetItemsEnhancedRequest build() { + return new TransactGetItemsEnhancedRequest(this); + } + + private TransactGetItem generateTransactWriteItem(MappedTableResource mappedTableResource, + TransactableReadOperation generator) { + return generator.generateTransactGetItem(mappedTableResource.tableSchema(), + DefaultOperationContext.create(mappedTableResource.tableName()), + mappedTableResource.mapperExtension()); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequest.java new file mode 100644 index 000000000000..55298789c04e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequest.java @@ -0,0 +1,303 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.getItemsFromSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.PutItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactableWriteOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.UpdateItemOperation; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; + +/** + * Defines parameters used for the transaction operation transactWriteItems() (such as + * {@link DynamoDbEnhancedClient#transactWriteItems(TransactWriteItemsEnhancedRequest)}). + *

+ * A request contains parameters for the different actions available in the operation:
+ * <ul>
+ *     <li>Write/Update items through put and update actions</li>
+ *     <li>Delete items</li>
+ *     <li>Use a condition check</li>
+ * </ul>
+ * It's populated with one or more low-level requests, such as {@link PutItemEnhancedRequest}, and each low-level action
+ * request is associated with the table where the action should be applied.
+ * On initialization, these requests are transformed into {@link TransactWriteItem} and stored in the request.
+ */
+@SdkPublicApi
+public final class TransactWriteItemsEnhancedRequest {
+
+    private final List<TransactWriteItem> transactWriteItems;
+
+    private final String clientRequestToken;
+
+    private TransactWriteItemsEnhancedRequest(Builder builder) {
+        this.transactWriteItems = getItemsFromSupplier(builder.itemSupplierList);
+        this.clientRequestToken = builder.clientRequestToken;
+    }
+
+    /**
+     * Creates a newly initialized builder for a request object.
+     */
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    /**
+     *

    + * Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, meaning + * that multiple identical calls have the same effect as one single call. + *

    + *

    + * A client request token is valid for 10 minutes after the first request that uses it is completed. After 10 + * minutes, any request with the same client token is treated as a new request. Do not resubmit the same request + * with the same client token for more than 10 minutes, or the result might not be idempotent. + *

    + *

    + * If you submit a request with the same client token but a change in other parameters within the 10-minute + * idempotency window, DynamoDB returns an IdempotentParameterMismatch exception. + *
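+ * For instance, a caller might supply a random UUID as the token (illustrative only; 'builder' is a hypothetical
+ * TransactWriteItemsEnhancedRequest.Builder):
+ * <pre>{@code
+ * // Any unique string works; java.util.UUID is a convenient source.
+ * builder.clientRequestToken(UUID.randomUUID().toString());
+ * }</pre>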

    + */ + + public String clientRequestToken() { + return clientRequestToken; + } + + /** + * Returns the list of {@link TransactWriteItem} that represents all actions in the request. + */ + public List transactWriteItems() { + return transactWriteItems; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TransactWriteItemsEnhancedRequest that = (TransactWriteItemsEnhancedRequest) o; + + return transactWriteItems != null ? transactWriteItems.equals(that.transactWriteItems) : that.transactWriteItems == null; + } + + @Override + public int hashCode() { + return transactWriteItems != null ? transactWriteItems.hashCode() : 0; + } + + /** + * A builder that is used to create a transaction object with the desired parameters. + *
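+     * A hypothetical sketch combining several actions (the tables, items, and keys are assumptions for the example):
+     * <pre>{@code
+     * TransactWriteItemsEnhancedRequest request =
+     *     TransactWriteItemsEnhancedRequest.builder()
+     *                                      .addPutItem(customerTable, newCustomer)
+     *                                      .addDeleteItem(orderTable, Key.builder().partitionValue("order-1").build())
+     *                                      .build();
+     * }</pre>
+     *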

    + * A valid builder should contain at least one low-level request such as {@link DeleteItemEnhancedRequest}. + */ + public static final class Builder { + private List> itemSupplierList = new ArrayList<>(); + + private String clientRequestToken; + + private Builder() { + } + + /** + * Adds a condition check for a primary key in the associated table to the transaction. + *

    + * Note: The condition check should be applied to an item that is not modified by another action in the + * same transaction. See {@link ConditionCheck} for more information on how to build a condition check, and the + * DynamoDb TransactWriteItems documentation for more information on how a condition check affects the transaction. + * + * @param mappedTableResource the table on which to apply the condition check + * @param request A {@link ConditionCheck} definition + * @param the type of modelled objects in the table + * @return a builder of this type + */ + public Builder addConditionCheck(MappedTableResource mappedTableResource, ConditionCheck request) { + itemSupplierList.add(() -> generateTransactWriteItem(mappedTableResource, request)); + return this; + } + + /** + * Adds a condition check for a primary key in the associated table to the transaction by accepting a consumer + * of {@link ConditionCheck.Builder}. + *

    + * Note: The condition check should be applied to an item that is not modified by another action in the + * same transaction. See {@link ConditionCheck} for more information on how to build a condition check, and the + * DynamoDb TransactWriteItems documentation for more information on how a condition check affects the transaction. + * + * @param mappedTableResource the table on which to apply the condition check + * @param requestConsumer a {@link Consumer} of {@link DeleteItemEnhancedRequest} + * @param the type of modelled objects in the table + * @return a builder of this type + */ + public Builder addConditionCheck(MappedTableResource mappedTableResource, + Consumer requestConsumer) { + ConditionCheck.Builder builder = ConditionCheck.builder(); + requestConsumer.accept(builder); + return addConditionCheck(mappedTableResource, builder.build()); + } + + /** + * Adds a primary lookup key for the item to delete, and it's associated table, to the transaction. For more information + * on the delete action, see the low-level operation description in for instance + * {@link DynamoDbTable#deleteItem(DeleteItemEnhancedRequest)} and how to construct the low-level request in + * {@link DeleteItemEnhancedRequest}. + * + * @param mappedTableResource the table where the key is located + * @param request A {@link DeleteItemEnhancedRequest} + * @param the type of modelled objects in the table + * @return a builder of this type + */ + public Builder addDeleteItem(MappedTableResource mappedTableResource, DeleteItemEnhancedRequest request) { + itemSupplierList.add(() -> generateTransactWriteItem(mappedTableResource, DeleteItemOperation.create(request))); + return this; + } + + /** + * Adds a primary lookup key for the item to delete, and it's associated table, to the transaction. For more + * information on the delete action, see the low-level operation description in for instance + * {@link DynamoDbTable#deleteItem(DeleteItemEnhancedRequest)}. + * + * @param mappedTableResource the table where the key is located + * @param key a {@link Key} that identifies the record to be deleted as part of the transaction. + * @param the type of modelled objects in the table + * @return a builder of this type + */ + public Builder addDeleteItem(MappedTableResource mappedTableResource, Key key) { + return addDeleteItem(mappedTableResource, DeleteItemEnhancedRequest.builder().key(key).build()); + } + + /** + * Adds a primary lookup key for the item to delete, and it's associated table, to the transaction. For more + * information on the delete action, see the low-level operation description in for instance + * {@link DynamoDbTable#deleteItem(DeleteItemEnhancedRequest)}. + * + * @param mappedTableResource the table where the key is located + * @param keyItem an item that will have its key fields used to match a record to retrieve from the database + * @param the type of modelled objects in the table + * @return a builder of this type + */ + public Builder addDeleteItem(MappedTableResource mappedTableResource, T keyItem) { + return addDeleteItem(mappedTableResource, mappedTableResource.keyFrom(keyItem)); + } + + /** + * Adds an item to be written, and it's associated table, to the transaction. For more information on the put action, + * see the low-level operation description in for instance {@link DynamoDbTable#putItem(PutItemEnhancedRequest)} + * and how to construct the low-level request in {@link PutItemEnhancedRequest}. 
+ * + * @param mappedTableResource the table to write the item to + * @param request A {@link PutItemEnhancedRequest} + * @param the type of modelled objects in the table + * @return a builder of this type + */ + public Builder addPutItem(MappedTableResource mappedTableResource, PutItemEnhancedRequest request) { + itemSupplierList.add(() -> generateTransactWriteItem(mappedTableResource, PutItemOperation.create(request))); + return this; + } + + /** + * Adds an item to be written, and it's associated table, to the transaction. For more information on the put + * action, see the low-level operation description in for instance + * {@link DynamoDbTable#putItem(PutItemEnhancedRequest)}. + * + * @param mappedTableResource the table to write the item to + * @param item the item to be inserted or overwritten in the database + * @param the type of modelled objects in the table + * @return a builder of this type + */ + public Builder addPutItem(MappedTableResource mappedTableResource, T item) { + return addPutItem( + mappedTableResource, + PutItemEnhancedRequest.builder(mappedTableResource.tableSchema().itemType().rawClass()) + .item(item) + .build()); + } + + /** + * Adds an item to be updated, and it's associated table, to the transaction. For more information on the update + * action, see the low-level operation description in for instance + * {@link DynamoDbTable#updateItem(UpdateItemEnhancedRequest)} and how to construct the low-level request in + * {@link UpdateItemEnhancedRequest}. + * + * @param mappedTableResource the table to write the item to + * @param request A {@link UpdateItemEnhancedRequest} + * @param the type of modelled objects in the table + * @return a builder of this type + */ + public Builder addUpdateItem(MappedTableResource mappedTableResource, + UpdateItemEnhancedRequest request) { + itemSupplierList.add(() -> generateTransactWriteItem(mappedTableResource, + UpdateItemOperation.create(request))); + return this; + } + + /** + * Adds an item to be updated, and it's associated table, to the transaction. For more information on the update + * action, see the low-level operation description in for instance + * {@link DynamoDbTable#updateItem(UpdateItemEnhancedRequest)}. + * + * @param mappedTableResource the table to write the item to + * @param item an item to update or insert into the database as part of this transaction + * @param the type of modelled objects in the table + * @return a builder of this type + */ + public Builder addUpdateItem(MappedTableResource mappedTableResource, T item) { + return addUpdateItem( + mappedTableResource, + UpdateItemEnhancedRequest.builder(mappedTableResource.tableSchema().itemType().rawClass()) + .item(item) + .build()); + } + + /** + * Sets the clientRequestToken in this builder. + * + * @param clientRequestToken the clientRequestToken going to be used for build + * @return a builder of this type + */ + public Builder clientRequestToken(String clientRequestToken) { + this.clientRequestToken = clientRequestToken; + return this; + } + + /** + * Builds a {@link TransactWriteItemsEnhancedRequest} from the values stored in this builder. 
+ */ + public TransactWriteItemsEnhancedRequest build() { + return new TransactWriteItemsEnhancedRequest(this); + } + + private TransactWriteItem generateTransactWriteItem(MappedTableResource mappedTableResource, + TransactableWriteOperation generator) { + return generator.generateTransactWriteItem(mappedTableResource.tableSchema(), + DefaultOperationContext.create(mappedTableResource.tableName()), + mappedTableResource.mapperExtension()); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/UpdateItemEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/UpdateItemEnhancedRequest.java new file mode 100644 index 000000000000..968425647220 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/UpdateItemEnhancedRequest.java @@ -0,0 +1,165 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; + +/** + * Defines parameters used to update an item to a DynamoDb table using the updateItem() operation (such as + * {@link DynamoDbTable#updateItem(UpdateItemEnhancedRequest)} or + * {@link DynamoDbAsyncTable#updateItem(UpdateItemEnhancedRequest)}). + *
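+ * A hypothetical sketch (the Customer class and instance are assumptions for the example):
+ * <pre>{@code
+ * // Only non-null attributes of 'customer' will be written because ignoreNulls is set to true.
+ * UpdateItemEnhancedRequest<Customer> request =
+ *     UpdateItemEnhancedRequest.builder(Customer.class)
+ *                              .item(customer)
+ *                              .ignoreNulls(true)
+ *                              .build();
+ * }</pre>
+ *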

    + * A valid request object must contain the item that should be written to the table. + * + * @param The type of the modelled object. + */ +@SdkPublicApi +public final class UpdateItemEnhancedRequest { + + private final T item; + private final Boolean ignoreNulls; + private final Expression conditionExpression; + + private UpdateItemEnhancedRequest(Builder builder) { + this.item = builder.item; + this.ignoreNulls = builder.ignoreNulls; + this.conditionExpression = builder.conditionExpression; + } + + /** + * Creates a newly initialized builder for the request object. + * + * @param itemClass the class that items in this table map to + * @param The type of the modelled object, corresponding to itemClass + * @return a UpdateItemEnhancedRequest builder + */ + public static Builder builder(Class itemClass) { + return new Builder<>(); + } + + /** + * Returns a builder initialized with all existing values on the request object. + */ + public Builder toBuilder() { + return new Builder().item(item).ignoreNulls(ignoreNulls); + } + + /** + * Returns the item for this update operation request. + */ + public T item() { + return item; + } + + /** + * Returns if the update operation should ignore attributes with null values, or false if it has not been set. + */ + public Boolean ignoreNulls() { + return ignoreNulls; + } + + /** + * Returns the condition {@link Expression} set on this request object, or null if it doesn't exist. + */ + public Expression conditionExpression() { + return conditionExpression; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + UpdateItemEnhancedRequest that = (UpdateItemEnhancedRequest) o; + + if (item != null ? ! item.equals(that.item) : that.item != null) { + return false; + } + return ignoreNulls != null ? ignoreNulls.equals(that.ignoreNulls) : that.ignoreNulls == null; + } + + @Override + public int hashCode() { + int result = item != null ? item.hashCode() : 0; + result = 31 * result + (ignoreNulls != null ? ignoreNulls.hashCode() : 0); + return result; + } + + /** + * A builder that is used to create a request with the desired parameters. + *

    + * Note: A valid request builder must define an item. + */ + public static final class Builder { + private T item; + private Boolean ignoreNulls; + private Expression conditionExpression; + + private Builder() { + } + + /** + * Sets if the update operation should ignore attributes with null values. By default, the value is false. + *

    + * If set to true, any null values in the Java object will be ignored and not be updated on the persisted + * record. This is commonly referred to as a 'partial update'. + * If set to false, null values in the Java object will cause those attributes to be removed from the persisted + * record on update. + * @param ignoreNulls the boolean value + * @return a builder of this type + */ + public Builder ignoreNulls(Boolean ignoreNulls) { + this.ignoreNulls = ignoreNulls; + return this; + } + + /** + * Defines a logical expression on an item's attribute values which, if evaluating to true, + * will allow the update operation to succeed. If evaluating to false, the operation will not succeed. + *

    + * See {@link Expression} for condition syntax and examples. + * + * @param conditionExpression a condition written as an {@link Expression} + * @return a builder of this type + */ + public Builder conditionExpression(Expression conditionExpression) { + this.conditionExpression = conditionExpression; + return this; + } + + /** + * Sets the item to write to DynamoDb. Required. + * + * @param item the item to write + * @return a builder of this type + */ + public Builder item(T item) { + this.item = item; + return this; + } + + public UpdateItemEnhancedRequest build() { + return new UpdateItemEnhancedRequest<>(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/WriteBatch.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/WriteBatch.java new file mode 100644 index 000000000000..4dc8b95b6c75 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/WriteBatch.java @@ -0,0 +1,249 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.getItemsFromSupplier; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.BatchableWriteOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteItemOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.PutItemOperation; +import software.amazon.awssdk.services.dynamodb.model.WriteRequest; + +/** + * Defines a collection of references to keys for delete actions and items for put actions + * for one specific table. A WriteBatch is part of a {@link BatchWriteItemEnhancedRequest} + * and used in a batchWriteItem() operation (such as + * {@link DynamoDbEnhancedClient#batchWriteItem(BatchWriteItemEnhancedRequest)}). + *
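Taken together, the builder methods introduced above (item, ignoreNulls, conditionExpression) are typically combined as in the following sketch. This example is not part of the changeset: Customer, the "customers" table name, partiallyFilledCustomer, and the enhancedClient instance are hypothetical placeholders; only the request, table, and Expression APIs shown in this diff are assumed.

```java
// Hedged sketch: Customer, "customers", partiallyFilledCustomer and enhancedClient are hypothetical.
DynamoDbTable<Customer> customerTable =
        enhancedClient.table("customers", TableSchema.fromBean(Customer.class));

Expression onlyIfActive = Expression.builder()
        .expression("#status = :active")
        .expressionNames(Collections.singletonMap("#status", "status"))
        .expressionValues(Collections.singletonMap(":active", AttributeValue.builder().s("ACTIVE").build()))
        .build();

UpdateItemEnhancedRequest<Customer> request =
        UpdateItemEnhancedRequest.builder(Customer.class)
                .item(partiallyFilledCustomer)      // attributes left null are not touched when ignoreNulls is true
                .ignoreNulls(true)
                .conditionExpression(onlyIfActive)  // the update only succeeds if the condition evaluates to true
                .build();

Customer updated = customerTable.updateItem(request);
```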

    + * A valid write batch should contain one or more delete or put action references. + */ +@SdkPublicApi +public final class WriteBatch { + private final String tableName; + private final List writeRequests; + + private WriteBatch(BuilderImpl builder) { + this.tableName = builder.mappedTableResource != null ? builder.mappedTableResource.tableName() : null; + this.writeRequests = getItemsFromSupplier(builder.itemSupplierList); + } + + /** + * Creates a newly initialized builder for a write batch. + * + * @param itemClass the class that items in this table map to + * @param The type of the modelled object, corresponding to itemClass + * @return a WriteBatch builder + */ + public static Builder builder(Class itemClass) { + return new BuilderImpl<>(itemClass); + } + + /** + * Returns the table name associated with this batch. + */ + public String tableName() { + return tableName; + } + + /** + * Returns the collection of write requests in this write batch. + */ + public Collection writeRequests() { + return writeRequests; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + WriteBatch that = (WriteBatch) o; + + if (tableName != null ? !tableName.equals(that.tableName) + : that.tableName != null) { + + return false; + } + return writeRequests != null ? writeRequests.equals(that.writeRequests) : that.writeRequests == null; + } + + @Override + public int hashCode() { + int result = tableName != null ? tableName.hashCode() : 0; + result = 31 * result + (writeRequests != null ? writeRequests.hashCode() : 0); + return result; + } + + /** + * A builder that is used to create a request with the desired parameters. + *

    + * A valid builder must define a {@link MappedTableResource} and add at least one + * {@link DeleteItemEnhancedRequest} or {@link PutItemEnhancedRequest}. + * + * @param the type that items in this table map to + */ + public interface Builder { + + /** + * Sets the mapped table resource (table) that the items in this write batch should come from. + * + * @param mappedTableResource the table reference + * @return a builder of this type + */ + Builder mappedTableResource(MappedTableResource mappedTableResource); + + /** + * Adds a {@link DeleteItemEnhancedRequest} to the builder, this request should contain + * the primary {@link Key} to an item to be deleted. + * + * @param request A {@link DeleteItemEnhancedRequest} + * @return a builder of this type + */ + Builder addDeleteItem(DeleteItemEnhancedRequest request); + + /** + * Adds a {@link DeleteItemEnhancedRequest} to the builder, this request should contain + * the primary {@link Key} to an item to be deleted. + * + * @param requestConsumer a {@link Consumer} of {@link DeleteItemEnhancedRequest} + * @return a builder of this type + */ + Builder addDeleteItem(Consumer requestConsumer); + + /** + * Adds a DeleteItem request to the builder. + * + * @param key a {@link Key} to match the item to be deleted from the database. + * @return a builder of this type + */ + Builder addDeleteItem(Key key); + + /** + * Adds a DeleteItem request to the builder. + * + * @param keyItem an item that will have its key fields used to match a record to delete from the database. + * @return a builder of this type + */ + Builder addDeleteItem(T keyItem); + + /** + * Adds a {@link PutItemEnhancedRequest} to the builder, this request should contain the item + * to be written. + * + * @param request A {@link PutItemEnhancedRequest} + * @return a builder of this type + */ + Builder addPutItem(PutItemEnhancedRequest request); + + /** + * Adds a {@link PutItemEnhancedRequest} to the builder, this request should contain the item + * to be written. + * + * @param requestConsumer a {@link Consumer} of {@link PutItemEnhancedRequest} + * @return a builder of this type + */ + Builder addPutItem(Consumer> requestConsumer); + + /** + * Adds a PutItem request to the builder. + * + * @param item the item to insert or overwrite in the database. 
+ * @return a builder of this type + */ + Builder addPutItem(T item); + + WriteBatch build(); + } + + private static final class BuilderImpl implements Builder { + + private Class itemClass; + private List> itemSupplierList = new ArrayList<>(); + private MappedTableResource mappedTableResource; + + private BuilderImpl(Class itemClass) { + this.itemClass = itemClass; + } + + public Builder mappedTableResource(MappedTableResource mappedTableResource) { + this.mappedTableResource = mappedTableResource; + return this; + } + + public Builder addDeleteItem(DeleteItemEnhancedRequest request) { + itemSupplierList.add(() -> generateWriteRequest(() -> mappedTableResource, DeleteItemOperation.create(request))); + return this; + } + + public Builder addDeleteItem(Consumer requestConsumer) { + DeleteItemEnhancedRequest.Builder builder = DeleteItemEnhancedRequest.builder(); + requestConsumer.accept(builder); + return addDeleteItem(builder.build()); + } + + @Override + public Builder addDeleteItem(Key key) { + return addDeleteItem(r -> r.key(key)); + } + + @Override + public Builder addDeleteItem(T keyItem) { + return addDeleteItem(this.mappedTableResource.keyFrom(keyItem)); + } + + public Builder addPutItem(PutItemEnhancedRequest request) { + itemSupplierList.add(() -> generateWriteRequest(() -> mappedTableResource, PutItemOperation.create(request))); + return this; + } + + public Builder addPutItem(Consumer> requestConsumer) { + PutItemEnhancedRequest.Builder builder = PutItemEnhancedRequest.builder(this.itemClass); + requestConsumer.accept(builder); + return addPutItem(builder.build()); + } + + @Override + public Builder addPutItem(T item) { + return addPutItem(r -> r.item(item)); + } + + public WriteBatch build() { + return new WriteBatch(this); + } + + private WriteRequest generateWriteRequest(Supplier> mappedTableResourceSupplier, + BatchableWriteOperation operation) { + return operation.generateWriteRequest(mappedTableResourceSupplier.get().tableSchema(), + DefaultOperationContext.create(mappedTableResourceSupplier.get().tableName()), + mappedTableResourceSupplier.get().mapperExtension()); + } + } + +} diff --git a/services-custom/dynamodb-enhanced/src/main/resources/software/amazon/awssdk/services/dynamodb/execution.interceptors b/services-custom/dynamodb-enhanced/src/main/resources/software/amazon/awssdk/services/dynamodb/execution.interceptors new file mode 100644 index 000000000000..748c9df85e52 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/resources/software/amazon/awssdk/services/dynamodb/execution.interceptors @@ -0,0 +1 @@ +software.amazon.awssdk.enhanced.dynamodb.internal.ApplyUserAgentInterceptor \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedTypeTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedTypeTest.java new file mode 100644 index 000000000000..c78ae744a416 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedTypeTest.java @@ -0,0 +1,226 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
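A WriteBatch built with the interface above is normally wrapped in a BatchWriteItemEnhancedRequest and handed to the enhanced client. A minimal sketch, assuming a hypothetical Customer bean, a previously created customerTable, a new newCustomer object, and an enhancedClient instance; the builder methods used are the ones declared in this diff.

```java
// Hedged sketch: Customer, customerTable, newCustomer and enhancedClient are hypothetical placeholders.
BatchWriteItemEnhancedRequest batchRequest =
        BatchWriteItemEnhancedRequest.builder()
                .writeBatches(WriteBatch.builder(Customer.class)
                                        .mappedTableResource(customerTable)
                                        .addPutItem(newCustomer)                                        // put action
                                        .addDeleteItem(Key.builder().partitionValue("id-42").build())  // delete action
                                        .build())
                .build();

enhancedClient.batchWriteItem(batchRequest);
```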
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.assertThatCode; + +import java.util.Collection; +import java.util.Deque; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.NavigableSet; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.concurrent.ConcurrentMap; + +import org.junit.Test; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +public class EnhancedTypeTest { + @Test + public void anonymousCreationCapturesComplexTypeArguments() { + EnhancedType>>> enhancedType = new EnhancedType>>>(){}; + assertThat(enhancedType.rawClass()).isEqualTo(Map.class); + assertThat(enhancedType.rawClassParameters().get(0).rawClass()).isEqualTo(String.class); + assertThat(enhancedType.rawClassParameters().get(1).rawClass()).isEqualTo(List.class); + assertThat(enhancedType.rawClassParameters().get(1).rawClassParameters().get(0).rawClass()).isEqualTo(List.class); + assertThat(enhancedType.rawClassParameters().get(1).rawClassParameters().get(0).rawClassParameters().get(0).rawClass()) + .isEqualTo(String.class); + } + + @Test + public void customTypesWork() { + EnhancedType enhancedType = new EnhancedType(){}; + assertThat(enhancedType.rawClass()).isEqualTo(EnhancedTypeTest.class); + } + + @Test + public void nonStaticInnerTypesWork() { + EnhancedType enhancedType = new EnhancedType(){}; + assertThat(enhancedType.rawClass()).isEqualTo(InnerType.class); + } + + @Test + public void staticInnerTypesWork() { + EnhancedType enhancedType = new EnhancedType(){}; + assertThat(enhancedType.rawClass()).isEqualTo(InnerStaticType.class); + } + + @Test + public void genericParameterTypesDontWork() { + assertThatThrownBy(() -> new EnhancedType>(){}).isInstanceOf(IllegalStateException.class); + } + + @Test + public void helperCreationMethodsWork() { + assertThat(EnhancedType.of(String.class).rawClass()).isEqualTo(String.class); + + assertThat(EnhancedType.listOf(String.class)).satisfies(v -> { + assertThat(v.rawClass()).isEqualTo(List.class); + assertThat(v.rawClassParameters()).hasSize(1); + assertThat(v.rawClassParameters().get(0).rawClass()).isEqualTo(String.class); + }); + + assertThat(EnhancedType.mapOf(String.class, Integer.class)).satisfies(v -> { + assertThat(v.rawClass()).isEqualTo(Map.class); + assertThat(v.rawClassParameters()).hasSize(2); + assertThat(v.rawClassParameters().get(0).rawClass()).isEqualTo(String.class); + assertThat(v.rawClassParameters().get(1).rawClass()).isEqualTo(Integer.class); + }); + } + + @Test + public void equalityIsBasedOnInnerEquality() { + assertThat(EnhancedType.of(String.class)).isEqualTo(EnhancedType.of(String.class)); + assertThat(EnhancedType.of(String.class)).isNotEqualTo(EnhancedType.of(Integer.class)); + + assertThat(new EnhancedType>>(){}).isEqualTo(new EnhancedType>>(){}); + assertThat(new EnhancedType>>(){}).isNotEqualTo(new EnhancedType>>(){}); + } + + @Test + public void dequeOf_ReturnsRawClassOfDeque_WhenSpecifyingClass() { + EnhancedType> type = EnhancedType.dequeOf(String.class); + + assertThat(type.rawClass()).isEqualTo(Deque.class); + 
assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class)); + } + + @Test + public void dequeOf_ReturnsRawClassOfDeque_WhenSpecifyingEnhancedType() { + EnhancedType> type = EnhancedType.dequeOf(EnhancedType.of(String.class)); + + assertThat(type.rawClass()).isEqualTo(Deque.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class)); + } + + @Test + public void sortedSetOf_ReturnsRawClassOfDeque_WhenSpecifyingClass() { + EnhancedType> type = EnhancedType.sortedSetOf(String.class); + + assertThat(type.rawClass()).isEqualTo(SortedSet.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class)); + } + + @Test + public void sortedSetOf_ReturnsRawClassOfDeque_WhenSpecifyingEnhancedType() { + EnhancedType> type = EnhancedType.sortedSetOf(EnhancedType.of(String.class)); + + assertThat(type.rawClass()).isEqualTo(SortedSet.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class)); + } + + @Test + public void navigableSetOf_ReturnsRawClassOfNavigableSet_WhenSpecifyingClass() { + EnhancedType> type = EnhancedType.navigableSetOf(String.class); + + assertThat(type.rawClass()).isEqualTo(NavigableSet.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class)); + } + + @Test + public void navigableSetOf_ReturnsRawClassOfNavigableSet_WhenSpecifyingEnhancedType() { + EnhancedType> type = EnhancedType.navigableSetOf(EnhancedType.of(String.class)); + + assertThat(type.rawClass()).isEqualTo(NavigableSet.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class)); + } + + + @Test + public void collectionOf_ReturnsRawClassOfCollection_WhenSpecifyingClass() { + EnhancedType> type = EnhancedType.collectionOf(String.class); + + assertThat(type.rawClass()).isEqualTo(Collection.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class)); + } + + @Test + public void collectionOf_ReturnsRawClassOfCollection_WhenSpecifyingEnhancedType() { + EnhancedType> type = EnhancedType.collectionOf(EnhancedType.of(String.class)); + + assertThat(type.rawClass()).isEqualTo(Collection.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class)); + } + + @Test + public void sortedMapOf_ReturnsRawClassOfSortedMap_WhenSpecifyingClass() { + EnhancedType> type = EnhancedType.sortedMapOf(String.class, Integer.class); + + assertThat(type.rawClass()).isEqualTo(SortedMap.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class), EnhancedType.of(Integer.class)); + } + + @Test + public void sortedMapOf_ReturnsRawClassOfSortedMap_WhenSpecifyingEnhancedType() { + EnhancedType> type = + EnhancedType.sortedMapOf(EnhancedType.of(String.class), EnhancedType.of(Integer.class)); + + assertThat(type.rawClass()).isEqualTo(SortedMap.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class), EnhancedType.of(Integer.class)); + } + + @Test + public void concurrentMapOf_ReturnsRawClassOfConcurrentMap_WhenSpecifyingClass() { + EnhancedType> type = EnhancedType.concurrentMapOf(String.class, Integer.class); + + assertThat(type.rawClass()).isEqualTo(ConcurrentMap.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class), EnhancedType.of(Integer.class)); + } + + @Test + public void concurrentMapOf_ReturnsRawClassOfConcurrentMap_WhenSpecifyingEnhancedType() { + EnhancedType> type = + 
EnhancedType.concurrentMapOf(EnhancedType.of(String.class), EnhancedType.of(Integer.class)); + + assertThat(type.rawClass()).isEqualTo(ConcurrentMap.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class), EnhancedType.of(Integer.class)); + } + + @Test + public void navigableMapOf_ReturnsRawClassOfNavigableMap_WhenSpecifyingClass() { + EnhancedType> type = EnhancedType.navigableMapOf(String.class, Integer.class); + + assertThat(type.rawClass()).isEqualTo(NavigableMap.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class), EnhancedType.of(Integer.class)); + } + + @Test + public void navigableMapOf_ReturnsRawClassOfNavigableMap_WhenSpecifyingEnhancedType() { + EnhancedType> type = + EnhancedType.navigableMapOf(EnhancedType.of(String.class), EnhancedType.of(Integer.class)); + + assertThat(type.rawClass()).isEqualTo(NavigableMap.class); + assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class), EnhancedType.of(Integer.class)); + } + + @Test + public void documentOf_toString_doesNotRaiseNPE() { + TableSchema tableSchema = StaticTableSchema.builder(String.class).build(); + EnhancedType type = EnhancedType.documentOf(String.class, tableSchema); + assertThatCode(() -> type.toString()).doesNotThrowAnyException(); + } + + public class InnerType { + } + + public static class InnerStaticType { + } +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/ExpressionTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/ExpressionTest.java new file mode 100644 index 000000000000..66efd6d08f78 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/ExpressionTest.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
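The tests above exercise two ways of describing the same parameterized type. As a compact sketch (not part of the changeset), both of the following produce an EnhancedType whose raw class is Map, with String and List of Integer as its type parameters:

```java
// Anonymous subclass: the type arguments are captured from the generic supertype.
EnhancedType<Map<String, List<Integer>>> byToken =
        new EnhancedType<Map<String, List<Integer>>>() { };

// Static helpers: the same shape assembled explicitly.
EnhancedType<Map<String, List<Integer>>> byHelpers =
        EnhancedType.mapOf(EnhancedType.of(String.class), EnhancedType.listOf(Integer.class));
```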
+ */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; + +import java.util.HashMap; +import java.util.Map; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class ExpressionTest { + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void join_correctlyWrapsExpressions() { + Expression expression1 = Expression.builder().expression("one").build(); + Expression expression2 = Expression.builder().expression("two").build(); + Expression expression3 = Expression.builder().expression("three").build(); + + Expression coalescedExpression = Expression.join(Expression.join(expression1, expression2, " AND "), + expression3, " AND "); + + String expectedExpression = "((one) AND (two)) AND (three)"; + assertThat(coalescedExpression.expression(), is(expectedExpression)); + } + + @Test + public void joinExpressions_correctlyJoins() { + String result = Expression.joinExpressions("one", "two", " AND "); + assertThat(result, is("(one) AND (two)")); + } + + @Test + public void joinNames_correctlyJoins() { + Map names1 = new HashMap<>(); + names1.put("one", "1"); + names1.put("two", "2"); + Map names2 = new HashMap<>(); + names2.put("three", "3"); + names2.put("four", "4"); + + Map result = Expression.joinNames(names1, names2); + + assertThat(result.size(), is(4)); + assertThat(result, hasEntry("one", "1")); + assertThat(result, hasEntry("two", "2")); + assertThat(result, hasEntry("three", "3")); + assertThat(result, hasEntry("four", "4")); + } + + @Test + public void joinNames_conflictingKey() { + Map names1 = new HashMap<>(); + names1.put("one", "1"); + names1.put("two", "2"); + Map names2 = new HashMap<>(); + names2.put("three", "3"); + names2.put("two", "4"); + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("two"); + Expression.joinNames(names1, names2); + } + + @Test + public void joinValues_correctlyJoins() { + Map values1 = new HashMap<>(); + values1.put("one", EnhancedAttributeValue.fromString("1").toAttributeValue()); + values1.put("two", EnhancedAttributeValue.fromString("2").toAttributeValue()); + Map values2 = new HashMap<>(); + values2.put("three", EnhancedAttributeValue.fromString("3").toAttributeValue()); + values2.put("four", EnhancedAttributeValue.fromString("4").toAttributeValue()); + + Map result = Expression.joinValues(values1, values2); + + assertThat(result.size(), is(4)); + assertThat(result, hasEntry("one", EnhancedAttributeValue.fromString("1").toAttributeValue())); + assertThat(result, hasEntry("two", EnhancedAttributeValue.fromString("2").toAttributeValue())); + assertThat(result, hasEntry("three", EnhancedAttributeValue.fromString("3").toAttributeValue())); + assertThat(result, hasEntry("four", EnhancedAttributeValue.fromString("4").toAttributeValue())); + } + + @Test + public void joinValues_conflictingKey() { + Map values1 = new HashMap<>(); + values1.put("one", EnhancedAttributeValue.fromString("1").toAttributeValue()); + values1.put("two", EnhancedAttributeValue.fromString("2").toAttributeValue()); + Map values2 = new HashMap<>(); + values2.put("three", EnhancedAttributeValue.fromString("3").toAttributeValue()); + values2.put("two", 
EnhancedAttributeValue.fromString("4").toAttributeValue()); + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("two"); + Expression.joinValues(values1, values2); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/KeyTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/KeyTest.java new file mode 100644 index 000000000000..885302926417 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/KeyTest.java @@ -0,0 +1,131 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import org.junit.Test; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class KeyTest { + private final Key key = Key.builder().partitionValue("id123").sortValue("id456").build(); + private final Key partitionOnlyKey = Key.builder().partitionValue("id123").build(); + + @Test + public void getKeyMap() { + Map expectedResult = new HashMap<>(); + expectedResult.put("gsi_id", AttributeValue.builder().s("id123").build()); + expectedResult.put("gsi_sort", AttributeValue.builder().s("id456").build()); + assertThat(key.keyMap(FakeItemWithIndices.getTableSchema(), "gsi_1"), is(expectedResult)); + } + + @Test + public void getPrimaryKeyMap() { + Map expectedResult = new HashMap<>(); + expectedResult.put("id", AttributeValue.builder().s("id123").build()); + expectedResult.put("sort", AttributeValue.builder().s("id456").build()); + assertThat(key.primaryKeyMap(FakeItemWithIndices.getTableSchema()), is(expectedResult)); + } + + @Test + public void getPartitionKeyValue() { + assertThat(key.partitionKeyValue(), + is(AttributeValue.builder().s("id123").build())); + } + + @Test + public void getSortKeyValue() { + assertThat(key.sortKeyValue(), is(Optional.of(AttributeValue.builder().s("id456").build()))); + } + + @Test + public void getKeyMap_partitionOnly() { + Map expectedResult = new HashMap<>(); + expectedResult.put("gsi_id", AttributeValue.builder().s("id123").build()); + assertThat(partitionOnlyKey.keyMap(FakeItemWithIndices.getTableSchema(), "gsi_1"), is(expectedResult)); + } + + @Test + public void getPrimaryKeyMap_partitionOnly() { + Map expectedResult = new HashMap<>(); + expectedResult.put("id", AttributeValue.builder().s("id123").build()); + assertThat(partitionOnlyKey.primaryKeyMap(FakeItemWithIndices.getTableSchema()), is(expectedResult)); + } + + @Test + public void 
getPartitionKeyValue_partitionOnly() { + assertThat(partitionOnlyKey.partitionKeyValue(), + is(AttributeValue.builder().s("id123").build())); + } + + @Test + public void getSortKeyValue_partitionOnly() { + assertThat(partitionOnlyKey.sortKeyValue(), is(Optional.empty())); + } + + @Test + public void numericKeys_convertsToCorrectAttributeValue() { + Key key = Key.builder().partitionValue(123).sortValue(45.6).build(); + + assertThat(key.partitionKeyValue(), is(AttributeValue.builder().n("123").build())); + assertThat(key.sortKeyValue(), is(Optional.of(AttributeValue.builder().n("45.6").build()))); + } + + @Test + public void stringKeys_convertsToCorrectAttributeValue() { + Key key = Key.builder().partitionValue("one").sortValue("two").build(); + + assertThat(key.partitionKeyValue(), is(AttributeValue.builder().s("one").build())); + assertThat(key.sortKeyValue(), is(Optional.of(AttributeValue.builder().s("two").build()))); + } + + @Test + public void binaryKeys_convertsToCorrectAttributeValue() { + SdkBytes partition = SdkBytes.fromString("one", StandardCharsets.UTF_8); + SdkBytes sort = SdkBytes.fromString("two", StandardCharsets.UTF_8); + + Key key = Key.builder().partitionValue(partition).sortValue(sort).build(); + + assertThat(key.partitionKeyValue(), is(AttributeValue.builder().b(partition).build())); + assertThat(key.sortKeyValue(), is(Optional.of(AttributeValue.builder().b(sort).build()))); + } + + @Test + public void toBuilder() { + Key keyClone = key.toBuilder().build(); + + assertThat(key, is(equalTo(keyClone))); + } + + @Test + public void nullPartitionKey_shouldThrowException() { + AttributeValue attributeValue = null; + assertThatThrownBy(() -> Key.builder().partitionValue(attributeValue).build()) + .isInstanceOf(IllegalArgumentException.class).hasMessageContaining("partitionValue should not be null"); + + assertThatThrownBy(() -> Key.builder().partitionValue(AttributeValue.builder().nul(true).build()).build()) + .isInstanceOf(IllegalArgumentException.class).hasMessageContaining("partitionValue should not be null"); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/ProjectionExpressionConvertorTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/ProjectionExpressionConvertorTest.java new file mode 100644 index 000000000000..4a967ae8c784 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/ProjectionExpressionConvertorTest.java @@ -0,0 +1,65 @@ +package software.amazon.awssdk.enhanced.dynamodb; + +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.ProjectionExpressionConvertor; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; + +public class ProjectionExpressionConvertorTest { + + public static final String MAPPED_INDICATOR = "#AMZN_MAPPED_"; + public static final String NESTING_SEPARATOR = "."; + + @Test + public void testAttributeNameWithNoNestedAttributes() { + final String keyName = "fieldKey"; + NestedAttributeName attributeName = NestedAttributeName.builder().elements(keyName).build(); + ProjectionExpressionConvertor expressionConvertor = ProjectionExpressionConvertor.create(Arrays.asList(attributeName)); + final Map stringStringMap = 
expressionConvertor.convertToExpressionMap(); + final Optional toNameExpression = expressionConvertor.convertToProjectionExpression(); + Map expectedmap = new HashMap<>(); + expectedmap.put(MAPPED_INDICATOR + keyName, keyName); + assertThat(stringStringMap).isEqualTo(expectedmap); + assertThat(toNameExpression.get()).contains((MAPPED_INDICATOR + keyName)); + } + + @Test + public void testAttributeNameWithNestedNestedAttributes() { + final String keyName = "fieldKey"; + final String nestedAttribute = "levelOne"; + NestedAttributeName attributeName = NestedAttributeName.builder().addElements(keyName, nestedAttribute).build(); + ProjectionExpressionConvertor expressionConvertor = ProjectionExpressionConvertor.create(Arrays.asList(attributeName)); + final Map stringStringMap = expressionConvertor.convertToExpressionMap(); + final Optional toNameExpression = expressionConvertor.convertToProjectionExpression(); + Map expectedmap = new HashMap<>(); + expectedmap.put(MAPPED_INDICATOR + keyName, keyName); + expectedmap.put(MAPPED_INDICATOR + nestedAttribute, nestedAttribute); + assertThat(stringStringMap).isEqualTo(expectedmap); + assertThat(toNameExpression.get()).contains(MAPPED_INDICATOR + keyName + NESTING_SEPARATOR + MAPPED_INDICATOR + nestedAttribute); + } + + @Test + public void testAttributeNameWithNullAttributeName() { + assertFails(() -> NestedAttributeName.builder().addElement(null).build()); + + } + + @Test + public void testAttributeNameWithNullElementsForNestingElement() { + assertFails(() -> NestedAttributeName.builder() + .elements("foo").addElement(null).build()); + } + + @Test + public void toBuilder() { + NestedAttributeName builtObject = NestedAttributeName.builder().addElement("foo").build(); + NestedAttributeName copiedObject = builtObject.toBuilder().build(); + assertThat(copiedObject).isEqualTo(builtObject); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java new file mode 100644 index 000000000000..d42296dfe110 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.ImmutableTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.InvalidBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SimpleBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SimpleImmutable; + +public class TableSchemaTest { + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void builder_constructsStaticTableSchemaBuilder() { + StaticTableSchema.Builder builder = TableSchema.builder(FakeItem.class); + assertThat(builder).isNotNull(); + } + + @Test + public void fromBean_constructsBeanTableSchema() { + BeanTableSchema beanBeanTableSchema = TableSchema.fromBean(SimpleBean.class); + assertThat(beanBeanTableSchema).isNotNull(); + } + + @Test + public void fromImmutable_constructsImmutableTableSchema() { + ImmutableTableSchema immutableTableSchema = + TableSchema.fromImmutableClass(SimpleImmutable.class); + + assertThat(immutableTableSchema).isNotNull(); + } + + @Test + public void fromClass_constructsBeanTableSchema() { + TableSchema tableSchema = TableSchema.fromClass(SimpleBean.class); + assertThat(tableSchema).isInstanceOf(BeanTableSchema.class); + } + + @Test + public void fromClass_constructsImmutableTableSchema() { + TableSchema tableSchema = TableSchema.fromClass(SimpleImmutable.class); + assertThat(tableSchema).isInstanceOf(ImmutableTableSchema.class); + } + + @Test + public void fromClass_invalidClassThrowsException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("InvalidBean"); + TableSchema.fromClass(InvalidBean.class); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/TypeConvertingVisitorTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/TypeConvertingVisitorTest.java new file mode 100644 index 000000000000..b0aae190a287 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/TypeConvertingVisitorTest.java @@ -0,0 +1,56 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
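The TableSchema factory methods covered by the test above are usually the entry point for wiring a model class to a table client. A minimal sketch, assuming a hypothetical Customer class annotated as a DynamoDb bean (or immutable) and a table named "customers":

```java
// Hedged sketch: Customer and "customers" are hypothetical; fromClass() resolves to a
// bean-based or immutable-based schema depending on the class's annotations.
TableSchema<Customer> schema = TableSchema.fromClass(Customer.class);

DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.create();
DynamoDbTable<Customer> customers = enhancedClient.table("customers", schema);
```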
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.util.Collections; +import org.junit.Test; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; + +public class TypeConvertingVisitorTest { + @Test + public void defaultConvertersThrowExceptions() { + assertThat(DefaultVisitor.INSTANCE.convert(EnhancedAttributeValue.nullValue())).isEqualTo(null); + + assertDefaultConversionFails(EnhancedAttributeValue.fromString("foo")); + assertDefaultConversionFails(EnhancedAttributeValue.fromNumber("1")); + assertDefaultConversionFails(EnhancedAttributeValue.fromBoolean(true)); + assertDefaultConversionFails(EnhancedAttributeValue.fromBytes(SdkBytes.fromUtf8String(""))); + assertDefaultConversionFails(EnhancedAttributeValue.fromSetOfStrings(Collections.emptyList())); + assertDefaultConversionFails(EnhancedAttributeValue.fromSetOfNumbers(Collections.emptyList())); + assertDefaultConversionFails(EnhancedAttributeValue.fromSetOfBytes(Collections.emptyList())); + assertDefaultConversionFails(EnhancedAttributeValue.fromListOfAttributeValues(Collections.emptyList())); + assertDefaultConversionFails(EnhancedAttributeValue.fromMap(Collections.emptyMap())); + } + + private void assertDefaultConversionFails(EnhancedAttributeValue attributeValue) { + assertThatThrownBy(() -> DefaultVisitor.INSTANCE.convert(attributeValue)).isInstanceOf(IllegalStateException.class); + } + + + private static class DefaultVisitor extends TypeConvertingVisitor { + private static final DefaultVisitor INSTANCE = new DefaultVisitor(); + + protected DefaultVisitor() { + super(Void.class); + } + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/BinaryAttributeConvertersTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/BinaryAttributeConvertersTest.java new file mode 100644 index 000000000000..82cf07310e0d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/BinaryAttributeConvertersTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.util.Set; +import org.junit.Test; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ByteArrayAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.SdkBytesAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.SetAttributeConverter; + +public class BinaryAttributeConvertersTest { + @Test + public void byteArrayAttributeConverterBehaves() { + ByteArrayAttributeConverter converter = ByteArrayAttributeConverter.create(); + + byte[] emptyBytes = new byte[0]; + byte[] bytes = "foo".getBytes(); + + assertThat(transformFrom(converter, bytes).b().asByteArray()).isEqualTo(bytes); + assertThat(transformFrom(converter, emptyBytes).b().asByteArray()).isEqualTo(emptyBytes); + + assertThat(transformTo(converter, EnhancedAttributeValue.fromBytes(SdkBytes.fromUtf8String("foo")).toAttributeValue())).isEqualTo(bytes); + assertThat(transformTo(converter, EnhancedAttributeValue.fromBytes(SdkBytes.fromUtf8String("")).toAttributeValue())).isEqualTo(emptyBytes); + } + + @Test + public void sdkBytesAttributeConverterBehaves() { + SdkBytesAttributeConverter converter = SdkBytesAttributeConverter.create(); + SdkBytes bytes = SdkBytes.fromUtf8String(""); + assertThat(transformFrom(converter, bytes).b()).isSameAs(bytes); + assertThat(transformTo(converter, EnhancedAttributeValue.fromBytes(bytes).toAttributeValue())).isSameAs(bytes); + } + + @Test + public void sdkBytesSetAttributeConverter_ReturnsBSType() { + SetAttributeConverter> bytesSet = SetAttributeConverter.setConverter(SdkBytesAttributeConverter.create()); + assertThat(bytesSet.attributeValueType()).isEqualTo(AttributeValueType.BS); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/BooleanAttributeConvertersTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/BooleanAttributeConvertersTest.java new file mode 100644 index 000000000000..75c6d2a953e6 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/BooleanAttributeConvertersTest.java @@ -0,0 +1,78 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.AtomicBooleanAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.BooleanAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.SetAttributeConverter; + +public class BooleanAttributeConvertersTest { + @Test + public void atomicBooleanAttributeConverterBehaves() { + AtomicBooleanAttributeConverter converter = AtomicBooleanAttributeConverter.create(); + assertThat(transformFrom(converter, new AtomicBoolean(true)).bool()).isEqualTo(true); + assertThat(transformFrom(converter, new AtomicBoolean(false)).bool()).isEqualTo(false); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("FALSE").toAttributeValue())); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("TRUE").toAttributeValue())); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("0").toAttributeValue())); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1").toAttributeValue())); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("").toAttributeValue())); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("1").toAttributeValue())).isTrue(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0").toAttributeValue())).isFalse(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("true").toAttributeValue())).isTrue(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("false").toAttributeValue())).isFalse(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromBoolean(true).toAttributeValue())).isTrue(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromBoolean(false).toAttributeValue())).isFalse(); + } + + @Test + public void booleanAttributeConverterBehaves() { + BooleanAttributeConverter converter = BooleanAttributeConverter.create(); + + assertThat(transformFrom(converter, true).bool()).isEqualTo(true); + assertThat(transformFrom(converter, false).bool()).isEqualTo(false); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("FALSE").toAttributeValue())); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("TRUE").toAttributeValue())); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("0").toAttributeValue())); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1").toAttributeValue())); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("").toAttributeValue())); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("1").toAttributeValue())).isTrue(); + assertThat(transformTo(converter, 
EnhancedAttributeValue.fromNumber("0").toAttributeValue())).isFalse(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("true").toAttributeValue())).isTrue(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("false").toAttributeValue())).isFalse(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromBoolean(true).toAttributeValue())).isTrue(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromBoolean(false).toAttributeValue())).isFalse(); + } + + @Test + public void setOfBooleanAttributeConverter_ThrowsIllegalArgumentException() { + assertThatThrownBy(() -> SetAttributeConverter.setConverter(BooleanAttributeConverter.create())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("SetAttributeConverter cannot be created") + .hasMessageContaining("Boolean"); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/ConverterTestUtils.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/ConverterTestUtils.java new file mode 100644 index 000000000000..73aa03deb99e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/ConverterTestUtils.java @@ -0,0 +1,44 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import org.assertj.core.api.ThrowableAssert; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + + +public final class ConverterTestUtils { + private ConverterTestUtils() {} + + public static AttributeValue transformFrom(AttributeConverter converter, T value) { + return converter.transformFrom(value); + } + + public static T transformTo(AttributeConverter converter, AttributeValue value) { + return converter.transformTo(value); + } + + public static T transformTo(AttributeConverter converter, EnhancedAttributeValue value) { + return converter.transformTo(value.toAttributeValue()); + } + + public static void assertFails(ThrowableAssert.ThrowingCallable shouldRaiseThrowable) { + assertThatThrownBy(shouldRaiseThrowable).isInstanceOf(IllegalArgumentException.class); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/EnhancedAttributeValueTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/EnhancedAttributeValueTest.java new file mode 100644 index 000000000000..07f669b96a8c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/EnhancedAttributeValueTest.java @@ -0,0 +1,174 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
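The transformFrom/transformTo helpers above wrap the two halves of the AttributeConverter contract. A small round-trip sketch, using the Instant converter exercised further down in this changeset and no other assumptions:

```java
// Converts a Java value to a DynamoDB AttributeValue and back again.
InstantAsStringAttributeConverter converter = InstantAsStringAttributeConverter.create();

AttributeValue stored = converter.transformFrom(Instant.EPOCH);   // S: "1970-01-01T00:00:00Z"
Instant restored = converter.transformTo(stored);                 // equals Instant.EPOCH
```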
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import org.junit.Test; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class EnhancedAttributeValueTest { + @Test + public void simpleFromMethodsCreateCorrectTypes() { + assertThat(EnhancedAttributeValue.nullValue()).satisfies(v -> { + assertThat(v.isNull()).isTrue(); + assertThat(v.type()).isEqualTo(AttributeValueType.NULL); + }); + + assertThat(EnhancedAttributeValue.fromString("foo")).satisfies(v -> { + assertThat(v.isString()).isTrue(); + assertThat(v.asString()).isEqualTo("foo"); + assertThat(v.type()).isEqualTo(AttributeValueType.S); + }); + + assertThat(EnhancedAttributeValue.fromNumber("1")).satisfies(v -> { + assertThat(v.isNumber()).isTrue(); + assertThat(v.asNumber()).isEqualTo("1"); + assertThat(v.type()).isEqualTo(AttributeValueType.N); + }); + + assertThat(EnhancedAttributeValue.fromBoolean(true)).satisfies(v -> { + assertThat(v.isBoolean()).isTrue(); + assertThat(v.asBoolean()).isEqualTo(true); + assertThat(v.type()).isEqualTo(AttributeValueType.BOOL); + }); + + assertThat(EnhancedAttributeValue.fromBytes(SdkBytes.fromUtf8String("foo"))).satisfies(v -> { + assertThat(v.isBytes()).isTrue(); + assertThat(v.asBytes().asUtf8String()).isEqualTo("foo"); + assertThat(v.type()).isEqualTo(AttributeValueType.B); + }); + + assertThat(EnhancedAttributeValue.fromSetOfStrings(Arrays.asList("a", "b"))).satisfies(v -> { + assertThat(v.isSetOfStrings()).isTrue(); + assertThat(v.asSetOfStrings()).containsExactly("a", "b"); + assertThat(v.type()).isEqualTo(AttributeValueType.SS); + }); + + assertThat(EnhancedAttributeValue.fromSetOfNumbers(Arrays.asList("1", "2"))).satisfies(v -> { + assertThat(v.isSetOfNumbers()).isTrue(); + assertThat(v.asSetOfNumbers()).containsExactly("1", "2"); + assertThat(v.type()).isEqualTo(AttributeValueType.NS); + }); + + assertThat(EnhancedAttributeValue.fromSetOfBytes(Arrays.asList(SdkBytes.fromUtf8String("foo"), + SdkBytes.fromUtf8String("foo2")))).satisfies(v -> { + assertThat(v.isSetOfBytes()).isTrue(); + assertThat(v.asSetOfBytes().get(0).asUtf8String()).isEqualTo("foo"); + assertThat(v.asSetOfBytes().get(1).asUtf8String()).isEqualTo("foo2"); + assertThat(v.type()).isEqualTo(AttributeValueType.BS); + }); + + assertThat(EnhancedAttributeValue.fromListOfAttributeValues(Arrays.asList(AttributeValue.builder().s("foo").build(), + AttributeValue.builder().bool(true).build()))).satisfies(v -> { + assertThat(v.isListOfAttributeValues()).isTrue(); + assertThat(v.asListOfAttributeValues().get(0).s()).isEqualTo("foo"); + assertThat(v.asListOfAttributeValues().get(1).bool()).isEqualTo(true); + assertThat(v.type()).isEqualTo(AttributeValueType.L); + }); + + Map map = new LinkedHashMap<>(); + map.put("a", AttributeValue.builder().s("foo").build()); + map.put("b", AttributeValue.builder().bool(true).build()); + assertThat(EnhancedAttributeValue.fromMap(map)).satisfies(v -> { + assertThat(v.isMap()).isTrue(); + assertThat(v.asMap().get("a").s()).isEqualTo("foo"); + assertThat(v.asMap().get("b").bool()).isEqualTo(true); + 
assertThat(v.type()).isEqualTo(AttributeValueType.M); + }); + } + + @Test + public void fromGeneratedTypeMethodsCreateCorrectType() { + assertThat(EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().nul(true).build())) + .isEqualTo(EnhancedAttributeValue.nullValue()); + + assertThat(EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().s("foo").build())) + .isEqualTo(EnhancedAttributeValue.fromString("foo")); + + assertThat(EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().n("1").build())) + .isEqualTo(EnhancedAttributeValue.fromNumber("1")); + + assertThat(EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().bool(true).build())) + .isEqualTo(EnhancedAttributeValue.fromBoolean(true)); + + assertThat(EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().b(SdkBytes.fromUtf8String("foo")).build())) + .isEqualTo(EnhancedAttributeValue.fromBytes(SdkBytes.fromUtf8String("foo"))); + + assertThat(EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().ss(Arrays.asList("foo", "bar")).build())) + .isEqualTo(EnhancedAttributeValue.fromSetOfStrings(Arrays.asList("foo", "bar"))); + + assertThat(EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().ns(Arrays.asList("1", "2")).build())) + .isEqualTo(EnhancedAttributeValue.fromSetOfNumbers(Arrays.asList("1", "2"))); + + assertThat(EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder() + .bs(Arrays.asList(SdkBytes.fromUtf8String("foo"), + SdkBytes.fromUtf8String("foo2"))) + .build())) + .isEqualTo(EnhancedAttributeValue.fromSetOfBytes(Arrays.asList(SdkBytes.fromUtf8String("foo"), + SdkBytes.fromUtf8String("foo2")))); + + List list = Arrays.asList(AttributeValue.builder().s("foo").build(), + AttributeValue.builder().n("1").build()); + assertThat(EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().l(list).build())) + .isEqualTo(EnhancedAttributeValue.fromListOfAttributeValues(list)); + + Map map = new LinkedHashMap<>(); + map.put("foo", AttributeValue.builder().s("foo").build()); + map.put("bar", AttributeValue.builder().n("1").build()); + + assertThat(EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().m(map).build())) + .isEqualTo(EnhancedAttributeValue.fromMap(map)); + } + + @Test + public void emptyAttributeValuesCannotBeConverted() { + assertThatThrownBy(() -> EnhancedAttributeValue.fromAttributeValue(AttributeValue.builder().build())) + .isInstanceOf(IllegalStateException.class); + } + + @Test + public void conversionToGeneratedIsCorrect() { + List strings = Arrays.asList("foo", "bar"); + List bytes = Arrays.asList(SdkBytes.fromUtf8String("foo"), SdkBytes.fromUtf8String("bar")); + + List attributes = Arrays.asList(AttributeValue.builder().s("foo").build(), AttributeValue.builder().n("1").build()); + + Map attributeMap = new LinkedHashMap<>(); + attributeMap.put("foo", AttributeValue.builder().s("foo").build()); + attributeMap.put("bar", AttributeValue.builder().n("1").build()); + + assertThat(EnhancedAttributeValue.nullValue().toAttributeValue().nul()).isEqualTo(true); + assertThat(EnhancedAttributeValue.fromString("foo").toAttributeValue().s()).isEqualTo("foo"); + assertThat(EnhancedAttributeValue.fromNumber("1").toAttributeValue().n()).isEqualTo("1"); + assertThat(EnhancedAttributeValue.fromBoolean(false).toAttributeValue().bool()).isEqualTo(false); + assertThat(EnhancedAttributeValue.fromBytes(SdkBytes.fromUtf8String("foo")).toAttributeValue().b().asUtf8String()).isEqualTo("foo"); + 
assertThat(EnhancedAttributeValue.fromSetOfStrings(strings).toAttributeValue().ss()).isEqualTo(strings); + assertThat(EnhancedAttributeValue.fromSetOfNumbers(strings).toAttributeValue().ns()).isEqualTo(strings); + assertThat(EnhancedAttributeValue.fromSetOfBytes(bytes).toAttributeValue().bs()).isEqualTo(bytes); + assertThat(EnhancedAttributeValue.fromListOfAttributeValues(attributes).toAttributeValue().l()).isEqualTo(attributes); + assertThat(EnhancedAttributeValue.fromMap(attributeMap).toAttributeValue().m()).isEqualTo(attributeMap); + assertThat(EnhancedAttributeValue.fromMap(attributeMap).toAttributeValueMap()).isEqualTo(attributeMap); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/InstantAsStringAttributeConvertersTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/InstantAsStringAttributeConvertersTest.java new file mode 100644 index 000000000000..7abccac079fc --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/InstantAsStringAttributeConvertersTest.java @@ -0,0 +1,119 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.Instant; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsStringAttributeConverter; + +public class InstantAsStringAttributeConvertersTest { + + private static final InstantAsStringAttributeConverter CONVERTER = InstantAsStringAttributeConverter.create(); + + @Test + public void InstantAsStringAttributeConverterMinTest() { + verifyTransform(Instant.MIN, "-1000000000-01-01T00:00:00Z"); + } + + @Test + public void InstantAsStringAttributeConverterEpochMinusOneMilliTest() { + verifyTransform(Instant.EPOCH.minusMillis(1), "1969-12-31T23:59:59.999Z"); + } + + @Test + public void InstantAsStringAttributeConverterEpochTest() { + verifyTransform(Instant.EPOCH, "1970-01-01T00:00:00Z"); + } + + @Test + public void InstantAsStringAttributeConverterEpochPlusOneMilliTest() { + verifyTransform(Instant.EPOCH.plusMillis(1), "1970-01-01T00:00:00.001Z"); + } + + @Test + public void InstantAsStringAttributeConverterMaxTest() { + verifyTransform(Instant.MAX, "+1000000000-12-31T23:59:59.999999999Z"); + } + + + @Test + public void InstantAsStringAttributeConverterExceedLowerBoundTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("-1000000001-12-31T23:59:59.999999999Z") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterInvalidFormatTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("X") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("+1000000001-01-01T00:00:00Z") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptOffsetTimeTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptZonedTimeTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptLocalDateTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("1988-05-21") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(CONVERTER, 
EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + private void verifyTransform(Instant objectToTransform, String attributeValueString) { + assertThat(transformFrom(CONVERTER, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(CONVERTER, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateAttributeConverterTest.java new file mode 100644 index 000000000000..ce4b4025464d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateAttributeConverterTest.java @@ -0,0 +1,93 @@ +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.LocalDate; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalDateAttributeConverter; + +public class LocalDateAttributeConverterTest { + + private static LocalDateAttributeConverter converter = LocalDateAttributeConverter.create(); + + @Test + public void LocalDateAttributeConverterMinTest() { + verifyTransform(LocalDate.MIN, "-999999999-01-01"); + } + + @Test + public void LocalDateAttributeConverterNormalTest() { + verifyTransform(LocalDate.of(0, 1, 1), "0000-01-01"); + } + + @Test + public void LocalDateAttributeConverterMaxTest() { + verifyTransform(LocalDate.MAX, "+999999999-12-31"); + } + + + @Test + public void LocalDateAttributeConverterLowerBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-9999999999-01-01") + .toAttributeValue())); + } + + @Test + public void LocalDateAttributeConverterHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("9999999999-12-31") + .toAttributeValue())); + } + + @Test + public void LocalDateAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("9999999999-12-32") + .toAttributeValue())); + } + + @Test + public void LocalDateAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void LocalDateAttributeConverterNotAcceptInstantTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001Z") + .toAttributeValue())); + } + + @Test + public void LocalDateAttributeConverterNotAcceptOffsetTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00") + .toAttributeValue())); + } + + @Test + public void 
LocalDateAttributeConverterNotAcceptZonedTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void LocalDateAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void LocalDateAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + private void verifyTransform(LocalDate objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateTimeAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateTimeAttributeConverterTest.java new file mode 100644 index 000000000000..70a68b74c97f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateTimeAttributeConverterTest.java @@ -0,0 +1,114 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.LocalDateTime; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalDateTimeAttributeConverter; + +public class LocalDateTimeAttributeConverterTest { + + private static LocalDateTimeAttributeConverter converter = LocalDateTimeAttributeConverter.create(); + + @Test + public void localDateTimeAttributeConverterMinTest() { + verifyTransform(LocalDateTime.MIN, "-999999999-01-01T00:00"); + } + + @Test + public void localDateTimeAttributeConverterNormalTest() { + verifyTransform(LocalDateTime.of(0, 1, 1, 0, 0, 0, 0), "0000-01-01T00:00"); + } + + @Test + public void localDateTimeAttributeConverterMaxTest() { + verifyTransform(LocalDateTime.MAX, "+999999999-12-31T23:59:59.999999999"); + } + + + @Test + public void localDateTimeAttributeConverterLowerBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-9999999999-01-01T00:00") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("9999999999-12-31T00:00:00") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("9999999999-12-32T00:00:00") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterInvalidNanoSecondsTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("0-01-01T00:00:00.9999999999") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterNotAcceptInstantTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001Z") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterNotAcceptOffsetTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterNotAcceptZonedTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterAdditionallyAcceptLocalDateTest() { + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21").toAttributeValue())) + .isEqualTo(LocalDateTime.of(1988, 5, 21, 0, 0, 0)); + } + + private void 
verifyTransform(LocalDateTime objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalTimeAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalTimeAttributeConverterTest.java new file mode 100644 index 000000000000..584b8614841d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalTimeAttributeConverterTest.java @@ -0,0 +1,93 @@ +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.LocalTime; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalTimeAttributeConverter; + +public class LocalTimeAttributeConverterTest { + + private static LocalTimeAttributeConverter converter = LocalTimeAttributeConverter.create(); + + @Test + public void LocalTimeAttributeConverterMinTest() { + verifyTransform(LocalTime.MIN, "00:00"); + } + + @Test + public void LocalTimeAttributeConverterNormalTest() { + verifyTransform(LocalTime.of(1, 2, 3, 4), "01:02:03.000000004"); + } + + @Test + public void LocalTimeAttributeConverterMaxTest() { + verifyTransform(LocalTime.MAX, "23:59:59.999999999"); + } + + + @Test + public void LocalTimeAttributeConverterInvalidFormatTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-1") + .toAttributeValue())); + } + + @Test + public void LocalTimeAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("24:00:00") + .toAttributeValue())); + } + + @Test + public void LocalTimeAttributeConverterInvalidNanoSecondsTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:00:00.9999999999") + .toAttributeValue())); + } + + @Test + public void LocalTimeAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void LocalTimeAttributeConverterNotAcceptInstantTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001Z") + .toAttributeValue())); + } + + @Test + public void LocalTimeAttributeConverterNotAcceptOffsetTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00") + .toAttributeValue())); + } + + @Test + public void LocalTimeAttributeConverterNotAcceptZonedTimeTest() { + assertFails(() -> transformTo(converter, 
EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void LocalTimeAttributeConverterNotAcceptLocalDateTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21") + .toAttributeValue())); + } + + @Test + public void LocalTimeAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + private void verifyTransform(LocalTime objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/MonthDayAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/MonthDayAttributeConverterTest.java new file mode 100644 index 000000000000..7e972ae32a87 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/MonthDayAttributeConverterTest.java @@ -0,0 +1,88 @@ +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.MonthDay; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.MonthDayAttributeConverter; + +public class MonthDayAttributeConverterTest { + + private static MonthDayAttributeConverter converter = MonthDayAttributeConverter.create(); + + @Test + public void MonthDayAttributeConverterMinTest() { + verifyTransform(MonthDay.of(1, 1), "--01-01"); + } + + @Test + public void MonthDayAttributeConverterNormalTest() { + verifyTransform(MonthDay.of(5, 21), "--05-21"); + } + + @Test + public void MonthDayAttributeConverterMaxTest() { + verifyTransform(MonthDay.of(12, 31), "--12-31"); + } + + + @Test + public void MonthDayAttributeConverterInvalidFormatTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X") + .toAttributeValue())); + } + + @Test + public void MonthDayAttributeConverterInvalidDateTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("--02-30") + .toAttributeValue())); + } + + @Test + public void MonthDayAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void MonthDayAttributeConverterNotAcceptInstantTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001Z") + .toAttributeValue())); + } + + @Test + public void MonthDayAttributeConverterNotAcceptOffsetTimeTest() { + assertFails(() 
-> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00") + .toAttributeValue())); + } + + @Test + public void MonthDayAttributeConverterNotAcceptZonedTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void MonthDayAttributeConverterNotAcceptLocalDateTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21") + .toAttributeValue())); + } + + @Test + public void MonthDayAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + private void verifyTransform(MonthDay objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/NumberAttributeConvertersTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/NumberAttributeConvertersTest.java new file mode 100644 index 000000000000..d9ba57f5e15c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/NumberAttributeConvertersTest.java @@ -0,0 +1,285 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.data.Offset.offset; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.AtomicIntegerAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.AtomicLongAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.BigDecimalAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.BigIntegerAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DoubleAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.FloatAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.IntegerAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LongAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.SetAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ShortAttributeConverter; + +public class NumberAttributeConvertersTest { + private static String TIIIINY_NUMBER = tiiiinyNumber(); + private static String HUUUUGE_NUMBER = huuuugeNumber(); + + @Test + public void atomicIntegerAttributeConverterBehaves() { + AtomicIntegerAttributeConverter converter = AtomicIntegerAttributeConverter.create(); + + assertThat(transformFrom(converter, new AtomicInteger(Integer.MIN_VALUE)).n()) + .isEqualTo(Integer.toString(Integer.MIN_VALUE)); + assertThat(transformFrom(converter, new AtomicInteger(-42)).n()).isEqualTo("-42"); + assertThat(transformFrom(converter, new AtomicInteger(0)).n()).isEqualTo("0"); + assertThat(transformFrom(converter, new AtomicInteger(42)).n()).isEqualTo("42"); + assertThat(transformFrom(converter, new AtomicInteger(Integer.MAX_VALUE)).n()) + .isEqualTo(Integer.toString(Integer.MAX_VALUE)); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1.0"))); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("1.0"))); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(Integer.toString(Integer.MIN_VALUE)))) + .hasValue(Integer.MIN_VALUE); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("-42"))).hasValue(-42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-42"))).hasValue(-42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0"))).hasValue(0); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("42"))).hasValue(42); + assertThat(transformTo(converter, 
EnhancedAttributeValue.fromString("42"))).hasValue(42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(Integer.toString(Integer.MAX_VALUE)))) + .hasValue(Integer.MAX_VALUE); + } + + @Test + public void atomicLongAttributeConverterBehaves() { + AtomicLongAttributeConverter converter = AtomicLongAttributeConverter.create(); + + assertThat(transformFrom(converter, new AtomicLong(Long.MIN_VALUE)).n()) + .isEqualTo(Long.toString(Long.MIN_VALUE)); + assertThat(transformFrom(converter, new AtomicLong(-42)).n()).isEqualTo("-42"); + assertThat(transformFrom(converter, new AtomicLong(0)).n()).isEqualTo("0"); + assertThat(transformFrom(converter, new AtomicLong(42)).n()).isEqualTo("42"); + assertThat(transformFrom(converter, new AtomicLong(Long.MAX_VALUE)).n()) + .isEqualTo(Long.toString(Long.MAX_VALUE)); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1.0"))); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("1.0"))); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(Long.toString(Long.MIN_VALUE)))) + .hasValue(Long.MIN_VALUE); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("-42"))).hasValue(-42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-42"))).hasValue(-42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0"))).hasValue(0); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("42"))).hasValue(42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("42"))).hasValue(42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(Long.toString(Long.MAX_VALUE)))) + .hasValue(Long.MAX_VALUE); + } + + @Test + public void bigDecimalAttributeConverterBehaves() { + BigDecimalAttributeConverter converter = BigDecimalAttributeConverter.create(); + + assertThat(transformFrom(converter, new BigDecimal(TIIIINY_NUMBER)).n()).isEqualTo(TIIIINY_NUMBER); + assertThat(transformFrom(converter, new BigDecimal("43.0")).n()).isEqualTo("43.0"); + assertThat(transformFrom(converter, new BigDecimal("-42.42")).n()).isEqualTo("-42.42"); + assertThat(transformFrom(converter, new BigDecimal("-42")).n()).isEqualTo("-42"); + assertThat(transformFrom(converter, new BigDecimal("0")).n()).isEqualTo("0"); + assertThat(transformFrom(converter, new BigDecimal("42")).n()).isEqualTo("42"); + assertThat(transformFrom(converter, new BigDecimal("42.42")).n()).isEqualTo("42.42"); + assertThat(transformFrom(converter, new BigDecimal("43.0")).n()).isEqualTo("43.0"); + assertThat(transformFrom(converter, new BigDecimal(HUUUUGE_NUMBER)).n()).isEqualTo(HUUUUGE_NUMBER); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X"))); + + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(TIIIINY_NUMBER)).toString()).isEqualTo(TIIIINY_NUMBER); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-43.0")).toString()).isEqualTo("-43.0"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("-42.42")).toString()).isEqualTo("-42.42"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-42")).toString()).isEqualTo("-42"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0")).toString()).isEqualTo("0"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("42")).toString()).isEqualTo("42"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("42.42")).toString()).isEqualTo("42.42"); 
+ assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("43.0")).toString()).isEqualTo("43.0"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(HUUUUGE_NUMBER)).toString()).isEqualTo(HUUUUGE_NUMBER); + } + + @Test + public void bigIntegerAttributeConverterBehaves() { + BigIntegerAttributeConverter converter = BigIntegerAttributeConverter.create(); + + assertThat(transformFrom(converter, new BigInteger(TIIIINY_NUMBER)).n()).isEqualTo(TIIIINY_NUMBER); + assertThat(transformFrom(converter, new BigInteger("-42")).n()).isEqualTo("-42"); + assertThat(transformFrom(converter, new BigInteger("0")).n()).isEqualTo("0"); + assertThat(transformFrom(converter, new BigInteger("42")).n()).isEqualTo("42"); + assertThat(transformFrom(converter, new BigInteger(HUUUUGE_NUMBER)).n()).isEqualTo(HUUUUGE_NUMBER); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X"))); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("1.0"))); + + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(TIIIINY_NUMBER)).toString()).isEqualTo(TIIIINY_NUMBER); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("-42")).toString()).isEqualTo("-42"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-42")).toString()).isEqualTo("-42"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0")).toString()).isEqualTo("0"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("42")).toString()).isEqualTo("42"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("42")).toString()).isEqualTo("42"); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(HUUUUGE_NUMBER)).toString()).isEqualTo(HUUUUGE_NUMBER); + } + + @Test + public void floatAttributeConverterBehaves() { + FloatAttributeConverter converter = FloatAttributeConverter.create(); + + assertFails(() -> transformFrom(converter, Float.NEGATIVE_INFINITY)); + assertFails(() -> transformFrom(converter, Float.POSITIVE_INFINITY)); + assertFails(() -> transformFrom(converter, Float.NaN)); + + assertThat(transformFrom(converter, -Float.MAX_VALUE).n()).isEqualTo("-3.4028235E38"); + assertThat(Float.parseFloat(transformFrom(converter, -42.42f).n())).isCloseTo(-42.42f, offset(1E-10f)); + assertThat(transformFrom(converter, -Float.MIN_VALUE).n()).isEqualTo("-1.4E-45"); + assertThat(transformFrom(converter, 0f).n()).isEqualTo("0.0"); + assertThat(transformFrom(converter, Float.MIN_VALUE).n()).isEqualTo("1.4E-45"); + assertThat(Float.parseFloat(transformFrom(converter, 42.42f).n())).isCloseTo(42.42f, offset(1E-10f)); + assertThat(transformFrom(converter, Float.MAX_VALUE).n()).isEqualTo("3.4028235E38"); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("2E308"))); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("-2E308"))); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("NaN"))); + + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("1.4E-45"))) + .isCloseTo(Float.MIN_VALUE, offset(1E-10f)); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-42.42"))).isCloseTo(-42.42f, offset(1E-10f)); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0"))).isEqualTo(0f); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("42.42"))).isCloseTo(42.42f, offset(1E-10f)); + assertThat(transformTo(converter, 
EnhancedAttributeValue.fromNumber("3.4028235E38"))) + .isCloseTo(Float.MAX_VALUE, offset(1E-10f)); + } + + @Test + public void doubleAttributeConverterBehaves() { + DoubleAttributeConverter converter = DoubleAttributeConverter.create(); + + assertFails(() -> transformFrom(converter, Double.NEGATIVE_INFINITY)); + assertFails(() -> transformFrom(converter, Double.POSITIVE_INFINITY)); + assertFails(() -> transformFrom(converter, Double.NaN)); + + assertThat(transformFrom(converter, -Double.MAX_VALUE).n()).isEqualTo("-1.7976931348623157E308"); + assertThat(Double.parseDouble(transformFrom(converter, -42.42d).n())).isCloseTo(-42.42d, offset(1E-10)); + assertThat(transformFrom(converter, -Double.MIN_VALUE).n()).isEqualTo("-4.9E-324"); + assertThat(transformFrom(converter, 0d).n()).isEqualTo("0.0"); + assertThat(transformFrom(converter, Double.MIN_VALUE).n()).isEqualTo("4.9E-324"); + assertThat(Double.parseDouble(transformFrom(converter, 42.42).n())).isCloseTo(42.42d, offset(1E-10)); + assertThat(transformFrom(converter, Double.MAX_VALUE).n()).isEqualTo("1.7976931348623157E308"); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("2E308"))); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("-2E308"))); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("NaN"))); + + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("4.9E-324"))) + .isCloseTo(Double.MIN_VALUE, offset(1E-10)); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-42.42"))).isCloseTo(-42.42d, offset(1E-10)); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0"))).isEqualTo(0d); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("42.42"))).isCloseTo(42.42d, offset(1E-10)); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("1.7976931348623157E308"))) + .isCloseTo(Double.MAX_VALUE, offset(1E-10)); + } + + @Test + public void shortAttributeConverterBehaves() { + ShortAttributeConverter converter = ShortAttributeConverter.create(); + + assertThat(transformFrom(converter, Short.MIN_VALUE).n()).isEqualTo("-32768"); + assertThat(transformFrom(converter, (short) 0).n()).isEqualTo("0"); + assertThat(transformFrom(converter, Short.MAX_VALUE).n()).isEqualTo("32767"); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1.0"))); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("1.0"))); + + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-32768"))).isEqualTo(Short.MIN_VALUE); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0"))).isEqualTo((short) 0); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("32767"))).isEqualTo(Short.MAX_VALUE); + } + + @Test + public void integerAttributeConverterBehaves() { + IntegerAttributeConverter converter = IntegerAttributeConverter.create(); + + assertThat(transformFrom(converter, Integer.MIN_VALUE).n()).isEqualTo(Integer.toString(Integer.MIN_VALUE)); + assertThat(transformFrom(converter, -42).n()).isEqualTo("-42"); + assertThat(transformFrom(converter, 0).n()).isEqualTo("0"); + assertThat(transformFrom(converter, 42).n()).isEqualTo("42"); + assertThat(transformFrom(converter, Integer.MAX_VALUE).n()).isEqualTo(Integer.toString(Integer.MAX_VALUE)); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1.0"))); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("1.0"))); 
+ + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(Integer.toString(Integer.MIN_VALUE)))) + .isEqualTo(Integer.MIN_VALUE); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("-42"))).isEqualTo(-42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-42"))).isEqualTo(-42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0"))).isEqualTo(0); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("42"))).isEqualTo(42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("42"))).isEqualTo(42); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(Integer.toString(Integer.MAX_VALUE)))) + .isEqualTo(Integer.MAX_VALUE); + } + + @Test + public void longAttributeConverterBehaves() { + LongAttributeConverter converter = LongAttributeConverter.create(); + + assertThat(transformFrom(converter, Long.MIN_VALUE).n()).isEqualTo(Long.toString(Long.MIN_VALUE)); + assertThat(transformFrom(converter, -42L).n()).isEqualTo("-42"); + assertThat(transformFrom(converter, 0L).n()).isEqualTo("0"); + assertThat(transformFrom(converter, 42L).n()).isEqualTo("42"); + assertThat(transformFrom(converter, Long.MAX_VALUE).n()).isEqualTo(Long.toString(Long.MAX_VALUE)); + + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1.0"))); + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("1.0"))); + + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(Long.toString(Long.MIN_VALUE)))) + .isEqualTo(Long.MIN_VALUE); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("-42"))).isEqualTo(-42L); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-42"))).isEqualTo(-42L); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0"))).isEqualTo(0L); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("42"))).isEqualTo(42L); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("42"))).isEqualTo(42L); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(Long.toString(Long.MAX_VALUE)))) + .isEqualTo(Long.MAX_VALUE); + } + + @Test + public void setOfLongsAttributeConverter_ReturnsNSType() { + SetAttributeConverter<Set<Long>> longSet = SetAttributeConverter.setConverter(LongAttributeConverter.create()); + assertThat(longSet.attributeValueType()).isEqualTo(AttributeValueType.NS); + } + + private static String tiiiinyNumber() { + return "-" + huuuugeNumber(); + } + + private static String huuuugeNumber() { + StringBuilder result = new StringBuilder(); + for (int i = 0; i < 1_000; ++i) { + result.append("9"); + } + return result.toString(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/OffsetDateTimeAsStringAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/OffsetDateTimeAsStringAttributeConverterTest.java new file mode 100644 index 000000000000..d7839a99d948 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/OffsetDateTimeAsStringAttributeConverterTest.java @@ -0,0 +1,112 @@ +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static 
software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.OffsetDateTimeAsStringAttributeConverter; + +public class OffsetDateTimeAsStringAttributeConverterTest { + + private static OffsetDateTimeAsStringAttributeConverter converter = OffsetDateTimeAsStringAttributeConverter.create(); + + private static OffsetDateTime epochUtc = Instant.EPOCH.atOffset(ZoneOffset.UTC); + + @Test + public void OffsetDateTimeAsStringAttributeConverterMinTest() { + verifyTransform(OffsetDateTime.MIN, "-999999999-01-01T00:00+18:00"); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterEpochMinusOneMilliTest() { + verifyTransform(epochUtc.minusNanos(1), "1969-12-31T23:59:59.999999999Z"); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterEpochTest() { + verifyTransform(epochUtc, "1970-01-01T00:00Z"); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterEpochPlusOneMilliTest() { + verifyTransform(epochUtc.plusNanos(1), "1970-01-01T00:00:00.000000001Z"); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterMaxTest() { + verifyTransform(OffsetDateTime.MAX, "+999999999-12-31T23:59:59.999999999-18:00"); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterNormalOffsetTest() { + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00"))) + .isEqualTo(OffsetDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.ofHours(1))); + } + + + @Test + public void OffsetDateTimeAsStringAttributeConverterExceedLowerBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-1000000001-12-31T23:59:59.999999999Z") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterInvalidFormatTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("+1000000001-01-01T00:00:00Z") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAsStringAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterNotAcceptTimeZoneNamedZonedTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterNotAcceptLocalDateTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:12:00.000000001") + 
.toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterAdditionallyAcceptInstantTest() { + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z"))).isEqualTo(epochUtc); + } + + private void verifyTransform(OffsetDateTime objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/OptionalAttributeConvertersTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/OptionalAttributeConvertersTest.java new file mode 100644 index 000000000000..31d1b2fb95d5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/OptionalAttributeConvertersTest.java @@ -0,0 +1,82 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.fromNumber; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.nullValue; + +import java.util.OptionalDouble; +import java.util.OptionalInt; +import java.util.OptionalLong; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.OptionalDoubleAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.OptionalIntAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.OptionalLongAttributeConverter; + +public class OptionalAttributeConvertersTest { + + @Test + public void optionalDoubleConverterWorksCorrectly() { + OptionalDoubleAttributeConverter converter = OptionalDoubleAttributeConverter.create(); + + assertThat(transformFrom(converter, OptionalDouble.empty())).isEqualTo(nullValue().toAttributeValue()); + assertThat(transformFrom(converter, OptionalDouble.of(-Double.MAX_VALUE))).isEqualTo(fromNumber("-1.7976931348623157E308").toAttributeValue()); + assertThat(transformFrom(converter, OptionalDouble.of(-Double.MIN_VALUE))).isEqualTo(fromNumber("-4.9E-324").toAttributeValue()); + assertThat(transformFrom(converter, OptionalDouble.of(0.0))).isEqualTo(fromNumber("0.0").toAttributeValue()); + assertThat(transformFrom(converter, OptionalDouble.of(Double.MIN_VALUE))).isEqualTo(fromNumber("4.9E-324").toAttributeValue()); + assertThat(transformFrom(converter, OptionalDouble.of(Double.MAX_VALUE))).isEqualTo(fromNumber("1.7976931348623157E308").toAttributeValue()); + + assertThat(transformTo(converter, nullValue().toAttributeValue())).isEmpty(); + assertThat(transformTo(converter, fromNumber("-1.7976931348623157E308"))).hasValue(-Double.MAX_VALUE); + assertThat(transformTo(converter, fromNumber("-4.9E-324"))).hasValue(-Double.MIN_VALUE); + assertThat(transformTo(converter, fromNumber("0.0"))).hasValue(0.0); + assertThat(transformTo(converter, fromNumber("4.9E-324"))).hasValue(Double.MIN_VALUE); + assertThat(transformTo(converter, fromNumber("1.7976931348623157E308"))).hasValue(Double.MAX_VALUE); + } + + @Test + public void optionalIntConverterWorksCorrectly() { + OptionalIntAttributeConverter converter = OptionalIntAttributeConverter.create(); + + assertThat(transformFrom(converter, OptionalInt.empty())).isEqualTo(nullValue().toAttributeValue()); + assertThat(transformFrom(converter, OptionalInt.of(Integer.MIN_VALUE))).isEqualTo(fromNumber("-2147483648").toAttributeValue()); + assertThat(transformFrom(converter, OptionalInt.of(0))).isEqualTo(fromNumber("0").toAttributeValue()); + assertThat(transformFrom(converter, OptionalInt.of(Integer.MAX_VALUE))).isEqualTo(fromNumber("2147483647").toAttributeValue()); + + assertThat(transformTo(converter, nullValue().toAttributeValue())).isEmpty(); + assertThat(transformTo(converter, fromNumber("-2147483648"))).hasValue(Integer.MIN_VALUE); + assertThat(transformTo(converter, fromNumber("0"))).hasValue(0); + assertThat(transformTo(converter, fromNumber("2147483647"))).hasValue(Integer.MAX_VALUE); + } + + @Test + public 
void optionalLongConverterWorksCorrectly() { + OptionalLongAttributeConverter converter = OptionalLongAttributeConverter.create(); + + assertThat(transformFrom(converter, OptionalLong.empty())).isEqualTo(nullValue().toAttributeValue()); + assertThat(transformFrom(converter, OptionalLong.of(Long.MIN_VALUE))).isEqualTo(fromNumber("-9223372036854775808").toAttributeValue()); + assertThat(transformFrom(converter, OptionalLong.of(0))).isEqualTo(fromNumber("0").toAttributeValue()); + assertThat(transformFrom(converter, OptionalLong.of(Long.MAX_VALUE))).isEqualTo(fromNumber("9223372036854775807").toAttributeValue()); + + assertThat(transformTo(converter, nullValue().toAttributeValue())).isEmpty(); + assertThat(transformTo(converter, fromNumber("-9223372036854775808"))).hasValue(Long.MIN_VALUE); + assertThat(transformTo(converter, fromNumber("0"))).hasValue(0); + assertThat(transformTo(converter, fromNumber("9223372036854775807"))).hasValue(Long.MAX_VALUE); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/StringAttributeConvertersTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/StringAttributeConvertersTest.java new file mode 100644 index 000000000000..75c121bd0198 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/StringAttributeConvertersTest.java @@ -0,0 +1,244 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.fromBoolean; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.fromBytes; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.fromListOfAttributeValues; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.fromMap; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.fromNumber; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.fromSetOfBytes; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.fromSetOfNumbers; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.fromSetOfStrings; +import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue.fromString; + +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URL; +import java.time.Period; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.util.Set; +import java.util.UUID; +import org.junit.Test; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.CharSequenceAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.CharacterArrayAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.CharacterAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.PeriodAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.SetAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringBufferAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringBuilderAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.UriAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.UrlAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.UuidAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ZoneIdAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ZoneOffsetAttributeConverter; +import software.amazon.awssdk.utils.ImmutableMap; + +public class StringAttributeConvertersTest { + @Test + public void charArrayAttributeConverterBehaves() { + CharacterArrayAttributeConverter converter = 
CharacterArrayAttributeConverter.create(); + + char[] emptyChars = {}; + char[] chars = {'f', 'o', 'o'}; + char[] numChars = {'4', '2'}; + + assertThat(transformFrom(converter, chars).s()).isEqualTo("foo"); + assertThat(transformFrom(converter, emptyChars).s()).isEqualTo(""); + + assertThat(transformTo(converter, fromString(""))).isEqualTo(emptyChars); + assertThat(transformTo(converter, fromString("foo"))).isEqualTo(chars); + assertThat(transformTo(converter, fromNumber("42"))).isEqualTo(numChars); + } + + @Test + public void characterAttributeConverterBehaves() { + CharacterAttributeConverter converter = CharacterAttributeConverter.create(); + + assertThat(transformFrom(converter, 'a').s()).isEqualTo("a"); + + assertFails(() -> transformTo(converter, fromString(""))); + assertFails(() -> transformTo(converter, fromString("ab"))); + + assertThat(transformTo(converter, fromString("a"))).isEqualTo('a'); + } + + @Test + public void charSequenceAttributeConverterBehaves() { + CharSequenceAttributeConverter converter = CharSequenceAttributeConverter.create(); + + CharSequence emptyChars = ""; + CharSequence chars = "foo"; + CharSequence numChars = "42"; + + assertThat(transformFrom(converter, chars).s()).isEqualTo("foo"); + assertThat(transformFrom(converter, emptyChars).s()).isEqualTo(""); + + assertThat(transformTo(converter, fromString(""))).isEqualTo(emptyChars); + assertThat(transformTo(converter, fromString("foo"))).isEqualTo(chars); + assertThat(transformTo(converter, fromNumber("42"))).isEqualTo(numChars); + } + + @Test + public void periodAttributeConverterBehaves() { + PeriodAttributeConverter converter = PeriodAttributeConverter.create(); + + assertThat(transformFrom(converter, Period.ofYears(-5)).s()).isEqualTo("P-5Y"); + assertThat(transformFrom(converter, Period.ofDays(-1)).s()).isEqualTo("P-1D"); + assertThat(transformFrom(converter, Period.ZERO).s()).isEqualTo("P0D"); + assertThat(transformFrom(converter, Period.ofDays(1)).s()).isEqualTo("P1D"); + assertThat(transformFrom(converter, Period.ofYears(5)).s()).isEqualTo("P5Y"); + + assertFails(() -> transformTo(converter, fromString(""))); + assertFails(() -> transformTo(converter, fromString("P"))); + + assertThat(transformTo(converter, fromString("P-5Y"))).isEqualTo(Period.ofYears(-5)); + assertThat(transformTo(converter, fromString("P-1D"))).isEqualTo(Period.ofDays(-1)); + assertThat(transformTo(converter, fromString("P0D"))).isEqualTo(Period.ZERO); + assertThat(transformTo(converter, fromString("P1D"))).isEqualTo(Period.ofDays(1)); + assertThat(transformTo(converter, fromString("P5Y"))).isEqualTo(Period.ofYears(5)); + } + + @Test + public void stringAttributeConverterBehaves() { + StringAttributeConverter converter = StringAttributeConverter.create(); + + String emptyChars = ""; + String chars = "foo"; + String numChars = "42"; + + assertThat(transformFrom(converter, chars).s()).isSameAs(chars); + assertThat(transformFrom(converter, emptyChars).s()).isSameAs(emptyChars); + + assertThat(transformTo(converter, fromString(emptyChars))).isSameAs(emptyChars); + assertThat(transformTo(converter, fromString(chars))).isSameAs(chars); + assertThat(transformTo(converter, fromNumber(emptyChars))).isSameAs(emptyChars); + assertThat(transformTo(converter, fromNumber(numChars))).isSameAs(numChars); + assertThat(transformTo(converter, fromBytes(SdkBytes.fromUtf8String("foo")))).isEqualTo("Zm9v"); + assertThat(transformTo(converter, fromBoolean(true))).isEqualTo("true"); + assertThat(transformTo(converter, 
fromBoolean(false))).isEqualTo("false"); + assertThat(transformTo(converter, fromMap(ImmutableMap.of("a", fromString("b").toAttributeValue(), + "c", fromBytes(SdkBytes.fromUtf8String("d")).toAttributeValue())))) + .isEqualTo("{a=b, c=ZA==}"); + assertThat(transformTo(converter, fromListOfAttributeValues(fromString("a").toAttributeValue(), + fromBytes(SdkBytes.fromUtf8String("d")).toAttributeValue()))) + .isEqualTo("[a, ZA==]"); + assertThat(transformTo(converter, fromSetOfStrings("a", "b"))).isEqualTo("[a, b]"); + assertThat(transformTo(converter, fromSetOfBytes(SdkBytes.fromUtf8String("a"), SdkBytes.fromUtf8String("b")))) + .isEqualTo("[YQ==,Yg==]"); + assertThat(transformTo(converter, fromSetOfNumbers("1", "2"))).isEqualTo("[1, 2]"); + } + + @Test + public void stringBuilderAttributeConverterBehaves() { + StringBuilderAttributeConverter converter = StringBuilderAttributeConverter.create(); + + assertThat(transformFrom(converter, new StringBuilder()).s()).isEqualTo(""); + assertThat(transformFrom(converter, new StringBuilder("foo")).s()).isEqualTo("foo"); + assertThat(transformFrom(converter, new StringBuilder("42")).s()).isEqualTo("42"); + + assertThat(transformTo(converter, fromString("")).toString()).isEqualTo(""); + assertThat(transformTo(converter, fromString("foo")).toString()).isEqualTo("foo"); + assertThat(transformTo(converter, fromNumber("42")).toString()).isEqualTo("42"); + } + + @Test + public void stringBufferAttributeConverterBehaves() { + StringBufferAttributeConverter converter = StringBufferAttributeConverter.create(); + + assertThat(transformFrom(converter, new StringBuffer()).s()).isEqualTo(""); + assertThat(transformFrom(converter, new StringBuffer("foo")).s()).isEqualTo("foo"); + assertThat(transformFrom(converter, new StringBuffer("42")).s()).isEqualTo("42"); + + assertThat(transformTo(converter, fromString("")).toString()).isEqualTo(""); + assertThat(transformTo(converter, fromString("foo")).toString()).isEqualTo("foo"); + assertThat(transformTo(converter, fromNumber("42")).toString()).isEqualTo("42"); + } + + @Test + public void uriAttributeConverterBehaves() { + UriAttributeConverter converter = UriAttributeConverter.create(); + + assertThat(transformFrom(converter, URI.create("http://example.com/languages/java/")).s()) + .isEqualTo("http://example.com/languages/java/"); + assertThat(transformFrom(converter, URI.create("sample/a/index.html#28")).s()) + .isEqualTo("sample/a/index.html#28"); + assertThat(transformFrom(converter, URI.create("../../demo/b/index.html")).s()) + .isEqualTo("../../demo/b/index.html"); + assertThat(transformFrom(converter, URI.create("file:///~/calendar")).s()).isEqualTo("file:///~/calendar"); + + assertThat(transformTo(converter, fromString("http://example.com/languages/java/"))) + .isEqualTo(URI.create("http://example.com/languages/java/")); + assertThat(transformTo(converter, fromString("sample/a/index.html#28"))) + .isEqualTo(URI.create("sample/a/index.html#28")); + assertThat(transformTo(converter, fromString("../../demo/b/index.html"))) + .isEqualTo(URI.create("../../demo/b/index.html")); + assertThat(transformTo(converter, fromString("file:///~/calendar"))) + .isEqualTo(URI.create("file:///~/calendar")); + } + + @Test + public void urlAttributeConverterBehaves() throws MalformedURLException { + UrlAttributeConverter converter = UrlAttributeConverter.create(); + + assertThat(transformFrom(converter, new URL("http://example.com/languages/java/")).s()) + .isEqualTo("http://example.com/languages/java/"); + 
assertThat(transformTo(converter, fromString("http://example.com/languages/java/")))
+            .isEqualTo(new URL("http://example.com/languages/java/"));
+    }
+
+    @Test
+    public void uuidAttributeConverterBehaves() {
+        UuidAttributeConverter converter = UuidAttributeConverter.create();
+        UUID uuid = UUID.randomUUID();
+        assertThat(transformFrom(converter, uuid).s()).isEqualTo(uuid.toString());
+        assertThat(transformTo(converter, fromString(uuid.toString()))).isEqualTo(uuid);
+    }
+
+    @Test
+    public void zoneIdAttributeConverterBehaves() {
+        ZoneIdAttributeConverter converter = ZoneIdAttributeConverter.create();
+        assertThat(transformFrom(converter, ZoneId.of("UTC")).s()).isEqualTo("UTC");
+        assertFails(() -> transformTo(converter, fromString("XXXXXX")));
+        assertThat(transformTo(converter, fromString("UTC"))).isEqualTo(ZoneId.of("UTC"));
+    }
+
+    @Test
+    public void zoneOffsetAttributeConverterBehaves() {
+        ZoneOffsetAttributeConverter converter = ZoneOffsetAttributeConverter.create();
+        assertThat(transformFrom(converter, ZoneOffset.ofHoursMinutesSeconds(0, -1, -2)).s()).isEqualTo("-00:01:02");
+        assertThat(transformFrom(converter, ZoneOffset.ofHoursMinutesSeconds(0, 1, 2)).s()).isEqualTo("+00:01:02");
+        assertFails(() -> transformTo(converter, fromString("+99999:00:00")));
+        assertThat(transformTo(converter, fromString("-00:01:02")))
+            .isEqualTo(ZoneOffset.ofHoursMinutesSeconds(0, -1, -2));
+        assertThat(transformTo(converter, fromString("+00:01:02")))
+            .isEqualTo(ZoneOffset.ofHoursMinutesSeconds(0, 1, 2));
+    }
+
+    @Test
+    public void stringSetAttributeConverter_ReturnsSSType() {
+        SetAttributeConverter<Set<String>> converter = SetAttributeConverter.setConverter(StringAttributeConverter.create());
+        assertThat(converter.attributeValueType()).isEqualTo(AttributeValueType.SS);
+    }
+}
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/ZonedDateTimeAsStringAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/ZonedDateTimeAsStringAttributeConverterTest.java
new file mode 100644
index 000000000000..3f44c03e2824
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/ZonedDateTimeAsStringAttributeConverterTest.java
@@ -0,0 +1,132 @@
+package software.amazon.awssdk.enhanced.dynamodb.converters.attribute;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails;
+import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom;
+import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo;
+
+import java.time.Instant;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+import java.time.temporal.ChronoUnit;
+import org.junit.Test;
+import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue;
+import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ZonedDateTimeAsStringAttributeConverter;
+
+public class ZonedDateTimeAsStringAttributeConverterTest {
+
+    private static ZonedDateTimeAsStringAttributeConverter converter = ZonedDateTimeAsStringAttributeConverter.create();
+
+    private static ZonedDateTime epochUtc = Instant.EPOCH.atZone(ZoneOffset.UTC);
+    private
static ZonedDateTime min = OffsetDateTime.MIN.toZonedDateTime(); + private static ZonedDateTime max = OffsetDateTime.MAX.toZonedDateTime(); + + @Test + public void ZonedDateTimeAsStringAttributeConverterMinTest() { + verifyTransform(min, "-999999999-01-01T00:00+18:00"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterEpochMinusOneMilliTest() { + verifyTransform(epochUtc.minusNanos(1), "1969-12-31T23:59:59.999999999Z"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterEpochTest() { + verifyTransform(epochUtc, "1970-01-01T00:00Z"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterEpochPlusOneMilliTest() { + verifyTransform(epochUtc.plusNanos(1), "1970-01-01T00:00:00.000000001Z"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterMaxTest() { + verifyTransform(max, "+999999999-12-31T23:59:59.999999999-18:00"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterTimeZoneAtParisTest() { + verifyTransform(Instant.EPOCH.atZone(ZoneId.of("Europe/Paris")), "1970-01-01T01:00+01:00[Europe/Paris]"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterNormalOffsetTest() { + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00"))) + .isEqualTo(ZonedDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.ofHours(1))); + } + + + @Test + public void ZonedDateTimeAsStringAttributeConverterExceedLowerBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-1000000001-12-31T23:59:59.999999999Z") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterInvalidFormatTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("+1000000001-01-01T00:00:00Z") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterFakeZoneTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00[FakeZone]") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterNotAcceptLocalDateTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterAdditionallyAcceptInstantTest() { + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z"))).isEqualTo(epochUtc); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterAdditionallyAcceptOffsetDateTimeTest() { + // To make sure the specific zone converter is selected, here a specific Zoned converter is used. 
+ ZonedDateTimeAsStringAttributeConverter converter = ZonedDateTimeAsStringAttributeConverter.create(); + + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00"))) + .isEqualTo(epochUtc.minus(1, ChronoUnit.HOURS)); + } + + private void verifyTransform(ZonedDateTime objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/ChainExtensionTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/ChainExtensionTest.java new file mode 100644 index 000000000000..66d0b0569613 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/ChainExtensionTest.java @@ -0,0 +1,275 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.extensions; + +import static java.util.stream.Collectors.toList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.IntStream; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.ChainExtension; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@RunWith(MockitoJUnitRunner.class) +public class ChainExtensionTest { + private static final String TABLE_NAME = "concrete-table-name"; + private static final OperationContext PRIMARY_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, 
TableMetadata.primaryIndexName());
+
+    private static final Map<String, AttributeValue> ATTRIBUTE_VALUES_1 =
+        Collections.unmodifiableMap(Collections.singletonMap("key1", AttributeValue.builder().s("1").build()));
+    private static final Map<String, AttributeValue> ATTRIBUTE_VALUES_2 =
+        Collections.unmodifiableMap(Collections.singletonMap("key2", AttributeValue.builder().s("2").build()));
+    private static final Map<String, AttributeValue> ATTRIBUTE_VALUES_3 =
+        Collections.unmodifiableMap(Collections.singletonMap("key3", AttributeValue.builder().s("3").build()));
+
+    @Mock
+    private DynamoDbEnhancedClientExtension mockExtension1;
+    @Mock
+    private DynamoDbEnhancedClientExtension mockExtension2;
+    @Mock
+    private DynamoDbEnhancedClientExtension mockExtension3;
+
+    private final List<Map<String, AttributeValue>> fakeItems =
+        IntStream.range(0, 4)
+                 .mapToObj($ -> createUniqueFakeItem())
+                 .map(fakeItem -> FakeItem.getTableSchema().itemToMap(fakeItem, true))
+                 .collect(toList());
+
+    @Test
+    public void beforeWrite_multipleExtensions_multipleConditions_multipleTransformations() {
+        Expression expression1 = Expression.builder().expression("one").expressionValues(ATTRIBUTE_VALUES_1).build();
+        Expression expression2 = Expression.builder().expression("two").expressionValues(ATTRIBUTE_VALUES_2).build();
+        Expression expression3 = Expression.builder().expression("three").expressionValues(ATTRIBUTE_VALUES_3).build();
+        ChainExtension extension = ChainExtension.create(mockExtension1, mockExtension2, mockExtension3);
+        WriteModification writeModification1 = WriteModification.builder()
+            .additionalConditionalExpression(expression1)
+            .transformedItem(fakeItems.get(1))
+            .build();
+        WriteModification writeModification2 = WriteModification.builder()
+            .additionalConditionalExpression(expression2)
+            .transformedItem(fakeItems.get(2))
+            .build();
+        WriteModification writeModification3 = WriteModification.builder()
+            .additionalConditionalExpression(expression3)
+            .transformedItem(fakeItems.get(3))
+            .build();
+        when(mockExtension1.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(writeModification1);
+        when(mockExtension2.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(writeModification2);
+        when(mockExtension3.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(writeModification3);
+
+        WriteModification result = extension.beforeWrite(getExtensionContext(0));
+
+        Map<String, AttributeValue> combinedMap = new HashMap<>(ATTRIBUTE_VALUES_1);
+        combinedMap.putAll(ATTRIBUTE_VALUES_2);
+        combinedMap.putAll(ATTRIBUTE_VALUES_3);
+        Expression expectedExpression =
+            Expression.builder().expression("((one) AND (two)) AND (three)").expressionValues(combinedMap).build();
+        assertThat(result.transformedItem(), is(fakeItems.get(3)));
+        assertThat(result.additionalConditionalExpression(), is(expectedExpression));
+
+        InOrder inOrder = Mockito.inOrder(mockExtension1, mockExtension2, mockExtension3);
+        inOrder.verify(mockExtension1).beforeWrite(
+            getExtensionContext(0));
+        inOrder.verify(mockExtension2).beforeWrite(getExtensionContext(1));
+        inOrder.verify(mockExtension3).beforeWrite(getExtensionContext(2));
+        inOrder.verifyNoMoreInteractions();
+    }
+
+    @Test
+    public void beforeWrite_multipleExtensions_doingNothing() {
+        ChainExtension extension = ChainExtension.create(mockExtension1, mockExtension2, mockExtension3);
+        when(mockExtension1.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(WriteModification.builder().build());
+
when(mockExtension2.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(WriteModification.builder().build()); + when(mockExtension3.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(WriteModification.builder().build()); + + WriteModification result = extension.beforeWrite(getExtensionContext(0)); + + assertThat(result.additionalConditionalExpression(), is(nullValue())); + assertThat(result.transformedItem(), is(nullValue())); + + InOrder inOrder = Mockito.inOrder(mockExtension1, mockExtension2, mockExtension3); + inOrder.verify(mockExtension1).beforeWrite( + getExtensionContext(0)); + inOrder.verify(mockExtension2).beforeWrite(getExtensionContext(0)); + inOrder.verify(mockExtension3).beforeWrite(getExtensionContext(0)); + inOrder.verifyNoMoreInteractions(); + } + + private DefaultDynamoDbExtensionContext getExtensionContext(int i) { + return DefaultDynamoDbExtensionContext.builder() + .tableMetadata(FakeItem.getTableMetadata()) + .operationContext(PRIMARY_CONTEXT) + .items(fakeItems.get(i)).build(); + } + + @Test + public void beforeWrite_multipleExtensions_singleCondition_noTransformations() { + Expression expression = Expression.builder().expression("one").expressionValues(ATTRIBUTE_VALUES_1).build(); + ChainExtension extension = ChainExtension.create(mockExtension1, mockExtension2, mockExtension3); + WriteModification writeModification1 = WriteModification.builder().build(); + WriteModification writeModification2 = WriteModification.builder() + .additionalConditionalExpression(expression) + .build(); + WriteModification writeModification3 = WriteModification.builder().build(); + when(mockExtension1.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(writeModification1); + when(mockExtension2.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(writeModification2); + when(mockExtension3.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(writeModification3); + + WriteModification result = extension.beforeWrite(getExtensionContext(0)); + + Expression expectedExpression = Expression.builder() + .expression("one") + .expressionValues(ATTRIBUTE_VALUES_1) + .build(); + assertThat(result.transformedItem(), is(nullValue())); + assertThat(result.additionalConditionalExpression(), is(expectedExpression)); + + InOrder inOrder = Mockito.inOrder(mockExtension1, mockExtension2, mockExtension3); + inOrder.verify(mockExtension1).beforeWrite( + getExtensionContext(0)); + inOrder.verify(mockExtension2).beforeWrite(getExtensionContext(0)); + inOrder.verify(mockExtension3).beforeWrite(getExtensionContext(0)); + inOrder.verifyNoMoreInteractions(); + } + + @Test + public void beforeWrite_multipleExtensions_noConditions_singleTransformation() { + ChainExtension extension = ChainExtension.create(mockExtension1, mockExtension2, mockExtension3); + WriteModification writeModification1 = WriteModification.builder().build(); + WriteModification writeModification2 = WriteModification.builder() + .transformedItem(fakeItems.get(1)) + .build(); + WriteModification writeModification3 = WriteModification.builder().build(); + when(mockExtension1.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(writeModification1); + when(mockExtension2.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(writeModification2); + when(mockExtension3.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(writeModification3); + + WriteModification result = 
extension.beforeWrite(getExtensionContext(0)); + + assertThat(result.transformedItem(), is(fakeItems.get(1))); + assertThat(result.additionalConditionalExpression(), is(nullValue())); + + InOrder inOrder = Mockito.inOrder(mockExtension1, mockExtension2, mockExtension3); + inOrder.verify(mockExtension1).beforeWrite( + getExtensionContext(0)); + inOrder.verify(mockExtension2).beforeWrite(getExtensionContext(0)); + inOrder.verify(mockExtension3).beforeWrite(getExtensionContext(1)); + inOrder.verifyNoMoreInteractions(); + } + + @Test + public void beforeWrite_noExtensions() { + ChainExtension extension = ChainExtension.create(); + + WriteModification result = extension.beforeWrite(getExtensionContext(0)); + + assertThat(result.transformedItem(), is(nullValue())); + assertThat(result.additionalConditionalExpression(), is(nullValue())); + } + + @Test + public void afterRead_multipleExtensions_multipleTransformations() { + ChainExtension extension = ChainExtension.create(mockExtension1, mockExtension2, mockExtension3); + ReadModification readModification1 = ReadModification.builder().transformedItem(fakeItems.get(1)).build(); + ReadModification readModification2 = ReadModification.builder().transformedItem(fakeItems.get(2)).build(); + ReadModification readModification3 = ReadModification.builder().transformedItem(fakeItems.get(3)).build(); + when(mockExtension1.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(readModification1); + when(mockExtension2.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(readModification2); + when(mockExtension3.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(readModification3); + + ReadModification result = extension.afterRead(getExtensionContext(0)); + + assertThat(result.transformedItem(), is(fakeItems.get(1))); + + InOrder inOrder = Mockito.inOrder(mockExtension1, mockExtension2, mockExtension3); + inOrder.verify(mockExtension3).afterRead(getExtensionContext(0)); + inOrder.verify(mockExtension2).afterRead(getExtensionContext(3)); + inOrder.verify(mockExtension1).afterRead(getExtensionContext(2)); + inOrder.verifyNoMoreInteractions(); + } + + @Test + public void afterRead_multipleExtensions_singleTransformation() { + ChainExtension extension = ChainExtension.create(mockExtension1, mockExtension2, mockExtension3); + ReadModification readModification1 = ReadModification.builder().build(); + ReadModification readModification2 = ReadModification.builder().transformedItem(fakeItems.get(1)).build(); + ReadModification readModification3 = ReadModification.builder().build(); + when(mockExtension1.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(readModification1); + when(mockExtension2.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(readModification2); + when(mockExtension3.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(readModification3); + + ReadModification result = extension.afterRead(getExtensionContext(0)); + + assertThat(result.transformedItem(), is(fakeItems.get(1))); + InOrder inOrder = Mockito.inOrder(mockExtension1, mockExtension2, mockExtension3); + inOrder.verify(mockExtension3).afterRead(getExtensionContext(0)); + inOrder.verify(mockExtension2).afterRead(getExtensionContext(0)); + inOrder.verify(mockExtension1).afterRead(getExtensionContext(1)); + inOrder.verifyNoMoreInteractions(); + } + + @Test + public void afterRead_multipleExtensions_noTransformations() { + ChainExtension extension = ChainExtension.create(mockExtension1, 
mockExtension2, mockExtension3); + ReadModification readModification1 = ReadModification.builder().build(); + ReadModification readModification2 = ReadModification.builder().build(); + ReadModification readModification3 = ReadModification.builder().build(); + when(mockExtension1.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(readModification1); + when(mockExtension2.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(readModification2); + when(mockExtension3.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(readModification3); + + ReadModification result = extension.afterRead(getExtensionContext(0)); + + assertThat(result.transformedItem(), is(nullValue())); + InOrder inOrder = Mockito.inOrder(mockExtension1, mockExtension2, mockExtension3); + inOrder.verify(mockExtension3).afterRead(getExtensionContext(0)); + inOrder.verify(mockExtension2).afterRead(getExtensionContext(0)); + inOrder.verify(mockExtension1).afterRead(getExtensionContext(0)); + inOrder.verifyNoMoreInteractions(); + } + + @Test + public void afterRead_noExtensions() { + ChainExtension extension = ChainExtension.create(); + + ReadModification result = extension.afterRead(getExtensionContext(0)); + + assertThat(result.transformedItem(), is(nullValue())); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java new file mode 100644 index 000000000000..b12ae9a8a18c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java @@ -0,0 +1,180 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb.extensions;
+
+import static java.util.Collections.singletonMap;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem;
+import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithSort;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.junit.Test;
+import software.amazon.awssdk.enhanced.dynamodb.Expression;
+import software.amazon.awssdk.enhanced.dynamodb.OperationContext;
+import software.amazon.awssdk.enhanced.dynamodb.TableMetadata;
+import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem;
+import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort;
+import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext;
+import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext;
+import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
+
+public class VersionedRecordExtensionTest {
+    private static final String TABLE_NAME = "table-name";
+    private static final OperationContext PRIMARY_CONTEXT =
+        DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName());
+
+    private final VersionedRecordExtension versionedRecordExtension = VersionedRecordExtension.builder().build();
+
+    @Test
+    public void beforeRead_doesNotTransformObject() {
+        FakeItem fakeItem = createUniqueFakeItem();
+        Map<String, AttributeValue> fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true);
+
+        ReadModification result =
+            versionedRecordExtension.afterRead(DefaultDynamoDbExtensionContext
+                .builder()
+                .items(fakeItemMap)
+                .tableMetadata(FakeItem.getTableMetadata())
+                .operationContext(PRIMARY_CONTEXT).build());
+
+        assertThat(result, is(ReadModification.builder().build()));
+    }
+
+    @Test
+    public void beforeWrite_initialVersion_expressionIsCorrect() {
+        FakeItem fakeItem = createUniqueFakeItem();
+
+        WriteModification result =
+            versionedRecordExtension.beforeWrite(
+                DefaultDynamoDbExtensionContext
+                    .builder()
+                    .items(FakeItem.getTableSchema().itemToMap(fakeItem, true))
+                    .tableMetadata(FakeItem.getTableMetadata())
+                    .operationContext(PRIMARY_CONTEXT).build());
+
+        assertThat(result.additionalConditionalExpression(),
+                   is(Expression.builder().expression("attribute_not_exists(version)").build()));
+    }
+
+    @Test
+    public void beforeWrite_initialVersion_transformedItemIsCorrect() {
+        FakeItem fakeItem = createUniqueFakeItem();
+        Map<String, AttributeValue> fakeItemWithInitialVersion =
+            new HashMap<>(FakeItem.getTableSchema().itemToMap(fakeItem, true));
+        fakeItemWithInitialVersion.put("version", AttributeValue.builder().n("1").build());
+
+        WriteModification result =
+            versionedRecordExtension.beforeWrite(DefaultDynamoDbExtensionContext
+                .builder()
+                .items(FakeItem.getTableSchema().itemToMap(fakeItem, true))
+                .tableMetadata(FakeItem.getTableMetadata())
+                .operationContext(PRIMARY_CONTEXT).build());
+
+
+        assertThat(result.transformedItem(), is(fakeItemWithInitialVersion));
+    }
+
+    @Test
+    public void beforeWrite_initialVersionDueToExplicitNull_transformedItemIsCorrect() {
+        FakeItem fakeItem = createUniqueFakeItem();
+        Map<String, AttributeValue> inputMap =
+            new HashMap<>(FakeItem.getTableSchema().itemToMap(fakeItem, true));
+        inputMap.put("version", AttributeValue.builder().nul(true).build());
+        Map<String, AttributeValue> fakeItemWithInitialVersion =
+            new HashMap<>(FakeItem.getTableSchema().itemToMap(fakeItem, true));
+        fakeItemWithInitialVersion.put("version", AttributeValue.builder().n("1").build());
+
+        WriteModification result =
+            versionedRecordExtension.beforeWrite(DefaultDynamoDbExtensionContext
+                .builder()
+                .items(inputMap)
+                .tableMetadata(FakeItem.getTableMetadata())
+                .operationContext(PRIMARY_CONTEXT).build());
+
+        assertThat(result.transformedItem(), is(fakeItemWithInitialVersion));
+    }
+
+    @Test
+    public void beforeWrite_existingVersion_expressionIsCorrect() {
+        FakeItem fakeItem = createUniqueFakeItem();
+        fakeItem.setVersion(13);
+
+        WriteModification result =
+            versionedRecordExtension.beforeWrite(DefaultDynamoDbExtensionContext
+                .builder()
+                .items(FakeItem.getTableSchema().itemToMap(fakeItem, true))
+                .tableMetadata(FakeItem.getTableMetadata())
+                .operationContext(PRIMARY_CONTEXT).build());
+
+        assertThat(result.additionalConditionalExpression(),
+                   is(Expression.builder()
+                                .expression("version = :old_version_value")
+                                .expressionValues(singletonMap(":old_version_value",
+                                                               AttributeValue.builder().n("13").build()))
+                                .build()));
+    }
+
+    @Test
+    public void beforeWrite_existingVersion_transformedItemIsCorrect() {
+        FakeItem fakeItem = createUniqueFakeItem();
+        fakeItem.setVersion(13);
+        Map<String, AttributeValue> fakeItemWithInitialVersion =
+            new HashMap<>(FakeItem.getTableSchema().itemToMap(fakeItem, true));
+        fakeItemWithInitialVersion.put("version", AttributeValue.builder().n("14").build());
+
+        WriteModification result =
+            versionedRecordExtension.beforeWrite(DefaultDynamoDbExtensionContext
+                .builder()
+                .items(FakeItem.getTableSchema().itemToMap(fakeItem, true))
+                .tableMetadata(FakeItem.getTableMetadata())
+                .operationContext(PRIMARY_CONTEXT).build());
+
+
+        assertThat(result.transformedItem(), is(fakeItemWithInitialVersion));
+    }
+
+    @Test
+    public void beforeWrite_returnsNoOpModification_ifVersionAttributeNotDefined() {
+        FakeItemWithSort fakeItemWithSort = createUniqueFakeItemWithSort();
+        Map<String, AttributeValue> itemMap =
+            new HashMap<>(FakeItemWithSort.getTableSchema().itemToMap(fakeItemWithSort, true));
+
+        WriteModification writeModification = versionedRecordExtension.beforeWrite(DefaultDynamoDbExtensionContext.builder()
+            .items(itemMap)
+            .operationContext(PRIMARY_CONTEXT)
+            .tableMetadata(FakeItemWithSort.getTableMetadata())
+            .build());
+
+        assertThat(writeModification, is(WriteModification.builder().build()));
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void beforeWrite_throwsIllegalArgumentException_ifVersionAttributeIsWrongType() {
+        FakeItem fakeItem = createUniqueFakeItem();
+        Map<String, AttributeValue> fakeItemWIthBadVersion =
+            new HashMap<>(FakeItem.getTableSchema().itemToMap(fakeItem, true));
+        fakeItemWIthBadVersion.put("version", AttributeValue.builder().s("14").build());
+
+        versionedRecordExtension.beforeWrite(
+            DefaultDynamoDbExtensionContext.builder()
+                .items(fakeItemWIthBadVersion)
+                .operationContext(PRIMARY_CONTEXT)
+                .tableMetadata(FakeItem.getTableMetadata())
+                .build());
+    }
+}
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AnnotatedImmutableTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AnnotatedImmutableTableSchemaTest.java
new file mode 100644
index 000000000000..998f13998280
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AnnotatedImmutableTableSchemaTest.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb.functionaltests;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.junit.After;
+import org.junit.Test;
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient;
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable;
+import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
+import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.ImmutableFakeItem;
+import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest;
+import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput;
+
+public class AnnotatedImmutableTableSchemaTest extends LocalDynamoDbSyncTestBase {
+    private static final String TABLE_NAME = "table-name";
+
+    private final DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder()
+        .dynamoDbClient(getDynamoDbClient())
+        .build();
+
+    @After
+    public void deleteTable() {
+        getDynamoDbClient().deleteTable(DeleteTableRequest.builder()
+            .tableName(getConcreteTableName(TABLE_NAME))
+            .build());
+    }
+
+    @Test
+    public void simpleItem_putAndGet() {
+        TableSchema<ImmutableFakeItem> tableSchema =
+            TableSchema.fromClass(ImmutableFakeItem.class);
+
+        DynamoDbTable<ImmutableFakeItem> mappedTable =
+            enhancedClient.table(getConcreteTableName(TABLE_NAME), tableSchema);
+
+        mappedTable.createTable(r -> r.provisionedThroughput(ProvisionedThroughput.builder()
+            .readCapacityUnits(5L)
+            .writeCapacityUnits(5L)
+            .build()));
+        ImmutableFakeItem immutableFakeItem = ImmutableFakeItem.builder()
+            .id("id123")
+            .attribute("test-value")
+            .build();
+
+        mappedTable.putItem(immutableFakeItem);
+        ImmutableFakeItem readItem = mappedTable.getItem(immutableFakeItem);
+        assertThat(readItem).isEqualTo(immutableFakeItem);
+    }
+}
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBasicCrudTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBasicCrudTest.java
new file mode 100644
index 000000000000..7901f42fe594
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBasicCrudTest.java
@@ -0,0 +1,680 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondarySortKey; + +import java.util.Objects; +import java.util.concurrent.CompletionException; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; + +public class AsyncBasicCrudTest extends LocalDynamoDbAsyncTestBase { + private static class Record { + private String id; + private String sort; + private String attribute; + private String attribute2; + private String attribute3; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private String getSort() { + return sort; + } + + private Record setSort(String sort) { + this.sort = sort; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + private String getAttribute2() { + return attribute2; + } + + private Record setAttribute2(String attribute2) { + this.attribute2 = attribute2; + return this; + } + + private String getAttribute3() { + return attribute3; + } + + private Record setAttribute3(String attribute3) { + this.attribute3 = attribute3; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(sort, record.sort) && + Objects.equals(attribute, record.attribute) && + Objects.equals(attribute2, record.attribute2) && + Objects.equals(attribute3, record.attribute3); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort, attribute, attribute2, attribute3); + } + } 
+
+    private static class ShortRecord {
+        private String id;
+        private String sort;
+        private String attribute;
+
+        private String getId() {
+            return id;
+        }
+
+        private ShortRecord setId(String id) {
+            this.id = id;
+            return this;
+        }
+
+        private String getSort() {
+            return sort;
+        }
+
+        private ShortRecord setSort(String sort) {
+            this.sort = sort;
+            return this;
+        }
+
+        private String getAttribute() {
+            return attribute;
+        }
+
+        private ShortRecord setAttribute(String attribute) {
+            this.attribute = attribute;
+            return this;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            ShortRecord that = (ShortRecord) o;
+            return Objects.equals(id, that.id) &&
+                   Objects.equals(sort, that.sort) &&
+                   Objects.equals(attribute, that.attribute);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(id, sort, attribute);
+        }
+    }
+
+    private static final TableSchema<Record> TABLE_SCHEMA =
+        StaticTableSchema.builder(Record.class)
+                         .newItemSupplier(Record::new)
+                         .addAttribute(String.class, a -> a.name("id")
+                                                           .getter(Record::getId)
+                                                           .setter(Record::setId)
+                                                           .tags(primaryPartitionKey()))
+                         .addAttribute(String.class, a -> a.name("sort")
+                                                           .getter(Record::getSort)
+                                                           .setter(Record::setSort)
+                                                           .tags(primarySortKey()))
+                         .addAttribute(String.class, a -> a.name("attribute")
+                                                           .getter(Record::getAttribute)
+                                                           .setter(Record::setAttribute))
+                         .addAttribute(String.class, a -> a.name("attribute2*")
+                                                           .getter(Record::getAttribute2)
+                                                           .setter(Record::setAttribute2)
+                                                           .tags(secondaryPartitionKey("gsi_1")))
+                         .addAttribute(String.class, a -> a.name("attribute3")
+                                                           .getter(Record::getAttribute3)
+                                                           .setter(Record::setAttribute3)
+                                                           .tags(secondarySortKey("gsi_1")))
+                         .build();
+
+    private static final TableSchema<ShortRecord> SHORT_TABLE_SCHEMA =
+        StaticTableSchema.builder(ShortRecord.class)
+                         .newItemSupplier(ShortRecord::new)
+                         .addAttribute(String.class, a -> a.name("id")
+                                                           .getter(ShortRecord::getId)
+                                                           .setter(ShortRecord::setId)
+                                                           .tags(primaryPartitionKey()))
+                         .addAttribute(String.class, a -> a.name("sort")
+                                                           .getter(ShortRecord::getSort)
+                                                           .setter(ShortRecord::setSort)
+                                                           .tags(primarySortKey()))
+                         .addAttribute(String.class, a -> a.name("attribute")
+                                                           .getter(ShortRecord::getAttribute)
+                                                           .setter(ShortRecord::setAttribute))
+                         .build();
+
+    private DynamoDbEnhancedAsyncClient enhancedAsyncClient =
+        DefaultDynamoDbEnhancedAsyncClient.builder()
+                                          .dynamoDbClient(getDynamoDbAsyncClient())
+                                          .build();
+
+    private DynamoDbAsyncTable<Record> mappedTable = enhancedAsyncClient.table(getConcreteTableName("table-name"),
+                                                                               TABLE_SCHEMA);
+    private DynamoDbAsyncTable<ShortRecord> mappedShortTable = enhancedAsyncClient.table(getConcreteTableName("table-name"),
+                                                                                         SHORT_TABLE_SCHEMA);
+
+    @Rule
+    public ExpectedException exception = ExpectedException.none();
+
+    @Before
+    public void createTable() {
+
+        mappedTable.createTable(
+            r -> r.provisionedThroughput(getDefaultProvisionedThroughput())
+                  .globalSecondaryIndices(
+                      EnhancedGlobalSecondaryIndex.builder()
+                                                  .indexName("gsi_1")
+                                                  .projection(p -> p.projectionType(ProjectionType.ALL))
+                                                  .provisionedThroughput(getDefaultProvisionedThroughput())
+                                                  .build()))
+                   .join();
+    }
+
+    @After
+    public void deleteTable() {
+        getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder()
+                                                               .tableName(getConcreteTableName("table-name"))
+                                                               .build())
+                                .join();
+    }
+
+    @Test
+    public void putThenGetItemUsingKey() {
+        Record record = new Record()
+            .setId("id-value")
+            .setSort("sort-value")
+            .setAttribute("one")
+
.setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))).join(); + + assertThat(result, is(record)); + } + + @Test + public void putThenGetItemUsingKeyItem() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + + Record keyItem = new Record(); + keyItem.setId("id-value"); + keyItem.setSort("sort-value"); + + Record result = mappedTable.getItem(keyItem).join(); + + assertThat(result, is(record)); + } + + @Test + public void getNonExistentItem() { + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))).join(); + assertThat(result, is(nullValue())); + } + + @Test + public void putTwiceThenGetItem() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + Record record2 = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four") + .setAttribute2("five") + .setAttribute3("six"); + + mappedTable.putItem(r -> r.item(record2)).join(); + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))).join(); + + assertThat(result, is(record2)); + } + + @Test + public void putThenDeleteItem_usingShortcutForm() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(record).join(); + Record beforeDeleteResult = + mappedTable.deleteItem(Key.builder().partitionValue("id-value").sortValue("sort-value").build()).join(); + Record afterDeleteResult = + mappedTable.getItem(Key.builder().partitionValue("id-value").sortValue("sort-value").build()).join(); + + assertThat(beforeDeleteResult, is(record)); + assertThat(afterDeleteResult, is(nullValue())); + } + + @Test + public void putThenDeleteItem_usingKeyItemForm() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(record).join(); + Record beforeDeleteResult = + mappedTable.deleteItem(record).join(); + Record afterDeleteResult = + mappedTable.getItem(Key.builder().partitionValue("id-value").sortValue("sort-value").build()).join(); + + assertThat(beforeDeleteResult, is(record)); + assertThat(afterDeleteResult, is(nullValue())); + } + + @Test + public void putWithConditionThatSucceeds() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + record.setAttribute("four"); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", "attribute3") + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("three")) + .build(); + + mappedTable.putItem(PutItemEnhancedRequest.builder(Record.class) + .item(record) + .conditionExpression(conditionExpression) + .build()).join(); + + Record result = mappedTable.getItem(r -> r.key(k -> 
k.partitionValue("id-value").sortValue("sort-value"))).join(); + assertThat(result, is(record)); + } + + @Test + public void putWithConditionThatFails() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + record.setAttribute("four"); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", "attribute3") + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("wrong")) + .build(); + + exception.expect(CompletionException.class); + exception.expectCause(instanceOf(ConditionalCheckFailedException.class)); + mappedTable.putItem(PutItemEnhancedRequest.builder(Record.class) + .item(record) + .conditionExpression(conditionExpression) + .build()) + .join(); + } + + @Test + public void deleteNonExistentItem() { + Record result = mappedTable.deleteItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))).join(); + assertThat(result, is(nullValue())); + } + + @Test + public void deleteWithConditionThatSucceeds() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", "attribute3") + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("three")) + .build(); + + Key key = mappedTable.keyFrom(record); + mappedTable.deleteItem(DeleteItemEnhancedRequest.builder() + .key(key) + .conditionExpression(conditionExpression) + .build()) + .join(); + + Record result = mappedTable.getItem(r -> r.key(key)).join(); + assertThat(result, is(nullValue())); + } + + @Test + public void deleteWithConditionThatFails() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", "attribute3") + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("wrong")) + .build(); + + exception.expect(CompletionException.class); + exception.expectCause(instanceOf(ConditionalCheckFailedException.class)); + mappedTable.deleteItem(DeleteItemEnhancedRequest.builder().key(mappedTable.keyFrom(record)) + .conditionExpression(conditionExpression) + .build()).join(); + } + + @Test + public void updateOverwriteCompleteRecord_usingShortcutForm() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(record).join(); + Record record2 = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four") + .setAttribute2("five") + .setAttribute3("six"); + Record result = mappedTable.updateItem(record2).join(); + + assertThat(result, is(record2)); + } + + @Test + public void updateCreatePartialRecord() { + Record record = new Record() + .setId("id-value") 
+ .setSort("sort-value") + .setAttribute("one"); + + Record result = mappedTable.updateItem(r -> r.item(record)).join(); + + assertThat(result, is(record)); + } + + @Test + public void updateCreateKeyOnlyRecord() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value"); + + Record result = mappedTable.updateItem(r -> r.item(record)).join(); + assertThat(result, is(record)); + } + + @Test + public void updateOverwriteModelledNulls() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + Record record2 = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four"); + Record result = mappedTable.updateItem(r -> r.item(record2)).join(); + + assertThat(result, is(record2)); + } + + @Test + public void updateCanIgnoreNullsAndDoPartialUpdate() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + Record record2 = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four"); + Record result = mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(record2) + .ignoreNulls(true) + .build()) + .join(); + + Record expectedResult = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four") + .setAttribute2("two") + .setAttribute3("three"); + assertThat(result, is(expectedResult)); + } + + @Test + public void updateShortRecordDoesPartialUpdate() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + ShortRecord record2 = new ShortRecord() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four"); + ShortRecord shortResult = mappedShortTable.updateItem(r -> r.item(record2)).join(); + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue(record.getId()) + .sortValue(record.getSort()))).join(); + + Record expectedResult = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four") + .setAttribute2("two") + .setAttribute3("three"); + assertThat(result, is(expectedResult)); + assertThat(shortResult, is(record2)); + } + + @Test + public void updateKeyOnlyExistingRecordDoesNothing() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + Record updateRecord = new Record().setId("id-value").setSort("sort-value"); + + Record result = mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(updateRecord) + .ignoreNulls(true) + .build()) + .join(); + + assertThat(result, is(record)); + } + + @Test + public void updateWithConditionThatSucceeds() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + record.setAttribute("four"); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", "attribute3") + .putExpressionValue(":value", stringValue("wrong")) + 
.putExpressionValue(":value1", stringValue("three")) + .build(); + + mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(record) + .conditionExpression(conditionExpression) + .build()) + .join(); + + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))).join(); + assertThat(result, is(record)); + } + + @Test + public void updateWithConditionThatFails() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)).join(); + record.setAttribute("four"); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", "attribute3") + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("wrong")) + .build(); + + exception.expect(CompletionException.class); + exception.expectCause(instanceOf(ConditionalCheckFailedException.class)); + mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(record) + .conditionExpression(conditionExpression) + .build()) + .join(); + } + + @Test + public void getAShortRecordWithNewModelledFields() { + ShortRecord shortRecord = new ShortRecord() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one"); + mappedShortTable.putItem(r -> r.item(shortRecord)).join(); + Record expectedRecord = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one"); + + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))).join(); + assertThat(result, is(expectedRecord)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBasicQueryTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBasicQueryTest.java new file mode 100644 index 000000000000..b7fe4c041801 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBasicQueryTest.java @@ -0,0 +1,317 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional.keyEqualTo; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class AsyncBasicQueryTest extends LocalDynamoDbAsyncTestBase { + private static class Record { + private String id; + private Integer sort; + private Integer value; + + public String getId() { + return id; + } + + public Record setId(String id) { + this.id = id; + return this; + } + + public Integer getSort() { + return sort; + } + + public Record setSort(Integer sort) { + this.sort = sort; + return this; + } + + public Integer getValue() { + return value; + } + + public Record setValue(Integer value) { + this.value = value; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(sort, record.sort) && + Objects.equals(value, record.value); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort, value); + } + } + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .addAttribute(Integer.class, a -> a.name("sort") + .getter(Record::getSort) + .setter(Record::setSort) + .tags(primarySortKey())) + .addAttribute(Integer.class, a -> a.name("value") + .getter(Record::getValue) + .setter(Record::setValue)) + .build(); + + private static final List RECORDS = + IntStream.range(0, 10) + .mapToObj(i -> new Record().setId("id-value").setSort(i).setValue(i)) + 
.collect(Collectors.toList()); + + private DynamoDbEnhancedAsyncClient enhancedAsyncClient = DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(getDynamoDbAsyncClient()) + .build(); + + private DynamoDbAsyncTable mappedTable = enhancedAsyncClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + + private void insertRecords() { + RECORDS.forEach(record -> mappedTable.putItem(r -> r.item(record)).join()); + } + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + } + + @After + public void deleteTable() { + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()) + .join(); + } + + @Test + public void queryAllRecordsDefaultSettings_usingShortcutForm() { + insertRecords(); + + SdkPublisher> publisher = + mappedTable.query(keyEqualTo(k -> k.partitionValue("id-value"))); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), is(RECORDS)); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryAllRecordsWithFilter() { + insertRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#value >= :min_value AND #value <= :max_value") + .expressionValues(expressionValues) + .expressionNames(Collections.singletonMap("#value", "value")) + .build(); + + SdkPublisher> publisher = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .filterExpression(expression) + .build()); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), + is(RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryAllRecordsWithFilter_viaItems() { + insertRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#value >= :min_value AND #value <= :max_value") + .expressionValues(expressionValues) + .expressionNames(Collections.singletonMap("#value", "value")) + .build(); + + SdkPublisher publisher = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .filterExpression(expression) + .build()).items(); + + List results = drainPublisher(publisher, 3); + + assertThat(results, + is(RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + } + + @Test + public void queryBetween() { + insertRecords(); + Key fromKey = Key.builder().partitionValue("id-value").sortValue(3).build(); + Key toKey = Key.builder().partitionValue("id-value").sortValue(5).build(); + SdkPublisher> publisher = mappedTable.query(r -> r.queryConditional(QueryConditional.sortBetween(fromKey, toKey))); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), + is(RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryBetween_viaItems() { + insertRecords(); + Key fromKey = 
Key.builder().partitionValue("id-value").sortValue(3).build(); + Key toKey = Key.builder().partitionValue("id-value").sortValue(5).build(); + SdkPublisher publisher = mappedTable.query(r -> r.queryConditional(QueryConditional.sortBetween(fromKey, toKey))).items(); + + List results = drainPublisher(publisher, 3); + + assertThat(results, + is(RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + } + + @Test + public void queryLimit() { + insertRecords(); + SdkPublisher> publisher = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .limit(5) + .build()); + + List> results = drainPublisher(publisher, 3); + Page page1 = results.get(0); + Page page2 = results.get(1); + Page page3 = results.get(2); + + Map expectedLastEvaluatedKey1 = new HashMap<>(); + expectedLastEvaluatedKey1.put("id", stringValue("id-value")); + expectedLastEvaluatedKey1.put("sort", numberValue(4)); + Map expectedLastEvaluatedKey2 = new HashMap<>(); + expectedLastEvaluatedKey2.put("id", stringValue("id-value")); + expectedLastEvaluatedKey2.put("sort", numberValue(9)); + assertThat(page1.items(), is(RECORDS.subList(0, 5))); + assertThat(page1.lastEvaluatedKey(), is(expectedLastEvaluatedKey1)); + assertThat(page2.items(), is(RECORDS.subList(5, 10))); + assertThat(page2.lastEvaluatedKey(), is(expectedLastEvaluatedKey2)); + assertThat(page3.items(), is(empty())); + assertThat(page3.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryLimit_viaItems() { + insertRecords(); + SdkPublisher publisher = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .limit(5) + .build()) + .items(); + + List results = drainPublisher(publisher, 10); + assertThat(results, is(RECORDS)); + } + + @Test + public void queryEmpty() { + SdkPublisher> publisher = + mappedTable.query(r -> r.queryConditional(keyEqualTo(k -> k.partitionValue("id-value")))); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), is(empty())); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryEmpty_viaItems() { + SdkPublisher publisher = + mappedTable.query(r -> r.queryConditional(keyEqualTo(k -> k.partitionValue("id-value")))).items(); + + List results = drainPublisher(publisher, 0); + assertThat(results, is(empty())); + } + + @Test + public void queryExclusiveStartKey() { + Map exclusiveStartKey = new HashMap<>(); + exclusiveStartKey.put("id", stringValue("id-value")); + exclusiveStartKey.put("sort", numberValue(7)); + insertRecords(); + SdkPublisher> publisher = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .exclusiveStartKey(exclusiveStartKey) + .build()); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + assertThat(page.items(), is(RECORDS.subList(8, 10))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBasicScanTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBasicScanTest.java new file mode 100644 index 000000000000..e69ff876e4ad --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBasicScanTest.java @@ 
-0,0 +1,270 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class AsyncBasicScanTest extends LocalDynamoDbAsyncTestBase { + private static class Record { + private String id; + private Integer sort; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private Integer getSort() { + return sort; + } + + private Record setSort(Integer sort) { + this.sort = sort; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(sort, record.sort); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort); + } + } + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .addAttribute(Integer.class, a -> a.name("sort") + .getter(Record::getSort) + .setter(Record::setSort) + .tags(primarySortKey())) + .build(); + + private static final List RECORDS = + IntStream.range(0, 10) + .mapToObj(i -> new 
Record().setId("id-value").setSort(i)) + .collect(Collectors.toList()); + + private DynamoDbEnhancedAsyncClient enhancedAsyncClient = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(getDynamoDbAsyncClient()) + .build(); + + private DynamoDbAsyncTable mappedTable = enhancedAsyncClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + + private void insertRecords() { + RECORDS.forEach(record -> mappedTable.putItem(r -> r.item(record)).join()); + } + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + } + + @After + public void deleteTable() { + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()).join(); + } + + @Test + public void scanAllRecordsDefaultSettings() { + insertRecords(); + + SdkPublisher> publisher = mappedTable.scan(ScanEnhancedRequest.builder().build()); + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), is(RECORDS)); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanAllRecordsDefaultSettings_viaItems() { + insertRecords(); + + SdkPublisher publisher = mappedTable.scan(ScanEnhancedRequest.builder().build()).items(); + List results = drainPublisher(publisher, 10); + + assertThat(results, is(RECORDS)); + } + + @Test + public void scanAllRecordsWithFilter() { + insertRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("sort >= :min_value AND sort <= :max_value") + .expressionValues(expressionValues) + .build(); + + SdkPublisher> publisher = + mappedTable.scan(ScanEnhancedRequest.builder().filterExpression(expression).build()); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), + is(RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanAllRecordsWithFilter_viaItems() { + insertRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("sort >= :min_value AND sort <= :max_value") + .expressionValues(expressionValues) + .build(); + + SdkPublisher publisher = + mappedTable.scan(ScanEnhancedRequest.builder().filterExpression(expression).build()).items(); + + List results = drainPublisher(publisher, 3); + + assertThat(results, + is(RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + } + + @Test + public void scanLimit() { + insertRecords(); + + SdkPublisher> publisher = mappedTable.scan(r -> r.limit(5)); + publisher.subscribe(page -> page.items().forEach(item -> System.out.println(item))); + + List> results = drainPublisher(publisher, 3); + + Page page1 = results.get(0); + Page page2 = results.get(1); + Page page3 = results.get(2); + + assertThat(page1.items(), is(RECORDS.subList(0, 5))); + assertThat(page1.lastEvaluatedKey(), is(getKeyMap(4))); + assertThat(page2.items(), is(RECORDS.subList(5, 10))); + assertThat(page2.lastEvaluatedKey(), is(getKeyMap(9))); + assertThat(page3.items(), is(empty())); + assertThat(page3.lastEvaluatedKey(), is(nullValue())); + } + + 
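(Note: the tests in these files repeatedly call a drainPublisher(publisher, expectedCount) helper that comes from LocalDynamoDbAsyncTestBase and is not part of this diff. As a rough, hedged illustration only — this is an assumption, not the SDK's actual helper, and it ignores the expected-count argument by simply draining until the stream completes — such a method could be written against org.reactivestreams.Publisher, which SdkPublisher implements:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

final class PublisherDrainer {
    // Hypothetical stand-in for the test base's drainPublisher: subscribes once,
    // requests everything, collects the published elements, and blocks until the
    // publisher signals completion or error.
    static <T> List<T> drain(Publisher<T> publisher) {
        List<T> results = new ArrayList<>();
        CompletableFuture<Void> done = new CompletableFuture<>();
        publisher.subscribe(new Subscriber<T>() {
            @Override
            public void onSubscribe(Subscription s) {
                s.request(Long.MAX_VALUE); // request all pages/items up front
            }

            @Override
            public void onNext(T element) {
                results.add(element);
            }

            @Override
            public void onError(Throwable t) {
                done.completeExceptionally(t);
            }

            @Override
            public void onComplete() {
                done.complete(null);
            }
        });
        done.join(); // propagate any failure; otherwise results now holds every element
        return results;
    }
}

Under that assumption, a call like drainPublisher(mappedTable.scan(r -> r.limit(5)), 3) in the scanLimit test above would simply return the three Page<Record> objects the scan publisher emits before completing.)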
@Test + public void scanLimit_viaItems() { + insertRecords(); + + SdkPublisher publisher = mappedTable.scan(r -> r.limit(5)).items(); + List results = drainPublisher(publisher, 10); + assertThat(results, is(RECORDS)); + } + + @Test + public void scanEmpty() { + SdkPublisher> publisher = mappedTable.scan(); + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), is(empty())); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanEmpty_viaItems() { + SdkPublisher publisher = mappedTable.scan().items(); + List results = drainPublisher(publisher, 0); + + assertThat(results, is(empty())); + } + + @Test + public void scanExclusiveStartKey() { + insertRecords(); + SdkPublisher> publisher = + mappedTable.scan(ScanEnhancedRequest.builder().exclusiveStartKey(getKeyMap(7)).build()); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), is(RECORDS.subList(8, 10))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanExclusiveStartKey_viaItems() { + insertRecords(); + SdkPublisher publisher = + mappedTable.scan(ScanEnhancedRequest.builder().exclusiveStartKey(getKeyMap(7)).build()).items(); + + List results = drainPublisher(publisher, 2); + + assertThat(results, is(RECORDS.subList(8, 10))); + } + + private Map getKeyMap(int sort) { + Map result = new HashMap<>(); + result.put("id", stringValue("id-value")); + result.put("sort", numberValue(sort)); + return Collections.unmodifiableMap(result); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBatchGetItemTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBatchGetItemTest.java new file mode 100644 index 000000000000..3c90d04562da --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBatchGetItemTest.java @@ -0,0 +1,267 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPage; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPagePublisher; +import software.amazon.awssdk.enhanced.dynamodb.model.ReadBatch; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class AsyncBatchGetItemTest extends LocalDynamoDbAsyncTestBase { + private static class Record1 { + private Integer id; + + private Integer getId() { + return id; + } + + private Record1 setId(Integer id) { + this.id = id; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record1 record1 = (Record1) o; + return Objects.equals(id, record1.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + } + + private static class Record2 { + private Integer id; + + private Integer getId() { + return id; + } + + private Record2 setId(Integer id) { + this.id = id; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record2 record2 = (Record2) o; + return Objects.equals(id, record2.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + } + + private static final TableSchema TABLE_SCHEMA_1 = + StaticTableSchema.builder(Record1.class) + .newItemSupplier(Record1::new) + .addAttribute(Integer.class, a -> a.name("id_1") + .getter(Record1::getId) + .setter(Record1::setId) + .tags(primaryPartitionKey())) + .build(); + + private static final TableSchema TABLE_SCHEMA_2 = + StaticTableSchema.builder(Record2.class) + .newItemSupplier(Record2::new) + .addAttribute(Integer.class, a -> a.name("id_2") + .getter(Record2::getId) + .setter(Record2::setId) + .tags(primaryPartitionKey())) + .build(); + + private DynamoDbEnhancedAsyncClient enhancedAsyncClient = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(getDynamoDbAsyncClient()) + .build(); + + private DynamoDbAsyncTable mappedTable1 = enhancedAsyncClient.table(getConcreteTableName("table-name-1"), + TABLE_SCHEMA_1); + private DynamoDbAsyncTable mappedTable2 = enhancedAsyncClient.table(getConcreteTableName("table-name-2"), + TABLE_SCHEMA_2); + + private static final List RECORDS_1 = + IntStream.range(0, 2) + .mapToObj(i -> new Record1().setId(i)) + .collect(Collectors.toList()); + + private static final List RECORDS_2 = + IntStream.range(0, 
2) + .mapToObj(i -> new Record2().setId(i)) + .collect(Collectors.toList()); + + @Before + public void createTable() { + mappedTable1.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + mappedTable2.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + } + + @After + public void deleteTable() { + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-1")) + .build()).join(); + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-2")) + .build()).join(); + } + + private void insertRecords() { + RECORDS_1.forEach(record -> mappedTable1.putItem(r -> r.item(record)).join()); + RECORDS_2.forEach(record -> mappedTable2.putItem(r -> r.item(record)).join()); + } + + @Test + public void getRecordsFromMultipleTables() { + insertRecords(); + + SdkPublisher publisher = batchGetResultPageSdkPublisherForBothTables(); + + List results = drainPublisher(publisher, 1); + assertThat(results.size(), is(1)); + BatchGetResultPage page = results.get(0); + List record1List = page.resultsForTable(mappedTable1); + assertThat(record1List.size(), is(2)); + assertThat(record1List, containsInAnyOrder(RECORDS_1.get(0), RECORDS_1.get(1))); + + List record2List = page.resultsForTable(mappedTable2); + assertThat(record2List.size(), is(2)); + assertThat(record2List, containsInAnyOrder(RECORDS_2.get(0), RECORDS_2.get(1))); + } + + @Test + public void getRecordsFromMultipleTables_viaFlattenedItems() { + insertRecords(); + + BatchGetResultPagePublisher publisher = batchGetResultPageSdkPublisherForBothTables(); + + List table1Results = drainPublisher(publisher.resultsForTable(mappedTable1), 2); + assertThat(table1Results.size(), is(2)); + assertThat(table1Results, containsInAnyOrder(RECORDS_1.toArray())); + + List table2Results = drainPublisher(publisher.resultsForTable(mappedTable2), 2); + assertThat(table1Results.size(), is(2)); + assertThat(table2Results, containsInAnyOrder(RECORDS_2.toArray())); + } + + @Test + public void notFoundRecordReturnsNull() { + insertRecords(); + + BatchGetItemEnhancedRequest batchGetItemEnhancedRequest = requestWithNotFoundRecord(); + + SdkPublisher publisher = enhancedAsyncClient.batchGetItem(batchGetItemEnhancedRequest); + + List results = drainPublisher(publisher, 1); + assertThat(results.size(), is(1)); + + BatchGetResultPage page = results.get(0); + List record1List = page.resultsForTable(mappedTable1); + assertThat(record1List.size(), is(1)); + assertThat(record1List.get(0).getId(), is(0)); + + List record2List = page.resultsForTable(mappedTable2); + assertThat(record2List.size(), is(2)); + assertThat(record2List, containsInAnyOrder(RECORDS_2.get(0), RECORDS_2.get(1))); + } + + @Test + public void notFoundRecordReturnsNull_viaFlattenedItems() { + insertRecords(); + + BatchGetItemEnhancedRequest batchGetItemEnhancedRequest = requestWithNotFoundRecord(); + + BatchGetResultPagePublisher publisher = enhancedAsyncClient.batchGetItem(batchGetItemEnhancedRequest); + + List resultsForTable1 = drainPublisher(publisher.resultsForTable(mappedTable1), 1); + assertThat(resultsForTable1.size(), is(1)); + assertThat(resultsForTable1.get(0).getId(), is(0)); + + List record2List = drainPublisher(publisher.resultsForTable(mappedTable2), 2); + assertThat(record2List.size(), is(2)); + assertThat(record2List, containsInAnyOrder(RECORDS_2.toArray())); + } + + private BatchGetItemEnhancedRequest requestWithNotFoundRecord() { + 
return BatchGetItemEnhancedRequest.builder() + .readBatches( + ReadBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addGetItem(r -> r.key(k -> k.partitionValue(0))) + .build(), + ReadBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addGetItem(r -> r.key(k -> k.partitionValue(0))) + .build(), + ReadBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addGetItem(r -> r.key(k -> k.partitionValue(1))) + .build(), + ReadBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addGetItem(r -> r.key(k -> k.partitionValue(5))) + .build()) + .build(); + } + + private BatchGetResultPagePublisher batchGetResultPageSdkPublisherForBothTables() { + return enhancedAsyncClient.batchGetItem(r -> r.readBatches( + ReadBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addGetItem(i -> i.key(k -> k.partitionValue(0))) + .build(), + ReadBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addGetItem(i -> i.key(k -> k.partitionValue(0))) + .build(), + ReadBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addGetItem(i -> i.key(k -> k.partitionValue(1))) + .build(), + ReadBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addGetItem(i -> i.key(k -> k.partitionValue(1))) + .build())); + } +} + diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBatchWriteItemTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBatchWriteItemTest.java new file mode 100644 index 000000000000..bcc703e803c7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncBatchWriteItemTest.java @@ -0,0 +1,270 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.WriteBatch; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class AsyncBatchWriteItemTest extends LocalDynamoDbAsyncTestBase { + private static class Record1 { + private Integer id; + private String attribute; + + private Integer getId() { + return id; + } + + private Record1 setId(Integer id) { + this.id = id; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record1 setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record1 record1 = (Record1) o; + return Objects.equals(id, record1.id) && + Objects.equals(attribute, record1.attribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, attribute); + } + } + + private static class Record2 { + private Integer id; + private String attribute; + + private Integer getId() { + return id; + } + + private Record2 setId(Integer id) { + this.id = id; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record2 setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record2 record2 = (Record2) o; + return Objects.equals(id, record2.id) && + Objects.equals(attribute, record2.attribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, attribute); + } + } + + private static final TableSchema TABLE_SCHEMA_1 = + StaticTableSchema.builder(Record1.class) + .newItemSupplier(Record1::new) + .addAttribute(Integer.class, a -> a.name("id_1") + .getter(Record1::getId) + .setter(Record1::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("attribute") + .getter(Record1::getAttribute) + .setter(Record1::setAttribute)) + .build(); + + private static final TableSchema TABLE_SCHEMA_2 = + StaticTableSchema.builder(Record2.class) + .newItemSupplier(Record2::new) + .addAttribute(Integer.class, a -> a.name("id_2") + .getter(Record2::getId) + .setter(Record2::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("attribute") + 
.getter(Record2::getAttribute) + .setter(Record2::setAttribute)) + .build(); + + private DynamoDbEnhancedAsyncClient enhancedAsyncClient = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(getDynamoDbAsyncClient()) + .build(); + + private DynamoDbAsyncTable mappedTable1 = enhancedAsyncClient.table(getConcreteTableName("table-name-1"), + TABLE_SCHEMA_1); + private DynamoDbAsyncTable mappedTable2 = enhancedAsyncClient.table(getConcreteTableName("table-name-2"), + TABLE_SCHEMA_2); + + private static final List RECORDS_1 = + IntStream.range(0, 2) + .mapToObj(i -> new Record1().setId(i).setAttribute(Integer.toString(i))) + .collect(Collectors.toList()); + + private static final List RECORDS_2 = + IntStream.range(0, 2) + .mapToObj(i -> new Record2().setId(i).setAttribute(Integer.toString(i))) + .collect(Collectors.toList()); + + @Before + public void createTable() { + mappedTable1.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + mappedTable2.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + } + + @After + public void deleteTable() { + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-1")) + .build()).join(); + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-2")) + .build()).join(); + } + + @Test + public void singlePut() { + List writeBatches = + singletonList(WriteBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addPutItem(r -> r.item(RECORDS_1.get(0))) + .build()); + + enhancedAsyncClient.batchWriteItem(BatchWriteItemEnhancedRequest.builder().writeBatches(writeBatches).build()).join(); + + Record1 record = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + assertThat(record, is(RECORDS_1.get(0))); + } + + @Test + public void multiplePut() { + List writeBatches = + asList(WriteBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addPutItem(r -> r.item(RECORDS_1.get(0))) + .build(), + WriteBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addPutItem(r -> r.item(RECORDS_2.get(0))) + .build()); + + enhancedAsyncClient.batchWriteItem(BatchWriteItemEnhancedRequest.builder().writeBatches(writeBatches).build()).join(); + + Record1 record1 = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + Record2 record2 = mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + assertThat(record1, is(RECORDS_1.get(0))); + assertThat(record2, is(RECORDS_2.get(0))); + } + + @Test + public void singleDelete() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))).join(); + + List writeBatches = + singletonList(WriteBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addDeleteItem(r -> r.key(k -> k.partitionValue(0))) + .build()); + + enhancedAsyncClient.batchWriteItem(BatchWriteItemEnhancedRequest.builder().writeBatches(writeBatches).build()).join(); + + Record1 record = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + assertThat(record, is(nullValue())); + } + + @Test + public void multipleDelete() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))).join(); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))).join(); + + List writeBatches = + asList(WriteBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addDeleteItem(DeleteItemEnhancedRequest.builder().key(k -> k.partitionValue(0)).build()) + .build(), + WriteBatch.builder(Record2.class) + 
.mappedTableResource(mappedTable2) + .addDeleteItem(DeleteItemEnhancedRequest.builder().key(k -> k.partitionValue(0)).build()) + .build()); + + enhancedAsyncClient.batchWriteItem(BatchWriteItemEnhancedRequest.builder().writeBatches(writeBatches).build()).join(); + + Record1 record1 = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + Record2 record2 = mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + assertThat(record1, is(nullValue())); + assertThat(record2, is(nullValue())); + } + + @Test + public void mixedCommands() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))).join(); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))).join(); + + enhancedAsyncClient.batchWriteItem(r -> r.writeBatches( + WriteBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addPutItem(i -> i.item(RECORDS_1.get(1))) + .build(), + WriteBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addDeleteItem(i -> i.key(k -> k.partitionValue(0))) + .build())).join(); + + assertThat(mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(), is(RECORDS_1.get(0))); + assertThat(mappedTable1.getItem(r -> r.key(k -> k.partitionValue(1))).join(), is(RECORDS_1.get(1))); + assertThat(mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))).join(), is(nullValue())); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncIndexQueryTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncIndexQueryTest.java new file mode 100644 index 000000000000..0a53549e6f62 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncIndexQueryTest.java @@ -0,0 +1,298 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional.keyEqualTo; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncIndex; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; + +public class AsyncIndexQueryTest extends LocalDynamoDbAsyncTestBase { + private static class Record { + private String id; + private Integer sort; + private Integer value; + private String gsiId; + private Integer gsiSort; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private Integer getSort() { + return sort; + } + + private Record setSort(Integer sort) { + this.sort = sort; + return this; + } + + private Integer getValue() { + return value; + } + + private Record setValue(Integer value) { + this.value = value; + return this; + } + + private String getGsiId() { + return gsiId; + } + + private Record setGsiId(String gsiId) { + this.gsiId = gsiId; + return this; + } + + private Integer getGsiSort() { + return gsiSort; + } + + private Record setGsiSort(Integer gsiSort) { + this.gsiSort = gsiSort; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(sort, record.sort) && + Objects.equals(value, 
record.value) && + Objects.equals(gsiId, record.gsiId) && + Objects.equals(gsiSort, record.gsiSort); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort, value, gsiId, gsiSort); + } + } + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .addAttribute(Integer.class, a -> a.name("sort") + .getter(Record::getSort) + .setter(Record::setSort) + .tags(primarySortKey())) + .addAttribute(Integer.class, a -> a.name("value") + .getter(Record::getValue) + .setter(Record::setValue)) + .addAttribute(String.class, a -> a.name("gsi_id") + .getter(Record::getGsiId) + .setter(Record::setGsiId) + .tags(secondaryPartitionKey("gsi_keys_only"))) + .addAttribute(Integer.class, a -> a.name("gsi_sort") + .getter(Record::getGsiSort) + .setter(Record::setGsiSort) + .tags(secondarySortKey("gsi_keys_only"))) + .build(); + + private static final List RECORDS = + IntStream.range(0, 10) + .mapToObj(i -> new Record() + .setId("id-value") + .setSort(i) + .setValue(i) + .setGsiId("gsi-id-value") + .setGsiSort(i)) + .collect(Collectors.toList()); + + private static final List KEYS_ONLY_RECORDS = + RECORDS.stream() + .map(record -> new Record() + .setId(record.id) + .setSort(record.sort) + .setGsiId(record.gsiId) + .setGsiSort(record.gsiSort)) + .collect(Collectors.toList()); + + private DynamoDbEnhancedAsyncClient enhancedAsyncClient = DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(getDynamoDbAsyncClient()) + .build(); + + private DynamoDbAsyncTable mappedTable = enhancedAsyncClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + private DynamoDbAsyncIndex keysOnlyMappedIndex = mappedTable.index("gsi_keys_only"); + + private void insertRecords() { + RECORDS.forEach(record -> mappedTable.putItem(r -> r.item(record)).join()); + } + + @Before + public void createTable() { + mappedTable.createTable( + CreateTableEnhancedRequest.builder() + .provisionedThroughput(getDefaultProvisionedThroughput()) + .globalSecondaryIndices(EnhancedGlobalSecondaryIndex.builder() + .indexName("gsi_keys_only") + .projection(p -> p.projectionType(ProjectionType.KEYS_ONLY)) + .provisionedThroughput(getDefaultProvisionedThroughput()) + .build()) + .build()) + .join(); + } + + @After + public void deleteTable() { + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()) + .join(); + } + + @Test + public void queryAllRecordsDefaultSettings_usingShortcutForm() { + insertRecords(); + + SdkPublisher> publisher = + keysOnlyMappedIndex.query(keyEqualTo(k -> k.partitionValue("gsi-id-value"))); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), is(KEYS_ONLY_RECORDS)); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryBetween() { + insertRecords(); + Key fromKey = Key.builder().partitionValue("gsi-id-value").sortValue(3).build(); + Key toKey = Key.builder().partitionValue("gsi-id-value").sortValue(5).build(); + + SdkPublisher> publisher = keysOnlyMappedIndex.query(r -> r.queryConditional(QueryConditional.sortBetween(fromKey, toKey))); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), + is(KEYS_ONLY_RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); 
+ assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryLimit() { + insertRecords(); + SdkPublisher> publisher = + keysOnlyMappedIndex.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("gsi-id-value"))) + .limit(5) + .build()); + + List> results = drainPublisher(publisher, 3); + Page page1 = results.get(0); + Page page2 = results.get(1); + Page page3 = results.get(2); + + Map expectedLastEvaluatedKey1 = new HashMap<>(); + expectedLastEvaluatedKey1.put("id", stringValue(KEYS_ONLY_RECORDS.get(4).getId())); + expectedLastEvaluatedKey1.put("sort", numberValue(KEYS_ONLY_RECORDS.get(4).getSort())); + expectedLastEvaluatedKey1.put("gsi_id", stringValue(KEYS_ONLY_RECORDS.get(4).getGsiId())); + expectedLastEvaluatedKey1.put("gsi_sort", numberValue(KEYS_ONLY_RECORDS.get(4).getGsiSort())); + Map expectedLastEvaluatedKey2 = new HashMap<>(); + expectedLastEvaluatedKey2.put("id", stringValue(KEYS_ONLY_RECORDS.get(9).getId())); + expectedLastEvaluatedKey2.put("sort", numberValue(KEYS_ONLY_RECORDS.get(9).getSort())); + expectedLastEvaluatedKey2.put("gsi_id", stringValue(KEYS_ONLY_RECORDS.get(9).getGsiId())); + expectedLastEvaluatedKey2.put("gsi_sort", numberValue(KEYS_ONLY_RECORDS.get(9).getGsiSort())); + + assertThat(page1.items(), is(KEYS_ONLY_RECORDS.subList(0, 5))); + assertThat(page1.lastEvaluatedKey(), is(expectedLastEvaluatedKey1)); + assertThat(page2.items(), is(KEYS_ONLY_RECORDS.subList(5, 10))); + assertThat(page2.lastEvaluatedKey(), is(expectedLastEvaluatedKey2)); + assertThat(page3.items(), is(empty())); + assertThat(page3.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryEmpty() { + SdkPublisher> publisher = + keysOnlyMappedIndex.query(r -> r.queryConditional(keyEqualTo(k -> k.partitionValue("gsi-id-value")))); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + + assertThat(page.items(), is(empty())); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryExclusiveStartKey() { + insertRecords(); + Map expectedLastEvaluatedKey = new HashMap<>(); + expectedLastEvaluatedKey.put("id", stringValue(KEYS_ONLY_RECORDS.get(7).getId())); + expectedLastEvaluatedKey.put("sort", numberValue(KEYS_ONLY_RECORDS.get(7).getSort())); + expectedLastEvaluatedKey.put("gsi_id", stringValue(KEYS_ONLY_RECORDS.get(7).getGsiId())); + expectedLastEvaluatedKey.put("gsi_sort", numberValue(KEYS_ONLY_RECORDS.get(7).getGsiSort())); + + SdkPublisher> publisher = + keysOnlyMappedIndex.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("gsi-id-value"))) + .exclusiveStartKey(expectedLastEvaluatedKey) + .build()); + + List> results = drainPublisher(publisher, 1); + Page page = results.get(0); + assertThat(page.items(), is(KEYS_ONLY_RECORDS.subList(8, 10))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncIndexScanTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncIndexScanTest.java new file mode 100644 index 000000000000..9de11ea5c0fd --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncIndexScanTest.java @@ -0,0 +1,285 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondarySortKey; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncIndex; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; + +public class AsyncIndexScanTest extends LocalDynamoDbAsyncTestBase { + private static class Record { + private String id; + private Integer sort; + private Integer value; + private String gsiId; + private Integer gsiSort; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private Integer getSort() { + return sort; + } + + private Record setSort(Integer sort) { + this.sort = sort; + return this; + } + + private Integer getValue() { + return value; + } + + private Record setValue(Integer value) { + this.value = value; + return this; + } + + private String getGsiId() { + return gsiId; + } + + private Record setGsiId(String gsiId) { + this.gsiId = gsiId; + return this; + } + + private Integer getGsiSort() { + return gsiSort; + } + + private Record setGsiSort(Integer gsiSort) { + this.gsiSort = gsiSort; + return 
this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(sort, record.sort) && + Objects.equals(value, record.value) && + Objects.equals(gsiId, record.gsiId) && + Objects.equals(gsiSort, record.gsiSort); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort, value, gsiId, gsiSort); + } + } +
+ private static final TableSchema<Record> TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .addAttribute(Integer.class, a -> a.name("sort") + .getter(Record::getSort) + .setter(Record::setSort) + .tags(primarySortKey())) + .addAttribute(Integer.class, a -> a.name("value") + .getter(Record::getValue) + .setter(Record::setValue)) + .addAttribute(String.class, a -> a.name("gsi_id") + .getter(Record::getGsiId) + .setter(Record::setGsiId) + .tags(secondaryPartitionKey("gsi_keys_only"))) + .addAttribute(Integer.class, a -> a.name("gsi_sort") + .getter(Record::getGsiSort) + .setter(Record::setGsiSort) + .tags(secondarySortKey("gsi_keys_only"))) + .build(); +
+ private static final List<Record> RECORDS = + IntStream.range(0, 10) + .mapToObj(i -> new Record() + .setId("id-value") + .setSort(i) + .setValue(i) + .setGsiId("gsi-id-value") + .setGsiSort(i)) + .collect(Collectors.toList()); +
+ private static final List<Record> KEYS_ONLY_RECORDS = + RECORDS.stream() + .map(record -> new Record() + .setId(record.id) + .setSort(record.sort) + .setGsiId(record.gsiId) + .setGsiSort(record.gsiSort)) + .collect(Collectors.toList()); +
+ private DynamoDbEnhancedAsyncClient enhancedAsyncClient = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(getDynamoDbAsyncClient()) + .build(); +
+ private DynamoDbAsyncTable<Record> mappedTable = enhancedAsyncClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + private DynamoDbAsyncIndex<Record> keysOnlyMappedIndex = mappedTable.index("gsi_keys_only"); +
+ private void insertRecords() { + RECORDS.forEach(record -> mappedTable.putItem(r -> r.item(record)).join()); + } +
+ @Before + public void createTable() { + mappedTable.createTable( + r -> r.provisionedThroughput(getDefaultProvisionedThroughput()) + .globalSecondaryIndices( + EnhancedGlobalSecondaryIndex.builder() + .indexName("gsi_keys_only") + .projection(p -> p.projectionType(ProjectionType.KEYS_ONLY)) + .provisionedThroughput(getDefaultProvisionedThroughput()).build())) + .join(); + } +
+ @After + public void deleteTable() { + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()).join(); + } +
+ @Test + public void scanAllRecordsDefaultSettings() { + insertRecords(); + + SdkPublisher<Page<Record>> publisher = keysOnlyMappedIndex.scan(ScanEnhancedRequest.builder().build()); + List<Page<Record>> results = drainPublisher(publisher, 1); + Page<Record> page = results.get(0); + + assertThat(page.items(), is(KEYS_ONLY_RECORDS)); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } +
+ @Test + public void scanAllRecordsWithFilter() { + insertRecords(); + Map<String, AttributeValue> expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("sort >= :min_value AND sort <= :max_value") + .expressionValues(expressionValues) + .build(); + + SdkPublisher<Page<Record>> publisher = keysOnlyMappedIndex.scan(ScanEnhancedRequest.builder() + .filterExpression(expression) + .build()); + + List<Page<Record>> results = drainPublisher(publisher, 1); + Page<Record> page = results.get(0); + + assertThat(page.items(), + is(KEYS_ONLY_RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } +
+ @Test + public void scanLimit() { + insertRecords(); + + SdkPublisher<Page<Record>> publisher = keysOnlyMappedIndex.scan(r -> r.limit(5)); + + List<Page<Record>> results = drainPublisher(publisher, 3); + + Page<Record> page1 = results.get(0); + Page<Record> page2 = results.get(1); + Page<Record> page3 = results.get(2); + + assertThat(page1.items(), is(KEYS_ONLY_RECORDS.subList(0, 5))); + assertThat(page1.lastEvaluatedKey(), is(getKeyMap(4))); + assertThat(page2.items(), is(KEYS_ONLY_RECORDS.subList(5, 10))); + assertThat(page2.lastEvaluatedKey(), is(getKeyMap(9))); + assertThat(page3.items(), is(empty())); + assertThat(page3.lastEvaluatedKey(), is(nullValue())); + } +
+ @Test + public void scanEmpty() { + SdkPublisher<Page<Record>> publisher = keysOnlyMappedIndex.scan(); + List<Page<Record>> results = drainPublisher(publisher, 1); + Page<Record> page = results.get(0); + + assertThat(page.items(), is(empty())); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } +
+ @Test + public void scanExclusiveStartKey() { + insertRecords(); + SdkPublisher<Page<Record>> publisher = + keysOnlyMappedIndex.scan(ScanEnhancedRequest.builder().exclusiveStartKey(getKeyMap(7)).build()); + + List<Page<Record>> results = drainPublisher(publisher, 1); + Page<Record> page = results.get(0); + + assertThat(page.items(), is(KEYS_ONLY_RECORDS.subList(8, 10))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } +
+ private Map<String, AttributeValue> getKeyMap(int sort) { + Map<String, AttributeValue> result = new HashMap<>(); + result.put("id", stringValue(KEYS_ONLY_RECORDS.get(sort).getId())); + result.put("sort", numberValue(KEYS_ONLY_RECORDS.get(sort).getSort())); + result.put("gsi_id", stringValue(KEYS_ONLY_RECORDS.get(sort).getGsiId())); + result.put("gsi_sort", numberValue(KEYS_ONLY_RECORDS.get(sort).getGsiSort())); + return Collections.unmodifiableMap(result); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncTransactGetItemsTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncTransactGetItemsTest.java new file mode 100644 index 000000000000..18c2466cddab --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncTransactGetItemsTest.java @@ -0,0 +1,194 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.Document; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactGetItemsEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; +
+public class AsyncTransactGetItemsTest extends LocalDynamoDbAsyncTestBase { + private static class Record1 { + private Integer id; + + private Integer getId() { + return id; + } + + private Record1 setId(Integer id) { + this.id = id; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record1 record1 = (Record1) o; + return Objects.equals(id, record1.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + } +
+ private static class Record2 { + private Integer id; + + private Integer getId() { + return id; + } + + private Record2 setId(Integer id) { + this.id = id; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record2 record2 = (Record2) o; + return Objects.equals(id, record2.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + } +
+ private static final TableSchema<Record1> TABLE_SCHEMA_1 = + StaticTableSchema.builder(Record1.class) + .newItemSupplier(Record1::new) + .addAttribute(Integer.class, a -> a.name("id_1") + .getter(Record1::getId) + .setter(Record1::setId) + .tags(primaryPartitionKey())) + .build(); +
+ private static final TableSchema<Record2> TABLE_SCHEMA_2 = + StaticTableSchema.builder(Record2.class) + .newItemSupplier(Record2::new) + .addAttribute(Integer.class, a -> a.name("id_2") + .getter(Record2::getId) + .setter(Record2::setId) + .tags(primaryPartitionKey())) + .build(); +
+ private DynamoDbEnhancedAsyncClient enhancedAsyncClient = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(getDynamoDbAsyncClient()) + .build(); +
+ private DynamoDbAsyncTable<Record1> mappedTable1 = enhancedAsyncClient.table(getConcreteTableName("table-name-1"), + TABLE_SCHEMA_1); + private DynamoDbAsyncTable<Record2> mappedTable2 = enhancedAsyncClient.table(getConcreteTableName("table-name-2"), + TABLE_SCHEMA_2); +
+ private static final List<Record1> RECORDS_1 = + IntStream.range(0, 2) + .mapToObj(i -> new Record1().setId(i)) + .collect(Collectors.toList()); +
+ private static final List<Record2> RECORDS_2 = + IntStream.range(0, 2) + .mapToObj(i -> new Record2().setId(i)) + .collect(Collectors.toList()); +
+ @Before + public void createTable() { + mappedTable1.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + mappedTable2.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + } +
+ @After + public void deleteTable() { + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-1")) + .build()).join(); + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-2")) + .build()).join(); + } +
+ private void insertRecords() { + RECORDS_1.forEach(record -> mappedTable1.putItem(r -> r.item(record)).join()); + RECORDS_2.forEach(record -> mappedTable2.putItem(r -> r.item(record)).join()); + } +
+ @Test + public void getRecordsFromMultipleTables() { + insertRecords(); + + TransactGetItemsEnhancedRequest transactGetItemsEnhancedRequest = + TransactGetItemsEnhancedRequest.builder() + .addGetItem(mappedTable1, Key.builder().partitionValue(0).build()) + .addGetItem(mappedTable2, Key.builder().partitionValue(0).build()) + .addGetItem(mappedTable2, Key.builder().partitionValue(1).build()) + .addGetItem(mappedTable1, Key.builder().partitionValue(1).build()) + .build(); + + List<Document> results = enhancedAsyncClient.transactGetItems(transactGetItemsEnhancedRequest).join(); + + assertThat(results.size(), is(4)); + assertThat(results.get(0).getItem(mappedTable1), is(RECORDS_1.get(0))); + assertThat(results.get(1).getItem(mappedTable2), is(RECORDS_2.get(0))); + assertThat(results.get(2).getItem(mappedTable2), is(RECORDS_2.get(1))); + assertThat(results.get(3).getItem(mappedTable1), is(RECORDS_1.get(1))); + } +
+ @Test + public void notFoundRecordReturnsNull() { + insertRecords(); + + TransactGetItemsEnhancedRequest transactGetItemsEnhancedRequest = + TransactGetItemsEnhancedRequest.builder() + .addGetItem(mappedTable1, Key.builder().partitionValue(0).build()) + .addGetItem(mappedTable2, Key.builder().partitionValue(0).build()) + .addGetItem(mappedTable2, Key.builder().partitionValue(5).build()) + .addGetItem(mappedTable1, Key.builder().partitionValue(1).build()) + .build(); + + List<Document> results = enhancedAsyncClient.transactGetItems(transactGetItemsEnhancedRequest).join(); + + assertThat(results.size(), is(4)); + assertThat(results.get(0).getItem(mappedTable1), is(RECORDS_1.get(0))); + assertThat(results.get(1).getItem(mappedTable2), is(RECORDS_2.get(0))); + assertThat(results.get(2).getItem(mappedTable2), is(nullValue())); + assertThat(results.get(3).getItem(mappedTable1), is(RECORDS_1.get(1))); + } +} + diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncTransactWriteItemsTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncTransactWriteItemsTest.java new file mode 100644 index 000000000000..6a1348fd1cf2 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AsyncTransactWriteItemsTest.java @@ -0,0 +1,376 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied.
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.fail; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CompletionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.DefaultDynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.ConditionCheck; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactWriteItemsEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; +import software.amazon.awssdk.services.dynamodb.model.TransactionCanceledException; + +public class AsyncTransactWriteItemsTest extends LocalDynamoDbAsyncTestBase { + private static class Record1 { + private Integer id; + private String attribute; + + private Integer getId() { + return id; + } + + private Record1 setId(Integer id) { + this.id = id; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record1 setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record1 record1 = (Record1) o; + return Objects.equals(id, record1.id) && + Objects.equals(attribute, record1.attribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, attribute); + } + } + + private static class Record2 { + private Integer id; + private String attribute; + + private Integer getId() { + return id; + } + + private Record2 setId(Integer id) { + this.id = id; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record2 setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record2 record2 = (Record2) o; + return Objects.equals(id, record2.id) && + Objects.equals(attribute, record2.attribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, attribute); + } + } + + private static final TableSchema TABLE_SCHEMA_1 = + StaticTableSchema.builder(Record1.class) + .newItemSupplier(Record1::new) + .addAttribute(Integer.class, a -> a.name("id_1") + .getter(Record1::getId) + .setter(Record1::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a 
-> a.name("attribute") + .getter(Record1::getAttribute) + .setter(Record1::setAttribute)) + .build(); + + private static final TableSchema TABLE_SCHEMA_2 = + StaticTableSchema.builder(Record2.class) + .newItemSupplier(Record2::new) + .addAttribute(Integer.class, a -> a.name("id_2") + .getter(Record2::getId) + .setter(Record2::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("attribute") + .getter(Record2::getAttribute) + .setter(Record2::setAttribute)) + .build(); + + private DynamoDbEnhancedAsyncClient enhancedAsyncClient = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(getDynamoDbAsyncClient()) + .build(); + + private DynamoDbAsyncTable mappedTable1 = enhancedAsyncClient.table(getConcreteTableName("table-name-1"), + TABLE_SCHEMA_1); + private DynamoDbAsyncTable mappedTable2 = enhancedAsyncClient.table(getConcreteTableName("table-name-2"), + TABLE_SCHEMA_2); + + private static final List RECORDS_1 = + IntStream.range(0, 2) + .mapToObj(i -> new Record1().setId(i).setAttribute(Integer.toString(i))) + .collect(Collectors.toList()); + + private static final List RECORDS_2 = + IntStream.range(0, 2) + .mapToObj(i -> new Record2().setId(i).setAttribute(Integer.toString(i))) + .collect(Collectors.toList()); + + @Before + public void createTable() { + mappedTable1.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + mappedTable2.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + } + + @After + public void deleteTable() { + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-1")) + .build()).join(); + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-2")) + .build()).join(); + } + + @Test + public void singlePut() { + enhancedAsyncClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(mappedTable1, RECORDS_1.get(0)) + .build()).join(); + + Record1 record = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + assertThat(record, is(RECORDS_1.get(0))); + } + + @Test + public void multiplePut() { + enhancedAsyncClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(mappedTable1, RECORDS_1.get(0)) + .addPutItem(mappedTable2, RECORDS_2.get(0)) + .build()).join(); + + Record1 record1 = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + Record2 record2 = mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + assertThat(record1, is(RECORDS_1.get(0))); + assertThat(record2, is(RECORDS_2.get(0))); + } + + @Test + public void singleUpdate() { + enhancedAsyncClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addUpdateItem(mappedTable1, RECORDS_1.get(0)) + .build()).join(); + + Record1 record = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + assertThat(record, is(RECORDS_1.get(0))); + } + + @Test + public void multipleUpdate() { + enhancedAsyncClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addUpdateItem(mappedTable1, RECORDS_1.get(0)) + .addUpdateItem(mappedTable2, RECORDS_2.get(0)) + .build()).join(); + + Record1 record1 = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + Record2 record2 = mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + assertThat(record1, is(RECORDS_1.get(0))); + assertThat(record2, is(RECORDS_2.get(0))); + } + + @Test + public void 
singleDelete() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))).join(); + + enhancedAsyncClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addDeleteItem(mappedTable1, RECORDS_1.get(0)) + .build()).join(); + + Record1 record = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + assertThat(record, is(nullValue())); + } + + @Test + public void multipleDelete() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))).join(); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))).join(); + + enhancedAsyncClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addDeleteItem(mappedTable1, RECORDS_1.get(0)) + .addDeleteItem(mappedTable2, RECORDS_2.get(0)) + .build()).join(); + + Record1 record1 = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + Record2 record2 = mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))).join(); + assertThat(record1, is(nullValue())); + assertThat(record2, is(nullValue())); + } + + @Test + public void singleConditionCheck() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))).join(); + + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("0"))) + .expressionNames(singletonMap("#attribute", "attribute")) + .build(); + + Key key = Key.builder().partitionValue(0).build(); + + enhancedAsyncClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addConditionCheck(mappedTable1, ConditionCheck.builder() + .key(key) + .conditionExpression(conditionExpression) + .build()) + .build()).join(); + } + + @Test + public void multiConditionCheck() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))).join(); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))).join(); + + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("0"))) + .expressionNames(singletonMap("#attribute", "attribute")) + .build(); + + Key key1 = Key.builder().partitionValue(0).build(); + Key key2 = Key.builder().partitionValue(0).build(); + + enhancedAsyncClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addConditionCheck(mappedTable1, ConditionCheck.builder() + .key(key1) + .conditionExpression(conditionExpression) + .build()) + .addConditionCheck(mappedTable2, ConditionCheck.builder() + .key(key2) + .conditionExpression(conditionExpression) + .build()) + .build()).join(); + } + + @Test + public void mixedCommands() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))).join(); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))).join(); + + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("0"))) + .expressionNames(singletonMap("#attribute", "attribute")) + .build(); + + Key key = Key.builder().partitionValue(0).build(); + + TransactWriteItemsEnhancedRequest transactWriteItemsEnhancedRequest = + TransactWriteItemsEnhancedRequest.builder() + .addConditionCheck(mappedTable1, ConditionCheck.builder() + .key(key) + .conditionExpression(conditionExpression) + .build()) + .addPutItem(mappedTable2, RECORDS_2.get(1)) + .addUpdateItem(mappedTable1, RECORDS_1.get(1)) + .addDeleteItem(mappedTable2, RECORDS_2.get(0)) + .build(); + enhancedAsyncClient.transactWriteItems(transactWriteItemsEnhancedRequest).join(); + + assertThat(mappedTable1.getItem(r -> r.key(k -> 
k.partitionValue(1))).join(), is(RECORDS_1.get(1))); + assertThat(mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))).join(), is(nullValue())); + assertThat(mappedTable2.getItem(r -> r.key(k -> k.partitionValue(1))).join(), is(RECORDS_2.get(1))); + } + + @Test + public void mixedCommands_conditionCheckFailsTransaction() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))).join(); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))).join(); + + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("1"))) + .expressionNames(singletonMap("#attribute", "attribute")) + .build(); + + Key key = Key.builder().partitionValue(0).build(); + + TransactWriteItemsEnhancedRequest transactWriteItemsEnhancedRequest = + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(mappedTable2, RECORDS_2.get(1)) + .addUpdateItem(mappedTable1, RECORDS_1.get(1)) + .addConditionCheck(mappedTable1, ConditionCheck.builder() + .key(key) + .conditionExpression(conditionExpression) + .build()) + .addDeleteItem(mappedTable2, RECORDS_2.get(0)) + .build(); + + try { + enhancedAsyncClient.transactWriteItems(transactWriteItemsEnhancedRequest).join(); + fail("Expected CompletionException to be thrown"); + } catch (CompletionException e) { + assertThat(e.getCause(), instanceOf(TransactionCanceledException.class)); + } + + assertThat(mappedTable1.getItem(r -> r.key(k -> k.partitionValue(1))).join(), is(nullValue())); + assertThat(mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))).join(), is(RECORDS_2.get(0))); + assertThat(mappedTable2.getItem(r -> r.key(k -> k.partitionValue(1))).join(), is(nullValue())); + } +} + diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicCrudTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicCrudTest.java new file mode 100644 index 000000000000..205d9ec86fe2 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicCrudTest.java @@ -0,0 +1,659 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondarySortKey; + +import java.util.Objects; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; + +public class BasicCrudTest extends LocalDynamoDbSyncTestBase { + private static final String ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS = "a*t:t.r-i#bute3"; + + private static class Record { + private String id; + private String sort; + private String attribute; + private String attribute2; + private String attribute3; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private String getSort() { + return sort; + } + + private Record setSort(String sort) { + this.sort = sort; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + private String getAttribute2() { + return attribute2; + } + + private Record setAttribute2(String attribute2) { + this.attribute2 = attribute2; + return this; + } + + private String getAttribute3() { + return attribute3; + } + + private Record setAttribute3(String attribute3) { + this.attribute3 = attribute3; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(sort, record.sort) && + Objects.equals(attribute, record.attribute) && + Objects.equals(attribute2, record.attribute2) && + Objects.equals(attribute3, record.attribute3); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort, attribute, attribute2, attribute3); + } + } + + private static class ShortRecord { + private String id; + private String sort; + private String attribute; + + private 
String getId() { + return id; + } + + private ShortRecord setId(String id) { + this.id = id; + return this; + } + + private String getSort() { + return sort; + } + + private ShortRecord setSort(String sort) { + this.sort = sort; + return this; + } + + private String getAttribute() { + return attribute; + } + + private ShortRecord setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShortRecord that = (ShortRecord) o; + return Objects.equals(id, that.id) && + Objects.equals(sort, that.sort) && + Objects.equals(attribute, that.attribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort, attribute); + } + } + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("sort") + .getter(Record::getSort) + .setter(Record::setSort) + .tags(primarySortKey())) + .addAttribute(String.class, a -> a.name("attribute") + .getter(Record::getAttribute) + .setter(Record::setAttribute)) + .addAttribute(String.class, a -> a.name("attribute2*") + .getter(Record::getAttribute2) + .setter(Record::setAttribute2) + .tags(secondaryPartitionKey("gsi_1"))) + .addAttribute(String.class, a -> a.name(ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) + .getter(Record::getAttribute3) + .setter(Record::setAttribute3) + .tags(secondarySortKey("gsi_1"))) + .build(); + + private static final TableSchema SHORT_TABLE_SCHEMA = + StaticTableSchema.builder(ShortRecord.class) + .newItemSupplier(ShortRecord::new) + .addAttribute(String.class, a -> a.name("id") + .getter(ShortRecord::getId) + .setter(ShortRecord::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("sort") + .getter(ShortRecord::getSort) + .setter(ShortRecord::setSort) + .tags(primarySortKey())) + .addAttribute(String.class, a -> a.name("attribute") + .getter(ShortRecord::getAttribute) + .setter(ShortRecord::setAttribute)) + .build(); + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable = enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + private DynamoDbTable mappedShortTable = enhancedClient.table(getConcreteTableName("table-name"), + SHORT_TABLE_SCHEMA); + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput()) + .globalSecondaryIndices( + EnhancedGlobalSecondaryIndex.builder() + .indexName("gsi_1") + .projection(p -> p.projectionType(ProjectionType.ALL)) + .provisionedThroughput(getDefaultProvisionedThroughput()) + .build())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()); + } + + @Test + public void putThenGetItemUsingKey() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + + Record result = mappedTable.getItem(r -> r.key(k -> 
k.partitionValue("id-value").sortValue("sort-value"))); + + assertThat(result, is(record)); + } + + @Test + public void putThenGetItemUsingKeyItem() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + + Record keyItem = new Record(); + keyItem.setId("id-value"); + keyItem.setSort("sort-value"); + + Record result = mappedTable.getItem(keyItem); + + assertThat(result, is(record)); + } + + @Test + public void getNonExistentItem() { + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))); + assertThat(result, is(nullValue())); + } + + @Test + public void putTwiceThenGetItem() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + Record record2 = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four") + .setAttribute2("five") + .setAttribute3("six"); + + mappedTable.putItem(r -> r.item(record2)); + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))); + + assertThat(result, is(record2)); + } + + @Test + public void putThenDeleteItem_usingShortcutForm() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(record); + Record beforeDeleteResult = + mappedTable.deleteItem(Key.builder().partitionValue("id-value").sortValue("sort-value").build()); + Record afterDeleteResult = + mappedTable.getItem(Key.builder().partitionValue("id-value").sortValue("sort-value").build()); + + assertThat(beforeDeleteResult, is(record)); + assertThat(afterDeleteResult, is(nullValue())); + } + + @Test + public void putThenDeleteItem_usingKeyItemForm() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(record); + Record beforeDeleteResult = + mappedTable.deleteItem(record); + Record afterDeleteResult = + mappedTable.getItem(Key.builder().partitionValue("id-value").sortValue("sort-value").build()); + + assertThat(beforeDeleteResult, is(record)); + assertThat(afterDeleteResult, is(nullValue())); + } + + @Test + public void putWithConditionThatSucceeds() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + record.setAttribute("four"); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("three")) + .build(); + + mappedTable.putItem(PutItemEnhancedRequest.builder(Record.class) + .item(record) + .conditionExpression(conditionExpression).build()); + + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))); + assertThat(result, is(record)); + } + + @Test + public void putWithConditionThatFails() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + 
.setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + record.setAttribute("four"); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("wrong")) + .build(); + + exception.expect(ConditionalCheckFailedException.class); + mappedTable.putItem(PutItemEnhancedRequest.builder(Record.class) + .item(record) + .conditionExpression(conditionExpression).build()); + } + + @Test + public void deleteNonExistentItem() { + Record result = mappedTable.deleteItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))); + assertThat(result, is(nullValue())); + } + + @Test + public void deleteWithConditionThatSucceeds() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("three")) + .build(); + + Key key = mappedTable.keyFrom(record); + mappedTable.deleteItem(DeleteItemEnhancedRequest.builder().key(key).conditionExpression(conditionExpression).build()); + + Record result = mappedTable.getItem(r -> r.key(key)); + assertThat(result, is(nullValue())); + } + + @Test + public void deleteWithConditionThatFails() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("wrong")) + .build(); + + exception.expect(ConditionalCheckFailedException.class); + mappedTable.deleteItem(DeleteItemEnhancedRequest.builder().key(mappedTable.keyFrom(record)) + .conditionExpression(conditionExpression) + .build()); + } + + @Test + public void updateOverwriteCompleteRecord_usingShortcutForm() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(record); + Record record2 = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four") + .setAttribute2("five") + .setAttribute3("six"); + Record result = mappedTable.updateItem(record2); + + assertThat(result, is(record2)); + } + + @Test + public void updateCreatePartialRecord() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one"); + + Record result = mappedTable.updateItem(r -> r.item(record)); + + assertThat(result, is(record)); + } + + @Test + public void updateCreateKeyOnlyRecord() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value"); + + Record result = mappedTable.updateItem(r -> r.item(record)); + assertThat(result, is(record)); + } + + 
@Test + public void updateOverwriteModelledNulls() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + Record record2 = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four"); + Record result = mappedTable.updateItem(r -> r.item(record2)); + + assertThat(result, is(record2)); + } + + @Test + public void updateCanIgnoreNullsAndDoPartialUpdate() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + Record record2 = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four"); + Record result = mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(record2) + .ignoreNulls(true) + .build()); + + Record expectedResult = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four") + .setAttribute2("two") + .setAttribute3("three"); + assertThat(result, is(expectedResult)); + } + + @Test + public void updateShortRecordDoesPartialUpdate() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + ShortRecord record2 = new ShortRecord() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four"); + ShortRecord shortResult = mappedShortTable.updateItem(r -> r.item(record2)); + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue(record.getId()).sortValue(record.getSort()))); + + Record expectedResult = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("four") + .setAttribute2("two") + .setAttribute3("three"); + assertThat(result, is(expectedResult)); + assertThat(shortResult, is(record2)); + } + + @Test + public void updateKeyOnlyExistingRecordDoesNothing() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + Record updateRecord = new Record().setId("id-value").setSort("sort-value"); + + Record result = mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(updateRecord) + .ignoreNulls(true) + .build()); + + assertThat(result, is(record)); + } + + @Test + public void updateWithConditionThatSucceeds() { + Record record = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + record.setAttribute("four"); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("three")) + .build(); + + mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(record) + .conditionExpression(conditionExpression) + .build()); + + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))); + assertThat(result, is(record)); + } + + @Test + public void updateWithConditionThatFails() { + Record record = new Record() + 
.setId("id-value") + .setSort("sort-value") + .setAttribute("one") + .setAttribute2("two") + .setAttribute3("three"); + + mappedTable.putItem(r -> r.item(record)); + record.setAttribute("four"); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("wrong")) + .build(); + + exception.expect(ConditionalCheckFailedException.class); + mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(record) + .conditionExpression(conditionExpression) + .build()); + } + + @Test + public void getAShortRecordWithNewModelledFields() { + ShortRecord shortRecord = new ShortRecord() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one"); + mappedShortTable.putItem(r -> r.item(shortRecord)); + Record expectedRecord = new Record() + .setId("id-value") + .setSort("sort-value") + .setAttribute("one"); + + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("sort-value"))); + assertThat(result, is(expectedRecord)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicQueryTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicQueryTest.java new file mode 100644 index 000000000000..b62bad8d9975 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicQueryTest.java @@ -0,0 +1,567 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional.keyEqualTo; +import static software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional.sortBetween; + +import java.util.*; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.*; +import software.amazon.awssdk.enhanced.dynamodb.NestedAttributeName; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.InnerAttributeRecord; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.NestedTestRecord; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class BasicQueryTest extends LocalDynamoDbSyncTestBase { + + private static class Record { + private String id; + private Integer sort; + private Integer value; + + public String getId() { + return id; + } + + public Record setId(String id) { + this.id = id; + return this; + } + + public Integer getSort() { + return sort; + } + + public Record setSort(Integer sort) { + this.sort = sort; + return this; + } + + public Integer getValue() { + return value; + } + + public Record setValue(Integer value) { + this.value = value; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(sort, record.sort) && + Objects.equals(value, record.value); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort, value); + } + } + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .addAttribute(Integer.class, a -> a.name("sort") + .getter(Record::getSort) + .setter(Record::setSort) + .tags(primarySortKey())) + .addAttribute(Integer.class, a -> a.name("value") + .getter(Record::getValue) + .setter(Record::setValue)) + .build(); + + private static final List RECORDS = + IntStream.range(0, 10) + .mapToObj(i -> new Record().setId("id-value").setSort(i).setValue(i)) + .collect(Collectors.toList()); + + 
private static final List NESTED_TEST_RECORDS = + IntStream.range(0, 10) + .mapToObj(i -> { + final NestedTestRecord nestedTestRecord = new NestedTestRecord(); + nestedTestRecord.setOuterAttribOne("id-value-" + i); + nestedTestRecord.setSort(i); + final InnerAttributeRecord innerAttributeRecord = new InnerAttributeRecord(); + innerAttributeRecord.setAttribOne("attribOne-"+i); + innerAttributeRecord.setAttribTwo(i); + nestedTestRecord.setInnerAttributeRecord(innerAttributeRecord); + nestedTestRecord.setDotVariable("v"+i); + return nestedTestRecord; + }) + .collect(Collectors.toList()); + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable = enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + + private DynamoDbTable mappedNestedTable = enhancedClient.table(getConcreteTableName("nested-table-name"), + TableSchema.fromClass(NestedTestRecord.class)); + + private void insertRecords() { + RECORDS.forEach(record -> mappedTable.putItem(r -> r.item(record))); + NESTED_TEST_RECORDS.forEach(nestedTestRecord -> mappedNestedTable.putItem(r -> r.item(nestedTestRecord))); + } + + private void insertNestedRecords() { + NESTED_TEST_RECORDS.forEach(nestedTestRecord -> mappedNestedTable.putItem(r -> r.item(nestedTestRecord))); + } + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + mappedNestedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()); + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("nested-table-name")) + .build()); + } + + @Test + public void queryAllRecordsDefaultSettings_shortcutForm() { + insertRecords(); + + Iterator> results = + mappedTable.query(keyEqualTo(k -> k.partitionValue("id-value"))).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), is(RECORDS)); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryAllRecordsDefaultSettings_withProjection() { + insertRecords(); + + Iterator> results = + mappedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .attributesToProject("value") + ).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items().size(), is(RECORDS.size())); + + Record firstRecord = page.items().get(0); + assertThat(firstRecord.id, is(nullValue())); + assertThat(firstRecord.sort, is(nullValue())); + assertThat(firstRecord.value, is(0)); + } + + @Test + public void queryAllRecordsDefaultSettings_shortcutForm_viaItems() { + insertRecords(); + + PageIterable query = mappedTable.query(keyEqualTo(k -> k.partitionValue("id-value"))); + SdkIterable results = query.items(); + + assertThat(results.stream().collect(Collectors.toList()), is(RECORDS)); + } + + @Test + public void queryAllRecordsWithFilter() { + insertRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#value >= 
:min_value AND #value <= :max_value") + .expressionValues(expressionValues) + .expressionNames(Collections.singletonMap("#value", "value")) + .build(); + + Iterator> results = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .filterExpression(expression) + .build()) + .iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), + is(RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryAllRecordsWithFilterAndProjection() { + insertRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#value >= :min_value AND #value <= :max_value") + .expressionValues(expressionValues) + .expressionNames(Collections.singletonMap("#value", "value")) + .build(); + + Iterator> results = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .filterExpression(expression) + .attributesToProject("value") + .build()) + .iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), hasSize(3)); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + + Record record = page.items().get(0); + assertThat(record.id, nullValue()); + assertThat(record.sort, nullValue()); + assertThat(record.value, is(3)); + } + + @Test + public void queryBetween() { + insertRecords(); + Key fromKey = Key.builder().partitionValue("id-value").sortValue(3).build(); + Key toKey = Key.builder().partitionValue("id-value").sortValue(5).build(); + Iterator> results = mappedTable.query(r -> r.queryConditional(sortBetween(fromKey, toKey))).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), + is(RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryLimit() { + insertRecords(); + Iterator> results = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .limit(5) + .build()) + .iterator(); + assertThat(results.hasNext(), is(true)); + Page page1 = results.next(); + assertThat(results.hasNext(), is(true)); + Page page2 = results.next(); + assertThat(results.hasNext(), is(true)); + Page page3 = results.next(); + assertThat(results.hasNext(), is(false)); + + Map expectedLastEvaluatedKey1 = new HashMap<>(); + expectedLastEvaluatedKey1.put("id", stringValue("id-value")); + expectedLastEvaluatedKey1.put("sort", numberValue(4)); + Map expectedLastEvaluatedKey2 = new HashMap<>(); + expectedLastEvaluatedKey2.put("id", stringValue("id-value")); + expectedLastEvaluatedKey2.put("sort", numberValue(9)); + assertThat(page1.items(), is(RECORDS.subList(0, 5))); + assertThat(page1.lastEvaluatedKey(), is(expectedLastEvaluatedKey1)); + assertThat(page2.items(), is(RECORDS.subList(5, 10))); + assertThat(page2.lastEvaluatedKey(), is(expectedLastEvaluatedKey2)); + assertThat(page3.items(), is(empty())); + assertThat(page3.lastEvaluatedKey(), is(nullValue())); + } 
+ + @Test + public void queryEmpty() { + Iterator> results = + mappedTable.query(r -> r.queryConditional(keyEqualTo(k -> k.partitionValue("id-value")))).iterator(); + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items(), is(empty())); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryEmpty_viaItems() { + PageIterable query = mappedTable.query(keyEqualTo(k -> k.partitionValue("id-value"))); + SdkIterable results = query.items(); + + assertThat(results.stream().collect(Collectors.toList()), is(empty())); + } + + @Test + public void queryExclusiveStartKey() { + Map exclusiveStartKey = new HashMap<>(); + exclusiveStartKey.put("id", stringValue("id-value")); + exclusiveStartKey.put("sort", numberValue(7)); + insertRecords(); + Iterator> results = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .exclusiveStartKey(exclusiveStartKey) + .build()) + .iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items(), is(RECORDS.subList(8, 10))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryExclusiveStartKey_viaItems() { + Map exclusiveStartKey = new HashMap<>(); + exclusiveStartKey.put("id", stringValue("id-value")); + exclusiveStartKey.put("sort", numberValue(7)); + insertRecords(); + SdkIterable results = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .exclusiveStartKey(exclusiveStartKey) + .build()) + .items(); + + assertThat(results.stream().collect(Collectors.toList()), is(RECORDS.subList(8, 10))); + } + + @Test + public void queryNestedRecord_SingleAttributeName() { + insertNestedRecords(); + Iterator> results = + mappedNestedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value-1"))) + .addNestedAttributeToProject(NestedAttributeName.builder().addElement("innerAttributeRecord") + .addElement("attribOne").build())).iterator(); + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(1)); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is(nullValue())); + assertThat(firstRecord.getSort(), is(nullValue())); + assertThat(firstRecord.getInnerAttributeRecord().getAttribOne(), is("attribOne-1")); + assertThat(firstRecord.getInnerAttributeRecord().getAttribTwo(), is(nullValue())); + results = + mappedNestedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value-1"))) + .addNestedAttributeToProject(NestedAttributeName.create("sort")) + .addAttributeToProject("sort")).iterator(); + assertThat(results.hasNext(), is(true)); + page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(1)); + firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is(nullValue())); + assertThat(firstRecord.getSort(), is(1)); + assertThat(firstRecord.getInnerAttributeRecord(), is(nullValue())); + } + + + @Test + public void queryNestedRecord_withAttributeNameList() { + insertNestedRecords(); + Iterator> results = + mappedNestedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value-1"))) + 
.addNestedAttributesToProject(Arrays.asList( + NestedAttributeName.builder().elements("innerAttributeRecord", "attribOne").build(), + NestedAttributeName.builder().addElement("outerAttribOne").build())) + .addNestedAttributesToProject(NestedAttributeName.builder() + .addElements(Arrays.asList("innerAttributeRecord","attribTwo")).build())).iterator(); + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(1)); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is("id-value-1")); + assertThat(firstRecord.getSort(), is(nullValue())); + assertThat(firstRecord.getInnerAttributeRecord().getAttribOne(), is("attribOne-1")); + assertThat(firstRecord.getInnerAttributeRecord().getAttribTwo(), is(1)); + } + + @Test + public void queryNestedRecord_withAttributeNameListAndStringAttributeToProjectAppended() { + insertNestedRecords(); + Iterator<Page<NestedTestRecord>> results = + mappedNestedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value-1"))) + .addNestedAttributesToProject(Arrays.asList( + NestedAttributeName.builder().elements("innerAttributeRecord","attribOne").build())) + .addNestedAttributesToProject(NestedAttributeName.create("innerAttributeRecord","attribTwo")) + .addAttributeToProject("sort")).iterator(); + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(1)); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is(nullValue())); + assertThat(firstRecord.getSort(), is(1)); + assertThat(firstRecord.getInnerAttributeRecord().getAttribOne(), is("attribOne-1")); + assertThat(firstRecord.getInnerAttributeRecord().getAttribTwo(), is(1)); + } + + @Test + public void queryAllRecordsDefaultSettings_withNestedProjectionNamesNotInNameMap() { + insertNestedRecords(); + + Iterator<Page<NestedTestRecord>> results = + mappedNestedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value-1"))) + .addNestedAttributeToProject(NestedAttributeName.builder().addElement("nonExistentSlot").build())).iterator(); + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(1)); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord, is(nullValue())); + } + + @Test + public void queryRecordDefaultSettings_withDotInTheName() { + insertNestedRecords(); + Iterator<Page<NestedTestRecord>> results = + mappedNestedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value-7"))) + .addNestedAttributeToProject(NestedAttributeName.create("test.com"))).iterator(); + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(1)); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is(nullValue())); + assertThat(firstRecord.getSort(), is(nullValue())); + assertThat(firstRecord.getInnerAttributeRecord(), is(nullValue())); + assertThat(firstRecord.getDotVariable(), is("v7")); + Iterator<Page<NestedTestRecord>> resultWithAttributeToProject = + mappedNestedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value-7"))) + .attributesToProject("test.com").build()).iterator(); + assertThat(resultWithAttributeToProject.hasNext(), is(true)); + Page<NestedTestRecord> pageResult =
resultWithAttributeToProject.next(); + assertThat(resultWithAttributeToProject.hasNext(), is(false)); + assertThat(pageResult.items().size(), is(1)); + NestedTestRecord record = pageResult.items().get(0); + assertThat(record.getOuterAttribOne(), is(nullValue())); + assertThat(record.getSort(), is(nullValue())); + assertThat(record.getInnerAttributeRecord(), is(nullValue())); + assertThat(record.getDotVariable(), is("v7")); + } + + @Test + public void queryRecordDefaultSettings_withEmptyAttributeList() { + insertNestedRecords(); + Iterator<Page<NestedTestRecord>> results = + mappedNestedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value-7"))) + .attributesToProject(new ArrayList<>()).build()).iterator(); + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(1)); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is("id-value-7")); + assertThat(firstRecord.getSort(), is(7)); + assertThat(firstRecord.getInnerAttributeRecord().getAttribTwo(), is(7)); + assertThat(firstRecord.getDotVariable(), is("v7")); + } + + @Test + public void queryRecordDefaultSettings_withNullAttributeList() { + insertNestedRecords(); + + List<String> backwardCompatibility = null; + + Iterator<Page<NestedTestRecord>> results = + mappedNestedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value-7"))) + .attributesToProject(backwardCompatibility).build()).iterator(); + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(1)); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is("id-value-7")); + assertThat(firstRecord.getSort(), is(7)); + assertThat(firstRecord.getInnerAttributeRecord().getAttribTwo(), is(7)); + assertThat(firstRecord.getDotVariable(), is("v7")); + } + + @Test + public void queryAllRecordsDefaultSettings_withNestedProjectionNameEmptyNameMap() { + insertNestedRecords(); + + assertThatExceptionOfType(Exception.class).isThrownBy( + () -> { + Iterator<Page<NestedTestRecord>> results = mappedNestedTable.query(b -> b.queryConditional( + keyEqualTo(k -> k.partitionValue("id-value-3"))) + .attributesToProject("").build()).iterator(); + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + }); + + assertThatExceptionOfType(Exception.class).isThrownBy( + () -> { + Iterator<Page<NestedTestRecord>> results = mappedNestedTable.query(b -> b.queryConditional( + keyEqualTo(k -> k.partitionValue("id-value-3"))) + .addNestedAttributeToProject(NestedAttributeName.create("")).build()).iterator(); + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + }); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicScanTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicScanTest.java new file mode 100644 index 000000000000..b11666510d3c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicScanTest.java @@ -0,0 +1,634 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License.
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.*; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; + +import java.util.*; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.*; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.InnerAttributeRecord; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.NestedTestRecord; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class BasicScanTest extends LocalDynamoDbSyncTestBase { + private static class Record { + private String id; + private Integer sort; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private Integer getSort() { + return sort; + } + + private Record setSort(Integer sort) { + this.sort = sort; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(sort, record.sort); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort); + } + } + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .addAttribute(Integer.class, a -> a.name("sort") + .getter(Record::getSort) + .setter(Record::setSort) + .tags(primarySortKey())) + .build(); + + private static final List RECORDS = + IntStream.range(0, 10) + .mapToObj(i -> new Record().setId("id-value").setSort(i)) + .collect(Collectors.toList()); + + private static final List NESTED_TEST_RECORDS = + IntStream.range(0, 10) + .mapToObj(i -> { + final NestedTestRecord nestedTestRecord = new NestedTestRecord(); + nestedTestRecord.setOuterAttribOne("id-value-" + i); + nestedTestRecord.setSort(i); + final InnerAttributeRecord innerAttributeRecord = new InnerAttributeRecord(); + innerAttributeRecord.setAttribOne("attribOne-"+i); + innerAttributeRecord.setAttribTwo(i); + 
nestedTestRecord.setInnerAttributeRecord(innerAttributeRecord); + nestedTestRecord.setDotVariable("v"+i); + return nestedTestRecord; + }) + .collect(Collectors.toList()); + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable = enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + + private DynamoDbTable mappedNestedTable = enhancedClient.table(getConcreteTableName("nested-table-name"), + TableSchema.fromClass(NestedTestRecord.class)); + + + private void insertRecords() { + RECORDS.forEach(record -> mappedTable.putItem(r -> r.item(record))); + } + + private void insertNestedRecords() { + NESTED_TEST_RECORDS.forEach(nestedTestRecord -> mappedNestedTable.putItem(r -> r.item(nestedTestRecord))); + } + + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + mappedNestedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()); + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("nested-table-name")) + .build()); + } + + @Test + public void scanAllRecordsDefaultSettings() { + insertRecords(); + + mappedTable.scan(ScanEnhancedRequest.builder().build()) + .forEach(p -> p.items().forEach(item -> System.out.println(item))); + Iterator> results = mappedTable.scan(ScanEnhancedRequest.builder().build()).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), is(RECORDS)); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryAllRecordsDefaultSettings_withProjection() { + insertRecords(); + + Iterator> results = + mappedTable.scan(b -> b.attributesToProject("sort")).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items().size(), is(RECORDS.size())); + + Record firstRecord = page.items().get(0); + assertThat(firstRecord.id, is(nullValue())); + assertThat(firstRecord.sort, is(0)); + } + + @Test + public void scanAllRecordsDefaultSettings_viaItems() { + insertRecords(); + + SdkIterable items = mappedTable.scan(ScanEnhancedRequest.builder().limit(2).build()).items(); + assertThat(items.stream().collect(Collectors.toList()), is(RECORDS)); + } + + @Test + public void scanAllRecordsWithFilter() { + insertRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("sort >= :min_value AND sort <= :max_value") + .expressionValues(expressionValues) + .build(); + + Iterator> results = + mappedTable.scan(ScanEnhancedRequest.builder().filterExpression(expression).build()).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), + is(RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanAllRecordsWithFilterAndProjection() { + insertRecords(); + 
Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#sort >= :min_value AND #sort <= :max_value") + .expressionValues(expressionValues) + .putExpressionName("#sort", "sort") + .build(); + + Iterator> results = + mappedTable.scan( + ScanEnhancedRequest.builder() + .attributesToProject("sort") + .filterExpression(expression) + .build() + ).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), hasSize(3)); + + Record record = page.items().get(0); + + assertThat(record.id, is(nullValue())); + assertThat(record.sort, is(3)); + } + + @Test + public void scanLimit() { + insertRecords(); + Iterator> results = mappedTable.scan(r -> r.limit(5)).iterator(); + assertThat(results.hasNext(), is(true)); + Page page1 = results.next(); + assertThat(results.hasNext(), is(true)); + Page page2 = results.next(); + assertThat(results.hasNext(), is(true)); + Page page3 = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page1.items(), is(RECORDS.subList(0, 5))); + assertThat(page1.lastEvaluatedKey(), is(getKeyMap(4))); + assertThat(page2.items(), is(RECORDS.subList(5, 10))); + assertThat(page2.lastEvaluatedKey(), is(getKeyMap(9))); + assertThat(page3.items(), is(empty())); + assertThat(page3.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanLimit_viaItems() { + insertRecords(); + SdkIterable results = mappedTable.scan(r -> r.limit(5)).items(); + assertThat(results.stream().collect(Collectors.toList()), is(RECORDS)); + } + + @Test + public void scanEmpty() { + Iterator> results = mappedTable.scan().iterator(); + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items(), is(empty())); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanEmpty_viaItems() { + Iterator results = mappedTable.scan().items().iterator(); + assertThat(results.hasNext(), is(false)); + } + + @Test + public void scanExclusiveStartKey() { + insertRecords(); + Iterator> results = + mappedTable.scan(r -> r.exclusiveStartKey(getKeyMap(7))).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items(), is(RECORDS.subList(8, 10))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanExclusiveStartKey_viaItems() { + insertRecords(); + SdkIterable results = + mappedTable.scan(r -> r.exclusiveStartKey(getKeyMap(7))).items(); + assertThat(results.stream().collect(Collectors.toList()), is(RECORDS.subList(8, 10))); + } + + private Map getKeyMap(int sort) { + Map result = new HashMap<>(); + result.put("id", stringValue("id-value")); + result.put("sort", numberValue(sort)); + return Collections.unmodifiableMap(result); + } + @Test + public void scanAllRecordsWithFilterAndNestedProjectionSingleAttribute() { + insertNestedRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#sort >= :min_value AND #sort <= :max_value") + .expressionValues(expressionValues) + .putExpressionName("#sort", "sort") + .build(); + + Iterator> results = + 
mappedNestedTable.scan( + ScanEnhancedRequest.builder() + .filterExpression(expression) + .addNestedAttributesToProject( + NestedAttributeName.create(Arrays.asList("innerAttributeRecord","attribOne"))) + .build() + ).iterator(); + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(3)); + Collections.sort(page.items(), (item1, item2) -> + item1.getInnerAttributeRecord().getAttribOne() + .compareTo(item2.getInnerAttributeRecord().getAttribOne())); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is(nullValue())); + assertThat(firstRecord.getSort(), is(nullValue())); + assertThat(firstRecord.getInnerAttributeRecord().getAttribOne(), is("attribOne-3")); + assertThat(firstRecord.getInnerAttributeRecord().getAttribTwo(), is(nullValue())); + + //Attribute repeated with new and old attributeToProject + results = + mappedNestedTable.scan( + ScanEnhancedRequest.builder() + .filterExpression(expression) + .addNestedAttributesToProject(NestedAttributeName.create("sort")) + .addAttributeToProject("sort") + .build() + ).iterator(); + assertThat(results.hasNext(), is(true)); + page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(3)); + Collections.sort(page.items(), (item1, item2) -> + item1.getSort() + .compareTo(item2.getSort())); + firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is(nullValue())); + assertThat(firstRecord.getSort(), is(3)); + assertThat(firstRecord.getInnerAttributeRecord(), is(nullValue())); + assertThat(firstRecord.getInnerAttributeRecord(), is(nullValue())); + + results = + mappedNestedTable.scan( + ScanEnhancedRequest.builder() + .filterExpression(expression) + .addNestedAttributeToProject( + NestedAttributeName.create(Arrays.asList("innerAttributeRecord","attribOne"))) + .build() + ).iterator(); + assertThat(results.hasNext(), is(true)); + page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(3)); + Collections.sort(page.items(), (item1, item2) -> + item1.getInnerAttributeRecord().getAttribOne() + .compareTo(item2.getInnerAttributeRecord().getAttribOne())); + firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is(nullValue())); + assertThat(firstRecord.getSort(), is(nullValue())); + assertThat(firstRecord.getInnerAttributeRecord().getAttribOne(), is("attribOne-3")); + assertThat(firstRecord.getInnerAttributeRecord().getAttribTwo(), is(nullValue())); + } + + @Test + public void scanAllRecordsWithFilterAndNestedProjectionMultipleAttribute() { + insertNestedRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#sort >= :min_value AND #sort <= :max_value") + .expressionValues(expressionValues) + .putExpressionName("#sort", "sort") + .build(); + + final ScanEnhancedRequest build = ScanEnhancedRequest.builder() + .filterExpression(expression) + .addAttributeToProject("outerAttribOne") + .addNestedAttributesToProject(Arrays.asList(NestedAttributeName.builder().elements("innerAttributeRecord") + .addElement("attribOne").build())) + .addNestedAttributeToProject(NestedAttributeName.builder() + .elements(Arrays.asList("innerAttributeRecord", "attribTwo")).build()) + .build(); + Iterator> results = + 
mappedNestedTable.scan( + build + ).iterator(); + + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(3)); + Collections.sort(page.items(), (item1, item2) -> + item1.getInnerAttributeRecord().getAttribOne() + .compareTo(item2.getInnerAttributeRecord().getAttribOne())); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is("id-value-3")); + assertThat(firstRecord.getSort(), is(nullValue())); + assertThat(firstRecord.getInnerAttributeRecord().getAttribOne(), is("attribOne-3")); + assertThat(firstRecord.getInnerAttributeRecord().getAttribTwo(), is(3)); + } + + @Test + public void scanAllRecordsWithNonExistentKeyName() { + insertNestedRecords(); + Map<String, AttributeValue> expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#sort >= :min_value AND #sort <= :max_value") + .expressionValues(expressionValues) + .putExpressionName("#sort", "sort") + .build(); + + Iterator<Page<NestedTestRecord>> results = + mappedNestedTable.scan( + ScanEnhancedRequest.builder() + .filterExpression(expression) + .addNestedAttributesToProject(NestedAttributeName.builder().addElement("nonExistent").build()) + .build() + ).iterator(); + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(3)); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord, is(nullValue())); + } + + @Test + public void scanAllRecordsWithDotInAttributeKeyName() { + insertNestedRecords(); + Map<String, AttributeValue> expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#sort >= :min_value AND #sort <= :max_value") + .expressionValues(expressionValues) + .putExpressionName("#sort", "sort") + .build(); + + Iterator<Page<NestedTestRecord>> results = + mappedNestedTable.scan( + ScanEnhancedRequest.builder() + .filterExpression(expression) + .addNestedAttributesToProject(NestedAttributeName + .create("test.com")).build() + ).iterator(); + assertThat(results.hasNext(), is(true)); + Page<NestedTestRecord> page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(3)); + Collections.sort(page.items(), (item1, item2) -> + item1.getDotVariable() + .compareTo(item2.getDotVariable())); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is(nullValue())); + assertThat(firstRecord.getSort(), is(nullValue())); + assertThat(firstRecord.getDotVariable(), is("v3")); + assertThat(firstRecord.getInnerAttributeRecord(), is(nullValue())); + } + + @Test + public void scanAllRecordsWithSameNamesRepeated() { + // Attribute repeated with new and old attributeToProject + insertNestedRecords(); + Map<String, AttributeValue> expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#sort >= :min_value AND #sort <= :max_value") + .expressionValues(expressionValues) + .putExpressionName("#sort", "sort") + .build(); + + Iterator<Page<NestedTestRecord>> results = + mappedNestedTable.scan( + ScanEnhancedRequest.builder() + .filterExpression(expression) +
.addNestedAttributesToProject(NestedAttributeName.builder().elements("sort").build()) + .addAttributeToProject("sort") + .build() + ).iterator(); + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(3)); + Collections.sort(page.items(), (item1, item2) -> + item1.getSort() + .compareTo(item2.getSort())); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is(nullValue())); + assertThat(firstRecord.getSort(), is(3)); + assertThat(firstRecord.getInnerAttributeRecord(), is(nullValue())); + assertThat(firstRecord.getInnerAttributeRecord(), is(nullValue())); + } + + @Test + public void scanAllRecordsWithEmptyList() { + //Attribute repeated with new and old attributeToProject + insertNestedRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#sort >= :min_value AND #sort <= :max_value") + .expressionValues(expressionValues) + .putExpressionName("#sort", "sort") + .build(); + + Iterator >results = + mappedNestedTable.scan( + ScanEnhancedRequest.builder() + .filterExpression(expression) + .addNestedAttributesToProject(new ArrayList<>()) + .build() + ).iterator(); + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(3)); + Collections.sort(page.items(), (item1, item2) -> + item1.getSort() + .compareTo(item2.getSort())); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is("id-value-3")); + assertThat(firstRecord.getSort(), is(3)); + assertThat(firstRecord.getInnerAttributeRecord().getAttribTwo(), is(3)); + assertThat(firstRecord.getInnerAttributeRecord().getAttribOne(), is("attribOne-3")); + } + + @Test + public void scanAllRecordsWithNullAttributesToProject() { + //Attribute repeated with new and old attributeToProject + insertNestedRecords(); + List backwardCompatibilityNull = null; + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#sort >= :min_value AND #sort <= :max_value") + .expressionValues(expressionValues) + .putExpressionName("#sort", "sort") + .build(); + + Iterator >results = + mappedNestedTable.scan( + ScanEnhancedRequest.builder() + .filterExpression(expression) + .attributesToProject("test.com") + .attributesToProject(backwardCompatibilityNull) + .build() + ).iterator(); + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items().size(), is(3)); + Collections.sort(page.items(), (item1, item2) -> + item1.getSort() + .compareTo(item2.getSort())); + NestedTestRecord firstRecord = page.items().get(0); + assertThat(firstRecord.getOuterAttribOne(), is("id-value-3")); + assertThat(firstRecord.getSort(), is(3)); + assertThat(firstRecord.getInnerAttributeRecord().getAttribTwo(), is(3)); + assertThat(firstRecord.getInnerAttributeRecord().getAttribOne(), is("attribOne-3")); + } + + @Test + public void scanAllRecordsWithNestedProjectionNameEmptyNameMap() { + insertNestedRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + 
expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#sort >= :min_value AND #sort <= :max_value") + .expressionValues(expressionValues) + .putExpressionName("#sort", "sort") + .build(); + + final Iterator> results = + mappedNestedTable.scan( + ScanEnhancedRequest.builder() + .filterExpression(expression) + .addNestedAttributesToProject(NestedAttributeName.builder().elements("").build()).build() + ).iterator(); + + assertThatExceptionOfType(Exception.class).isThrownBy(() -> { final boolean b = results.hasNext(); + Page next = results.next(); }).withMessageContaining("ExpressionAttributeNames contains invalid value"); + + final Iterator> resultsAttributeToProject = + mappedNestedTable.scan( + ScanEnhancedRequest.builder() + .filterExpression(expression) + .addAttributeToProject("").build() + ).iterator(); + + assertThatExceptionOfType(Exception.class).isThrownBy(() -> { + final boolean b = resultsAttributeToProject.hasNext(); + Page next = resultsAttributeToProject.next(); + }); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BatchGetItemTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BatchGetItemTest.java new file mode 100644 index 000000000000..721739157e32 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BatchGetItemTest.java @@ -0,0 +1,255 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPage; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPageIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.ReadBatch; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class BatchGetItemTest extends LocalDynamoDbSyncTestBase { + private static class Record1 { + private Integer id; + + private Integer getId() { + return id; + } + + private Record1 setId(Integer id) { + this.id = id; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record1 record1 = (Record1) o; + return Objects.equals(id, record1.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + } + + private static class Record2 { + private Integer id; + + private Integer getId() { + return id; + } + + private Record2 setId(Integer id) { + this.id = id; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record2 record2 = (Record2) o; + return Objects.equals(id, record2.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + } + + private static final TableSchema TABLE_SCHEMA_1 = + StaticTableSchema.builder(Record1.class) + .newItemSupplier(Record1::new) + .addAttribute(Integer.class, a -> a.name("id_1") + .getter(Record1::getId) + .setter(Record1::setId) + .tags(primaryPartitionKey())) + .build(); + + private static final TableSchema TABLE_SCHEMA_2 = + StaticTableSchema.builder(Record2.class) + .newItemSupplier(Record2::new) + .addAttribute(Integer.class, a -> a.name("id_2") + .getter(Record2::getId) + .setter(Record2::setId) + .tags(primaryPartitionKey())) + .build(); + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable1 = enhancedClient.table(getConcreteTableName("table-name-1"), TABLE_SCHEMA_1); + private DynamoDbTable mappedTable2 = enhancedClient.table(getConcreteTableName("table-name-2"), TABLE_SCHEMA_2); + + private static final List RECORDS_1 = + IntStream.range(0, 2) + .mapToObj(i -> new Record1().setId(i)) + .collect(Collectors.toList()); + + private static final List RECORDS_2 = + IntStream.range(0, 2) + .mapToObj(i -> new Record2().setId(i)) + .collect(Collectors.toList()); + + @Before + public void createTable() { + mappedTable1.createTable(r -> 
r.provisionedThroughput(getDefaultProvisionedThroughput())); + mappedTable2.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-1")) + .build()); + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-2")) + .build()); + } + + private void insertRecords() { + RECORDS_1.forEach(record -> mappedTable1.putItem(r -> r.item(record))); + RECORDS_2.forEach(record -> mappedTable2.putItem(r -> r.item(record))); + } + + @Test + public void getRecordsFromMultipleTables() { + insertRecords(); + SdkIterable results = getBatchGetResultPagesForBothTables(); + assertThat(results.stream().count(), is(1L)); + + results.iterator().forEachRemaining((page) -> { + List table1Results = page.resultsForTable(mappedTable1); + assertThat(table1Results.size(), is(2)); + assertThat(table1Results.get(0).id, is(0)); + assertThat(table1Results.get(1).id, is(1)); + assertThat(page.resultsForTable(mappedTable2).size(), is(2)); + }); + } + + @Test + public void getRecordsFromMultipleTables_viaFlattenedItems() { + insertRecords(); + + BatchGetResultPageIterable results = getBatchGetResultPagesForBothTables(); + + SdkIterable recordsList1 = results.resultsForTable(mappedTable1); + assertThat(recordsList1, containsInAnyOrder(RECORDS_1.toArray())); + + SdkIterable recordsList2 = results.resultsForTable(mappedTable2); + assertThat(recordsList2, containsInAnyOrder(RECORDS_2.toArray())); + } + + @Test + public void notFoundRecordIgnored() { + insertRecords(); + + BatchGetItemEnhancedRequest batchGetItemEnhancedRequest = batchGetItemEnhancedRequestWithNotFoundRecord(); + + SdkIterable results = enhancedClient.batchGetItem(batchGetItemEnhancedRequest); + + assertThat(results.stream().count(), is(1L)); + + results.iterator().forEachRemaining((page) -> { + List mappedTable1Results = page.resultsForTable(mappedTable1); + assertThat(mappedTable1Results.size(), is(1)); + assertThat(mappedTable1Results.get(0).id, is(0)); + assertThat(page.resultsForTable(mappedTable2).size(), is(2)); + }); + } + + @Test + public void notFoundRecordIgnored_viaFlattenedItems() { + insertRecords(); + + BatchGetItemEnhancedRequest batchGetItemEnhancedRequest = batchGetItemEnhancedRequestWithNotFoundRecord(); + + BatchGetResultPageIterable pageIterable = enhancedClient.batchGetItem(batchGetItemEnhancedRequest); + + assertThat(pageIterable.stream().count(), is(1L)); + + List recordsList1 = pageIterable.resultsForTable(mappedTable1).stream().collect(Collectors.toList()); + assertThat(recordsList1, is(RECORDS_1.subList(0, 1))); + + SdkIterable recordsList2 = pageIterable.resultsForTable(mappedTable2); + assertThat(recordsList2, containsInAnyOrder(RECORDS_2.toArray())); + } + + private BatchGetItemEnhancedRequest batchGetItemEnhancedRequestWithNotFoundRecord() { + return BatchGetItemEnhancedRequest.builder() + .readBatches( + ReadBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addGetItem(r -> r.key(k -> k.partitionValue(0))) + .build(), + ReadBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addGetItem(r -> r.key(k -> k.partitionValue(0))) + .build(), + ReadBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addGetItem(r -> r.key(k -> k.partitionValue(1))) + .build(), + ReadBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addGetItem(r -> r.key(k -> 
k.partitionValue(5))) + .build()) + .build(); + } + + private BatchGetResultPageIterable getBatchGetResultPagesForBothTables() { + return enhancedClient.batchGetItem(r -> r.readBatches( + ReadBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addGetItem(i -> i.key(k -> k.partitionValue(0))) + .build(), + ReadBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addGetItem(i -> i.key(k -> k.partitionValue(0))) + .build(), + ReadBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addGetItem(i -> i.key(k -> k.partitionValue(1))) + .build(), + ReadBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addGetItem(i -> i.key(k -> k.partitionValue(1))) + .build())); + } +} + diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BatchWriteItemTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BatchWriteItemTest.java new file mode 100644 index 000000000000..536c34fe8da1 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BatchWriteItemTest.java @@ -0,0 +1,277 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.WriteBatch; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class BatchWriteItemTest extends LocalDynamoDbSyncTestBase { + private static class Record1 { + private Integer id; + private String attribute; + + private Integer getId() { + return id; + } + + private Record1 setId(Integer id) { + this.id = id; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record1 setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record1 record1 = (Record1) o; + return Objects.equals(id, record1.id) && + Objects.equals(attribute, record1.attribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, attribute); + } + } + + private static class Record2 { + private Integer id; + private String attribute; + + private Integer getId() { + return id; + } + + private Record2 setId(Integer id) { + this.id = id; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record2 setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record2 record2 = (Record2) o; + return Objects.equals(id, record2.id) && + Objects.equals(attribute, record2.attribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, attribute); + } + } + + private static final TableSchema TABLE_SCHEMA_1 = + StaticTableSchema.builder(Record1.class) + .newItemSupplier(Record1::new) + .addAttribute(Integer.class, a -> a.name("id_1") + .getter(Record1::getId) + .setter(Record1::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("attribute") + .getter(Record1::getAttribute) + .setter(Record1::setAttribute)) + .build(); + + private static final TableSchema TABLE_SCHEMA_2 = + StaticTableSchema.builder(Record2.class) + .newItemSupplier(Record2::new) + .addAttribute(Integer.class, a -> a.name("id_2") + .getter(Record2::getId) + .setter(Record2::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("attribute") + .getter(Record2::getAttribute) + .setter(Record2::setAttribute)) + .build(); + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable1 = 
enhancedClient.table(getConcreteTableName("table-name-1"), TABLE_SCHEMA_1); + private DynamoDbTable mappedTable2 = enhancedClient.table(getConcreteTableName("table-name-2"), TABLE_SCHEMA_2); + + private static final List RECORDS_1 = + IntStream.range(0, 2) + .mapToObj(i -> new Record1().setId(i).setAttribute(Integer.toString(i))) + .collect(Collectors.toList()); + + private static final List RECORDS_2 = + IntStream.range(0, 2) + .mapToObj(i -> new Record2().setId(i).setAttribute(Integer.toString(i))) + .collect(Collectors.toList()); + + @Before + public void createTable() { + mappedTable1.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + mappedTable2.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-1")) + .build()); + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-2")) + .build()); + } + + @Test + public void singlePut() { + BatchWriteItemEnhancedRequest batchWriteItemEnhancedRequest = + BatchWriteItemEnhancedRequest.builder() + .addWriteBatch( + WriteBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addPutItem(r -> r.item(RECORDS_1.get(0))) + .build()) + .build(); + + enhancedClient.batchWriteItem(batchWriteItemEnhancedRequest); + + Record1 record = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))); + assertThat(record, is(RECORDS_1.get(0))); + } + + @Test + public void multiplePut() { + BatchWriteItemEnhancedRequest batchWriteItemEnhancedRequest = + BatchWriteItemEnhancedRequest.builder() + .writeBatches( + WriteBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addPutItem(r -> r.item(RECORDS_1.get(0))) + .build(), + WriteBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addPutItem(r -> r.item(RECORDS_2.get(0))) + .build()) + .build(); + + enhancedClient.batchWriteItem(batchWriteItemEnhancedRequest); + + Record1 record1 = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))); + Record2 record2 = mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))); + assertThat(record1, is(RECORDS_1.get(0))); + assertThat(record2, is(RECORDS_2.get(0))); + } + + @Test + public void singleDelete() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))); + + WriteBatch singleDeleteBatch = WriteBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addDeleteItem(r -> r.key(k -> k.partitionValue(0))) + .build(); + + BatchWriteItemEnhancedRequest batchWriteItemEnhancedRequest = + BatchWriteItemEnhancedRequest.builder() + .addWriteBatch(singleDeleteBatch) + .build(); + + enhancedClient.batchWriteItem(batchWriteItemEnhancedRequest); + + Record1 record = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))); + assertThat(record, is(nullValue())); + } + + @Test + public void multipleDelete() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))); + + BatchWriteItemEnhancedRequest batchWriteItemEnhancedRequest = + BatchWriteItemEnhancedRequest.builder() + .writeBatches( + WriteBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addDeleteItem(r -> r.key(k -> k.partitionValue(0))) + .build(), + WriteBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addDeleteItem(r -> r.key(k -> k.partitionValue(0))) + .build()) + .build(); + + 
enhancedClient.batchWriteItem(batchWriteItemEnhancedRequest); + + Record1 record1 = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))); + Record2 record2 = mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))); + assertThat(record1, is(nullValue())); + assertThat(record2, is(nullValue())); + } + + @Test + public void mixedCommands() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))); + + enhancedClient.batchWriteItem(r -> r.writeBatches( + WriteBatch.builder(Record1.class) + .mappedTableResource(mappedTable1) + .addPutItem(i -> i.item(RECORDS_1.get(1))) + .build(), + WriteBatch.builder(Record2.class) + .mappedTableResource(mappedTable2) + .addDeleteItem(i -> i.key(k -> k.partitionValue(0))) + .build())); + + assertThat(mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))), is(RECORDS_1.get(0))); + assertThat(mappedTable1.getItem(r -> r.key(k -> k.partitionValue(1))), is(RECORDS_1.get(1))); + assertThat(mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))), is(nullValue())); + } + +} + diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BeanTableSchemaRecursiveTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BeanTableSchemaRecursiveTest.java new file mode 100644 index 000000000000..e2919f9769c3 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BeanTableSchemaRecursiveTest.java @@ -0,0 +1,92 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Collections; +import java.util.Map; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.RecursiveRecordBean; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.RecursiveRecordImmutable; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class BeanTableSchemaRecursiveTest { + @Test + public void recursiveRecord_document() { + TableSchema tableSchema = TableSchema.fromClass(RecursiveRecordBean.class); + + RecursiveRecordImmutable recursiveRecordImmutable2 = RecursiveRecordImmutable.builder() + .setAttribute(4) + .build(); + + RecursiveRecordImmutable recursiveRecordImmutable1 = + RecursiveRecordImmutable.builder() + .setAttribute(3) + .setRecursiveRecordImmutable(recursiveRecordImmutable2) + .build(); + + RecursiveRecordBean recursiveRecordBean2 = new RecursiveRecordBean(); + recursiveRecordBean2.setAttribute(2); + recursiveRecordBean2.setRecursiveRecordImmutable(recursiveRecordImmutable1); + + RecursiveRecordBean recursiveRecordBean1 = new RecursiveRecordBean(); + recursiveRecordBean1.setAttribute(1); + recursiveRecordBean1.setRecursiveRecordBean(recursiveRecordBean2); + + Map itemMap = tableSchema.itemToMap(recursiveRecordBean1, true); + + assertThat(itemMap).hasSize(2); + assertThat(itemMap).containsEntry("attribute", AttributeValue.builder().n("1").build()); + assertThat(itemMap).hasEntrySatisfying("recursiveRecordBean", av -> { + assertThat(av.hasM()).isTrue(); + assertThat(av.m()).containsEntry("attribute", AttributeValue.builder().n("2").build()); + assertThat(av.m()).hasEntrySatisfying("recursiveRecordImmutable", iav -> { + assertThat(iav.hasM()).isTrue(); + assertThat(iav.m()).containsEntry("attribute", AttributeValue.builder().n("3").build()); + assertThat(iav.m()).hasEntrySatisfying("recursiveRecordImmutable", iav2 -> { + assertThat(iav2.hasM()).isTrue(); + assertThat(iav2.m()).containsEntry("attribute", AttributeValue.builder().n("4").build()); + }); + }); + }); + } + + @Test + public void recursiveRecord_list() { + TableSchema tableSchema = TableSchema.fromClass(RecursiveRecordBean.class); + + RecursiveRecordBean recursiveRecordBean2 = new RecursiveRecordBean(); + recursiveRecordBean2.setAttribute(2); + + RecursiveRecordBean recursiveRecordBean1 = new RecursiveRecordBean(); + recursiveRecordBean1.setAttribute(1); + recursiveRecordBean1.setRecursiveRecordList(Collections.singletonList(recursiveRecordBean2)); + + Map itemMap = tableSchema.itemToMap(recursiveRecordBean1, true); + + assertThat(itemMap).hasSize(2); + assertThat(itemMap).containsEntry("attribute", AttributeValue.builder().n("1").build()); + assertThat(itemMap).hasEntrySatisfying("recursiveRecordList", av -> { + assertThat(av.hasL()).isTrue(); + assertThat(av.l()).hasOnlyOneElementSatisfying(listAv -> { + assertThat(listAv.hasM()).isTrue(); + assertThat(listAv.m()).containsEntry("attribute", AttributeValue.builder().n("2").build()); + }); + }); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BufferingSubscriber.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BufferingSubscriber.java new file mode 100644 index 000000000000..006005f438a8 --- /dev/null +++ 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BufferingSubscriber.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; + +public class BufferingSubscriber implements Subscriber { + private final CountDownLatch latch = new CountDownLatch(1); + private final List bufferedItems = new ArrayList<>(); + private Throwable bufferedError = null; + private boolean isCompleted = false; + + @Override + public void onSubscribe(Subscription subscription) { + subscription.request(Long.MAX_VALUE); + } + + @Override + public void onNext(T t) { + bufferedItems.add(t); + } + + @Override + public void onError(Throwable throwable) { + this.bufferedError = throwable; + this.latch.countDown(); + } + + @Override + public void onComplete() { + this.isCompleted = true; + this.latch.countDown(); + } + + public void waitForCompletion(long timeoutInMillis) { + try { + this.latch.await(timeoutInMillis, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + public List bufferedItems() { + return bufferedItems; + } + + public Throwable bufferedError() { + return bufferedError; + } + + public boolean isCompleted() { + return isCompleted; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyBinaryTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyBinaryTest.java new file mode 100644 index 000000000000..cec83beeba2d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyBinaryTest.java @@ -0,0 +1,176 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static java.util.Collections.singletonMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.HashMap; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; +import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; +import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; +import software.amazon.awssdk.services.dynamodb.model.ReturnValue; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; + +@RunWith(MockitoJUnitRunner.class) +public class EmptyBinaryTest { + private static final String TABLE_NAME = "TEST_TABLE"; + private static final SdkBytes EMPTY_BYTES = SdkBytes.fromUtf8String(""); + private static final AttributeValue EMPTY_BINARY = AttributeValue.builder().b(EMPTY_BYTES).build(); + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private DynamoDbTable dynamoDbTable; + + @DynamoDbBean + public static class TestBean { + private String id; + private SdkBytes b; + + @DynamoDbPartitionKey + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public SdkBytes getB() { + return b; + } + + public void setB(SdkBytes b) { + this.b = b; + } + } + + private static final TableSchema TABLE_SCHEMA = TableSchema.fromClass(TestBean.class); + + @Before + public void initializeTable() { + DynamoDbEnhancedClient dynamoDbEnhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(mockDynamoDbClient) + .build(); + + this.dynamoDbTable = dynamoDbEnhancedClient.table(TABLE_NAME, TABLE_SCHEMA); + } + + @Test + public void putEmptyBytes() { + TestBean testBean = new TestBean(); + testBean.setId("id123"); + testBean.setB(EMPTY_BYTES); + + dynamoDbTable.putItem(testBean); + + Map expectedItemMap = new HashMap<>(); + expectedItemMap.put("id", AttributeValue.builder().s("id123").build()); + expectedItemMap.put("b", EMPTY_BINARY); + + PutItemRequest expectedRequest = PutItemRequest.builder() + .tableName(TABLE_NAME) + .item(expectedItemMap) + .build(); + + verify(mockDynamoDbClient).putItem(expectedRequest); + } + + @Test + public void getEmptyBytes() { + Map itemMap = new HashMap<>(); + itemMap.put("id", AttributeValue.builder().s("id123").build()); + itemMap.put("b", EMPTY_BINARY); + + GetItemResponse response = GetItemResponse.builder() + .item(itemMap) + .build(); + + when(mockDynamoDbClient.getItem(any(GetItemRequest.class))).thenReturn(response); + + TestBean result = 
dynamoDbTable.getItem(r -> r.key(k -> k.partitionValue("id123"))); + + assertThat(result.getId()).isEqualTo("id123"); + assertThat(result.getB()).isEqualTo(EMPTY_BYTES); + } + + @Test + public void updateEmptyBytesWithCondition() { + Map expectedItemMap = new HashMap<>(); + expectedItemMap.put("id", AttributeValue.builder().s("id123").build()); + expectedItemMap.put("b", EMPTY_BINARY); + TestBean testBean = new TestBean(); + testBean.setId("id123"); + testBean.setB(EMPTY_BYTES); + + UpdateItemResponse response = UpdateItemResponse.builder() + .attributes(expectedItemMap) + .build(); + when(mockDynamoDbClient.updateItem(any(UpdateItemRequest.class))).thenReturn(response); + + Expression conditionExpression = Expression.builder() + .expression("#attr = :val") + .expressionNames(singletonMap("#attr", "b")) + .expressionValues(singletonMap(":val", EMPTY_BINARY)) + .build(); + + TestBean result = dynamoDbTable.updateItem(r -> r.item(testBean).conditionExpression(conditionExpression)); + + Map expectedExpressionAttributeNames = new HashMap<>(); + expectedExpressionAttributeNames.put("#AMZN_MAPPED_b", "b"); + expectedExpressionAttributeNames.put("#attr", "b"); + Map expectedExpressionAttributeValues = new HashMap<>(); + expectedExpressionAttributeValues.put(":AMZN_MAPPED_b", EMPTY_BINARY); + expectedExpressionAttributeValues.put(":val", EMPTY_BINARY); + Map expectedKeyMap = new HashMap<>(); + expectedKeyMap.put("id", AttributeValue.builder().s("id123").build()); + + UpdateItemRequest expectedRequest = + UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .key(expectedKeyMap) + .returnValues(ReturnValue.ALL_NEW) + .updateExpression("SET #AMZN_MAPPED_b = :AMZN_MAPPED_b") + .conditionExpression("#attr = :val") + .expressionAttributeNames(expectedExpressionAttributeNames) + .expressionAttributeValues(expectedExpressionAttributeValues) + .build(); + + verify(mockDynamoDbClient).updateItem(expectedRequest); + assertThat(result.getId()).isEqualTo("id123"); + assertThat(result.getB()).isEqualTo(EMPTY_BYTES); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyStringTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyStringTest.java new file mode 100644 index 000000000000..a4dfba568e40 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyStringTest.java @@ -0,0 +1,174 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static java.util.Collections.singletonMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.HashMap; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; +import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; +import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; +import software.amazon.awssdk.services.dynamodb.model.ReturnValue; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; + +@RunWith(MockitoJUnitRunner.class) +public class EmptyStringTest { + private static final String TABLE_NAME = "TEST_TABLE"; + private static final AttributeValue EMPTY_STRING = AttributeValue.builder().s("").build(); + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private DynamoDbTable dynamoDbTable; + + @DynamoDbBean + public static class TestBean { + private String id; + private String s; + + @DynamoDbPartitionKey + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getS() { + return s; + } + + public void setS(String s) { + this.s = s; + } + } + + private static final TableSchema TABLE_SCHEMA = TableSchema.fromClass(TestBean.class); + + @Before + public void initializeTable() { + DynamoDbEnhancedClient dynamoDbEnhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(mockDynamoDbClient) + .build(); + + this.dynamoDbTable = dynamoDbEnhancedClient.table(TABLE_NAME, TABLE_SCHEMA); + } + + @Test + public void putEmptyString() { + TestBean testBean = new TestBean(); + testBean.setId("id123"); + testBean.setS(""); + + dynamoDbTable.putItem(testBean); + + Map expectedItemMap = new HashMap<>(); + expectedItemMap.put("id", AttributeValue.builder().s("id123").build()); + expectedItemMap.put("s", EMPTY_STRING); + + PutItemRequest expectedRequest = PutItemRequest.builder() + .tableName(TABLE_NAME) + .item(expectedItemMap) + .build(); + + verify(mockDynamoDbClient).putItem(expectedRequest); + } + + @Test + public void getEmptyString() { + Map itemMap = new HashMap<>(); + itemMap.put("id", AttributeValue.builder().s("id123").build()); + itemMap.put("s", EMPTY_STRING); + + GetItemResponse response = GetItemResponse.builder() + .item(itemMap) + .build(); + + when(mockDynamoDbClient.getItem(any(GetItemRequest.class))).thenReturn(response); + + TestBean result = dynamoDbTable.getItem(r -> r.key(k -> k.partitionValue("id123"))); + + assertThat(result.getId()).isEqualTo("id123"); + assertThat(result.getS()).isEmpty(); + } + 
+ @Test + public void updateEmptyStringWithCondition() { + Map expectedItemMap = new HashMap<>(); + expectedItemMap.put("id", AttributeValue.builder().s("id123").build()); + expectedItemMap.put("s", EMPTY_STRING); + TestBean testBean = new TestBean(); + testBean.setId("id123"); + testBean.setS(""); + + UpdateItemResponse response = UpdateItemResponse.builder() + .attributes(expectedItemMap) + .build(); + when(mockDynamoDbClient.updateItem(any(UpdateItemRequest.class))).thenReturn(response); + + Expression conditionExpression = Expression.builder() + .expression("#attr = :val") + .expressionNames(singletonMap("#attr", "s")) + .expressionValues(singletonMap(":val", EMPTY_STRING)) + .build(); + + TestBean result = dynamoDbTable.updateItem(r -> r.item(testBean).conditionExpression(conditionExpression)); + + Map expectedExpressionAttributeNames = new HashMap<>(); + expectedExpressionAttributeNames.put("#AMZN_MAPPED_s", "s"); + expectedExpressionAttributeNames.put("#attr", "s"); + Map expectedExpressionAttributeValues = new HashMap<>(); + expectedExpressionAttributeValues.put(":AMZN_MAPPED_s", EMPTY_STRING); + expectedExpressionAttributeValues.put(":val", EMPTY_STRING); + Map expectedKeyMap = new HashMap<>(); + expectedKeyMap.put("id", AttributeValue.builder().s("id123").build()); + + UpdateItemRequest expectedRequest = + UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .key(expectedKeyMap) + .returnValues(ReturnValue.ALL_NEW) + .updateExpression("SET #AMZN_MAPPED_s = :AMZN_MAPPED_s") + .conditionExpression("#attr = :val") + .expressionAttributeNames(expectedExpressionAttributeNames) + .expressionAttributeValues(expectedExpressionAttributeValues) + .build(); + + verify(mockDynamoDbClient).updateItem(expectedRequest); + assertThat(result.getId()).isEqualTo("id123"); + assertThat(result.getS()).isEmpty(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionAsyncTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionAsyncTest.java new file mode 100644 index 000000000000..9cb864bc38aa --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionAsyncTest.java @@ -0,0 +1,103 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.CompletionException; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnum; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnumRecord; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnumShortened; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnumShortenedRecord; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +public class FailedConversionAsyncTest extends LocalDynamoDbAsyncTestBase { + private static final TableSchema TABLE_SCHEMA = TableSchema.fromClass(FakeEnumRecord.class); + private static final TableSchema SHORT_TABLE_SCHEMA = + TableSchema.fromClass(FakeEnumShortenedRecord.class); + + private final DynamoDbEnhancedAsyncClient enhancedClient = + DynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(getDynamoDbAsyncClient()) + .build(); + + private final DynamoDbAsyncTable mappedTable = + enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + private final DynamoDbAsyncTable mappedShortTable = + enhancedClient.table(getConcreteTableName("table-name"), SHORT_TABLE_SCHEMA); + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + } + + @After + public void deleteTable() { + getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()).join(); + } + + @Test + public void exceptionOnRead() { + FakeEnumRecord record = new FakeEnumRecord(); + record.setId("123"); + record.setEnumAttribute(FakeEnum.TWO); + mappedTable.putItem(record).join(); + + assertThatThrownBy(() -> mappedShortTable.getItem(Key.builder().partitionValue("123").build()).join()) + .isInstanceOf(CompletionException.class) + .hasCauseInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("TWO") + .hasMessageContaining("FakeEnumShortened"); + } + + @Test + public void iterableExceptionOnRead() { + FakeEnumRecord record = new FakeEnumRecord(); + record.setId("1"); + record.setEnumAttribute(FakeEnum.ONE); + mappedTable.putItem(record).join(); + record.setId("2"); + record.setEnumAttribute(FakeEnum.TWO); + mappedTable.putItem(record).join(); + + List> results = + drainPublisherToError(mappedShortTable.scan(r -> r.limit(1)), 1, IllegalArgumentException.class); + + assertThat(results).hasOnlyOneElementSatisfying( + page -> assertThat(page.items()).hasOnlyOneElementSatisfying(item -> { + assertThat(item.getId()).isEqualTo("1"); + assertThat(item.getEnumAttribute()).isEqualTo(FakeEnumShortened.ONE); + })); + } +} diff --git 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionSyncTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionSyncTest.java new file mode 100644 index 000000000000..db93471f8cba --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionSyncTest.java @@ -0,0 +1,99 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import java.util.Iterator; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnum; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnumRecord; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnumShortenedRecord; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +public class FailedConversionSyncTest extends LocalDynamoDbSyncTestBase { + private static final TableSchema TABLE_SCHEMA = TableSchema.fromClass(FakeEnumRecord.class); + private static final TableSchema SHORT_TABLE_SCHEMA = + TableSchema.fromClass(FakeEnumShortenedRecord.class); + + private final DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private final DynamoDbTable mappedTable = + enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + private final DynamoDbTable mappedShortTable = + enhancedClient.table(getConcreteTableName("table-name"), SHORT_TABLE_SCHEMA); + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()); + } + + @Test + public void exceptionOnRead() { + FakeEnumRecord record = new FakeEnumRecord(); + record.setId("123"); + record.setEnumAttribute(FakeEnum.TWO); + mappedTable.putItem(record); + + assertThatThrownBy(() -> mappedShortTable.getItem(Key.builder().partitionValue("123").build())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("TWO") + 
.hasMessageContaining("FakeEnumShortened"); + } + + @Test + public void iterableExceptionOnRead() { + FakeEnumRecord record = new FakeEnumRecord(); + record.setId("1"); + record.setEnumAttribute(FakeEnum.ONE); + mappedTable.putItem(record); + record.setId("2"); + record.setEnumAttribute(FakeEnum.TWO); + mappedTable.putItem(record); + + Iterator> results = mappedShortTable.scan(r -> r.limit(1)).iterator(); + + assertThatThrownBy(() -> { + // We can't guarantee the order they will be returned + results.next(); + results.next(); + }).isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("TWO") + .hasMessageContaining("FakeEnumShortened"); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FlattenTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FlattenTest.java new file mode 100644 index 000000000000..69e410ac08df --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FlattenTest.java @@ -0,0 +1,206 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.Objects; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class FlattenTest extends LocalDynamoDbSyncTestBase { + private static class Record { + private String id; + private Document document; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private Document getDocument() { + return document; + } + + private Record setDocument(Document document) { + this.document = document; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(document, record.document); + } + + @Override + public int hashCode() { + return Objects.hash(id, document); + } + } + + private static class Document { + private String documentAttribute1; + private String documentAttribute2; + private String documentAttribute3; + + private String getDocumentAttribute1() { + return documentAttribute1; + } + + private Document setDocumentAttribute1(String documentAttribute1) { + this.documentAttribute1 = 
documentAttribute1; + return this; + } + + private String getDocumentAttribute2() { + return documentAttribute2; + } + + private Document setDocumentAttribute2(String documentAttribute2) { + this.documentAttribute2 = documentAttribute2; + return this; + } + + private String getDocumentAttribute3() { + return documentAttribute3; + } + + private Document setDocumentAttribute3(String documentAttribute3) { + this.documentAttribute3 = documentAttribute3; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Document document = (Document) o; + return Objects.equals(documentAttribute1, document.documentAttribute1) && + Objects.equals(documentAttribute2, document.documentAttribute2) && + Objects.equals(documentAttribute3, document.documentAttribute3); + } + + @Override + public int hashCode() { + return Objects.hash(documentAttribute1, documentAttribute2, documentAttribute3); + } + } + + private static final StaticTableSchema DOCUMENT_SCHEMA = + StaticTableSchema.builder(Document.class) + .newItemSupplier(Document::new) + .addAttribute(String.class, a -> a.name("documentAttribute1") + .getter(Document::getDocumentAttribute1) + .setter(Document::setDocumentAttribute1)) + .addAttribute(String.class, a -> a.name("documentAttribute2") + .getter(Document::getDocumentAttribute2) + .setter(Document::setDocumentAttribute2)) + .addAttribute(String.class, a -> a.name("documentAttribute3") + .getter(Document::getDocumentAttribute3) + .setter(Document::setDocumentAttribute3)) + .build(); + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .flatten(DOCUMENT_SCHEMA, Record::getDocument, Record::setDocument) + .build(); + + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable = enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()); + } + + @Test + public void update_allValues() { + Document document = new Document() + .setDocumentAttribute1("one") + .setDocumentAttribute2("two") + .setDocumentAttribute3("three"); + Record record = new Record() + .setId("id-value") + .setDocument(document); + + Record updatedRecord = mappedTable.updateItem(r -> r.item(record)); + Record fetchedRecord = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value"))); + + assertThat(updatedRecord, is(record)); + assertThat(fetchedRecord, is(record)); + } + + @Test + public void update_someValues() { + Document document = new Document() + .setDocumentAttribute1("one") + .setDocumentAttribute2("two"); + Record record = new Record() + .setId("id-value") + .setDocument(document); + + Record updatedRecord = mappedTable.updateItem(r -> r.item(record)); + Record fetchedRecord = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value"))); + + assertThat(updatedRecord, is(record)); + assertThat(fetchedRecord, is(record)); + } + + @Test + public void update_nullDocument() { + Record record = 
new Record() + .setId("id-value"); + + Record updatedRecord = mappedTable.updateItem(r -> r.item(record)); + Record fetchedRecord = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value"))); + + assertThat(updatedRecord, is(record)); + assertThat(fetchedRecord, is(record)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FlattenWithTagsTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FlattenWithTagsTest.java new file mode 100644 index 000000000000..5cda96444073 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FlattenWithTagsTest.java @@ -0,0 +1,196 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; + +import java.util.Objects; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class FlattenWithTagsTest extends LocalDynamoDbSyncTestBase { + private static class Record { + private String id; + private Document document; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private Document getDocument() { + return document; + } + + private Record setDocument(Document document) { + this.document = document; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(document, record.document); + } + + @Override + public int hashCode() { + return Objects.hash(id, document); + } + } + + private static class Document { + private String documentAttribute1; + private String documentAttribute2; + private String documentAttribute3; + + private String getDocumentAttribute1() { + return documentAttribute1; + } + + private Document setDocumentAttribute1(String documentAttribute1) { + this.documentAttribute1 = documentAttribute1; + return this; + } + + private String getDocumentAttribute2() { + return documentAttribute2; + } + + private Document setDocumentAttribute2(String documentAttribute2) { + this.documentAttribute2 = documentAttribute2; + return 
this; + } + + private String getDocumentAttribute3() { + return documentAttribute3; + } + + private Document setDocumentAttribute3(String documentAttribute3) { + this.documentAttribute3 = documentAttribute3; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Document document = (Document) o; + return Objects.equals(documentAttribute1, document.documentAttribute1) && + Objects.equals(documentAttribute2, document.documentAttribute2) && + Objects.equals(documentAttribute3, document.documentAttribute3); + } + + @Override + public int hashCode() { + return Objects.hash(documentAttribute1, documentAttribute2, documentAttribute3); + } + } + + private static final StaticTableSchema DOCUMENT_SCHEMA = + StaticTableSchema.builder(Document.class) + .newItemSupplier(Document::new) + .addAttribute(String.class, a -> a.name("documentAttribute1") + .getter(Document::getDocumentAttribute1) + .setter(Document::setDocumentAttribute1) + .addTag(primarySortKey())) + .addAttribute(String.class, a -> a.name("documentAttribute2") + .getter(Document::getDocumentAttribute2) + .setter(Document::setDocumentAttribute2)) + .addAttribute(String.class, a -> a.name("documentAttribute3") + .getter(Document::getDocumentAttribute3) + .setter(Document::setDocumentAttribute3)) + .build(); + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .flatten(DOCUMENT_SCHEMA, Record::getDocument, Record::setDocument) + .build(); + + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable = enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()); + } + + @Test + public void update_allValues() { + Document document = new Document() + .setDocumentAttribute1("one") + .setDocumentAttribute2("two") + .setDocumentAttribute3("three"); + Record record = new Record() + .setId("id-value") + .setDocument(document); + + Record updatedRecord = mappedTable.updateItem(r -> r.item(record)); + Record fetchedRecord = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("one"))); + + assertThat(updatedRecord, is(record)); + assertThat(fetchedRecord, is(record)); + } + + @Test + public void update_someValues() { + Document document = new Document() + .setDocumentAttribute1("one") + .setDocumentAttribute2("two"); + Record record = new Record() + .setId("id-value") + .setDocument(document); + + Record updatedRecord = mappedTable.updateItem(r -> r.item(record)); + Record fetchedRecord = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id-value").sortValue("one"))); + + assertThat(updatedRecord, is(record)); + assertThat(fetchedRecord, is(record)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/ImmutableTableSchemaRecursiveTest.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/ImmutableTableSchemaRecursiveTest.java new file mode 100644 index 000000000000..308f3f405e0f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/ImmutableTableSchemaRecursiveTest.java @@ -0,0 +1,98 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Collections; +import java.util.Map; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.RecursiveRecordBean; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.RecursiveRecordImmutable; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class ImmutableTableSchemaRecursiveTest { + + @Test + public void recursiveRecord_document() { + TableSchema tableSchema = TableSchema.fromClass(RecursiveRecordImmutable.class); + + RecursiveRecordBean recursiveRecordBean2 = new RecursiveRecordBean(); + recursiveRecordBean2.setAttribute(4); + + RecursiveRecordBean recursiveRecordBean1 = new RecursiveRecordBean(); + recursiveRecordBean1.setAttribute(3); + recursiveRecordBean1.setRecursiveRecordBean(recursiveRecordBean2); + + RecursiveRecordImmutable recursiveRecordImmutable2 = + RecursiveRecordImmutable.builder() + .setAttribute(2) + .setRecursiveRecordBean(recursiveRecordBean1) + .build(); + + RecursiveRecordImmutable recursiveRecordImmutable1 = + RecursiveRecordImmutable.builder() + .setAttribute(1) + .setRecursiveRecordImmutable(recursiveRecordImmutable2) + .build(); + + Map itemMap = tableSchema.itemToMap(recursiveRecordImmutable1, true); + + assertThat(itemMap).hasSize(2); + assertThat(itemMap).containsEntry("attribute", AttributeValue.builder().n("1").build()); + assertThat(itemMap).hasEntrySatisfying("recursiveRecordImmutable", av -> { + assertThat(av.hasM()).isTrue(); + assertThat(av.m()).containsEntry("attribute", AttributeValue.builder().n("2").build()); + assertThat(av.m()).hasEntrySatisfying("recursiveRecordBean", bav -> { + assertThat(bav.hasM()).isTrue(); + assertThat(bav.m()).containsEntry("attribute", AttributeValue.builder().n("3").build()); + assertThat(bav.m()).hasEntrySatisfying("recursiveRecordBean", bav2 -> { + assertThat(bav2.hasM()).isTrue(); + assertThat(bav2.m()).containsEntry("attribute", AttributeValue.builder().n("4").build()); + }); + }); + }); + } + + @Test + public void recursiveRecord_list() { + TableSchema tableSchema = + TableSchema.fromClass(RecursiveRecordImmutable.class); + + RecursiveRecordImmutable recursiveRecordImmutable2 = RecursiveRecordImmutable.builder() + .setAttribute(2) + .build(); + + RecursiveRecordImmutable recursiveRecordImmutable1 = + RecursiveRecordImmutable.builder() + .setAttribute(1) + 
.setRecursiveRecordList(Collections.singletonList(recursiveRecordImmutable2)) + .build(); + + Map itemMap = tableSchema.itemToMap(recursiveRecordImmutable1, true); + + assertThat(itemMap).hasSize(2); + assertThat(itemMap).containsEntry("attribute", AttributeValue.builder().n("1").build()); + assertThat(itemMap).hasEntrySatisfying("recursiveRecordList", av -> { + assertThat(av.hasL()).isTrue(); + assertThat(av.l()).hasOnlyOneElementSatisfying(listAv -> { + assertThat(listAv.hasM()).isTrue(); + assertThat(listAv.m()).containsEntry("attribute", AttributeValue.builder().n("2").build()); + }); + }); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/IndexQueryTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/IndexQueryTest.java new file mode 100644 index 000000000000..e82e340e90ac --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/IndexQueryTest.java @@ -0,0 +1,301 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional.keyEqualTo; + +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbIndex; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import 
software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; + +public class IndexQueryTest extends LocalDynamoDbSyncTestBase { + private static class Record { + private String id; + private Integer sort; + private Integer value; + private String gsiId; + private Integer gsiSort; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private Integer getSort() { + return sort; + } + + private Record setSort(Integer sort) { + this.sort = sort; + return this; + } + + private Integer getValue() { + return value; + } + + private Record setValue(Integer value) { + this.value = value; + return this; + } + + private String getGsiId() { + return gsiId; + } + + private Record setGsiId(String gsiId) { + this.gsiId = gsiId; + return this; + } + + private Integer getGsiSort() { + return gsiSort; + } + + private Record setGsiSort(Integer gsiSort) { + this.gsiSort = gsiSort; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(sort, record.sort) && + Objects.equals(value, record.value) && + Objects.equals(gsiId, record.gsiId) && + Objects.equals(gsiSort, record.gsiSort); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort, value, gsiId, gsiSort); + } + } + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .addAttribute(Integer.class, a -> a.name("sort") + .getter(Record::getSort) + .setter(Record::setSort) + .tags(primarySortKey())) + .addAttribute(Integer.class, a -> a.name("value") + .getter(Record::getValue) + .setter(Record::setValue)) + .addAttribute(String.class, a -> a.name("gsi_id") + .getter(Record::getGsiId) + .setter(Record::setGsiId) + .tags(secondaryPartitionKey("gsi_keys_only"))) + .addAttribute(Integer.class, a -> a.name("gsi_sort") + .getter(Record::getGsiSort) + .setter(Record::setGsiSort) + .tags(secondarySortKey("gsi_keys_only"))) + .build(); + + private static final List RECORDS = + IntStream.range(0, 10) + .mapToObj(i -> new Record() + .setId("id-value") + .setSort(i) + .setValue(i) + .setGsiId("gsi-id-value") + .setGsiSort(i)) + .collect(Collectors.toList()); + + private static final List KEYS_ONLY_RECORDS = + RECORDS.stream() + .map(record -> new Record() + .setId(record.id) + .setSort(record.sort) + .setGsiId(record.gsiId) + .setGsiSort(record.gsiSort)) + .collect(Collectors.toList()); + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable = enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + private DynamoDbIndex keysOnlyMappedIndex = mappedTable.index("gsi_keys_only"); + + private void insertRecords() { + RECORDS.forEach(record -> mappedTable.putItem(r -> r.item(record))); + } + + @Before + public void createTable() { + mappedTable.createTable( + 
CreateTableEnhancedRequest.builder() + .provisionedThroughput(getDefaultProvisionedThroughput()) + .globalSecondaryIndices( + EnhancedGlobalSecondaryIndex.builder() + .indexName("gsi_keys_only") + .projection(p -> p.projectionType(ProjectionType.KEYS_ONLY)) + .provisionedThroughput(getDefaultProvisionedThroughput()) + .build()) + .build()); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()); + } + + @Test + public void queryAllRecordsDefaultSettings_usingShortcutForm() { + insertRecords(); + + Iterator> results = + keysOnlyMappedIndex.query(keyEqualTo(k -> k.partitionValue("gsi-id-value"))).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), is(KEYS_ONLY_RECORDS)); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryBetween() { + insertRecords(); + Key fromKey = Key.builder().partitionValue("gsi-id-value").sortValue(3).build(); + Key toKey = Key.builder().partitionValue("gsi-id-value").sortValue(5).build(); + Iterator> results = + keysOnlyMappedIndex.query(r -> r.queryConditional(QueryConditional.sortBetween(fromKey, toKey))).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), + is(KEYS_ONLY_RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryLimit() { + insertRecords(); + Iterator> results = + keysOnlyMappedIndex.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("gsi-id-value"))) + .limit(5) + .build()) + .iterator(); + + assertThat(results.hasNext(), is(true)); + Page page1 = results.next(); + assertThat(results.hasNext(), is(true)); + Page page2 = results.next(); + assertThat(results.hasNext(), is(true)); + Page page3 = results.next(); + assertThat(results.hasNext(), is(false)); + + Map expectedLastEvaluatedKey1 = new HashMap<>(); + expectedLastEvaluatedKey1.put("id", stringValue(KEYS_ONLY_RECORDS.get(4).getId())); + expectedLastEvaluatedKey1.put("sort", numberValue(KEYS_ONLY_RECORDS.get(4).getSort())); + expectedLastEvaluatedKey1.put("gsi_id", stringValue(KEYS_ONLY_RECORDS.get(4).getGsiId())); + expectedLastEvaluatedKey1.put("gsi_sort", numberValue(KEYS_ONLY_RECORDS.get(4).getGsiSort())); + Map expectedLastEvaluatedKey2 = new HashMap<>(); + expectedLastEvaluatedKey2.put("id", stringValue(KEYS_ONLY_RECORDS.get(9).getId())); + expectedLastEvaluatedKey2.put("sort", numberValue(KEYS_ONLY_RECORDS.get(9).getSort())); + expectedLastEvaluatedKey2.put("gsi_id", stringValue(KEYS_ONLY_RECORDS.get(9).getGsiId())); + expectedLastEvaluatedKey2.put("gsi_sort", numberValue(KEYS_ONLY_RECORDS.get(9).getGsiSort())); + + assertThat(page1.items(), is(KEYS_ONLY_RECORDS.subList(0, 5))); + assertThat(page1.lastEvaluatedKey(), is(expectedLastEvaluatedKey1)); + assertThat(page2.items(), is(KEYS_ONLY_RECORDS.subList(5, 10))); + assertThat(page2.lastEvaluatedKey(), is(expectedLastEvaluatedKey2)); + assertThat(page3.items(), is(empty())); + assertThat(page3.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryEmpty() { + Iterator> results = + keysOnlyMappedIndex.query(r -> r.queryConditional(keyEqualTo(k -> k.partitionValue("gsi-id-value")))).iterator(); + 
assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items(), is(empty())); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void queryExclusiveStartKey() { + insertRecords(); + Map expectedLastEvaluatedKey = new HashMap<>(); + expectedLastEvaluatedKey.put("id", stringValue(KEYS_ONLY_RECORDS.get(7).getId())); + expectedLastEvaluatedKey.put("sort", numberValue(KEYS_ONLY_RECORDS.get(7).getSort())); + expectedLastEvaluatedKey.put("gsi_id", stringValue(KEYS_ONLY_RECORDS.get(7).getGsiId())); + expectedLastEvaluatedKey.put("gsi_sort", numberValue(KEYS_ONLY_RECORDS.get(7).getGsiSort())); + Iterator> results = + keysOnlyMappedIndex.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("gsi-id-value"))) + .exclusiveStartKey(expectedLastEvaluatedKey).build()) + .iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items(), is(KEYS_ONLY_RECORDS.subList(8, 10))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/IndexScanTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/IndexScanTest.java new file mode 100644 index 000000000000..9cc674ed489a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/IndexScanTest.java @@ -0,0 +1,285 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondarySortKey; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbIndex; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; + +public class IndexScanTest extends LocalDynamoDbSyncTestBase { + private static class Record { + private String id; + private Integer sort; + private Integer value; + private String gsiId; + private Integer gsiSort; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private Integer getSort() { + return sort; + } + + private Record setSort(Integer sort) { + this.sort = sort; + return this; + } + + private Integer getValue() { + return value; + } + + private Record setValue(Integer value) { + this.value = value; + return this; + } + + private String getGsiId() { + return gsiId; + } + + private Record setGsiId(String gsiId) { + this.gsiId = gsiId; + return this; + } + + private Integer getGsiSort() { + return gsiSort; + } + + private Record setGsiSort(Integer gsiSort) { + this.gsiSort = gsiSort; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(sort, record.sort) && + Objects.equals(value, record.value) && + Objects.equals(gsiId, record.gsiId) && + Objects.equals(gsiSort, record.gsiSort); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort, value, gsiId, gsiSort); + } + } + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> 
a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .addAttribute(Integer.class, a -> a.name("sort") + .getter(Record::getSort) + .setter(Record::setSort) + .tags(primarySortKey())) + .addAttribute(Integer.class, a -> a.name("value") + .getter(Record::getValue) + .setter(Record::setValue)) + .addAttribute(String.class, a -> a.name("gsi_id") + .getter(Record::getGsiId) + .setter(Record::setGsiId) + .tags(secondaryPartitionKey("gsi_keys_only"))) + .addAttribute(Integer.class, a -> a.name("gsi_sort") + .getter(Record::getGsiSort) + .setter(Record::setGsiSort) + .tags(secondarySortKey("gsi_keys_only"))) + .build(); + + private static final List RECORDS = + IntStream.range(0, 10) + .mapToObj(i -> new Record() + .setId("id-value") + .setSort(i) + .setValue(i) + .setGsiId("gsi-id-value") + .setGsiSort(i)) + .collect(Collectors.toList()); + + private static final List KEYS_ONLY_RECORDS = + RECORDS.stream() + .map(record -> new Record() + .setId(record.id) + .setSort(record.sort) + .setGsiId(record.gsiId) + .setGsiSort(record.gsiSort)) + .collect(Collectors.toList()); + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable = enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + private DynamoDbIndex keysOnlyMappedIndex = mappedTable.index("gsi_keys_only"); + + private void insertRecords() { + RECORDS.forEach(record -> mappedTable.putItem(r -> r.item(record))); + } + + @Before + public void createTable() { + mappedTable.createTable( + r -> r.provisionedThroughput(getDefaultProvisionedThroughput()) + .globalSecondaryIndices( + EnhancedGlobalSecondaryIndex.builder() + .indexName("gsi_keys_only") + .projection(p -> p.projectionType(ProjectionType.KEYS_ONLY)) + .provisionedThroughput(getDefaultProvisionedThroughput()) + .build())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()); + } + + @Test + public void scanAllRecordsDefaultSettings() { + insertRecords(); + + Iterator> results = keysOnlyMappedIndex.scan(ScanEnhancedRequest.builder().build()).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), is(KEYS_ONLY_RECORDS)); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanAllRecordsWithFilter() { + insertRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("sort >= :min_value AND sort <= :max_value") + .expressionValues(expressionValues) + .build(); + + Iterator> results = + keysOnlyMappedIndex.scan(ScanEnhancedRequest.builder().filterExpression(expression).build()).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), + is(KEYS_ONLY_RECORDS.stream().filter(r -> r.sort >= 3 && r.sort <= 5).collect(Collectors.toList()))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanLimit() { + insertRecords(); + Iterator> results = keysOnlyMappedIndex.scan(r -> r.limit(5)).iterator(); + assertThat(results.hasNext(), is(true)); + Page page1 = results.next(); + 
assertThat(results.hasNext(), is(true)); + Page page2 = results.next(); + assertThat(results.hasNext(), is(true)); + Page page3 = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page1.items(), is(KEYS_ONLY_RECORDS.subList(0, 5))); + assertThat(page1.lastEvaluatedKey(), is(getKeyMap(4))); + assertThat(page2.items(), is(KEYS_ONLY_RECORDS.subList(5, 10))); + assertThat(page2.lastEvaluatedKey(), is(getKeyMap(9))); + assertThat(page3.items(), is(empty())); + assertThat(page3.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanEmpty() { + Iterator> results = keysOnlyMappedIndex.scan().iterator(); + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items(), is(empty())); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + @Test + public void scanExclusiveStartKey() { + insertRecords(); + Iterator> results = + keysOnlyMappedIndex.scan(r -> r.exclusiveStartKey(getKeyMap(7))).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + assertThat(page.items(), is(KEYS_ONLY_RECORDS.subList(8, 10))); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + } + + private Map getKeyMap(int sort) { + Map result = new HashMap<>(); + result.put("id", stringValue(KEYS_ONLY_RECORDS.get(sort).getId())); + result.put("sort", numberValue(KEYS_ONLY_RECORDS.get(sort).getSort())); + result.put("gsi_id", stringValue(KEYS_ONLY_RECORDS.get(sort).getGsiId())); + result.put("gsi_sort", numberValue(KEYS_ONLY_RECORDS.get(sort).getGsiSort())); + return Collections.unmodifiableMap(result); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDb.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDb.java new file mode 100644 index 000000000000..7a6bdb5fce6a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDb.java @@ -0,0 +1,142 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.IOException; +import java.net.ServerSocket; +import java.net.URI; +import java.util.Optional; +import com.amazonaws.services.dynamodbv2.local.main.ServerRunner; +import com.amazonaws.services.dynamodbv2.local.server.DynamoDBProxyServer; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +/** + * Wrapper for a local DynamoDb server used in testing. Each instance of this class will find a new port to run on, + * so multiple instances can be safely run simultaneously. Each instance of this service uses memory as a storage medium + * and is thus completely ephemeral; no data will be persisted between stops and starts. + * + * LocalDynamoDb localDynamoDb = new LocalDynamoDb(); + * localDynamoDb.start(); // Start the service running locally on host + * DynamoDbClient dynamoDbClient = localDynamoDb.createClient(); + * ... // Do your testing with the client + * localDynamoDb.stop(); // Stop the service and free up resources + * + * Where possible, it's recommended to keep a single running instance for all of your tests, as tearing down + * and creating a new server for every test can be slow. However, problems have been observed when dropping tables + * between tests in this scenario, so it's best to write your tests to be resilient to tables that already contain data. + */ +class LocalDynamoDb { + private DynamoDBProxyServer server; + private int port; + + /** + * Starts the local DynamoDb service and runs it in the background. + */ + void start() { + port = getFreePort(); + String portString = Integer.toString(port); + + try { + server = createServer(portString); + server.start(); + } catch (Exception e) { + throw propagate(e); + } + } + + /** + * Creates a standard AWS v2 SDK client pointing to the local DynamoDb instance + * @return A DynamoDbClient pointing to the local DynamoDb instance + */ + DynamoDbClient createClient() { + String endpoint = String.format("http://localhost:%d", port); + return DynamoDbClient.builder() + .endpointOverride(URI.create(endpoint)) + // The region is meaningless for local DynamoDb but required for client builder validation + .region(Region.US_EAST_1) + .credentialsProvider(StaticCredentialsProvider.create( + AwsBasicCredentials.create("dummy-key", "dummy-secret"))) + .overrideConfiguration(o -> o.addExecutionInterceptor(new VerifyUserAgentInterceptor())) + .build(); + } + + DynamoDbAsyncClient createAsyncClient() { + String endpoint = String.format("http://localhost:%d", port); + return DynamoDbAsyncClient.builder() + .endpointOverride(URI.create(endpoint)) + .region(Region.US_EAST_1) + .credentialsProvider(StaticCredentialsProvider.create( + AwsBasicCredentials.create("dummy-key", "dummy-secret"))) + .overrideConfiguration(o -> o.addExecutionInterceptor(new VerifyUserAgentInterceptor())) + .build(); + } + + /** + * Stops the local DynamoDb service and frees up resources it is using.
+ */ + void stop() { + try { + server.stop(); + } catch (Exception e) { + throw propagate(e); + } + } + + private DynamoDBProxyServer createServer(String portString) throws Exception { + return ServerRunner.createServerFromCommandLineArgs( + new String[]{ + "-inMemory", + "-port", portString + }); + } + + private int getFreePort() { + try { + ServerSocket socket = new ServerSocket(0); + int port = socket.getLocalPort(); + socket.close(); + return port; + } catch (IOException ioe) { + throw propagate(ioe); + } + } + + private static RuntimeException propagate(Exception e) { + if (e instanceof RuntimeException) { + throw (RuntimeException)e; + } + throw new RuntimeException(e); + } + + private static class VerifyUserAgentInterceptor implements ExecutionInterceptor { + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + Optional headers = context.httpRequest().firstMatchingHeader("User-agent"); + assertThat(headers).isPresent(); + assertThat(headers.get()).contains("hll/ddb-enh"); + } + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbAsyncTestBase.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbAsyncTestBase.java new file mode 100644 index 000000000000..1d25b1ee8329 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbAsyncTestBase.java @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
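The Javadoc above recommends keeping a single server instance for an entire test run; a minimal sketch of that lifecycle with JUnit 4 follows. The test class name is illustrative (not part of this diff), and it would have to live in the same package because LocalDynamoDb is package-private.
    public class MyLocalDynamoDbTest {
        private static final LocalDynamoDb LOCAL_DYNAMO_DB = new LocalDynamoDb();
        private static DynamoDbClient client;

        @BeforeClass
        public static void startLocalDynamoDb() {
            LOCAL_DYNAMO_DB.start();                  // in-memory server on a free port
            client = LOCAL_DYNAMO_DB.createClient();  // SDK v2 client pointed at that port
        }

        @AfterClass
        public static void stopLocalDynamoDb() {
            LOCAL_DYNAMO_DB.stop();                   // frees the port; all data is discarded
        }
    }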
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.*; + +import java.util.List; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; + +public class LocalDynamoDbAsyncTestBase extends LocalDynamoDbTestBase { + private DynamoDbAsyncClient dynamoDbAsyncClient = localDynamoDb().createAsyncClient(); + + protected DynamoDbAsyncClient getDynamoDbAsyncClient() { + return dynamoDbAsyncClient; + } + + public static <T> List<T> drainPublisher(SdkPublisher<T> publisher, int expectedNumberOfResults) { + BufferingSubscriber<T> subscriber = new BufferingSubscriber<>(); + publisher.subscribe(subscriber); + subscriber.waitForCompletion(1000L); + + assertThat(subscriber.isCompleted(), is(true)); + assertThat(subscriber.bufferedError(), is(nullValue())); + assertThat(subscriber.bufferedItems().size(), is(expectedNumberOfResults)); + + return subscriber.bufferedItems(); + } + + public static <T> List<T> drainPublisherToError(SdkPublisher<T> publisher, + int expectedNumberOfResults, + Class<?> expectedError) { + BufferingSubscriber<T> subscriber = new BufferingSubscriber<>(); + publisher.subscribe(subscriber); + subscriber.waitForCompletion(1000L); + + assertThat(subscriber.isCompleted(), is(false)); + assertThat(subscriber.bufferedError(), instanceOf(expectedError)); + assertThat(subscriber.bufferedItems().size(), is(expectedNumberOfResults)); + + return subscriber.bufferedItems(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbSyncTestBase.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbSyncTestBase.java new file mode 100644 index 000000000000..294ea13f5788 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbSyncTestBase.java @@ -0,0 +1,26 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License.
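A minimal sketch of how drainPublisher is intended to be used from an async functional test, assuming a publisher of pages such as the one returned by an async index scan; keysOnlyMappedAsyncIndex is an assumed handle, not part of this diff.
    // Sketch: drain an async scan into its pages and assert on the collected result.
    SdkPublisher<Page<Record>> publisher = keysOnlyMappedAsyncIndex.scan();  // assumed async index handle
    List<Page<Record>> pages = drainPublisher(publisher, 1);                 // expect a single page
    assertThat(pages.get(0).items().size(), is(10));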
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +public class LocalDynamoDbSyncTestBase extends LocalDynamoDbTestBase { + private DynamoDbClient dynamoDbClient = localDynamoDb().createClient(); + + protected DynamoDbClient getDynamoDbClient() { + return dynamoDbClient; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbTestBase.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbTestBase.java new file mode 100644 index 000000000000..edce69d31825 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbTestBase.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import java.util.UUID; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; + +public class LocalDynamoDbTestBase { + private static final LocalDynamoDb localDynamoDb = new LocalDynamoDb(); + private static final ProvisionedThroughput DEFAULT_PROVISIONED_THROUGHPUT = + ProvisionedThroughput.builder() + .readCapacityUnits(50L) + .writeCapacityUnits(50L) + .build(); + + private String uniqueTableSuffix = UUID.randomUUID().toString(); + + @BeforeClass + public static void initializeLocalDynamoDb() { + localDynamoDb.start(); + } + + @AfterClass + public static void stopLocalDynamoDb() { + localDynamoDb.stop(); + } + + protected static LocalDynamoDb localDynamoDb() { + return localDynamoDb; + } + + protected String getConcreteTableName(String logicalTableName) { + return logicalTableName + "_" + uniqueTableSuffix; + + } + + protected ProvisionedThroughput getDefaultProvisionedThroughput() { + return DEFAULT_PROVISIONED_THROUGHPUT; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/TransactGetItemsTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/TransactGetItemsTest.java new file mode 100644 index 000000000000..d5a12246dde2 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/TransactGetItemsTest.java @@ -0,0 +1,190 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.Document; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactGetItemsEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class TransactGetItemsTest extends LocalDynamoDbSyncTestBase { + private static class Record1 { + private Integer id; + + private Integer getId() { + return id; + } + + private Record1 setId(Integer id) { + this.id = id; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record1 record1 = (Record1) o; + return Objects.equals(id, record1.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + } + + private static class Record2 { + private Integer id; + + private Integer getId() { + return id; + } + + private Record2 setId(Integer id) { + this.id = id; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record2 record2 = (Record2) o; + return Objects.equals(id, record2.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + } + + private static final TableSchema TABLE_SCHEMA_1 = + StaticTableSchema.builder(Record1.class) + .newItemSupplier(Record1::new) + .addAttribute(Integer.class, a -> a.name("id_1") + .getter(Record1::getId) + .setter(Record1::setId) + .tags(primaryPartitionKey())) + .build(); + + private static final TableSchema TABLE_SCHEMA_2 = + StaticTableSchema.builder(Record2.class) + .newItemSupplier(Record2::new) + .addAttribute(Integer.class, a -> a.name("id_2") + .getter(Record2::getId) + .setter(Record2::setId) + .tags(primaryPartitionKey())) + .build(); + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable1 = enhancedClient.table(getConcreteTableName("table-name-1"), TABLE_SCHEMA_1); + private DynamoDbTable mappedTable2 = enhancedClient.table(getConcreteTableName("table-name-2"), TABLE_SCHEMA_2); + + private static final List RECORDS_1 = + IntStream.range(0, 2) + .mapToObj(i -> new Record1().setId(i)) + .collect(Collectors.toList()); + + private static final List RECORDS_2 = + IntStream.range(0, 2) + .mapToObj(i -> new Record2().setId(i)) + .collect(Collectors.toList()); + + @Before + public void createTable() { + mappedTable1.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + 
mappedTable2.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-1")) + .build()); + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-2")) + .build()); + } + + private void insertRecords() { + RECORDS_1.forEach(record -> mappedTable1.putItem(r -> r.item(record))); + RECORDS_2.forEach(record -> mappedTable2.putItem(r -> r.item(record))); + } + + @Test + public void getRecordsFromMultipleTables() { + insertRecords(); + + TransactGetItemsEnhancedRequest transactGetItemsEnhancedRequest = + TransactGetItemsEnhancedRequest.builder() + .addGetItem(mappedTable1, Key.builder().partitionValue(0).build()) + .addGetItem(mappedTable2, Key.builder().partitionValue(0).build()) + .addGetItem(mappedTable2, Key.builder().partitionValue(1).build()) + .addGetItem(mappedTable1, Key.builder().partitionValue(1).build()) + .build(); + + List results = enhancedClient.transactGetItems(transactGetItemsEnhancedRequest); + + assertThat(results.size(), is(4)); + assertThat(results.get(0).getItem(mappedTable1), is(RECORDS_1.get(0))); + assertThat(results.get(1).getItem(mappedTable2), is(RECORDS_2.get(0))); + assertThat(results.get(2).getItem(mappedTable2), is(RECORDS_2.get(1))); + assertThat(results.get(3).getItem(mappedTable1), is(RECORDS_1.get(1))); + } + + @Test + public void notFoundRecordReturnsNull() { + insertRecords(); + + TransactGetItemsEnhancedRequest transactGetItemsEnhancedRequest = + TransactGetItemsEnhancedRequest.builder() + .addGetItem(mappedTable1, Key.builder().partitionValue(0).build()) + .addGetItem(mappedTable2, Key.builder().partitionValue(0).build()) + .addGetItem(mappedTable2, Key.builder().partitionValue(5).build()) + .addGetItem(mappedTable1, Key.builder().partitionValue(1).build()) + .build(); + + List results = enhancedClient.transactGetItems(transactGetItemsEnhancedRequest); + + assertThat(results.size(), is(4)); + assertThat(results.get(0).getItem(mappedTable1), is(RECORDS_1.get(0))); + assertThat(results.get(1).getItem(mappedTable2), is(RECORDS_2.get(0))); + assertThat(results.get(2).getItem(mappedTable2), is(nullValue())); + assertThat(results.get(3).getItem(mappedTable1), is(RECORDS_1.get(1))); + } +} + diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/TransactWriteItemsTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/TransactWriteItemsTest.java new file mode 100644 index 000000000000..b4eeec90efb1 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/TransactWriteItemsTest.java @@ -0,0 +1,369 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.fail; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.ConditionCheck; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactWriteItemsEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; +import software.amazon.awssdk.services.dynamodb.model.TransactionCanceledException; + +public class TransactWriteItemsTest extends LocalDynamoDbSyncTestBase { + private static class Record1 { + private Integer id; + private String attribute; + + private Integer getId() { + return id; + } + + private Record1 setId(Integer id) { + this.id = id; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record1 setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record1 record1 = (Record1) o; + return Objects.equals(id, record1.id) && + Objects.equals(attribute, record1.attribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, attribute); + } + } + + private static class Record2 { + private Integer id; + private String attribute; + + private Integer getId() { + return id; + } + + private Record2 setId(Integer id) { + this.id = id; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record2 setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record2 record2 = (Record2) o; + return Objects.equals(id, record2.id) && + Objects.equals(attribute, record2.attribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, attribute); + } + } + + private static final TableSchema TABLE_SCHEMA_1 = + StaticTableSchema.builder(Record1.class) + .newItemSupplier(Record1::new) + .addAttribute(Integer.class, a -> a.name("id_1") + .getter(Record1::getId) + .setter(Record1::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("attribute") + .getter(Record1::getAttribute) + .setter(Record1::setAttribute)) + .build(); + + private static final TableSchema TABLE_SCHEMA_2 = + StaticTableSchema.builder(Record2.class) + .newItemSupplier(Record2::new) + .addAttribute(Integer.class, a -> a.name("id_2") + .getter(Record2::getId) + 
.setter(Record2::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("attribute") + .getter(Record2::getAttribute) + .setter(Record2::setAttribute)) + .build(); + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private DynamoDbTable mappedTable1 = enhancedClient.table(getConcreteTableName("table-name-1"), TABLE_SCHEMA_1); + private DynamoDbTable mappedTable2 = enhancedClient.table(getConcreteTableName("table-name-2"), TABLE_SCHEMA_2); + + private static final List RECORDS_1 = + IntStream.range(0, 2) + .mapToObj(i -> new Record1().setId(i).setAttribute(Integer.toString(i))) + .collect(Collectors.toList()); + + private static final List RECORDS_2 = + IntStream.range(0, 2) + .mapToObj(i -> new Record2().setId(i).setAttribute(Integer.toString(i))) + .collect(Collectors.toList()); + + @Before + public void createTable() { + mappedTable1.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + mappedTable2.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-1")) + .build()); + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name-2")) + .build()); + } + + @Test + public void singlePut() { + enhancedClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(mappedTable1, RECORDS_1.get(0)) + .build()); + + Record1 record = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))); + assertThat(record, is(RECORDS_1.get(0))); + } + + @Test + public void multiplePut() { + enhancedClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(mappedTable1, RECORDS_1.get(0)) + .addPutItem(mappedTable2, RECORDS_2.get(0)) + .build()); + + Record1 record1 = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))); + Record2 record2 = mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))); + assertThat(record1, is(RECORDS_1.get(0))); + assertThat(record2, is(RECORDS_2.get(0))); + } + + @Test + public void singleUpdate() { + enhancedClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addUpdateItem(mappedTable1, RECORDS_1.get(0)) + .build()); + + Record1 record = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))); + assertThat(record, is(RECORDS_1.get(0))); + } + + @Test + public void multipleUpdate() { + enhancedClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addUpdateItem(mappedTable1, RECORDS_1.get(0)) + .addUpdateItem(mappedTable2, RECORDS_2.get(0)) + .build()); + + Record1 record1 = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))); + Record2 record2 = mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))); + assertThat(record1, is(RECORDS_1.get(0))); + assertThat(record2, is(RECORDS_2.get(0))); + } + + @Test + public void singleDelete() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))); + + enhancedClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addDeleteItem(mappedTable1, RECORDS_1.get(0)) + .build()); + + Record1 record = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))); + assertThat(record, is(nullValue())); + } + + @Test + public void multipleDelete() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))); + mappedTable2.putItem(r -> 
r.item(RECORDS_2.get(0))); + + enhancedClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addDeleteItem(mappedTable1, RECORDS_1.get(0)) + .addDeleteItem(mappedTable2, RECORDS_2.get(0)) + .build()); + + Record1 record1 = mappedTable1.getItem(r -> r.key(k -> k.partitionValue(0))); + Record2 record2 = mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))); + assertThat(record1, is(nullValue())); + assertThat(record2, is(nullValue())); + } + + @Test + public void singleConditionCheck() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))); + + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("0"))) + .expressionNames(singletonMap("#attribute", "attribute")) + .build(); + Key key = Key.builder().partitionValue(0).build(); + + enhancedClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addConditionCheck(mappedTable1, ConditionCheck.builder() + .key(key) + .conditionExpression(conditionExpression) + .build()) + .build()); + } + + @Test + public void multiConditionCheck() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))); + + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("0"))) + .expressionNames(singletonMap("#attribute", "attribute")) + .build(); + + Key key1 = Key.builder().partitionValue(0).build(); + Key key2 = Key.builder().partitionValue(0).build(); + + enhancedClient.transactWriteItems( + TransactWriteItemsEnhancedRequest.builder() + .addConditionCheck(mappedTable1, ConditionCheck.builder() + .key(key1) + .conditionExpression(conditionExpression) + .build()) + .addConditionCheck(mappedTable2, ConditionCheck.builder() + .key(key2) + .conditionExpression(conditionExpression) + .build()) + .build()); + } + + @Test + public void mixedCommands() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))); + + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("0"))) + .expressionNames(singletonMap("#attribute", "attribute")) + .build(); + + Key key = Key.builder().partitionValue(0).build(); + + TransactWriteItemsEnhancedRequest transactWriteItemsEnhancedRequest = + TransactWriteItemsEnhancedRequest.builder() + .addConditionCheck(mappedTable1, ConditionCheck.builder() + .key(key) + .conditionExpression(conditionExpression) + .build()) + .addPutItem(mappedTable2, RECORDS_2.get(1)) + .addUpdateItem(mappedTable1,RECORDS_1.get(1)) + .addDeleteItem(mappedTable2, RECORDS_2.get(0)) + .build(); + + enhancedClient.transactWriteItems(transactWriteItemsEnhancedRequest); + + assertThat(mappedTable1.getItem(r -> r.key(k -> k.partitionValue(1))), is(RECORDS_1.get(1))); + assertThat(mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))), is(nullValue())); + assertThat(mappedTable2.getItem(r -> r.key(k -> k.partitionValue(1))), is(RECORDS_2.get(1))); + } + + @Test + public void mixedCommands_conditionCheckFailsTransaction() { + mappedTable1.putItem(r -> r.item(RECORDS_1.get(0))); + mappedTable2.putItem(r -> r.item(RECORDS_2.get(0))); + + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("1"))) + 
.expressionNames(singletonMap("#attribute", "attribute")) + .build(); + + Key key = Key.builder().partitionValue(0).build(); + + TransactWriteItemsEnhancedRequest transactWriteItemsEnhancedRequest = + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(mappedTable2, RECORDS_2.get(1)) + .addUpdateItem(mappedTable1, RECORDS_1.get(1)) + .addConditionCheck(mappedTable1, ConditionCheck.builder() + .key(key) + .conditionExpression(conditionExpression) + .build()) + .addDeleteItem(mappedTable2, RECORDS_2.get(0)) + .build(); + + try { + enhancedClient.transactWriteItems(transactWriteItemsEnhancedRequest); + fail("Expected TransactionCanceledException to be thrown"); + } catch(TransactionCanceledException ignored) { + } + + assertThat(mappedTable1.getItem(r -> r.key(k -> k.partitionValue(1))), is(nullValue())); + assertThat(mappedTable2.getItem(r -> r.key(k -> k.partitionValue(0))), is(RECORDS_2.get(0))); + assertThat(mappedTable2.getItem(r -> r.key(k -> k.partitionValue(1))), is(nullValue())); + } +} + diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/UpdateBehaviorTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/UpdateBehaviorTest.java new file mode 100644 index 000000000000..cef3796ed539 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/UpdateBehaviorTest.java @@ -0,0 +1,107 @@ +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.RecordWithUpdateBehaviors; + +import java.time.Instant; + +import static org.assertj.core.api.Assertions.assertThat; + +public class UpdateBehaviorTest extends LocalDynamoDbSyncTestBase { + private static final Instant INSTANT_1 = Instant.parse("2020-05-03T10:00:00Z"); + private static final Instant INSTANT_2 = Instant.parse("2020-05-03T10:05:00Z"); + + private static final TableSchema TABLE_SCHEMA = + TableSchema.fromClass(RecordWithUpdateBehaviors.class); + + private final DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + + private final DynamoDbTable mappedTable = + enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(r -> r.tableName(getConcreteTableName("table-name"))); + } + + @Test + public void updateBehaviors_firstUpdate() { + RecordWithUpdateBehaviors record = new RecordWithUpdateBehaviors(); + record.setId("id123"); + record.setCreatedOn(INSTANT_1); + record.setLastUpdatedOn(INSTANT_2); + mappedTable.updateItem(record); + + RecordWithUpdateBehaviors persistedRecord = mappedTable.getItem(record); + assertThat(persistedRecord.getCreatedOn()).isEqualTo(INSTANT_1); + assertThat(persistedRecord.getLastUpdatedOn()).isEqualTo(INSTANT_2); + } + + @Test + public void updateBehaviors_secondUpdate() { + RecordWithUpdateBehaviors record = new RecordWithUpdateBehaviors(); + record.setId("id123"); + 
record.setCreatedOn(INSTANT_1); + record.setLastUpdatedOn(INSTANT_2); + mappedTable.updateItem(record); + + record.setVersion(1L); + record.setCreatedOn(INSTANT_2); + record.setLastUpdatedOn(INSTANT_2); + mappedTable.updateItem(record); + + RecordWithUpdateBehaviors persistedRecord = mappedTable.getItem(record); + assertThat(persistedRecord.getCreatedOn()).isEqualTo(INSTANT_1); + assertThat(persistedRecord.getLastUpdatedOn()).isEqualTo(INSTANT_2); + } + + @Test + public void updateBehaviors_removal() { + RecordWithUpdateBehaviors record = new RecordWithUpdateBehaviors(); + record.setId("id123"); + record.setCreatedOn(INSTANT_1); + record.setLastUpdatedOn(INSTANT_2); + mappedTable.updateItem(record); + + record.setVersion(1L); + record.setCreatedOn(null); + record.setLastUpdatedOn(null); + mappedTable.updateItem(record); + + RecordWithUpdateBehaviors persistedRecord = mappedTable.getItem(record); + assertThat(persistedRecord.getCreatedOn()).isNull(); + assertThat(persistedRecord.getLastUpdatedOn()).isNull(); + } + + @Test + public void updateBehaviors_transactWriteItems_secondUpdate() { + RecordWithUpdateBehaviors record = new RecordWithUpdateBehaviors(); + record.setId("id123"); + record.setCreatedOn(INSTANT_1); + record.setLastUpdatedOn(INSTANT_2); + mappedTable.updateItem(record); + + record.setVersion(1L); + record.setCreatedOn(INSTANT_2); + record.setLastUpdatedOn(INSTANT_2); + enhancedClient.transactWriteItems(r -> r.addUpdateItem(mappedTable, record)); + + RecordWithUpdateBehaviors persistedRecord = mappedTable.getItem(record); + assertThat(persistedRecord.getCreatedOn()).isEqualTo(INSTANT_1); + assertThat(persistedRecord.getLastUpdatedOn()).isEqualTo(INSTANT_2); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/VersionedRecordTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/VersionedRecordTest.java new file mode 100644 index 000000000000..198bb1c20fe0 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/VersionedRecordTest.java @@ -0,0 +1,294 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
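RecordWithUpdateBehaviors itself is not included in this hunk. The behaviour the tests assert (createdOn preserved from the first write, lastUpdatedOn rewritten on every update) matches a bean-mapped class along the following lines; this is a hedged sketch of its likely shape, not the actual model from the repository.
    import java.time.Instant;
    import software.amazon.awssdk.enhanced.dynamodb.mapper.UpdateBehavior;
    import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
    import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
    import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbUpdateBehavior;

    @DynamoDbBean
    public class RecordWithUpdateBehaviors {
        private String id;
        private Instant createdOn;
        private Instant lastUpdatedOn;
        private Long version;

        @DynamoDbPartitionKey
        public String getId() { return id; }
        public void setId(String id) { this.id = id; }

        @DynamoDbUpdateBehavior(UpdateBehavior.WRITE_IF_NOT_EXISTS)  // kept from the first write
        public Instant getCreatedOn() { return createdOn; }
        public void setCreatedOn(Instant createdOn) { this.createdOn = createdOn; }

        @DynamoDbUpdateBehavior(UpdateBehavior.WRITE_ALWAYS)         // refreshed on every update
        public Instant getLastUpdatedOn() { return lastUpdatedOn; }
        public void setLastUpdatedOn(Instant lastUpdatedOn) { this.lastUpdatedOn = lastUpdatedOn; }

        // The tests also call setVersion(1L); the real class presumably tags this
        // as a version attribute for optimistic locking.
        public Long getVersion() { return version; }
        public void setVersion(Long version) { this.version = version; }
    }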
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension.AttributeTags.versionAttribute; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.Objects; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +public class VersionedRecordTest extends LocalDynamoDbSyncTestBase { + private static class Record { + private String id; + private String attribute; + private Integer version; + + private String getId() { + return id; + } + + private Record setId(String id) { + this.id = id; + return this; + } + + private String getAttribute() { + return attribute; + } + + private Record setAttribute(String attribute) { + this.attribute = attribute; + return this; + } + + private Integer getVersion() { + return version; + } + + private Record setVersion(Integer version) { + this.version = version; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Record record = (Record) o; + return Objects.equals(id, record.id) && + Objects.equals(attribute, record.attribute) && + Objects.equals(version, record.version); + } + + @Override + public int hashCode() { + return Objects.hash(id, attribute, version); + } + } + + private static final TableSchema TABLE_SCHEMA = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(String.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("attribute") + .getter(Record::getAttribute) + .setter(Record::setAttribute)) + .addAttribute(Integer.class, a -> a.name("version") + .getter(Record::getVersion) + .setter(Record::setVersion) + .tags(versionAttribute())) + .build(); + + private DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .extensions(VersionedRecordExtension.builder().build()) + .build(); + + private DynamoDbTable mappedTable = enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + } + + @After + public void deleteTable() { + 
getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()); + } + + @Test + public void putNewRecordSetsInitialVersion() { + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id"))); + Record expectedResult = new Record().setId("id").setAttribute("one").setVersion(1); + + assertThat(result, is(expectedResult)); + } + + @Test + public void updateNewRecordSetsInitialVersion() { + Record result = mappedTable.updateItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + + Record expectedResult = new Record().setId("id").setAttribute("one").setVersion(1); + + assertThat(result, is(expectedResult)); + } + + @Test + public void putExistingRecordVersionMatches() { + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one").setVersion(1))); + + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id"))); + Record expectedResult = new Record().setId("id").setAttribute("one").setVersion(2); + assertThat(result, is(expectedResult)); + } + + @Test + public void putExistingRecordVersionMatchesConditionExpressionMatches() { + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + Expression conditionExpression = Expression.builder() + .expression("#k = :v OR #k = :v1") + .putExpressionName("#k", "attribute") + .putExpressionValue(":v", stringValue("wrong")) + .putExpressionValue(":v1", stringValue("one")) + .build(); + + mappedTable.putItem(PutItemEnhancedRequest.builder(Record.class) + .item(new Record().setId("id").setAttribute("one").setVersion(1)) + .conditionExpression(conditionExpression) + .build()); + + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id"))); + Record expectedResult = new Record().setId("id").setAttribute("one").setVersion(2); + assertThat(result, is(expectedResult)); + } + + @Test + public void putExistingRecordVersionDoesNotMatchConditionExpressionMatches() { + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + Expression conditionExpression = Expression.builder() + .expression("#k = :v OR #k = :v1") + .putExpressionName("#k", "attribute") + .putExpressionValue(":v", stringValue("wrong")) + .putExpressionValue(":v1", stringValue("one")) + .build(); + + exception.expect(ConditionalCheckFailedException.class); + mappedTable.putItem(PutItemEnhancedRequest.builder(Record.class) + .item(new Record().setId("id").setAttribute("one").setVersion(2)) + .conditionExpression(conditionExpression) + .build()); + } + + @Test + public void putExistingRecordVersionMatchesConditionExpressionDoesNotMatch() { + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + Expression conditionExpression = Expression.builder() + .expression("#k = :v OR #k = :v1") + .putExpressionName("#k", "attribute") + .putExpressionValue(":v", stringValue("wrong")) + .putExpressionValue(":v1", stringValue("wrong2")) + .build(); + + exception.expect(ConditionalCheckFailedException.class); + mappedTable.putItem(PutItemEnhancedRequest.builder(Record.class) + .item(new Record().setId("id").setAttribute("one").setVersion(1)) + .conditionExpression(conditionExpression) + .build()); + } + + @Test + public void updateExistingRecordVersionMatchesConditionExpressionMatches() { + mappedTable.putItem(r -> r.item(new 
Record().setId("id").setAttribute("one"))); + Expression conditionExpression = Expression.builder() + .expression("#k = :v OR #k = :v1") + .putExpressionName("#k", "attribute") + .putExpressionValue(":v", stringValue("wrong")) + .putExpressionValue(":v1", stringValue("one")) + .build(); + + mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(new Record().setId("id").setAttribute("one").setVersion(1)) + .conditionExpression(conditionExpression) + .build()); + + Record result = mappedTable.getItem(r -> r.key(k -> k.partitionValue("id"))); + Record expectedResult = new Record().setId("id").setAttribute("one").setVersion(2); + assertThat(result, is(expectedResult)); + } + + @Test + public void updateExistingRecordVersionDoesNotMatchConditionExpressionMatches() { + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + Expression conditionExpression = Expression.builder() + .expression("#k = :v OR #k = :v1") + .putExpressionName("#k", "attribute") + .putExpressionValue(":v", stringValue("wrong")) + .putExpressionValue(":v1", stringValue("one")) + .build(); + + exception.expect(ConditionalCheckFailedException.class); + mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(new Record().setId("id").setAttribute("one").setVersion(2)) + .conditionExpression(conditionExpression) + .build()); + } + + @Test + public void updateExistingRecordVersionMatchesConditionExpressionDoesNotMatch() { + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + Expression conditionExpression = Expression.builder() + .expression("#k = :v OR #k = :v1") + .putExpressionName("#k", "attribute") + .putExpressionValue(":v", stringValue("wrong")) + .putExpressionValue(":v1", stringValue("wrong2")) + .build(); + + exception.expect(ConditionalCheckFailedException.class); + mappedTable.updateItem(UpdateItemEnhancedRequest.builder(Record.class) + .item(new Record().setId("id").setAttribute("one").setVersion(1)) + .conditionExpression(conditionExpression) + .build()); + } + + @Test + public void updateExistingRecordVersionMatches() { + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + + Record result = + mappedTable.updateItem(r -> r.item(new Record().setId("id").setAttribute("one").setVersion(1))); + + Record expectedResult = new Record().setId("id").setAttribute("one").setVersion(2); + assertThat(result, is(expectedResult)); + } + + @Test(expected = ConditionalCheckFailedException.class) + public void putNewRecordTwice() { + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + } + + @Test(expected = ConditionalCheckFailedException.class) + public void updateNewRecordTwice() { + mappedTable.updateItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + mappedTable.updateItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + } + + @Test(expected = ConditionalCheckFailedException.class) + public void putRecordWithWrongVersionNumber() { + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one"))); + mappedTable.putItem(r -> r.item(new Record().setId("id").setAttribute("one").setVersion(2))); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnum.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnum.java new file mode 100644 index 000000000000..4e4f04efd0fe --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnum.java @@ -0,0 +1,21 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +public enum FakeEnum { + ONE, + TWO +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumRecord.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumRecord.java new file mode 100644 index 000000000000..35d632c78f24 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumRecord.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class FakeEnumRecord { + private String id; + private FakeEnum enumAttribute; + + @DynamoDbPartitionKey + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public FakeEnum getEnumAttribute() { + return enumAttribute; + } + + public void setEnumAttribute(FakeEnum enumAttribute) { + this.enumAttribute = enumAttribute; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortened.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortened.java new file mode 100644 index 000000000000..a44b95daa996 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortened.java @@ -0,0 +1,20 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +public enum FakeEnumShortened { + ONE, +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortenedRecord.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortenedRecord.java new file mode 100644 index 000000000000..3a369f515090 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortenedRecord.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class FakeEnumShortenedRecord { + private String id; + private FakeEnumShortened enumAttribute; + + @DynamoDbPartitionKey + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public FakeEnumShortened getEnumAttribute() { + return enumAttribute; + } + + public void setEnumAttribute(FakeEnumShortened enumAttribute) { + this.enumAttribute = enumAttribute; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItem.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItem.java new file mode 100644 index 000000000000..246c0fd2a5e7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItem.java @@ -0,0 +1,140 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import static software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension.AttributeTags.versionAttribute; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.Objects; +import java.util.UUID; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +public class FakeItem extends FakeItemAbstractSubclass { + private static final StaticTableSchema FAKE_ITEM_MAPPER = + StaticTableSchema.builder(FakeItem.class) + .newItemSupplier(FakeItem::new) + .flatten(FakeItemComposedClass.getTableSchema(), + FakeItem::getComposedObject, + FakeItem::setComposedObject) + .extend(getSubclassTableSchema()) + .addAttribute(String.class, a -> a.name("id") + .getter(FakeItem::getId) + .setter(FakeItem::setId) + .addTag(primaryPartitionKey())) + .addAttribute(Integer.class, a -> a.name("version") + .getter(FakeItem::getVersion) + .setter(FakeItem::setVersion) + .addTag(versionAttribute())) + .build(); + + private String id; + private Integer version; + private FakeItemComposedClass composedObject; + + public FakeItem() { + } + + public FakeItem(String id, Integer version, FakeItemComposedClass composedObject) { + this.id = id; + this.version = version; + this.composedObject = composedObject; + } + + public static Builder builder() { + return new Builder(); + } + + public static TableSchema getTableSchema() { + return FAKE_ITEM_MAPPER; + } + + public static TableMetadata getTableMetadata() { + return FAKE_ITEM_MAPPER.tableMetadata(); + } + + public static FakeItem createUniqueFakeItem() { + return FakeItem.builder() + .id(UUID.randomUUID().toString()) + .build(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getVersion() { + return version; + } + + public void setVersion(Integer version) { + this.version = version; + } + + public FakeItemComposedClass getComposedObject() { + return composedObject; + } + + public void setComposedObject(FakeItemComposedClass composedObject) { + this.composedObject = composedObject; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (! 
super.equals(o)) return false; + FakeItem fakeItem = (FakeItem) o; + return Objects.equals(id, fakeItem.id) && + Objects.equals(version, fakeItem.version) && + Objects.equals(composedObject, fakeItem.composedObject); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), id, version, composedObject); + } + + public static class Builder { + private String id; + private Integer version; + private FakeItemComposedClass composedObject; + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder version(Integer version) { + this.version = version; + return this; + } + + public Builder composedObject(FakeItemComposedClass composedObject) { + this.composedObject = composedObject; + return this; + } + + public FakeItem build() { + return new FakeItem(id, version, composedObject); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemAbstractSubclass.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemAbstractSubclass.java new file mode 100644 index 000000000000..90d54b91a604 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemAbstractSubclass.java @@ -0,0 +1,76 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +abstract class FakeItemAbstractSubclass extends FakeItemAbstractSubclass2 { + private static final StaticTableSchema FAKE_ITEM_MAPPER = + StaticTableSchema.builder(FakeItemAbstractSubclass.class) + .addAttribute(String.class, + a -> a.name("subclass_attribute") + .getter(FakeItemAbstractSubclass::getSubclassAttribute) + .setter(FakeItemAbstractSubclass::setSubclassAttribute)) + .flatten(FakeItemComposedSubclass.getTableSchema(), + FakeItemAbstractSubclass::getComposedAttribute, + FakeItemAbstractSubclass::setComposedAttribute) + .extend(FakeItemAbstractSubclass2.getSubclass2TableSchema()) + .build(); + + private String subclassAttribute; + + private FakeItemComposedSubclass composedAttribute; + + static StaticTableSchema getSubclassTableSchema() { + return FAKE_ITEM_MAPPER; + } + + FakeItemAbstractSubclass() { + composedAttribute = new FakeItemComposedSubclass(); + } + + public String getSubclassAttribute() { + return subclassAttribute; + } + + public void setSubclassAttribute(String subclassAttribute) { + this.subclassAttribute = subclassAttribute; + } + + public FakeItemComposedSubclass getComposedAttribute() { + return composedAttribute; + } + + public void setComposedAttribute(FakeItemComposedSubclass composedAttribute) { + this.composedAttribute = composedAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (! super.equals(o)) return false; + FakeItemAbstractSubclass that = (FakeItemAbstractSubclass) o; + return Objects.equals(subclassAttribute, that.subclassAttribute) && + Objects.equals(composedAttribute, that.composedAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), subclassAttribute, composedAttribute); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemAbstractSubclass2.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemAbstractSubclass2.java new file mode 100644 index 000000000000..fcca0b922738 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemAbstractSubclass2.java @@ -0,0 +1,75 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +abstract class FakeItemAbstractSubclass2 { + private static final StaticTableSchema FAKE_ITEM_MAPPER = + StaticTableSchema.builder(FakeItemAbstractSubclass2.class) + .addAttribute(String.class, + a -> a.name("abstract_subclass_2") + .getter(FakeItemAbstractSubclass2::getSubclassAttribute2) + .setter(FakeItemAbstractSubclass2::setSubclassAttribute2)) + .flatten(FakeItemComposedSubclass2.getTableSchema(), + FakeItemAbstractSubclass2::getComposedAttribute2, + FakeItemAbstractSubclass2::setComposedAttribute2) + .build(); + + + private String subclassAttribute2; + + private FakeItemComposedSubclass2 composedAttribute2; + + static StaticTableSchema getSubclass2TableSchema() { + return FAKE_ITEM_MAPPER; + } + + FakeItemAbstractSubclass2() { + composedAttribute2 = new FakeItemComposedSubclass2(); + } + + public String getSubclassAttribute2() { + return subclassAttribute2; + } + + public void setSubclassAttribute2(String subclassAttribute2) { + this.subclassAttribute2 = subclassAttribute2; + } + + public FakeItemComposedSubclass2 getComposedAttribute2() { + return composedAttribute2; + } + + public void setComposedAttribute2(FakeItemComposedSubclass2 composedAttribute2) { + this.composedAttribute2 = composedAttribute2; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FakeItemAbstractSubclass2 that = (FakeItemAbstractSubclass2) o; + return Objects.equals(subclassAttribute2, that.subclassAttribute2) && + Objects.equals(composedAttribute2, that.composedAttribute2); + } + + @Override + public int hashCode() { + return Objects.hash(subclassAttribute2, composedAttribute2); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedAbstractSubclass.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedAbstractSubclass.java new file mode 100644 index 000000000000..0510501b2df6 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedAbstractSubclass.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +abstract class FakeItemComposedAbstractSubclass { + private static final StaticTableSchema FAKE_ITEM_MAPPER = + StaticTableSchema.builder(FakeItemComposedAbstractSubclass.class) + .addAttribute(String.class, + a -> a.name("composed_abstract_subclass") + .getter(FakeItemComposedAbstractSubclass::getComposedSubclassAttribute) + .setter(FakeItemComposedAbstractSubclass::setComposedSubclassAttribute)) + .build(); + + private String composedSubclassAttribute; + + static StaticTableSchema getSubclassTableSchema() { + return FAKE_ITEM_MAPPER; + } + + public String getComposedSubclassAttribute() { + return composedSubclassAttribute; + } + + public void setComposedSubclassAttribute(String composedSubclassAttribute) { + this.composedSubclassAttribute = composedSubclassAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FakeItemComposedAbstractSubclass that = (FakeItemComposedAbstractSubclass) o; + return Objects.equals(composedSubclassAttribute, that.composedSubclassAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(composedSubclassAttribute); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedAbstractSubclass2.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedAbstractSubclass2.java new file mode 100644 index 000000000000..9b231839ff38 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedAbstractSubclass2.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +abstract class FakeItemComposedAbstractSubclass2 { + private static final StaticTableSchema FAKE_ITEM_MAPPER = + StaticTableSchema.builder(FakeItemComposedAbstractSubclass2.class) + .addAttribute(String.class, + a -> a.name("composed_abstract_subclass_2") + .getter(FakeItemComposedAbstractSubclass2::getComposedSubclassAttribute2) + .setter(FakeItemComposedAbstractSubclass2::setComposedSubclassAttribute2)) + .build(); + + private String composedSubclassAttribute2; + + static StaticTableSchema getSubclassTableSchema() { + return FAKE_ITEM_MAPPER; + } + + public String getComposedSubclassAttribute2() { + return composedSubclassAttribute2; + } + + public void setComposedSubclassAttribute2(String composedSubclassAttribute2) { + this.composedSubclassAttribute2 = composedSubclassAttribute2; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FakeItemComposedAbstractSubclass2 that = (FakeItemComposedAbstractSubclass2) o; + return Objects.equals(composedSubclassAttribute2, that.composedSubclassAttribute2); + } + + @Override + public int hashCode() { + return Objects.hash(composedSubclassAttribute2); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedClass.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedClass.java new file mode 100644 index 000000000000..789d78ba95b8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedClass.java @@ -0,0 +1,81 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +public class FakeItemComposedClass { + private static final StaticTableSchema ITEM_MAPPER = + StaticTableSchema.builder(FakeItemComposedClass.class) + .addAttribute(String.class, + a -> a.name("composed_attribute") + .getter(FakeItemComposedClass::getComposedAttribute) + .setter(FakeItemComposedClass::setComposedAttribute)) + .newItemSupplier(FakeItemComposedClass::new) + .build(); + + private String composedAttribute; + + public FakeItemComposedClass() { + } + + public FakeItemComposedClass(String composedAttribute) { + this.composedAttribute = composedAttribute; + } + + public static Builder builder() { + return new Builder(); + } + + public static StaticTableSchema getTableSchema() { + return ITEM_MAPPER; + } + + public String getComposedAttribute() { + return composedAttribute; + } + + public void setComposedAttribute(String composedAttribute) { + this.composedAttribute = composedAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FakeItemComposedClass that = (FakeItemComposedClass) o; + return Objects.equals(composedAttribute, that.composedAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(composedAttribute); + } + + public static class Builder { + private String composedAttribute; + + public Builder composedAttribute(String composedAttribute) { + this.composedAttribute = composedAttribute; + return this; + } + + public FakeItemComposedClass build() { + return new FakeItemComposedClass(composedAttribute); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedSubclass.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedSubclass.java new file mode 100644 index 000000000000..5792c8cb8cd7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedSubclass.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +public class FakeItemComposedSubclass extends FakeItemComposedAbstractSubclass { + private static final StaticTableSchema ITEM_MAPPER = + StaticTableSchema.builder(FakeItemComposedSubclass.class) + .newItemSupplier(FakeItemComposedSubclass::new) + .addAttribute(String.class, + a -> a.name("composed_subclass") + .getter(FakeItemComposedSubclass::getComposedAttribute) + .setter(FakeItemComposedSubclass::setComposedAttribute)) + .extend(FakeItemComposedAbstractSubclass.getSubclassTableSchema()) + .build(); + + private String composedAttribute; + + public static StaticTableSchema getTableSchema() { + return ITEM_MAPPER; + } + + public String getComposedAttribute() { + return composedAttribute; + } + + public void setComposedAttribute(String composedAttribute) { + this.composedAttribute = composedAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (! super.equals(o)) return false; + FakeItemComposedSubclass that = (FakeItemComposedSubclass) o; + return Objects.equals(composedAttribute, that.composedAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), composedAttribute); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedSubclass2.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedSubclass2.java new file mode 100644 index 000000000000..22fc8a70705f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemComposedSubclass2.java @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +public class FakeItemComposedSubclass2 extends FakeItemComposedAbstractSubclass2 { + private static final StaticTableSchema ITEM_MAPPER = + StaticTableSchema.builder(FakeItemComposedSubclass2.class) + .newItemSupplier(FakeItemComposedSubclass2::new) + .extend(getSubclassTableSchema()) + .addAttribute(String.class, + a -> a.name("composed_subclass_2") + .getter(FakeItemComposedSubclass2::getComposedAttribute2) + .setter(FakeItemComposedSubclass2::setComposedAttribute2)) + .build(); + + private String composedAttribute2; + + public static StaticTableSchema getTableSchema() { + return ITEM_MAPPER; + } + + public String getComposedAttribute2() { + return composedAttribute2; + } + + public void setComposedAttribute2(String composedAttribute2) { + this.composedAttribute2 = composedAttribute2; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (! super.equals(o)) return false; + FakeItemComposedSubclass2 that = (FakeItemComposedSubclass2) o; + return Objects.equals(composedAttribute2, that.composedAttribute2); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), composedAttribute2); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithBinaryKey.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithBinaryKey.java new file mode 100644 index 000000000000..00ea82cee32a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithBinaryKey.java @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.Objects; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +public class FakeItemWithBinaryKey { + private static final StaticTableSchema FAKE_ITEM_WITH_BINARY_KEY_SCHEMA = + StaticTableSchema.builder(FakeItemWithBinaryKey.class) + .newItemSupplier(FakeItemWithBinaryKey::new) + .addAttribute(SdkBytes.class, a -> a.name("id") + .getter(FakeItemWithBinaryKey::getId) + .setter(FakeItemWithBinaryKey::setId) + .tags(primaryPartitionKey())) + .build(); + + private SdkBytes id; + + public static StaticTableSchema getTableSchema() { + return FAKE_ITEM_WITH_BINARY_KEY_SCHEMA; + } + + public SdkBytes getId() { + return id; + } + + public void setId(SdkBytes id) { + this.id = id; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FakeItemWithBinaryKey that = (FakeItemWithBinaryKey) o; + return Objects.equals(id, that.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithIndices.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithIndices.java new file mode 100644 index 000000000000..cda40aa312ec --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithIndices.java @@ -0,0 +1,163 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.secondarySortKey; + +import java.util.UUID; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +public class FakeItemWithIndices { + private static final StaticTableSchema FAKE_ITEM_MAPPER = + StaticTableSchema.builder(FakeItemWithIndices.class) + .newItemSupplier(FakeItemWithIndices::new) + .addAttribute(String.class, a -> a.name("id") + .getter(FakeItemWithIndices::getId) + .setter(FakeItemWithIndices::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("sort") + .getter(FakeItemWithIndices::getSort) + .setter(FakeItemWithIndices::setSort) + .tags(primarySortKey())) + .addAttribute(String.class, a -> a.name("gsi_id") + .getter(FakeItemWithIndices::getGsiId) + .setter(FakeItemWithIndices::setGsiId) + .tags(secondaryPartitionKey("gsi_1"), secondaryPartitionKey("gsi_2"))) + .addAttribute(String.class, a -> a.name("gsi_sort") + .getter(FakeItemWithIndices::getGsiSort) + .setter(FakeItemWithIndices::setGsiSort) + .tags(secondarySortKey("gsi_1"))) + .addAttribute(String.class, a -> a.name("lsi_sort") + .getter(FakeItemWithIndices::getLsiSort) + .setter(FakeItemWithIndices::setLsiSort) + .tags(secondarySortKey("lsi_1"))) + .build(); + + private String id; + private String sort; + private String gsiId; + private String gsiSort; + private String lsiSort; + + public FakeItemWithIndices() { + } + + public FakeItemWithIndices(String id, String sort, String gsiId, String gsiSort, String lsiSort) { + this.id = id; + this.sort = sort; + this.gsiId = gsiId; + this.gsiSort = gsiSort; + this.lsiSort = lsiSort; + } + + public static Builder builder() { + return new Builder(); + } + + public static StaticTableSchema getTableSchema() { + return FAKE_ITEM_MAPPER; + } + + public static FakeItemWithIndices createUniqueFakeItemWithIndices() { + return FakeItemWithIndices.builder() + .id(UUID.randomUUID().toString()) + .sort(UUID.randomUUID().toString()) + .gsiId(UUID.randomUUID().toString()) + .gsiSort(UUID.randomUUID().toString()) + .lsiSort(UUID.randomUUID().toString()) + .build(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getSort() { + return sort; + } + + public void setSort(String sort) { + this.sort = sort; + } + + public String getGsiId() { + return gsiId; + } + + public void setGsiId(String gsiId) { + this.gsiId = gsiId; + } + + public String getGsiSort() { + return gsiSort; + } + + public void setGsiSort(String gsiSort) { + this.gsiSort = gsiSort; + } + + public String getLsiSort() { + return lsiSort; + } + + public void setLsiSort(String lsiSort) { + this.lsiSort = lsiSort; + } + + public static class Builder { + private String id; + private String sort; + private String gsiId; + private String gsiSort; + private String lsiSort; + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder sort(String sort) { + this.sort = sort; + return this; + } + + public Builder gsiId(String gsiId) { + this.gsiId = gsiId; + return this; + } + + public Builder gsiSort(String gsiSort) { + 
this.gsiSort = gsiSort; + return this; + } + + public Builder lsiSort(String lsiSort) { + this.lsiSort = lsiSort; + return this; + } + + public FakeItemWithIndices build() { + return new FakeItemWithIndices(id, sort, gsiId, gsiSort, lsiSort); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithNumericSort.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithNumericSort.java new file mode 100644 index 000000000000..5a51ac1b921b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithNumericSort.java @@ -0,0 +1,101 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; + +import java.util.Random; +import java.util.UUID; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +public class FakeItemWithNumericSort { + private static final Random RANDOM = new Random(); + + private static final StaticTableSchema FAKE_ITEM_MAPPER = + StaticTableSchema.builder(FakeItemWithNumericSort.class) + .newItemSupplier(FakeItemWithNumericSort::new) + .addAttribute(String.class, a -> a.name("id") + .getter(FakeItemWithNumericSort::getId) + .setter(FakeItemWithNumericSort::setId) + .addTag(primaryPartitionKey())) + .addAttribute(Integer.class, a -> a.name("sort") + .getter(FakeItemWithNumericSort::getSort) + .setter(FakeItemWithNumericSort::setSort) + .addTag(primarySortKey())) + .build(); + + private String id; + private Integer sort; + + public FakeItemWithNumericSort() { + } + + public FakeItemWithNumericSort(String id, Integer sort) { + this.id = id; + this.sort = sort; + } + + public static Builder builder() { + return new Builder(); + } + + public static StaticTableSchema getTableSchema() { + return FAKE_ITEM_MAPPER; + } + + public static FakeItemWithNumericSort createUniqueFakeItemWithSort() { + return FakeItemWithNumericSort.builder() + .id(UUID.randomUUID().toString()) + .sort(RANDOM.nextInt()) + .build(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getSort() { + return sort; + } + + public void setSort(Integer sort) { + this.sort = sort; + } + + public static class Builder { + private String id; + private Integer sort; + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder sort(Integer sort) { + this.sort = sort; + return this; + } + + public FakeItemWithNumericSort build() { + return new FakeItemWithNumericSort(id, sort); + } + } +} diff --git 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithSort.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithSort.java new file mode 100644 index 000000000000..caccc95aa715 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeItemWithSort.java @@ -0,0 +1,168 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primarySortKey; + +import java.util.Objects; +import java.util.UUID; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + +public class FakeItemWithSort { + private static final StaticTableSchema FAKE_ITEM_MAPPER = + StaticTableSchema.builder(FakeItemWithSort.class) + .newItemSupplier(FakeItemWithSort::new) + .addAttribute(String.class, a -> a.name("id") + .getter(FakeItemWithSort::getId) + .setter(FakeItemWithSort::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("sort") + .getter(FakeItemWithSort::getSort) + .setter(FakeItemWithSort::setSort) + .tags(primarySortKey())) + .addAttribute(String.class, a -> a.name("other_attribute_1") + .getter(FakeItemWithSort::getOtherAttribute1) + .setter(FakeItemWithSort::setOtherAttribute1)) + .addAttribute(String.class, a -> a.name("other_attribute_2") + .getter(FakeItemWithSort::getOtherAttribute2) + .setter(FakeItemWithSort::setOtherAttribute2)) + .build(); + + private String id; + private String sort; + private String otherAttribute1; + private String otherAttribute2; + + public FakeItemWithSort() { + } + + public FakeItemWithSort(String id, String sort, String otherAttribute1, String otherAttribute2) { + this.id = id; + this.sort = sort; + this.otherAttribute1 = otherAttribute1; + this.otherAttribute2 = otherAttribute2; + } + + public static Builder builder() { + return new Builder(); + } + + public static StaticTableSchema getTableSchema() { + return FAKE_ITEM_MAPPER; + } + + public static TableMetadata getTableMetadata() { + return FAKE_ITEM_MAPPER.tableMetadata(); + } + + public static FakeItemWithSort createUniqueFakeItemWithSort() { + return FakeItemWithSort.builder() + .id(UUID.randomUUID().toString()) + .sort(UUID.randomUUID().toString()) + .build(); + } + + public static FakeItemWithSort createUniqueFakeItemWithoutSort() { + return FakeItemWithSort.builder() + .id(UUID.randomUUID().toString()) + .build(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getSort() { + return sort; + } + + public void setSort(String sort) { + 
this.sort = sort; + } + + public String getOtherAttribute1() { + return otherAttribute1; + } + + public void setOtherAttribute1(String otherAttribute1) { + this.otherAttribute1 = otherAttribute1; + } + + public String getOtherAttribute2() { + return otherAttribute2; + } + + public void setOtherAttribute2(String otherAttribute2) { + this.otherAttribute2 = otherAttribute2; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FakeItemWithSort that = (FakeItemWithSort) o; + return Objects.equals(id, that.id) && + Objects.equals(sort, that.sort) && + Objects.equals(otherAttribute1, that.otherAttribute1) && + Objects.equals(otherAttribute2, that.otherAttribute2); + } + + @Override + public int hashCode() { + return Objects.hash(id, sort, otherAttribute1, otherAttribute2); + } + + public static class Builder { + private String id; + private String sort; + private String otherAttribute1; + private String otherAttribute2; + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder sort(String sort) { + this.sort = sort; + return this; + } + + public Builder otherAttribute1(String otherAttribute1) { + this.otherAttribute1 = otherAttribute1; + return this; + } + + public Builder otherAttribute2(String otherAttribute2) { + this.otherAttribute2 = otherAttribute2; + return this; + } + + public FakeItemWithSort build() { + return new FakeItemWithSort(id, sort, otherAttribute1, otherAttribute2); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/ImmutableFakeItem.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/ImmutableFakeItem.java new file mode 100644 index 000000000000..4e8f7eacdea5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/ImmutableFakeItem.java @@ -0,0 +1,86 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbImmutable(builder = ImmutableFakeItem.Builder.class) +public class ImmutableFakeItem { + private final String id; + private final String attribute; + + private ImmutableFakeItem(Builder b) { + this.id = b.id; + this.attribute = b.attribute; + } + + public static Builder builder() { + return new Builder(); + } + + public String attribute() { + return attribute; + } + + @DynamoDbPartitionKey + public String id() { + return id; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ImmutableFakeItem that = (ImmutableFakeItem) o; + + if (id != null ? !id.equals(that.id) : that.id != null) { + return false; + } + return attribute != null ? attribute.equals(that.attribute) : that.attribute == null; + } + + @Override + public int hashCode() { + int result = id != null ? id.hashCode() : 0; + result = 31 * result + (attribute != null ? attribute.hashCode() : 0); + return result; + } + + public static final class Builder { + private String id; + private String attribute; + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder attribute(String attribute) { + this.attribute = attribute; + return this; + } + + public ImmutableFakeItem build() { + return new ImmutableFakeItem(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/InnerAttribConverter.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/InnerAttribConverter.java new file mode 100755 index 000000000000..a006e0f4bf0f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/InnerAttribConverter.java @@ -0,0 +1,81 @@ +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +import java.util.HashMap; +import java.util.Map; + +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +/** + * Attribute converter that stores an InnerAttributeRecord as a single DynamoDB map attribute and reads it back. + */ +public class InnerAttribConverter<T> implements AttributeConverter<T> { + + private final ObjectMapper objectMapper; + + /** + * This no-args constructor is needed by the DynamoDbConvertedBy annotation. + */ + public InnerAttribConverter() { + this.objectMapper = new ObjectMapper(); + this.objectMapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS); + // add this to preserve the same offset (don't convert to UTC) + this.objectMapper.configure(DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE, false); + } + + @Override + public AttributeValue transformFrom(final T input) { + Map<String, AttributeValue> map = null; + if (input != null) { + map = new HashMap<>(); + InnerAttributeRecord innerAttributeRecord = (InnerAttributeRecord) input; + if (innerAttributeRecord.getAttribOne() != null) { + map.put("attribOne", stringValue(innerAttributeRecord.getAttribOne())); + } + if (innerAttributeRecord.getAttribTwo() != null) { + map.put("attribTwo", stringValue(String.valueOf(innerAttributeRecord.getAttribTwo()))); + } + } + return AttributeValue.builder().m(map).build(); + } + + @Override + public T transformTo(final AttributeValue attributeValue) { + InnerAttributeRecord innerMetadata = new InnerAttributeRecord(); + if (attributeValue.m().get("attribOne") != null) { + innerMetadata.setAttribOne(attributeValue.m().get("attribOne").s()); + } + if (attributeValue.m().get("attribTwo") != null) { + innerMetadata.setAttribTwo(Integer.valueOf(attributeValue.m().get("attribTwo").s())); + } + return (T) innerMetadata; + } + + @Override + public EnhancedType<T> type() { + return (EnhancedType<T>) EnhancedType.of(InnerAttributeRecord.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/InnerAttribConverterProvider.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/InnerAttribConverterProvider.java new file mode 100755 index 000000000000..b38cc7554a37 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/InnerAttribConverterProvider.java @@ -0,0 +1,18 @@ +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + + +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; + +/** + * Test AttributeConverterProvider that supplies an InnerAttribConverter.
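 + * Note that converterFor returns a new InnerAttribConverter regardless of the requested EnhancedType.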
+ */ +public class InnerAttribConverterProvider implements AttributeConverterProvider { + + + @Override + public AttributeConverter converterFor(EnhancedType enhancedType) { + return new InnerAttribConverter(); + } +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/InnerAttributeRecord.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/InnerAttributeRecord.java new file mode 100755 index 000000000000..6edd5112ff94 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/InnerAttributeRecord.java @@ -0,0 +1,34 @@ +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + + +public class InnerAttributeRecord { + private String attribOne; + private Integer attribTwo; + + @DynamoDbPartitionKey + public String getAttribOne() { + return attribOne; + } + + public void setAttribOne(String attribOne) { + this.attribOne = attribOne; + } + + public Integer getAttribTwo() { + return attribTwo; + } + + public void setAttribTwo(Integer attribTwo) { + this.attribTwo = attribTwo; + } + + @Override + public String toString() { + return "InnerAttributeRecord{" + + "attribOne='" + attribOne + '\'' + + ", attribTwo=" + attribTwo + + '}'; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/NestedTestRecord.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/NestedTestRecord.java new file mode 100755 index 000000000000..b1122efbc0f0 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/NestedTestRecord.java @@ -0,0 +1,62 @@ +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.*; + + +@DynamoDbBean +public class NestedTestRecord { + private String outerAttribOne; + private Integer sort; + private InnerAttributeRecord innerAttributeRecord; + + private String dotVariable; + + + @DynamoDbPartitionKey + public String getOuterAttribOne() { + return outerAttribOne; + } + + public void setOuterAttribOne(String outerAttribOne) { + this.outerAttribOne = outerAttribOne; + } + + @DynamoDbSortKey + public Integer getSort() { + return sort; + } + + public void setSort(Integer sort) { + this.sort = sort; + } + + + + @DynamoDbConvertedBy(InnerAttribConverter.class) + public InnerAttributeRecord getInnerAttributeRecord() { + return innerAttributeRecord; + } + + public void setInnerAttributeRecord(InnerAttributeRecord innerAttributeRecord) { + this.innerAttributeRecord = innerAttributeRecord; + } + + @DynamoDbAttribute("test.com") + public String getDotVariable() { + return dotVariable; + } + + public void setDotVariable(String dotVariable) { + this.dotVariable = dotVariable; + } + + @Override + public String toString() { + return "NestedTestRecord{" + + "outerAttribOne='" + outerAttribOne + '\'' + + ", sort=" + sort + + ", innerAttributeRecord=" + innerAttributeRecord + + ", dotVariable='" + dotVariable + '\'' + + '}'; + } +} \ No newline at end of file diff --git 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecordWithUpdateBehaviors.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecordWithUpdateBehaviors.java new file mode 100644 index 000000000000..cc6edf4b4a2c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecordWithUpdateBehaviors.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import java.time.Instant; +import software.amazon.awssdk.enhanced.dynamodb.extensions.annotations.DynamoDbVersionAttribute; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbAttribute; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbUpdateBehavior; + +import static software.amazon.awssdk.enhanced.dynamodb.mapper.UpdateBehavior.WRITE_ALWAYS; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.UpdateBehavior.WRITE_IF_NOT_EXISTS; + +@DynamoDbBean +public class RecordWithUpdateBehaviors { + private String id; + private Instant createdOn; + private Instant lastUpdatedOn; + private Long version; + + @DynamoDbPartitionKey + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + @DynamoDbUpdateBehavior(WRITE_IF_NOT_EXISTS) + @DynamoDbAttribute("created-on") // Forces a test on attribute name cleaning + public Instant getCreatedOn() { + return createdOn; + } + + public void setCreatedOn(Instant createdOn) { + this.createdOn = createdOn; + } + + @DynamoDbUpdateBehavior(WRITE_ALWAYS) + public Instant getLastUpdatedOn() { + return lastUpdatedOn; + } + + public void setLastUpdatedOn(Instant lastUpdatedOn) { + this.lastUpdatedOn = lastUpdatedOn; + } + + @DynamoDbVersionAttribute + public Long getVersion() { + return version; + } + + public void setVersion(Long version) { + this.version = version; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecursiveRecordBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecursiveRecordBean.java new file mode 100644 index 000000000000..cbd846684609 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecursiveRecordBean.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import java.util.List; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; + +@DynamoDbBean +public final class RecursiveRecordBean { + private int attribute; + private RecursiveRecordBean recursiveRecordBean; + private RecursiveRecordImmutable recursiveRecordImmutable; + private List recursiveRecordBeanList; + + public int getAttribute() { + return attribute; + } + + public void setAttribute(int attribute) { + this.attribute = attribute; + } + + public RecursiveRecordBean getRecursiveRecordBean() { + return recursiveRecordBean; + } + + public void setRecursiveRecordBean(RecursiveRecordBean recursiveRecordBean) { + this.recursiveRecordBean = recursiveRecordBean; + } + + public RecursiveRecordImmutable getRecursiveRecordImmutable() { + return recursiveRecordImmutable; + } + + public void setRecursiveRecordImmutable(RecursiveRecordImmutable recursiveRecordImmutable) { + this.recursiveRecordImmutable = recursiveRecordImmutable; + } + + public List getRecursiveRecordList() { + return recursiveRecordBeanList; + } + + public void setRecursiveRecordList(List recursiveRecordBeanList) { + this.recursiveRecordBeanList = recursiveRecordBeanList; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecursiveRecordImmutable.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecursiveRecordImmutable.java new file mode 100644 index 000000000000..19d1bda4183f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecursiveRecordImmutable.java @@ -0,0 +1,88 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import java.util.List; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; + +@DynamoDbImmutable(builder = RecursiveRecordImmutable.Builder.class) +public final class RecursiveRecordImmutable { + private final int attribute; + private final RecursiveRecordImmutable recursiveRecordImmutable; + private final RecursiveRecordBean recursiveRecordBean; + private final List recursiveRecordImmutableList; + + private RecursiveRecordImmutable(Builder b) { + this.attribute = b.attribute; + this.recursiveRecordImmutable = b.recursiveRecordImmutable; + this.recursiveRecordBean = b.recursiveRecordBean; + this.recursiveRecordImmutableList = b.recursiveRecordImmutableList; + } + + public int getAttribute() { + return attribute; + } + + public RecursiveRecordImmutable getRecursiveRecordImmutable() { + return recursiveRecordImmutable; + } + + public RecursiveRecordBean getRecursiveRecordBean() { + return recursiveRecordBean; + } + + public List getRecursiveRecordList() { + return recursiveRecordImmutableList; + } + + public static Builder builder() { + return new Builder(); + } + + public static final class Builder { + private int attribute; + private RecursiveRecordImmutable recursiveRecordImmutable; + private RecursiveRecordBean recursiveRecordBean; + private List recursiveRecordImmutableList; + + private Builder() { + } + + public Builder setAttribute(int attribute) { + this.attribute = attribute; + return this; + } + + public Builder setRecursiveRecordImmutable(RecursiveRecordImmutable recursiveRecordImmutable) { + this.recursiveRecordImmutable = recursiveRecordImmutable; + return this; + } + + public Builder setRecursiveRecordBean(RecursiveRecordBean recursiveRecordBean) { + this.recursiveRecordBean = recursiveRecordBean; + return this; + } + + public Builder setRecursiveRecordList(List recursiveRecordImmutableList) { + this.recursiveRecordImmutableList = recursiveRecordImmutableList; + return this; + } + + public RecursiveRecordImmutable build() { + return new RecursiveRecordImmutable(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/ApplyUserAgentInterceptorTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/ApplyUserAgentInterceptorTest.java new file mode 100644 index 000000000000..087171a8d6ea --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/ApplyUserAgentInterceptorTest.java @@ -0,0 +1,70 @@ +/* + * Copyright 2010-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.List; +import java.util.Optional; +import org.junit.Test; +import software.amazon.awssdk.core.RequestOverrideConfiguration; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.util.VersionInfo; +import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; + +public class ApplyUserAgentInterceptorTest { + + private ApplyUserAgentInterceptor interceptor = new ApplyUserAgentInterceptor(); + + @Test + public void ddbRequest_shouldModifyRequest() { + GetItemRequest getItemRequest = GetItemRequest.builder().build(); + SdkRequest sdkRequest = interceptor.modifyRequest(() -> getItemRequest, new ExecutionAttributes()); + + RequestOverrideConfiguration requestOverrideConfiguration = sdkRequest.overrideConfiguration().get(); + assertThat(requestOverrideConfiguration.apiNames() + .stream() + .filter(a -> a.name() + .equals("hll") && + a.version().equals("ddb-enh")).findAny()) + .isPresent(); + } + + @Test + public void otherRequest_shouldNotModifyRequest() { + SdkRequest someOtherRequest = new SdkRequest() { + @Override + public List<SdkField<?>> sdkFields() { + return null; + } + + @Override + public Optional<? extends RequestOverrideConfiguration> overrideConfiguration() { + return Optional.empty(); + } + + @Override + public Builder toBuilder() { + return null; + } + }; + SdkRequest sdkRequest = interceptor.modifyRequest(() -> someOtherRequest, new ExecutionAttributes()); + + assertThat(sdkRequest).isEqualTo(someOtherRequest); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocumentTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocumentTest.java new file mode 100644 index 000000000000..547927b09105 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocumentTest.java @@ -0,0 +1,104 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import static java.util.Collections.emptyMap; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.Document; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@RunWith(MockitoJUnitRunner.class) +public class DefaultDocumentTest { + private static final String TABLE_NAME = "table-name"; + + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + private DynamoDbTable createMappedTable(DynamoDbEnhancedClientExtension dynamoDbEnhancedClientExtension) { + return DynamoDbEnhancedClient.builder() + .dynamoDbClient(mockDynamoDbClient) + .extensions(dynamoDbEnhancedClientExtension) + .build() + .table(TABLE_NAME, FakeItem.getTableSchema()); + } + + @Test + public void noExtension_mapsToItem() { + FakeItem fakeItem = FakeItem.createUniqueFakeItem(); + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + Document defaultDocument = DefaultDocument.create(fakeItemMap); + + assertThat(defaultDocument.getItem(createMappedTable(null)), is(fakeItem)); + } + + @Test + public void extension_mapsToItem() { + FakeItem fakeItem = FakeItem.createUniqueFakeItem(); + FakeItem fakeItem2 = FakeItem.createUniqueFakeItem(); + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + Map fakeItemMap2 = FakeItem.getTableSchema().itemToMap(fakeItem2, true); + when(mockDynamoDbEnhancedClientExtension.afterRead(any(DynamoDbExtensionContext.AfterRead.class))) + .thenReturn(ReadModification.builder().transformedItem(fakeItemMap2).build()); + + Document defaultDocument = DefaultDocument.create(fakeItemMap); + + DynamoDbTable mappedTable = createMappedTable(mockDynamoDbEnhancedClientExtension); + assertThat(defaultDocument.getItem(mappedTable), is(fakeItem2)); + verify(mockDynamoDbEnhancedClientExtension).afterRead(DefaultDynamoDbExtensionContext.builder() + .tableMetadata(FakeItem.getTableMetadata()) + .operationContext(DefaultOperationContext.create(mappedTable.tableName())) + .items(fakeItemMap).build() + ); + } + + @Test + public void nullMapReturnsNullItem() { + Document defaultDocument = DefaultDocument.create(null); + + assertThat(defaultDocument.getItem(createMappedTable(null)), is(nullValue())); + } + + @Test + public void emptyMapReturnsNullItem() { + Document defaultDocument = 
DefaultDocument.create(emptyMap()); + + assertThat(defaultDocument.getItem(createMappedTable(null)), is(nullValue())); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtilsTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtilsTest.java new file mode 100644 index 000000000000..adb51a92cdab --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtilsTest.java @@ -0,0 +1,67 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class EnhancedClientUtilsTest { + private static final AttributeValue PARTITION_VALUE = AttributeValue.builder().s("id123").build(); + private static final AttributeValue SORT_VALUE = AttributeValue.builder().s("sort123").build(); + + @Test + public void createKeyFromMap_partitionOnly() { + Map<String, AttributeValue> itemMap = new HashMap<>(); + itemMap.put("id", PARTITION_VALUE); + + Key key = EnhancedClientUtils.createKeyFromMap(itemMap, + FakeItem.getTableSchema(), + TableMetadata.primaryIndexName()); + + assertThat(key.partitionKeyValue()).isEqualTo(PARTITION_VALUE); + assertThat(key.sortKeyValue()).isEmpty(); + } + + @Test + public void createKeyFromMap_partitionAndSort() { + Map<String, AttributeValue> itemMap = new HashMap<>(); + itemMap.put("id", PARTITION_VALUE); + itemMap.put("sort", SORT_VALUE); + + Key key = EnhancedClientUtils.createKeyFromMap(itemMap, + FakeItemWithSort.getTableSchema(), + TableMetadata.primaryIndexName()); + + assertThat(key.partitionKeyValue()).isEqualTo(PARTITION_VALUE); + assertThat(key.sortKeyValue()).isEqualTo(Optional.of(SORT_VALUE)); + } + + @Test + public void cleanAttributeName_cleansSpecialCharacters() { + String result = EnhancedClientUtils.cleanAttributeName("a*b.c-d:e#f"); + + assertThat(result).isEqualTo("a_b_c_d_e_f"); + } +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncIndexTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncIndexTest.java new file mode 100644 index 000000000000..127f6f4a774d --- /dev/null +++
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncIndexTest.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; + +@RunWith(MockitoJUnitRunner.class) +public class DefaultDynamoDbAsyncIndexTest { + @Mock + private DynamoDbAsyncClient mockDynamoDbAsyncClient; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Test + public void keyFrom_secondaryIndex_partitionAndSort() { + FakeItemWithIndices item = FakeItemWithIndices.createUniqueFakeItemWithIndices(); + DefaultDynamoDbAsyncIndex dynamoDbMappedIndex = + new DefaultDynamoDbAsyncIndex<>(mockDynamoDbAsyncClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + "test_table", + "gsi_1"); + + Key key = dynamoDbMappedIndex.keyFrom(item); + + assertThat(key.partitionKeyValue(), is(stringValue(item.getGsiId()))); + assertThat(key.sortKeyValue(), is(Optional.of(stringValue(item.getGsiSort())))); + } + + @Test + public void keyFrom_secondaryIndex_partitionOnly() { + FakeItemWithIndices item = FakeItemWithIndices.createUniqueFakeItemWithIndices(); + DefaultDynamoDbAsyncIndex dynamoDbMappedIndex = + new DefaultDynamoDbAsyncIndex<>(mockDynamoDbAsyncClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + "test_table", + "gsi_2"); + + Key key = dynamoDbMappedIndex.keyFrom(item); + + assertThat(key.partitionKeyValue(), is(stringValue(item.getGsiId()))); + assertThat(key.sortKeyValue(), is(Optional.empty())); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java new file mode 100644 index 000000000000..cbf1b7acba56 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; + +@RunWith(MockitoJUnitRunner.class) +public class DefaultDynamoDbAsyncTableTest { + private static final String TABLE_NAME = "table-name"; + + @Mock + private DynamoDbAsyncClient mockDynamoDbAsyncClient; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Test + public void index_constructsCorrectMappedIndex() { + DefaultDynamoDbAsyncTable<FakeItemWithIndices> dynamoDbMappedTable = + new DefaultDynamoDbAsyncTable<>(mockDynamoDbAsyncClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + TABLE_NAME); + + DefaultDynamoDbAsyncIndex<FakeItemWithIndices> dynamoDbMappedIndex = dynamoDbMappedTable.index("gsi_1"); + + assertThat(dynamoDbMappedIndex.dynamoDbClient(), is(sameInstance(mockDynamoDbAsyncClient))); + assertThat(dynamoDbMappedIndex.mapperExtension(), is(sameInstance(mockDynamoDbEnhancedClientExtension))); + assertThat(dynamoDbMappedIndex.tableSchema(), is(sameInstance(FakeItemWithIndices.getTableSchema()))); + assertThat(dynamoDbMappedIndex.indexName(), is("gsi_1")); + } + + @Test(expected = IllegalArgumentException.class) + public void index_invalidIndex_throwsIllegalArgumentException() { + DefaultDynamoDbAsyncTable<FakeItemWithIndices> dynamoDbMappedTable = + new DefaultDynamoDbAsyncTable<>(mockDynamoDbAsyncClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + TABLE_NAME); + + dynamoDbMappedTable.index("invalid"); + } + + @Test + public void keyFrom_primaryIndex_partitionAndSort() { + FakeItemWithSort item = FakeItemWithSort.createUniqueFakeItemWithSort(); + DefaultDynamoDbAsyncTable<FakeItemWithSort> dynamoDbMappedIndex = + new DefaultDynamoDbAsyncTable<>(mockDynamoDbAsyncClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithSort.getTableSchema(), + "test_table"); + + Key key = dynamoDbMappedIndex.keyFrom(item); + + assertThat(key.partitionKeyValue(), is(stringValue(item.getId()))); + assertThat(key.sortKeyValue(), is(Optional.of(stringValue(item.getSort())))); + } + + @Test + public void keyFrom_primaryIndex_partitionOnly() { + FakeItem item = FakeItem.createUniqueFakeItem(); + DefaultDynamoDbAsyncTable<FakeItem> dynamoDbMappedIndex = + new DefaultDynamoDbAsyncTable<>(mockDynamoDbAsyncClient, +
mockDynamoDbEnhancedClientExtension, + FakeItem.getTableSchema(), + "test_table"); + + Key key = dynamoDbMappedIndex.keyFrom(item); + + assertThat(key.partitionKeyValue(), is(stringValue(item.getId()))); + assertThat(key.sortKeyValue(), is(Optional.empty())); + } + + @Test + public void keyFrom_primaryIndex_partitionAndNullSort() { + FakeItemWithSort item = FakeItemWithSort.createUniqueFakeItemWithoutSort(); + DefaultDynamoDbAsyncTable dynamoDbMappedIndex = + new DefaultDynamoDbAsyncTable<>(mockDynamoDbAsyncClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithSort.getTableSchema(), + "test_table"); + + Key key = dynamoDbMappedIndex.keyFrom(item); + + assertThat(key.partitionKeyValue(), is(stringValue(item.getId()))); + assertThat(key.sortKeyValue(), is(Optional.empty())); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedAsyncClientTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedAsyncClientTest.java new file mode 100644 index 000000000000..c187247af768 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedAsyncClientTest.java @@ -0,0 +1,120 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertNotNull; + +import java.util.Arrays; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.ChainExtension; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; + +@RunWith(MockitoJUnitRunner.class) +public class DefaultDynamoDbEnhancedAsyncClientTest { + @Mock + private DynamoDbAsyncClient mockDynamoDbAsyncClient; + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension2; + @Mock + private TableSchema mockTableSchema; + + private DefaultDynamoDbEnhancedAsyncClient dynamoDbEnhancedAsyncClient; + + @Before + public void initializeClient() { + this.dynamoDbEnhancedAsyncClient = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(mockDynamoDbAsyncClient) + .extensions(mockDynamoDbEnhancedClientExtension) + .build(); + } + + @Test + public void table() { + DefaultDynamoDbAsyncTable mappedTable = dynamoDbEnhancedAsyncClient.table("table-name", mockTableSchema); + + assertThat(mappedTable.dynamoDbClient(), is(mockDynamoDbAsyncClient)); + assertThat(mappedTable.mapperExtension(), is(mockDynamoDbEnhancedClientExtension)); + assertThat(mappedTable.tableSchema(), is(mockTableSchema)); + assertThat(mappedTable.tableName(), is("table-name")); + } + + @Test + public void builder_minimal() { + DefaultDynamoDbEnhancedAsyncClient builtObject = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(mockDynamoDbAsyncClient) + .build(); + + assertThat(builtObject.dynamoDbAsyncClient(), is(mockDynamoDbAsyncClient)); + assertThat(builtObject.mapperExtension(), instanceOf(VersionedRecordExtension.class)); + } + + @Test + public void builder_maximal() { + DefaultDynamoDbEnhancedAsyncClient builtObject = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(mockDynamoDbAsyncClient) + .extensions(mockDynamoDbEnhancedClientExtension) + .build(); + + assertThat(builtObject.dynamoDbAsyncClient(), is(mockDynamoDbAsyncClient)); + assertThat(builtObject.mapperExtension(), is(mockDynamoDbEnhancedClientExtension)); + } + + @Test + public void builder_multipleExtensions_varargs() { + DefaultDynamoDbEnhancedAsyncClient builtObject = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(mockDynamoDbAsyncClient) + .extensions(mockDynamoDbEnhancedClientExtension, mockDynamoDbEnhancedClientExtension2) + .build(); + + assertThat(builtObject.dynamoDbAsyncClient(), is(mockDynamoDbAsyncClient)); + assertThat(builtObject.mapperExtension(), instanceOf(ChainExtension.class)); + } + + @Test + public void builder_multipleExtensions_list() { + DefaultDynamoDbEnhancedAsyncClient builtObject = + DefaultDynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(mockDynamoDbAsyncClient) + .extensions(Arrays.asList(mockDynamoDbEnhancedClientExtension, mockDynamoDbEnhancedClientExtension2)) + .build(); + + 
assertThat(builtObject.dynamoDbAsyncClient(), is(mockDynamoDbAsyncClient)); + assertThat(builtObject.mapperExtension(), instanceOf(ChainExtension.class)); + } + + @Test + public void toBuilder() { + DefaultDynamoDbEnhancedAsyncClient copiedObject = dynamoDbEnhancedAsyncClient.toBuilder().build(); + + assertThat(copiedObject, is(dynamoDbEnhancedAsyncClient)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedClientTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedClientTest.java new file mode 100644 index 000000000000..645148334674 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbEnhancedClientTest.java @@ -0,0 +1,119 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +import java.util.Arrays; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.extensions.VersionedRecordExtension; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.ChainExtension; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +@RunWith(MockitoJUnitRunner.class) +public class DefaultDynamoDbEnhancedClientTest { + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension2; + + @Mock + private TableSchema mockTableSchema; + + private DefaultDynamoDbEnhancedClient dynamoDbEnhancedClient; + + @Before + public void initializeClient() { + this.dynamoDbEnhancedClient = DefaultDynamoDbEnhancedClient.builder() + .dynamoDbClient(mockDynamoDbClient) + .extensions(mockDynamoDbEnhancedClientExtension) + .build(); + } + + @Test + public void table() { + DefaultDynamoDbTable mappedTable = dynamoDbEnhancedClient.table("table-name", mockTableSchema); + + assertThat(mappedTable.dynamoDbClient(), is(mockDynamoDbClient)); + assertThat(mappedTable.mapperExtension(), is(mockDynamoDbEnhancedClientExtension)); + assertThat(mappedTable.tableSchema(), is(mockTableSchema)); + assertThat(mappedTable.tableName(), is("table-name")); + } + + @Test + public void builder_minimal() { + DefaultDynamoDbEnhancedClient builtObject = DefaultDynamoDbEnhancedClient.builder() + .dynamoDbClient(mockDynamoDbClient) + .build(); + + 
assertThat(builtObject.dynamoDbClient(), is(mockDynamoDbClient)); + assertThat(builtObject.mapperExtension(), instanceOf(VersionedRecordExtension.class)); + } + + @Test + public void builder_maximal() { + DefaultDynamoDbEnhancedClient builtObject = DefaultDynamoDbEnhancedClient.builder() + .dynamoDbClient(mockDynamoDbClient) + .extensions(mockDynamoDbEnhancedClientExtension) + .build(); + + assertThat(builtObject.dynamoDbClient(), is(mockDynamoDbClient)); + assertThat(builtObject.mapperExtension(), is(mockDynamoDbEnhancedClientExtension)); + } + + @Test + public void builder_multipleExtensions_varargs() { + DefaultDynamoDbEnhancedClient builtObject = + DefaultDynamoDbEnhancedClient.builder() + .dynamoDbClient(mockDynamoDbClient) + .extensions(mockDynamoDbEnhancedClientExtension, mockDynamoDbEnhancedClientExtension2) + .build(); + + assertThat(builtObject.dynamoDbClient(), is(mockDynamoDbClient)); + assertThat(builtObject.mapperExtension(), instanceOf(ChainExtension.class)); + } + + @Test + public void builder_multipleExtensions_list() { + DefaultDynamoDbEnhancedClient builtObject = + DefaultDynamoDbEnhancedClient.builder() + .dynamoDbClient(mockDynamoDbClient) + .extensions(Arrays.asList(mockDynamoDbEnhancedClientExtension, mockDynamoDbEnhancedClientExtension2)) + .build(); + + assertThat(builtObject.dynamoDbClient(), is(mockDynamoDbClient)); + assertThat(builtObject.mapperExtension(), instanceOf(ChainExtension.class)); + } + + @Test + public void toBuilder() { + DefaultDynamoDbEnhancedClient copiedObject = dynamoDbEnhancedClient.toBuilder().build(); + + assertThat(copiedObject, is(dynamoDbEnhancedClient)); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbIndexTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbIndexTest.java new file mode 100644 index 000000000000..30ee03983858 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbIndexTest.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +@RunWith(MockitoJUnitRunner.class) +public class DefaultDynamoDbIndexTest { + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Test + public void keyFrom_secondaryIndex_partitionAndSort() { + FakeItemWithIndices item = FakeItemWithIndices.createUniqueFakeItemWithIndices(); + DefaultDynamoDbIndex dynamoDbMappedIndex = + new DefaultDynamoDbIndex<>(mockDynamoDbClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + "test_table", + "gsi_1"); + + Key key = dynamoDbMappedIndex.keyFrom(item); + + assertThat(key.partitionKeyValue(), is(stringValue(item.getGsiId()))); + assertThat(key.sortKeyValue(), is(Optional.of(stringValue(item.getGsiSort())))); + } + + @Test + public void keyFrom_secondaryIndex_partitionOnly() { + FakeItemWithIndices item = FakeItemWithIndices.createUniqueFakeItemWithIndices(); + DefaultDynamoDbIndex dynamoDbMappedIndex = + new DefaultDynamoDbIndex<>(mockDynamoDbClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + "test_table", + "gsi_2"); + + Key key = dynamoDbMappedIndex.keyFrom(item); + + assertThat(key.partitionKeyValue(), is(stringValue(item.getGsiId()))); + assertThat(key.sortKeyValue(), is(Optional.empty())); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java new file mode 100644 index 000000000000..b268f2928855 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +@RunWith(MockitoJUnitRunner.class) +public class DefaultDynamoDbTableTest { + private static final String TABLE_NAME = "table-name"; + + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Test + public void index_constructsCorrectMappedIndex() { + DefaultDynamoDbTable dynamoDbMappedTable = + new DefaultDynamoDbTable<>(mockDynamoDbClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + TABLE_NAME); + + DefaultDynamoDbIndex dynamoDbMappedIndex = dynamoDbMappedTable.index("gsi_1"); + + assertThat(dynamoDbMappedIndex.dynamoDbClient(), is(sameInstance(mockDynamoDbClient))); + assertThat(dynamoDbMappedIndex.mapperExtension(), is(sameInstance(mockDynamoDbEnhancedClientExtension))); + assertThat(dynamoDbMappedIndex.tableSchema(), is(sameInstance(FakeItemWithIndices.getTableSchema()))); + assertThat(dynamoDbMappedIndex.indexName(), is("gsi_1")); + } + + @Test(expected = IllegalArgumentException.class) + public void index_invalidIndex_throwsIllegalArgumentException() { + DefaultDynamoDbTable dynamoDbMappedTable = + new DefaultDynamoDbTable<>(mockDynamoDbClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + TABLE_NAME); + + dynamoDbMappedTable.index("invalid"); + } + + @Test + public void keyFrom_primaryIndex_partitionAndSort() { + FakeItemWithSort item = FakeItemWithSort.createUniqueFakeItemWithSort(); + DefaultDynamoDbTable dynamoDbMappedIndex = + new DefaultDynamoDbTable<>(mockDynamoDbClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithSort.getTableSchema(), + "test_table"); + + Key key = dynamoDbMappedIndex.keyFrom(item); + + assertThat(key.partitionKeyValue(), is(stringValue(item.getId()))); + assertThat(key.sortKeyValue(), is(Optional.of(stringValue(item.getSort())))); + } + + @Test + public void keyFrom_primaryIndex_partitionOnly() { + FakeItem item = FakeItem.createUniqueFakeItem(); + DefaultDynamoDbTable dynamoDbMappedIndex = + new DefaultDynamoDbTable<>(mockDynamoDbClient, + mockDynamoDbEnhancedClientExtension, + FakeItem.getTableSchema(), + "test_table"); + + Key key = dynamoDbMappedIndex.keyFrom(item); + + assertThat(key.partitionKeyValue(), is(stringValue(item.getId()))); + assertThat(key.sortKeyValue(), is(Optional.empty())); + } + + @Test + public void keyFrom_primaryIndex_partitionAndNullSort() { + FakeItemWithSort item = FakeItemWithSort.createUniqueFakeItemWithoutSort(); + DefaultDynamoDbTable dynamoDbMappedIndex = + new DefaultDynamoDbTable<>(mockDynamoDbClient, + 
mockDynamoDbEnhancedClientExtension, + FakeItemWithSort.getTableSchema(), + "test_table"); + + Key key = dynamoDbMappedIndex.keyFrom(item); + + assertThat(key.partitionKeyValue(), is(stringValue(item.getId()))); + assertThat(key.sortKeyValue(), is(Optional.empty())); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/ExtensionResolverTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/ExtensionResolverTest.java new file mode 100644 index 000000000000..71f79b12d097 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/ExtensionResolverTest.java @@ -0,0 +1,99 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; + +@RunWith(MockitoJUnitRunner.class) +public class ExtensionResolverTest { + @Mock + private DynamoDbEnhancedClientExtension mockExtension1; + @Mock + private DynamoDbEnhancedClientExtension mockExtension2; + + @Before + public void stubMocks() { + when(mockExtension1.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(WriteModification.builder().build()); + when(mockExtension2.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))).thenReturn(WriteModification.builder().build()); + when(mockExtension1.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(ReadModification.builder().build()); + when(mockExtension2.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn(ReadModification.builder().build()); + } + + @Test + public void resolveExtensions_null() { + assertThat(ExtensionResolver.resolveExtensions(null)).isNull(); + } + + @Test + public void resolveExtensions_empty() { + assertThat(ExtensionResolver.resolveExtensions(emptyList())).isNull(); + } + + @Test + public void resolveExtensions_singleton() { + 
assertThat(ExtensionResolver.resolveExtensions(singletonList(mockExtension1))).isSameAs(mockExtension1); + } + + @Test + public void resolveExtensions_multiple_beforeWrite_correctCallingOrder() { + DynamoDbEnhancedClientExtension extension = + ExtensionResolver.resolveExtensions(Arrays.asList(mockExtension1, mockExtension2)); + + extension.beforeWrite(mock(DynamoDbExtensionContext.BeforeWrite.class)); + InOrder inOrder = Mockito.inOrder(mockExtension1, mockExtension2); + inOrder.verify(mockExtension1).beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class)); + inOrder.verify(mockExtension2).beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class)); + inOrder.verifyNoMoreInteractions(); + } + + @Test + public void resolveExtensions_multiple_afterRead_correctCallingOrder() { + DynamoDbEnhancedClientExtension extension = + ExtensionResolver.resolveExtensions(Arrays.asList(mockExtension1, mockExtension2)); + + extension.afterRead(mock(DynamoDbExtensionContext.AfterRead.class)); + InOrder inOrder = Mockito.inOrder(mockExtension1, mockExtension2); + inOrder.verify(mockExtension2).afterRead(any(DynamoDbExtensionContext.AfterRead.class)); + inOrder.verify(mockExtension1).afterRead(any(DynamoDbExtensionContext.AfterRead.class)); + inOrder.verifyNoMoreInteractions(); + } + + @Test + public void defaultExtensions_isImmutable() { + List defaultExtensions = ExtensionResolver.defaultExtensions(); + assertThatThrownBy(() -> defaultExtensions.add(mockExtension1)).isInstanceOf(UnsupportedOperationException.class); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ChainConverterProviderTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ChainConverterProviderTest.java new file mode 100644 index 000000000000..069ae50a2d4b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ChainConverterProviderTest.java @@ -0,0 +1,75 @@ +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import java.util.List; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; + +@RunWith(MockitoJUnitRunner.class) +public class ChainConverterProviderTest { + + @Mock + private AttributeConverterProvider mockConverterProvider1; + + @Mock + private AttributeConverterProvider mockConverterProvider2; + + @Mock + private AttributeConverter mockAttributeConverter1; + + @Mock + private AttributeConverter mockAttributeConverter2; + + @Test + public void checkSingleProviderChain() { + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1); + List providerQueue = chain.chainedProviders(); + assertThat(providerQueue.size()).isEqualTo(1); + assertThat(providerQueue.get(0)).isEqualTo(mockConverterProvider1); + } + + @Test + public void checkMultipleProviderChain() { + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1, mockConverterProvider2); + List providerQueue = chain.chainedProviders(); + 
assertThat(providerQueue.size()).isEqualTo(2); + assertThat(providerQueue.get(0)).isEqualTo(mockConverterProvider1); + assertThat(providerQueue.get(1)).isEqualTo(mockConverterProvider2); + } + + @Test + public void resolveSingleProviderChain() { + when(mockConverterProvider1.converterFor(any())).thenReturn(mockAttributeConverter1); + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1); + assertThat(chain.converterFor(EnhancedType.of(String.class))).isSameAs(mockAttributeConverter1); + } + + @Test + public void resolveMultipleProviderChain_noMatch() { + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1, mockConverterProvider2); + assertThat(chain.converterFor(EnhancedType.of(String.class))).isNull(); + } + + @Test + public void resolveMultipleProviderChain_matchSecond() { + when(mockConverterProvider2.converterFor(any())).thenReturn(mockAttributeConverter2); + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1, mockConverterProvider2); + assertThat(chain.converterFor(EnhancedType.of(String.class))).isSameAs(mockAttributeConverter2); + } + + @Test + public void resolveMultipleProviderChain_matchFirst() { + when(mockConverterProvider1.converterFor(any())).thenReturn(mockAttributeConverter1); + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1, mockConverterProvider2); + assertThat(chain.converterFor(EnhancedType.of(String.class))).isSameAs(mockAttributeConverter1); + } + +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterProviderResolverTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterProviderResolverTest.java new file mode 100644 index 000000000000..0cd3294c085a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterProviderResolverTest.java @@ -0,0 +1,55 @@ +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider; + +@RunWith(MockitoJUnitRunner.class) +public class ConverterProviderResolverTest { + + @Mock + private AttributeConverterProvider mockConverterProvider1; + + @Mock + private AttributeConverterProvider mockConverterProvider2; + + @Test + public void resolveProviders_null() { + assertThat(ConverterProviderResolver.resolveProviders(null)).isNull(); + } + + @Test + public void resolveProviders_empty() { + assertThat(ConverterProviderResolver.resolveProviders(emptyList())).isNull(); + } + + @Test + public void resolveProviders_singleton() { + assertThat(ConverterProviderResolver.resolveProviders(singletonList(mockConverterProvider1))) + .isSameAs(mockConverterProvider1); + } + + @Test + public void resolveProviders_multiple() { + AttributeConverterProvider result = ConverterProviderResolver.resolveProviders( + Arrays.asList(mockConverterProvider1, mockConverterProvider2)); + 
assertThat(result).isNotNull(); + assertThat(result).isInstanceOf(ChainConverterProvider.class); + } + + @Test + public void defaultProvider_returnsInstance() { + AttributeConverterProvider defaultProvider = ConverterProviderResolver.defaultConverterProvider(); + assertThat(defaultProvider).isNotNull(); + assertThat(defaultProvider).isInstanceOf(DefaultAttributeConverterProvider.class); + } + +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeValueConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeValueConverterTest.java new file mode 100644 index 000000000000..8cb798e71b7e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeValueConverterTest.java @@ -0,0 +1,33 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import org.junit.Test; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class OptionalAttributeValueConverterTest { + private static final OptionalAttributeConverter CONVERTER = + OptionalAttributeConverter.create(StringAttributeConverter.create()); + @Test + public void testTransformTo_nulPropertyIsNull_doesNotThrowNPE() { + AttributeValue av = AttributeValue.builder() + .nul(null) + .s("foo") + .build(); + + CONVERTER.transformTo(av); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutableIntrospectorTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutableIntrospectorTest.java new file mode 100644 index 000000000000..350467f4bc05 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/ImmutableIntrospectorTest.java @@ -0,0 +1,607 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.immutable; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbIgnore; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; + +public class ImmutableIntrospectorTest { + @Rule + public ExpectedException exception = ExpectedException.none(); + + @DynamoDbImmutable(builder = SimpleImmutableMixedStyle.Builder.class) + private static final class SimpleImmutableMixedStyle { + public String getAttribute1() { + throw new UnsupportedOperationException(); + } + + public Integer attribute2() { + throw new UnsupportedOperationException(); + } + + public Boolean isAttribute3() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public void setAttribute1(String attribute1) { + throw new UnsupportedOperationException(); + } + + public Builder attribute2(Integer attribute2) { + throw new UnsupportedOperationException(); + } + + public Void setAttribute3(Boolean attribute3) { + throw new UnsupportedOperationException(); + } + + public SimpleImmutableMixedStyle build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void simpleImmutableMixedStyle() { + ImmutableInfo immutableInfo = + ImmutableIntrospector.getImmutableInfo(SimpleImmutableMixedStyle.class); + + assertThat(immutableInfo.immutableClass()).isSameAs(SimpleImmutableMixedStyle.class); + assertThat(immutableInfo.builderClass()).isSameAs(SimpleImmutableMixedStyle.Builder.class); + assertThat(immutableInfo.buildMethod().getReturnType()).isSameAs(SimpleImmutableMixedStyle.class); + assertThat(immutableInfo.buildMethod().getParameterCount()).isZero(); + assertThat(immutableInfo.staticBuilderMethod()).isNotPresent(); + assertThat(immutableInfo.propertyDescriptors()).hasSize(3); + assertThat(immutableInfo.propertyDescriptors()).anySatisfy(p -> { + assertThat(p.name()).isEqualTo("attribute1"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(String.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(String.class); + }); + assertThat(immutableInfo.propertyDescriptors()).anySatisfy(p -> { + assertThat(p.name()).isEqualTo("attribute2"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(Integer.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(Integer.class); + }); + assertThat(immutableInfo.propertyDescriptors()).anySatisfy(p -> { + assertThat(p.name()).isEqualTo("attribute3"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(Boolean.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(Boolean.class); + }); + } + + @DynamoDbImmutable(builder = SimpleImmutableWithPrimitives.Builder.class) + private static final class SimpleImmutableWithPrimitives { + public int attribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder attribute(int attribute) { + throw new UnsupportedOperationException(); + } + + public SimpleImmutableWithPrimitives build() { + throw new 
UnsupportedOperationException(); + } + } + } + + @Test + public void simpleImmutableWithPrimitives() { + ImmutableInfo immutableInfo = + ImmutableIntrospector.getImmutableInfo(SimpleImmutableWithPrimitives.class); + + assertThat(immutableInfo.immutableClass()).isSameAs(SimpleImmutableWithPrimitives.class); + assertThat(immutableInfo.builderClass()).isSameAs(SimpleImmutableWithPrimitives.Builder.class); + assertThat(immutableInfo.buildMethod().getReturnType()).isSameAs(SimpleImmutableWithPrimitives.class); + assertThat(immutableInfo.buildMethod().getParameterCount()).isZero(); + assertThat(immutableInfo.staticBuilderMethod()).isNotPresent(); + assertThat(immutableInfo.propertyDescriptors()).hasOnlyOneElementSatisfying(p -> { + assertThat(p.name()).isEqualTo("attribute"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(int.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(int.class); + }); + } + + @DynamoDbImmutable(builder = SimpleImmutableWithTrickyNames.Builder.class) + private static final class SimpleImmutableWithTrickyNames { + public String isAttribute() { + throw new UnsupportedOperationException(); + } + + public String getGetAttribute() { + throw new UnsupportedOperationException(); + } + + public String getSetAttribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder isAttribute(String isAttribute) { + throw new UnsupportedOperationException(); + } + + public Builder getAttribute(String getAttribute) { + throw new UnsupportedOperationException(); + } + + public Builder setSetAttribute(String setAttribute) { + throw new UnsupportedOperationException(); + } + + public SimpleImmutableWithTrickyNames build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void simpleImmutableWithTrickyNames() { + ImmutableInfo immutableInfo = + ImmutableIntrospector.getImmutableInfo(SimpleImmutableWithTrickyNames.class); + + assertThat(immutableInfo.immutableClass()).isSameAs(SimpleImmutableWithTrickyNames.class); + assertThat(immutableInfo.builderClass()).isSameAs(SimpleImmutableWithTrickyNames.Builder.class); + assertThat(immutableInfo.buildMethod().getReturnType()).isSameAs(SimpleImmutableWithTrickyNames.class); + assertThat(immutableInfo.buildMethod().getParameterCount()).isZero(); + assertThat(immutableInfo.staticBuilderMethod()).isNotPresent(); + assertThat(immutableInfo.propertyDescriptors()).hasSize(3); + assertThat(immutableInfo.propertyDescriptors()).anySatisfy(p -> { + assertThat(p.name()).isEqualTo("isAttribute"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(String.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(String.class); + }); + assertThat(immutableInfo.propertyDescriptors()).anySatisfy(p -> { + assertThat(p.name()).isEqualTo("getAttribute"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(String.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(String.class); + }); + assertThat(immutableInfo.propertyDescriptors()).anySatisfy(p -> { + assertThat(p.name()).isEqualTo("setAttribute"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(String.class); + 
assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(String.class); + }); + } + + @DynamoDbImmutable(builder = ImmutableWithNoMatchingSetter.Builder.class) + private static final class ImmutableWithNoMatchingSetter { + public int rightAttribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder wrongAttribute(int attribute) { + throw new UnsupportedOperationException(); + } + + public ImmutableWithNoMatchingSetter build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void immutableWithNoMatchingSetter() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("rightAttribute"); + exception.expectMessage("matching setter"); + ImmutableIntrospector.getImmutableInfo(ImmutableWithNoMatchingSetter.class); + } + + @DynamoDbImmutable(builder = ImmutableWithGetterParams.Builder.class) + private static final class ImmutableWithGetterParams { + public int rightAttribute(String illegalParam) { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder rightAttribute(int rightAttribute) { + throw new UnsupportedOperationException(); + } + + public ImmutableWithGetterParams build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void immutableWithGetterParams() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("rightAttribute"); + exception.expectMessage("getter"); + exception.expectMessage("parameters"); + ImmutableIntrospector.getImmutableInfo(ImmutableWithGetterParams.class); + } + + @DynamoDbImmutable(builder = ImmutableWithVoidAttribute.Builder.class) + private static final class ImmutableWithVoidAttribute { + public Void rightAttribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder rightAttribute(Void rightAttribute) { + throw new UnsupportedOperationException(); + } + + public ImmutableWithVoidAttribute build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void immutableWithVoidAttribute() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("rightAttribute"); + exception.expectMessage("getter"); + exception.expectMessage("void"); + ImmutableIntrospector.getImmutableInfo(ImmutableWithVoidAttribute.class); + } + + @DynamoDbImmutable(builder = ImmutableWithNoMatchingGetter.Builder.class) + private static final class ImmutableWithNoMatchingGetter { + public static final class Builder { + public Builder rightAttribute(int attribute) { + throw new UnsupportedOperationException(); + } + + public ImmutableWithNoMatchingGetter build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void immutableWithNoMatchingGetter() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("rightAttribute"); + exception.expectMessage("matching getter"); + ImmutableIntrospector.getImmutableInfo(ImmutableWithNoMatchingGetter.class); + } + + @DynamoDbImmutable(builder = ImmutableWithNoBuildMethod.Builder.class) + private static final class ImmutableWithNoBuildMethod { + public int rightAttribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder rightAttribute(int attribute) { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void immutableWithNoBuildMethod() { + 
exception.expect(IllegalArgumentException.class); + exception.expectMessage("build"); + ImmutableIntrospector.getImmutableInfo(ImmutableWithNoBuildMethod.class); + } + + @DynamoDbImmutable(builder = ImmutableWithWrongSetter.Builder.class) + private static final class ImmutableWithWrongSetter { + public int rightAttribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder rightAttribute(String attribute) { + throw new UnsupportedOperationException(); + } + + public ImmutableWithWrongSetter build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void immutableWithWrongSetter() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("rightAttribute"); + exception.expectMessage("matching setter"); + ImmutableIntrospector.getImmutableInfo(ImmutableWithWrongSetter.class); + } + + @DynamoDbImmutable(builder = ImmutableWithWrongBuildType.Builder.class) + private static final class ImmutableWithWrongBuildType { + public int rightAttribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder rightAttribute(int attribute) { + throw new UnsupportedOperationException(); + } + + public String build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void immutableWithWrongBuildType() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("build"); + ImmutableIntrospector.getImmutableInfo(ImmutableWithWrongBuildType.class); + } + + private static final class ImmutableMissingAnnotation { + public int rightAttribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder rightAttribute(int attribute) { + throw new UnsupportedOperationException(); + } + + public ImmutableMissingAnnotation build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void immutableMissingAnnotation() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("@DynamoDbImmutable"); + ImmutableIntrospector.getImmutableInfo(ImmutableMissingAnnotation.class); + } + + @DynamoDbImmutable(builder = SimpleImmutableWithIgnoredGetter.Builder.class) + private static final class SimpleImmutableWithIgnoredGetter { + public int attribute() { + throw new UnsupportedOperationException(); + } + + @DynamoDbIgnore + public int ignoreMe() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder attribute(int attribute) { + throw new UnsupportedOperationException(); + } + + public SimpleImmutableWithIgnoredGetter build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void simpleImmutableWithIgnoredGetter() { + ImmutableInfo immutableInfo = + ImmutableIntrospector.getImmutableInfo(SimpleImmutableWithIgnoredGetter.class); + + assertThat(immutableInfo.immutableClass()).isSameAs(SimpleImmutableWithIgnoredGetter.class); + assertThat(immutableInfo.builderClass()).isSameAs(SimpleImmutableWithIgnoredGetter.Builder.class); + assertThat(immutableInfo.buildMethod().getReturnType()).isSameAs(SimpleImmutableWithIgnoredGetter.class); + assertThat(immutableInfo.buildMethod().getParameterCount()).isZero(); + assertThat(immutableInfo.staticBuilderMethod()).isNotPresent(); + assertThat(immutableInfo.propertyDescriptors()).hasOnlyOneElementSatisfying(p -> { + assertThat(p.name()).isEqualTo("attribute"); + assertThat(p.getter().getParameterCount()).isZero(); + 
assertThat(p.getter().getReturnType()).isSameAs(int.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(int.class); + }); + } + + @DynamoDbImmutable(builder = SimpleImmutableWithIgnoredSetter.Builder.class) + private static final class SimpleImmutableWithIgnoredSetter { + public int attribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder attribute(int attribute) { + throw new UnsupportedOperationException(); + } + + @DynamoDbIgnore + public int ignoreMe() { + throw new UnsupportedOperationException(); + } + + public SimpleImmutableWithIgnoredSetter build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void simpleImmutableWithIgnoredSetter() { + ImmutableInfo immutableInfo = + ImmutableIntrospector.getImmutableInfo(SimpleImmutableWithIgnoredSetter.class); + + assertThat(immutableInfo.immutableClass()).isSameAs(SimpleImmutableWithIgnoredSetter.class); + assertThat(immutableInfo.builderClass()).isSameAs(SimpleImmutableWithIgnoredSetter.Builder.class); + assertThat(immutableInfo.buildMethod().getReturnType()).isSameAs(SimpleImmutableWithIgnoredSetter.class); + assertThat(immutableInfo.buildMethod().getParameterCount()).isZero(); + assertThat(immutableInfo.staticBuilderMethod()).isNotPresent(); + assertThat(immutableInfo.propertyDescriptors()).hasOnlyOneElementSatisfying(p -> { + assertThat(p.name()).isEqualTo("attribute"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(int.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(int.class); + }); + } + + private static class ExtendedImmutableBase { + public int baseAttribute() { + throw new UnsupportedOperationException(); + } + + public static class Builder { + public Builder baseAttribute(int attribute) { + throw new UnsupportedOperationException(); + } + } + } + + @DynamoDbImmutable(builder = ExtendedImmutable.Builder.class) + private static final class ExtendedImmutable extends ExtendedImmutableBase { + public int childAttribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder extends ExtendedImmutableBase.Builder { + public Builder childAttribute(int attribute) { + throw new UnsupportedOperationException(); + } + + public ExtendedImmutable build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void extendedImmutable() { + ImmutableInfo immutableInfo = + ImmutableIntrospector.getImmutableInfo(ExtendedImmutable.class); + + assertThat(immutableInfo.immutableClass()).isSameAs(ExtendedImmutable.class); + assertThat(immutableInfo.builderClass()).isSameAs(ExtendedImmutable.Builder.class); + assertThat(immutableInfo.buildMethod().getReturnType()).isSameAs(ExtendedImmutable.class); + assertThat(immutableInfo.buildMethod().getParameterCount()).isZero(); + assertThat(immutableInfo.staticBuilderMethod()).isNotPresent(); + assertThat(immutableInfo.propertyDescriptors()).hasSize(2); + assertThat(immutableInfo.propertyDescriptors()).anySatisfy(p -> { + assertThat(p.name()).isEqualTo("baseAttribute"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(int.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(int.class); + }); + 
assertThat(immutableInfo.propertyDescriptors()).anySatisfy(p -> { + assertThat(p.name()).isEqualTo("childAttribute"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(int.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(int.class); + }); + } + + @DynamoDbImmutable(builder = ImmutableWithPrimitiveBoolean.Builder.class) + private static final class ImmutableWithPrimitiveBoolean { + public boolean isAttribute() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + public Builder attribute(boolean attribute) { + throw new UnsupportedOperationException(); + } + + public ImmutableWithPrimitiveBoolean build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void immutableWithPrimitiveBoolean() { + ImmutableInfo immutableInfo = + ImmutableIntrospector.getImmutableInfo(ImmutableWithPrimitiveBoolean.class); + + assertThat(immutableInfo.immutableClass()).isSameAs(ImmutableWithPrimitiveBoolean.class); + assertThat(immutableInfo.builderClass()).isSameAs(ImmutableWithPrimitiveBoolean.Builder.class); + assertThat(immutableInfo.buildMethod().getReturnType()).isSameAs(ImmutableWithPrimitiveBoolean.class); + assertThat(immutableInfo.buildMethod().getParameterCount()).isZero(); + assertThat(immutableInfo.staticBuilderMethod()).isNotPresent(); + assertThat(immutableInfo.propertyDescriptors()).hasOnlyOneElementSatisfying(p -> { + assertThat(p.name()).isEqualTo("attribute"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(boolean.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(boolean.class); + }); + } + + @DynamoDbImmutable(builder = ImmutableWithStaticBuilder.Builder.class) + private static final class ImmutableWithStaticBuilder { + public boolean isAttribute() { + throw new UnsupportedOperationException(); + } + + public static Builder builder() { + throw new UnsupportedOperationException(); + } + + public static final class Builder { + private Builder() { + } + + public Builder attribute(boolean attribute) { + throw new UnsupportedOperationException(); + } + + public ImmutableWithStaticBuilder build() { + throw new UnsupportedOperationException(); + } + } + } + + @Test + public void immutableWithStaticBuilder() { + ImmutableInfo immutableInfo = + ImmutableIntrospector.getImmutableInfo(ImmutableWithStaticBuilder.class); + + assertThat(immutableInfo.immutableClass()).isSameAs(ImmutableWithStaticBuilder.class); + assertThat(immutableInfo.builderClass()).isSameAs(ImmutableWithStaticBuilder.Builder.class); + assertThat(immutableInfo.buildMethod().getReturnType()).isSameAs(ImmutableWithStaticBuilder.class); + assertThat(immutableInfo.buildMethod().getParameterCount()).isZero(); + assertThat(immutableInfo.staticBuilderMethod()) + .hasValueSatisfying(m -> assertThat(m.getName()).isEqualTo("builder")); + assertThat(immutableInfo.propertyDescriptors()).hasOnlyOneElementSatisfying(p -> { + assertThat(p.name()).isEqualTo("attribute"); + assertThat(p.getter().getParameterCount()).isZero(); + assertThat(p.getter().getReturnType()).isSameAs(boolean.class); + assertThat(p.setter().getParameterCount()).isEqualTo(1); + assertThat(p.setter().getParameterTypes()[0]).isSameAs(boolean.class); + }); + } +} \ No newline at end of file diff --git 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/MetaTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/MetaTableSchemaTest.java
new file mode 100644
index 000000000000..7551c0ad605a
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/immutable/MetaTableSchemaTest.java
@@ -0,0 +1,222 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb.internal.immutable;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.junit.MockitoJUnitRunner;
+import software.amazon.awssdk.enhanced.dynamodb.EnhancedType;
+import software.amazon.awssdk.enhanced.dynamodb.TableMetadata;
+import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
+import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem;
+import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.MetaTableSchema;
+import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
+
+@RunWith(MockitoJUnitRunner.class)
+public class MetaTableSchemaTest {
+    private final MetaTableSchema<FakeItem> metaTableSchema = MetaTableSchema.create(FakeItem.class);
+    private final FakeItem fakeItem = FakeItem.createUniqueFakeItem();
+    private final Map<String, AttributeValue> fakeMap =
+        Collections.singletonMap("test", AttributeValue.builder().s("test").build());
+
+    @Rule
+    public ExpectedException exception = ExpectedException.none();
+
+    @Mock
+    private TableSchema<FakeItem> mockTableSchema;
+
+    @Mock
+    private EnhancedType<FakeItem> mockEnhancedType;
+
+    @Test
+    public void mapToItem() {
+        metaTableSchema.initialize(mockTableSchema);
+        when(mockTableSchema.mapToItem(any())).thenReturn(fakeItem);
+
+        assertThat(metaTableSchema.mapToItem(fakeMap)).isSameAs(fakeItem);
+        verify(mockTableSchema).mapToItem(fakeMap);
+    }
+
+    @Test
+    public void mapToItem_notInitialized() {
+        assertUninitialized(t -> t.mapToItem(fakeMap));
+    }
+
+    @Test
+    public void itemToMap_ignoreNulls() {
+        metaTableSchema.initialize(mockTableSchema);
+        when(mockTableSchema.itemToMap(any(FakeItem.class), any(boolean.class))).thenReturn(fakeMap);
+
+        assertThat(metaTableSchema.itemToMap(fakeItem, true)).isSameAs(fakeMap);
+        verify(mockTableSchema).itemToMap(fakeItem, true);
+        assertThat(metaTableSchema.itemToMap(fakeItem, false)).isSameAs(fakeMap);
+        verify(mockTableSchema).itemToMap(fakeItem, false);
+    }
+
+    @Test
+    public void itemToMap_ignoreNulls_notInitialized() {
+        assertUninitialized(t -> t.itemToMap(fakeItem, true));
+    }
+
+    @Test
+    public void itemToMap_attributes() {
+        Collection<String> attributes = Collections.singletonList("test-attribute");
+
+        metaTableSchema.initialize(mockTableSchema);
+        when(mockTableSchema.itemToMap(any(FakeItem.class), any())).thenReturn(fakeMap);
+
+        assertThat(metaTableSchema.itemToMap(fakeItem, attributes)).isSameAs(fakeMap);
+        verify(mockTableSchema).itemToMap(fakeItem, attributes);
+    }
+
+    @Test
+    public void itemToMap_attributes_notInitialized() {
+        assertUninitialized(t -> t.itemToMap(fakeItem, null));
+    }
+
+    @Test
+    public void attributeValue() {
+        AttributeValue attributeValue = AttributeValue.builder().s("test-attribute").build();
+
+        metaTableSchema.initialize(mockTableSchema);
+        when(mockTableSchema.attributeValue(any(), any())).thenReturn(attributeValue);
+
+        assertThat(metaTableSchema.attributeValue(fakeItem, "test-name")).isSameAs(attributeValue);
+        verify(mockTableSchema).attributeValue(fakeItem, "test-name");
+    }
+
+    @Test
+    public void attributeValue_notInitialized() {
+        assertUninitialized(t -> t.attributeValue(fakeItem, "test"));
+    }
+
+    @Test
+    public void tableMetadata() {
+        TableMetadata mockTableMetadata = Mockito.mock(TableMetadata.class);
+
+        metaTableSchema.initialize(mockTableSchema);
+        when(mockTableSchema.tableMetadata()).thenReturn(mockTableMetadata);
+
+        assertThat(metaTableSchema.tableMetadata()).isSameAs(mockTableMetadata);
+        verify(mockTableSchema).tableMetadata();
+    }
+
+    @Test
+    public void tableMetadata_notInitialized() {
+        assertUninitialized(TableSchema::tableMetadata);
+    }
+
+    @Test
+    public void itemType() {
+        metaTableSchema.initialize(mockTableSchema);
+        when(mockTableSchema.itemType()).thenReturn(mockEnhancedType);
+
+        assertThat(metaTableSchema.itemType()).isSameAs(mockEnhancedType);
+        verify(mockTableSchema).itemType();
+    }
+
+    @Test
+    public void itemType_notInitialized() {
+        assertUninitialized(TableSchema::itemType);
+    }
+
+    @Test
+    public void attributeNames() {
+        List<String> attributeNames = Collections.singletonList("attribute-names");
+
+        metaTableSchema.initialize(mockTableSchema);
+        when(mockTableSchema.attributeNames()).thenReturn(attributeNames);
+
+        assertThat(metaTableSchema.attributeNames()).isSameAs(attributeNames);
+        verify(mockTableSchema).attributeNames();
+    }
+
+    @Test
+    public void attributeNames_notInitialized() {
+        assertUninitialized(TableSchema::attributeNames);
+    }
+
+    @Test
+    public void isAbstract() {
+        metaTableSchema.initialize(mockTableSchema);
+
+        when(mockTableSchema.isAbstract()).thenReturn(true);
+        assertThat(metaTableSchema.isAbstract()).isTrue();
+        verify(mockTableSchema).isAbstract();
+
+        when(mockTableSchema.isAbstract()).thenReturn(false);
+        assertThat(metaTableSchema.isAbstract()).isFalse();
+        verify(mockTableSchema, times(2)).isAbstract();
+    }
+
+    @Test
+    public void isAbstract_notInitialized() {
+        assertUninitialized(TableSchema::isAbstract);
+    }
+
+    @Test
+    public void doubleInitialize_throwsIllegalStateException() {
+        metaTableSchema.initialize(mockTableSchema);
+        exception.expect(IllegalStateException.class);
+        exception.expectMessage("initialized");
+        metaTableSchema.initialize(mockTableSchema);
+    }
+
+    @Test
+    public void isInitialized_uninitialized() {
+        assertThat(metaTableSchema.isInitialized()).isFalse();
+    }
+
+    @Test
+    public void isInitialized_initialized() {
+        metaTableSchema.initialize(mockTableSchema);
+        assertThat(metaTableSchema.isInitialized()).isTrue();
+    }
+
+    @Test
+    public void concreteTableSchema_uninitialized() {
+        exception.expect(IllegalStateException.class);
+        exception.expectMessage("must be initialized");
+        metaTableSchema.concreteTableSchema();
+    }
+
+    @Test
+    public void concreteTableSchema_initialized() {
+        metaTableSchema.initialize(mockTableSchema);
+        assertThat(metaTableSchema.concreteTableSchema()).isSameAs(mockTableSchema);
+    }
+
+    private void assertUninitialized(Consumer<TableSchema<FakeItem>> methodToTest) {
+        exception.expect(IllegalStateException.class);
+        exception.expectMessage("must be initialized");
+        methodToTest.accept(metaTableSchema);
+    }
+}
\ No newline at end of file
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/MetaTableSchemaCacheTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/MetaTableSchemaCacheTest.java
new file mode 100644
index 000000000000..cc191a7a7e1d
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/mapper/MetaTableSchemaCacheTest.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb.internal.mapper;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.junit.Test;
+import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem;
+
+public class MetaTableSchemaCacheTest {
+    private final MetaTableSchemaCache metaTableSchemaCache = new MetaTableSchemaCache();
+
+    @Test
+    public void createAndGetSingleEntry() {
+        MetaTableSchema<FakeItem> metaTableSchema = metaTableSchemaCache.getOrCreate(FakeItem.class);
+        assertThat(metaTableSchema).isNotNull();
+
+        assertThat(metaTableSchemaCache.get(FakeItem.class)).hasValue(metaTableSchema);
+    }
+
+    @Test
+    public void getKeyNotInMap() {
+        assertThat(metaTableSchemaCache.get(FakeItem.class)).isNotPresent();
+    }
+
+    @Test
+    public void createReturnsExistingObject() {
+        MetaTableSchema<FakeItem> metaTableSchema = metaTableSchemaCache.getOrCreate(FakeItem.class);
+        assertThat(metaTableSchema).isNotNull();
+
+        assertThat(metaTableSchemaCache.getOrCreate(FakeItem.class)).isSameAs(metaTableSchema);
+    }
+}
\ No newline at end of file
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchGetItemOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchGetItemOperationTest.java
new file mode 100644
index 000000000000..866a237e5784
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchGetItemOperationTest.java
@@ -0,0 +1,357 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithSort; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.IntStream; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPage; +import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.ReadBatch; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.BatchGetItemRequest; +import software.amazon.awssdk.services.dynamodb.model.BatchGetItemResponse; +import software.amazon.awssdk.services.dynamodb.model.KeysAndAttributes; +import software.amazon.awssdk.services.dynamodb.paginators.BatchGetItemIterable; + +@RunWith(MockitoJUnitRunner.class) +public class BatchGetItemOperationTest { + private static final String TABLE_NAME = "table-name"; + private static final String TABLE_NAME_2 = "table-name-2"; + + private static final List FAKE_ITEMS = + IntStream.range(0, 6).mapToObj($ -> createUniqueFakeItem()).collect(toList()); + private static final List> FAKE_ITEM_MAPS = FAKE_ITEMS.stream().map(item -> + 
FakeItem.getTableSchema().itemToMap(item, FakeItem.getTableMetadata().primaryKeys())).collect(toList()); + private static final List FAKESORT_ITEMS = + IntStream.range(0, 6).mapToObj($ -> createUniqueFakeItemWithSort()).collect(toList()); + private static final List> FAKESORT_ITEM_MAPS = FAKESORT_ITEMS.stream().map(item -> + FakeItemWithSort.getTableSchema().itemToMap(item, FakeItemWithSort.getTableMetadata().primaryKeys())) + .collect(toList()); + private static final List FAKE_ITEM_KEYS = + FAKE_ITEMS.stream().map(fakeItem -> Key.builder().partitionValue(fakeItem.getId()).build()).collect(toList()); + private static final List FAKESORT_ITEM_KEYS = + FAKESORT_ITEMS.stream() + .map(fakeItemWithSort -> Key.builder() + .partitionValue(fakeItemWithSort.getId()) + .sortValue(fakeItemWithSort.getSort()) + .build()) + .collect(toList()); + + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Mock + private DynamoDbEnhancedClientExtension mockExtension; + + private DynamoDbEnhancedClient enhancedClient; + private DynamoDbTable fakeItemMappedTable; + private DynamoDbTable fakeItemWithSortMappedTable; + + @Before + public void setupMappedTables() { + enhancedClient = DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).extensions().build(); + fakeItemMappedTable = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema()); + fakeItemWithSortMappedTable = enhancedClient.table(TABLE_NAME_2, FakeItemWithSort.getTableSchema()); + } + + @Test + public void getServiceCall_usingShortcutForm_makesTheRightCallAndReturnsResponse() { + BatchGetItemEnhancedRequest batchGetItemEnhancedRequest = + BatchGetItemEnhancedRequest.builder() + .readBatches(ReadBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addGetItem(FAKE_ITEM_KEYS.get(0)) + .build()) + .build(); + + BatchGetItemOperation operation = BatchGetItemOperation.create(batchGetItemEnhancedRequest); + + BatchGetItemRequest batchGetItemRequest = + BatchGetItemRequest.builder() + .requestItems(singletonMap("test-table", + KeysAndAttributes.builder() + .keys(singletonList(FAKE_ITEM_MAPS.get(0))) + .build())) + .build(); + + BatchGetItemIterable expectedResponse = mock(BatchGetItemIterable.class); + when(mockDynamoDbClient.batchGetItemPaginator(any(BatchGetItemRequest.class))).thenReturn(expectedResponse); + + SdkIterable response = + operation.serviceCall(mockDynamoDbClient).apply(batchGetItemRequest); + + assertThat(response, sameInstance(expectedResponse)); + verify(mockDynamoDbClient).batchGetItemPaginator(batchGetItemRequest); + } + + @Test + public void getServiceCall_usingKeyItemForm_makesTheRightCallAndReturnsResponse() { + BatchGetItemEnhancedRequest batchGetItemEnhancedRequest = + BatchGetItemEnhancedRequest.builder() + .readBatches(ReadBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addGetItem(FAKE_ITEMS.get(0)) + .build()) + .build(); + + BatchGetItemOperation operation = BatchGetItemOperation.create(batchGetItemEnhancedRequest); + + BatchGetItemRequest batchGetItemRequest = + BatchGetItemRequest.builder() + .requestItems(singletonMap("test-table", + KeysAndAttributes.builder() + .keys(singletonList(FAKE_ITEM_MAPS.get(0))) + .build())) + .build(); + + BatchGetItemIterable expectedResponse = mock(BatchGetItemIterable.class); + when(mockDynamoDbClient.batchGetItemPaginator(any(BatchGetItemRequest.class))).thenReturn(expectedResponse); + + SdkIterable response = + operation.serviceCall(mockDynamoDbClient).apply(batchGetItemRequest); + + assertThat(response, 
sameInstance(expectedResponse)); + verify(mockDynamoDbClient).batchGetItemPaginator(batchGetItemRequest); + } + + @Test + public void generateRequest_multipleBatches_multipleTableSchemas() { + BatchGetItemEnhancedRequest batchGetItemEnhancedRequest = + BatchGetItemEnhancedRequest.builder() + .readBatches( + ReadBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addGetItem(r -> r.key(FAKE_ITEM_KEYS.get(0))) + .addGetItem(r -> r.key(FAKE_ITEM_KEYS.get(1))) + .addGetItem(r -> r.key(FAKE_ITEM_KEYS.get(2))) + .build(), + ReadBatch.builder(FakeItemWithSort.class) + .mappedTableResource(fakeItemWithSortMappedTable) + .addGetItem(r -> r.key(FAKESORT_ITEM_KEYS.get(0))) + .addGetItem(r -> r.key(FAKESORT_ITEM_KEYS.get(1))) + .addGetItem(r -> r.key(FAKESORT_ITEM_KEYS.get(2))) + .build()) + .build(); + + BatchGetItemOperation operation = BatchGetItemOperation.create(batchGetItemEnhancedRequest); + + BatchGetItemRequest batchGetItemRequest = operation.generateRequest(mockExtension); + + KeysAndAttributes keysAndAttributes1 = batchGetItemRequest.requestItems().get(TABLE_NAME); + KeysAndAttributes keysAndAttributes2 = batchGetItemRequest.requestItems().get(TABLE_NAME_2); + assertThat(keysAndAttributes1.keys(), containsInAnyOrder(FAKE_ITEM_MAPS.subList(0, 3).toArray())); + assertThat(keysAndAttributes2.keys(), containsInAnyOrder(FAKESORT_ITEM_MAPS.subList(0, 3).toArray())); + assertThat(keysAndAttributes1.consistentRead(), is(nullValue())); + assertThat(keysAndAttributes2.consistentRead(), is(nullValue())); + verifyNoMoreInteractions(mockExtension); + } + + @Test + public void generateRequest_multipleBatches_multipleTableSchemas_nonConflictingConsistentRead() { + BatchGetItemEnhancedRequest batchGetItemEnhancedRequest = BatchGetItemEnhancedRequest + .builder() + .readBatches( + ReadBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKE_ITEM_KEYS.get(0)).consistentRead(true).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKE_ITEM_KEYS.get(1)).consistentRead(true).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKE_ITEM_KEYS.get(2)).consistentRead(true).build()) + .build(), + ReadBatch.builder(FakeItemWithSort.class) + .mappedTableResource(fakeItemWithSortMappedTable) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKESORT_ITEM_KEYS.get(0)).consistentRead(false).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKESORT_ITEM_KEYS.get(1)).consistentRead(false).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKESORT_ITEM_KEYS.get(2)).consistentRead(false).build()) + .build()) + .build(); + + BatchGetItemOperation operation = BatchGetItemOperation.create(batchGetItemEnhancedRequest); + + BatchGetItemRequest batchGetItemRequest = operation.generateRequest(mockExtension); + + KeysAndAttributes keysAndAttributes1 = batchGetItemRequest.requestItems().get(TABLE_NAME); + KeysAndAttributes keysAndAttributes2 = batchGetItemRequest.requestItems().get(TABLE_NAME_2); + assertThat(keysAndAttributes1.keys(), containsInAnyOrder(FAKE_ITEM_MAPS.subList(0, 3).toArray())); + assertThat(keysAndAttributes2.keys(), containsInAnyOrder(FAKESORT_ITEM_MAPS.subList(0, 3).toArray())); + assertThat(keysAndAttributes1.consistentRead(), is(true)); + assertThat(keysAndAttributes2.consistentRead(), is(false)); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_multipleBatches_multipleTableSchemas_ConflictingConsistentReadStartingWithNull() { + 
BatchGetItemEnhancedRequest batchGetItemEnhancedRequest = BatchGetItemEnhancedRequest + .builder() + .readBatches( + ReadBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKE_ITEM_KEYS.get(0)).consistentRead(true).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKE_ITEM_KEYS.get(1)).consistentRead(true).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKE_ITEM_KEYS.get(2)).consistentRead(true).build()) + .build(), + ReadBatch.builder(FakeItemWithSort.class) + .mappedTableResource(fakeItemWithSortMappedTable) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKESORT_ITEM_KEYS.get(0)).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKESORT_ITEM_KEYS.get(1)).consistentRead(true).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKESORT_ITEM_KEYS.get(2)).consistentRead(true).build()) + .build()) + .build(); + + BatchGetItemOperation operation = BatchGetItemOperation.create(batchGetItemEnhancedRequest); + + operation.generateRequest(mockExtension); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_multipleBatches_multipleTableSchemas_ConflictingConsistentReadStartingWithFalse() { + BatchGetItemEnhancedRequest batchGetItemEnhancedRequest = BatchGetItemEnhancedRequest + .builder() + .readBatches( + ReadBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKE_ITEM_KEYS.get(0)).consistentRead(true).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKE_ITEM_KEYS.get(1)).consistentRead(true).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKE_ITEM_KEYS.get(2)).consistentRead(true).build()) + .build(), + ReadBatch.builder(FakeItemWithSort.class) + .mappedTableResource(fakeItemWithSortMappedTable) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKESORT_ITEM_KEYS.get(0)).consistentRead(false).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKESORT_ITEM_KEYS.get(1)).consistentRead(true).build()) + .addGetItem(GetItemEnhancedRequest.builder().key(FAKESORT_ITEM_KEYS.get(2)).consistentRead(true).build()) + .build()) + .build(); + + BatchGetItemOperation operation = + BatchGetItemOperation.create(batchGetItemEnhancedRequest); + + operation.generateRequest(mockExtension); + } + + @Test + public void transformResponse_multipleTables_multipleItems_noExtension() { + Map>> page = new HashMap<>(); + page.put(TABLE_NAME, Arrays.asList(FAKE_ITEM_MAPS.get(0), FAKE_ITEM_MAPS.get(1))); + page.put(TABLE_NAME_2, singletonList(FAKESORT_ITEM_MAPS.get(0))); + + BatchGetItemResponse fakeResults = generateFakeResults(page); + BatchGetItemOperation operation = BatchGetItemOperation.create(emptyRequest()); + + BatchGetResultPage resultsPage = operation.transformResponse(fakeResults, null); + + List fakeItemResultsPage = resultsPage.resultsForTable(fakeItemMappedTable); + List fakeItemWithSortResultsPage = + resultsPage.resultsForTable(fakeItemWithSortMappedTable); + + assertThat(fakeItemResultsPage, containsInAnyOrder(FAKE_ITEMS.get(0), FAKE_ITEMS.get(1))); + assertThat(fakeItemWithSortResultsPage, containsInAnyOrder(FAKESORT_ITEMS.get(0))); + } + + @Test + public void transformResponse_multipleTables_multipleItems_extensionWithTransformation() { + Map>> page = new HashMap<>(); + page.put(TABLE_NAME, Arrays.asList(FAKE_ITEM_MAPS.get(0), FAKE_ITEM_MAPS.get(1))); + page.put(TABLE_NAME_2, singletonList(FAKESORT_ITEM_MAPS.get(0))); + BatchGetItemResponse 
fakeResults = generateFakeResults(page); + BatchGetItemOperation operation = BatchGetItemOperation.create(emptyRequest()); + + // Use the mock extension to transform every item based on table name + IntStream.range(0, 3).forEach(i -> { + doReturn(ReadModification.builder().transformedItem(FAKE_ITEM_MAPS.get(i + 3)).build()) + .when(mockExtension) + .afterRead( + argThat(extensionContext -> + extensionContext.operationContext().tableName().equals(TABLE_NAME) && + extensionContext.items().equals(FAKE_ITEM_MAPS.get(i)) + )); + doReturn(ReadModification.builder().transformedItem(FAKESORT_ITEM_MAPS.get(i + 3)).build()) + .when(mockExtension) + .afterRead(argThat(extensionContext -> + extensionContext.operationContext().tableName().equals(TABLE_NAME_2) && + extensionContext.items().equals(FAKESORT_ITEM_MAPS.get(i)) + )); + }); + + BatchGetResultPage resultsPage = operation.transformResponse(fakeResults, mockExtension); + + List fakeItemResultsPage = resultsPage.resultsForTable(fakeItemMappedTable); + List fakeItemWithSortResultsPage = + resultsPage.resultsForTable(fakeItemWithSortMappedTable); + + + assertThat(fakeItemResultsPage, containsInAnyOrder(FAKE_ITEMS.get(3), FAKE_ITEMS.get(4))); + assertThat(fakeItemWithSortResultsPage, containsInAnyOrder(FAKESORT_ITEMS.get(3))); + } + + @Test + public void transformResponse_queryingEmptyResults() { + BatchGetItemResponse fakeResults = generateFakeResults(emptyMap()); + BatchGetItemOperation operation = BatchGetItemOperation.create(emptyRequest()); + + BatchGetResultPage resultsPage = operation.transformResponse(fakeResults, null); + + assertThat(resultsPage.resultsForTable(fakeItemMappedTable), is(emptyList())); + } + + private static BatchGetItemEnhancedRequest emptyRequest() { + return BatchGetItemEnhancedRequest.builder().readBatches().build(); + } + + private static BatchGetItemResponse generateFakeResults( + Map>> itemMapsPage) { + + return BatchGetItemResponse.builder() + .responses(itemMapsPage) + .build(); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchWriteItemOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchWriteItemOperationTest.java new file mode 100644 index 000000000000..42558ce659af --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchWriteItemOperationTest.java @@ -0,0 +1,403 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithSort; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.IntStream; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteItemEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchWriteResult; +import software.amazon.awssdk.enhanced.dynamodb.model.WriteBatch; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemRequest; +import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemResponse; +import software.amazon.awssdk.services.dynamodb.model.DeleteRequest; +import software.amazon.awssdk.services.dynamodb.model.PutRequest; +import software.amazon.awssdk.services.dynamodb.model.WriteRequest; + +@RunWith(MockitoJUnitRunner.class) +public class BatchWriteItemOperationTest { + private static final String TABLE_NAME = "table-name"; + private static final String TABLE_NAME_2 = "table-name-2"; + + private static final List FAKE_ITEMS = + IntStream.range(0, 6).mapToObj($ -> createUniqueFakeItem()).collect(toList()); + private static final List> FAKE_ITEM_MAPS = FAKE_ITEMS.stream().map(item -> + FakeItem.getTableSchema().itemToMap(item, FakeItem.getTableMetadata().primaryKeys())).collect(toList()); + private static final List FAKESORT_ITEMS = + IntStream.range(0, 6).mapToObj($ -> createUniqueFakeItemWithSort()).collect(toList()); + private static final List> FAKESORT_ITEM_MAPS = FAKESORT_ITEMS.stream().map(item -> + 
FakeItemWithSort.getTableSchema().itemToMap(item, FakeItemWithSort.getTableMetadata().primaryKeys())) + .collect(toList()); + private static final List FAKE_ITEM_KEYS = + FAKE_ITEMS.stream().map(fakeItem -> Key.builder().partitionValue(fakeItem.getId()).build()).collect(toList()); + private static final List FAKESORT_ITEM_KEYS = + FAKESORT_ITEMS.stream() + .map(fakeItemWithSort -> Key.builder() + .partitionValue(fakeItemWithSort.getId()) + .sortValue(fakeItemWithSort.getSort()) + .build()) + .collect(toList()); + + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Mock + private DynamoDbEnhancedClientExtension mockExtension; + + private DynamoDbEnhancedClient enhancedClient; + private DynamoDbTable fakeItemMappedTable; + private DynamoDbTable fakeItemMappedTableWithExtension; + private DynamoDbTable fakeItemWithSortMappedTable; + private DynamoDbTable fakeItemWithSortMappedTableWithExtension; + + @Before + public void setupMappedTables() { + enhancedClient = DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).extensions().build(); + fakeItemMappedTable = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema()); + fakeItemWithSortMappedTable = enhancedClient.table(TABLE_NAME_2, FakeItemWithSort.getTableSchema()); + DynamoDbEnhancedClient dynamoDbEnhancedClientWithExtension = + DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).extensions(mockExtension).build(); + fakeItemMappedTableWithExtension = dynamoDbEnhancedClientWithExtension.table(TABLE_NAME, FakeItem.getTableSchema()); + fakeItemWithSortMappedTableWithExtension = dynamoDbEnhancedClientWithExtension.table(TABLE_NAME_2, + FakeItemWithSort.getTableSchema()); + } + + @Test + public void getServiceCall_makesTheRightCallAndReturnsResponse() { + + WriteBatch batch = WriteBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addPutItem(r -> r.item(FAKE_ITEMS.get(0))) + .build(); + + BatchWriteItemEnhancedRequest batchWriteItemEnhancedRequest = + BatchWriteItemEnhancedRequest.builder() + .writeBatches(batch) + .build(); + + BatchWriteItemOperation operation = BatchWriteItemOperation.create(batchWriteItemEnhancedRequest); + + WriteRequest writeRequest = + WriteRequest.builder() + .putRequest(PutRequest.builder().item(FAKE_ITEM_MAPS.get(0)).build()) + .build(); + + BatchWriteItemRequest request = + BatchWriteItemRequest.builder() + .requestItems(singletonMap("table", singletonList(writeRequest))) + .build(); + + BatchWriteItemResponse expectedResponse = BatchWriteItemResponse.builder().build(); + when(mockDynamoDbClient.batchWriteItem(any(BatchWriteItemRequest.class))).thenReturn(expectedResponse); + + BatchWriteItemResponse response = operation.serviceCall(mockDynamoDbClient).apply(request); + + assertThat(response, sameInstance(expectedResponse)); + verify(mockDynamoDbClient).batchWriteItem(request); + } + + @Test + public void generateRequest_multipleTables_mixedCommands_usingShortcutForm() { + BatchWriteItemEnhancedRequest batchWriteItemEnhancedRequest = + BatchWriteItemEnhancedRequest.builder() + .writeBatches( + WriteBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addPutItem(FAKE_ITEMS.get(0)) + .addDeleteItem(FAKE_ITEM_KEYS.get(1)) + .addPutItem(FAKE_ITEMS.get(2)) + .build(), + WriteBatch.builder(FakeItemWithSort.class) + .mappedTableResource(fakeItemWithSortMappedTable) + .addDeleteItem(FAKESORT_ITEM_KEYS.get(0)) + .addPutItem(FAKESORT_ITEMS.get(1)) + .addDeleteItem(FAKESORT_ITEM_KEYS.get(2)) + .build()) + .build(); + + 
BatchWriteItemOperation operation = BatchWriteItemOperation.create(batchWriteItemEnhancedRequest); + + BatchWriteItemRequest request = operation.generateRequest(mockExtension); + + List writeRequests1 = request.requestItems().get(TABLE_NAME); + List writeRequests2 = request.requestItems().get(TABLE_NAME_2); + assertThat(writeRequests1, containsInAnyOrder(putRequest(FAKE_ITEM_MAPS.get(0)), + deleteRequest(FAKE_ITEM_MAPS.get(1)), + putRequest(FAKE_ITEM_MAPS.get(2)))); + assertThat(writeRequests2, containsInAnyOrder(deleteRequest(FAKESORT_ITEM_MAPS.get(0)), + putRequest(FAKESORT_ITEM_MAPS.get(1)), + deleteRequest(FAKESORT_ITEM_MAPS.get(2)))); + } + + @Test + public void generateRequest_multipleTables_mixedCommands_usingKeyItemForm() { + BatchWriteItemEnhancedRequest batchWriteItemEnhancedRequest = + BatchWriteItemEnhancedRequest.builder() + .writeBatches( + WriteBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addPutItem(FAKE_ITEMS.get(0)) + .addDeleteItem(FAKE_ITEMS.get(1)) + .addPutItem(FAKE_ITEMS.get(2)) + .build(), + WriteBatch.builder(FakeItemWithSort.class) + .mappedTableResource(fakeItemWithSortMappedTable) + .addDeleteItem(FAKESORT_ITEMS.get(0)) + .addPutItem(FAKESORT_ITEMS.get(1)) + .addDeleteItem(FAKESORT_ITEMS.get(2)) + .build()) + .build(); + + BatchWriteItemOperation operation = BatchWriteItemOperation.create(batchWriteItemEnhancedRequest); + + BatchWriteItemRequest request = operation.generateRequest(mockExtension); + + List writeRequests1 = request.requestItems().get(TABLE_NAME); + List writeRequests2 = request.requestItems().get(TABLE_NAME_2); + assertThat(writeRequests1, containsInAnyOrder(putRequest(FAKE_ITEM_MAPS.get(0)), + deleteRequest(FAKE_ITEM_MAPS.get(1)), + putRequest(FAKE_ITEM_MAPS.get(2)))); + assertThat(writeRequests2, containsInAnyOrder(deleteRequest(FAKESORT_ITEM_MAPS.get(0)), + putRequest(FAKESORT_ITEM_MAPS.get(1)), + deleteRequest(FAKESORT_ITEM_MAPS.get(2)))); + } + + @Test + public void generateRequest_multipleTables_extensionOnlyTransformsPutsAndNotDeletes() { + + // Use the mock extension to transform every item based on table name + IntStream.range(0, 3).forEach(i -> { + lenient().doReturn(WriteModification.builder().transformedItem(FAKE_ITEM_MAPS.get(i + 3)).build()) + .when(mockExtension) + .beforeWrite( + argThat(extensionContext -> + extensionContext.operationContext().tableName().equals(TABLE_NAME) && + extensionContext.items().equals(FAKE_ITEM_MAPS.get(i)) + )); + lenient().doReturn(WriteModification.builder().transformedItem(FAKESORT_ITEM_MAPS.get(i + 3)).build()) + .when(mockExtension) + .beforeWrite( + argThat(extensionContext -> + extensionContext.operationContext().tableName().equals(TABLE_NAME_2) && + extensionContext.items().equals(FAKESORT_ITEM_MAPS.get(i)) + )); + }); + + BatchWriteItemEnhancedRequest batchWriteItemEnhancedRequest = + BatchWriteItemEnhancedRequest.builder() + .writeBatches( + WriteBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTableWithExtension) + .addPutItem(r -> r.item(FAKE_ITEMS.get(0))) + .addDeleteItem(r -> r.key(FAKE_ITEM_KEYS.get(1))) + .addPutItem(r -> r.item(FAKE_ITEMS.get(2))) + .build(), + WriteBatch.builder(FakeItemWithSort.class) + .mappedTableResource(fakeItemWithSortMappedTableWithExtension) + .addDeleteItem(r -> r.key(FAKESORT_ITEM_KEYS.get(0))) + .addPutItem(r -> r.item(FAKESORT_ITEMS.get(1))) + .addDeleteItem(r -> r.key(FAKESORT_ITEM_KEYS.get(2))) + .build()) + .build(); + + BatchWriteItemOperation operation = 
BatchWriteItemOperation.create(batchWriteItemEnhancedRequest); + + BatchWriteItemRequest request = operation.generateRequest(mockExtension); + + List writeRequests1 = request.requestItems().get(TABLE_NAME); + List writeRequests2 = request.requestItems().get(TABLE_NAME_2); + + // Only PutItem requests should have their attributes transformed + assertThat(writeRequests1, containsInAnyOrder(putRequest(FAKE_ITEM_MAPS.get(3)), + deleteRequest(FAKE_ITEM_MAPS.get(1)), + putRequest(FAKE_ITEM_MAPS.get(5)))); + assertThat(writeRequests2, containsInAnyOrder(deleteRequest(FAKESORT_ITEM_MAPS.get(0)), + putRequest(FAKESORT_ITEM_MAPS.get(4)), + deleteRequest(FAKESORT_ITEM_MAPS.get(2)))); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_extensionTriesToAddConditionalToPutItem() { + Expression expression = Expression.builder().expression("test-expression").build(); + + doReturn(WriteModification.builder().additionalConditionalExpression(expression).build()) + .when(mockExtension) + .beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class)); + + BatchWriteItemEnhancedRequest batchWriteItemEnhancedRequest = + BatchWriteItemEnhancedRequest.builder() + .writeBatches( + WriteBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTableWithExtension) + .addPutItem(r -> r.item(FAKE_ITEMS.get(0))) + .addDeleteItem(r -> r.key(FAKE_ITEM_KEYS.get(1))) + .addPutItem(r -> r.item(FAKE_ITEMS.get(2))) + .build(), + WriteBatch.builder(FakeItemWithSort.class) + .mappedTableResource(fakeItemWithSortMappedTableWithExtension) + .addDeleteItem(r -> r.key(FAKESORT_ITEM_KEYS.get(0))) + .addPutItem(r -> r.item(FAKESORT_ITEMS.get(1))) + .addDeleteItem(r -> r.key(FAKESORT_ITEM_KEYS.get(2))) + .build()) + .build(); + + BatchWriteItemOperation operation = BatchWriteItemOperation.create(batchWriteItemEnhancedRequest); + + operation.generateRequest(mockExtension); + } + + @Test + public void transformResults_multipleUnprocessedOperations() { + BatchWriteItemOperation operation = BatchWriteItemOperation.create(emptyRequest()); + + List writeRequests1 = Arrays.asList(putRequest(FAKE_ITEM_MAPS.get(0)), + deleteRequest(FAKE_ITEM_MAPS.get(1)), + deleteRequest(FAKE_ITEM_MAPS.get(2))); + List writeRequests2 = Arrays.asList(deleteRequest(FAKESORT_ITEM_MAPS.get(0)), + putRequest(FAKESORT_ITEM_MAPS.get(1)), + putRequest(FAKESORT_ITEM_MAPS.get(2))); + Map> writeRequests = new HashMap<>(); + writeRequests.put(TABLE_NAME, writeRequests1); + writeRequests.put(TABLE_NAME_2, writeRequests2); + BatchWriteItemResponse response = + BatchWriteItemResponse.builder() + .unprocessedItems(writeRequests) + .build(); + + BatchWriteResult results = operation.transformResponse(response, mockExtension); + + assertThat(results.unprocessedDeleteItemsForTable(fakeItemMappedTableWithExtension), + containsInAnyOrder(FAKE_ITEM_KEYS.get(1), FAKE_ITEM_KEYS.get(2))); + assertThat(results.unprocessedPutItemsForTable(fakeItemMappedTableWithExtension), + containsInAnyOrder(FAKE_ITEMS.get(0))); + assertThat(results.unprocessedDeleteItemsForTable(fakeItemWithSortMappedTableWithExtension), + containsInAnyOrder(FAKESORT_ITEM_KEYS.get(0))); + assertThat(results.unprocessedPutItemsForTable(fakeItemWithSortMappedTableWithExtension), + containsInAnyOrder(FAKESORT_ITEMS.get(1), FAKESORT_ITEMS.get(2))); + } + + @Test + public void transformResults_multipleUnprocessedOperations_extensionTransformsPutsNotDeletes() { + BatchWriteItemOperation operation = BatchWriteItemOperation.create(emptyRequest()); + + List writeRequests1 = 
Arrays.asList(putRequest(FAKE_ITEM_MAPS.get(0)), + deleteRequest(FAKE_ITEM_MAPS.get(1)), + deleteRequest(FAKE_ITEM_MAPS.get(2))); + List writeRequests2 = Arrays.asList(deleteRequest(FAKESORT_ITEM_MAPS.get(0)), + putRequest(FAKESORT_ITEM_MAPS.get(1)), + putRequest(FAKESORT_ITEM_MAPS.get(2))); + Map> writeRequests = new HashMap<>(); + writeRequests.put(TABLE_NAME, writeRequests1); + writeRequests.put(TABLE_NAME_2, writeRequests2); + BatchWriteItemResponse response = + BatchWriteItemResponse.builder() + .unprocessedItems(writeRequests) + .build(); + + // Use the mock extension to transform every item based on table name + IntStream.range(0, 3).forEach(i -> { + doReturn(ReadModification.builder().transformedItem(FAKE_ITEM_MAPS.get(i + 3)).build()) + .when(mockExtension) + .afterRead( + argThat(extensionContext -> + extensionContext.operationContext().tableName().equals(TABLE_NAME) && + extensionContext.items().equals(FAKE_ITEM_MAPS.get(i)) + )); + doReturn(ReadModification.builder().transformedItem(FAKESORT_ITEM_MAPS.get(i + 3)).build()) + .when(mockExtension) + .afterRead(argThat(extensionContext -> + extensionContext.operationContext().tableName().equals(TABLE_NAME_2) && + extensionContext.items().equals(FAKESORT_ITEM_MAPS.get(i)) + )); + }); + + BatchWriteResult results = operation.transformResponse(response, mockExtension); + + assertThat(results.unprocessedDeleteItemsForTable(fakeItemMappedTableWithExtension), + containsInAnyOrder(FAKE_ITEM_KEYS.get(1), FAKE_ITEM_KEYS.get(2))); + assertThat(results.unprocessedPutItemsForTable(fakeItemMappedTableWithExtension), + containsInAnyOrder(FAKE_ITEMS.get(3))); + assertThat(results.unprocessedDeleteItemsForTable(fakeItemWithSortMappedTableWithExtension), + containsInAnyOrder(FAKESORT_ITEM_KEYS.get(0))); + assertThat(results.unprocessedPutItemsForTable(fakeItemWithSortMappedTableWithExtension), + containsInAnyOrder(FAKESORT_ITEMS.get(4), FAKESORT_ITEMS.get(5))); + } + + @Test + public void transformResults_noUnprocessedOperations() { + BatchWriteItemOperation operation = BatchWriteItemOperation.create(emptyRequest()); + + BatchWriteItemResponse response = + BatchWriteItemResponse.builder() + .unprocessedItems(emptyMap()) + .build(); + + BatchWriteResult results = operation.transformResponse(response, mockExtension); + + assertThat(results.unprocessedDeleteItemsForTable(fakeItemMappedTable), is(emptyList())); + assertThat(results.unprocessedPutItemsForTable(fakeItemMappedTable), is(emptyList())); + } + + + private static BatchWriteItemEnhancedRequest emptyRequest() { + return BatchWriteItemEnhancedRequest.builder().writeBatches().build(); + } + + private static WriteRequest putRequest(Map itemMap) { + return WriteRequest.builder().putRequest(PutRequest.builder().item(itemMap).build()).build(); + } + + private static WriteRequest deleteRequest(Map itemMap) { + return WriteRequest.builder().deleteRequest(DeleteRequest.builder().key(itemMap).build()).build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperationTest.java new file mode 100644 index 000000000000..0ccadc5c98ad --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperationTest.java @@ -0,0 +1,80 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb.internal.operations;
+
+import static org.hamcrest.Matchers.is;
+import static org.junit.Assert.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Spy;
+import org.mockito.junit.MockitoJUnitRunner;
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension;
+import software.amazon.awssdk.enhanced.dynamodb.OperationContext;
+import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+
+@RunWith(MockitoJUnitRunner.class)
+public class CommonOperationTest {
+
+    private static final String FAKE_REQUEST = "fake-request";
+    private static final String FAKE_RESPONSE = "fake-response";
+    private static final String FAKE_RESULT = "fake-result";
+    private static final String FAKE_TABLE_NAME = "fake-table-name";
+    private static final String FAKE_INDEX_NAME = "fake-index-name";
+
+    @Mock
+    private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension;
+
+    @Mock
+    private DynamoDbClient mockDynamoDbClient;
+
+    @Spy
+    private CommonOperation<FakeItem, String, String, String> spyCommonOperation;
+
+    @Before
+    public void stubSpy() {
+        when(spyCommonOperation.generateRequest(any(), any(), any())).thenReturn(FAKE_REQUEST);
+        when(spyCommonOperation.serviceCall(any())).thenReturn(s -> {
+            if (!FAKE_REQUEST.equals(s)) {
+                throw new RuntimeException("Did not receive expected request");
+            }
+
+            return FAKE_RESPONSE;
+        });
+        when(spyCommonOperation.transformResponse(any(), any(), any(), any())).thenReturn(FAKE_RESULT);
+    }
+
+    @Test
+    public void execute_defaultImplementation_behavesCorrectlyAndReturnsCorrectResult() {
+        OperationContext operationContext = DefaultOperationContext.create(FAKE_TABLE_NAME, FAKE_INDEX_NAME);
+        String result = spyCommonOperation.execute(FakeItem.getTableSchema(),
+                                                   operationContext,
+                                                   mockDynamoDbEnhancedClientExtension,
+                                                   mockDynamoDbClient);
+
+        assertThat(result, is(FAKE_RESULT));
+        verify(spyCommonOperation).generateRequest(FakeItem.getTableSchema(), operationContext, mockDynamoDbEnhancedClientExtension);
+        verify(spyCommonOperation).serviceCall(mockDynamoDbClient);
+        verify(spyCommonOperation).transformResponse(FAKE_RESPONSE, FakeItem.getTableSchema(), operationContext,
+                                                     mockDynamoDbEnhancedClientExtension);
+    }
+}
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ConditionCheckTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ConditionCheckTest.java
new file mode 100644
index 000000000000..1c56f20d19fd
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ConditionCheckTest.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb.internal.operations;
+
+import static java.util.Collections.singletonMap;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue;
+
+import java.util.Map;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension;
+import software.amazon.awssdk.enhanced.dynamodb.Expression;
+import software.amazon.awssdk.enhanced.dynamodb.OperationContext;
+import software.amazon.awssdk.enhanced.dynamodb.TableMetadata;
+import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem;
+import software.amazon.awssdk.enhanced.dynamodb.model.ConditionCheck;
+import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
+import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem;
+
+@RunWith(MockitoJUnitRunner.class)
+public class ConditionCheckTest {
+    @Mock
+    private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension;
+
+    @Test
+    public void generateTransactWriteItem() {
+        FakeItem fakeItem = FakeItem.createUniqueFakeItem();
+        Map<String, AttributeValue> keyMap = singletonMap("id", stringValue(fakeItem.getId()));
+        Expression conditionExpression = Expression.builder()
+                                                   .expression("expression")
+                                                   .expressionNames(singletonMap("key1", "value1"))
+                                                   .expressionValues(singletonMap("key2", stringValue("value2")))
+                                                   .build();
+        ConditionCheck<FakeItem> operation =
+            ConditionCheck.builder()
+                          .key(k -> k.partitionValue(fakeItem.getId()))
+                          .conditionExpression(conditionExpression)
+                          .build();
+        OperationContext context = DefaultOperationContext.create("table-name", TableMetadata.primaryIndexName());
+
+        TransactWriteItem result = operation.generateTransactWriteItem(FakeItem.getTableSchema(), context,
+                                                                       mockDynamoDbEnhancedClientExtension);
+
+        TransactWriteItem expectedResult =
+            TransactWriteItem.builder()
+                             .conditionCheck(
+                                 software.amazon.awssdk.services.dynamodb.model.ConditionCheck
+                                     .builder()
+                                     .tableName("table-name")
+                                     .key(keyMap)
+                                     .conditionExpression(conditionExpression.expression())
+                                     .expressionAttributeValues(conditionExpression.expressionValues())
+                                     .expressionAttributeNames(conditionExpression.expressionNames())
+                                     .build())
+                             .build();
+        assertThat(result, is(expectedResult));
+        verifyZeroInteractions(mockDynamoDbEnhancedClientExtension);
+    }
+}
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperationTest.java
new file mode 100644
index 000000000000..08cd1f478356
--- /dev/null
+++
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperationTest.java @@ -0,0 +1,418 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.same; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.services.dynamodb.model.KeyType.HASH; +import static software.amazon.awssdk.services.dynamodb.model.KeyType.RANGE; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.hamcrest.Description; +import org.hamcrest.TypeSafeMatcher; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithBinaryKey; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithNumericSort; +import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; +import software.amazon.awssdk.services.dynamodb.model.BillingMode; +import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; +import software.amazon.awssdk.services.dynamodb.model.CreateTableResponse; +import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; +import software.amazon.awssdk.services.dynamodb.model.Projection; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; +import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; +import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; + +@RunWith(MockitoJUnitRunner.class) +public class CreateTableOperationTest { + private static final String TABLE_NAME = "table-name"; + private static final OperationContext PRIMARY_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + private static final OperationContext 
GSI_1_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); + + private static MatchedGsi matchesGsi(software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex other) { + return new MatchedGsi(other); + } + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private static class MatchedGsi + extends TypeSafeMatcher { + + private final software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex other; + + private MatchedGsi(software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex other) { + this.other = other; + } + + @Override + protected boolean matchesSafely(software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex globalSecondaryIndex) { + if (!other.indexName().equals(globalSecondaryIndex.indexName())) { + return false; + } + + if ((other.projection() != null && !other.projection().equals(globalSecondaryIndex.projection())) || + (other.projection() == null && globalSecondaryIndex.projection() != null)) { + return false; + } + + return containsInAnyOrder(other.keySchema().toArray(new KeySchemaElement[]{})) + .matches(globalSecondaryIndex.keySchema()); + } + + @Override + public void describeTo(Description description) { + description.appendText("a GlobalSecondaryIndex equivalent to [" + other.toString() + "]"); + } + } + + @Test + public void generateRequest_withLsiAndGsi() { + Projection projection1 = Projection.builder().projectionType(ProjectionType.ALL).build(); + Projection projection2 = Projection.builder().projectionType(ProjectionType.KEYS_ONLY).build(); + Projection projection3 = Projection.builder() + .projectionType(ProjectionType.INCLUDE) + .nonKeyAttributes("key1", "key2") + .build(); + ProvisionedThroughput provisionedThroughput1 = ProvisionedThroughput.builder() + .readCapacityUnits(1L) + .writeCapacityUnits(2L) + .build(); + ProvisionedThroughput provisionedThroughput2 = ProvisionedThroughput.builder() + .readCapacityUnits(3L) + .writeCapacityUnits(4L) + .build(); + + + List globalSecondaryIndexList = Arrays.asList( + EnhancedGlobalSecondaryIndex.builder() + .indexName("gsi_1") + .projection(projection1) + .provisionedThroughput(provisionedThroughput1) + .build(), + EnhancedGlobalSecondaryIndex.builder() + .indexName("gsi_2") + .projection(projection2) + .provisionedThroughput(provisionedThroughput2) + .build()); + + CreateTableOperation operation = + CreateTableOperation.create(CreateTableEnhancedRequest.builder() + .globalSecondaryIndices(globalSecondaryIndexList) + .localSecondaryIndices(Collections.singletonList( + EnhancedLocalSecondaryIndex.create("lsi_1", projection3))) + .build()); + + CreateTableRequest request = operation.generateRequest(FakeItemWithIndices.getTableSchema(), + PRIMARY_CONTEXT, + null); + + + + assertThat(request.tableName(), is(TABLE_NAME)); + assertThat(request.keySchema(), containsInAnyOrder(KeySchemaElement.builder() + .attributeName("id") + .keyType(HASH) + .build(), + KeySchemaElement.builder() + .attributeName("sort") + .keyType(RANGE) + .build())); + software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex expectedGsi1 = + software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex.builder() + .indexName("gsi_1") + .keySchema(KeySchemaElement.builder() + .attributeName("gsi_id") + .keyType(HASH) + .build(), + KeySchemaElement.builder() + .attributeName("gsi_sort") + .keyType(RANGE) + .build()) + .projection(projection1) + .provisionedThroughput(provisionedThroughput1) + .build(); + software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex expectedGsi2 = + 
software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex.builder() + .indexName("gsi_2") + .keySchema(KeySchemaElement.builder() + .attributeName("gsi_id") + .keyType(HASH) + .build()) + .projection(projection2) + .provisionedThroughput(provisionedThroughput2) + .build(); + assertThat(request.globalSecondaryIndexes(), containsInAnyOrder(matchesGsi(expectedGsi1), + matchesGsi(expectedGsi2))); + software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex expectedLsi = + software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex.builder() + .indexName("lsi_1") + .keySchema(KeySchemaElement.builder() + .attributeName("id") + .keyType(HASH) + .build(), + KeySchemaElement.builder() + .attributeName("lsi_sort") + .keyType(RANGE) + .build()) + .projection(projection3) + .build(); + assertThat(request.localSecondaryIndexes(), containsInAnyOrder(expectedLsi)); + assertThat(request.attributeDefinitions(), containsInAnyOrder( + AttributeDefinition.builder() + .attributeName("id") + .attributeType(ScalarAttributeType.S) + .build(), + AttributeDefinition.builder() + .attributeName("sort") + .attributeType(ScalarAttributeType.S) + .build(), + AttributeDefinition.builder() + .attributeName("lsi_sort") + .attributeType(ScalarAttributeType.S) + .build(), + AttributeDefinition.builder() + .attributeName("gsi_id") + .attributeType(ScalarAttributeType.S) + .build(), + AttributeDefinition.builder() + .attributeName("gsi_sort") + .attributeType(ScalarAttributeType.S) + .build())); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_invalidGsi() { + ProvisionedThroughput provisionedThroughput = ProvisionedThroughput.builder() + .readCapacityUnits(1L) + .writeCapacityUnits(1L) + .build(); + + List invalidGsiList = Collections.singletonList( + EnhancedGlobalSecondaryIndex.builder() + .indexName("invalid") + .projection(p -> p.projectionType(ProjectionType.ALL)) + .provisionedThroughput(provisionedThroughput) + .build()); + + CreateTableOperation operation = + CreateTableOperation.create(CreateTableEnhancedRequest.builder().globalSecondaryIndices(invalidGsiList).build()); + + operation.generateRequest(FakeItem.getTableSchema(), PRIMARY_CONTEXT, null); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_invalidGsiAsLsiReference() { + List invalidGsiList = Collections.singletonList( + EnhancedLocalSecondaryIndex.create("gsi_1", Projection.builder().projectionType(ProjectionType.ALL).build())); + + CreateTableOperation operation = + CreateTableOperation.create(CreateTableEnhancedRequest.builder().localSecondaryIndices(invalidGsiList).build()); + + operation.generateRequest(FakeItemWithIndices.getTableSchema(), PRIMARY_CONTEXT, null); + } + + @Test + public void generateRequest_validLsiAsGsiReference() { + List validLsiList = Collections.singletonList( + EnhancedGlobalSecondaryIndex.builder() + .indexName("lsi_1") + .projection(p -> p.projectionType(ProjectionType.ALL)) + .provisionedThroughput(p -> p.readCapacityUnits(1L).writeCapacityUnits(1L)) + .build()); + + CreateTableOperation operation = + CreateTableOperation.create(CreateTableEnhancedRequest.builder().globalSecondaryIndices(validLsiList).build()); + + CreateTableRequest request = operation.generateRequest(FakeItemWithIndices.getTableSchema(), PRIMARY_CONTEXT, null); + + assertThat(request.globalSecondaryIndexes().size(), is(1)); + software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex globalSecondaryIndex = + request.globalSecondaryIndexes().get(0); + + 
assertThat(globalSecondaryIndex.indexName(), is("lsi_1")); + } + + @Test + public void generateRequest_nonReferencedIndicesDoNotCreateExtraAttributeDefinitions() { + CreateTableOperation operation = CreateTableOperation.create(CreateTableEnhancedRequest.builder().build()); + + CreateTableRequest request = operation.generateRequest(FakeItemWithIndices.getTableSchema(), + PRIMARY_CONTEXT, null); + + AttributeDefinition attributeDefinition1 = AttributeDefinition.builder() + .attributeName("id") + .attributeType(ScalarAttributeType.S) + .build(); + AttributeDefinition attributeDefinition2 = AttributeDefinition.builder() + .attributeName("sort") + .attributeType(ScalarAttributeType.S) + .build(); + + assertThat(request.attributeDefinitions(), containsInAnyOrder(attributeDefinition1, attributeDefinition2)); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_invalidLsi() { + List invalidLsiList = Collections.singletonList( + EnhancedLocalSecondaryIndex.create("invalid", Projection.builder().projectionType(ProjectionType.ALL).build())); + + CreateTableOperation operation = + CreateTableOperation.create(CreateTableEnhancedRequest.builder().localSecondaryIndices(invalidLsiList).build()); + + operation.generateRequest(FakeItem.getTableSchema(), PRIMARY_CONTEXT, null); + } + + @Test + public void generateRequest_withProvisionedThroughput() { + ProvisionedThroughput provisionedThroughput = ProvisionedThroughput.builder() + .writeCapacityUnits(1L) + .readCapacityUnits(2L) + .build(); + + CreateTableOperation operation = CreateTableOperation.create( + CreateTableEnhancedRequest.builder().provisionedThroughput(provisionedThroughput).build()); + + CreateTableRequest request = operation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request.billingMode(), is(BillingMode.PROVISIONED)); + assertThat(request.provisionedThroughput(), is(provisionedThroughput)); + } + + @Test + public void generateRequest_withNoProvisionedThroughput() { + CreateTableOperation operation = CreateTableOperation.create(CreateTableEnhancedRequest.builder().build()); + + CreateTableRequest request = operation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request.billingMode(), is(BillingMode.PAY_PER_REQUEST)); + } + + + @Test + public void generateRequest_withNumericKey() { + CreateTableOperation operation = CreateTableOperation.create(CreateTableEnhancedRequest.builder() + .build()); + + CreateTableRequest request = operation.generateRequest(FakeItemWithNumericSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request.tableName(), is(TABLE_NAME)); + assertThat(request.keySchema(), containsInAnyOrder(KeySchemaElement.builder() + .attributeName("id") + .keyType(HASH) + .build(), + KeySchemaElement.builder() + .attributeName("sort") + .keyType(RANGE) + .build())); + + assertThat(request.globalSecondaryIndexes(), is(DefaultSdkAutoConstructList.getInstance())); + assertThat(request.localSecondaryIndexes(), is(DefaultSdkAutoConstructList.getInstance())); + + assertThat(request.attributeDefinitions(), containsInAnyOrder( + AttributeDefinition.builder() + .attributeName("id") + .attributeType(ScalarAttributeType.S) + .build(), + AttributeDefinition.builder() + .attributeName("sort") + .attributeType(ScalarAttributeType.N) + .build())); + } + + @Test + public void generateRequest_withBinaryKey() { + CreateTableOperation operation = CreateTableOperation.create(CreateTableEnhancedRequest.builder() + .build()); + + 
CreateTableRequest request = operation.generateRequest(FakeItemWithBinaryKey.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request.tableName(), is(TABLE_NAME)); + assertThat(request.keySchema(), containsInAnyOrder(KeySchemaElement.builder() + .attributeName("id") + .keyType(HASH) + .build())); + + assertThat(request.globalSecondaryIndexes(), is(empty())); + assertThat(request.localSecondaryIndexes(), is(empty())); + + assertThat(request.attributeDefinitions(), containsInAnyOrder( + AttributeDefinition.builder() + .attributeName("id") + .attributeType(ScalarAttributeType.B) + .build())); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_doesNotWorkForIndex() { + CreateTableOperation operation = CreateTableOperation.create(CreateTableEnhancedRequest.builder() + .build()); + + operation.generateRequest(FakeItemWithIndices.getTableSchema(), GSI_1_CONTEXT, null); + } + + @Test + public void getServiceCall_makesTheRightCallAndReturnsResponse() { + CreateTableOperation operation = CreateTableOperation.create(CreateTableEnhancedRequest.builder().build()); + CreateTableRequest createTableRequest = CreateTableRequest.builder().build(); + CreateTableResponse expectedResponse = CreateTableResponse.builder().build(); + when(mockDynamoDbClient.createTable(any(CreateTableRequest.class))).thenReturn(expectedResponse); + + CreateTableResponse actualResponse = operation.serviceCall(mockDynamoDbClient).apply(createTableRequest); + + assertThat(actualResponse, sameInstance(expectedResponse)); + verify(mockDynamoDbClient).createTable(same(createTableRequest)); + } + + @Test + public void transformResults_doesNothing() { + CreateTableOperation operation = CreateTableOperation.create(CreateTableEnhancedRequest.builder().build()); + CreateTableResponse response = CreateTableResponse.builder().build(); + + operation.transformResponse(response, FakeItem.getTableSchema(), PRIMARY_CONTEXT, null); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContextTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContextTest.java new file mode 100644 index 000000000000..8c37782e588e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContextTest.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; + +import org.hamcrest.Matchers; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; + +public class DefaultOperationContextTest { + @Test + public void createWithTableNameAndIndexName() { + DefaultOperationContext context = DefaultOperationContext.create("table_name", "index_name"); + + assertThat(context.tableName(), is("table_name")); + assertThat(context.indexName(), is("index_name")); + } + + @Test + public void createWithTableName() { + DefaultOperationContext context = DefaultOperationContext.create("table_name"); + + assertThat(context.tableName(), is("table_name")); + assertThat(context.indexName(), Matchers.is(TableMetadata.primaryIndexName())); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperationTest.java new file mode 100644 index 000000000000..4c990587bce8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperationTest.java @@ -0,0 +1,354 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithSort; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemComposedClass; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.Delete; +import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; +import software.amazon.awssdk.services.dynamodb.model.DeleteItemResponse; +import software.amazon.awssdk.services.dynamodb.model.ReturnValue; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; + +@RunWith(MockitoJUnitRunner.class) +public class DeleteItemOperationTest { + private static final String TABLE_NAME = "table-name"; + private static final OperationContext PRIMARY_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + private static final OperationContext GSI_1_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); + private static final Expression CONDITION_EXPRESSION; + private static final Expression MINIMAL_CONDITION_EXPRESSION = Expression.builder().expression("foo = bar").build(); + + static { + Map expressionNames = new HashMap<>(); + expressionNames.put("#test_field_1", "test_field_1"); + expressionNames.put("#test_field_2", "test_field_2"); + Map expressionValues = new HashMap<>(); + expressionValues.put(":test_value_1", numberValue(1)); + expressionValues.put(":test_value_2", numberValue(2)); + CONDITION_EXPRESSION = 
Expression.builder() + .expression("#test_field_1 = :test_value_1 OR #test_field_2 = :test_value_2") + .expressionNames(Collections.unmodifiableMap(expressionNames)) + .expressionValues(Collections.unmodifiableMap(expressionValues)) + .build(); + } + + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Test + public void getServiceCall_makesTheRightCallAndReturnsResponse() { + FakeItem keyItem = createUniqueFakeItem(); + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder().key(k -> k.partitionValue(keyItem.getId())).build()); + DeleteItemRequest deleteItemRequest = DeleteItemRequest.builder().tableName(TABLE_NAME).build(); + DeleteItemResponse expectedResponse = DeleteItemResponse.builder().build(); + when(mockDynamoDbClient.deleteItem(any(DeleteItemRequest.class))).thenReturn(expectedResponse); + + DeleteItemResponse response = deleteItemOperation.serviceCall(mockDynamoDbClient).apply(deleteItemRequest); + + assertThat(response, sameInstance(expectedResponse)); + verify(mockDynamoDbClient).deleteItem(deleteItemRequest); + } + + @Test + public void generateRequest_partitionKeyOnly() { + FakeItem keyItem = createUniqueFakeItem(); + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder().key(k -> k.partitionValue(keyItem.getId())).build()); + + DeleteItemRequest request = deleteItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + Map expectedKeyMap = new HashMap<>(); + expectedKeyMap.put("id", AttributeValue.builder().s(keyItem.getId()).build()); + DeleteItemRequest expectedRequest = DeleteItemRequest.builder() + .tableName(TABLE_NAME) + .key(expectedKeyMap) + .returnValues(ReturnValue.ALL_OLD) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_partitionAndSortKey() { + FakeItemWithSort keyItem = createUniqueFakeItemWithSort(); + DeleteItemOperation deleteItemOperation = DeleteItemOperation.create( + DeleteItemEnhancedRequest.builder() + .key(k -> k.partitionValue(keyItem.getId()).sortValue(keyItem.getSort())) + .build()); + + DeleteItemRequest request = deleteItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + Map expectedKeyMap = new HashMap<>(); + expectedKeyMap.put("id", AttributeValue.builder().s(keyItem.getId()).build()); + expectedKeyMap.put("sort", AttributeValue.builder().s(keyItem.getSort()).build()); + DeleteItemRequest expectedRequest = DeleteItemRequest.builder() + .tableName(TABLE_NAME) + .key(expectedKeyMap) + .returnValues(ReturnValue.ALL_OLD) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_withConditionExpression() { + FakeItem keyItem = createUniqueFakeItem(); + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder() + .key(k -> k.partitionValue(keyItem.getId())) + .conditionExpression(CONDITION_EXPRESSION) + .build()); + + DeleteItemRequest request = deleteItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request.conditionExpression(), is(CONDITION_EXPRESSION.expression())); + assertThat(request.expressionAttributeNames(), is(CONDITION_EXPRESSION.expressionNames())); + assertThat(request.expressionAttributeValues(), is(CONDITION_EXPRESSION.expressionValues())); + } + + @Test + public void 
generateRequest_withMinimalConditionExpression() { + FakeItem keyItem = createUniqueFakeItem(); + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder() + .key(k -> k.partitionValue(keyItem.getId())) + .conditionExpression(MINIMAL_CONDITION_EXPRESSION) + .build()); + + DeleteItemRequest request = deleteItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request.conditionExpression(), is(MINIMAL_CONDITION_EXPRESSION.expression())); + assertThat(request.expressionAttributeNames(), is(emptyMap())); + assertThat(request.expressionAttributeValues(), is(emptyMap())); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_noPartitionKey_throwsIllegalArgumentException() { + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder().key(k -> k.partitionValue("whatever")).build()); + + deleteItemOperation.generateRequest(FakeItemComposedClass.getTableSchema(), PRIMARY_CONTEXT, null); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_withIndex_throwsIllegalArgumentException() { + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder().key(k -> k.partitionValue("whatever")).build()); + + deleteItemOperation.generateRequest(FakeItem.getTableSchema(), GSI_1_CONTEXT, null); + } + + @Test + public void transformResponse_correctlyTransformsIntoAnItem() { + FakeItem keyItem = createUniqueFakeItem(); + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder().key(k -> k.partitionValue(keyItem.getId())).build()); + Map responseMap = new HashMap<>(); + responseMap.put("id", AttributeValue.builder().s(keyItem.getId()).build()); + responseMap.put("subclass_attribute", AttributeValue.builder().s("test-value").build()); + DeleteItemResponse response = DeleteItemResponse.builder() + .attributes(responseMap) + .build(); + + FakeItem result = deleteItemOperation.transformResponse(response, + FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(result.getId(), is(keyItem.getId())); + assertThat(result.getSubclassAttribute(), is("test-value")); + } + + @Test + public void transformResponse_noResults_returnsNull() { + FakeItem keyItem = createUniqueFakeItem(); + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder().key(k -> k.partitionValue(keyItem.getId())).build()); + DeleteItemResponse response = DeleteItemResponse.builder() + .build(); + + FakeItem result = deleteItemOperation.transformResponse(response, + FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(result, is(nullValue())); + } + + @Test + public void generateRequest_withExtension_doesNotModifyKey() { + FakeItem baseFakeItem = createUniqueFakeItem(); + Map keyMap = FakeItem.getTableSchema().itemToMap(baseFakeItem, singletonList("id")); + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder() + .key(k -> k.partitionValue(baseFakeItem.getId())) + .build()); + + + DeleteItemRequest request = deleteItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(request.key(), is(keyMap)); + verify(mockDynamoDbEnhancedClientExtension, never()).beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class)); + } + + @Test + public void 
transformResponse_withExtension_appliesItemModification() { + FakeItem baseFakeItem = createUniqueFakeItem(); + FakeItem fakeItem = createUniqueFakeItem(); + Map baseFakeItemMap = FakeItem.getTableSchema().itemToMap(baseFakeItem, false); + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, false); + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder() + .key(k -> k.partitionValue(baseFakeItem.getId())) + .build()); + + DeleteItemResponse response = DeleteItemResponse.builder() + .attributes(baseFakeItemMap) + .build(); + when(mockDynamoDbEnhancedClientExtension.afterRead(any(DynamoDbExtensionContext.AfterRead.class))) + .thenReturn(ReadModification.builder().transformedItem(fakeItemMap).build()); + + FakeItem resultItem = deleteItemOperation.transformResponse(response, + FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(resultItem, is(fakeItem)); + verify(mockDynamoDbEnhancedClientExtension).afterRead(DefaultDynamoDbExtensionContext.builder() + .tableMetadata(FakeItem.getTableMetadata()) + .operationContext(PRIMARY_CONTEXT) + .items(baseFakeItemMap).build()); + } + + @Test + public void generateTransactWriteItem_basicRequest() { + FakeItem fakeItem = createUniqueFakeItem(); + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + DeleteItemOperation deleteItemOperation = + spy(DeleteItemOperation.create(DeleteItemEnhancedRequest.builder() + .key(k -> k.partitionValue(fakeItem.getId())) + .build())); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + + DeleteItemRequest deleteItemRequest = DeleteItemRequest.builder() + .tableName(TABLE_NAME) + .key(fakeItemMap) + .build(); + doReturn(deleteItemRequest).when(deleteItemOperation).generateRequest(any(), any(), any()); + + TransactWriteItem actualResult = deleteItemOperation.generateTransactWriteItem(FakeItem.getTableSchema(), + context, + mockDynamoDbEnhancedClientExtension); + + TransactWriteItem expectedResult = TransactWriteItem.builder() + .delete(Delete.builder() + .key(fakeItemMap) + .tableName(TABLE_NAME) + .build()) + .build(); + assertThat(actualResult, is(expectedResult)); + verify(deleteItemOperation).generateRequest(FakeItem.getTableSchema(), context, mockDynamoDbEnhancedClientExtension); + } + + @Test + public void generateTransactWriteItem_conditionalRequest() { + FakeItem fakeItem = createUniqueFakeItem(); + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + DeleteItemOperation deleteItemOperation = + spy(DeleteItemOperation.create(DeleteItemEnhancedRequest.builder() + .key(k -> k.partitionValue(fakeItem.getId())) + .build())); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + + String conditionExpression = "condition-expression"; + Map attributeValues = Collections.singletonMap("key", stringValue("value1")); + Map attributeNames = Collections.singletonMap("key", "value2"); + + DeleteItemRequest deleteItemRequest = DeleteItemRequest.builder() + .tableName(TABLE_NAME) + .key(fakeItemMap) + .conditionExpression(conditionExpression) + .expressionAttributeValues(attributeValues) + .expressionAttributeNames(attributeNames) + .build(); + doReturn(deleteItemRequest).when(deleteItemOperation).generateRequest(any(), any(), any()); + + TransactWriteItem actualResult = deleteItemOperation.generateTransactWriteItem(FakeItem.getTableSchema(), + context, + 
mockDynamoDbEnhancedClientExtension); + + TransactWriteItem expectedResult = TransactWriteItem.builder() + .delete(Delete.builder() + .key(fakeItemMap) + .tableName(TABLE_NAME) + .conditionExpression(conditionExpression) + .expressionAttributeNames(attributeNames) + .expressionAttributeValues(attributeValues) + .build()) + .build(); + assertThat(actualResult, is(expectedResult)); + verify(deleteItemOperation).generateRequest(FakeItem.getTableSchema(), context, mockDynamoDbEnhancedClientExtension); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperationTest.java new file mode 100644 index 000000000000..e20c1f37966e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperationTest.java @@ -0,0 +1,231 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static java.util.Collections.singletonList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithSort; + +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemComposedClass; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; +import 
software.amazon.awssdk.services.dynamodb.model.GetItemResponse; + +@RunWith(MockitoJUnitRunner.class) +public class GetItemOperationTest { + private static final String TABLE_NAME = "table-name"; + private static final OperationContext PRIMARY_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + private static final OperationContext GSI_1_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); + + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Test + public void getServiceCall_makesTheRightCallAndReturnsResponse() { + FakeItem keyItem = createUniqueFakeItem(); + GetItemOperation getItemOperation = + GetItemOperation.create(GetItemEnhancedRequest.builder().key(k -> k.partitionValue(keyItem.getId())).build()); + GetItemRequest getItemRequest = GetItemRequest.builder().tableName(TABLE_NAME).build(); + GetItemResponse expectedResponse = GetItemResponse.builder().build(); + when(mockDynamoDbClient.getItem(any(GetItemRequest.class))).thenReturn(expectedResponse); + + GetItemResponse response = getItemOperation.serviceCall(mockDynamoDbClient).apply(getItemRequest); + + assertThat(response, sameInstance(expectedResponse)); + verify(mockDynamoDbClient).getItem(getItemRequest); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_withIndex_throwsIllegalArgumentException() { + FakeItem keyItem = createUniqueFakeItem(); + GetItemOperation getItemOperation = + GetItemOperation.create(GetItemEnhancedRequest.builder().key(k -> k.partitionValue(keyItem.getId())).build()); + + getItemOperation.generateRequest(FakeItem.getTableSchema(), GSI_1_CONTEXT, null); + } + + @Test + public void generateRequest_consistentRead() { + FakeItem keyItem = createUniqueFakeItem(); + GetItemOperation getItemOperation = + GetItemOperation.create(GetItemEnhancedRequest.builder() + .key(k -> k.partitionValue(keyItem.getId())) + .consistentRead(true).build()); + + GetItemRequest request = getItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + Map expectedKeyMap = new HashMap<>(); + expectedKeyMap.put("id", AttributeValue.builder().s(keyItem.getId()).build()); + GetItemRequest expectedRequest = GetItemRequest.builder() + .tableName(TABLE_NAME) + .key(expectedKeyMap) + .consistentRead(true) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_partitionKeyOnly() { + FakeItem keyItem = createUniqueFakeItem(); + GetItemOperation getItemOperation = + GetItemOperation.create(GetItemEnhancedRequest.builder().key(k -> k.partitionValue(keyItem.getId())).build()); + + GetItemRequest request = getItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + Map expectedKeyMap = new HashMap<>(); + expectedKeyMap.put("id", AttributeValue.builder().s(keyItem.getId()).build()); + GetItemRequest expectedRequest = GetItemRequest.builder() + .tableName(TABLE_NAME) + .key(expectedKeyMap) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_partitionAndSortKey() { + FakeItemWithSort keyItem = createUniqueFakeItemWithSort(); + GetItemOperation getItemOperation = + GetItemOperation.create(GetItemEnhancedRequest.builder() + .key(k -> k.partitionValue(keyItem.getId()) + .sortValue(keyItem.getSort())) + .build()); + + GetItemRequest request = getItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + 
PRIMARY_CONTEXT, + null); + + Map expectedKeyMap = new HashMap<>(); + expectedKeyMap.put("id", AttributeValue.builder().s(keyItem.getId()).build()); + expectedKeyMap.put("sort", AttributeValue.builder().s(keyItem.getSort()).build()); + GetItemRequest expectedRequest = GetItemRequest.builder() + .tableName(TABLE_NAME) + .key(expectedKeyMap) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_noPartitionKey_throwsIllegalArgumentException() { + GetItemOperation getItemOperation = + GetItemOperation.create(GetItemEnhancedRequest.builder().key(k -> k.partitionValue("whatever")).build()); + + getItemOperation.generateRequest(FakeItemComposedClass.getTableSchema(), PRIMARY_CONTEXT, null); + } + + @Test + public void transformResponse_noItem() { + FakeItem keyItem = createUniqueFakeItem(); + GetItemOperation getItemOperation = + GetItemOperation.create(GetItemEnhancedRequest.builder().key(k -> k.partitionValue(keyItem.getId())).build()); + GetItemResponse response = GetItemResponse.builder().build(); + + FakeItem result = getItemOperation.transformResponse(response, FakeItem.getTableSchema(), PRIMARY_CONTEXT, + null); + + assertThat(result, is(nullValue())); + } + + @Test + public void transformResponse_correctlyTransformsIntoAnItem() { + FakeItem keyItem = createUniqueFakeItem(); + GetItemOperation getItemOperation = + GetItemOperation.create(GetItemEnhancedRequest.builder().key(k -> k.partitionValue(keyItem.getId())).build()); + Map responseMap = new HashMap<>(); + responseMap.put("id", AttributeValue.builder().s(keyItem.getId()).build()); + responseMap.put("subclass_attribute", AttributeValue.builder().s("test-value").build()); + GetItemResponse response = GetItemResponse.builder() + .item(responseMap) + .build(); + + FakeItem result = getItemOperation.transformResponse(response, FakeItem.getTableSchema(), PRIMARY_CONTEXT, + null); + + assertThat(result.getId(), is(keyItem.getId())); + assertThat(result.getSubclassAttribute(), is("test-value")); + } + + @Test + public void generateRequest_withExtension_doesNotModifyKey() { + FakeItem baseFakeItem = createUniqueFakeItem(); + Map keyMap = FakeItem.getTableSchema().itemToMap(baseFakeItem, singletonList("id")); + GetItemOperation getItemOperation = + GetItemOperation.create(GetItemEnhancedRequest.builder().key(k -> k.partitionValue(baseFakeItem.getId())).build()); + + GetItemRequest request = getItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(request.key(), is(keyMap)); + verify(mockDynamoDbEnhancedClientExtension, never()).beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class)); + } + + @Test + public void transformResponse_withExtension_appliesItemModification() { + FakeItem baseFakeItem = createUniqueFakeItem(); + FakeItem fakeItem = createUniqueFakeItem(); + Map baseFakeItemMap = FakeItem.getTableSchema().itemToMap(baseFakeItem, false); + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, false); + GetItemOperation getItemOperation = + GetItemOperation.create(GetItemEnhancedRequest.builder().key(k -> k.partitionValue(baseFakeItem.getId())).build()); + GetItemResponse response = GetItemResponse.builder() + .item(baseFakeItemMap) + .build(); + when(mockDynamoDbEnhancedClientExtension.afterRead(any(DynamoDbExtensionContext.AfterRead.class))) + .thenReturn(ReadModification.builder().transformedItem(fakeItemMap).build()); + + FakeItem resultItem = 
getItemOperation.transformResponse(response, FakeItem.getTableSchema(), + PRIMARY_CONTEXT, mockDynamoDbEnhancedClientExtension); + + assertThat(resultItem, is(fakeItem)); + verify(mockDynamoDbEnhancedClientExtension).afterRead(DefaultDynamoDbExtensionContext.builder() + .tableMetadata(FakeItem.getTableMetadata()) + .operationContext(PRIMARY_CONTEXT) + .items(baseFakeItemMap).build()); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperationTest.java new file mode 100644 index 000000000000..bb28acc54809 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperationTest.java @@ -0,0 +1,104 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; +import static org.junit.Assert.assertThat; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +@RunWith(MockitoJUnitRunner.class) +public class IndexOperationTest { + + private static final String FAKE_RESULT = "fake-result"; + private static final String FAKE_TABLE_NAME = "fake-table-name"; + private static final String FAKE_INDEX_NAME = "fake-index-name"; + + private final FakeIndexOperation fakeIndexOperation = new FakeIndexOperation(); + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Test + public void executeOnSecondaryIndex_defaultImplementation_callsExecuteCorrectly() { + fakeIndexOperation.executeOnSecondaryIndex(FakeItem.getTableSchema(), + FAKE_TABLE_NAME, + FAKE_INDEX_NAME, + mockDynamoDbEnhancedClientExtension, + mockDynamoDbClient); + + assertThat(fakeIndexOperation.lastDynamoDbClient, sameInstance(mockDynamoDbClient)); + assertThat(fakeIndexOperation.lastDynamoDbEnhancedClientExtension, sameInstance(mockDynamoDbEnhancedClientExtension)); + assertThat(fakeIndexOperation.lastTableSchema, sameInstance(FakeItem.getTableSchema())); + assertThat(fakeIndexOperation.lastOperationContext, is(DefaultOperationContext.create(FAKE_TABLE_NAME, FAKE_INDEX_NAME))); + } + + private 
static class FakeIndexOperation implements IndexOperation<FakeItem, String, String, String> { + private TableSchema<FakeItem> lastTableSchema = null; + private OperationContext lastOperationContext = null; + private DynamoDbEnhancedClientExtension lastDynamoDbEnhancedClientExtension = null; + private DynamoDbClient lastDynamoDbClient = null; + + @Override + public String generateRequest(TableSchema<FakeItem> tableSchema, OperationContext context, + DynamoDbEnhancedClientExtension extension) { + return null; + } + + @Override + public Function<String, String> serviceCall(DynamoDbClient dynamoDbClient) { + return null; + } + + @Override + public Function<String, CompletableFuture<String>> asyncServiceCall(DynamoDbAsyncClient dynamoDbAsyncClient) { + return null; + } + + @Override + public String transformResponse(String response, TableSchema<FakeItem> tableSchema, OperationContext context, + DynamoDbEnhancedClientExtension extension) { + return null; + } + + @Override + public String execute(TableSchema<FakeItem> tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension extension, + DynamoDbClient dynamoDbClient) { + lastTableSchema = tableSchema; + lastOperationContext = context; + lastDynamoDbEnhancedClientExtension = extension; + lastDynamoDbClient = dynamoDbClient; + return FAKE_RESULT; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperationTest.java new file mode 100644 index 000000000000..11f772c2e0f2 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperationTest.java @@ -0,0 +1,357 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static java.util.Collections.emptyMap; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemComposedClass; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.Put; +import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; +import software.amazon.awssdk.services.dynamodb.model.PutItemResponse; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; + +@RunWith(MockitoJUnitRunner.class) +public class PutItemOperationTest { + private static final String TABLE_NAME = "table-name"; + private static final OperationContext PRIMARY_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + private static final OperationContext GSI_1_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); + private static final Expression CONDITION_EXPRESSION; + private static final Expression CONDITION_EXPRESSION_2; + private static final Expression MINIMAL_CONDITION_EXPRESSION = Expression.builder().expression("foo = bar").build(); + + static { + Map expressionNames = new HashMap<>(); + expressionNames.put("#test_field_1", "test_field_1"); + expressionNames.put("#test_field_2", "test_field_2"); + Map expressionValues = new HashMap<>(); + expressionValues.put(":test_value_1", numberValue(1)); + expressionValues.put(":test_value_2", numberValue(2)); + CONDITION_EXPRESSION = Expression.builder() + .expression("#test_field_1 = :test_value_1 OR #test_field_2 = :test_value_2") + .expressionNames(Collections.unmodifiableMap(expressionNames)) + .expressionValues(Collections.unmodifiableMap(expressionValues)) + .build(); + } + + static { + Map expressionNames = new HashMap<>(); + expressionNames.put("#test_field_3", 
"test_field_3"); + expressionNames.put("#test_field_4", "test_field_4"); + Map expressionValues = new HashMap<>(); + expressionValues.put(":test_value_3", numberValue(3)); + expressionValues.put(":test_value_4", numberValue(4)); + CONDITION_EXPRESSION_2 = Expression.builder() + .expression("#test_field_3 = :test_value_3 OR #test_field_4 = :test_value_4") + .expressionNames(Collections.unmodifiableMap(expressionNames)) + .expressionValues(Collections.unmodifiableMap(expressionValues)) + .build(); + } + + @Mock + private DynamoDbClient mockDynamoDbClient; + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Test + public void getServiceCall_makesTheRightCallAndReturnsResponse() { + FakeItem fakeItem = createUniqueFakeItem(); + PutItemOperation putItemOperation = PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class).item(fakeItem).build()); + PutItemRequest getItemRequest = PutItemRequest.builder().tableName(TABLE_NAME).build(); + PutItemResponse expectedResponse = PutItemResponse.builder().build(); + when(mockDynamoDbClient.putItem(any(PutItemRequest.class))).thenReturn(expectedResponse); + + PutItemResponse response = putItemOperation.serviceCall(mockDynamoDbClient).apply(getItemRequest); + + assertThat(response, sameInstance(expectedResponse)); + verify(mockDynamoDbClient).putItem(getItemRequest); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_withIndex_throwsIllegalArgumentException() { + FakeItem fakeItem = createUniqueFakeItem(); + PutItemOperation putItemOperation = PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class).item(fakeItem).build()); + + putItemOperation.generateRequest(FakeItem.getTableSchema(), GSI_1_CONTEXT, null); + } + + @Test + public void generateRequest_generatesCorrectRequest() { + FakeItem fakeItem = createUniqueFakeItem(); + fakeItem.setSubclassAttribute("subclass-value"); + PutItemOperation putItemOperation = PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class).item(fakeItem).build()); + + PutItemRequest request = putItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + Map expectedItemMap = new HashMap<>(); + expectedItemMap.put("id", AttributeValue.builder().s(fakeItem.getId()).build()); + expectedItemMap.put("subclass_attribute", AttributeValue.builder().s("subclass-value").build()); + PutItemRequest expectedRequest = PutItemRequest.builder() + .tableName(TABLE_NAME) + .item(expectedItemMap) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_withConditionExpression_generatesCorrectRequest() { + FakeItem fakeItem = createUniqueFakeItem(); + fakeItem.setSubclassAttribute("subclass-value"); + + PutItemOperation putItemOperation = + PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) + .conditionExpression(CONDITION_EXPRESSION) + .item(fakeItem) + .build()); + + PutItemRequest request = putItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + Map expectedItemMap = new HashMap<>(); + expectedItemMap.put("id", AttributeValue.builder().s(fakeItem.getId()).build()); + expectedItemMap.put("subclass_attribute", AttributeValue.builder().s("subclass-value").build()); + PutItemRequest expectedRequest = + PutItemRequest.builder() + .tableName(TABLE_NAME) + .item(expectedItemMap) + .conditionExpression(CONDITION_EXPRESSION.expression()) + .expressionAttributeNames(CONDITION_EXPRESSION.expressionNames()) + 
.expressionAttributeValues(CONDITION_EXPRESSION.expressionValues()) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_withMinimalConditionExpression() { + FakeItem fakeItem = createUniqueFakeItem(); + PutItemOperation putItemOperation = + PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) + .item(fakeItem) + .conditionExpression(MINIMAL_CONDITION_EXPRESSION) + .build()); + + PutItemRequest request = putItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request.conditionExpression(), is(MINIMAL_CONDITION_EXPRESSION.expression())); + assertThat(request.expressionAttributeNames(), is(emptyMap())); + assertThat(request.expressionAttributeValues(), is(emptyMap())); + } + + @Test + public void generateRequest_withConditionExpression_andExtensionWithSingleCondition() { + FakeItem baseFakeItem = createUniqueFakeItem(); + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().additionalConditionalExpression(CONDITION_EXPRESSION_2).build()); + PutItemOperation putItemOperation = + PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) + .conditionExpression(CONDITION_EXPRESSION) + .item(baseFakeItem) + .build()); + + PutItemRequest request = putItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + Expression expectedCondition = Expression.join(CONDITION_EXPRESSION, CONDITION_EXPRESSION_2, " AND "); + assertThat(request.conditionExpression(), is(expectedCondition.expression())); + assertThat(request.expressionAttributeNames(), is(expectedCondition.expressionNames())); + assertThat(request.expressionAttributeValues(), is(expectedCondition.expressionValues())); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_noPartitionKey_throwsIllegalArgumentException() { + FakeItemComposedClass fakeItem = FakeItemComposedClass.builder().composedAttribute("whatever").build(); + PutItemOperation putItemOperation = + PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItemComposedClass.class).item(fakeItem).build()); + + putItemOperation.generateRequest(FakeItemComposedClass.getTableSchema(), PRIMARY_CONTEXT, null); + } + + @Test + public void transformResponse_doesNotBlowUp() { + FakeItem fakeItem = createUniqueFakeItem(); + PutItemOperation putItemOperation = PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) + .item(fakeItem) + .build()); + PutItemResponse response = PutItemResponse.builder().build(); + + putItemOperation.transformResponse(response, FakeItem.getTableSchema(), PRIMARY_CONTEXT, null); + } + + @Test + public void generateRequest_withExtension_modifiesItemToPut() { + FakeItem baseFakeItem = createUniqueFakeItem(); + FakeItem fakeItem = createUniqueFakeItem(); + Map baseMap = FakeItem.getTableSchema().itemToMap(baseFakeItem, true); + Map fakeMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().transformedItem(fakeMap).build()); + PutItemOperation putItemOperation = PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .build()); + + PutItemRequest request = putItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + 
mockDynamoDbEnhancedClientExtension); + + assertThat(request.item(), is(fakeMap)); + verify(mockDynamoDbEnhancedClientExtension).beforeWrite( + DefaultDynamoDbExtensionContext.builder() + .items(baseMap) + .operationContext(PRIMARY_CONTEXT) + .tableMetadata(FakeItem.getTableMetadata()).build()); + } + + @Test + public void generateRequest_withExtension_singleCondition() { + FakeItem baseFakeItem = createUniqueFakeItem(); + FakeItem fakeItem = createUniqueFakeItem(); + Map fakeMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + Expression condition = Expression.builder().expression("condition").expressionValues(fakeMap).build(); + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().additionalConditionalExpression(condition).build()); + PutItemOperation putItemOperation = PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .build()); + + PutItemRequest request = putItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(request.conditionExpression(), is("condition")); + assertThat(request.expressionAttributeValues(), is(fakeMap)); + } + + @Test + public void generateRequest_withExtension_noModifications() { + FakeItem baseFakeItem = createUniqueFakeItem(); + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().build()); + PutItemOperation putItemOperation = PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .build()); + + PutItemRequest request = putItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + assertThat(request.conditionExpression(), is(nullValue())); + assertThat(request.expressionAttributeValues().size(), is(0)); + } + + @Test + public void generateTransactWriteItem_basicRequest() { + FakeItem fakeItem = createUniqueFakeItem(); + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + PutItemOperation putItemOperation = spy(PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) + .item(fakeItem) + .build())); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + + PutItemRequest putItemRequest = PutItemRequest.builder() + .tableName(TABLE_NAME) + .item(fakeItemMap) + .build(); + doReturn(putItemRequest).when(putItemOperation).generateRequest(any(), any(), any()); + + TransactWriteItem actualResult = putItemOperation.generateTransactWriteItem(FakeItem.getTableSchema(), + context, + mockDynamoDbEnhancedClientExtension); + + TransactWriteItem expectedResult = TransactWriteItem.builder() + .put(Put.builder() + .item(fakeItemMap) + .tableName(TABLE_NAME) + .build()) + .build(); + assertThat(actualResult, is(expectedResult)); + verify(putItemOperation).generateRequest(FakeItem.getTableSchema(), context, mockDynamoDbEnhancedClientExtension); + } + + @Test + public void generateTransactWriteItem_conditionalRequest() { + FakeItem fakeItem = createUniqueFakeItem(); + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + PutItemOperation putItemOperation = spy(PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) + .item(fakeItem) + .build())); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + + 
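        // For context, generateTransactWriteItem() produces the Put element that
        // DynamoDbEnhancedClient.transactWriteItems() submits for this operation. A minimal caller-side
        // sketch, assuming an already-constructed enhanced client named "enhancedClient" and a table named
        // "fake-table" (both hypothetical wiring, not part of this test):
        //
        //   DynamoDbTable<FakeItem> table = enhancedClient.table("fake-table", FakeItem.getTableSchema());
        //   enhancedClient.transactWriteItems(r -> r.addPutItem(table, createUniqueFakeItem()));
        //
        // The assertions below verify that any condition expression on the generated PutItemRequest is
        // carried over verbatim onto the transactional Put.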
String conditionExpression = "condition-expression"; + Map attributeValues = Collections.singletonMap("key", stringValue("value1")); + Map attributeNames = Collections.singletonMap("key", "value2"); + + PutItemRequest putItemRequest = PutItemRequest.builder() + .tableName(TABLE_NAME) + .item(fakeItemMap) + .conditionExpression(conditionExpression) + .expressionAttributeValues(attributeValues) + .expressionAttributeNames(attributeNames) + .build(); + doReturn(putItemRequest).when(putItemOperation).generateRequest(any(), any(), any()); + + TransactWriteItem actualResult = putItemOperation.generateTransactWriteItem(FakeItem.getTableSchema(), + context, + mockDynamoDbEnhancedClientExtension); + + TransactWriteItem expectedResult = TransactWriteItem.builder() + .put(Put.builder() + .item(fakeItemMap) + .tableName(TABLE_NAME) + .conditionExpression(conditionExpression) + .expressionAttributeNames(attributeNames) + .expressionAttributeValues(attributeValues) + .build()) + .build(); + assertThat(actualResult, is(expectedResult)); + verify(putItemOperation).generateRequest(FakeItem.getTableSchema(), context, mockDynamoDbEnhancedClientExtension); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperationConditionalTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperationConditionalTest.java new file mode 100644 index 000000000000..9e2483ae65d9 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperationConditionalTest.java @@ -0,0 +1,270 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithSort; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithoutSort; + +import java.util.UUID; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithNumericSort; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class QueryOperationConditionalTest { + private static final String ID_KEY = "#AMZN_MAPPED_id"; + private static final String ID_VALUE = ":AMZN_MAPPED_id"; + private static final String SORT_KEY = "#AMZN_MAPPED_sort"; + private static final String SORT_VALUE = ":AMZN_MAPPED_sort"; + private static final String SORT_OTHER_VALUE = ":AMZN_MAPPED_sort2"; + + private final FakeItem fakeItem = createUniqueFakeItem(); + private final AttributeValue fakeItemHashValue = AttributeValue.builder().s(fakeItem.getId()).build(); + private final FakeItemWithSort fakeItemWithSort = createUniqueFakeItemWithSort(); + private final AttributeValue fakeItemWithSortHashValue = + AttributeValue.builder().s(fakeItemWithSort.getId()).build(); + private final AttributeValue fakeItemWithSortSortValue = + AttributeValue.builder().s(fakeItemWithSort.getSort()).build(); + private final FakeItemWithSort fakeItemWithoutSort = createUniqueFakeItemWithoutSort(); + private final AttributeValue fakeItemWithoutSortHashValue = + AttributeValue.builder().s(fakeItemWithoutSort.getId()).build(); + + @Test + public void equalTo_hashOnly() { + Expression expression = QueryConditional.keyEqualTo(getKey(fakeItem)).expression(FakeItem.getTableSchema(), + TableMetadata.primaryIndexName()); + + assertThat(expression.expression(), is(ID_KEY + " = " + ID_VALUE)); + assertThat(expression.expressionNames(), hasEntry(ID_KEY, "id")); + assertThat(expression.expressionValues(), hasEntry(ID_VALUE, fakeItemHashValue)); + } + + @Test(expected = IllegalArgumentException.class) + public void equalTo_hashOnly_notSet_throwsIllegalArgumentException() { + fakeItem.setId(null); + QueryConditional.keyEqualTo(getKey(fakeItem)) + .expression(FakeItem.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test + public void equalTo_hashAndRangeKey_bothSet() { + Expression expression = QueryConditional.keyEqualTo(getKey(fakeItemWithSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + + verifyExpression(expression, "="); + } + + @Test(expected = IllegalArgumentException.class) + public void equalTo_hashAndRangeKey_hashNotSet_throwsIllegalArgumentException() { + fakeItemWithSort.setId(null); + 
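        // The partition value was cleared above, so the keyEqualTo conditional below cannot build a valid
        // key condition expression and is expected to throw IllegalArgumentException.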
QueryConditional.keyEqualTo(getKey(fakeItemWithSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test + public void equalTo_hashAndRangeKey_hashOnlySet() { + Expression expression = QueryConditional.keyEqualTo(getKey(fakeItemWithoutSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + + assertThat(expression.expression(), is(ID_KEY + " = " + ID_VALUE)); + assertThat(expression.expressionNames(), hasEntry(ID_KEY, "id")); + assertThat(expression.expressionValues(), hasEntry(ID_VALUE, fakeItemWithoutSortHashValue)); + } + + @Test + public void greaterThan_hashAndRangeKey_bothSet() { + Expression expression = QueryConditional.sortGreaterThan(getKey(fakeItemWithSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + + verifyExpression(expression, ">"); + } + + @Test(expected = IllegalArgumentException.class) + public void greaterThan_hashOnly_throwsIllegalArgumentException() { + QueryConditional.sortGreaterThan(getKey(fakeItem)) + .expression(FakeItem.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test(expected = IllegalArgumentException.class) + public void greaterThan_hashAndSort_onlyHashSet_throwsIllegalArgumentException() { + QueryConditional.sortGreaterThan(getKey(fakeItemWithoutSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test + public void greaterThanOrEqualTo_hashAndRangeKey_bothSet() { + Expression expression = QueryConditional.sortGreaterThanOrEqualTo(getKey(fakeItemWithSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + + verifyExpression(expression, ">="); + } + + @Test(expected = IllegalArgumentException.class) + public void greaterThanOrEqualTo_hashOnly_throwsIllegalArgumentException() { + QueryConditional.sortGreaterThanOrEqualTo(getKey(fakeItem)) + .expression(FakeItem.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test(expected = IllegalArgumentException.class) + public void greaterThanOrEqualTo_hashAndSort_onlyHashSet_throwsIllegalArgumentException() { + QueryConditional.sortGreaterThanOrEqualTo(getKey(fakeItemWithoutSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test + public void lessThan_hashAndRangeKey_bothSet() { + Expression expression = QueryConditional.sortLessThan(getKey(fakeItemWithSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + + verifyExpression(expression, "<"); + } + + @Test(expected = IllegalArgumentException.class) + public void lessThan_hashOnly_throwsIllegalArgumentException() { + QueryConditional.sortLessThan(getKey(fakeItem)) + .expression(FakeItem.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test(expected = IllegalArgumentException.class) + public void lessThan_hashAndSort_onlyHashSet_throwsIllegalArgumentException() { + QueryConditional.sortLessThan(getKey(fakeItemWithoutSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test + public void lessThanOrEqualTo_hashAndRangeKey_bothSet() { + Expression expression = QueryConditional.sortLessThanOrEqualTo(getKey(fakeItemWithSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + + verifyExpression(expression, "<="); + } + + @Test(expected = IllegalArgumentException.class) + public void lessThanOrEqualTo_hashOnly_throwsIllegalArgumentException() { + 
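        // FakeItem's primary key has no sort component, so a sort-key conditional against its schema cannot
        // produce a valid expression and is expected to throw IllegalArgumentException.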
QueryConditional.sortLessThanOrEqualTo(getKey(fakeItem)) + .expression(FakeItem.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test(expected = IllegalArgumentException.class) + public void lessThanOrEqualTo_hashAndSort_onlyHashSet_throwsIllegalArgumentException() { + QueryConditional.sortLessThanOrEqualTo(getKey(fakeItemWithoutSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test + public void beginsWith_hashAndRangeKey_bothSet() { + Expression expression = QueryConditional.sortBeginsWith(getKey(fakeItemWithSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + + String expectedExpression = String.format("%s = %s AND begins_with ( %s, %s )", ID_KEY, ID_VALUE, SORT_KEY, + SORT_VALUE); + assertThat(expression.expression(), is(expectedExpression)); + assertThat(expression.expressionValues(), hasEntry(ID_VALUE, fakeItemWithSortHashValue)); + assertThat(expression.expressionValues(), hasEntry(SORT_VALUE, fakeItemWithSortSortValue)); + assertThat(expression.expressionNames(), hasEntry(ID_KEY, "id")); + assertThat(expression.expressionNames(), hasEntry(SORT_KEY, "sort")); + } + + @Test(expected = IllegalArgumentException.class) + public void beginsWith_hashOnly_throwsIllegalArgumentException() { + QueryConditional.sortBeginsWith(getKey(fakeItem)) + .expression(FakeItem.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test(expected = IllegalArgumentException.class) + public void beginsWith_hashAndSort_onlyHashSet_throwsIllegalArgumentException() { + QueryConditional.sortBeginsWith(getKey(fakeItemWithoutSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test(expected = IllegalArgumentException.class) + public void beginsWith_numericRange_throwsIllegalArgumentException() { + FakeItemWithNumericSort fakeItemWithNumericSort = FakeItemWithNumericSort.createUniqueFakeItemWithSort(); + QueryConditional.sortBeginsWith(getKey(fakeItemWithNumericSort)).expression(FakeItemWithNumericSort.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test + public void between_allKeysSet_stringSort() { + FakeItemWithSort otherFakeItemWithSort = + FakeItemWithSort.builder().id(fakeItemWithSort.getId()).sort(UUID.randomUUID().toString()).build(); + AttributeValue otherFakeItemWithSortSortValue = + AttributeValue.builder().s(otherFakeItemWithSort.getSort()).build(); + + Expression expression = QueryConditional.sortBetween(getKey(fakeItemWithSort), getKey(otherFakeItemWithSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + + String expectedExpression = String.format("%s = %s AND %s BETWEEN %s AND %s", ID_KEY, ID_VALUE, SORT_KEY, + SORT_VALUE, SORT_OTHER_VALUE); + assertThat(expression.expression(), is(expectedExpression)); + assertThat(expression.expressionValues(), hasEntry(ID_VALUE, fakeItemWithSortHashValue)); + assertThat(expression.expressionValues(), hasEntry(SORT_VALUE, fakeItemWithSortSortValue)); + assertThat(expression.expressionValues(), hasEntry(SORT_OTHER_VALUE, otherFakeItemWithSortSortValue)); + assertThat(expression.expressionNames(), hasEntry(ID_KEY, "id")); + assertThat(expression.expressionNames(), hasEntry(SORT_KEY, "sort")); + } + + @Test(expected = IllegalArgumentException.class) + public void between_hashOnly_throwsIllegalArgumentException() { + FakeItem otherFakeItem = createUniqueFakeItem(); + QueryConditional.sortBetween(getKey(fakeItem), getKey(otherFakeItem)) + 
.expression(FakeItem.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test(expected = IllegalArgumentException.class) + public void between_hashAndSort_onlyFirstSortSet_throwsIllegalArgumentException() { + QueryConditional.sortBetween(getKey(fakeItemWithSort), getKey(fakeItemWithoutSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test(expected = IllegalArgumentException.class) + public void between_hashAndSort_onlySecondSortSet_throwsIllegalArgumentException() { + QueryConditional.sortBetween(getKey(fakeItemWithoutSort), getKey(fakeItemWithSort)) + .expression(FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + } + + private void verifyExpression(Expression expression, String condition) { + assertThat(expression.expression(), is(ID_KEY + " = " + ID_VALUE + " AND " + SORT_KEY + " " + condition + + " " + SORT_VALUE)); + assertThat(expression.expressionNames(), hasEntry(ID_KEY, "id")); + assertThat(expression.expressionNames(), hasEntry(SORT_KEY, "sort")); + assertThat(expression.expressionValues(), hasEntry(ID_VALUE, fakeItemWithSortHashValue)); + assertThat(expression.expressionValues(), hasEntry(SORT_VALUE, fakeItemWithSortSortValue)); + } + + private Key getKey(FakeItem item) { + return EnhancedClientUtils.createKeyFromItem(item, FakeItem.getTableSchema(), TableMetadata.primaryIndexName()); + } + + private Key getKey(FakeItemWithSort item) { + return EnhancedClientUtils.createKeyFromItem(item, FakeItemWithSort.getTableSchema(), TableMetadata.primaryIndexName()); + } + + private Key getKey(FakeItemWithNumericSort item) { + return EnhancedClientUtils.createKeyFromItem(item, + FakeItemWithNumericSort.getTableSchema(), + TableMetadata.primaryIndexName()); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperationTest.java new file mode 100644 index 000000000000..963de03e6cf6 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperationTest.java @@ -0,0 +1,453 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices.createUniqueFakeItemWithIndices; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithSort; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional.keyEqualTo; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional; +import software.amazon.awssdk.enhanced.dynamodb.model.QueryEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.QueryRequest; +import software.amazon.awssdk.services.dynamodb.model.QueryResponse; +import software.amazon.awssdk.services.dynamodb.paginators.QueryIterable; +import software.amazon.awssdk.services.dynamodb.paginators.QueryPublisher; + +@RunWith(MockitoJUnitRunner.class) +public class QueryOperationTest { + private static final String TABLE_NAME = "table-name"; + private static final OperationContext PRIMARY_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + private static final OperationContext GSI_1_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); + + private final FakeItem keyItem = createUniqueFakeItem(); 
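    // The default operation built just below queries the primary index for a single partition value. For
    // context, a minimal caller-side sketch of the equivalent high-level call, assuming an already-built
    // DynamoDbEnhancedClient named "enhancedClient" (hypothetical wiring, not part of this test):
    //
    //   DynamoDbTable<FakeItem> table = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema());
    //   table.query(r -> r.queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))))
    //        .forEach(page -> page.items().forEach(System.out::println));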
+ private final QueryOperation queryOperation = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .build()); + + @Mock + private DynamoDbClient mockDynamoDbClient; + @Mock + private DynamoDbAsyncClient mockDynamoDbAsyncClient; + @Mock + private QueryConditional mockQueryConditional; + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Test + public void getServiceCall_makesTheRightCallAndReturnsResponse() { + QueryRequest queryRequest = QueryRequest.builder().build(); + QueryIterable mockQueryIterable = mock(QueryIterable.class); + when(mockDynamoDbClient.queryPaginator(any(QueryRequest.class))).thenReturn(mockQueryIterable); + + SdkIterable response = queryOperation.serviceCall(mockDynamoDbClient).apply(queryRequest); + + assertThat(response, is(mockQueryIterable)); + verify(mockDynamoDbClient).queryPaginator(queryRequest); + } + + @Test + public void getAsyncServiceCall_makesTheRightCallAndReturnsResponse() { + QueryRequest queryRequest = QueryRequest.builder().build(); + QueryPublisher mockQueryPublisher = mock(QueryPublisher.class); + when(mockDynamoDbAsyncClient.queryPaginator(any(QueryRequest.class))).thenReturn(mockQueryPublisher); + + SdkPublisher response = + queryOperation.asyncServiceCall(mockDynamoDbAsyncClient).apply(queryRequest); + + assertThat(response, is(mockQueryPublisher)); + verify(mockDynamoDbAsyncClient).queryPaginator(queryRequest); + } + + @Test + public void generateRequest_nonDefault_usesQueryConditional() { + Map keyItemMap = getAttributeValueMap(keyItem); + Expression expression = Expression.builder().expression("test-expression").expressionValues(keyItemMap).build(); + when(mockQueryConditional.expression(any(), anyString())).thenReturn(expression); + + QueryOperation query = QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(mockQueryConditional) + .build()); + QueryRequest queryRequest = query.generateRequest(FakeItem.getTableSchema(), PRIMARY_CONTEXT, null); + + QueryRequest expectedQueryRequest = QueryRequest.builder() + .tableName(TABLE_NAME) + .keyConditionExpression("test-expression") + .expressionAttributeValues(keyItemMap) + .build(); + assertThat(queryRequest, is(expectedQueryRequest)); + verify(mockQueryConditional).expression(FakeItem.getTableSchema(), TableMetadata.primaryIndexName()); + } + + @Test + public void generateRequest_defaultQuery_usesEqualTo() { + QueryRequest queryRequest = queryOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + QueryRequest expectedQueryRequest = QueryRequest.builder() + .tableName(TABLE_NAME) + .keyConditionExpression("#AMZN_MAPPED_id = :AMZN_MAPPED_id") + .expressionAttributeValues(singletonMap(":AMZN_MAPPED_id", + AttributeValue.builder().s(keyItem.getId()).build())) + .expressionAttributeNames(singletonMap("#AMZN_MAPPED_id", "id")) + .build(); + assertThat(queryRequest, is(expectedQueryRequest)); + } + + @Test + public void generateRequest_knowsHowToUseAnIndex() { + FakeItemWithIndices fakeItem = createUniqueFakeItemWithIndices(); + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(fakeItem.getGsiId()))) + .build()); + QueryRequest queryRequest = queryToTest.generateRequest(FakeItemWithIndices.getTableSchema(), GSI_1_CONTEXT, null); + + assertThat(queryRequest.indexName(), is("gsi_1")); + } + + @Test + public void generateRequest_ascending() { + 
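        // scanIndexForward(true) asks DynamoDB to return items in ascending sort-key order, which is also
        // the service default; the descending case is covered by the next test.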
QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .scanIndexForward(true) + .build()); + QueryRequest queryRequest = queryToTest.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryRequest.scanIndexForward(), is(true)); + } + + @Test + public void generateRequest_descending() { + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .scanIndexForward(false) + .build()); + QueryRequest queryRequest = queryToTest.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryRequest.scanIndexForward(), is(false)); + } + + @Test + public void generateRequest_limit() { + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .limit(123) + .build()); + QueryRequest queryRequest = queryToTest.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryRequest.limit(), is(123)); + } + + @Test + public void generateRequest_filterExpression_withValues() { + Map expressionValues = singletonMap(":test-key", stringValue("test-value")); + Expression filterExpression = Expression.builder() + .expression("test-expression") + .expressionValues(expressionValues) + .build(); + + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .filterExpression(filterExpression) + .build()); + QueryRequest queryRequest = queryToTest.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryRequest.filterExpression(), is("test-expression")); + assertThat(queryRequest.expressionAttributeValues(), hasEntry(":test-key", stringValue("test-value"))); + } + + @Test + public void generateRequest_filterExpression_withoutValues() { + Expression filterExpression = Expression.builder().expression("test-expression").build(); + + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .filterExpression(filterExpression) + .build()); + QueryRequest queryRequest = queryToTest.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryRequest.filterExpression(), is("test-expression")); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_filterExpression_withConflictingValues() { + Map expressionValues = singletonMap(":AMZN_MAPPED_id", stringValue("test-value")); + Map expressionNames = singletonMap("#AMZN_MAPPED_id", "id"); + Expression filterExpression = Expression.builder() + .expression("test-expression") + .expressionNames(expressionNames) + .expressionValues(expressionValues) + .build(); + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .filterExpression(filterExpression) + .build()); + queryToTest.generateRequest(FakeItem.getTableSchema(), PRIMARY_CONTEXT, null); + } + + @Test + public void generateRequest_consistentRead() { + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .consistentRead(true) + .build()); 
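        // consistentRead(true) should be passed through to the low-level QueryRequest unchanged; note that
        // strongly consistent reads are not supported on global secondary indexes.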
+ QueryRequest queryRequest = queryToTest.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryRequest.consistentRead(), is(true)); + } + + @Test + public void generateRequest_projectionExpression() { + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .attributesToProject("id") + .addAttributeToProject("version") + .build()); + QueryRequest queryRequest = queryToTest.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryRequest.projectionExpression(), is("#AMZN_MAPPED_id,#AMZN_MAPPED_version")); + assertThat(queryRequest.expressionAttributeNames().get("#AMZN_MAPPED_id"), is ("id")); + assertThat(queryRequest.expressionAttributeNames().get("#AMZN_MAPPED_version"), is ("version")); + } + + @Test + public void generateRequest_hashKeyOnly_withExclusiveStartKey() { + FakeItem exclusiveStartKey = createUniqueFakeItem(); + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .exclusiveStartKey(FakeItem.getTableSchema() + .itemToMap(exclusiveStartKey, + FakeItem.getTableMetadata() + .primaryKeys())) + .build()); + + QueryRequest queryRequest = queryToTest.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryRequest.exclusiveStartKey(), + hasEntry("id", AttributeValue.builder().s(exclusiveStartKey.getId()).build())); + } + + @Test + public void generateRequest_secondaryIndex_exclusiveStartKeyUsesPrimaryAndSecondaryIndex() { + FakeItemWithIndices exclusiveStartKey = createUniqueFakeItemWithIndices(); + Set keyFields = new HashSet<>(FakeItemWithIndices.getTableSchema().tableMetadata().primaryKeys()); + keyFields.addAll(FakeItemWithIndices.getTableSchema().tableMetadata().indexKeys("gsi_1")); + + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .exclusiveStartKey(FakeItemWithIndices.getTableSchema() + .itemToMap(exclusiveStartKey, + keyFields)) + .build()); + + QueryRequest queryRequest = queryToTest.generateRequest(FakeItemWithIndices.getTableSchema(), + GSI_1_CONTEXT, + null); + + assertThat(queryRequest.exclusiveStartKey(), + hasEntry("id", AttributeValue.builder().s(exclusiveStartKey.getId()).build())); + assertThat(queryRequest.exclusiveStartKey(), + hasEntry("sort", AttributeValue.builder().s(exclusiveStartKey.getSort()).build())); + assertThat(queryRequest.exclusiveStartKey(), + hasEntry("gsi_id", AttributeValue.builder().s(exclusiveStartKey.getGsiId()).build())); + assertThat(queryRequest.exclusiveStartKey(), + hasEntry("gsi_sort", AttributeValue.builder().s(exclusiveStartKey.getGsiSort()).build())); + } + + @Test + public void generateRequest_hashAndSortKey_withExclusiveStartKey() { + FakeItemWithSort exclusiveStartKey = createUniqueFakeItemWithSort(); + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .exclusiveStartKey( + FakeItemWithSort.getTableSchema() + .itemToMap( + exclusiveStartKey, + FakeItemWithSort.getTableSchema() + .tableMetadata() + .primaryKeys())) + .build()); + + QueryRequest queryRequest = queryToTest.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryRequest.exclusiveStartKey(), + 
hasEntry("id", AttributeValue.builder().s(exclusiveStartKey.getId()).build())); + assertThat(queryRequest.exclusiveStartKey(), + hasEntry("sort", AttributeValue.builder().s(exclusiveStartKey.getSort()).build())); + } + + @Test + public void transformResults_multipleItems_returnsCorrectItems() { + List queryResultItems = generateFakeItemList(); + List> queryResultMaps = + queryResultItems.stream().map(QueryOperationTest::getAttributeValueMap).collect(toList()); + + QueryResponse queryResponse = generateFakeQueryResults(queryResultMaps); + + Page queryResultPage = queryOperation.transformResponse(queryResponse, + FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryResultPage.items(), is(queryResultItems)); + } + + @Test + public void transformResults_multipleItems_setsLastEvaluatedKey() { + List queryResultItems = generateFakeItemList(); + FakeItem lastEvaluatedKey = createUniqueFakeItem(); + List> queryResultMaps = + queryResultItems.stream().map(QueryOperationTest::getAttributeValueMap).collect(toList()); + + QueryResponse queryResponse = generateFakeQueryResults(queryResultMaps, + getAttributeValueMap(lastEvaluatedKey)); + + Page queryResultPage = queryOperation.transformResponse(queryResponse, + FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryResultPage.lastEvaluatedKey(), is(getAttributeValueMap(lastEvaluatedKey))); + } + + @Test + public void queryItem_withExtension_correctlyTransformsItem() { + List queryResultItems = generateFakeItemList(); + List modifiedResultItems = generateFakeItemList(); + + List> queryResultMap = + queryResultItems.stream().map(QueryOperationTest::getAttributeValueMap).collect(toList()); + + ReadModification[] readModifications = + modifiedResultItems.stream() + .map(QueryOperationTest::getAttributeValueMap) + .map(attributeMap -> ReadModification.builder().transformedItem(attributeMap).build()) + .collect(Collectors.toList()) + .toArray(new ReadModification[]{}); + + when(mockDynamoDbEnhancedClientExtension.afterRead(any(DynamoDbExtensionContext.AfterRead.class))) + .thenReturn(readModifications[0], Arrays.copyOfRange(readModifications, 1, readModifications.length)); + + QueryResponse queryResponse = generateFakeQueryResults(queryResultMap); + + Page queryResultPage = queryOperation.transformResponse(queryResponse, + FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(queryResultPage.items(), is(modifiedResultItems)); + InOrder inOrder = Mockito.inOrder(mockDynamoDbEnhancedClientExtension); + queryResultMap.forEach( + attributeMap -> inOrder.verify(mockDynamoDbEnhancedClientExtension) + .afterRead( + DefaultDynamoDbExtensionContext.builder() + .tableMetadata(FakeItem.getTableMetadata()) + .operationContext(PRIMARY_CONTEXT) + .items(attributeMap).build())); + } + + private static QueryResponse generateFakeQueryResults(List> queryItemMapsPage) { + return QueryResponse.builder().items(queryItemMapsPage).build(); + } + + private static QueryResponse generateFakeQueryResults(List> queryItemMapsPage, + Map lastEvaluatedKey) { + return QueryResponse.builder().items(queryItemMapsPage).lastEvaluatedKey(lastEvaluatedKey).build(); + + } + + private static List generateFakeItemList() { + return IntStream.range(0, 3).mapToObj(ignored -> FakeItem.createUniqueFakeItem()).collect(toList()); + } + + private static Map getAttributeValueMap(FakeItem fakeItem) { + return singletonMap("id", AttributeValue.builder().s(fakeItem.getId()).build()); + } +} diff --git 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperationTest.java new file mode 100644 index 000000000000..8228be4add3d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperationTest.java @@ -0,0 +1,338 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithSort; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import 
software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.ScanRequest; +import software.amazon.awssdk.services.dynamodb.model.ScanResponse; +import software.amazon.awssdk.services.dynamodb.paginators.ScanIterable; +import software.amazon.awssdk.services.dynamodb.paginators.ScanPublisher; + +@RunWith(MockitoJUnitRunner.class) +public class ScanOperationTest { + private static final String TABLE_NAME = "table-name"; + private static final OperationContext PRIMARY_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + private static final OperationContext GSI_1_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); + + private final ScanOperation scanOperation = ScanOperation.create(ScanEnhancedRequest.builder().build()); + + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Mock + private DynamoDbAsyncClient mockDynamoDbAsyncClient; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Test + public void getServiceCall_makesTheRightCallAndReturnsResponse() { + ScanRequest scanRequest = ScanRequest.builder().build(); + ScanIterable mockScanIterable = mock(ScanIterable.class); + when(mockDynamoDbClient.scanPaginator(any(ScanRequest.class))).thenReturn(mockScanIterable); + + SdkIterable response = scanOperation.serviceCall(mockDynamoDbClient).apply(scanRequest); + + assertThat(response, is(mockScanIterable)); + verify(mockDynamoDbClient).scanPaginator(scanRequest); + } + + @Test + public void getAsyncServiceCall_makesTheRightCallAndReturnsResponse() { + ScanRequest scanRequest = ScanRequest.builder().build(); + ScanPublisher mockScanPublisher = mock(ScanPublisher.class); + when(mockDynamoDbAsyncClient.scanPaginator(any(ScanRequest.class))).thenReturn(mockScanPublisher); + + SdkPublisher response = scanOperation.asyncServiceCall(mockDynamoDbAsyncClient) + .apply(scanRequest); + + assertThat(response, is(mockScanPublisher)); + verify(mockDynamoDbAsyncClient).scanPaginator(scanRequest); + } + + @Test + public void generateRequest_defaultScan() { + ScanRequest request = scanOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + ScanRequest expectedRequest = ScanRequest.builder() + .tableName(TABLE_NAME) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_knowsHowToUseAnIndex() { + ScanOperation operation = ScanOperation.create(ScanEnhancedRequest.builder().build()); + ScanRequest scanRequest = operation.generateRequest(FakeItemWithIndices.getTableSchema(), GSI_1_CONTEXT, null); + assertThat(scanRequest.indexName(), is("gsi_1")); + } + + + @Test + public void generateRequest_limit() { + ScanOperation operation = ScanOperation.create(ScanEnhancedRequest.builder().limit(10).build()); + ScanRequest request = operation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + ScanRequest expectedRequest = ScanRequest.builder() + .tableName(TABLE_NAME) + .limit(10) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_filterCondition_expressionAndValues() { + Map expressionValues = singletonMap(":test-key", stringValue("test-value")); + Expression filterExpression = + Expression.builder().expression("test-expression").expressionValues(expressionValues).build(); + ScanOperation operation = + ScanOperation.create(ScanEnhancedRequest.builder().filterExpression(filterExpression).build()); + + ScanRequest request = 
operation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + ScanRequest expectedRequest = ScanRequest.builder() + .tableName(TABLE_NAME) + .filterExpression("test-expression") + .expressionAttributeValues(expressionValues) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_filterCondition_expressionOnly() { + Expression filterExpression = Expression.builder().expression("test-expression").build(); + ScanOperation operation = + ScanOperation.create(ScanEnhancedRequest.builder().filterExpression(filterExpression).build()); + + ScanRequest request = operation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + ScanRequest expectedRequest = ScanRequest.builder() + .tableName(TABLE_NAME) + .filterExpression("test-expression") + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_consistentRead() { + ScanOperation operation = ScanOperation.create(ScanEnhancedRequest.builder().consistentRead(true).build()); + ScanRequest request = operation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + ScanRequest expectedRequest = ScanRequest.builder() + .tableName(TABLE_NAME) + .consistentRead(true) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_projectionExpression() { + ScanOperation operation = ScanOperation.create( + ScanEnhancedRequest.builder() + .attributesToProject("id") + .addAttributeToProject("version") + .build() + ); + ScanRequest request = operation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + Map expectedExpressionAttributeNames = new HashMap<>(); + expectedExpressionAttributeNames.put("#AMZN_MAPPED_id", "id"); + expectedExpressionAttributeNames.put("#AMZN_MAPPED_version", "version"); + + ScanRequest expectedRequest = ScanRequest.builder() + .tableName(TABLE_NAME) + .projectionExpression("#AMZN_MAPPED_id,#AMZN_MAPPED_version") + .expressionAttributeNames(expectedExpressionAttributeNames) + .build(); + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_hashKeyOnly_exclusiveStartKey() { + FakeItem exclusiveStartKey = createUniqueFakeItem(); + Map keyMap = FakeItem.getTableSchema().itemToMap(exclusiveStartKey, singletonList("id")); + ScanOperation operation = + ScanOperation.create(ScanEnhancedRequest.builder().exclusiveStartKey(keyMap).build()); + + ScanRequest scanRequest = operation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(scanRequest.exclusiveStartKey(), + hasEntry("id", AttributeValue.builder().s(exclusiveStartKey.getId()).build())); + } + + @Test + public void generateRequest_hashAndRangeKey_exclusiveStartKey() { + FakeItemWithSort exclusiveStartKey = createUniqueFakeItemWithSort(); + Map keyMap = + FakeItemWithSort.getTableSchema().itemToMap(exclusiveStartKey, + FakeItemWithSort.getTableMetadata().primaryKeys()); + + ScanOperation operation = + ScanOperation.create(ScanEnhancedRequest.builder().exclusiveStartKey(keyMap).build()); + + ScanRequest scanRequest = operation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(scanRequest.exclusiveStartKey(), + hasEntry("id", AttributeValue.builder().s(exclusiveStartKey.getId()).build())); + assertThat(scanRequest.exclusiveStartKey(), + hasEntry("sort", AttributeValue.builder().s(exclusiveStartKey.getSort()).build())); + } + + @Test + public void 
transformResults_multipleItems_returnsCorrectItems() { + List scanResultItems = generateFakeItemList(); + List> scanResultMaps = + scanResultItems.stream().map(ScanOperationTest::getAttributeValueMap).collect(toList()); + + ScanResponse scanResponse = generateFakeScanResults(scanResultMaps); + + Page scanResultPage = scanOperation.transformResponse(scanResponse, + FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + assertThat(scanResultPage.items(), is(scanResultItems)); + } + + @Test + public void transformResults_multipleItems_setsLastEvaluatedKey() { + List scanResultItems = generateFakeItemList(); + FakeItem lastEvaluatedKey = createUniqueFakeItem(); + List> scanResultMaps = + scanResultItems.stream().map(ScanOperationTest::getAttributeValueMap).collect(toList()); + + ScanResponse scanResponse = generateFakeScanResults(scanResultMaps, getAttributeValueMap(lastEvaluatedKey)); + + Page scanResultPage = scanOperation.transformResponse(scanResponse, + FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(scanResultPage.lastEvaluatedKey(), is(getAttributeValueMap(lastEvaluatedKey))); + } + + @Test + public void scanItem_withExtension_correctlyTransformsItems() { + List scanResultItems = generateFakeItemList(); + List modifiedResultItems = generateFakeItemList(); + + List> scanResultMaps = + scanResultItems.stream().map(ScanOperationTest::getAttributeValueMap).collect(toList()); + + ReadModification[] readModifications = + modifiedResultItems.stream() + .map(ScanOperationTest::getAttributeValueMap) + .map(attributeMap -> ReadModification.builder().transformedItem(attributeMap).build()) + .collect(Collectors.toList()) + .toArray(new ReadModification[]{}); + when(mockDynamoDbEnhancedClientExtension.afterRead(any(DynamoDbExtensionContext.AfterRead.class))) + .thenReturn(readModifications[0], Arrays.copyOfRange(readModifications, 1, readModifications.length)); + + ScanResponse scanResponse = generateFakeScanResults(scanResultMaps); + + Page scanResultPage = scanOperation.transformResponse(scanResponse, + FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(scanResultPage.items(), is(modifiedResultItems)); + + InOrder inOrder = Mockito.inOrder(mockDynamoDbEnhancedClientExtension); + scanResultMaps.forEach( + attributeMap -> inOrder.verify(mockDynamoDbEnhancedClientExtension).afterRead( + DefaultDynamoDbExtensionContext.builder() + .tableMetadata(FakeItem.getTableMetadata()) + .operationContext(PRIMARY_CONTEXT) + .items(attributeMap).build())); + } + + private static ScanResponse generateFakeScanResults(List> scanItemMapsPage) { + return ScanResponse.builder().items(scanItemMapsPage).build(); + } + + private static ScanResponse generateFakeScanResults(List> scanItemMapsPage, + Map lastEvaluatedKey) { + return ScanResponse.builder() + .items(scanItemMapsPage) + .lastEvaluatedKey(lastEvaluatedKey) + .build(); + } + + private static List generateFakeItemList() { + return IntStream.range(0, 3).mapToObj(ignored -> FakeItem.createUniqueFakeItem()).collect(toList()); + } + + private static Map getAttributeValueMap(FakeItem fakeItem) { + return singletonMap("id", AttributeValue.builder().s(fakeItem.getId()).build()); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperationTest.java new file mode 100644 index 
000000000000..81b2e725ae10 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperationTest.java @@ -0,0 +1,104 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; +import static org.junit.Assert.assertThat; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +@RunWith(MockitoJUnitRunner.class) +public class TableOperationTest { + + private static final String FAKE_RESULT = "fake-result"; + private static final String FAKE_TABLE_NAME = "fake-table-name"; + + private final FakeTableOperation fakeTableOperation = new FakeTableOperation(); + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Test + public void executeOnPrimaryIndex_defaultImplementation_callsExecuteCorrectly() { + fakeTableOperation.executeOnPrimaryIndex(FakeItem.getTableSchema(), + FAKE_TABLE_NAME, + mockDynamoDbEnhancedClientExtension, + mockDynamoDbClient); + + assertThat(fakeTableOperation.lastDynamoDbClient, sameInstance(mockDynamoDbClient)); + assertThat(fakeTableOperation.lastDynamoDbEnhancedClientExtension, sameInstance(mockDynamoDbEnhancedClientExtension)); + assertThat(fakeTableOperation.lastTableSchema, sameInstance(FakeItem.getTableSchema())); + assertThat(fakeTableOperation.lastOperationContext, is( + DefaultOperationContext.create(FAKE_TABLE_NAME, TableMetadata.primaryIndexName()))); + } + + private static class FakeTableOperation implements TableOperation { + private TableSchema lastTableSchema = null; + private OperationContext lastOperationContext = null; + private DynamoDbEnhancedClientExtension lastDynamoDbEnhancedClientExtension = null; + private DynamoDbClient lastDynamoDbClient = null; + + @Override + public String generateRequest(TableSchema tableSchema, OperationContext context, + DynamoDbEnhancedClientExtension extension) { + return null; + } + + @Override + public Function serviceCall(DynamoDbClient dynamoDbClient) { + return null; + } + + @Override + public Function> asyncServiceCall(DynamoDbAsyncClient dynamoDbAsyncClient) { + return null; + } + + @Override + public String 
transformResponse(String response, TableSchema tableSchema, OperationContext context, + DynamoDbEnhancedClientExtension extension) { + return null; + } + + @Override + public String execute(TableSchema tableSchema, + OperationContext context, + DynamoDbEnhancedClientExtension extension, + DynamoDbClient dynamoDbClient) { + lastTableSchema = tableSchema; + lastOperationContext = context; + lastDynamoDbEnhancedClientExtension = extension; + lastDynamoDbClient = dynamoDbClient; + return FAKE_RESULT; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactGetItemsOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactGetItemsOperationTest.java new file mode 100644 index 000000000000..1f487b595684 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactGetItemsOperationTest.java @@ -0,0 +1,238 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.stream.Collectors.toList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithSort; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.stream.IntStream; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.Document; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.internal.DefaultDocument; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactGetItemsEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import 
software.amazon.awssdk.services.dynamodb.model.AttributeValue;
+import software.amazon.awssdk.services.dynamodb.model.Get;
+import software.amazon.awssdk.services.dynamodb.model.ItemResponse;
+import software.amazon.awssdk.services.dynamodb.model.TransactGetItem;
+import software.amazon.awssdk.services.dynamodb.model.TransactGetItemsRequest;
+import software.amazon.awssdk.services.dynamodb.model.TransactGetItemsResponse;
+
+@RunWith(MockitoJUnitRunner.class)
+public class TransactGetItemsOperationTest {
+    private static final String TABLE_NAME = "table-name";
+    private static final String TABLE_NAME_2 = "table-name-2";
+
+    private static final List<FakeItem> FAKE_ITEMS =
+        IntStream.range(0, 6).mapToObj($ -> createUniqueFakeItem()).collect(toList());
+    private static final List<Map<String, AttributeValue>> FAKE_ITEM_MAPS =
+        FAKE_ITEMS.stream()
+                  .map(item -> FakeItem.getTableSchema().itemToMap(item, FakeItem.getTableMetadata().primaryKeys()))
+                  .collect(toList());
+    private static final List<Key> FAKE_ITEM_KEYS =
+        FAKE_ITEMS.stream().map(fakeItem -> Key.builder().partitionValue(fakeItem.getId()).build()).collect(toList());
+
+    private static final List<FakeItemWithSort> FAKESORT_ITEMS =
+        IntStream.range(0, 6)
+                 .mapToObj($ -> createUniqueFakeItemWithSort()).collect(toList());
+
+    private static final List<Map<String, AttributeValue>> FAKESORT_ITEM_MAPS =
+        FAKESORT_ITEMS.stream()
+                      .map(item -> FakeItemWithSort.getTableSchema()
+                                                   .itemToMap(item, FakeItemWithSort.getTableMetadata().primaryKeys()))
+                      .collect(toList());
+    private static final List<Key> FAKESORT_ITEM_KEYS =
+        FAKESORT_ITEMS.stream()
+                      .map(fakeItemWithSort -> Key.builder()
+                                                  .partitionValue(fakeItemWithSort.getId())
+                                                  .sortValue(fakeItemWithSort.getSort())
+                                                  .build())
+                      .collect(toList());
+
+    @Mock
+    private DynamoDbClient mockDynamoDbClient;
+
+    @Mock
+    private DynamoDbEnhancedClientExtension mockExtension;
+
+    private DynamoDbEnhancedClient enhancedClient;
+    private DynamoDbTable<FakeItem> fakeItemMappedTable;
+    private DynamoDbTable<FakeItemWithSort> fakeItemWithSortMappedTable;
+
+    @Before
+    public void setupMappedTables() {
+        enhancedClient = DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).extensions().build();
+        fakeItemMappedTable = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema());
+        fakeItemWithSortMappedTable = enhancedClient.table(TABLE_NAME_2, FakeItemWithSort.getTableSchema());
+    }
+
+    @Test
+    public void generateRequest_getsFromMultipleTables_usingShortcutForm() {
+        TransactGetItemsEnhancedRequest transactGetItemsEnhancedRequest =
+            TransactGetItemsEnhancedRequest.builder()
+                                           .addGetItem(fakeItemMappedTable, FAKE_ITEM_KEYS.get(0))
+                                           .addGetItem(fakeItemWithSortMappedTable, FAKESORT_ITEM_KEYS.get(0))
+                                           .addGetItem(fakeItemWithSortMappedTable, FAKESORT_ITEM_KEYS.get(1))
+                                           .addGetItem(fakeItemMappedTable, FAKE_ITEM_KEYS.get(1))
+                                           .build();
+
+        TransactGetItemsOperation operation = TransactGetItemsOperation.create(transactGetItemsEnhancedRequest);
+
+        List<TransactGetItem> transactGetItems = Arrays.asList(
+            TransactGetItem.builder().get(Get.builder().tableName(TABLE_NAME).key(FAKE_ITEM_MAPS.get(0)).build()).build(),
+            TransactGetItem.builder().get(Get.builder().tableName(TABLE_NAME_2).key(FAKESORT_ITEM_MAPS.get(0)).build()).build(),
+            TransactGetItem.builder().get(Get.builder().tableName(TABLE_NAME_2).key(FAKESORT_ITEM_MAPS.get(1)).build()).build(),
+            TransactGetItem.builder().get(Get.builder().tableName(TABLE_NAME).key(FAKE_ITEM_MAPS.get(1)).build()).build());
+
+        TransactGetItemsRequest expectedRequest = TransactGetItemsRequest.builder()
+                                                                         .transactItems(transactGetItems)
+                                                                         .build();
+
+        TransactGetItemsRequest 
actualRequest = operation.generateRequest(null); + + assertThat(actualRequest, is(expectedRequest)); + } + + @Test + public void getServiceCall_makesTheRightCallAndReturnsResponse_usingKeyItemForm() { + TransactGetItemsEnhancedRequest transactGetItemsEnhancedRequest = + TransactGetItemsEnhancedRequest.builder() + .addGetItem(fakeItemMappedTable, FAKE_ITEMS.get(0)) + .build(); + + TransactGetItemsOperation operation = TransactGetItemsOperation.create(transactGetItemsEnhancedRequest); + + TransactGetItem transactGetItem = + TransactGetItem.builder().get(Get.builder().tableName(TABLE_NAME).key(FAKE_ITEM_MAPS.get(0)).build()).build(); + + TransactGetItemsRequest transactGetItemsRequest = TransactGetItemsRequest.builder() + .transactItems(singletonList(transactGetItem)) + .build(); + + TransactGetItemsResponse expectedResponse = TransactGetItemsResponse.builder().build(); + when(mockDynamoDbClient.transactGetItems(any(TransactGetItemsRequest.class))).thenReturn(expectedResponse); + + TransactGetItemsResponse response = operation.serviceCall(mockDynamoDbClient).apply(transactGetItemsRequest); + + assertThat(response, sameInstance(expectedResponse)); + verify(mockDynamoDbClient).transactGetItems(transactGetItemsRequest); + } + + @Test + public void transformResponse_noExtension_returnsItemsFromDifferentTables() { + TransactGetItemsOperation operation = TransactGetItemsOperation.create(emptyRequest()); + + List itemResponses = Arrays.asList( + ItemResponse.builder().item(FAKE_ITEM_MAPS.get(0)).build(), + ItemResponse.builder().item(FAKESORT_ITEM_MAPS.get(0)).build(), + ItemResponse.builder().item(FAKESORT_ITEM_MAPS.get(1)).build(), + ItemResponse.builder().item(FAKE_ITEM_MAPS.get(1)).build()); + TransactGetItemsResponse response = TransactGetItemsResponse.builder() + .responses(itemResponses) + .build(); + + List result = operation.transformResponse(response, null); + + assertThat(result, contains(DefaultDocument.create(FAKE_ITEM_MAPS.get(0)), + DefaultDocument.create(FAKESORT_ITEM_MAPS.get(0)), + DefaultDocument.create(FAKESORT_ITEM_MAPS.get(1)), + DefaultDocument.create(FAKE_ITEM_MAPS.get(1)))); + } + + @Test + public void transformResponse_doesNotInteractWithExtension() { + TransactGetItemsOperation operation = TransactGetItemsOperation.create(emptyRequest()); + + List itemResponses = Arrays.asList( + ItemResponse.builder().item(FAKE_ITEM_MAPS.get(0)).build(), + ItemResponse.builder().item(FAKESORT_ITEM_MAPS.get(0)).build(), + ItemResponse.builder().item(FAKESORT_ITEM_MAPS.get(1)).build(), + ItemResponse.builder().item(FAKE_ITEM_MAPS.get(1)).build()); + TransactGetItemsResponse response = TransactGetItemsResponse.builder() + .responses(itemResponses) + .build(); + + operation.transformResponse(response, mockExtension); + + verifyZeroInteractions(mockExtension); + } + + @Test + public void transformResponse_noExtension_returnsNullsAsNulls() { + TransactGetItemsOperation operation = TransactGetItemsOperation.create(emptyRequest()); + + List itemResponses = Arrays.asList( + ItemResponse.builder().item(FAKE_ITEM_MAPS.get(0)).build(), + ItemResponse.builder().item(FAKESORT_ITEM_MAPS.get(0)).build(), + null); + TransactGetItemsResponse response = TransactGetItemsResponse.builder() + .responses(itemResponses) + .build(); + + List result = operation.transformResponse(response, null); + + assertThat(result, contains(DefaultDocument.create(FAKE_ITEM_MAPS.get(0)), + DefaultDocument.create(FAKESORT_ITEM_MAPS.get(0)), + null)); + } + + @Test + public void transformResponse_noExtension_returnsEmptyAsNull() { 
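+        // An ItemResponse carrying an empty attribute map should still be transformed into a Document created from an empty map, as the assertion below expects.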
+ TransactGetItemsOperation operation = TransactGetItemsOperation.create(emptyRequest()); + + List itemResponses = Arrays.asList( + ItemResponse.builder().item(FAKE_ITEM_MAPS.get(0)).build(), + ItemResponse.builder().item(FAKESORT_ITEM_MAPS.get(0)).build(), + ItemResponse.builder().item(emptyMap()).build()); + TransactGetItemsResponse response = TransactGetItemsResponse.builder() + .responses(itemResponses) + .build(); + + List result = operation.transformResponse(response, null); + + assertThat(result, contains(DefaultDocument.create(FAKE_ITEM_MAPS.get(0)), + DefaultDocument.create(FAKESORT_ITEM_MAPS.get(0)), + DefaultDocument.create(emptyMap()))); + } + + private static TransactGetItemsEnhancedRequest emptyRequest() { + return TransactGetItemsEnhancedRequest.builder().build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactWriteItemsOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactWriteItemsOperationTest.java new file mode 100644 index 000000000000..9696e01954ac --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactWriteItemsOperationTest.java @@ -0,0 +1,160 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static java.util.Collections.singletonList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.model.TransactWriteItemsEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.Put; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItemsRequest; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItemsResponse; + +@RunWith(MockitoJUnitRunner.class) +public class TransactWriteItemsOperationTest { + private static final String TABLE_NAME = "table-name"; + + private final FakeItem fakeItem1 = FakeItem.createUniqueFakeItem(); + private final FakeItem fakeItem2 = FakeItem.createUniqueFakeItem(); + private final Map fakeItemMap1 = FakeItem.getTableSchema().itemToMap(fakeItem1, true); + private final Map fakeItemMap2 = FakeItem.getTableSchema().itemToMap(fakeItem2, true); + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + @Mock + private DynamoDbClient mockDynamoDbClient; + + private TransactWriteItem fakeTransactWriteItem1 = TransactWriteItem.builder() + .put(Put.builder() + .item(fakeItemMap1) + .tableName(TABLE_NAME) + .build()) + .build(); + + private TransactWriteItem fakeTransactWriteItem2 = TransactWriteItem.builder() + .put(Put.builder() + .item(fakeItemMap2) + .tableName(TABLE_NAME) + .build()) + .build(); + + private DynamoDbEnhancedClient enhancedClient; + private DynamoDbTable fakeItemMappedTable; + + @Before + public void setupMappedTables() { + enhancedClient = DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).extensions().build(); + fakeItemMappedTable = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema()); + } + + @Test + public void generateRequest_singleTransaction() { + TransactWriteItemsEnhancedRequest transactGetItemsEnhancedRequest = + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(fakeItemMappedTable, fakeItem1) + .build(); + + TransactWriteItemsOperation operation = TransactWriteItemsOperation.create(transactGetItemsEnhancedRequest); + TransactWriteItemsRequest actualRequest = operation.generateRequest(mockDynamoDbEnhancedClientExtension); + TransactWriteItemsRequest expectedRequest = TransactWriteItemsRequest.builder() + .transactItems(fakeTransactWriteItem1) + .build(); + + assertThat(actualRequest, is(expectedRequest)); + verifyZeroInteractions(mockDynamoDbEnhancedClientExtension); + } + + @Test + public void generateRequest_multipleTransactions() { + 
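+        // Two put items against the same mapped table should each produce a TransactWriteItem in the generated request.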
TransactWriteItemsEnhancedRequest transactGetItemsEnhancedRequest = + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(fakeItemMappedTable, fakeItem1) + .addPutItem(fakeItemMappedTable, fakeItem2) + .build(); + + TransactWriteItemsOperation operation = TransactWriteItemsOperation.create(transactGetItemsEnhancedRequest); + TransactWriteItemsRequest actualRequest = operation.generateRequest(mockDynamoDbEnhancedClientExtension); + TransactWriteItemsRequest expectedRequest = + TransactWriteItemsRequest.builder() + .transactItems(fakeTransactWriteItem1, fakeTransactWriteItem2) + .build(); + + assertThat(actualRequest, is(expectedRequest)); + verifyZeroInteractions(mockDynamoDbEnhancedClientExtension); + } + + @Test + public void generateRequest_noTransactions() { + TransactWriteItemsOperation operation = TransactWriteItemsOperation.create(emptyRequest()); + + TransactWriteItemsRequest actualRequest = operation.generateRequest(mockDynamoDbEnhancedClientExtension); + + TransactWriteItemsRequest expectedRequest = TransactWriteItemsRequest.builder().build(); + assertThat(actualRequest, is(expectedRequest)); + verifyZeroInteractions(mockDynamoDbEnhancedClientExtension); + } + + @Test + public void getServiceCall_callsServiceAndReturnsResult() { + TransactWriteItemsOperation operation = TransactWriteItemsOperation.create(emptyRequest()); + TransactWriteItemsRequest request = + TransactWriteItemsRequest.builder() + .transactItems(singletonList(fakeTransactWriteItem1)) + .build(); + TransactWriteItemsResponse expectedResponse = TransactWriteItemsResponse.builder() + .build(); + when(mockDynamoDbClient.transactWriteItems(any(TransactWriteItemsRequest.class))).thenReturn(expectedResponse); + + TransactWriteItemsResponse actualResponse = operation.serviceCall(mockDynamoDbClient).apply(request); + + assertThat(actualResponse, is(sameInstance(expectedResponse))); + verify(mockDynamoDbClient).transactWriteItems(request); + verifyZeroInteractions(mockDynamoDbEnhancedClientExtension); + } + + @Test + public void transformResponse_doesNothing() { + TransactWriteItemsOperation operation = TransactWriteItemsOperation.create(emptyRequest()); + TransactWriteItemsResponse response = TransactWriteItemsResponse.builder().build(); + + operation.transformResponse(response, mockDynamoDbEnhancedClientExtension); + + verifyZeroInteractions(mockDynamoDbEnhancedClientExtension); + } + + private TransactWriteItemsEnhancedRequest emptyRequest() { + return TransactWriteItemsEnhancedRequest.builder().build(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperationTest.java new file mode 100644 index 000000000000..4698dad3ad2f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperationTest.java @@ -0,0 +1,778 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.operations; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort.createUniqueFakeItemWithSort; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; +import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.model.UpdateItemEnhancedRequest; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.ReturnValue; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; +import software.amazon.awssdk.services.dynamodb.model.Update; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; + +@RunWith(MockitoJUnitRunner.class) +public class UpdateItemOperationTest { + private static final String TABLE_NAME = "table-name"; + private static final String OTHER_ATTRIBUTE_1_NAME = "#AMZN_MAPPED_other_attribute_1"; + private static final String OTHER_ATTRIBUTE_2_NAME = "#AMZN_MAPPED_other_attribute_2"; + private static final String SUBCLASS_ATTRIBUTE_NAME = "#AMZN_MAPPED_subclass_attribute"; + private static final String OTHER_ATTRIBUTE_1_VALUE = ":AMZN_MAPPED_other_attribute_1"; + private static final String OTHER_ATTRIBUTE_2_VALUE = ":AMZN_MAPPED_other_attribute_2"; + private static final String SUBCLASS_ATTRIBUTE_VALUE = ":AMZN_MAPPED_subclass_attribute"; + + 
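+    // The #AMZN_MAPPED_... and :AMZN_MAPPED_... constants above mirror the expression attribute name and value placeholders that the generated update requests are expected to contain for mapped attributes.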
private static final OperationContext PRIMARY_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + private static final OperationContext GSI_1_CONTEXT = + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); + private static final Expression CONDITION_EXPRESSION; + private static final Expression MINIMAL_CONDITION_EXPRESSION = Expression.builder().expression("foo = bar").build(); + + + static { + Map expressionNames = new HashMap<>(); + expressionNames.put("#test_field_1", "test_field_1"); + expressionNames.put("#test_field_2", "test_field_2"); + Map expressionValues = new HashMap<>(); + expressionValues.put(":test_value_1", numberValue(1)); + expressionValues.put(":test_value_2", numberValue(2)); + CONDITION_EXPRESSION = Expression.builder() + .expression("#test_field_1 = :test_value_1 OR #test_field_2 = :test_value_2") + .expressionNames(Collections.unmodifiableMap(expressionNames)) + .expressionValues(Collections.unmodifiableMap(expressionValues)) + .build(); + } + + @Mock + private DynamoDbClient mockDynamoDbClient; + + @Mock + private DynamoDbEnhancedClientExtension mockDynamoDbEnhancedClientExtension; + + @Test + public void getServiceCall_makesTheRightCallAndReturnsResponse() { + FakeItem item = createUniqueFakeItem(); + UpdateItemOperation updateItemOperation = UpdateItemOperation.create( + UpdateItemEnhancedRequest.builder(FakeItem.class).item(item).build()); + UpdateItemRequest updateItemRequest = UpdateItemRequest.builder().tableName(TABLE_NAME).build(); + UpdateItemResponse expectedResponse = UpdateItemResponse.builder().build(); + when(mockDynamoDbClient.updateItem(any(UpdateItemRequest.class))).thenReturn(expectedResponse); + + UpdateItemResponse response = updateItemOperation.serviceCall(mockDynamoDbClient).apply(updateItemRequest); + + assertThat(response, sameInstance(expectedResponse)); + verify(mockDynamoDbClient).updateItem(updateItemRequest); + } + + @Test(expected = IllegalArgumentException.class) + public void generateRequest_withIndex_throwsIllegalArgumentException() { + FakeItem item = createUniqueFakeItem(); + UpdateItemOperation updateItemOperation = UpdateItemOperation.create( + UpdateItemEnhancedRequest.builder(FakeItem.class).item(item).build()); + + updateItemOperation.generateRequest(FakeItem.getTableSchema(), GSI_1_CONTEXT, null); + } + + @Test + public void generateRequest_nullValuesNotIgnoredByDefault() { + FakeItemWithSort item = createUniqueFakeItemWithSort(); + item.setOtherAttribute1("value-1"); + UpdateItemOperation updateItemOperation = UpdateItemOperation.create( + UpdateItemEnhancedRequest.builder(FakeItemWithSort.class).item(item).build()); + Map expectedKey = new HashMap<>(); + expectedKey.put("id", AttributeValue.builder().s(item.getId()).build()); + expectedKey.put("sort", AttributeValue.builder().s(item.getSort()).build()); + Map expectedValues = new HashMap<>(); + expectedValues.put(OTHER_ATTRIBUTE_1_VALUE, AttributeValue.builder().s("value-1").build()); + Map expectedNames = new HashMap<>(); + expectedNames.put(OTHER_ATTRIBUTE_1_NAME, "other_attribute_1"); + expectedNames.put(OTHER_ATTRIBUTE_2_NAME, "other_attribute_2"); + UpdateItemRequest.Builder baseExpectedRequest = UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .expressionAttributeValues(expectedValues) + .expressionAttributeNames(expectedNames) + .key(expectedKey) + .returnValues(ReturnValue.ALL_NEW); + UpdateItemRequest expectedRequest = + baseExpectedRequest.updateExpression("SET " + OTHER_ATTRIBUTE_1_NAME + " = " + 
OTHER_ATTRIBUTE_1_VALUE + + " REMOVE " + OTHER_ATTRIBUTE_2_NAME) + .build(); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_withConditionExpression() { + FakeItemWithSort item = createUniqueFakeItemWithSort(); + item.setOtherAttribute1("value-1"); + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItemWithSort.class) + .item(item) + .conditionExpression(CONDITION_EXPRESSION) + .build()); + Map expectedKey = new HashMap<>(); + expectedKey.put("id", AttributeValue.builder().s(item.getId()).build()); + expectedKey.put("sort", AttributeValue.builder().s(item.getSort()).build()); + Map expectedValues = new HashMap<>(CONDITION_EXPRESSION.expressionValues()); + expectedValues.put(OTHER_ATTRIBUTE_1_VALUE, AttributeValue.builder().s("value-1").build()); + Map expectedNames = new HashMap<>(CONDITION_EXPRESSION.expressionNames()); + expectedNames.put(OTHER_ATTRIBUTE_1_NAME, "other_attribute_1"); + expectedNames.put(OTHER_ATTRIBUTE_2_NAME, "other_attribute_2"); + UpdateItemRequest.Builder baseExpectedRequest = + UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .expressionAttributeValues(expectedValues) + .expressionAttributeNames(expectedNames) + .conditionExpression(CONDITION_EXPRESSION.expression()) + .key(expectedKey) + .returnValues(ReturnValue.ALL_NEW); + UpdateItemRequest expectedRequest = + baseExpectedRequest.updateExpression("SET " + OTHER_ATTRIBUTE_1_NAME + " = " + OTHER_ATTRIBUTE_1_VALUE + + " REMOVE " + OTHER_ATTRIBUTE_2_NAME) + .build(); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_withMinimalConditionExpression() { + FakeItemWithSort item = createUniqueFakeItemWithSort(); + item.setOtherAttribute1("value-1"); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItemWithSort.class) + .item(item) + .conditionExpression(MINIMAL_CONDITION_EXPRESSION) + .build()); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + Map expectedValues = new HashMap<>(); + expectedValues.put(OTHER_ATTRIBUTE_1_VALUE, AttributeValue.builder().s("value-1").build()); + Map expectedNames = new HashMap<>(); + expectedNames.put(OTHER_ATTRIBUTE_1_NAME, "other_attribute_1"); + expectedNames.put(OTHER_ATTRIBUTE_2_NAME, "other_attribute_2"); + assertThat(request.conditionExpression(), is(MINIMAL_CONDITION_EXPRESSION.expression())); + assertThat(request.expressionAttributeNames(), is(expectedNames)); + assertThat(request.expressionAttributeValues(), is(expectedValues)); + } + + @Test + public void generateRequest_explicitlyUnsetIgnoreNulls() { + FakeItemWithSort item = createUniqueFakeItemWithSort(); + item.setOtherAttribute1("value-1"); + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItemWithSort.class) + .item(item) + .ignoreNulls(false) + .build()); + Map expectedKey = new HashMap<>(); + expectedKey.put("id", AttributeValue.builder().s(item.getId()).build()); + expectedKey.put("sort", AttributeValue.builder().s(item.getSort()).build()); + Map expectedValues = new HashMap<>(); + 
expectedValues.put(OTHER_ATTRIBUTE_1_VALUE, AttributeValue.builder().s("value-1").build()); + Map expectedNames = new HashMap<>(); + expectedNames.put(OTHER_ATTRIBUTE_1_NAME, "other_attribute_1"); + expectedNames.put(OTHER_ATTRIBUTE_2_NAME, "other_attribute_2"); + UpdateItemRequest.Builder baseExpectedRequest = UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .expressionAttributeValues(expectedValues) + .expressionAttributeNames(expectedNames) + .key(expectedKey) + .returnValues(ReturnValue.ALL_NEW); + UpdateItemRequest expectedRequest = + baseExpectedRequest.updateExpression("SET " + OTHER_ATTRIBUTE_1_NAME + " = " + OTHER_ATTRIBUTE_1_VALUE + + " REMOVE " + OTHER_ATTRIBUTE_2_NAME) + .build(); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_multipleSetters() { + FakeItemWithSort item = createUniqueFakeItemWithSort(); + item.setOtherAttribute1("value-1"); + item.setOtherAttribute2("value-2"); + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItemWithSort.class) + .item(item) + .ignoreNulls(false) + .build()); + Map expectedKey = new HashMap<>(); + expectedKey.put("id", AttributeValue.builder().s(item.getId()).build()); + expectedKey.put("sort", AttributeValue.builder().s(item.getSort()).build()); + Map expectedValues = new HashMap<>(); + expectedValues.put(OTHER_ATTRIBUTE_1_VALUE, AttributeValue.builder().s("value-1").build()); + expectedValues.put(OTHER_ATTRIBUTE_2_VALUE, AttributeValue.builder().s("value-2").build()); + Map expectedNames = new HashMap<>(); + expectedNames.put(OTHER_ATTRIBUTE_1_NAME, "other_attribute_1"); + expectedNames.put(OTHER_ATTRIBUTE_2_NAME, "other_attribute_2"); + UpdateItemRequest.Builder baseExpectedRequest = UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .expressionAttributeValues(expectedValues) + .expressionAttributeNames(expectedNames) + .key(expectedKey) + .returnValues(ReturnValue.ALL_NEW); + UpdateItemRequest expectedRequest1 = + baseExpectedRequest.updateExpression("SET " + OTHER_ATTRIBUTE_1_NAME + " = " + OTHER_ATTRIBUTE_1_VALUE + + ", " + OTHER_ATTRIBUTE_2_NAME + " = " + OTHER_ATTRIBUTE_2_VALUE) + .build(); + UpdateItemRequest expectedRequest2 = + baseExpectedRequest.updateExpression("SET " + OTHER_ATTRIBUTE_2_NAME + " = " + OTHER_ATTRIBUTE_2_VALUE + + ", " + OTHER_ATTRIBUTE_1_NAME + " = " + OTHER_ATTRIBUTE_1_VALUE) + .build(); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request, either(is(expectedRequest1)).or(is(expectedRequest2))); + } + + @Test + public void generateRequest_multipleDeletes() { + FakeItemWithSort item = createUniqueFakeItemWithSort(); + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItemWithSort.class) + .item(item) + .ignoreNulls(false) + .build()); + Map expectedKey = new HashMap<>(); + expectedKey.put("id", AttributeValue.builder().s(item.getId()).build()); + expectedKey.put("sort", AttributeValue.builder().s(item.getSort()).build()); + Map expectedNames = new HashMap<>(); + expectedNames.put(OTHER_ATTRIBUTE_1_NAME, "other_attribute_1"); + expectedNames.put(OTHER_ATTRIBUTE_2_NAME, "other_attribute_2"); + UpdateItemRequest.Builder baseExpectedRequest = UpdateItemRequest.builder() + .tableName(TABLE_NAME) + 
.expressionAttributeNames(expectedNames) + .key(expectedKey) + .returnValues(ReturnValue.ALL_NEW); + UpdateItemRequest expectedRequest1 = + baseExpectedRequest.updateExpression("REMOVE " + OTHER_ATTRIBUTE_1_NAME + ", " + OTHER_ATTRIBUTE_2_NAME) + .build(); + UpdateItemRequest expectedRequest2 = + baseExpectedRequest.updateExpression("REMOVE " + OTHER_ATTRIBUTE_2_NAME + ", " + OTHER_ATTRIBUTE_1_NAME) + .build(); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request,either(is(expectedRequest1)).or(is(expectedRequest2))); + } + + @Test + public void generateRequest_canIgnoreNullValues() { + FakeItemWithSort item = createUniqueFakeItemWithSort(); + item.setOtherAttribute1("value-1"); + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItemWithSort.class) + .item(item) + .ignoreNulls(true) + .build()); + Map expectedKey = new HashMap<>(); + expectedKey.put("id", AttributeValue.builder().s(item.getId()).build()); + expectedKey.put("sort", AttributeValue.builder().s(item.getSort()).build()); + Map expectedValues = + singletonMap(OTHER_ATTRIBUTE_1_VALUE, AttributeValue.builder().s("value-1").build()); + Map expectedNames = singletonMap(OTHER_ATTRIBUTE_1_NAME, "other_attribute_1"); + UpdateItemRequest expectedRequest = UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .updateExpression("SET " + OTHER_ATTRIBUTE_1_NAME + " = " + OTHER_ATTRIBUTE_1_VALUE) + .expressionAttributeValues(expectedValues) + .expressionAttributeNames(expectedNames) + .key(expectedKey) + .returnValues(ReturnValue.ALL_NEW) + .build(); + + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_keyOnlyItem() { + FakeItemWithSort item = createUniqueFakeItemWithSort(); + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItemWithSort.class) + .item(item) + .ignoreNulls(true) + .build()); + Map expectedKey = new HashMap<>(); + expectedKey.put("id", AttributeValue.builder().s(item.getId()).build()); + expectedKey.put("sort", AttributeValue.builder().s(item.getSort()).build()); + UpdateItemRequest expectedRequest = UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .key(expectedKey) + .returnValues(ReturnValue.ALL_NEW) + .build(); + + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request, is(expectedRequest)); + } + + @Test + public void generateRequest_withExtension_modifiesKeyPortionOfItem() { + FakeItem baseFakeItem = createUniqueFakeItem(); + FakeItem fakeItem = createUniqueFakeItem(); + + Map baseMap = FakeItem.getTableSchema().itemToMap(baseFakeItem, false); + Map fakeMap = FakeItem.getTableSchema().itemToMap(fakeItem, false); + Map keyMap = FakeItem.getTableSchema().itemToMap(fakeItem, singletonList("id")); + + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().transformedItem(fakeMap).build()); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class).item(baseFakeItem).build()); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItem.getTableSchema(), + 
PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(request.key(), is(keyMap)); + verify(mockDynamoDbEnhancedClientExtension).beforeWrite(DefaultDynamoDbExtensionContext.builder() + .tableMetadata(FakeItem.getTableMetadata()) + .operationContext(PRIMARY_CONTEXT) + .items(baseMap).build()); + } + + @Test + public void generateRequest_withExtension_modifiesUpdateExpression() { + FakeItem fakeItem = createUniqueFakeItem(); + Map baseMap = new HashMap<>(FakeItem.getTableSchema().itemToMap(fakeItem, true)); + + Map fakeMap = new HashMap<>(baseMap); + fakeMap.put("subclass_attribute", AttributeValue.builder().s("1").build()); + + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().transformedItem(fakeMap).build()); + + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(fakeItem) + .ignoreNulls(true) + .build()); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(request.updateExpression(), is("SET " + SUBCLASS_ATTRIBUTE_NAME + " = " + SUBCLASS_ATTRIBUTE_VALUE)); + assertThat(request.expressionAttributeValues(), hasEntry(SUBCLASS_ATTRIBUTE_VALUE, + AttributeValue.builder().s("1").build())); + assertThat(request.expressionAttributeNames(), hasEntry(SUBCLASS_ATTRIBUTE_NAME, "subclass_attribute")); + } + + @Test + public void transformResponse_mapsAttributesReturnedInResponse() { + FakeItem fakeItem1 = FakeItem.createUniqueFakeItem(); + FakeItem fakeItem2 = FakeItem.createUniqueFakeItem(); + Map fakeItem2Attributes = FakeItem.getTableSchema().itemToMap(fakeItem2, true); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class).item(fakeItem1).build()); + + FakeItem result = updateItemOperation.transformResponse( + UpdateItemResponse.builder().attributes(fakeItem2Attributes).build(), + FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(result, is(fakeItem2)); + } + + @Test + public void generateRequest_withExtensions_singleCondition() { + FakeItem baseFakeItem = createUniqueFakeItem(); + FakeItem fakeItem = createUniqueFakeItem(); + Map fakeMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + Expression condition = Expression.builder().expression("condition").expressionValues(fakeMap).build(); + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().additionalConditionalExpression(condition).build()); + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .ignoreNulls(true) + .build()); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(request.conditionExpression(), is("condition")); + assertThat(request.expressionAttributeValues(), is(fakeMap)); + } + + @Test + public void generateRequest_withExtensions_conflictingExpressionValue_throwsRuntimeException() { + FakeItem baseFakeItem = createUniqueFakeItem(); + baseFakeItem.setSubclassAttribute("something"); + Map values = singletonMap(SUBCLASS_ATTRIBUTE_VALUE, + AttributeValue.builder().s("1").build()); + Expression condition1 = 
Expression.builder().expression("condition1").expressionValues(values).build(); + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().additionalConditionalExpression(condition1).build()); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .ignoreNulls(true) + .build()); + + try { + updateItemOperation.generateRequest(FakeItem.getTableSchema(), PRIMARY_CONTEXT, mockDynamoDbEnhancedClientExtension); + + fail("Exception should be thrown"); + } catch (RuntimeException e) { + assertThat(e.getMessage(), containsString("subclass_attribute")); + } + } + + @Test + public void generateRequest_withExtensions_conflictingExpressionName_throwsRuntimeException() { + FakeItem baseFakeItem = createUniqueFakeItem(); + baseFakeItem.setSubclassAttribute("something"); + Map names = singletonMap(SUBCLASS_ATTRIBUTE_NAME, "conflict"); + Expression condition1 = Expression.builder().expression("condition1").expressionNames(names).build(); + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().additionalConditionalExpression(condition1).build()); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .ignoreNulls(true) + .build()); + + try { + updateItemOperation.generateRequest(FakeItem.getTableSchema(), PRIMARY_CONTEXT, mockDynamoDbEnhancedClientExtension); + + fail("Exception should be thrown"); + } catch (RuntimeException e) { + assertThat(e.getMessage(), containsString("subclass_attribute")); + } + } + + @Test + public void generateRequest_withExtension_correctlyCoalescesIdenticalExpressionValues() { + FakeItem baseFakeItem = createUniqueFakeItem(); + baseFakeItem.setSubclassAttribute("something"); + Map values = singletonMap(SUBCLASS_ATTRIBUTE_VALUE, + AttributeValue.builder().s("something").build()); + Expression condition = Expression.builder().expression("condition").expressionValues(values).build(); + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().additionalConditionalExpression(condition).build()); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .ignoreNulls(true) + .build()); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(request.expressionAttributeValues(), is(values)); + } + + @Test + public void generateRequest_withExtension_correctlyCoalescesIdenticalExpressionNames() { + FakeItem baseFakeItem = createUniqueFakeItem(); + baseFakeItem.setSubclassAttribute("something"); + Map names = singletonMap(SUBCLASS_ATTRIBUTE_NAME, "subclass_attribute"); + Expression condition = Expression.builder().expression("condition").expressionNames(names).build(); + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().additionalConditionalExpression(condition).build()); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .ignoreNulls(true) + .build()); 
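+        // The extension returns the same name mapping the operation generates itself, so the merged expression attribute names should coalesce to a single entry instead of raising a conflict.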
+ + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(request.expressionAttributeNames(), is(names)); + } + + @Test + public void generateRequest_withExtension_noModifications() { + FakeItem baseFakeItem = createUniqueFakeItem(); + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder().build()); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .ignoreNulls(true) + .build()); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + assertThat(request.conditionExpression(), is(nullValue())); + assertThat(request.expressionAttributeValues().size(), is(0)); + } + + @Test + public void generateRequest_withExtension_conditionAndModification() { + FakeItem baseFakeItem = createUniqueFakeItem(); + Map baseMap = new HashMap<>(FakeItem.getTableSchema().itemToMap(baseFakeItem, true)); + + Map fakeMap = new HashMap<>(baseMap); + fakeMap.put("subclass_attribute", AttributeValue.builder().s("1").build()); + + Map conditionValues = new HashMap<>(); + conditionValues.put(":condition_value", AttributeValue.builder().s("2").build()); + + when(mockDynamoDbEnhancedClientExtension.beforeWrite(any(DynamoDbExtensionContext.BeforeWrite.class))) + .thenReturn(WriteModification.builder() + .transformedItem(fakeMap) + .additionalConditionalExpression(Expression.builder() + .expression("condition") + .expressionValues(conditionValues) + .build()) + .build()); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .ignoreNulls(true) + .build()); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(request.updateExpression(), is("SET " + SUBCLASS_ATTRIBUTE_NAME + " = " + SUBCLASS_ATTRIBUTE_VALUE)); + assertThat(request.expressionAttributeValues(), hasEntry(SUBCLASS_ATTRIBUTE_VALUE, + AttributeValue.builder().s("1").build())); + assertThat(request.expressionAttributeValues(), hasEntry(":condition_value", + AttributeValue.builder().s("2").build())); + assertThat(request.expressionAttributeNames(), hasEntry(SUBCLASS_ATTRIBUTE_NAME, "subclass_attribute")); + } + + @Test + public void transformResponse_withExtension_returnsCorrectTransformedItem() { + FakeItem baseFakeItem = createUniqueFakeItem(); + FakeItem fakeItem = createUniqueFakeItem(); + Map baseFakeMap = FakeItem.getTableSchema().itemToMap(baseFakeItem, true); + Map fakeMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .ignoreNulls(true) + .build()); + + when(mockDynamoDbEnhancedClientExtension.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn( + ReadModification.builder().transformedItem(fakeMap).build()); + UpdateItemResponse response = UpdateItemResponse.builder() + .attributes(baseFakeMap) + .build(); + + FakeItem resultItem = updateItemOperation.transformResponse(response, FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + mockDynamoDbEnhancedClientExtension); + + assertThat(resultItem, 
is(fakeItem)); + verify(mockDynamoDbEnhancedClientExtension).afterRead(DefaultDynamoDbExtensionContext.builder() + .tableMetadata(FakeItem.getTableMetadata()) + .operationContext(PRIMARY_CONTEXT) + .items(baseFakeMap).build()); + } + + @Test + public void transformResponse_withNoOpExtension_returnsCorrectItem() { + FakeItem baseFakeItem = createUniqueFakeItem(); + Map baseFakeMap = FakeItem.getTableSchema().itemToMap(baseFakeItem, true); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(baseFakeItem) + .ignoreNulls(true) + .build()); + + when(mockDynamoDbEnhancedClientExtension.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenReturn( + ReadModification.builder().build()); + UpdateItemResponse response = UpdateItemResponse.builder() + .attributes(baseFakeMap) + .build(); + + FakeItem resultItem = updateItemOperation.transformResponse(response, FakeItem.getTableSchema(), + PRIMARY_CONTEXT, mockDynamoDbEnhancedClientExtension); + + assertThat(resultItem, is(baseFakeItem)); + verify(mockDynamoDbEnhancedClientExtension).afterRead(DefaultDynamoDbExtensionContext.builder() + .tableMetadata(FakeItem.getTableMetadata()) + .operationContext(PRIMARY_CONTEXT) + .items(baseFakeMap).build()); + } + + @Test(expected = IllegalStateException.class) + public void transformResponse_afterReadThrowsException_throwsIllegalStateException() { + when(mockDynamoDbEnhancedClientExtension.afterRead(any(DynamoDbExtensionContext.AfterRead.class))).thenThrow(RuntimeException.class); + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class).item(createUniqueFakeItem()).build()); + + UpdateItemResponse response = + UpdateItemResponse.builder() + .attributes(FakeItem.getTableSchema().itemToMap(FakeItem.createUniqueFakeItem(), true)) + .build(); + + updateItemOperation.transformResponse(response, FakeItem.getTableSchema(), PRIMARY_CONTEXT, mockDynamoDbEnhancedClientExtension); + } + + @Test + public void generateTransactWriteItem_basicRequest() { + FakeItem fakeItem = createUniqueFakeItem(); + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + UpdateItemOperation updateItemOperation = + spy(UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class).item(fakeItem).build())); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + String updateExpression = "update-expression"; + Map attributeValues = Collections.singletonMap("key", stringValue("value1")); + Map attributeNames = Collections.singletonMap("key", "value2"); + + UpdateItemRequest updateItemRequest = UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .key(fakeItemMap) + .updateExpression(updateExpression) + .expressionAttributeValues(attributeValues) + .expressionAttributeNames(attributeNames) + .build(); + doReturn(updateItemRequest).when(updateItemOperation).generateRequest(any(), any(), any()); + + TransactWriteItem actualResult = updateItemOperation.generateTransactWriteItem(FakeItem.getTableSchema(), + context, + mockDynamoDbEnhancedClientExtension); + + TransactWriteItem expectedResult = TransactWriteItem.builder() + .update(Update.builder() + .key(fakeItemMap) + .tableName(TABLE_NAME) + .updateExpression(updateExpression) + .expressionAttributeNames(attributeNames) + .expressionAttributeValues(attributeValues) + .build()) + .build(); + assertThat(actualResult, is(expectedResult)); + 
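+        // The spied generateRequest should have been invoked with the same schema, context and extension that were passed to generateTransactWriteItem.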
verify(updateItemOperation).generateRequest(FakeItem.getTableSchema(), context, mockDynamoDbEnhancedClientExtension); + } + + @Test + public void generateTransactWriteItem_conditionalRequest() { + FakeItem fakeItem = createUniqueFakeItem(); + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + UpdateItemOperation updateItemOperation = + spy(UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class).item(fakeItem).build())); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + String updateExpression = "update-expression"; + String conditionExpression = "condition-expression"; + Map attributeValues = Collections.singletonMap("key", stringValue("value1")); + Map attributeNames = Collections.singletonMap("key", "value2"); + + UpdateItemRequest updateItemRequest = UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .key(fakeItemMap) + .updateExpression(updateExpression) + .conditionExpression(conditionExpression) + .expressionAttributeValues(attributeValues) + .expressionAttributeNames(attributeNames) + .build(); + doReturn(updateItemRequest).when(updateItemOperation).generateRequest(any(), any(), any()); + + TransactWriteItem actualResult = updateItemOperation.generateTransactWriteItem(FakeItem.getTableSchema(), + context, + mockDynamoDbEnhancedClientExtension); + + TransactWriteItem expectedResult = TransactWriteItem.builder() + .update(Update.builder() + .key(fakeItemMap) + .tableName(TABLE_NAME) + .updateExpression(updateExpression) + .conditionExpression(conditionExpression) + .expressionAttributeNames(attributeNames) + .expressionAttributeValues(attributeValues) + .build()) + .build(); + assertThat(actualResult, is(expectedResult)); + verify(updateItemOperation).generateRequest(FakeItem.getTableSchema(), context, mockDynamoDbEnhancedClientExtension); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchemaTest.java new file mode 100644 index 000000000000..41c8e0bf0685 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchemaTest.java @@ -0,0 +1,1067 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.binaryValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.AbstractBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.AbstractImmutable; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.AttributeConverterBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.AttributeConverterNoConstructorBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.CommonTypesBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.DocumentBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.EmptyConverterProvidersInvalidBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.EmptyConverterProvidersValidBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.EnumBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.ExtendedBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.FlattenedBeanBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.FlattenedImmutableBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.IgnoredAttributeBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.InvalidBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.ListBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.MapBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.MultipleConverterProvidersBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.NoConstructorConverterProvidersBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.ParameterizedAbstractBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.ParameterizedDocumentBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.PrimitiveTypesBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.RemappedAttributeBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SecondaryIndexBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SetBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SetterAnnotatedBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SimpleBean; +import 
software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SingleConverterProvidersBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SortKeyBean; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@RunWith(MockitoJUnitRunner.class) +public class BeanTableSchemaTest { + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void simpleBean_correctlyAssignsPrimaryPartitionKey() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SimpleBean.class); + assertThat(beanTableSchema.tableMetadata().primaryPartitionKey(), is("id")); + } + + @Test + public void sortKeyBean_correctlyAssignsSortKey() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SortKeyBean.class); + assertThat(beanTableSchema.tableMetadata().primarySortKey(), is(Optional.of("sort"))); + } + + @Test + public void simpleBean_hasNoSortKey() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SimpleBean.class); + assertThat(beanTableSchema.tableMetadata().primarySortKey(), is(Optional.empty())); + } + + @Test + public void simpleBean_hasNoAdditionalKeys() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SimpleBean.class); + assertThat(beanTableSchema.tableMetadata().allKeys(), contains("id")); + } + + @Test + public void sortKeyBean_hasNoAdditionalKeys() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SortKeyBean.class); + assertThat(beanTableSchema.tableMetadata().allKeys(), containsInAnyOrder("id", "sort")); + } + + @Test + public void secondaryIndexBean_definesGsiCorrectly() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SecondaryIndexBean.class); + + assertThat(beanTableSchema.tableMetadata().indexPartitionKey("gsi"), is("sort")); + assertThat(beanTableSchema.tableMetadata().indexSortKey("gsi"), is(Optional.of("attribute"))); + } + + @Test + public void secondaryIndexBean_definesLsiCorrectly() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SecondaryIndexBean.class); + + assertThat(beanTableSchema.tableMetadata().indexPartitionKey("lsi"), is("id")); + assertThat(beanTableSchema.tableMetadata().indexSortKey("lsi"), is(Optional.of("attribute"))); + } + + @Test + public void dynamoDbIgnore_propertyIsIgnored() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(IgnoredAttributeBean.class); + IgnoredAttributeBean ignoredAttributeBean = new IgnoredAttributeBean(); + ignoredAttributeBean.setId("id-value"); + ignoredAttributeBean.setIntegerAttribute(123); + + Map itemMap = beanTableSchema.itemToMap(ignoredAttributeBean, false); + + assertThat(itemMap.size(), is(1)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + } + + @Test + public void setterAnnotations_alsoWork() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SetterAnnotatedBean.class); + SetterAnnotatedBean setterAnnotatedBean = new SetterAnnotatedBean(); + setterAnnotatedBean.setId("id-value"); + setterAnnotatedBean.setIntegerAttribute(123); + + assertThat(beanTableSchema.tableMetadata().primaryPartitionKey(), is("id")); + + Map itemMap = beanTableSchema.itemToMap(setterAnnotatedBean, false); + assertThat(itemMap.size(), is(1)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + } + + @Test + public void dynamoDbAttribute_remapsAttributeName() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(RemappedAttributeBean.class); + + assertThat(beanTableSchema.tableMetadata().primaryPartitionKey(), is("remappedAttribute")); + } + + @Test + public void 
dynamoDbFlatten_correctlyFlattensBeanAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(FlattenedBeanBean.class); + AbstractBean abstractBean = new AbstractBean(); + abstractBean.setAttribute2("two"); + FlattenedBeanBean flattenedBeanBean = new FlattenedBeanBean(); + flattenedBeanBean.setId("id-value"); + flattenedBeanBean.setAttribute1("one"); + flattenedBeanBean.setAbstractBean(abstractBean); + + Map itemMap = beanTableSchema.itemToMap(flattenedBeanBean, false); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("attribute2", stringValue("two"))); + } + + @Test + public void dynamoDbFlatten_correctlyFlattensImmutableAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(FlattenedImmutableBean.class); + AbstractImmutable abstractImmutable = AbstractImmutable.builder().attribute2("two").build(); + FlattenedImmutableBean flattenedImmutableBean = new FlattenedImmutableBean(); + flattenedImmutableBean.setId("id-value"); + flattenedImmutableBean.setAttribute1("one"); + flattenedImmutableBean.setAbstractImmutable(abstractImmutable); + + Map itemMap = beanTableSchema.itemToMap(flattenedImmutableBean, false); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("attribute2", stringValue("two"))); + } + + @Test + public void documentBean_correctlyMapsBeanAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(DocumentBean.class); + AbstractBean abstractBean = new AbstractBean(); + abstractBean.setAttribute2("two"); + DocumentBean documentBean = new DocumentBean(); + documentBean.setId("id-value"); + documentBean.setAttribute1("one"); + documentBean.setAbstractBean(abstractBean); + + AttributeValue expectedDocument = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + + Map itemMap = beanTableSchema.itemToMap(documentBean, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractBean", expectedDocument)); + } + + @Test + public void documentBean_list_correctlyMapsBeanAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(DocumentBean.class); + AbstractBean abstractBean1 = new AbstractBean(); + abstractBean1.setAttribute2("two"); + AbstractBean abstractBean2 = new AbstractBean(); + abstractBean2.setAttribute2("three"); + DocumentBean documentBean = new DocumentBean(); + documentBean.setId("id-value"); + documentBean.setAttribute1("one"); + documentBean.setAbstractBeanList(Arrays.asList(abstractBean1, abstractBean2)); + + AttributeValue expectedDocument1 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + AttributeValue expectedDocument2 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("three"))) + .build(); + AttributeValue expectedList = AttributeValue.builder().l(expectedDocument1, expectedDocument2).build(); + + Map itemMap = beanTableSchema.itemToMap(documentBean, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, 
hasEntry("abstractBeanList", expectedList)); + } + + @Test + public void documentBean_map_correctlyMapsBeanAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(DocumentBean.class); + AbstractBean abstractBean1 = new AbstractBean(); + abstractBean1.setAttribute2("two"); + AbstractBean abstractBean2 = new AbstractBean(); + abstractBean2.setAttribute2("three"); + DocumentBean documentBean = new DocumentBean(); + documentBean.setId("id-value"); + documentBean.setAttribute1("one"); + + Map abstractBeanMap = new HashMap<>(); + abstractBeanMap.put("key1", abstractBean1); + abstractBeanMap.put("key2", abstractBean2); + documentBean.setAbstractBeanMap(abstractBeanMap); + + AttributeValue expectedDocument1 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + AttributeValue expectedDocument2 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("three"))) + .build(); + Map expectedAttributeValueMap = new HashMap<>(); + expectedAttributeValueMap.put("key1", expectedDocument1); + expectedAttributeValueMap.put("key2", expectedDocument2); + AttributeValue expectedMap = AttributeValue.builder().m(expectedAttributeValueMap).build(); + + Map itemMap = beanTableSchema.itemToMap(documentBean, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractBeanMap", expectedMap)); + } + + @Test + public void documentBean_correctlyMapsImmutableAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(DocumentBean.class); + AbstractImmutable abstractImmutable = AbstractImmutable.builder().attribute2("two").build(); + DocumentBean documentBean = new DocumentBean(); + documentBean.setId("id-value"); + documentBean.setAttribute1("one"); + documentBean.setAbstractImmutable(abstractImmutable); + + AttributeValue expectedDocument = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + + Map itemMap = beanTableSchema.itemToMap(documentBean, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractImmutable", expectedDocument)); + } + + @Test + public void documentBean_list_correctlyMapsImmutableAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(DocumentBean.class); + AbstractImmutable abstractImmutable1 = AbstractImmutable.builder().attribute2("two").build(); + AbstractImmutable abstractImmutable2 = AbstractImmutable.builder().attribute2("three").build(); + DocumentBean documentBean = new DocumentBean(); + documentBean.setId("id-value"); + documentBean.setAttribute1("one"); + documentBean.setAbstractImmutableList(Arrays.asList(abstractImmutable1, abstractImmutable2)); + + AttributeValue expectedDocument1 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + AttributeValue expectedDocument2 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("three"))) + .build(); + AttributeValue expectedList = AttributeValue.builder().l(expectedDocument1, expectedDocument2).build(); + + Map itemMap = beanTableSchema.itemToMap(documentBean, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + 
assertThat(itemMap, hasEntry("abstractImmutableList", expectedList)); + } + + @Test + public void documentBean_map_correctlyMapsImmutableAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(DocumentBean.class); + AbstractImmutable abstractImmutable1 = AbstractImmutable.builder().attribute2("two").build(); + AbstractImmutable abstractImmutable2 = AbstractImmutable.builder().attribute2("three").build(); + DocumentBean documentBean = new DocumentBean(); + documentBean.setId("id-value"); + documentBean.setAttribute1("one"); + + Map abstractImmutableMap = new HashMap<>(); + abstractImmutableMap.put("key1", abstractImmutable1); + abstractImmutableMap.put("key2", abstractImmutable2); + documentBean.setAbstractImmutableMap(abstractImmutableMap); + + AttributeValue expectedDocument1 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + AttributeValue expectedDocument2 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("three"))) + .build(); + Map expectedAttributeValueMap = new HashMap<>(); + expectedAttributeValueMap.put("key1", expectedDocument1); + expectedAttributeValueMap.put("key2", expectedDocument2); + AttributeValue expectedMap = AttributeValue.builder().m(expectedAttributeValueMap).build(); + + Map itemMap = beanTableSchema.itemToMap(documentBean, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractImmutableMap", expectedMap)); + } + + @Test + public void parameterizedDocumentBean_correctlyMapsAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(ParameterizedDocumentBean.class); + ParameterizedAbstractBean abstractBean = new ParameterizedAbstractBean<>(); + abstractBean.setAttribute2("two"); + ParameterizedDocumentBean documentBean = new ParameterizedDocumentBean(); + documentBean.setId("id-value"); + documentBean.setAttribute1("one"); + documentBean.setAbstractBean(abstractBean); + + AttributeValue expectedDocument = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + + Map itemMap = beanTableSchema.itemToMap(documentBean, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractBean", expectedDocument)); + } + + @Test + public void parameterizedDocumentBean_list_correctlyMapsAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(ParameterizedDocumentBean.class); + ParameterizedAbstractBean abstractBean1 = new ParameterizedAbstractBean<>(); + abstractBean1.setAttribute2("two"); + ParameterizedAbstractBean abstractBean2 = new ParameterizedAbstractBean<>(); + abstractBean2.setAttribute2("three"); + ParameterizedDocumentBean documentBean = new ParameterizedDocumentBean(); + documentBean.setId("id-value"); + documentBean.setAttribute1("one"); + documentBean.setAbstractBeanList(Arrays.asList(abstractBean1, abstractBean2)); + + AttributeValue expectedDocument1 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + AttributeValue expectedDocument2 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("three"))) + .build(); + AttributeValue expectedList = AttributeValue.builder().l(expectedDocument1, expectedDocument2).build(); + + Map itemMap = 
beanTableSchema.itemToMap(documentBean, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractBeanList", expectedList)); + } + + @Test + public void parameterizedDocumentBean_map_correctlyMapsAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(ParameterizedDocumentBean.class); + ParameterizedAbstractBean abstractBean1 = new ParameterizedAbstractBean<>(); + abstractBean1.setAttribute2("two"); + ParameterizedAbstractBean abstractBean2 = new ParameterizedAbstractBean<>(); + abstractBean2.setAttribute2("three"); + ParameterizedDocumentBean documentBean = new ParameterizedDocumentBean(); + documentBean.setId("id-value"); + documentBean.setAttribute1("one"); + + Map<String, ParameterizedAbstractBean<String>> abstractBeanMap = new HashMap<>(); + abstractBeanMap.put("key1", abstractBean1); + abstractBeanMap.put("key2", abstractBean2); + documentBean.setAbstractBeanMap(abstractBeanMap); + + AttributeValue expectedDocument1 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + AttributeValue expectedDocument2 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("three"))) + .build(); + Map expectedAttributeValueMap = new HashMap<>(); + expectedAttributeValueMap.put("key1", expectedDocument1); + expectedAttributeValueMap.put("key2", expectedDocument2); + AttributeValue expectedMap = AttributeValue.builder().m(expectedAttributeValueMap).build(); + + Map itemMap = beanTableSchema.itemToMap(documentBean, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractBeanMap", expectedMap)); + } + + @Test + public void extendedBean_correctlyExtendsAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(ExtendedBean.class); + ExtendedBean extendedBean = new ExtendedBean(); + extendedBean.setId("id-value"); + extendedBean.setAttribute1("one"); + extendedBean.setAttribute2("two"); + + Map itemMap = beanTableSchema.itemToMap(extendedBean, false); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("attribute2", stringValue("two"))); + } + + @Test(expected = IllegalArgumentException.class) + public void invalidBean_throwsIllegalArgumentException() { + BeanTableSchema.create(InvalidBean.class); + } + + @Test + public void itemToMap_nullAttribute_ignoreNullsTrue() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SimpleBean.class); + SimpleBean simpleBean = new SimpleBean(); + simpleBean.setId("id-value"); + + Map itemMap = beanTableSchema.itemToMap(simpleBean, true); + + assertThat(itemMap.size(), is(1)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + } + + @Test + public void itemToMap_nullAttribute_ignoreNullsFalse() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SimpleBean.class); + SimpleBean simpleBean = new SimpleBean(); + simpleBean.setId("id-value"); + + Map itemMap = beanTableSchema.itemToMap(simpleBean, false); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("integerAttribute", AttributeValues.nullAttributeValue())); + } + + @Test + public void 
itemToMap_nonNullAttribute() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SimpleBean.class); + SimpleBean simpleBean = new SimpleBean(); + simpleBean.setId("id-value"); + simpleBean.setIntegerAttribute(123); + + Map itemMap = beanTableSchema.itemToMap(simpleBean, false); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("integerAttribute", numberValue(123))); + } + + @Test + public void mapToItem_createsItem() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SimpleBean.class); + Map itemMap = new HashMap<>(); + itemMap.put("id", stringValue("id-value")); + itemMap.put("integerAttribute", numberValue(123)); + SimpleBean expectedBean = new SimpleBean(); + expectedBean.setId("id-value"); + expectedBean.setIntegerAttribute(123); + + SimpleBean result = beanTableSchema.mapToItem(itemMap); + + assertThat(result, is(expectedBean)); + } + + @Test + public void attributeValue_returnsValue() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SimpleBean.class); + SimpleBean simpleBean = new SimpleBean(); + simpleBean.setId("id-value"); + simpleBean.setIntegerAttribute(123); + + assertThat(beanTableSchema.attributeValue(simpleBean, "integerAttribute"), is(numberValue(123))); + } + + @Test + public void enumBean_invalidEnum() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(EnumBean.class); + + Map itemMap = new HashMap<>(); + itemMap.put("id", stringValue("id-value")); + itemMap.put("testEnum", stringValue("invalid-value")); + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("invalid-value"); + exception.expectMessage("TestEnum"); + beanTableSchema.mapToItem(itemMap); + } + + @Test + public void enumBean_singleEnum() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(EnumBean.class); + EnumBean enumBean = new EnumBean(); + enumBean.setId("id-value"); + enumBean.setTestEnum(EnumBean.TestEnum.ONE); + + Map itemMap = beanTableSchema.itemToMap(enumBean, true); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("testEnum", stringValue("ONE"))); + + EnumBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(enumBean))); + } + + @Test + public void enumBean_listEnum() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(EnumBean.class); + EnumBean enumBean = new EnumBean(); + enumBean.setId("id-value"); + enumBean.setTestEnumList(Arrays.asList(EnumBean.TestEnum.ONE, EnumBean.TestEnum.TWO)); + + Map itemMap = beanTableSchema.itemToMap(enumBean, true); + + AttributeValue expectedAttributeValue = AttributeValue.builder() + .l(stringValue("ONE"), + stringValue("TWO")) + .build(); + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("testEnumList", expectedAttributeValue)); + + EnumBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(enumBean))); + } + + @Test + public void listBean_stringList() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(ListBean.class); + ListBean listBean = new ListBean(); + listBean.setId("id-value"); + listBean.setStringList(Arrays.asList("one", "two", "three")); + + Map itemMap = beanTableSchema.itemToMap(listBean, true); + + AttributeValue expectedAttributeValue = AttributeValue.builder() + .l(stringValue("one"), + stringValue("two"), + stringValue("three")) + .build(); + + 
assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("stringList", expectedAttributeValue)); + + ListBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(listBean))); + } + + @Test + public void listBean_stringListList() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(ListBean.class); + ListBean listBean = new ListBean(); + listBean.setId("id-value"); + listBean.setStringListList(Arrays.asList(Arrays.asList("one", "two"), Arrays.asList("three", "four"))); + + Map itemMap = beanTableSchema.itemToMap(listBean, true); + + AttributeValue list1 = AttributeValue.builder().l(stringValue("one"), stringValue("two")).build(); + AttributeValue list2 = AttributeValue.builder().l(stringValue("three"), stringValue("four")).build(); + AttributeValue expectedAttributeValue = AttributeValue.builder() + .l(list1, list2) + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("stringListList", expectedAttributeValue)); + + ListBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(listBean))); + } + + @Test + public void setBean_stringSet() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SetBean.class); + SetBean setBean = new SetBean(); + setBean.setId("id-value"); + LinkedHashSet stringSet = new LinkedHashSet<>(); + stringSet.add("one"); + stringSet.add("two"); + stringSet.add("three"); + setBean.setStringSet(stringSet); + + Map itemMap = beanTableSchema.itemToMap(setBean, true); + + AttributeValue expectedAttributeValue = AttributeValue.builder() + .ss("one", "two", "three") + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("stringSet", expectedAttributeValue)); + + SetBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(setBean))); + } + + @Test + public void setBean_integerSet() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SetBean.class); + SetBean setBean = new SetBean(); + setBean.setId("id-value"); + LinkedHashSet integerSet = new LinkedHashSet<>(); + integerSet.add(1); + integerSet.add(2); + integerSet.add(3); + setBean.setIntegerSet(integerSet); + + Map itemMap = beanTableSchema.itemToMap(setBean, true); + + AttributeValue expectedAttributeValue = AttributeValue.builder() + .ns("1", "2", "3") + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("integerSet", expectedAttributeValue)); + + SetBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(setBean))); + } + + @Test + public void setBean_longSet() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SetBean.class); + SetBean setBean = new SetBean(); + setBean.setId("id-value"); + LinkedHashSet longSet = new LinkedHashSet<>(); + longSet.add(1L); + longSet.add(2L); + longSet.add(3L); + setBean.setLongSet(longSet); + + Map itemMap = beanTableSchema.itemToMap(setBean, true); + + AttributeValue expectedAttributeValue = AttributeValue.builder() + .ns("1", "2", "3") + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("longSet", expectedAttributeValue)); + + SetBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, 
is(equalTo(setBean))); + } + + @Test + public void setBean_shortSet() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SetBean.class); + SetBean setBean = new SetBean(); + setBean.setId("id-value"); + LinkedHashSet shortSet = new LinkedHashSet<>(); + shortSet.add((short)1); + shortSet.add((short)2); + shortSet.add((short)3); + setBean.setShortSet(shortSet); + + Map itemMap = beanTableSchema.itemToMap(setBean, true); + + AttributeValue expectedAttributeValue = AttributeValue.builder() + .ns("1", "2", "3") + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("shortSet", expectedAttributeValue)); + + SetBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(setBean))); + } + + @Test + public void setBean_byteSet() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SetBean.class); + SetBean setBean = new SetBean(); + setBean.setId("id-value"); + LinkedHashSet byteSet = new LinkedHashSet<>(); + byteSet.add((byte)1); + byteSet.add((byte)2); + byteSet.add((byte)3); + setBean.setByteSet(byteSet); + + Map itemMap = beanTableSchema.itemToMap(setBean, true); + + AttributeValue expectedAttributeValue = AttributeValue.builder() + .ns("1", "2", "3") + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("byteSet", expectedAttributeValue)); + + SetBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(setBean))); + } + + @Test + public void setBean_doubleSet() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SetBean.class); + SetBean setBean = new SetBean(); + setBean.setId("id-value"); + LinkedHashSet doubleSet = new LinkedHashSet<>(); + doubleSet.add(1.1); + doubleSet.add(2.2); + doubleSet.add(3.3); + setBean.setDoubleSet(doubleSet); + + Map itemMap = beanTableSchema.itemToMap(setBean, true); + + AttributeValue expectedAttributeValue = AttributeValue.builder() + .ns("1.1", "2.2", "3.3") + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("doubleSet", expectedAttributeValue)); + + SetBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(setBean))); + } + + @Test + public void setBean_floatSet() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SetBean.class); + SetBean setBean = new SetBean(); + setBean.setId("id-value"); + LinkedHashSet floatSet = new LinkedHashSet<>(); + floatSet.add(1.1f); + floatSet.add(2.2f); + floatSet.add(3.3f); + setBean.setFloatSet(floatSet); + + Map itemMap = beanTableSchema.itemToMap(setBean, true); + + AttributeValue expectedAttributeValue = AttributeValue.builder() + .ns("1.1", "2.2", "3.3") + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("floatSet", expectedAttributeValue)); + + SetBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(setBean))); + } + + @Test + public void setBean_binarySet() { + SdkBytes buffer1 = SdkBytes.fromString("one", StandardCharsets.UTF_8); + SdkBytes buffer2 = SdkBytes.fromString("two", StandardCharsets.UTF_8); + SdkBytes buffer3 = SdkBytes.fromString("three", StandardCharsets.UTF_8); + + BeanTableSchema beanTableSchema = BeanTableSchema.create(SetBean.class); + SetBean setBean = new SetBean(); + 
setBean.setId("id-value"); + LinkedHashSet binarySet = new LinkedHashSet<>(); + binarySet.add(buffer1); + binarySet.add(buffer2); + binarySet.add(buffer3); + setBean.setBinarySet(binarySet); + + Map itemMap = beanTableSchema.itemToMap(setBean, true); + + AttributeValue expectedAttributeValue = AttributeValue.builder() + .bs(buffer1, buffer2, buffer3) + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("binarySet", expectedAttributeValue)); + + SetBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(setBean))); + } + + @Test + public void mapBean_stringStringMap() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(MapBean.class); + MapBean mapBean = new MapBean(); + mapBean.setId("id-value"); + + Map testMap = new HashMap<>(); + testMap.put("one", "two"); + testMap.put("three", "four"); + + mapBean.setStringMap(testMap); + + Map itemMap = beanTableSchema.itemToMap(mapBean, true); + + Map expectedMap = new HashMap<>(); + expectedMap.put("one", stringValue("two")); + expectedMap.put("three", stringValue("four")); + AttributeValue expectedMapValue = AttributeValue.builder() + .m(expectedMap) + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("stringMap", expectedMapValue)); + + MapBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(mapBean))); + } + + @Test + public void mapBean_nestedStringMap() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(MapBean.class); + MapBean mapBean = new MapBean(); + mapBean.setId("id-value"); + + Map<String, Map<String, String>> testMap = new HashMap<>(); + testMap.put("five", singletonMap("one", "two")); + testMap.put("six", singletonMap("three", "four")); + + mapBean.setNestedStringMap(testMap); + + Map itemMap = beanTableSchema.itemToMap(mapBean, true); + + Map expectedMap = new HashMap<>(); + expectedMap.put("five", AttributeValue.builder().m(singletonMap("one", stringValue("two"))).build()); + expectedMap.put("six", AttributeValue.builder().m(singletonMap("three", stringValue("four"))).build()); + + AttributeValue expectedMapValue = AttributeValue.builder() + .m(expectedMap) + .build(); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("nestedStringMap", expectedMapValue)); + + MapBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(mapBean))); + } + + @Test + public void commonTypesBean() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(CommonTypesBean.class); + CommonTypesBean commonTypesBean = new CommonTypesBean(); + SdkBytes binaryLiteral = SdkBytes.fromString("test-string", StandardCharsets.UTF_8); + + commonTypesBean.setId("id-value"); + commonTypesBean.setBooleanAttribute(true); + commonTypesBean.setIntegerAttribute(123); + commonTypesBean.setLongAttribute(234L); + commonTypesBean.setShortAttribute((short) 345); + commonTypesBean.setByteAttribute((byte) 45); + commonTypesBean.setDoubleAttribute(56.7); + commonTypesBean.setFloatAttribute((float) 67.8); + commonTypesBean.setBinaryAttribute(binaryLiteral); + + Map itemMap = beanTableSchema.itemToMap(commonTypesBean, true); + + assertThat(itemMap.size(), is(9)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("booleanAttribute", AttributeValue.builder().bool(true).build())); + 
assertThat(itemMap, hasEntry("integerAttribute", numberValue(123))); + assertThat(itemMap, hasEntry("longAttribute", numberValue(234))); + assertThat(itemMap, hasEntry("shortAttribute", numberValue(345))); + assertThat(itemMap, hasEntry("byteAttribute", numberValue(45))); + assertThat(itemMap, hasEntry("doubleAttribute", numberValue(56.7))); + assertThat(itemMap, hasEntry("floatAttribute", numberValue(67.8))); + assertThat(itemMap, hasEntry("binaryAttribute", binaryValue(binaryLiteral))); + + CommonTypesBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(commonTypesBean))); + } + + @Test + public void primitiveTypesBean() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(PrimitiveTypesBean.class); + PrimitiveTypesBean primitiveTypesBean = new PrimitiveTypesBean(); + + primitiveTypesBean.setId("id-value"); + primitiveTypesBean.setBooleanAttribute(true); + primitiveTypesBean.setIntegerAttribute(123); + primitiveTypesBean.setLongAttribute(234L); + primitiveTypesBean.setShortAttribute((short) 345); + primitiveTypesBean.setByteAttribute((byte) 45); + primitiveTypesBean.setDoubleAttribute(56.7); + primitiveTypesBean.setFloatAttribute((float) 67.8); + + Map itemMap = beanTableSchema.itemToMap(primitiveTypesBean, true); + + assertThat(itemMap.size(), is(8)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("booleanAttribute", AttributeValue.builder().bool(true).build())); + assertThat(itemMap, hasEntry("integerAttribute", numberValue(123))); + assertThat(itemMap, hasEntry("longAttribute", numberValue(234))); + assertThat(itemMap, hasEntry("shortAttribute", numberValue(345))); + assertThat(itemMap, hasEntry("byteAttribute", numberValue(45))); + assertThat(itemMap, hasEntry("doubleAttribute", numberValue(56.7))); + assertThat(itemMap, hasEntry("floatAttribute", numberValue(67.8))); + + PrimitiveTypesBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(primitiveTypesBean))); + } + + @Test + public void itemToMap_specificAttributes() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(CommonTypesBean.class); + CommonTypesBean commonTypesBean = new CommonTypesBean(); + + commonTypesBean.setId("id-value"); + commonTypesBean.setIntegerAttribute(123); + commonTypesBean.setLongAttribute(234L); + commonTypesBean.setFloatAttribute((float) 67.8); + + Map itemMap = + beanTableSchema.itemToMap(commonTypesBean, Arrays.asList("longAttribute", "floatAttribute")); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("longAttribute", numberValue(234))); + assertThat(itemMap, hasEntry("floatAttribute", numberValue(67.8))); + } + + @Test + public void itemType_returnsCorrectClass() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SimpleBean.class); + + assertThat(beanTableSchema.itemType(), is(equalTo(EnhancedType.of(SimpleBean.class)))); + } + + @Test + public void attributeConverterWithoutConstructor_throwsIllegalArgumentException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("default constructor"); + BeanTableSchema.create(AttributeConverterNoConstructorBean.class); + } + + @Test + public void usesCustomAttributeConverter() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(AttributeConverterBean.class); + + AttributeConverterBean.AttributeItem attributeItem = new AttributeConverterBean.AttributeItem(); + attributeItem.setInnerValue("inner-value"); + + AttributeConverterBean converterBean = new 
AttributeConverterBean(); + converterBean.setId("id-value"); + converterBean.setIntegerAttribute(123); + converterBean.setAttributeItem(attributeItem); + + Map itemMap = beanTableSchema.itemToMap(converterBean, false); + + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("integerAttribute", numberValue(123))); + assertThat(itemMap, hasEntry("attributeItem", stringValue("inner-value"))); + + AttributeConverterBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse, is(equalTo(converterBean))); + } + + @Test + public void converterProviderWithoutConstructor_throwsIllegalArgumentException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("default constructor"); + BeanTableSchema.create(NoConstructorConverterProvidersBean.class); + } + + @Test + public void usesCustomAttributeConverterProvider() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SingleConverterProvidersBean.class); + + SingleConverterProvidersBean converterBean = new SingleConverterProvidersBean(); + converterBean.setId("id-value"); + converterBean.setIntegerAttribute(123); + + Map itemMap = beanTableSchema.itemToMap(converterBean, false); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value-custom"))); + assertThat(itemMap, hasEntry("integerAttribute", numberValue(133))); + + SingleConverterProvidersBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse.getId(), is(equalTo("id-value-custom"))); + assertThat(reverse.getIntegerAttribute(), is(equalTo(133))); + } + + @Test + public void usesCustomAttributeConverterProviders() { + BeanTableSchema beanTableSchema = + BeanTableSchema.create(MultipleConverterProvidersBean.class); + + MultipleConverterProvidersBean converterBean = new MultipleConverterProvidersBean(); + converterBean.setId("id-value"); + converterBean.setIntegerAttribute(123); + + Map itemMap = beanTableSchema.itemToMap(converterBean, false); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value-custom"))); + assertThat(itemMap, hasEntry("integerAttribute", numberValue(133))); + + MultipleConverterProvidersBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse.getId(), is(equalTo("id-value-custom"))); + assertThat(reverse.getIntegerAttribute(), is(equalTo(133))); + } + + @Test + public void emptyConverterProviderList_fails_whenAttributeConvertersAreMissing() { + exception.expect(NullPointerException.class); + BeanTableSchema.create(EmptyConverterProvidersInvalidBean.class); + } + + @Test + public void emptyConverterProviderList_correct_whenAttributeConvertersAreSupplied() { + BeanTableSchema beanTableSchema = + BeanTableSchema.create(EmptyConverterProvidersValidBean.class); + + EmptyConverterProvidersValidBean converterBean = new EmptyConverterProvidersValidBean(); + converterBean.setId("id-value"); + converterBean.setIntegerAttribute(123); + + Map itemMap = beanTableSchema.itemToMap(converterBean, false); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value-custom"))); + assertThat(itemMap, hasEntry("integerAttribute", numberValue(133))); + + EmptyConverterProvidersValidBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse.getId(), is(equalTo("id-value-custom"))); + assertThat(reverse.getIntegerAttribute(), is(equalTo(133))); + } +} diff --git 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttributeTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttributeTest.java new file mode 100644 index 000000000000..fac9c11388ea --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttributeTest.java @@ -0,0 +1,232 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import java.util.Objects; +import java.util.function.BiConsumer; +import java.util.function.Function; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.ResolvedImmutableAttribute; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@RunWith(MockitoJUnitRunner.class) +public class ImmutableAttributeTest { + private static final Function TEST_GETTER = x -> "test-getter"; + private static final BiConsumer TEST_SETTER = (x, y) -> {}; + + @Mock + private StaticAttributeTag mockTag; + + @Mock + private StaticAttributeTag mockTag2; + + @Mock + private AttributeConverter attributeConverter; + + private static class SimpleItem { + private String aString; + + SimpleItem(String aString) { + this.aString = aString; + } + + String getAString() { + return this.aString; + } + + void setAString(String aString) { + this.aString = aString; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SimpleItem that = (SimpleItem) o; + return aString == that.aString; + } + + @Override + public int hashCode() { + return Objects.hash(aString); + } + } + + @Test + public void build_maximal() { + ImmutableAttribute immutableAttribute = + ImmutableAttribute.builder(Object.class, Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .tags(mockTag) + .attributeConverter(attributeConverter) + .build(); + + assertThat(immutableAttribute.name()).isEqualTo("test-attribute"); + assertThat(immutableAttribute.getter()).isSameAs(TEST_GETTER); + assertThat(immutableAttribute.setter()).isSameAs(TEST_SETTER); + assertThat(immutableAttribute.tags()).containsExactly(mockTag); + assertThat(immutableAttribute.type()).isEqualTo(EnhancedType.of(String.class)); + 
assertThat(immutableAttribute.attributeConverter()).isSameAs(attributeConverter); + } + + @Test + public void build_minimal() { + ImmutableAttribute immutableAttribute = + ImmutableAttribute.builder(Object.class, Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .build(); + + assertThat(immutableAttribute.name()).isEqualTo("test-attribute"); + assertThat(immutableAttribute.getter()).isSameAs(TEST_GETTER); + assertThat(immutableAttribute.setter()).isSameAs(TEST_SETTER); + assertThat(immutableAttribute.tags()).isEmpty(); + assertThat(immutableAttribute.type()).isEqualTo(EnhancedType.of(String.class)); + } + + @Test + public void build_missing_name() { + assertThatThrownBy(() -> ImmutableAttribute.builder(Object.class, Object.class, String.class) + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .build()) + .isInstanceOf(NullPointerException.class) + .hasMessageContaining("name"); + } + + @Test + public void build_missing_getter() { + assertThatThrownBy(() -> ImmutableAttribute.builder(Object.class, Object.class, String.class) + .name("test-attribute") + .setter(TEST_SETTER) + .build()) + .isInstanceOf(NullPointerException.class) + .hasMessageContaining("getter"); + } + + @Test + public void build_missing_setter() { + assertThatThrownBy(() -> ImmutableAttribute.builder(Object.class, Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .build()) + .isInstanceOf(NullPointerException.class) + .hasMessageContaining("setter"); + } + + @Test + public void toBuilder() { + ImmutableAttribute immutableAttribute = + ImmutableAttribute.builder(Object.class, Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .tags(mockTag, mockTag2) + .attributeConverter(attributeConverter) + .build(); + + ImmutableAttribute clonedAttribute = immutableAttribute.toBuilder().build(); + + assertThat(clonedAttribute.name()).isEqualTo("test-attribute"); + assertThat(clonedAttribute.getter()).isSameAs(TEST_GETTER); + assertThat(clonedAttribute.setter()).isSameAs(TEST_SETTER); + assertThat(clonedAttribute.tags()).containsExactly(mockTag, mockTag2); + assertThat(clonedAttribute.type()).isEqualTo(EnhancedType.of(String.class)); + assertThat(clonedAttribute.attributeConverter()).isSameAs(attributeConverter); + } + + @Test + public void build_addTag_single() { + ImmutableAttribute immutableAttribute = + ImmutableAttribute.builder(Object.class, Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .addTag(mockTag) + .build(); + + assertThat(immutableAttribute.tags()).containsExactly(mockTag); + } + + @Test + public void build_addTag_multiple() { + ImmutableAttribute immutableAttribute = + ImmutableAttribute.builder(Object.class, Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .addTag(mockTag) + .addTag(mockTag2) + .build(); + + assertThat(immutableAttribute.tags()).containsExactly(mockTag, mockTag2); + } + + @Test + public void build_addAttributeConverter() { + ImmutableAttribute immutableAttribute = + ImmutableAttribute.builder(Object.class, Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .attributeConverter(attributeConverter) + .build(); + + AttributeConverter attributeConverterR = immutableAttribute.attributeConverter(); + assertThat(attributeConverterR).isEqualTo(attributeConverter); + } + + @Test + public void resolve_uses_customConverter() { + 
when(attributeConverter.transformFrom(any())).thenReturn(AttributeValue.builder().s("test-string-custom").build()); + + ImmutableAttribute staticAttribute = + ImmutableAttribute.builder(SimpleItem.class, SimpleItem.class, String.class) + .name("test-attribute") + .getter(SimpleItem::getAString) + .setter(SimpleItem::setAString) + .attributeConverter(attributeConverter) + .build(); + + ResolvedImmutableAttribute resolvedAttribute = + staticAttribute.resolve(AttributeConverterProvider.defaultProvider()); + + Function attributeValueFunction = resolvedAttribute.attributeGetterMethod(); + + SimpleItem item = new SimpleItem("test-string"); + AttributeValue resultAttributeValue = attributeValueFunction.apply(item); + + assertThat(resultAttributeValue.s()).isEqualTo("test-string-custom"); + } +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableTableSchemaTest.java new file mode 100644 index 000000000000..57dd54c2cbdb --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableTableSchemaTest.java @@ -0,0 +1,244 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.AbstractBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.AbstractImmutable; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.DocumentImmutable; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.FlattenedBeanImmutable; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.FlattenedImmutableImmutable; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class ImmutableTableSchemaTest { + @Test + public void documentImmutable_correctlyMapsBeanAttributes() { + ImmutableTableSchema documentImmutableTableSchema = + ImmutableTableSchema.create(DocumentImmutable.class); + AbstractBean abstractBean = new AbstractBean(); + abstractBean.setAttribute2("two"); + DocumentImmutable documentImmutable = DocumentImmutable.builder().id("id-value") + .attribute1("one") + .abstractBean(abstractBean) + .build(); + + AttributeValue expectedDocument = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + + Map itemMap = documentImmutableTableSchema.itemToMap(documentImmutable, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractBean", expectedDocument)); + } + + @Test + public void documentImmutable_list_correctlyMapsBeanAttributes() { + ImmutableTableSchema documentImmutableTableSchema = + ImmutableTableSchema.create(DocumentImmutable.class); + AbstractBean abstractBean1 = new AbstractBean(); + abstractBean1.setAttribute2("two"); + AbstractBean abstractBean2 = new AbstractBean(); + abstractBean2.setAttribute2("three"); + DocumentImmutable documentImmutable = + DocumentImmutable.builder() + .id("id-value") + .attribute1("one") + .abstractBeanList(Arrays.asList(abstractBean1, abstractBean2)) + .build(); + + AttributeValue expectedDocument1 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + AttributeValue expectedDocument2 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("three"))) + .build(); + AttributeValue expectedList = AttributeValue.builder().l(expectedDocument1, expectedDocument2).build(); + + Map itemMap = documentImmutableTableSchema.itemToMap(documentImmutable, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractBeanList", expectedList)); + } + + @Test + public void documentImmutable_map_correctlyMapsBeanAttributes() { + ImmutableTableSchema documentImmutableTableSchema = + ImmutableTableSchema.create(DocumentImmutable.class); + AbstractBean abstractBean1 = new AbstractBean(); + abstractBean1.setAttribute2("two"); + AbstractBean abstractBean2 = new AbstractBean(); + abstractBean2.setAttribute2("three"); + Map abstractBeanMap = new HashMap<>(); + abstractBeanMap.put("key1", 
abstractBean1); + abstractBeanMap.put("key2", abstractBean2); + DocumentImmutable documentImmutable = + DocumentImmutable.builder() + .id("id-value") + .attribute1("one") + .abstractBeanMap(abstractBeanMap) + .build(); + + AttributeValue expectedDocument1 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + AttributeValue expectedDocument2 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("three"))) + .build(); + Map expectedAttributeValueMap = new HashMap<>(); + expectedAttributeValueMap.put("key1", expectedDocument1); + expectedAttributeValueMap.put("key2", expectedDocument2); + AttributeValue expectedMap = AttributeValue.builder().m(expectedAttributeValueMap).build(); + + Map itemMap = documentImmutableTableSchema.itemToMap(documentImmutable, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractBeanMap", expectedMap)); + } + + @Test + public void documentImmutable_correctlyMapsImmutableAttributes() { + ImmutableTableSchema documentImmutableTableSchema = + ImmutableTableSchema.create(DocumentImmutable.class); + AbstractImmutable abstractImmutable = AbstractImmutable.builder().attribute2("two").build(); + DocumentImmutable documentImmutable = DocumentImmutable.builder().id("id-value") + .attribute1("one") + .abstractImmutable(abstractImmutable) + .build(); + + AttributeValue expectedDocument = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + + Map itemMap = documentImmutableTableSchema.itemToMap(documentImmutable, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractImmutable", expectedDocument)); + } + + @Test + public void documentImmutable_list_correctlyMapsImmutableAttributes() { + ImmutableTableSchema documentImmutableTableSchema = + ImmutableTableSchema.create(DocumentImmutable.class); + AbstractImmutable abstractImmutable1 = AbstractImmutable.builder().attribute2("two").build(); + AbstractImmutable abstractImmutable2 = AbstractImmutable.builder().attribute2("three").build(); + + DocumentImmutable documentImmutable = + DocumentImmutable.builder() + .id("id-value") + .attribute1("one") + .abstractImmutableList(Arrays.asList(abstractImmutable1, abstractImmutable2)) + .build(); + + AttributeValue expectedDocument1 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + AttributeValue expectedDocument2 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("three"))) + .build(); + AttributeValue expectedList = AttributeValue.builder().l(expectedDocument1, expectedDocument2).build(); + + Map itemMap = documentImmutableTableSchema.itemToMap(documentImmutable, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractImmutableList", expectedList)); + } + + @Test + public void documentImmutable_map_correctlyMapsImmutableAttributes() { + ImmutableTableSchema documentImmutableTableSchema = + ImmutableTableSchema.create(DocumentImmutable.class); + AbstractImmutable abstractImmutable1 = AbstractImmutable.builder().attribute2("two").build(); + AbstractImmutable 
abstractImmutable2 = AbstractImmutable.builder().attribute2("three").build(); + Map abstractImmutableMap = new HashMap<>(); + abstractImmutableMap.put("key1", abstractImmutable1); + abstractImmutableMap.put("key2", abstractImmutable2); + DocumentImmutable documentImmutable = + DocumentImmutable.builder() + .id("id-value") + .attribute1("one") + .abstractImmutableMap(abstractImmutableMap) + .build(); + + AttributeValue expectedDocument1 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("two"))) + .build(); + AttributeValue expectedDocument2 = AttributeValue.builder() + .m(singletonMap("attribute2", stringValue("three"))) + .build(); + Map expectedAttributeValueMap = new HashMap<>(); + expectedAttributeValueMap.put("key1", expectedDocument1); + expectedAttributeValueMap.put("key2", expectedDocument2); + AttributeValue expectedMap = AttributeValue.builder().m(expectedAttributeValueMap).build(); + + Map itemMap = documentImmutableTableSchema.itemToMap(documentImmutable, true); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("abstractImmutableMap", expectedMap)); + } + + @Test + public void dynamoDbFlatten_correctlyFlattensBeanAttributes() { + ImmutableTableSchema tableSchema = + ImmutableTableSchema.create(FlattenedBeanImmutable.class); + AbstractBean abstractBean = new AbstractBean(); + abstractBean.setAttribute2("two"); + FlattenedBeanImmutable flattenedBeanImmutable = + new FlattenedBeanImmutable.Builder().setId("id-value") + .setAttribute1("one") + .setAbstractBean(abstractBean) + .build(); + + Map itemMap = tableSchema.itemToMap(flattenedBeanImmutable, false); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("attribute2", stringValue("two"))); + } + + @Test + public void dynamoDbFlatten_correctlyFlattensImmutableAttributes() { + ImmutableTableSchema tableSchema = + ImmutableTableSchema.create(FlattenedImmutableImmutable.class); + AbstractImmutable abstractImmutable = AbstractImmutable.builder().attribute2("two").build(); + FlattenedImmutableImmutable FlattenedImmutableImmutable = + new FlattenedImmutableImmutable.Builder().setId("id-value") + .setAttribute1("one") + .setAbstractImmutable(abstractImmutable) + .build(); + + Map itemMap = tableSchema.itemToMap(FlattenedImmutableImmutable, false); + assertThat(itemMap.size(), is(3)); + assertThat(itemMap, hasEntry("id", stringValue("id-value"))); + assertThat(itemMap, hasEntry("attribute1", stringValue("one"))); + assertThat(itemMap, hasEntry("attribute2", stringValue("two"))); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTest.java new file mode 100644 index 000000000000..2cb25f45d48d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTest.java @@ -0,0 +1,164 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.util.function.BiConsumer; +import java.util.function.Function; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; + +@RunWith(MockitoJUnitRunner.class) +public class StaticAttributeTest { + private static final Function TEST_GETTER = x -> "test-getter"; + private static final BiConsumer TEST_SETTER = (x, y) -> {}; + + @Mock + private StaticAttributeTag mockTag; + + @Mock + private StaticAttributeTag mockTag2; + + @Mock + private AttributeConverter attributeConverter; + + @Test + public void build_maximal() { + StaticAttribute staticAttribute = StaticAttribute.builder(Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .tags(mockTag) + .attributeConverter(attributeConverter) + .build(); + + assertThat(staticAttribute.name()).isEqualTo("test-attribute"); + assertThat(staticAttribute.getter()).isSameAs(TEST_GETTER); + assertThat(staticAttribute.setter()).isSameAs(TEST_SETTER); + assertThat(staticAttribute.tags()).containsExactly(mockTag); + assertThat(staticAttribute.type()).isEqualTo(EnhancedType.of(String.class)); + assertThat(staticAttribute.attributeConverter()).isSameAs(attributeConverter); + } + + @Test + public void build_minimal() { + StaticAttribute staticAttribute = StaticAttribute.builder(Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .build(); + + assertThat(staticAttribute.name()).isEqualTo("test-attribute"); + assertThat(staticAttribute.getter()).isSameAs(TEST_GETTER); + assertThat(staticAttribute.setter()).isSameAs(TEST_SETTER); + assertThat(staticAttribute.tags()).isEmpty(); + assertThat(staticAttribute.type()).isEqualTo(EnhancedType.of(String.class)); + } + + @Test + public void build_missing_name() { + assertThatThrownBy(() -> StaticAttribute.builder(Object.class, String.class) + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .build()) + .isInstanceOf(NullPointerException.class) + .hasMessageContaining("name"); + } + + @Test + public void build_missing_getter() { + assertThatThrownBy(() -> StaticAttribute.builder(Object.class, String.class) + .name("test-attribute") + .setter(TEST_SETTER) + .build()) + .isInstanceOf(NullPointerException.class) + .hasMessageContaining("getter"); + } + + @Test + public void build_missing_setter() { + assertThatThrownBy(() -> StaticAttribute.builder(Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .build()) + .isInstanceOf(NullPointerException.class) + .hasMessageContaining("setter"); + } + + @Test + public void toBuilder() { + StaticAttribute staticAttribute = StaticAttribute.builder(Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .tags(mockTag, mockTag2) + 
.attributeConverter(attributeConverter) + .build(); + + StaticAttribute clonedAttribute = staticAttribute.toBuilder().build(); + + assertThat(clonedAttribute.name()).isEqualTo("test-attribute"); + assertThat(clonedAttribute.getter()).isSameAs(TEST_GETTER); + assertThat(clonedAttribute.setter()).isSameAs(TEST_SETTER); + assertThat(clonedAttribute.tags()).containsExactly(mockTag, mockTag2); + assertThat(clonedAttribute.type()).isEqualTo(EnhancedType.of(String.class)); + assertThat(clonedAttribute.attributeConverter()).isSameAs(attributeConverter); + } + + @Test + public void build_addTag_single() { + StaticAttribute staticAttribute = StaticAttribute.builder(Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .addTag(mockTag) + .build(); + + assertThat(staticAttribute.tags()).containsExactly(mockTag); + } + + @Test + public void build_addTag_multiple() { + StaticAttribute staticAttribute = StaticAttribute.builder(Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .addTag(mockTag) + .addTag(mockTag2) + .build(); + + assertThat(staticAttribute.tags()).containsExactly(mockTag, mockTag2); + } + + @Test + public void build_addAttributeConverter() { + StaticAttribute staticAttribute = StaticAttribute.builder(Object.class, String.class) + .name("test-attribute") + .getter(TEST_GETTER) + .setter(TEST_SETTER) + .attributeConverter(attributeConverter) + .build(); + + AttributeConverter attributeConverterR = staticAttribute.attributeConverter(); + assertThat(attributeConverterR).isEqualTo(attributeConverter); + } +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaExtendTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaExtendTest.java new file mode 100644 index 000000000000..a540fbd8449a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaExtendTest.java @@ -0,0 +1,191 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
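StaticAttributeTest above drives the StaticAttribute builder in isolation. For reference, a sketch of how such an attribute is normally declared inside a StaticTableSchema follows; the Record class, its accessors, the record variable and instantConverter are hypothetical, while the builder methods themselves (name, getter, setter, tags, attributeConverter) are exactly the ones under test.

// Sketch only: Record, its getters/setters, "record" and "instantConverter" are hypothetical.
StaticTableSchema<Record> schema =
    StaticTableSchema.builder(Record.class)
                     .newItemSupplier(Record::new)
                     .addAttribute(String.class, a -> a.name("id")
                                                       .getter(Record::getId)
                                                       .setter(Record::setId)
                                                       .tags(StaticAttributeTags.primaryPartitionKey()))
                     .addAttribute(Instant.class, a -> a.name("createdAt")
                                                        .getter(Record::getCreatedAt)
                                                        .setter(Record::setCreatedAt)
                                                        .attributeConverter(instantConverter)) // an AttributeConverter<Instant>
                     .build();

Map<String, AttributeValue> item = schema.itemToMap(record, false);
Record roundTripped = schema.mapToItem(item);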
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class StaticImmutableTableSchemaExtendTest { + private static final ImmutableRecord TEST_RECORD = ImmutableRecord.builder() + .id("id123") + .attribute1("one") + .attribute2(2) + .attribute3("three") + .build(); + + private static final Map ITEM_MAP; + + static { + Map map = new HashMap<>(); + map.put("id", AttributeValue.builder().s("id123").build()); + map.put("attribute1", AttributeValue.builder().s("one").build()); + map.put("attribute2", AttributeValue.builder().n("2").build()); + map.put("attribute3", AttributeValue.builder().s("three").build()); + ITEM_MAP = Collections.unmodifiableMap(map); + } + + private final TableSchema immutableTableSchema = + TableSchema.builder(ImmutableRecord.class, ImmutableRecord.Builder.class) + .newItemBuilder(ImmutableRecord::builder, ImmutableRecord.Builder::build) + .addAttribute(String.class, a -> a.name("id") + .getter(ImmutableRecord::id) + .setter(ImmutableRecord.Builder::id) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("attribute1") + .getter(ImmutableRecord::attribute1) + .setter(ImmutableRecord.Builder::attribute1)) + .addAttribute(int.class, a -> a.name("attribute2") + .getter(ImmutableRecord::attribute2) + .setter(ImmutableRecord.Builder::attribute2)) + .extend(TableSchema.builder(SuperRecord.class, SuperRecord.Builder.class) + .addAttribute(String.class, a -> a.name("attribute3") + .getter(SuperRecord::attribute3) + .setter(SuperRecord.Builder::attribute3)) + .build()) + .build(); + + @Test + public void itemToMap() { + Map result = immutableTableSchema.itemToMap(TEST_RECORD, false); + + assertThat(result).isEqualTo(ITEM_MAP); + } + + @Test + public void mapToItem() { + ImmutableRecord record = immutableTableSchema.mapToItem(ITEM_MAP); + + assertThat(record).isEqualTo(TEST_RECORD); + } + + public static class ImmutableRecord extends SuperRecord { + private final String id; + private final String attribute1; + private final int attribute2; + + public ImmutableRecord(Builder b) { + super(b); + this.id = b.id; + this.attribute1 = b.attribute1; + this.attribute2 = b.attribute2; + } + + public static Builder builder() { + return new Builder(); + } + + public String id() { + return id; + } + + public String attribute1() { + return attribute1; + } + + public int attribute2() { + return attribute2; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ImmutableRecord that = (ImmutableRecord) o; + + if (attribute2 != that.attribute2) return false; + if (id != null ? !id.equals(that.id) : that.id != null) return false; + return attribute1 != null ? attribute1.equals(that.attribute1) : that.attribute1 == null; + } + + @Override + public int hashCode() { + int result = id != null ? id.hashCode() : 0; + result = 31 * result + (attribute1 != null ? 
attribute1.hashCode() : 0);
+            result = 31 * result + attribute2;
+            return result;
+        }
+
+        public static class Builder extends SuperRecord.Builder<Builder> {
+            private String id;
+            private String attribute1;
+            private int attribute2;
+
+            public Builder id(String id) {
+                this.id = id;
+                return this;
+            }
+
+            public Builder attribute1(String attribute1) {
+                this.attribute1 = attribute1;
+                return this;
+            }
+
+            public Builder attribute2(int attribute2) {
+                this.attribute2 = attribute2;
+                return this;
+            }
+
+            public ImmutableRecord build() {
+                return new ImmutableRecord(this);
+            }
+        }
+    }
+
+    public static class SuperRecord {
+        private final String attribute3;
+
+        public SuperRecord(Builder<?> b) {
+            this.attribute3 = b.attribute3;
+        }
+
+        public String attribute3() {
+            return attribute3;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+
+            SuperRecord that = (SuperRecord) o;
+
+            return attribute3 != null ? attribute3.equals(that.attribute3) : that.attribute3 == null;
+        }
+
+        @Override
+        public int hashCode() {
+            return attribute3 != null ? attribute3.hashCode() : 0;
+        }
+
+        public static class Builder<T extends Builder<T>> {
+            private String attribute3;
+
+            public T attribute3(String attribute3) {
+                this.attribute3 = attribute3;
+                return (T) this;
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaFlattenTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaFlattenTest.java
new file mode 100644
index 000000000000..fde9d5db1259
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaFlattenTest.java
@@ -0,0 +1,275 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *  http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
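StaticImmutableTableSchemaExtendTest above confirms that attribute3, declared on the superclass schema, is merged into the subclass schema by extend(). A sketch of how a composed schema like immutableTableSchema is typically consumed through the enhanced client follows; the table name and DynamoDbClient setup are assumptions for illustration only.

// Sketch only: the table name "records" and the client construction are assumptions.
DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder()
                                                              .dynamoDbClient(DynamoDbClient.create())
                                                              .build();
DynamoDbTable<ImmutableRecord> table = enhancedClient.table("records", immutableTableSchema);

table.putItem(ImmutableRecord.builder()
                             .id("id123")
                             .attribute1("one")
                             .attribute2(2)
                             .attribute3("three")   // inherited via extend() from the SuperRecord schema
                             .build());

ImmutableRecord loaded = table.getItem(Key.builder().partitionValue("id123").build());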
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class StaticImmutableTableSchemaFlattenTest { + private static final ImmutableRecord TEST_RECORD = + ImmutableRecord.builder() + .id("id123") + .attribute1("1") + .child1( + ImmutableRecord.builder() + .attribute1("2a") + .child1( + ImmutableRecord.builder() + .attribute1("3a") + .build() + ) + .child2( + ImmutableRecord.builder() + .attribute1("3b") + .build() + ) + .build() + ) + .child2( + ImmutableRecord.builder() + .attribute1("2b") + .child1( + ImmutableRecord.builder() + .attribute1("4a") + .build() + ) + .child2( + ImmutableRecord.builder() + .attribute1("4b") + .build() + ) + .build() + ) + .build(); + + private static final Map ITEM_MAP; + + static { + Map map = new HashMap<>(); + map.put("id", AttributeValue.builder().s("id123").build()); + map.put("attribute1", AttributeValue.builder().s("1").build()); + map.put("attribute2a", AttributeValue.builder().s("2a").build()); + map.put("attribute2b", AttributeValue.builder().s("2b").build()); + map.put("attribute3a", AttributeValue.builder().s("3a").build()); + map.put("attribute3b", AttributeValue.builder().s("3b").build()); + map.put("attribute4a", AttributeValue.builder().s("4a").build()); + map.put("attribute4b", AttributeValue.builder().s("4b").build()); + + ITEM_MAP = Collections.unmodifiableMap(map); + } + + private final TableSchema childTableSchema4a = + TableSchema.builder(ImmutableRecord.class, ImmutableRecord.Builder.class) + .newItemBuilder(ImmutableRecord::builder, ImmutableRecord.Builder::build) + .addAttribute(String.class, a -> a.name("attribute4a") + .getter(ImmutableRecord::attribute1) + .setter(ImmutableRecord.Builder::attribute1)) + .build(); + + private final TableSchema childTableSchema4b = + TableSchema.builder(ImmutableRecord.class, ImmutableRecord.Builder.class) + .newItemBuilder(ImmutableRecord::builder, ImmutableRecord.Builder::build) + .addAttribute(String.class, a -> a.name("attribute4b") + .getter(ImmutableRecord::attribute1) + .setter(ImmutableRecord.Builder::attribute1)) + .build(); + + private final TableSchema childTableSchema3a = + TableSchema.builder(ImmutableRecord.class, ImmutableRecord.Builder.class) + .newItemBuilder(ImmutableRecord::builder, ImmutableRecord.Builder::build) + .addAttribute(String.class, a -> a.name("attribute3a") + .getter(ImmutableRecord::attribute1) + .setter(ImmutableRecord.Builder::attribute1)) + .build(); + + private final TableSchema childTableSchema3b = + TableSchema.builder(ImmutableRecord.class, ImmutableRecord.Builder.class) + .newItemBuilder(ImmutableRecord::builder, ImmutableRecord.Builder::build) + .addAttribute(String.class, a -> a.name("attribute3b") + .getter(ImmutableRecord::attribute1) + .setter(ImmutableRecord.Builder::attribute1)) + .build(); + + private final TableSchema childTableSchema2a = + TableSchema.builder(ImmutableRecord.class, ImmutableRecord.Builder.class) + .newItemBuilder(ImmutableRecord::builder, ImmutableRecord.Builder::build) + .addAttribute(String.class, a -> a.name("attribute2a") + 
.getter(ImmutableRecord::attribute1) + .setter(ImmutableRecord.Builder::attribute1)) + .flatten(childTableSchema3a, ImmutableRecord::getChild1, ImmutableRecord.Builder::child1) + .flatten(childTableSchema3b, ImmutableRecord::getChild2, ImmutableRecord.Builder::child2) + .build(); + + private final TableSchema childTableSchema2b = + TableSchema.builder(ImmutableRecord.class, ImmutableRecord.Builder.class) + .newItemBuilder(ImmutableRecord::builder, ImmutableRecord.Builder::build) + .addAttribute(String.class, a -> a.name("attribute2b") + .getter(ImmutableRecord::attribute1) + .setter(ImmutableRecord.Builder::attribute1)) + .flatten(childTableSchema4a, ImmutableRecord::getChild1, ImmutableRecord.Builder::child1) + .flatten(childTableSchema4b, ImmutableRecord::getChild2, ImmutableRecord.Builder::child2) + .build(); + + private final TableSchema immutableTableSchema = + TableSchema.builder(ImmutableRecord.class, ImmutableRecord.Builder.class) + .newItemBuilder(ImmutableRecord::builder, ImmutableRecord.Builder::build) + .addAttribute(String.class, a -> a.name("id") + .getter(ImmutableRecord::id) + .setter(ImmutableRecord.Builder::id) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("attribute1") + .getter(ImmutableRecord::attribute1) + .setter(ImmutableRecord.Builder::attribute1)) + .flatten(childTableSchema2a, ImmutableRecord::getChild1, ImmutableRecord.Builder::child1) + .flatten(childTableSchema2b, ImmutableRecord::getChild2, ImmutableRecord.Builder::child2) + .build(); + + @Test + public void itemToMap_completeRecord() { + Map result = immutableTableSchema.itemToMap(TEST_RECORD, false); + + assertThat(result).isEqualTo(ITEM_MAP); + } + + @Test + public void itemToMap_specificAttributes() { + Map result = + immutableTableSchema.itemToMap(TEST_RECORD, Arrays.asList("attribute1", "attribute2a", "attribute4b")); + + Map expectedResult = new HashMap<>(); + expectedResult.put("attribute1", AttributeValue.builder().s("1").build()); + expectedResult.put("attribute2a", AttributeValue.builder().s("2a").build()); + expectedResult.put("attribute4b", AttributeValue.builder().s("4b").build()); + + assertThat(result).isEqualTo(expectedResult); + } + + @Test + public void itemToMap_specificAttribute() { + AttributeValue result = immutableTableSchema.attributeValue(TEST_RECORD, "attribute4b"); + assertThat(result).isEqualTo(AttributeValue.builder().s("4b").build()); + } + + @Test + public void mapToItem() { + ImmutableRecord record = immutableTableSchema.mapToItem(ITEM_MAP); + + assertThat(record).isEqualTo(TEST_RECORD); + } + + @Test + public void attributeNames() { + Collection result = immutableTableSchema.attributeNames(); + + assertThat(result).containsExactlyInAnyOrder(ITEM_MAP.keySet().toArray(new String[]{})); + } + + public static class ImmutableRecord { + private final String id; + private final String attribute1; + private final ImmutableRecord child1; + private final ImmutableRecord child2; + + private ImmutableRecord(Builder b) { + this.id = b.id; + this.attribute1 = b.attribute1; + this.child1 = b.child1; + this.child2 = b.child2; + } + + public static Builder builder() { + return new Builder(); + } + + public String id() { + return id; + } + + public String attribute1() { + return attribute1; + } + + public ImmutableRecord getChild1() { + return child1; + } + + public ImmutableRecord getChild2() { + return child2; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + 
ImmutableRecord that = (ImmutableRecord) o; + + if (id != null ? !id.equals(that.id) : that.id != null) return false; + if (attribute1 != null ? !attribute1.equals(that.attribute1) : that.attribute1 != null) return false; + if (child1 != null ? !child1.equals(that.child1) : that.child1 != null) return false; + return child2 != null ? child2.equals(that.child2) : that.child2 == null; + } + + @Override + public int hashCode() { + int result = id != null ? id.hashCode() : 0; + result = 31 * result + (attribute1 != null ? attribute1.hashCode() : 0); + result = 31 * result + (child1 != null ? child1.hashCode() : 0); + result = 31 * result + (child2 != null ? child2.hashCode() : 0); + return result; + } + + public static class Builder { + private String id; + private String attribute1; + private ImmutableRecord child1; + private ImmutableRecord child2; + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder attribute1(String attribute1) { + this.attribute1 = attribute1; + return this; + } + + public Builder child1(ImmutableRecord child1) { + this.child1 = child1; + return this; + } + + public Builder child2(ImmutableRecord child2) { + this.child2 = child2; + return this; + } + + public ImmutableRecord build() { + return new ImmutableRecord(this); + } + } + } +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java new file mode 100644 index 000000000000..177384e358bb --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java @@ -0,0 +1,1514 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
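Because flatten() promotes every child attribute to the top level of the item, each nested child schema above has to use a distinct attribute name (attribute2a/2b, 3a/3b, 4a/4b). The projection-style calls the test exercises, in sketch form, reuse the schema and record defined above.

// Sketch only, reusing immutableTableSchema and TEST_RECORD from the test above.
Map<String, AttributeValue> fullItem = immutableTableSchema.itemToMap(TEST_RECORD, false);

// Only the named attributes are serialized, even when they originate from flattened children.
Map<String, AttributeValue> projection =
    immutableTableSchema.itemToMap(TEST_RECORD, Arrays.asList("id", "attribute3a"));

// A single attribute can also be resolved through the flattened hierarchy.
AttributeValue value = immutableTableSchema.attributeValue(TEST_RECORD, "attribute4b");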
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.nullAttributeValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.math.BigDecimal; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Consumer; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemComposedClass; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@RunWith(MockitoJUnitRunner.class) +public class StaticImmutableTableSchemaTest { + private static final String TABLE_TAG_KEY = "table-tag-key"; + private static final String TABLE_TAG_VALUE = "table-tag-value"; + private static final AttributeValue ATTRIBUTE_VALUE_B = AttributeValue.builder().bool(true).build(); + private static final AttributeValue ATTRIBUTE_VALUE_S = AttributeValue.builder().s("test-string").build(); + + private static final StaticTableSchema FAKE_DOCUMENT_TABLE_SCHEMA = + StaticTableSchema.builder(FakeDocument.class) + .newItemSupplier(FakeDocument::new) + .addAttribute(String.class, a -> a.name("documentString") + .getter(FakeDocument::getDocumentString) + .setter(FakeDocument::setDocumentString)) + .addAttribute(Integer.class, a -> a.name("documentInteger") + .getter(FakeDocument::getDocumentInteger) + .setter(FakeDocument::setDocumentInteger)) + .build(); + + private static final FakeMappedItem FAKE_ITEM = FakeMappedItem.builder() + .aPrimitiveBoolean(true) + .aBoolean(true) + .aString("test-string") + .build(); + + private static class FakeMappedItem { + private boolean aPrimitiveBoolean; + private Boolean aBoolean; + private String aString; + private Integer anInteger; + private int aPrimitiveInteger; + private Byte aByte; + private byte aPrimitiveByte; + private Long aLong; + private long aPrimitiveLong; + private Short aShort; + private short aPrimitiveShort; + private Double aDouble; + private double aPrimitiveDouble; + 
private Float aFloat; + private float aPrimitiveFloat; + private BigDecimal aBigDecimal; + private SdkBytes aBinaryValue; + private FakeDocument aFakeDocument; + private Set aStringSet; + private Set anIntegerSet; + private Set aByteSet; + private Set aLongSet; + private Set aShortSet; + private Set aDoubleSet; + private Set aFloatSet; + private Set aBinarySet; + private List anIntegerList; + private List> aNestedStructure; + private Map aStringMap; + private Map aIntDoubleMap; + private TestEnum testEnum; + + FakeMappedItem() { + } + + FakeMappedItem(boolean aPrimitiveBoolean, Boolean aBoolean, String aString, Integer anInteger, + int aPrimitiveInteger, Byte aByte, byte aPrimitiveByte, Long aLong, long aPrimitiveLong, + Short aShort, short aPrimitiveShort, Double aDouble, double aPrimitiveDouble, Float aFloat, + float aPrimitiveFloat, BigDecimal aBigDecimal, SdkBytes aBinaryValue, FakeDocument aFakeDocument, + Set aStringSet, Set anIntegerSet, Set aByteSet, + Set aLongSet, Set aShortSet, Set aDoubleSet, Set aFloatSet, + Set aBinarySet, List anIntegerList, + List> aNestedStructure, Map aStringMap, + Map aIntDoubleMap, TestEnum testEnum) { + this.aPrimitiveBoolean = aPrimitiveBoolean; + this.aBoolean = aBoolean; + this.aString = aString; + this.anInteger = anInteger; + this.aPrimitiveInteger = aPrimitiveInteger; + this.aByte = aByte; + this.aPrimitiveByte = aPrimitiveByte; + this.aLong = aLong; + this.aPrimitiveLong = aPrimitiveLong; + this.aShort = aShort; + this.aPrimitiveShort = aPrimitiveShort; + this.aDouble = aDouble; + this.aPrimitiveDouble = aPrimitiveDouble; + this.aFloat = aFloat; + this.aPrimitiveFloat = aPrimitiveFloat; + this.aBigDecimal = aBigDecimal; + this.aBinaryValue = aBinaryValue; + this.aFakeDocument = aFakeDocument; + this.aStringSet = aStringSet; + this.anIntegerSet = anIntegerSet; + this.aByteSet = aByteSet; + this.aLongSet = aLongSet; + this.aShortSet = aShortSet; + this.aDoubleSet = aDoubleSet; + this.aFloatSet = aFloatSet; + this.aBinarySet = aBinarySet; + this.anIntegerList = anIntegerList; + this.aNestedStructure = aNestedStructure; + this.aStringMap = aStringMap; + this.aIntDoubleMap = aIntDoubleMap; + this.testEnum = testEnum; + } + + public static Builder builder() { + return new Builder(); + } + + boolean isAPrimitiveBoolean() { + return aPrimitiveBoolean; + } + + void setAPrimitiveBoolean(boolean aPrimitiveBoolean) { + this.aPrimitiveBoolean = aPrimitiveBoolean; + } + + Boolean getABoolean() { + return aBoolean; + } + + void setABoolean(Boolean aBoolean) { + this.aBoolean = aBoolean; + } + + String getAString() { + return aString; + } + + void setAString(String aString) { + this.aString = aString; + } + + Integer getAnInteger() { + return anInteger; + } + + void setAnInteger(Integer anInteger) { + this.anInteger = anInteger; + } + + int getAPrimitiveInteger() { + return aPrimitiveInteger; + } + + void setAPrimitiveInteger(int aPrimitiveInteger) { + this.aPrimitiveInteger = aPrimitiveInteger; + } + + Byte getAByte() { + return aByte; + } + + void setAByte(Byte aByte) { + this.aByte = aByte; + } + + byte getAPrimitiveByte() { + return aPrimitiveByte; + } + + void setAPrimitiveByte(byte aPrimitiveByte) { + this.aPrimitiveByte = aPrimitiveByte; + } + + Long getALong() { + return aLong; + } + + void setALong(Long aLong) { + this.aLong = aLong; + } + + long getAPrimitiveLong() { + return aPrimitiveLong; + } + + void setAPrimitiveLong(long aPrimitiveLong) { + this.aPrimitiveLong = aPrimitiveLong; + } + + Short getAShort() { + return aShort; + } + + void 
setAShort(Short aShort) { + this.aShort = aShort; + } + + short getAPrimitiveShort() { + return aPrimitiveShort; + } + + void setAPrimitiveShort(short aPrimitiveShort) { + this.aPrimitiveShort = aPrimitiveShort; + } + + Double getADouble() { + return aDouble; + } + + void setADouble(Double aDouble) { + this.aDouble = aDouble; + } + + double getAPrimitiveDouble() { + return aPrimitiveDouble; + } + + void setAPrimitiveDouble(double aPrimitiveDouble) { + this.aPrimitiveDouble = aPrimitiveDouble; + } + + Float getAFloat() { + return aFloat; + } + + void setAFloat(Float aFloat) { + this.aFloat = aFloat; + } + + BigDecimal aBigDecimal() { + return aBigDecimal; + } + + void setABigDecimal(BigDecimal aBigDecimal) { + this.aBigDecimal = aBigDecimal; + } + + float getAPrimitiveFloat() { + return aPrimitiveFloat; + } + + void setAPrimitiveFloat(float aPrimitiveFloat) { + this.aPrimitiveFloat = aPrimitiveFloat; + } + + SdkBytes getABinaryValue() { + return aBinaryValue; + } + + void setABinaryValue(SdkBytes aBinaryValue) { + this.aBinaryValue = aBinaryValue; + } + + FakeDocument getAFakeDocument() { + return aFakeDocument; + } + + void setAFakeDocument(FakeDocument aFakeDocument) { + this.aFakeDocument = aFakeDocument; + } + + Set getAStringSet() { + return aStringSet; + } + + void setAStringSet(Set aStringSet) { + this.aStringSet = aStringSet; + } + + Set getAnIntegerSet() { + return anIntegerSet; + } + + void setAnIntegerSet(Set anIntegerSet) { + this.anIntegerSet = anIntegerSet; + } + + Set getAByteSet() { + return aByteSet; + } + + void setAByteSet(Set aByteSet) { + this.aByteSet = aByteSet; + } + + Set getALongSet() { + return aLongSet; + } + + void setALongSet(Set aLongSet) { + this.aLongSet = aLongSet; + } + + Set getAShortSet() { + return aShortSet; + } + + void setAShortSet(Set aShortSet) { + this.aShortSet = aShortSet; + } + + Set getADoubleSet() { + return aDoubleSet; + } + + void setADoubleSet(Set aDoubleSet) { + this.aDoubleSet = aDoubleSet; + } + + Set getAFloatSet() { + return aFloatSet; + } + + void setAFloatSet(Set aFloatSet) { + this.aFloatSet = aFloatSet; + } + + Set getABinarySet() { + return aBinarySet; + } + + void setABinarySet(Set aBinarySet) { + this.aBinarySet = aBinarySet; + } + + List getAnIntegerList() { + return anIntegerList; + } + + void setAnIntegerList(List anIntegerList) { + this.anIntegerList = anIntegerList; + } + + List> getANestedStructure() { + return aNestedStructure; + } + + void setANestedStructure(List> aNestedStructure) { + this.aNestedStructure = aNestedStructure; + } + + Map getAStringMap() { + return aStringMap; + } + + void setAStringMap(Map aStringMap) { + this.aStringMap = aStringMap; + } + + Map getAIntDoubleMap() { + return aIntDoubleMap; + } + + void setAIntDoubleMap(Map aIntDoubleMap) { + this.aIntDoubleMap = aIntDoubleMap; + } + + TestEnum getTestEnum() { + return testEnum; + } + + void setTestEnum(TestEnum testEnum) { + this.testEnum = testEnum; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FakeMappedItem that = (FakeMappedItem) o; + return aPrimitiveBoolean == that.aPrimitiveBoolean && + aPrimitiveInteger == that.aPrimitiveInteger && + aPrimitiveByte == that.aPrimitiveByte && + aPrimitiveLong == that.aPrimitiveLong && + aPrimitiveShort == that.aPrimitiveShort && + Double.compare(that.aPrimitiveDouble, aPrimitiveDouble) == 0 && + Float.compare(that.aPrimitiveFloat, aPrimitiveFloat) == 0 && + Objects.equals(aBoolean, that.aBoolean) 
&& + Objects.equals(aString, that.aString) && + Objects.equals(anInteger, that.anInteger) && + Objects.equals(aByte, that.aByte) && + Objects.equals(aLong, that.aLong) && + Objects.equals(aShort, that.aShort) && + Objects.equals(aDouble, that.aDouble) && + Objects.equals(aFloat, that.aFloat) && + Objects.equals(aBinaryValue, that.aBinaryValue) && + Objects.equals(aFakeDocument, that.aFakeDocument) && + Objects.equals(aStringSet, that.aStringSet) && + Objects.equals(anIntegerSet, that.anIntegerSet) && + Objects.equals(aByteSet, that.aByteSet) && + Objects.equals(aLongSet, that.aLongSet) && + Objects.equals(aShortSet, that.aShortSet) && + Objects.equals(aDoubleSet, that.aDoubleSet) && + Objects.equals(aFloatSet, that.aFloatSet) && + Objects.equals(aBinarySet, that.aBinarySet) && + Objects.equals(anIntegerList, that.anIntegerList) && + Objects.equals(aNestedStructure, that.aNestedStructure) && + Objects.equals(aStringMap, that.aStringMap) && + Objects.equals(aIntDoubleMap, that.aIntDoubleMap) && + Objects.equals(testEnum, that.testEnum); + } + + @Override + public int hashCode() { + return Objects.hash(aPrimitiveBoolean, aBoolean, aString, anInteger, aPrimitiveInteger, aByte, + aPrimitiveByte, aLong, aPrimitiveLong, aShort, aPrimitiveShort, aDouble, + aPrimitiveDouble, aFloat, aPrimitiveFloat, aBinaryValue, aFakeDocument, aStringSet, + anIntegerSet, aByteSet, aLongSet, aShortSet, aDoubleSet, aFloatSet, aBinarySet, + anIntegerList, aNestedStructure, aStringMap, aIntDoubleMap, testEnum); + } + + public enum TestEnum { + ONE, + TWO, + THREE; + } + + private static class Builder { + private boolean aPrimitiveBoolean; + private Boolean aBoolean; + private String aString; + private Integer anInteger; + private int aPrimitiveInteger; + private Byte aByte; + private byte aPrimitiveByte; + private Long aLong; + private long aPrimitiveLong; + private Short aShort; + private short aPrimitiveShort; + private Double aDouble; + private double aPrimitiveDouble; + private Float aFloat; + private float aPrimitiveFloat; + private BigDecimal aBigDecimal; + private SdkBytes aBinaryValue; + private FakeDocument aFakeDocument; + private Set aStringSet; + private Set anIntegerSet; + private Set aByteSet; + private Set aLongSet; + private Set aShortSet; + private Set aDoubleSet; + private Set aFloatSet; + private Set aBinarySet; + private List anIntegerList; + private List> aNestedStructure; + private Map aStringMap; + private Map aIntDoubleMap; + private TestEnum testEnum; + + Builder aPrimitiveBoolean(boolean aPrimitiveBoolean) { + this.aPrimitiveBoolean = aPrimitiveBoolean; + return this; + } + + Builder aBoolean(Boolean aBoolean) { + this.aBoolean = aBoolean; + return this; + } + + Builder aString(String aString) { + this.aString = aString; + return this; + } + + Builder anInteger(Integer anInteger) { + this.anInteger = anInteger; + return this; + } + + Builder aPrimitiveInteger(int aPrimitiveInteger) { + this.aPrimitiveInteger = aPrimitiveInteger; + return this; + } + + Builder aByte(Byte aByte) { + this.aByte = aByte; + return this; + } + + Builder aPrimitiveByte(byte aPrimitiveByte) { + this.aPrimitiveByte = aPrimitiveByte; + return this; + } + + Builder aLong(Long aLong) { + this.aLong = aLong; + return this; + } + + Builder aPrimitiveLong(long aPrimitiveLong) { + this.aPrimitiveLong = aPrimitiveLong; + return this; + } + + Builder aShort(Short aShort) { + this.aShort = aShort; + return this; + } + + Builder aPrimitiveShort(short aPrimitiveShort) { + this.aPrimitiveShort = aPrimitiveShort; + return this; + } 
+ + Builder aDouble(Double aDouble) { + this.aDouble = aDouble; + return this; + } + + Builder aPrimitiveDouble(double aPrimitiveDouble) { + this.aPrimitiveDouble = aPrimitiveDouble; + return this; + } + + Builder aFloat(Float aFloat) { + this.aFloat = aFloat; + return this; + } + + Builder aPrimitiveFloat(float aPrimitiveFloat) { + this.aPrimitiveFloat = aPrimitiveFloat; + return this; + } + + Builder aBigDecimal(BigDecimal aBigDecimal) { + this.aBigDecimal = aBigDecimal; + return this; + } + + Builder aBinaryValue(SdkBytes aBinaryValue) { + this.aBinaryValue = aBinaryValue; + return this; + } + + Builder aFakeDocument(FakeDocument aFakeDocument) { + this.aFakeDocument = aFakeDocument; + return this; + } + + Builder aStringSet(Set aStringSet) { + this.aStringSet = aStringSet; + return this; + } + + Builder anIntegerSet(Set anIntegerSet) { + this.anIntegerSet = anIntegerSet; + return this; + } + + Builder aByteSet(Set aByteSet) { + this.aByteSet = aByteSet; + return this; + } + + Builder aLongSet(Set aLongSet) { + this.aLongSet = aLongSet; + return this; + } + + Builder aShortSet(Set aShortSet) { + this.aShortSet = aShortSet; + return this; + } + + Builder aDoubleSet(Set aDoubleSet) { + this.aDoubleSet = aDoubleSet; + return this; + } + + Builder aFloatSet(Set aFloatSet) { + this.aFloatSet = aFloatSet; + return this; + } + + Builder aBinarySet(Set aBinarySet) { + this.aBinarySet = aBinarySet; + return this; + } + + Builder anIntegerList(List anIntegerList) { + this.anIntegerList = anIntegerList; + return this; + } + + Builder aNestedStructure(List> aNestedStructure) { + this.aNestedStructure = aNestedStructure; + return this; + } + + Builder aStringMap(Map aStringMap) { + this.aStringMap = aStringMap; + return this; + } + + Builder aIntDoubleMap(Map aIntDoubleMap) { + this.aIntDoubleMap = aIntDoubleMap; + return this; + } + + Builder testEnum(TestEnum testEnum) { + this.testEnum = testEnum; + return this; + } + + public StaticImmutableTableSchemaTest.FakeMappedItem build() { + return new StaticImmutableTableSchemaTest.FakeMappedItem(aPrimitiveBoolean, aBoolean, aString, anInteger, aPrimitiveInteger, aByte, + aPrimitiveByte, aLong, aPrimitiveLong, aShort, aPrimitiveShort, aDouble, + aPrimitiveDouble, aFloat, aPrimitiveFloat, aBigDecimal, aBinaryValue, aFakeDocument, + aStringSet, anIntegerSet, aByteSet, aLongSet, aShortSet, aDoubleSet, + aFloatSet, aBinarySet, anIntegerList, aNestedStructure, aStringMap, aIntDoubleMap, + testEnum); + } + } + } + + private static class FakeDocument { + private String documentString; + private Integer documentInteger; + + FakeDocument() { + } + + private FakeDocument(String documentString, Integer documentInteger) { + this.documentString = documentString; + this.documentInteger = documentInteger; + } + + private static FakeDocument of(String documentString, Integer documentInteger) { + return new FakeDocument(documentString, documentInteger); + } + + String getDocumentString() { + return documentString; + } + + void setDocumentString(String documentString) { + this.documentString = documentString; + } + + Integer getDocumentInteger() { + return documentInteger; + } + + void setDocumentInteger(Integer documentInteger) { + this.documentInteger = documentInteger; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FakeDocument that = (FakeDocument) o; + return Objects.equals(documentString, that.documentString) && + Objects.equals(documentInteger, 
that.documentInteger); + } + + @Override + public int hashCode() { + return Objects.hash(documentString, documentInteger); + } + } + + private static class FakeAbstractSubclass extends FakeAbstractSuperclass { + + } + + private static class FakeBrokenClass { + FakeAbstractSuperclass abstractObject; + + FakeAbstractSuperclass getAbstractObject() { + return abstractObject; + } + + void setAbstractObject(FakeAbstractSuperclass abstractObject) { + this.abstractObject = abstractObject; + } + } + + private static abstract class FakeAbstractSuperclass { + private String aString; + + String getAString() { + return aString; + } + + void setAString(String aString) { + this.aString = aString; + } + } + + private static final Collection> ATTRIBUTES = Arrays.asList( + StaticAttribute.builder(FakeMappedItem.class, Boolean.class) + .name("a_primitive_boolean") + .getter(FakeMappedItem::isAPrimitiveBoolean) + .setter(FakeMappedItem::setAPrimitiveBoolean) + .build(), + StaticAttribute.builder(FakeMappedItem.class, Boolean.class) + .name("a_boolean") + .getter(FakeMappedItem::getABoolean) + .setter(FakeMappedItem::setABoolean) + .build(), + StaticAttribute.builder(FakeMappedItem.class, String.class) + .name("a_string") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString) + .build() + ); + + private StaticTableSchema createSimpleTableSchema() { + return StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .attributes(ATTRIBUTES) + .build(); + } + + private static class TestStaticTableTag implements StaticTableTag { + @Override + public Consumer modifyMetadata() { + return metadata -> metadata.addCustomMetadataObject(TABLE_TAG_KEY, TABLE_TAG_VALUE); + } + } + + @Mock + private AttributeConverterProvider provider1; + + @Mock + private AttributeConverterProvider provider2; + + @Mock + private AttributeConverter attributeConverter1; + + @Mock + private AttributeConverter attributeConverter2; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void itemType_returnsCorrectClass() { + assertThat(FakeItem.getTableSchema().itemType(), is(equalTo(EnhancedType.of(FakeItem.class)))); + } + + @Test + public void getTableMetadata_hasCorrectFields() { + TableMetadata tableMetadata = FakeItemWithSort.getTableSchema().tableMetadata(); + + assertThat(tableMetadata.primaryPartitionKey(), is("id")); + assertThat(tableMetadata.primarySortKey(), is(Optional.of("sort"))); + } + + @Test + public void itemToMap_returnsCorrectMapWithMultipleAttributes() { + Map attributeMap = createSimpleTableSchema().itemToMap(FAKE_ITEM, false); + + assertThat(attributeMap.size(), is(3)); + assertThat(attributeMap, hasEntry("a_boolean", ATTRIBUTE_VALUE_B)); + assertThat(attributeMap, hasEntry("a_primitive_boolean", ATTRIBUTE_VALUE_B)); + assertThat(attributeMap, hasEntry("a_string", ATTRIBUTE_VALUE_S)); + } + + @Test + public void itemToMap_omitsNullAttributes() { + FakeMappedItem fakeMappedItemWithNulls = FakeMappedItem.builder().aPrimitiveBoolean(true).build(); + Map attributeMap = createSimpleTableSchema().itemToMap(fakeMappedItemWithNulls, true); + + assertThat(attributeMap.size(), is(1)); + assertThat(attributeMap, hasEntry("a_primitive_boolean", ATTRIBUTE_VALUE_B)); + } + + @Test + public void itemToMap_filtersAttributes() { + Map attributeMap = createSimpleTableSchema() + .itemToMap(FAKE_ITEM, asList("a_boolean", "a_string")); + + assertThat(attributeMap.size(), is(2)); + assertThat(attributeMap, hasEntry("a_boolean", ATTRIBUTE_VALUE_B)); + 
assertThat(attributeMap, hasEntry("a_string", ATTRIBUTE_VALUE_S)); + } + + @Test(expected = IllegalArgumentException.class) + public void itemToMap_attributeNotFound_throwsIllegalArgumentException() { + createSimpleTableSchema().itemToMap(FAKE_ITEM, singletonList("unknown_key")); + } + + @Test + public void mapToItem_returnsCorrectItemWithMultipleAttributes() { + Map attributeValueMap = new HashMap<>(); + attributeValueMap.put("a_boolean", ATTRIBUTE_VALUE_B); + attributeValueMap.put("a_primitive_boolean", ATTRIBUTE_VALUE_B); + attributeValueMap.put("a_string", ATTRIBUTE_VALUE_S); + + FakeMappedItem fakeMappedItem = + createSimpleTableSchema().mapToItem(Collections.unmodifiableMap(attributeValueMap)); + + assertThat(fakeMappedItem, is(FAKE_ITEM)); + } + + @Test + public void mapToItem_unknownAttributes_doNotCauseErrors() { + Map attributeValueMap = new HashMap<>(); + attributeValueMap.put("unknown_attribute", ATTRIBUTE_VALUE_S); + + createSimpleTableSchema().mapToItem(Collections.unmodifiableMap(attributeValueMap)); + } + + @Test(expected = IllegalArgumentException.class) + public void mapToItem_attributesWrongType_throwsException() { + Map attributeValueMap = new HashMap<>(); + attributeValueMap.put("a_boolean", ATTRIBUTE_VALUE_S); + attributeValueMap.put("a_primitive_boolean", ATTRIBUTE_VALUE_S); + attributeValueMap.put("a_string", ATTRIBUTE_VALUE_B); + + createSimpleTableSchema().mapToItem(Collections.unmodifiableMap(attributeValueMap)); + } + + @Test + public void mapperCanHandleEnum() { + verifyNullableAttribute(EnhancedType.of(FakeMappedItem.TestEnum.class), + a -> a.name("value") + .getter(FakeMappedItem::getTestEnum) + .setter(FakeMappedItem::setTestEnum), + FakeMappedItem.builder().testEnum(FakeMappedItem.TestEnum.ONE).build(), + AttributeValue.builder().s("ONE").build()); + } + + @Test + public void mapperCanHandleDocument() { + FakeDocument fakeDocument = FakeDocument.of("test-123", 123); + + Map expectedMap = new HashMap<>(); + expectedMap.put("documentInteger", AttributeValue.builder().n("123").build()); + expectedMap.put("documentString", AttributeValue.builder().s("test-123").build()); + + verifyNullableAttribute(EnhancedType.documentOf(FakeDocument.class, FAKE_DOCUMENT_TABLE_SCHEMA), + a -> a.name("value") + .getter(FakeMappedItem::getAFakeDocument) + .setter(FakeMappedItem::setAFakeDocument), + FakeMappedItem.builder().aFakeDocument(fakeDocument).build(), + AttributeValue.builder().m(expectedMap).build()); + } + + @Test + public void mapperCanHandleDocumentWithNullValues() { + verifyNullAttribute(EnhancedType.documentOf(FakeDocument.class, FAKE_DOCUMENT_TABLE_SCHEMA), + a -> a.name("value") + .getter(FakeMappedItem::getAFakeDocument) + .setter(FakeMappedItem::setAFakeDocument), + FakeMappedItem.builder().build()); + } + + @Test + public void mapperCanHandleInteger() { + verifyNullableAttribute(EnhancedType.of(Integer.class), a -> a.name("value") + .getter(FakeMappedItem::getAnInteger) + .setter(FakeMappedItem::setAnInteger), + FakeMappedItem.builder().anInteger(123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandlePrimitiveInteger() { + verifyAttribute(EnhancedType.of(int.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveInteger) + .setter(FakeMappedItem::setAPrimitiveInteger), + FakeMappedItem.builder().aPrimitiveInteger(123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandleBoolean() { + verifyNullableAttribute(EnhancedType.of(Boolean.class), + a -> 
a.name("value") + .getter(FakeMappedItem::getABoolean) + .setter(FakeMappedItem::setABoolean), + FakeMappedItem.builder().aBoolean(true).build(), + AttributeValue.builder().bool(true).build()); + } + + @Test + public void mapperCanHandlePrimitiveBoolean() { + verifyAttribute(EnhancedType.of(boolean.class), + a -> a.name("value") + .getter(FakeMappedItem::isAPrimitiveBoolean) + .setter(FakeMappedItem::setAPrimitiveBoolean), + FakeMappedItem.builder().aPrimitiveBoolean(true).build(), + AttributeValue.builder().bool(true).build()); + } + + @Test + public void mapperCanHandleString() { + verifyNullableAttribute(EnhancedType.of(String.class), + a -> a.name("value") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString), + FakeMappedItem.builder().aString("onetwothree").build(), + AttributeValue.builder().s("onetwothree").build()); + } + + @Test + public void mapperCanHandleLong() { + verifyNullableAttribute(EnhancedType.of(Long.class), + a -> a.name("value") + .getter(FakeMappedItem::getALong) + .setter(FakeMappedItem::setALong), + FakeMappedItem.builder().aLong(123L).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandlePrimitiveLong() { + verifyAttribute(EnhancedType.of(long.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveLong) + .setter(FakeMappedItem::setAPrimitiveLong), + FakeMappedItem.builder().aPrimitiveLong(123L).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandleShort() { + verifyNullableAttribute(EnhancedType.of(Short.class), + a -> a.name("value") + .getter(FakeMappedItem::getAShort) + .setter(FakeMappedItem::setAShort), + FakeMappedItem.builder().aShort((short)123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandlePrimitiveShort() { + verifyAttribute(EnhancedType.of(short.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveShort) + .setter(FakeMappedItem::setAPrimitiveShort), + FakeMappedItem.builder().aPrimitiveShort((short)123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandleByte() { + verifyNullableAttribute(EnhancedType.of(Byte.class), + a -> a.name("value") + .getter(FakeMappedItem::getAByte) + .setter(FakeMappedItem::setAByte), + FakeMappedItem.builder().aByte((byte)123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandlePrimitiveByte() { + verifyAttribute(EnhancedType.of(byte.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveByte) + .setter(FakeMappedItem::setAPrimitiveByte), + FakeMappedItem.builder().aPrimitiveByte((byte)123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandleDouble() { + verifyNullableAttribute(EnhancedType.of(Double.class), + a -> a.name("value") + .getter(FakeMappedItem::getADouble) + .setter(FakeMappedItem::setADouble), + FakeMappedItem.builder().aDouble(1.23).build(), + AttributeValue.builder().n("1.23").build()); + } + + @Test + public void mapperCanHandlePrimitiveDouble() { + verifyAttribute(EnhancedType.of(double.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveDouble) + .setter(FakeMappedItem::setAPrimitiveDouble), + FakeMappedItem.builder().aPrimitiveDouble(1.23).build(), + AttributeValue.builder().n("1.23").build()); + } + + @Test + public void mapperCanHandleFloat() { + verifyNullableAttribute(EnhancedType.of(Float.class), + a -> a.name("value") + 
.getter(FakeMappedItem::getAFloat) + .setter(FakeMappedItem::setAFloat), + FakeMappedItem.builder().aFloat(1.23f).build(), + AttributeValue.builder().n("1.23").build()); + } + + @Test + public void mapperCanHandlePrimitiveFloat() { + verifyAttribute(EnhancedType.of(float.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveFloat) + .setter(FakeMappedItem::setAPrimitiveFloat), + FakeMappedItem.builder().aPrimitiveFloat(1.23f).build(), + AttributeValue.builder().n("1.23").build()); + } + + + @Test + public void mapperCanHandleBinary() { + SdkBytes sdkBytes = SdkBytes.fromString("test", UTF_8); + verifyNullableAttribute(EnhancedType.of(SdkBytes.class), + a -> a.name("value") + .getter(FakeMappedItem::getABinaryValue) + .setter(FakeMappedItem::setABinaryValue), + FakeMappedItem.builder().aBinaryValue(sdkBytes).build(), + AttributeValue.builder().b(sdkBytes).build()); + } + + @Test + public void mapperCanHandleSimpleList() { + verifyNullableAttribute(EnhancedType.listOf(Integer.class), + a -> a.name("value") + .getter(FakeMappedItem::getAnIntegerList) + .setter(FakeMappedItem::setAnIntegerList), + FakeMappedItem.builder().anIntegerList(asList(1, 2, 3)).build(), + AttributeValue.builder().l(asList(AttributeValue.builder().n("1").build(), + AttributeValue.builder().n("2").build(), + AttributeValue.builder().n("3").build())).build()); + } + + @Test + public void mapperCanHandleNestedLists() { + FakeMappedItem fakeMappedItem = + FakeMappedItem.builder() + .aNestedStructure(singletonList(singletonList(FakeDocument.of("nested", null)))) + .build(); + + Map documentMap = new HashMap<>(); + documentMap.put("documentString", AttributeValue.builder().s("nested").build()); + documentMap.put("documentInteger", AttributeValue.builder().nul(true).build()); + + AttributeValue attributeValue = + AttributeValue.builder() + .l(singletonList(AttributeValue.builder() + .l(AttributeValue.builder().m(documentMap).build()) + .build())) + .build(); + + verifyNullableAttribute( + EnhancedType.listOf(EnhancedType.listOf(EnhancedType.documentOf(FakeDocument.class, FAKE_DOCUMENT_TABLE_SCHEMA))), + a -> a.name("value") + .getter(FakeMappedItem::getANestedStructure) + .setter(FakeMappedItem::setANestedStructure), + fakeMappedItem, + attributeValue); + } + + @Test + public void mapperCanHandleIntegerSet() { + Set valueSet = new HashSet<>(asList(1, 2, 3)); + List expectedList = valueSet.stream().map(Objects::toString).collect(toList()); + + verifyNullableAttribute(EnhancedType.setOf(Integer.class), + a -> a.name("value") + .getter(FakeMappedItem::getAnIntegerSet) + .setter(FakeMappedItem::setAnIntegerSet), + FakeMappedItem.builder().anIntegerSet(valueSet).build(), + AttributeValue.builder().ns(expectedList).build()); + } + + @Test + public void mapperCanHandleStringSet() { + Set valueSet = new HashSet<>(asList("one", "two", "three")); + List expectedList = valueSet.stream().map(Objects::toString).collect(toList()); + + verifyNullableAttribute(EnhancedType.setOf(String.class), + a -> a.name("value") + .getter(FakeMappedItem::getAStringSet) + .setter(FakeMappedItem::setAStringSet), + FakeMappedItem.builder().aStringSet(valueSet).build(), + AttributeValue.builder().ss(expectedList).build()); + } + + @Test + public void mapperCanHandleLongSet() { + Set valueSet = new HashSet<>(asList(1L, 2L, 3L)); + List expectedList = valueSet.stream().map(Objects::toString).collect(toList()); + + verifyNullableAttribute(EnhancedType.setOf(Long.class), + a -> a.name("value") + .getter(FakeMappedItem::getALongSet) + 
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableMetadataTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableMetadataTest.java
new file mode 100644
index 000000000000..6548e8ae905f
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableMetadataTest.java
@@ -0,0 +1,364 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; +import static software.amazon.awssdk.enhanced.dynamodb.TableMetadata.primaryIndexName; + +import java.util.Optional; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; + +public class StaticTableMetadataTest { + private static final String INDEX_NAME = "test_index"; + private static final String ATTRIBUTE_NAME = "test_attribute"; + private static final String ATTRIBUTE_NAME_2 = "test_attribute_2"; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void setAndRetrievePrimaryPartitionKey() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), ATTRIBUTE_NAME, AttributeValueType.S) + .build(); + + assertThat(tableMetadata.primaryPartitionKey(), is(ATTRIBUTE_NAME)); + } + + @Test + public void setAndRetrievePrimarySortKey() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexSortKey(primaryIndexName(), ATTRIBUTE_NAME, AttributeValueType.S) + .build(); + + assertThat(tableMetadata.primarySortKey(), is(Optional.of(ATTRIBUTE_NAME))); + } + + @Test(expected = IllegalArgumentException.class) + public void retrieveUnsetPrimaryPartitionKey() { + TableMetadata tableMetadata = StaticTableMetadata.builder().build(); + + tableMetadata.primaryPartitionKey(); + } + + @Test(expected = IllegalArgumentException.class) + public void retrieveUnsetPrimaryPartitionKey_withSortKeySet() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexSortKey(primaryIndexName(), + ATTRIBUTE_NAME, + AttributeValueType.S) + .build(); + + tableMetadata.primaryPartitionKey(); + } + + @Test + public void retrieveUnsetPrimarySortKey() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), + ATTRIBUTE_NAME, + AttributeValueType.S) + .build(); + + assertThat(tableMetadata.primarySortKey(), is(Optional.empty())); + } + + @Test + public void setAndRetrieveSecondaryPartitionKey() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(INDEX_NAME, + ATTRIBUTE_NAME, + AttributeValueType.S) + .build(); + + assertThat(tableMetadata.indexPartitionKey(INDEX_NAME), is(ATTRIBUTE_NAME)); + } + + @Test + public void setAndRetrieveSecondarySortKey() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexSortKey(INDEX_NAME, + ATTRIBUTE_NAME, + AttributeValueType.S) + .build(); + + assertThat(tableMetadata.indexSortKey(INDEX_NAME), is(Optional.of(ATTRIBUTE_NAME))); + } + + @Test(expected = IllegalArgumentException.class) + public void retrieveUnsetSecondaryPartitionKey() { + TableMetadata tableMetadata = StaticTableMetadata.builder().build(); + + tableMetadata.indexPartitionKey(INDEX_NAME); + } + + @Test + public void retrieveSecondaryPartitionKeyForLocalIndex() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), + ATTRIBUTE_NAME, + 
AttributeValueType.S) + .addIndexSortKey(INDEX_NAME, + ATTRIBUTE_NAME_2, + AttributeValueType.S) + .build(); + + assertThat(tableMetadata.indexPartitionKey(INDEX_NAME), is(ATTRIBUTE_NAME)); + } + + @Test + public void retrieveUnsetSecondarySortKey() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(INDEX_NAME, + ATTRIBUTE_NAME, + AttributeValueType.S) + .build(); + + assertThat(tableMetadata.indexSortKey(INDEX_NAME), is(Optional.empty())); + } + + @Test(expected = IllegalArgumentException.class) + public void setSamePartitionKeyTwice() { + StaticTableMetadata.builder() + .addIndexPartitionKey("idx", "id", AttributeValueType.S) + .addIndexPartitionKey("idx", "id", AttributeValueType.S) + .build(); + } + + @Test(expected = IllegalArgumentException.class) + public void setSameSortKeyTwice() { + StaticTableMetadata.builder() + .addIndexSortKey("idx", "id", AttributeValueType.S) + .addIndexSortKey("idx", "id", AttributeValueType.S) + .build(); + } + + @Test + public void getPrimaryKeys_partitionAndSort() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), "primary_id", AttributeValueType.S) + .addIndexSortKey(primaryIndexName(), "primary_sort", AttributeValueType.S) + .addIndexPartitionKey(INDEX_NAME, "dummy", AttributeValueType.S) + .addIndexSortKey(INDEX_NAME, "dummy2", AttributeValueType.S) + .build(); + + assertThat(tableMetadata.primaryKeys(), containsInAnyOrder("primary_id", "primary_sort")); + } + + @Test + public void getPrimaryKeys_partition() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), "primary_id", AttributeValueType.S) + .addIndexPartitionKey(INDEX_NAME, "dummy", AttributeValueType.S) + .addIndexSortKey(INDEX_NAME, "dummy2", AttributeValueType.S) + .build(); + + assertThat(tableMetadata.primaryKeys(), contains("primary_id")); + } + + @Test(expected = IllegalArgumentException.class) + public void getPrimaryKeys_unset() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(INDEX_NAME, "dummy", AttributeValueType.S) + .addIndexSortKey(INDEX_NAME, "dummy2", AttributeValueType.S) + .build(); + + tableMetadata.primaryKeys(); + } + + @Test + public void getIndexKeys_partitionAndSort() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), "primary_id", AttributeValueType.S) + .addIndexSortKey(primaryIndexName(), "primary_sort", AttributeValueType.S) + .addIndexPartitionKey(INDEX_NAME, "dummy", AttributeValueType.S) + .addIndexSortKey(INDEX_NAME, "dummy2", AttributeValueType.S) + .build(); + + assertThat(tableMetadata.indexKeys(INDEX_NAME), containsInAnyOrder("dummy", "dummy2")); + } + + @Test + public void getIndexKeys_partition() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), "primary_id", AttributeValueType.S) + .addIndexSortKey(primaryIndexName(), "primary_sort", AttributeValueType.S) + .addIndexPartitionKey(INDEX_NAME, "dummy", AttributeValueType.S) + .build(); + + assertThat(tableMetadata.indexKeys(INDEX_NAME), contains("dummy")); + } + + @Test(expected = IllegalArgumentException.class) + public void getIndexKeys_unset() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), "primary_id", AttributeValueType.S) + .addIndexSortKey(primaryIndexName(), "primary_sort", AttributeValueType.S) + .build(); + + 
tableMetadata.indexKeys(INDEX_NAME); + } + + @Test + public void getIndexKeys_sortOnly() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), "primary_id", AttributeValueType.S) + .addIndexSortKey(primaryIndexName(), "primary_sort", AttributeValueType.S) + .addIndexSortKey(INDEX_NAME, "dummy", AttributeValueType.S) + .build(); + + assertThat(tableMetadata.indexKeys(INDEX_NAME), containsInAnyOrder("primary_id", "dummy")); + } + + @Test + public void getAllKeys() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), "primary_id", AttributeValueType.S) + .addIndexSortKey(primaryIndexName(), "primary_sort", AttributeValueType.S) + .addIndexPartitionKey(INDEX_NAME, "dummy", AttributeValueType.S) + .addIndexSortKey(INDEX_NAME, "dummy2", AttributeValueType.S) + .build(); + + assertThat(tableMetadata.allKeys(), containsInAnyOrder("primary_id", "primary_sort", "dummy", "dummy2")); + } + + @Test + public void getScalarAttributeValueType() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), "primary_id", + AttributeValueType.S) + .addIndexSortKey(primaryIndexName(), "primary_sort", + AttributeValueType.N) + .addIndexPartitionKey(INDEX_NAME, "dummy", + AttributeValueType.B) + .addIndexSortKey(INDEX_NAME, "dummy2", AttributeValueType.BOOL) + .build(); + + assertThat(tableMetadata.scalarAttributeType("primary_id"), is(Optional.of(ScalarAttributeType.S))); + assertThat(tableMetadata.scalarAttributeType("primary_sort"), is(Optional.of(ScalarAttributeType.N))); + assertThat(tableMetadata.scalarAttributeType("dummy"), is(Optional.of(ScalarAttributeType.B))); + assertThat(tableMetadata.scalarAttributeType("dummy2"), is(Optional.empty())); + } + + @Test + public void setAndRetrieveSimpleCustomMetadata() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addCustomMetadataObject("custom-key", 123) + .build(); + + assertThat(tableMetadata.customMetadataObject("custom-key", Integer.class), is(Optional.of(123))); + } + + @Test + public void retrieveUnsetCustomMetadata() { + TableMetadata tableMetadata = StaticTableMetadata.builder().build(); + + assertThat(tableMetadata.customMetadataObject("custom-key", Integer.class), is(Optional.empty())); + } + + @Test(expected = IllegalArgumentException.class) + public void setAndRetrieveCustomMetadataOfUnassignableType() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addCustomMetadataObject("custom-key", 123.45) + .build(); + + tableMetadata.customMetadataObject("custom-key", Integer.class); + } + + @Test + public void setAndRetrieveCustomMetadataOfDifferentButAssignableType() { + TableMetadata tableMetadata = StaticTableMetadata.builder() + .addCustomMetadataObject("custom-key", 123.45f) + .build(); + + assertThat(tableMetadata.customMetadataObject("custom-key", Number.class), is(Optional.of(123.45f))); + } + + @Test + public void mergeFullIntoEmpty() { + StaticTableMetadata tableMetadata = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), "primary_id", AttributeValueType.S) + .addIndexSortKey(primaryIndexName(), "primary_sort", AttributeValueType.S) + .addIndexPartitionKey(INDEX_NAME, "dummy", AttributeValueType.S) + .addIndexSortKey(INDEX_NAME, "dummy2", AttributeValueType.S) + .addCustomMetadataObject("custom1", "value1") + .addCustomMetadataObject("custom2", "value2") + .build(); + + StaticTableMetadata mergedTableMetadata = 
StaticTableMetadata.builder().mergeWith(tableMetadata).build(); + + assertThat(mergedTableMetadata, is(tableMetadata)); + } + + @Test + public void mergeEmptyIntoFull() { + StaticTableMetadata emptyTableMetadata = StaticTableMetadata.builder().build(); + + StaticTableMetadata.Builder tableMetadataBuilder = StaticTableMetadata.builder() + .addIndexPartitionKey(primaryIndexName(), "primary_id", AttributeValueType.S) + .addIndexSortKey(primaryIndexName(), "primary_sort", AttributeValueType.S) + .addIndexPartitionKey(INDEX_NAME, "dummy", AttributeValueType.S) + .addIndexSortKey(INDEX_NAME, "dummy2", AttributeValueType.S) + .addCustomMetadataObject("custom1", "value1") + .addCustomMetadataObject("custom2", "value2"); + + StaticTableMetadata original = tableMetadataBuilder.build(); + StaticTableMetadata merged = tableMetadataBuilder.mergeWith(emptyTableMetadata).build(); + + assertThat(merged, is(original)); + } + + @Test + public void mergeWithDuplicateIndexPartitionKey() { + StaticTableMetadata.Builder builder = StaticTableMetadata.builder().addIndexPartitionKey(INDEX_NAME, "id", AttributeValueType.S); + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("partition key"); + exception.expectMessage(INDEX_NAME); + + builder.mergeWith(builder.build()); + } + + @Test + public void mergeWithDuplicateIndexSortKey() { + StaticTableMetadata.Builder builder = StaticTableMetadata.builder().addIndexSortKey(INDEX_NAME, "id", AttributeValueType.S); + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("sort key"); + exception.expectMessage(INDEX_NAME); + + builder.mergeWith(builder.build()); + } + + @Test + public void mergeWithDuplicateCustomMetadata() { + StaticTableMetadata.Builder builder = StaticTableMetadata.builder().addCustomMetadataObject(INDEX_NAME, "id"); + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("custom metadata"); + exception.expectMessage(INDEX_NAME); + + builder.mergeWith(builder.build()); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java new file mode 100644 index 000000000000..7ef020a15d52 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java @@ -0,0 +1,1514 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.nullAttributeValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.math.BigDecimal; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Consumer; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemComposedClass; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@RunWith(MockitoJUnitRunner.class) +public class StaticTableSchemaTest { + private static final String TABLE_TAG_KEY = "table-tag-key"; + private static final String TABLE_TAG_VALUE = "table-tag-value"; + private static final AttributeValue ATTRIBUTE_VALUE_B = AttributeValue.builder().bool(true).build(); + private static final AttributeValue ATTRIBUTE_VALUE_S = AttributeValue.builder().s("test-string").build(); + + private static final StaticTableSchema FAKE_DOCUMENT_TABLE_SCHEMA = + StaticTableSchema.builder(FakeDocument.class) + .newItemSupplier(FakeDocument::new) + .addAttribute(String.class, a -> a.name("documentString") + .getter(FakeDocument::getDocumentString) + .setter(FakeDocument::setDocumentString)) + .addAttribute(Integer.class, a -> a.name("documentInteger") + .getter(FakeDocument::getDocumentInteger) + .setter(FakeDocument::setDocumentInteger)) + .build(); + + private static final FakeMappedItem FAKE_ITEM = FakeMappedItem.builder() + .aPrimitiveBoolean(true) + .aBoolean(true) + .aString("test-string") + .build(); + + private static class FakeMappedItem { + private boolean aPrimitiveBoolean; + private Boolean aBoolean; + private String aString; + private Integer anInteger; + private int aPrimitiveInteger; + private Byte aByte; + private byte aPrimitiveByte; + private Long aLong; + private long aPrimitiveLong; + private Short aShort; + private short aPrimitiveShort; + private Double aDouble; + private double aPrimitiveDouble; + private 
Float aFloat; + private float aPrimitiveFloat; + private BigDecimal aBigDecimal; + private SdkBytes aBinaryValue; + private FakeDocument aFakeDocument; + private Set aStringSet; + private Set anIntegerSet; + private Set aByteSet; + private Set aLongSet; + private Set aShortSet; + private Set aDoubleSet; + private Set aFloatSet; + private Set aBinarySet; + private List anIntegerList; + private List> aNestedStructure; + private Map aStringMap; + private Map aIntDoubleMap; + private TestEnum testEnum; + + FakeMappedItem() { + } + + FakeMappedItem(boolean aPrimitiveBoolean, Boolean aBoolean, String aString, Integer anInteger, + int aPrimitiveInteger, Byte aByte, byte aPrimitiveByte, Long aLong, long aPrimitiveLong, + Short aShort, short aPrimitiveShort, Double aDouble, double aPrimitiveDouble, Float aFloat, + float aPrimitiveFloat, BigDecimal aBigDecimal, SdkBytes aBinaryValue, FakeDocument aFakeDocument, + Set aStringSet, Set anIntegerSet, Set aByteSet, + Set aLongSet, Set aShortSet, Set aDoubleSet, Set aFloatSet, + Set aBinarySet, List anIntegerList, + List> aNestedStructure, Map aStringMap, + Map aIntDoubleMap, TestEnum testEnum) { + this.aPrimitiveBoolean = aPrimitiveBoolean; + this.aBoolean = aBoolean; + this.aString = aString; + this.anInteger = anInteger; + this.aPrimitiveInteger = aPrimitiveInteger; + this.aByte = aByte; + this.aPrimitiveByte = aPrimitiveByte; + this.aLong = aLong; + this.aPrimitiveLong = aPrimitiveLong; + this.aShort = aShort; + this.aPrimitiveShort = aPrimitiveShort; + this.aDouble = aDouble; + this.aPrimitiveDouble = aPrimitiveDouble; + this.aFloat = aFloat; + this.aPrimitiveFloat = aPrimitiveFloat; + this.aBigDecimal = aBigDecimal; + this.aBinaryValue = aBinaryValue; + this.aFakeDocument = aFakeDocument; + this.aStringSet = aStringSet; + this.anIntegerSet = anIntegerSet; + this.aByteSet = aByteSet; + this.aLongSet = aLongSet; + this.aShortSet = aShortSet; + this.aDoubleSet = aDoubleSet; + this.aFloatSet = aFloatSet; + this.aBinarySet = aBinarySet; + this.anIntegerList = anIntegerList; + this.aNestedStructure = aNestedStructure; + this.aStringMap = aStringMap; + this.aIntDoubleMap = aIntDoubleMap; + this.testEnum = testEnum; + } + + public static Builder builder() { + return new Builder(); + } + + boolean isAPrimitiveBoolean() { + return aPrimitiveBoolean; + } + + void setAPrimitiveBoolean(boolean aPrimitiveBoolean) { + this.aPrimitiveBoolean = aPrimitiveBoolean; + } + + Boolean getABoolean() { + return aBoolean; + } + + void setABoolean(Boolean aBoolean) { + this.aBoolean = aBoolean; + } + + String getAString() { + return aString; + } + + void setAString(String aString) { + this.aString = aString; + } + + Integer getAnInteger() { + return anInteger; + } + + void setAnInteger(Integer anInteger) { + this.anInteger = anInteger; + } + + int getAPrimitiveInteger() { + return aPrimitiveInteger; + } + + void setAPrimitiveInteger(int aPrimitiveInteger) { + this.aPrimitiveInteger = aPrimitiveInteger; + } + + Byte getAByte() { + return aByte; + } + + void setAByte(Byte aByte) { + this.aByte = aByte; + } + + byte getAPrimitiveByte() { + return aPrimitiveByte; + } + + void setAPrimitiveByte(byte aPrimitiveByte) { + this.aPrimitiveByte = aPrimitiveByte; + } + + Long getALong() { + return aLong; + } + + void setALong(Long aLong) { + this.aLong = aLong; + } + + long getAPrimitiveLong() { + return aPrimitiveLong; + } + + void setAPrimitiveLong(long aPrimitiveLong) { + this.aPrimitiveLong = aPrimitiveLong; + } + + Short getAShort() { + return aShort; + } + + void setAShort(Short 
aShort) { + this.aShort = aShort; + } + + short getAPrimitiveShort() { + return aPrimitiveShort; + } + + void setAPrimitiveShort(short aPrimitiveShort) { + this.aPrimitiveShort = aPrimitiveShort; + } + + Double getADouble() { + return aDouble; + } + + void setADouble(Double aDouble) { + this.aDouble = aDouble; + } + + double getAPrimitiveDouble() { + return aPrimitiveDouble; + } + + void setAPrimitiveDouble(double aPrimitiveDouble) { + this.aPrimitiveDouble = aPrimitiveDouble; + } + + Float getAFloat() { + return aFloat; + } + + void setAFloat(Float aFloat) { + this.aFloat = aFloat; + } + + BigDecimal aBigDecimal() { + return aBigDecimal; + } + + void setABigDecimal(BigDecimal aBigDecimal) { + this.aBigDecimal = aBigDecimal; + } + + float getAPrimitiveFloat() { + return aPrimitiveFloat; + } + + void setAPrimitiveFloat(float aPrimitiveFloat) { + this.aPrimitiveFloat = aPrimitiveFloat; + } + + SdkBytes getABinaryValue() { + return aBinaryValue; + } + + void setABinaryValue(SdkBytes aBinaryValue) { + this.aBinaryValue = aBinaryValue; + } + + FakeDocument getAFakeDocument() { + return aFakeDocument; + } + + void setAFakeDocument(FakeDocument aFakeDocument) { + this.aFakeDocument = aFakeDocument; + } + + Set getAStringSet() { + return aStringSet; + } + + void setAStringSet(Set aStringSet) { + this.aStringSet = aStringSet; + } + + Set getAnIntegerSet() { + return anIntegerSet; + } + + void setAnIntegerSet(Set anIntegerSet) { + this.anIntegerSet = anIntegerSet; + } + + Set getAByteSet() { + return aByteSet; + } + + void setAByteSet(Set aByteSet) { + this.aByteSet = aByteSet; + } + + Set getALongSet() { + return aLongSet; + } + + void setALongSet(Set aLongSet) { + this.aLongSet = aLongSet; + } + + Set getAShortSet() { + return aShortSet; + } + + void setAShortSet(Set aShortSet) { + this.aShortSet = aShortSet; + } + + Set getADoubleSet() { + return aDoubleSet; + } + + void setADoubleSet(Set aDoubleSet) { + this.aDoubleSet = aDoubleSet; + } + + Set getAFloatSet() { + return aFloatSet; + } + + void setAFloatSet(Set aFloatSet) { + this.aFloatSet = aFloatSet; + } + + Set getABinarySet() { + return aBinarySet; + } + + void setABinarySet(Set aBinarySet) { + this.aBinarySet = aBinarySet; + } + + List getAnIntegerList() { + return anIntegerList; + } + + void setAnIntegerList(List anIntegerList) { + this.anIntegerList = anIntegerList; + } + + List> getANestedStructure() { + return aNestedStructure; + } + + void setANestedStructure(List> aNestedStructure) { + this.aNestedStructure = aNestedStructure; + } + + Map getAStringMap() { + return aStringMap; + } + + void setAStringMap(Map aStringMap) { + this.aStringMap = aStringMap; + } + + Map getAIntDoubleMap() { + return aIntDoubleMap; + } + + void setAIntDoubleMap(Map aIntDoubleMap) { + this.aIntDoubleMap = aIntDoubleMap; + } + + TestEnum getTestEnum() { + return testEnum; + } + + void setTestEnum(TestEnum testEnum) { + this.testEnum = testEnum; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FakeMappedItem that = (FakeMappedItem) o; + return aPrimitiveBoolean == that.aPrimitiveBoolean && + aPrimitiveInteger == that.aPrimitiveInteger && + aPrimitiveByte == that.aPrimitiveByte && + aPrimitiveLong == that.aPrimitiveLong && + aPrimitiveShort == that.aPrimitiveShort && + Double.compare(that.aPrimitiveDouble, aPrimitiveDouble) == 0 && + Float.compare(that.aPrimitiveFloat, aPrimitiveFloat) == 0 && + Objects.equals(aBoolean, that.aBoolean) && + 
Objects.equals(aString, that.aString) && + Objects.equals(anInteger, that.anInteger) && + Objects.equals(aByte, that.aByte) && + Objects.equals(aLong, that.aLong) && + Objects.equals(aShort, that.aShort) && + Objects.equals(aDouble, that.aDouble) && + Objects.equals(aFloat, that.aFloat) && + Objects.equals(aBinaryValue, that.aBinaryValue) && + Objects.equals(aFakeDocument, that.aFakeDocument) && + Objects.equals(aStringSet, that.aStringSet) && + Objects.equals(anIntegerSet, that.anIntegerSet) && + Objects.equals(aByteSet, that.aByteSet) && + Objects.equals(aLongSet, that.aLongSet) && + Objects.equals(aShortSet, that.aShortSet) && + Objects.equals(aDoubleSet, that.aDoubleSet) && + Objects.equals(aFloatSet, that.aFloatSet) && + Objects.equals(aBinarySet, that.aBinarySet) && + Objects.equals(anIntegerList, that.anIntegerList) && + Objects.equals(aNestedStructure, that.aNestedStructure) && + Objects.equals(aStringMap, that.aStringMap) && + Objects.equals(aIntDoubleMap, that.aIntDoubleMap) && + Objects.equals(testEnum, that.testEnum); + } + + @Override + public int hashCode() { + return Objects.hash(aPrimitiveBoolean, aBoolean, aString, anInteger, aPrimitiveInteger, aByte, + aPrimitiveByte, aLong, aPrimitiveLong, aShort, aPrimitiveShort, aDouble, + aPrimitiveDouble, aFloat, aPrimitiveFloat, aBinaryValue, aFakeDocument, aStringSet, + anIntegerSet, aByteSet, aLongSet, aShortSet, aDoubleSet, aFloatSet, aBinarySet, + anIntegerList, aNestedStructure, aStringMap, aIntDoubleMap, testEnum); + } + + public enum TestEnum { + ONE, + TWO, + THREE; + } + + private static class Builder { + private boolean aPrimitiveBoolean; + private Boolean aBoolean; + private String aString; + private Integer anInteger; + private int aPrimitiveInteger; + private Byte aByte; + private byte aPrimitiveByte; + private Long aLong; + private long aPrimitiveLong; + private Short aShort; + private short aPrimitiveShort; + private Double aDouble; + private double aPrimitiveDouble; + private Float aFloat; + private float aPrimitiveFloat; + private BigDecimal aBigDecimal; + private SdkBytes aBinaryValue; + private FakeDocument aFakeDocument; + private Set aStringSet; + private Set anIntegerSet; + private Set aByteSet; + private Set aLongSet; + private Set aShortSet; + private Set aDoubleSet; + private Set aFloatSet; + private Set aBinarySet; + private List anIntegerList; + private List> aNestedStructure; + private Map aStringMap; + private Map aIntDoubleMap; + private TestEnum testEnum; + + Builder aPrimitiveBoolean(boolean aPrimitiveBoolean) { + this.aPrimitiveBoolean = aPrimitiveBoolean; + return this; + } + + Builder aBoolean(Boolean aBoolean) { + this.aBoolean = aBoolean; + return this; + } + + Builder aString(String aString) { + this.aString = aString; + return this; + } + + Builder anInteger(Integer anInteger) { + this.anInteger = anInteger; + return this; + } + + Builder aPrimitiveInteger(int aPrimitiveInteger) { + this.aPrimitiveInteger = aPrimitiveInteger; + return this; + } + + Builder aByte(Byte aByte) { + this.aByte = aByte; + return this; + } + + Builder aPrimitiveByte(byte aPrimitiveByte) { + this.aPrimitiveByte = aPrimitiveByte; + return this; + } + + Builder aLong(Long aLong) { + this.aLong = aLong; + return this; + } + + Builder aPrimitiveLong(long aPrimitiveLong) { + this.aPrimitiveLong = aPrimitiveLong; + return this; + } + + Builder aShort(Short aShort) { + this.aShort = aShort; + return this; + } + + Builder aPrimitiveShort(short aPrimitiveShort) { + this.aPrimitiveShort = aPrimitiveShort; + return this; + } + + 
Builder aDouble(Double aDouble) { + this.aDouble = aDouble; + return this; + } + + Builder aPrimitiveDouble(double aPrimitiveDouble) { + this.aPrimitiveDouble = aPrimitiveDouble; + return this; + } + + Builder aFloat(Float aFloat) { + this.aFloat = aFloat; + return this; + } + + Builder aPrimitiveFloat(float aPrimitiveFloat) { + this.aPrimitiveFloat = aPrimitiveFloat; + return this; + } + + Builder aBigDecimal(BigDecimal aBigDecimal) { + this.aBigDecimal = aBigDecimal; + return this; + } + + Builder aBinaryValue(SdkBytes aBinaryValue) { + this.aBinaryValue = aBinaryValue; + return this; + } + + Builder aFakeDocument(FakeDocument aFakeDocument) { + this.aFakeDocument = aFakeDocument; + return this; + } + + Builder aStringSet(Set aStringSet) { + this.aStringSet = aStringSet; + return this; + } + + Builder anIntegerSet(Set anIntegerSet) { + this.anIntegerSet = anIntegerSet; + return this; + } + + Builder aByteSet(Set aByteSet) { + this.aByteSet = aByteSet; + return this; + } + + Builder aLongSet(Set aLongSet) { + this.aLongSet = aLongSet; + return this; + } + + Builder aShortSet(Set aShortSet) { + this.aShortSet = aShortSet; + return this; + } + + Builder aDoubleSet(Set aDoubleSet) { + this.aDoubleSet = aDoubleSet; + return this; + } + + Builder aFloatSet(Set aFloatSet) { + this.aFloatSet = aFloatSet; + return this; + } + + Builder aBinarySet(Set aBinarySet) { + this.aBinarySet = aBinarySet; + return this; + } + + Builder anIntegerList(List anIntegerList) { + this.anIntegerList = anIntegerList; + return this; + } + + Builder aNestedStructure(List> aNestedStructure) { + this.aNestedStructure = aNestedStructure; + return this; + } + + Builder aStringMap(Map aStringMap) { + this.aStringMap = aStringMap; + return this; + } + + Builder aIntDoubleMap(Map aIntDoubleMap) { + this.aIntDoubleMap = aIntDoubleMap; + return this; + } + + Builder testEnum(TestEnum testEnum) { + this.testEnum = testEnum; + return this; + } + + public FakeMappedItem build() { + return new FakeMappedItem(aPrimitiveBoolean, aBoolean, aString, anInteger, aPrimitiveInteger, aByte, + aPrimitiveByte, aLong, aPrimitiveLong, aShort, aPrimitiveShort, aDouble, + aPrimitiveDouble, aFloat, aPrimitiveFloat, aBigDecimal, aBinaryValue, aFakeDocument, + aStringSet, anIntegerSet, aByteSet, aLongSet, aShortSet, aDoubleSet, + aFloatSet, aBinarySet, anIntegerList, aNestedStructure, aStringMap, aIntDoubleMap, + testEnum); + } + } + } + + private static class FakeDocument { + private String documentString; + private Integer documentInteger; + + FakeDocument() { + } + + private FakeDocument(String documentString, Integer documentInteger) { + this.documentString = documentString; + this.documentInteger = documentInteger; + } + + private static FakeDocument of(String documentString, Integer documentInteger) { + return new FakeDocument(documentString, documentInteger); + } + + String getDocumentString() { + return documentString; + } + + void setDocumentString(String documentString) { + this.documentString = documentString; + } + + Integer getDocumentInteger() { + return documentInteger; + } + + void setDocumentInteger(Integer documentInteger) { + this.documentInteger = documentInteger; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FakeDocument that = (FakeDocument) o; + return Objects.equals(documentString, that.documentString) && + Objects.equals(documentInteger, that.documentInteger); + } + + @Override + public int hashCode() { + 
return Objects.hash(documentString, documentInteger); + } + } + + private static class FakeAbstractSubclass extends FakeAbstractSuperclass { + + } + + private static class FakeBrokenClass { + FakeAbstractSuperclass abstractObject; + + FakeAbstractSuperclass getAbstractObject() { + return abstractObject; + } + + void setAbstractObject(FakeAbstractSuperclass abstractObject) { + this.abstractObject = abstractObject; + } + } + + private static abstract class FakeAbstractSuperclass { + private String aString; + + String getAString() { + return aString; + } + + void setAString(String aString) { + this.aString = aString; + } + } + + private static final Collection> ATTRIBUTES = Arrays.asList( + StaticAttribute.builder(FakeMappedItem.class, Boolean.class) + .name("a_primitive_boolean") + .getter(FakeMappedItem::isAPrimitiveBoolean) + .setter(FakeMappedItem::setAPrimitiveBoolean) + .build(), + StaticAttribute.builder(FakeMappedItem.class, Boolean.class) + .name("a_boolean") + .getter(FakeMappedItem::getABoolean) + .setter(FakeMappedItem::setABoolean) + .build(), + StaticAttribute.builder(FakeMappedItem.class, String.class) + .name("a_string") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString) + .build() + ); + + private StaticTableSchema createSimpleTableSchema() { + return StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .attributes(ATTRIBUTES) + .build(); + } + + private static class TestStaticTableTag implements StaticTableTag { + @Override + public Consumer modifyMetadata() { + return metadata -> metadata.addCustomMetadataObject(TABLE_TAG_KEY, TABLE_TAG_VALUE); + } + } + + @Mock + private AttributeConverterProvider provider1; + + @Mock + private AttributeConverterProvider provider2; + + @Mock + private AttributeConverter attributeConverter1; + + @Mock + private AttributeConverter attributeConverter2; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void itemType_returnsCorrectClass() { + assertThat(FakeItem.getTableSchema().itemType(), is(equalTo(EnhancedType.of(FakeItem.class)))); + } + + @Test + public void getTableMetadata_hasCorrectFields() { + TableMetadata tableMetadata = FakeItemWithSort.getTableSchema().tableMetadata(); + + assertThat(tableMetadata.primaryPartitionKey(), is("id")); + assertThat(tableMetadata.primarySortKey(), is(Optional.of("sort"))); + } + + @Test + public void itemToMap_returnsCorrectMapWithMultipleAttributes() { + Map attributeMap = createSimpleTableSchema().itemToMap(FAKE_ITEM, false); + + assertThat(attributeMap.size(), is(3)); + assertThat(attributeMap, hasEntry("a_boolean", ATTRIBUTE_VALUE_B)); + assertThat(attributeMap, hasEntry("a_primitive_boolean", ATTRIBUTE_VALUE_B)); + assertThat(attributeMap, hasEntry("a_string", ATTRIBUTE_VALUE_S)); + } + + @Test + public void itemToMap_omitsNullAttributes() { + FakeMappedItem fakeMappedItemWithNulls = FakeMappedItem.builder().aPrimitiveBoolean(true).build(); + Map attributeMap = createSimpleTableSchema().itemToMap(fakeMappedItemWithNulls, true); + + assertThat(attributeMap.size(), is(1)); + assertThat(attributeMap, hasEntry("a_primitive_boolean", ATTRIBUTE_VALUE_B)); + } + + @Test + public void itemToMap_filtersAttributes() { + Map attributeMap = createSimpleTableSchema() + .itemToMap(FAKE_ITEM, asList("a_boolean", "a_string")); + + assertThat(attributeMap.size(), is(2)); + assertThat(attributeMap, hasEntry("a_boolean", ATTRIBUTE_VALUE_B)); + assertThat(attributeMap, hasEntry("a_string", ATTRIBUTE_VALUE_S)); + } 
+ + @Test(expected = IllegalArgumentException.class) + public void itemToMap_attributeNotFound_throwsIllegalArgumentException() { + createSimpleTableSchema().itemToMap(FAKE_ITEM, singletonList("unknown_key")); + } + + @Test + public void mapToItem_returnsCorrectItemWithMultipleAttributes() { + Map attributeValueMap = new HashMap<>(); + attributeValueMap.put("a_boolean", ATTRIBUTE_VALUE_B); + attributeValueMap.put("a_primitive_boolean", ATTRIBUTE_VALUE_B); + attributeValueMap.put("a_string", ATTRIBUTE_VALUE_S); + + FakeMappedItem fakeMappedItem = + createSimpleTableSchema().mapToItem(Collections.unmodifiableMap(attributeValueMap)); + + assertThat(fakeMappedItem, is(FAKE_ITEM)); + } + + @Test + public void mapToItem_unknownAttributes_doNotCauseErrors() { + Map attributeValueMap = new HashMap<>(); + attributeValueMap.put("unknown_attribute", ATTRIBUTE_VALUE_S); + + createSimpleTableSchema().mapToItem(Collections.unmodifiableMap(attributeValueMap)); + } + + @Test(expected = IllegalArgumentException.class) + public void mapToItem_attributesWrongType_throwsException() { + Map attributeValueMap = new HashMap<>(); + attributeValueMap.put("a_boolean", ATTRIBUTE_VALUE_S); + attributeValueMap.put("a_primitive_boolean", ATTRIBUTE_VALUE_S); + attributeValueMap.put("a_string", ATTRIBUTE_VALUE_B); + + createSimpleTableSchema().mapToItem(Collections.unmodifiableMap(attributeValueMap)); + } + + @Test + public void mapperCanHandleEnum() { + verifyNullableAttribute(EnhancedType.of(FakeMappedItem.TestEnum.class), + a -> a.name("value") + .getter(FakeMappedItem::getTestEnum) + .setter(FakeMappedItem::setTestEnum), + FakeMappedItem.builder().testEnum(FakeMappedItem.TestEnum.ONE).build(), + AttributeValue.builder().s("ONE").build()); + } + + @Test + public void mapperCanHandleDocument() { + FakeDocument fakeDocument = FakeDocument.of("test-123", 123); + + Map expectedMap = new HashMap<>(); + expectedMap.put("documentInteger", AttributeValue.builder().n("123").build()); + expectedMap.put("documentString", AttributeValue.builder().s("test-123").build()); + + verifyNullableAttribute(EnhancedType.documentOf(FakeDocument.class, FAKE_DOCUMENT_TABLE_SCHEMA), + a -> a.name("value") + .getter(FakeMappedItem::getAFakeDocument) + .setter(FakeMappedItem::setAFakeDocument), + FakeMappedItem.builder().aFakeDocument(fakeDocument).build(), + AttributeValue.builder().m(expectedMap).build()); + } + + @Test + public void mapperCanHandleDocumentWithNullValues() { + verifyNullAttribute(EnhancedType.documentOf(FakeDocument.class, FAKE_DOCUMENT_TABLE_SCHEMA), + a -> a.name("value") + .getter(FakeMappedItem::getAFakeDocument) + .setter(FakeMappedItem::setAFakeDocument), + FakeMappedItem.builder().build()); + } + + @Test + public void mapperCanHandleInteger() { + verifyNullableAttribute(EnhancedType.of(Integer.class), a -> a.name("value") + .getter(FakeMappedItem::getAnInteger) + .setter(FakeMappedItem::setAnInteger), + FakeMappedItem.builder().anInteger(123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandlePrimitiveInteger() { + verifyAttribute(EnhancedType.of(int.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveInteger) + .setter(FakeMappedItem::setAPrimitiveInteger), + FakeMappedItem.builder().aPrimitiveInteger(123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandleBoolean() { + verifyNullableAttribute(EnhancedType.of(Boolean.class), + a -> a.name("value") + .getter(FakeMappedItem::getABoolean) + 
.setter(FakeMappedItem::setABoolean), + FakeMappedItem.builder().aBoolean(true).build(), + AttributeValue.builder().bool(true).build()); + } + + @Test + public void mapperCanHandlePrimitiveBoolean() { + verifyAttribute(EnhancedType.of(boolean.class), + a -> a.name("value") + .getter(FakeMappedItem::isAPrimitiveBoolean) + .setter(FakeMappedItem::setAPrimitiveBoolean), + FakeMappedItem.builder().aPrimitiveBoolean(true).build(), + AttributeValue.builder().bool(true).build()); + } + + @Test + public void mapperCanHandleString() { + verifyNullableAttribute(EnhancedType.of(String.class), + a -> a.name("value") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString), + FakeMappedItem.builder().aString("onetwothree").build(), + AttributeValue.builder().s("onetwothree").build()); + } + + @Test + public void mapperCanHandleLong() { + verifyNullableAttribute(EnhancedType.of(Long.class), + a -> a.name("value") + .getter(FakeMappedItem::getALong) + .setter(FakeMappedItem::setALong), + FakeMappedItem.builder().aLong(123L).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandlePrimitiveLong() { + verifyAttribute(EnhancedType.of(long.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveLong) + .setter(FakeMappedItem::setAPrimitiveLong), + FakeMappedItem.builder().aPrimitiveLong(123L).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandleShort() { + verifyNullableAttribute(EnhancedType.of(Short.class), + a -> a.name("value") + .getter(FakeMappedItem::getAShort) + .setter(FakeMappedItem::setAShort), + FakeMappedItem.builder().aShort((short)123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandlePrimitiveShort() { + verifyAttribute(EnhancedType.of(short.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveShort) + .setter(FakeMappedItem::setAPrimitiveShort), + FakeMappedItem.builder().aPrimitiveShort((short)123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandleByte() { + verifyNullableAttribute(EnhancedType.of(Byte.class), + a -> a.name("value") + .getter(FakeMappedItem::getAByte) + .setter(FakeMappedItem::setAByte), + FakeMappedItem.builder().aByte((byte)123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandlePrimitiveByte() { + verifyAttribute(EnhancedType.of(byte.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveByte) + .setter(FakeMappedItem::setAPrimitiveByte), + FakeMappedItem.builder().aPrimitiveByte((byte)123).build(), + AttributeValue.builder().n("123").build()); + } + + @Test + public void mapperCanHandleDouble() { + verifyNullableAttribute(EnhancedType.of(Double.class), + a -> a.name("value") + .getter(FakeMappedItem::getADouble) + .setter(FakeMappedItem::setADouble), + FakeMappedItem.builder().aDouble(1.23).build(), + AttributeValue.builder().n("1.23").build()); + } + + @Test + public void mapperCanHandlePrimitiveDouble() { + verifyAttribute(EnhancedType.of(double.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveDouble) + .setter(FakeMappedItem::setAPrimitiveDouble), + FakeMappedItem.builder().aPrimitiveDouble(1.23).build(), + AttributeValue.builder().n("1.23").build()); + } + + @Test + public void mapperCanHandleFloat() { + verifyNullableAttribute(EnhancedType.of(Float.class), + a -> a.name("value") + .getter(FakeMappedItem::getAFloat) + .setter(FakeMappedItem::setAFloat), + 
FakeMappedItem.builder().aFloat(1.23f).build(), + AttributeValue.builder().n("1.23").build()); + } + + @Test + public void mapperCanHandlePrimitiveFloat() { + verifyAttribute(EnhancedType.of(float.class), + a -> a.name("value") + .getter(FakeMappedItem::getAPrimitiveFloat) + .setter(FakeMappedItem::setAPrimitiveFloat), + FakeMappedItem.builder().aPrimitiveFloat(1.23f).build(), + AttributeValue.builder().n("1.23").build()); + } + + + @Test + public void mapperCanHandleBinary() { + SdkBytes sdkBytes = SdkBytes.fromString("test", UTF_8); + verifyNullableAttribute(EnhancedType.of(SdkBytes.class), + a -> a.name("value") + .getter(FakeMappedItem::getABinaryValue) + .setter(FakeMappedItem::setABinaryValue), + FakeMappedItem.builder().aBinaryValue(sdkBytes).build(), + AttributeValue.builder().b(sdkBytes).build()); + } + + @Test + public void mapperCanHandleSimpleList() { + verifyNullableAttribute(EnhancedType.listOf(Integer.class), + a -> a.name("value") + .getter(FakeMappedItem::getAnIntegerList) + .setter(FakeMappedItem::setAnIntegerList), + FakeMappedItem.builder().anIntegerList(asList(1, 2, 3)).build(), + AttributeValue.builder().l(asList(AttributeValue.builder().n("1").build(), + AttributeValue.builder().n("2").build(), + AttributeValue.builder().n("3").build())).build()); + } + + @Test + public void mapperCanHandleNestedLists() { + FakeMappedItem fakeMappedItem = + FakeMappedItem.builder() + .aNestedStructure(singletonList(singletonList(FakeDocument.of("nested", null)))) + .build(); + + Map documentMap = new HashMap<>(); + documentMap.put("documentString", AttributeValue.builder().s("nested").build()); + documentMap.put("documentInteger", AttributeValue.builder().nul(true).build()); + + AttributeValue attributeValue = + AttributeValue.builder() + .l(singletonList(AttributeValue.builder() + .l(AttributeValue.builder().m(documentMap).build()) + .build())) + .build(); + + verifyNullableAttribute( + EnhancedType.listOf(EnhancedType.listOf(EnhancedType.documentOf(FakeDocument.class, FAKE_DOCUMENT_TABLE_SCHEMA))), + a -> a.name("value") + .getter(FakeMappedItem::getANestedStructure) + .setter(FakeMappedItem::setANestedStructure), + fakeMappedItem, + attributeValue); + } + + @Test + public void mapperCanHandleIntegerSet() { + Set valueSet = new HashSet<>(asList(1, 2, 3)); + List expectedList = valueSet.stream().map(Objects::toString).collect(toList()); + + verifyNullableAttribute(EnhancedType.setOf(Integer.class), + a -> a.name("value") + .getter(FakeMappedItem::getAnIntegerSet) + .setter(FakeMappedItem::setAnIntegerSet), + FakeMappedItem.builder().anIntegerSet(valueSet).build(), + AttributeValue.builder().ns(expectedList).build()); + } + + @Test + public void mapperCanHandleStringSet() { + Set valueSet = new HashSet<>(asList("one", "two", "three")); + List expectedList = valueSet.stream().map(Objects::toString).collect(toList()); + + verifyNullableAttribute(EnhancedType.setOf(String.class), + a -> a.name("value") + .getter(FakeMappedItem::getAStringSet) + .setter(FakeMappedItem::setAStringSet), + FakeMappedItem.builder().aStringSet(valueSet).build(), + AttributeValue.builder().ss(expectedList).build()); + } + + @Test + public void mapperCanHandleLongSet() { + Set valueSet = new HashSet<>(asList(1L, 2L, 3L)); + List expectedList = valueSet.stream().map(Objects::toString).collect(toList()); + + verifyNullableAttribute(EnhancedType.setOf(Long.class), + a -> a.name("value") + .getter(FakeMappedItem::getALongSet) + .setter(FakeMappedItem::setALongSet), + 
FakeMappedItem.builder().aLongSet(valueSet).build(), + AttributeValue.builder().ns(expectedList).build()); + } + + @Test + public void mapperCanHandleShortSet() { + Set valueSet = new HashSet<>(asList((short) 1, (short) 2, (short) 3)); + List expectedList = valueSet.stream().map(Objects::toString).collect(toList()); + + verifyNullableAttribute(EnhancedType.setOf(Short.class), + a -> a.name("value") + .getter(FakeMappedItem::getAShortSet) + .setter(FakeMappedItem::setAShortSet), + FakeMappedItem.builder().aShortSet(valueSet).build(), + AttributeValue.builder().ns(expectedList).build()); + } + + @Test + public void mapperCanHandleByteSet() { + Set valueSet = new HashSet<>(asList((byte) 1, (byte) 2, (byte) 3)); + List expectedList = valueSet.stream().map(Objects::toString).collect(toList()); + + verifyNullableAttribute(EnhancedType.setOf(Byte.class), + a -> a.name("value") + .getter(FakeMappedItem::getAByteSet) + .setter(FakeMappedItem::setAByteSet), + FakeMappedItem.builder().aByteSet(valueSet).build(), + AttributeValue.builder().ns(expectedList).build()); + } + + @Test + public void mapperCanHandleDoubleSet() { + Set valueSet = new HashSet<>(asList(1.2, 3.4, 5.6)); + List expectedList = valueSet.stream().map(Object::toString).collect(toList()); + + verifyNullableAttribute(EnhancedType.setOf(Double.class), + a -> a.name("value") + .getter(FakeMappedItem::getADoubleSet) + .setter(FakeMappedItem::setADoubleSet), + FakeMappedItem.builder().aDoubleSet(valueSet).build(), + AttributeValue.builder().ns(expectedList).build()); + } + + @Test + public void mapperCanHandleFloatSet() { + Set valueSet = new HashSet<>(asList(1.2f, 3.4f, 5.6f)); + List expectedList = valueSet.stream().map(Object::toString).collect(toList()); + + verifyNullableAttribute(EnhancedType.setOf(Float.class), + a -> a.name("value") + .getter(FakeMappedItem::getAFloatSet) + .setter(FakeMappedItem::setAFloatSet), + FakeMappedItem.builder().aFloatSet(valueSet).build(), + AttributeValue.builder().ns(expectedList).build()); + } + + @Test + public void mapperCanHandleGenericMap() { + Map stringMap = new ConcurrentHashMap<>(); + stringMap.put("one", "two"); + stringMap.put("three", "four"); + + Map attributeValueMap = new HashMap<>(); + attributeValueMap.put("one", AttributeValue.builder().s("two").build()); + attributeValueMap.put("three", AttributeValue.builder().s("four").build()); + + verifyNullableAttribute(EnhancedType.mapOf(String.class, String.class), + a -> a.name("value") + .getter(FakeMappedItem::getAStringMap) + .setter(FakeMappedItem::setAStringMap), + FakeMappedItem.builder().aStringMap(stringMap).build(), + AttributeValue.builder().m(attributeValueMap).build()); + } + + @Test + public void mapperCanHandleIntDoubleMap() { + Map intDoubleMap = new ConcurrentHashMap<>(); + intDoubleMap.put(1, 1.0); + intDoubleMap.put(2, 3.0); + + Map attributeValueMap = new HashMap<>(); + attributeValueMap.put("1", AttributeValue.builder().n("1.0").build()); + attributeValueMap.put("2", AttributeValue.builder().n("3.0").build()); + + verifyNullableAttribute(EnhancedType.mapOf(Integer.class, Double.class), + a -> a.name("value") + .getter(FakeMappedItem::getAIntDoubleMap) + .setter(FakeMappedItem::setAIntDoubleMap), + FakeMappedItem.builder().aIntDoubleMap(intDoubleMap).build(), + AttributeValue.builder().m(attributeValueMap).build()); + } + + + @Test + public void getAttributeValue_correctlyMapsSuperclassAttributes() { + FakeItem fakeItem = FakeItem.builder().id("id-value").build(); + fakeItem.setSubclassAttribute("subclass-value"); + + 
AttributeValue attributeValue = FakeItem.getTableSchema().attributeValue(fakeItem, "subclass_attribute"); + + assertThat(attributeValue, is(AttributeValue.builder().s("subclass-value").build())); + } + + @Test + public void getAttributeValue_correctlyMapsComposedClassAttributes() { + FakeItem fakeItem = FakeItem.builder().id("id-value") + .composedObject(FakeItemComposedClass.builder().composedAttribute("composed-value").build()) + .build(); + + AttributeValue attributeValue = FakeItem.getTableSchema().attributeValue(fakeItem, "composed_attribute"); + + assertThat(attributeValue, is(AttributeValue.builder().s("composed-value").build())); + } + + @Test + public void mapToItem_correctlyConstructsComposedClass() { + Map itemMap = new HashMap<>(); + itemMap.put("id", AttributeValue.builder().s("id-value").build()); + itemMap.put("composed_attribute", AttributeValue.builder().s("composed-value").build()); + + FakeItem fakeItem = FakeItem.getTableSchema().mapToItem(itemMap); + + assertThat(fakeItem, + is(FakeItem.builder() + .id("id-value") + .composedObject(FakeItemComposedClass.builder() + .composedAttribute("composed-value") + .build()) + .build())); + } + + @Test + public void buildAbstractTableSchema() { + StaticTableSchema tableSchema = + StaticTableSchema.builder(FakeMappedItem.class) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString)) + .build(); + + assertThat(tableSchema.itemToMap(FAKE_ITEM, false), is(singletonMap("aString", stringValue("test-string")))); + + exception.expect(UnsupportedOperationException.class); + exception.expectMessage("abstract"); + tableSchema.mapToItem(singletonMap("aString", stringValue("test-string"))); + } + + @Test + public void buildAbstractWithFlatten() { + StaticTableSchema tableSchema = + StaticTableSchema.builder(FakeMappedItem.class) + .flatten(FAKE_DOCUMENT_TABLE_SCHEMA, + FakeMappedItem::getAFakeDocument, + FakeMappedItem::setAFakeDocument) + .build(); + + FakeDocument document = FakeDocument.of("test-string", null); + FakeMappedItem item = FakeMappedItem.builder().aFakeDocument(document).build(); + + assertThat(tableSchema.itemToMap(item, true), + is(singletonMap("documentString", AttributeValue.builder().s("test-string").build()))); + } + + @Test + public void buildAbstractExtends() { + StaticTableSchema superclassTableSchema = + StaticTableSchema.builder(FakeAbstractSuperclass.class) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeAbstractSuperclass::getAString) + .setter(FakeAbstractSuperclass::setAString)) + .build(); + + StaticTableSchema subclassTableSchema = + StaticTableSchema.builder(FakeAbstractSubclass.class) + .extend(superclassTableSchema) + .build(); + + FakeAbstractSubclass item = new FakeAbstractSubclass(); + item.setAString("test-string"); + + assertThat(subclassTableSchema.itemToMap(item, true), + is(singletonMap("aString", AttributeValue.builder().s("test-string").build()))); + } + + @Test + public void buildAbstractTagWith() { + + StaticTableSchema abstractTableSchema = + StaticTableSchema + .builder(FakeDocument.class) + .tags(new TestStaticTableTag()) + .build(); + + assertThat(abstractTableSchema.tableMetadata().customMetadataObject(TABLE_TAG_KEY, String.class), + is(Optional.of(TABLE_TAG_VALUE))); + } + + @Test + public void buildConcreteTagWith() { + + StaticTableSchema concreteTableSchema = + StaticTableSchema + .builder(FakeDocument.class) + .newItemSupplier(FakeDocument::new) + .tags(new TestStaticTableTag()) + .build(); + + 
assertThat(concreteTableSchema.tableMetadata().customMetadataObject(TABLE_TAG_KEY, String.class), + is(Optional.of(TABLE_TAG_VALUE))); + } + + @Test + public void instantiateFlattenedAbstractClassShouldThrowException() { + StaticTableSchema superclassTableSchema = + StaticTableSchema.builder(FakeAbstractSuperclass.class) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeAbstractSuperclass::getAString) + .setter(FakeAbstractSuperclass::setAString)) + .build(); + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("abstract"); + StaticTableSchema.builder(FakeBrokenClass.class) + .newItemSupplier(FakeBrokenClass::new) + .flatten(superclassTableSchema, + FakeBrokenClass::getAbstractObject, + FakeBrokenClass::setAbstractObject); + } + + @Test + public void addSingleAttributeConverterProvider() { + when(provider1.converterFor(EnhancedType.of(String.class))).thenReturn(attributeConverter1); + + StaticTableSchema tableSchema = + StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString)) + .attributeConverterProviders(provider1) + .build(); + + assertThat(tableSchema.attributeConverterProvider(), is(provider1)); + } + + @Test + public void usesCustomAttributeConverterProvider() { + String originalString = "test-string"; + String expectedString = "test-string-custom"; + + when(provider1.converterFor(EnhancedType.of(String.class))).thenReturn(attributeConverter1); + when(attributeConverter1.transformFrom(any())).thenReturn(AttributeValue.builder().s(expectedString).build()); + + StaticTableSchema tableSchema = + StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString)) + .attributeConverterProviders(provider1) + .build(); + + Map resultMap = + tableSchema.itemToMap(FakeMappedItem.builder().aString(originalString).build(), false); + assertThat(resultMap.get("aString").s(), is(expectedString)); + } + + @Test + public void usesCustomAttributeConverterProviders() { + String originalString = "test-string"; + String expectedString = "test-string-custom"; + + when(provider2.converterFor(EnhancedType.of(String.class))).thenReturn(attributeConverter2); + when(attributeConverter2.transformFrom(any())).thenReturn(AttributeValue.builder().s(expectedString).build()); + + StaticTableSchema tableSchema = + StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString)) + .attributeConverterProviders(provider1, provider2) + .build(); + + Map resultMap = + tableSchema.itemToMap(FakeMappedItem.builder().aString(originalString).build(), false); + assertThat(resultMap.get("aString").s(), is(expectedString)); + } + + @Test + public void noConverterProvider_throwsException_whenMissingAttributeConverters() { + exception.expect(NullPointerException.class); + + StaticTableSchema tableSchema = + StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString)) + .attributeConverterProviders(Collections.emptyList()) + .build(); + } + + @Test + public void 
noConverterProvider_handlesCorrectly_whenAttributeConvertersAreSupplied() { + String originalString = "test-string"; + String expectedString = "test-string-custom"; + + when(attributeConverter1.transformFrom(any())).thenReturn(AttributeValue.builder().s(expectedString).build()); + + StaticTableSchema tableSchema = + StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString) + .attributeConverter(attributeConverter1)) + .attributeConverterProviders(Collections.emptyList()) + .build(); + + Map resultMap = tableSchema.itemToMap(FakeMappedItem.builder().aString(originalString).build(), + false); + assertThat(resultMap.get("aString").s(), is(expectedString)); + } + + private void verifyAttribute(EnhancedType attributeType, + Consumer> staticAttribute, + FakeMappedItem fakeMappedItem, + AttributeValue attributeValue) { + + StaticTableSchema tableSchema = StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .addAttribute(attributeType, staticAttribute) + .build(); + Map expectedMap = singletonMap("value", attributeValue); + + Map resultMap = tableSchema.itemToMap(fakeMappedItem, false); + assertThat(resultMap, is(expectedMap)); + + FakeMappedItem resultItem = tableSchema.mapToItem(expectedMap); + assertThat(resultItem, is(fakeMappedItem)); + } + + private void verifyNullAttribute(EnhancedType attributeType, + Consumer> staticAttribute, + FakeMappedItem fakeMappedItem) { + + StaticTableSchema tableSchema = StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .addAttribute(attributeType, staticAttribute) + .build(); + Map expectedMap = singletonMap("value", nullAttributeValue()); + + Map resultMap = tableSchema.itemToMap(fakeMappedItem, false); + assertThat(resultMap, is(expectedMap)); + + FakeMappedItem resultItem = tableSchema.mapToItem(expectedMap); + assertThat(resultItem, is(nullValue())); + } + + private void verifyNullableAttribute(EnhancedType attributeType, + Consumer> staticAttribute, + FakeMappedItem fakeMappedItem, + AttributeValue attributeValue) { + + verifyAttribute(attributeType, staticAttribute, fakeMappedItem, attributeValue); + verifyNullAttribute(attributeType, staticAttribute, FakeMappedItem.builder().build()); + } +} + diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AbstractBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AbstractBean.java new file mode 100644 index 000000000000..e22a846f3ce8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AbstractBean.java @@ -0,0 +1,30 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; + +@DynamoDbBean +public class AbstractBean { + private String attribute2; + + public String getAttribute2() { + return attribute2; + } + public void setAttribute2(String attribute2) { + this.attribute2 = attribute2; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AbstractImmutable.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AbstractImmutable.java new file mode 100644 index 000000000000..f3e9b3bbcb3d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AbstractImmutable.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; + +@DynamoDbImmutable(builder = AbstractImmutable.Builder.class) +public class AbstractImmutable { + private final String attribute2; + + private AbstractImmutable(Builder b) { + this.attribute2 = b.attribute2; + } + + public String attribute2() { + return attribute2; + } + + public static Builder builder() { + return new Builder(); + } + + public static final class Builder { + private String attribute2; + + public Builder attribute2(String attribute2) { + this.attribute2 = attribute2; + return this; + } + + public AbstractImmutable build() { + return new AbstractImmutable(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AttributeConverterBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AttributeConverterBean.java new file mode 100644 index 000000000000..3f27afd6173f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AttributeConverterBean.java @@ -0,0 +1,130 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbConvertedBy; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@DynamoDbBean +public class AttributeConverterBean { + private String id; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + private AttributeItem attributeItem; + + @DynamoDbConvertedBy(CustomAttributeConverter.class) + public AttributeItem getAttributeItem() { + return attributeItem; + } + public void setAttributeItem(AttributeItem attributeItem) { + this.attributeItem = attributeItem; + } + + private Integer integerAttribute; + public Integer getIntegerAttribute() { + return integerAttribute; + } + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AttributeConverterBean that = (AttributeConverterBean) o; + return Objects.equals(id, that.id) && + Objects.equals(integerAttribute, that.integerAttribute) && + Objects.equals(attributeItem, that.attributeItem); + } + + @Override + public int hashCode() { + return Objects.hash(id, integerAttribute, attributeItem); + } + + public static class CustomAttributeConverter implements AttributeConverter { + + public CustomAttributeConverter() { + } + + @Override + public AttributeValue transformFrom(AttributeItem input) { + return EnhancedAttributeValue.fromString(input.getInnerValue()).toAttributeValue(); + } + + @Override + public AttributeItem transformTo(AttributeValue input) { + return new AttributeItem(input.s()); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(AttributeItem.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + } + + public static class AttributeItem { + private String innerValue; + + public AttributeItem() { + } + + AttributeItem(String value) { + innerValue = value; + } + + public String getInnerValue() { + return innerValue; + } + + public void setInnerValue(String innerValue) { + this.innerValue = innerValue; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AttributeItem that = (AttributeItem) o; + return Objects.equals(innerValue, that.innerValue); + } + + @Override + public int hashCode() { + return Objects.hash(innerValue); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AttributeConverterNoConstructorBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AttributeConverterNoConstructorBean.java new file mode 100644 index 000000000000..5881fbfaf3b3 --- /dev/null +++ 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AttributeConverterNoConstructorBean.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbConvertedBy; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@DynamoDbBean +public class AttributeConverterNoConstructorBean extends AbstractBean { + + private String id; + + @DynamoDbConvertedBy(AttributeConverterNoConstructorBean.CustomAttributeConverter.class) + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public static class CustomAttributeConverter implements AttributeConverter { + + private CustomAttributeConverter() { + } + + @Override + public AttributeValue transformFrom(Object input) { + return null; + } + + @Override + public Object transformTo(AttributeValue input) { + return null; + } + + @Override + public EnhancedType type() { + return null; + } + + @Override + public AttributeValueType attributeValueType() { + return null; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/CommonTypesBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/CommonTypesBean.java new file mode 100644 index 000000000000..d9d1010b656b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/CommonTypesBean.java @@ -0,0 +1,128 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Objects; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class CommonTypesBean { + private String id; + private Boolean booleanAttribute; + private Integer integerAttribute; + private Long longAttribute; + private Short shortAttribute; + private Byte byteAttribute; + private Double doubleAttribute; + private Float floatAttribute; + private SdkBytes binaryAttribute; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + + public void setId(String id) { + this.id = id; + } + + public Boolean getBooleanAttribute() { + return booleanAttribute; + } + + public void setBooleanAttribute(Boolean booleanAttribute) { + this.booleanAttribute = booleanAttribute; + } + + public Integer getIntegerAttribute() { + return integerAttribute; + } + + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } + + public Long getLongAttribute() { + return longAttribute; + } + + public void setLongAttribute(Long longAttribute) { + this.longAttribute = longAttribute; + } + + public Short getShortAttribute() { + return shortAttribute; + } + + public void setShortAttribute(Short shortAttribute) { + this.shortAttribute = shortAttribute; + } + + public Byte getByteAttribute() { + return byteAttribute; + } + + public void setByteAttribute(Byte byteAttribute) { + this.byteAttribute = byteAttribute; + } + + public Double getDoubleAttribute() { + return doubleAttribute; + } + + public void setDoubleAttribute(Double doubleAttribute) { + this.doubleAttribute = doubleAttribute; + } + + public Float getFloatAttribute() { + return floatAttribute; + } + + public void setFloatAttribute(Float floatAttribute) { + this.floatAttribute = floatAttribute; + } + + public SdkBytes getBinaryAttribute() { + return binaryAttribute; + } + + public void setBinaryAttribute(SdkBytes binaryAttribute) { + this.binaryAttribute = binaryAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CommonTypesBean that = (CommonTypesBean) o; + return Objects.equals(id, that.id) && + Objects.equals(booleanAttribute, that.booleanAttribute) && + Objects.equals(integerAttribute, that.integerAttribute) && + Objects.equals(longAttribute, that.longAttribute) && + Objects.equals(shortAttribute, that.shortAttribute) && + Objects.equals(byteAttribute, that.byteAttribute) && + Objects.equals(doubleAttribute, that.doubleAttribute) && + Objects.equals(floatAttribute, that.floatAttribute) && + Objects.equals(binaryAttribute, that.binaryAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, booleanAttribute, integerAttribute, longAttribute, shortAttribute, byteAttribute, doubleAttribute, floatAttribute, binaryAttribute); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/DocumentBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/DocumentBean.java new file mode 100644 index 000000000000..2bbf94279f03 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/DocumentBean.java @@ -0,0 
+1,90 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *     http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans;
+
+import java.util.List;
+import java.util.Map;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
+
+@DynamoDbBean
+public class DocumentBean {
+    private String id;
+    private String attribute1;
+    private AbstractBean abstractBean;
+    private AbstractImmutable abstractImmutable;
+    private List<AbstractBean> abstractBeanList;
+    private List<AbstractImmutable> abstractImmutableList;
+    private Map<String, AbstractBean> abstractBeanMap;
+    private Map<String, AbstractImmutable> abstractImmutableMap;
+
+    @DynamoDbPartitionKey
+    public String getId() {
+        return this.id;
+    }
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getAttribute1() {
+        return attribute1;
+    }
+    public void setAttribute1(String attribute1) {
+        this.attribute1 = attribute1;
+    }
+
+    public AbstractBean getAbstractBean() {
+        return abstractBean;
+    }
+    public void setAbstractBean(AbstractBean abstractBean) {
+        this.abstractBean = abstractBean;
+    }
+
+    public List<AbstractBean> getAbstractBeanList() {
+        return abstractBeanList;
+    }
+    public void setAbstractBeanList(List<AbstractBean> abstractBeanList) {
+        this.abstractBeanList = abstractBeanList;
+    }
+
+    public Map<String, AbstractBean> getAbstractBeanMap() {
+        return abstractBeanMap;
+    }
+    public void setAbstractBeanMap(Map<String, AbstractBean> abstractBeanMap) {
+        this.abstractBeanMap = abstractBeanMap;
+    }
+
+    public AbstractImmutable getAbstractImmutable() {
+        return abstractImmutable;
+    }
+    public void setAbstractImmutable(AbstractImmutable abstractImmutable) {
+        this.abstractImmutable = abstractImmutable;
+    }
+
+    public List<AbstractImmutable> getAbstractImmutableList() {
+        return abstractImmutableList;
+    }
+    public void setAbstractImmutableList(List<AbstractImmutable> abstractImmutableList) {
+        this.abstractImmutableList = abstractImmutableList;
+    }
+
+    public Map<String, AbstractImmutable> getAbstractImmutableMap() {
+        return abstractImmutableMap;
+    }
+    public void setAbstractImmutableMap(Map<String, AbstractImmutable> abstractImmutableMap) {
+        this.abstractImmutableMap = abstractImmutableMap;
+    }
+}
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/DocumentImmutable.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/DocumentImmutable.java
new file mode 100644
index 000000000000..6b15c94a8363
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/DocumentImmutable.java
@@ -0,0 +1,136 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *     http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file.
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbImmutable(builder = DocumentImmutable.Builder.class) +public class DocumentImmutable { + private final String id; + private final String attribute1; + private final AbstractBean abstractBean; + private final AbstractImmutable abstractImmutable; + private final List abstractBeanList; + private final List abstractImmutableList; + private final Map abstractBeanMap; + private final Map abstractImmutableMap; + + private DocumentImmutable(Builder b) { + this.id = b.id; + this.attribute1 = b.attribute1; + this.abstractBean = b.abstractBean; + this.abstractImmutable = b.abstractImmutable; + this.abstractBeanList = b.abstractBeanList; + this.abstractImmutableList = b.abstractImmutableList; + this.abstractBeanMap = b.abstractBeanMap; + this.abstractImmutableMap = b.abstractImmutableMap; + } + + @DynamoDbPartitionKey + public String id() { + return this.id; + } + + public String attribute1() { + return attribute1; + } + + public AbstractBean abstractBean() { + return abstractBean; + } + + public List abstractBeanList() { + return abstractBeanList; + } + + public Map abstractBeanMap() { + return abstractBeanMap; + } + + public AbstractImmutable abstractImmutable() { + return abstractImmutable; + } + + public List abstractImmutableList() { + return abstractImmutableList; + } + + public Map abstractImmutableMap() { + return abstractImmutableMap; + } + + public static Builder builder() { + return new Builder(); + } + + public static final class Builder { + private String id; + private String attribute1; + private AbstractBean abstractBean; + private AbstractImmutable abstractImmutable; + private List abstractBeanList; + private List abstractImmutableList; + private Map abstractBeanMap; + private Map abstractImmutableMap; + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder attribute1(String attribute1) { + this.attribute1 = attribute1; + return this; + } + + public Builder abstractBean(AbstractBean abstractBean) { + this.abstractBean = abstractBean; + return this; + } + + public Builder abstractImmutable(AbstractImmutable abstractImmutable) { + this.abstractImmutable = abstractImmutable; + return this; + } + + public Builder abstractBeanList(List abstractBeanList) { + this.abstractBeanList = abstractBeanList; + return this; + } + + public Builder abstractImmutableList(List abstractImmutableList) { + this.abstractImmutableList = abstractImmutableList; + return this; + } + + public Builder abstractBeanMap(Map abstractBeanMap) { + this.abstractBeanMap = abstractBeanMap; + return this; + } + + public Builder abstractImmutableMap(Map abstractImmutableMap) { + this.abstractImmutableMap = abstractImmutableMap; + return this; + } + + public DocumentImmutable build() { + return new DocumentImmutable(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersInvalidBean.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersInvalidBean.java new file mode 100644 index 000000000000..60c2d2d9fc9c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersInvalidBean.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbConvertedBy; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@DynamoDbBean(converterProviders = {}) +public class EmptyConverterProvidersInvalidBean { + private String id; + private Integer integerAttribute; + + @DynamoDbPartitionKey + @DynamoDbConvertedBy(CustomStringAttributeConverter.class) + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public Integer getIntegerAttribute() { + return integerAttribute; + } + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + EmptyConverterProvidersInvalidBean that = (EmptyConverterProvidersInvalidBean) o; + return Objects.equals(id, that.id) && + Objects.equals(integerAttribute, that.integerAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, integerAttribute); + } + + public static class CustomStringAttributeConverter implements AttributeConverter { + final static String DEFAULT_SUFFIX = "-custom"; + + public CustomStringAttributeConverter() { + } + + @Override + public AttributeValue transformFrom(String input) { + return EnhancedAttributeValue.fromString(input + DEFAULT_SUFFIX).toAttributeValue(); + } + + @Override + public String transformTo(AttributeValue input) { + return input.s(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(String.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersValidBean.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersValidBean.java new file mode 100644 index 000000000000..ceee30289a91 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersValidBean.java @@ -0,0 +1,119 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.IntegerStringConverter; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbConvertedBy; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@DynamoDbBean(converterProviders = {}) +public class EmptyConverterProvidersValidBean { + private String id; + private Integer integerAttribute; + + @DynamoDbPartitionKey + @DynamoDbConvertedBy(CustomStringAttributeConverter.class) + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + @DynamoDbConvertedBy(CustomIntegerAttributeConverter.class) + public Integer getIntegerAttribute() { + return integerAttribute; + } + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + EmptyConverterProvidersValidBean that = (EmptyConverterProvidersValidBean) o; + return Objects.equals(id, that.id) && + Objects.equals(integerAttribute, that.integerAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, integerAttribute); + } + + public static class CustomStringAttributeConverter implements AttributeConverter { + final static String DEFAULT_SUFFIX = "-custom"; + + public CustomStringAttributeConverter() { + } + + @Override + public AttributeValue transformFrom(String input) { + return EnhancedAttributeValue.fromString(input + DEFAULT_SUFFIX).toAttributeValue(); + } + + @Override + public String transformTo(AttributeValue input) { + return input.s(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(String.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + } + + public static class CustomIntegerAttributeConverter implements AttributeConverter { + final static Integer 
DEFAULT_INCREMENT = 10; + + public CustomIntegerAttributeConverter() { + } + + @Override + public AttributeValue transformFrom(Integer input) { + return EnhancedAttributeValue.fromNumber(IntegerStringConverter.create().toString(input + DEFAULT_INCREMENT)) + .toAttributeValue(); + } + + @Override + public Integer transformTo(AttributeValue input) { + return Integer.valueOf(input.n()); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Integer.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EnumBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EnumBean.java new file mode 100644 index 000000000000..a9684141751c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EnumBean.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.List; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class EnumBean { + private String id; + private TestEnum testEnum; + private List testEnumList; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + + public void setId(String id) { + this.id = id; + } + + public TestEnum getTestEnum() { + return testEnum; + } + + public void setTestEnum(TestEnum testEnum) { + this.testEnum = testEnum; + } + + public List getTestEnumList() { + return testEnumList; + } + + public void setTestEnumList(List testEnumList) { + this.testEnumList = testEnumList; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + EnumBean enumBean = (EnumBean) o; + + if (id != null ? !id.equals(enumBean.id) : enumBean.id != null) return false; + if (testEnum != enumBean.testEnum) return false; + return testEnumList != null ? testEnumList.equals(enumBean.testEnumList) : enumBean.testEnumList == null; + } + + @Override + public int hashCode() { + int result = id != null ? id.hashCode() : 0; + result = 31 * result + (testEnum != null ? testEnum.hashCode() : 0); + result = 31 * result + (testEnumList != null ? 
testEnumList.hashCode() : 0); + return result; + } + + public enum TestEnum { + ONE, + TWO, + THREE; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ExtendedBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ExtendedBean.java new file mode 100644 index 000000000000..246a35c3852f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ExtendedBean.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class ExtendedBean extends AbstractBean { + private String id; + private String attribute1; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public String getAttribute1() { + return attribute1; + } + public void setAttribute1(String attribute1) { + this.attribute1 = attribute1; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedBeanBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedBeanBean.java new file mode 100644 index 000000000000..a296aeda1851 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedBeanBean.java @@ -0,0 +1,50 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbFlatten; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class FlattenedBeanBean { + private String id; + private String attribute1; + private AbstractBean abstractBean; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public String getAttribute1() { + return attribute1; + } + public void setAttribute1(String attribute1) { + this.attribute1 = attribute1; + } + + @DynamoDbFlatten + public AbstractBean getAbstractBean() { + return abstractBean; + } + public void setAbstractBean(AbstractBean abstractBean) { + this.abstractBean = abstractBean; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedBeanImmutable.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedBeanImmutable.java new file mode 100644 index 000000000000..73e482932f64 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedBeanImmutable.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbFlatten; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbImmutable(builder = FlattenedBeanImmutable.Builder.class) +public class FlattenedBeanImmutable { + private final String id; + private final String attribute1; + private final AbstractBean abstractBean; + + private FlattenedBeanImmutable(Builder b) { + this.id = b.id; + this.attribute1 = b.attribute1; + this.abstractBean = b.abstractBean; + } + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + + public String getAttribute1() { + return attribute1; + } + + @DynamoDbFlatten + public AbstractBean getAbstractBean() { + return abstractBean; + } + + public static final class Builder { + private String id; + private String attribute1; + private AbstractBean abstractBean; + + public Builder setId(String id) { + this.id = id; + return this; + } + + public Builder setAttribute1(String attribute1) { + this.attribute1 = attribute1; + return this; + } + + public Builder setAbstractBean(AbstractBean abstractBean) { + this.abstractBean = abstractBean; + return this; + } + + public FlattenedBeanImmutable build() { + return new FlattenedBeanImmutable(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedImmutableBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedImmutableBean.java new file mode 100644 index 000000000000..8f4ce00c31ac --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedImmutableBean.java @@ -0,0 +1,50 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbFlatten; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class FlattenedImmutableBean { + private String id; + private String attribute1; + private AbstractImmutable abstractImmutable; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public String getAttribute1() { + return attribute1; + } + public void setAttribute1(String attribute1) { + this.attribute1 = attribute1; + } + + @DynamoDbFlatten + public AbstractImmutable getAbstractImmutable() { + return abstractImmutable; + } + public void setAbstractImmutable(AbstractImmutable abstractImmutable) { + this.abstractImmutable = abstractImmutable; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedImmutableImmutable.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedImmutableImmutable.java new file mode 100644 index 000000000000..90cd9a598110 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/FlattenedImmutableImmutable.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbFlatten; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbImmutable(builder = FlattenedImmutableImmutable.Builder.class) +public class FlattenedImmutableImmutable { + private final String id; + private final String attribute1; + private final AbstractImmutable abstractImmutable; + + private FlattenedImmutableImmutable(Builder b) { + this.id = b.id; + this.attribute1 = b.attribute1; + this.abstractImmutable = b.abstractImmutable; + } + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + + public String getAttribute1() { + return attribute1; + } + + @DynamoDbFlatten + public AbstractImmutable getAbstractImmutable() { + return abstractImmutable; + } + + public static final class Builder { + private String id; + private String attribute1; + private AbstractImmutable abstractImmutable; + + public Builder setId(String id) { + this.id = id; + return this; + } + + public Builder setAttribute1(String attribute1) { + this.attribute1 = attribute1; + return this; + } + + public Builder setAbstractImmutable(AbstractImmutable abstractImmutable) { + this.abstractImmutable = abstractImmutable; + return this; + } + + public FlattenedImmutableImmutable build() { + return new FlattenedImmutableImmutable(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/IgnoredAttributeBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/IgnoredAttributeBean.java new file mode 100644 index 000000000000..bae02ad276f8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/IgnoredAttributeBean.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbIgnore; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class IgnoredAttributeBean { + private String id; + private Integer integerAttribute; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + + public void setId(String id) { + this.id = id; + } + + @DynamoDbIgnore + public Integer getIntegerAttribute() { + return integerAttribute; + } + + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/InvalidBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/InvalidBean.java new file mode 100644 index 000000000000..495b9dc45437 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/InvalidBean.java @@ -0,0 +1,30 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +public class InvalidBean { + private String id; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ListBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ListBean.java new file mode 100644 index 000000000000..044a95a970e4 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ListBean.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans;
+
+import java.util.List;
+import java.util.Objects;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
+
+@DynamoDbBean
+public class ListBean {
+    private String id;
+    private List<String> stringList;
+    private List<List<String>> stringListList;
+
+    @DynamoDbPartitionKey
+    public String getId() {
+        return this.id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public List<String> getStringList() {
+        return stringList;
+    }
+
+    public void setStringList(List<String> stringList) {
+        this.stringList = stringList;
+    }
+
+    public List<List<String>> getStringListList() {
+        return stringListList;
+    }
+
+    public void setStringListList(List<List<String>> stringListList) {
+        this.stringListList = stringListList;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        ListBean listBean = (ListBean) o;
+        return Objects.equals(id, listBean.id) &&
+               Objects.equals(stringList, listBean.stringList) &&
+               Objects.equals(stringListList, listBean.stringListList);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, stringList, stringListList);
+    }
+}
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/MapBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/MapBean.java
new file mode 100644
index 000000000000..6dfa67792d19
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/MapBean.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *     http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans;
+
+import java.util.Map;
+import java.util.Objects;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
+import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;
+
+@DynamoDbBean
+public class MapBean {
+    private String id;
+    private Map<String, String> stringMap;
+    private Map<String, Map<String, String>> nestedStringMap;
+
+    @DynamoDbPartitionKey
+    public String getId() {
+        return this.id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public Map<String, String> getStringMap() {
+        return stringMap;
+    }
+
+    public void setStringMap(Map<String, String> stringMap) {
+        this.stringMap = stringMap;
+    }
+
+    public Map<String, Map<String, String>> getNestedStringMap() {
+        return nestedStringMap;
+    }
+
+    public void setNestedStringMap(Map<String, Map<String, String>> nestedStringMap) {
+        this.nestedStringMap = nestedStringMap;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        MapBean mapBean = (MapBean) o;
+        return Objects.equals(id, mapBean.id) &&
+               Objects.equals(stringMap, mapBean.stringMap) &&
+               Objects.equals(nestedStringMap, mapBean.nestedStringMap);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, stringMap, nestedStringMap);
+    }
+}
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/MultipleConverterProvidersBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/MultipleConverterProvidersBean.java
new file mode 100644
index 000000000000..b3a3578c299f
--- /dev/null
+++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/MultipleConverterProvidersBean.java
@@ -0,0 +1,139 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ *     http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Map; +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.IntegerStringConverter; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.ImmutableMap; + +@DynamoDbBean(converterProviders = { + MultipleConverterProvidersBean.FirstAttributeConverterProvider.class, + MultipleConverterProvidersBean.SecondAttributeConverterProvider.class}) +public class MultipleConverterProvidersBean { + private String id; + private Integer integerAttribute; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public Integer getIntegerAttribute() { + return integerAttribute; + } + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + MultipleConverterProvidersBean that = (MultipleConverterProvidersBean) o; + return Objects.equals(id, that.id) && + Objects.equals(integerAttribute, that.integerAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, integerAttribute); + } + + public static class FirstAttributeConverterProvider implements AttributeConverterProvider { + @SuppressWarnings("unchecked") + @Override + public <T> AttributeConverter<T> converterFor(EnhancedType<T> enhancedType) { + return null; + } + } + + public static class SecondAttributeConverterProvider implements AttributeConverterProvider { + + private final Map<EnhancedType<?>, AttributeConverter<?>> converterCache = ImmutableMap.of( + EnhancedType.of(String.class), new CustomStringAttributeConverter(), + EnhancedType.of(Integer.class), new CustomIntegerAttributeConverter() + ); + + @SuppressWarnings("unchecked") + @Override + public <T> AttributeConverter<T> converterFor(EnhancedType<T> enhancedType) { + return (AttributeConverter<T>) converterCache.get(enhancedType); + } + } + + private static class CustomStringAttributeConverter implements AttributeConverter<String> { + + final static String DEFAULT_SUFFIX = "-custom"; + + @Override + public AttributeValue transformFrom(String input) { + return EnhancedAttributeValue.fromString(input + DEFAULT_SUFFIX).toAttributeValue(); + } + + @Override + public String transformTo(AttributeValue input) { + return input.s(); + } + + @Override + public EnhancedType<String> type() { + return EnhancedType.of(String.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + } + + private static class CustomIntegerAttributeConverter implements AttributeConverter<Integer> { + + final static Integer DEFAULT_INCREMENT = 10; + + @Override + public AttributeValue transformFrom(Integer input) { + return EnhancedAttributeValue.fromNumber(IntegerStringConverter.create().toString(input + 
DEFAULT_INCREMENT)) + .toAttributeValue(); + } + + @Override + public Integer transformTo(AttributeValue input) { + return Integer.valueOf(input.n()); + } + + @Override + public EnhancedType<Integer> type() { + return EnhancedType.of(Integer.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/NoConstructorConverterProvidersBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/NoConstructorConverterProvidersBean.java new file mode 100644 index 000000000000..72212fe4a2d5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/NoConstructorConverterProvidersBean.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; + +@DynamoDbBean(converterProviders = NoConstructorConverterProvidersBean.CustomAttributeConverterProvider.class) +public class NoConstructorConverterProvidersBean extends AbstractBean { + + public static class CustomAttributeConverterProvider implements AttributeConverterProvider { + + private CustomAttributeConverterProvider() { + } + + @SuppressWarnings("unchecked") + @Override + public <T> AttributeConverter<T> converterFor(EnhancedType<T> enhancedType) { + return null; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ParameterizedAbstractBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ParameterizedAbstractBean.java new file mode 100644 index 000000000000..7fc7b22fa0b3 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ParameterizedAbstractBean.java @@ -0,0 +1,30 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; + +@DynamoDbBean +public class ParameterizedAbstractBean { + private String attribute2; + + public String getAttribute2() { + return attribute2; + } + public void setAttribute2(String attribute2) { + this.attribute2 = attribute2; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ParameterizedDocumentBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ParameterizedDocumentBean.java new file mode 100644 index 000000000000..70ee1f915ae2 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ParameterizedDocumentBean.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class ParameterizedDocumentBean { + private String id; + private String attribute1; + private ParameterizedAbstractBean abstractBean; + private List> abstractBeanList; + private Map> abstractBeanMap; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public String getAttribute1() { + return attribute1; + } + public void setAttribute1(String attribute1) { + this.attribute1 = attribute1; + } + + public ParameterizedAbstractBean getAbstractBean() { + return abstractBean; + } + public void setAbstractBean(ParameterizedAbstractBean abstractBean) { + this.abstractBean = abstractBean; + } + + public List> getAbstractBeanList() { + return abstractBeanList; + } + + public void setAbstractBeanList(List> abstractBeanList) { + this.abstractBeanList = abstractBeanList; + } + + public Map> getAbstractBeanMap() { + return abstractBeanMap; + } + + public void setAbstractBeanMap(Map> abstractBeanMap) { + this.abstractBeanMap = abstractBeanMap; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/PrimitiveTypesBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/PrimitiveTypesBean.java new file mode 100644 index 000000000000..cdb2279fe448 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/PrimitiveTypesBean.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class PrimitiveTypesBean { + private String id; + private boolean booleanAttribute; + private int integerAttribute; + private long longAttribute; + private short shortAttribute; + private byte byteAttribute; + private double doubleAttribute; + private float floatAttribute; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public boolean isBooleanAttribute() { + return booleanAttribute; + } + + public void setBooleanAttribute(boolean booleanAttribute) { + this.booleanAttribute = booleanAttribute; + } + + public int getIntegerAttribute() { + return integerAttribute; + } + + public void setIntegerAttribute(int integerAttribute) { + this.integerAttribute = integerAttribute; + } + + public long getLongAttribute() { + return longAttribute; + } + + public void setLongAttribute(long longAttribute) { + this.longAttribute = longAttribute; + } + + public short getShortAttribute() { + return shortAttribute; + } + + public void setShortAttribute(short shortAttribute) { + this.shortAttribute = shortAttribute; + } + + public byte getByteAttribute() { + return byteAttribute; + } + + public void setByteAttribute(byte byteAttribute) { + this.byteAttribute = byteAttribute; + } + + public double getDoubleAttribute() { + return doubleAttribute; + } + + public void setDoubleAttribute(double doubleAttribute) { + this.doubleAttribute = doubleAttribute; + } + + public float getFloatAttribute() { + return floatAttribute; + } + + public void setFloatAttribute(float floatAttribute) { + this.floatAttribute = floatAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PrimitiveTypesBean that = (PrimitiveTypesBean) o; + return booleanAttribute == that.booleanAttribute && + integerAttribute == that.integerAttribute && + longAttribute == that.longAttribute && + shortAttribute == that.shortAttribute && + byteAttribute == that.byteAttribute && + Double.compare(that.doubleAttribute, doubleAttribute) == 0 && + Float.compare(that.floatAttribute, floatAttribute) == 0 && + Objects.equals(id, that.id); + } + + @Override + public int hashCode() { + return Objects.hash(id, booleanAttribute, integerAttribute, longAttribute, shortAttribute, byteAttribute, doubleAttribute, floatAttribute); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/RemappedAttributeBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/RemappedAttributeBean.java new file mode 100644 index 000000000000..d1a019f79c3d --- /dev/null +++ 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/RemappedAttributeBean.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbAttribute; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class RemappedAttributeBean { + private String id; + + @DynamoDbPartitionKey + @DynamoDbAttribute("remappedAttribute") + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SecondaryIndexBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SecondaryIndexBean.java new file mode 100644 index 000000000000..dc7cee1f0a9e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SecondaryIndexBean.java @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbSecondaryPartitionKey; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbSecondarySortKey; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbSortKey; + +@DynamoDbBean +public class SecondaryIndexBean { + private String id; + private Integer sort; + private String attribute; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + + public void setId(String id) { + this.id = id; + } + + @DynamoDbSortKey + @DynamoDbSecondaryPartitionKey(indexNames = "gsi") + public Integer getSort() { + return sort; + } + + public void setSort(Integer sort) { + this.sort = sort; + } + + @DynamoDbSecondarySortKey(indexNames = {"lsi", "gsi"}) + public String getAttribute() { + return attribute; + } + + public void setAttribute(String attribute) { + this.attribute = attribute; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SetBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SetBean.java new file mode 100644 index 000000000000..488cd45869fd --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SetBean.java @@ -0,0 +1,127 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Objects; +import java.util.Set; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class SetBean { + private String id; + private Set<String> stringSet; + private Set<Integer> integerSet; + private Set<Long> longSet; + private Set<Short> shortSet; + private Set<Byte> byteSet; + private Set<Double> doubleSet; + private Set<Float> floatSet; + private Set<SdkBytes> binarySet; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public Set<String> getStringSet() { + return stringSet; + } + public void setStringSet(Set<String> stringSet) { + this.stringSet = stringSet; + } + + public Set<Integer> getIntegerSet() { + return integerSet; + } + + public void setIntegerSet(Set<Integer> integerSet) { + this.integerSet = integerSet; + } + + public Set<Long> getLongSet() { + return longSet; + } + + public void setLongSet(Set<Long> longSet) { + this.longSet = longSet; + } + + public Set<Short> getShortSet() { + return shortSet; + } + + public void setShortSet(Set<Short> shortSet) { + this.shortSet = shortSet; + } + + public Set<Byte> getByteSet() { + return byteSet; + } + + public void setByteSet(Set<Byte> byteSet) { + this.byteSet = byteSet; + } + + public Set<Double> getDoubleSet() { + return doubleSet; + } + + public void setDoubleSet(Set<Double> doubleSet) { + this.doubleSet = doubleSet; + } + + public Set<Float> getFloatSet() { + return floatSet; + } + + public void setFloatSet(Set<Float> floatSet) { + this.floatSet = floatSet; + } + + public Set<SdkBytes> getBinarySet() { + return binarySet; + } + + public void setBinarySet(Set<SdkBytes> binarySet) { + this.binarySet = binarySet; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SetBean setBean = (SetBean) o; + return Objects.equals(id, setBean.id) && + Objects.equals(stringSet, setBean.stringSet) && + Objects.equals(integerSet, setBean.integerSet) && + Objects.equals(longSet, setBean.longSet) && + Objects.equals(shortSet, setBean.shortSet) && + Objects.equals(byteSet, setBean.byteSet) && + Objects.equals(doubleSet, setBean.doubleSet) && + Objects.equals(floatSet, setBean.floatSet) && + Objects.equals(binarySet, setBean.binarySet); + } + + @Override + public int hashCode() { + return Objects.hash(id, stringSet, integerSet, longSet, shortSet, byteSet, doubleSet, floatSet, binarySet); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SetterAnnotatedBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SetterAnnotatedBean.java new file mode 100644 index 000000000000..985082eb40c7 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SetterAnnotatedBean.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbIgnore; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class SetterAnnotatedBean { + private String id; + private Integer integerAttribute; + + public String getId() { + return this.id; + } + + @DynamoDbPartitionKey + public void setId(String id) { + this.id = id; + } + + public Integer getIntegerAttribute() { + return integerAttribute; + } + + @DynamoDbIgnore + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SimpleBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SimpleBean.java new file mode 100644 index 000000000000..18942c1740a8 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SimpleBean.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class SimpleBean { + private String id; + private Integer integerAttribute; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public Integer getIntegerAttribute() { + return integerAttribute; + } + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SimpleBean that = (SimpleBean) o; + return Objects.equals(id, that.id) && + Objects.equals(integerAttribute, that.integerAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, integerAttribute); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SimpleImmutable.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SimpleImmutable.java new file mode 100644 index 000000000000..e63b49361892 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SimpleImmutable.java @@ -0,0 +1,80 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbImmutable; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbImmutable(builder = SimpleImmutable.Builder.class) +public class SimpleImmutable { + private final String id; + private final Integer integerAttribute; + + private SimpleImmutable(Builder b) { + this.id = b.id; + this.integerAttribute = b.integerAttribute; + } + + @DynamoDbPartitionKey + public String id() { + return this.id; + } + + public Integer integerAttribute() { + return integerAttribute; + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SimpleImmutable that = (SimpleImmutable) o; + return Objects.equals(id, that.id) && + Objects.equals(integerAttribute, that.integerAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, integerAttribute); + } + + public static final class Builder { + private String id; + private Integer integerAttribute; + + private Builder() { + } + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder integerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + return this; + } + + public SimpleImmutable build() { + return new SimpleImmutable(this); + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SingleConverterProvidersBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SingleConverterProvidersBean.java new file mode 100644 index 000000000000..5e715b297e63 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SingleConverterProvidersBean.java @@ -0,0 +1,129 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Map; +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.IntegerStringConverter; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.ImmutableMap; + +@DynamoDbBean(converterProviders = SingleConverterProvidersBean.CustomAttributeConverterProvider.class) +public class SingleConverterProvidersBean { + private String id; + private Integer integerAttribute; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + public Integer getIntegerAttribute() { + return integerAttribute; + } + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SingleConverterProvidersBean that = (SingleConverterProvidersBean) o; + return Objects.equals(id, that.id) && + Objects.equals(integerAttribute, that.integerAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, integerAttribute); + } + + public static class CustomAttributeConverterProvider implements AttributeConverterProvider { + + private final Map<EnhancedType<?>, AttributeConverter<?>> converterCache = ImmutableMap.of( + EnhancedType.of(String.class), new CustomStringAttributeConverter(), + EnhancedType.of(Integer.class), new CustomIntegerAttributeConverter() + ); + + @SuppressWarnings("unchecked") + @Override + public <T> AttributeConverter<T> converterFor(EnhancedType<T> enhancedType) { + return (AttributeConverter<T>) converterCache.get(enhancedType); + } + } + + private static class CustomStringAttributeConverter implements AttributeConverter<String> { + + final static String DEFAULT_SUFFIX = "-custom"; + + @Override + public AttributeValue transformFrom(String input) { + return EnhancedAttributeValue.fromString(input + DEFAULT_SUFFIX).toAttributeValue(); + } + + @Override + public String transformTo(AttributeValue input) { + return input.s(); + } + + @Override + public EnhancedType<String> type() { + return EnhancedType.of(String.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + } + + private static class CustomIntegerAttributeConverter implements AttributeConverter<Integer> { + + final static Integer DEFAULT_INCREMENT = 10; + + @Override + public AttributeValue transformFrom(Integer input) { + return EnhancedAttributeValue.fromNumber(IntegerStringConverter.create().toString(input + DEFAULT_INCREMENT)) + .toAttributeValue(); + } + + @Override + public Integer transformTo(AttributeValue input) { + return Integer.valueOf(input.n()); + } + + @Override + public EnhancedType<Integer> type() { + return EnhancedType.of(Integer.class); + } + + @Override + public AttributeValueType attributeValueType() { + return 
AttributeValueType.N; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SortKeyBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SortKeyBean.java new file mode 100644 index 000000000000..80042fd74cae --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SortKeyBean.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbSortKey; + +@DynamoDbBean +public class SortKeyBean { + private String id; + private Integer sort; + + @DynamoDbPartitionKey + public String getId() { + return this.id; + } + + public void setId(String id) { + this.id = id; + } + + @DynamoDbSortKey + public Integer getSort() { + return sort; + } + + public void setSort(Integer sort) { + this.sort = sort; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/AsyncBatchGetItemTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/AsyncBatchGetItemTest.java new file mode 100644 index 000000000000..c3893f46426a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/AsyncBatchGetItemTest.java @@ -0,0 +1,126 @@ +/* + * Copyright 2010-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mocktests; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.LocalDynamoDbAsyncTestBase.drainPublisher; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mocktests.BatchGetTestUtils.stubResponseWithUnprocessedKeys; +import static software.amazon.awssdk.enhanced.dynamodb.mocktests.BatchGetTestUtils.stubSuccessfulResponse; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.URI; +import java.util.List; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mocktests.BatchGetTestUtils.Record; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPage; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPagePublisher; +import software.amazon.awssdk.enhanced.dynamodb.model.ReadBatch; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; + +public class AsyncBatchGetItemTest { + + private DynamoDbEnhancedAsyncClient enhancedClient; + private DynamoDbAsyncTable<Record> table; + + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + @Before + public void setup() { + + DynamoDbAsyncClient dynamoDbClient = + DynamoDbAsyncClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(() -> AwsBasicCredentials.create("foo", "bar")) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .endpointDiscoveryEnabled(false) + .build(); + enhancedClient = DynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(dynamoDbClient) + .build(); + StaticTableSchema<Record> tableSchema = StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(Integer.class, + a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .build(); + table = enhancedClient.table("table", tableSchema); + } + + @Test + public void successfulResponseWithoutUnprocessedKeys_NoNextPage() { + stubSuccessfulResponse(); + SdkPublisher<BatchGetResultPage> publisher = enhancedClient.batchGetItem(r -> r.readBatches( + ReadBatch.builder(Record.class) + .mappedTableResource(table) + .build())); + + List<BatchGetResultPage> batchGetResultPages = drainPublisher(publisher, 1); + + assertThat(batchGetResultPages.size()).isEqualTo(1); + assertThat(batchGetResultPages.get(0).resultsForTable(table).size()).isEqualTo(3); + } + + @Test + public void successfulResponseWithoutUnprocessedKeys_viaFlattenedItems_NoNextPage() { + stubSuccessfulResponse(); + BatchGetResultPagePublisher publisher = enhancedClient.batchGetItem(r -> r.readBatches( + ReadBatch.builder(Record.class) + .mappedTableResource(table) + .build())); + + List<Record> records = drainPublisher(publisher.resultsForTable(table), 3); + assertThat(records.size()).isEqualTo(3); + } + + @Test + public void responseWithUnprocessedKeys_iteratePage_shouldFetchUnprocessedKeys() throws InterruptedException { + stubResponseWithUnprocessedKeys(); + SdkPublisher<BatchGetResultPage> publisher = 
enhancedClient.batchGetItem(r -> r.readBatches( + ReadBatch.builder(Record.class) + .mappedTableResource(table) + .build())); + + List<BatchGetResultPage> batchGetResultPages = drainPublisher(publisher, 2); + assertThat(batchGetResultPages.size()).isEqualTo(2); + assertThat(batchGetResultPages.get(0).resultsForTable(table).size()).isEqualTo(2); + assertThat(batchGetResultPages.get(1).resultsForTable(table).size()).isEqualTo(1); + assertThat(batchGetResultPages.size()).isEqualTo(2); + } + + @Test + public void responseWithUnprocessedKeys_iterateItems_shouldFetchUnprocessedKeys() throws InterruptedException { + stubResponseWithUnprocessedKeys(); + BatchGetResultPagePublisher publisher = enhancedClient.batchGetItem(r -> r.readBatches( + ReadBatch.builder(Record.class) + .mappedTableResource(table) + .build())); + + List<Record> records = drainPublisher(publisher.resultsForTable(table), 3); + assertThat(records.size()).isEqualTo(3); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/BatchGetItemTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/BatchGetItemTest.java new file mode 100644 index 000000000000..7c455236da94 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/BatchGetItemTest.java @@ -0,0 +1,132 @@ +/* + * Copyright 2010-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mocktests; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.mapper.StaticAttributeTags.primaryPartitionKey; +import static software.amazon.awssdk.enhanced.dynamodb.mocktests.BatchGetTestUtils.stubResponseWithUnprocessedKeys; +import static software.amazon.awssdk.enhanced.dynamodb.mocktests.BatchGetTestUtils.stubSuccessfulResponse; + +import java.net.URI; +import java.util.Iterator; +import java.util.List; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.util.stream.Collectors; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mocktests.BatchGetTestUtils.Record; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPage; +import software.amazon.awssdk.enhanced.dynamodb.model.BatchGetResultPageIterable; +import software.amazon.awssdk.enhanced.dynamodb.model.ReadBatch; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +public class BatchGetItemTest { + + private DynamoDbEnhancedClient enhancedClient; + private DynamoDbTable<Record> table; + + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + @Before + public void setup() { + + DynamoDbClient dynamoDbClient = + DynamoDbClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(() -> AwsBasicCredentials.create("foo", "bar")) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .endpointDiscoveryEnabled(false) + .build(); + enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(dynamoDbClient) + .build(); + + StaticTableSchema<Record> tableSchema = + StaticTableSchema.builder(Record.class) + .newItemSupplier(Record::new) + .addAttribute(Integer.class, a -> a.name("id") + .getter(Record::getId) + .setter(Record::setId) + .tags(primaryPartitionKey())) + .build(); + table = enhancedClient.table("table", tableSchema); + } + + @Test + public void successfulResponseWithoutUnprocessedKeys_NoNextPage() { + stubSuccessfulResponse(); + SdkIterable<BatchGetResultPage> batchGetResultPages = enhancedClient.batchGetItem(r -> r.readBatches( + ReadBatch.builder(Record.class) + .mappedTableResource(table) + .addGetItem(i -> i.key(k -> k.partitionValue(0))) + .build())); + + List<BatchGetResultPage> pages = batchGetResultPages.stream().collect(Collectors.toList()); + assertThat(pages.size()).isEqualTo(1); + } + + @Test + public void successfulResponseWithoutUnprocessedKeys_NoNextPage_viaFlattenedItems() { + stubSuccessfulResponse(); + BatchGetResultPageIterable batchGetResultPages = enhancedClient.batchGetItem(r -> r.readBatches( + ReadBatch.builder(Record.class) + .mappedTableResource(table) + .addGetItem(i -> i.key(k -> k.partitionValue(0))) + .build())); + + assertThat(batchGetResultPages.resultsForTable(table)).hasSize(3); + } + + @Test + public void responseWithUnprocessedKeys_iteratePage_shouldFetchUnprocessedKeys() { + stubResponseWithUnprocessedKeys(); + SdkIterable<BatchGetResultPage> batchGetResultPages = enhancedClient.batchGetItem(r -> r.readBatches( + ReadBatch.builder(Record.class) + .mappedTableResource(table) + .addGetItem(i -> 
i.key(k -> k.partitionValue("1"))) + .build())); + + Iterator<BatchGetResultPage> iterator = batchGetResultPages.iterator(); + BatchGetResultPage firstPage = iterator.next(); + List<Record> resultsForTable = firstPage.resultsForTable(table); + assertThat(resultsForTable.size()).isEqualTo(2); + + BatchGetResultPage secondPage = iterator.next(); + assertThat(secondPage.resultsForTable(table).size()).isEqualTo(1); + assertThat(iterator).isEmpty(); + } + + @Test + public void responseWithUnprocessedKeys_iterateItems_shouldFetchUnprocessedKeys() { + stubResponseWithUnprocessedKeys(); + BatchGetResultPageIterable batchGetResultPages = enhancedClient.batchGetItem(r -> r.readBatches( + ReadBatch.builder(Record.class) + .mappedTableResource(table) + .addGetItem(i -> i.key(k -> k.partitionValue("1"))) + .build())); + + SdkIterable<Record> results = batchGetResultPages.resultsForTable(table); + assertThat(results.stream().count()).isEqualTo(3); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/BatchGetTestUtils.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/BatchGetTestUtils.java new file mode 100644 index 000000000000..40eeede1ceeb --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/BatchGetTestUtils.java @@ -0,0 +1,78 @@ +/* + * Copyright 2010-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mocktests; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; + +import com.github.tomakehurst.wiremock.stubbing.Scenario; + +public class BatchGetTestUtils { + + private BatchGetTestUtils() { + } + + static final String RESPONSE_WITHOUT_UNPROCESSED_KEYS = "{\"Responses\":{\"table\":[{\"id\":{\"N\":\"1\"}," + + "\"value\":{\"N\":\"2\"}},{\"id\":{\"N\":\"2\"}," + + "\"value\":{\"N\":\"0\"}},{\"id\":{\"N\":\"0\"}," + + "\"value\":{\"N\":\"0\"}}]},\"UnprocessedKeys\":{}}"; + + static final String RESPONSE_WITH_UNPROCESSED_KEYS = "{\"Responses\":{\"table\":[{\"id\":{\"N\":\"1\"}," + + "\"value\":{\"N\":\"2\"}},{\"id\":{\"N\":\"0\"}," + + "\"value\":{\"N\":\"0\"}}]},\"UnprocessedKeys\":{\"table" + + "\": {\"Keys\": [{\"id\": {\"N\": \"2\"}}]}}}"; + + static final String RESPONSE_WITH_UNPROCESSED_KEYS_PROCESSED = "{\"Responses\":{\"table\":[{\"id\":{\"N\":\"2\"}," + + "\"value\":{\"N\":\"0\"}}]},\"UnprocessedKeys\":{}}"; + + static void stubSuccessfulResponse() { + stubFor(post(anyUrl()) + .willReturn(aResponse().withStatus(200).withBody(RESPONSE_WITHOUT_UNPROCESSED_KEYS))); + } + + static void stubResponseWithUnprocessedKeys() { + stubFor(post(anyUrl()) + .inScenario("unprocessed keys") + .whenScenarioStateIs(Scenario.STARTED) + .willSetStateTo("first attempt") + .willReturn(aResponse().withStatus(200) + .withBody(RESPONSE_WITH_UNPROCESSED_KEYS))); + + stubFor(post(anyUrl()) + .inScenario("unprocessed keys") + .whenScenarioStateIs("first attempt") + .willSetStateTo("second attempt") + .willReturn(aResponse().withStatus(200) + .withBody(RESPONSE_WITH_UNPROCESSED_KEYS_PROCESSED))); + } + + + static final class Record { + private int id; + + Integer getId() { + return id; + } + + Record setId(Integer id) { + this.id = id; + return this; + } + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetItemEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetItemEnhancedRequestTest.java new file mode 100644 index 000000000000..4fdbc413d7c4 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetItemEnhancedRequestTest.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +import java.util.Collections; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +@RunWith(MockitoJUnitRunner.class) +public class BatchGetItemEnhancedRequestTest { + + private static final String TABLE_NAME = "table-name"; + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private DynamoDbEnhancedClient enhancedClient; + private DynamoDbTable fakeItemMappedTable; + + + @Before + public void setupMappedTables() { + enhancedClient = DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).build(); + fakeItemMappedTable = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema()); + } + + @Test + public void builder_minimal() { + BatchGetItemEnhancedRequest builtObject = BatchGetItemEnhancedRequest.builder().build(); + + assertThat(builtObject.readBatches(), is(nullValue())); + } + + @Test + public void builder_maximal() { + ReadBatch readBatch = ReadBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addGetItem(r -> r.key(k -> k.partitionValue("key"))) + .build(); + + BatchGetItemEnhancedRequest builtObject = BatchGetItemEnhancedRequest.builder() + .readBatches(readBatch) + .build(); + + assertThat(builtObject.readBatches(), is(Collections.singletonList(readBatch))); + } + + @Test + public void builder_add_single() { + ReadBatch readBatch = ReadBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addGetItem(r -> r.key(k -> k.partitionValue("key"))) + .build(); + + BatchGetItemEnhancedRequest builtObject = BatchGetItemEnhancedRequest.builder() + .addReadBatch(readBatch) + .build(); + + assertThat(builtObject.readBatches(), is(Collections.singletonList(readBatch))); + } + + @Test + public void toBuilder() { + BatchGetItemEnhancedRequest builtObject = BatchGetItemEnhancedRequest.builder().build(); + + BatchGetItemEnhancedRequest copiedObject = builtObject.toBuilder().build(); + + assertThat(copiedObject, is(builtObject)); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteItemEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteItemEnhancedRequestTest.java new file mode 100644 index 000000000000..63f96287becf --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteItemEnhancedRequestTest.java @@ -0,0 +1,96 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +import java.util.Collections; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + +@RunWith(MockitoJUnitRunner.class) +public class BatchWriteItemEnhancedRequestTest { + + private static final String TABLE_NAME = "table-name"; + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private DynamoDbEnhancedClient enhancedClient; + private DynamoDbTable fakeItemMappedTable; + + + @Before + public void setupMappedTables() { + enhancedClient = DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).build(); + fakeItemMappedTable = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema()); + } + + + @Test + public void builder_minimal() { + BatchWriteItemEnhancedRequest builtObject = BatchWriteItemEnhancedRequest.builder().build(); + + assertThat(builtObject.writeBatches(), is(nullValue())); + } + + @Test + public void builder_maximal() { + WriteBatch writeBatch = WriteBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addDeleteItem(r -> r.key(k -> k.partitionValue("key"))) + .build(); + + BatchWriteItemEnhancedRequest builtObject = BatchWriteItemEnhancedRequest.builder() + .writeBatches(writeBatch) + .build(); + + assertThat(builtObject.writeBatches(), is(Collections.singletonList(writeBatch))); + } + + @Test + public void builder_add_single() { + WriteBatch writeBatch = WriteBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addDeleteItem(r -> r.key(k -> k.partitionValue("key"))) + .build(); + + BatchWriteItemEnhancedRequest builtObject = BatchWriteItemEnhancedRequest.builder() + .addWriteBatch(writeBatch) + .build(); + + assertThat(builtObject.writeBatches(), is(Collections.singletonList(writeBatch))); + } + + @Test + public void toBuilder() { + BatchWriteItemEnhancedRequest builtObject = BatchWriteItemEnhancedRequest.builder().build(); + + BatchWriteItemEnhancedRequest copiedObject = builtObject.toBuilder().build(); + + assertThat(copiedObject, is(builtObject)); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/CreateTableEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/CreateTableEnhancedRequestTest.java new file mode 100644 index 000000000000..6a2fc6b09f63 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/CreateTableEnhancedRequestTest.java @@ -0,0 +1,83 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +import java.util.Collections; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.dynamodb.model.Projection; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; +import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; + +@RunWith(MockitoJUnitRunner.class) +public class CreateTableEnhancedRequestTest { + + @Test + public void builder_minimal() { + CreateTableEnhancedRequest builtObject = CreateTableEnhancedRequest.builder().build(); + + assertThat(builtObject.globalSecondaryIndices(), is(nullValue())); + assertThat(builtObject.localSecondaryIndices(), is(nullValue())); + assertThat(builtObject.provisionedThroughput(), is(nullValue())); + } + + @Test + public void builder_maximal() { + EnhancedGlobalSecondaryIndex globalSecondaryIndex = + EnhancedGlobalSecondaryIndex.builder() + .indexName("gsi_1") + .projection(p -> p.projectionType(ProjectionType.ALL)) + .provisionedThroughput(getDefaultProvisionedThroughput()) + .build(); + + EnhancedLocalSecondaryIndex localSecondaryIndex = EnhancedLocalSecondaryIndex.create( + "lsi", Projection.builder().projectionType(ProjectionType.ALL).build()); + + CreateTableEnhancedRequest builtObject = CreateTableEnhancedRequest.builder() + .globalSecondaryIndices(globalSecondaryIndex) + .localSecondaryIndices(localSecondaryIndex) + .provisionedThroughput(getDefaultProvisionedThroughput()) + .build(); + + assertThat(builtObject.globalSecondaryIndices(), is(Collections.singletonList(globalSecondaryIndex))); + assertThat(builtObject.localSecondaryIndices(), is(Collections.singletonList(localSecondaryIndex))); + assertThat(builtObject.provisionedThroughput(), is(getDefaultProvisionedThroughput())); + } + + @Test + public void toBuilder() { + CreateTableEnhancedRequest builtObject = CreateTableEnhancedRequest.builder() + .provisionedThroughput(getDefaultProvisionedThroughput()) + .build(); + + CreateTableEnhancedRequest copiedObject = builtObject.toBuilder().build(); + + assertThat(copiedObject, is(builtObject)); + } + + private ProvisionedThroughput getDefaultProvisionedThroughput() { + return ProvisionedThroughput.builder() + .writeCapacityUnits(1L) + .readCapacityUnits(2L) + .build(); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/DeleteItemEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/DeleteItemEnhancedRequestTest.java new file mode 100644 index 000000000000..9d8f4b2a6c72 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/DeleteItemEnhancedRequestTest.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; + +@RunWith(MockitoJUnitRunner.class) +public class DeleteItemEnhancedRequestTest { + + @Test + public void builder_minimal() { + DeleteItemEnhancedRequest builtObject = DeleteItemEnhancedRequest.builder().build(); + + assertThat(builtObject.key(), is(nullValue())); + assertThat(builtObject.conditionExpression(), is(nullValue())); + } + + @Test + public void builder_maximal() { + Key key = Key.builder().partitionValue("key").build(); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", "attribute3") + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("three")) + .build(); + + DeleteItemEnhancedRequest builtObject = DeleteItemEnhancedRequest.builder() + .key(key) + .conditionExpression(conditionExpression) + .build(); + + assertThat(builtObject.key(), is(key)); + assertThat(builtObject.conditionExpression(), is(conditionExpression)); + } + + @Test + public void toBuilder() { + Key key = Key.builder().partitionValue("key").build(); + + DeleteItemEnhancedRequest builtObject = DeleteItemEnhancedRequest.builder().key(key).build(); + + DeleteItemEnhancedRequest copiedObject = builtObject.toBuilder().build(); + + assertThat(copiedObject, is(builtObject)); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/GetItemEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/GetItemEnhancedRequestTest.java new file mode 100644 index 000000000000..d952e1219418 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/GetItemEnhancedRequestTest.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.Key; + +@RunWith(MockitoJUnitRunner.class) +public class GetItemEnhancedRequestTest { + + @Test + public void builder_minimal() { + GetItemEnhancedRequest builtObject = GetItemEnhancedRequest.builder().build(); + + assertThat(builtObject.key(), is(nullValue())); + assertThat(builtObject.consistentRead(), is(nullValue())); + } + + @Test + public void builder_maximal() { + Key key = Key.builder().partitionValue("key").build(); + + GetItemEnhancedRequest builtObject = GetItemEnhancedRequest.builder() + .key(key) + .consistentRead(true) + .build(); + + assertThat(builtObject.key(), is(key)); + assertThat(builtObject.consistentRead(), is(true)); + } + + @Test + public void toBuilder() { + Key key = Key.builder().partitionValue("key").build(); + + GetItemEnhancedRequest builtObject = GetItemEnhancedRequest.builder() + .key(key) + .build(); + + GetItemEnhancedRequest copiedObject = builtObject.toBuilder().build(); + + assertThat(copiedObject, is(builtObject)); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/PutItemEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/PutItemEnhancedRequestTest.java new file mode 100644 index 000000000000..40d203b165b5 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/PutItemEnhancedRequestTest.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; + +@RunWith(MockitoJUnitRunner.class) +public class PutItemEnhancedRequestTest { + + @Test + public void builder_minimal() { + PutItemEnhancedRequest builtObject = PutItemEnhancedRequest.builder(FakeItem.class).build(); + + assertThat(builtObject.item(), is(nullValue())); + assertThat(builtObject.conditionExpression(), is(nullValue())); + } + + @Test + public void builder_maximal() { + FakeItem fakeItem = createUniqueFakeItem(); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", "attribute3") + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("three")) + .build(); + + PutItemEnhancedRequest builtObject = PutItemEnhancedRequest.builder(FakeItem.class) + .item(fakeItem) + .conditionExpression(conditionExpression) + .build(); + + assertThat(builtObject.item(), is(fakeItem)); + assertThat(builtObject.conditionExpression(), is(conditionExpression)); + } + + @Test + public void toBuilder() { + PutItemEnhancedRequest builtObject = PutItemEnhancedRequest.builder(FakeItem.class).build(); + + PutItemEnhancedRequest copiedObject = builtObject.toBuilder().build(); + + assertThat(copiedObject, is(builtObject)); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequestTest.java new file mode 100644 index 000000000000..827113e1f72b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequestTest.java @@ -0,0 +1,189 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.NestedAttributeName; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +import java.util.*; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import static software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional.keyEqualTo; + +@RunWith(MockitoJUnitRunner.class) +public class QueryEnhancedRequestTest { + + @Test + public void builder_minimal() { + QueryEnhancedRequest builtObject = QueryEnhancedRequest.builder().build(); + + assertThat(builtObject.exclusiveStartKey(), is(nullValue())); + assertThat(builtObject.consistentRead(), is(nullValue())); + assertThat(builtObject.filterExpression(), is(nullValue())); + assertThat(builtObject.limit(), is(nullValue())); + assertThat(builtObject.queryConditional(), is(nullValue())); + assertThat(builtObject.scanIndexForward(), is(nullValue())); + assertThat(builtObject.attributesToProject(), is(nullValue())); + } + + @Test + public void builder_maximal() { + Map exclusiveStartKey = new HashMap<>(); + exclusiveStartKey.put("id", stringValue("id-value")); + exclusiveStartKey.put("sort", numberValue(7)); + + Map expressionValues = singletonMap(":test-key", stringValue("test-value")); + Expression filterExpression = Expression.builder() + .expression("test-expression") + .expressionValues(expressionValues) + .build(); + + QueryConditional queryConditional = keyEqualTo(k -> k.partitionValue("id-value")); + + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + List attributesToProject = new ArrayList<>(Arrays.asList(attributesToProjectArray)); + attributesToProject.add(additionalElement); + + QueryEnhancedRequest builtObject = QueryEnhancedRequest.builder() + .exclusiveStartKey(exclusiveStartKey) + .consistentRead(false) + .filterExpression(filterExpression) + .limit(3) + .queryConditional(queryConditional) + .scanIndexForward(true) + .attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) + .build(); + + assertThat(builtObject.exclusiveStartKey(), is(exclusiveStartKey)); + assertThat(builtObject.consistentRead(), is(false)); + assertThat(builtObject.filterExpression(), is(filterExpression)); + assertThat(builtObject.limit(), is(3)); + assertThat(builtObject.queryConditional(), is(queryConditional)); + assertThat(builtObject.scanIndexForward(), is(true)); + assertThat(builtObject.attributesToProject(), is(attributesToProject)); + } + + + @Test + public void test_withNestedAttributeAddedFirstAndThenAttributesToProject() { + + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + QueryEnhancedRequest builtObject = QueryEnhancedRequest.builder() + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar")) + .attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) + 
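+                // attributesToProject(...) replaces any projection names registered so far, so the nested attribute "foo.bar" added above is expected to be absent from the final list asserted below.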
.build(); + List attributesToProject = Arrays.asList("one", "two", "three"); + assertThat(builtObject.attributesToProject(), is(attributesToProject)); + } + + + @Test + public void test_nestedAttributesToProjectWithNestedAttributeAddedLast() { + + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + + QueryEnhancedRequest builtObjectOne = QueryEnhancedRequest.builder() + .attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar")) + .build(); + List attributesToProjectNestedLast = Arrays.asList("one", "two", "three", "foo.bar"); + assertThat(builtObjectOne.attributesToProject(), is(attributesToProjectNestedLast)); + + } + + @Test + public void test_nestedAttributesToProjectWithNestedAttributeAddedInBetween() { + + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + + QueryEnhancedRequest builtObjectOne = QueryEnhancedRequest.builder() + .attributesToProject(attributesToProjectArray) + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar")) + .addAttributeToProject(additionalElement) + .build(); + List attributesToProjectNestedLast = Arrays.asList("one", "two", "foo.bar", "three"); + assertThat(builtObjectOne.attributesToProject(), is(attributesToProjectNestedLast)); + + } + + @Test + public void test_nestedAttributesToProjectOverwrite() { + + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + String[] overwrite = { "overwrite"}; + + QueryEnhancedRequest builtObjectTwo = QueryEnhancedRequest.builder() + .attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar")) + .attributesToProject(overwrite) + .build(); + assertThat(builtObjectTwo.attributesToProject(), is(Arrays.asList(overwrite))); + } + + @Test + public void test_nestedAttributesNullNestedAttributeElement() { + List attributeNames = new ArrayList<>(); + attributeNames.add(NestedAttributeName.create("foo")); + attributeNames.add(null); + assertFails(() -> QueryEnhancedRequest.builder() + .addNestedAttributesToProject(attributeNames) + .build()); + + assertFails(() -> QueryEnhancedRequest.builder() + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar"), null) + .build()); + + NestedAttributeName nestedAttributeName = null; + QueryEnhancedRequest.builder() + .addNestedAttributeToProject(nestedAttributeName) + .build(); + assertFails(() -> QueryEnhancedRequest.builder() + .addNestedAttributesToProject(nestedAttributeName) + .build()); + } + + + + + @Test + public void toBuilder() { + QueryEnhancedRequest builtObject = QueryEnhancedRequest.builder().build(); + + QueryEnhancedRequest copiedObject = builtObject.toBuilder().build(); + + assertThat(copiedObject, is(builtObject)); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/ReadBatchTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/ReadBatchTest.java new file mode 100644 index 000000000000..b5c2c8265d19 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/ReadBatchTest.java @@ -0,0 +1,98 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; + +import java.util.Collections; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@RunWith(MockitoJUnitRunner.class) +public class ReadBatchTest { + + private static final String TABLE_NAME = "table-name"; + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private DynamoDbEnhancedClient enhancedClient; + private DynamoDbTable fakeItemMappedTable; + + @Before + public void setupMappedTables() { + enhancedClient = DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).build(); + fakeItemMappedTable = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema()); + } + + @Test + public void builder_minimal() { + ReadBatch builtObject = ReadBatch.builder(FakeItem.class).build(); + + assertThat(builtObject.tableName(), is(nullValue())); + assertThat(builtObject.keysAndAttributes(), is(nullValue())); + } + + @Test + public void builder_maximal_consumer_style() { + FakeItem fakeItem = createUniqueFakeItem(); + + ReadBatch builtObject = ReadBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addGetItem(r -> r.key(k -> k.partitionValue(fakeItem.getId()))) + .build(); + + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, + FakeItem.getTableMetadata().primaryKeys()); + + assertThat(builtObject.tableName(), is(TABLE_NAME)); + assertThat(builtObject.keysAndAttributes().keys(), containsInAnyOrder(Collections.singletonList(fakeItemMap).toArray())); + } + + @Test + public void builder_maximal_builder_style() { + FakeItem fakeItem = createUniqueFakeItem(); + + GetItemEnhancedRequest getItem = GetItemEnhancedRequest.builder() + .key(k -> k.partitionValue(fakeItem.getId())) + .build(); + + ReadBatch builtObject = ReadBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addGetItem(getItem) + .build(); + + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, + FakeItem.getTableMetadata().primaryKeys()); + + assertThat(builtObject.tableName(), is(TABLE_NAME)); + assertThat(builtObject.keysAndAttributes().keys(), containsInAnyOrder(Collections.singletonList(fakeItemMap).toArray())); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequestTest.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequestTest.java new file mode 100644 index 000000000000..04d77c7d6090 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequestTest.java @@ -0,0 +1,198 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.NestedAttributeName; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@RunWith(MockitoJUnitRunner.class) +public class ScanEnhancedRequestTest { + + @Test + public void builder_minimal() { + ScanEnhancedRequest builtObject = ScanEnhancedRequest.builder().build(); + + assertThat(builtObject.exclusiveStartKey(), is(nullValue())); + assertThat(builtObject.consistentRead(), is(nullValue())); + assertThat(builtObject.filterExpression(), is(nullValue())); + assertThat(builtObject.attributesToProject(), is(nullValue())); + assertThat(builtObject.limit(), is(nullValue())); + } + + @Test + public void builder_maximal() { + Map exclusiveStartKey = new HashMap<>(); + exclusiveStartKey.put("id", stringValue("id-value")); + exclusiveStartKey.put("sort", numberValue(7)); + + Map expressionValues = singletonMap(":test-key", stringValue("test-value")); + Expression filterExpression = Expression.builder() + .expression("test-expression") + .expressionValues(expressionValues) + .build(); + + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + List attributesToProject = new ArrayList<>(Arrays.asList(attributesToProjectArray)); + attributesToProject.add(additionalElement); + + ScanEnhancedRequest builtObject = ScanEnhancedRequest.builder() + .exclusiveStartKey(exclusiveStartKey) + .consistentRead(false) + .filterExpression(filterExpression) + .attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) + .limit(3) + 
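+                // limit(3) caps the number of items evaluated per scan page; it is not a cap on the total number of results returned.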
.build(); + + assertThat(builtObject.exclusiveStartKey(), is(exclusiveStartKey)); + assertThat(builtObject.consistentRead(), is(false)); + assertThat(builtObject.filterExpression(), is(filterExpression)); + assertThat(builtObject.attributesToProject(), is(attributesToProject)); + assertThat(builtObject.limit(), is(3)); + } + + @Test + public void test_withNestedAttributeAddedFirst() { + + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + ScanEnhancedRequest builtObject = ScanEnhancedRequest.builder() + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar")) + .attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) + .build(); + List attributesToProject = Arrays.asList("one", "two", "three"); + assertThat(builtObject.attributesToProject(), is(attributesToProject)); + } + + + @Test + public void test_nestedAttributesToProjectWithNestedAttributeAddedLast() { + + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + + ScanEnhancedRequest builtObjectOne = ScanEnhancedRequest.builder() + .attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar")) + .build(); + List attributesToProjectNestedLast = Arrays.asList("one", "two", "three", "foo.bar"); + assertThat(builtObjectOne.attributesToProject(), is(attributesToProjectNestedLast)); + + } + + @Test + public void test_nestedAttributesToProjectWithNestedAttributeAddedInBetween() { + + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + + ScanEnhancedRequest builtObjectOne = ScanEnhancedRequest.builder() + .attributesToProject(attributesToProjectArray) + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar")) + .addAttributeToProject(additionalElement) + .build(); + List attributesToProjectNestedLast = Arrays.asList("one", "two", "foo.bar", "three"); + assertThat(builtObjectOne.attributesToProject(), is(attributesToProjectNestedLast)); + + } + @Test + public void test_nestedAttributesToProjectOverwrite() { + + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + String[] overwrite = { "overwrite"}; + + ScanEnhancedRequest builtObjectTwo = ScanEnhancedRequest.builder() + .attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar")) + .attributesToProject(overwrite) + .build(); + assertThat(builtObjectTwo.attributesToProject(), is(Arrays.asList(overwrite))); + } + + @Test + public void test_nestedAttributesNullStringElement() { + + String[] attributesToProjectArray = {"one", "two", null}; + String additionalElement = "three"; + assertFails(() -> ScanEnhancedRequest.builder() + .attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) + .addAttributeToProject(null) + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar")) + .build()); + + assertFails(() -> ScanEnhancedRequest.builder() + .attributesToProject("foo", "bar", null) + .build()); + + } + + @Test + public void test_nestedAttributesNullNestedAttributeElement() { + List attributeNames = new ArrayList<>(); + attributeNames.add(NestedAttributeName.create("foo")); + attributeNames.add(null); + assertFails(() -> ScanEnhancedRequest.builder() + .addNestedAttributesToProject(attributeNames) + .build()); + assertFails(() -> 
ScanEnhancedRequest.builder() + .addNestedAttributesToProject(NestedAttributeName.create("foo", "bar"), null) + .build()); + NestedAttributeName nestedAttributeName = null; + ScanEnhancedRequest.builder() + .addNestedAttributeToProject(nestedAttributeName) + .build(); + assertFails(() -> ScanEnhancedRequest.builder() + .addNestedAttributesToProject(nestedAttributeName) + .build()); + } + + + + @Test + public void toBuilder() { + ScanEnhancedRequest builtObject = ScanEnhancedRequest.builder().exclusiveStartKey(null).build(); + + ScanEnhancedRequest copiedObject = builtObject.toBuilder().build(); + + assertThat(copiedObject, is(builtObject)); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactGetItemsEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactGetItemsEnhancedRequestTest.java new file mode 100644 index 000000000000..d3ad194e64ae --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactGetItemsEnhancedRequestTest.java @@ -0,0 +1,109 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.Get; +import software.amazon.awssdk.services.dynamodb.model.TransactGetItem; + +@RunWith(MockitoJUnitRunner.class) +public class TransactGetItemsEnhancedRequestTest { + + private static final String TABLE_NAME = "table-name"; + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private DynamoDbEnhancedClient enhancedClient; + private DynamoDbTable fakeItemMappedTable; + + + @Before + public void setupMappedTables() { + enhancedClient = DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).build(); + fakeItemMappedTable = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema()); + } + + + @Test + public void builder_minimal() { + TransactGetItemsEnhancedRequest builtObject = TransactGetItemsEnhancedRequest.builder().build(); + + assertThat(builtObject.transactGetItems(), is(nullValue())); + } + + @Test + 
public void builder_maximal_consumer_style() { + FakeItem fakeItem = createUniqueFakeItem(); + + TransactGetItemsEnhancedRequest builtObject = + TransactGetItemsEnhancedRequest.builder() + .addGetItem(fakeItemMappedTable, fakeItem) + .addGetItem(fakeItemMappedTable, fakeItem) + .build(); + + assertThat(builtObject.transactGetItems(), is(getTransactGetItems(fakeItem))); + } + + @Test + public void builder_maximal_builder_style() { + FakeItem fakeItem = createUniqueFakeItem(); + + GetItemEnhancedRequest getItem = GetItemEnhancedRequest.builder() + .key(k -> k.partitionValue(fakeItem.getId())) + .build(); + + TransactGetItemsEnhancedRequest builtObject = + TransactGetItemsEnhancedRequest.builder() + .addGetItem(fakeItemMappedTable, getItem) + .addGetItem(fakeItemMappedTable, getItem) + .build(); + + assertThat(builtObject.transactGetItems(), is(getTransactGetItems(fakeItem))); + } + + + private List getTransactGetItems(FakeItem fakeItem) { + final Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + + TransactGetItem getItem = TransactGetItem.builder() + .get(Get.builder() + .key(fakeItemMap) + .tableName(TABLE_NAME) + .build()) + .build(); + + return Arrays.asList(getItem, getItem); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequestTest.java new file mode 100644 index 000000000000..c17becfe54bd --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequestTest.java @@ -0,0 +1,206 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertEquals; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.ExtensionResolver; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactWriteItemsOperation; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.Delete; +import software.amazon.awssdk.services.dynamodb.model.Put; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItemsRequest; + +@RunWith(MockitoJUnitRunner.class) +public class TransactWriteItemsEnhancedRequestTest { + + private static final String TABLE_NAME = "table-name"; + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private DynamoDbEnhancedClient enhancedClient; + private DynamoDbTable fakeItemMappedTable; + + + @Before + public void setupMappedTables() { + enhancedClient = DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).extensions().build(); + fakeItemMappedTable = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema()); + } + + + @Test + public void builder_minimal() { + TransactWriteItemsEnhancedRequest builtObject = TransactWriteItemsEnhancedRequest.builder().build(); + + assertThat(builtObject.transactWriteItems(), is(nullValue())); + } + + @Test + public void builder_maximal_consumer_style() { + FakeItem fakeItem = createUniqueFakeItem(); + + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("0"))) + .expressionNames(singletonMap("#attribute", "attribute")) + .build(); + + TransactWriteItemsEnhancedRequest builtObject = + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(fakeItemMappedTable, fakeItem) + .addDeleteItem(fakeItemMappedTable, fakeItem) + .addUpdateItem(fakeItemMappedTable, fakeItem) + .addConditionCheck(fakeItemMappedTable, r -> r.key(k -> k.partitionValue(fakeItem.getId())) + .conditionExpression(conditionExpression)) + .build(); + + assertThat(builtObject.transactWriteItems().size(), is(4)); + assertThat(builtObject.transactWriteItems().get(0), is(getTransactWriteItems(fakeItem).get(0))); + assertThat(builtObject.transactWriteItems().get(1), 
is(getTransactWriteItems(fakeItem).get(1))); + + assertThat(builtObject.transactWriteItems().get(2).update(), is(notNullValue())); + assertThat(builtObject.transactWriteItems().get(2).update().key().get("id").s(), is(fakeItem.getId())); + + assertThat(builtObject.transactWriteItems().get(3).conditionCheck(), is(notNullValue())); + assertThat(builtObject.transactWriteItems().get(3).conditionCheck().key().get("id").s(), is(fakeItem.getId())); + } + + @Test + public void builder_maximal_shortcut_style() { + FakeItem fakeItem = createUniqueFakeItem(); + + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("0"))) + .expressionNames(singletonMap("#attribute", "attribute")) + .build(); + + TransactWriteItemsEnhancedRequest builtObject = + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(fakeItemMappedTable, fakeItem) + .addDeleteItem(fakeItemMappedTable, Key.builder().partitionValue(fakeItem.getId()).build()) + .addUpdateItem(fakeItemMappedTable, fakeItem) + .addConditionCheck(fakeItemMappedTable, r -> r.key(k -> k.partitionValue(fakeItem.getId())) + .conditionExpression(conditionExpression)) + .build(); + + assertThat(builtObject.transactWriteItems().size(), is(4)); + assertThat(builtObject.transactWriteItems().get(0), is(getTransactWriteItems(fakeItem).get(0))); + assertThat(builtObject.transactWriteItems().get(1), is(getTransactWriteItems(fakeItem).get(1))); + + assertThat(builtObject.transactWriteItems().get(2).update(), is(notNullValue())); + assertThat(builtObject.transactWriteItems().get(2).update().key().get("id").s(), is(fakeItem.getId())); + + assertThat(builtObject.transactWriteItems().get(3).conditionCheck(), is(notNullValue())); + assertThat(builtObject.transactWriteItems().get(3).conditionCheck().key().get("id").s(), is(fakeItem.getId())); + } + + @Test + public void builder_maximal_builder_style() { + FakeItem fakeItem = createUniqueFakeItem(); + + PutItemEnhancedRequest putItem = PutItemEnhancedRequest.builder(FakeItem.class).item(fakeItem).build(); + DeleteItemEnhancedRequest deleteItem = DeleteItemEnhancedRequest.builder() + .key(k -> k.partitionValue(fakeItem.getId())) + .build(); + UpdateItemEnhancedRequest updateItem = UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(fakeItem).build(); + Expression conditionExpression = Expression.builder() + .expression("#attribute = :attribute") + .expressionValues(singletonMap(":attribute", stringValue("0"))) + .expressionNames(singletonMap("#attribute", "attribute")) + .build(); + ConditionCheck conditionCheck = ConditionCheck.builder() + .key(k -> k.partitionValue(fakeItem.getId())) + .conditionExpression(conditionExpression) + .build(); + + TransactWriteItemsEnhancedRequest builtObject = + TransactWriteItemsEnhancedRequest.builder() + .addPutItem(fakeItemMappedTable, putItem) + .addDeleteItem(fakeItemMappedTable, deleteItem) + .addUpdateItem(fakeItemMappedTable, updateItem) + .addConditionCheck(fakeItemMappedTable, conditionCheck) + .build(); + + assertThat(builtObject.transactWriteItems().size(), is(4)); + assertThat(builtObject.transactWriteItems().get(0), is(getTransactWriteItems(fakeItem).get(0))); + assertThat(builtObject.transactWriteItems().get(1), is(getTransactWriteItems(fakeItem).get(1))); + + assertThat(builtObject.transactWriteItems().get(2).update(), is(notNullValue())); + assertThat(builtObject.transactWriteItems().get(2).update().key().get("id").s(), is(fakeItem.getId())); + + 
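+        // As in the consumer- and shortcut-style tests above, the update and condition-check entries are verified by their key attributes rather than by full object equality.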
assertThat(builtObject.transactWriteItems().get(3).conditionCheck(), is(notNullValue())); + assertThat(builtObject.transactWriteItems().get(3).conditionCheck().key().get("id").s(), is(fakeItem.getId())); + } + + @Test + public void builder_passRequestToken_shouldWork() { + String token = UUID.randomUUID().toString(); + TransactWriteItemsEnhancedRequest enhancedRequest = TransactWriteItemsEnhancedRequest.builder() + .clientRequestToken(token) + .build(); + DynamoDbEnhancedClientExtension extension = ExtensionResolver.resolveExtensions(ExtensionResolver.defaultExtensions()); + TransactWriteItemsRequest request = TransactWriteItemsOperation.create(enhancedRequest).generateRequest(extension); + assertEquals(token, request.clientRequestToken()); + } + + private List getTransactWriteItems(FakeItem fakeItem) { + final Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); + + TransactWriteItem putWriteItem = TransactWriteItem.builder() + .put(Put.builder() + .item(fakeItemMap) + .tableName(TABLE_NAME) + .build()) + .build(); + TransactWriteItem deleteWriteItem = TransactWriteItem.builder() + .delete(Delete.builder() + .key(fakeItemMap) + .tableName(TABLE_NAME) + .build()) + .build(); + + return Arrays.asList(putWriteItem, deleteWriteItem); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/UpdateItemEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/UpdateItemEnhancedRequestTest.java new file mode 100644 index 000000000000..6931b8ed1364 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/UpdateItemEnhancedRequestTest.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; +import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; + +@RunWith(MockitoJUnitRunner.class) +public class UpdateItemEnhancedRequestTest { + + @Test + public void builder_minimal() { + UpdateItemEnhancedRequest builtObject = UpdateItemEnhancedRequest.builder(FakeItem.class).build(); + + assertThat(builtObject.item(), is(nullValue())); + assertThat(builtObject.ignoreNulls(), is(nullValue())); + assertThat(builtObject.conditionExpression(), is(nullValue())); + } + + @Test + public void builder_maximal() { + FakeItem fakeItem = createUniqueFakeItem(); + + Expression conditionExpression = Expression.builder() + .expression("#key = :value OR #key1 = :value1") + .putExpressionName("#key", "attribute") + .putExpressionName("#key1", "attribute3") + .putExpressionValue(":value", stringValue("wrong")) + .putExpressionValue(":value1", stringValue("three")) + .build(); + + UpdateItemEnhancedRequest builtObject = UpdateItemEnhancedRequest.builder(FakeItem.class) + .item(fakeItem) + .ignoreNulls(true) + .conditionExpression(conditionExpression) + .build(); + + assertThat(builtObject.item(), is(fakeItem)); + assertThat(builtObject.ignoreNulls(), is(true)); + assertThat(builtObject.conditionExpression(), is(conditionExpression)); + } + + @Test + public void toBuilder() { + UpdateItemEnhancedRequest builtObject = UpdateItemEnhancedRequest.builder(FakeItem.class).build(); + + UpdateItemEnhancedRequest copiedObject = builtObject.toBuilder().build(); + + assertThat(copiedObject, is(builtObject)); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/WriteBatchTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/WriteBatchTest.java new file mode 100644 index 000000000000..793d83eacddf --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/WriteBatchTest.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; + +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.DeleteRequest; +import software.amazon.awssdk.services.dynamodb.model.PutRequest; +import software.amazon.awssdk.services.dynamodb.model.WriteRequest; + +@RunWith(MockitoJUnitRunner.class) +public class WriteBatchTest { + + private static final String TABLE_NAME = "table-name"; + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private DynamoDbEnhancedClient enhancedClient; + private DynamoDbTable fakeItemMappedTable; + + @Before + public void setupMappedTables() { + enhancedClient = DynamoDbEnhancedClient.builder().dynamoDbClient(mockDynamoDbClient).extensions().build(); + fakeItemMappedTable = enhancedClient.table(TABLE_NAME, FakeItem.getTableSchema()); + } + + @Test + public void builder_minimal() { + WriteBatch builtObject = WriteBatch.builder(FakeItem.class).build(); + + assertThat(builtObject.tableName(), is(nullValue())); + assertThat(builtObject.writeRequests(), is(nullValue())); + } + + @Test + public void builder_maximal_consumer_style() { + FakeItem fakeItem = createUniqueFakeItem(); + + WriteBatch builtObject = WriteBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addPutItem(r -> r.item(fakeItem)) + .addDeleteItem(r -> r.key(k -> k.partitionValue(fakeItem.getId()))) + .build(); + + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, + FakeItem.getTableMetadata().primaryKeys()); + + assertThat(builtObject.tableName(), is(TABLE_NAME)); + assertThat(builtObject.writeRequests(), containsInAnyOrder(putRequest(fakeItemMap), deleteRequest(fakeItemMap))); + } + + @Test + public void builder_maximal_builder_style() { + FakeItem fakeItem = createUniqueFakeItem(); + + PutItemEnhancedRequest putItem = PutItemEnhancedRequest.builder(FakeItem.class).item(fakeItem).build(); + DeleteItemEnhancedRequest deleteItem = DeleteItemEnhancedRequest.builder() + .key(k -> k.partitionValue(fakeItem.getId())) + .build(); + + WriteBatch builtObject = WriteBatch.builder(FakeItem.class) + .mappedTableResource(fakeItemMappedTable) + .addPutItem(putItem) + .addDeleteItem(deleteItem) + .build(); + + Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, + FakeItem.getTableMetadata().primaryKeys()); + + assertThat(builtObject.tableName(), is(TABLE_NAME)); + assertThat(builtObject.writeRequests(), containsInAnyOrder(putRequest(fakeItemMap), deleteRequest(fakeItemMap))); + } + + private static WriteRequest putRequest(Map itemMap) { + return WriteRequest.builder().putRequest(PutRequest.builder().item(itemMap).build()).build(); + } + + private static WriteRequest deleteRequest(Map itemMap) { + return 
WriteRequest.builder().deleteRequest(DeleteRequest.builder().key(itemMap).build()).build(); + } + +} diff --git a/services-custom/pom.xml b/services-custom/pom.xml new file mode 100644 index 000000000000..47d34c615ae3 --- /dev/null +++ b/services-custom/pom.xml @@ -0,0 +1,46 @@ + + + + + 4.0.0 + + software.amazon.awssdk + aws-sdk-java-pom + 2.15.62-SNAPSHOT + + services-custom + AWS Java SDK :: Custom Services + pom + The AWS Java SDK custom services + https://aws.amazon.com/sdkforjava + + + dynamodb-enhanced + s3-transfermanager + + + + + + software.amazon.awssdk + bom-internal + ${awsjavasdk.version} + pom + import + + + + diff --git a/services-custom/s3-transfermanager/pom.xml b/services-custom/s3-transfermanager/pom.xml index b8e0c36ad1be..685ae086d571 100644 --- a/services-custom/s3-transfermanager/pom.xml +++ b/services-custom/s3-transfermanager/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + accessanalyzer + AWS Java SDK :: Services :: AccessAnalyzer + The AWS Java SDK for AccessAnalyzer module holds the client classes that are used for + communicating with AccessAnalyzer. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.accessanalyzer + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/accessanalyzer/src/main/resources/codegen-resources/paginators-1.json b/services/accessanalyzer/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..64553f7bdf1c --- /dev/null +++ b/services/accessanalyzer/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListAnalyzedResources": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "analyzedResources" + }, + "ListAnalyzers": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "analyzers" + }, + "ListArchiveRules": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "archiveRules" + }, + "ListFindings": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "findings" + } + } +} diff --git a/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json b/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..4313bb012bfc --- /dev/null +++ b/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1677 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-11-01", + "endpointPrefix":"access-analyzer", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Access Analyzer", + "serviceId":"AccessAnalyzer", + "signatureVersion":"v4", + "signingName":"access-analyzer", + "uid":"accessanalyzer-2019-11-01" + }, + "operations":{ + "ApplyArchiveRule":{ + "name":"ApplyArchiveRule", + "http":{ + "method":"PUT", + "requestUri":"/archive-rule", + "responseCode":200 + }, + "input":{"shape":"ApplyArchiveRuleRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retroactively applies the archive rule to existing findings that meet the archive rule criteria.
    ", + "idempotent":true + }, + "CreateAnalyzer":{ + "name":"CreateAnalyzer", + "http":{ + "method":"PUT", + "requestUri":"/analyzer", + "responseCode":200 + }, + "input":{"shape":"CreateAnalyzerRequest"}, + "output":{"shape":"CreateAnalyzerResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates an analyzer for your account.
    ", + "idempotent":true + }, + "CreateArchiveRule":{ + "name":"CreateArchiveRule", + "http":{ + "method":"PUT", + "requestUri":"/analyzer/{analyzerName}/archive-rule", + "responseCode":200 + }, + "input":{"shape":"CreateArchiveRuleRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates an archive rule for the specified analyzer. Archive rules automatically archive new findings that meet the criteria you define when you create the rule.
    ", + "idempotent":true + }, + "DeleteAnalyzer":{ + "name":"DeleteAnalyzer", + "http":{ + "method":"DELETE", + "requestUri":"/analyzer/{analyzerName}", + "responseCode":200 + }, + "input":{"shape":"DeleteAnalyzerRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes the specified analyzer. When you delete an analyzer, Access Analyzer is disabled for the account in the current or specific Region. All findings that were generated by the analyzer are deleted. You cannot undo this action.
    ", + "idempotent":true + }, + "DeleteArchiveRule":{ + "name":"DeleteArchiveRule", + "http":{ + "method":"DELETE", + "requestUri":"/analyzer/{analyzerName}/archive-rule/{ruleName}", + "responseCode":200 + }, + "input":{"shape":"DeleteArchiveRuleRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes the specified archive rule.
    ", + "idempotent":true + }, + "GetAnalyzedResource":{ + "name":"GetAnalyzedResource", + "http":{ + "method":"GET", + "requestUri":"/analyzed-resource", + "responseCode":200 + }, + "input":{"shape":"GetAnalyzedResourceRequest"}, + "output":{"shape":"GetAnalyzedResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves information about a resource that was analyzed.
    " + }, + "GetAnalyzer":{ + "name":"GetAnalyzer", + "http":{ + "method":"GET", + "requestUri":"/analyzer/{analyzerName}", + "responseCode":200 + }, + "input":{"shape":"GetAnalyzerRequest"}, + "output":{"shape":"GetAnalyzerResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves information about the specified analyzer.
    " + }, + "GetArchiveRule":{ + "name":"GetArchiveRule", + "http":{ + "method":"GET", + "requestUri":"/analyzer/{analyzerName}/archive-rule/{ruleName}", + "responseCode":200 + }, + "input":{"shape":"GetArchiveRuleRequest"}, + "output":{"shape":"GetArchiveRuleResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves information about an archive rule. To learn about filter keys that you can use to create an archive rule, see Access Analyzer filter keys in the IAM User Guide.
    " + }, + "GetFinding":{ + "name":"GetFinding", + "http":{ + "method":"GET", + "requestUri":"/finding/{id}", + "responseCode":200 + }, + "input":{"shape":"GetFindingRequest"}, + "output":{"shape":"GetFindingResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves information about the specified finding.
    " + }, + "ListAnalyzedResources":{ + "name":"ListAnalyzedResources", + "http":{ + "method":"POST", + "requestUri":"/analyzed-resource", + "responseCode":200 + }, + "input":{"shape":"ListAnalyzedResourcesRequest"}, + "output":{"shape":"ListAnalyzedResourcesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves a list of resources of the specified type that have been analyzed by the specified analyzer.

    " + }, + "ListAnalyzers":{ + "name":"ListAnalyzers", + "http":{ + "method":"GET", + "requestUri":"/analyzer", + "responseCode":200 + }, + "input":{"shape":"ListAnalyzersRequest"}, + "output":{"shape":"ListAnalyzersResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves a list of analyzers.

    " + }, + "ListArchiveRules":{ + "name":"ListArchiveRules", + "http":{ + "method":"GET", + "requestUri":"/analyzer/{analyzerName}/archive-rule", + "responseCode":200 + }, + "input":{"shape":"ListArchiveRulesRequest"}, + "output":{"shape":"ListArchiveRulesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves a list of archive rules created for the specified analyzer.

    " + }, + "ListFindings":{ + "name":"ListFindings", + "http":{ + "method":"POST", + "requestUri":"/finding", + "responseCode":200 + }, + "input":{"shape":"ListFindingsRequest"}, + "output":{"shape":"ListFindingsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves a list of findings generated by the specified analyzer.

    To learn about filter keys that you can use to create an archive rule, see Access Analyzer filter keys in the IAM User Guide.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves a list of tags applied to the specified resource.

    " + }, + "StartResourceScan":{ + "name":"StartResourceScan", + "http":{ + "method":"POST", + "requestUri":"/resource/scan", + "responseCode":200 + }, + "input":{"shape":"StartResourceScanRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Immediately starts a scan of the policies applied to the specified resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Adds a tag to the specified resource.

    ", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Removes a tag from the specified resource.

    ", + "idempotent":true + }, + "UpdateArchiveRule":{ + "name":"UpdateArchiveRule", + "http":{ + "method":"PUT", + "requestUri":"/analyzer/{analyzerName}/archive-rule/{ruleName}", + "responseCode":200 + }, + "input":{"shape":"UpdateArchiveRuleRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates the criteria and values for the specified archive rule.

    ", + "idempotent":true + }, + "UpdateFindings":{ + "name":"UpdateFindings", + "http":{ + "method":"PUT", + "requestUri":"/finding", + "responseCode":200 + }, + "input":{"shape":"UpdateFindingsRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates the status for the specified findings.

    ", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "ActionList":{ + "type":"list", + "member":{"shape":"String"} + }, + "AnalyzedResource":{ + "type":"structure", + "required":[ + "analyzedAt", + "createdAt", + "isPublic", + "resourceArn", + "resourceOwnerAccount", + "resourceType", + "updatedAt" + ], + "members":{ + "actions":{ + "shape":"ActionList", + "documentation":"

    The actions that an external principal is granted permission to use by the policy that generated the finding.

    " + }, + "analyzedAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the resource was analyzed.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the finding was created.

    " + }, + "error":{ + "shape":"String", + "documentation":"

    An error message.

    " + }, + "isPublic":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the policy that generated the finding grants public access to the resource.

    " + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource that was analyzed.

    " + }, + "resourceOwnerAccount":{ + "shape":"String", + "documentation":"

    The AWS account ID that owns the resource.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of the resource that was analyzed.

    " + }, + "sharedVia":{ + "shape":"SharedViaList", + "documentation":"

    Indicates how the access that generated the finding is granted. This is populated for Amazon S3 bucket findings.

    " + }, + "status":{ + "shape":"FindingStatus", + "documentation":"

    The current status of the finding generated from the analyzed resource.

    " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the finding was updated.

    " + } + }, + "documentation":"

    Contains details about the analyzed resource.

    " + }, + "AnalyzedResourceSummary":{ + "type":"structure", + "required":[ + "resourceArn", + "resourceOwnerAccount", + "resourceType" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the analyzed resource.

    " + }, + "resourceOwnerAccount":{ + "shape":"String", + "documentation":"

    The AWS account ID that owns the resource.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource that was analyzed.

    " + } + }, + "documentation":"

    Contains the ARN of the analyzed resource.

    " + }, + "AnalyzedResourcesList":{ + "type":"list", + "member":{"shape":"AnalyzedResourceSummary"} + }, + "AnalyzerArn":{ + "type":"string", + "pattern":"^[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:analyzer/.{1,255}$" + }, + "AnalyzerStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "CREATING", + "DISABLED", + "FAILED" + ] + }, + "AnalyzerSummary":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "name", + "status", + "type" + ], + "members":{ + "arn":{ + "shape":"AnalyzerArn", + "documentation":"

    The ARN of the analyzer.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    A timestamp for the time at which the analyzer was created.

    " + }, + "lastResourceAnalyzed":{ + "shape":"String", + "documentation":"

    The resource that was most recently analyzed by the analyzer.

    " + }, + "lastResourceAnalyzedAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the most recently analyzed resource was analyzed.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the analyzer.

    " + }, + "status":{ + "shape":"AnalyzerStatus", + "documentation":"

    The status of the analyzer. An Active analyzer successfully monitors supported resources and generates new findings. The analyzer is Disabled when a user action, such as removing trusted access for IAM Access Analyzer from AWS Organizations, causes the analyzer to stop generating new findings. The status is Creating when the analyzer creation is in progress and Failed when the analyzer creation has failed.
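    Because the status only becomes Active asynchronously, a caller typically checks GetAnalyzer after creation. Below is a minimal sketch using the Java SDK v2 client that would be generated from this model; the class and method names (AccessAnalyzerClient, GetAnalyzerRequest, AnalyzerStatus) follow the SDK's standard codegen conventions, and the analyzer name is a placeholder.

    import software.amazon.awssdk.services.accessanalyzer.AccessAnalyzerClient;
    import software.amazon.awssdk.services.accessanalyzer.model.AnalyzerStatus;
    import software.amazon.awssdk.services.accessanalyzer.model.AnalyzerSummary;
    import software.amazon.awssdk.services.accessanalyzer.model.GetAnalyzerRequest;

    public class CheckAnalyzerStatus {
        public static void main(String[] args) {
            try (AccessAnalyzerClient client = AccessAnalyzerClient.create()) {
                AnalyzerSummary analyzer = client.getAnalyzer(GetAnalyzerRequest.builder()
                        .analyzerName("example-analyzer")   // placeholder name
                        .build())
                        .analyzer();
                if (analyzer.status() == AnalyzerStatus.FAILED) {
                    // statusReason carries a ReasonCode explaining why creation failed.
                    System.err.println("Analyzer creation failed: " + analyzer.statusReason().code());
                } else {
                    System.out.println("Analyzer status: " + analyzer.status());
                }
            }
        }
    }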

    " + }, + "statusReason":{ + "shape":"StatusReason", + "documentation":"

    The statusReason provides more details about the current status of the analyzer. For example, if the creation for the analyzer fails, a Failed status is displayed. For an analyzer with organization as the type, this failure can be due to an issue with creating the service-linked roles required in the member accounts of the AWS organization.

    " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    The tags added to the analyzer.

    " + }, + "type":{ + "shape":"Type", + "documentation":"

    The type of analyzer, which corresponds to the zone of trust chosen for the analyzer.

    " + } + }, + "documentation":"

    Contains information about the analyzer.

    " + }, + "AnalyzersList":{ + "type":"list", + "member":{"shape":"AnalyzerSummary"} + }, + "ApplyArchiveRuleRequest":{ + "type":"structure", + "required":[ + "analyzerArn", + "ruleName" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

    The Amazon resource name (ARN) of the analyzer.

    " + }, + "clientToken":{ + "shape":"String", + "documentation":"

    A client token.

    ", + "idempotencyToken":true + }, + "ruleName":{ + "shape":"Name", + "documentation":"

    The name of the rule to apply.

    " + } + }, + "documentation":"

    Retroactively applies an archive rule.

    " + }, + "ArchiveRuleSummary":{ + "type":"structure", + "required":[ + "createdAt", + "filter", + "ruleName", + "updatedAt" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the archive rule was created.

    " + }, + "filter":{ + "shape":"FilterCriteriaMap", + "documentation":"

    A filter used to define the archive rule.

    " + }, + "ruleName":{ + "shape":"Name", + "documentation":"

    The name of the archive rule.

    " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the archive rule was last updated.

    " + } + }, + "documentation":"

    Contains information about an archive rule.

    " + }, + "ArchiveRulesList":{ + "type":"list", + "member":{"shape":"ArchiveRuleSummary"} + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "ConditionKeyMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The resource type.

    " + } + }, + "documentation":"

    A conflict exception error.

    ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateAnalyzerRequest":{ + "type":"structure", + "required":[ + "analyzerName", + "type" + ], + "members":{ + "analyzerName":{ + "shape":"Name", + "documentation":"

    The name of the analyzer to create.

    " + }, + "archiveRules":{ + "shape":"InlineArchiveRulesList", + "documentation":"

    Specifies the archive rules to add for the analyzer. Archive rules automatically archive findings that meet the criteria you define for the rule.

    " + }, + "clientToken":{ + "shape":"String", + "documentation":"

    A client token.

    ", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    The tags to apply to the analyzer.

    " + }, + "type":{ + "shape":"Type", + "documentation":"

    The type of analyzer to create. Only ACCOUNT analyzers are supported. You can create only one analyzer per account per Region.

    " + } + }, + "documentation":"

    Creates an analyzer.

    " + }, + "CreateAnalyzerResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"AnalyzerArn", + "documentation":"

    The ARN of the analyzer that was created by the request.

    " + } + }, + "documentation":"

    The response to the request to create an analyzer.

    " + }, + "CreateArchiveRuleRequest":{ + "type":"structure", + "required":[ + "analyzerName", + "filter", + "ruleName" + ], + "members":{ + "analyzerName":{ + "shape":"Name", + "documentation":"

    The name of the created analyzer.

    ", + "location":"uri", + "locationName":"analyzerName" + }, + "clientToken":{ + "shape":"String", + "documentation":"

    A client token.

    ", + "idempotencyToken":true + }, + "filter":{ + "shape":"FilterCriteriaMap", + "documentation":"

    The criteria for the rule.

    " + }, + "ruleName":{ + "shape":"Name", + "documentation":"

    The name of the rule to create.

    " + } + }, + "documentation":"

    Creates an archive rule.

    " + }, + "Criterion":{ + "type":"structure", + "members":{ + "contains":{ + "shape":"ValueList", + "documentation":"

    A \"contains\" operator to match for the filter used to create the rule.

    " + }, + "eq":{ + "shape":"ValueList", + "documentation":"

    An \"equals\" operator to match for the filter used to create the rule.

    " + }, + "exists":{ + "shape":"Boolean", + "documentation":"

    An \"exists\" operator to match for the filter used to create the rule.

    " + }, + "neq":{ + "shape":"ValueList", + "documentation":"

    A \"not equals\" operator to match for the filter used to create the rule.

    " + } + }, + "documentation":"

    The criteria to use in the filter that defines the archive rule.
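    To make the operator semantics concrete, here is a hedged sketch of building a filter map and creating an archive rule with the generated Java client. The filter key "resource", the analyzer name, and the matched value are illustrative assumptions; the valid filter keys are listed under Access Analyzer filter keys in the IAM User Guide.

    import java.util.Map;
    import software.amazon.awssdk.services.accessanalyzer.AccessAnalyzerClient;
    import software.amazon.awssdk.services.accessanalyzer.model.CreateArchiveRuleRequest;
    import software.amazon.awssdk.services.accessanalyzer.model.Criterion;

    public class CreateArchiveRuleSketch {
        public static void main(String[] args) {
            try (AccessAnalyzerClient client = AccessAnalyzerClient.create()) {
                // Archive any finding whose resource ARN contains this bucket name.
                Map<String, Criterion> filter = Map.of(
                        "resource", Criterion.builder()
                                .contains("example-shared-bucket")   // hypothetical value
                                .build());

                client.createArchiveRule(CreateArchiveRuleRequest.builder()
                        .analyzerName("example-analyzer")            // placeholder analyzer
                        .ruleName("archive-known-bucket")
                        .filter(filter)
                        .build());
            }
        }
    }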

    " + }, + "DeleteAnalyzerRequest":{ + "type":"structure", + "required":["analyzerName"], + "members":{ + "analyzerName":{ + "shape":"Name", + "documentation":"

    The name of the analyzer to delete.

    ", + "location":"uri", + "locationName":"analyzerName" + }, + "clientToken":{ + "shape":"String", + "documentation":"

    A client token.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + }, + "documentation":"

    Deletes an analyzer.

    " + }, + "DeleteArchiveRuleRequest":{ + "type":"structure", + "required":[ + "analyzerName", + "ruleName" + ], + "members":{ + "analyzerName":{ + "shape":"Name", + "documentation":"

    The name of the analyzer associated with the archive rule to delete.

    ", + "location":"uri", + "locationName":"analyzerName" + }, + "clientToken":{ + "shape":"String", + "documentation":"

    A client token.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + }, + "ruleName":{ + "shape":"Name", + "documentation":"

    The name of the rule to delete.

    ", + "location":"uri", + "locationName":"ruleName" + } + }, + "documentation":"

    Deletes an archive rule.

    " + }, + "FilterCriteriaMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Criterion"} + }, + "Finding":{ + "type":"structure", + "required":[ + "analyzedAt", + "condition", + "createdAt", + "id", + "resourceOwnerAccount", + "resourceType", + "status", + "updatedAt" + ], + "members":{ + "action":{ + "shape":"ActionList", + "documentation":"

    The action in the analyzed policy statement that an external principal has permission to use.

    " + }, + "analyzedAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the resource was analyzed.

    " + }, + "condition":{ + "shape":"ConditionKeyMap", + "documentation":"

    The condition in the analyzed policy statement that resulted in a finding.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the finding was generated.

    " + }, + "error":{ + "shape":"String", + "documentation":"

    An error.

    " + }, + "id":{ + "shape":"FindingId", + "documentation":"

    The ID of the finding.

    " + }, + "isPublic":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the policy that generated the finding allows public access to the resource.

    " + }, + "principal":{ + "shape":"PrincipalMap", + "documentation":"

    The external principal that has access to a resource within the zone of trust.

    " + }, + "resource":{ + "shape":"String", + "documentation":"

    The resource that an external principal has access to.

    " + }, + "resourceOwnerAccount":{ + "shape":"String", + "documentation":"

    The AWS account ID that owns the resource.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of the resource reported in the finding.

    " + }, + "sources":{ + "shape":"FindingSourceList", + "documentation":"

    The sources of the finding. This indicates how the access that generated the finding is granted. It is populated for Amazon S3 bucket findings.

    " + }, + "status":{ + "shape":"FindingStatus", + "documentation":"

    The current status of the finding.

    " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the finding was updated.

    " + } + }, + "documentation":"

    Contains information about a finding.

    " + }, + "FindingId":{"type":"string"}, + "FindingIdList":{ + "type":"list", + "member":{"shape":"FindingId"} + }, + "FindingSource":{ + "type":"structure", + "required":["type"], + "members":{ + "detail":{ + "shape":"FindingSourceDetail", + "documentation":"

    Includes details about how the access that generated the finding is granted. This is populated for Amazon S3 bucket findings.

    " + }, + "type":{ + "shape":"FindingSourceType", + "documentation":"

    Indicates the type of access that generated the finding.

    " + } + }, + "documentation":"

    The source of the finding. This indicates how the access that generated the finding is granted. It is populated for Amazon S3 bucket findings.

    " + }, + "FindingSourceDetail":{ + "type":"structure", + "members":{ + "accessPointArn":{ + "shape":"String", + "documentation":"

    The ARN of the access point that generated the finding.

    " + } + }, + "documentation":"

    Includes details about how the access that generated the finding is granted. This is populated for Amazon S3 bucket findings.

    " + }, + "FindingSourceList":{ + "type":"list", + "member":{"shape":"FindingSource"} + }, + "FindingSourceType":{ + "type":"string", + "enum":[ + "POLICY", + "BUCKET_ACL", + "S3_ACCESS_POINT" + ] + }, + "FindingStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "ARCHIVED", + "RESOLVED" + ] + }, + "FindingStatusUpdate":{ + "type":"string", + "enum":[ + "ACTIVE", + "ARCHIVED" + ] + }, + "FindingSummary":{ + "type":"structure", + "required":[ + "analyzedAt", + "condition", + "createdAt", + "id", + "resourceOwnerAccount", + "resourceType", + "status", + "updatedAt" + ], + "members":{ + "action":{ + "shape":"ActionList", + "documentation":"

    The action in the analyzed policy statement that an external principal has permission to use.

    " + }, + "analyzedAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the resource-based policy that generated the finding was analyzed.

    " + }, + "condition":{ + "shape":"ConditionKeyMap", + "documentation":"

    The condition in the analyzed policy statement that resulted in a finding.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the finding was created.

    " + }, + "error":{ + "shape":"String", + "documentation":"

    The error that resulted in an Error finding.

    " + }, + "id":{ + "shape":"FindingId", + "documentation":"

    The ID of the finding.

    " + }, + "isPublic":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the finding reports a resource that has a policy that allows public access.

    " + }, + "principal":{ + "shape":"PrincipalMap", + "documentation":"

    The external principal that has access to a resource within the zone of trust.

    " + }, + "resource":{ + "shape":"String", + "documentation":"

    The resource that the external principal has access to.

    " + }, + "resourceOwnerAccount":{ + "shape":"String", + "documentation":"

    The AWS account ID that owns the resource.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of the resource that the external principal has access to.

    " + }, + "sources":{ + "shape":"FindingSourceList", + "documentation":"

    The sources of the finding. This indicates how the access that generated the finding is granted. It is populated for Amazon S3 bucket findings.

    " + }, + "status":{ + "shape":"FindingStatus", + "documentation":"

    The status of the finding.

    " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

    The time at which the finding was most recently updated.

    " + } + }, + "documentation":"

    Contains information about a finding.

    " + }, + "FindingsList":{ + "type":"list", + "member":{"shape":"FindingSummary"} + }, + "GetAnalyzedResourceRequest":{ + "type":"structure", + "required":[ + "analyzerArn", + "resourceArn" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

    The ARN of the analyzer to retrieve information from.

    ", + "location":"querystring", + "locationName":"analyzerArn" + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource to retrieve information about.

    ", + "location":"querystring", + "locationName":"resourceArn" + } + }, + "documentation":"

    Retrieves an analyzed resource.

    " + }, + "GetAnalyzedResourceResponse":{ + "type":"structure", + "members":{ + "resource":{ + "shape":"AnalyzedResource", + "documentation":"

    An AnalyzedResource object that contains information that Access Analyzer found when it analyzed the resource.

    " + } + }, + "documentation":"

    The response to the request.

    " + }, + "GetAnalyzerRequest":{ + "type":"structure", + "required":["analyzerName"], + "members":{ + "analyzerName":{ + "shape":"Name", + "documentation":"

    The name of the analyzer to retrieve information about.

    ", + "location":"uri", + "locationName":"analyzerName" + } + }, + "documentation":"

    Retrieves an analyzer.

    " + }, + "GetAnalyzerResponse":{ + "type":"structure", + "required":["analyzer"], + "members":{ + "analyzer":{ + "shape":"AnalyzerSummary", + "documentation":"

    An AnalyzerSummary object that contains information about the analyzer.

    " + } + }, + "documentation":"

    The response to the request.

    " + }, + "GetArchiveRuleRequest":{ + "type":"structure", + "required":[ + "analyzerName", + "ruleName" + ], + "members":{ + "analyzerName":{ + "shape":"Name", + "documentation":"

    The name of the analyzer to retrieve rules from.

    ", + "location":"uri", + "locationName":"analyzerName" + }, + "ruleName":{ + "shape":"Name", + "documentation":"

    The name of the rule to retrieve.

    ", + "location":"uri", + "locationName":"ruleName" + } + }, + "documentation":"

    Retrieves an archive rule.

    " + }, + "GetArchiveRuleResponse":{ + "type":"structure", + "required":["archiveRule"], + "members":{ + "archiveRule":{"shape":"ArchiveRuleSummary"} + }, + "documentation":"

    The response to the request.

    " + }, + "GetFindingRequest":{ + "type":"structure", + "required":[ + "analyzerArn", + "id" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

    The ARN of the analyzer that generated the finding.

    ", + "location":"querystring", + "locationName":"analyzerArn" + }, + "id":{ + "shape":"FindingId", + "documentation":"

    The ID of the finding to retrieve.

    ", + "location":"uri", + "locationName":"id" + } + }, + "documentation":"

    Retrieves a finding.

    " + }, + "GetFindingResponse":{ + "type":"structure", + "members":{ + "finding":{ + "shape":"Finding", + "documentation":"

    A finding object that contains finding details.

    " + } + }, + "documentation":"

    The response to the request.

    " + }, + "InlineArchiveRule":{ + "type":"structure", + "required":[ + "filter", + "ruleName" + ], + "members":{ + "filter":{ + "shape":"FilterCriteriaMap", + "documentation":"

    The condition and values for a criterion.

    " + }, + "ruleName":{ + "shape":"Name", + "documentation":"

    The name of the rule.

    " + } + }, + "documentation":"

    A criterion statement in an archive rule. Each archive rule may have multiple criteria.

    " + }, + "InlineArchiveRulesList":{ + "type":"list", + "member":{"shape":"InlineArchiveRule"} + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

    The seconds to wait to retry.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    Internal server error.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ListAnalyzedResourcesRequest":{ + "type":"structure", + "required":["analyzerArn"], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

    The ARN of the analyzer to retrieve a list of analyzed resources from.

    " + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

    The maximum number of results to return in the response.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    A token used for pagination of results returned.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource.

    " + } + }, + "documentation":"

    Retrieves a list of resources that have been analyzed.

    " + }, + "ListAnalyzedResourcesResponse":{ + "type":"structure", + "required":["analyzedResources"], + "members":{ + "analyzedResources":{ + "shape":"AnalyzedResourcesList", + "documentation":"

    A list of resources that were analyzed.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    A token used for pagination of results returned.

    " + } + }, + "documentation":"

    The response to the request.

    " + }, + "ListAnalyzersRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"Integer", + "documentation":"

    The maximum number of results to return in the response.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    A token used for pagination of results returned.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "type":{ + "shape":"Type", + "documentation":"

    The type of analyzer.

    ", + "location":"querystring", + "locationName":"type" + } + }, + "documentation":"

    Retrieves a list of analyzers.

    " + }, + "ListAnalyzersResponse":{ + "type":"structure", + "required":["analyzers"], + "members":{ + "analyzers":{ + "shape":"AnalyzersList", + "documentation":"

    The analyzers retrieved.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    A token used for pagination of results returned.

    " + } + }, + "documentation":"

    The response to the request.

    " + }, + "ListArchiveRulesRequest":{ + "type":"structure", + "required":["analyzerName"], + "members":{ + "analyzerName":{ + "shape":"Name", + "documentation":"

    The name of the analyzer to retrieve rules from.

    ", + "location":"uri", + "locationName":"analyzerName" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

    The maximum number of results to return in the response.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    A token used for pagination of results returned.

    ", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

    Retrieves a list of archive rules created for the specified analyzer.

    " + }, + "ListArchiveRulesResponse":{ + "type":"structure", + "required":["archiveRules"], + "members":{ + "archiveRules":{ + "shape":"ArchiveRulesList", + "documentation":"

    A list of archive rules created for the specified analyzer.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    A token used for pagination of results returned.

    " + } + }, + "documentation":"

    The response to the request.

    " + }, + "ListFindingsRequest":{ + "type":"structure", + "required":["analyzerArn"], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

    The ARN of the analyzer to retrieve findings from.

    " + }, + "filter":{ + "shape":"FilterCriteriaMap", + "documentation":"

    A filter to match for the findings to return.

    " + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

    The maximum number of results to return in the response.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    A token used for pagination of results returned.

    " + }, + "sort":{ + "shape":"SortCriteria", + "documentation":"

    The sort order for the findings returned.

    " + } + }, + "documentation":"

    Retrieves a list of findings generated by the specified analyzer.

    " + }, + "ListFindingsResponse":{ + "type":"structure", + "required":["findings"], + "members":{ + "findings":{ + "shape":"FindingsList", + "documentation":"

    A list of findings retrieved from the analyzer that match the filter criteria specified, if any.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    A token used for pagination of results returned.

    " + } + }, + "documentation":"

    The response to the request.

    " + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    The ARN of the resource to retrieve tags from.

    ", + "location":"uri", + "locationName":"resourceArn" + } + }, + "documentation":"

    Retrieves a list of tags applied to the specified resource.

    " + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagsMap", + "documentation":"

    The tags that are applied to the specified resource.

    " + } + }, + "documentation":"

    The response to the request.

    " + }, + "Name":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[A-Za-z][A-Za-z0-9_.-]*$" + }, + "OrderBy":{ + "type":"string", + "enum":[ + "ASC", + "DESC" + ] + }, + "PrincipalMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ReasonCode":{ + "type":"string", + "enum":[ + "AWS_SERVICE_ACCESS_DISABLED", + "DELEGATED_ADMINISTRATOR_DEREGISTERED", + "ORGANIZATION_DELETED", + "SERVICE_LINKED_ROLE_CREATION_FAILED" + ] + }, + "ResourceArn":{ + "type":"string", + "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:.*$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of the resource.

    " + } + }, + "documentation":"

    The specified resource could not be found.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "AWS::S3::Bucket", + "AWS::IAM::Role", + "AWS::SQS::Queue", + "AWS::Lambda::Function", + "AWS::Lambda::LayerVersion", + "AWS::KMS::Key" + ] + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The resource ID.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The resource type.

    " + } + }, + "documentation":"

    Service quota exceeded error.

    ", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SharedViaList":{ + "type":"list", + "member":{"shape":"String"} + }, + "SortCriteria":{ + "type":"structure", + "members":{ + "attributeName":{ + "shape":"String", + "documentation":"

    The name of the attribute to sort on.

    " + }, + "orderBy":{ + "shape":"OrderBy", + "documentation":"

    The sort order, ascending or descending.

    " + } + }, + "documentation":"

    The criteria used to sort.

    " + }, + "StartResourceScanRequest":{ + "type":"structure", + "required":[ + "analyzerArn", + "resourceArn" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

    The ARN of the analyzer to use to scan the policies applied to the specified resource.

    " + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource to scan.

    " + } + }, + "documentation":"

    Starts a scan of the policies applied to the specified resource.

    " + }, + "StatusReason":{ + "type":"structure", + "required":["code"], + "members":{ + "code":{ + "shape":"ReasonCode", + "documentation":"

    The reason code for the current status of the analyzer.

    " + } + }, + "documentation":"

    Provides more details about the current status of the analyzer. For example, if the creation for the analyzer fails, a Failed status is displayed. For an analyzer with organization as the type, this failure can be due to an issue with creating the service-linked roles required in the member accounts of the AWS organization.

    " + }, + "String":{"type":"string"}, + "TagKeys":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    The ARN of the resource to add the tag to.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    The tags to add to the resource.

    " + } + }, + "documentation":"

    Adds a tag to the specified resource.

    " + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The response to the request.

    " + }, + "TagsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

    The seconds to wait to retry.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    Throttling limit exceeded error.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "Token":{"type":"string"}, + "Type":{ + "type":"string", + "enum":[ + "ACCOUNT", + "ORGANIZATION" + ] + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    The ARN of the resource to remove the tag from.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

    The keys for the tags to remove.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + }, + "documentation":"

    Removes a tag from the specified resource.

    " + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The response to the request.

    " + }, + "UpdateArchiveRuleRequest":{ + "type":"structure", + "required":[ + "analyzerName", + "filter", + "ruleName" + ], + "members":{ + "analyzerName":{ + "shape":"Name", + "documentation":"

    The name of the analyzer to update the archive rules for.

    ", + "location":"uri", + "locationName":"analyzerName" + }, + "clientToken":{ + "shape":"String", + "documentation":"

    A client token.

    ", + "idempotencyToken":true + }, + "filter":{ + "shape":"FilterCriteriaMap", + "documentation":"

    A filter to match for the rules to update. Only rules that match the filter are updated.

    " + }, + "ruleName":{ + "shape":"Name", + "documentation":"

    The name of the rule to update.

    ", + "location":"uri", + "locationName":"ruleName" + } + }, + "documentation":"

    Updates the specified archive rule.

    " + }, + "UpdateFindingsRequest":{ + "type":"structure", + "required":[ + "analyzerArn", + "status" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

    The ARN of the analyzer that generated the findings to update.

    " + }, + "clientToken":{ + "shape":"String", + "documentation":"

    A client token.

    ", + "idempotencyToken":true + }, + "ids":{ + "shape":"FindingIdList", + "documentation":"

    The IDs of the findings to update.

    " + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource identified in the finding.

    " + }, + "status":{ + "shape":"FindingStatusUpdate", + "documentation":"

    The state represents the action to take to update the finding status. Use ARCHIVED to change an Active finding to an Archived finding. Use ACTIVE to change an Archived finding to an Active finding.
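    For example, archiving a set of findings through the generated client might look like the following sketch; the analyzer ARN and finding IDs are placeholders, and the method and enum names assume the SDK's usual codegen conventions.

    import software.amazon.awssdk.services.accessanalyzer.AccessAnalyzerClient;
    import software.amazon.awssdk.services.accessanalyzer.model.FindingStatusUpdate;
    import software.amazon.awssdk.services.accessanalyzer.model.UpdateFindingsRequest;

    public class ArchiveFindingsSketch {
        public static void main(String[] args) {
            try (AccessAnalyzerClient client = AccessAnalyzerClient.create()) {
                client.updateFindings(UpdateFindingsRequest.builder()
                        .analyzerArn("arn:aws:access-analyzer:us-east-1:111122223333:analyzer/example-analyzer") // placeholder ARN
                        .ids("finding-id-1", "finding-id-2")   // hypothetical finding IDs
                        .status(FindingStatusUpdate.ARCHIVED)  // ACTIVE would restore them instead
                        .build());
            }
        }
    }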

    " + } + }, + "documentation":"

    Updates findings with the new values provided in the request.

    " + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    A list of fields that didn't validate.

    " + }, + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason for the exception.

    " + } + }, + "documentation":"

    Validation exception error.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "message", + "name" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    A message about the validation exception.

    " + }, + "name":{ + "shape":"String", + "documentation":"

    The name of the validation exception.

    " + } + }, + "documentation":"

    Contains information about a validation exception.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + }, + "ValueList":{ + "type":"list", + "member":{"shape":"String"}, + "max":20, + "min":1 + } + }, + "documentation":"

    AWS IAM Access Analyzer helps identify potential resource-access risks by enabling you to identify any policies that grant access to an external principal. It does this by using logic-based reasoning to analyze resource-based policies in your AWS environment. An external principal can be another AWS account, a root user, an IAM user or role, a federated user, an AWS service, or an anonymous user. This guide describes the AWS IAM Access Analyzer operations that you can call programmatically. For general information about Access Analyzer, see AWS IAM Access Analyzer in the IAM User Guide.

    To start using Access Analyzer, you first need to create an analyzer.
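    As a hedged illustration of that first step, the sketch below uses the Java SDK v2 client that would be generated from this model. The client and model class names (AccessAnalyzerClient, CreateAnalyzerRequest, Type) follow the SDK's standard codegen conventions; the analyzer name and Region are placeholder values, not part of this model.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.accessanalyzer.AccessAnalyzerClient;
    import software.amazon.awssdk.services.accessanalyzer.model.CreateAnalyzerRequest;
    import software.amazon.awssdk.services.accessanalyzer.model.CreateAnalyzerResponse;
    import software.amazon.awssdk.services.accessanalyzer.model.Type;

    public class CreateAnalyzerSketch {
        public static void main(String[] args) {
            // Build a client for the Region you want to monitor.
            try (AccessAnalyzerClient analyzer = AccessAnalyzerClient.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                // Only ACCOUNT analyzers are supported by this model version,
                // and at most one analyzer per account per Region.
                CreateAnalyzerResponse created = analyzer.createAnalyzer(CreateAnalyzerRequest.builder()
                        .analyzerName("example-analyzer")   // placeholder name
                        .type(Type.ACCOUNT)
                        .build());
                System.out.println("Created analyzer: " + created.arn());
            }
        }
    }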

    " +} diff --git a/services/acm/build.properties b/services/acm/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/acm/build.properties +++ b/services/acm/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/acm/pom.xml b/services/acm/pom.xml index 8643db1db307..b3b1b3037304 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + amp + AWS Java SDK :: Services :: Amp + The AWS Java SDK for Amp module holds the client classes that are used for + communicating with Amp. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.amp + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/amp/src/main/resources/codegen-resources/paginators-1.json b/services/amp/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..8a0ae8e2bf11 --- /dev/null +++ b/services/amp/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListWorkspaces": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "workspaces" + } + } +} diff --git a/services/amp/src/main/resources/codegen-resources/service-2.json b/services/amp/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..389ca8d4dfff --- /dev/null +++ b/services/amp/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,617 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-08-01", + "endpointPrefix":"aps", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Prometheus Service", + "serviceId":"amp", + "signatureVersion":"v4", + "signingName":"aps", + "uid":"amp-2020-08-01" + }, + "operations":{ + "CreateWorkspace":{ + "name":"CreateWorkspace", + "http":{ + "method":"POST", + "requestUri":"/workspaces", + "responseCode":202 + }, + "input":{"shape":"CreateWorkspaceRequest"}, + "output":{"shape":"CreateWorkspaceResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new AMP workspace.
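    A minimal sketch of calling this operation with the Java client that would be generated from this model; the client class name (AmpClient) and the alias value are assumptions made for illustration.

    import software.amazon.awssdk.services.amp.AmpClient;
    import software.amazon.awssdk.services.amp.model.CreateWorkspaceRequest;
    import software.amazon.awssdk.services.amp.model.CreateWorkspaceResponse;

    public class CreateWorkspaceSketch {
        public static void main(String[] args) {
            try (AmpClient amp = AmpClient.create()) {
                // clientToken is an idempotency token; the SDK typically populates it when omitted.
                CreateWorkspaceResponse created = amp.createWorkspace(CreateWorkspaceRequest.builder()
                        .alias("dev-metrics")   // optional, does not need to be unique
                        .build());
                System.out.println(created.workspaceId() + " -> " + created.status().statusCode());
            }
        }
    }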

    ", + "idempotent":true + }, + "DeleteWorkspace":{ + "name":"DeleteWorkspace", + "http":{ + "method":"DELETE", + "requestUri":"/workspaces/{workspaceId}", + "responseCode":202 + }, + "input":{"shape":"DeleteWorkspaceRequest"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an AMP workspace.

    ", + "idempotent":true + }, + "DescribeWorkspace":{ + "name":"DescribeWorkspace", + "http":{ + "method":"GET", + "requestUri":"/workspaces/{workspaceId}", + "responseCode":200 + }, + "input":{"shape":"DescribeWorkspaceRequest"}, + "output":{"shape":"DescribeWorkspaceResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes an existing AMP workspace.

    " + }, + "ListWorkspaces":{ + "name":"ListWorkspaces", + "http":{ + "method":"GET", + "requestUri":"/workspaces", + "responseCode":200 + }, + "input":{"shape":"ListWorkspacesRequest"}, + "output":{"shape":"ListWorkspacesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all AMP workspaces, including workspaces being created or deleted.

    " + }, + "UpdateWorkspaceAlias":{ + "name":"UpdateWorkspaceAlias", + "http":{ + "method":"POST", + "requestUri":"/workspaces/{workspaceId}/alias", + "responseCode":204 + }, + "input":{"shape":"UpdateWorkspaceAliasRequest"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Updates an AMP workspace alias.

    ", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + } + }, + "documentation":"

    User does not have sufficient access to perform this action.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "resourceId":{ + "shape":"String", + "documentation":"

    Identifier of the resource affected.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    Type of the resource affected.

    " + } + }, + "documentation":"

    Updating or deleting a resource can cause an inconsistent state.

    ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateWorkspaceRequest":{ + "type":"structure", + "members":{ + "alias":{ + "shape":"WorkspaceAlias", + "documentation":"

    An optional user-assigned alias for this workspace. This alias is for user reference and does not need to be unique.

    " + }, + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", + "idempotencyToken":true + } + }, + "documentation":"

    Represents the input of a CreateWorkspace operation.

    " + }, + "CreateWorkspaceResponse":{ + "type":"structure", + "required":[ + "arn", + "status", + "workspaceId" + ], + "members":{ + "arn":{ + "shape":"WorkspaceArn", + "documentation":"

    The ARN of the workspace that was just created.

    " + }, + "status":{ + "shape":"WorkspaceStatus", + "documentation":"

    The status of the workspace that was just created (usually CREATING).

    " + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The generated ID of the workspace that was just created.

    " + } + }, + "documentation":"

    Represents the output of a CreateWorkspace operation.

    " + }, + "DeleteWorkspaceRequest":{ + "type":"structure", + "required":["workspaceId"], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace to delete.

    ", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

    Represents the input of a DeleteWorkspace operation.

    " + }, + "DescribeWorkspaceRequest":{ + "type":"structure", + "required":["workspaceId"], + "members":{ + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace to describe.

    ", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

    Represents the input of a DescribeWorkspace operation.

    " + }, + "DescribeWorkspaceResponse":{ + "type":"structure", + "required":["workspace"], + "members":{ + "workspace":{ + "shape":"WorkspaceDescription", + "documentation":"

    The properties of the selected workspace.

    " + } + }, + "documentation":"

    Represents the output of a DescribeWorkspace operation.

    " + }, + "IdempotencyToken":{ + "type":"string", + "documentation":"

    An identifier used to ensure the idempotency of a write request.

    ", + "max":64, + "min":1, + "pattern":"[!-~]+" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

    Advice to clients on when the call can be safely retried.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    Unexpected error during processing of request.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ListWorkspacesRequest":{ + "type":"structure", + "members":{ + "alias":{ + "shape":"WorkspaceAlias", + "documentation":"

    Optional filter for workspace alias. Only the workspaces with aliases that begin with this value will be returned.

    ", + "location":"querystring", + "locationName":"alias" + }, + "maxResults":{ + "shape":"ListWorkspacesRequestMaxResultsInteger", + "documentation":"

    Maximum results to return in response (default=100, maximum=1000).

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    Pagination token to request the next page in a paginated list. This token is obtained from the output of the previous ListWorkspaces request.
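    Because ListWorkspaces is declared in paginators-1.json, the generated Java client also exposes a paginator that handles this token automatically. A sketch, assuming the conventional listWorkspacesPaginator method and ListWorkspacesIterable class names:

    import software.amazon.awssdk.services.amp.AmpClient;
    import software.amazon.awssdk.services.amp.model.ListWorkspacesRequest;
    import software.amazon.awssdk.services.amp.paginators.ListWorkspacesIterable;

    public class ListWorkspacesSketch {
        public static void main(String[] args) {
            try (AmpClient amp = AmpClient.create()) {
                // The iterable fetches additional pages lazily, passing nextToken for you.
                ListWorkspacesIterable pages = amp.listWorkspacesPaginator(ListWorkspacesRequest.builder().build());
                pages.workspaces().forEach(ws ->
                        System.out.println(ws.workspaceId() + " (" + ws.alias() + ")"));
            }
        }
    }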

    ", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

    Represents the input of a ListWorkspaces operation.

    " + }, + "ListWorkspacesRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListWorkspacesResponse":{ + "type":"structure", + "required":["workspaces"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    Pagination token to use when requesting the next page in this list.

    " + }, + "workspaces":{ + "shape":"WorkspaceSummaryList", + "documentation":"

    The list of existing workspaces, including those undergoing creation or deletion.

    " + } + }, + "documentation":"

    Represents the output of a ListWorkspaces operation.

    " + }, + "PaginationToken":{ + "type":"string", + "documentation":"

    A token used to access the next page in a paginated result set.

    " + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "resourceId":{ + "shape":"String", + "documentation":"

    Identifier of the resource affected.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    Type of the resource affected.

    " + } + }, + "documentation":"

    Request references a resource which does not exist.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "quotaCode", + "resourceId", + "resourceType", + "serviceCode" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

    Service Quotas requirement to identify originating quota.

    " + }, + "resourceId":{ + "shape":"String", + "documentation":"

    Identifier of the resource affected.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    Type of the resource affected.

    " + }, + "serviceCode":{ + "shape":"String", + "documentation":"

    Service Quotas requirement to identify originating service.

    " + } + }, + "documentation":"

    Request would cause a service quota to be exceeded.

    ", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

    Service Quotas requirement to identify originating quota.

    " + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

    Advice to clients on when the call can be safely retried.

    ", + "location":"header", + "locationName":"Retry-After" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

    Service Quotas requirement to identify originating service.

    " + } + }, + "documentation":"

    Request was denied due to request throttling.
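    The SDK's default retry policy generally retries throttled calls on its own, but the Retry-After value is also surfaced on the exception for callers that implement their own backoff. A hedged sketch (the retryAfterSeconds accessor name assumes the usual mapping of the model member to a getter):

    import software.amazon.awssdk.services.amp.AmpClient;
    import software.amazon.awssdk.services.amp.model.DescribeWorkspaceRequest;
    import software.amazon.awssdk.services.amp.model.ThrottlingException;

    public class ThrottlingSketch {
        public static void main(String[] args) {
            try (AmpClient amp = AmpClient.create()) {
                try {
                    amp.describeWorkspace(DescribeWorkspaceRequest.builder()
                            .workspaceId("ws-example-id")   // placeholder workspace ID
                            .build());
                } catch (ThrottlingException e) {
                    Integer wait = e.retryAfterSeconds();   // null if the Retry-After header was absent
                    System.err.println("Throttled; retry after " + wait + " seconds");
                }
            }
        }
    }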

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "Timestamp":{"type":"timestamp"}, + "UpdateWorkspaceAliasRequest":{ + "type":"structure", + "required":["workspaceId"], + "members":{ + "alias":{ + "shape":"WorkspaceAlias", + "documentation":"

    The new alias of the workspace.

    " + }, + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace being updated.

    ", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

    Represents the input of an UpdateWorkspaceAlias operation.

    " + }, + "Uri":{ + "type":"string", + "max":1024, + "min":1 + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    The field that caused the error, if applicable. If more than one field caused the error, pick one and elaborate in the message.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    Reason the request failed validation.

    " + } + }, + "documentation":"

    The input fails to satisfy the constraints specified by an AWS service.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "message", + "name" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Message describing why the field failed validation.

    " + }, + "name":{ + "shape":"String", + "documentation":"

    The field name.

    " + } + }, + "documentation":"

    Stores information about a field passed inside a request that resulted in an exception.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"}, + "documentation":"

    A list of fields.

    " + }, + "ValidationExceptionReason":{ + "type":"string", + "documentation":"

    Possible reasons a request failed validation.

    ", + "enum":[ + "UNKNOWN_OPERATION", + "CANNOT_PARSE", + "FIELD_VALIDATION_FAILED", + "OTHER" + ] + }, + "WorkspaceAlias":{ + "type":"string", + "documentation":"

    A user-assigned workspace alias.

    ", + "max":100, + "min":1 + }, + "WorkspaceArn":{ + "type":"string", + "documentation":"

    An ARN identifying a Workspace.

    " + }, + "WorkspaceDescription":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "status", + "workspaceId" + ], + "members":{ + "alias":{ + "shape":"WorkspaceAlias", + "documentation":"

    Alias of this workspace.

    " + }, + "arn":{ + "shape":"WorkspaceArn", + "documentation":"

    The Amazon Resource Name (ARN) of this workspace.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time when the workspace was created.

    " + }, + "prometheusEndpoint":{ + "shape":"Uri", + "documentation":"

    Prometheus endpoint URI.

    " + }, + "status":{ + "shape":"WorkspaceStatus", + "documentation":"

    The status of this workspace.

    " + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    Unique string identifying this workspace.

    " + } + }, + "documentation":"

    Represents the properties of a workspace.

    " + }, + "WorkspaceId":{ + "type":"string", + "documentation":"

    A workspace ID.

    ", + "max":64, + "min":1, + "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" + }, + "WorkspaceStatus":{ + "type":"structure", + "required":["statusCode"], + "members":{ + "statusCode":{ + "shape":"WorkspaceStatusCode", + "documentation":"

    Status code of this workspace.

    " + } + }, + "documentation":"

    Represents the status of a workspace.

    " + }, + "WorkspaceStatusCode":{ + "type":"string", + "documentation":"

    State of a workspace.

    ", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETING", + "CREATION_FAILED" + ] + }, + "WorkspaceSummary":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "status", + "workspaceId" + ], + "members":{ + "alias":{ + "shape":"WorkspaceAlias", + "documentation":"

    Alias of this workspace.

    " + }, + "arn":{ + "shape":"WorkspaceArn", + "documentation":"

    The Amazon Resource Name (ARN) of this workspace.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time when the workspace was created.

    " + }, + "status":{ + "shape":"WorkspaceStatus", + "documentation":"

    The status of this workspace.

    " + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    Unique string identifying this workspace.

    " + } + }, + "documentation":"

    Represents a summary of the properties of a workspace.

    " + }, + "WorkspaceSummaryList":{ + "type":"list", + "member":{"shape":"WorkspaceSummary"}, + "documentation":"

    A list of workspace summaries.

    " + } + }, + "documentation":"

    Amazon Managed Service for Prometheus

    " +} diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 0d22303c49f2..cf8bfe6e0e08 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + amplifybackend + AWS Java SDK :: Services :: Amplify Backend + The AWS Java SDK for Amplify Backend module holds the client classes that are used for + communicating with Amplify Backend. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.amplifybackend + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/amplifybackend/src/main/resources/codegen-resources/paginators-1.json b/services/amplifybackend/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..40304c7d438f --- /dev/null +++ b/services/amplifybackend/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListBackendJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Jobs" + } + } +} diff --git a/services/amplifybackend/src/main/resources/codegen-resources/service-2.json b/services/amplifybackend/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..9c9d6ff4cab1 --- /dev/null +++ b/services/amplifybackend/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3946 @@ +{ + "metadata": { + "apiVersion": "2020-08-11", + "endpointPrefix": "amplifybackend", + "signingName": "amplifybackend", + "serviceFullName": "AmplifyBackend", + "serviceId": "AmplifyBackend", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "amplifybackend-2020-08-11", + "signatureVersion": "v4" + }, + "operations": { + "CloneBackend": { + "name": "CloneBackend", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/environments/{backendEnvironmentName}/clone", + "responseCode": 200 + }, + "input": { + "shape": "CloneBackendRequest" + }, + "output": { + "shape": "CloneBackendResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    This operation clones an existing backend.

    " + }, + "CreateBackend": { + "name": "CreateBackend", + "http": { + "method": "POST", + "requestUri": "/backend", + "responseCode": 200 + }, + "input": { + "shape": "CreateBackendRequest" + }, + "output": { + "shape": "CreateBackendResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    This operation creates a backend for an Amplify app. Backends are automatically created at the time of app creation.

    " + }, + "CreateBackendAPI": { + "name": "CreateBackendAPI", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api", + "responseCode": 200 + }, + "input": { + "shape": "CreateBackendAPIRequest" + }, + "output": { + "shape": "CreateBackendAPIResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Creates a new backend API resource.

    " + }, + "CreateBackendAuth": { + "name": "CreateBackendAuth", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/auth", + "responseCode": 200 + }, + "input": { + "shape": "CreateBackendAuthRequest" + }, + "output": { + "shape": "CreateBackendAuthResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Creates a new backend authentication resource.

    " + }, + "CreateBackendConfig": { + "name": "CreateBackendConfig", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/config", + "responseCode": 200 + }, + "input": { + "shape": "CreateBackendConfigRequest" + }, + "output": { + "shape": "CreateBackendConfigResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Creates a config object for a backend.

    " + }, + "CreateToken": { + "name": "CreateToken", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/challenge", + "responseCode": 200 + }, + "input": { + "shape": "CreateTokenRequest" + }, + "output": { + "shape": "CreateTokenResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Generates a one-time challenge code to authenticate a user into your Amplify Admin UI.

    " + }, + "DeleteBackend": { + "name": "DeleteBackend", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/environments/{backendEnvironmentName}/remove", + "responseCode": 200 + }, + "input": { + "shape": "DeleteBackendRequest" + }, + "output": { + "shape": "DeleteBackendResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Removes an existing environment from your Amplify project.

    " + }, + "DeleteBackendAPI": { + "name": "DeleteBackendAPI", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api/{backendEnvironmentName}/remove", + "responseCode": 200 + }, + "input": { + "shape": "DeleteBackendAPIRequest" + }, + "output": { + "shape": "DeleteBackendAPIResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Deletes an existing backend API resource.

    " + }, + "DeleteBackendAuth": { + "name": "DeleteBackendAuth", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/auth/{backendEnvironmentName}/remove", + "responseCode": 200 + }, + "input": { + "shape": "DeleteBackendAuthRequest" + }, + "output": { + "shape": "DeleteBackendAuthResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Deletes an existing backend authentication resource.

    " + }, + "DeleteToken": { + "name": "DeleteToken", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/challenge/{sessionId}/remove", + "responseCode": 200 + }, + "input": { + "shape": "DeleteTokenRequest" + }, + "output": { + "shape": "DeleteTokenResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Deletes the challenge token based on the given appId and sessionId.

    " + }, + "GenerateBackendAPIModels": { + "name": "GenerateBackendAPIModels", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api/{backendEnvironmentName}/generateModels", + "responseCode": 200 + }, + "input": { + "shape": "GenerateBackendAPIModelsRequest" + }, + "output": { + "shape": "GenerateBackendAPIModelsResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Generates a model schema for an existing backend API resource.

    " + }, + "GetBackend": { + "name": "GetBackend", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/details", + "responseCode": 200 + }, + "input": { + "shape": "GetBackendRequest" + }, + "output": { + "shape": "GetBackendResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Provides project-level details for your Amplify UI project.

    " + }, + "GetBackendAPI": { + "name": "GetBackendAPI", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api/{backendEnvironmentName}/details", + "responseCode": 200 + }, + "input": { + "shape": "GetBackendAPIRequest" + }, + "output": { + "shape": "GetBackendAPIResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Gets the details for a backend API.

    " + }, + "GetBackendAPIModels": { + "name": "GetBackendAPIModels", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api/{backendEnvironmentName}/getModels", + "responseCode": 200 + }, + "input": { + "shape": "GetBackendAPIModelsRequest" + }, + "output": { + "shape": "GetBackendAPIModelsResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Generates a model schema for an existing backend API resource.

    " + }, + "GetBackendAuth": { + "name": "GetBackendAuth", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/auth/{backendEnvironmentName}/details", + "responseCode": 200 + }, + "input": { + "shape": "GetBackendAuthRequest" + }, + "output": { + "shape": "GetBackendAuthResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Gets backend auth details.

    " + }, + "GetBackendJob": { + "name": "GetBackendJob", + "http": { + "method": "GET", + "requestUri": "/backend/{appId}/job/{backendEnvironmentName}/{jobId}", + "responseCode": 200 + }, + "input": { + "shape": "GetBackendJobRequest" + }, + "output": { + "shape": "GetBackendJobResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Returns information about a specific job.

    " + }, + "GetToken": { + "name": "GetToken", + "http": { + "method": "GET", + "requestUri": "/backend/{appId}/challenge/{sessionId}", + "responseCode": 200 + }, + "input": { + "shape": "GetTokenRequest" + }, + "output": { + "shape": "GetTokenResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Gets the challenge token based on the given appId and sessionId.

    " + }, + "ListBackendJobs": { + "name": "ListBackendJobs", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/job/{backendEnvironmentName}", + "responseCode": 200 + }, + "input": { + "shape": "ListBackendJobsRequest" + }, + "output": { + "shape": "ListBackendJobsResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Lists the jobs for the backend of an Amplify app.

    " + }, + "RemoveAllBackends": { + "name": "RemoveAllBackends", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/remove", + "responseCode": 200 + }, + "input": { + "shape": "RemoveAllBackendsRequest" + }, + "output": { + "shape": "RemoveAllBackendsResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Removes all backend environments from your Amplify project.

    " + }, + "RemoveBackendConfig": { + "name": "RemoveBackendConfig", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/config/remove", + "responseCode": 200 + }, + "input": { + "shape": "RemoveBackendConfigRequest" + }, + "output": { + "shape": "RemoveBackendConfigResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Removes the AWS resources that are required to access the Amplify Admin UI.

    " + }, + "UpdateBackendAPI": { + "name": "UpdateBackendAPI", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api/{backendEnvironmentName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateBackendAPIRequest" + }, + "output": { + "shape": "UpdateBackendAPIResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Updates an existing backend API resource.

    " + }, + "UpdateBackendAuth": { + "name": "UpdateBackendAuth", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/auth/{backendEnvironmentName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateBackendAuthRequest" + }, + "output": { + "shape": "UpdateBackendAuthResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Updates an existing backend authentication resource.

    " + }, + "UpdateBackendConfig": { + "name": "UpdateBackendConfig", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/config/update", + "responseCode": 200 + }, + "input": { + "shape": "UpdateBackendConfigRequest" + }, + "output": { + "shape": "UpdateBackendConfigResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Updates the AWS resources that are required to access the Amplify Admin UI.

    " + }, + "UpdateBackendJob": { + "name": "UpdateBackendJob", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/job/{backendEnvironmentName}/{jobId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateBackendJobRequest" + }, + "output": { + "shape": "UpdateBackendJobResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

    504 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + } + ], + "documentation": "

    Updates a specific job.

    " + } + }, + "shapes": { + "AuthResources": { + "type": "string", + "enum": [ + "USER_POOL_ONLY", + "IDENTITY_POOL_AND_USER_POOL" + ] + }, + "BackendAPIAppSyncAuthSettings": { + "type": "structure", + "members": { + "CognitoUserPoolId": { + "shape": "__string", + "locationName": "cognitoUserPoolId", + "documentation": "

    The Amazon Cognito user pool ID, if Amazon Cognito is used as an authentication setting to access your data models.

    " + }, + "Description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    The API key description for API_KEY, if it is used as an authentication mechanism to access your data models.

    " + }, + "ExpirationTime": { + "shape": "__double", + "locationName": "expirationTime", + "documentation": "

    The API key expiration time for API_KEY, if it is used as an authentication mechanism to access your data models.

    " + }, + "OpenIDAuthTTL": { + "shape": "__string", + "locationName": "openIDAuthTTL", + "documentation": "

    The expiry time for the OpenID authentication mechanism.

    " + }, + "OpenIDClientId": { + "shape": "__string", + "locationName": "openIDClientId", + "documentation": "

    The client ID for OpenID, if OpenID is used as an authentication setting to access your data models.

    " + }, + "OpenIDIatTTL": { + "shape": "__string", + "locationName": "openIDIatTTL", + "documentation": "

    The expiry time for the OpenID authentication mechanism.

    " + }, + "OpenIDIssueURL": { + "shape": "__string", + "locationName": "openIDIssueURL", + "documentation": "

    The OpenID issuer URL, if OpenID is used as an authentication setting to access your data models.

    " + }, + "OpenIDProviderName": { + "shape": "__string", + "locationName": "openIDProviderName", + "documentation": "

    The OpenID provider name, if OpenID is used as an authentication mechanism to access your data models.

    " + } + }, + "documentation": "

    The authentication settings for accessing provisioned data models in your Amplify project.

    " + }, + "BackendAPIAuthType": { + "type": "structure", + "members": { + "Mode": { + "shape": "Mode", + "locationName": "mode", + "documentation": "

    Describes the authentication mode.

    " + }, + "Settings": { + "shape": "BackendAPIAppSyncAuthSettings", + "locationName": "settings", + "documentation": "

    Describes settings for the authentication mode.

    " + } + }, + "documentation": "

    Describes the auth types for your configured data models.

    " + }, + "BackendAPICodegenReqObj": { + "type": "structure", + "members": { + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request object for this operation.

    ", + "required": [ + "ResourceName" + ] + }, + "BackendAPICodegenRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + }, + "documentation": "

    The response object sent when a backend is created.

    ", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "BackendAPIConflictResolution": { + "type": "structure", + "members": { + "ResolutionStrategy": { + "shape": "ResolutionStrategy", + "locationName": "resolutionStrategy", + "documentation": "

    The strategy for conflict resolution.

    " + } + }, + "documentation": "

    Describes the conflict resolution configuration for the data model configured in your Amplify project.

    " + }, + "BackendAPIReqObj": { + "type": "structure", + "members": { + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    Defines the resource configuration for the data model in your Amplify project.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request object for this operation.

    ", + "required": [ + "ResourceName" + ] + }, + "BackendAPIResourceConfig": { + "type": "structure", + "members": { + "AdditionalAuthTypes": { + "shape": "ListOfBackendAPIAuthType", + "locationName": "additionalAuthTypes", + "documentation": "

    Additional authentication methods used to interact with your data models.

    " + }, + "ApiName": { + "shape": "__string", + "locationName": "apiName", + "documentation": "

    The API name used to interact with the data model, configured as a part of the Amplify project.

    " + }, + "ConflictResolution": { + "shape": "BackendAPIConflictResolution", + "locationName": "conflictResolution", + "documentation": "

    The conflict resolution strategy for your data stored in the data models.

    " + }, + "DefaultAuthType": { + "shape": "BackendAPIAuthType", + "locationName": "defaultAuthType", + "documentation": "

    The default authentication type for interacting with the configured data models in your Amplify project.

    " + }, + "Service": { + "shape": "__string", + "locationName": "service", + "documentation": "

    The service used to provision and interact with the data model.

    " + }, + "TransformSchema": { + "shape": "__string", + "locationName": "transformSchema", + "documentation": "

    The definition of the data model in the annotated transform of the GraphQL schema.

    " + } + }, + "documentation": "

    The resource configuration for the data model, configured as a part of the Amplify project.

    " + }, + "BackendAPIRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + }, + "documentation": "

    The response object sent when a backend is created.

    ", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "BackendAuthRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + }, + "documentation": "

    The response object for this operation.

    ", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "BackendAuthSocialProviderConfig": { + "type": "structure", + "members": { + "ClientId": { + "shape": "__string", + "locationName": "client_id", + "documentation": "

    Describes the client_id that can be obtained from the third-party social federation provider.

    " + }, + "ClientSecret": { + "shape": "__string", + "locationName": "client_secret", + "documentation": "

    Describes the client_secret that can be obtained from third-party social federation providers.

    " + } + }, + "documentation": "

    Describes third-party social federation configurations for allowing your app users to sign in using OAuth.

    " + }, + "BackendConfigRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendManagerAppId": { + "shape": "__string", + "locationName": "backendManagerAppId", + "documentation": "

    The app ID for the backend manager.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "LoginAuthConfig": { + "shape": "LoginAuthConfigReqObj", + "locationName": "loginAuthConfig", + "documentation": "

    Describes the Amazon Cognito configurations for the Admin UI auth resource to log in with.

    " + } + }, + "documentation": "

    The response object for this operation.

    " + }, + "BackendJobReqObj": { + "type": "structure", + "members": { + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    Filters the list of response objects to include only those with the specified operation name.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    Filters the list of response objects to include only those with the specified status.

    " + } + }, + "documentation": "

    The request object for this operation.

    " + }, + "BackendJobRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "CreateTime": { + "shape": "__string", + "locationName": "createTime", + "documentation": "

    The time when the job was created.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + }, + "UpdateTime": { + "shape": "__string", + "locationName": "updateTime", + "documentation": "

    The time when the job was last updated.

    " + } + }, + "documentation": "

    The response object for this operation.

    ", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "BadRequestException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    An error message indicating that the request failed.

    " + } + }, + "documentation": "

    An error returned if a request is not formed properly.

    ", + "exception": true, + "error": { + "httpStatusCode": 400 + } + }, + "CloneBackendReqObj": { + "type": "structure", + "members": { + "TargetEnvironmentName": { + "shape": "__string", + "locationName": "targetEnvironmentName", + "documentation": "

    The name of the destination backend environment to be created.

    " + } + }, + "documentation": "

    The request object for this operation.

    ", + "required": [ + "TargetEnvironmentName" + ] + }, + "CloneBackendRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "TargetEnvironmentName": { + "shape": "__string", + "locationName": "targetEnvironmentName", + "documentation": "

    The name of the destination backend environment to be created.

    " + } + }, + "documentation": "

    The request body for CloneBackend.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "TargetEnvironmentName" + ] + }, + "CloneBackendRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + }, + "documentation": "

    The response object sent when a backend is created.

    ", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "CloneBackendResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "CreateBackendAPIReqObj": { + "type": "structure", + "members": { + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for this request.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request object for this operation.

    ", + "required": [ + "ResourceName", + "BackendEnvironmentName", + "ResourceConfig" + ] + }, + "CreateBackendAPIRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for this request.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request body for CreateBackendAPI.

    ", + "required": [ + "AppId", + "ResourceName", + "BackendEnvironmentName", + "ResourceConfig" + ] + }, + "CreateBackendAPIResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "CreateBackendAuthForgotPasswordConfig": { + "type": "structure", + "members": { + "DeliveryMethod": { + "shape": "DeliveryMethod", + "locationName": "deliveryMethod", + "documentation": "

    Describes which method to use (either SMS or email) to deliver messages to app users who want to recover their password.

    " + }, + "EmailSettings": { + "shape": "EmailSettings", + "locationName": "emailSettings", + "documentation": "

    The configuration for the email sent when an app user forgets their password.

    " + }, + "SmsSettings": { + "shape": "SmsSettings", + "locationName": "smsSettings", + "documentation": "

    The configuration for the SMS message sent when an app user forgets their password.

    " + } + }, + "documentation": "

    Describes the forgot password policy for authenticating into the Amplify app.

    ", + "required": [ + "DeliveryMethod" + ] + }, + "CreateBackendAuthIdentityPoolConfig": { + "type": "structure", + "members": { + "IdentityPoolName": { + "shape": "__string", + "locationName": "identityPoolName", + "documentation": "

    Name of the identity pool used for authorization.

    " + }, + "UnauthenticatedLogin": { + "shape": "__boolean", + "locationName": "unauthenticatedLogin", + "documentation": "

    Set to true or false based on whether you want to enable guest authorization to your Amplify app.

    " + } + }, + "documentation": "

    Describes authorization configurations for the auth resources, configured as a part of your Amplify project.

    ", + "required": [ + "UnauthenticatedLogin", + "IdentityPoolName" + ] + }, + "CreateBackendAuthMFAConfig": { + "type": "structure", + "members": { + "MFAMode": { + "shape": "MFAMode", + "documentation": "

    Describes whether MFA should be [ON, OFF, or OPTIONAL] for authentication in your Amplify project.

    " + }, + "Settings": { + "shape": "Settings", + "locationName": "settings", + "documentation": "

    Describes the configuration settings and methods for your Amplify app users to use MFA.

    " + } + }, + "documentation": "

    Describes whether to apply multi-factor authentication (MFA) policies for your Amazon Cognito user pool that's configured as a part of your Amplify project.

    ", + "required": [ + "MFAMode" + ] + }, + "CreateBackendAuthOAuthConfig": { + "type": "structure", + "members": { + "DomainPrefix": { + "shape": "__string", + "locationName": "domainPrefix", + "documentation": "

    The domain prefix for your Amplify app.

    " + }, + "OAuthGrantType": { + "shape": "OAuthGrantType", + "locationName": "oAuthGrantType", + "documentation": "

    The OAuth grant type that you use to allow app users to authenticate from your Amplify app.

    " + }, + "OAuthScopes": { + "shape": "ListOfOAuthScopesElement", + "locationName": "oAuthScopes", + "documentation": "

    List of OAuth-related flows that allow your app users to authenticate from your Amplify app.

    " + }, + "RedirectSignInURIs": { + "shape": "ListOf__string", + "locationName": "redirectSignInURIs", + "documentation": "

    The redirected URI for signing in to your Amplify app.

    " + }, + "RedirectSignOutURIs": { + "shape": "ListOf__string", + "locationName": "redirectSignOutURIs", + "documentation": "

    Redirect URLs that OAuth uses when a user signs out of an Amplify app.

    " + }, + "SocialProviderSettings": { + "shape": "SocialProviderSettings", + "locationName": "socialProviderSettings", + "documentation": "

    The settings for using social identity providers for access to your Amplify app.

    " + } + }, + "documentation": "

    Creates the OAuth configuration for your Amplify project.

    ", + "required": [ + "RedirectSignOutURIs", + "RedirectSignInURIs", + "OAuthGrantType", + "OAuthScopes" + ] + }, + "CreateBackendAuthPasswordPolicyConfig": { + "type": "structure", + "members": { + "AdditionalConstraints": { + "shape": "ListOfAdditionalConstraintsElement", + "locationName": "additionalConstraints", + "documentation": "

    Additional constraints for the password used to access the backend of your Amplify project.

    " + }, + "MinimumLength": { + "shape": "__double", + "locationName": "minimumLength", + "documentation": "

    The minimum length of the password used to access the backend of your Amplify project.

    " + } + }, + "documentation": "

    The password policy configuration for the backend of your Amplify project.

    ", + "required": [ + "MinimumLength" + ] + }, + "CreateBackendAuthReqObj": { + "type": "structure", + "members": { + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceConfig": { + "shape": "CreateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for this request object.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request object for this operation.

    ", + "required": [ + "ResourceName", + "BackendEnvironmentName", + "ResourceConfig" + ] + }, + "CreateBackendAuthRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceConfig": { + "shape": "CreateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for this request object.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request body for CreateBackendAuth.

    ", + "required": [ + "AppId", + "ResourceName", + "BackendEnvironmentName", + "ResourceConfig" + ] + }, + "CreateBackendAuthResourceConfig": { + "type": "structure", + "members": { + "AuthResources": { + "shape": "AuthResources", + "locationName": "authResources", + "documentation": "

    Defines whether you want to configure only authentication or both authentication and authorization settings.

    " + }, + "IdentityPoolConfigs": { + "shape": "CreateBackendAuthIdentityPoolConfig", + "locationName": "identityPoolConfigs", + "documentation": "

    Describes the authorization configuration for the Amazon Cognito identity pool, provisioned as a part of the auth resource in your Amplify project.

    " + }, + "Service": { + "shape": "Service", + "locationName": "service", + "documentation": "

    Defines the service name to use when configuring an authentication resource in your Amplify project.

    " + }, + "UserPoolConfigs": { + "shape": "CreateBackendAuthUserPoolConfig", + "locationName": "userPoolConfigs", + "documentation": "

    Describes the authentication configuration for the Amazon Cognito user pool, provisioned as a part of the auth resource in your Amplify project.

    " + } + }, + "documentation": "

    Defines the resource configuration when creating an auth resource in your Amplify project.

    ", + "required": [ + "AuthResources", + "UserPoolConfigs", + "Service" + ] + }, + "CreateBackendAuthResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "CreateBackendAuthUserPoolConfig": { + "type": "structure", + "members": { + "ForgotPassword": { + "shape": "CreateBackendAuthForgotPasswordConfig", + "locationName": "forgotPassword", + "documentation": "

    Describes the forgotten password policy for your Amazon Cognito user pool, configured as a part of your Amplify project.

    " + }, + "Mfa": { + "shape": "CreateBackendAuthMFAConfig", + "locationName": "mfa", + "documentation": "

    Describes whether to apply multi-factor authentication (MFA) policies for your Amazon Cognito user pool that's configured as a part of your Amplify project.

    " + }, + "OAuth": { + "shape": "CreateBackendAuthOAuthConfig", + "locationName": "oAuth", + "documentation": "

    Describes the OAuth policy and rules for your Amazon Cognito user pool, configured as a part of your Amplify project.

    " + }, + "PasswordPolicy": { + "shape": "CreateBackendAuthPasswordPolicyConfig", + "locationName": "passwordPolicy", + "documentation": "

    Describes the password policy for your Amazon Cognito user pool, configured as a part of your Amplify project.

    " + }, + "RequiredSignUpAttributes": { + "shape": "ListOfRequiredSignUpAttributesElement", + "locationName": "requiredSignUpAttributes", + "documentation": "

    The required attributes to sign up new users in the Amazon Cognito user pool.

    " + }, + "SignInMethod": { + "shape": "SignInMethod", + "locationName": "signInMethod", + "documentation": "

    Describes the sign-in methods that your Amplify app users use to log in using the Amazon Cognito user pool that's configured as a part of your Amplify project.

    " + }, + "UserPoolName": { + "shape": "__string", + "locationName": "userPoolName", + "documentation": "

    The Amazon Cognito user pool name.

    " + } + }, + "documentation": "

    Describes the Amazon Cognito user pool configuration for the auth resource to be configured for your Amplify project.

    ", + "required": [ + "RequiredSignUpAttributes", + "SignInMethod", + "UserPoolName" + ] + }, + "CreateBackendConfigReqObj": { + "type": "structure", + "members": { + "BackendManagerAppId": { + "shape": "__string", + "locationName": "backendManagerAppId", + "documentation": "

    The app ID for the backend manager.

    " + } + }, + "documentation": "

    The request object for this operation.

    " + }, + "CreateBackendConfigRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendManagerAppId": { + "shape": "__string", + "locationName": "backendManagerAppId", + "documentation": "

    The app ID for the backend manager.

    " + } + }, + "documentation": "

    The request body for CreateBackendConfig.

    ", + "required": [ + "AppId" + ] + }, + "CreateBackendConfigRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + }, + "documentation": "

    The response object for this operation.

    ", + "required": [ + "AppId" + ] + }, + "CreateBackendConfigResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "CreateBackendReqObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "AppName": { + "shape": "__string", + "locationName": "appName", + "documentation": "

    The name of the app.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceConfig": { + "shape": "ResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for the backend creation request.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of the resource.

    " + } + }, + "documentation": "

    The request object for this operation.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "AppName" + ] + }, + "CreateBackendRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "AppName": { + "shape": "__string", + "locationName": "appName", + "documentation": "

    The name of the app.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceConfig": { + "shape": "ResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for the backend creation request.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of the resource.

    " + } + }, + "documentation": "

    The request body for CreateBackend.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "AppName" + ] + }, + "CreateBackendRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + }, + "documentation": "

    The response object sent when a backend is created.

    ", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "CreateBackendResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "CreateTokenRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + } + }, + "required": [ + "AppId" + ] + }, + "CreateTokenRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "ChallengeCode": { + "shape": "__string", + "locationName": "challengeCode", + "documentation": "

    One-time challenge code for authenticating into the Amplify Admin UI.

    " + }, + "SessionId": { + "shape": "__string", + "locationName": "sessionId", + "documentation": "

    A unique ID provided when creating a new challenge token.

    " + }, + "Ttl": { + "shape": "__string", + "locationName": "ttl", + "documentation": "

    The expiry time for the one-time generated token code.

    " + } + }, + "documentation": "

    The response object for this operation.
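    Taken together, the challenge code, session ID, and TTL above describe the one-time login flow for the Amplify Admin UI. The sketch below shows one way to drive that flow with the generated AWS SDK for Java v2 client; the client class follows the usual codegen naming, and the app ID is a placeholder, not part of this model.

    import software.amazon.awssdk.services.amplifybackend.AmplifyBackendClient;
    import software.amazon.awssdk.services.amplifybackend.model.CreateTokenRequest;
    import software.amazon.awssdk.services.amplifybackend.model.CreateTokenResponse;
    import software.amazon.awssdk.services.amplifybackend.model.DeleteTokenRequest;

    public class AdminUiTokenExample {
        public static void main(String[] args) {
            AmplifyBackendClient amplify = AmplifyBackendClient.create();

            // Request a one-time challenge code for the Admin UI (appId is a placeholder).
            CreateTokenResponse token = amplify.createToken(
                    CreateTokenRequest.builder().appId("d1a2b3c4").build());
            System.out.println("Challenge " + token.challengeCode()
                    + " for session " + token.sessionId() + ", ttl " + token.ttl());

            // Invalidate the challenge once it has been used or abandoned.
            amplify.deleteToken(DeleteTokenRequest.builder()
                    .appId("d1a2b3c4")
                    .sessionId(token.sessionId())
                    .build());
        }
    }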

    ", + "required": [ + "AppId", + "Ttl", + "SessionId", + "ChallengeCode" + ] + }, + "CreateTokenResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "ChallengeCode": { + "shape": "__string", + "locationName": "challengeCode", + "documentation": "

    One-time challenge code for authenticating into the Amplify Admin UI.

    " + }, + "SessionId": { + "shape": "__string", + "locationName": "sessionId", + "documentation": "

    A unique ID provided when creating a new challenge token.

    " + }, + "Ttl": { + "shape": "__string", + "locationName": "ttl", + "documentation": "

    The expiry time for the one-time generated token code.

    " + } + } + }, + "DeleteBackendAPIRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    Defines the resource configuration for the data model in your Amplify project.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request body for DeleteBackendAPI.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "DeleteBackendAPIResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "DeleteBackendAuthRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request body for DeleteBackendAuth.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "DeleteBackendAuthResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "DeleteBackendRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + } + }, + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "DeleteBackendRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + }, + "documentation": "

    The returned object for a request to delete a backend.

    ", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "DeleteBackendResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "DeleteTokenRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "SessionId": { + "shape": "__string", + "location": "uri", + "locationName": "sessionId", + "documentation": "

    The session ID.

    " + } + }, + "required": [ + "SessionId", + "AppId" + ] + }, + "DeleteTokenRespObj": { + "type": "structure", + "members": { + "IsSuccess": { + "shape": "__boolean", + "locationName": "isSuccess", + "documentation": "

    Indicates whether the request succeeded or failed.

    " + } + }, + "documentation": "

    The response object for this operation.

    ", + "required": [ + "IsSuccess" + ] + }, + "DeleteTokenResponse": { + "type": "structure", + "members": { + "IsSuccess": { + "shape": "__boolean", + "locationName": "isSuccess", + "documentation": "

    Indicates whether the request succeeded or failed.

    " + } + } + }, + "DeliveryMethod": { + "type": "string", + "enum": [ + "EMAIL", + "SMS" + ] + }, + "EmailSettings": { + "type": "structure", + "members": { + "EmailMessage": { + "shape": "__string", + "locationName": "emailMessage", + "documentation": "

    The body of the email.

    " + }, + "EmailSubject": { + "shape": "__string", + "locationName": "emailSubject", + "documentation": "

    The subject of the email.

    " + } + }, + "documentation": "

    The configuration for the email sent when an app user forgets their password.

    " + }, + "GatewayTimeoutException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    An error message indicating that the request failed.

    " + } + }, + "documentation": "

    An error returned if there's a temporary issue with the service.

    ", + "exception": true, + "error": { + "httpStatusCode": 504 + } + }, + "GenerateBackendAPIModelsRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request body for GenerateBackendAPIModels.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "GenerateBackendAPIModelsResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "GetBackendAPICodegenRespObj": { + "type": "structure", + "members": { + "Models": { + "shape": "__string", + "locationName": "models", + "documentation": "

    Stringified JSON of the DataStore model.

    " + }, + "Status": { + "shape": "Status", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + }, + "documentation": "

    The response object for this operation.

    " + }, + "GetBackendAPIModelsRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request body for GetBackendAPIModels.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "GetBackendAPIModelsResponse": { + "type": "structure", + "members": { + "Models": { + "shape": "__string", + "locationName": "models", + "documentation": "

    Stringified JSON of the DataStore model.

    " + }, + "Status": { + "shape": "Status", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "GetBackendAPIRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    Defines the resource configuration for the data model in your Amplify project.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request body for GetBackendAPI.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "GetBackendAPIRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for this response object.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The response object for this operation.

    ", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "GetBackendAPIResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for this response object.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + } + }, + "GetBackendAuthReqObj": { + "type": "structure", + "members": { + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request object for this operation.

    ", + "required": [ + "ResourceName" + ] + }, + "GetBackendAuthRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request body for GetBackendAuth.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "GetBackendAuthRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "ResourceConfig": { + "shape": "CreateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for authorization requests to the backend of your Amplify project.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The response object for this operation.

    ", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "GetBackendAuthResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "ResourceConfig": { + "shape": "CreateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for authorization requests to the backend of your Amplify project.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + } + }, + "GetBackendJobRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + } + }, + "required": [ + "AppId", + "BackendEnvironmentName", + "JobId" + ] + }, + "GetBackendJobResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "CreateTime": { + "shape": "__string", + "locationName": "createTime", + "documentation": "

    The time when the job was created.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + }, + "UpdateTime": { + "shape": "__string", + "locationName": "updateTime", + "documentation": "

    The time when the job was last updated.

    " + } + } + }, + "GetBackendReqObj": { + "type": "structure", + "members": { + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + } + }, + "documentation": "

    The request object for this operation.

    " + }, + "GetBackendRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + } + }, + "documentation": "

    The request body for GetBackend.

    ", + "required": [ + "AppId" + ] + }, + "GetBackendRespObj": { + "type": "structure", + "members": { + "AmplifyMetaConfig": { + "shape": "__string", + "locationName": "amplifyMetaConfig", + "documentation": "

    A stringified version of the current configurations for your Amplify project.

    " + }, + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "AppName": { + "shape": "__string", + "locationName": "appName", + "documentation": "

    The name of the app.

    " + }, + "BackendEnvironmentList": { + "shape": "ListOf__string", + "locationName": "backendEnvironmentList", + "documentation": "

    A list of backend environments.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + } + }, + "documentation": "

    The response object for this operation.

    ", + "required": [ + "AppId" + ] + }, + "GetBackendResponse": { + "type": "structure", + "members": { + "AmplifyMetaConfig": { + "shape": "__string", + "locationName": "amplifyMetaConfig", + "documentation": "

    A stringified version of the current configurations for your Amplify project.

    " + }, + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "AppName": { + "shape": "__string", + "locationName": "appName", + "documentation": "

    The name of the app.

    " + }, + "BackendEnvironmentList": { + "shape": "ListOf__string", + "locationName": "backendEnvironmentList", + "documentation": "

    A list of backend environments.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + } + } + }, + "GetTokenRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "SessionId": { + "shape": "__string", + "location": "uri", + "locationName": "sessionId", + "documentation": "

    The session ID.

    " + } + }, + "required": [ + "SessionId", + "AppId" + ] + }, + "GetTokenRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "ChallengeCode": { + "shape": "__string", + "locationName": "challengeCode", + "documentation": "

    The one-time challenge code for authenticating into the Amplify Admin UI.

    " + }, + "SessionId": { + "shape": "__string", + "locationName": "sessionId", + "documentation": "

    A unique ID provided when creating a new challenge token.

    " + }, + "Ttl": { + "shape": "__string", + "locationName": "ttl", + "documentation": "

    The expiry time for the one-time generated token code.

    " + } + }, + "documentation": "

    The response object for this operation.

    ", + "required": [ + "AppId", + "Ttl", + "SessionId", + "ChallengeCode" + ] + }, + "GetTokenResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "ChallengeCode": { + "shape": "__string", + "locationName": "challengeCode", + "documentation": "

    The one-time challenge code for authenticating into the Amplify Admin UI.

    " + }, + "SessionId": { + "shape": "__string", + "locationName": "sessionId", + "documentation": "

    A unique ID provided when creating a new challenge token.

    " + }, + "Ttl": { + "shape": "__string", + "locationName": "ttl", + "documentation": "

    The expiry time for the one-time generated token code.

    " + } + } + }, + "InternalServiceException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    An error message indicating that the request failed.

    " + } + }, + "documentation": "

    An error returned if there's a temporary issue with the service.

    " + }, + "LimitExceededException": { + "type": "structure", + "members": { + "LimitType": { + "shape": "__string", + "locationName": "limitType", + "documentation": "

    The type of limit that was exceeded.

    " + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    An error message indicating that the request failed.

    " + } + }, + "documentation": "

    An error that is returned when a limit of a specific type is exceeded.

    " + }, + "ListBackendJobReqObj": { + "type": "structure", + "members": { + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "MaxResults": { + "shape": "__integerMin1Max25", + "locationName": "maxResults", + "documentation": "

    The maximum number of results that you want in the response.

    " + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The token for the next set of results.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    Filters the list of response objects to include only those with the specified operation name.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    Filters the list of response objects to include only those with the specified status.

    " + } + }, + "documentation": "

    The request object for this operation.

    " + }, + "ListBackendJobRespObj": { + "type": "structure", + "members": { + "Jobs": { + "shape": "ListOfBackendJobRespObj", + "locationName": "jobs", + "documentation": "

    An array of jobs and their properties.

    " + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The token for the next set of results.

    " + } + }, + "documentation": "

    The returned list of backend jobs.

    " + }, + "ListBackendJobsRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "MaxResults": { + "shape": "__integerMin1Max25", + "locationName": "maxResults", + "documentation": "

    The maximum number of results that you want in the response.

    " + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The token for the next set of results.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    Filters the list of response objects to include only those with the specified operation name.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    Filters the list of response objects to include only those with the specified status.

    " + } + }, + "documentation": "

    The request body for ListBackendJobs.
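    MaxResults (1 to 25 per page) and NextToken drive pagination for this operation. A hand-rolled paging loop with the generated v2 client might look like the sketch below; the app ID and environment name are placeholders.

    import software.amazon.awssdk.services.amplifybackend.AmplifyBackendClient;
    import software.amazon.awssdk.services.amplifybackend.model.ListBackendJobsRequest;
    import software.amazon.awssdk.services.amplifybackend.model.ListBackendJobsResponse;

    public class ListJobsExample {
        public static void main(String[] args) {
            AmplifyBackendClient amplify = AmplifyBackendClient.create();
            String nextToken = null;
            do {
                // Fetch one page of jobs (the model caps MaxResults at 25).
                ListBackendJobsResponse page = amplify.listBackendJobs(ListBackendJobsRequest.builder()
                        .appId("d1a2b3c4")
                        .backendEnvironmentName("staging")
                        .maxResults(25)
                        .nextToken(nextToken)
                        .build());
                page.jobs().forEach(job ->
                        System.out.println(job.jobId() + " " + job.status()));
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }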

    ", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "ListBackendJobsResponse": { + "type": "structure", + "members": { + "Jobs": { + "shape": "ListOfBackendJobRespObj", + "locationName": "jobs", + "documentation": "

    An array of jobs and their properties.

    " + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The token for the next set of results.

    " + } + } + }, + "LoginAuthConfigReqObj": { + "type": "structure", + "members": { + "AwsCognitoIdentityPoolId": { + "shape": "__string", + "locationName": "aws_cognito_identity_pool_id", + "documentation": "

    The Amazon Cognito identity pool ID used for Amplify Admin UI login authorization.

    " + }, + "AwsCognitoRegion": { + "shape": "__string", + "locationName": "aws_cognito_region", + "documentation": "

    The AWS Region for the Amplify Admin UI login.

    " + }, + "AwsUserPoolsId": { + "shape": "__string", + "locationName": "aws_user_pools_id", + "documentation": "

    The Amazon Cognito user pool ID used for Amplify Admin UI login authentication.

    " + }, + "AwsUserPoolsWebClientId": { + "shape": "__string", + "locationName": "aws_user_pools_web_client_id", + "documentation": "

    The web client ID for the Amazon Cognito user pools.

    " + } + }, + "documentation": "

    The request object for this operation.

    " + }, + "MFAMode": { + "type": "string", + "enum": [ + "ON", + "OFF", + "OPTIONAL" + ] + }, + "Mode": { + "type": "string", + "enum": [ + "API_KEY", + "AWS_IAM", + "AMAZON_COGNITO_USER_POOLS", + "OPENID_CONNECT" + ] + }, + "NotFoundException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    An error message indicating that the request failed.

    " + }, + "ResourceType": { + "shape": "__string", + "locationName": "resourceType", + "documentation": "

    The type of resource that wasn't found.

    " + } + }, + "documentation": "

    An error returned when a specific resource type is not found.

    ", + "exception": true, + "error": { + "httpStatusCode": 404 + } + }, + "OAuthGrantType": { + "type": "string", + "enum": [ + "CODE", + "IMPLICIT" + ] + }, + "RemoveAllBackendsReqObj": { + "type": "structure", + "members": { + "CleanAmplifyApp": { + "shape": "__boolean", + "locationName": "cleanAmplifyApp", + "documentation": "

    Cleans up the Amplify Console app if this value is set to true.

    " + } + }, + "documentation": "

    The request object for this operation.

    " + }, + "RemoveAllBackendsRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "CleanAmplifyApp": { + "shape": "__boolean", + "locationName": "cleanAmplifyApp", + "documentation": "

    Cleans up the Amplify Console app if this value is set to true.

    " + } + }, + "documentation": "

    The request body for RemoveAllBackends.

    ", + "required": [ + "AppId" + ] + }, + "RemoveAllBackendsRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + }, + "documentation": "

    The response object for this operation.

    ", + "required": [ + "AppId" + ] + }, + "RemoveAllBackendsResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "RemoveBackendAuthReqObj": { + "type": "structure", + "members": { + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request object for this operation.

    ", + "required": [ + "ResourceName" + ] + }, + "RemoveBackendConfigRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + } + }, + "required": [ + "AppId" + ] + }, + "RemoveBackendConfigRespObj": { + "type": "structure", + "members": { + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + } + }, + "documentation": "

    The response object for this operation.

    " + }, + "RemoveBackendConfigResponse": { + "type": "structure", + "members": { + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + } + } + }, + "ResolutionStrategy": { + "type": "string", + "enum": [ + "OPTIMISTIC_CONCURRENCY", + "LAMBDA", + "AUTOMERGE", + "NONE" + ] + }, + "ResourceConfig": { + "type": "structure", + "members": {}, + "documentation": "

    Defines the resource configuration for the data model in your Amplify project.

    " + }, + "Service": { + "type": "string", + "enum": [ + "COGNITO" + ] + }, + "Settings": { + "type": "structure", + "members": { + "MfaTypes": { + "shape": "ListOfMfaTypesElement", + "locationName": "mfaTypes", + "documentation": "

    The supported MFA types.

    " + }, + "SmsMessage": { + "shape": "__string", + "locationName": "smsMessage", + "documentation": "

    The body of the SMS message.

    " + } + }, + "documentation": "

    The settings of your MFA configuration for the backend of your Amplify project.

    " + }, + "SignInMethod": { + "type": "string", + "enum": [ + "EMAIL", + "EMAIL_AND_PHONE_NUMBER", + "PHONE_NUMBER", + "USERNAME" + ] + }, + "SmsSettings": { + "type": "structure", + "members": { + "SmsMessage": { + "shape": "__string", + "locationName": "smsMessage", + "documentation": "

    The body of the SMS message.

    " + } + }, + "documentation": "

    SMS settings for authentication.

    " + }, + "SocialProviderSettings": { + "type": "structure", + "members": { + "Facebook": { + "shape": "BackendAuthSocialProviderConfig" + }, + "Google": { + "shape": "BackendAuthSocialProviderConfig" + }, + "LoginWithAmazon": { + "shape": "BackendAuthSocialProviderConfig" + } + }, + "documentation": "

    The settings for using the social identity providers for access to your Amplify app.

    " + }, + "Status": { + "type": "string", + "enum": [ + "LATEST", + "STALE" + ] + }, + "TooManyRequestsException": { + "type": "structure", + "members": { + "LimitType": { + "shape": "__string", + "locationName": "limitType", + "documentation": "

    The type of limit that was exceeded.

    " + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    An error message indicating that the request failed.

    " + } + }, + "documentation": "

    An error that is returned when a limit of a specific type is exceeded.

    ", + "exception": true, + "error": { + "httpStatusCode": 429 + } + }, + "UpdateBackendAPIRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    Defines the resource configuration for the data model in your Amplify project.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request body for UpdateBackendAPI.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "UpdateBackendAPIResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "UpdateBackendAuthForgotPasswordConfig": { + "type": "structure", + "members": { + "DeliveryMethod": { + "shape": "DeliveryMethod", + "locationName": "deliveryMethod", + "documentation": "

    Describes which method to use (either SMS or email) to deliver messages to app users who want to recover their password.

    " + }, + "EmailSettings": { + "shape": "EmailSettings", + "locationName": "emailSettings", + "documentation": "

    The configuration for the email sent when an app user forgets their password.

    " + }, + "SmsSettings": { + "shape": "SmsSettings", + "locationName": "smsSettings", + "documentation": "

    The configuration for the SMS message sent when an Amplify app user forgets their password.

    " + } + }, + "documentation": "

    Describes the forgot password policy for authenticating into the Amplify app.

    " + }, + "UpdateBackendAuthIdentityPoolConfig": { + "type": "structure", + "members": { + "UnauthenticatedLogin": { + "shape": "__boolean", + "locationName": "unauthenticatedLogin", + "documentation": "

    A Boolean value that you can set to allow or disallow guest-level authorization into your Amplify app.

    " + } + }, + "documentation": "

    Describes the authorization configuration for the Amazon Cognito identity pool, provisioned as a part of your auth resource in the Amplify project.

    " + }, + "UpdateBackendAuthMFAConfig": { + "type": "structure", + "members": { + "MFAMode": { + "shape": "MFAMode", + "documentation": "

    The MFA mode for the backend of your Amplify project.

    " + }, + "Settings": { + "shape": "Settings", + "locationName": "settings", + "documentation": "

    The settings of your MFA configuration for the backend of your Amplify project.

    " + } + }, + "documentation": "

    Updates the multi-factor authentication (MFA) configuration for the backend of your Amplify project.

    " + }, + "UpdateBackendAuthOAuthConfig": { + "type": "structure", + "members": { + "DomainPrefix": { + "shape": "__string", + "locationName": "domainPrefix", + "documentation": "

    The Amazon Cognito domain prefix used to create a hosted UI for authentication.

    " + }, + "OAuthGrantType": { + "shape": "OAuthGrantType", + "locationName": "oAuthGrantType", + "documentation": "

    The OAuth grant type to allow app users to authenticate from your Amplify app.

    " + }, + "OAuthScopes": { + "shape": "ListOfOAuthScopesElement", + "locationName": "oAuthScopes", + "documentation": "

    The list of OAuth-related flows that can allow users to authenticate from your Amplify app.

    " + }, + "RedirectSignInURIs": { + "shape": "ListOf__string", + "locationName": "redirectSignInURIs", + "documentation": "

    Redirect URLs that OAuth uses when a user signs in to an Amplify app.

    " + }, + "RedirectSignOutURIs": { + "shape": "ListOf__string", + "locationName": "redirectSignOutURIs", + "documentation": "

    Redirect URLs that OAuth uses when a user signs out of an Amplify app.

    " + }, + "SocialProviderSettings": { + "shape": "SocialProviderSettings", + "locationName": "socialProviderSettings", + "documentation": "

    Describes third-party social federation configurations for allowing your users to sign in with OAuth.

    " + } + }, + "documentation": "

    The OAuth configurations for authenticating users into your Amplify app.

    " + }, + "UpdateBackendAuthPasswordPolicyConfig": { + "type": "structure", + "members": { + "AdditionalConstraints": { + "shape": "ListOfAdditionalConstraintsElement", + "locationName": "additionalConstraints", + "documentation": "

    Describes additional constraints on the password requirements to sign in to the auth resource, configured as a part of your Amplify project.

    " + }, + "MinimumLength": { + "shape": "__double", + "locationName": "minimumLength", + "documentation": "

    Describes the minimum length of the password required to sign in to the auth resource, configured as a part of your Amplify project.

    " + } + }, + "documentation": "

    Describes the password policy for your Amazon Cognito user pool that's configured as a part of your Amplify project.
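    For example, a policy requiring a 12-character minimum with digits and symbols could be built from the members above as sketched here; this is an illustrative construction of the model object, not a complete update call.

    import software.amazon.awssdk.services.amplifybackend.model.AdditionalConstraintsElement;
    import software.amazon.awssdk.services.amplifybackend.model.UpdateBackendAuthPasswordPolicyConfig;

    public class PasswordPolicyExample {
        public static void main(String[] args) {
            // MinimumLength is modeled as a double; AdditionalConstraints is a list of enum values.
            UpdateBackendAuthPasswordPolicyConfig policy = UpdateBackendAuthPasswordPolicyConfig.builder()
                    .minimumLength(12.0)
                    .additionalConstraints(AdditionalConstraintsElement.REQUIRE_DIGIT,
                            AdditionalConstraintsElement.REQUIRE_SYMBOL)
                    .build();
            System.out.println(policy);
        }
    }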

    " + }, + "UpdateBackendAuthReqObj": { + "type": "structure", + "members": { + "ResourceConfig": { + "shape": "UpdateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for this request object.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request object for this operation.

    ", + "required": [ + "ResourceName", + "ResourceConfig" + ] + }, + "UpdateBackendAuthRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "ResourceConfig": { + "shape": "UpdateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

    The resource configuration for this request object.

    " + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

    The name of this resource.

    " + } + }, + "documentation": "

    The request body for UpdateBackendAuth.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName", + "ResourceConfig" + ] + }, + "UpdateBackendAuthResourceConfig": { + "type": "structure", + "members": { + "AuthResources": { + "shape": "AuthResources", + "locationName": "authResources", + "documentation": "

    Defines the service name to use when configuring an authentication resource in your Amplify project.

    " + }, + "IdentityPoolConfigs": { + "shape": "UpdateBackendAuthIdentityPoolConfig", + "locationName": "identityPoolConfigs", + "documentation": "

    Describes the authorization configuration for the Amazon Cognito identity pool, provisioned as a part of the auth resource in your Amplify project.

    " + }, + "Service": { + "shape": "Service", + "locationName": "service", + "documentation": "

    Defines the service name to use when configuring an authentication resource in your Amplify project.

    " + }, + "UserPoolConfigs": { + "shape": "UpdateBackendAuthUserPoolConfig", + "locationName": "userPoolConfigs", + "documentation": "

    Describes the authentication configuration for the Amazon Cognito user pool, provisioned as a part of the auth resource in your Amplify project.

    " + } + }, + "documentation": "

    Defines the resource configuration when updating an authentication resource in your Amplify project.

    ", + "required": [ + "AuthResources", + "UserPoolConfigs", + "Service" + ] + }, + "UpdateBackendAuthResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + } + } + }, + "UpdateBackendAuthUserPoolConfig": { + "type": "structure", + "members": { + "ForgotPassword": { + "shape": "UpdateBackendAuthForgotPasswordConfig", + "locationName": "forgotPassword", + "documentation": "

    Describes the forgot password policy for your Amazon Cognito user pool, configured as a part of your Amplify project.

    " + }, + "Mfa": { + "shape": "UpdateBackendAuthMFAConfig", + "locationName": "mfa", + "documentation": "

    Describes whether to apply multi-factor authentication (MFA) policies for your Amazon Cognito user pool that's configured as a part of your Amplify project.

    " + }, + "OAuth": { + "shape": "UpdateBackendAuthOAuthConfig", + "locationName": "oAuth", + "documentation": "

    Describes the OAuth policy and rules for your Amazon Cognito user pool, configured as a part of your Amplify project.

    " + }, + "PasswordPolicy": { + "shape": "UpdateBackendAuthPasswordPolicyConfig", + "locationName": "passwordPolicy", + "documentation": "

    Describes the password policy for your Amazon Cognito user pool, configured as a part of your Amplify project.

    " + } + }, + "documentation": "

    Describes the Amazon Cognito user pool configuration for the authorization resource of your Amplify project during an update.

    " + }, + "UpdateBackendConfigReqObj": { + "type": "structure", + "members": { + "LoginAuthConfig": { + "shape": "LoginAuthConfigReqObj", + "locationName": "loginAuthConfig", + "documentation": "

    Describes the Amazon Cognito configuration for Admin UI access.

    " + } + }, + "documentation": "

    The request object for this operation.

    " + }, + "UpdateBackendConfigRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "LoginAuthConfig": { + "shape": "LoginAuthConfigReqObj", + "locationName": "loginAuthConfig", + "documentation": "

    Describes the Amazon Cognito configuration for Admin UI access.

    " + } + }, + "documentation": "

    The request body for UpdateBackendConfig.
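    A sketch of wiring existing Amazon Cognito resources into Admin UI access through the generated v2 client; every ID below is a placeholder.

    import software.amazon.awssdk.services.amplifybackend.AmplifyBackendClient;
    import software.amazon.awssdk.services.amplifybackend.model.LoginAuthConfigReqObj;
    import software.amazon.awssdk.services.amplifybackend.model.UpdateBackendConfigRequest;

    public class UpdateBackendConfigExample {
        public static void main(String[] args) {
            AmplifyBackendClient amplify = AmplifyBackendClient.create();
            amplify.updateBackendConfig(UpdateBackendConfigRequest.builder()
                    .appId("d1a2b3c4")
                    .loginAuthConfig(LoginAuthConfigReqObj.builder()    // Cognito resources used by the Admin UI
                            .awsCognitoRegion("us-east-1")
                            .awsCognitoIdentityPoolId("us-east-1:11111111-2222-3333-4444-555555555555")
                            .awsUserPoolsId("us-east-1_ExAmPlE")
                            .awsUserPoolsWebClientId("1234567890abcdefghij")
                            .build())
                    .build());
        }
    }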

    ", + "required": [ + "AppId" + ] + }, + "UpdateBackendConfigResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendManagerAppId": { + "shape": "__string", + "locationName": "backendManagerAppId", + "documentation": "

    The app ID for the backend manager.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "LoginAuthConfig": { + "shape": "LoginAuthConfigReqObj", + "locationName": "loginAuthConfig", + "documentation": "

    Describes the Amazon Cognito configuration for the Admin UI auth resource used to log in.

    " + } + } + }, + "UpdateBackendJobRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    Filters the list of response objects to include only those with the specified operation name.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    Filters the list of response objects to include only those with the specified status.

    " + } + }, + "documentation": "

    The request body for UpdateBackendJob.

    ", + "required": [ + "AppId", + "BackendEnvironmentName", + "JobId" + ] + }, + "UpdateBackendJobResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

    The app ID.

    " + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

    The name of the backend environment.

    " + }, + "CreateTime": { + "shape": "__string", + "locationName": "createTime", + "documentation": "

    The time when the job was created.

    " + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

    If the request failed, this is the returned error.

    " + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The ID for the job.

    " + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

    The name of the operation.

    " + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

    The current status of the request.

    " + }, + "UpdateTime": { + "shape": "__string", + "locationName": "updateTime", + "documentation": "

    The time when the job was last updated.

    " + } + } + }, + "AdditionalConstraintsElement": { + "type": "string", + "enum": [ + "REQUIRE_DIGIT", + "REQUIRE_LOWERCASE", + "REQUIRE_SYMBOL", + "REQUIRE_UPPERCASE" + ] + }, + "MfaTypesElement": { + "type": "string", + "enum": [ + "SMS", + "TOTP" + ] + }, + "OAuthScopesElement": { + "type": "string", + "enum": [ + "PHONE", + "EMAIL", + "OPENID", + "PROFILE", + "AWS_COGNITO_SIGNIN_USER_ADMIN" + ] + }, + "RequiredSignUpAttributesElement": { + "type": "string", + "enum": [ + "ADDRESS", + "BIRTHDATE", + "EMAIL", + "FAMILY_NAME", + "GENDER", + "GIVEN_NAME", + "LOCALE", + "MIDDLE_NAME", + "NAME", + "NICKNAME", + "PHONE_NUMBER", + "PICTURE", + "PREFERRED_USERNAME", + "PROFILE", + "UPDATED_AT", + "WEBSITE", + "ZONE_INFO" + ] + }, + "__boolean": { + "type": "boolean" + }, + "__double": { + "type": "double" + }, + "__integer": { + "type": "integer" + }, + "__integerMin1Max25": { + "type": "integer", + "min": 1, + "max": 25 + }, + "ListOfBackendAPIAuthType": { + "type": "list", + "member": { + "shape": "BackendAPIAuthType" + } + }, + "ListOfBackendJobRespObj": { + "type": "list", + "member": { + "shape": "BackendJobRespObj" + } + }, + "ListOfAdditionalConstraintsElement": { + "type": "list", + "member": { + "shape": "AdditionalConstraintsElement" + } + }, + "ListOfMfaTypesElement": { + "type": "list", + "member": { + "shape": "MfaTypesElement" + } + }, + "ListOfOAuthScopesElement": { + "type": "list", + "member": { + "shape": "OAuthScopesElement" + } + }, + "ListOfRequiredSignUpAttributesElement": { + "type": "list", + "member": { + "shape": "RequiredSignUpAttributesElement" + } + }, + "ListOf__string": { + "type": "list", + "member": { + "shape": "__string" + } + }, + "__long": { + "type": "long" + }, + "__string": { + "type": "string" + }, + "__timestampIso8601": { + "type": "timestamp", + "timestampFormat": "iso8601" + }, + "__timestampUnix": { + "type": "timestamp", + "timestampFormat": "unixTimestamp" + } + }, + "documentation": "

    AWS Amplify Admin API

    " +} diff --git a/services/apigateway/build.properties b/services/apigateway/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/apigateway/build.properties +++ b/services/apigateway/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index c0a1a064afd5..6fe18c71c0ba 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + appconfig + AWS Java SDK :: Services :: AppConfig + The AWS Java SDK for AppConfig module holds the client classes that are used for + communicating with AppConfig. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.appconfig + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/appconfig/src/main/resources/codegen-resources/paginators-1.json b/services/appconfig/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..c76cf37f2ccb --- /dev/null +++ b/services/appconfig/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,34 @@ +{ + "pagination": { + "ListApplications": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListConfigurationProfiles": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDeploymentStrategies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDeployments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListEnvironments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListHostedConfigurationVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/appconfig/src/main/resources/codegen-resources/service-2.json b/services/appconfig/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..a94258d35834 --- /dev/null +++ b/services/appconfig/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2195 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-10-09", + "endpointPrefix":"appconfig", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"AppConfig", + "serviceFullName":"Amazon AppConfig", + "serviceId":"AppConfig", + "signatureVersion":"v4", + "signingName":"appconfig", + "uid":"appconfig-2019-10-09" + }, + "operations":{ + "CreateApplication":{ + "name":"CreateApplication", + "http":{ + "method":"POST", + "requestUri":"/applications", + "responseCode":201 + }, + "input":{"shape":"CreateApplicationRequest"}, + "output":{"shape":"Application"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    An application in AppConfig is a logical unit of code that provides capabilities for your customers. For example, an application can be a microservice that runs on Amazon EC2 instances, a mobile application installed by your users, a serverless application using Amazon API Gateway and AWS Lambda, or any system you run on behalf of others.

    " + }, + "CreateConfigurationProfile":{ + "name":"CreateConfigurationProfile", + "http":{ + "method":"POST", + "requestUri":"/applications/{ApplicationId}/configurationprofiles", + "responseCode":201 + }, + "input":{"shape":"CreateConfigurationProfileRequest"}, + "output":{"shape":"ConfigurationProfile"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Information that enables AppConfig to access the configuration source. Valid configuration sources include Systems Manager (SSM) documents, SSM Parameter Store parameters, and Amazon S3 objects. A configuration profile includes the following information.

    • The Uri location of the configuration data.

    • The AWS Identity and Access Management (IAM) role that provides access to the configuration data.

    • A validator for the configuration data. Available validators include either a JSON Schema or an AWS Lambda function.

    For more information, see Create a Configuration and a Configuration Profile in the AWS AppConfig User Guide.
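
    As a rough, hedged sketch of using the configuration-source information described above with the generated AWS SDK for Java v2 client (the application ID, profile name, S3 object URI, and IAM role ARN are placeholders, not values from this model):

    import software.amazon.awssdk.services.appconfig.AppConfigClient;
    import software.amazon.awssdk.services.appconfig.model.CreateConfigurationProfileRequest;

    public class CreateProfileExample {
        public static void main(String[] args) {
            try (AppConfigClient appConfig = AppConfigClient.create()) {
                // Point the profile at an Amazon S3 object and supply the IAM role
                // that AppConfig uses to read the configuration data.
                appConfig.createConfigurationProfile(CreateConfigurationProfileRequest.builder()
                        .applicationId("abc1234")                                            // placeholder application ID
                        .name("my-config-profile")                                           // placeholder profile name
                        .locationUri("s3://my-bucket/my-app/my-config.json")                 // placeholder S3 object URI
                        .retrievalRoleArn("arn:aws:iam::111122223333:role/AppConfigS3Access") // placeholder role ARN
                        .build());
            }
        }
    }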

    " + }, + "CreateDeploymentStrategy":{ + "name":"CreateDeploymentStrategy", + "http":{ + "method":"POST", + "requestUri":"/deploymentstrategies", + "responseCode":201 + }, + "input":{"shape":"CreateDeploymentStrategyRequest"}, + "output":{"shape":"DeploymentStrategy"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    A deployment strategy defines important criteria for rolling out your configuration to the designated targets. A deployment strategy includes the overall duration required, the percentage of targets to receive the deployment during each interval, an algorithm that defines how the percentage grows over time, and the bake time.

    " + }, + "CreateEnvironment":{ + "name":"CreateEnvironment", + "http":{ + "method":"POST", + "requestUri":"/applications/{ApplicationId}/environments", + "responseCode":201 + }, + "input":{"shape":"CreateEnvironmentRequest"}, + "output":{"shape":"Environment"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    For each application, you define one or more environments. An environment is a logical deployment group of AppConfig targets, such as applications in a Beta or Production environment. You can also define environments for application subcomponents, such as the Web, Mobile, and Back-end components of your application. You can configure Amazon CloudWatch alarms for each environment. The system monitors alarms during a configuration deployment. If an alarm is triggered, the system rolls back the configuration.

    " + }, + "CreateHostedConfigurationVersion":{ + "name":"CreateHostedConfigurationVersion", + "http":{ + "method":"POST", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions", + "responseCode":201 + }, + "input":{"shape":"CreateHostedConfigurationVersionRequest"}, + "output":{"shape":"HostedConfigurationVersion"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"PayloadTooLargeException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Create a new configuration in the AppConfig configuration store.

    " + }, + "DeleteApplication":{ + "name":"DeleteApplication", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{ApplicationId}", + "responseCode":204 + }, + "input":{"shape":"DeleteApplicationRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Delete an application. Deleting an application does not delete a configuration from a host.

    " + }, + "DeleteConfigurationProfile":{ + "name":"DeleteConfigurationProfile", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}", + "responseCode":204 + }, + "input":{"shape":"DeleteConfigurationProfileRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Delete a configuration profile. Deleting a configuration profile does not delete a configuration from a host.

    " + }, + "DeleteDeploymentStrategy":{ + "name":"DeleteDeploymentStrategy", + "http":{ + "method":"DELETE", + "requestUri":"/deployementstrategies/{DeploymentStrategyId}", + "responseCode":204 + }, + "input":{"shape":"DeleteDeploymentStrategyRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Delete a deployment strategy. Deleting a deployment strategy does not delete a configuration from a host.

    " + }, + "DeleteEnvironment":{ + "name":"DeleteEnvironment", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{ApplicationId}/environments/{EnvironmentId}", + "responseCode":204 + }, + "input":{"shape":"DeleteEnvironmentRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Delete an environment. Deleting an environment does not delete a configuration from a host.

    " + }, + "DeleteHostedConfigurationVersion":{ + "name":"DeleteHostedConfigurationVersion", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions/{VersionNumber}", + "responseCode":204 + }, + "input":{"shape":"DeleteHostedConfigurationVersionRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Delete a version of a configuration from the AppConfig configuration store.

    " + }, + "GetApplication":{ + "name":"GetApplication", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}", + "responseCode":200 + }, + "input":{"shape":"GetApplicationRequest"}, + "output":{"shape":"Application"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Retrieve information about an application.

    " + }, + "GetConfiguration":{ + "name":"GetConfiguration", + "http":{ + "method":"GET", + "requestUri":"/applications/{Application}/environments/{Environment}/configurations/{Configuration}", + "responseCode":200 + }, + "input":{"shape":"GetConfigurationRequest"}, + "output":{"shape":"Configuration"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Receive information about a configuration.

    AWS AppConfig uses the value of the ClientConfigurationVersion parameter to identify the configuration version on your clients. If you don’t send ClientConfigurationVersion with each call to GetConfiguration, your clients receive the current configuration. You are charged each time your clients receive a configuration.

    To avoid excess charges, we recommend that you include the ClientConfigurationVersion value with every call to GetConfiguration. This value must be saved on your client. Subsequent calls to GetConfiguration must pass this value by using the ClientConfigurationVersion parameter.
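
    As a hedged illustration of the version caching recommended above, a minimal sketch using the generated AWS SDK for Java v2 client (the names and IDs are placeholders; builder method names follow the request members defined in this model):

    import software.amazon.awssdk.services.appconfig.AppConfigClient;
    import software.amazon.awssdk.services.appconfig.model.GetConfigurationRequest;
    import software.amazon.awssdk.services.appconfig.model.GetConfigurationResponse;

    public class ConfigurationPoller {
        private final AppConfigClient appConfig = AppConfigClient.create();
        // Last version returned by the service; sending it back on the next call
        // avoids being charged for configuration data the client already has.
        private String cachedVersion;

        public GetConfigurationResponse poll() {
            GetConfigurationResponse response = appConfig.getConfiguration(GetConfigurationRequest.builder()
                    .application("my-app")                       // placeholder name or ID
                    .environment("Production")                   // placeholder name or ID
                    .configuration("my-config-profile")          // placeholder name or ID
                    .clientId("host-1234")                       // placeholder unique client ID
                    .clientConfigurationVersion(cachedVersion)   // null on the first call
                    .build());
            cachedVersion = response.configurationVersion();     // save for the next call
            return response;
        }
    }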

    " + }, + "GetConfigurationProfile":{ + "name":"GetConfigurationProfile", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}", + "responseCode":200 + }, + "input":{"shape":"GetConfigurationProfileRequest"}, + "output":{"shape":"ConfigurationProfile"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Retrieve information about a configuration profile.

    " + }, + "GetDeployment":{ + "name":"GetDeployment", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/environments/{EnvironmentId}/deployments/{DeploymentNumber}", + "responseCode":200 + }, + "input":{"shape":"GetDeploymentRequest"}, + "output":{"shape":"Deployment"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Retrieve information about a configuration deployment.

    " + }, + "GetDeploymentStrategy":{ + "name":"GetDeploymentStrategy", + "http":{ + "method":"GET", + "requestUri":"/deploymentstrategies/{DeploymentStrategyId}", + "responseCode":200 + }, + "input":{"shape":"GetDeploymentStrategyRequest"}, + "output":{"shape":"DeploymentStrategy"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Retrieve information about a deployment strategy. A deployment strategy defines important criteria for rolling out your configuration to the designated targets. A deployment strategy includes the overall duration required, the percentage of targets to receive the deployment during each interval, an algorithm that defines how the percentage grows over time, and the bake time.

    " + }, + "GetEnvironment":{ + "name":"GetEnvironment", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/environments/{EnvironmentId}", + "responseCode":200 + }, + "input":{"shape":"GetEnvironmentRequest"}, + "output":{"shape":"Environment"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Retrieve information about an environment. An environment is a logical deployment group of AppConfig applications, such as applications in a Production environment or in an EU_Region environment. Each configuration deployment targets an environment. You can enable one or more Amazon CloudWatch alarms for an environment. If an alarm is triggered during a deployment, AppConfig rolls back the configuration.

    " + }, + "GetHostedConfigurationVersion":{ + "name":"GetHostedConfigurationVersion", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions/{VersionNumber}", + "responseCode":200 + }, + "input":{"shape":"GetHostedConfigurationVersionRequest"}, + "output":{"shape":"HostedConfigurationVersion"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Get information about a specific configuration version.

    " + }, + "ListApplications":{ + "name":"ListApplications", + "http":{ + "method":"GET", + "requestUri":"/applications", + "responseCode":200 + }, + "input":{"shape":"ListApplicationsRequest"}, + "output":{"shape":"Applications"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    List all applications in your AWS account.

    " + }, + "ListConfigurationProfiles":{ + "name":"ListConfigurationProfiles", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/configurationprofiles", + "responseCode":200 + }, + "input":{"shape":"ListConfigurationProfilesRequest"}, + "output":{"shape":"ConfigurationProfiles"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Lists the configuration profiles for an application.

    " + }, + "ListDeploymentStrategies":{ + "name":"ListDeploymentStrategies", + "http":{ + "method":"GET", + "requestUri":"/deploymentstrategies", + "responseCode":200 + }, + "input":{"shape":"ListDeploymentStrategiesRequest"}, + "output":{"shape":"DeploymentStrategies"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    List deployment strategies.

    " + }, + "ListDeployments":{ + "name":"ListDeployments", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/environments/{EnvironmentId}/deployments", + "responseCode":200 + }, + "input":{"shape":"ListDeploymentsRequest"}, + "output":{"shape":"Deployments"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Lists the deployments for an environment.

    " + }, + "ListEnvironments":{ + "name":"ListEnvironments", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/environments", + "responseCode":200 + }, + "input":{"shape":"ListEnvironmentsRequest"}, + "output":{"shape":"Environments"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    List the environments for an application.

    " + }, + "ListHostedConfigurationVersions":{ + "name":"ListHostedConfigurationVersions", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions", + "responseCode":200 + }, + "input":{"shape":"ListHostedConfigurationVersionsRequest"}, + "output":{"shape":"HostedConfigurationVersions"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    View a list of configurations stored in the AppConfig configuration store by version.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ResourceTags"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves the list of key-value tags assigned to the resource.

    " + }, + "StartDeployment":{ + "name":"StartDeployment", + "http":{ + "method":"POST", + "requestUri":"/applications/{ApplicationId}/environments/{EnvironmentId}/deployments", + "responseCode":201 + }, + "input":{"shape":"StartDeploymentRequest"}, + "output":{"shape":"Deployment"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Starts a deployment.

    " + }, + "StopDeployment":{ + "name":"StopDeployment", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{ApplicationId}/environments/{EnvironmentId}/deployments/{DeploymentNumber}", + "responseCode":202 + }, + "input":{"shape":"StopDeploymentRequest"}, + "output":{"shape":"Deployment"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Stops a deployment. This API action works only on deployments that have a status of DEPLOYING. This action moves the deployment to a status of ROLLED_BACK.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Metadata to assign to an AppConfig resource. Tags help organize and categorize your AppConfig resources. Each tag consists of a key and an optional value, both of which you define. You can specify a maximum of 50 tags for a resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes a tag key and value from an AppConfig resource.

    " + }, + "UpdateApplication":{ + "name":"UpdateApplication", + "http":{ + "method":"PATCH", + "requestUri":"/applications/{ApplicationId}", + "responseCode":200 + }, + "input":{"shape":"UpdateApplicationRequest"}, + "output":{"shape":"Application"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates an application.

    " + }, + "UpdateConfigurationProfile":{ + "name":"UpdateConfigurationProfile", + "http":{ + "method":"PATCH", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}", + "responseCode":200 + }, + "input":{"shape":"UpdateConfigurationProfileRequest"}, + "output":{"shape":"ConfigurationProfile"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates a configuration profile.

    " + }, + "UpdateDeploymentStrategy":{ + "name":"UpdateDeploymentStrategy", + "http":{ + "method":"PATCH", + "requestUri":"/deploymentstrategies/{DeploymentStrategyId}", + "responseCode":200 + }, + "input":{"shape":"UpdateDeploymentStrategyRequest"}, + "output":{"shape":"DeploymentStrategy"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates a deployment strategy.

    " + }, + "UpdateEnvironment":{ + "name":"UpdateEnvironment", + "http":{ + "method":"PATCH", + "requestUri":"/applications/{ApplicationId}/environments/{EnvironmentId}", + "responseCode":200 + }, + "input":{"shape":"UpdateEnvironmentRequest"}, + "output":{"shape":"Environment"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates an environment.

    " + }, + "ValidateConfiguration":{ + "name":"ValidateConfiguration", + "http":{ + "method":"POST", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/validators", + "responseCode":204 + }, + "input":{"shape":"ValidateConfigurationRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Uses the validators in a configuration profile to validate a configuration.

    " + } + }, + "shapes":{ + "Application":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"Id", + "documentation":"

    The application ID.

    " + }, + "Name":{ + "shape":"Name", + "documentation":"

    The application name.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the application.

    " + } + } + }, + "ApplicationList":{ + "type":"list", + "member":{"shape":"Application"} + }, + "Applications":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ApplicationList", + "documentation":"

    The elements from this collection.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of items to return. Use this token to get the next set of results.

    " + } + } + }, + "Arn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:(aws[a-zA-Z-]*)?:[a-z]+:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1})?:(\\d{12})?:[a-zA-Z0-9-_/:.]+" + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    The input fails to satisfy the constraints specified by an AWS service.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Blob":{ + "type":"blob", + "sensitive":true + }, + "BytesMeasure":{ + "type":"string", + "enum":["KILOBYTES"] + }, + "Configuration":{ + "type":"structure", + "members":{ + "Content":{ + "shape":"Blob", + "documentation":"

    The content of the configuration or the configuration data.

    " + }, + "ConfigurationVersion":{ + "shape":"Version", + "documentation":"

    The configuration version.

    ", + "location":"header", + "locationName":"Configuration-Version" + }, + "ContentType":{ + "shape":"String", + "documentation":"

    A standard MIME type describing the format of the configuration content. For more information, see Content-Type.

    ", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"Content" + }, + "ConfigurationProfile":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    " + }, + "Id":{ + "shape":"Id", + "documentation":"

    The configuration profile ID.

    " + }, + "Name":{ + "shape":"Name", + "documentation":"

    The name of the configuration profile.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The configuration profile description.

    " + }, + "LocationUri":{ + "shape":"Uri", + "documentation":"

    The URI location of the configuration.

    " + }, + "RetrievalRoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of an IAM role with permission to access the configuration at the specified LocationUri.

    " + }, + "Validators":{ + "shape":"ValidatorList", + "documentation":"

    A list of methods for validating the configuration.

    " + } + } + }, + "ConfigurationProfileSummary":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    " + }, + "Id":{ + "shape":"Id", + "documentation":"

    The ID of the configuration profile.

    " + }, + "Name":{ + "shape":"Name", + "documentation":"

    The name of the configuration profile.

    " + }, + "LocationUri":{ + "shape":"Uri", + "documentation":"

    The URI location of the configuration.

    " + }, + "ValidatorTypes":{ + "shape":"ValidatorTypeList", + "documentation":"

    The types of validators in the configuration profile.

    " + } + }, + "documentation":"

    A summary of a configuration profile.

    " + }, + "ConfigurationProfileSummaryList":{ + "type":"list", + "member":{"shape":"ConfigurationProfileSummary"} + }, + "ConfigurationProfiles":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ConfigurationProfileSummaryList", + "documentation":"

    The elements from this collection.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of items to return. Use this token to get the next set of results.

    " + } + } + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    The request could not be processed because of conflict in the current state of the resource.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateApplicationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    A name for the application.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the application.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata to assign to the application. Tags help organize and categorize your AppConfig resources. Each tag consists of a key and an optional value, both of which you define.

    " + } + } + }, + "CreateConfigurationProfileRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "Name", + "LocationUri" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "Name":{ + "shape":"Name", + "documentation":"

    A name for the configuration profile.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the configuration profile.

    " + }, + "LocationUri":{ + "shape":"Uri", + "documentation":"

    A URI to locate the configuration. You can specify a Systems Manager (SSM) document, an SSM Parameter Store parameter, or an Amazon S3 object. For an SSM document, specify either the document name in the format ssm-document://<Document_name> or the Amazon Resource Name (ARN). For a parameter, specify either the parameter name in the format ssm-parameter://<Parameter_name> or the ARN. For an Amazon S3 object, specify the URI in the following format: s3://<bucket>/<objectKey>. Here is an example: s3://my-bucket/my-app/us-east-1/my-config.json

    " + }, + "RetrievalRoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of an IAM role with permission to access the configuration at the specified LocationUri.

    " + }, + "Validators":{ + "shape":"ValidatorList", + "documentation":"

    A list of methods for validating the configuration.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata to assign to the configuration profile. Tags help organize and categorize your AppConfig resources. Each tag consists of a key and an optional value, both of which you define.

    " + } + } + }, + "CreateDeploymentStrategyRequest":{ + "type":"structure", + "required":[ + "Name", + "DeploymentDurationInMinutes", + "GrowthFactor", + "ReplicateTo" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    A name for the deployment strategy.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the deployment strategy.

    " + }, + "DeploymentDurationInMinutes":{ + "shape":"MinutesBetween0And24Hours", + "documentation":"

    Total amount of time for a deployment to last.

    ", + "box":true + }, + "FinalBakeTimeInMinutes":{ + "shape":"MinutesBetween0And24Hours", + "documentation":"

    The amount of time AppConfig monitors for alarms before considering the deployment to be complete and no longer eligible for automatic rollback.

    " + }, + "GrowthFactor":{ + "shape":"GrowthFactor", + "documentation":"

    The percentage of targets to receive a deployed configuration during each interval.

    ", + "box":true + }, + "GrowthType":{ + "shape":"GrowthType", + "documentation":"

    The algorithm used to define how the percentage grows over time. AWS AppConfig supports the following growth types:

    Linear: For this type, AppConfig processes the deployment by dividing the total number of targets by the value specified for Step percentage. For example, a linear deployment that uses a Step percentage of 10 deploys the configuration to 10 percent of the hosts. After those deployments are complete, the system deploys the configuration to the next 10 percent. This continues until 100% of the targets have successfully received the configuration.

    Exponential: For this type, AppConfig processes the deployment exponentially using the following formula: G*(2^N). In this formula, G is the growth factor specified by the user and N is the number of steps until the configuration is deployed to all targets. For example, if you specify a growth factor of 2, then the system rolls out the configuration as follows:

    2*(2^0)

    2*(2^1)

    2*(2^2)

    Expressed numerically, the deployment rolls out as follows: 2% of the targets, 4% of the targets, 8% of the targets, and continues until the configuration has been deployed to all targets.
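
    Illustration only (not part of the service model): a small sketch that prints the per-step percentages implied by the linear and exponential growth types described above, assuming a growth factor of 2:

    public class GrowthTypeIllustration {
        public static void main(String[] args) {
            double growthFactor = 2.0;                                                   // "G" in the formula above
            for (int step = 0; ; step++) {
                double linear = Math.min(100.0, growthFactor * (step + 1));              // fixed increments per interval
                double exponential = Math.min(100.0, growthFactor * Math.pow(2, step));  // G*(2^N)
                System.out.printf("step %d: linear %.0f%%, exponential %.0f%%%n", step, linear, exponential);
                if (exponential >= 100.0) {
                    break;
                }
            }
        }
    }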

    " + }, + "ReplicateTo":{ + "shape":"ReplicateTo", + "documentation":"

    Save the deployment strategy to a Systems Manager (SSM) document.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata to assign to the deployment strategy. Tags help organize and categorize your AppConfig resources. Each tag consists of a key and an optional value, both of which you define.

    " + } + } + }, + "CreateEnvironmentRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "Name" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "Name":{ + "shape":"Name", + "documentation":"

    A name for the environment.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the environment.

    " + }, + "Monitors":{ + "shape":"MonitorList", + "documentation":"

    Amazon CloudWatch alarms to monitor during the deployment process.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata to assign to the environment. Tags help organize and categorize your AppConfig resources. Each tag consists of a key and an optional value, both of which you define.

    " + } + } + }, + "CreateHostedConfigurationVersionRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId", + "Content", + "ContentType" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The configuration profile ID.

    ", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the configuration.

    ", + "location":"header", + "locationName":"Description" + }, + "Content":{ + "shape":"Blob", + "documentation":"

    The content of the configuration or the configuration data.

    " + }, + "ContentType":{ + "shape":"StringWithLengthBetween1And255", + "documentation":"

    A standard MIME type describing the format of the configuration content. For more information, see Content-Type.

    ", + "location":"header", + "locationName":"Content-Type" + }, + "LatestVersionNumber":{ + "shape":"Integer", + "documentation":"

    An optional locking token used to prevent race conditions from overwriting configuration updates when creating a new version. To ensure your data is not overwritten when creating multiple hosted configuration versions in rapid succession, specify the version of the latest hosted configuration version.
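
    A hedged sketch of using the locking token described above with the generated AWS SDK for Java v2 client (the IDs, content, and version number are placeholders):

    import software.amazon.awssdk.core.SdkBytes;
    import software.amazon.awssdk.services.appconfig.AppConfigClient;
    import software.amazon.awssdk.services.appconfig.model.CreateHostedConfigurationVersionRequest;
    import software.amazon.awssdk.services.appconfig.model.CreateHostedConfigurationVersionResponse;

    public class CreateHostedVersionExample {
        public static void main(String[] args) {
            try (AppConfigClient appConfig = AppConfigClient.create()) {
                CreateHostedConfigurationVersionResponse created =
                        appConfig.createHostedConfigurationVersion(CreateHostedConfigurationVersionRequest.builder()
                                .applicationId("abc1234")                                // placeholder application ID
                                .configurationProfileId("def5678")                       // placeholder profile ID
                                .contentType("application/json")
                                .content(SdkBytes.fromUtf8String("{\"featureX\": true}")) // placeholder configuration data
                                .latestVersionNumber(3)   // optimistic-locking token: the latest version this client has seen
                                .build());
                System.out.println("Created version " + created.versionNumber());
            }
        }
    }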

    ", + "box":true, + "location":"header", + "locationName":"Latest-Version-Number" + } + }, + "payload":"Content" + }, + "DeleteApplicationRequest":{ + "type":"structure", + "required":["ApplicationId"], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The ID of the application to delete.

    ", + "location":"uri", + "locationName":"ApplicationId" + } + } + }, + "DeleteConfigurationProfileRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID that includes the configuration profile you want to delete.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The ID of the configuration profile you want to delete.

    ", + "location":"uri", + "locationName":"ConfigurationProfileId" + } + } + }, + "DeleteDeploymentStrategyRequest":{ + "type":"structure", + "required":["DeploymentStrategyId"], + "members":{ + "DeploymentStrategyId":{ + "shape":"DeploymentStrategyId", + "documentation":"

    The ID of the deployment strategy you want to delete.

    ", + "location":"uri", + "locationName":"DeploymentStrategyId" + } + } + }, + "DeleteEnvironmentRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "EnvironmentId" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID that includes the environment you want to delete.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "EnvironmentId":{ + "shape":"Id", + "documentation":"

    The ID of the environment you want to delete.

    ", + "location":"uri", + "locationName":"EnvironmentId" + } + } + }, + "DeleteHostedConfigurationVersionRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId", + "VersionNumber" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The configuration profile ID.

    ", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

    The version number to delete.

    ", + "location":"uri", + "locationName":"VersionNumber" + } + } + }, + "Deployment":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The ID of the application that was deployed.

    " + }, + "EnvironmentId":{ + "shape":"Id", + "documentation":"

    The ID of the environment that was deployed.

    " + }, + "DeploymentStrategyId":{ + "shape":"Id", + "documentation":"

    The ID of the deployment strategy that was deployed.

    " + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The ID of the configuration profile that was deployed.

    " + }, + "DeploymentNumber":{ + "shape":"Integer", + "documentation":"

    The sequence number of the deployment.

    " + }, + "ConfigurationName":{ + "shape":"Name", + "documentation":"

    The name of the configuration.

    " + }, + "ConfigurationLocationUri":{ + "shape":"Uri", + "documentation":"

    Information about the source location of the configuration.

    " + }, + "ConfigurationVersion":{ + "shape":"Version", + "documentation":"

    The configuration version that was deployed.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the deployment.

    " + }, + "DeploymentDurationInMinutes":{ + "shape":"MinutesBetween0And24Hours", + "documentation":"

    Total amount of time the deployment lasted.

    " + }, + "GrowthType":{ + "shape":"GrowthType", + "documentation":"

    The algorithm used to define how the percentage grew over time.

    " + }, + "GrowthFactor":{ + "shape":"Percentage", + "documentation":"

    The percentage of targets to receive a deployed configuration during each interval.

    " + }, + "FinalBakeTimeInMinutes":{ + "shape":"MinutesBetween0And24Hours", + "documentation":"

    The amount of time AppConfig monitored for alarms before considering the deployment to be complete and no longer eligible for automatic rollback.

    " + }, + "State":{ + "shape":"DeploymentState", + "documentation":"

    The state of the deployment.

    " + }, + "EventLog":{ + "shape":"DeploymentEvents", + "documentation":"

    A list containing all events related to a deployment. The most recent events are displayed first.

    " + }, + "PercentageComplete":{ + "shape":"Percentage", + "documentation":"

    The percentage of targets for which the deployment is available.

    " + }, + "StartedAt":{ + "shape":"Iso8601DateTime", + "documentation":"

    The time the deployment started.

    " + }, + "CompletedAt":{ + "shape":"Iso8601DateTime", + "documentation":"

    The time the deployment completed.

    " + } + } + }, + "DeploymentEvent":{ + "type":"structure", + "members":{ + "EventType":{ + "shape":"DeploymentEventType", + "documentation":"

    The type of deployment event. Deployment event types include the start, stop, or completion of a deployment; a percentage update; the start or stop of a bake period; the start or completion of a rollback.

    " + }, + "TriggeredBy":{ + "shape":"TriggeredBy", + "documentation":"

    The entity that triggered the deployment event. Events can be triggered by a user, AWS AppConfig, an Amazon CloudWatch alarm, or an internal error.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the deployment event. Descriptions include, but are not limited to, the user account or the CloudWatch alarm ARN that initiated a rollback, the percentage of hosts that received the deployment, or in the case of an internal error, a recommendation to attempt a new deployment.

    " + }, + "OccurredAt":{ + "shape":"Iso8601DateTime", + "documentation":"

    The date and time the event occurred.

    " + } + }, + "documentation":"

    An object that describes a deployment event.

    " + }, + "DeploymentEventType":{ + "type":"string", + "enum":[ + "PERCENTAGE_UPDATED", + "ROLLBACK_STARTED", + "ROLLBACK_COMPLETED", + "BAKE_TIME_STARTED", + "DEPLOYMENT_STARTED", + "DEPLOYMENT_COMPLETED" + ] + }, + "DeploymentEvents":{ + "type":"list", + "member":{"shape":"DeploymentEvent"} + }, + "DeploymentList":{ + "type":"list", + "member":{"shape":"DeploymentSummary"} + }, + "DeploymentState":{ + "type":"string", + "enum":[ + "BAKING", + "VALIDATING", + "DEPLOYING", + "COMPLETE", + "ROLLING_BACK", + "ROLLED_BACK" + ] + }, + "DeploymentStrategies":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"DeploymentStrategyList", + "documentation":"

    The elements from this collection.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of items to return. Use this token to get the next set of results.

    " + } + } + }, + "DeploymentStrategy":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"Id", + "documentation":"

    The deployment strategy ID.

    " + }, + "Name":{ + "shape":"Name", + "documentation":"

    The name of the deployment strategy.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the deployment strategy.

    " + }, + "DeploymentDurationInMinutes":{ + "shape":"MinutesBetween0And24Hours", + "documentation":"

    Total amount of time the deployment lasted.

    " + }, + "GrowthType":{ + "shape":"GrowthType", + "documentation":"

    The algorithm used to define how the percentage grew over time.

    " + }, + "GrowthFactor":{ + "shape":"Percentage", + "documentation":"

    The percentage of targets that received a deployed configuration during each interval.

    " + }, + "FinalBakeTimeInMinutes":{ + "shape":"MinutesBetween0And24Hours", + "documentation":"

    The amount of time AppConfig monitored for alarms before considering the deployment to be complete and no longer eligible for automatic rollback.

    " + }, + "ReplicateTo":{ + "shape":"ReplicateTo", + "documentation":"

    Save the deployment strategy to a Systems Manager (SSM) document.

    " + } + } + }, + "DeploymentStrategyId":{ + "type":"string", + "pattern":"(^[a-z0-9]{4,7}$|^AppConfig\\.[A-Za-z0-9]{9,40}$)" + }, + "DeploymentStrategyList":{ + "type":"list", + "member":{"shape":"DeploymentStrategy"} + }, + "DeploymentSummary":{ + "type":"structure", + "members":{ + "DeploymentNumber":{ + "shape":"Integer", + "documentation":"

    The sequence number of the deployment.

    " + }, + "ConfigurationName":{ + "shape":"Name", + "documentation":"

    The name of the configuration.

    " + }, + "ConfigurationVersion":{ + "shape":"Version", + "documentation":"

    The version of the configuration.

    " + }, + "DeploymentDurationInMinutes":{ + "shape":"MinutesBetween0And24Hours", + "documentation":"

    Total amount of time the deployment lasted.

    " + }, + "GrowthType":{ + "shape":"GrowthType", + "documentation":"

    The algorithm used to define how the percentage grows over time.

    " + }, + "GrowthFactor":{ + "shape":"Percentage", + "documentation":"

    The percentage of targets to receive a deployed configuration during each interval.

    " + }, + "FinalBakeTimeInMinutes":{ + "shape":"MinutesBetween0And24Hours", + "documentation":"

    The amount of time AppConfig monitors for alarms before considering the deployment to be complete and no longer eligible for automatic rollback.

    " + }, + "State":{ + "shape":"DeploymentState", + "documentation":"

    The state of the deployment.

    " + }, + "PercentageComplete":{ + "shape":"Percentage", + "documentation":"

    The percentage of targets for which the deployment is available.

    " + }, + "StartedAt":{ + "shape":"Iso8601DateTime", + "documentation":"

    Time the deployment started.

    " + }, + "CompletedAt":{ + "shape":"Iso8601DateTime", + "documentation":"

    Time the deployment completed.

    " + } + }, + "documentation":"

    Information about the deployment.

    " + }, + "Deployments":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"DeploymentList", + "documentation":"

    The elements from this collection.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of items to return. Use this token to get the next set of results.

    " + } + } + }, + "Description":{ + "type":"string", + "max":1024, + "min":0 + }, + "Environment":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    " + }, + "Id":{ + "shape":"Id", + "documentation":"

    The environment ID.

    " + }, + "Name":{ + "shape":"Name", + "documentation":"

    The name of the environment.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the environment.

    " + }, + "State":{ + "shape":"EnvironmentState", + "documentation":"

    The state of the environment. An environment can be in one of the following states: READY_FOR_DEPLOYMENT, DEPLOYING, ROLLING_BACK, or ROLLED_BACK

    " + }, + "Monitors":{ + "shape":"MonitorList", + "documentation":"

    Amazon CloudWatch alarms monitored during the deployment.

    " + } + } + }, + "EnvironmentList":{ + "type":"list", + "member":{"shape":"Environment"} + }, + "EnvironmentState":{ + "type":"string", + "enum":[ + "READY_FOR_DEPLOYMENT", + "DEPLOYING", + "ROLLING_BACK", + "ROLLED_BACK" + ] + }, + "Environments":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"EnvironmentList", + "documentation":"

    The elements from this collection.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of items to return. Use this token to get the next set of results.

    " + } + } + }, + "Float":{"type":"float"}, + "GetApplicationRequest":{ + "type":"structure", + "required":["ApplicationId"], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The ID of the application you want to get.

    ", + "location":"uri", + "locationName":"ApplicationId" + } + } + }, + "GetConfigurationProfileRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The ID of the application that includes the configuration profile you want to get.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The ID of the configuration profile you want to get.

    ", + "location":"uri", + "locationName":"ConfigurationProfileId" + } + } + }, + "GetConfigurationRequest":{ + "type":"structure", + "required":[ + "Application", + "Environment", + "Configuration", + "ClientId" + ], + "members":{ + "Application":{ + "shape":"StringWithLengthBetween1And64", + "documentation":"

    The application to get. Specify either the application name or the application ID.

    ", + "location":"uri", + "locationName":"Application" + }, + "Environment":{ + "shape":"StringWithLengthBetween1And64", + "documentation":"

    The environment to get. Specify either the environment name or the environment ID.

    ", + "location":"uri", + "locationName":"Environment" + }, + "Configuration":{ + "shape":"StringWithLengthBetween1And64", + "documentation":"

    The configuration to get. Specify either the configuration name or the configuration ID.

    ", + "location":"uri", + "locationName":"Configuration" + }, + "ClientId":{ + "shape":"StringWithLengthBetween1And64", + "documentation":"

    A unique ID to identify the client for the configuration. This ID enables AppConfig to deploy the configuration in intervals, as defined in the deployment strategy.

    ", + "location":"querystring", + "locationName":"client_id" + }, + "ClientConfigurationVersion":{ + "shape":"Version", + "documentation":"

    The configuration version returned in the most recent GetConfiguration response.

    AWS AppConfig uses the value of the ClientConfigurationVersion parameter to identify the configuration version on your clients. If you don’t send ClientConfigurationVersion with each call to GetConfiguration, your clients receive the current configuration. You are charged each time your clients receive a configuration.

    To avoid excess charges, we recommend that you include the ClientConfigurationVersion value with every call to GetConfiguration. This value must be saved on your client. Subsequent calls to GetConfiguration must pass this value by using the ClientConfigurationVersion parameter.

    For more information about working with configurations, see Retrieving the Configuration in the AWS AppConfig User Guide.

    ", + "location":"querystring", + "locationName":"client_configuration_version" + } + } + }, + "GetDeploymentRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "EnvironmentId", + "DeploymentNumber" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The ID of the application that includes the deployment you want to get.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "EnvironmentId":{ + "shape":"Id", + "documentation":"

    The ID of the environment that includes the deployment you want to get.

    ", + "location":"uri", + "locationName":"EnvironmentId" + }, + "DeploymentNumber":{ + "shape":"Integer", + "documentation":"

    The sequence number of the deployment.

    ", + "box":true, + "location":"uri", + "locationName":"DeploymentNumber" + } + } + }, + "GetDeploymentStrategyRequest":{ + "type":"structure", + "required":["DeploymentStrategyId"], + "members":{ + "DeploymentStrategyId":{ + "shape":"DeploymentStrategyId", + "documentation":"

    The ID of the deployment strategy to get.

    ", + "location":"uri", + "locationName":"DeploymentStrategyId" + } + } + }, + "GetEnvironmentRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "EnvironmentId" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The ID of the application that includes the environment you want to get.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "EnvironmentId":{ + "shape":"Id", + "documentation":"

    The ID of the environment you want to get.

    ", + "location":"uri", + "locationName":"EnvironmentId" + } + } + }, + "GetHostedConfigurationVersionRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId", + "VersionNumber" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The configuration profile ID.

    ", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

    The version.

    ", + "location":"uri", + "locationName":"VersionNumber" + } + } + }, + "GrowthFactor":{ + "type":"float", + "max":100.0, + "min":1.0 + }, + "GrowthType":{ + "type":"string", + "enum":[ + "LINEAR", + "EXPONENTIAL" + ] + }, + "HostedConfigurationVersion":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"header", + "locationName":"Application-Id" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The configuration profile ID.

    ", + "location":"header", + "locationName":"Configuration-Profile-Id" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

    The configuration version.

    ", + "location":"header", + "locationName":"Version-Number" + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the configuration.

    ", + "location":"header", + "locationName":"Description" + }, + "Content":{ + "shape":"Blob", + "documentation":"

    The content of the configuration or the configuration data.

    " + }, + "ContentType":{ + "shape":"StringWithLengthBetween1And255", + "documentation":"

    A standard MIME type describing the format of the configuration content. For more information, see Content-Type.

    ", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"Content" + }, + "HostedConfigurationVersionSummary":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    " + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The configuration profile ID.

    " + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

    The configuration version.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the configuration.

    " + }, + "ContentType":{ + "shape":"StringWithLengthBetween1And255", + "documentation":"

    A standard MIME type describing the format of the configuration content. For more information, see Content-Type.

    " + } + }, + "documentation":"

    Information about the configuration.

    " + }, + "HostedConfigurationVersionSummaryList":{ + "type":"list", + "member":{"shape":"HostedConfigurationVersionSummary"} + }, + "HostedConfigurationVersions":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"HostedConfigurationVersionSummaryList", + "documentation":"

    The elements from this collection.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of items to return. Use this token to get the next set of results.

    " + } + } + }, + "Id":{ + "type":"string", + "pattern":"[a-z0-9]{4,7}" + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    There was an internal failure in the AppConfig service.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "Iso8601DateTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "ListApplicationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "box":true, + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token to start the list. Use this token to get the next set of results.
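
    A hedged sketch of paging through results with MaxResults and NextToken, as described above, using the generated AWS SDK for Java v2 client:

    import software.amazon.awssdk.services.appconfig.AppConfigClient;
    import software.amazon.awssdk.services.appconfig.model.ListApplicationsRequest;
    import software.amazon.awssdk.services.appconfig.model.ListApplicationsResponse;

    public class ListApplicationsExample {
        public static void main(String[] args) {
            try (AppConfigClient appConfig = AppConfigClient.create()) {
                String nextToken = null;
                do {
                    ListApplicationsResponse page = appConfig.listApplications(ListApplicationsRequest.builder()
                            .maxResults(50)          // documented maximum per call
                            .nextToken(nextToken)    // null on the first call
                            .build());
                    page.items().forEach(app -> System.out.println(app.id() + " " + app.name()));
                    nextToken = page.nextToken();    // repeat until no further pages remain
                } while (nextToken != null);
            }
        }
    }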

    ", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListConfigurationProfilesRequest":{ + "type":"structure", + "required":["ApplicationId"], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "box":true, + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token to start the list. Use this token to get the next set of results.

    ", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListDeploymentStrategiesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "box":true, + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token to start the list. Use this token to get the next set of results.

    ", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListDeploymentsRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "EnvironmentId" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "EnvironmentId":{ + "shape":"Id", + "documentation":"

    The environment ID.

    ", + "location":"uri", + "locationName":"EnvironmentId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "box":true, + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token to start the list. Use this token to get the next set of results.

    ", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListEnvironmentsRequest":{ + "type":"structure", + "required":["ApplicationId"], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "box":true, + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token to start the list. Use this token to get the next set of results.

    ", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListHostedConfigurationVersionsRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The configuration profile ID.

    ", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "box":true, + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token to start the list. Use this token to get the next set of results.

    ", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The resource ARN.

    ", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MinutesBetween0And24Hours":{ + "type":"integer", + "max":1440, + "min":0 + }, + "Monitor":{ + "type":"structure", + "members":{ + "AlarmArn":{ + "shape":"Arn", + "documentation":"

    ARN of the Amazon CloudWatch alarm.

    " + }, + "AlarmRoleArn":{ + "shape":"RoleArn", + "documentation":"

    ARN of an IAM role for AppConfig to monitor AlarmArn.

    " + } + }, + "documentation":"

    Amazon CloudWatch alarms to monitor during the deployment process.

    " + }, + "MonitorList":{ + "type":"list", + "member":{"shape":"Monitor"}, + "max":5, + "min":0 + }, + "Name":{ + "type":"string", + "max":64, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1 + }, + "PayloadTooLargeException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "Measure":{"shape":"BytesMeasure"}, + "Limit":{"shape":"Float"}, + "Size":{"shape":"Float"} + }, + "documentation":"

    The configuration size is too large.

    ", + "error":{"httpStatusCode":413}, + "exception":true + }, + "Percentage":{ + "type":"float", + "max":100.0, + "min":1.0 + }, + "ReplicateTo":{ + "type":"string", + "enum":[ + "NONE", + "SSM_DOCUMENT" + ] + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceName":{"shape":"String"} + }, + "documentation":"

    The requested resource could not be found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceTags":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata to assign to AppConfig resources. Tags help organize and categorize your AppConfig resources. Each tag consists of a key and an optional value, both of which you define.

    " + } + } + }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^((arn):(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):(iam)::\\d{12}:role[/].*)$" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    The number of hosted configuration versions exceeds the limit for the AppConfig configuration store. Delete one or more versions and try again.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "StartDeploymentRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "EnvironmentId", + "DeploymentStrategyId", + "ConfigurationProfileId", + "ConfigurationVersion" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "EnvironmentId":{ + "shape":"Id", + "documentation":"

    The environment ID.

    ", + "location":"uri", + "locationName":"EnvironmentId" + }, + "DeploymentStrategyId":{ + "shape":"DeploymentStrategyId", + "documentation":"

    The deployment strategy ID.

    " + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The configuration profile ID.

    " + }, + "ConfigurationVersion":{ + "shape":"Version", + "documentation":"

    The configuration version to deploy.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the deployment.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata to assign to the deployment. Tags help organize and categorize your AppConfig resources. Each tag consists of a key and an optional value, both of which you define.

    " + } + } + }, + "StopDeploymentRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "EnvironmentId", + "DeploymentNumber" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "EnvironmentId":{ + "shape":"Id", + "documentation":"

    The environment ID.

    ", + "location":"uri", + "locationName":"EnvironmentId" + }, + "DeploymentNumber":{ + "shape":"Integer", + "documentation":"

    The sequence number of the deployment.

    ", + "box":true, + "location":"uri", + "locationName":"DeploymentNumber" + } + } + }, + "String":{"type":"string"}, + "StringWithLengthBetween0And32768":{ + "type":"string", + "max":32768, + "min":0, + "sensitive":true + }, + "StringWithLengthBetween1And255":{ + "type":"string", + "max":255, + "min":1 + }, + "StringWithLengthBetween1And64":{ + "type":"string", + "max":64, + "min":1 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the resource to tag.

    ", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The key-value string map. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.
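
    As a purely illustrative sketch (the client and builder names AppConfigClient, TagResourceRequest, resourceArn, and tags follow the SDK's usual code-generation conventions and are assumed here, and the ARN is a made-up placeholder), a tag map that satisfies these constraints could be applied like this:

    import java.util.Map;
    import software.amazon.awssdk.services.appconfig.AppConfigClient;
    import software.amazon.awssdk.services.appconfig.model.TagResourceRequest;

    public class TagAppConfigResource {
        public static void main(String[] args) {
            try (AppConfigClient appConfig = AppConfigClient.create()) {
                appConfig.tagResource(TagResourceRequest.builder()
                        // Hypothetical resource ARN used only for illustration.
                        .resourceArn("arn:aws:appconfig:us-east-1:111122223333:application/abc1234")
                        // Keys: up to 128 characters and must not start with aws:. Values: up to 256 characters.
                        .tags(Map.of("Environment", "Production", "Team", "payments"))
                        .build());
            }
        }
    }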

    " + } + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "TriggeredBy":{ + "type":"string", + "enum":[ + "USER", + "APPCONFIG", + "CLOUDWATCH_ALARM", + "INTERNAL_ERROR" + ] + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the resource from which to remove tags.

    ", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag keys to delete.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UpdateApplicationRequest":{ + "type":"structure", + "required":["ApplicationId"], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "Name":{ + "shape":"Name", + "documentation":"

    The name of the application.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the application.

    " + } + } + }, + "UpdateConfigurationProfileRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The ID of the configuration profile.

    ", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "Name":{ + "shape":"Name", + "documentation":"

    The name of the configuration profile.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the configuration profile.

    " + }, + "RetrievalRoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of an IAM role with permission to access the configuration at the specified LocationUri.

    " + }, + "Validators":{ + "shape":"ValidatorList", + "documentation":"

    A list of methods for validating the configuration.

    " + } + } + }, + "UpdateDeploymentStrategyRequest":{ + "type":"structure", + "required":["DeploymentStrategyId"], + "members":{ + "DeploymentStrategyId":{ + "shape":"DeploymentStrategyId", + "documentation":"

    The deployment strategy ID.

    ", + "location":"uri", + "locationName":"DeploymentStrategyId" + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the deployment strategy.

    " + }, + "DeploymentDurationInMinutes":{ + "shape":"MinutesBetween0And24Hours", + "documentation":"

    Total amount of time for a deployment to last.

    ", + "box":true + }, + "FinalBakeTimeInMinutes":{ + "shape":"MinutesBetween0And24Hours", + "documentation":"

    The amount of time AppConfig monitors for alarms before considering the deployment to be complete and no longer eligible for automatic rollback.

    ", + "box":true + }, + "GrowthFactor":{ + "shape":"GrowthFactor", + "documentation":"

    The percentage of targets to receive a deployed configuration during each interval.

    ", + "box":true + }, + "GrowthType":{ + "shape":"GrowthType", + "documentation":"

    The algorithm used to define how the percentage grows over time. AWS AppConfig supports the following growth types:

    Linear: For this type, AppConfig processes the deployment by increments of the growth factor evenly distributed over the deployment time. For example, a linear deployment that uses a growth factor of 20 initially makes the configuration available to 20 percent of the targets. After 1/5th of the deployment time has passed, the system updates the percentage to 40 percent. This continues until 100% of the targets are set to receive the deployed configuration.

    Exponential: For this type, AppConfig processes the deployment exponentially using the following formula: G*(2^N). In this formula, G is the growth factor specified by the user and N is the number of steps until the configuration is deployed to all targets. For example, if you specify a growth factor of 2, then the system rolls out the configuration as follows:

    2*(2^0)

    2*(2^1)

    2*(2^2)

    Expressed numerically, the deployment rolls out as follows: 2% of the targets, 4% of the targets, 8% of the targets, and continues until the configuration has been deployed to all targets.
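
    For illustration only, here is a small self-contained Java sketch (not part of the service model; the final jump to 100 percent is an assumption made for readability) that prints the cumulative percentages produced by the G*(2^N) progression described above:

    import java.util.ArrayList;
    import java.util.List;

    public class ExponentialRollout {
        // Cumulative percentage of targets reached at each step: G*(2^0), G*(2^1), G*(2^2), ...
        static List<Double> rolloutSteps(double growthFactor) {
            List<Double> steps = new ArrayList<>();
            double pct = growthFactor;     // G*(2^0)
            while (pct < 100.0) {
                steps.add(pct);
                pct *= 2.0;                // each step doubles the previous percentage
            }
            steps.add(100.0);              // final step reaches all targets
            return steps;
        }

        public static void main(String[] args) {
            // A growth factor of 2 yields [2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 100.0].
            System.out.println(rolloutSteps(2.0));
        }
    }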

    " + } + } + }, + "UpdateEnvironmentRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "EnvironmentId" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "EnvironmentId":{ + "shape":"Id", + "documentation":"

    The environment ID.

    ", + "location":"uri", + "locationName":"EnvironmentId" + }, + "Name":{ + "shape":"Name", + "documentation":"

    The name of the environment.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the environment.

    " + }, + "Monitors":{ + "shape":"MonitorList", + "documentation":"

    Amazon CloudWatch alarms to monitor during the deployment process.

    " + } + } + }, + "Uri":{ + "type":"string", + "max":2048, + "min":1 + }, + "ValidateConfigurationRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId", + "ConfigurationVersion" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

    The application ID.

    ", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

    The configuration profile ID.

    ", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "ConfigurationVersion":{ + "shape":"Version", + "documentation":"

    The version of the configuration to validate.

    ", + "location":"querystring", + "locationName":"configuration_version" + } + } + }, + "Validator":{ + "type":"structure", + "required":[ + "Type", + "Content" + ], + "members":{ + "Type":{ + "shape":"ValidatorType", + "documentation":"

    AppConfig supports validators of type JSON_SCHEMA and LAMBDA.

    " + }, + "Content":{ + "shape":"StringWithLengthBetween0And32768", + "documentation":"

    Either the JSON Schema content or the Amazon Resource Name (ARN) of an AWS Lambda function.

    " + } + }, + "documentation":"

    A validator provides a syntactic or semantic check to ensure the configuration you want to deploy functions as intended. To validate your application configuration data, you provide a schema or a Lambda function that runs against the configuration. The configuration deployment or update can only proceed when the configuration data is valid.
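
    As a minimal sketch only (assuming the generated AppConfig model classes Validator and ValidatorType with the SDK's usual builder pattern; the schema itself is a made-up example), a JSON Schema validator could be constructed like this before being attached to a configuration profile:

    import software.amazon.awssdk.services.appconfig.model.Validator;
    import software.amazon.awssdk.services.appconfig.model.ValidatorType;

    public class ValidatorExample {
        public static void main(String[] args) {
            // Content holds either inline JSON Schema (as here) or the ARN of an AWS Lambda function.
            String schema = "{\"type\":\"object\",\"required\":[\"timeout\"],"
                    + "\"properties\":{\"timeout\":{\"type\":\"number\",\"minimum\":1}}}";

            Validator validator = Validator.builder()
                    .type(ValidatorType.JSON_SCHEMA)
                    .content(schema)
                    .build();
            System.out.println(validator);
        }
    }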

    " + }, + "ValidatorList":{ + "type":"list", + "member":{"shape":"Validator"}, + "max":2, + "min":0 + }, + "ValidatorType":{ + "type":"string", + "enum":[ + "JSON_SCHEMA", + "LAMBDA" + ] + }, + "ValidatorTypeList":{ + "type":"list", + "member":{"shape":"ValidatorType"}, + "max":2, + "min":0 + }, + "Version":{ + "type":"string", + "max":1024, + "min":1 + } + }, + "documentation":"AWS AppConfig

    Use AWS AppConfig, a capability of AWS Systems Manager, to create, manage, and quickly deploy application configurations. AppConfig supports controlled deployments to applications of any size and includes built-in validation checks and monitoring. You can use AppConfig with applications hosted on Amazon EC2 instances, AWS Lambda, containers, mobile applications, or IoT devices.

    To prevent errors when deploying application configurations, especially for production systems where a simple typo could cause an unexpected outage, AppConfig includes validators. A validator provides a syntactic or semantic check to ensure that the configuration you want to deploy works as intended. To validate your application configuration data, you provide a schema or a Lambda function that runs against the configuration. The configuration deployment or update can only proceed when the configuration data is valid.

    During a configuration deployment, AppConfig monitors the application to ensure that the deployment is successful. If the system encounters an error, AppConfig rolls back the change to minimize impact for your application users. You can configure a deployment strategy for each application or environment that includes deployment criteria, including velocity, bake time, and alarms to monitor. Similar to error monitoring, if a deployment triggers an alarm, AppConfig automatically rolls back to the previous version.

    AppConfig supports multiple use cases. Here are some examples.

    • Application tuning: Use AppConfig to carefully introduce changes to your application that can only be tested with production traffic.

    • Feature toggle: Use AppConfig to turn on new features that require a timely deployment, such as a product launch or announcement.

    • Allow list: Use AppConfig to allow premium subscribers to access paid content.

    • Operational issues: Use AppConfig to reduce stress on your application when a dependency or other external factor impacts the system.

    This reference is intended to be used with the AWS AppConfig User Guide.

    " +} diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml new file mode 100644 index 000000000000..eb0fe37b1a83 --- /dev/null +++ b/services/appflow/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + appflow + AWS Java SDK :: Services :: Appflow + The AWS Java SDK for Appflow module holds the client classes that are used for + communicating with Appflow. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.appflow + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/appflow/src/main/resources/codegen-resources/paginators-1.json b/services/appflow/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..deabfa47b562 --- /dev/null +++ b/services/appflow/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,23 @@ +{ + "pagination": { + "DescribeConnectorProfiles": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "DescribeConnectors": { + "input_token": "nextToken", + "output_token": "nextToken" + }, + "DescribeFlowExecutionRecords": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListFlows": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + } + } +} diff --git a/services/appflow/src/main/resources/codegen-resources/service-2.json b/services/appflow/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..b1c0d1dd5f7f --- /dev/null +++ b/services/appflow/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3680 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-08-23", + "endpointPrefix":"appflow", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Appflow", + "serviceId":"Appflow", + "signatureVersion":"v4", + "signingName":"appflow", + "uid":"appflow-2020-08-23" + }, + "operations":{ + "CreateConnectorProfile":{ + "name":"CreateConnectorProfile", + "http":{ + "method":"POST", + "requestUri":"/create-connector-profile" + }, + "input":{"shape":"CreateConnectorProfileRequest"}, + "output":{"shape":"CreateConnectorProfileResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConnectorAuthenticationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new connector profile associated with your AWS account. There is a soft quota of 100 connector profiles per AWS account. If you need more connector profiles than this quota allows, you can submit a request to the Amazon AppFlow team through the Amazon AppFlow support channel.

    " + }, + "CreateFlow":{ + "name":"CreateFlow", + "http":{ + "method":"POST", + "requestUri":"/create-flow" + }, + "input":{"shape":"CreateFlowRequest"}, + "output":{"shape":"CreateFlowResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ConnectorAuthenticationException"}, + {"shape":"ConnectorServerException"} + ], + "documentation":"

    Enables your application to create a new flow using Amazon AppFlow. You must create a connector profile before calling this API. Please note that the Request Syntax below shows syntax for multiple destinations; however, you can only transfer data to one item in this list at a time. Amazon AppFlow does not currently support flows to multiple destinations at once.

    " + }, + "DeleteConnectorProfile":{ + "name":"DeleteConnectorProfile", + "http":{ + "method":"POST", + "requestUri":"/delete-connector-profile" + }, + "input":{"shape":"DeleteConnectorProfileRequest"}, + "output":{"shape":"DeleteConnectorProfileResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Enables you to delete an existing connector profile.

    " + }, + "DeleteFlow":{ + "name":"DeleteFlow", + "http":{ + "method":"POST", + "requestUri":"/delete-flow" + }, + "input":{"shape":"DeleteFlowRequest"}, + "output":{"shape":"DeleteFlowResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Enables your application to delete an existing flow. Before deleting the flow, Amazon AppFlow validates the request by checking the flow configuration and status. You can delete flows one at a time.

    " + }, + "DescribeConnectorEntity":{ + "name":"DescribeConnectorEntity", + "http":{ + "method":"POST", + "requestUri":"/describe-connector-entity" + }, + "input":{"shape":"DescribeConnectorEntityRequest"}, + "output":{"shape":"DescribeConnectorEntityResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConnectorAuthenticationException"}, + {"shape":"ConnectorServerException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Provides details regarding the entity used with the connector, with a description of the data model for each entity.

    " + }, + "DescribeConnectorProfiles":{ + "name":"DescribeConnectorProfiles", + "http":{ + "method":"POST", + "requestUri":"/describe-connector-profiles" + }, + "input":{"shape":"DescribeConnectorProfilesRequest"}, + "output":{"shape":"DescribeConnectorProfilesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of connector-profile details matching the provided connector-profile names and connector-types. Both input lists are optional, and you can use them to filter the result.

    If no names or connector-types are provided, returns all connector profiles in a paginated form. If there is no match, this operation returns an empty list.

    " + }, + "DescribeConnectors":{ + "name":"DescribeConnectors", + "http":{ + "method":"POST", + "requestUri":"/describe-connectors" + }, + "input":{"shape":"DescribeConnectorsRequest"}, + "output":{"shape":"DescribeConnectorsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes the connectors vended by Amazon AppFlow for specified connector types. If you don't specify a connector type, this operation describes all connectors vended by Amazon AppFlow. If there are more connectors than can be returned in one page, the response contains a nextToken object, which can be passed in to the next call to the DescribeConnectors API operation to retrieve the next page.
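
    As a rough sketch only (the client, request, and accessor names AppflowClient, DescribeConnectorsRequest, connectorConfigurations(), and nextToken() are assumed from the SDK's usual code-generation conventions rather than taken from this model file), the nextToken loop typically looks like this:

    import software.amazon.awssdk.services.appflow.AppflowClient;
    import software.amazon.awssdk.services.appflow.model.DescribeConnectorsRequest;
    import software.amazon.awssdk.services.appflow.model.DescribeConnectorsResponse;

    public class DescribeAllConnectors {
        public static void main(String[] args) {
            try (AppflowClient appflow = AppflowClient.create()) {
                String nextToken = null;
                do {
                    // Pass the token from the previous page (null on the first call) to fetch the next page.
                    DescribeConnectorsResponse page = appflow.describeConnectors(
                            DescribeConnectorsRequest.builder().nextToken(nextToken).build());
                    page.connectorConfigurations().keySet()
                            .forEach(type -> System.out.println("Connector type: " + type));
                    nextToken = page.nextToken();
                } while (nextToken != null);   // a null token means the last page has been reached
            }
        }
    }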

    " + }, + "DescribeFlow":{ + "name":"DescribeFlow", + "http":{ + "method":"POST", + "requestUri":"/describe-flow" + }, + "input":{"shape":"DescribeFlowRequest"}, + "output":{"shape":"DescribeFlowResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Provides a description of the specified flow.

    " + }, + "DescribeFlowExecutionRecords":{ + "name":"DescribeFlowExecutionRecords", + "http":{ + "method":"POST", + "requestUri":"/describe-flow-execution-records" + }, + "input":{"shape":"DescribeFlowExecutionRecordsRequest"}, + "output":{"shape":"DescribeFlowExecutionRecordsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Fetches the execution history of the flow.

    " + }, + "ListConnectorEntities":{ + "name":"ListConnectorEntities", + "http":{ + "method":"POST", + "requestUri":"/list-connector-entities" + }, + "input":{"shape":"ListConnectorEntitiesRequest"}, + "output":{"shape":"ListConnectorEntitiesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConnectorAuthenticationException"}, + {"shape":"ConnectorServerException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns the list of available connector entities supported by Amazon AppFlow. For example, you can query Salesforce for Account and Opportunity entities, or query ServiceNow for the Incident entity.

    " + }, + "ListFlows":{ + "name":"ListFlows", + "http":{ + "method":"POST", + "requestUri":"/list-flows" + }, + "input":{"shape":"ListFlowsRequest"}, + "output":{"shape":"ListFlowsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all of the flows associated with your account.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieves the tags that are associated with a specified flow.

    " + }, + "StartFlow":{ + "name":"StartFlow", + "http":{ + "method":"POST", + "requestUri":"/start-flow" + }, + "input":{"shape":"StartFlowRequest"}, + "output":{"shape":"StartFlowResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Activates an existing flow. For on-demand flows, this operation runs the flow immediately. For schedule and event-triggered flows, this operation activates the flow.

    " + }, + "StopFlow":{ + "name":"StopFlow", + "http":{ + "method":"POST", + "requestUri":"/stop-flow" + }, + "input":{"shape":"StopFlowRequest"}, + "output":{"shape":"StopFlowResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deactivates the existing flow. For on-demand flows, this operation returns an unsupportedOperationException error message. For schedule and event-triggered flows, this operation deactivates the flow.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Applies a tag to the specified flow.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes a tag from the specified flow.

    " + }, + "UpdateConnectorProfile":{ + "name":"UpdateConnectorProfile", + "http":{ + "method":"POST", + "requestUri":"/update-connector-profile" + }, + "input":{"shape":"UpdateConnectorProfileRequest"}, + "output":{"shape":"UpdateConnectorProfileResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ConnectorAuthenticationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates a given connector profile associated with your account.

    " + }, + "UpdateFlow":{ + "name":"UpdateFlow", + "http":{ + "method":"POST", + "requestUri":"/update-flow" + }, + "input":{"shape":"UpdateFlowRequest"}, + "output":{"shape":"UpdateFlowResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ConnectorAuthenticationException"}, + {"shape":"ConnectorServerException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates an existing flow.

    " + } + }, + "shapes":{ + "ARN":{ + "type":"string", + "max":512, + "pattern":"arn:aws:.*:.*:[0-9]+:.*" + }, + "AccessKeyId":{ + "type":"string", + "max":256, + "pattern":"\\S+", + "sensitive":true + }, + "AccessToken":{ + "type":"string", + "max":512, + "pattern":"\\S+", + "sensitive":true + }, + "AccountName":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "AggregationConfig":{ + "type":"structure", + "members":{ + "aggregationType":{ + "shape":"AggregationType", + "documentation":"

    Specifies whether Amazon AppFlow aggregates the flow records into a single file, or leaves them unaggregated.

    " + } + }, + "documentation":"

    The aggregation settings that you can use to customize the output format of your flow data.

    " + }, + "AggregationType":{ + "type":"string", + "enum":[ + "None", + "SingleFile" + ] + }, + "AmplitudeConnectorOperator":{ + "type":"string", + "enum":["BETWEEN"] + }, + "AmplitudeConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "apiKey", + "secretKey" + ], + "members":{ + "apiKey":{ + "shape":"ApiKey", + "documentation":"

    A unique alphanumeric identifier used to authenticate a user, developer, or calling program to your API.

    " + }, + "secretKey":{ + "shape":"SecretKey", + "documentation":"

    The Secret Access Key portion of the credentials.

    " + } + }, + "documentation":"

    The connector-specific credentials required when using Amplitude.

    " + }, + "AmplitudeConnectorProfileProperties":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector-specific profile properties required when using Amplitude.

    " + }, + "AmplitudeMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Amplitude.

    " + }, + "AmplitudeSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Amplitude flow source.

    " + } + }, + "documentation":"

    The properties that are applied when Amplitude is being used as a source.

    " + }, + "ApiKey":{ + "type":"string", + "max":256, + "pattern":"\\S+" + }, + "ApiSecretKey":{ + "type":"string", + "max":256, + "pattern":"\\S+", + "sensitive":true + }, + "ApiToken":{ + "type":"string", + "max":256, + "pattern":"\\S+" + }, + "ApplicationKey":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "AuthCode":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "Boolean":{"type":"boolean"}, + "BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"\\S+" + }, + "BucketPrefix":{ + "type":"string", + "max":512, + "pattern":".*" + }, + "ClientCredentialsArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws:secretsmanager:.*:[0-9]+:.*", + "sensitive":true + }, + "ClientId":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "ClientSecret":{ + "type":"string", + "max":512, + "pattern":"\\S+", + "sensitive":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    There was a conflict when processing the request (for example, a flow with the given name already exists within the account). Check for conflicting resource names and try again.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ConnectionMode":{ + "type":"string", + "enum":[ + "Public", + "Private" + ] + }, + "ConnectorAuthenticationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An error occurred when authenticating with the connector endpoint.

    ", + "error":{"httpStatusCode":401}, + "exception":true + }, + "ConnectorConfiguration":{ + "type":"structure", + "members":{ + "canUseAsSource":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the connector can be used as a source.

    " + }, + "canUseAsDestination":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the connector can be used as a destination.

    " + }, + "supportedDestinationConnectors":{ + "shape":"ConnectorTypeList", + "documentation":"

    Lists the connectors that are available for use as destinations.

    " + }, + "supportedSchedulingFrequencies":{ + "shape":"SchedulingFrequencyTypeList", + "documentation":"

    Specifies the supported flow frequency for that connector.

    " + }, + "isPrivateLinkEnabled":{ + "shape":"Boolean", + "documentation":"

    Specifies if PrivateLink is enabled for that connector.

    " + }, + "isPrivateLinkEndpointUrlRequired":{ + "shape":"Boolean", + "documentation":"

    Specifies if a PrivateLink endpoint URL is required.

    " + }, + "supportedTriggerTypes":{ + "shape":"TriggerTypeList", + "documentation":"

    Specifies the supported trigger types for the flow.

    " + }, + "connectorMetadata":{ + "shape":"ConnectorMetadata", + "documentation":"

    Specifies connector-specific metadata such as oAuthScopes, supportedRegions, privateLinkServiceUrl, and so on.

    " + } + }, + "documentation":"

    The configuration settings related to a given connector.

    " + }, + "ConnectorConfigurationsMap":{ + "type":"map", + "key":{"shape":"ConnectorType"}, + "value":{"shape":"ConnectorConfiguration"} + }, + "ConnectorEntity":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

    The name of the connector entity.

    " + }, + "label":{ + "shape":"Label", + "documentation":"

    The label applied to the connector entity.

    " + }, + "hasNestedEntities":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the connector entity is a parent or a category and has more entities nested underneath it. If another call is made with entitiesPath = \"the_current_entity_name_with_hasNestedEntities_true\", then it returns the nested entities underneath it. This provides a way to retrieve all supported entities in a recursive fashion.

    " + } + }, + "documentation":"

    The high-level entity that can be queried in Amazon AppFlow. For example, a Salesforce entity might be an Account or Opportunity, whereas a ServiceNow entity might be an Incident.

    " + }, + "ConnectorEntityField":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"Identifier", + "documentation":"

    The unique identifier of the connector field.

    " + }, + "label":{ + "shape":"Label", + "documentation":"

    The label applied to a connector entity field.

    " + }, + "supportedFieldTypeDetails":{ + "shape":"SupportedFieldTypeDetails", + "documentation":"

    Contains details regarding the supported FieldType, including the corresponding filterOperators and supportedValues.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    A description of the connector entity field.

    " + }, + "sourceProperties":{ + "shape":"SourceFieldProperties", + "documentation":"

    The properties that can be applied to a field when the connector is being used as a source.

    " + }, + "destinationProperties":{ + "shape":"DestinationFieldProperties", + "documentation":"

    The properties applied to a field when the connector is being used as a destination.

    " + } + }, + "documentation":"

    Describes the data model of a connector field. For example, for an account entity, the fields would be account name, account ID, and so on.

    " + }, + "ConnectorEntityFieldList":{ + "type":"list", + "member":{"shape":"ConnectorEntityField"} + }, + "ConnectorEntityList":{ + "type":"list", + "member":{"shape":"ConnectorEntity"} + }, + "ConnectorEntityMap":{ + "type":"map", + "key":{"shape":"Group"}, + "value":{"shape":"ConnectorEntityList"} + }, + "ConnectorMetadata":{ + "type":"structure", + "members":{ + "Amplitude":{ + "shape":"AmplitudeMetadata", + "documentation":"

    The connector metadata specific to Amplitude.

    " + }, + "Datadog":{ + "shape":"DatadogMetadata", + "documentation":"

    The connector metadata specific to Datadog.

    " + }, + "Dynatrace":{ + "shape":"DynatraceMetadata", + "documentation":"

    The connector metadata specific to Dynatrace.

    " + }, + "GoogleAnalytics":{ + "shape":"GoogleAnalyticsMetadata", + "documentation":"

    The connector metadata specific to Google Analytics.

    " + }, + "InforNexus":{ + "shape":"InforNexusMetadata", + "documentation":"

    The connector metadata specific to Infor Nexus.

    " + }, + "Marketo":{ + "shape":"MarketoMetadata", + "documentation":"

    The connector metadata specific to Marketo.

    " + }, + "Redshift":{ + "shape":"RedshiftMetadata", + "documentation":"

    The connector metadata specific to Amazon Redshift.

    " + }, + "S3":{ + "shape":"S3Metadata", + "documentation":"

    The connector metadata specific to Amazon S3.

    " + }, + "Salesforce":{ + "shape":"SalesforceMetadata", + "documentation":"

    The connector metadata specific to Salesforce.

    " + }, + "ServiceNow":{ + "shape":"ServiceNowMetadata", + "documentation":"

    The connector metadata specific to ServiceNow.

    " + }, + "Singular":{ + "shape":"SingularMetadata", + "documentation":"

    The connector metadata specific to Singular.

    " + }, + "Slack":{ + "shape":"SlackMetadata", + "documentation":"

    The connector metadata specific to Slack.

    " + }, + "Snowflake":{ + "shape":"SnowflakeMetadata", + "documentation":"

    The connector metadata specific to Snowflake.

    " + }, + "Trendmicro":{ + "shape":"TrendmicroMetadata", + "documentation":"

    The connector metadata specific to Trend Micro.

    " + }, + "Veeva":{ + "shape":"VeevaMetadata", + "documentation":"

    The connector metadata specific to Veeva.

    " + }, + "Zendesk":{ + "shape":"ZendeskMetadata", + "documentation":"

    The connector metadata specific to Zendesk.

    " + }, + "EventBridge":{ + "shape":"EventBridgeMetadata", + "documentation":"

    The connector metadata specific to Amazon EventBridge.

    " + }, + "Upsolver":{ + "shape":"UpsolverMetadata", + "documentation":"

    The connector metadata specific to Upsolver.

    " + } + }, + "documentation":"

    A structure to specify connector-specific metadata such as oAuthScopes, supportedRegions, privateLinkServiceUrl, and so on.

    " + }, + "ConnectorOAuthRequest":{ + "type":"structure", + "members":{ + "authCode":{ + "shape":"AuthCode", + "documentation":"

    The code provided by the connector when it has been authenticated via the connected app.

    " + }, + "redirectUri":{ + "shape":"RedirectUri", + "documentation":"

    The URL to which the authentication server redirects the browser after authorization has been granted.

    " + } + }, + "documentation":"

    Used by select connectors for which the OAuth workflow is supported, such as Salesforce, Google Analytics, Marketo, Zendesk, and Slack.

    " + }, + "ConnectorOperator":{ + "type":"structure", + "members":{ + "Amplitude":{ + "shape":"AmplitudeConnectorOperator", + "documentation":"

    The operation to be performed on the provided Amplitude source fields.

    " + }, + "Datadog":{ + "shape":"DatadogConnectorOperator", + "documentation":"

    The operation to be performed on the provided Datadog source fields.

    " + }, + "Dynatrace":{ + "shape":"DynatraceConnectorOperator", + "documentation":"

    The operation to be performed on the provided Dynatrace source fields.

    " + }, + "GoogleAnalytics":{ + "shape":"GoogleAnalyticsConnectorOperator", + "documentation":"

    The operation to be performed on the provided Google Analytics source fields.

    " + }, + "InforNexus":{ + "shape":"InforNexusConnectorOperator", + "documentation":"

    The operation to be performed on the provided Infor Nexus source fields.

    " + }, + "Marketo":{ + "shape":"MarketoConnectorOperator", + "documentation":"

    The operation to be performed on the provided Marketo source fields.

    " + }, + "S3":{ + "shape":"S3ConnectorOperator", + "documentation":"

    The operation to be performed on the provided Amazon S3 source fields.

    " + }, + "Salesforce":{ + "shape":"SalesforceConnectorOperator", + "documentation":"

    The operation to be performed on the provided Salesforce source fields.

    " + }, + "ServiceNow":{ + "shape":"ServiceNowConnectorOperator", + "documentation":"

    The operation to be performed on the provided ServiceNow source fields.

    " + }, + "Singular":{ + "shape":"SingularConnectorOperator", + "documentation":"

    The operation to be performed on the provided Singular source fields.

    " + }, + "Slack":{ + "shape":"SlackConnectorOperator", + "documentation":"

    The operation to be performed on the provided Slack source fields.

    " + }, + "Trendmicro":{ + "shape":"TrendmicroConnectorOperator", + "documentation":"

    The operation to be performed on the provided Trend Micro source fields.

    " + }, + "Veeva":{ + "shape":"VeevaConnectorOperator", + "documentation":"

    The operation to be performed on the provided Veeva source fields.

    " + }, + "Zendesk":{ + "shape":"ZendeskConnectorOperator", + "documentation":"

    The operation to be performed on the provided Zendesk source fields.

    " + } + }, + "documentation":"

    The operation to be performed on the provided source fields.

    " + }, + "ConnectorProfile":{ + "type":"structure", + "members":{ + "connectorProfileArn":{ + "shape":"ConnectorProfileArn", + "documentation":"

    The Amazon Resource Name (ARN) of the connector profile.

    " + }, + "connectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

    The name of the connector profile. The name is unique for each ConnectorProfile in the AWS account.

    " + }, + "connectorType":{ + "shape":"ConnectorType", + "documentation":"

    The type of connector, such as Salesforce, Amplitude, and so on.

    " + }, + "connectionMode":{ + "shape":"ConnectionMode", + "documentation":"

    Indicates the connection mode and if it is public or private.

    " + }, + "credentialsArn":{ + "shape":"ARN", + "documentation":"

    The Amazon Resource Name (ARN) of the connector profile credentials.

    " + }, + "connectorProfileProperties":{ + "shape":"ConnectorProfileProperties", + "documentation":"

    The connector-specific properties of the profile configuration.

    " + }, + "createdAt":{ + "shape":"Date", + "documentation":"

    Specifies when the connector profile was created.

    " + }, + "lastUpdatedAt":{ + "shape":"Date", + "documentation":"

    Specifies when the connector profile was last updated.

    " + } + }, + "documentation":"

    Describes an instance of a connector. This includes the provided name, credentials ARN, connection-mode, and so on. To keep the API intuitive and extensible, the fields that are common to all types of connector profiles are explicitly specified at the top level. The rest of the connector-specific properties are available via the connectorProfileProperties field.

    " + }, + "ConnectorProfileArn":{ + "type":"string", + "max":512, + "pattern":"arn:aws:kms:.*:[0-9]+:.*" + }, + "ConnectorProfileConfig":{ + "type":"structure", + "required":[ + "connectorProfileProperties", + "connectorProfileCredentials" + ], + "members":{ + "connectorProfileProperties":{ + "shape":"ConnectorProfileProperties", + "documentation":"

    The connector-specific properties of the profile configuration.

    " + }, + "connectorProfileCredentials":{ + "shape":"ConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required by each connector.

    " + } + }, + "documentation":"

    Defines the connector-specific configuration and credentials for the connector profile.

    " + }, + "ConnectorProfileCredentials":{ + "type":"structure", + "members":{ + "Amplitude":{ + "shape":"AmplitudeConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Amplitude.

    " + }, + "Datadog":{ + "shape":"DatadogConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Datadog.

    " + }, + "Dynatrace":{ + "shape":"DynatraceConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Dynatrace.

    " + }, + "GoogleAnalytics":{ + "shape":"GoogleAnalyticsConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Google Analytics.

    " + }, + "InforNexus":{ + "shape":"InforNexusConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Infor Nexus.

    " + }, + "Marketo":{ + "shape":"MarketoConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Marketo.

    " + }, + "Redshift":{ + "shape":"RedshiftConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Amazon Redshift.

    " + }, + "Salesforce":{ + "shape":"SalesforceConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Salesforce.

    " + }, + "ServiceNow":{ + "shape":"ServiceNowConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using ServiceNow.

    " + }, + "Singular":{ + "shape":"SingularConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Singular.

    " + }, + "Slack":{ + "shape":"SlackConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Slack.

    " + }, + "Snowflake":{ + "shape":"SnowflakeConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Snowflake.

    " + }, + "Trendmicro":{ + "shape":"TrendmicroConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Trend Micro.

    " + }, + "Veeva":{ + "shape":"VeevaConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Veeva.

    " + }, + "Zendesk":{ + "shape":"ZendeskConnectorProfileCredentials", + "documentation":"

    The connector-specific credentials required when using Zendesk.

    " + } + }, + "documentation":"

    The connector-specific credentials required by a connector.

    " + }, + "ConnectorProfileDetailList":{ + "type":"list", + "member":{"shape":"ConnectorProfile"} + }, + "ConnectorProfileName":{ + "type":"string", + "max":256, + "pattern":"[\\w/!@#+=.-]+" + }, + "ConnectorProfileNameList":{ + "type":"list", + "member":{"shape":"ConnectorProfileName"}, + "max":100, + "min":0 + }, + "ConnectorProfileProperties":{ + "type":"structure", + "members":{ + "Amplitude":{ + "shape":"AmplitudeConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Amplitude.

    " + }, + "Datadog":{ + "shape":"DatadogConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Datadog.

    " + }, + "Dynatrace":{ + "shape":"DynatraceConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Dynatrace.

    " + }, + "GoogleAnalytics":{ + "shape":"GoogleAnalyticsConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Google Analytics.

    " + }, + "InforNexus":{ + "shape":"InforNexusConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Infor Nexus.

    " + }, + "Marketo":{ + "shape":"MarketoConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Marketo.

    " + }, + "Redshift":{ + "shape":"RedshiftConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Amazon Redshift.

    " + }, + "Salesforce":{ + "shape":"SalesforceConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Salesforce.

    " + }, + "ServiceNow":{ + "shape":"ServiceNowConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by ServiceNow.

    " + }, + "Singular":{ + "shape":"SingularConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Singular.

    " + }, + "Slack":{ + "shape":"SlackConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Slack.

    " + }, + "Snowflake":{ + "shape":"SnowflakeConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Snowflake.

    " + }, + "Trendmicro":{ + "shape":"TrendmicroConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Trend Micro.

    " + }, + "Veeva":{ + "shape":"VeevaConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Veeva.

    " + }, + "Zendesk":{ + "shape":"ZendeskConnectorProfileProperties", + "documentation":"

    The connector-specific properties required by Zendesk.

    " + } + }, + "documentation":"

    The connector-specific profile properties required by each connector.

    " + }, + "ConnectorServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An error occurred when retrieving data from the connector endpoint.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ConnectorType":{ + "type":"string", + "enum":[ + "Salesforce", + "Singular", + "Slack", + "Redshift", + "S3", + "Marketo", + "Googleanalytics", + "Zendesk", + "Servicenow", + "Datadog", + "Trendmicro", + "Snowflake", + "Dynatrace", + "Infornexus", + "Amplitude", + "Veeva", + "EventBridge", + "Upsolver" + ] + }, + "ConnectorTypeList":{ + "type":"list", + "member":{"shape":"ConnectorType"}, + "max":100, + "min":0 + }, + "CreateConnectorProfileRequest":{ + "type":"structure", + "required":[ + "connectorProfileName", + "connectorType", + "connectionMode", + "connectorProfileConfig" + ], + "members":{ + "connectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

    The name of the connector profile. The name is unique for each ConnectorProfile in your AWS account.

    " + }, + "kmsArn":{ + "shape":"KMSArn", + "documentation":"

    The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key.

    " + }, + "connectorType":{ + "shape":"ConnectorType", + "documentation":"

    The type of connector, such as Salesforce, Amplitude, and so on.

    " + }, + "connectionMode":{ + "shape":"ConnectionMode", + "documentation":"

    Indicates the connection mode and specifies whether it is public or private. Private flows use AWS PrivateLink to route data over AWS infrastructure without exposing it to the public internet.

    " + }, + "connectorProfileConfig":{ + "shape":"ConnectorProfileConfig", + "documentation":"

    Defines the connector-specific configuration and credentials.

    " + } + } + }, + "CreateConnectorProfileResponse":{ + "type":"structure", + "members":{ + "connectorProfileArn":{ + "shape":"ConnectorProfileArn", + "documentation":"

    The Amazon Resource Name (ARN) of the connector profile.

    " + } + } + }, + "CreateFlowRequest":{ + "type":"structure", + "required":[ + "flowName", + "triggerConfig", + "sourceFlowConfig", + "destinationFlowConfigList", + "tasks" + ], + "members":{ + "flowName":{ + "shape":"FlowName", + "documentation":"

    The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.

    " + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

    A description of the flow you want to create.

    " + }, + "kmsArn":{ + "shape":"KMSArn", + "documentation":"

    The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key.

    " + }, + "triggerConfig":{ + "shape":"TriggerConfig", + "documentation":"

    The trigger settings that determine how and when the flow runs.

    " + }, + "sourceFlowConfig":{ + "shape":"SourceFlowConfig", + "documentation":"

    The configuration that controls how Amazon AppFlow retrieves data from the source connector.

    " + }, + "destinationFlowConfigList":{ + "shape":"DestinationFlowConfigList", + "documentation":"

    The configuration that controls how Amazon AppFlow places data in the destination connector.

    " + }, + "tasks":{ + "shape":"Tasks", + "documentation":"

    A list of tasks that Amazon AppFlow performs while transferring the data in the flow run.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for your flow.

    " + } + } + }, + "CreateFlowResponse":{ + "type":"structure", + "members":{ + "flowArn":{ + "shape":"FlowArn", + "documentation":"

    The flow's Amazon Resource Name (ARN).

    " + }, + "flowStatus":{ + "shape":"FlowStatus", + "documentation":"

    Indicates the current status of the flow.

    " + } + } + }, + "CreatedBy":{ + "type":"string", + "max":256, + "pattern":"\\S+" + }, + "DataPullMode":{ + "type":"string", + "enum":[ + "Incremental", + "Complete" + ] + }, + "DatabaseUrl":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "DatadogConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "BETWEEN", + "EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "DatadogConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "apiKey", + "applicationKey" + ], + "members":{ + "apiKey":{ + "shape":"ApiKey", + "documentation":"

    A unique alphanumeric identifier used to authenticate a user, developer, or calling program to your API.

    " + }, + "applicationKey":{ + "shape":"ApplicationKey", + "documentation":"

    Application keys, in conjunction with your API key, give you full access to Datadog’s programmatic API. Application keys are associated with the user account that created them. The application key is used to log all requests made to the API.

    " + } + }, + "documentation":"

    The connector-specific credentials required by Datadog.

    " + }, + "DatadogConnectorProfileProperties":{ + "type":"structure", + "required":["instanceUrl"], + "members":{ + "instanceUrl":{ + "shape":"InstanceUrl", + "documentation":"

    The location of the Datadog resource.

    " + } + }, + "documentation":"

    The connector-specific profile properties required by Datadog.

    " + }, + "DatadogMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Datadog.

    " + }, + "DatadogSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Datadog flow source.

    " + } + }, + "documentation":"

    The properties that are applied when Datadog is being used as a source.

    " + }, + "Date":{"type":"timestamp"}, + "DatetimeTypeFieldName":{ + "type":"string", + "max":256, + "pattern":".*" + }, + "DeleteConnectorProfileRequest":{ + "type":"structure", + "required":["connectorProfileName"], + "members":{ + "connectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

    The name of the connector profile. The name is unique for each ConnectorProfile in your account.

    " + }, + "forceDelete":{ + "shape":"Boolean", + "documentation":"

    Indicates whether Amazon AppFlow should delete the profile, even if it is currently in use in one or more flows.

    " + } + } + }, + "DeleteConnectorProfileResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteFlowRequest":{ + "type":"structure", + "required":["flowName"], + "members":{ + "flowName":{ + "shape":"FlowName", + "documentation":"

    The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.

    " + }, + "forceDelete":{ + "shape":"Boolean", + "documentation":"

    Indicates whether Amazon AppFlow should delete the flow, even if it is currently in use.

    " + } + } + }, + "DeleteFlowResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeConnectorEntityRequest":{ + "type":"structure", + "required":["connectorEntityName"], + "members":{ + "connectorEntityName":{ + "shape":"Name", + "documentation":"

    The entity name for that connector.

    " + }, + "connectorType":{ + "shape":"ConnectorType", + "documentation":"

    The type of connector application, such as Salesforce, Amplitude, and so on.

    " + }, + "connectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

    The name of the connector profile. The name is unique for each ConnectorProfile in the AWS account.

    " + } + } + }, + "DescribeConnectorEntityResponse":{ + "type":"structure", + "required":["connectorEntityFields"], + "members":{ + "connectorEntityFields":{ + "shape":"ConnectorEntityFieldList", + "documentation":"

    Describes the fields for that connector entity. For example, for an account entity, the fields would be account name, account ID, and so on.

    " + } + } + }, + "DescribeConnectorProfilesRequest":{ + "type":"structure", + "members":{ + "connectorProfileNames":{ + "shape":"ConnectorProfileNameList", + "documentation":"

    The name of the connector profile. The name is unique for each ConnectorProfile in the AWS account.

    " + }, + "connectorType":{ + "shape":"ConnectorType", + "documentation":"

    The type of connector, such as Salesforce, Amplitude, and so on.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Specifies the maximum number of items that should be returned in the result set. The default for maxResults is 20 (for all paginated API operations).

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token for the next page of data.

    " + } + } + }, + "DescribeConnectorProfilesResponse":{ + "type":"structure", + "members":{ + "connectorProfileDetails":{ + "shape":"ConnectorProfileDetailList", + "documentation":"

    Returns information about the connector profiles associated with the flow.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token for the next page of data. If nextToken=null, this means that all records have been fetched.

    " + } + } + }, + "DescribeConnectorsRequest":{ + "type":"structure", + "members":{ + "connectorTypes":{ + "shape":"ConnectorTypeList", + "documentation":"

    The type of connector, such as Salesforce, Amplitude, and so on.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token for the next page of data.

    " + } + } + }, + "DescribeConnectorsResponse":{ + "type":"structure", + "members":{ + "connectorConfigurations":{ + "shape":"ConnectorConfigurationsMap", + "documentation":"

    The configuration that is applied to the connectors used in the flow.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token for the next page of data.

    " + } + } + }, + "DescribeFlowExecutionRecordsRequest":{ + "type":"structure", + "required":["flowName"], + "members":{ + "flowName":{ + "shape":"FlowName", + "documentation":"

    The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Specifies the maximum number of items that should be returned in the result set. The default for maxResults is 20 (for all paginated API operations).

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token for the next page of data.

    " + } + } + }, + "DescribeFlowExecutionRecordsResponse":{ + "type":"structure", + "members":{ + "flowExecutions":{ + "shape":"FlowExecutionList", + "documentation":"

    Returns a list of all instances when this flow was run.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token for the next page of data.
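A hedged sketch of the nextToken/maxResults pagination pattern described above, assuming the standard v2 generated client and accessor names (e.g., executionStatusAsString); the flow name is a placeholder.

    import software.amazon.awssdk.services.appflow.AppflowClient;
    import software.amazon.awssdk.services.appflow.model.DescribeFlowExecutionRecordsRequest;
    import software.amazon.awssdk.services.appflow.model.DescribeFlowExecutionRecordsResponse;

    public class ListExecutionsExample {
        public static void main(String[] args) {
            try (AppflowClient appflow = AppflowClient.create()) {
                String nextToken = null;
                do {
                    DescribeFlowExecutionRecordsResponse page = appflow.describeFlowExecutionRecords(
                            DescribeFlowExecutionRecordsRequest.builder()
                                    .flowName("my-example-flow")   // hypothetical flow name
                                    .maxResults(20)                // default page size per the model
                                    .nextToken(nextToken)
                                    .build());
                    page.flowExecutions().forEach(rec ->
                            System.out.println(rec.executionId() + " " + rec.executionStatusAsString()));
                    nextToken = page.nextToken();   // null once all records have been fetched
                } while (nextToken != null);
            }
        }
    }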

    " + } + } + }, + "DescribeFlowRequest":{ + "type":"structure", + "required":["flowName"], + "members":{ + "flowName":{ + "shape":"FlowName", + "documentation":"

    The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.

    " + } + } + }, + "DescribeFlowResponse":{ + "type":"structure", + "members":{ + "flowArn":{ + "shape":"FlowArn", + "documentation":"

    The flow's Amazon Resource Name (ARN).

    " + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

    A description of the flow.

    " + }, + "flowName":{ + "shape":"FlowName", + "documentation":"

    The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.

    " + }, + "kmsArn":{ + "shape":"KMSArn", + "documentation":"

    The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key.

    " + }, + "flowStatus":{ + "shape":"FlowStatus", + "documentation":"

    Indicates the current status of the flow.

    " + }, + "flowStatusMessage":{ + "shape":"FlowStatusMessage", + "documentation":"

    Contains an error message if the flow status is in a suspended or error state. This applies only to scheduled or event-triggered flows.

    " + }, + "sourceFlowConfig":{ + "shape":"SourceFlowConfig", + "documentation":"

    The configuration that controls how Amazon AppFlow retrieves data from the source connector.

    " + }, + "destinationFlowConfigList":{ + "shape":"DestinationFlowConfigList", + "documentation":"

    The configuration that controls how Amazon AppFlow transfers data to the destination connector.

    " + }, + "lastRunExecutionDetails":{ + "shape":"ExecutionDetails", + "documentation":"

    Describes the details of the most recent flow run.

    " + }, + "triggerConfig":{ + "shape":"TriggerConfig", + "documentation":"

    The trigger settings that determine how and when the flow runs.

    " + }, + "tasks":{ + "shape":"Tasks", + "documentation":"

    A list of tasks that Amazon AppFlow performs while transferring the data in the flow run.

    " + }, + "createdAt":{ + "shape":"Date", + "documentation":"

    Specifies when the flow was created.

    " + }, + "lastUpdatedAt":{ + "shape":"Date", + "documentation":"

    Specifies when the flow was last updated.

    " + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

    The ARN of the user who created the flow.

    " + }, + "lastUpdatedBy":{ + "shape":"UpdatedBy", + "documentation":"

    Specifies the user name of the account that performed the most recent update.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for your flow.

    " + } + } + }, + "Description":{ + "type":"string", + "max":1024, + "pattern":"[\\s\\w/!@#+=.-]*" + }, + "DestinationConnectorProperties":{ + "type":"structure", + "members":{ + "Redshift":{ + "shape":"RedshiftDestinationProperties", + "documentation":"

    The properties required to query Amazon Redshift.

    " + }, + "S3":{ + "shape":"S3DestinationProperties", + "documentation":"

    The properties required to query Amazon S3.

    " + }, + "Salesforce":{ + "shape":"SalesforceDestinationProperties", + "documentation":"

    The properties required to query Salesforce.

    " + }, + "Snowflake":{ + "shape":"SnowflakeDestinationProperties", + "documentation":"

    The properties required to query Snowflake.

    " + }, + "EventBridge":{ + "shape":"EventBridgeDestinationProperties", + "documentation":"

    The properties required to query Amazon EventBridge.

    " + }, + "Upsolver":{ + "shape":"UpsolverDestinationProperties", + "documentation":"

    The properties required to query Upsolver.

    " + } + }, + "documentation":"

    This stores the information that is required to query a particular connector.

    " + }, + "DestinationField":{ + "type":"string", + "max":256, + "pattern":".*" + }, + "DestinationFieldProperties":{ + "type":"structure", + "members":{ + "isCreatable":{ + "shape":"Boolean", + "documentation":"

    Specifies if the destination field can be created by the current user.

    " + }, + "isNullable":{ + "shape":"Boolean", + "documentation":"

    Specifies if the destination field can have a null value.

    " + }, + "isUpsertable":{ + "shape":"Boolean", + "documentation":"

    Specifies if the flow run can either insert new rows in the destination field if they do not already exist, or update them if they do.

    " + }, + "isUpdatable":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the field can be updated during an UPDATE or UPSERT write operation.

    " + }, + "supportedWriteOperations":{ + "shape":"SupportedWriteOperationList", + "documentation":"

    A list of supported write operations. For each write operation listed, this field can be used in idFieldNames when that write operation is present as a destination option.

    " + } + }, + "documentation":"

    The properties that can be applied to a field when the connector is being used as a destination.

    " + }, + "DestinationFlowConfig":{ + "type":"structure", + "required":[ + "connectorType", + "destinationConnectorProperties" + ], + "members":{ + "connectorType":{ + "shape":"ConnectorType", + "documentation":"

    The type of connector, such as Salesforce, Amplitude, and so on.

    " + }, + "connectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

    The name of the connector profile. This name must be unique for each connector profile in the AWS account.

    " + }, + "destinationConnectorProperties":{ + "shape":"DestinationConnectorProperties", + "documentation":"

    This stores the information that is required to query a particular connector.

    " + } + }, + "documentation":"

    Contains information about the configuration of destination connectors present in the flow.

    " + }, + "DestinationFlowConfigList":{ + "type":"list", + "member":{"shape":"DestinationFlowConfig"} + }, + "DynatraceConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "BETWEEN", + "EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "DynatraceConnectorProfileCredentials":{ + "type":"structure", + "required":["apiToken"], + "members":{ + "apiToken":{ + "shape":"ApiToken", + "documentation":"

    The API tokens used by Dynatrace API to authenticate various API calls.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required by Dynatrace.

    " + }, + "DynatraceConnectorProfileProperties":{ + "type":"structure", + "required":["instanceUrl"], + "members":{ + "instanceUrl":{ + "shape":"InstanceUrl", + "documentation":"

    The location of the Dynatrace resource.

    " + } + }, + "documentation":"

    The connector-specific profile properties required by Dynatrace.

    " + }, + "DynatraceMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Dynatrace.

    " + }, + "DynatraceSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Dynatrace flow source.

    " + } + }, + "documentation":"

    The properties that are applied when Dynatrace is being used as a source.

    " + }, + "EntitiesPath":{ + "type":"string", + "max":256, + "pattern":"[\\s\\w/!@#+=.-]*" + }, + "ErrorHandlingConfig":{ + "type":"structure", + "members":{ + "failOnFirstDestinationError":{ + "shape":"Boolean", + "documentation":"

    Specifies if the flow should fail after the first instance of a failure when attempting to place data in the destination.

    " + }, + "bucketPrefix":{ + "shape":"BucketPrefix", + "documentation":"

    Specifies the Amazon S3 bucket prefix.

    " + }, + "bucketName":{ + "shape":"BucketName", + "documentation":"

    Specifies the name of the Amazon S3 bucket.

    " + } + }, + "documentation":"

    The settings that determine how Amazon AppFlow handles an error when placing data in the destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig is a part of the destination connector details.
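As an illustration of this error-handling behavior, a minimal sketch assuming standard v2 builders; the bucket name and prefix are hypothetical.

    import software.amazon.awssdk.services.appflow.model.ErrorHandlingConfig;

    class ErrorHandlingExample {
        // Keep inserting the remaining records after a failure, writing error output to S3.
        static ErrorHandlingConfig onError() {
            return ErrorHandlingConfig.builder()
                    .failOnFirstDestinationError(false)
                    .bucketName("my-appflow-error-bucket")   // hypothetical bucket
                    .bucketPrefix("errors/")                 // hypothetical prefix
                    .build();
        }
    }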

    " + }, + "ErrorInfo":{ + "type":"structure", + "members":{ + "putFailuresCount":{ + "shape":"Long", + "documentation":"

    Specifies the failure count for the attempted flow.

    " + }, + "executionMessage":{ + "shape":"ExecutionMessage", + "documentation":"

    Specifies the error message that appears if a flow fails.

    " + } + }, + "documentation":"

    Provides details in the event of a failed flow, including the failure count and the related error messages.

    " + }, + "ErrorMessage":{ + "type":"string", + "max":2048, + "pattern":"[\\s\\w/!@#+=.-]*" + }, + "EventBridgeDestinationProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Amazon EventBridge flow destination.

    " + }, + "errorHandlingConfig":{"shape":"ErrorHandlingConfig"} + }, + "documentation":"

    The properties that are applied when Amazon EventBridge is being used as a destination.

    " + }, + "EventBridgeMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Amazon EventBridge.

    " + }, + "ExecutionDetails":{ + "type":"structure", + "members":{ + "mostRecentExecutionMessage":{ + "shape":"MostRecentExecutionMessage", + "documentation":"

    Describes the details of the most recent flow run.

    " + }, + "mostRecentExecutionTime":{ + "shape":"Date", + "documentation":"

    Specifies the time of the most recent flow run.

    " + }, + "mostRecentExecutionStatus":{ + "shape":"ExecutionStatus", + "documentation":"

    Specifies the status of the most recent flow run.

    " + } + }, + "documentation":"

    Describes the details of the flow run, including the timestamp, status, and message.

    " + }, + "ExecutionId":{ + "type":"string", + "max":256, + "pattern":"\\S+" + }, + "ExecutionMessage":{ + "type":"string", + "max":2048, + "pattern":"[\\s\\w/!@#+=.-]*" + }, + "ExecutionRecord":{ + "type":"structure", + "members":{ + "executionId":{ + "shape":"ExecutionId", + "documentation":"

    Specifies the identifier of the given flow run.

    " + }, + "executionStatus":{ + "shape":"ExecutionStatus", + "documentation":"

    Specifies the flow run status and whether it is in progress, has completed successfully, or has failed.

    " + }, + "executionResult":{ + "shape":"ExecutionResult", + "documentation":"

    Describes the result of the given flow run.

    " + }, + "startedAt":{ + "shape":"Date", + "documentation":"

    Specifies the start time of the flow run.

    " + }, + "lastUpdatedAt":{ + "shape":"Date", + "documentation":"

    Specifies the time of the most recent update.

    " + } + }, + "documentation":"

    Specifies information about the past flow run instances for a given flow.

    " + }, + "ExecutionResult":{ + "type":"structure", + "members":{ + "errorInfo":{ + "shape":"ErrorInfo", + "documentation":"

    Provides any error message information related to the flow run.

    " + }, + "bytesProcessed":{ + "shape":"Long", + "documentation":"

    The total number of bytes processed by the flow run.

    " + }, + "bytesWritten":{ + "shape":"Long", + "documentation":"

    The total number of bytes written as a result of the flow run.

    " + }, + "recordsProcessed":{ + "shape":"Long", + "documentation":"

    The number of records processed in the flow run.

    " + } + }, + "documentation":"

    Specifies the end result of the flow run.

    " + }, + "ExecutionStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Successful", + "Error" + ] + }, + "FieldType":{"type":"string"}, + "FieldTypeDetails":{ + "type":"structure", + "required":[ + "fieldType", + "filterOperators" + ], + "members":{ + "fieldType":{ + "shape":"FieldType", + "documentation":"

    The type of field, such as string, integer, date, and so on.

    " + }, + "filterOperators":{ + "shape":"FilterOperatorList", + "documentation":"

    The list of operators supported by a field.

    " + }, + "supportedValues":{ + "shape":"SupportedValueList", + "documentation":"

    The list of values that a field can contain. For example, a Boolean fieldType can have two values: \"true\" and \"false\".

    " + } + }, + "documentation":"

    Contains details regarding the supported field type and the operators that can be applied for filtering.

    " + }, + "FileType":{ + "type":"string", + "enum":[ + "CSV", + "JSON", + "PARQUET" + ] + }, + "FilterOperatorList":{ + "type":"list", + "member":{"shape":"Operator"} + }, + "FlowArn":{ + "type":"string", + "max":512, + "pattern":"arn:aws:appflow:.*:[0-9]+:.*" + }, + "FlowDefinition":{ + "type":"structure", + "members":{ + "flowArn":{ + "shape":"FlowArn", + "documentation":"

    The flow's Amazon Resource Name (ARN).

    " + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

    A user-entered description of the flow.

    " + }, + "flowName":{ + "shape":"FlowName", + "documentation":"

    The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.

    " + }, + "flowStatus":{ + "shape":"FlowStatus", + "documentation":"

    Indicates the current status of the flow.

    " + }, + "sourceConnectorType":{ + "shape":"ConnectorType", + "documentation":"

    Specifies the source connector type, such as Salesforce, Amazon S3, Amplitude, and so on.

    " + }, + "destinationConnectorType":{ + "shape":"ConnectorType", + "documentation":"

    Specifies the destination connector type, such as Salesforce, Amazon S3, Amplitude, and so on.

    " + }, + "triggerType":{ + "shape":"TriggerType", + "documentation":"

    Specifies the type of flow trigger. This can be OnDemand, Scheduled, or Event.

    " + }, + "createdAt":{ + "shape":"Date", + "documentation":"

    Specifies when the flow was created.

    " + }, + "lastUpdatedAt":{ + "shape":"Date", + "documentation":"

    Specifies when the flow was last updated.

    " + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

    The ARN of the user who created the flow.

    " + }, + "lastUpdatedBy":{ + "shape":"UpdatedBy", + "documentation":"

    Specifies the account user name that most recently updated the flow.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for your flow.

    " + }, + "lastRunExecutionDetails":{ + "shape":"ExecutionDetails", + "documentation":"

    Describes the details of the most recent flow run.

    " + } + }, + "documentation":"

    The properties of the flow, such as its source, destination, trigger type, and so on.

    " + }, + "FlowDescription":{ + "type":"string", + "max":2048, + "pattern":"[\\w!@#\\-.?,\\s]*" + }, + "FlowExecutionList":{ + "type":"list", + "member":{"shape":"ExecutionRecord"} + }, + "FlowList":{ + "type":"list", + "member":{"shape":"FlowDefinition"} + }, + "FlowName":{ + "type":"string", + "max":256, + "pattern":"[a-zA-Z0-9][\\w!@#.-]+" + }, + "FlowStatus":{ + "type":"string", + "enum":[ + "Active", + "Deprecated", + "Deleted", + "Draft", + "Errored", + "Suspended" + ] + }, + "FlowStatusMessage":{ + "type":"string", + "max":2048, + "pattern":"[\\s\\w/!@#+=.-]*" + }, + "GoogleAnalyticsConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "BETWEEN" + ] + }, + "GoogleAnalyticsConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "clientId", + "clientSecret" + ], + "members":{ + "clientId":{ + "shape":"ClientId", + "documentation":"

    The identifier for the desired client.

    " + }, + "clientSecret":{ + "shape":"ClientSecret", + "documentation":"

    The client secret used by the OAuth client to authenticate to the authorization server.

    " + }, + "accessToken":{ + "shape":"AccessToken", + "documentation":"

    The credentials used to access protected Google Analytics resources.

    " + }, + "refreshToken":{ + "shape":"RefreshToken", + "documentation":"

    The credentials used to acquire new access tokens. This is required only for OAuth2 access tokens, and is not required for OAuth1 access tokens.

    " + }, + "oAuthRequest":{ + "shape":"ConnectorOAuthRequest", + "documentation":"

    The OAuth requirement needed to request security tokens from the connector endpoint.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required by Google Analytics.

    " + }, + "GoogleAnalyticsConnectorProfileProperties":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector-specific profile properties required by Google Analytics.

    " + }, + "GoogleAnalyticsMetadata":{ + "type":"structure", + "members":{ + "oAuthScopes":{ + "shape":"OAuthScopeList", + "documentation":"

    The desired authorization scope for the Google Analytics account.

    " + } + }, + "documentation":"

    The connector metadata specific to Google Analytics.

    " + }, + "GoogleAnalyticsSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Google Analytics flow source.

    " + } + }, + "documentation":"

    The properties that are applied when Google Analytics is being used as a source.

    " + }, + "Group":{ + "type":"string", + "max":128, + "pattern":"\\S+" + }, + "IdFieldNameList":{ + "type":"list", + "member":{"shape":"Name"}, + "documentation":"

    A list of field names that can be used as an ID field when performing a write operation.

    ", + "max":1, + "min":0 + }, + "Identifier":{ + "type":"string", + "max":128, + "pattern":"\\S+" + }, + "IncrementalPullConfig":{ + "type":"structure", + "members":{ + "datetimeTypeFieldName":{ + "shape":"DatetimeTypeFieldName", + "documentation":"

    A field that specifies the date-time or timestamp field to use as the criteria when importing incremental records from the source.

    " + } + }, + "documentation":"

    Specifies the configuration used when importing incremental records from the source.

    " + }, + "InforNexusConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "BETWEEN", + "EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "InforNexusConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "accessKeyId", + "userId", + "secretAccessKey", + "datakey" + ], + "members":{ + "accessKeyId":{ + "shape":"AccessKeyId", + "documentation":"

    The Access Key portion of the credentials.

    " + }, + "userId":{ + "shape":"Username", + "documentation":"

    The identifier for the user.

    " + }, + "secretAccessKey":{ + "shape":"Key", + "documentation":"

    The secret key used to sign requests.

    " + }, + "datakey":{ + "shape":"Key", + "documentation":"

    The encryption keys used to encrypt data.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required by Infor Nexus.

    " + }, + "InforNexusConnectorProfileProperties":{ + "type":"structure", + "required":["instanceUrl"], + "members":{ + "instanceUrl":{ + "shape":"InstanceUrl", + "documentation":"

    The location of the Infor Nexus resource.

    " + } + }, + "documentation":"

    The connector-specific profile properties required by Infor Nexus.

    " + }, + "InforNexusMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Infor Nexus.

    " + }, + "InforNexusSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Infor Nexus flow source.

    " + } + }, + "documentation":"

    The properties that are applied when Infor Nexus is being used as a source.

    " + }, + "InstanceUrl":{ + "type":"string", + "max":256, + "pattern":"\\S+" + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An internal service error occurred during the processing of your request. Try again later.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "KMSArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws:kms:.*:[0-9]+:.*" + }, + "Key":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "Label":{ + "type":"string", + "max":128, + "pattern":".*" + }, + "ListConnectorEntitiesRequest":{ + "type":"structure", + "members":{ + "connectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

    The name of the connector profile. The name is unique for each ConnectorProfile in the AWS account, and is used to query the downstream connector.

    " + }, + "connectorType":{ + "shape":"ConnectorType", + "documentation":"

    The type of connector, such as Salesforce, Amplitude, and so on.

    " + }, + "entitiesPath":{ + "shape":"EntitiesPath", + "documentation":"

    This optional parameter is specific to the connector implementation. Some connectors support multiple levels or categories of entities. You can find out the list of roots for such providers by sending a request without the entitiesPath parameter. If the connector supports entities at different roots, this initial request returns the list of roots. Otherwise, this request returns all entities supported by the provider.

    " + } + } + }, + "ListConnectorEntitiesResponse":{ + "type":"structure", + "required":["connectorEntityMap"], + "members":{ + "connectorEntityMap":{ + "shape":"ConnectorEntityMap", + "documentation":"

    The response of ListConnectorEntities lists entities grouped by category. This map's key represents the group name, and its value contains the list of entities belonging to that group.
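A hedged sketch of listing entities for a connector, assuming the usual generated client and that the ConnectorType enum exposes a SALESFORCE constant (not shown in this section); the profile name is a placeholder. Omitting entitiesPath returns only the root categories for connectors that group entities.

    import software.amazon.awssdk.services.appflow.AppflowClient;
    import software.amazon.awssdk.services.appflow.model.ConnectorType;
    import software.amazon.awssdk.services.appflow.model.ListConnectorEntitiesRequest;

    class ListEntitiesExample {
        static void listRoots(AppflowClient appflow) {
            appflow.listConnectorEntities(ListConnectorEntitiesRequest.builder()
                            .connectorProfileName("my-salesforce-profile")   // hypothetical profile
                            .connectorType(ConnectorType.SALESFORCE)          // enum constant assumed
                            .build())
                    .connectorEntityMap()
                    .forEach((group, entities) -> System.out.println(group + " -> " + entities));
        }
    }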

    " + } + } + }, + "ListFlowsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Specifies the maximum number of items that should be returned in the result set.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token for the next page of data.

    " + } + } + }, + "ListFlowsResponse":{ + "type":"structure", + "members":{ + "flows":{ + "shape":"FlowList", + "documentation":"

    The list of flows associated with your account.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token for the next page of data.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ARN", + "documentation":"

    The Amazon Resource Name (ARN) of the specified flow.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for your flow.

    " + } + } + }, + "Long":{"type":"long"}, + "MarketoConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "LESS_THAN", + "GREATER_THAN", + "BETWEEN", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "MarketoConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "clientId", + "clientSecret" + ], + "members":{ + "clientId":{ + "shape":"ClientId", + "documentation":"

    The identifier for the desired client.

    " + }, + "clientSecret":{ + "shape":"ClientSecret", + "documentation":"

    The client secret used by the OAuth client to authenticate to the authorization server.

    " + }, + "accessToken":{ + "shape":"AccessToken", + "documentation":"

    The credentials used to access protected Marketo resources.

    " + }, + "oAuthRequest":{ + "shape":"ConnectorOAuthRequest", + "documentation":"

    The OAuth requirement needed to request security tokens from the connector endpoint.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required by Marketo.

    " + }, + "MarketoConnectorProfileProperties":{ + "type":"structure", + "required":["instanceUrl"], + "members":{ + "instanceUrl":{ + "shape":"InstanceUrl", + "documentation":"

    The location of the Marketo resource.

    " + } + }, + "documentation":"

    The connector-specific profile properties required when using Marketo.

    " + }, + "MarketoMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Marketo.

    " + }, + "MarketoSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Marketo flow source.

    " + } + }, + "documentation":"

    The properties that are applied when Marketo is being used as a source.

    " + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "MostRecentExecutionMessage":{ + "type":"string", + "max":2048, + "pattern":"[\\w!@#\\-.?,\\s]*" + }, + "Name":{ + "type":"string", + "max":128, + "pattern":"\\S+" + }, + "NextToken":{ + "type":"string", + "max":2048, + "pattern":"\\S+" + }, + "OAuthScope":{ + "type":"string", + "max":128, + "pattern":"[\\w]*" + }, + "OAuthScopeList":{ + "type":"list", + "member":{"shape":"OAuthScope"} + }, + "Object":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "Operator":{ + "type":"string", + "enum":[ + "PROJECTION", + "LESS_THAN", + "GREATER_THAN", + "CONTAINS", + "BETWEEN", + "LESS_THAN_OR_EQUAL_TO", + "GREATER_THAN_OR_EQUAL_TO", + "EQUAL_TO", + "NOT_EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "OperatorPropertiesKeys":{ + "type":"string", + "enum":[ + "VALUE", + "VALUES", + "DATA_TYPE", + "UPPER_BOUND", + "LOWER_BOUND", + "SOURCE_DATA_TYPE", + "DESTINATION_DATA_TYPE", + "VALIDATION_ACTION", + "MASK_VALUE", + "MASK_LENGTH", + "TRUNCATE_LENGTH", + "MATH_OPERATION_FIELDS_ORDER", + "CONCAT_FORMAT", + "SUBFIELD_CATEGORY_MAP" + ] + }, + "Password":{ + "type":"string", + "max":512, + "pattern":".*", + "sensitive":true + }, + "PrefixConfig":{ + "type":"structure", + "members":{ + "prefixType":{ + "shape":"PrefixType", + "documentation":"

    Determines the level of granularity that's included in the prefix.

    " + }, + "prefixFormat":{ + "shape":"PrefixFormat", + "documentation":"

    Determines the format of the prefix, and whether it applies to the file name, file path, or both.

    " + } + }, + "documentation":"

    Determines the prefix that Amazon AppFlow applies to the destination folder name. You can name your destination folders according to the flow frequency and date.

    " + }, + "PrefixFormat":{ + "type":"string", + "enum":[ + "YEAR", + "MONTH", + "DAY", + "HOUR", + "MINUTE" + ] + }, + "PrefixType":{ + "type":"string", + "enum":[ + "FILENAME", + "PATH", + "PATH_AND_FILENAME" + ] + }, + "PrivateLinkServiceName":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "Property":{ + "type":"string", + "max":2048, + "pattern":".+" + }, + "RedirectUri":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "RedshiftConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "username", + "password" + ], + "members":{ + "username":{ + "shape":"Username", + "documentation":"

    The name of the user.

    " + }, + "password":{ + "shape":"Password", + "documentation":"

    The password that corresponds to the user name.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required when using Amazon Redshift.

    " + }, + "RedshiftConnectorProfileProperties":{ + "type":"structure", + "required":[ + "databaseUrl", + "bucketName", + "roleArn" + ], + "members":{ + "databaseUrl":{ + "shape":"DatabaseUrl", + "documentation":"

    The JDBC URL of the Amazon Redshift cluster.

    " + }, + "bucketName":{ + "shape":"BucketName", + "documentation":"

    A name for the associated Amazon S3 bucket.

    " + }, + "bucketPrefix":{ + "shape":"BucketPrefix", + "documentation":"

    The object key for the destination bucket in which Amazon AppFlow places the files.

    " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role.

    " + } + }, + "documentation":"

    The connector-specific profile properties when using Amazon Redshift.

    " + }, + "RedshiftDestinationProperties":{ + "type":"structure", + "required":[ + "object", + "intermediateBucketName" + ], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Amazon Redshift flow destination.

    " + }, + "intermediateBucketName":{ + "shape":"BucketName", + "documentation":"

    The intermediate bucket that Amazon AppFlow uses when moving data into Amazon Redshift.

    " + }, + "bucketPrefix":{ + "shape":"BucketPrefix", + "documentation":"

    The object key for the bucket in which Amazon AppFlow places the destination files.

    " + }, + "errorHandlingConfig":{ + "shape":"ErrorHandlingConfig", + "documentation":"

    The settings that determine how Amazon AppFlow handles an error when placing data in the Amazon Redshift destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig is a part of the destination connector details.

    " + } + }, + "documentation":"

    The properties that are applied when Amazon Redshift is being used as a destination.

    " + }, + "RedshiftMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Amazon Redshift.

    " + }, + "RefreshToken":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "Region":{ + "type":"string", + "max":64, + "pattern":"\\S+" + }, + "RegionList":{ + "type":"list", + "member":{"shape":"Region"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The resource specified in the request (such as the source or destination connector profile) is not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "RoleArn":{ + "type":"string", + "max":512, + "pattern":"arn:aws:iam:.*:[0-9]+:.*" + }, + "S3ConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "LESS_THAN", + "GREATER_THAN", + "BETWEEN", + "LESS_THAN_OR_EQUAL_TO", + "GREATER_THAN_OR_EQUAL_TO", + "EQUAL_TO", + "NOT_EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "S3DestinationProperties":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

    The Amazon S3 bucket name in which Amazon AppFlow places the transferred data.

    " + }, + "bucketPrefix":{ + "shape":"BucketPrefix", + "documentation":"

    The object key for the destination bucket in which Amazon AppFlow places the files.

    " + }, + "s3OutputFormatConfig":{"shape":"S3OutputFormatConfig"} + }, + "documentation":"

    The properties that are applied when Amazon S3 is used as a destination.

    " + }, + "S3Metadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Amazon S3.

    " + }, + "S3OutputFormatConfig":{ + "type":"structure", + "members":{ + "fileType":{ + "shape":"FileType", + "documentation":"

    Indicates the file type that Amazon AppFlow places in the Amazon S3 bucket.

    " + }, + "prefixConfig":{ + "shape":"PrefixConfig", + "documentation":"

    Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date.

    " + }, + "aggregationConfig":{"shape":"AggregationConfig"} + }, + "documentation":"

    The configuration that determines how Amazon AppFlow should format the flow output data when Amazon S3 is used as the destination.
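A minimal sketch of the output formatting described above, using only enum values present in this model (PARQUET, PATH_AND_FILENAME, DAY); the exact folder layout AppFlow produces is assumed, not taken from the model.

    import software.amazon.awssdk.services.appflow.model.FileType;
    import software.amazon.awssdk.services.appflow.model.PrefixConfig;
    import software.amazon.awssdk.services.appflow.model.PrefixFormat;
    import software.amazon.awssdk.services.appflow.model.PrefixType;
    import software.amazon.awssdk.services.appflow.model.S3OutputFormatConfig;

    class OutputFormatExample {
        // Writes Parquet files under a day-granularity prefix applied to both path and file name.
        static S3OutputFormatConfig parquetByDay() {
            return S3OutputFormatConfig.builder()
                    .fileType(FileType.PARQUET)
                    .prefixConfig(PrefixConfig.builder()
                            .prefixType(PrefixType.PATH_AND_FILENAME)
                            .prefixFormat(PrefixFormat.DAY)
                            .build())
                    .build();
        }
    }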

    " + }, + "S3SourceProperties":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

    The Amazon S3 bucket name where the source files are stored.

    " + }, + "bucketPrefix":{ + "shape":"BucketPrefix", + "documentation":"

    The object key for the Amazon S3 bucket in which the source files are stored.

    " + } + }, + "documentation":"

    The properties that are applied when Amazon S3 is being used as the flow source.

    " + }, + "SalesforceConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "LESS_THAN", + "CONTAINS", + "GREATER_THAN", + "BETWEEN", + "LESS_THAN_OR_EQUAL_TO", + "GREATER_THAN_OR_EQUAL_TO", + "EQUAL_TO", + "NOT_EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "SalesforceConnectorProfileCredentials":{ + "type":"structure", + "members":{ + "accessToken":{ + "shape":"AccessToken", + "documentation":"

    The credentials used to access protected Salesforce resources.

    " + }, + "refreshToken":{ + "shape":"RefreshToken", + "documentation":"

    The credentials used to acquire new access tokens.

    " + }, + "oAuthRequest":{ + "shape":"ConnectorOAuthRequest", + "documentation":"

    The OAuth requirement needed to request security tokens from the connector endpoint.

    " + }, + "clientCredentialsArn":{ + "shape":"ClientCredentialsArn", + "documentation":"

    The secret manager ARN, which contains the client ID and client secret of the connected app.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required when using Salesforce.

    " + }, + "SalesforceConnectorProfileProperties":{ + "type":"structure", + "members":{ + "instanceUrl":{ + "shape":"InstanceUrl", + "documentation":"

    The location of the Salesforce resource.

    " + }, + "isSandboxEnvironment":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the connector profile applies to a sandbox or production environment.

    " + } + }, + "documentation":"

    The connector-specific profile properties required when using Salesforce.

    " + }, + "SalesforceDestinationProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Salesforce flow destination.

    " + }, + "idFieldNames":{ + "shape":"IdFieldNameList", + "documentation":"

    The name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update or delete.

    " + }, + "errorHandlingConfig":{ + "shape":"ErrorHandlingConfig", + "documentation":"

    The settings that determine how Amazon AppFlow handles an error when placing data in the Salesforce destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig is a part of the destination connector details.

    " + }, + "writeOperationType":{ + "shape":"WriteOperationType", + "documentation":"

    This specifies the type of write operation to be performed in Salesforce. When the value is UPSERT, then idFieldNames is required.

    " + } + }, + "documentation":"

    The properties that are applied when Salesforce is being used as a destination.
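A hedged sketch of the UPSERT case called out above: when writeOperationType is UPSERT, idFieldNames must name the field that identifies existing records (the model allows at most one). The Salesforce object and field names are hypothetical, and the WriteOperationType constant name is assumed.

    import software.amazon.awssdk.services.appflow.model.SalesforceDestinationProperties;
    import software.amazon.awssdk.services.appflow.model.WriteOperationType;

    class SalesforceDestinationExample {
        static SalesforceDestinationProperties upsertAccounts() {
            return SalesforceDestinationProperties.builder()
                    .object("Account")                              // hypothetical Salesforce object
                    .writeOperationType(WriteOperationType.UPSERT)  // enum constant assumed
                    .idFieldNames("Id")                             // at most one ID field, per the model
                    .build();
        }
    }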

    " + }, + "SalesforceMetadata":{ + "type":"structure", + "members":{ + "oAuthScopes":{ + "shape":"OAuthScopeList", + "documentation":"

    The desired authorization scope for the Salesforce account.

    " + } + }, + "documentation":"

    The connector metadata specific to Salesforce.

    " + }, + "SalesforceSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Salesforce flow source.

    " + }, + "enableDynamicFieldUpdate":{ + "shape":"Boolean", + "documentation":"

    The flag that enables dynamic fetching of new (recently added) fields in the Salesforce objects while running a flow.

    " + }, + "includeDeletedRecords":{ + "shape":"Boolean", + "documentation":"

    Indicates whether Amazon AppFlow includes deleted records in the flow run.

    " + } + }, + "documentation":"

    The properties that are applied when Salesforce is being used as a source.

    " + }, + "ScheduleExpression":{ + "type":"string", + "max":256, + "pattern":".*" + }, + "ScheduleFrequencyType":{ + "type":"string", + "enum":[ + "BYMINUTE", + "HOURLY", + "DAILY", + "WEEKLY", + "MONTHLY", + "ONCE" + ] + }, + "ScheduledTriggerProperties":{ + "type":"structure", + "required":["scheduleExpression"], + "members":{ + "scheduleExpression":{ + "shape":"ScheduleExpression", + "documentation":"

    The scheduling expression that determines the rate at which the schedule will run, for example rate(5 minutes).

    " + }, + "dataPullMode":{ + "shape":"DataPullMode", + "documentation":"

    Specifies whether a scheduled flow has an incremental data transfer or a complete data transfer for each flow run.

    " + }, + "scheduleStartTime":{ + "shape":"Date", + "documentation":"

    Specifies the scheduled start time for a schedule-triggered flow.

    " + }, + "scheduleEndTime":{ + "shape":"Date", + "documentation":"

    Specifies the scheduled end time for a schedule-triggered flow.

    " + }, + "timezone":{ + "shape":"Timezone", + "documentation":"

    Specifies the time zone used when referring to the date and time of a schedule-triggered flow.

    " + } + }, + "documentation":"

    Specifies the configuration details of a schedule-triggered flow as defined by the user. Currently, these settings only apply to the Scheduled trigger type.
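A hedged sketch of a schedule-triggered flow configuration, assuming the TriggerType constant is SCHEDULED (the model lists the value as "Scheduled") and that the rate-expression syntax matches the usual form; the time zone and start time are placeholders.

    import java.time.Instant;
    import software.amazon.awssdk.services.appflow.model.ScheduledTriggerProperties;
    import software.amazon.awssdk.services.appflow.model.TriggerConfig;
    import software.amazon.awssdk.services.appflow.model.TriggerProperties;
    import software.amazon.awssdk.services.appflow.model.TriggerType;

    class ScheduleExample {
        // A flow that runs on an hourly schedule, starting now, in UTC.
        static TriggerConfig hourly() {
            return TriggerConfig.builder()
                    .triggerType(TriggerType.SCHEDULED)              // enum constant assumed
                    .triggerProperties(TriggerProperties.builder()
                            .scheduled(ScheduledTriggerProperties.builder()
                                    .scheduleExpression("rate(1 hour)")   // expression syntax assumed
                                    .scheduleStartTime(Instant.now())
                                    .timezone("UTC")
                                    .build())
                            .build())
                    .build();
        }
    }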

    " + }, + "SchedulingFrequencyTypeList":{ + "type":"list", + "member":{"shape":"ScheduleFrequencyType"} + }, + "SecretKey":{ + "type":"string", + "max":256, + "pattern":"\\S+", + "sensitive":true + }, + "ServiceNowConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "CONTAINS", + "LESS_THAN", + "GREATER_THAN", + "BETWEEN", + "LESS_THAN_OR_EQUAL_TO", + "GREATER_THAN_OR_EQUAL_TO", + "EQUAL_TO", + "NOT_EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "ServiceNowConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "username", + "password" + ], + "members":{ + "username":{ + "shape":"Username", + "documentation":"

    The name of the user.

    " + }, + "password":{ + "shape":"Password", + "documentation":"

    The password that corresponds to the user name.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required when using ServiceNow.

    " + }, + "ServiceNowConnectorProfileProperties":{ + "type":"structure", + "required":["instanceUrl"], + "members":{ + "instanceUrl":{ + "shape":"InstanceUrl", + "documentation":"

    The location of the ServiceNow resource.

    " + } + }, + "documentation":"

    The connector-specific profile properties required when using ServiceNow.

    " + }, + "ServiceNowMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to ServiceNow.

    " + }, + "ServiceNowSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the ServiceNow flow source.

    " + } + }, + "documentation":"

    The properties that are applied when ServiceNow is being used as a source.

    " + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request would cause a service quota (such as the number of flows) to be exceeded.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "SingularConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "SingularConnectorProfileCredentials":{ + "type":"structure", + "required":["apiKey"], + "members":{ + "apiKey":{ + "shape":"ApiKey", + "documentation":"

    A unique alphanumeric identifier used to authenticate a user, developer, or calling program to your API.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required when using Singular.

    " + }, + "SingularConnectorProfileProperties":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector-specific profile properties required when using Singular.

    " + }, + "SingularMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Singular.

    " + }, + "SingularSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Singular flow source.

    " + } + }, + "documentation":"

    The properties that are applied when Singular is being used as a source.

    " + }, + "SlackConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "LESS_THAN", + "GREATER_THAN", + "BETWEEN", + "LESS_THAN_OR_EQUAL_TO", + "GREATER_THAN_OR_EQUAL_TO", + "EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "SlackConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "clientId", + "clientSecret" + ], + "members":{ + "clientId":{ + "shape":"ClientId", + "documentation":"

    The identifier for the client.

    " + }, + "clientSecret":{ + "shape":"ClientSecret", + "documentation":"

    The client secret used by the OAuth client to authenticate to the authorization server.

    " + }, + "accessToken":{ + "shape":"AccessToken", + "documentation":"

    The credentials used to access protected Slack resources.

    " + }, + "oAuthRequest":{ + "shape":"ConnectorOAuthRequest", + "documentation":"

    The OAuth requirement needed to request security tokens from the connector endpoint.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required when using Slack.

    " + }, + "SlackConnectorProfileProperties":{ + "type":"structure", + "required":["instanceUrl"], + "members":{ + "instanceUrl":{ + "shape":"InstanceUrl", + "documentation":"

    The location of the Slack resource.

    " + } + }, + "documentation":"

    The connector-specific profile properties required when using Slack.

    " + }, + "SlackMetadata":{ + "type":"structure", + "members":{ + "oAuthScopes":{ + "shape":"OAuthScopeList", + "documentation":"

    The desired authorization scope for the Slack account.

    " + } + }, + "documentation":"

    The connector metadata specific to Slack.

    " + }, + "SlackSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Slack flow source.

    " + } + }, + "documentation":"

    The properties that are applied when Slack is being used as a source.

    " + }, + "SnowflakeConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "username", + "password" + ], + "members":{ + "username":{ + "shape":"Username", + "documentation":"

    The name of the user.

    " + }, + "password":{ + "shape":"Password", + "documentation":"

    The password that corresponds to the user name.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required when using Snowflake.

    " + }, + "SnowflakeConnectorProfileProperties":{ + "type":"structure", + "required":[ + "warehouse", + "stage", + "bucketName" + ], + "members":{ + "warehouse":{ + "shape":"Warehouse", + "documentation":"

    The name of the Snowflake warehouse.

    " + }, + "stage":{ + "shape":"Stage", + "documentation":"

    The name of the Amazon S3 stage that was created while setting up an Amazon S3 stage in the Snowflake account. This is written in the following format: <Database><Schema><Stage Name>.

    " + }, + "bucketName":{ + "shape":"BucketName", + "documentation":"

    The name of the Amazon S3 bucket associated with Snowflake.

    " + }, + "bucketPrefix":{ + "shape":"BucketPrefix", + "documentation":"

    The bucket path that refers to the Amazon S3 bucket associated with Snowflake.

    " + }, + "privateLinkServiceName":{ + "shape":"PrivateLinkServiceName", + "documentation":"

    The Snowflake Private Link service name to be used for private data transfers.

    " + }, + "accountName":{ + "shape":"AccountName", + "documentation":"

    The name of the account.

    " + }, + "region":{ + "shape":"Region", + "documentation":"

    The AWS Region of the Snowflake account.

    " + } + }, + "documentation":"

    The connector-specific profile properties required when using Snowflake.

    " + }, + "SnowflakeDestinationProperties":{ + "type":"structure", + "required":[ + "object", + "intermediateBucketName" + ], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Snowflake flow destination.

    " + }, + "intermediateBucketName":{ + "shape":"BucketName", + "documentation":"

    The intermediate bucket that Amazon AppFlow uses when moving data into Snowflake.

    " + }, + "bucketPrefix":{ + "shape":"BucketPrefix", + "documentation":"

    The object key for the destination bucket in which Amazon AppFlow places the files.

    " + }, + "errorHandlingConfig":{ + "shape":"ErrorHandlingConfig", + "documentation":"

    The settings that determine how Amazon AppFlow handles an error when placing data in the Snowflake destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig is a part of the destination connector details.

    " + } + }, + "documentation":"

    The properties that are applied when Snowflake is being used as a destination.

    " + }, + "SnowflakeMetadata":{ + "type":"structure", + "members":{ + "supportedRegions":{ + "shape":"RegionList", + "documentation":"

    Specifies the supported AWS Regions when using Snowflake.

    " + } + }, + "documentation":"

    The connector metadata specific to Snowflake.

    " + }, + "SourceConnectorProperties":{ + "type":"structure", + "members":{ + "Amplitude":{ + "shape":"AmplitudeSourceProperties", + "documentation":"

    Specifies the information that is required for querying Amplitude.

    " + }, + "Datadog":{ + "shape":"DatadogSourceProperties", + "documentation":"

    Specifies the information that is required for querying Datadog.

    " + }, + "Dynatrace":{ + "shape":"DynatraceSourceProperties", + "documentation":"

    Specifies the information that is required for querying Dynatrace.

    " + }, + "GoogleAnalytics":{ + "shape":"GoogleAnalyticsSourceProperties", + "documentation":"

    Specifies the information that is required for querying Google Analytics.

    " + }, + "InforNexus":{ + "shape":"InforNexusSourceProperties", + "documentation":"

    Specifies the information that is required for querying Infor Nexus.

    " + }, + "Marketo":{ + "shape":"MarketoSourceProperties", + "documentation":"

    Specifies the information that is required for querying Marketo.

    " + }, + "S3":{ + "shape":"S3SourceProperties", + "documentation":"

    Specifies the information that is required for querying Amazon S3.

    " + }, + "Salesforce":{ + "shape":"SalesforceSourceProperties", + "documentation":"

    Specifies the information that is required for querying Salesforce.

    " + }, + "ServiceNow":{ + "shape":"ServiceNowSourceProperties", + "documentation":"

    Specifies the information that is required for querying ServiceNow.

    " + }, + "Singular":{ + "shape":"SingularSourceProperties", + "documentation":"

    Specifies the information that is required for querying Singular.

    " + }, + "Slack":{ + "shape":"SlackSourceProperties", + "documentation":"

    Specifies the information that is required for querying Slack.

    " + }, + "Trendmicro":{ + "shape":"TrendmicroSourceProperties", + "documentation":"

    Specifies the information that is required for querying Trend Micro.

    " + }, + "Veeva":{ + "shape":"VeevaSourceProperties", + "documentation":"

    Specifies the information that is required for querying Veeva.

    " + }, + "Zendesk":{ + "shape":"ZendeskSourceProperties", + "documentation":"

    Specifies the information that is required for querying Zendesk.

    " + } + }, + "documentation":"

    Specifies the information that is required to query a particular connector.

    " + }, + "SourceFieldProperties":{ + "type":"structure", + "members":{ + "isRetrievable":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the field can be returned in a search result.

    " + }, + "isQueryable":{ + "shape":"Boolean", + "documentation":"

    Indicates if the field can be queried.

    " + } + }, + "documentation":"

    The properties that can be applied to a field when the connector is being used as a source.

    " + }, + "SourceFields":{ + "type":"list", + "member":{"shape":"String"} + }, + "SourceFlowConfig":{ + "type":"structure", + "required":[ + "connectorType", + "sourceConnectorProperties" + ], + "members":{ + "connectorType":{ + "shape":"ConnectorType", + "documentation":"

    The type of connector, such as Salesforce, Amplitude, and so on.

    " + }, + "connectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

    The name of the connector profile. This name must be unique for each connector profile in the AWS account.

    " + }, + "sourceConnectorProperties":{ + "shape":"SourceConnectorProperties", + "documentation":"

    Specifies the information that is required to query a particular source connector.

    " + }, + "incrementalPullConfig":{ + "shape":"IncrementalPullConfig", + "documentation":"

    Defines the configuration for a scheduled incremental data pull. If a valid configuration is provided, the fields specified in the configuration are used when querying for the incremental data pull.

    " + } + }, + "documentation":"

    Contains information about the configuration of the source connector used in the flow.
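A hedged sketch that combines the source-side shapes above: a Salesforce source with an incremental pull keyed on a timestamp field. The ConnectorType constant, profile name, object name, and timestamp field are assumptions or placeholders.

    import software.amazon.awssdk.services.appflow.model.ConnectorType;
    import software.amazon.awssdk.services.appflow.model.IncrementalPullConfig;
    import software.amazon.awssdk.services.appflow.model.SalesforceSourceProperties;
    import software.amazon.awssdk.services.appflow.model.SourceConnectorProperties;
    import software.amazon.awssdk.services.appflow.model.SourceFlowConfig;

    class SourceConfigExample {
        // Pulls Salesforce Account records, importing only rows changed since the last run.
        static SourceFlowConfig salesforceIncremental() {
            return SourceFlowConfig.builder()
                    .connectorType(ConnectorType.SALESFORCE)           // enum constant assumed
                    .connectorProfileName("my-salesforce-profile")     // hypothetical profile
                    .sourceConnectorProperties(SourceConnectorProperties.builder()
                            .salesforce(SalesforceSourceProperties.builder()
                                    .object("Account")                 // hypothetical object
                                    .includeDeletedRecords(false)
                                    .build())
                            .build())
                    .incrementalPullConfig(IncrementalPullConfig.builder()
                            .datetimeTypeFieldName("LastModifiedDate") // hypothetical timestamp field
                            .build())
                    .build();
        }
    }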

    " + }, + "Stage":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "StartFlowRequest":{ + "type":"structure", + "required":["flowName"], + "members":{ + "flowName":{ + "shape":"FlowName", + "documentation":"

    The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.

    " + } + } + }, + "StartFlowResponse":{ + "type":"structure", + "members":{ + "flowArn":{ + "shape":"FlowArn", + "documentation":"

    The flow's Amazon Resource Name (ARN).

    " + }, + "flowStatus":{ + "shape":"FlowStatus", + "documentation":"

    Indicates the current status of the flow.

    " + }, + "executionId":{ + "shape":"ExecutionId", + "documentation":"

    Returns the internal execution ID of an on-demand flow when the flow is started. For scheduled or event-triggered flows, this value is null.
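A minimal sketch of starting an on-demand flow and reading back the execution ID noted above, assuming the standard generated client and accessor names; the flow name is a placeholder.

    import software.amazon.awssdk.services.appflow.AppflowClient;
    import software.amazon.awssdk.services.appflow.model.StartFlowRequest;
    import software.amazon.awssdk.services.appflow.model.StartFlowResponse;

    class StartFlowExample {
        static void run(AppflowClient appflow) {
            StartFlowResponse started = appflow.startFlow(
                    StartFlowRequest.builder().flowName("my-example-flow").build()); // hypothetical name
            // executionId is only populated for on-demand flows; it is null for scheduled/event flows.
            System.out.println(started.flowStatusAsString() + " " + started.executionId());
        }
    }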

    " + } + } + }, + "StopFlowRequest":{ + "type":"structure", + "required":["flowName"], + "members":{ + "flowName":{ + "shape":"FlowName", + "documentation":"

    The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.

    " + } + } + }, + "StopFlowResponse":{ + "type":"structure", + "members":{ + "flowArn":{ + "shape":"FlowArn", + "documentation":"

    The flow's Amazon Resource Name (ARN).

    " + }, + "flowStatus":{ + "shape":"FlowStatus", + "documentation":"

    Indicates the current status of the flow.

    " + } + } + }, + "String":{ + "type":"string", + "max":2048, + "pattern":".*" + }, + "SupportedFieldTypeDetails":{ + "type":"structure", + "required":["v1"], + "members":{ + "v1":{ + "shape":"FieldTypeDetails", + "documentation":"

    The initial supported version for fieldType. If this is later changed to a different version, v2 will be introduced.

    " + } + }, + "documentation":"

    Contains details regarding all the supported FieldTypes and their corresponding filterOperators and supportedValues.

    " + }, + "SupportedValueList":{ + "type":"list", + "member":{"shape":"Value"} + }, + "SupportedWriteOperationList":{ + "type":"list", + "member":{"shape":"WriteOperationType"} + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ARN", + "documentation":"

    The Amazon Resource Name (ARN) of the flow that you want to tag.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for your flow.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "pattern":"[\\s\\w+-=\\.:/@]*" + }, + "Task":{ + "type":"structure", + "required":[ + "sourceFields", + "taskType" + ], + "members":{ + "sourceFields":{ + "shape":"SourceFields", + "documentation":"

    The source fields to which a particular task is applied.

    " + }, + "connectorOperator":{ + "shape":"ConnectorOperator", + "documentation":"

    The operation to be performed on the provided source fields.

    " + }, + "destinationField":{ + "shape":"DestinationField", + "documentation":"

    A field in a destination connector, or a field value against which Amazon AppFlow validates a source field.

    " + }, + "taskType":{ + "shape":"TaskType", + "documentation":"

    Specifies the particular task implementation that Amazon AppFlow performs.

    " + }, + "taskProperties":{ + "shape":"TaskPropertiesMap", + "documentation":"

    A map used to store task-related information. The execution service looks for particular information based on the TaskType.

    " + } + }, + "documentation":"

    A class for modeling different types of tasks. Task implementation varies based on the TaskType.

    " + }, + "TaskPropertiesMap":{ + "type":"map", + "key":{"shape":"OperatorPropertiesKeys"}, + "value":{"shape":"Property"} + }, + "TaskType":{ + "type":"string", + "enum":[ + "Arithmetic", + "Filter", + "Map", + "Mask", + "Merge", + "Truncate", + "Validate" + ] + }, + "Tasks":{ + "type":"list", + "member":{"shape":"Task"} + }, + "Timezone":{ + "type":"string", + "max":256, + "pattern":".*" + }, + "TrendmicroConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "TrendmicroConnectorProfileCredentials":{ + "type":"structure", + "required":["apiSecretKey"], + "members":{ + "apiSecretKey":{ + "shape":"ApiSecretKey", + "documentation":"

    The Secret Access Key portion of the credentials.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required when using Trend Micro.

    " + }, + "TrendmicroConnectorProfileProperties":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector-specific profile properties required when using Trend Micro.

    " + }, + "TrendmicroMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Trend Micro.

    " + }, + "TrendmicroSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Trend Micro flow source.

    " + } + }, + "documentation":"

    The properties that are applied when using Trend Micro as a flow source.

    " + }, + "TriggerConfig":{ + "type":"structure", + "required":["triggerType"], + "members":{ + "triggerType":{ + "shape":"TriggerType", + "documentation":"

    Specifies the type of flow trigger. This can be OnDemand, Scheduled, or Event.

    " + }, + "triggerProperties":{ + "shape":"TriggerProperties", + "documentation":"

    Specifies the configuration details of a schedule-triggered flow as defined by the user. Currently, these settings only apply to the Scheduled trigger type.

    " + } + }, + "documentation":"

    The trigger settings that determine how and when Amazon AppFlow runs the specified flow.
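
    To make the structure concrete, here is a minimal sketch of building a TriggerConfig for an on-demand flow; the model class names are assumed from the SDK's code-generation conventions.

        import software.amazon.awssdk.services.appflow.model.TriggerConfig;
        import software.amazon.awssdk.services.appflow.model.TriggerType;

        public class TriggerConfigExample {
            public static void main(String[] args) {
                // An on-demand trigger needs no triggerProperties; a scheduled flow would
                // additionally set triggerProperties with ScheduledTriggerProperties.
                TriggerConfig onDemand = TriggerConfig.builder()
                        .triggerType(TriggerType.ON_DEMAND)
                        .build();
                System.out.println(onDemand);
            }
        }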

    " + }, + "TriggerProperties":{ + "type":"structure", + "members":{ + "Scheduled":{ + "shape":"ScheduledTriggerProperties", + "documentation":"

    Specifies the configuration details of a schedule-triggered flow as defined by the user.

    " + } + }, + "documentation":"

    Specifies the configuration details that control the trigger for a flow. Currently, these settings only apply to the Scheduled trigger type.

    " + }, + "TriggerType":{ + "type":"string", + "enum":[ + "Scheduled", + "Event", + "OnDemand" + ] + }, + "TriggerTypeList":{ + "type":"list", + "member":{"shape":"TriggerType"} + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The requested operation is not supported for the current flow.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ARN", + "documentation":"

    The Amazon Resource Name (ARN) of the flow that you want to untag.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag keys associated with the tag that you want to remove from your flow.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateConnectorProfileRequest":{ + "type":"structure", + "required":[ + "connectorProfileName", + "connectionMode", + "connectorProfileConfig" + ], + "members":{ + "connectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

    The name of the connector profile. The name is unique for each ConnectorProfile in the AWS account.

    " + }, + "connectionMode":{ + "shape":"ConnectionMode", + "documentation":"

    Indicates the connection mode and whether it is public or private.

    " + }, + "connectorProfileConfig":{ + "shape":"ConnectorProfileConfig", + "documentation":"

    Defines the connector-specific profile configuration and credentials.

    " + } + } + }, + "UpdateConnectorProfileResponse":{ + "type":"structure", + "members":{ + "connectorProfileArn":{ + "shape":"ConnectorProfileArn", + "documentation":"

    The Amazon Resource Name (ARN) of the connector profile.

    " + } + } + }, + "UpdateFlowRequest":{ + "type":"structure", + "required":[ + "flowName", + "triggerConfig", + "destinationFlowConfigList", + "tasks" + ], + "members":{ + "flowName":{ + "shape":"FlowName", + "documentation":"

    The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.

    " + }, + "description":{ + "shape":"FlowDescription", + "documentation":"

    A description of the flow.

    " + }, + "triggerConfig":{ + "shape":"TriggerConfig", + "documentation":"

    The trigger settings that determine how and when the flow runs.

    " + }, + "sourceFlowConfig":{"shape":"SourceFlowConfig"}, + "destinationFlowConfigList":{ + "shape":"DestinationFlowConfigList", + "documentation":"

    The configuration that controls how Amazon AppFlow transfers data to the destination connector.

    " + }, + "tasks":{ + "shape":"Tasks", + "documentation":"

    A list of tasks that Amazon AppFlow performs while transferring the data in the flow run.

    " + } + } + }, + "UpdateFlowResponse":{ + "type":"structure", + "members":{ + "flowStatus":{ + "shape":"FlowStatus", + "documentation":"

    Indicates the current status of the flow.

    " + } + } + }, + "UpdatedBy":{ + "type":"string", + "max":256, + "pattern":"\\S+" + }, + "UpsolverBucketName":{ + "type":"string", + "max":63, + "min":16, + "pattern":"^(upsolver-appflow)\\S*" + }, + "UpsolverDestinationProperties":{ + "type":"structure", + "required":[ + "bucketName", + "s3OutputFormatConfig" + ], + "members":{ + "bucketName":{ + "shape":"UpsolverBucketName", + "documentation":"

    The Upsolver Amazon S3 bucket name in which Amazon AppFlow places the transferred data.

    " + }, + "bucketPrefix":{ + "shape":"BucketPrefix", + "documentation":"

    The object key for the destination Upsolver Amazon S3 bucket in which Amazon AppFlow places the files.

    " + }, + "s3OutputFormatConfig":{ + "shape":"UpsolverS3OutputFormatConfig", + "documentation":"

    The configuration that determines how data is formatted when Upsolver is used as the flow destination.

    " + } + }, + "documentation":"

    The properties that are applied when Upsolver is used as a destination.

    " + }, + "UpsolverMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Upsolver.

    " + }, + "UpsolverS3OutputFormatConfig":{ + "type":"structure", + "required":["prefixConfig"], + "members":{ + "fileType":{ + "shape":"FileType", + "documentation":"

    Indicates the file type that Amazon AppFlow places in the Upsolver Amazon S3 bucket.

    " + }, + "prefixConfig":{"shape":"PrefixConfig"}, + "aggregationConfig":{"shape":"AggregationConfig"} + }, + "documentation":"

    The configuration that determines how Amazon AppFlow formats the flow output data when Upsolver is used as the destination.

    " + }, + "Username":{ + "type":"string", + "max":512, + "pattern":"\\S+" + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request has invalid or missing parameters.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Value":{ + "type":"string", + "max":128, + "pattern":"\\S+" + }, + "VeevaConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "LESS_THAN", + "GREATER_THAN", + "CONTAINS", + "BETWEEN", + "LESS_THAN_OR_EQUAL_TO", + "GREATER_THAN_OR_EQUAL_TO", + "EQUAL_TO", + "NOT_EQUAL_TO", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "VeevaConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "username", + "password" + ], + "members":{ + "username":{ + "shape":"Username", + "documentation":"

    The name of the user.

    " + }, + "password":{ + "shape":"Password", + "documentation":"

    The password that corresponds to the user name.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required when using Veeva.

    " + }, + "VeevaConnectorProfileProperties":{ + "type":"structure", + "required":["instanceUrl"], + "members":{ + "instanceUrl":{ + "shape":"InstanceUrl", + "documentation":"

    The location of the Veeva resource.

    " + } + }, + "documentation":"

    The connector-specific profile properties required when using Veeva.

    " + }, + "VeevaMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The connector metadata specific to Veeva.

    " + }, + "VeevaSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Veeva flow source.

    " + } + }, + "documentation":"

    The properties that are applied when using Veeva as a flow source.

    " + }, + "Warehouse":{ + "type":"string", + "max":512, + "pattern":"[\\s\\w/!@#+=.-]*" + }, + "WriteOperationType":{ + "type":"string", + "documentation":"

    The possible write operations in the destination connector. When this value is not provided, it defaults to the INSERT operation.

    ", + "enum":[ + "INSERT", + "UPSERT", + "UPDATE" + ] + }, + "ZendeskConnectorOperator":{ + "type":"string", + "enum":[ + "PROJECTION", + "GREATER_THAN", + "ADDITION", + "MULTIPLICATION", + "DIVISION", + "SUBTRACTION", + "MASK_ALL", + "MASK_FIRST_N", + "MASK_LAST_N", + "VALIDATE_NON_NULL", + "VALIDATE_NON_ZERO", + "VALIDATE_NON_NEGATIVE", + "VALIDATE_NUMERIC", + "NO_OP" + ] + }, + "ZendeskConnectorProfileCredentials":{ + "type":"structure", + "required":[ + "clientId", + "clientSecret" + ], + "members":{ + "clientId":{ + "shape":"ClientId", + "documentation":"

    The identifier for the desired client.

    " + }, + "clientSecret":{ + "shape":"ClientSecret", + "documentation":"

    The client secret used by the OAuth client to authenticate to the authorization server.

    " + }, + "accessToken":{ + "shape":"AccessToken", + "documentation":"

    The credentials used to access protected Zendesk resources.

    " + }, + "oAuthRequest":{ + "shape":"ConnectorOAuthRequest", + "documentation":"

    The OAuth requirement needed to request security tokens from the connector endpoint.

    " + } + }, + "documentation":"

    The connector-specific profile credentials required when using Zendesk.
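
    As a rough sketch only (the model class name is assumed from the SDK's code-generation conventions, and all values are placeholders), the credentials object can be built like this:

        import software.amazon.awssdk.services.appflow.model.ZendeskConnectorProfileCredentials;

        public class ZendeskCredentialsExample {
            public static void main(String[] args) {
                // Placeholder values; real credentials should come from a secure store,
                // never from source code.
                ZendeskConnectorProfileCredentials credentials =
                        ZendeskConnectorProfileCredentials.builder()
                                .clientId("example-client-id")
                                .clientSecret("example-client-secret")
                                .accessToken("example-access-token")
                                .build();
                System.out.println(credentials.clientId());
            }
        }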

    " + }, + "ZendeskConnectorProfileProperties":{ + "type":"structure", + "required":["instanceUrl"], + "members":{ + "instanceUrl":{ + "shape":"InstanceUrl", + "documentation":"

    The location of the Zendesk resource.

    " + } + }, + "documentation":"

    The connector-specific profile properties required when using Zendesk.

    " + }, + "ZendeskMetadata":{ + "type":"structure", + "members":{ + "oAuthScopes":{ + "shape":"OAuthScopeList", + "documentation":"

    The desired authorization scope for the Zendesk account.

    " + } + }, + "documentation":"

    The connector metadata specific to Zendesk.

    " + }, + "ZendeskSourceProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

    The object specified in the Zendesk flow source.

    " + } + }, + "documentation":"

    The properties that are applied when using Zendesk as a flow source.

    " + } + }, + "documentation":"

    Welcome to the Amazon AppFlow API reference. This guide is for developers who need detailed information about the Amazon AppFlow API operations, data types, and errors.

    Amazon AppFlow is a fully managed integration service that enables you to securely transfer data between software as a service (SaaS) applications like Salesforce, Marketo, Slack, and ServiceNow, and AWS services like Amazon S3 and Amazon Redshift.

    Use the following links to get started on the Amazon AppFlow API:

    • Actions: An alphabetical list of all Amazon AppFlow API operations.

    • Data types: An alphabetical list of all Amazon AppFlow data types.

    • Common parameters: Parameters that all Query operations can use.

    • Common errors: Client and server errors that all operations can return.

    If you're new to Amazon AppFlow, we recommend that you review the Amazon AppFlow User Guide.

    Amazon AppFlow API users can use vendor-specific mechanisms for OAuth, and include applicable OAuth attributes (such as auth-code and redirecturi) with the connector-specific ConnectorProfileProperties when creating a new connector profile using Amazon AppFlow API operations. For example, Salesforce users can refer to the Authorize Apps with OAuth documentation.
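
    For readers coming to this model through the AWS SDK for Java v2, a minimal sketch of constructing a client for the service follows; the AppflowClient class name is assumed from the SDK's code-generation conventions, and the region is an arbitrary example.

        import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
        import software.amazon.awssdk.regions.Region;
        import software.amazon.awssdk.services.appflow.AppflowClient;

        public class AppflowClientExample {
            public static void main(String[] args) {
                // Region and credentials provider are set explicitly here; both fall back
                // to the standard SDK resolution chain when omitted.
                try (AppflowClient appflow = AppflowClient.builder()
                        .region(Region.US_EAST_1)
                        .credentialsProvider(DefaultCredentialsProvider.create())
                        .build()) {
                    System.out.println(appflow.serviceName());
                }
            }
        }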

    " +} diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml new file mode 100644 index 000000000000..0939683caab1 --- /dev/null +++ b/services/appintegrations/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + appintegrations + AWS Java SDK :: Services :: App Integrations + The AWS Java SDK for App Integrations module holds the client classes that are used for + communicating with App Integrations. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.appintegrations + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/appintegrations/src/main/resources/codegen-resources/paginators-1.json b/services/appintegrations/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/appintegrations/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/appintegrations/src/main/resources/codegen-resources/service-2.json b/services/appintegrations/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..a35f0586f70b --- /dev/null +++ b/services/appintegrations/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,675 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-07-29", + "endpointPrefix":"app-integrations", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon AppIntegrations Service", + "serviceId":"AppIntegrations", + "signatureVersion":"v4", + "signingName":"app-integrations", + "uid":"appintegrations-2020-07-29" + }, + "operations":{ + "CreateEventIntegration":{ + "name":"CreateEventIntegration", + "http":{ + "method":"POST", + "requestUri":"/eventIntegrations" + }, + "input":{"shape":"CreateEventIntegrationRequest"}, + "output":{"shape":"CreateEventIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ResourceQuotaExceededException"}, + {"shape":"DuplicateResourceException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    Creates an EventIntegration, given a specified name, description, and a reference to an Amazon Eventbridge bus in your account and a partner event source that will push events to that bus. No objects are created in your account, only metadata that is persisted on the EventIntegration control plane.
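
    A minimal sketch of calling this operation through the AWS SDK for Java v2 follows; the AppIntegrationsClient and model class names are assumed from the SDK's code-generation conventions, and the integration name, description, bus name, and event source are placeholders.

        import software.amazon.awssdk.services.appintegrations.AppIntegrationsClient;
        import software.amazon.awssdk.services.appintegrations.model.CreateEventIntegrationRequest;
        import software.amazon.awssdk.services.appintegrations.model.CreateEventIntegrationResponse;
        import software.amazon.awssdk.services.appintegrations.model.EventFilter;

        public class CreateEventIntegrationExample {
            public static void main(String[] args) {
                try (AppIntegrationsClient client = AppIntegrationsClient.create()) {
                    CreateEventIntegrationResponse response = client.createEventIntegration(
                            CreateEventIntegrationRequest.builder()
                                    .name("example-event-integration")       // placeholder name
                                    .description("Example event integration")
                                    .eventBridgeBus("example-partner-bus")   // placeholder bus name
                                    .eventFilter(EventFilter.builder()
                                            // The source must use the aws.partner/ prefix required by the model.
                                            .source("aws.partner/examplecorp")
                                            .build())
                                    .build());
                    System.out.println(response.eventIntegrationArn());
                }
            }
        }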

    " + }, + "DeleteEventIntegration":{ + "name":"DeleteEventIntegration", + "http":{ + "method":"DELETE", + "requestUri":"/eventIntegrations/{Name}" + }, + "input":{"shape":"DeleteEventIntegrationRequest"}, + "output":{"shape":"DeleteEventIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    Deletes the specified existing event integration. If the event integration is associated with clients, the request is rejected.

    " + }, + "GetEventIntegration":{ + "name":"GetEventIntegration", + "http":{ + "method":"GET", + "requestUri":"/eventIntegrations/{Name}" + }, + "input":{"shape":"GetEventIntegrationRequest"}, + "output":{"shape":"GetEventIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    Returns information about the event integration.

    " + }, + "ListEventIntegrationAssociations":{ + "name":"ListEventIntegrationAssociations", + "http":{ + "method":"GET", + "requestUri":"/eventIntegrations/{Name}/associations" + }, + "input":{"shape":"ListEventIntegrationAssociationsRequest"}, + "output":{"shape":"ListEventIntegrationAssociationsResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    Returns a paginated list of event integration associations in the account.

    " + }, + "ListEventIntegrations":{ + "name":"ListEventIntegrations", + "http":{ + "method":"GET", + "requestUri":"/eventIntegrations" + }, + "input":{"shape":"ListEventIntegrationsRequest"}, + "output":{"shape":"ListEventIntegrationsResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    Returns a paginated list of event integrations in the account.
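
    Because the results are paginated, callers pass the returned nextToken back in until no further token is returned. A minimal sketch, with client and model class names assumed from the SDK's code-generation conventions:

        import software.amazon.awssdk.services.appintegrations.AppIntegrationsClient;
        import software.amazon.awssdk.services.appintegrations.model.EventIntegration;
        import software.amazon.awssdk.services.appintegrations.model.ListEventIntegrationsRequest;
        import software.amazon.awssdk.services.appintegrations.model.ListEventIntegrationsResponse;

        public class ListEventIntegrationsExample {
            public static void main(String[] args) {
                try (AppIntegrationsClient client = AppIntegrationsClient.create()) {
                    String nextToken = null;
                    do {
                        ListEventIntegrationsRequest.Builder request =
                                ListEventIntegrationsRequest.builder().maxResults(50);
                        if (nextToken != null) {
                            request.nextToken(nextToken);
                        }
                        ListEventIntegrationsResponse page =
                                client.listEventIntegrations(request.build());
                        for (EventIntegration integration : page.eventIntegrations()) {
                            System.out.println(integration.name());
                        }
                        nextToken = page.nextToken();
                    } while (nextToken != null);
                }
            }
        }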

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    Lists the tags for the specified resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    Adds the specified tags to the specified resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    Removes the specified tags from the specified resource.

    " + }, + "UpdateEventIntegration":{ + "name":"UpdateEventIntegration", + "http":{ + "method":"PATCH", + "requestUri":"/eventIntegrations/{Name}" + }, + "input":{"shape":"UpdateEventIntegrationRequest"}, + "output":{"shape":"UpdateEventIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    Updates the description of an event integration.
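
    For illustration, a minimal sketch of the call (class names assumed from the SDK's code-generation conventions; the name and description are placeholders):

        import software.amazon.awssdk.services.appintegrations.AppIntegrationsClient;
        import software.amazon.awssdk.services.appintegrations.model.UpdateEventIntegrationRequest;

        public class UpdateEventIntegrationExample {
            public static void main(String[] args) {
                try (AppIntegrationsClient client = AppIntegrationsClient.create()) {
                    // The name identifies the integration; only the description is updated.
                    client.updateEventIntegration(UpdateEventIntegrationRequest.builder()
                            .name("example-event-integration")
                            .description("Updated description")
                            .build());
                }
            }
        }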

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "Arn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, + "ClientAssociationMetadata":{ + "type":"map", + "key":{"shape":"NonBlankString"}, + "value":{"shape":"NonBlankString"} + }, + "ClientId":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, + "CreateEventIntegrationRequest":{ + "type":"structure", + "required":[ + "Name", + "EventFilter", + "EventBridgeBus" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the event integration.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the event integration.

    " + }, + "EventFilter":{ + "shape":"EventFilter", + "documentation":"

    The event filter.

    " + }, + "EventBridgeBus":{ + "shape":"EventBridgeBus", + "documentation":"

    The Eventbridge bus.

    " + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    One or more tags.

    " + } + } + }, + "CreateEventIntegrationResponse":{ + "type":"structure", + "members":{ + "EventIntegrationArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the event integration.

    " + } + } + }, + "DeleteEventIntegrationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the event integration.

    ", + "location":"uri", + "locationName":"Name" + } + } + }, + "DeleteEventIntegrationResponse":{ + "type":"structure", + "members":{ + } + }, + "Description":{ + "type":"string", + "max":1000, + "min":1, + "pattern":".*" + }, + "DuplicateResourceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    A resource with the specified name already exists.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "EventBridgeBus":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9\\/\\._\\-]+$" + }, + "EventBridgeRuleName":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[a-zA-Z0-9\\/\\._\\-]+$" + }, + "EventFilter":{ + "type":"structure", + "required":["Source"], + "members":{ + "Source":{ + "shape":"Source", + "documentation":"

    The source of the events.

    " + } + }, + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    The event filter.

    " + }, + "EventIntegration":{ + "type":"structure", + "members":{ + "EventIntegrationArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the event integration.

    " + }, + "Name":{ + "shape":"Name", + "documentation":"

    The name of the event integration.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The event integration description.

    " + }, + "EventFilter":{ + "shape":"EventFilter", + "documentation":"

    The event integration filter.

    " + }, + "EventBridgeBus":{ + "shape":"EventBridgeBus", + "documentation":"

    The Amazon Eventbridge bus for the event integration.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags.

    " + } + }, + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    The event integration.

    " + }, + "EventIntegrationAssociation":{ + "type":"structure", + "members":{ + "EventIntegrationAssociationArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the event integration association.

    " + }, + "EventIntegrationAssociationId":{ + "shape":"UUID", + "documentation":"

    The identifier for the event integration association.

    " + }, + "EventIntegrationName":{ + "shape":"Name", + "documentation":"

    The name of the event integration.

    " + }, + "ClientId":{ + "shape":"ClientId", + "documentation":"

    The identifier for the client that is associated with the event integration.

    " + }, + "EventBridgeRuleName":{ + "shape":"EventBridgeRuleName", + "documentation":"

    The name of the Eventbridge rule.

    " + }, + "ClientAssociationMetadata":{ + "shape":"ClientAssociationMetadata", + "documentation":"

    The metadata associated with the client.

    " + } + }, + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    The event integration association.

    " + }, + "EventIntegrationAssociationsList":{ + "type":"list", + "member":{"shape":"EventIntegrationAssociation"}, + "max":50, + "min":1 + }, + "EventIntegrationsList":{ + "type":"list", + "member":{"shape":"EventIntegration"}, + "max":50, + "min":1 + }, + "GetEventIntegrationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the event integration.

    ", + "location":"uri", + "locationName":"Name" + } + } + }, + "GetEventIntegrationResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the event integration.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the event integration.

    " + }, + "EventIntegrationArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the event integration.

    " + }, + "EventBridgeBus":{ + "shape":"EventBridgeBus", + "documentation":"

    The Eventbridge bus.

    " + }, + "EventFilter":{ + "shape":"EventFilter", + "documentation":"

    The event filter.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    One or more tags.

    " + } + } + }, + "IdempotencyToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".*" + }, + "InternalServiceError":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    Request processing failed due to an error or failure with the service.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The request is not valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListEventIntegrationAssociationsRequest":{ + "type":"structure", + "required":["EventIntegrationName"], + "members":{ + "EventIntegrationName":{ + "shape":"Name", + "documentation":"

    The name of the event integration.

    ", + "location":"uri", + "locationName":"Name" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return per page.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListEventIntegrationAssociationsResponse":{ + "type":"structure", + "members":{ + "EventIntegrationAssociations":{ + "shape":"EventIntegrationAssociationsList", + "documentation":"

    The event integration associations.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If there are additional results, this is the token for the next set of results.

    " + } + } + }, + "ListEventIntegrationsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return per page.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListEventIntegrationsResponse":{ + "type":"structure", + "members":{ + "EventIntegrations":{ + "shape":"EventIntegrationsList", + "documentation":"

    The event integrations.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If there are additional results, this is the token for the next set of results.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    Information about the tags.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "Message":{"type":"string"}, + "Name":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9\\/\\._\\-]+$" + }, + "NextToken":{ + "type":"string", + "max":1000, + "min":1, + "pattern":".*" + }, + "NonBlankString":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*\\S.*" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The specified resource was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The allowed quota for the resource has been exceeded.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Source":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^aws\\.partner\\/.*$" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    One or more tags.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The throttling limit has been exceeded.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "UUID":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag keys.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateEventIntegrationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the event integration.

    ", + "location":"uri", + "locationName":"Name" + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the event integration.

    " + } + } + }, + "UpdateEventIntegrationResponse":{ + "type":"structure", + "members":{ + } + } + }, + "documentation":"

    The Amazon AppIntegrations APIs are in preview release and are subject to change.

    The Amazon AppIntegrations service enables you to configure and reuse connections to external applications.

    For information about how you can use external applications with Amazon Connect, see Set up pre-built integrations in the Amazon Connect Administrator Guide.

    " +} diff --git a/services/applicationautoscaling/build.properties b/services/applicationautoscaling/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/applicationautoscaling/build.properties +++ b/services/applicationautoscaling/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index d2cc70f7ca6b..770de5e00dc6 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + auditmanager + AWS Java SDK :: Services :: Audit Manager + The AWS Java SDK for Audit Manager module holds the client classes that are used for + communicating with Audit Manager. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.auditmanager + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/auditmanager/src/main/resources/codegen-resources/paginators-1.json b/services/auditmanager/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..33fc6b66d954 --- /dev/null +++ b/services/auditmanager/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,59 @@ +{ + "pagination": { + "GetChangeLogs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetDelegations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetEvidenceByEvidenceFolder": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetEvidenceFoldersByAssessment": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetEvidenceFoldersByAssessmentControl": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListAssessmentFrameworks": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListAssessmentReports": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListAssessments": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListControls": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListKeywordsForDataSource": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListNotifications": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + } + } +} diff --git a/services/auditmanager/src/main/resources/codegen-resources/service-2.json b/services/auditmanager/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..d5e795ceaf9a --- /dev/null +++ b/services/auditmanager/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,4326 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-07-25", + "endpointPrefix":"auditmanager", + "jsonVersion":"1.1", + 
"protocol":"rest-json", + "serviceFullName":"AWS Audit Manager", + "serviceId":"AuditManager", + "signatureVersion":"v4", + "signingName":"auditmanager", + "uid":"auditmanager-2017-07-25" + }, + "operations":{ + "AssociateAssessmentReportEvidenceFolder":{ + "name":"AssociateAssessmentReportEvidenceFolder", + "http":{ + "method":"PUT", + "requestUri":"/assessments/{assessmentId}/associateToAssessmentReport" + }, + "input":{"shape":"AssociateAssessmentReportEvidenceFolderRequest"}, + "output":{"shape":"AssociateAssessmentReportEvidenceFolderResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Associates an evidence folder to the specified assessment report in AWS Audit Manager.

    " + }, + "BatchAssociateAssessmentReportEvidence":{ + "name":"BatchAssociateAssessmentReportEvidence", + "http":{ + "method":"PUT", + "requestUri":"/assessments/{assessmentId}/batchAssociateToAssessmentReport" + }, + "input":{"shape":"BatchAssociateAssessmentReportEvidenceRequest"}, + "output":{"shape":"BatchAssociateAssessmentReportEvidenceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Associates a list of evidence to an assessment report in an AWS Audit Manager assessment.

    " + }, + "BatchCreateDelegationByAssessment":{ + "name":"BatchCreateDelegationByAssessment", + "http":{ + "method":"POST", + "requestUri":"/assessments/{assessmentId}/delegations" + }, + "input":{"shape":"BatchCreateDelegationByAssessmentRequest"}, + "output":{"shape":"BatchCreateDelegationByAssessmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a batch of delegations for a specified assessment in AWS Audit Manager.

    " + }, + "BatchDeleteDelegationByAssessment":{ + "name":"BatchDeleteDelegationByAssessment", + "http":{ + "method":"PUT", + "requestUri":"/assessments/{assessmentId}/delegations" + }, + "input":{"shape":"BatchDeleteDelegationByAssessmentRequest"}, + "output":{"shape":"BatchDeleteDelegationByAssessmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the delegations in the specified AWS Audit Manager assessment.

    " + }, + "BatchDisassociateAssessmentReportEvidence":{ + "name":"BatchDisassociateAssessmentReportEvidence", + "http":{ + "method":"PUT", + "requestUri":"/assessments/{assessmentId}/batchDisassociateFromAssessmentReport" + }, + "input":{"shape":"BatchDisassociateAssessmentReportEvidenceRequest"}, + "output":{"shape":"BatchDisassociateAssessmentReportEvidenceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Disassociates a list of evidence from the specified assessment report in AWS Audit Manager.

    " + }, + "BatchImportEvidenceToAssessmentControl":{ + "name":"BatchImportEvidenceToAssessmentControl", + "http":{ + "method":"POST", + "requestUri":"/assessments/{assessmentId}/controlSets/{controlSetId}/controls/{controlId}/evidence" + }, + "input":{"shape":"BatchImportEvidenceToAssessmentControlRequest"}, + "output":{"shape":"BatchImportEvidenceToAssessmentControlResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Uploads one or more pieces of evidence to the specified control in the assessment in AWS Audit Manager.

    " + }, + "CreateAssessment":{ + "name":"CreateAssessment", + "http":{ + "method":"POST", + "requestUri":"/assessments" + }, + "input":{"shape":"CreateAssessmentRequest"}, + "output":{"shape":"CreateAssessmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates an assessment in AWS Audit Manager.

    " + }, + "CreateAssessmentFramework":{ + "name":"CreateAssessmentFramework", + "http":{ + "method":"POST", + "requestUri":"/assessmentFrameworks" + }, + "input":{"shape":"CreateAssessmentFrameworkRequest"}, + "output":{"shape":"CreateAssessmentFrameworkResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a custom framework in AWS Audit Manager.

    " + }, + "CreateAssessmentReport":{ + "name":"CreateAssessmentReport", + "http":{ + "method":"POST", + "requestUri":"/assessments/{assessmentId}/reports" + }, + "input":{"shape":"CreateAssessmentReportRequest"}, + "output":{"shape":"CreateAssessmentReportResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Creates an assessment report for the specified assessment.

    " + }, + "CreateControl":{ + "name":"CreateControl", + "http":{ + "method":"POST", + "requestUri":"/controls" + }, + "input":{"shape":"CreateControlRequest"}, + "output":{"shape":"CreateControlResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new custom control in AWS Audit Manager.

    " + }, + "DeleteAssessment":{ + "name":"DeleteAssessment", + "http":{ + "method":"DELETE", + "requestUri":"/assessments/{assessmentId}" + }, + "input":{"shape":"DeleteAssessmentRequest"}, + "output":{"shape":"DeleteAssessmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an assessment in AWS Audit Manager.

    " + }, + "DeleteAssessmentFramework":{ + "name":"DeleteAssessmentFramework", + "http":{ + "method":"DELETE", + "requestUri":"/assessmentFrameworks/{frameworkId}" + }, + "input":{"shape":"DeleteAssessmentFrameworkRequest"}, + "output":{"shape":"DeleteAssessmentFrameworkResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes a custom framework in AWS Audit Manager.

    " + }, + "DeleteAssessmentReport":{ + "name":"DeleteAssessmentReport", + "http":{ + "method":"DELETE", + "requestUri":"/assessments/{assessmentId}/reports/{assessmentReportId}" + }, + "input":{"shape":"DeleteAssessmentReportRequest"}, + "output":{"shape":"DeleteAssessmentReportResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Deletes an assessment report from an assessment in AWS Audit Manager.

    " + }, + "DeleteControl":{ + "name":"DeleteControl", + "http":{ + "method":"DELETE", + "requestUri":"/controls/{controlId}" + }, + "input":{"shape":"DeleteControlRequest"}, + "output":{"shape":"DeleteControlResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes a custom control in AWS Audit Manager.

    " + }, + "DeregisterAccount":{ + "name":"DeregisterAccount", + "http":{ + "method":"POST", + "requestUri":"/account/deregisterAccount" + }, + "input":{"shape":"DeregisterAccountRequest"}, + "output":{"shape":"DeregisterAccountResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Deregisters an account in AWS Audit Manager.

    " + }, + "DeregisterOrganizationAdminAccount":{ + "name":"DeregisterOrganizationAdminAccount", + "http":{ + "method":"POST", + "requestUri":"/account/deregisterOrganizationAdminAccount" + }, + "input":{"shape":"DeregisterOrganizationAdminAccountRequest"}, + "output":{"shape":"DeregisterOrganizationAdminAccountResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Deregisters the delegated AWS administrator account from the AWS organization.

    " + }, + "DisassociateAssessmentReportEvidenceFolder":{ + "name":"DisassociateAssessmentReportEvidenceFolder", + "http":{ + "method":"PUT", + "requestUri":"/assessments/{assessmentId}/disassociateFromAssessmentReport" + }, + "input":{"shape":"DisassociateAssessmentReportEvidenceFolderRequest"}, + "output":{"shape":"DisassociateAssessmentReportEvidenceFolderResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Disassociates an evidence folder from the specified assessment report in AWS Audit Manager.

    " + }, + "GetAccountStatus":{ + "name":"GetAccountStatus", + "http":{ + "method":"GET", + "requestUri":"/account/status" + }, + "input":{"shape":"GetAccountStatusRequest"}, + "output":{"shape":"GetAccountStatusResponse"}, + "errors":[ + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns the registration status of an account in AWS Audit Manager.
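
    As a rough sketch of calling the service through the AWS SDK for Java v2 (client and model class names assumed from the SDK's code-generation conventions):

        import software.amazon.awssdk.services.auditmanager.AuditManagerClient;
        import software.amazon.awssdk.services.auditmanager.model.GetAccountStatusRequest;
        import software.amazon.awssdk.services.auditmanager.model.GetAccountStatusResponse;

        public class GetAccountStatusExample {
            public static void main(String[] args) {
                try (AuditManagerClient auditManager = AuditManagerClient.create()) {
                    GetAccountStatusResponse response = auditManager.getAccountStatus(
                            GetAccountStatusRequest.builder().build());
                    // The response carries the account's Audit Manager registration status.
                    System.out.println(response);
                }
            }
        }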

    " + }, + "GetAssessment":{ + "name":"GetAssessment", + "http":{ + "method":"GET", + "requestUri":"/assessments/{assessmentId}" + }, + "input":{"shape":"GetAssessmentRequest"}, + "output":{"shape":"GetAssessmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns an assessment from AWS Audit Manager.

    " + }, + "GetAssessmentFramework":{ + "name":"GetAssessmentFramework", + "http":{ + "method":"GET", + "requestUri":"/assessmentFrameworks/{frameworkId}" + }, + "input":{"shape":"GetAssessmentFrameworkRequest"}, + "output":{"shape":"GetAssessmentFrameworkResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a framework from AWS Audit Manager.

    " + }, + "GetAssessmentReportUrl":{ + "name":"GetAssessmentReportUrl", + "http":{ + "method":"GET", + "requestUri":"/assessments/{assessmentId}/reports/{assessmentReportId}/url" + }, + "input":{"shape":"GetAssessmentReportUrlRequest"}, + "output":{"shape":"GetAssessmentReportUrlResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns the URL of a specified assessment report in AWS Audit Manager.

    " + }, + "GetChangeLogs":{ + "name":"GetChangeLogs", + "http":{ + "method":"GET", + "requestUri":"/assessments/{assessmentId}/changelogs" + }, + "input":{"shape":"GetChangeLogsRequest"}, + "output":{"shape":"GetChangeLogsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of changelogs from AWS Audit Manager.

    " + }, + "GetControl":{ + "name":"GetControl", + "http":{ + "method":"GET", + "requestUri":"/controls/{controlId}" + }, + "input":{"shape":"GetControlRequest"}, + "output":{"shape":"GetControlResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a control from AWS Audit Manager.

    " + }, + "GetDelegations":{ + "name":"GetDelegations", + "http":{ + "method":"GET", + "requestUri":"/delegations" + }, + "input":{"shape":"GetDelegationsRequest"}, + "output":{"shape":"GetDelegationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of delegations from an audit owner to a delegate.

    " + }, + "GetEvidence":{ + "name":"GetEvidence", + "http":{ + "method":"GET", + "requestUri":"/assessments/{assessmentId}/controlSets/{controlSetId}/evidenceFolders/{evidenceFolderId}/evidence/{evidenceId}" + }, + "input":{"shape":"GetEvidenceRequest"}, + "output":{"shape":"GetEvidenceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns evidence from AWS Audit Manager.

    " + }, + "GetEvidenceByEvidenceFolder":{ + "name":"GetEvidenceByEvidenceFolder", + "http":{ + "method":"GET", + "requestUri":"/assessments/{assessmentId}/controlSets/{controlSetId}/evidenceFolders/{evidenceFolderId}/evidence" + }, + "input":{"shape":"GetEvidenceByEvidenceFolderRequest"}, + "output":{"shape":"GetEvidenceByEvidenceFolderResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns all evidence from a specified evidence folder in AWS Audit Manager.

    " + }, + "GetEvidenceFolder":{ + "name":"GetEvidenceFolder", + "http":{ + "method":"GET", + "requestUri":"/assessments/{assessmentId}/controlSets/{controlSetId}/evidenceFolders/{evidenceFolderId}" + }, + "input":{"shape":"GetEvidenceFolderRequest"}, + "output":{"shape":"GetEvidenceFolderResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns an evidence folder from the specified assessment in AWS Audit Manager.

    " + }, + "GetEvidenceFoldersByAssessment":{ + "name":"GetEvidenceFoldersByAssessment", + "http":{ + "method":"GET", + "requestUri":"/assessments/{assessmentId}/evidenceFolders" + }, + "input":{"shape":"GetEvidenceFoldersByAssessmentRequest"}, + "output":{"shape":"GetEvidenceFoldersByAssessmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns the evidence folders from a specified assessment in AWS Audit Manager.

    " + }, + "GetEvidenceFoldersByAssessmentControl":{ + "name":"GetEvidenceFoldersByAssessmentControl", + "http":{ + "method":"GET", + "requestUri":"/assessments/{assessmentId}/evidenceFolders-by-assessment-control/{controlSetId}/{controlId}" + }, + "input":{"shape":"GetEvidenceFoldersByAssessmentControlRequest"}, + "output":{"shape":"GetEvidenceFoldersByAssessmentControlResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of evidence folders associated with a specified control of an assessment in AWS Audit Manager.

    " + }, + "GetOrganizationAdminAccount":{ + "name":"GetOrganizationAdminAccount", + "http":{ + "method":"GET", + "requestUri":"/account/organizationAdminAccount" + }, + "input":{"shape":"GetOrganizationAdminAccountRequest"}, + "output":{"shape":"GetOrganizationAdminAccountResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns the name of the delegated AWS administrator account for the AWS organization.

    " + }, + "GetServicesInScope":{ + "name":"GetServicesInScope", + "http":{ + "method":"GET", + "requestUri":"/services" + }, + "input":{"shape":"GetServicesInScopeRequest"}, + "output":{"shape":"GetServicesInScopeResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of the in-scope AWS services for the specified assessment.

    " + }, + "GetSettings":{ + "name":"GetSettings", + "http":{ + "method":"GET", + "requestUri":"/settings/{attribute}" + }, + "input":{"shape":"GetSettingsRequest"}, + "output":{"shape":"GetSettingsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns the settings for the specified AWS account.

    " + }, + "ListAssessmentFrameworks":{ + "name":"ListAssessmentFrameworks", + "http":{ + "method":"GET", + "requestUri":"/assessmentFrameworks" + }, + "input":{"shape":"ListAssessmentFrameworksRequest"}, + "output":{"shape":"ListAssessmentFrameworksResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of the frameworks available in the AWS Audit Manager framework library.

    " + }, + "ListAssessmentReports":{ + "name":"ListAssessmentReports", + "http":{ + "method":"GET", + "requestUri":"/assessmentReports" + }, + "input":{"shape":"ListAssessmentReportsRequest"}, + "output":{"shape":"ListAssessmentReportsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of assessment reports created in AWS Audit Manager.

    " + }, + "ListAssessments":{ + "name":"ListAssessments", + "http":{ + "method":"GET", + "requestUri":"/assessments" + }, + "input":{"shape":"ListAssessmentsRequest"}, + "output":{"shape":"ListAssessmentsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of current and past assessments from AWS Audit Manager.

    " + }, + "ListControls":{ + "name":"ListControls", + "http":{ + "method":"GET", + "requestUri":"/controls" + }, + "input":{"shape":"ListControlsRequest"}, + "output":{"shape":"ListControlsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of controls from AWS Audit Manager.

    " + }, + "ListKeywordsForDataSource":{ + "name":"ListKeywordsForDataSource", + "http":{ + "method":"GET", + "requestUri":"/dataSourceKeywords" + }, + "input":{"shape":"ListKeywordsForDataSourceRequest"}, + "output":{"shape":"ListKeywordsForDataSourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of keywords that are pre-mapped to the specified control data source.

    " + }, + "ListNotifications":{ + "name":"ListNotifications", + "http":{ + "method":"GET", + "requestUri":"/notifications" + }, + "input":{"shape":"ListNotificationsRequest"}, + "output":{"shape":"ListNotificationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of all AWS Audit Manager notifications.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns a list of tags for the specified resource in AWS Audit Manager.

    " + }, + "RegisterAccount":{ + "name":"RegisterAccount", + "http":{ + "method":"POST", + "requestUri":"/account/registerAccount" + }, + "input":{"shape":"RegisterAccountRequest"}, + "output":{"shape":"RegisterAccountResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Enables AWS Audit Manager for the specified AWS account.

    " + }, + "RegisterOrganizationAdminAccount":{ + "name":"RegisterOrganizationAdminAccount", + "http":{ + "method":"POST", + "requestUri":"/account/registerOrganizationAdminAccount" + }, + "input":{"shape":"RegisterOrganizationAdminAccountRequest"}, + "output":{"shape":"RegisterOrganizationAdminAccountResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Enables an AWS account within the organization as the delegated administrator for AWS Audit Manager.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Tags the specified resource in AWS Audit Manager.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes a tag from a resource in AWS Audit Manager.

    " + }, + "UpdateAssessment":{ + "name":"UpdateAssessment", + "http":{ + "method":"PUT", + "requestUri":"/assessments/{assessmentId}" + }, + "input":{"shape":"UpdateAssessmentRequest"}, + "output":{"shape":"UpdateAssessmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Edits an AWS Audit Manager assessment.

    " + }, + "UpdateAssessmentControl":{ + "name":"UpdateAssessmentControl", + "http":{ + "method":"PUT", + "requestUri":"/assessments/{assessmentId}/controlSets/{controlSetId}/controls/{controlId}" + }, + "input":{"shape":"UpdateAssessmentControlRequest"}, + "output":{"shape":"UpdateAssessmentControlResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates a control within an assessment in AWS Audit Manager.

    " + }, + "UpdateAssessmentControlSetStatus":{ + "name":"UpdateAssessmentControlSetStatus", + "http":{ + "method":"PUT", + "requestUri":"/assessments/{assessmentId}/controlSets/{controlSetId}/status" + }, + "input":{"shape":"UpdateAssessmentControlSetStatusRequest"}, + "output":{"shape":"UpdateAssessmentControlSetStatusResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the status of a control set in an AWS Audit Manager assessment.

    " + }, + "UpdateAssessmentFramework":{ + "name":"UpdateAssessmentFramework", + "http":{ + "method":"PUT", + "requestUri":"/assessmentFrameworks/{frameworkId}" + }, + "input":{"shape":"UpdateAssessmentFrameworkRequest"}, + "output":{"shape":"UpdateAssessmentFrameworkResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates a custom framework in AWS Audit Manager.

    " + }, + "UpdateAssessmentStatus":{ + "name":"UpdateAssessmentStatus", + "http":{ + "method":"PUT", + "requestUri":"/assessments/{assessmentId}/status" + }, + "input":{"shape":"UpdateAssessmentStatusRequest"}, + "output":{"shape":"UpdateAssessmentStatusResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the status of an assessment in AWS Audit Manager.

    " + }, + "UpdateControl":{ + "name":"UpdateControl", + "http":{ + "method":"PUT", + "requestUri":"/controls/{controlId}" + }, + "input":{"shape":"UpdateControlRequest"}, + "output":{"shape":"UpdateControlResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates a custom control in AWS Audit Manager.

    " + }, + "UpdateSettings":{ + "name":"UpdateSettings", + "http":{ + "method":"PUT", + "requestUri":"/settings" + }, + "input":{"shape":"UpdateSettingsRequest"}, + "output":{"shape":"UpdateSettingsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates AWS Audit Manager settings for the current user account.

    " + }, + "ValidateAssessmentReportIntegrity":{ + "name":"ValidateAssessmentReportIntegrity", + "http":{ + "method":"POST", + "requestUri":"/assessmentReports/integrity" + }, + "input":{"shape":"ValidateAssessmentReportIntegrityRequest"}, + "output":{"shape":"ValidateAssessmentReportIntegrityResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Validates the integrity of an assessment report in AWS Audit Manager.

    " + } + }, + "shapes":{ + "AWSAccount":{ + "type":"structure", + "members":{ + "id":{ + "shape":"AccountId", + "documentation":"

    The identifier for the specified AWS account.

    " + }, + "emailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address associated with the specified AWS account.

    " + }, + "name":{ + "shape":"AccountName", + "documentation":"

    The name of the specified AWS account.

    " + } + }, + "documentation":"

    The wrapper of AWS account details, such as account ID, email address, and so on.

    " + }, + "AWSAccounts":{ + "type":"list", + "member":{"shape":"AWSAccount"} + }, + "AWSService":{ + "type":"structure", + "members":{ + "serviceName":{ + "shape":"AWSServiceName", + "documentation":"

    The name of the AWS service.

    " + } + }, + "documentation":"

    An AWS service such as Amazon S3, AWS CloudTrail, and so on.

    " + }, + "AWSServiceName":{ + "type":"string", + "max":40, + "min":1, + "pattern":"^[a-zA-Z0-9-\\s().]+$" + }, + "AWSServices":{ + "type":"list", + "member":{"shape":"AWSService"} + }, + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^[0-9]{12}$" + }, + "AccountName":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^[\\u0020-\\u007E]+$" + }, + "AccountStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "PENDING_ACTIVATION" + ] + }, + "ActionEnum":{ + "type":"string", + "enum":[ + "CREATE", + "UPDATE_METADATA", + "ACTIVE", + "INACTIVE", + "DELETE", + "UNDER_REVIEW", + "REVIEWED", + "IMPORT_EVIDENCE" + ] + }, + "ActionPlanInstructions":{ + "type":"string", + "max":1000, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "ActionPlanTitle":{ + "type":"string", + "max":300, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "Assessment":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"AuditManagerArn", + "documentation":"

    The Amazon Resource Name (ARN) of the assessment.

    " + }, + "awsAccount":{ + "shape":"AWSAccount", + "documentation":"

    The AWS account associated with the assessment.

    " + }, + "metadata":{ + "shape":"AssessmentMetadata", + "documentation":"

    The metadata for the specified assessment.

    " + }, + "framework":{ + "shape":"AssessmentFramework", + "documentation":"

    The framework from which the assessment was created.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags associated with the assessment.

    " + } + }, + "documentation":"

    An entity that defines the scope of audit evidence collected by AWS Audit Manager. An AWS Audit Manager assessment is an implementation of an AWS Audit Manager framework.

    " + }, + "AssessmentControl":{ + "type":"structure", + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified control.

    " + }, + "name":{ + "shape":"ControlName", + "documentation":"

    The name of the specified control.

    " + }, + "description":{ + "shape":"ControlDescription", + "documentation":"

    The description of the specified control.

    " + }, + "status":{ + "shape":"ControlStatus", + "documentation":"

    The status of the specified control.

    " + }, + "response":{ + "shape":"ControlResponse", + "documentation":"

    The response of the specified control.

    " + }, + "comments":{ + "shape":"ControlComments", + "documentation":"

    The list of comments attached to the specified control.

    " + }, + "evidenceSources":{ + "shape":"EvidenceSources", + "documentation":"

    The list of data sources for the specified evidence.

    " + }, + "evidenceCount":{ + "shape":"Integer", + "documentation":"

    The amount of evidence generated for the control.

    " + }, + "assessmentReportEvidenceCount":{ + "shape":"Integer", + "documentation":"

    The amount of evidence in the assessment report.

    " + } + }, + "documentation":"

    The control entity that represents a standard or custom control used in an AWS Audit Manager assessment.

    " + }, + "AssessmentControlSet":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ControlSetId", + "documentation":"

    The identifier of the control set in the assessment. This is the control set name in a plain string format.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description for the control set.

    " + }, + "status":{ + "shape":"ControlSetStatus", + "documentation":"

    Specifies the current status of the control set.

    " + }, + "roles":{ + "shape":"Roles", + "documentation":"

    The roles associated with the control set.

    " + }, + "controls":{ + "shape":"AssessmentControls", + "documentation":"

    The list of controls contained within the control set.

    " + }, + "delegations":{ + "shape":"Delegations", + "documentation":"

    The delegations associated with the control set.

    " + }, + "systemEvidenceCount":{ + "shape":"Integer", + "documentation":"

    The total number of evidence objects retrieved automatically for the control set.

    " + }, + "manualEvidenceCount":{ + "shape":"Integer", + "documentation":"

    The total number of evidence objects uploaded manually to the control set.

    " + } + }, + "documentation":"

    Represents a set of controls in an AWS Audit Manager assessment.

    " + }, + "AssessmentControlSets":{ + "type":"list", + "member":{"shape":"AssessmentControlSet"} + }, + "AssessmentControls":{ + "type":"list", + "member":{"shape":"AssessmentControl"} + }, + "AssessmentDescription":{ + "type":"string", + "max":1000, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "AssessmentEvidenceFolder":{ + "type":"structure", + "members":{ + "name":{ + "shape":"AssessmentEvidenceFolderName", + "documentation":"

    The name of the specified evidence folder.

    " + }, + "date":{ + "shape":"Timestamp", + "documentation":"

    The date when the first evidence was added to the evidence folder.

    " + }, + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    " + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The identifier for the control set.

    " + }, + "controlId":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the specified control.

    " + }, + "id":{ + "shape":"UUID", + "documentation":"

    The identifier for the folder in which evidence is stored.

    " + }, + "dataSource":{ + "shape":"String", + "documentation":"

    The AWS service from which the evidence was collected.

    " + }, + "author":{ + "shape":"String", + "documentation":"

    The name of the user who created the evidence folder.

    " + }, + "totalEvidence":{ + "shape":"Integer", + "documentation":"

    The total amount of evidence in the evidence folder.

    " + }, + "assessmentReportSelectionCount":{ + "shape":"Integer", + "documentation":"

    The total count of evidence included in the assessment report.

    " + }, + "controlName":{ + "shape":"ControlName", + "documentation":"

    The name of the control.

    " + }, + "evidenceResourcesIncludedCount":{ + "shape":"Integer", + "documentation":"

    The amount of evidence included in the evidence folder.

    " + }, + "evidenceByTypeConfigurationDataCount":{ + "shape":"Integer", + "documentation":"

    The number of evidence items that fall under the configuration data category. This evidence is collected from configuration snapshots of other AWS services such as Amazon EC2, Amazon S3, or IAM.

    " + }, + "evidenceByTypeManualCount":{ + "shape":"Integer", + "documentation":"

    The number of evidence items that fall under the manual category. This evidence is imported manually.

    " + }, + "evidenceByTypeComplianceCheckCount":{ + "shape":"Integer", + "documentation":"

    The number of evidence items that fall under the compliance check category. This evidence is collected from AWS Config or AWS Security Hub.

    " + }, + "evidenceByTypeComplianceCheckIssuesCount":{ + "shape":"Integer", + "documentation":"

    The total number of issues that were reported directly from AWS Security Hub, AWS Config, or both.

    " + }, + "evidenceByTypeUserActivityCount":{ + "shape":"Integer", + "documentation":"

    The number of evidence items that fall under the user activity category. This evidence is collected from AWS CloudTrail logs.

    " + }, + "evidenceAwsServiceSourceCount":{ + "shape":"Integer", + "documentation":"

    The total number of AWS resources assessed to generate the evidence.

    " + } + }, + "documentation":"

    The folder in which AWS Audit Manager stores evidence for an assessment.

    " + }, + "AssessmentEvidenceFolderName":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "AssessmentEvidenceFolders":{ + "type":"list", + "member":{"shape":"AssessmentEvidenceFolder"} + }, + "AssessmentFramework":{ + "type":"structure", + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the framework.

    " + }, + "arn":{ + "shape":"AuditManagerArn", + "documentation":"

    The Amazon Resource Name (ARN) of the specified framework.

    " + }, + "metadata":{"shape":"FrameworkMetadata"}, + "controlSets":{ + "shape":"AssessmentControlSets", + "documentation":"

    The control sets associated with the framework.

    " + } + }, + "documentation":"

    The file used to structure and automate AWS Audit Manager assessments for a given compliance standard.

    " + }, + "AssessmentFrameworkDescription":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "AssessmentFrameworkMetadata":{ + "type":"structure", + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the specified framework.

    " + }, + "type":{ + "shape":"FrameworkType", + "documentation":"

    The framework type, such as standard or custom.

    " + }, + "name":{ + "shape":"FrameworkName", + "documentation":"

    The name of the specified framework.

    " + }, + "description":{ + "shape":"FrameworkDescription", + "documentation":"

    The description of the specified framework.

    " + }, + "logo":{ + "shape":"Filename", + "documentation":"

    The logo associated with the framework.

    " + }, + "complianceType":{ + "shape":"ComplianceType", + "documentation":"

    The compliance type that the new custom framework supports, such as CIS or HIPAA.

    " + }, + "controlsCount":{ + "shape":"ControlsCount", + "documentation":"

    The number of controls associated with the specified framework.

    " + }, + "controlSetsCount":{ + "shape":"ControlSetsCount", + "documentation":"

    The number of control sets associated with the specified framework.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the framework was created.

    " + }, + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the framework was most recently updated.

    " + } + }, + "documentation":"

    The metadata associated with a standard or custom framework.

    " + }, + "AssessmentMetadata":{ + "type":"structure", + "members":{ + "name":{ + "shape":"AssessmentName", + "documentation":"

    The name of the assessment.

    " + }, + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the assessment.

    " + }, + "description":{ + "shape":"AssessmentDescription", + "documentation":"

    The description of the assessment.

    " + }, + "complianceType":{ + "shape":"ComplianceType", + "documentation":"

    The name of a compliance standard related to the assessment, such as PCI-DSS.

    " + }, + "status":{ + "shape":"AssessmentStatus", + "documentation":"

    The overall status of the assessment.

    " + }, + "assessmentReportsDestination":{ + "shape":"AssessmentReportsDestination", + "documentation":"

    The destination in which evidence reports are stored for the specified assessment.

    " + }, + "scope":{ + "shape":"Scope", + "documentation":"

    The wrapper of AWS accounts and services in scope for the assessment.

    " + }, + "roles":{ + "shape":"Roles", + "documentation":"

    The roles associated with the assessment.

    " + }, + "delegations":{ + "shape":"Delegations", + "documentation":"

    The delegations associated with the assessment.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the assessment was created.

    " + }, + "lastUpdated":{ + "shape":"Timestamp", + "documentation":"

    The time of the most recent update.

    " + } + }, + "documentation":"

    The metadata associated with the specified assessment.

    " + }, + "AssessmentMetadataItem":{ + "type":"structure", + "members":{ + "name":{ + "shape":"AssessmentName", + "documentation":"

    The name of the assessment.

    " + }, + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the assessment.

    " + }, + "complianceType":{ + "shape":"ComplianceType", + "documentation":"

    The name of the compliance standard related to the assessment, such as PCI-DSS.

    " + }, + "status":{ + "shape":"AssessmentStatus", + "documentation":"

    The current status of the assessment.

    " + }, + "roles":{ + "shape":"Roles", + "documentation":"

    The roles associated with the assessment.

    " + }, + "delegations":{ + "shape":"Delegations", + "documentation":"

    The delegations associated with the assessment.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the assessment was created.

    " + }, + "lastUpdated":{ + "shape":"Timestamp", + "documentation":"

    The time of the most recent update.

    " + } + }, + "documentation":"

    A metadata object associated with an assessment in AWS Audit Manager.

    " + }, + "AssessmentName":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "AssessmentReport":{ + "type":"structure", + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the specified assessment report.

    " + }, + "name":{ + "shape":"AssessmentReportName", + "documentation":"

    The name given to the assessment report.

    " + }, + "description":{ + "shape":"AssessmentReportDescription", + "documentation":"

    The description of the specified assessment report.

    " + }, + "awsAccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier for the specified AWS account.

    " + }, + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    " + }, + "assessmentName":{ + "shape":"AssessmentName", + "documentation":"

    The name of the associated assessment.

    " + }, + "author":{ + "shape":"Username", + "documentation":"

    The name of the user who created the assessment report.

    " + }, + "status":{ + "shape":"AssessmentReportStatus", + "documentation":"

    The current status of the specified assessment report.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the assessment report was created.

    " + } + }, + "documentation":"

    A finalized document generated from an AWS Audit Manager assessment. These reports summarize the relevant evidence collected for your audit, and link to the relevant evidence folders which are named and organized according to the controls specified in your assessment.

    " + }, + "AssessmentReportDescription":{ + "type":"string", + "max":1000, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "AssessmentReportDestinationType":{ + "type":"string", + "enum":["S3"] + }, + "AssessmentReportEvidenceError":{ + "type":"structure", + "members":{ + "evidenceId":{ + "shape":"UUID", + "documentation":"

    The identifier for the evidence.

    " + }, + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

    The error code returned by the AssessmentReportEvidence API.

    " + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    The error message returned by the AssessmentReportEvidence API.

    " + } + }, + "documentation":"

    An error entity for the AssessmentReportEvidence API. This is used to provide more meaningful errors than a simple string message.

    " + }, + "AssessmentReportEvidenceErrors":{ + "type":"list", + "member":{"shape":"AssessmentReportEvidenceError"} + }, + "AssessmentReportMetadata":{ + "type":"structure", + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the assessment report.

    " + }, + "name":{ + "shape":"AssessmentReportName", + "documentation":"

    The name of the assessment report.

    " + }, + "description":{ + "shape":"AssessmentReportDescription", + "documentation":"

    The description of the specified assessment report.

    " + }, + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the associated assessment.

    " + }, + "assessmentName":{ + "shape":"AssessmentName", + "documentation":"

    The name of the associated assessment.

    " + }, + "author":{ + "shape":"Username", + "documentation":"

    The name of the user who created the assessment report.

    " + }, + "status":{ + "shape":"AssessmentReportStatus", + "documentation":"

    The current status of the assessment report.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the assessment report was created.

    " + } + }, + "documentation":"

    The metadata objects associated with the specified assessment report.

    " + }, + "AssessmentReportName":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^[a-zA-Z0-9-_\\.]+$" + }, + "AssessmentReportStatus":{ + "type":"string", + "enum":[ + "COMPLETE", + "IN_PROGRESS", + "FAILED" + ] + }, + "AssessmentReportsDestination":{ + "type":"structure", + "members":{ + "destinationType":{ + "shape":"AssessmentReportDestinationType", + "documentation":"

    The destination type, such as Amazon S3.

    " + }, + "destination":{ + "shape":"S3Url", + "documentation":"

    The destination of the assessment report.

    " + } + }, + "documentation":"

    The location in which AWS Audit Manager saves assessment reports for the given assessment.

    " + }, + "AssessmentReportsMetadata":{ + "type":"list", + "member":{"shape":"AssessmentReportMetadata"} + }, + "AssessmentStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE" + ] + }, + "AssociateAssessmentReportEvidenceFolderRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "evidenceFolderId" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "evidenceFolderId":{ + "shape":"UUID", + "documentation":"

    The identifier for the folder in which evidence is stored.

    " + } + } + }, + "AssociateAssessmentReportEvidenceFolderResponse":{ + "type":"structure", + "members":{ + } + }, + "AuditManagerArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:.*:auditmanager:.*" + }, + "BatchAssociateAssessmentReportEvidenceRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "evidenceFolderId", + "evidenceIds" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "evidenceFolderId":{ + "shape":"UUID", + "documentation":"

    The identifier for the folder in which the evidence is stored.

    " + }, + "evidenceIds":{ + "shape":"EvidenceIds", + "documentation":"

    The list of evidence identifiers.

    " + } + } + }, + "BatchAssociateAssessmentReportEvidenceResponse":{ + "type":"structure", + "members":{ + "evidenceIds":{ + "shape":"EvidenceIds", + "documentation":"

    The identifier for the evidence.

    " + }, + "errors":{ + "shape":"AssessmentReportEvidenceErrors", + "documentation":"

    A list of errors returned by the BatchAssociateAssessmentReportEvidence API.

    " + } + } + }, + "BatchCreateDelegationByAssessmentError":{ + "type":"structure", + "members":{ + "createDelegationRequest":{ + "shape":"CreateDelegationRequest", + "documentation":"

    The API request to batch create delegations in AWS Audit Manager.

    " + }, + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

    The error code returned by the BatchCreateDelegationByAssessment API.

    " + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    The error message returned by the BatchCreateDelegationByAssessment API.

    " + } + }, + "documentation":"

    An error entity for the BatchCreateDelegationByAssessment API. This is used to provide more meaningful errors than a simple string message.

    " + }, + "BatchCreateDelegationByAssessmentErrors":{ + "type":"list", + "member":{"shape":"BatchCreateDelegationByAssessmentError"} + }, + "BatchCreateDelegationByAssessmentRequest":{ + "type":"structure", + "required":[ + "createDelegationRequests", + "assessmentId" + ], + "members":{ + "createDelegationRequests":{ + "shape":"CreateDelegationRequests", + "documentation":"

    The API request to batch create delegations in AWS Audit Manager.

    " + }, + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + } + } + }, + "BatchCreateDelegationByAssessmentResponse":{ + "type":"structure", + "members":{ + "delegations":{ + "shape":"Delegations", + "documentation":"

    The delegations associated with the assessment.

    " + }, + "errors":{ + "shape":"BatchCreateDelegationByAssessmentErrors", + "documentation":"

    A list of errors returned by the BatchCreateDelegationByAssessment API.

    " + } + } + }, + "BatchDeleteDelegationByAssessmentError":{ + "type":"structure", + "members":{ + "delegationId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified delegation.

    " + }, + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

    The error code returned by the BatchDeleteDelegationByAssessment API.

    " + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    The error message returned by the BatchDeleteDelegationByAssessment API.

    " + } + }, + "documentation":"

    An error entity for the BatchDeleteDelegationByAssessment API. This is used to provide more meaningful errors than a simple string message.

    " + }, + "BatchDeleteDelegationByAssessmentErrors":{ + "type":"list", + "member":{"shape":"BatchDeleteDelegationByAssessmentError"} + }, + "BatchDeleteDelegationByAssessmentRequest":{ + "type":"structure", + "required":[ + "delegationIds", + "assessmentId" + ], + "members":{ + "delegationIds":{ + "shape":"DelegationIds", + "documentation":"

    The identifiers for the specified delegations.

    " + }, + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + } + } + }, + "BatchDeleteDelegationByAssessmentResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"BatchDeleteDelegationByAssessmentErrors", + "documentation":"

    A list of errors returned by the BatchDeleteDelegationByAssessment API.

    " + } + } + }, + "BatchDisassociateAssessmentReportEvidenceRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "evidenceFolderId", + "evidenceIds" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "evidenceFolderId":{ + "shape":"UUID", + "documentation":"

    The identifier for the folder in which evidence is stored.

    " + }, + "evidenceIds":{ + "shape":"EvidenceIds", + "documentation":"

    The list of evidence identifiers.

    " + } + } + }, + "BatchDisassociateAssessmentReportEvidenceResponse":{ + "type":"structure", + "members":{ + "evidenceIds":{ + "shape":"EvidenceIds", + "documentation":"

    The identifier for the evidence.

    " + }, + "errors":{ + "shape":"AssessmentReportEvidenceErrors", + "documentation":"

    A list of errors returned by the BatchDisassociateAssessmentReportEvidence API.

    " + } + } + }, + "BatchImportEvidenceToAssessmentControlError":{ + "type":"structure", + "members":{ + "manualEvidence":{ + "shape":"ManualEvidence", + "documentation":"

    Manual evidence that cannot be collected automatically by AWS Audit Manager.

    " + }, + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

    The error code returned by the BatchImportEvidenceToAssessmentControl API.

    " + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    The error message returned by the BatchImportEvidenceToAssessmentControl API.

    " + } + }, + "documentation":"

    An error entity for the BatchImportEvidenceToAssessmentControl API. This is used to provide more meaningful errors than a simple string message.

    " + }, + "BatchImportEvidenceToAssessmentControlErrors":{ + "type":"list", + "member":{"shape":"BatchImportEvidenceToAssessmentControlError"} + }, + "BatchImportEvidenceToAssessmentControlRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "controlSetId", + "controlId", + "manualEvidence" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The identifier for the specified control set.

    ", + "location":"uri", + "locationName":"controlSetId" + }, + "controlId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified control.

    ", + "location":"uri", + "locationName":"controlId" + }, + "manualEvidence":{ + "shape":"ManualEvidenceList", + "documentation":"

    The list of manual evidence objects.

    " + } + } + }, + "BatchImportEvidenceToAssessmentControlResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"BatchImportEvidenceToAssessmentControlErrors", + "documentation":"

    A list of errors returned by the BatchImportEvidenceToAssessmentControl API.

    " + } + } + }, + "Boolean":{"type":"boolean"}, + "ChangeLog":{ + "type":"structure", + "members":{ + "objectType":{ + "shape":"ObjectTypeEnum", + "documentation":"

    The changelog object type, such as an assessment, control, or control set.

    " + }, + "objectName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the changelog object.

    " + }, + "action":{ + "shape":"ActionEnum", + "documentation":"

    The action performed.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time of creation for the changelog object.

    " + }, + "createdBy":{ + "shape":"IamArn", + "documentation":"

    The IAM user or role that performed the action.

    " + } + }, + "documentation":"

    The record of a change within AWS Audit Manager, such as a modified assessment, a delegated control set, and so on.

    " + }, + "ChangeLogs":{ + "type":"list", + "member":{"shape":"ChangeLog"} + }, + "ComplianceType":{ + "type":"string", + "max":100, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "Control":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"AuditManagerArn", + "documentation":"

    The Amazon Resource Name (ARN) of the specified control.

    " + }, + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the control.

    " + }, + "type":{ + "shape":"ControlType", + "documentation":"

    The type of control, such as custom or standard.

    " + }, + "name":{ + "shape":"ControlName", + "documentation":"

    The name of the specified control.

    " + }, + "description":{ + "shape":"ControlDescription", + "documentation":"

    The description of the specified control.

    " + }, + "testingInformation":{ + "shape":"TestingInformation", + "documentation":"

    The steps to follow to determine if the control has been satisfied.

    " + }, + "actionPlanTitle":{ + "shape":"ActionPlanTitle", + "documentation":"

    The title of the action plan for remediating the control.

    " + }, + "actionPlanInstructions":{ + "shape":"ActionPlanInstructions", + "documentation":"

    The recommended actions to carry out if the control is not fulfilled.

    " + }, + "controlSources":{ + "shape":"ControlSources", + "documentation":"

    The data mapping sources for the specified control.

    " + }, + "controlMappingSources":{ + "shape":"ControlMappingSources", + "documentation":"

    The data mapping sources for the specified control.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the control was created.

    " + }, + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the control was most recently updated.

    " + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

    The IAM user or role that created the control.

    " + }, + "lastUpdatedBy":{ + "shape":"LastUpdatedBy", + "documentation":"

    The IAM user or role that most recently updated the control.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags associated with the control.

    " + } + }, + "documentation":"

    A control in AWS Audit Manager.

    " + }, + "ControlComment":{ + "type":"structure", + "members":{ + "authorName":{ + "shape":"Username", + "documentation":"

    The name of the user who authored the comment.

    " + }, + "commentBody":{ + "shape":"ControlCommentBody", + "documentation":"

    The body text of a control comment.

    " + }, + "postedDate":{ + "shape":"Timestamp", + "documentation":"

    The time when the comment was posted.

    " + } + }, + "documentation":"

    A comment posted by a user on a control. This includes the author's name, the comment text, and a timestamp.

    " + }, + "ControlCommentBody":{ + "type":"string", + "max":500, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "ControlComments":{ + "type":"list", + "member":{"shape":"ControlComment"} + }, + "ControlDescription":{ + "type":"string", + "max":1000, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "ControlMappingSource":{ + "type":"structure", + "members":{ + "sourceId":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the specified source.

    " + }, + "sourceName":{ + "shape":"SourceName", + "documentation":"

    The name of the specified source.

    " + }, + "sourceDescription":{ + "shape":"SourceDescription", + "documentation":"

    The description of the specified source.

    " + }, + "sourceSetUpOption":{ + "shape":"SourceSetUpOption", + "documentation":"

    The setup option for the data source, which reflects whether the evidence collection is automated or manual.

    " + }, + "sourceType":{ + "shape":"SourceType", + "documentation":"

    Specifies one of the five types of data sources for evidence collection.

    " + }, + "sourceKeyword":{"shape":"SourceKeyword"}, + "sourceFrequency":{ + "shape":"SourceFrequency", + "documentation":"

    The frequency of evidence collection for the specified control mapping source.

    " + }, + "troubleshootingText":{ + "shape":"TroubleshootingText", + "documentation":"

    The instructions for troubleshooting the specified control.

    " + } + }, + "documentation":"

    The data source that determines from where AWS Audit Manager collects evidence for the control.

    " + }, + "ControlMappingSources":{ + "type":"list", + "member":{"shape":"ControlMappingSource"}, + "min":1 + }, + "ControlMetadata":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"AuditManagerArn", + "documentation":"

    The Amazon Resource Name (ARN) of the specified control.

    " + }, + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the specified control.

    " + }, + "name":{ + "shape":"ControlName", + "documentation":"

    The name of the specified control.

    " + }, + "controlSources":{ + "shape":"ControlSources", + "documentation":"

    The data source that determines from where AWS Audit Manager collects evidence for the control.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the control was created.

    " + }, + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the control was most recently updated.

    " + } + }, + "documentation":"

    The metadata associated with the specified standard or custom control.

    " + }, + "ControlMetadataList":{ + "type":"list", + "member":{"shape":"ControlMetadata"} + }, + "ControlName":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "ControlResponse":{ + "type":"string", + "enum":[ + "MANUAL", + "AUTOMATE", + "DEFER", + "IGNORE" + ] + }, + "ControlSet":{ + "type":"structure", + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

    The identifier of the control set in the assessment. This is the control set name in a plain string format.

    " + }, + "name":{ + "shape":"ControlSetName", + "documentation":"

    The name of the control set.

    " + }, + "controls":{ + "shape":"Controls", + "documentation":"

    The list of controls within the control set.

    " + } + }, + "documentation":"

    A set of controls in AWS Audit Manager.

    " + }, + "ControlSetId":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "ControlSetName":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^[^\\_]*$" + }, + "ControlSetStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "UNDER_REVIEW", + "REVIEWED" + ] + }, + "ControlSets":{ + "type":"list", + "member":{"shape":"ControlSet"}, + "min":1 + }, + "ControlSetsCount":{"type":"integer"}, + "ControlSources":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z_0-9-\\s.,]+$" + }, + "ControlStatus":{ + "type":"string", + "enum":[ + "UNDER_REVIEW", + "REVIEWED", + "INACTIVE" + ] + }, + "ControlType":{ + "type":"string", + "enum":[ + "Standard", + "Custom" + ] + }, + "Controls":{ + "type":"list", + "member":{"shape":"Control"}, + "min":1 + }, + "ControlsCount":{"type":"integer"}, + "CreateAssessmentFrameworkControl":{ + "type":"structure", + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier of the control.

    " + } + }, + "documentation":"

    Control entity attributes that uniquely identify an existing control to be added to a framework in AWS Audit Manager.

    " + }, + "CreateAssessmentFrameworkControlSet":{ + "type":"structure", + "members":{ + "name":{ + "shape":"ControlSetName", + "documentation":"

    The name of the specified control set.

    " + }, + "controls":{ + "shape":"CreateAssessmentFrameworkControls", + "documentation":"

    The list of controls within the control set. This does not contain the control set ID.

    " + } + }, + "documentation":"

    A controlSet entity that represents a collection of controls in AWS Audit Manager. This does not contain the control set ID.

    " + }, + "CreateAssessmentFrameworkControlSets":{ + "type":"list", + "member":{"shape":"CreateAssessmentFrameworkControlSet"}, + "min":1 + }, + "CreateAssessmentFrameworkControls":{ + "type":"list", + "member":{"shape":"CreateAssessmentFrameworkControl"}, + "min":1 + }, + "CreateAssessmentFrameworkRequest":{ + "type":"structure", + "required":[ + "name", + "controlSets" + ], + "members":{ + "name":{ + "shape":"FrameworkName", + "documentation":"

    The name of the new custom framework.

    " + }, + "description":{ + "shape":"FrameworkDescription", + "documentation":"

    An optional description for the new custom framework.

    " + }, + "complianceType":{ + "shape":"ComplianceType", + "documentation":"

    The compliance type that the new custom framework supports, such as CIS or HIPAA.

    " + }, + "controlSets":{ + "shape":"CreateAssessmentFrameworkControlSets", + "documentation":"

    The control sets to be associated with the framework.

    " + } + } + }, + "CreateAssessmentFrameworkResponse":{ + "type":"structure", + "members":{ + "framework":{ + "shape":"Framework", + "documentation":"

    The name of the new framework returned by the CreateAssessmentFramework API.

    " + } + } + }, + "CreateAssessmentReportRequest":{ + "type":"structure", + "required":[ + "name", + "assessmentId" + ], + "members":{ + "name":{ + "shape":"AssessmentReportName", + "documentation":"

    The name of the new assessment report.

    " + }, + "description":{ + "shape":"AssessmentReportDescription", + "documentation":"

    The description of the assessment report.

    " + }, + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + } + } + }, + "CreateAssessmentReportResponse":{ + "type":"structure", + "members":{ + "assessmentReport":{ + "shape":"AssessmentReport", + "documentation":"

    The new assessment report returned by the CreateAssessmentReport API.

    " + } + } + }, + "CreateAssessmentRequest":{ + "type":"structure", + "required":[ + "name", + "assessmentReportsDestination", + "scope", + "roles", + "frameworkId" + ], + "members":{ + "name":{ + "shape":"AssessmentName", + "documentation":"

    The name of the assessment to be created.

    " + }, + "description":{ + "shape":"AssessmentDescription", + "documentation":"

    The optional description of the assessment to be created.

    " + }, + "assessmentReportsDestination":{ + "shape":"AssessmentReportsDestination", + "documentation":"

    The assessment report storage destination for the specified assessment that is being created.

    " + }, + "scope":{"shape":"Scope"}, + "roles":{ + "shape":"Roles", + "documentation":"

    The list of roles for the specified assessment.

    " + }, + "frameworkId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified framework.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags associated with the assessment.

    " + } + } + }, + "CreateAssessmentResponse":{ + "type":"structure", + "members":{ + "assessment":{"shape":"Assessment"} + } + }, + "CreateControlMappingSource":{ + "type":"structure", + "members":{ + "sourceName":{ + "shape":"SourceName", + "documentation":"

    The name of the control mapping data source.

    " + }, + "sourceDescription":{ + "shape":"SourceDescription", + "documentation":"

    The description of the data source that determines from where AWS Audit Manager collects evidence for the control.

    " + }, + "sourceSetUpOption":{ + "shape":"SourceSetUpOption", + "documentation":"

    The setup option for the data source, which reflects whether the evidence collection is automated or manual.

    " + }, + "sourceType":{ + "shape":"SourceType", + "documentation":"

    Specifies one of the five types of data sources for evidence collection.

    " + }, + "sourceKeyword":{"shape":"SourceKeyword"}, + "sourceFrequency":{ + "shape":"SourceFrequency", + "documentation":"

    The frequency of evidence collection for the specified control mapping source.

    " + }, + "troubleshootingText":{ + "shape":"TroubleshootingText", + "documentation":"

    The instructions for troubleshooting the specified control.

    " + } + }, + "documentation":"

    Control mapping fields that represent the source for evidence collection, along with related parameters and metadata. This does not contain mappingID.

    " + }, + "CreateControlMappingSources":{ + "type":"list", + "member":{"shape":"CreateControlMappingSource"}, + "min":1 + }, + "CreateControlRequest":{ + "type":"structure", + "required":[ + "name", + "controlMappingSources" + ], + "members":{ + "name":{ + "shape":"ControlName", + "documentation":"

    The name of the control.

    " + }, + "description":{ + "shape":"ControlDescription", + "documentation":"

    The description of the control.

    " + }, + "testingInformation":{ + "shape":"TestingInformation", + "documentation":"

    The steps to follow to determine if the control has been satisfied.

    " + }, + "actionPlanTitle":{ + "shape":"ActionPlanTitle", + "documentation":"

    The title of the action plan for remediating the control.

    " + }, + "actionPlanInstructions":{ + "shape":"ActionPlanInstructions", + "documentation":"

    The recommended actions to carry out if the control is not fulfilled.

    " + }, + "controlMappingSources":{ + "shape":"CreateControlMappingSources", + "documentation":"

    The data source that determines from where AWS Audit Manager collects evidence for the control.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags associated with the control.

    " + } + } + }, + "CreateControlResponse":{ + "type":"structure", + "members":{ + "control":{ + "shape":"Control", + "documentation":"

    The new control returned by the CreateControl API.

    " + } + } + }, + "CreateDelegationRequest":{ + "type":"structure", + "members":{ + "comment":{ + "shape":"DelegationComment", + "documentation":"

    A comment related to the delegation request.

    " + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The unique identifier for the control set.

    " + }, + "roleArn":{ + "shape":"IamArn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role.

    " + }, + "roleType":{ + "shape":"RoleType", + "documentation":"

    The type of customer persona.

    In CreateAssessment, roleType can only be PROCESS_OWNER.

    In UpdateSettings, roleType can only be PROCESS_OWNER.

    In BatchCreateDelegationByAssessment, roleType can only be RESOURCE_OWNER.

    " + } + }, + "documentation":"

    A collection of attributes used to create a delegation for an assessment in AWS Audit Manager.

    " + }, + "CreateDelegationRequests":{ + "type":"list", + "member":{"shape":"CreateDelegationRequest"}, + "max":50, + "min":1 + }, + "CreatedBy":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$" + }, + "Delegation":{ + "type":"structure", + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the delegation.

    " + }, + "assessmentName":{ + "shape":"AssessmentName", + "documentation":"

    The name of the associated assessment.

    " + }, + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the associated assessment.

    " + }, + "status":{ + "shape":"DelegationStatus", + "documentation":"

    The status of the delegation.

    " + }, + "roleArn":{ + "shape":"IamArn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role.

    " + }, + "roleType":{ + "shape":"RoleType", + "documentation":"

    The type of customer persona.

    In CreateAssessment, roleType can only be PROCESS_OWNER.

    In UpdateSettings, roleType can only be PROCESS_OWNER.

    In BatchCreateDelegationByAssessment, roleType can only be RESOURCE_OWNER.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the delegation was created.

    " + }, + "lastUpdated":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the delegation was last updated.

    " + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The identifier for the associated control set.

    " + }, + "comment":{ + "shape":"DelegationComment", + "documentation":"

    The comment related to the delegation.

    " + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

    The IAM user or role that created the delegation.

    " + } + }, + "documentation":"

    The assignment of a control set to a delegate for review.

    " + }, + "DelegationComment":{ + "type":"string", + "max":350, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "DelegationIds":{ + "type":"list", + "member":{"shape":"UUID"}, + "max":50, + "min":1 + }, + "DelegationMetadata":{ + "type":"structure", + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the delegation.

    " + }, + "assessmentName":{ + "shape":"AssessmentName", + "documentation":"

    The name of the associated assessment.

    " + }, + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the specified assessment.

    " + }, + "status":{ + "shape":"DelegationStatus", + "documentation":"

    The current status of the delegation.

    " + }, + "roleArn":{ + "shape":"IamArn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the delegation was created.

    " + }, + "controlSetName":{ + "shape":"NonEmptyString", + "documentation":"

    Specifies the name of the control set delegated for review.

    " + } + }, + "documentation":"

    The metadata associated with the specified delegation.

    " + }, + "DelegationMetadataList":{ + "type":"list", + "member":{"shape":"DelegationMetadata"} + }, + "DelegationStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "UNDER_REVIEW", + "COMPLETE" + ] + }, + "Delegations":{ + "type":"list", + "member":{"shape":"Delegation"} + }, + "DeleteAssessmentFrameworkRequest":{ + "type":"structure", + "required":["frameworkId"], + "members":{ + "frameworkId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified framework.

    ", + "location":"uri", + "locationName":"frameworkId" + } + } + }, + "DeleteAssessmentFrameworkResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteAssessmentReportRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "assessmentReportId" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "assessmentReportId":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the assessment report.

    ", + "location":"uri", + "locationName":"assessmentReportId" + } + } + }, + "DeleteAssessmentReportResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteAssessmentRequest":{ + "type":"structure", + "required":["assessmentId"], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + } + } + }, + "DeleteAssessmentResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteControlRequest":{ + "type":"structure", + "required":["controlId"], + "members":{ + "controlId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified control.

    ", + "location":"uri", + "locationName":"controlId" + } + } + }, + "DeleteControlResponse":{ + "type":"structure", + "members":{ + } + }, + "DeregisterAccountRequest":{ + "type":"structure", + "members":{ + } + }, + "DeregisterAccountResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"AccountStatus", + "documentation":"

    The registration status of the account.

    " + } + } + }, + "DeregisterOrganizationAdminAccountRequest":{ + "type":"structure", + "members":{ + "adminAccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier for the specified administrator account.

    " + } + } + }, + "DeregisterOrganizationAdminAccountResponse":{ + "type":"structure", + "members":{ + } + }, + "DisassociateAssessmentReportEvidenceFolderRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "evidenceFolderId" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "evidenceFolderId":{ + "shape":"UUID", + "documentation":"

    The identifier for the folder in which evidence is stored.

    " + } + } + }, + "DisassociateAssessmentReportEvidenceFolderResponse":{ + "type":"structure", + "members":{ + } + }, + "EmailAddress":{ + "type":"string", + "max":320, + "min":1, + "pattern":"^.*@.*$" + }, + "ErrorCode":{ + "type":"string", + "max":3, + "min":3, + "pattern":"[0-9]{3}" + }, + "ErrorMessage":{ + "type":"string", + "max":300, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "EventName":{ + "type":"string", + "max":100, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "Evidence":{ + "type":"structure", + "members":{ + "dataSource":{ + "shape":"String", + "documentation":"

    The data source from which the specified evidence was collected.

    " + }, + "evidenceAwsAccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier for the specified AWS account.

    " + }, + "time":{ + "shape":"Timestamp", + "documentation":"

    The timestamp that represents when the evidence was collected.

    " + }, + "eventSource":{ + "shape":"AWSServiceName", + "documentation":"

    The AWS service from which the evidence is collected.

    " + }, + "eventName":{ + "shape":"EventName", + "documentation":"

    The name of the specified evidence event.

    " + }, + "evidenceByType":{ + "shape":"String", + "documentation":"

    The type of automated evidence.

    " + }, + "resourcesIncluded":{ + "shape":"Resources", + "documentation":"

    The list of resources assessed to generate the evidence.

    " + }, + "attributes":{ + "shape":"EvidenceAttributes", + "documentation":"

    The names and values used by the evidence event, including an attribute name (such as allowUsersToChangePassword) and value (such as true or false).

    " + }, + "iamId":{ + "shape":"IamArn", + "documentation":"

    The unique identifier for the IAM user or role associated with the evidence.

    " + }, + "complianceCheck":{ + "shape":"String", + "documentation":"

    The evaluation status for evidence that falls under the compliance check category. For evidence collected from AWS Security Hub, a Pass or Fail result is shown. For evidence collected from AWS Config, a Compliant or Noncompliant result is shown.

    " + }, + "awsOrganization":{ + "shape":"String", + "documentation":"

    The AWS account from which the evidence is collected, and its AWS organization path.

    " + }, + "awsAccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier for the specified AWS account.

    " + }, + "evidenceFolderId":{ + "shape":"UUID", + "documentation":"

    The identifier for the folder in which the evidence is stored.

    " + }, + "id":{ + "shape":"UUID", + "documentation":"

    The identifier for the evidence.

    " + }, + "assessmentReportSelection":{ + "shape":"String", + "documentation":"

    Specifies whether the evidence is included in the assessment report.

    " + } + }, + "documentation":"

    A record that contains the information needed to demonstrate compliance with the requirements specified by a control. Examples of evidence include change activity triggered by a user, or a system configuration snapshot.

    " + }, + "EvidenceAttributeKey":{ + "type":"string", + "max":100, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "EvidenceAttributeValue":{ + "type":"string", + "max":200, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "EvidenceAttributes":{ + "type":"map", + "key":{"shape":"EvidenceAttributeKey"}, + "value":{"shape":"EvidenceAttributeValue"} + }, + "EvidenceIds":{ + "type":"list", + "member":{"shape":"UUID"}, + "max":50, + "min":0 + }, + "EvidenceList":{ + "type":"list", + "member":{"shape":"Evidence"} + }, + "EvidenceSources":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, + "Filename":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[\\w,\\s-]+\\.[A-Za-z]+$" + }, + "Framework":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"AuditManagerArn", + "documentation":"

    The Amazon Resource Name (ARN) of the specified framework.

    " + }, + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the specified framework.

    " + }, + "name":{ + "shape":"FrameworkName", + "documentation":"

    The name of the specified framework.

    " + }, + "type":{ + "shape":"FrameworkType", + "documentation":"

    The framework type, such as custom or standard.

    " + }, + "complianceType":{ + "shape":"ComplianceType", + "documentation":"

    The compliance type that the new custom framework supports, such as CIS or HIPAA.

    " + }, + "description":{ + "shape":"FrameworkDescription", + "documentation":"

    The description of the specified framework.

    " + }, + "logo":{ + "shape":"Filename", + "documentation":"

    The logo associated with the framework.

    " + }, + "controlSources":{ + "shape":"ControlSources", + "documentation":"

    The sources from which AWS Audit Manager collects evidence for the control.

    " + }, + "controlSets":{ + "shape":"ControlSets", + "documentation":"

    The control sets associated with the framework.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the framework was created.

    " + }, + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    Specifies when the framework was most recently updated.

    " + }, + "createdBy":{ + "shape":"CreatedBy", + "documentation":"

    The IAM user or role that created the framework.

    " + }, + "lastUpdatedBy":{ + "shape":"LastUpdatedBy", + "documentation":"

    The IAM user or role that most recently updated the framework.

    " + } + }, + "documentation":"

    The file used to structure and automate AWS Audit Manager assessments for a given compliance standard.

    " + }, + "FrameworkDescription":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "FrameworkMetadata":{ + "type":"structure", + "members":{ + "name":{ + "shape":"AssessmentName", + "documentation":"

    The name of the framework.

    " + }, + "description":{ + "shape":"AssessmentFrameworkDescription", + "documentation":"

    The description of the framework.

    " + }, + "logo":{ + "shape":"Filename", + "documentation":"

    The logo associated with the framework.

    " + }, + "complianceType":{ + "shape":"ComplianceType", + "documentation":"

    The compliance standard associated with the framework, such as PCI-DSS or HIPAA.

    " + } + }, + "documentation":"

    The metadata of a framework, such as the name, ID, description, and so on.

    " + }, + "FrameworkMetadataList":{ + "type":"list", + "member":{"shape":"AssessmentFrameworkMetadata"} + }, + "FrameworkName":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "FrameworkType":{ + "type":"string", + "enum":[ + "Standard", + "Custom" + ] + }, + "GenericArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:.*" + }, + "GetAccountStatusRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountStatusResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"AccountStatus", + "documentation":"

    The status of the specified AWS account.

    " + } + } + }, + "GetAssessmentFrameworkRequest":{ + "type":"structure", + "required":["frameworkId"], + "members":{ + "frameworkId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified framework.

    ", + "location":"uri", + "locationName":"frameworkId" + } + } + }, + "GetAssessmentFrameworkResponse":{ + "type":"structure", + "members":{ + "framework":{ + "shape":"Framework", + "documentation":"

    The framework returned by the GetAssessmentFramework API.

    " + } + } + }, + "GetAssessmentReportUrlRequest":{ + "type":"structure", + "required":[ + "assessmentReportId", + "assessmentId" + ], + "members":{ + "assessmentReportId":{ + "shape":"UUID", + "documentation":"

    The identifier for the assessment report.

    ", + "location":"uri", + "locationName":"assessmentReportId" + }, + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + } + } + }, + "GetAssessmentReportUrlResponse":{ + "type":"structure", + "members":{ + "preSignedUrl":{"shape":"URL"} + } + }, + "GetAssessmentRequest":{ + "type":"structure", + "required":["assessmentId"], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + } + } + }, + "GetAssessmentResponse":{ + "type":"structure", + "members":{ + "assessment":{"shape":"Assessment"} + } + }, + "GetChangeLogsRequest":{ + "type":"structure", + "required":["assessmentId"], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The identifier for the specified control set.

    ", + "location":"querystring", + "locationName":"controlSetId" + }, + "controlId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified control.

    ", + "location":"querystring", + "locationName":"controlId" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "GetChangeLogsResponse":{ + "type":"structure", + "members":{ + "changeLogs":{ + "shape":"ChangeLogs", + "documentation":"

    The list of user activity for the control.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "GetControlRequest":{ + "type":"structure", + "required":["controlId"], + "members":{ + "controlId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified control.

    ", + "location":"uri", + "locationName":"controlId" + } + } + }, + "GetControlResponse":{ + "type":"structure", + "members":{ + "control":{ + "shape":"Control", + "documentation":"

    The name of the control returned by the GetControl API.

    " + } + } + }, + "GetDelegationsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "GetDelegationsResponse":{ + "type":"structure", + "members":{ + "delegations":{ + "shape":"DelegationMetadataList", + "documentation":"

    The list of delegations returned by the GetDelegations API.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "GetEvidenceByEvidenceFolderRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "controlSetId", + "evidenceFolderId" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The identifier for the control set.

    ", + "location":"uri", + "locationName":"controlSetId" + }, + "evidenceFolderId":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the folder in which the evidence is stored.

    ", + "location":"uri", + "locationName":"evidenceFolderId" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "GetEvidenceByEvidenceFolderResponse":{ + "type":"structure", + "members":{ + "evidence":{ + "shape":"EvidenceList", + "documentation":"

    The list of evidence returned by the GetEvidenceByEvidenceFolder API.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "GetEvidenceFolderRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "controlSetId", + "evidenceFolderId" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The identifier for the specified control set.

    ", + "location":"uri", + "locationName":"controlSetId" + }, + "evidenceFolderId":{ + "shape":"UUID", + "documentation":"

    The identifier for the folder in which the evidence is stored.

    ", + "location":"uri", + "locationName":"evidenceFolderId" + } + } + }, + "GetEvidenceFolderResponse":{ + "type":"structure", + "members":{ + "evidenceFolder":{ + "shape":"AssessmentEvidenceFolder", + "documentation":"

    The folder in which evidence is stored.

    " + } + } + }, + "GetEvidenceFoldersByAssessmentControlRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "controlSetId", + "controlId" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The identifier for the specified control set.

    ", + "location":"uri", + "locationName":"controlSetId" + }, + "controlId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified control.

    ", + "location":"uri", + "locationName":"controlId" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "GetEvidenceFoldersByAssessmentControlResponse":{ + "type":"structure", + "members":{ + "evidenceFolders":{ + "shape":"AssessmentEvidenceFolders", + "documentation":"

    The list of evidence folders returned by the GetEvidenceFoldersByAssessmentControl API.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "GetEvidenceFoldersByAssessmentRequest":{ + "type":"structure", + "required":["assessmentId"], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "GetEvidenceFoldersByAssessmentResponse":{ + "type":"structure", + "members":{ + "evidenceFolders":{ + "shape":"AssessmentEvidenceFolders", + "documentation":"

    The list of evidence folders returned by the GetEvidenceFoldersByAssessment API.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "GetEvidenceRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "controlSetId", + "evidenceFolderId", + "evidenceId" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The identifier for the specified control set.

    ", + "location":"uri", + "locationName":"controlSetId" + }, + "evidenceFolderId":{ + "shape":"UUID", + "documentation":"

    The identifier for the folder in which the evidence is stored.

    ", + "location":"uri", + "locationName":"evidenceFolderId" + }, + "evidenceId":{ + "shape":"UUID", + "documentation":"

    The identifier for the evidence.

    ", + "location":"uri", + "locationName":"evidenceId" + } + } + }, + "GetEvidenceResponse":{ + "type":"structure", + "members":{ + "evidence":{ + "shape":"Evidence", + "documentation":"

    The evidence returned by the GetEvidence API.

    " + } + } + }, + "GetOrganizationAdminAccountRequest":{ + "type":"structure", + "members":{ + } + }, + "GetOrganizationAdminAccountResponse":{ + "type":"structure", + "members":{ + "adminAccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier for the specified administrator account.

    " + }, + "organizationId":{ + "shape":"organizationId", + "documentation":"

    The identifier for the specified organization.

    " + } + } + }, + "GetServicesInScopeRequest":{ + "type":"structure", + "members":{ + } + }, + "GetServicesInScopeResponse":{ + "type":"structure", + "members":{ + "serviceMetadata":{ + "shape":"ServiceMetadataList", + "documentation":"

    The metadata associated with the AWS service.

    " + } + } + }, + "GetSettingsRequest":{ + "type":"structure", + "required":["attribute"], + "members":{ + "attribute":{ + "shape":"SettingAttribute", + "documentation":"

    The list of SettingAttribute enum values.

    ", + "location":"uri", + "locationName":"attribute" + } + } + }, + "GetSettingsResponse":{ + "type":"structure", + "members":{ + "settings":{ + "shape":"Settings", + "documentation":"

    The settings object that holds all supported AWS Audit Manager settings.

    " + } + } + }, + "HyperlinkName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "IamArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:.*:iam:.*" + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    An internal service error occurred during the processing of your request. Try again later.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "KeywordInputType":{ + "type":"string", + "enum":["SELECT_FROM_LIST"] + }, + "KeywordValue":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z_0-9-\\s().]+$" + }, + "Keywords":{ + "type":"list", + "member":{"shape":"KeywordValue"} + }, + "KmsKey":{ + "type":"string", + "max":2048, + "min":7, + "pattern":"^arn:.*:kms:.*|DEFAULT" + }, + "LastUpdatedBy":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$" + }, + "ListAssessmentFrameworksRequest":{ + "type":"structure", + "required":["frameworkType"], + "members":{ + "frameworkType":{ + "shape":"FrameworkType", + "documentation":"

    The type of framework, such as standard or custom.

    ", + "location":"querystring", + "locationName":"frameworkType" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssessmentFrameworksResponse":{ + "type":"structure", + "members":{ + "frameworkMetadataList":{ + "shape":"FrameworkMetadataList", + "documentation":"

    The list of metadata objects for the specified framework.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "ListAssessmentMetadata":{ + "type":"list", + "member":{"shape":"AssessmentMetadataItem"} + }, + "ListAssessmentReportsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssessmentReportsResponse":{ + "type":"structure", + "members":{ + "assessmentReports":{ + "shape":"AssessmentReportsMetadata", + "documentation":"

    The list of assessment reports returned by the ListAssessmentReports API.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "ListAssessmentsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssessmentsResponse":{ + "type":"structure", + "members":{ + "assessmentMetadata":{ + "shape":"ListAssessmentMetadata", + "documentation":"

    The metadata associated with the assessment.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "ListControlsRequest":{ + "type":"structure", + "required":["controlType"], + "members":{ + "controlType":{ + "shape":"ControlType", + "documentation":"

    The type of control, such as standard or custom.

    ", + "location":"querystring", + "locationName":"controlType" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListControlsResponse":{ + "type":"structure", + "members":{ + "controlMetadataList":{ + "shape":"ControlMetadataList", + "documentation":"

    The list of control metadata objects returned by the ListControls API.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "ListKeywordsForDataSourceRequest":{ + "type":"structure", + "required":["source"], + "members":{ + "source":{ + "shape":"SourceType", + "documentation":"

    The control mapping data source to which the keywords apply.

    ", + "location":"querystring", + "locationName":"source" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListKeywordsForDataSourceResponse":{ + "type":"structure", + "members":{ + "keywords":{ + "shape":"Keywords", + "documentation":"

    The list of keywords for the specified event mapping source.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "ListNotificationsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Represents the maximum number of results per page, or per API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListNotificationsResponse":{ + "type":"structure", + "members":{ + "notifications":{ + "shape":"Notifications", + "documentation":"

    The returned list of notifications.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    The pagination token used to fetch the next set of results.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"AuditManagerArn", + "documentation":"

    The Amazon Resource Name (ARN) of the specified resource.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    The list of tags returned by the ListTagsForResource API.

    " + } + } + }, + "ManualEvidence":{ + "type":"structure", + "members":{ + "s3ResourcePath":{ + "shape":"S3Url", + "documentation":"

    The Amazon S3 URL that points to a manual evidence object.

    " + } + }, + "documentation":"

    Evidence that is uploaded to AWS Audit Manager manually.

    " + }, + "ManualEvidenceList":{ + "type":"list", + "member":{"shape":"ManualEvidence"}, + "max":50, + "min":1 + }, + "MaxResults":{ + "type":"integer", + "documentation":"Max results in the page.", + "max":1000, + "min":1 + }, + "NonEmptyString":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".*\\S.*" + }, + "Notification":{ + "type":"structure", + "members":{ + "id":{ + "shape":"TimestampUUID", + "documentation":"

    The unique identifier for the notification.

    " + }, + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    " + }, + "assessmentName":{ + "shape":"AssessmentName", + "documentation":"

    The name of the related assessment.

    " + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The identifier for the specified control set.

    " + }, + "controlSetName":{ + "shape":"NonEmptyString", + "documentation":"

    Specifies the name of the control set that the notification is about.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the notification.

    " + }, + "eventTime":{ + "shape":"Timestamp", + "documentation":"

    The time when the notification was sent.

    " + }, + "source":{ + "shape":"NonEmptyString", + "documentation":"

    The sender of the notification.

    " + } + }, + "documentation":"

    The notification used to inform a user of an update in AWS Audit Manager. For example, this includes the notification that is sent when a control set is delegated for review.

    " + }, + "Notifications":{ + "type":"list", + "member":{"shape":"Notification"} + }, + "ObjectTypeEnum":{ + "type":"string", + "enum":[ + "ASSESSMENT", + "CONTROL_SET", + "CONTROL", + "DELEGATION", + "ASSESSMENT_REPORT" + ] + }, + "RegisterAccountRequest":{ + "type":"structure", + "members":{ + "kmsKey":{ + "shape":"KmsKey", + "documentation":"

    The AWS KMS key details.

    " + }, + "delegatedAdminAccount":{ + "shape":"AccountId", + "documentation":"

    The delegated administrator account for AWS Audit Manager.

    " + } + } + }, + "RegisterAccountResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"AccountStatus", + "documentation":"

    The status of the account registration request.

    " + } + } + }, + "RegisterOrganizationAdminAccountRequest":{ + "type":"structure", + "required":["adminAccountId"], + "members":{ + "adminAccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier for the specified delegated administrator account.

    " + } + } + }, + "RegisterOrganizationAdminAccountResponse":{ + "type":"structure", + "members":{ + "adminAccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier for the specified delegated administrator account.

    " + }, + "organizationId":{ + "shape":"organizationId", + "documentation":"

    The identifier for the specified AWS organization.

    " + } + } + }, + "Resource":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"GenericArn", + "documentation":"

    The Amazon Resource Name (ARN) for the specified resource.

    " + }, + "value":{ + "shape":"String", + "documentation":"

    The value of the specified resource.

    " + } + }, + "documentation":"

    A system asset that is evaluated in an AWS Audit Manager assessment.

    " + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The unique identifier for the specified resource.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of resource affected by the error.

    " + } + }, + "documentation":"

    The resource specified in the request cannot be found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "Resources":{ + "type":"list", + "member":{"shape":"Resource"} + }, + "Role":{ + "type":"structure", + "members":{ + "roleType":{ + "shape":"RoleType", + "documentation":"

    The type of customer persona.

    In CreateAssessment, roleType can only be PROCESS_OWNER.

    In UpdateSettings, roleType can only be PROCESS_OWNER.

    In BatchCreateDelegationByAssessment, roleType can only be RESOURCE_OWNER.

    " + }, + "roleArn":{ + "shape":"IamArn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role.

    " + } + }, + "documentation":"

    The wrapper that contains AWS Audit Manager role information, such as the role type and IAM Amazon Resource Name (ARN).

    " + }, + "RoleType":{ + "type":"string", + "enum":[ + "PROCESS_OWNER", + "RESOURCE_OWNER" + ] + }, + "Roles":{ + "type":"list", + "member":{"shape":"Role"} + }, + "S3Url":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^(S|s)3:\\/\\/[a-zA-Z0-9-_\\/.]+$" + }, + "SNSTopic":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9-_\\(\\)\\[\\]]+$" + }, + "Scope":{ + "type":"structure", + "members":{ + "awsAccounts":{ + "shape":"AWSAccounts", + "documentation":"

    The AWS accounts included in the scope of the assessment.

    " + }, + "awsServices":{ + "shape":"AWSServices", + "documentation":"

    The AWS services included in the scope of the assessment.

    " + } + }, + "documentation":"

    The wrapper that contains the AWS accounts and AWS services in scope for the assessment.

    " + }, + "ServiceMetadata":{ + "type":"structure", + "members":{ + "name":{ + "shape":"AWSServiceName", + "documentation":"

    The name of the AWS service.

    " + }, + "displayName":{ + "shape":"NonEmptyString", + "documentation":"

    The display name of the AWS service.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the specified AWS service.

    " + }, + "category":{ + "shape":"NonEmptyString", + "documentation":"

    The category in which the AWS service belongs, such as compute, storage, database, and so on.

    " + } + }, + "documentation":"

    The metadata associated with the specified AWS service.

    " + }, + "ServiceMetadataList":{ + "type":"list", + "member":{"shape":"ServiceMetadata"} + }, + "SettingAttribute":{ + "type":"string", + "enum":[ + "ALL", + "IS_AWS_ORG_ENABLED", + "SNS_TOPIC", + "DEFAULT_ASSESSMENT_REPORTS_DESTINATION", + "DEFAULT_PROCESS_OWNERS" + ] + }, + "Settings":{ + "type":"structure", + "members":{ + "isAwsOrgEnabled":{ + "shape":"Boolean", + "documentation":"

    Specifies whether AWS Organizations is enabled.

    " + }, + "snsTopic":{ + "shape":"SNSTopic", + "documentation":"

    The designated Amazon Simple Notification Service (Amazon SNS) topic.

    " + }, + "defaultAssessmentReportsDestination":{ + "shape":"AssessmentReportsDestination", + "documentation":"

    The default storage destination for assessment reports.

    " + }, + "defaultProcessOwners":{ + "shape":"Roles", + "documentation":"

    The designated default audit owners.

    " + }, + "kmsKey":{ + "shape":"KmsKey", + "documentation":"

    The AWS KMS key details.

    " + } + }, + "documentation":"

    The settings object that holds all supported AWS Audit Manager settings.

    " + }, + "SnsArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:.*:sns:.*" + }, + "SourceDescription":{ + "type":"string", + "max":1000, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "SourceFrequency":{ + "type":"string", + "enum":[ + "DAILY", + "WEEKLY", + "MONTHLY" + ] + }, + "SourceKeyword":{ + "type":"structure", + "members":{ + "keywordInputType":{ + "shape":"KeywordInputType", + "documentation":"

    The method of input for the specified keyword.

    " + }, + "keywordValue":{ + "shape":"KeywordValue", + "documentation":"

    The value of the keyword used to search AWS CloudTrail logs when mapping a control data source.

    " + } + }, + "documentation":"

    The keyword to search for in AWS CloudTrail logs.

    " + }, + "SourceName":{ + "type":"string", + "max":100, + "min":1 + }, + "SourceSetUpOption":{ + "type":"string", + "enum":[ + "System_Controls_Mapping", + "Procedural_Controls_Mapping" + ] + }, + "SourceType":{ + "type":"string", + "enum":[ + "AWS_Cloudtrail", + "AWS_Config", + "AWS_Security_Hub", + "AWS_API_Call", + "MANUAL" + ] + }, + "String":{ + "type":"string", + "max":2048, + "min":0, + "pattern":".*" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"AuditManagerArn", + "documentation":"

    The Amazon Resource Name (ARN) of the specified resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags to be associated with the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":".{0,255}" + }, + "TestingInformation":{ + "type":"string", + "max":1000, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "Timestamp":{"type":"timestamp"}, + "TimestampUUID":{ + "type":"string", + "max":50, + "min":47, + "pattern":"^[0-9]{10,13}_[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "Token":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^[A-Za-z0-9+\\/=]*$" + }, + "TroubleshootingText":{ + "type":"string", + "max":1000, + "pattern":"^[\\w\\W\\s\\S]*$" + }, + "URL":{ + "type":"structure", + "members":{ + "hyperlinkName":{ + "shape":"HyperlinkName", + "documentation":"

    The name or word used as a hyperlink to the URL.

    " + }, + "link":{ + "shape":"UrlLink", + "documentation":"

    The unique identifier for the internet resource.

    " + } + }, + "documentation":"

    A uniform resource locator, used as a unique identifier to locate a resource on the internet.

    " + }, + "UUID":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"AuditManagerArn", + "documentation":"

    The Amazon Resource Name (ARN) of the specified resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The name or key of the tag.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAssessmentControlRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "controlSetId", + "controlId" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "controlSetId":{ + "shape":"ControlSetId", + "documentation":"

    The identifier for the specified control set.

    ", + "location":"uri", + "locationName":"controlSetId" + }, + "controlId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified control.

    ", + "location":"uri", + "locationName":"controlId" + }, + "controlStatus":{ + "shape":"ControlStatus", + "documentation":"

    The status of the specified control.

    " + }, + "commentBody":{ + "shape":"ControlCommentBody", + "documentation":"

    The comment body text for the specified control.

    " + } + } + }, + "UpdateAssessmentControlResponse":{ + "type":"structure", + "members":{ + "control":{ + "shape":"AssessmentControl", + "documentation":"

    The name of the updated control returned by the UpdateAssessmentControl API.

    " + } + } + }, + "UpdateAssessmentControlSetStatusRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "controlSetId", + "status", + "comment" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "controlSetId":{ + "shape":"String", + "documentation":"

    The identifier for the specified control set.

    ", + "location":"uri", + "locationName":"controlSetId" + }, + "status":{ + "shape":"ControlSetStatus", + "documentation":"

    The status of the control set that is being updated.

    " + }, + "comment":{ + "shape":"DelegationComment", + "documentation":"

    The comment related to the status update.

    " + } + } + }, + "UpdateAssessmentControlSetStatusResponse":{ + "type":"structure", + "members":{ + "controlSet":{ + "shape":"AssessmentControlSet", + "documentation":"

    The name of the updated control set returned by the UpdateAssessmentControlSetStatus API.

    " + } + } + }, + "UpdateAssessmentFrameworkControlSet":{ + "type":"structure", + "members":{ + "id":{ + "shape":"UUID", + "documentation":"

    The unique identifier for the control set.

    " + }, + "name":{ + "shape":"ControlSetName", + "documentation":"

    The name of the control set.

    " + }, + "controls":{ + "shape":"CreateAssessmentFrameworkControls", + "documentation":"

    The list of controls contained within the control set.

    " + } + }, + "documentation":"

    A controlSet entity that represents a collection of controls in AWS Audit Manager. This does not contain the control set ID.

    " + }, + "UpdateAssessmentFrameworkControlSets":{ + "type":"list", + "member":{"shape":"UpdateAssessmentFrameworkControlSet"} + }, + "UpdateAssessmentFrameworkRequest":{ + "type":"structure", + "required":[ + "frameworkId", + "name", + "controlSets" + ], + "members":{ + "frameworkId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified framework.

    ", + "location":"uri", + "locationName":"frameworkId" + }, + "name":{ + "shape":"FrameworkName", + "documentation":"

    The name of the framework to be updated.

    " + }, + "description":{ + "shape":"FrameworkDescription", + "documentation":"

    The description of the framework that is to be updated.

    " + }, + "complianceType":{ + "shape":"ComplianceType", + "documentation":"

    The compliance type that the new custom framework supports, such as CIS or HIPAA.

    " + }, + "controlSets":{ + "shape":"UpdateAssessmentFrameworkControlSets", + "documentation":"

    The control sets associated with the framework.

    " + } + } + }, + "UpdateAssessmentFrameworkResponse":{ + "type":"structure", + "members":{ + "framework":{ + "shape":"Framework", + "documentation":"

    The name of the specified framework.

    " + } + } + }, + "UpdateAssessmentRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "scope" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "assessmentName":{ + "shape":"AssessmentName", + "documentation":"

    The name of the specified assessment to be updated.

    " + }, + "assessmentDescription":{ + "shape":"AssessmentDescription", + "documentation":"

    The description of the specified assessment.

    " + }, + "scope":{ + "shape":"Scope", + "documentation":"

    The scope of the specified assessment.

    " + }, + "assessmentReportsDestination":{ + "shape":"AssessmentReportsDestination", + "documentation":"

    The assessment report storage destination for the specified assessment that is being updated.

    " + }, + "roles":{ + "shape":"Roles", + "documentation":"

    The list of roles for the specified assessment.

    " + } + } + }, + "UpdateAssessmentResponse":{ + "type":"structure", + "members":{ + "assessment":{ + "shape":"Assessment", + "documentation":"

    The response object (name of the updated assessment) for the UpdateAssessment API.

    " + } + } + }, + "UpdateAssessmentStatusRequest":{ + "type":"structure", + "required":[ + "assessmentId", + "status" + ], + "members":{ + "assessmentId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified assessment.

    ", + "location":"uri", + "locationName":"assessmentId" + }, + "status":{ + "shape":"AssessmentStatus", + "documentation":"

    The current status of the specified assessment.

    " + } + } + }, + "UpdateAssessmentStatusResponse":{ + "type":"structure", + "members":{ + "assessment":{ + "shape":"Assessment", + "documentation":"

    The name of the updated assessment returned by the UpdateAssessmentStatus API.

    " + } + } + }, + "UpdateControlRequest":{ + "type":"structure", + "required":[ + "controlId", + "name", + "controlMappingSources" + ], + "members":{ + "controlId":{ + "shape":"UUID", + "documentation":"

    The identifier for the specified control.

    ", + "location":"uri", + "locationName":"controlId" + }, + "name":{ + "shape":"ControlName", + "documentation":"

    The name of the control to be updated.

    " + }, + "description":{ + "shape":"ControlDescription", + "documentation":"

    The optional description of the control.

    " + }, + "testingInformation":{ + "shape":"TestingInformation", + "documentation":"

    The steps to follow to determine whether the control has been satisfied.

    " + }, + "actionPlanTitle":{ + "shape":"ActionPlanTitle", + "documentation":"

    The title of the action plan for remediating the control.

    " + }, + "actionPlanInstructions":{ + "shape":"ActionPlanInstructions", + "documentation":"

    The recommended actions to carry out if the control is not fulfilled.

    " + }, + "controlMappingSources":{ + "shape":"ControlMappingSources", + "documentation":"

    The data source that determines from where AWS Audit Manager collects evidence for the control.

    " + } + } + }, + "UpdateControlResponse":{ + "type":"structure", + "members":{ + "control":{ + "shape":"Control", + "documentation":"

    The name of the updated control returned by the UpdateControl API.

    " + } + } + }, + "UpdateSettingsRequest":{ + "type":"structure", + "members":{ + "snsTopic":{ + "shape":"SnsArn", + "documentation":"

    The Amazon Simple Notification Service (Amazon SNS) topic to which AWS Audit Manager sends notifications.

    " + }, + "defaultAssessmentReportsDestination":{ + "shape":"AssessmentReportsDestination", + "documentation":"

    The default storage destination for assessment reports.

    " + }, + "defaultProcessOwners":{ + "shape":"Roles", + "documentation":"

    A list of the default audit owners.

    " + }, + "kmsKey":{ + "shape":"KmsKey", + "documentation":"

    The AWS KMS key details.

    " + } + } + }, + "UpdateSettingsResponse":{ + "type":"structure", + "members":{ + "settings":{ + "shape":"Settings", + "documentation":"

    The current list of settings.

    " + } + } + }, + "UrlLink":{ + "type":"string", + "max":8192, + "min":1, + "pattern":"^(https?:\\/\\/)?(www\\.)?[a-zA-Z0-9-_]+([\\.]+[a-zA-Z]+)+[\\/\\w]*$" + }, + "Username":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9-_()\\s\\+=,.@]+$" + }, + "ValidateAssessmentReportIntegrityRequest":{ + "type":"structure", + "required":["s3RelativePath"], + "members":{ + "s3RelativePath":{ + "shape":"S3Url", + "documentation":"

    The relative path of the specified Amazon S3 bucket in which the assessment report is stored.

    " + } + } + }, + "ValidateAssessmentReportIntegrityResponse":{ + "type":"structure", + "members":{ + "signatureValid":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the signature key is valid.

    " + }, + "signatureAlgorithm":{ + "shape":"String", + "documentation":"

    The signature algorithm used to code sign the assessment report file.

    " + }, + "signatureDateTime":{ + "shape":"String", + "documentation":"

    The date and time signature that specifies when the assessment report was created.

    " + }, + "signatureKeyId":{ + "shape":"String", + "documentation":"

    The unique identifier for the validation signature key.

    " + }, + "validationErrors":{ + "shape":"ValidationErrors", + "documentation":"

    Represents any errors that occurred when validating the assessment report.

    " + } + } + }, + "ValidationErrors":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason the request failed validation.

    " + }, + "fields":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    The fields that caused the error, if applicable.

    " + } + }, + "documentation":"

    The request has invalid or missing parameters.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name of the validation error.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    The body of the error message.

    " + } + }, + "documentation":"

    Indicates that the request has invalid or missing parameters for the specified field.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + }, + "organizationId":{ + "type":"string", + "max":34, + "min":12, + "pattern":"o-[a-z0-9]{10,32}" + } + }, + "documentation":"

    Welcome to the AWS Audit Manager API reference. This guide is for developers who need detailed information about the AWS Audit Manager API operations, data types, and errors.

    AWS Audit Manager is a service that provides automated evidence collection so that you can continuously audit your AWS usage, and assess the effectiveness of your controls to better manage risk and simplify compliance.

    AWS Audit Manager provides pre-built frameworks that structure and automate assessments for a given compliance standard. Frameworks include a pre-built collection of controls with descriptions and testing procedures, which are grouped according to the requirements of the specified compliance standard or regulation. You can also customize frameworks and controls to support internal audits with unique requirements.

    Use the following links to get started with the AWS Audit Manager API:

    • Actions: An alphabetical list of all AWS Audit Manager API operations.

    • Data types: An alphabetical list of all AWS Audit Manager data types.

    • Common parameters: Parameters that all Query operations can use.

    • Common errors: Client and server errors that all operations can return.

    If you're new to AWS Audit Manager, we recommend that you review the AWS Audit Manager User Guide.
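
    For orientation only, here is a minimal sketch of reaching this API through the generated AWS SDK for Java v2 client. The class names (AuditManagerClient, GetAccountStatusRequest) follow the SDK's usual code-generation conventions and the region is illustrative; this sketch is not part of the service model itself.

        import software.amazon.awssdk.regions.Region;
        import software.amazon.awssdk.services.auditmanager.AuditManagerClient;
        import software.amazon.awssdk.services.auditmanager.model.GetAccountStatusRequest;
        import software.amazon.awssdk.services.auditmanager.model.GetAccountStatusResponse;

        public class AuditManagerExample {
            public static void main(String[] args) {
                // Region is illustrative; credentials come from the default provider chain.
                try (AuditManagerClient client = AuditManagerClient.builder()
                        .region(Region.US_EAST_1)
                        .build()) {
                    // GetAccountStatus takes no parameters and reports whether the
                    // account is registered with AWS Audit Manager.
                    GetAccountStatusResponse response =
                            client.getAccountStatus(GetAccountStatusRequest.builder().build());
                    System.out.println("Audit Manager account status: " + response.statusAsString());
                }
            }
        }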

    " +} diff --git a/services/autoscaling/build.properties b/services/autoscaling/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/autoscaling/build.properties +++ b/services/autoscaling/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index cb7cc95e0f59..57b1f0159f6d 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + braket + AWS Java SDK :: Services :: Braket + The AWS Java SDK for Braket module holds the client classes that are used for + communicating with Braket. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.braket + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/braket/src/main/resources/codegen-resources/paginators-1.json b/services/braket/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..f4dd4a1ee674 --- /dev/null +++ b/services/braket/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,16 @@ +{ + "pagination": { + "SearchDevices": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "devices" + }, + "SearchQuantumTasks": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "quantumTasks" + } + } +} diff --git a/services/braket/src/main/resources/codegen-resources/service-2.json b/services/braket/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..4f5b0c78b3af --- /dev/null +++ b/services/braket/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,900 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-09-01", + "endpointPrefix":"braket", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Braket", + "serviceId":"Braket", + "signatureVersion":"v4", + "signingName":"braket", + "uid":"braket-2019-09-01" + }, + "operations":{ + "CancelQuantumTask":{ + "name":"CancelQuantumTask", + "http":{ + "method":"PUT", + "requestUri":"/quantum-task/{quantumTaskArn}/cancel", + "responseCode":200 + }, + "input":{"shape":"CancelQuantumTaskRequest"}, + "output":{"shape":"CancelQuantumTaskResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Cancels the specified task.

    ", + "idempotent":true + }, + "CreateQuantumTask":{ + "name":"CreateQuantumTask", + "http":{ + "method":"POST", + "requestUri":"/quantum-task", + "responseCode":201 + }, + "input":{"shape":"CreateQuantumTaskRequest"}, + "output":{"shape":"CreateQuantumTaskResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"DeviceOfflineException"}, + {"shape":"InternalServiceException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates a quantum task.

    " + }, + "GetDevice":{ + "name":"GetDevice", + "http":{ + "method":"GET", + "requestUri":"/device/{deviceArn}", + "responseCode":200 + }, + "input":{"shape":"GetDeviceRequest"}, + "output":{"shape":"GetDeviceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Retrieves the devices available in Amazon Braket.
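
    For orientation only, a minimal sketch of invoking this operation with the generated AWS SDK for Java v2 client. The class names (BraketClient, GetDeviceRequest) follow the SDK's usual conventions, the region is illustrative, and the device ARN is a placeholder rather than a value defined in this model.

        import software.amazon.awssdk.regions.Region;
        import software.amazon.awssdk.services.braket.BraketClient;
        import software.amazon.awssdk.services.braket.model.GetDeviceRequest;
        import software.amazon.awssdk.services.braket.model.GetDeviceResponse;

        public class BraketGetDeviceExample {
            public static void main(String[] args) {
                // Placeholder ARN; substitute a device ARN that your account can access.
                String deviceArn = "arn:aws:braket:::device/quantum-simulator/amazon/sv1";

                try (BraketClient braket = BraketClient.builder()
                        .region(Region.US_WEST_2)
                        .build()) {
                    GetDeviceResponse device = braket.getDevice(
                            GetDeviceRequest.builder().deviceArn(deviceArn).build());
                    // deviceName and deviceStatus mirror the DeviceSummary fields in this model.
                    System.out.printf("%s is %s%n", device.deviceName(), device.deviceStatusAsString());
                }
            }
        }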

    " + }, + "GetQuantumTask":{ + "name":"GetQuantumTask", + "http":{ + "method":"GET", + "requestUri":"/quantum-task/{quantumTaskArn}", + "responseCode":200 + }, + "input":{"shape":"GetQuantumTaskRequest"}, + "output":{"shape":"GetQuantumTaskResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Retrieves the specified quantum task.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Shows the tags associated with this resource.

    " + }, + "SearchDevices":{ + "name":"SearchDevices", + "http":{ + "method":"POST", + "requestUri":"/devices", + "responseCode":200 + }, + "input":{"shape":"SearchDevicesRequest"}, + "output":{"shape":"SearchDevicesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Searches for devices using the specified filters.

    " + }, + "SearchQuantumTasks":{ + "name":"SearchQuantumTasks", + "http":{ + "method":"POST", + "requestUri":"/quantum-tasks", + "responseCode":200 + }, + "input":{"shape":"SearchQuantumTasksRequest"}, + "output":{"shape":"SearchQuantumTasksResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Searches for tasks that match the specified filter values.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Add a tag to the specified resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Remove tags from a resource.

    ", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "CancelQuantumTaskRequest":{ + "type":"structure", + "required":[ + "clientToken", + "quantumTaskArn" + ], + "members":{ + "clientToken":{ + "shape":"String64", + "documentation":"

    The client token associated with the request.

    ", + "idempotencyToken":true + }, + "quantumTaskArn":{ + "shape":"QuantumTaskArn", + "documentation":"

    The ARN of the task to cancel.

    ", + "location":"uri", + "locationName":"quantumTaskArn" + } + } + }, + "CancelQuantumTaskResponse":{ + "type":"structure", + "required":[ + "cancellationStatus", + "quantumTaskArn" + ], + "members":{ + "cancellationStatus":{ + "shape":"CancellationStatus", + "documentation":"

    The status of the cancellation request.

    " + }, + "quantumTaskArn":{ + "shape":"QuantumTaskArn", + "documentation":"

    The ARN of the task.

    " + } + } + }, + "CancellationStatus":{ + "type":"string", + "enum":[ + "CANCELLING", + "CANCELLED" + ] + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    An error occurred due to a conflict.

    ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateQuantumTaskRequest":{ + "type":"structure", + "required":[ + "action", + "clientToken", + "deviceArn", + "outputS3Bucket", + "outputS3KeyPrefix", + "shots" + ], + "members":{ + "action":{ + "shape":"JsonValue", + "documentation":"

    The action associated with the task.

    ", + "jsonvalue":true + }, + "clientToken":{ + "shape":"String64", + "documentation":"

    The client token associated with the request.

    ", + "idempotencyToken":true + }, + "deviceArn":{ + "shape":"DeviceArn", + "documentation":"

    The ARN of the device to run the task on.

    " + }, + "deviceParameters":{ + "shape":"CreateQuantumTaskRequestDeviceParametersString", + "documentation":"

    The parameters for the device to run the task on.

    ", + "jsonvalue":true + }, + "outputS3Bucket":{ + "shape":"CreateQuantumTaskRequestOutputS3BucketString", + "documentation":"

    The S3 bucket to store task result files in.

    " + }, + "outputS3KeyPrefix":{ + "shape":"CreateQuantumTaskRequestOutputS3KeyPrefixString", + "documentation":"

    The key prefix for the location in the S3 bucket to store task results in.

    " + }, + "shots":{ + "shape":"CreateQuantumTaskRequestShotsLong", + "documentation":"

    The number of shots to use for the task.

    " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    Tags to be added to the quantum task you're creating.

    " + } + } + }, + "CreateQuantumTaskRequestDeviceParametersString":{ + "type":"string", + "max":2048, + "min":1 + }, + "CreateQuantumTaskRequestOutputS3BucketString":{ + "type":"string", + "max":63, + "min":3 + }, + "CreateQuantumTaskRequestOutputS3KeyPrefixString":{ + "type":"string", + "max":1024, + "min":1 + }, + "CreateQuantumTaskRequestShotsLong":{ + "type":"long", + "box":true, + "min":0 + }, + "CreateQuantumTaskResponse":{ + "type":"structure", + "required":["quantumTaskArn"], + "members":{ + "quantumTaskArn":{ + "shape":"QuantumTaskArn", + "documentation":"

    The ARN of the task created by the request.

    " + } + } + }, + "DeviceArn":{ + "type":"string", + "max":256, + "min":1 + }, + "DeviceOfflineException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The specified device is currently offline.

    ", + "error":{ + "httpStatusCode":424, + "senderFault":true + }, + "exception":true + }, + "DeviceStatus":{ + "type":"string", + "enum":[ + "ONLINE", + "OFFLINE" + ] + }, + "DeviceSummary":{ + "type":"structure", + "required":[ + "deviceArn", + "deviceName", + "deviceStatus", + "deviceType", + "providerName" + ], + "members":{ + "deviceArn":{ + "shape":"DeviceArn", + "documentation":"

    The ARN of the device.

    " + }, + "deviceName":{ + "shape":"String", + "documentation":"

    The name of the device.

    " + }, + "deviceStatus":{ + "shape":"DeviceStatus", + "documentation":"

    The status of the device.

    " + }, + "deviceType":{ + "shape":"DeviceType", + "documentation":"

    The type of the device.

    " + }, + "providerName":{ + "shape":"String", + "documentation":"

    The provider of the device.

    " + } + }, + "documentation":"

    Includes information about the device.

    " + }, + "DeviceSummaryList":{ + "type":"list", + "member":{"shape":"DeviceSummary"} + }, + "DeviceType":{ + "type":"string", + "enum":[ + "QPU", + "SIMULATOR" + ] + }, + "GetDeviceRequest":{ + "type":"structure", + "required":["deviceArn"], + "members":{ + "deviceArn":{ + "shape":"DeviceArn", + "documentation":"

    The ARN of the device to retrieve.

    ", + "location":"uri", + "locationName":"deviceArn" + } + } + }, + "GetDeviceResponse":{ + "type":"structure", + "required":[ + "deviceArn", + "deviceCapabilities", + "deviceName", + "deviceStatus", + "deviceType", + "providerName" + ], + "members":{ + "deviceArn":{ + "shape":"DeviceArn", + "documentation":"

    The ARN of the device.

    " + }, + "deviceCapabilities":{ + "shape":"JsonValue", + "documentation":"

    Details about the capabilities of the device.

    ", + "jsonvalue":true + }, + "deviceName":{ + "shape":"String", + "documentation":"

    The name of the device.

    " + }, + "deviceStatus":{ + "shape":"DeviceStatus", + "documentation":"

    The status of the device.

    " + }, + "deviceType":{ + "shape":"DeviceType", + "documentation":"

    The type of the device.

    " + }, + "providerName":{ + "shape":"String", + "documentation":"

    The name of the partner company for the device.

    " + } + } + }, + "GetQuantumTaskRequest":{ + "type":"structure", + "required":["quantumTaskArn"], + "members":{ + "quantumTaskArn":{ + "shape":"QuantumTaskArn", + "documentation":"

    The ARN of the task to retrieve.

    ", + "location":"uri", + "locationName":"quantumTaskArn" + } + } + }, + "GetQuantumTaskResponse":{ + "type":"structure", + "required":[ + "createdAt", + "deviceArn", + "deviceParameters", + "outputS3Bucket", + "outputS3Directory", + "quantumTaskArn", + "shots", + "status" + ], + "members":{ + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The time at which the task was created.

    " + }, + "deviceArn":{ + "shape":"DeviceArn", + "documentation":"

    The ARN of the device the task was run on.

    " + }, + "deviceParameters":{ + "shape":"JsonValue", + "documentation":"

    The parameters for the device on which the task ran.

    ", + "jsonvalue":true + }, + "endedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The time at which the task ended.

    " + }, + "failureReason":{ + "shape":"String", + "documentation":"

    The reason that a task failed.

    " + }, + "outputS3Bucket":{ + "shape":"String", + "documentation":"

    The S3 bucket where task results are stored.

    " + }, + "outputS3Directory":{ + "shape":"String", + "documentation":"

    The folder in the S3 bucket where task results are stored.

    " + }, + "quantumTaskArn":{ + "shape":"QuantumTaskArn", + "documentation":"

    The ARN of the task.

    " + }, + "shots":{ + "shape":"Long", + "documentation":"

    The number of shots used in the task.

    " + }, + "status":{ + "shape":"QuantumTaskStatus", + "documentation":"

    The status of the task.

    " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    The tags that belong to this task.

    " + } + } + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The request processing has failed because of an unknown error, exception, or failure.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "JsonValue":{"type":"string"}, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    Specify the resourceArn of the resource whose tags you want to display.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagsMap", + "documentation":"

    Displays the key-value pairs of tags associated with this resource.

    " + } + } + }, + "Long":{ + "type":"long", + "box":true + }, + "QuantumTaskArn":{ + "type":"string", + "max":256, + "min":1 + }, + "QuantumTaskStatus":{ + "type":"string", + "enum":[ + "CREATED", + "QUEUED", + "RUNNING", + "COMPLETED", + "FAILED", + "CANCELLING", + "CANCELLED" + ] + }, + "QuantumTaskSummary":{ + "type":"structure", + "required":[ + "createdAt", + "deviceArn", + "outputS3Bucket", + "outputS3Directory", + "quantumTaskArn", + "shots", + "status" + ], + "members":{ + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The time at which the task was created.

    " + }, + "deviceArn":{ + "shape":"DeviceArn", + "documentation":"

    The ARN of the device the task ran on.

    " + }, + "endedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The time at which the task finished.

    " + }, + "outputS3Bucket":{ + "shape":"String", + "documentation":"

    The S3 bucket where the task result file is stored.

    " + }, + "outputS3Directory":{ + "shape":"String", + "documentation":"

    The folder in the S3 bucket where the task result file is stored.

    " + }, + "quantumTaskArn":{ + "shape":"QuantumTaskArn", + "documentation":"

    The ARN of the task.

    " + }, + "shots":{ + "shape":"Long", + "documentation":"

    The shots used for the task.

    " + }, + "status":{ + "shape":"QuantumTaskStatus", + "documentation":"

    The status of the task.

    " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    Displays the key-value pairs of tags associated with this quantum task.

    " + } + }, + "documentation":"

    Includes information about a quantum task.

    " + }, + "QuantumTaskSummaryList":{ + "type":"list", + "member":{"shape":"QuantumTaskSummary"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The specified resource was not found.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SearchDevicesFilter":{ + "type":"structure", + "required":[ + "name", + "values" + ], + "members":{ + "name":{ + "shape":"SearchDevicesFilterNameString", + "documentation":"

    The name to use to filter results.

    " + }, + "values":{ + "shape":"SearchDevicesFilterValuesList", + "documentation":"

    The values to use to filter results.

    " + } + }, + "documentation":"

    The filter to use for searching devices.

    " + }, + "SearchDevicesFilterNameString":{ + "type":"string", + "max":64, + "min":1 + }, + "SearchDevicesFilterValuesList":{ + "type":"list", + "member":{"shape":"String256"}, + "max":10, + "min":1 + }, + "SearchDevicesRequest":{ + "type":"structure", + "required":["filters"], + "members":{ + "filters":{ + "shape":"SearchDevicesRequestFiltersList", + "documentation":"

    The filter values to use to search for a device.

    " + }, + "maxResults":{ + "shape":"SearchDevicesRequestMaxResultsInteger", + "documentation":"

    The maximum number of results to return in the response.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    A token used for pagination of results returned in the response. Use the token returned from the previous request to continue results where the previous request ended.

    " + } + } + }, + "SearchDevicesRequestFiltersList":{ + "type":"list", + "member":{"shape":"SearchDevicesFilter"}, + "max":10, + "min":0 + }, + "SearchDevicesRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "SearchDevicesResponse":{ + "type":"structure", + "required":["devices"], + "members":{ + "devices":{ + "shape":"DeviceSummaryList", + "documentation":"

    An array of DeviceSummary objects for devices that match the specified filter values.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    A token used for pagination of results, or null if there are no additional results. Use the token value in a subsequent request to continue results where the previous request ended.

    " + } + } + }, + "SearchQuantumTasksFilter":{ + "type":"structure", + "required":[ + "name", + "operator", + "values" + ], + "members":{ + "name":{ + "shape":"String64", + "documentation":"

    The name of the device used for the task.

    " + }, + "operator":{ + "shape":"SearchQuantumTasksFilterOperator", + "documentation":"

    An operator to use in the filter.

    " + }, + "values":{ + "shape":"SearchQuantumTasksFilterValuesList", + "documentation":"

    The values to use for the filter.

    " + } + }, + "documentation":"

    A filter to use to search for tasks.

    " + }, + "SearchQuantumTasksFilterOperator":{ + "type":"string", + "enum":[ + "LT", + "LTE", + "EQUAL", + "GT", + "GTE", + "BETWEEN" + ] + }, + "SearchQuantumTasksFilterValuesList":{ + "type":"list", + "member":{"shape":"String256"}, + "max":10, + "min":1 + }, + "SearchQuantumTasksRequest":{ + "type":"structure", + "required":["filters"], + "members":{ + "filters":{ + "shape":"SearchQuantumTasksRequestFiltersList", + "documentation":"

    Array of SearchQuantumTasksFilter objects.

    " + }, + "maxResults":{ + "shape":"SearchQuantumTasksRequestMaxResultsInteger", + "documentation":"

    Maximum number of results to return in the response.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    A token used for pagination of results returned in the response. Use the token returned from the previous request to continue results where the previous request ended.

    " + } + } + }, + "SearchQuantumTasksRequestFiltersList":{ + "type":"list", + "member":{"shape":"SearchQuantumTasksFilter"}, + "max":10, + "min":0 + }, + "SearchQuantumTasksRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "SearchQuantumTasksResponse":{ + "type":"structure", + "required":["quantumTasks"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

    A token used for pagination of results, or null if there are no additional results. Use the token value in a subsequent request to continue results where the previous request ended.

    " + }, + "quantumTasks":{ + "shape":"QuantumTaskSummaryList", + "documentation":"

    An array of QuantumTaskSummary objects for tasks that match the specified filters.

    " + } + } + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The request failed because a service quota is exceeded.

    ", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "String256":{ + "type":"string", + "max":256, + "min":1 + }, + "String64":{ + "type":"string", + "max":64, + "min":1 + }, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    Specify the resourceArn of the resource to which a tag will be added.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    Specify the tags to add to the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The throttling rate limit is met.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    Specify the resourceArn for the resource from which to remove the tags.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

    Specify the keys for the tags to remove from the resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The input fails to satisfy the constraints specified by an AWS service.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

    The Amazon Braket API Reference provides information about the operations and structures supported in Amazon Braket.

    " +} diff --git a/services/budgets/build.properties b/services/budgets/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/budgets/build.properties +++ b/services/budgets/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 96a170764586..c4c651f04ab8 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + codeartifact + AWS Java SDK :: Services :: Codeartifact + The AWS Java SDK for Codeartifact module holds the client classes that are used for + communicating with Codeartifact. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.codeartifact + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/codeartifact/src/main/resources/codegen-resources/paginators-1.json b/services/codeartifact/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..ef8602842ebc --- /dev/null +++ b/services/codeartifact/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,40 @@ +{ + "pagination": { + "ListDomains": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "domains" + }, + "ListPackageVersionAssets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assets" + }, + "ListPackageVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "versions" + }, + "ListPackages": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "packages" + }, + "ListRepositories": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "repositories" + }, + "ListRepositoriesInDomain": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "repositories" + } + } +} diff --git a/services/codeartifact/src/main/resources/codegen-resources/service-2.json b/services/codeartifact/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..43b491adcad1 --- /dev/null +++ b/services/codeartifact/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3134 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-09-22", + "endpointPrefix":"codeartifact", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"CodeArtifact", + "serviceId":"codeartifact", + "signatureVersion":"v4", + "signingName":"codeartifact", + "uid":"codeartifact-2018-09-22" + }, + "operations":{ + "AssociateExternalConnection":{ + "name":"AssociateExternalConnection", + "http":{ + "method":"POST", + "requestUri":"/v1/repository/external-connection" + }, + "input":{"shape":"AssociateExternalConnectionRequest"}, + "output":{"shape":"AssociateExternalConnectionResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + 
{"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Adds an existing external connection to a repository. One external connection is allowed per repository.

    A repository can have one or more upstream repositories, or an external connection.

    " + }, + "CopyPackageVersions":{ + "name":"CopyPackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/copy" + }, + "input":{"shape":"CopyPackageVersionsRequest"}, + "output":{"shape":"CopyPackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Copies package versions from one repository to another repository in the same domain.

    You must specify versions or versionRevisions. You cannot specify both.

    " + }, + "CreateDomain":{ + "name":"CreateDomain", + "http":{ + "method":"POST", + "requestUri":"/v1/domain" + }, + "input":{"shape":"CreateDomainRequest"}, + "output":{"shape":"CreateDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates a domain. CodeArtifact domains make it easier to manage multiple repositories across an organization. You can use a domain to apply permissions across many repositories owned by different AWS accounts. An asset is stored only once in a domain, even if it's in multiple repositories.

    Although you can have multiple domains, we recommend a single production domain that contains all published artifacts so that your development teams can find and share packages. You can use a second pre-production domain to test changes to the production domain configuration.

    " + }, + "CreateRepository":{ + "name":"CreateRepository", + "http":{ + "method":"POST", + "requestUri":"/v1/repository" + }, + "input":{"shape":"CreateRepositoryRequest"}, + "output":{"shape":"CreateRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates a repository.

    " + }, + "DeleteDomain":{ + "name":"DeleteDomain", + "http":{ + "method":"DELETE", + "requestUri":"/v1/domain" + }, + "input":{"shape":"DeleteDomainRequest"}, + "output":{"shape":"DeleteDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes a domain. You cannot delete a domain that contains repositories. If you want to delete a domain with repositories, first delete its repositories.

    " + }, + "DeleteDomainPermissionsPolicy":{ + "name":"DeleteDomainPermissionsPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/v1/domain/permissions/policy" + }, + "input":{"shape":"DeleteDomainPermissionsPolicyRequest"}, + "output":{"shape":"DeleteDomainPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes the resource policy set on a domain.

    " + }, + "DeletePackageVersions":{ + "name":"DeletePackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/delete" + }, + "input":{"shape":"DeletePackageVersionsRequest"}, + "output":{"shape":"DeletePackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes one or more versions of a package. A deleted package version cannot be restored in your repository. If you want to remove a package version from your repository and be able to restore it later, set its status to Archived. Archived packages cannot be downloaded from a repository and don't show up with list package APIs (for example, ListPackageVersions), but you can restore them using UpdatePackageVersionsStatus.

    " + }, + "DeleteRepository":{ + "name":"DeleteRepository", + "http":{ + "method":"DELETE", + "requestUri":"/v1/repository" + }, + "input":{"shape":"DeleteRepositoryRequest"}, + "output":{"shape":"DeleteRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes a repository.

    " + }, + "DeleteRepositoryPermissionsPolicy":{ + "name":"DeleteRepositoryPermissionsPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/v1/repository/permissions/policies" + }, + "input":{"shape":"DeleteRepositoryPermissionsPolicyRequest"}, + "output":{"shape":"DeleteRepositoryPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes the resource policy that is set on a repository. After a resource policy is deleted, the permissions allowed and denied by the deleted policy are removed. The effect of deleting a resource policy might not be immediate.

    Use DeleteRepositoryPermissionsPolicy with caution. After a policy is deleted, AWS users, roles, and accounts lose permissions to perform the repository actions granted by the deleted policy.

    " + }, + "DescribeDomain":{ + "name":"DescribeDomain", + "http":{ + "method":"GET", + "requestUri":"/v1/domain" + }, + "input":{"shape":"DescribeDomainRequest"}, + "output":{"shape":"DescribeDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a DomainDescription object that contains information about the requested domain.

    " + }, + "DescribePackageVersion":{ + "name":"DescribePackageVersion", + "http":{ + "method":"GET", + "requestUri":"/v1/package/version" + }, + "input":{"shape":"DescribePackageVersionRequest"}, + "output":{"shape":"DescribePackageVersionResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a PackageVersionDescription object that contains information about the requested package version.

    " + }, + "DescribeRepository":{ + "name":"DescribeRepository", + "http":{ + "method":"GET", + "requestUri":"/v1/repository" + }, + "input":{"shape":"DescribeRepositoryRequest"}, + "output":{"shape":"DescribeRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a RepositoryDescription object that contains detailed information about the requested repository.

    " + }, + "DisassociateExternalConnection":{ + "name":"DisassociateExternalConnection", + "http":{ + "method":"DELETE", + "requestUri":"/v1/repository/external-connection" + }, + "input":{"shape":"DisassociateExternalConnectionRequest"}, + "output":{"shape":"DisassociateExternalConnectionResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Removes an existing external connection from a repository.

    " + }, + "DisposePackageVersions":{ + "name":"DisposePackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/dispose" + }, + "input":{"shape":"DisposePackageVersionsRequest"}, + "output":{"shape":"DisposePackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes the assets in package versions and sets the package versions' status to Disposed. A disposed package version cannot be restored in your repository because its assets are deleted.

    To view all disposed package versions in a repository, use ListPackageVersions and set the status parameter to Disposed.

    To view information about a disposed package version, use DescribePackageVersion.

    " + }, + "GetAuthorizationToken":{ + "name":"GetAuthorizationToken", + "http":{ + "method":"POST", + "requestUri":"/v1/authorization-token" + }, + "input":{"shape":"GetAuthorizationTokenRequest"}, + "output":{"shape":"GetAuthorizationTokenResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Generates a temporary authorization token for accessing repositories in the domain. This API requires the codeartifact:GetAuthorizationToken and sts:GetServiceBearerToken permissions. For more information about authorization tokens, see AWS CodeArtifact authentication and tokens.

    CodeArtifact authorization tokens are valid for a period of 12 hours when created with the login command. You can call login periodically to refresh the token. When you create an authorization token with the GetAuthorizationToken API, you can set a custom authorization period, up to a maximum of 12 hours, with the durationSeconds parameter.

    The authorization period begins after login or GetAuthorizationToken is called. If login or GetAuthorizationToken is called while assuming a role, the token lifetime is independent of the maximum session duration of the role. For example, if you call sts assume-role and specify a session duration of 15 minutes, then generate a CodeArtifact authorization token, the token will be valid for the full authorization period even though this is longer than the 15-minute session duration.

    See Using IAM Roles for more information on controlling session duration.

    " + }, + "GetDomainPermissionsPolicy":{ + "name":"GetDomainPermissionsPolicy", + "http":{ + "method":"GET", + "requestUri":"/v1/domain/permissions/policy" + }, + "input":{"shape":"GetDomainPermissionsPolicyRequest"}, + "output":{"shape":"GetDomainPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the resource policy attached to the specified domain.

    The policy is a resource-based policy, not an identity-based policy. For more information, see Identity-based policies and resource-based policies in the AWS Identity and Access Management User Guide.

    " + }, + "GetPackageVersionAsset":{ + "name":"GetPackageVersionAsset", + "http":{ + "method":"GET", + "requestUri":"/v1/package/version/asset" + }, + "input":{"shape":"GetPackageVersionAssetRequest"}, + "output":{"shape":"GetPackageVersionAssetResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Returns an asset (or file) that is in a package. For example, for a Maven package version, use GetPackageVersionAsset to download a JAR file, a POM file, or any other assets in the package version.

    " + }, + "GetPackageVersionReadme":{ + "name":"GetPackageVersionReadme", + "http":{ + "method":"GET", + "requestUri":"/v1/package/version/readme" + }, + "input":{"shape":"GetPackageVersionReadmeRequest"}, + "output":{"shape":"GetPackageVersionReadmeResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Gets the readme file or descriptive text for a package version. For packages that do not contain a readme file, CodeArtifact extracts a description from a metadata file. For example, from the <description> element in the pom.xml file of a Maven package.

    The returned text might contain formatting. For example, it might contain formatting for Markdown or reStructuredText.

    " + }, + "GetRepositoryEndpoint":{ + "name":"GetRepositoryEndpoint", + "http":{ + "method":"GET", + "requestUri":"/v1/repository/endpoint" + }, + "input":{"shape":"GetRepositoryEndpointRequest"}, + "output":{"shape":"GetRepositoryEndpointResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

    • npm

    • pypi

    • maven

    • nuget

    " + }, + "GetRepositoryPermissionsPolicy":{ + "name":"GetRepositoryPermissionsPolicy", + "http":{ + "method":"GET", + "requestUri":"/v1/repository/permissions/policy" + }, + "input":{"shape":"GetRepositoryPermissionsPolicyRequest"}, + "output":{"shape":"GetRepositoryPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the resource policy that is set on a repository.

    " + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"POST", + "requestUri":"/v1/domains" + }, + "input":{"shape":"ListDomainsRequest"}, + "output":{"shape":"ListDomainsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of DomainSummary objects for all domains owned by the AWS account that makes this call. Each returned DomainSummary object contains information about a domain.

    " + }, + "ListPackageVersionAssets":{ + "name":"ListPackageVersionAssets", + "http":{ + "method":"POST", + "requestUri":"/v1/package/version/assets" + }, + "input":{"shape":"ListPackageVersionAssetsRequest"}, + "output":{"shape":"ListPackageVersionAssetsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of AssetSummary objects for assets in a package version.

    " + }, + "ListPackageVersionDependencies":{ + "name":"ListPackageVersionDependencies", + "http":{ + "method":"POST", + "requestUri":"/v1/package/version/dependencies" + }, + "input":{"shape":"ListPackageVersionDependenciesRequest"}, + "output":{"shape":"ListPackageVersionDependenciesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the direct dependencies for a package version. The dependencies are returned as PackageDependency objects. CodeArtifact extracts the dependencies for a package version from the metadata file for the package format (for example, the package.json file for npm packages and the pom.xml file for Maven). Any package version dependencies that are not listed in the configuration file are not returned.

    " + }, + "ListPackageVersions":{ + "name":"ListPackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions" + }, + "input":{"shape":"ListPackageVersionsRequest"}, + "output":{"shape":"ListPackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of PackageVersionSummary objects for package versions in a repository that match the request parameters.

    " + }, + "ListPackages":{ + "name":"ListPackages", + "http":{ + "method":"POST", + "requestUri":"/v1/packages" + }, + "input":{"shape":"ListPackagesRequest"}, + "output":{"shape":"ListPackagesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of PackageSummary objects for packages in a repository that match the request parameters.

    " + }, + "ListRepositories":{ + "name":"ListRepositories", + "http":{ + "method":"POST", + "requestUri":"/v1/repositories" + }, + "input":{"shape":"ListRepositoriesRequest"}, + "output":{"shape":"ListRepositoriesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of RepositorySummary objects. Each RepositorySummary contains information about a repository in the specified AWS account and that matches the input parameters.

    " + }, + "ListRepositoriesInDomain":{ + "name":"ListRepositoriesInDomain", + "http":{ + "method":"POST", + "requestUri":"/v1/domain/repositories" + }, + "input":{"shape":"ListRepositoriesInDomainRequest"}, + "output":{"shape":"ListRepositoriesInDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of RepositorySummary objects. Each RepositorySummary contains information about a repository in the specified domain and that matches the input parameters.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/v1/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeArtifact.

    " + }, + "PutDomainPermissionsPolicy":{ + "name":"PutDomainPermissionsPolicy", + "http":{ + "method":"PUT", + "requestUri":"/v1/domain/permissions/policy" + }, + "input":{"shape":"PutDomainPermissionsPolicyRequest"}, + "output":{"shape":"PutDomainPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Sets a resource policy on a domain that specifies permissions to access it.

    When you call PutDomainPermissionsPolicy, the resource policy on the domain is ignored when evaluating permissions. This ensures that the owner of a domain cannot lock themselves out of the domain, which would prevent them from being able to update the resource policy.

    " + }, + "PutRepositoryPermissionsPolicy":{ + "name":"PutRepositoryPermissionsPolicy", + "http":{ + "method":"PUT", + "requestUri":"/v1/repository/permissions/policy" + }, + "input":{"shape":"PutRepositoryPermissionsPolicyRequest"}, + "output":{"shape":"PutRepositoryPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Sets the resource policy on a repository that specifies permissions to access it.

    When you call PutRepositoryPermissionsPolicy, the resource policy on the repository is ignored when evaluating permissions. This ensures that the owner of a repository cannot lock themselves out of the repository, which would prevent them from being able to update the resource policy.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/v1/tag" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Adds or updates tags for a resource in AWS CodeArtifact.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/v1/untag" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Removes tags from a resource in AWS CodeArtifact.

    " + }, + "UpdatePackageVersionsStatus":{ + "name":"UpdatePackageVersionsStatus", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/update_status" + }, + "input":{"shape":"UpdatePackageVersionsStatusRequest"}, + "output":{"shape":"UpdatePackageVersionsStatusResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Updates the status of one or more versions of a package.

    " + }, + "UpdateRepository":{ + "name":"UpdateRepository", + "http":{ + "method":"PUT", + "requestUri":"/v1/repository" + }, + "input":{"shape":"UpdateRepositoryRequest"}, + "output":{"shape":"UpdateRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Updates the properties of a repository.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The operation did not succeed because of an unauthorized access attempt.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"[0-9]{12}" + }, + "Arn":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"\\S+" + }, + "Asset":{ + "type":"blob", + "streaming":true + }, + "AssetHashes":{ + "type":"map", + "key":{"shape":"HashAlgorithm"}, + "value":{"shape":"HashValue"} + }, + "AssetName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"\\P{C}+" + }, + "AssetSummary":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"AssetName", + "documentation":"

    The name of the asset.

    " + }, + "size":{ + "shape":"LongOptional", + "documentation":"

    The size of the asset.

    " + }, + "hashes":{ + "shape":"AssetHashes", + "documentation":"

    The hashes of the asset.

    " + } + }, + "documentation":"

    Contains details about a package version asset.

    " + }, + "AssetSummaryList":{ + "type":"list", + "member":{"shape":"AssetSummary"} + }, + "AssociateExternalConnectionRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "externalConnection" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to which the external connection is added.

    ", + "location":"querystring", + "locationName":"repository" + }, + "externalConnection":{ + "shape":"ExternalConnectionName", + "documentation":"

    The name of the external connection to add to the repository. The following values are supported:

    • public:npmjs - for the npm public repository.

    • public:pypi - for the Python Package Index.

    • public:maven-central - for Maven Central.

    • public:maven-googleandroid - for the Google Android repository.

    • public:maven-gradleplugins - for the Gradle plugins repository.

    • public:maven-commonsware - for the CommonsWare Android repository.

    • public:nuget-org - for the NuGet Gallery.

    ", + "location":"querystring", + "locationName":"external-connection" + } + } + }, + "AssociateExternalConnectionResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

    Information about the connected repository after processing the request.

    " + } + } + }, + "AuthorizationTokenDurationSeconds":{ + "type":"long", + "max":43200, + "min":0 + }, + "BooleanOptional":{"type":"boolean"}, + "ConflictException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of AWS resource.

    " + } + }, + "documentation":"

    The operation did not succeed because prerequisites are not met.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CopyPackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "sourceRepository", + "destinationRepository", + "format", + "package" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the source and destination repositories.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "sourceRepository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that contains the package versions to copy.

    ", + "location":"querystring", + "locationName":"source-repository" + }, + "destinationRepository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository into which package versions are copied.

    ", + "location":"querystring", + "locationName":"destination-repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    The format of the package that is copied. The valid package types are:

    • npm: A Node Package Manager (npm) package.

    • pypi: A Python Package Index (PyPI) package.

    • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

    • nuget: A NuGet package.

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package that is copied.

    ", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

    The versions of the package to copy.

    You must specify versions or versionRevisions. You cannot specify both.

    " + }, + "versionRevisions":{ + "shape":"PackageVersionRevisionMap", + "documentation":"

    A list of key-value pairs. The keys are package versions and the values are package version revisions. A CopyPackageVersion operation succeeds if the specified versions in the source repository match the specified package version revision.

    You must specify versions or versionRevisions. You cannot specify both.

    " + }, + "allowOverwrite":{ + "shape":"BooleanOptional", + "documentation":"

    Set to true to overwrite a package version that already exists in the destination repository. If set to false and the package version already exists in the destination repository, the package version is returned in the failedVersions field of the response with an ALREADY_EXISTS error code.

    " + }, + "includeFromUpstream":{ + "shape":"BooleanOptional", + "documentation":"

    Set to true to copy packages from repositories that are upstream from the source repository to the destination repository. The default setting is false. For more information, see Working with upstream repositories.

    " + } + } + }, + "CopyPackageVersionsResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

    A list of the package versions that were successfully copied to your repository.

    " + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

    A map of package versions that failed to copy and their error codes. The possible error codes are in the PackageVersionError data type. They are:

    • ALREADY_EXISTS

    • MISMATCHED_REVISION

    • MISMATCHED_STATUS

    • NOT_ALLOWED

    • NOT_FOUND

    • SKIPPED

    " + } + } + }, + "CreateDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain to create. All domain names in an AWS Region that are in the same AWS account must be unique. The domain name is used as the prefix in DNS hostnames. Do not use sensitive information in a domain name because it is publicly discoverable.

    ", + "location":"querystring", + "locationName":"domain" + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

    The encryption key for the domain. This is used to encrypt content stored in a domain. An encryption key can be a key ID, a key Amazon Resource Name (ARN), a key alias, or a key alias ARN. To specify an encryptionKey, your IAM role must have kms:DescribeKey and kms:CreateGrant permissions on the encryption key that is used. For more information, see DescribeKey in the AWS Key Management Service API Reference and AWS KMS API Permissions Reference in the AWS Key Management Service Developer Guide.

    CodeArtifact supports only symmetric CMKs. Do not associate an asymmetric CMK with your domain. For more information, see Using symmetric and asymmetric keys in the AWS Key Management Service Developer Guide.

    " + }, + "tags":{ + "shape":"TagList", + "documentation":"

    One or more tag key-value pairs for the domain.

    " + } + } + }, + "CreateDomainResult":{ + "type":"structure", + "members":{ + "domain":{ + "shape":"DomainDescription", + "documentation":"

    Contains information about the created domain after processing the request.

    " + } + } + }, + "CreateRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The domain that contains the created repository.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to create.

    ", + "location":"querystring", + "locationName":"repository" + }, + "description":{ + "shape":"Description", + "documentation":"

    A description of the created repository.

    " + }, + "upstreams":{ + "shape":"UpstreamRepositoryList", + "documentation":"

    A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more information, see Working with upstream repositories.

    " + }, + "tags":{ + "shape":"TagList", + "documentation":"

    One or more tag key-value pairs for the repository.

    " + } + } + }, + "CreateRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

    Information about the created repository after processing the request.

    " + } + } + }, + "DeleteDomainPermissionsPolicyRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain associated with the resource policy to be deleted.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

    The current revision of the resource policy to be deleted. This revision is used for optimistic locking, which prevents others from overwriting your changes to the domain's resource policy.

    ", + "location":"querystring", + "locationName":"policy-revision" + } + } + }, + "DeleteDomainPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

    Information about the deleted resource policy after processing the request.

    " + } + } + }, + "DeleteDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain to delete.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + } + } + }, + "DeleteDomainResult":{ + "type":"structure", + "members":{ + "domain":{ + "shape":"DomainDescription", + "documentation":"

    Contains information about the deleted domain after processing the request.

    " + } + } + }, + "DeletePackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "versions" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the package to delete.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that contains the package versions to delete.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    The format of the package versions to delete. The valid values are:

    • npm

    • pypi

    • maven

    • nuget

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package with the versions to delete.

    ", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

    An array of strings that specify the versions of the package to delete.

    " + }, + "expectedStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

    The expected status of the package version to delete. Valid values are:

    • Published

    • Unfinished

    • Unlisted

    • Archived

    • Disposed

    " + } + } + }, + "DeletePackageVersionsResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

    A list of the package versions that were successfully deleted.

    " + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

    A PackageVersionError object that contains a map of error codes for the package versions that failed to delete. The possible error codes are:

    • ALREADY_EXISTS

    • MISMATCHED_REVISION

    • MISMATCHED_STATUS

    • NOT_ALLOWED

    • NOT_FOUND

    • SKIPPED

    " + } + } + }, + "DeleteRepositoryPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository associated with the resource policy to be deleted.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that is associated with the resource policy to be deleted.

    ", + "location":"querystring", + "locationName":"repository" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

    The revision of the repository's resource policy to be deleted. This revision is used for optimistic locking, which prevents others from accidentally overwriting your changes to the repository's resource policy.

    ", + "location":"querystring", + "locationName":"policy-revision" + } + } + }, + "DeleteRepositoryPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

    Information about the deleted policy after processing the request.

    " + } + } + }, + "DeleteRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository to delete.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to delete.

    ", + "location":"querystring", + "locationName":"repository" + } + } + }, + "DeleteRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

    Information about the deleted repository after processing the request.

    " + } + } + }, + "DescribeDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    A string that specifies the name of the requested domain.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + } + } + }, + "DescribeDomainResult":{ + "type":"structure", + "members":{ + "domain":{"shape":"DomainDescription"} + } + }, + "DescribePackageVersionRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository that contains the package version.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that contains the package version.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    A format that specifies the type of the requested package version. The valid values are:

    • npm

    • pypi

    • maven

    • nuget

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the requested package version.

    ", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

    A string that contains the package version (for example, 3.5.2).

    ", + "location":"querystring", + "locationName":"version" + } + } + }, + "DescribePackageVersionResult":{ + "type":"structure", + "required":["packageVersion"], + "members":{ + "packageVersion":{ + "shape":"PackageVersionDescription", + "documentation":"

    A PackageVersionDescription object that contains information about the requested package version.

    " + } + } + }, + "DescribeRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository to describe.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    A string that specifies the name of the requested repository.

    ", + "location":"querystring", + "locationName":"repository" + } + } + }, + "DescribeRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

    A RepositoryDescription object that contains the requested repository information.

    " + } + } + }, + "Description":{ + "type":"string", + "max":1000, + "pattern":"\\P{C}+" + }, + "DisassociateExternalConnectionRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "externalConnection" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository from which to remove the external connection.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository from which the external connection will be removed.

    ", + "location":"querystring", + "locationName":"repository" + }, + "externalConnection":{ + "shape":"ExternalConnectionName", + "documentation":"

    The name of the external connection to be removed from the repository.

    ", + "location":"querystring", + "locationName":"external-connection" + } + } + }, + "DisassociateExternalConnectionResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

    The repository associated with the removed external connection.

    " + } + } + }, + "DisposePackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "versions" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository with the package versions you want to dispose.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that contains the package versions you want to dispose.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    A format that specifies the type of package versions you want to dispose. The valid values are:

    • npm

    • pypi

    • maven

    • nuget

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package with the versions you want to dispose.

    ", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

    The versions of the package you want to dispose.

    " + }, + "versionRevisions":{ + "shape":"PackageVersionRevisionMap", + "documentation":"

    The revisions of the package versions you want to dispose.

    " + }, + "expectedStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

    The expected status of the package version to dispose. Valid values are:

    • Published

    • Unfinished

    • Unlisted

    • Archived

    • Disposed

    " + } + } + }, + "DisposePackageVersionsResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

    A list of the package versions that were successfully disposed.

    " + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

    A PackageVersionError object that contains a map of error codes for the package versions that failed to be disposed. The possible error codes are:

    • ALREADY_EXISTS

    • MISMATCHED_REVISION

    • MISMATCHED_STATUS

    • NOT_ALLOWED

    • NOT_FOUND

    • SKIPPED

    " + } + } + }, + "DomainDescription":{ + "type":"structure", + "members":{ + "name":{ + "shape":"DomainName", + "documentation":"

    The name of the domain.

    " + }, + "owner":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID that owns the domain.

    " + }, + "arn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the domain.

    " + }, + "status":{ + "shape":"DomainStatus", + "documentation":"

    The current status of a domain. The valid values are:

    • Active

    • Deleted

    " + }, + "createdTime":{ + "shape":"Timestamp", + "documentation":"

    A timestamp that represents the date and time the domain was created.

    " + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

    The ARN of an AWS Key Management Service (AWS KMS) key associated with a domain.

    " + }, + "repositoryCount":{ + "shape":"Integer", + "documentation":"

    The number of repositories in the domain.

    " + }, + "assetSizeBytes":{ + "shape":"Long", + "documentation":"

    The total size of all assets in the domain.

    " + }, + "s3BucketArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon S3 bucket that is used to store package assets in the domain.

    " + } + }, + "documentation":"

    Information about a domain. A domain is a container for repositories. When you create a domain, it is empty until you add one or more repositories.

    " + }, + "DomainName":{ + "type":"string", + "max":50, + "min":2, + "pattern":"[a-z][a-z0-9\\-]{0,48}[a-z0-9]" + }, + "DomainStatus":{ + "type":"string", + "enum":[ + "Active", + "Deleted" + ] + }, + "DomainSummary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"DomainName", + "documentation":"

    The name of the domain.

    " + }, + "owner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    " + }, + "arn":{ + "shape":"Arn", + "documentation":"

    The ARN of the domain.

    " + }, + "status":{ + "shape":"DomainStatus", + "documentation":"

    A string that contains the status of the domain. The valid values are:

    • Active

    • Deleted

    " + }, + "createdTime":{ + "shape":"Timestamp", + "documentation":"

    A timestamp that contains the date and time the domain was created.

    " + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

    The key used to encrypt the domain.

    " + } + }, + "documentation":"

    Information about a domain, including its name, Amazon Resource Name (ARN), and status. The ListDomains operation returns a list of DomainSummary objects.

    " + }, + "DomainSummaryList":{ + "type":"list", + "member":{"shape":"DomainSummary"} + }, + "ErrorMessage":{"type":"string"}, + "ExternalConnectionName":{ + "type":"string", + "pattern":"[A-Za-z0-9][A-Za-z0-9._\\-:]{1,99}" + }, + "ExternalConnectionStatus":{ + "type":"string", + "enum":["Available"] + }, + "GetAuthorizationTokenRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that is in scope for the generated authorization token.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "durationSeconds":{ + "shape":"AuthorizationTokenDurationSeconds", + "documentation":"

    The time, in seconds, that the generated authorization token is valid. Valid values are 0 and any number between 900 (15 minutes) and 43200 (12 hours). A value of 0 sets the expiration of the authorization token to match the expiration of the user's role's temporary credentials.

    ", + "location":"querystring", + "locationName":"duration" + } + } + }, + "GetAuthorizationTokenResult":{ + "type":"structure", + "members":{ + "authorizationToken":{ + "shape":"String", + "documentation":"

    The returned authorization token.

    " + }, + "expiration":{ + "shape":"Timestamp", + "documentation":"

    A timestamp that specifies the date and time the authorization token expires.

    " + } + } + }, + "GetDomainPermissionsPolicyRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain to which the resource policy is attached.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + } + } + }, + "GetDomainPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

    The returned resource policy.

    " + } + } + }, + "GetPackageVersionAssetRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion", + "asset" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The domain that contains the repository that contains the package version with the requested asset.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The repository that contains the package version with the requested asset.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    A format that specifies the type of the package version with the requested asset file. The valid values are:

    • npm

    • pypi

    • maven

    • nuget

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package that contains the requested asset.

    ", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

    A string that contains the package version (for example, 3.5.2).

    ", + "location":"querystring", + "locationName":"version" + }, + "asset":{ + "shape":"AssetName", + "documentation":"

    The name of the requested asset.

    ", + "location":"querystring", + "locationName":"asset" + }, + "packageVersionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

    The name of the package version revision that contains the requested asset.

    ", + "location":"querystring", + "locationName":"revision" + } + } + }, + "GetPackageVersionAssetResult":{ + "type":"structure", + "members":{ + "asset":{ + "shape":"Asset", + "documentation":"

    The binary file, or asset, that is downloaded.

    " + }, + "assetName":{ + "shape":"AssetName", + "documentation":"

    The name of the asset that is downloaded.

    ", + "location":"header", + "locationName":"X-AssetName" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

    A string that contains the package version (for example, 3.5.2).

    ", + "location":"header", + "locationName":"X-PackageVersion" + }, + "packageVersionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

    The name of the package version revision that contains the downloaded asset.

    ", + "location":"header", + "locationName":"X-PackageVersionRevision" + } + }, + "payload":"asset" + }, + "GetPackageVersionReadmeRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository that contains the package version with the requested readme file.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The repository that contains the package with the requested readme file.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    A format that specifies the type of the package version with the requested readme file. The valid values are:

    • npm

    • pypi

    • maven

    • nuget

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package that contains the requested readme file.

    ", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

    A string that contains the package version (for example, 3.5.2).

    ", + "location":"querystring", + "locationName":"version" + } + } + }, + "GetPackageVersionReadmeResult":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

    The format of the package with the requested readme file. Valid format types are:

    • npm

    • pypi

    • maven

    • nuget

    " + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    " + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package that contains the returned readme file.

    " + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

    The version of the package with the requested readme file.

    " + }, + "versionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

    The current revision associated with the package version.

    " + }, + "readme":{ + "shape":"String", + "documentation":"

    The text of the returned readme file.

    " + } + } + }, + "GetRepositoryEndpointRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain that contains the repository. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    Specifies which endpoint of the repository to return. A repository has one endpoint for each package format:

    • npm

    • pypi

    • maven

    • nuget

    ", + "location":"querystring", + "locationName":"format" + } + } + }, + "GetRepositoryEndpointResult":{ + "type":"structure", + "members":{ + "repositoryEndpoint":{ + "shape":"String", + "documentation":"

    A string that specifies the URL of the returned endpoint.

    " + } + } + }, + "GetRepositoryPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain containing the repository whose associated resource policy is to be retrieved.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository whose associated resource policy is to be retrieved.

    ", + "location":"querystring", + "locationName":"repository" + } + } + }, + "GetRepositoryPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

    The returned resource policy.

    " + } + } + }, + "HashAlgorithm":{ + "type":"string", + "enum":[ + "MD5", + "SHA-1", + "SHA-256", + "SHA-512" + ] + }, + "HashValue":{ + "type":"string", + "max":512, + "min":32, + "pattern":"[0-9a-f]+" + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The operation did not succeed because of an error that occurred inside AWS CodeArtifact.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "LicenseInfo":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

    Name of the license.

    " + }, + "url":{ + "shape":"String", + "documentation":"

    The URL for license data.

    " + } + }, + "documentation":"

    Details of the license data.

    " + }, + "LicenseInfoList":{ + "type":"list", + "member":{"shape":"LicenseInfo"} + }, + "ListDomainsMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListDomainsMaxResults", + "documentation":"

    The maximum number of results to return per page.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    " + } + } + }, + "ListDomainsResult":{ + "type":"structure", + "members":{ + "domains":{ + "shape":"DomainSummaryList", + "documentation":"

    The returned list of DomainSummary objects.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    " + } + } + }, + "ListPackageVersionAssetsMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListPackageVersionAssetsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository associated with the package version assets.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that contains the package that contains the returned package version assets.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    The format of the package that contains the returned package version assets. The valid package types are:

    • npm: A Node Package Manager (npm) package.

    • pypi: A Python Package Index (PyPI) package.

    • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

    • nuget: A NuGet package.

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package that contains the returned package version assets.

    ", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

    A string that contains the package version (for example, 3.5.2).

    ", + "location":"querystring", + "locationName":"version" + }, + "maxResults":{ + "shape":"ListPackageVersionAssetsMaxResults", + "documentation":"

    The maximum number of results to return per page.

    ", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackageVersionAssetsResult":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

    The format of the package that contains the returned package version assets.

    " + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    " + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package that contains the returned package version assets.

    " + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

    The version of the package associated with the returned assets.

    " + }, + "versionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

    The current revision associated with the package version.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are additional results, this is the token for the next set of results.

    " + }, + "assets":{ + "shape":"AssetSummaryList", + "documentation":"

    The returned list of AssetSummary objects.

    " + } + } + }, + "ListPackageVersionDependenciesRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The domain that contains the repository that contains the requested package version dependencies.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that contains the requested package version.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    The format of the package with the requested dependencies. The valid package types are:

    • npm: A Node Package Manager (npm) package.

    • pypi: A Python Package Index (PyPI) package.

    • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

    • nuget: A NuGet package.

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package with the requested dependencies.

    ", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

    A string that contains the package version (for example, 3.5.2).

    ", + "location":"querystring", + "locationName":"version" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackageVersionDependenciesResult":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

    A format that specifies the type of the package that contains the returned dependencies. The valid values are:

    • npm

    • pypi

    • maven

    • nuget

    " + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    " + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package that contains the returned package version dependencies.

    " + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

    The version of the package that is specified in the request.

    " + }, + "versionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

    The current revision associated with the package version.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    " + }, + "dependencies":{ + "shape":"PackageDependencyList", + "documentation":"

    The returned list of PackageDependency objects.

    " + } + } + }, + "ListPackageVersionsMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListPackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository that contains the returned package versions.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that contains the package.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    The format of the returned packages. The valid package types are:

    • npm: A Node Package Manager (npm) package.

    • pypi: A Python Package Index (PyPI) package.

    • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

    • nuget: A NuGet package.

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package for which you want to return a list of package versions.

    ", + "location":"querystring", + "locationName":"package" + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

    A string that specifies the status of the package versions to include in the returned list. It can be one of the following:

    • Published

    • Unfinished

    • Unlisted

    • Archived

    • Disposed

    ", + "location":"querystring", + "locationName":"status" + }, + "sortBy":{ + "shape":"PackageVersionSortType", + "documentation":"

    How to sort the returned list of package versions.

    ", + "location":"querystring", + "locationName":"sortBy" + }, + "maxResults":{ + "shape":"ListPackageVersionsMaxResults", + "documentation":"

    The maximum number of results to return per page.

    ", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackageVersionsResult":{ + "type":"structure", + "members":{ + "defaultDisplayVersion":{ + "shape":"PackageVersion", + "documentation":"

    The default package version to display. This depends on the package format:

    • For Maven and PyPI packages, it's the most recently published package version.

    • For npm packages, it's the version referenced by the latest tag. If the latest tag is not set, it's the most recently published package version.

    " + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    A format of the package. Valid package format values are:

    • npm

    • pypi

    • maven

    • nuget

    " + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    " + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package.

    " + }, + "versions":{ + "shape":"PackageVersionSummaryList", + "documentation":"

    The returned list of PackageVersionSummary objects.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are additional results, this is the token for the next set of results.

    " + } + } + }, + "ListPackagesMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListPackagesRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The domain that contains the repository that contains the requested list of packages.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository from which packages are to be listed.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    The format of the packages. The valid package types are:

    • npm: A Node Package Manager (npm) package.

    • pypi: A Python Package Index (PyPI) package.

    • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

    • nuget: A NuGet package.

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "packagePrefix":{ + "shape":"PackageName", + "documentation":"

    A prefix used to filter returned packages. Only packages with names that start with packagePrefix are returned.

    ", + "location":"querystring", + "locationName":"package-prefix" + }, + "maxResults":{ + "shape":"ListPackagesMaxResults", + "documentation":"

    The maximum number of results to return per page.

    ", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackagesResult":{ + "type":"structure", + "members":{ + "packages":{ + "shape":"PackageSummaryList", + "documentation":"

    The list of returned PackageSummary objects.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are additional results, this is the token for the next set of results.

    " + } + } + }, + "ListRepositoriesInDomainMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListRepositoriesInDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the returned list of repositories.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "administratorAccount":{ + "shape":"AccountId", + "documentation":"

    Filters the list of repositories to include only those that are managed by the specified AWS account ID.

    ", + "location":"querystring", + "locationName":"administrator-account" + }, + "repositoryPrefix":{ + "shape":"RepositoryName", + "documentation":"

    A prefix used to filter returned repositories. Only repositories with names that start with repositoryPrefix are returned.

    ", + "location":"querystring", + "locationName":"repository-prefix" + }, + "maxResults":{ + "shape":"ListRepositoriesInDomainMaxResults", + "documentation":"

    The maximum number of results to return per page.

    ", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListRepositoriesInDomainResult":{ + "type":"structure", + "members":{ + "repositories":{ + "shape":"RepositorySummaryList", + "documentation":"

    The returned list of repositories.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are additional results, this is the token for the next set of results.

    " + } + } + }, + "ListRepositoriesMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListRepositoriesRequest":{ + "type":"structure", + "members":{ + "repositoryPrefix":{ + "shape":"RepositoryName", + "documentation":"

    A prefix used to filter returned repositories. Only repositories with names that start with repositoryPrefix are returned.

    ", + "location":"querystring", + "locationName":"repository-prefix" + }, + "maxResults":{ + "shape":"ListRepositoriesMaxResults", + "documentation":"

    The maximum number of results to return per page.

    ", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListRepositoriesResult":{ + "type":"structure", + "members":{ + "repositories":{ + "shape":"RepositorySummaryList", + "documentation":"

    The returned list of RepositorySummary objects.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are additional results, this is the token for the next set of results.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource to get tags for.

    ", + "location":"querystring", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResult":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagList", + "documentation":"

    A list of tag key and value pairs associated with the specified resource.

    " + } + } + }, + "Long":{"type":"long"}, + "LongOptional":{"type":"long"}, + "PackageDependency":{ + "type":"structure", + "members":{ + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    " + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package that this package depends on.

    " + }, + "dependencyType":{ + "shape":"String", + "documentation":"

    The type of a package dependency. The possible values depend on the package type. Example types are compile, runtime, and test for Maven packages, and dev, prod, and optional for npm packages.

    " + }, + "versionRequirement":{ + "shape":"String", + "documentation":"

    The required version, or version range, of the package that this package depends on. The version format is specific to the package type. For example, the following are possible valid required versions: 1.2.3, ^2.3.4, or 4.x.

    " + } + }, + "documentation":"

    Details about a package dependency.

    " + }, + "PackageDependencyList":{ + "type":"list", + "member":{"shape":"PackageDependency"} + }, + "PackageFormat":{ + "type":"string", + "enum":[ + "npm", + "pypi", + "maven", + "nuget" + ] + }, + "PackageName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^!#/\\s]+" + }, + "PackageNamespace":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^!#/\\s]+" + }, + "PackageSummary":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

    The format of the package. Valid values are:

    • npm

    • pypi

    • maven

    • nuget

    " + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    " + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package.

    " + } + }, + "documentation":"

    Details about a package, including its format, namespace, and name. The ListPackages operation returns a list of PackageSummary objects.

    " + }, + "PackageSummaryList":{ + "type":"list", + "member":{"shape":"PackageSummary"} + }, + "PackageVersion":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^!#/\\s]+" + }, + "PackageVersionDescription":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

    The format of the package version. The valid package formats are:

    • npm: A Node Package Manager (npm) package.

    • pypi: A Python Package Index (PyPI) package.

    • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

    • nuget: A NuGet package.

    " + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    " + }, + "packageName":{ + "shape":"PackageName", + "documentation":"

    The name of the requested package.

    " + }, + "displayName":{ + "shape":"String255", + "documentation":"

    The name of the package that is displayed. The displayName varies depending on the package version's format. For example, if an npm package is named ui, is in the namespace vue, and has the format npm, then the displayName is @vue/ui.

    " + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

    The version of the package.

    " + }, + "summary":{ + "shape":"String", + "documentation":"

    A summary of the package version. The summary is extracted from the package. The information in and detail level of the summary depends on the package version's format.

    " + }, + "homePage":{ + "shape":"String", + "documentation":"

    The homepage associated with the package.

    " + }, + "sourceCodeRepository":{ + "shape":"String", + "documentation":"

    The repository for the source code in the package version, or the source code used to build it.

    " + }, + "publishedTime":{ + "shape":"Timestamp", + "documentation":"

    A timestamp that contains the date and time the package version was published.

    " + }, + "licenses":{ + "shape":"LicenseInfoList", + "documentation":"

    Information about licenses associated with the package version.

    " + }, + "revision":{ + "shape":"PackageVersionRevision", + "documentation":"

    The revision of the package version.

    " + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

    A string that contains the status of the package version. It can be one of the following:

    • Published

    • Unfinished

    • Unlisted

    • Archived

    • Disposed

    " + } + }, + "documentation":"

    Details about a package version.

    " + }, + "PackageVersionError":{ + "type":"structure", + "members":{ + "errorCode":{ + "shape":"PackageVersionErrorCode", + "documentation":"

    The error code associated with the error. Valid error codes are:

    • ALREADY_EXISTS

    • MISMATCHED_REVISION

    • MISMATCHED_STATUS

    • NOT_ALLOWED

    • NOT_FOUND

    • SKIPPED

    " + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    The error message associated with the error.

    " + } + }, + "documentation":"

    An error associated with a package version.

    " + }, + "PackageVersionErrorCode":{ + "type":"string", + "enum":[ + "ALREADY_EXISTS", + "MISMATCHED_REVISION", + "MISMATCHED_STATUS", + "NOT_ALLOWED", + "NOT_FOUND", + "SKIPPED" + ] + }, + "PackageVersionErrorMap":{ + "type":"map", + "key":{"shape":"PackageVersion"}, + "value":{"shape":"PackageVersionError"} + }, + "PackageVersionList":{ + "type":"list", + "member":{"shape":"PackageVersion"}, + "max":100 + }, + "PackageVersionRevision":{ + "type":"string", + "max":50, + "min":1, + "pattern":"\\S+" + }, + "PackageVersionRevisionMap":{ + "type":"map", + "key":{"shape":"PackageVersion"}, + "value":{"shape":"PackageVersionRevision"} + }, + "PackageVersionSortType":{ + "type":"string", + "enum":["PUBLISHED_TIME"] + }, + "PackageVersionStatus":{ + "type":"string", + "enum":[ + "Published", + "Unfinished", + "Unlisted", + "Archived", + "Disposed", + "Deleted" + ] + }, + "PackageVersionSummary":{ + "type":"structure", + "required":[ + "version", + "status" + ], + "members":{ + "version":{ + "shape":"PackageVersion", + "documentation":"

    Information about a package version.

    " + }, + "revision":{ + "shape":"PackageVersionRevision", + "documentation":"

    The revision associated with a package version.

    " + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

    A string that contains the status of the package version. It can be one of the following:

    • Published

    • Unfinished

    • Unlisted

    • Archived

    • Disposed

    " + } + }, + "documentation":"

    Details about a package version, including its status, version, and revision. The ListPackageVersions operation returns a list of PackageVersionSummary objects.

    " + }, + "PackageVersionSummaryList":{ + "type":"list", + "member":{"shape":"PackageVersionSummary"} + }, + "PaginationToken":{ + "type":"string", + "max":2000, + "min":1, + "pattern":"\\S+" + }, + "PolicyDocument":{ + "type":"string", + "max":5120, + "min":1 + }, + "PolicyRevision":{ + "type":"string", + "max":100, + "min":1, + "pattern":"\\S+" + }, + "PutDomainPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "policyDocument" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain on which to set the resource policy.

    " + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    " + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

    The current revision of the resource policy to be set. This revision is used for optimistic locking, which prevents others from overwriting your changes to the domain's resource policy.

    " + }, + "policyDocument":{ + "shape":"PolicyDocument", + "documentation":"

    A valid displayable JSON Aspen policy string to be set as the access control resource policy on the provided domain.

    " + } + } + }, + "PutDomainPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

    The resource policy that was set after processing the request.

    " + } + } + }, + "PutRepositoryPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "policyDocument" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain containing the repository to set the resource policy on.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to set the resource policy on.

    ", + "location":"querystring", + "locationName":"repository" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

    Sets the revision of the resource policy that specifies permissions to access the repository. This revision is used for optimistic locking, which prevents others from overwriting your changes to the repository's resource policy.

    " + }, + "policyDocument":{ + "shape":"PolicyDocument", + "documentation":"

    A valid displayable JSON Aspen policy string to be set as the access control resource policy on the provided repository.

    " + } + } + }, + "PutRepositoryPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

    The resource policy that was set after processing the request.

    " + } + } + }, + "RepositoryDescription":{ + "type":"structure", + "members":{ + "name":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository.

    " + }, + "administratorAccount":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that manages the repository.

    " + }, + "domainName":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository.

    " + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain that contains the repository. It does not include dashes or spaces.

    " + }, + "arn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the repository.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    A text description of the repository.

    " + }, + "upstreams":{ + "shape":"UpstreamRepositoryInfoList", + "documentation":"

    A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more information, see Working with upstream repositories.

    " + }, + "externalConnections":{ + "shape":"RepositoryExternalConnectionInfoList", + "documentation":"

    An array of external connections associated with the repository.

    " + } + }, + "documentation":"

    The details of a repository stored in AWS CodeArtifact. A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets. Repositories are polyglot—a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools like the npm CLI, the Maven CLI (mvn), and pip. You can create up to 100 repositories per AWS account.

    " + }, + "RepositoryExternalConnectionInfo":{ + "type":"structure", + "members":{ + "externalConnectionName":{ + "shape":"ExternalConnectionName", + "documentation":"

    The name of the external connection associated with a repository.

    " + }, + "packageFormat":{ + "shape":"PackageFormat", + "documentation":"

    The package format associated with a repository's external connection. The valid package formats are:

    • npm: A Node Package Manager (npm) package.

    • pypi: A Python Package Index (PyPI) package.

    • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

    • nuget: A NuGet package.

    " + }, + "status":{ + "shape":"ExternalConnectionStatus", + "documentation":"

    The status of the external connection of a repository. There is one valid value, Available.

    " + } + }, + "documentation":"

    Contains information about the external connection of a repository.

    " + }, + "RepositoryExternalConnectionInfoList":{ + "type":"list", + "member":{"shape":"RepositoryExternalConnectionInfo"} + }, + "RepositoryName":{ + "type":"string", + "max":100, + "min":2, + "pattern":"[A-Za-z0-9][A-Za-z0-9._\\-]{1,99}" + }, + "RepositorySummary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository.

    " + }, + "administratorAccount":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID that manages the repository.

    " + }, + "domainName":{ + "shape":"DomainName", + "documentation":"

    The name of the domain that contains the repository.

    " + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    " + }, + "arn":{ + "shape":"Arn", + "documentation":"

    The ARN of the repository.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the repository.

    " + } + }, + "documentation":"

    Details about a repository, including its Amazon Resource Name (ARN), description, and domain information. The ListRepositories operation returns a list of RepositorySummary objects.

    " + }, + "RepositorySummaryList":{ + "type":"list", + "member":{"shape":"RepositorySummary"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of AWS resource.

    " + } + }, + "documentation":"

    The operation did not succeed because the resource requested is not found in the service.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourcePolicy":{ + "type":"structure", + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the resource associated with the resource policy.

    " + }, + "revision":{ + "shape":"PolicyRevision", + "documentation":"

    The current revision of the resource policy.

    " + }, + "document":{ + "shape":"PolicyDocument", + "documentation":"

    The resource policy formatted in JSON.

    " + } + }, + "documentation":"

    An AWS CodeArtifact resource policy that contains a resource ARN, document details, and a revision.

    " + }, + "ResourceType":{ + "type":"string", + "enum":[ + "domain", + "repository", + "package", + "package-version", + "asset" + ] + }, + "RetryAfterSeconds":{"type":"integer"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of AWS resource.

    " + } + }, + "documentation":"

    The operation did not succeed because it would have exceeded a service limit for your account.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "String":{"type":"string"}, + "String255":{ + "type":"string", + "max":255, + "min":1 + }, + "SuccessfulPackageVersionInfo":{ + "type":"structure", + "members":{ + "revision":{ + "shape":"String", + "documentation":"

    The revision of a package version.

    " + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

    The status of a package version. Valid statuses are:

    • Published

    • Unfinished

    • Unlisted

    • Archived

    • Disposed

    " + } + }, + "documentation":"

    Contains the revision and status of a package version.

    " + }, + "SuccessfulPackageVersionInfoMap":{ + "type":"map", + "key":{"shape":"PackageVersion"}, + "value":{"shape":"SuccessfulPackageVersionInfo"} + }, + "Tag":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"TagKey", + "documentation":"

    The tag key.

    " + }, + "value":{ + "shape":"TagValue", + "documentation":"

    The tag value.

    " + } + }, + "documentation":"

    A tag is a key-value pair that can be used to manage, search for, or filter resources in AWS CodeArtifact.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to add or update tags for.

    ", + "location":"querystring", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagList", + "documentation":"

    The tags you want to modify or add to the resource.

    " + } + } + }, + "TagResourceResult":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

    The time period, in seconds, to wait before retrying the request.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    The operation did not succeed because too many requests are sent to the service.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to remove tags from.

    ", + "location":"querystring", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag key for each tag that you want to remove from the resource.

    " + } + } + }, + "UntagResourceResult":{ + "type":"structure", + "members":{ + } + }, + "UpdatePackageVersionsStatusRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "versions", + "targetStatus" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The domain that contains the repository that contains the package versions with a status to be updated.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The repository that contains the package versions with the status you want to update.

    ", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

    A format that specifies the type of the package with the statuses to update. The valid values are:

    • npm

    • pypi

    • maven

    • nuget

    ", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

    The namespace of the package. The package component that specifies its namespace depends on its type. For example:

    • The namespace of a Maven package is its groupId.

    • The namespace of an npm package is its scope.

    • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

    • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

    The name of the package with the version statuses to update.

    ", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

    An array of strings that specify the versions of the package with the statuses to update.

    " + }, + "versionRevisions":{ + "shape":"PackageVersionRevisionMap", + "documentation":"

    A map of package versions and package version revisions. The map key is the package version (for example, 3.5.2), and the map value is the package version revision.

    " + }, + "expectedStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

    The package version’s expected status before it is updated. If expectedStatus is provided, the package version's status is updated only if its status at the time UpdatePackageVersionsStatus is called matches expectedStatus.

    " + }, + "targetStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

    The status you want to change the package version status to.

    " + } + } + }, + "UpdatePackageVersionsStatusResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

    A list of SuccessfulPackageVersionInfo objects, one for each package version whose status was successfully updated.

    " + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

    A list of PackageVersionError objects, one for each package version whose status failed to update.

    " + } + } + }, + "UpdateRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

    The name of the domain associated with the repository to update.

    ", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

    The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

    ", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to update.

    ", + "location":"querystring", + "locationName":"repository" + }, + "description":{ + "shape":"Description", + "documentation":"

    An updated repository description.

    " + }, + "upstreams":{ + "shape":"UpstreamRepositoryList", + "documentation":"

    A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more information, see Working with upstream repositories.

    " + } + } + }, + "UpdateRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

    The updated repository.

    " + } + } + }, + "UpstreamRepository":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of an upstream repository.

    " + } + }, + "documentation":"

    Information about an upstream repository. A list of UpstreamRepository objects is an input parameter to CreateRepository and UpdateRepository.

    " + }, + "UpstreamRepositoryInfo":{ + "type":"structure", + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of an upstream repository.

    " + } + }, + "documentation":"

    Information about an upstream repository.

    " + }, + "UpstreamRepositoryInfoList":{ + "type":"list", + "member":{"shape":"UpstreamRepositoryInfo"} + }, + "UpstreamRepositoryList":{ + "type":"list", + "member":{"shape":"UpstreamRepository"} + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    " + } + }, + "documentation":"

    The operation did not succeed because a parameter in the request was sent with an invalid value.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "CANNOT_PARSE", + "ENCRYPTION_KEY_ERROR", + "FIELD_VALIDATION_FAILED", + "UNKNOWN_OPERATION", + "OTHER" + ] + } + }, + "documentation":"

    AWS CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, NuGet, and pip. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.

    AWS CodeArtifact Components

    Use the information in this guide to help you work with the following CodeArtifact components:

    • Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools like the npm CLI, the NuGet CLI, the Maven CLI (mvn), and pip.

    • Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in AWS Key Management Service (AWS KMS).

      Each repository is a member of a single domain and can't be moved to a different domain.

      The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages.

      Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization.

    • Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, and NuGet package formats.

      In CodeArtifact, a package consists of:

      • A name (for example, webpack is the name of a popular npm package)

      • An optional namespace (for example, @types in @types/node)

      • A set of versions (for example, 1.0.0, 1.0.1, 1.0.2, etc.)

      • Package-level metadata (for example, npm tags)

    • Package version: A version of a package, such as @types/node 12.6.9. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets.

    • Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories.

    • Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz file or Maven POM and JAR files.

    CodeArtifact supports these operations:

    • AssociateExternalConnection: Adds an existing external connection to a repository.

    • CopyPackageVersions: Copies package versions from one repository to another repository in the same domain.

    • CreateDomain: Creates a domain.

    • CreateRepository: Creates a CodeArtifact repository in a domain.

    • DeleteDomain: Deletes a domain. You cannot delete a domain that contains repositories.

    • DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain.

    • DeletePackageVersions: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage.

    • DeleteRepository: Deletes a repository.

    • DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository.

    • DescribeDomain: Returns a DomainDescription object that contains information about the requested domain.

    • DescribePackageVersion: Returns a PackageVersionDescription object that contains details about a package version.

    • DescribeRepository: Returns a RepositoryDescription object that contains detailed information about the requested repository.

    • DisposePackageVersions: Disposes versions of a package. A package version with the status Disposed cannot be restored because its assets and metadata have been permanently removed from storage.

    • DisassociateExternalConnection: Removes an existing external connection from a repository.

    • GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires after the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours. (A minimal SDK usage sketch follows this operation list.)

    • GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to the specified domain.

    • GetPackageVersionAsset: Returns the contents of an asset that is in a package version.

    • GetPackageVersionReadme: Gets the readme file or descriptive text for a package version.

    • GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

      • npm

      • pypi

      • maven

      • nuget

    • GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository.

    • ListDomains: Returns a list of DomainSummary objects. Each returned DomainSummary object contains information about a domain.

    • ListPackages: Lists the packages in a repository.

    • ListPackageVersionAssets: Lists the assets for a given package version.

    • ListPackageVersionDependencies: Returns a list of the direct dependencies for a package version.

    • ListPackageVersions: Returns a list of package versions for a specified package in a repository.

    • ListRepositories: Returns a list of repositories owned by the AWS account that called this method.

    • ListRepositoriesInDomain: Returns a list of the repositories in a domain.

    • ListTagsForResource: Returns a list of the tags associated with a resource.

    • PutDomainPermissionsPolicy: Attaches a resource policy to a domain.

    • PutRepositoryPermissionsPolicy: Sets the resource policy on a repository that specifies permissions to access it.

    • TagResource: Adds or updates tags for a resource.

    • UntagResource: Removes a tag from a resource.

    • UpdatePackageVersionsStatus: Updates the status of one or more versions of a package.

    • UpdateRepository: Updates the properties of a repository.

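    As referenced above, the following is a minimal, hedged sketch (AWS SDK for Java v2) of fetching an authorization token and the npm endpoint of a repository; the domain and repository names are placeholders.

        import software.amazon.awssdk.services.codeartifact.CodeartifactClient;
        import software.amazon.awssdk.services.codeartifact.model.GetAuthorizationTokenRequest;
        import software.amazon.awssdk.services.codeartifact.model.GetRepositoryEndpointRequest;
        import software.amazon.awssdk.services.codeartifact.model.PackageFormat;

        public class RepoAccessExample {
            public static void main(String[] args) {
                try (CodeartifactClient codeartifact = CodeartifactClient.create()) {
                    // Temporary token for package-manager clients; 900 seconds here, up to the 12-hour maximum.
                    String token = codeartifact.getAuthorizationToken(GetAuthorizationTokenRequest.builder()
                            .domain("my-domain")
                            .durationSeconds(900L)
                            .build())
                            .authorizationToken();

                    // Each repository has one endpoint per package format; ask for the npm endpoint.
                    String npmEndpoint = codeartifact.getRepositoryEndpoint(GetRepositoryEndpointRequest.builder()
                            .domain("my-domain")
                            .repository("my-repo")
                            .format(PackageFormat.NPM)
                            .build())
                            .repositoryEndpoint();

                    System.out.println("npm registry: " + npmEndpoint + " (token length: " + token.length() + ")");
                }
            }
        }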
    " +} diff --git a/services/codebuild/build.properties b/services/codebuild/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/codebuild/build.properties +++ b/services/codebuild/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index f3b811687ecc..cb54e97c3485 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + codeguruprofiler + AWS Java SDK :: Services :: CodeGuruProfiler + The AWS Java SDK for CodeGuruProfiler module holds the client classes that are used for + communicating with CodeGuruProfiler. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.codeguruprofiler + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/codeguruprofiler/src/main/resources/codegen-resources/paginators-1.json b/services/codeguruprofiler/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..67d53f6bf119 --- /dev/null +++ b/services/codeguruprofiler/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,25 @@ +{ + "pagination": { + "GetFindingsReportAccountSummary": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListFindingsReports": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListProfileTimes": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "profileTimes" + }, + "ListProfilingGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + } + } +} diff --git a/services/codeguruprofiler/src/main/resources/codegen-resources/service-2.json b/services/codeguruprofiler/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..308588c1daea --- /dev/null +++ b/services/codeguruprofiler/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2091 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-07-18", + "endpointPrefix":"codeguru-profiler", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon CodeGuru Profiler", + "serviceId":"CodeGuruProfiler", + "signatureVersion":"v4", + "signingName":"codeguru-profiler", + "uid":"codeguruprofiler-2019-07-18" + }, + "operations":{ + "AddNotificationChannels":{ + "name":"AddNotificationChannels", + "http":{ + "method":"POST", + "requestUri":"/profilingGroups/{profilingGroupName}/notificationConfiguration", + "responseCode":200 + }, + "input":{"shape":"AddNotificationChannelsRequest"}, + "output":{"shape":"AddNotificationChannelsResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Add up to 2 anomaly notification channels for a profiling group.

    " + }, + "BatchGetFrameMetricData":{ + "name":"BatchGetFrameMetricData", + "http":{ + "method":"POST", + "requestUri":"/profilingGroups/{profilingGroupName}/frames/-/metrics", + "responseCode":200 + }, + "input":{"shape":"BatchGetFrameMetricDataRequest"}, + "output":{"shape":"BatchGetFrameMetricDataResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns the time series of values for a requested list of frame metrics from a time period.

    " + }, + "ConfigureAgent":{ + "name":"ConfigureAgent", + "http":{ + "method":"POST", + "requestUri":"/profilingGroups/{profilingGroupName}/configureAgent", + "responseCode":200 + }, + "input":{"shape":"ConfigureAgentRequest"}, + "output":{"shape":"ConfigureAgentResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Used by profiler agents to report their current state and to receive remote configuration updates. For example, ConfigureAgent can be used to tell an agent whether to profile or not and for how long to return profiling data.

    " + }, + "CreateProfilingGroup":{ + "name":"CreateProfilingGroup", + "http":{ + "method":"POST", + "requestUri":"/profilingGroups", + "responseCode":201 + }, + "input":{"shape":"CreateProfilingGroupRequest"}, + "output":{"shape":"CreateProfilingGroupResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a profiling group.

    ", + "idempotent":true + }, + "DeleteProfilingGroup":{ + "name":"DeleteProfilingGroup", + "http":{ + "method":"DELETE", + "requestUri":"/profilingGroups/{profilingGroupName}", + "responseCode":204 + }, + "input":{"shape":"DeleteProfilingGroupRequest"}, + "output":{"shape":"DeleteProfilingGroupResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Deletes a profiling group.

    ", + "idempotent":true + }, + "DescribeProfilingGroup":{ + "name":"DescribeProfilingGroup", + "http":{ + "method":"GET", + "requestUri":"/profilingGroups/{profilingGroupName}", + "responseCode":200 + }, + "input":{"shape":"DescribeProfilingGroupRequest"}, + "output":{"shape":"DescribeProfilingGroupResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns a ProfilingGroupDescription object that contains information about the requested profiling group.

    " + }, + "GetFindingsReportAccountSummary":{ + "name":"GetFindingsReportAccountSummary", + "http":{ + "method":"GET", + "requestUri":"/internal/findingsReports", + "responseCode":200 + }, + "input":{"shape":"GetFindingsReportAccountSummaryRequest"}, + "output":{"shape":"GetFindingsReportAccountSummaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns a list of FindingsReportSummary objects that contain analysis results for all profiling groups in your AWS account.

    " + }, + "GetNotificationConfiguration":{ + "name":"GetNotificationConfiguration", + "http":{ + "method":"GET", + "requestUri":"/profilingGroups/{profilingGroupName}/notificationConfiguration", + "responseCode":200 + }, + "input":{"shape":"GetNotificationConfigurationRequest"}, + "output":{"shape":"GetNotificationConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Get the current configuration for anomaly notifications for a profiling group.

    " + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"GET", + "requestUri":"/profilingGroups/{profilingGroupName}/policy", + "responseCode":200 + }, + "input":{"shape":"GetPolicyRequest"}, + "output":{"shape":"GetPolicyResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns the JSON-formatted resource-based policy on a profiling group.

    " + }, + "GetProfile":{ + "name":"GetProfile", + "http":{ + "method":"GET", + "requestUri":"/profilingGroups/{profilingGroupName}/profile", + "responseCode":200 + }, + "input":{"shape":"GetProfileRequest"}, + "output":{"shape":"GetProfileResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets the aggregated profile of a profiling group for a specified time range. Amazon CodeGuru Profiler collects posted agent profiles for a profiling group into aggregated profiles.

     <note> <p> Because aggregated profiles expire over time <code>GetProfile</code> is not idempotent. </p> </note> <p> Specify the time range for the requested aggregated profile using 1 or 2 of the following parameters: <code>startTime</code>, <code>endTime</code>, <code>period</code>. The maximum time range allowed is 7 days. If you specify all 3 parameters, an exception is thrown. If you specify only <code>period</code>, the latest aggregated profile is returned. </p> <p> Aggregated profiles are available with aggregation periods of 5 minutes, 1 hour, and 1 day, aligned to UTC. The aggregation period of an aggregated profile determines how long it is retained. For more information, see <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_AggregatedProfileTime.html"> <code>AggregatedProfileTime</code> </a>. The aggregated profile's aggregation period determines how long it is retained by CodeGuru Profiler. </p> <ul> <li> <p> If the aggregation period is 5 minutes, the aggregated profile is retained for 15 days. </p> </li> <li> <p> If the aggregation period is 1 hour, the aggregated profile is retained for 60 days. </p> </li> <li> <p> If the aggregation period is 1 day, the aggregated profile is retained for 3 years. </p> </li> </ul> <p>There are two use cases for calling <code>GetProfile</code>.</p> <ol> <li> <p> If you want to return an aggregated profile that already exists, use <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_ListProfileTimes.html"> <code>ListProfileTimes</code> </a> to view the time ranges of existing aggregated profiles. Use them in a <code>GetProfile</code> request to return a specific, existing aggregated profile. </p> </li> <li> <p> If you want to return an aggregated profile for a time range that doesn't align with an existing aggregated profile, then CodeGuru Profiler makes a best effort to combine existing aggregated profiles from the requested time range and return them as one aggregated profile. </p> <p> If aggregated profiles do not exist for the full time range requested, then aggregated profiles for a smaller time range are returned. For example, if the requested time range is from 00:00 to 00:20, and the existing aggregated profiles are from 00:15 and 00:25, then the aggregated profiles from 00:15 to 00:20 are returned. </p> </li> </ol> 
    " + }, + "GetRecommendations":{ + "name":"GetRecommendations", + "http":{ + "method":"GET", + "requestUri":"/internal/profilingGroups/{profilingGroupName}/recommendations", + "responseCode":200 + }, + "input":{"shape":"GetRecommendationsRequest"}, + "output":{"shape":"GetRecommendationsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns a list of Recommendation objects that contain recommendations for a profiling group for a given time period. A list of Anomaly objects that contains details about anomalies detected in the profiling group for the same time period is also returned.

    " + }, + "ListFindingsReports":{ + "name":"ListFindingsReports", + "http":{ + "method":"GET", + "requestUri":"/internal/profilingGroups/{profilingGroupName}/findingsReports", + "responseCode":200 + }, + "input":{"shape":"ListFindingsReportsRequest"}, + "output":{"shape":"ListFindingsReportsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    List the available reports for a given profiling group and time range.

    " + }, + "ListProfileTimes":{ + "name":"ListProfileTimes", + "http":{ + "method":"GET", + "requestUri":"/profilingGroups/{profilingGroupName}/profileTimes", + "responseCode":200 + }, + "input":{"shape":"ListProfileTimesRequest"}, + "output":{"shape":"ListProfileTimesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists the start times of the available aggregated profiles of a profiling group for an aggregation period within the specified time range.

    " + }, + "ListProfilingGroups":{ + "name":"ListProfilingGroups", + "http":{ + "method":"GET", + "requestUri":"/profilingGroups", + "responseCode":200 + }, + "input":{"shape":"ListProfilingGroupsRequest"}, + "output":{"shape":"ListProfilingGroupsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns a list of profiling groups. The profiling groups are returned as ProfilingGroupDescription objects.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns a list of the tags that are assigned to a specified resource.

    " + }, + "PostAgentProfile":{ + "name":"PostAgentProfile", + "http":{ + "method":"POST", + "requestUri":"/profilingGroups/{profilingGroupName}/agentProfile", + "responseCode":204 + }, + "input":{"shape":"PostAgentProfileRequest"}, + "output":{"shape":"PostAgentProfileResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Submits profiling data to an aggregated profile of a profiling group. To get an aggregated profile that is created with this profiling data, use GetProfile.

    " + }, + "PutPermission":{ + "name":"PutPermission", + "http":{ + "method":"PUT", + "requestUri":"/profilingGroups/{profilingGroupName}/policy/{actionGroup}", + "responseCode":200 + }, + "input":{"shape":"PutPermissionRequest"}, + "output":{"shape":"PutPermissionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Adds permissions to a profiling group's resource-based policy that are provided using an action group. If a profiling group doesn't have a resource-based policy, one is created for it using the permissions in the action group and the roles and users in the principals parameter.

     <p> The one supported action group that can be added is <code>agentPermission</code> which grants <code>ConfigureAgent</code> and <code>PostAgent</code> permissions. For more information, see <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-ug/resource-based-policies.html">Resource-based policies in CodeGuru Profiler</a> in the <i>Amazon CodeGuru Profiler User Guide</i>, <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_ConfigureAgent.html"> <code>ConfigureAgent</code> </a>, and <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_PostAgentProfile.html"> <code>PostAgentProfile</code> </a>. </p> <p> The first time you call <code>PutPermission</code> on a profiling group, do not specify a <code>revisionId</code> because it doesn't have a resource-based policy. Subsequent calls must provide a <code>revisionId</code> to specify which revision of the resource-based policy to add the permissions to. </p> <p> The response contains the profiling group's JSON-formatted resource policy. </p> 
    ", + "idempotent":true + }, + "RemoveNotificationChannel":{ + "name":"RemoveNotificationChannel", + "http":{ + "method":"DELETE", + "requestUri":"/profilingGroups/{profilingGroupName}/notificationConfiguration/{channelId}", + "responseCode":200 + }, + "input":{"shape":"RemoveNotificationChannelRequest"}, + "output":{"shape":"RemoveNotificationChannelResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Remove one anomaly notification channel for a profiling group.

    ", + "idempotent":true + }, + "RemovePermission":{ + "name":"RemovePermission", + "http":{ + "method":"DELETE", + "requestUri":"/profilingGroups/{profilingGroupName}/policy/{actionGroup}", + "responseCode":200 + }, + "input":{"shape":"RemovePermissionRequest"}, + "output":{"shape":"RemovePermissionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes permissions from a profiling group's resource-based policy that are provided using an action group. The one supported action group that can be removed is agentPermissions, which grants ConfigureAgent and PostAgentProfile permissions. For more information, see Resource-based policies in CodeGuru Profiler in the Amazon CodeGuru Profiler User Guide, ConfigureAgent, and PostAgentProfile.

    " + }, + "SubmitFeedback":{ + "name":"SubmitFeedback", + "http":{ + "method":"POST", + "requestUri":"/internal/profilingGroups/{profilingGroupName}/anomalies/{anomalyInstanceId}/feedback", + "responseCode":204 + }, + "input":{"shape":"SubmitFeedbackRequest"}, + "output":{"shape":"SubmitFeedbackResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Sends feedback to CodeGuru Profiler about whether the anomaly detected by the analysis is useful or not.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Use to assign one or more tags to a resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Use to remove one or more tags from a resource.

    ", + "idempotent":true + }, + "UpdateProfilingGroup":{ + "name":"UpdateProfilingGroup", + "http":{ + "method":"PUT", + "requestUri":"/profilingGroups/{profilingGroupName}", + "responseCode":200 + }, + "input":{"shape":"UpdateProfilingGroupRequest"}, + "output":{"shape":"UpdateProfilingGroupResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Updates a profiling group.

    ", + "idempotent":true + } + }, + "shapes":{ + "ActionGroup":{ + "type":"string", + "enum":["agentPermissions"] + }, + "AddNotificationChannelsRequest":{ + "type":"structure", + "required":[ + "channels", + "profilingGroupName" + ], + "members":{ + "channels":{ + "shape":"Channels", + "documentation":"

    One or 2 channels to report to when anomalies are detected.

    " + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group that we are setting up notifications for.

    ", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

    The structure representing the AddNotificationChannelsRequest.

    " + }, + "AddNotificationChannelsResponse":{ + "type":"structure", + "members":{ + "notificationConfiguration":{ + "shape":"NotificationConfiguration", + "documentation":"

    The new notification configuration for this profiling group.

    " + } + }, + "documentation":"

    The structure representing the AddNotificationChannelsResponse.

    " + }, + "AgentConfiguration":{ + "type":"structure", + "required":[ + "periodInSeconds", + "shouldProfile" + ], + "members":{ + "agentParameters":{ + "shape":"AgentParameters", + "documentation":"

    Parameters used by the profiler. The valid parameters are:

    • MaxStackDepth - The maximum depth of the stacks in the code that is represented in the profile. For example, if CodeGuru Profiler finds a method A, which calls method B, which calls method C, which calls method D, then the depth is 4. If MaxStackDepth is set to 2, then the profiler evaluates A and B.

    • MemoryUsageLimitPercent - The percentage of memory that is used by the profiler.

    • MinimumTimeForReportingInMilliseconds - The minimum time in milliseconds between sending reports.

    • ReportingIntervalInMilliseconds - The reporting interval in milliseconds used to report profiles.

    • SamplingIntervalInMilliseconds - The sampling interval in milliseconds that is used to profile samples.

    " + }, + "periodInSeconds":{ + "shape":"Integer", + "documentation":"

    How long a profiling agent should send profiling data using ConfigureAgent. For example, if this is set to 300, the profiling agent calls ConfigureAgent every 5 minutes to submit the profiled data collected during that period.

    " + }, + "shouldProfile":{ + "shape":"Boolean", + "documentation":"

    A Boolean that specifies whether the profiling agent collects profiling data or not. Set to true to enable profiling.

    " + } + }, + "documentation":"

    The response of ConfigureAgent that specifies if an agent profiles or not and for how long to return profiling data.

    " + }, + "AgentOrchestrationConfig":{ + "type":"structure", + "required":["profilingEnabled"], + "members":{ + "profilingEnabled":{ + "shape":"Boolean", + "documentation":"

    A Boolean that specifies whether the profiling agent collects profiling data or not. Set to true to enable profiling.

    " + } + }, + "documentation":"

    Specifies whether profiling is enabled or disabled for a profiling group. It is used by ConfigureAgent to enable or disable profiling for a profiling group.

    " + }, + "AgentParameterField":{ + "type":"string", + "enum":[ + "MaxStackDepth", + "MemoryUsageLimitPercent", + "MinimumTimeForReportingInMilliseconds", + "ReportingIntervalInMilliseconds", + "SamplingIntervalInMilliseconds" + ] + }, + "AgentParameters":{ + "type":"map", + "key":{"shape":"AgentParameterField"}, + "value":{"shape":"String"} + }, + "AgentProfile":{"type":"blob"}, + "AggregatedProfile":{"type":"blob"}, + "AggregatedProfileTime":{ + "type":"structure", + "members":{ + "period":{ + "shape":"AggregationPeriod", + "documentation":"

    The aggregation period. This indicates the period during which an aggregation profile collects posted agent profiles for a profiling group. Use one of three valid durations that are specified using the ISO 8601 format.

    • P1D — 1 day

    • PT1H — 1 hour

    • PT5M — 5 minutes

    " + }, + "start":{ + "shape":"Timestamp", + "documentation":"

    The time that aggregation of posted agent profiles for a profiling group starts. The aggregation profile contains profiles posted by the agent starting at this time for an aggregation period specified by the period property of the AggregatedProfileTime object.

    Specify start using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + } + }, + "documentation":"

    Specifies the aggregation period and aggregation start time for an aggregated profile. An aggregated profile is used to collect posted agent profiles during an aggregation period. There are three possible aggregation periods (1 day, 1 hour, or 5 minutes).

    " + }, + "AggregationPeriod":{ + "type":"string", + "enum":[ + "P1D", + "PT1H", + "PT5M" + ] + }, + "Anomalies":{ + "type":"list", + "member":{"shape":"Anomaly"} + }, + "Anomaly":{ + "type":"structure", + "required":[ + "instances", + "metric", + "reason" + ], + "members":{ + "instances":{ + "shape":"AnomalyInstances", + "documentation":"

    A list of the instances of the detected anomalies during the requested period.

    " + }, + "metric":{ + "shape":"Metric", + "documentation":"

    Details about the metric that the analysis used when it detected the anomaly. The metric includes the name of the frame that was analyzed with the type and thread states used to derive the metric value for that frame.

    " + }, + "reason":{ + "shape":"String", + "documentation":"

    The reason for which metric was flagged as anomalous.

    " + } + }, + "documentation":"

    Details about an anomaly in a specific metric of application profile. The anomaly is detected using analysis of the metric data over a period of time.

    " + }, + "AnomalyInstance":{ + "type":"structure", + "required":[ + "id", + "startTime" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the period during which the metric is flagged as anomalous. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "id":{ + "shape":"String", + "documentation":"

    The universally unique identifier (UUID) of an instance of an anomaly in a metric.

    " + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the period during which the metric is flagged as anomalous. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "userFeedback":{ + "shape":"UserFeedback", + "documentation":"

    Feedback type on a specific instance of anomaly submitted by the user.

    " + } + }, + "documentation":"

    The specific duration in which the metric is flagged as anomalous.

    " + }, + "AnomalyInstanceId":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "AnomalyInstances":{ + "type":"list", + "member":{"shape":"AnomalyInstance"} + }, + "BatchGetFrameMetricDataRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the time period for the returned time series values. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    ", + "location":"querystring", + "locationName":"endTime" + }, + "frameMetrics":{ + "shape":"FrameMetrics", + "documentation":"

    The details of the metrics that are used to request a time series of values. The metric includes the name of the frame, the aggregation type to calculate the metric value for the frame, and the thread states to use to get the count for the metric value of the frame.

    " + }, + "period":{ + "shape":"Period", + "documentation":"

    The duration of the frame metrics used to return the time series values. Specify using the ISO 8601 format. The maximum period duration is one day (PT24H or P1D).

    ", + "location":"querystring", + "locationName":"period" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group associated with the frame metrics used to return the time series values.

    ", + "location":"uri", + "locationName":"profilingGroupName" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the time period for the frame metrics used to return the time series values. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    ", + "location":"querystring", + "locationName":"startTime" + }, + "targetResolution":{ + "shape":"AggregationPeriod", + "documentation":"

    The requested resolution of time steps for the returned time series of values. If the requested target resolution is not available due to data not being retained, we provide a best-effort result by falling back to the most granular available resolution after the target resolution. There are 3 valid values.

    • P1D — 1 day

    • PT1H — 1 hour

    • PT5M — 5 minutes

    ", + "location":"querystring", + "locationName":"targetResolution" + } + }, + "documentation":"

    The structure representing the BatchGetFrameMetricDataRequest.

    " + }, + "BatchGetFrameMetricDataResponse":{ + "type":"structure", + "required":[ + "endTime", + "endTimes", + "frameMetricData", + "resolution", + "startTime", + "unprocessedEndTimes" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the time period for the returned time series values. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "endTimes":{ + "shape":"ListOfTimestamps", + "documentation":"

    List of instances, or time steps, in the time series. For example, if the period is one day (PT24H) and the resolution is five minutes (PT5M), then there are 288 endTimes in the list, each five minutes apart.

    " + }, + "frameMetricData":{ + "shape":"FrameMetricData", + "documentation":"

    Details of the metrics to request a time series of values. The metric includes the name of the frame, the aggregation type to calculate the metric value for the frame, and the thread states to use to get the count for the metric value of the frame.

    " + }, + "resolution":{ + "shape":"AggregationPeriod", + "documentation":"

    Resolution or granularity of the profile data used to generate the time series. This is the value used to jump through time steps in a time series. There are 3 valid values.

    • P1D — 1 day

    • PT1H — 1 hour

    • PT5M — 5 minutes

    " + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the time period for the returned time series values. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "unprocessedEndTimes":{ + "shape":"UnprocessedEndTimeMap", + "documentation":"

    List of instances which remained unprocessed. This will create a missing time step in the list of end times.

    " + } + }, + "documentation":"

    The structure representing the BatchGetFrameMetricDataResponse.

    " + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "Channel":{ + "type":"structure", + "required":[ + "eventPublishers", + "uri" + ], + "members":{ + "eventPublishers":{ + "shape":"EventPublishers", + "documentation":"

    List of publishers for different types of events that may be detected in an application from the profile. Anomaly detection is the only event publisher in Profiler.

    " + }, + "id":{ + "shape":"ChannelId", + "documentation":"

    Unique identifier for each Channel in the notification configuration of a Profiling Group. A random UUID for channelId is used when adding a channel to the notification configuration if not specified in the request.

    " + }, + "uri":{ + "shape":"ChannelUri", + "documentation":"

    The unique ARN of the resource to be used for notifications. A valid SNS topic ARN is supported as a channel URI.

    " + } + }, + "documentation":"

    Notification medium for users to get alerted about events that occur in an application profile. An SNS topic is supported as a notification channel.
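    A minimal sketch of a Channel, assuming a hypothetical SNS topic ARN and a made-up channel ID:

    {
      "id": "12345678-1234-1234-1234-123456789012",
      "uri": "arn:aws:sns:us-east-1:123456789012:example-profiler-notifications",
      "eventPublishers": ["AnomalyDetection"]
    }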

    " + }, + "ChannelId":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "ChannelUri":{ + "type":"string", + "documentation":"

    A channel URI uniquely identifies a notification channel. A topic ARN is the URI for an SNS channel, an email address is the URI for an email channel, and so on. Currently only SNS channels are supported, so the URI is required to be an SNS topic ARN.

    " + }, + "Channels":{ + "type":"list", + "member":{"shape":"Channel"}, + "max":2, + "min":1 + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\w-]+$" + }, + "ComputePlatform":{ + "type":"string", + "enum":[ + "AWSLambda", + "Default" + ] + }, + "ConfigureAgentRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "fleetInstanceId":{ + "shape":"FleetInstanceId", + "documentation":"

    A universally unique identifier (UUID) for a profiling instance. For example, if the profiling instance is an Amazon EC2 instance, it is the instance ID. If it is an AWS Fargate container, it is the container's task ID.

    " + }, + "metadata":{ + "shape":"Metadata", + "documentation":"

    Metadata captured about the compute platform the agent is running on. It includes information about sampling and reporting. The valid fields are:

    • COMPUTE_PLATFORM - The compute platform on which the agent is running

    • AGENT_ID - The ID for an agent instance.

    • AWS_REQUEST_ID - The AWS request ID of a Lambda invocation.

    • EXECUTION_ENVIRONMENT - The execution environment a Lambda function is running on.

    • LAMBDA_FUNCTION_ARN - The Amazon Resource Name (ARN) that is used to invoke a Lambda function.

    • LAMBDA_MEMORY_LIMIT_IN_MB - The memory allocated to a Lambda function.

    • LAMBDA_REMAINING_TIME_IN_MILLISECONDS - The time in milliseconds before execution of a Lambda function times out.

    • LAMBDA_TIME_GAP_BETWEEN_INVOKES_IN_MILLISECONDS - The time in milliseconds between two invocations of a Lambda function.

    • LAMBDA_PREVIOUS_EXECUTION_TIME_IN_MILLISECONDS - The time in milliseconds for the previous Lambda invocation.
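    As a hypothetical illustration, the metadata map sent by an agent running on AWS Lambda might look like the following; all values are made up, and the wire-level keys are the MetadataField enum values rather than the upper-case labels above.

    {
      "ComputePlatform": "AWSLambda",
      "AgentId": "example-agent-id",
      "AwsRequestId": "7c6f1a2b-example-request-id",
      "LambdaFunctionArn": "arn:aws:lambda:us-east-1:123456789012:function:example-function",
      "LambdaMemoryLimitInMB": "512",
      "LambdaRemainingTimeInMilliseconds": "25000"
    }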

    " + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group for which the configured agent is collecting profiling data.

    ", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

    The structure representing the configureAgentRequest.

    " + }, + "ConfigureAgentResponse":{ + "type":"structure", + "required":["configuration"], + "members":{ + "configuration":{ + "shape":"AgentConfiguration", + "documentation":"

    An AgentConfiguration object that specifies whether an agent profiles and for how long to return profiling data.

    " + } + }, + "documentation":"

    The structure representing the configureAgentResponse.

    ", + "payload":"configuration" + }, + "ConflictException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request.

    ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateProfilingGroupRequest":{ + "type":"structure", + "required":[ + "clientToken", + "profilingGroupName" + ], + "members":{ + "agentOrchestrationConfig":{ + "shape":"AgentOrchestrationConfig", + "documentation":"

    Specifies whether profiling is enabled or disabled for the created profiling group.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    Amazon CodeGuru Profiler uses this universally unique identifier (UUID) to prevent the accidental creation of duplicate profiling groups if there are failures and retries.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + }, + "computePlatform":{ + "shape":"ComputePlatform", + "documentation":"

    The compute platform of the profiling group. Use AWSLambda if your application runs on AWS Lambda. Use Default if your application runs on a compute platform that is not AWS Lambda, such as an Amazon EC2 instance, an on-premises server, or a different platform. If not specified, Default is used.

    " + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group to create.

    " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    A list of tags to add to the created profiling group.

    " + } + }, + "documentation":"

    The structure representing the createProfilingGroupRequest.

    " + }, + "CreateProfilingGroupResponse":{ + "type":"structure", + "required":["profilingGroup"], + "members":{ + "profilingGroup":{ + "shape":"ProfilingGroupDescription", + "documentation":"

    The returned ProfilingGroupDescription object that contains information about the created profiling group.

    " + } + }, + "documentation":"

    The structure representing the createProfilingGroupResponse.

    ", + "payload":"profilingGroup" + }, + "DeleteProfilingGroupRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group to delete.

    ", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

    The structure representing the deleteProfilingGroupRequest.

    " + }, + "DeleteProfilingGroupResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The structure representing the deleteProfilingGroupResponse.

    " + }, + "DescribeProfilingGroupRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group to get information about.

    ", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

    The structure representing the describeProfilingGroupRequest.

    " + }, + "DescribeProfilingGroupResponse":{ + "type":"structure", + "required":["profilingGroup"], + "members":{ + "profilingGroup":{ + "shape":"ProfilingGroupDescription", + "documentation":"

    The returned ProfilingGroupDescription object that contains information about the requested profiling group.

    " + } + }, + "documentation":"

    The structure representing the describeProfilingGroupResponse.

    ", + "payload":"profilingGroup" + }, + "Double":{ + "type":"double", + "box":true + }, + "EventPublisher":{ + "type":"string", + "enum":["AnomalyDetection"] + }, + "EventPublishers":{ + "type":"list", + "member":{"shape":"EventPublisher"}, + "max":1, + "min":1 + }, + "FeedbackType":{ + "type":"string", + "enum":[ + "Negative", + "Positive" + ] + }, + "FindingsReportId":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "FindingsReportSummaries":{ + "type":"list", + "member":{"shape":"FindingsReportSummary"} + }, + "FindingsReportSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"FindingsReportId", + "documentation":"

    The universally unique identifier (UUID) of the recommendation report.

    " + }, + "profileEndTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the period during which the metric is flagged as anomalous. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "profileStartTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the profile the analysis data is about. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "profilingGroupName":{ + "shape":"String", + "documentation":"

    The name of the profiling group that is associated with the analysis data.

    " + }, + "totalNumberOfFindings":{ + "shape":"Integer", + "documentation":"

    The total number of different recommendations that were found by the analysis.

    " + } + }, + "documentation":"

    Information about potential recommendations that might be created from the analysis of profiling data.

    " + }, + "FleetInstanceId":{ + "type":"string", + "max":255, + "min":1 + }, + "FrameMetric":{ + "type":"structure", + "required":[ + "frameName", + "threadStates", + "type" + ], + "members":{ + "frameName":{ + "shape":"String", + "documentation":"

    Name of the method common across the multiple occurrences of a frame in an application profile.

    " + }, + "threadStates":{ + "shape":"ThreadStates", + "documentation":"

    List of application runtime thread states used to get the counts for a frame and derive a metric value.

    " + }, + "type":{ + "shape":"MetricType", + "documentation":"

    A type of aggregation that specifies how a metric for a frame is analyzed. The supported value AggregatedRelativeTotalTime is an aggregation of the metric value for one frame that is calculated across the occurrences of all frames in a profile.

    " + } + }, + "documentation":"

    The frame name, metric type, and thread states. These are used to derive the value of the metric for the frame.

    " + }, + "FrameMetricData":{ + "type":"list", + "member":{"shape":"FrameMetricDatum"} + }, + "FrameMetricDatum":{ + "type":"structure", + "required":[ + "frameMetric", + "values" + ], + "members":{ + "frameMetric":{"shape":"FrameMetric"}, + "values":{ + "shape":"FrameMetricValues", + "documentation":"

    A list of values that are associated with a frame metric.

    " + } + }, + "documentation":"

    Information about a frame metric and its values.

    " + }, + "FrameMetricValues":{ + "type":"list", + "member":{"shape":"Double"} + }, + "FrameMetrics":{ + "type":"list", + "member":{"shape":"FrameMetric"} + }, + "GetFindingsReportAccountSummaryRequest":{ + "type":"structure", + "members":{ + "dailyReportsOnly":{ + "shape":"Boolean", + "documentation":"

    A Boolean value indicating whether to only return reports from daily profiles. If set to True, only analysis data from daily profiles is returned. If set to False, analysis data is returned from smaller time windows (for example, one hour).

    ", + "location":"querystring", + "locationName":"dailyReportsOnly" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results returned by GetFindingsReportAccountSummary in paginated output. When this parameter is used, GetFindingsReportAccountSummary only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another GetFindingsReportAccountSummary request with the returned nextToken value.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The nextToken value returned from a previous paginated GetFindingsReportAccountSummary request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
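    As a sketch of how maxResults and nextToken work together (all values here are illustrative), a first request and the follow-up request for the next page might look like the following.

    First page:

    { "dailyReportsOnly": true, "maxResults": 10 }

    Next page, passing back the token returned with the first page:

    { "dailyReportsOnly": true, "maxResults": 10, "nextToken": "example-opaque-token" }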

    ", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

    The structure representing the GetFindingsReportAccountSummaryRequest.

    " + }, + "GetFindingsReportAccountSummaryResponse":{ + "type":"structure", + "required":["reportSummaries"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The nextToken value to include in a future GetFindingsReportAccountSummary request. When the results of a GetFindingsReportAccountSummary request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    " + }, + "reportSummaries":{ + "shape":"FindingsReportSummaries", + "documentation":"

    The returned list of FindingsReportSummary objects that contain summaries of analysis results for all profiling groups in your AWS account.

    " + } + }, + "documentation":"

    The structure representing the GetFindingsReportAccountSummaryResponse.

    " + }, + "GetNotificationConfigurationRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group to get the notification configuration for.

    ", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

    The structure representing the GetNotificationConfigurationRequest.

    " + }, + "GetNotificationConfigurationResponse":{ + "type":"structure", + "required":["notificationConfiguration"], + "members":{ + "notificationConfiguration":{ + "shape":"NotificationConfiguration", + "documentation":"

    The current notification configuration for this profiling group.

    " + } + }, + "documentation":"

    The structure representing the GetNotificationConfigurationResponse.

    " + }, + "GetPolicyRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group.

    ", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

    The structure representing the getPolicyRequest.

    " + }, + "GetPolicyResponse":{ + "type":"structure", + "required":[ + "policy", + "revisionId" + ], + "members":{ + "policy":{ + "shape":"String", + "documentation":"

    The JSON-formatted resource-based policy attached to the ProfilingGroup.

    " + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

    A unique identifier for the current revision of the returned policy.

    " + } + }, + "documentation":"

    The structure representing the getPolicyResponse.

    " + }, + "GetProfileRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "accept":{ + "shape":"String", + "documentation":"

    The format of the returned profiling data. The format maps to the Accept and Content-Type headers of the HTTP request. You can specify one of the following; the default is application/x-amzn-ion.

    • application/json — standard JSON format

    • application/x-amzn-ion — the Amazon Ion data format. For more information, see Amazon Ion (http://amzn.github.io/ion-docs/).
    ", + "location":"header", + "locationName":"Accept" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the requested profile. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    If you specify endTime, then you must also specify period or startTime, but not both.

    ", + "location":"querystring", + "locationName":"endTime" + }, + "maxDepth":{ + "shape":"MaxDepth", + "documentation":"

    The maximum depth of the stacks in the code that is represented in the aggregated profile. For example, if CodeGuru Profiler finds a method A, which calls method B, which calls method C, which calls method D, then the depth is 4. If the maxDepth is set to 2, then the aggregated profile contains representations of methods A and B.

    ", + "location":"querystring", + "locationName":"maxDepth" + }, + "period":{ + "shape":"Period", + "documentation":"

    Used with startTime or endTime to specify the time range for the returned aggregated profile. Specify using the ISO 8601 format. For example, P1DT1H1M1S.

    To get the latest aggregated profile, specify only period.
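    For example (hypothetical values), a request for the most recent one-hour aggregated profile could supply only the profiling group name (a URI parameter) and the period query parameter:

    { "profilingGroupName": "my-profiling-group", "period": "PT1H" }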
    ", + "location":"querystring", + "locationName":"period" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group to get.

    ", + "location":"uri", + "locationName":"profilingGroupName" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the profile to get. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    If you specify startTime, then you must also specify period or endTime, but not both.
    ", + "location":"querystring", + "locationName":"startTime" + } + }, + "documentation":"

    The structure representing the getProfileRequest.

    " + }, + "GetProfileResponse":{ + "type":"structure", + "required":[ + "contentType", + "profile" + ], + "members":{ + "contentEncoding":{ + "shape":"String", + "documentation":"

    The content encoding of the profile.

    ", + "location":"header", + "locationName":"Content-Encoding" + }, + "contentType":{ + "shape":"String", + "documentation":"

    The content type of the profile in the payload. It is either application/json or the default application/x-amzn-ion.

    ", + "location":"header", + "locationName":"Content-Type" + }, + "profile":{ + "shape":"AggregatedProfile", + "documentation":"

    Information about the profile.

    " + } + }, + "documentation":"

    The structure representing the getProfileResponse.

    ", + "payload":"profile" + }, + "GetRecommendationsRequest":{ + "type":"structure", + "required":[ + "endTime", + "profilingGroupName", + "startTime" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the profile to get analysis data about. You must specify startTime and endTime. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    ", + "location":"querystring", + "locationName":"endTime" + }, + "locale":{ + "shape":"Locale", + "documentation":"

    The language used to provide analysis. Specify using a string that is one of the following BCP 47 language codes.

    • de-DE - German, Germany

    • en-GB - English, United Kingdom

    • en-US - English, United States

    • es-ES - Spanish, Spain

    • fr-FR - French, France

    • it-IT - Italian, Italy

    • ja-JP - Japanese, Japan

    • ko-KR - Korean, Republic of Korea

    • pt-BR - Portuguese, Brazil

    • zh-CN - Chinese, China

    • zh-TW - Chinese, Taiwan

    ", + "location":"querystring", + "locationName":"locale" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group to get analysis data about.

    ", + "location":"uri", + "locationName":"profilingGroupName" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the profile to get analysis data about. You must specify startTime and endTime. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    ", + "location":"querystring", + "locationName":"startTime" + } + }, + "documentation":"

    The structure representing the GetRecommendationsRequest.

    " + }, + "GetRecommendationsResponse":{ + "type":"structure", + "required":[ + "anomalies", + "profileEndTime", + "profileStartTime", + "profilingGroupName", + "recommendations" + ], + "members":{ + "anomalies":{ + "shape":"Anomalies", + "documentation":"

    The list of anomalies that the analysis has found for this profile.

    " + }, + "profileEndTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the profile the analysis data is about. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "profileStartTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the profile the analysis data is about. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group the analysis data is about.

    " + }, + "recommendations":{ + "shape":"Recommendations", + "documentation":"

    The list of recommendations that the analysis found for this profile.

    " + } + }, + "documentation":"

    The structure representing the GetRecommendationsResponse.

    " + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The server encountered an internal error and is unable to complete the request.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListFindingsReportsRequest":{ + "type":"structure", + "required":[ + "endTime", + "profilingGroupName", + "startTime" + ], + "members":{ + "dailyReportsOnly":{ + "shape":"Boolean", + "documentation":"

    A Boolean value indicating whether to only return reports from daily profiles. If set to True, only analysis data from daily profiles is returned. If set to False, analysis data is returned from smaller time windows (for example, one hour).

    ", + "location":"querystring", + "locationName":"dailyReportsOnly" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the profile to get analysis data about. You must specify startTime and endTime. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    ", + "location":"querystring", + "locationName":"endTime" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of report results returned by ListFindingsReports in paginated output. When this parameter is used, ListFindingsReports only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListFindingsReports request with the returned nextToken value.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The nextToken value returned from a previous paginated ListFindingsReportsRequest request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group from which to search for analysis data.

    ", + "location":"uri", + "locationName":"profilingGroupName" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the profile to get analysis data about. You must specify startTime and endTime. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    ", + "location":"querystring", + "locationName":"startTime" + } + }, + "documentation":"

    The structure representing the ListFindingsReportsRequest.

    " + }, + "ListFindingsReportsResponse":{ + "type":"structure", + "required":["findingsReportSummaries"], + "members":{ + "findingsReportSummaries":{ + "shape":"FindingsReportSummaries", + "documentation":"

    The list of analysis results summaries.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The nextToken value to include in a future ListFindingsReports request. When the results of a ListFindingsReports request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + }, + "documentation":"

    The structure representing the ListFindingsReportsResponse.

    " + }, + "ListOfTimestamps":{ + "type":"list", + "member":{"shape":"TimestampStructure"} + }, + "ListProfileTimesRequest":{ + "type":"structure", + "required":[ + "endTime", + "period", + "profilingGroupName", + "startTime" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the time range from which to list the profiles.

    ", + "location":"querystring", + "locationName":"endTime" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of profile time results returned by ListProfileTimes in paginated output. When this parameter is used, ListProfileTimes only returns maxResults results in a single page with a nextToken response element. The remaining results of the initial request can be seen by sending another ListProfileTimes request with the returned nextToken value.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The nextToken value returned from a previous paginated ListProfileTimes request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "orderBy":{ + "shape":"OrderBy", + "documentation":"

    The order (ascending or descending by start time of the profile) to use when listing profiles. Defaults to TIMESTAMP_DESCENDING.

    ", + "location":"querystring", + "locationName":"orderBy" + }, + "period":{ + "shape":"AggregationPeriod", + "documentation":"

    The aggregation period. This specifies the period during which an aggregation profile collects posted agent profiles for a profiling group. There are 3 valid values.

    • P1D — 1 day

    • PT1H — 1 hour

    • PT5M — 5 minutes

    ", + "location":"querystring", + "locationName":"period" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group.

    ", + "location":"uri", + "locationName":"profilingGroupName" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the time range from which to list the profiles.

    ", + "location":"querystring", + "locationName":"startTime" + } + }, + "documentation":"

    The structure representing the listProfileTimesRequest.

    " + }, + "ListProfileTimesResponse":{ + "type":"structure", + "required":["profileTimes"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The nextToken value to include in a future ListProfileTimes request. When the results of a ListProfileTimes request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    " + }, + "profileTimes":{ + "shape":"ProfileTimes", + "documentation":"

    The list of start times of the available profiles for the aggregation period in the specified time range.

    " + } + }, + "documentation":"

    The structure representing the listProfileTimesResponse.

    " + }, + "ListProfilingGroupsRequest":{ + "type":"structure", + "members":{ + "includeDescription":{ + "shape":"Boolean", + "documentation":"

    A Boolean value indicating whether to include a description. If true, then a list of ProfilingGroupDescription objects that contain detailed information about profiling groups is returned. If false, then a list of profiling group names is returned.

    ", + "location":"querystring", + "locationName":"includeDescription" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of profiling groups results returned by ListProfilingGroups in paginated output. When this parameter is used, ListProfilingGroups only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListProfilingGroups request with the returned nextToken value.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The nextToken value returned from a previous paginated ListProfilingGroups request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

    ", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

    The structure representing the listProfilingGroupsRequest.

    " + }, + "ListProfilingGroupsResponse":{ + "type":"structure", + "required":["profilingGroupNames"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The nextToken value to include in a future ListProfilingGroups request. When the results of a ListProfilingGroups request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    " + }, + "profilingGroupNames":{ + "shape":"ProfilingGroupNames", + "documentation":"

    A returned list of profiling group names. A list of the names is returned only if includeDescription is false, otherwise a list of ProfilingGroupDescription objects is returned.

    " + }, + "profilingGroups":{ + "shape":"ProfilingGroupDescriptions", + "documentation":"

    A returned list of ProfilingGroupDescription objects. A list of ProfilingGroupDescription objects is returned only if includeDescription is true, otherwise a list of profiling group names is returned.

    " + } + }, + "documentation":"

    The structure representing the listProfilingGroupsResponse.

    " + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ProfilingGroupArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that contains the tags to return.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagsMap", + "documentation":"

    The list of tags assigned to the specified resource. This is the list of tags returned in the response.

    " + } + } + }, + "Locale":{ + "type":"string", + "documentation":"

    BCP47 language code. Supported locales: de-DE, en-GB, en-US, es-ES, fr-FR, it-IT, ja-JP, ko-KR, pt-BR, zh-CN, zh-TW

    " + }, + "Match":{ + "type":"structure", + "members":{ + "frameAddress":{ + "shape":"String", + "documentation":"

    The location in the profiling graph that contains a recommendation found during analysis.

    " + }, + "targetFramesIndex":{ + "shape":"Integer", + "documentation":"

    The target frame that triggered a match.

    " + }, + "thresholdBreachValue":{ + "shape":"Double", + "documentation":"

    The value in the profile data that exceeded the recommendation threshold.

    " + } + }, + "documentation":"

    The part of a profile that contains a recommendation found during analysis.

    " + }, + "Matches":{ + "type":"list", + "member":{"shape":"Match"} + }, + "MaxDepth":{ + "type":"integer", + "box":true, + "max":10000, + "min":1 + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "Metadata":{ + "type":"map", + "key":{"shape":"MetadataField"}, + "value":{"shape":"String"} + }, + "MetadataField":{ + "type":"string", + "enum":[ + "AgentId", + "AwsRequestId", + "ComputePlatform", + "ExecutionEnvironment", + "LambdaFunctionArn", + "LambdaMemoryLimitInMB", + "LambdaPreviousExecutionTimeInMilliseconds", + "LambdaRemainingTimeInMilliseconds", + "LambdaTimeGapBetweenInvokesInMilliseconds" + ] + }, + "Metric":{ + "type":"structure", + "required":[ + "frameName", + "threadStates", + "type" + ], + "members":{ + "frameName":{ + "shape":"String", + "documentation":"

    The name of the method that appears as a frame in any stack in a profile.

    " + }, + "threadStates":{ + "shape":"Strings", + "documentation":"

    The list of application runtime thread states that is used to calculate the metric value for the frame.

    " + }, + "type":{ + "shape":"MetricType", + "documentation":"

    A type that specifies how a metric for a frame is analyzed. The supported value AggregatedRelativeTotalTime is an aggregation of the metric value for one frame that is calculated across the occurrences of all frames in a profile.

    " + } + }, + "documentation":"

    Details about the metric that the analysis used when it detected the anomaly. The metric is what is analyzed to create recommendations. It includes the name of the frame that was analyzed and the type and thread states used to derive the metric value for that frame.

    " + }, + "MetricType":{ + "type":"string", + "enum":["AggregatedRelativeTotalTime"] + }, + "NotificationConfiguration":{ + "type":"structure", + "members":{ + "channels":{ + "shape":"Channels", + "documentation":"

    List of up to two channels to be used for sending notifications for events detected from the application profile.

    " + } + }, + "documentation":"

    The configuration for notifications stored for each profiling group. This includes up to two channels and a list of event publishers associated with each channel.

    " + }, + "OrderBy":{ + "type":"string", + "enum":[ + "TimestampAscending", + "TimestampDescending" + ] + }, + "PaginationToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\w-]+$" + }, + "Pattern":{ + "type":"structure", + "members":{ + "countersToAggregate":{ + "shape":"Strings", + "documentation":"

    A list of the different counters used to determine if there is a match.

    " + }, + "description":{ + "shape":"String", + "documentation":"

    The description of the recommendation. This explains a potential inefficiency in a profiled application.

    " + }, + "id":{ + "shape":"String", + "documentation":"

    The universally unique identifier (UUID) of this pattern.

    " + }, + "name":{ + "shape":"String", + "documentation":"

    The name for this pattern.

    " + }, + "resolutionSteps":{ + "shape":"String", + "documentation":"

    A string that contains the steps recommended to address the potential inefficiency.

    " + }, + "targetFrames":{ + "shape":"TargetFrames", + "documentation":"

    A list of frame names that were searched during the analysis that generated a recommendation.

    " + }, + "thresholdPercent":{ + "shape":"Percentage", + "documentation":"

    The percentage of time an application spends in one method that triggers a recommendation. The percentage of time is the same as the percentage of the total gathered sample counts during analysis.

    " + } + }, + "documentation":"

    A set of rules used to make a recommendation during an analysis.

    " + }, + "Percentage":{ + "type":"double", + "max":100, + "min":0 + }, + "Period":{ + "type":"string", + "max":64, + "min":1 + }, + "PostAgentProfileRequest":{ + "type":"structure", + "required":[ + "agentProfile", + "contentType", + "profilingGroupName" + ], + "members":{ + "agentProfile":{ + "shape":"AgentProfile", + "documentation":"

    The submitted profiling data.

    " + }, + "contentType":{ + "shape":"String", + "documentation":"

    The format of the submitted profiling data. The format maps to the Accept and Content-Type headers of the HTTP request. You can specify one of the following; the default is application/x-amzn-ion.

    • application/json — standard JSON format

    • application/x-amzn-ion — the Amazon Ion data format. For more information, see Amazon Ion (http://amzn.github.io/ion-docs/).
    ", + "location":"header", + "locationName":"Content-Type" + }, + "profileToken":{ + "shape":"ClientToken", + "documentation":"

    Amazon CodeGuru Profiler uses this universally unique identifier (UUID) to prevent the accidental submission of duplicate profiling data if there are failures and retries.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"profileToken" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group with the aggregated profile that receives the submitted profiling data.

    ", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

    The structure representing the postAgentProfileRequest.

    ", + "payload":"agentProfile" + }, + "PostAgentProfileResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The structure representing the postAgentProfileResponse.

    " + }, + "Principal":{"type":"string"}, + "Principals":{ + "type":"list", + "member":{"shape":"Principal"}, + "max":50, + "min":1 + }, + "ProfileTime":{ + "type":"structure", + "members":{ + "start":{ + "shape":"Timestamp", + "documentation":"

    The start time of a profile. It is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + } + }, + "documentation":"

    Contains the start time of a profile.

    " + }, + "ProfileTimes":{ + "type":"list", + "member":{"shape":"ProfileTime"} + }, + "ProfilingGroupArn":{"type":"string"}, + "ProfilingGroupDescription":{ + "type":"structure", + "members":{ + "agentOrchestrationConfig":{ + "shape":"AgentOrchestrationConfig", + "documentation":"

    An AgentOrchestrationConfig object that indicates whether profiling is enabled for this profiling group.

    " + }, + "arn":{ + "shape":"ProfilingGroupArn", + "documentation":"

    The Amazon Resource Name (ARN) identifying the profiling group resource.

    " + }, + "computePlatform":{ + "shape":"ComputePlatform", + "documentation":"

    The compute platform of the profiling group. If it is set to AWSLambda, then the profiled application runs on AWS Lambda. If it is set to Default, then the profiled application runs on a compute platform that is not AWS Lambda, such as an Amazon EC2 instance, an on-premises server, or a different platform. The default is Default.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time when the profiling group was created. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "name":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group.

    " + }, + "profilingStatus":{ + "shape":"ProfilingStatus", + "documentation":"

    A ProfilingStatus object that includes information about the last time a profile agent pinged back, the last time a profile was received, and the aggregation period and start time for the most recent aggregated profile.

    " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    A list of the tags that belong to this profiling group.

    " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time when the profiling group was last updated. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + } + }, + "documentation":"

    Contains information about a profiling group.

    " + }, + "ProfilingGroupDescriptions":{ + "type":"list", + "member":{"shape":"ProfilingGroupDescription"} + }, + "ProfilingGroupName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[\\w-]+$" + }, + "ProfilingGroupNames":{ + "type":"list", + "member":{"shape":"ProfilingGroupName"} + }, + "ProfilingStatus":{ + "type":"structure", + "members":{ + "latestAgentOrchestratedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time when the profiling agent most recently pinged back. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "latestAgentProfileReportedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time when the most recent profile was received. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "latestAggregatedProfile":{ + "shape":"AggregatedProfileTime", + "documentation":"

    An AggregatedProfileTime object that contains the aggregation period and start time for an aggregated profile.

    " + } + }, + "documentation":"

    Profiling status includes information about the last time a profile agent pinged back, the last time a profile was received, and the aggregation period and start time for the most recent aggregated profile.

    " + }, + "PutPermissionRequest":{ + "type":"structure", + "required":[ + "actionGroup", + "principals", + "profilingGroupName" + ], + "members":{ + "actionGroup":{ + "shape":"ActionGroup", + "documentation":"

    Specifies an action group that contains permissions to add to a profiling group resource. One action group is supported, agentPermissions, which grants the ConfigureAgent and PostAgentProfile permissions required by the profiling agent.

    ", + "location":"uri", + "locationName":"actionGroup" + }, + "principals":{ + "shape":"Principals", + "documentation":"

    A list of ARNs for the roles and users you want to grant access to the profiling group. Wildcards are not supported in the ARNs.

    " + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group to grant access to.

    ", + "location":"uri", + "locationName":"profilingGroupName" + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

    A universally unique identifier (UUID) for the revision of the policy you are adding to the profiling group. Do not specify this when you add permissions to a profiling group for the first time. If a policy already exists on the profiling group, you must specify the revisionId.

    " + } + }, + "documentation":"

    The structure representing the putPermissionRequest.
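    For context, the policy that PutPermission attaches is a JSON resource-based policy document. A hypothetical policy granting the agentPermissions action group to a single role might resemble the following; the account ID, role name, statement ID, action prefix, and resource ARN format are assumptions for illustration and are not taken from this model.

    {
      "Version": "2012-10-17",
      "Statement": [
        {
          "Sid": "agentPermissions-example",
          "Effect": "Allow",
          "Principal": { "AWS": "arn:aws:iam::123456789012:role/ExampleProfilerAgentRole" },
          "Action": [
            "codeguru-profiler:ConfigureAgent",
            "codeguru-profiler:PostAgentProfile"
          ],
          "Resource": "arn:aws:codeguru-profiler:us-east-1:123456789012:profilingGroup/my-profiling-group"
        }
      ]
    }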

    " + }, + "PutPermissionResponse":{ + "type":"structure", + "required":[ + "policy", + "revisionId" + ], + "members":{ + "policy":{ + "shape":"String", + "documentation":"

    The JSON-formatted resource-based policy on the profiling group that includes the added permissions.

    " + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

    A universally unique identifier (UUID) for the revision of the resource-based policy that includes the added permissions. The JSON-formatted policy is in the policy element of the response.

    " + } + }, + "documentation":"

    The structure representing the putPermissionResponse.

    " + }, + "Recommendation":{ + "type":"structure", + "required":[ + "allMatchesCount", + "allMatchesSum", + "endTime", + "pattern", + "startTime", + "topMatches" + ], + "members":{ + "allMatchesCount":{ + "shape":"Integer", + "documentation":"

    How many different places in the profile graph triggered a match.

    " + }, + "allMatchesSum":{ + "shape":"Double", + "documentation":"

    How much of the total sample count is potentially affected.

    " + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

    End time of the profile that was used by this analysis. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "pattern":{ + "shape":"Pattern", + "documentation":"

    The pattern that analysis recognized in the profile to make this recommendation.

    " + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the profile that was used by this analysis. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "topMatches":{ + "shape":"Matches", + "documentation":"

    List of the matches with most impact.

    " + } + }, + "documentation":"

    A potential improvement that was found from analyzing the profiling data.

    " + }, + "Recommendations":{ + "type":"list", + "member":{"shape":"Recommendation"} + }, + "RemoveNotificationChannelRequest":{ + "type":"structure", + "required":[ + "channelId", + "profilingGroupName" + ], + "members":{ + "channelId":{ + "shape":"ChannelId", + "documentation":"

    The ID of the channel to stop receiving notifications from.

    ", + "location":"uri", + "locationName":"channelId" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group whose notification configuration you want to change.

    ", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

    The structure representing the RemoveNotificationChannelRequest.

    " + }, + "RemoveNotificationChannelResponse":{ + "type":"structure", + "members":{ + "notificationConfiguration":{ + "shape":"NotificationConfiguration", + "documentation":"

    The new notification configuration for this profiling group.

    " + } + }, + "documentation":"

    The structure representing the RemoveNotificationChannelResponse.

    " + }, + "RemovePermissionRequest":{ + "type":"structure", + "required":[ + "actionGroup", + "profilingGroupName", + "revisionId" + ], + "members":{ + "actionGroup":{ + "shape":"ActionGroup", + "documentation":"

    Specifies an action group that contains the permissions to remove from a profiling group's resource-based policy. One action group is supported, agentPermissions, which grants ConfigureAgent and PostAgentProfile permissions.

    ", + "location":"uri", + "locationName":"actionGroup" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group.

    ", + "location":"uri", + "locationName":"profilingGroupName" + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

    A universally unique identifier (UUID) for the revision of the resource-based policy from which you want to remove permissions.

    ", + "location":"querystring", + "locationName":"revisionId" + } + }, + "documentation":"

    The structure representing the removePermissionRequest.
    " + }, + "RemovePermissionResponse":{ + "type":"structure", + "required":[ + "policy", + "revisionId" + ], + "members":{ + "policy":{ + "shape":"String", + "documentation":"

    The JSON-formatted resource-based policy on the profiling group after the specified permissions were removed.

    " + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

    A universally unique identifier (UUID) for the revision of the resource-based policy after the specified permissions were removed. The updated JSON-formatted policy is in the policy element of the response.

    " + } + }, + "documentation":"

    The structure representing the removePermissionResponse.

    " + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The resource specified in the request does not exist.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "RevisionId":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    You have exceeded your service quota. To perform the requested action, remove some of the relevant resources, or use Service Quotas to request a service quota increase.

    ", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Strings":{ + "type":"list", + "member":{"shape":"String"} + }, + "SubmitFeedbackRequest":{ + "type":"structure", + "required":[ + "anomalyInstanceId", + "profilingGroupName", + "type" + ], + "members":{ + "anomalyInstanceId":{ + "shape":"AnomalyInstanceId", + "documentation":"

    The universally unique identifier (UUID) of the AnomalyInstance object that is included in the analysis data.

    ", + "location":"uri", + "locationName":"anomalyInstanceId" + }, + "comment":{ + "shape":"String", + "documentation":"

    Optional feedback about this anomaly.

    " + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group that is associated with the analysis data.

    ", + "location":"uri", + "locationName":"profilingGroupName" + }, + "type":{ + "shape":"FeedbackType", + "documentation":"

    The feedback type. There are two valid values, Positive and Negative.

    " + } + }, + "documentation":"

    The structure representing the SubmitFeedbackRequest.

    " + }, + "SubmitFeedbackResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The structure representing the SubmitFeedbackResponse.

    " + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ProfilingGroupArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that the tags are added to.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    The list of tags that are added to the specified resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "TargetFrame":{ + "type":"list", + "member":{"shape":"String"} + }, + "TargetFrames":{ + "type":"list", + "member":{"shape":"TargetFrame"} + }, + "ThreadStates":{ + "type":"list", + "member":{"shape":"String"} + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "Timestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "TimestampStructure":{ + "type":"structure", + "required":["value"], + "members":{ + "value":{ + "shape":"Timestamp", + "documentation":"

    A Timestamp. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + } + }, + "documentation":"

    A data type that contains a Timestamp object. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

    " + }, + "UnprocessedEndTimeMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"ListOfTimestamps"} + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ProfilingGroupArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that contains the tags to remove.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

    A list of tag keys. Existing tags of resources with keys in this list are removed from the specified resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateProfilingGroupRequest":{ + "type":"structure", + "required":[ + "agentOrchestrationConfig", + "profilingGroupName" + ], + "members":{ + "agentOrchestrationConfig":{ + "shape":"AgentOrchestrationConfig", + "documentation":"

    Specifies whether profiling is enabled or disabled for a profiling group.

    " + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

    The name of the profiling group to update.

    ", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

    The structure representing the updateProfilingGroupRequest.

    " + }, + "UpdateProfilingGroupResponse":{ + "type":"structure", + "required":["profilingGroup"], + "members":{ + "profilingGroup":{ + "shape":"ProfilingGroupDescription", + "documentation":"

    A ProfilingGroupDescription object that contains information about the updated profiling group that is returned.

    " + } + }, + "documentation":"

    The structure representing the updateProfilingGroupResponse.

    ", + "payload":"profilingGroup" + }, + "UserFeedback":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"FeedbackType", + "documentation":"

    Optional Positive or Negative feedback submitted by the user about whether the recommendation is useful or not.

    " + } + }, + "documentation":"

    Feedback that can be submitted for each instance of an anomaly by the user. Feedback is used to improve the recommendations generated for the application.

    " + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The parameter is not valid.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

    This section provides documentation for the Amazon CodeGuru Profiler API operations.

    Amazon CodeGuru Profiler collects runtime performance data from your live applications, and provides recommendations that can help you fine-tune your application performance. Using machine learning algorithms, CodeGuru Profiler can help you find your most expensive lines of code and suggest ways you can improve efficiency and remove CPU bottlenecks.

    Amazon CodeGuru Profiler provides different visualizations of profiling data to help you identify what code is running on the CPU, see how much time is consumed, and suggest ways to reduce CPU utilization.

    Amazon CodeGuru Profiler currently supports applications written in all Java virtual machine (JVM) languages. While CodeGuru Profiler supports both visualizations and recommendations for applications written in Java, it can also generate visualizations and a subset of recommendations for applications written in other JVM languages.

    For more information, see What is Amazon CodeGuru Profiler (https://docs.aws.amazon.com/codeguru/latest/profiler-ug/what-is-codeguru-profiler.html) in the Amazon CodeGuru Profiler User Guide.
    " +} diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml new file mode 100644 index 000000000000..f7a6a0ca65ee --- /dev/null +++ b/services/codegurureviewer/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + codegurureviewer + AWS Java SDK :: Services :: CodeGuru Reviewer + The AWS Java SDK for CodeGuru Reviewer module holds the client classes that are used for + communicating with CodeGuru Reviewer. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.codegurureviewer + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/codegurureviewer/src/main/resources/codegen-resources/paginators-1.json b/services/codegurureviewer/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..a9c76f5b737f --- /dev/null +++ b/services/codegurureviewer/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,25 @@ +{ + "pagination": { + "ListCodeReviews": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListRecommendationFeedback": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListRecommendations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListRepositoryAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "RepositoryAssociationSummaries" + } + } +} diff --git a/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json b/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..09adc9daf5e3 --- /dev/null +++ b/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1429 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-09-19", + "endpointPrefix":"codeguru-reviewer", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"CodeGuruReviewer", + "serviceFullName":"Amazon CodeGuru Reviewer", + "serviceId":"CodeGuru Reviewer", + "signatureVersion":"v4", + "signingName":"codeguru-reviewer", + "uid":"codeguru-reviewer-2019-09-19" + }, + "operations":{ + "AssociateRepository":{ + "name":"AssociateRepository", + "http":{ + "method":"POST", + "requestUri":"/associations" + }, + "input":{"shape":"AssociateRepositoryRequest"}, + "output":{"shape":"AssociateRepositoryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Use to associate an AWS CodeCommit repository or a repository managed by AWS CodeStar Connections with Amazon CodeGuru Reviewer. When you associate a repository, CodeGuru Reviewer reviews source code changes in the repository's pull requests and provides automatic recommendations. You can view recommendations using the CodeGuru Reviewer console. For more information, see Recommendations in Amazon CodeGuru Reviewer in the Amazon CodeGuru Reviewer User Guide.

    If you associate a CodeCommit repository, it must be in the same AWS Region and AWS account where its CodeGuru Reviewer code reviews are configured.

    Bitbucket and GitHub Enterprise Server repositories are managed by AWS CodeStar Connections to connect to CodeGuru Reviewer. For more information, see Connect to a repository source provider in the Amazon CodeGuru Reviewer User Guide.

    You cannot use the CodeGuru Reviewer SDK or the AWS CLI to associate a GitHub repository with Amazon CodeGuru Reviewer. To associate a GitHub repository, use the console. For more information, see Getting started with CodeGuru Reviewer in the CodeGuru Reviewer User Guide.
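As a concrete illustration of the association flow described above, the following is a minimal, hypothetical sketch that associates an AWS CodeCommit repository using the AWS SDK for Java v2 client generated from this model. It assumes the standard v2 codegen naming for the codegurureviewer module; the repository name is a placeholder.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
    import software.amazon.awssdk.services.codegurureviewer.model.AssociateRepositoryRequest;
    import software.amazon.awssdk.services.codegurureviewer.model.AssociateRepositoryResponse;
    import software.amazon.awssdk.services.codegurureviewer.model.CodeCommitRepository;
    import software.amazon.awssdk.services.codegurureviewer.model.Repository;

    public class AssociateRepositoryExample {
        public static void main(String[] args) {
            try (CodeGuruReviewerClient reviewer = CodeGuruReviewerClient.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                // The Repository shape wraps exactly one provider-specific member; CodeCommit is used here.
                AssociateRepositoryRequest request = AssociateRepositoryRequest.builder()
                        .repository(Repository.builder()
                                .codeCommit(CodeCommitRepository.builder().name("MyDemoRepo").build())
                                .build())
                        .build();
                AssociateRepositoryResponse response = reviewer.associateRepository(request);
                // A new association starts in the Associating state and becomes Associated once setup completes.
                System.out.println(response.repositoryAssociation().stateAsString());
            }
        }
    }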

    " + }, + "CreateCodeReview":{ + "name":"CreateCodeReview", + "http":{ + "method":"POST", + "requestUri":"/codereviews" + }, + "input":{"shape":"CreateCodeReviewRequest"}, + "output":{"shape":"CreateCodeReviewResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Use to create a code review with a CodeReviewType of RepositoryAnalysis. This type of code review analyzes all code under a specified branch in an associated repository. PullRequest code reviews are automatically triggered by a pull request, so they cannot be created using this method.
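A minimal, hypothetical sketch of creating a RepositoryAnalysis code review for the tip of a branch, assuming the standard v2 codegen naming for the shapes defined later in this file; the review name, branch, and association ARN are placeholders.

    import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
    import software.amazon.awssdk.services.codegurureviewer.model.CodeReviewType;
    import software.amazon.awssdk.services.codegurureviewer.model.CreateCodeReviewRequest;
    import software.amazon.awssdk.services.codegurureviewer.model.CreateCodeReviewResponse;
    import software.amazon.awssdk.services.codegurureviewer.model.RepositoryAnalysis;
    import software.amazon.awssdk.services.codegurureviewer.model.RepositoryHeadSourceCodeType;

    public class CreateCodeReviewExample {
        public static void createReview(CodeGuruReviewerClient reviewer, String associationArn) {
            CreateCodeReviewRequest request = CreateCodeReviewRequest.builder()
                    .name("nightly-main-review")               // must be unique within the AWS account
                    .repositoryAssociationArn(associationArn)  // ARN returned by ListRepositoryAssociations
                    .type(CodeReviewType.builder()
                            .repositoryAnalysis(RepositoryAnalysis.builder()
                                    .repositoryHead(RepositoryHeadSourceCodeType.builder()
                                            .branchName("main")
                                            .build())
                                    .build())
                            .build())
                    .build();
            CreateCodeReviewResponse response = reviewer.createCodeReview(request);
            System.out.println(response.codeReview().stateAsString()); // typically Pending right after creation
        }
    }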

    " + }, + "DescribeCodeReview":{ + "name":"DescribeCodeReview", + "http":{ + "method":"GET", + "requestUri":"/codereviews/{CodeReviewArn}" + }, + "input":{"shape":"DescribeCodeReviewRequest"}, + "output":{"shape":"DescribeCodeReviewResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns the metadata associated with the code review along with its status.

    " + }, + "DescribeRecommendationFeedback":{ + "name":"DescribeRecommendationFeedback", + "http":{ + "method":"GET", + "requestUri":"/feedback/{CodeReviewArn}" + }, + "input":{"shape":"DescribeRecommendationFeedbackRequest"}, + "output":{"shape":"DescribeRecommendationFeedbackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Describes the customer feedback for a CodeGuru Reviewer recommendation.

    " + }, + "DescribeRepositoryAssociation":{ + "name":"DescribeRepositoryAssociation", + "http":{ + "method":"GET", + "requestUri":"/associations/{AssociationArn}" + }, + "input":{"shape":"DescribeRepositoryAssociationRequest"}, + "output":{"shape":"DescribeRepositoryAssociationResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns a RepositoryAssociation object that contains information about the requested repository association.

    " + }, + "DisassociateRepository":{ + "name":"DisassociateRepository", + "http":{ + "method":"DELETE", + "requestUri":"/associations/{AssociationArn}" + }, + "input":{"shape":"DisassociateRepositoryRequest"}, + "output":{"shape":"DisassociateRepositoryResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Removes the association between Amazon CodeGuru Reviewer and a repository.

    " + }, + "ListCodeReviews":{ + "name":"ListCodeReviews", + "http":{ + "method":"GET", + "requestUri":"/codereviews" + }, + "input":{"shape":"ListCodeReviewsRequest"}, + "output":{"shape":"ListCodeReviewsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists all the code reviews that the customer has created in the past 90 days.

    " + }, + "ListRecommendationFeedback":{ + "name":"ListRecommendationFeedback", + "http":{ + "method":"GET", + "requestUri":"/feedback/{CodeReviewArn}/RecommendationFeedback" + }, + "input":{"shape":"ListRecommendationFeedbackRequest"}, + "output":{"shape":"ListRecommendationFeedbackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns a list of RecommendationFeedbackSummary objects that contain customer recommendation feedback for all CodeGuru Reviewer users.

    " + }, + "ListRecommendations":{ + "name":"ListRecommendations", + "http":{ + "method":"GET", + "requestUri":"/codereviews/{CodeReviewArn}/Recommendations" + }, + "input":{"shape":"ListRecommendationsRequest"}, + "output":{"shape":"ListRecommendationsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns the list of all recommendations for a completed code review.
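A short, hypothetical sketch of walking the recommendations of a completed code review with the generated paginator (declared for ListRecommendations in paginators-1.json); standard v2 codegen naming is assumed.

    import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
    import software.amazon.awssdk.services.codegurureviewer.model.ListRecommendationsRequest;

    public class ListRecommendationsExample {
        public static void printRecommendations(CodeGuruReviewerClient reviewer, String codeReviewArn) {
            ListRecommendationsRequest request = ListRecommendationsRequest.builder()
                    .codeReviewArn(codeReviewArn)
                    .build();
            // The paginator re-issues the call with each NextToken, so every page is streamed here.
            reviewer.listRecommendationsPaginator(request).stream()
                    .flatMap(page -> page.recommendationSummaries().stream())
                    .forEach(summary -> System.out.printf("%s:%d-%d %s%n",
                            summary.filePath(), summary.startLine(), summary.endLine(), summary.description()));
        }
    }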

    " + }, + "ListRepositoryAssociations":{ + "name":"ListRepositoryAssociations", + "http":{ + "method":"GET", + "requestUri":"/associations" + }, + "input":{"shape":"ListRepositoryAssociationsRequest"}, + "output":{"shape":"ListRepositoryAssociationsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns a list of RepositoryAssociationSummary objects that contain summary information about a repository association. You can filter the returned list by ProviderType, Name, State, and Owner.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns the list of tags associated with an associated repository resource.

    " + }, + "PutRecommendationFeedback":{ + "name":"PutRecommendationFeedback", + "http":{ + "method":"PUT", + "requestUri":"/feedback" + }, + "input":{"shape":"PutRecommendationFeedbackRequest"}, + "output":{"shape":"PutRecommendationFeedbackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Stores customer feedback for a CodeGuru Reviewer recommendation. When this API is called again with different reactions the previous feedback is overwritten.
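A minimal, hypothetical sketch of storing a ThumbsUp reaction for a single recommendation, assuming the standard v2 codegen naming; the code review ARN and recommendation ID would come from a prior ListRecommendations call.

    import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
    import software.amazon.awssdk.services.codegurureviewer.model.PutRecommendationFeedbackRequest;
    import software.amazon.awssdk.services.codegurureviewer.model.Reaction;

    public class PutFeedbackExample {
        public static void thumbsUp(CodeGuruReviewerClient reviewer, String codeReviewArn, String recommendationId) {
            // A new call with different reactions overwrites the caller's previous feedback;
            // an empty reactions list clears it.
            PutRecommendationFeedbackRequest request = PutRecommendationFeedbackRequest.builder()
                    .codeReviewArn(codeReviewArn)
                    .recommendationId(recommendationId)
                    .reactions(Reaction.THUMBS_UP)
                    .build();
            reviewer.putRecommendationFeedback(request);
        }
    }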

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Adds one or more tags to an associated repository.
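A minimal, hypothetical sketch of tagging an associated repository with the TagMap shape defined below, assuming the standard v2 codegen naming; the tag keys and values are illustrative only.

    import java.util.Map;
    import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
    import software.amazon.awssdk.services.codegurureviewer.model.TagResourceRequest;

    public class TagAssociationExample {
        public static void tag(CodeGuruReviewerClient reviewer, String associationArn) {
            // TagMap keys and values are plain strings; a value may be an empty string.
            TagResourceRequest request = TagResourceRequest.builder()
                    .resourceArn(associationArn)
                    .tags(Map.of("Environment", "Production", "CostCenter", "111122223333"))
                    .build();
            reviewer.tagResource(request);
        }
    }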

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes a tag from an associated repository.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "Arn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:aws[^:\\s]*:codeguru-reviewer:[^:\\s]+:[\\d]{12}:[a-z-]+:[\\w-]+$" + }, + "AssociateRepositoryRequest":{ + "type":"structure", + "required":["Repository"], + "members":{ + "Repository":{ + "shape":"Repository", + "documentation":"

    The repository to associate.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Amazon CodeGuru Reviewer uses this value to prevent the accidental creation of duplicate repository associations if there are failures and retries.

    ", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

    • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

    • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

    " + } + } + }, + "AssociateRepositoryResponse":{ + "type":"structure", + "members":{ + "RepositoryAssociation":{ + "shape":"RepositoryAssociation", + "documentation":"

    Information about the repository association.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

    • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

    • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

    " + } + } + }, + "AssociationArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:aws[^:\\s]*:codeguru-reviewer:[^:\\s]+:[\\d]{12}:association:[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "AssociationId":{ + "type":"string", + "max":64, + "min":1 + }, + "BranchName":{ + "type":"string", + "max":256, + "min":1 + }, + "ClientRequestToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\w-]+$" + }, + "CodeCommitRepository":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the AWS CodeCommit repository. For more information, see repositoryName in the AWS CodeCommit API Reference.

    " + } + }, + "documentation":"

    Information about an AWS CodeCommit repository. The CodeCommit repository must be in the same AWS Region and AWS account where its CodeGuru Reviewer code reviews are configured.

    " + }, + "CodeReview":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the code review.

    " + }, + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the CodeReview object.

    " + }, + "RepositoryName":{ + "shape":"Name", + "documentation":"

    The name of the repository.

    " + }, + "Owner":{ + "shape":"Owner", + "documentation":"

    The owner of the repository. For an AWS CodeCommit repository, this is the AWS account ID of the account that owns the repository. For a GitHub, GitHub Enterprise Server, or Bitbucket repository, this is the username for the account that owns the repository.

    " + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

    The type of repository that contains the reviewed code (for example, GitHub or Bitbucket).

    " + }, + "State":{ + "shape":"JobState", + "documentation":"

    The valid code review states are:

    • Completed: The code review is complete.

    • Pending: The code review started and has not completed or failed.

    • Failed: The code review failed.

    • Deleting: The code review is being deleted.

    " + }, + "StateReason":{ + "shape":"StateReason", + "documentation":"

    The reason for the state of the code review.

    " + }, + "CreatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

    The time, in milliseconds since the epoch, when the code review was created.

    " + }, + "LastUpdatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

    The time, in milliseconds since the epoch, when the code review was last updated.

    " + }, + "Type":{ + "shape":"Type", + "documentation":"

    The type of code review.

    " + }, + "PullRequestId":{ + "shape":"PullRequestId", + "documentation":"

    The pull request ID for the code review.

    " + }, + "SourceCodeType":{ + "shape":"SourceCodeType", + "documentation":"

    The type of the source code for the code review.

    " + }, + "AssociationArn":{ + "shape":"AssociationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the RepositoryAssociation that contains the reviewed source code. You can retrieve associated repository ARNs by calling ListRepositoryAssociations.

    " + }, + "Metrics":{ + "shape":"Metrics", + "documentation":"

    The statistics from the code review.

    " + } + }, + "documentation":"

    Information about a code review. A code review belongs to the associated repository that contains the reviewed code.

    " + }, + "CodeReviewName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9-_]*" + }, + "CodeReviewSummaries":{ + "type":"list", + "member":{"shape":"CodeReviewSummary"} + }, + "CodeReviewSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the code review.

    " + }, + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the CodeReview object.

    " + }, + "RepositoryName":{ + "shape":"Name", + "documentation":"

    The name of the repository.

    " + }, + "Owner":{ + "shape":"Owner", + "documentation":"

    The owner of the repository. For an AWS CodeCommit repository, this is the AWS account ID of the account that owns the repository. For a GitHub, GitHub Enterprise Server, or Bitbucket repository, this is the username for the account that owns the repository.

    " + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

    The provider type of the repository association.

    " + }, + "State":{ + "shape":"JobState", + "documentation":"

    The state of the code review.

    The valid code review states are:

    • Completed: The code review is complete.

    • Pending: The code review started and has not completed or failed.

    • Failed: The code review failed.

    • Deleting: The code review is being deleted.

    " + }, + "CreatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

    The time, in milliseconds since the epoch, when the code review was created.

    " + }, + "LastUpdatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

    The time, in milliseconds since the epoch, when the code review was last updated.

    " + }, + "Type":{ + "shape":"Type", + "documentation":"

    The type of the code review.

    " + }, + "PullRequestId":{ + "shape":"PullRequestId", + "documentation":"

    The pull request ID for the code review.

    " + }, + "MetricsSummary":{ + "shape":"MetricsSummary", + "documentation":"

    The statistics from the code review.

    " + } + }, + "documentation":"

    Information about the summary of the code review.

    " + }, + "CodeReviewType":{ + "type":"structure", + "required":["RepositoryAnalysis"], + "members":{ + "RepositoryAnalysis":{ + "shape":"RepositoryAnalysis", + "documentation":"

    A code review that analyzes all code under a specified branch in an associated repository. The associated repository is specified using its ARN in CreateCodeReview.

    " + } + }, + "documentation":"

    The type of a code review. There are two code review types:

    • PullRequest - A code review that is automatically triggered by a pull request on an associated repository. Because this type of code review is automatically generated, you cannot specify this code review type using CreateCodeReview.

    • RepositoryAnalysis - A code review that analyzes all code under a specified branch in an associated repository. The associated repository is specified using its ARN in CreateCodeReview.

    " + }, + "CommitDiffSourceCodeType":{ + "type":"structure", + "members":{ + "SourceCommit":{ + "shape":"CommitId", + "documentation":"

    The SHA of the source commit used to generate a commit diff.

    " + }, + "DestinationCommit":{ + "shape":"CommitId", + "documentation":"

    The SHA of the destination commit used to generate a commit diff.

    " + } + }, + "documentation":"

    A type of SourceCodeType that specifies the commit diff for a pull request on an associated repository.

    " + }, + "CommitId":{ + "type":"string", + "max":64, + "min":6 + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ConnectionArn":{ + "type":"string", + "max":256, + "min":0, + "pattern":"arn:aws(-[\\w]+)*:.+:.+:[0-9]{12}:.+" + }, + "CreateCodeReviewRequest":{ + "type":"structure", + "required":[ + "Name", + "RepositoryAssociationArn", + "Type" + ], + "members":{ + "Name":{ + "shape":"CodeReviewName", + "documentation":"

    The name of the code review. The name of each code review in your AWS account must be unique.

    " + }, + "RepositoryAssociationArn":{ + "shape":"AssociationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations.

    A code review can only be created on an associated repository. This is the ARN of the associated repository.

    " + }, + "Type":{ + "shape":"CodeReviewType", + "documentation":"

    The type of code review to create. This is specified using a CodeReviewType object. You can create a code review only of type RepositoryAnalysis.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Amazon CodeGuru Reviewer uses this value to prevent the accidental creation of duplicate code reviews if there are failures and retries.

    ", + "idempotencyToken":true + } + } + }, + "CreateCodeReviewResponse":{ + "type":"structure", + "members":{ + "CodeReview":{"shape":"CodeReview"} + } + }, + "DescribeCodeReviewRequest":{ + "type":"structure", + "required":["CodeReviewArn"], + "members":{ + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the CodeReview object.

    ", + "location":"uri", + "locationName":"CodeReviewArn" + } + } + }, + "DescribeCodeReviewResponse":{ + "type":"structure", + "members":{ + "CodeReview":{ + "shape":"CodeReview", + "documentation":"

    Information about the code review.

    " + } + } + }, + "DescribeRecommendationFeedbackRequest":{ + "type":"structure", + "required":[ + "CodeReviewArn", + "RecommendationId" + ], + "members":{ + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the CodeReview object.

    ", + "location":"uri", + "locationName":"CodeReviewArn" + }, + "RecommendationId":{ + "shape":"RecommendationId", + "documentation":"

    The recommendation ID that can be used to track the provided recommendations and then to collect the feedback.

    ", + "location":"querystring", + "locationName":"RecommendationId" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    Optional parameter to describe the feedback for a given user. If this is not supplied, it defaults to the user making the request.

    The UserId is an IAM principal that can be specified as an AWS account ID or an Amazon Resource Name (ARN). For more information, see Specifying a Principal in the AWS Identity and Access Management User Guide.

    ", + "location":"querystring", + "locationName":"UserId" + } + } + }, + "DescribeRecommendationFeedbackResponse":{ + "type":"structure", + "members":{ + "RecommendationFeedback":{ + "shape":"RecommendationFeedback", + "documentation":"

    The recommendation feedback given by the user.

    " + } + } + }, + "DescribeRepositoryAssociationRequest":{ + "type":"structure", + "required":["AssociationArn"], + "members":{ + "AssociationArn":{ + "shape":"AssociationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations.

    ", + "location":"uri", + "locationName":"AssociationArn" + } + } + }, + "DescribeRepositoryAssociationResponse":{ + "type":"structure", + "members":{ + "RepositoryAssociation":{ + "shape":"RepositoryAssociation", + "documentation":"

    Information about the repository association.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

    • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

    • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

    " + } + } + }, + "DisassociateRepositoryRequest":{ + "type":"structure", + "required":["AssociationArn"], + "members":{ + "AssociationArn":{ + "shape":"AssociationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations.

    ", + "location":"uri", + "locationName":"AssociationArn" + } + } + }, + "DisassociateRepositoryResponse":{ + "type":"structure", + "members":{ + "RepositoryAssociation":{ + "shape":"RepositoryAssociation", + "documentation":"

    Information about the disassociated repository.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

    • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

    • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

    " + } + } + }, + "ErrorMessage":{"type":"string"}, + "FilePath":{ + "type":"string", + "max":1024, + "min":1 + }, + "FindingsCount":{"type":"long"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The server encountered an internal error and is unable to complete the request.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "JobState":{ + "type":"string", + "enum":[ + "Completed", + "Pending", + "Failed", + "Deleting" + ] + }, + "JobStates":{ + "type":"list", + "member":{"shape":"JobState"}, + "max":3, + "min":1 + }, + "LineNumber":{"type":"integer"}, + "ListCodeReviewsMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListCodeReviewsRequest":{ + "type":"structure", + "required":["Type"], + "members":{ + "ProviderTypes":{ + "shape":"ProviderTypes", + "documentation":"

    List of provider types for filtering that needs to be applied before displaying the result. For example, providerTypes=[GitHub] lists code reviews from GitHub.

    ", + "location":"querystring", + "locationName":"ProviderTypes" + }, + "States":{ + "shape":"JobStates", + "documentation":"

    List of states for filtering that needs to be applied before displaying the result. For example, states=[Pending] lists code reviews in the Pending state.

    The valid code review states are:

    • Completed: The code review is complete.

    • Pending: The code review started and has not completed or failed.

    • Failed: The code review failed.

    • Deleting: The code review is being deleted.

    ", + "location":"querystring", + "locationName":"States" + }, + "RepositoryNames":{ + "shape":"RepositoryNames", + "documentation":"

    List of repository names for filtering that needs to be applied before displaying the result.

    ", + "location":"querystring", + "locationName":"RepositoryNames" + }, + "Type":{ + "shape":"Type", + "documentation":"

    The type of code reviews to list in the response.

    ", + "location":"querystring", + "locationName":"Type" + }, + "MaxResults":{ + "shape":"ListCodeReviewsMaxResults", + "documentation":"

    The maximum number of results that are returned per call. The default is 100.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListCodeReviewsResponse":{ + "type":"structure", + "members":{ + "CodeReviewSummaries":{ + "shape":"CodeReviewSummaries", + "documentation":"

    A list of code reviews that meet the criteria of the request.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Pagination token.

    " + } + } + }, + "ListRecommendationFeedbackRequest":{ + "type":"structure", + "required":["CodeReviewArn"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results that are returned per call. The default is 100.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the CodeReview object.

    ", + "location":"uri", + "locationName":"CodeReviewArn" + }, + "UserIds":{ + "shape":"UserIds", + "documentation":"

    An AWS user's account ID or Amazon Resource Name (ARN). Use this ID to query the recommendation feedback for a code review from that user.

    The UserId is an IAM principal that can be specified as an AWS account ID or an Amazon Resource Name (ARN). For more information, see Specifying a Principal in the AWS Identity and Access Management User Guide.

    ", + "location":"querystring", + "locationName":"UserIds" + }, + "RecommendationIds":{ + "shape":"RecommendationIds", + "documentation":"

    Used to query the recommendation feedback for a given recommendation.

    ", + "location":"querystring", + "locationName":"RecommendationIds" + } + } + }, + "ListRecommendationFeedbackResponse":{ + "type":"structure", + "members":{ + "RecommendationFeedbackSummaries":{ + "shape":"RecommendationFeedbackSummaries", + "documentation":"

    Recommendation feedback summaries corresponding to the code review ARN.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

    " + } + } + }, + "ListRecommendationsRequest":{ + "type":"structure", + "required":["CodeReviewArn"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Pagination token.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results that are returned per call. The default is 100.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the CodeReview object.

    ", + "location":"uri", + "locationName":"CodeReviewArn" + } + } + }, + "ListRecommendationsResponse":{ + "type":"structure", + "members":{ + "RecommendationSummaries":{ + "shape":"RecommendationSummaries", + "documentation":"

    List of recommendations for the requested code review.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Pagination token.

    " + } + } + }, + "ListRepositoryAssociationsRequest":{ + "type":"structure", + "members":{ + "ProviderTypes":{ + "shape":"ProviderTypes", + "documentation":"

    List of provider types to use as a filter.

    ", + "location":"querystring", + "locationName":"ProviderType" + }, + "States":{ + "shape":"RepositoryAssociationStates", + "documentation":"

    List of repository association states to use as a filter.

    The valid repository association states are:

    • Associated: The repository association is complete.

    • Associating: CodeGuru Reviewer is:

      • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

        If your repository ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

      • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

    • Failed: The repository failed to associate or disassociate.

    • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

    • Disassociated: CodeGuru Reviewer successfully disassociated the repository. You can create a new association with this repository if you want to review source code in it later. You can control access to code reviews created in an associated repository with tags after it has been disassociated. For more information, see Using tags to control access to associated repositories in the Amazon CodeGuru Reviewer User Guide.

    ", + "location":"querystring", + "locationName":"State" + }, + "Names":{ + "shape":"Names", + "documentation":"

    List of repository names to use as a filter.

    ", + "location":"querystring", + "locationName":"Name" + }, + "Owners":{ + "shape":"Owners", + "documentation":"

    List of owners to use as a filter. For AWS CodeCommit, it is the name of the CodeCommit account that was used to associate the repository. For other repository source providers, such as Bitbucket and GitHub Enterprise Server, this is the name of the account that was used to associate the repository.

    ", + "location":"querystring", + "locationName":"Owner" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of repository association results returned by ListRepositoryAssociations in paginated output. When this parameter is used, ListRepositoryAssociations only returns maxResults results in a single page with a nextToken response element. The remaining results of the initial request can be seen by sending another ListRepositoryAssociations request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, ListRepositoryAssociations returns up to 100 results and a nextToken value if applicable.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The nextToken value returned from a previous paginated ListRepositoryAssociations request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

    Treat this token as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
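    Because ListRepositoryAssociations declares RepositoryAssociationSummaries as its result key in paginators-1.json, the generated paginator can stream summaries directly and drive the nextToken loop for the caller. A hypothetical sketch, assuming the standard v2 codegen naming:

    import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
    import software.amazon.awssdk.services.codegurureviewer.model.ListRepositoryAssociationsRequest;
    import software.amazon.awssdk.services.codegurureviewer.model.ProviderType;

    public class ListAssociationsExample {
        public static void listCodeCommitAssociations(CodeGuruReviewerClient reviewer) {
            ListRepositoryAssociationsRequest request = ListRepositoryAssociationsRequest.builder()
                    .providerTypes(ProviderType.CODE_COMMIT)
                    .maxResults(25)
                    .build();
            // The paginator resubmits the request with each returned nextToken; callers never touch the token.
            reviewer.listRepositoryAssociationsPaginator(request)
                    .repositoryAssociationSummaries()
                    .forEach(summary -> System.out.println(summary.associationArn() + " " + summary.stateAsString()));
        }
    }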

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListRepositoryAssociationsResponse":{ + "type":"structure", + "members":{ + "RepositoryAssociationSummaries":{ + "shape":"RepositoryAssociationSummaries", + "documentation":"

    A list of repository associations that meet the criteria of the request.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The nextToken value to include in a future ListRepositoryAssociations request. When the results of a ListRepositoryAssociations request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"AssociationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

    An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

    • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

    • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "MeteredLinesOfCodeCount":{"type":"long"}, + "Metrics":{ + "type":"structure", + "members":{ + "MeteredLinesOfCodeCount":{ + "shape":"MeteredLinesOfCodeCount", + "documentation":"

    Lines of code metered in the code review. For the initial code review pull request and all subsequent revisions, this includes all lines of code in the files added to the pull request. In subsequent revisions, for files that already existed in the pull request, this includes only the changed lines of code. In both cases, this does not include non-code lines such as comments and import statements. For example, if you submit a pull request containing 5 files, each with 500 lines of code, and in a subsequent revision you added a new file with 200 lines of code, and also modified a total of 25 lines across the initial 5 files, MeteredLinesOfCodeCount includes the first 5 files (5 * 500 = 2,500 lines), the new file (200 lines) and the 25 changed lines of code for a total of 2,725 lines of code.

    " + }, + "FindingsCount":{ + "shape":"FindingsCount", + "documentation":"

    Total number of recommendations found in the code review.

    " + } + }, + "documentation":"

    Information about the statistics from the code review.

    " + }, + "MetricsSummary":{ + "type":"structure", + "members":{ + "MeteredLinesOfCodeCount":{ + "shape":"MeteredLinesOfCodeCount", + "documentation":"

    Lines of code metered in the code review. For the initial code review pull request and all subsequent revisions, this includes all lines of code in the files added to the pull request. In subsequent revisions, for files that already existed in the pull request, this includes only the changed lines of code. In both cases, this does not include non-code lines such as comments and import statements. For example, if you submit a pull request containing 5 files, each with 500 lines of code, and in a subsequent revision you added a new file with 200 lines of code, and also modified a total of 25 lines across the initial 5 files, MeteredLinesOfCodeCount includes the first 5 files (5 * 500 = 2,500 lines), the new file (200 lines) and the 25 changed lines of code for a total of 2,725 lines of code.

    " + }, + "FindingsCount":{ + "shape":"FindingsCount", + "documentation":"

    Total number of recommendations found in the code review.

    " + } + }, + "documentation":"

    Information about metrics summaries.

    " + }, + "Name":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^\\S[\\w.-]*$" + }, + "Names":{ + "type":"list", + "member":{"shape":"Name"}, + "max":3, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1 + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The resource specified in the request was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "Owner":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^\\S(.*\\S)?$" + }, + "Owners":{ + "type":"list", + "member":{"shape":"Owner"}, + "max":3, + "min":1 + }, + "ProviderType":{ + "type":"string", + "enum":[ + "CodeCommit", + "GitHub", + "Bitbucket", + "GitHubEnterpriseServer" + ] + }, + "ProviderTypes":{ + "type":"list", + "member":{"shape":"ProviderType"}, + "max":3, + "min":1 + }, + "PullRequestId":{ + "type":"string", + "max":64, + "min":1 + }, + "PutRecommendationFeedbackRequest":{ + "type":"structure", + "required":[ + "CodeReviewArn", + "RecommendationId", + "Reactions" + ], + "members":{ + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the CodeReview object.

    " + }, + "RecommendationId":{ + "shape":"RecommendationId", + "documentation":"

    The recommendation ID that can be used to track the provided recommendations and then to collect the feedback.

    " + }, + "Reactions":{ + "shape":"Reactions", + "documentation":"

    List for storing reactions. Reactions are UTF-8 text codes for emojis. If you send an empty list, it clears all your feedback.

    " + } + } + }, + "PutRecommendationFeedbackResponse":{ + "type":"structure", + "members":{ + } + }, + "Reaction":{ + "type":"string", + "enum":[ + "ThumbsUp", + "ThumbsDown" + ] + }, + "Reactions":{ + "type":"list", + "member":{"shape":"Reaction"}, + "max":1, + "min":0 + }, + "RecommendationFeedback":{ + "type":"structure", + "members":{ + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the CodeReview object.

    " + }, + "RecommendationId":{ + "shape":"RecommendationId", + "documentation":"

    The recommendation ID that can be used to track the provided recommendations. Later on it can be used to collect the feedback.

    " + }, + "Reactions":{ + "shape":"Reactions", + "documentation":"

    List for storing reactions. Reactions are UTF-8 text codes for emojis. You can send an empty list to clear all your feedback.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    The ID of the user that made the API call.

    The UserId is an IAM principal that can be specified as an AWS account ID or an Amazon Resource Name (ARN). For more information, see Specifying a Principal in the AWS Identity and Access Management User Guide.

    " + }, + "CreatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

    The time at which the feedback was created.

    " + }, + "LastUpdatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

    The time at which the feedback was last updated.

    " + } + }, + "documentation":"

    Information about the recommendation feedback.

    " + }, + "RecommendationFeedbackSummaries":{ + "type":"list", + "member":{"shape":"RecommendationFeedbackSummary"} + }, + "RecommendationFeedbackSummary":{ + "type":"structure", + "members":{ + "RecommendationId":{ + "shape":"RecommendationId", + "documentation":"

    The recommendation ID that can be used to track the provided recommendations. Later on it can be used to collect the feedback.

    " + }, + "Reactions":{ + "shape":"Reactions", + "documentation":"

    List for storing reactions. Reactions are UTF-8 text codes for emojis.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    The ID of the user that gave the feedback.

    The UserId is an IAM principal that can be specified as an AWS account ID or an Amazon Resource Name (ARN). For more information, see Specifying a Principal in the AWS Identity and Access Management User Guide.

    " + } + }, + "documentation":"

    Information about recommendation feedback summaries.

    " + }, + "RecommendationId":{ + "type":"string", + "max":64, + "min":1 + }, + "RecommendationIds":{ + "type":"list", + "member":{"shape":"RecommendationId"}, + "max":100, + "min":1 + }, + "RecommendationSummaries":{ + "type":"list", + "member":{"shape":"RecommendationSummary"} + }, + "RecommendationSummary":{ + "type":"structure", + "members":{ + "FilePath":{ + "shape":"FilePath", + "documentation":"

    Name of the file on which a recommendation is provided.

    " + }, + "RecommendationId":{ + "shape":"RecommendationId", + "documentation":"

    The recommendation ID that can be used to track the provided recommendations. Later on it can be used to collect the feedback.

    " + }, + "StartLine":{ + "shape":"LineNumber", + "documentation":"

    Start line from where the recommendation is applicable in the source commit or source branch.

    " + }, + "EndLine":{ + "shape":"LineNumber", + "documentation":"

    Last line where the recommendation is applicable in the source commit or source branch. For a single line comment the start line and end line values are the same.

    " + }, + "Description":{ + "shape":"Text", + "documentation":"

    A description of the recommendation generated by CodeGuru Reviewer for the lines of code between the start line and the end line.

    " + } + }, + "documentation":"

    Information about recommendations.

    " + }, + "Repository":{ + "type":"structure", + "members":{ + "CodeCommit":{ + "shape":"CodeCommitRepository", + "documentation":"

    Information about an AWS CodeCommit repository.

    " + }, + "Bitbucket":{ + "shape":"ThirdPartySourceRepository", + "documentation":"

    Information about a Bitbucket repository.

    " + }, + "GitHubEnterpriseServer":{ + "shape":"ThirdPartySourceRepository", + "documentation":"

    Information about a GitHub Enterprise Server repository.

    " + } + }, + "documentation":"

    Information about an associated AWS CodeCommit repository or an associated repository that is managed by AWS CodeStar Connections (for example, Bitbucket). This Repository object is not used if your source code is in an associated GitHub repository.

    " + }, + "RepositoryAnalysis":{ + "type":"structure", + "required":["RepositoryHead"], + "members":{ + "RepositoryHead":{ + "shape":"RepositoryHeadSourceCodeType", + "documentation":"

    A SourceCodeType that specifies the tip of a branch in an associated repository.

    " + } + }, + "documentation":"

    A code review type that analyzes all code under a specified branch in an associated repository. The associated repository is specified using its ARN when you call CreateCodeReview.

    " + }, + "RepositoryAssociation":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

    The ID of the repository association.

    " + }, + "AssociationArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) identifying the repository association.

    " + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

    The Amazon Resource Name (ARN) of an AWS CodeStar Connections connection. Its format is arn:aws:codestar-connections:region-id:aws-account_id:connection/connection-id. For more information, see Connection in the AWS CodeStar Connections API Reference.

    " + }, + "Name":{ + "shape":"Name", + "documentation":"

    The name of the repository.

    " + }, + "Owner":{ + "shape":"Owner", + "documentation":"

    The owner of the repository. For an AWS CodeCommit repository, this is the AWS account ID of the account that owns the repository. For a GitHub, GitHub Enterprise Server, or Bitbucket repository, this is the username for the account that owns the repository.

    " + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

    The provider type of the repository association.

    " + }, + "State":{ + "shape":"RepositoryAssociationState", + "documentation":"

    The state of the repository association.

    The valid repository association states are:

    • Associated: The repository association is complete.

    • Associating: CodeGuru Reviewer is:

      • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

        If your repository ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

      • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

    • Failed: The repository failed to associate or disassociate.

    • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

    • Disassociated: CodeGuru Reviewer successfully disassociated the repository. You can create a new association with this repository if you want to review source code in it later. You can control access to code reviews created in an associated repository with tags after it has been disassociated. For more information, see Using tags to control access to associated repositories in the Amazon CodeGuru Reviewer User Guide.

    " + }, + "StateReason":{ + "shape":"StateReason", + "documentation":"

    A description of why the repository association is in the current state.

    " + }, + "LastUpdatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

    The time, in milliseconds since the epoch, when the repository association was last updated.

    " + }, + "CreatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

    The time, in milliseconds since the epoch, when the repository association was created.

    " + } + }, + "documentation":"

    Information about a repository association. The DescribeRepositoryAssociation operation returns a RepositoryAssociation object.

    " + }, + "RepositoryAssociationState":{ + "type":"string", + "enum":[ + "Associated", + "Associating", + "Failed", + "Disassociating", + "Disassociated" + ] + }, + "RepositoryAssociationStates":{ + "type":"list", + "member":{"shape":"RepositoryAssociationState"}, + "max":5, + "min":1 + }, + "RepositoryAssociationSummaries":{ + "type":"list", + "member":{"shape":"RepositoryAssociationSummary"} + }, + "RepositoryAssociationSummary":{ + "type":"structure", + "members":{ + "AssociationArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations.

    " + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

    The Amazon Resource Name (ARN) of an AWS CodeStar Connections connection. Its format is arn:aws:codestar-connections:region-id:aws-account_id:connection/connection-id. For more information, see Connection in the AWS CodeStar Connections API Reference.

    " + }, + "LastUpdatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

    The time, in milliseconds since the epoch, when the repository association was last updated.

    " + }, + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

    The repository association ID.

    " + }, + "Name":{ + "shape":"Name", + "documentation":"

    The name of the repository association.

    " + }, + "Owner":{ + "shape":"Owner", + "documentation":"

    The owner of the repository. For an AWS CodeCommit repository, this is the AWS account ID of the account that owns the repository. For a GitHub, GitHub Enterprise Server, or Bitbucket repository, this is the username for the account that owns the repository.

    " + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

    The provider type of the repository association.

    " + }, + "State":{ + "shape":"RepositoryAssociationState", + "documentation":"

    The state of the repository association.

    The valid repository association states are:

    • Associated: The repository association is complete.

    • Associating: CodeGuru Reviewer is:

      • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

        If your repository ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

      • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

    • Failed: The repository failed to associate or disassociate.

    • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

    • Disassociated: CodeGuru Reviewer successfully disassociated the repository. You can create a new association with this repository if you want to review source code in it later. You can control access to code reviews created in an associated repository with tags after it has been disassociated. For more information, see Using tags to control access to associated repositories in the Amazon CodeGuru Reviewer User Guide.

    " + } + }, + "documentation":"

    Summary information about a repository association. The ListRepositoryAssociations operation returns a list of RepositoryAssociationSummary objects.

    " + }, + "RepositoryHeadSourceCodeType":{ + "type":"structure", + "required":["BranchName"], + "members":{ + "BranchName":{ + "shape":"BranchName", + "documentation":"

    The name of the branch in an associated repository. The RepositoryHeadSourceCodeType specifies the tip of this branch.

    " + } + }, + "documentation":"

    A SourceCodeType that specifies the tip of a branch in an associated repository.

    " + }, + "RepositoryNames":{ + "type":"list", + "member":{"shape":"Name"}, + "max":100, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The resource specified in the request was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SourceCodeType":{ + "type":"structure", + "members":{ + "CommitDiff":{ + "shape":"CommitDiffSourceCodeType", + "documentation":"

    A SourceCodeType that specifies a commit diff created by a pull request on an associated repository.

    " + }, + "RepositoryHead":{"shape":"RepositoryHeadSourceCodeType"} + }, + "documentation":"

    Specifies the source code that is analyzed in a code review. A code review can analyze the source code that is specified using a pull request diff or a branch in an associated repository.

    " + }, + "StateReason":{ + "type":"string", + "max":256, + "min":0 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "Tags" + ], + "members":{ + "resourceArn":{ + "shape":"AssociationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

    • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

    • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "Text":{ + "type":"string", + "max":2048, + "min":1 + }, + "ThirdPartySourceRepository":{ + "type":"structure", + "required":[ + "Name", + "ConnectionArn", + "Owner" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the third-party source repository.

    " + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

    The Amazon Resource Name (ARN) of an AWS CodeStar Connections connection. Its format is arn:aws:codestar-connections:region-id:aws-account_id:connection/connection-id. For more information, see Connection in the AWS CodeStar Connections API Reference.

    " + }, + "Owner":{ + "shape":"Owner", + "documentation":"

    The owner of the repository. For a GitHub, GitHub Enterprise, or Bitbucket repository, this is the username for the account that owns the repository.

    " + } + }, + "documentation":"

    Information about a third-party source repository connected to CodeGuru Reviewer.

    " + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "TimeStamp":{"type":"timestamp"}, + "Type":{ + "type":"string", + "enum":[ + "PullRequest", + "RepositoryAnalysis" + ] + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "TagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"AssociationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    A list of the keys for each tag you want to remove from an associated repository.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UserId":{ + "type":"string", + "max":256, + "min":1 + }, + "UserIds":{ + "type":"list", + "member":{"shape":"UserId"}, + "max":100, + "min":1 + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The input fails to satisfy the specified constraints.

    ", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

    This section provides documentation for the Amazon CodeGuru Reviewer API operations. CodeGuru Reviewer is a service that uses program analysis and machine learning to detect potential defects that are difficult for developers to find and recommends fixes in your Java code.

    By proactively detecting and providing recommendations for addressing code defects and implementing best practices, CodeGuru Reviewer improves the overall quality and maintainability of your code base during the code review stage. For more information about CodeGuru Reviewer, see the Amazon CodeGuru Reviewer User Guide.

    To improve the security of your CodeGuru Reviewer API calls, you can establish a private connection between your VPC and CodeGuru Reviewer by creating an interface VPC endpoint. For more information, see CodeGuru Reviewer and interface VPC endpoints (AWS PrivateLink) in the Amazon CodeGuru Reviewer User Guide.
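
    As a rough illustration of the tagging surface described in this model, the sketch below tags an existing repository association with the AWS SDK for Java v2. This is a minimal sketch, assuming the generated client and model names (CodeGuruReviewerClient, TagResourceRequest) follow the SDK's usual code-generation conventions; the association ARN is a placeholder that you would normally retrieve by calling ListRepositoryAssociations.

        import java.util.Map;

        import software.amazon.awssdk.services.codegurureviewer.CodeGuruReviewerClient;
        import software.amazon.awssdk.services.codegurureviewer.model.TagResourceRequest;

        public class TagAssociationSketch {
            public static void main(String[] args) {
                // Assumed generated client name; region and credentials come from the default chains.
                try (CodeGuruReviewerClient reviewer = CodeGuruReviewerClient.create()) {
                    // Placeholder ARN; retrieve the real one with ListRepositoryAssociations.
                    String associationArn =
                        "arn:aws:codeguru-reviewer:us-east-2:123456789012:association:example";
                    reviewer.tagResource(TagResourceRequest.builder()
                        .resourceArn(associationArn)
                        .tags(Map.of("Environment", "Production", "Team", "Payments"))
                        .build());
                }
            }
        }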

    " +} diff --git a/services/codepipeline/build.properties b/services/codepipeline/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/codepipeline/build.properties +++ b/services/codepipeline/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index 0ebb67215ad2..e32cbc2ef548 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + codestarconnections + AWS Java SDK :: Services :: CodeStar connections + The AWS Java SDK for CodeStar connections module holds the client classes that are used for + communicating with CodeStar connections. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.codestarconnections + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/codestarconnections/src/main/resources/codegen-resources/paginators-1.json b/services/codestarconnections/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..1b3698862cab --- /dev/null +++ b/services/codestarconnections/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,14 @@ +{ + "pagination": { + "ListConnections": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListHosts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/codestarconnections/src/main/resources/codegen-resources/service-2.json b/services/codestarconnections/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..7c3dce35a31a --- /dev/null +++ b/services/codestarconnections/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,772 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-12-01", + "endpointPrefix":"codestar-connections", + "jsonVersion":"1.0", + "protocol":"json", + "serviceFullName":"AWS CodeStar connections", + "serviceId":"CodeStar connections", + "signatureVersion":"v4", + "signingName":"codestar-connections", + "targetPrefix":"com.amazonaws.codestar.connections.CodeStar_connections_20191201", + "uid":"codestar-connections-2019-12-01" + }, + "operations":{ + "CreateConnection":{ + "name":"CreateConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateConnectionInput"}, + "output":{"shape":"CreateConnectionOutput"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

    Creates a connection that can then be given to other AWS services like CodePipeline so that it can access third-party code repositories. The connection is in pending status until the third-party connection handshake is completed from the console.
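
    A minimal sketch of this call with the AWS SDK for Java v2, assuming the generated client and model names (CodeStarConnectionsClient, CreateConnectionRequest) follow the usual code-generation conventions:

        import software.amazon.awssdk.services.codestarconnections.CodeStarConnectionsClient;
        import software.amazon.awssdk.services.codestarconnections.model.CreateConnectionRequest;
        import software.amazon.awssdk.services.codestarconnections.model.CreateConnectionResponse;
        import software.amazon.awssdk.services.codestarconnections.model.ProviderType;

        public class CreateConnectionSketch {
            public static void main(String[] args) {
                try (CodeStarConnectionsClient connections = CodeStarConnectionsClient.create()) {
                    CreateConnectionResponse created = connections.createConnection(
                        CreateConnectionRequest.builder()
                            .connectionName("my-bitbucket-connection") // must be unique in the account
                            .providerType(ProviderType.BITBUCKET)
                            .build());
                    // The new connection stays in PENDING status until the handshake
                    // is completed from the console.
                    System.out.println("Connection ARN: " + created.connectionArn());
                }
            }
        }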

    " + }, + "CreateHost":{ + "name":"CreateHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHostInput"}, + "output":{"shape":"CreateHostOutput"}, + "errors":[ + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a resource that represents the infrastructure where a third-party provider is installed. The host is used when you create connections to an installed third-party provider type, such as GitHub Enterprise Server. You create one host for all connections to that provider.

    A host created through the CLI or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE` by setting up the host in the console.
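
    A sketch of creating a GitHub Enterprise Server host, reusing the assumed CodeStarConnectionsClient and model package from the earlier sketch; the endpoint, VPC, subnet, and security group IDs are placeholders:

        // 'connections' is the CodeStarConnectionsClient from the CreateConnection sketch.
        CreateHostResponse host = connections.createHost(CreateHostRequest.builder()
            .name("my-ghes-host")
            .providerType(ProviderType.GITHUB_ENTERPRISE_SERVER)
            .providerEndpoint("https://ghes.example.com")
            .vpcConfiguration(VpcConfiguration.builder()
                .vpcId("vpc-0abc1234")                            // placeholder IDs
                .subnetIds("subnet-0abc1234", "subnet-0def5678")
                .securityGroupIds("sg-0abc1234")
                .build())
            .build());
        // The host is created in PENDING status; finish setup in the console to move it to AVAILABLE.
        System.out.println("Host ARN: " + host.hostArn());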

    " + }, + "DeleteConnection":{ + "name":"DeleteConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConnectionInput"}, + "output":{"shape":"DeleteConnectionOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Deletes the specified connection.

    " + }, + "DeleteHost":{ + "name":"DeleteHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHostInput"}, + "output":{"shape":"DeleteHostOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

    Deletes the specified host. Before you delete a host, all connections associated with the host must be deleted.

    A host cannot be deleted if it is in the VPC_CONFIG_INITIALIZING or VPC_CONFIG_DELETING state.

    " + }, + "GetConnection":{ + "name":"GetConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConnectionInput"}, + "output":{"shape":"GetConnectionOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

    Returns the connection ARN and details such as status, owner, and provider type.

    " + }, + "GetHost":{ + "name":"GetHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetHostInput"}, + "output":{"shape":"GetHostOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

    Returns the host ARN and details such as status, provider type, endpoint, and, if applicable, the VPC configuration.

    " + }, + "ListConnections":{ + "name":"ListConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListConnectionsInput"}, + "output":{"shape":"ListConnectionsOutput"}, + "documentation":"

    Lists the connections associated with your account.

    " + }, + "ListHosts":{ + "name":"ListHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListHostsInput"}, + "output":{"shape":"ListHostsOutput"}, + "documentation":"

    Lists the hosts associated with your account.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets the set of key-value pairs (metadata) that are used to manage the resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Adds to or modifies the tags of the given resource. Tags are metadata that can be used to manage a resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes tags from an AWS resource.

    " + }, + "UpdateHost":{ + "name":"UpdateHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateHostInput"}, + "output":{"shape":"UpdateHostOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"

    Updates a specified host with the provided configurations.

    " + } + }, + "shapes":{ + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"[0-9]{12}" + }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"arn:aws(-[\\w]+)*:.+:.+:[0-9]{12}:.+" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Two conflicting operations have been made on the same resource.

    ", + "exception":true + }, + "Connection":{ + "type":"structure", + "members":{ + "ConnectionName":{ + "shape":"ConnectionName", + "documentation":"

    The name of the connection. Connection names must be unique in an AWS user account.

    " + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection reference when the connection is shared between AWS services.

    The ARN is never reused if the connection is deleted.

    " + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

    The name of the external provider where your third-party code repository is configured.

    " + }, + "OwnerAccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier of the external provider where your third-party code repository is configured. For Bitbucket, this is the account ID of the owner of the Bitbucket repository.

    " + }, + "ConnectionStatus":{ + "shape":"ConnectionStatus", + "documentation":"

    The current status of the connection.

    " + }, + "HostArn":{ + "shape":"HostArn", + "documentation":"

    The Amazon Resource Name (ARN) of the host associated with the connection.

    " + } + }, + "documentation":"

    A resource that is used to connect third-party source providers with services like AWS CodePipeline.

    Note: A connection created through CloudFormation, the CLI, or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE` by updating the connection in the console.

    " + }, + "ConnectionArn":{ + "type":"string", + "max":256, + "min":0, + "pattern":"arn:aws(-[\\w]+)*:.+:.+:[0-9]{12}:.+" + }, + "ConnectionList":{ + "type":"list", + "member":{"shape":"Connection"} + }, + "ConnectionName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[\\s\\S]*" + }, + "ConnectionStatus":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "ERROR" + ] + }, + "CreateConnectionInput":{ + "type":"structure", + "required":["ConnectionName"], + "members":{ + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

    The name of the external provider where your third-party code repository is configured.

    " + }, + "ConnectionName":{ + "shape":"ConnectionName", + "documentation":"

    The name of the connection to be created. The name must be unique in the calling AWS account.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The key-value pair to use when tagging the resource.

    " + }, + "HostArn":{ + "shape":"HostArn", + "documentation":"

    The Amazon Resource Name (ARN) of the host associated with the connection to be created.

    " + } + } + }, + "CreateConnectionOutput":{ + "type":"structure", + "required":["ConnectionArn"], + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the connection to be created. The ARN is used as the connection reference when the connection is shared between AWS services.

    The ARN is never reused if the connection is deleted.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    Specifies the tags applied to the resource.

    " + } + } + }, + "CreateHostInput":{ + "type":"structure", + "required":[ + "Name", + "ProviderType", + "ProviderEndpoint" + ], + "members":{ + "Name":{ + "shape":"HostName", + "documentation":"

    The name of the host to be created. The name must be unique in the calling AWS account.

    " + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

    The name of the installed provider to be associated with your connection. The host resource represents the infrastructure where your provider type is installed. The valid provider type is GitHub Enterprise Server.

    " + }, + "ProviderEndpoint":{ + "shape":"Url", + "documentation":"

    The endpoint of the infrastructure to be represented by the host after it is created.

    " + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

    The VPC configuration to be provisioned for the host. A VPC must be configured and the infrastructure to be represented by the host must already be connected to the VPC.

    " + } + } + }, + "CreateHostOutput":{ + "type":"structure", + "members":{ + "HostArn":{ + "shape":"HostArn", + "documentation":"

    The Amazon Resource Name (ARN) of the host to be created.

    " + } + } + }, + "DeleteConnectionInput":{ + "type":"structure", + "required":["ConnectionArn"], + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the connection to be deleted.

    The ARN is never reused if the connection is deleted.

    " + } + } + }, + "DeleteConnectionOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteHostInput":{ + "type":"structure", + "required":["HostArn"], + "members":{ + "HostArn":{ + "shape":"HostArn", + "documentation":"

    The Amazon Resource Name (ARN) of the host to be deleted.

    " + } + } + }, + "DeleteHostOutput":{ + "type":"structure", + "members":{ + } + }, + "ErrorMessage":{ + "type":"string", + "max":600 + }, + "GetConnectionInput":{ + "type":"structure", + "required":["ConnectionArn"], + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

    The Amazon Resource Name (ARN) of a connection.

    " + } + } + }, + "GetConnectionOutput":{ + "type":"structure", + "members":{ + "Connection":{ + "shape":"Connection", + "documentation":"

    The connection details, such as status, owner, and provider type.

    " + } + } + }, + "GetHostInput":{ + "type":"structure", + "required":["HostArn"], + "members":{ + "HostArn":{ + "shape":"HostArn", + "documentation":"

    The Amazon Resource Name (ARN) of the requested host.

    " + } + } + }, + "GetHostOutput":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"HostName", + "documentation":"

    The name of the requested host.

    " + }, + "Status":{ + "shape":"HostStatus", + "documentation":"

    The status of the requested host.

    " + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

    The provider type of the requested host, such as GitHub Enterprise Server.

    " + }, + "ProviderEndpoint":{ + "shape":"Url", + "documentation":"

    The endpoint of the infrastructure represented by the requested host.

    " + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

    The VPC configuration of the requested host.

    " + } + } + }, + "Host":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"HostName", + "documentation":"

    The name of the host.

    " + }, + "HostArn":{ + "shape":"HostArn", + "documentation":"

    The Amazon Resource Name (ARN) of the host.

    " + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

    The name of the installed provider to be associated with your connection. The host resource represents the infrastructure where your provider type is installed. The valid provider type is GitHub Enterprise Server.

    " + }, + "ProviderEndpoint":{ + "shape":"Url", + "documentation":"

    The endpoint of the infrastructure where your provider type is installed.

    " + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

    The VPC configuration provisioned for the host.

    " + }, + "Status":{ + "shape":"HostStatus", + "documentation":"

    The status of the host, such as PENDING, AVAILABLE, VPC_CONFIG_DELETING, VPC_CONFIG_INITIALIZING, and VPC_CONFIG_FAILED_INITIALIZATION.

    " + }, + "StatusMessage":{ + "shape":"HostStatusMessage", + "documentation":"

    The status description for the host.

    " + } + }, + "documentation":"

    A resource that represents the infrastructure where a third-party provider is installed. The host is used when you create connections to an installed third-party provider type, such as GitHub Enterprise Server. You create one host for all connections to that provider.

    A host created through the CLI or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE` by setting up the host in the console.

    " + }, + "HostArn":{ + "type":"string", + "max":256, + "min":0, + "pattern":"arn:aws(-[\\w]+)*:codestar-connections:.+:[0-9]{12}:host\\/.+" + }, + "HostList":{ + "type":"list", + "member":{"shape":"Host"} + }, + "HostName":{ + "type":"string", + "max":64, + "min":1, + "pattern":".*" + }, + "HostStatus":{"type":"string"}, + "HostStatusMessage":{"type":"string"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Exceeded the maximum limit for connections.

    ", + "exception":true + }, + "ListConnectionsInput":{ + "type":"structure", + "members":{ + "ProviderTypeFilter":{ + "shape":"ProviderType", + "documentation":"

    Filters the list of connections to those associated with a specified provider, such as Bitbucket.

    " + }, + "HostArnFilter":{ + "shape":"HostArn", + "documentation":"

    Filters the list of connections to those associated with a specified host.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token that was returned from the previous ListConnections call, which can be used to return the next set of connections in the list.

    " + } + } + }, + "ListConnectionsOutput":{ + "type":"structure", + "members":{ + "Connections":{ + "shape":"ConnectionList", + "documentation":"

    A list of connections and the details for each connection, such as status, owner, and provider type.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token that can be used in the next ListConnections call. To view all items in the list, continue to call this operation with each subsequent token until no more nextToken values are returned.
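
    A sketch of paging through ListConnections by hand with the nextToken value, using the same assumed client; the paginator definitions added in this change should also generate a listConnectionsPaginator helper that performs the same loop for you:

        String nextToken = null;
        do {
            ListConnectionsResponse page = connections.listConnections(
                ListConnectionsRequest.builder()
                    .maxResults(50)
                    .nextToken(nextToken)            // null on the first call
                    .build());
            page.connections().forEach(c ->
                System.out.println(c.connectionName() + " -> " + c.connectionStatus()));
            nextToken = page.nextToken();
        } while (nextToken != null);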

    " + } + } + }, + "ListHostsInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token that was returned from the previous ListHosts call, which can be used to return the next set of hosts in the list.

    " + } + } + }, + "ListHostsOutput":{ + "type":"structure", + "members":{ + "Hosts":{ + "shape":"HostList", + "documentation":"

    A list of hosts and the details for each host, such as status, endpoint, and provider type.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token that can be used in the next ListHosts call. To view all items in the list, continue to call this operation with each subsequent token until no more nextToken values are returned.

    " + } + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the resource for which you want to get information about tags, if any.

    " + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

    A list of tag key and value pairs associated with the specified resource.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":0 + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, + "ProviderType":{ + "type":"string", + "enum":[ + "Bitbucket", + "GitHub", + "GitHubEnterpriseServer" + ] + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Resource not found. Verify the connection resource ARN and try again.

    ", + "exception":true + }, + "ResourceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Resource not found. Verify the ARN for the host resource and try again.

    ", + "exception":true + }, + "SecurityGroupId":{ + "type":"string", + "max":20, + "min":11, + "pattern":"sg-\\w{8}(\\w{9})?" + }, + "SecurityGroupIds":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":10, + "min":1 + }, + "SubnetId":{ + "type":"string", + "max":24, + "min":15, + "pattern":"subnet-\\w{8}(\\w{9})?" + }, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"}, + "max":10, + "min":1 + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    The tag's key.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    The tag's value.

    " + } + }, + "documentation":"

    A tag is a key-value pair that is used to manage the resource.

    This tag is available for use by AWS services that support tags.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the resource to which you want to add or update tags.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags you want to modify or add to the resource.

    " + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":".*" + }, + "TlsCertificate":{ + "type":"string", + "max":16384, + "min":1, + "pattern":"[\\s\\S]*" + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The operation is not supported. Check the connection status and try again.

    ", + "exception":true + }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the resource to remove tags from.

    " + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The list of keys for the tags to be removed from the resource.

    " + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateHostInput":{ + "type":"structure", + "required":["HostArn"], + "members":{ + "HostArn":{ + "shape":"HostArn", + "documentation":"

    The Amazon Resource Name (ARN) of the host to be updated.

    " + }, + "ProviderEndpoint":{ + "shape":"Url", + "documentation":"

    The URL or endpoint of the host to be updated.

    " + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

    The VPC configuration of the host to be updated. A VPC must be configured and the infrastructure to be represented by the host must already be connected to the VPC.

    " + } + } + }, + "UpdateHostOutput":{ + "type":"structure", + "members":{ + } + }, + "Url":{ + "type":"string", + "max":512, + "min":1, + "pattern":".*" + }, + "VpcConfiguration":{ + "type":"structure", + "required":[ + "VpcId", + "SubnetIds", + "SecurityGroupIds" + ], + "members":{ + "VpcId":{ + "shape":"VpcId", + "documentation":"

    The ID of the Amazon VPC connected to the infrastructure where your provider type is installed.

    " + }, + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

    The ID of the subnet or subnets associated with the Amazon VPC connected to the infrastructure where your provider type is installed.

    " + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

    The ID of the security group or security groups associated with the Amazon VPC connected to the infrastructure where your provider type is installed.

    " + }, + "TlsCertificate":{ + "shape":"TlsCertificate", + "documentation":"

    The value of the Transport Layer Security (TLS) certificate associated with the infrastructure where your provider type is installed.

    " + } + }, + "documentation":"

    The VPC configuration provisioned for the host.

    " + }, + "VpcId":{ + "type":"string", + "max":21, + "min":12, + "pattern":"vpc-\\w{8}(\\w{9})?" + } + }, + "documentation":"AWS CodeStar Connections

    This AWS CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Connections API. You can use the connections API to work with connections and installations.

    Connections are configurations that you use to connect AWS resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection.

    When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket app. When you create a connection, you can choose an existing installation or create one.

    When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections.

    You can work with connections by calling:

    • CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline.

    • DeleteConnection, which deletes the specified connection.

    • GetConnection, which returns information about the connection, including the connection status.

    • ListConnections, which lists the connections associated with your account.

    You can work with hosts by calling:

    • CreateHost, which creates a host that represents the infrastructure where your provider is installed.

    • DeleteHost, which deletes the specified host.

    • GetHost, which returns information about the host, including the setup status.

    • ListHosts, which lists the hosts associated with your account.

    You can work with tags in AWS CodeStar Connections by calling the following:

    • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeStar Connections.

    • TagResource, which adds or updates tags for a resource in AWS CodeStar Connections.

    • UntagResource, which removes tags for a resource in AWS CodeStar Connections.

    For information about how to use AWS CodeStar Connections, see the Developer Tools User Guide.
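
    Tying the workflow above together, a sketch of checking that a connection has finished its handshake before handing it to a service such as CodePipeline, with the same assumed client and a placeholder ARN:

        String connectionArn =
            "arn:aws:codestar-connections:us-east-2:123456789012:connection/example-id"; // placeholder
        GetConnectionResponse lookedUp = connections.getConnection(
            GetConnectionRequest.builder().connectionArn(connectionArn).build());
        if (lookedUp.connection().connectionStatus() == ConnectionStatus.AVAILABLE) {
            // Safe to reference this connection ARN from CodePipeline and other services.
        }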

    " +} diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml new file mode 100644 index 000000000000..24622bc43b49 --- /dev/null +++ b/services/codestarnotifications/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + codestarnotifications + AWS Java SDK :: Services :: Codestar Notifications + The AWS Java SDK for Codestar Notifications module holds the client classes that are used for + communicating with Codestar Notifications. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.codestarnotifications + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/codestarnotifications/src/main/resources/codegen-resources/paginators-1.json b/services/codestarnotifications/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..c958196e0843 --- /dev/null +++ b/services/codestarnotifications/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListEventTypes": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "EventTypes" + }, + "ListNotificationRules": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "NotificationRules" + }, + "ListTargets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Targets" + } + } +} \ No newline at end of file diff --git a/services/codestarnotifications/src/main/resources/codegen-resources/service-2.json b/services/codestarnotifications/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..df94d9bcdccb --- /dev/null +++ b/services/codestarnotifications/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,973 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-10-15", + "endpointPrefix":"codestar-notifications", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS CodeStar Notifications", + "serviceId":"codestar notifications", + "signatureVersion":"v4", + "signingName":"codestar-notifications", + "uid":"codestar-notifications-2019-10-15" + }, + "operations":{ + "CreateNotificationRule":{ + "name":"CreateNotificationRule", + "http":{ + "method":"POST", + "requestUri":"/createNotificationRule" + }, + "input":{"shape":"CreateNotificationRuleRequest"}, + "output":{"shape":"CreateNotificationRuleResult"}, + "errors":[ + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConfigurationException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates a notification rule for a resource. The rule specifies the events you want notifications about and the targets (such as SNS topics) where you want to receive them.
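
    A minimal sketch of this call with the AWS SDK for Java v2, assuming the generated client and model names (CodestarNotificationsClient and friends) follow the usual code-generation conventions; the resource ARN, topic ARN, and event type ID are placeholders:

        import software.amazon.awssdk.services.codestarnotifications.CodestarNotificationsClient;
        import software.amazon.awssdk.services.codestarnotifications.model.CreateNotificationRuleRequest;
        import software.amazon.awssdk.services.codestarnotifications.model.CreateNotificationRuleResponse;
        import software.amazon.awssdk.services.codestarnotifications.model.DetailType;
        import software.amazon.awssdk.services.codestarnotifications.model.Target;

        public class CreateNotificationRuleSketch {
            public static void main(String[] args) {
                try (CodestarNotificationsClient notifications = CodestarNotificationsClient.create()) {
                    CreateNotificationRuleResponse rule = notifications.createNotificationRule(
                        CreateNotificationRuleRequest.builder()
                            .name("main-branch-build-failures")                    // unique per account
                            .resource("arn:aws:codebuild:us-east-2:123456789012:project/MyProject")
                            .eventTypeIds("codebuild-project-build-state-failed") // placeholder event type ID
                            .targets(Target.builder()
                                .targetType("SNS")
                                .targetAddress("arn:aws:sns:us-east-2:123456789012:build-alerts")
                                .build())
                            .detailType(DetailType.FULL)
                            .build());
                    System.out.println("Rule ARN: " + rule.arn());
                }
            }
        }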

    " + }, + "DeleteNotificationRule":{ + "name":"DeleteNotificationRule", + "http":{ + "method":"POST", + "requestUri":"/deleteNotificationRule" + }, + "input":{"shape":"DeleteNotificationRuleRequest"}, + "output":{"shape":"DeleteNotificationRuleResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Deletes a notification rule for a resource.

    " + }, + "DeleteTarget":{ + "name":"DeleteTarget", + "http":{ + "method":"POST", + "requestUri":"/deleteTarget" + }, + "input":{"shape":"DeleteTargetRequest"}, + "output":{"shape":"DeleteTargetResult"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes a specified target for notifications.

    " + }, + "DescribeNotificationRule":{ + "name":"DescribeNotificationRule", + "http":{ + "method":"POST", + "requestUri":"/describeNotificationRule" + }, + "input":{"shape":"DescribeNotificationRuleRequest"}, + "output":{"shape":"DescribeNotificationRuleResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns information about a specified notification rule.

    " + }, + "ListEventTypes":{ + "name":"ListEventTypes", + "http":{ + "method":"POST", + "requestUri":"/listEventTypes" + }, + "input":{"shape":"ListEventTypesRequest"}, + "output":{"shape":"ListEventTypesResult"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns information about the event types available for configuring notifications.

    " + }, + "ListNotificationRules":{ + "name":"ListNotificationRules", + "http":{ + "method":"POST", + "requestUri":"/listNotificationRules" + }, + "input":{"shape":"ListNotificationRulesRequest"}, + "output":{"shape":"ListNotificationRulesResult"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of the notification rules for an AWS account.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/listTagsForResource" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of the tags associated with a notification rule.

    " + }, + "ListTargets":{ + "name":"ListTargets", + "http":{ + "method":"POST", + "requestUri":"/listTargets" + }, + "input":{"shape":"ListTargetsRequest"}, + "output":{"shape":"ListTargetsResult"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of the notification rule targets for an AWS account.

    " + }, + "Subscribe":{ + "name":"Subscribe", + "http":{ + "method":"POST", + "requestUri":"/subscribe" + }, + "input":{"shape":"SubscribeRequest"}, + "output":{"shape":"SubscribeResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Creates an association between a notification rule and an SNS topic so that the associated target can receive notifications when the events described in the rule are triggered.
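
    A sketch of adding one more SNS topic to an existing rule, reusing the assumed client and model package from the CreateNotificationRule sketch; both ARNs are placeholders:

        // 'notifications' is the CodestarNotificationsClient from the earlier sketch.
        String ruleArn =
            "arn:aws:codestar-notifications:us-east-2:123456789012:notificationrule/example-id";
        notifications.subscribe(SubscribeRequest.builder()
            .arn(ruleArn)
            .target(Target.builder()
                .targetType("SNS")
                .targetAddress("arn:aws:sns:us-east-2:123456789012:oncall-alerts")
                .build())
            .build());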

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tagResource" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Associates a set of provided tags with a notification rule.

    " + }, + "Unsubscribe":{ + "name":"Unsubscribe", + "http":{ + "method":"POST", + "requestUri":"/unsubscribe" + }, + "input":{"shape":"UnsubscribeRequest"}, + "output":{"shape":"UnsubscribeResult"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Removes an association between a notification rule and an Amazon SNS topic so that subscribers to that topic stop receiving notifications when the events described in the rule are triggered.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/untagResource" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Removes the association between one or more provided tags and a notification rule.

    " + }, + "UpdateNotificationRule":{ + "name":"UpdateNotificationRule", + "http":{ + "method":"POST", + "requestUri":"/updateNotificationRule" + }, + "input":{"shape":"UpdateNotificationRuleRequest"}, + "output":{"shape":"UpdateNotificationRuleResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Updates a notification rule for a resource. You can change the events that trigger the notification rule, the status of the rule, and the targets that receive the notifications.

    To add or remove tags for a notification rule, you must use TagResource and UntagResource.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    AWS CodeStar Notifications can't create the notification rule because you do not have sufficient permissions.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "ClientRequestToken":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[\\w:/-]+$" + }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    AWS CodeStar Notifications can't complete the request because the resource is being modified by another process. Wait a few minutes and try again.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ConfigurationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    Some or all of the configuration is incomplete, missing, or not valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateNotificationRuleRequest":{ + "type":"structure", + "required":[ + "Name", + "EventTypeIds", + "Resource", + "Targets", + "DetailType" + ], + "members":{ + "Name":{ + "shape":"NotificationRuleName", + "documentation":"

    The name for the notification rule. Notification rule names must be unique in your AWS account.

    " + }, + "EventTypeIds":{ + "shape":"EventTypeIds", + "documentation":"

    A list of event types associated with this notification rule. For a list of allowed events, see EventTypeSummary.

    " + }, + "Resource":{ + "shape":"NotificationRuleResource", + "documentation":"

    The Amazon Resource Name (ARN) of the resource to associate with the notification rule. Supported resources include pipelines in AWS CodePipeline, repositories in AWS CodeCommit, and build projects in AWS CodeBuild.

    " + }, + "Targets":{ + "shape":"Targets", + "documentation":"

    A list of Amazon Resource Names (ARNs) of SNS topics to associate with the notification rule.

    " + }, + "DetailType":{ + "shape":"DetailType", + "documentation":"

    The level of detail to include in the notifications for this resource. BASIC will include only the contents of the event as it would appear in AWS CloudWatch. FULL will include any supplemental information provided by AWS CodeStar Notifications and/or the service for the resource for which the notification is created.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request with the same parameters is received and a token is included, the request returns information about the initial request that used that token.

    The AWS SDKs prepopulate client request tokens. If you are using an AWS SDK, an idempotency token is created for you.

    ", + "idempotencyToken":true + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    A list of tags to apply to this notification rule. Key names cannot start with \"aws\".

    " + }, + "Status":{ + "shape":"NotificationRuleStatus", + "documentation":"

    The status of the notification rule. The default value is ENABLED. If the status is set to DISABLED, notifications aren't sent for the notification rule.

    " + } + } + }, + "CreateNotificationRuleResult":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule.

    " + } + } + }, + "CreatedTimestamp":{"type":"timestamp"}, + "DeleteNotificationRuleRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule you want to delete.

    " + } + } + }, + "DeleteNotificationRuleResult":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the deleted notification rule.

    " + } + } + }, + "DeleteTargetRequest":{ + "type":"structure", + "required":["TargetAddress"], + "members":{ + "TargetAddress":{ + "shape":"TargetAddress", + "documentation":"

    The Amazon Resource Name (ARN) of the SNS topic to delete.

    " + }, + "ForceUnsubscribeAll":{ + "shape":"ForceUnsubscribeAll", + "documentation":"

    A Boolean value that can be used to delete all associations with this SNS topic. The default value is FALSE. If set to TRUE, all associations between that target and every notification rule in your AWS account are deleted.
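
    A sketch of removing an SNS topic from every notification rule in the account by setting this flag, with the same assumed client and a placeholder topic ARN:

        notifications.deleteTarget(DeleteTargetRequest.builder()
            .targetAddress("arn:aws:sns:us-east-2:123456789012:build-alerts")
            .forceUnsubscribeAll(true)   // also drop the topic from every rule that uses it
            .build());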

    " + } + } + }, + "DeleteTargetResult":{ + "type":"structure", + "members":{ + } + }, + "DescribeNotificationRuleRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule.

    " + } + } + }, + "DescribeNotificationRuleResult":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule.

    " + }, + "Name":{ + "shape":"NotificationRuleName", + "documentation":"

    The name of the notification rule.

    " + }, + "EventTypes":{ + "shape":"EventTypeBatch", + "documentation":"

    A list of the event types associated with the notification rule.

    " + }, + "Resource":{ + "shape":"NotificationRuleResource", + "documentation":"

    The Amazon Resource Name (ARN) of the resource associated with the notification rule.

    " + }, + "Targets":{ + "shape":"TargetsBatch", + "documentation":"

    A list of the SNS topics associated with the notification rule.

    " + }, + "DetailType":{ + "shape":"DetailType", + "documentation":"

    The level of detail included in the notifications for this resource. BASIC will include only the contents of the event as it would appear in AWS CloudWatch. FULL will include any supplemental information provided by AWS CodeStar Notifications and/or the service for the resource for which the notification is created.

    " + }, + "CreatedBy":{ + "shape":"NotificationRuleCreatedBy", + "documentation":"

    The name or email alias of the person who created the notification rule.

    " + }, + "Status":{ + "shape":"NotificationRuleStatus", + "documentation":"

    The status of the notification rule. Valid statuses are ENABLED (sending notifications) or DISABLED (not sending notifications).

    " + }, + "CreatedTimestamp":{ + "shape":"CreatedTimestamp", + "documentation":"

    The date and time the notification rule was created, in timestamp format.

    " + }, + "LastModifiedTimestamp":{ + "shape":"LastModifiedTimestamp", + "documentation":"

    The date and time the notification rule was most recently updated, in timestamp format.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The tags associated with the notification rule.

    " + } + } + }, + "DetailType":{ + "type":"string", + "enum":[ + "BASIC", + "FULL" + ] + }, + "EventTypeBatch":{ + "type":"list", + "member":{"shape":"EventTypeSummary"} + }, + "EventTypeId":{ + "type":"string", + "max":200, + "min":1 + }, + "EventTypeIds":{ + "type":"list", + "member":{"shape":"EventTypeId"} + }, + "EventTypeName":{"type":"string"}, + "EventTypeSummary":{ + "type":"structure", + "members":{ + "EventTypeId":{ + "shape":"EventTypeId", + "documentation":"

    The system-generated ID of the event.

    " + }, + "ServiceName":{ + "shape":"ServiceName", + "documentation":"

    The name of the service for which the event applies.

    " + }, + "EventTypeName":{ + "shape":"EventTypeName", + "documentation":"

    The name of the event.

    " + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The resource type of the event.

    " + } + }, + "documentation":"

    Returns information about an event that has triggered a notification rule.

    " + }, + "ForceUnsubscribeAll":{"type":"boolean"}, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The value for the enumeration token used in the request to return the next batch of the results is not valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LastModifiedTimestamp":{"type":"timestamp"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    One of the AWS CodeStar Notifications limits has been exceeded. Limits apply to accounts, notification rules, notifications, resources, and targets. For more information, see Limits.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListEventTypesFilter":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"ListEventTypesFilterName", + "documentation":"

    The system-generated name of the filter type you want to filter by.

    " + }, + "Value":{ + "shape":"ListEventTypesFilterValue", + "documentation":"

    The name of the resource type (for example, pipeline) or service name (for example, CodePipeline) that you want to filter by.

    " + } + }, + "documentation":"

    Information about a filter to apply to the list of returned event types. You can filter by resource type or service name.
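
    A sketch of listing only the CodePipeline event types with this filter, using the same assumed client:

        ListEventTypesResponse eventTypes = notifications.listEventTypes(
            ListEventTypesRequest.builder()
                .filters(ListEventTypesFilter.builder()
                    .name(ListEventTypesFilterName.SERVICE_NAME)
                    .value("CodePipeline")
                    .build())
                .build());
        eventTypes.eventTypes().forEach(e ->
            System.out.println(e.eventTypeId() + " (" + e.resourceType() + ")"));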

    " + }, + "ListEventTypesFilterName":{ + "type":"string", + "enum":[ + "RESOURCE_TYPE", + "SERVICE_NAME" + ] + }, + "ListEventTypesFilterValue":{"type":"string"}, + "ListEventTypesFilters":{ + "type":"list", + "member":{"shape":"ListEventTypesFilter"} + }, + "ListEventTypesRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"ListEventTypesFilters", + "documentation":"

    The filters to use to return information by service or resource type.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    An enumeration token that, when provided in a request, returns the next batch of the results.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    A non-negative integer used to limit the number of returned results. The default number is 50. The maximum number of results that can be returned is 100.

    ", + "box":true + } + } + }, + "ListEventTypesResult":{ + "type":"structure", + "members":{ + "EventTypes":{ + "shape":"EventTypeBatch", + "documentation":"

    Information about each event, including service name, resource type, event ID, and event name.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    An enumeration token that can be used in a request to return the next batch of the results.

    " + } + } + }, + "ListNotificationRulesFilter":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"ListNotificationRulesFilterName", + "documentation":"

    The name of the attribute you want to use to filter the returned notification rules.

    " + }, + "Value":{ + "shape":"ListNotificationRulesFilterValue", + "documentation":"

    The value of the attribute you want to use to filter the returned notification rules. For example, if you specify filtering by RESOURCE in Name, you might specify the ARN of a pipeline in AWS CodePipeline for the value.

    " + } + }, + "documentation":"

    Information about a filter to apply to the list of returned notification rules. You can filter by event type, owner, resource, or target.

    " + }, + "ListNotificationRulesFilterName":{ + "type":"string", + "enum":[ + "EVENT_TYPE_ID", + "CREATED_BY", + "RESOURCE", + "TARGET_ADDRESS" + ] + }, + "ListNotificationRulesFilterValue":{"type":"string"}, + "ListNotificationRulesFilters":{ + "type":"list", + "member":{"shape":"ListNotificationRulesFilter"} + }, + "ListNotificationRulesRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"ListNotificationRulesFilters", + "documentation":"

    The filters to use to return information by service or resource type. For valid values, see ListNotificationRulesFilter.

    A filter with the same name can appear more than once when used with OR statements. Filters with different names should be applied with AND statements.
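
    A sketch of listing the rules attached to a single pipeline with a RESOURCE filter, using the same assumed client and a placeholder pipeline ARN:

        ListNotificationRulesResponse rules = notifications.listNotificationRules(
            ListNotificationRulesRequest.builder()
                .filters(ListNotificationRulesFilter.builder()
                    .name(ListNotificationRulesFilterName.RESOURCE)
                    .value("arn:aws:codepipeline:us-east-2:123456789012:MyPipeline")
                    .build())
                .build());
        rules.notificationRules().forEach(r -> System.out.println(r.arn()));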

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    An enumeration token that, when provided in a request, returns the next batch of the results.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    A non-negative integer used to limit the number of returned results. The maximum number of results that can be returned is 100.

    ", + "box":true + } + } + }, + "ListNotificationRulesResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    An enumeration token that can be used in a request to return the next batch of the results.

    " + }, + "NotificationRules":{ + "shape":"NotificationRuleBatch", + "documentation":"

    The list of notification rules for the AWS account, by Amazon Resource Name (ARN) and ID.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) for the notification rule.

    " + } + } + }, + "ListTagsForResourceResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

    The tags associated with the notification rule.

    " + } + } + }, + "ListTargetsFilter":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"ListTargetsFilterName", + "documentation":"

    The name of the attribute you want to use to filter the returned targets.

    " + }, + "Value":{ + "shape":"ListTargetsFilterValue", + "documentation":"

    The value of the attribute you want to use to filter the returned targets. For example, if you specify SNS for the Target type, you could specify an Amazon Resource Name (ARN) for a topic as the value.

    " + } + }, + "documentation":"

    Information about a filter to apply to the list of returned targets. You can filter by target type, address, or status. For example, to filter results to notification rules that have active Amazon SNS topics as targets, you could specify a ListTargetsFilter Name of TARGET_TYPE and a Value of SNS, and a Name of TARGET_STATUS and a Value of ACTIVE.
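
    The filter combination described above looks roughly like this with the same assumed client:

        ListTargetsResponse activeSnsTargets = notifications.listTargets(
            ListTargetsRequest.builder()
                .filters(
                    ListTargetsFilter.builder()
                        .name(ListTargetsFilterName.TARGET_TYPE)
                        .value("SNS")
                        .build(),
                    ListTargetsFilter.builder()
                        .name(ListTargetsFilterName.TARGET_STATUS)
                        .value("ACTIVE")
                        .build())
                .build());
        activeSnsTargets.targets().forEach(t -> System.out.println(t.targetAddress()));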

    " + }, + "ListTargetsFilterName":{ + "type":"string", + "enum":[ + "TARGET_TYPE", + "TARGET_ADDRESS", + "TARGET_STATUS" + ] + }, + "ListTargetsFilterValue":{"type":"string"}, + "ListTargetsFilters":{ + "type":"list", + "member":{"shape":"ListTargetsFilter"} + }, + "ListTargetsRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"ListTargetsFilters", + "documentation":"

    The filters to use to return information by service or resource type. Valid filters include target type, target address, and target status.

    A filter with the same name can appear more than once when used with OR statements. Filters with different names should be applied with AND statements.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    An enumeration token that, when provided in a request, returns the next batch of the results.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    A non-negative integer used to limit the number of returned results. The maximum number of results that can be returned is 100.

    ", + "box":true + } + } + }, + "ListTargetsResult":{ + "type":"structure", + "members":{ + "Targets":{ + "shape":"TargetsBatch", + "documentation":"

    The list of notification rule targets.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    An enumeration token that can be used in a request to return the next batch of results.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "Message":{ + "type":"string", + "min":1 + }, + "NextToken":{ + "type":"string", + "pattern":"^[\\w/+=]+$" + }, + "NotificationRuleArn":{ + "type":"string", + "pattern":"^arn:aws[^:\\s]*:codestar-notifications:[^:\\s]+:\\d{12}:notificationrule\\/(.*\\S)?$" + }, + "NotificationRuleBatch":{ + "type":"list", + "member":{"shape":"NotificationRuleSummary"} + }, + "NotificationRuleCreatedBy":{ + "type":"string", + "min":1 + }, + "NotificationRuleId":{ + "type":"string", + "max":40, + "min":1 + }, + "NotificationRuleName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9\\-_ ]+$", + "sensitive":true + }, + "NotificationRuleResource":{ + "type":"string", + "pattern":"^arn:aws[^:\\s]*:[^:\\s]*:[^:\\s]*:[0-9]{12}:[^\\s]+$" + }, + "NotificationRuleStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "NotificationRuleSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"NotificationRuleId", + "documentation":"

    The unique ID of the notification rule.

    " + }, + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule.

    " + } + }, + "documentation":"

    Information about a specified notification rule.

    " + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    A resource with the same name or ID already exists. Notification rule names must be unique in your AWS account.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    AWS CodeStar Notifications can't find a resource that matches the provided ARN.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceType":{ + "type":"string", + "min":1, + "pattern":"^([a-zA-Z0-9-])+$" + }, + "ServiceName":{"type":"string"}, + "SubscribeRequest":{ + "type":"structure", + "required":[ + "Arn", + "Target" + ], + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule for which you want to create the association.

    " + }, + "Target":{"shape":"Target"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter.

    " + } + } + }, + "SubscribeResult":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule for which you have created associations.

    " + } + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "Arn", + "Tags" + ], + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule to tag.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The list of tags to associate with the resource. Tag key names cannot start with \"aws\".
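    For example, a hedged SDK for Java v2 sketch of tagging a notification rule follows; the class and member names are assumed from the codegen conventions, and the ARN and tag values are placeholders.

        // Sketch only: names assumed from SDK v2 codegen conventions; the ARN below is a placeholder.
        import java.util.Map;
        import software.amazon.awssdk.services.codestarnotifications.CodestarNotificationsClient;
        import software.amazon.awssdk.services.codestarnotifications.model.TagResourceRequest;
        import software.amazon.awssdk.services.codestarnotifications.model.TagResourceResponse;

        public class TagNotificationRule {
            public static void main(String[] args) {
                try (CodestarNotificationsClient client = CodestarNotificationsClient.create()) {
                    TagResourceResponse response = client.tagResource(TagResourceRequest.builder()
                            .arn("arn:aws:codestar-notifications:us-east-1:111122223333:notificationrule/example-id")
                            // Tag key names must not start with "aws".
                            .tags(Map.of("team", "release", "environment", "production"))
                            .build());
                    System.out.println("Tags now on the rule: " + response.tags());
                }
            }
        }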

    " + } + } + }, + "TagResourceResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

    The list of tags associated with the resource.

    " + } + } + }, + "TagValue":{ + "type":"string", + "max":256, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "Target":{ + "type":"structure", + "members":{ + "TargetType":{ + "shape":"TargetType", + "documentation":"

    The target type. Can be an Amazon SNS topic.

    " + }, + "TargetAddress":{ + "shape":"TargetAddress", + "documentation":"

    The Amazon Resource Name (ARN) of the SNS topic.

    " + } + }, + "documentation":"

    Information about the SNS topics associated with a notification rule.

    " + }, + "TargetAddress":{ + "type":"string", + "max":320, + "min":1, + "sensitive":true + }, + "TargetStatus":{ + "type":"string", + "enum":[ + "PENDING", + "ACTIVE", + "UNREACHABLE", + "INACTIVE", + "DEACTIVATED" + ] + }, + "TargetSummary":{ + "type":"structure", + "members":{ + "TargetAddress":{ + "shape":"TargetAddress", + "documentation":"

    The Amazon Resource Name (ARN) of the SNS topic.

    " + }, + "TargetType":{ + "shape":"TargetType", + "documentation":"

    The type of the target (for example, SNS).

    " + }, + "TargetStatus":{ + "shape":"TargetStatus", + "documentation":"

    The status of the target.

    " + } + }, + "documentation":"

    Information about the targets specified for a notification rule.

    " + }, + "TargetType":{ + "type":"string", + "pattern":"^[A-Za-z]+$" + }, + "Targets":{ + "type":"list", + "member":{"shape":"Target"}, + "max":10 + }, + "TargetsBatch":{ + "type":"list", + "member":{"shape":"TargetSummary"} + }, + "UnsubscribeRequest":{ + "type":"structure", + "required":[ + "Arn", + "TargetAddress" + ], + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule.

    " + }, + "TargetAddress":{ + "shape":"TargetAddress", + "documentation":"

    The ARN of the SNS topic to unsubscribe from the notification rule.

    " + } + } + }, + "UnsubscribeResult":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule from which you have removed a subscription.

    " + } + } + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "Arn", + "TagKeys" + ], + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule from which to remove the tags.

    " + }, + "TagKeys":{ + "shape":"TagKeys", + "documentation":"

    The key names of the tags to remove.

    " + } + } + }, + "UntagResourceResult":{ + "type":"structure", + "members":{ + } + }, + "UpdateNotificationRuleRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"NotificationRuleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the notification rule.

    " + }, + "Name":{ + "shape":"NotificationRuleName", + "documentation":"

    The name of the notification rule.

    " + }, + "Status":{ + "shape":"NotificationRuleStatus", + "documentation":"

    The status of the notification rule. Valid statuses include enabled (sending notifications) or disabled (not sending notifications).

    " + }, + "EventTypeIds":{ + "shape":"EventTypeIds", + "documentation":"

    A list of event types associated with this notification rule.

    " + }, + "Targets":{ + "shape":"Targets", + "documentation":"

    The address and type of the targets to receive notifications from this notification rule.

    " + }, + "DetailType":{ + "shape":"DetailType", + "documentation":"

    The level of detail to include in the notifications for this resource. BASIC will include only the contents of the event as it would appear in AWS CloudWatch. FULL will include any supplemental information provided by AWS CodeStar Notifications and/or the service for the resource for which the notification is created.

    " + } + } + }, + "UpdateNotificationRuleResult":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    One or more parameter values are not valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

    This AWS CodeStar Notifications API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Notifications API. You can use the AWS CodeStar Notifications API to work with the following objects:

    Notification rules, by calling the following:

    Targets, by calling the following:

    • DeleteTarget, which removes a notification rule target (SNS topic) from a notification rule.

    • ListTargets, which lists the targets associated with a notification rule.

    Events, by calling the following:

    • ListEventTypes, which lists the event types you can include in a notification rule.

    Tags, by calling the following:

    • ListTagsForResource, which lists the tags already associated with a notification rule in your account.

    • TagResource, which associates a tag you provide with a notification rule in your account.

    • UntagResource, which removes a tag from a notification rule in your account.

    For information about how to use AWS CodeStar Notifications, see the CodeStarNotifications User Guide.
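    To tie the operations above together, a minimal sketch of constructing the service client and removing a tag might look like the following; the client and builder names are assumed from the SDK v2 codegen conventions, and the region and ARN are placeholders.

        // Sketch only: names assumed from SDK v2 codegen conventions; region and ARN are placeholders.
        import software.amazon.awssdk.regions.Region;
        import software.amazon.awssdk.services.codestarnotifications.CodestarNotificationsClient;
        import software.amazon.awssdk.services.codestarnotifications.model.UntagResourceRequest;

        public class UntagNotificationRule {
            public static void main(String[] args) {
                try (CodestarNotificationsClient client = CodestarNotificationsClient.builder()
                        .region(Region.US_EAST_1)
                        .build()) {
                    client.untagResource(UntagResourceRequest.builder()
                            .arn("arn:aws:codestar-notifications:us-east-1:111122223333:notificationrule/example-id")
                            .tagKeys("environment")   // key names of the tags to remove
                            .build());
                }
            }
        }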

    " +} diff --git a/services/cognitoidentity/build.properties b/services/cognitoidentity/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/cognitoidentity/build.properties +++ b/services/cognitoidentity/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index cc093007c29c..571e83edd989 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + computeoptimizer + AWS Java SDK :: Services :: Compute Optimizer + The AWS Java SDK for Compute Optimizer module holds the client classes that are used for + communicating with Compute Optimizer. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.computeoptimizer + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/paginators-1.json b/services/computeoptimizer/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/computeoptimizer/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..1c83df5e6d3a --- /dev/null +++ b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1791 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-11-01", + "endpointPrefix":"compute-optimizer", + "jsonVersion":"1.0", + "protocol":"json", + "serviceFullName":"AWS Compute Optimizer", + "serviceId":"Compute Optimizer", + "signatureVersion":"v4", + "signingName":"compute-optimizer", + "targetPrefix":"ComputeOptimizerService", + "uid":"compute-optimizer-2019-11-01" + }, + "operations":{ + "DescribeRecommendationExportJobs":{ + "name":"DescribeRecommendationExportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRecommendationExportJobsRequest"}, + "output":{"shape":"DescribeRecommendationExportJobsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Describes recommendation export jobs created in the last seven days.

    Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your recommendations. Then use the DescribeRecommendationExportJobs action to view your export jobs.
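    A hedged Java v2 sketch of that workflow (start an Auto Scaling group export, then look up the job by ID) might read as follows. The model class and member names, including S3DestinationConfig's bucket and keyPrefix, are assumed from the codegen conventions, and the bucket name is a placeholder that must already exist with the required policy.

        // Sketch only: client/model names assumed from SDK v2 codegen conventions; the bucket is a placeholder.
        import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
        import software.amazon.awssdk.services.computeoptimizer.model.DescribeRecommendationExportJobsRequest;
        import software.amazon.awssdk.services.computeoptimizer.model.ExportAutoScalingGroupRecommendationsRequest;
        import software.amazon.awssdk.services.computeoptimizer.model.S3DestinationConfig;

        public class ExportThenDescribe {
            public static void main(String[] args) {
                try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                    // Request an export; the destination bucket must already exist and grant Compute Optimizer write access.
                    String jobId = client.exportAutoScalingGroupRecommendations(
                            ExportAutoScalingGroupRecommendationsRequest.builder()
                                    .s3DestinationConfig(S3DestinationConfig.builder()
                                            .bucket("example-compute-optimizer-exports")
                                            .keyPrefix("asg-recommendations")
                                            .build())
                                    .build())
                            .jobId();

                    // View the job; only jobs created in the last seven days are returned.
                    client.describeRecommendationExportJobs(
                            DescribeRecommendationExportJobsRequest.builder().jobIds(jobId).build())
                          .recommendationExportJobs()
                          .forEach(job -> System.out.println(job.jobId() + ": " + job.status()));
                }
            }
        }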

    " + }, + "ExportAutoScalingGroupRecommendations":{ + "name":"ExportAutoScalingGroupRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportAutoScalingGroupRecommendationsRequest"}, + "output":{"shape":"ExportAutoScalingGroupRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Exports optimization recommendations for Auto Scaling groups.

    Recommendations are exported in a comma-separated values (.csv) file, and its metadata in a JavaScript Object Notation (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

    You can have only one Auto Scaling group export job in progress per AWS Region.

    " + }, + "ExportEC2InstanceRecommendations":{ + "name":"ExportEC2InstanceRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportEC2InstanceRecommendationsRequest"}, + "output":{"shape":"ExportEC2InstanceRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Exports optimization recommendations for Amazon EC2 instances.

    Recommendations are exported in a comma-separated values (.csv) file, and its metadata in a JavaScript Object Notation (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

    You can have only one Amazon EC2 instance export job in progress per AWS Region.

    " + }, + "GetAutoScalingGroupRecommendations":{ + "name":"GetAutoScalingGroupRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAutoScalingGroupRecommendationsRequest"}, + "output":{"shape":"GetAutoScalingGroupRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns Auto Scaling group recommendations.

    AWS Compute Optimizer generates recommendations for Amazon EC2 Auto Scaling groups that meet a specific set of requirements. For more information, see the Supported resources and requirements in the AWS Compute Optimizer User Guide.

    " + }, + "GetEBSVolumeRecommendations":{ + "name":"GetEBSVolumeRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEBSVolumeRecommendationsRequest"}, + "output":{"shape":"GetEBSVolumeRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns Amazon Elastic Block Store (Amazon EBS) volume recommendations.

    AWS Compute Optimizer generates recommendations for Amazon EBS volumes that meet a specific set of requirements. For more information, see the Supported resources and requirements in the AWS Compute Optimizer User Guide.

    " + }, + "GetEC2InstanceRecommendations":{ + "name":"GetEC2InstanceRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEC2InstanceRecommendationsRequest"}, + "output":{"shape":"GetEC2InstanceRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns Amazon EC2 instance recommendations.

    AWS Compute Optimizer generates recommendations for Amazon Elastic Compute Cloud (Amazon EC2) instances that meet a specific set of requirements. For more information, see the Supported resources and requirements in the AWS Compute Optimizer User Guide.

    " + }, + "GetEC2RecommendationProjectedMetrics":{ + "name":"GetEC2RecommendationProjectedMetrics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEC2RecommendationProjectedMetricsRequest"}, + "output":{"shape":"GetEC2RecommendationProjectedMetricsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns the projected utilization metrics of Amazon EC2 instance recommendations.

    The Cpu and Memory metrics are the only projected utilization metrics returned when you run this action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    " + }, + "GetEnrollmentStatus":{ + "name":"GetEnrollmentStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEnrollmentStatusRequest"}, + "output":{"shape":"GetEnrollmentStatusResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

    If the account is the management account of an organization, this action also confirms the enrollment status of member accounts within the organization.

    " + }, + "GetLambdaFunctionRecommendations":{ + "name":"GetLambdaFunctionRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLambdaFunctionRecommendationsRequest"}, + "output":{"shape":"GetLambdaFunctionRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Returns AWS Lambda function recommendations.

    AWS Compute Optimizer generates recommendations for functions that meet a specific set of requirements. For more information, see the Supported resources and requirements in the AWS Compute Optimizer User Guide.

    " + }, + "GetRecommendationSummaries":{ + "name":"GetRecommendationSummaries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRecommendationSummariesRequest"}, + "output":{"shape":"GetRecommendationSummariesResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns the optimization findings for an account.

    For example, it returns the number of Amazon EC2 instances in an account that are under-provisioned, over-provisioned, or optimized. It also returns the number of Auto Scaling groups in an account that are optimized, or not optimized.

    " + }, + "UpdateEnrollmentStatus":{ + "name":"UpdateEnrollmentStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateEnrollmentStatusRequest"}, + "output":{"shape":"UpdateEnrollmentStatusResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Updates the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

    If the account is a management account of an organization, this action can also be used to enroll member accounts within the organization.
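    A minimal sketch of checking and then updating the enrollment status is shown below; the UpdateEnrollmentStatusRequest member names and the "Active" status value are assumptions, since that shape is not shown in this diff.

        // Sketch only: names assumed from SDK v2 codegen conventions; UpdateEnrollmentStatusRequest members are assumed.
        import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
        import software.amazon.awssdk.services.computeoptimizer.model.GetEnrollmentStatusRequest;
        import software.amazon.awssdk.services.computeoptimizer.model.GetEnrollmentStatusResponse;
        import software.amazon.awssdk.services.computeoptimizer.model.UpdateEnrollmentStatusRequest;

        public class OptInToComputeOptimizer {
            public static void main(String[] args) {
                try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                    GetEnrollmentStatusResponse status =
                            client.getEnrollmentStatus(GetEnrollmentStatusRequest.builder().build());
                    System.out.println("Current status: " + status.status()
                            + " (" + status.statusReason() + ")");

                    // Opt the account in; from a management account this can also enroll member accounts.
                    client.updateEnrollmentStatus(UpdateEnrollmentStatusRequest.builder()
                            .status("Active")                 // assumed valid value for the Status shape
                            .includeMemberAccounts(true)
                            .build());
                }
            }
        }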

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "exception":true, + "synthetic":true + }, + "AccountId":{"type":"string"}, + "AccountIds":{ + "type":"list", + "member":{"shape":"AccountId"} + }, + "AutoScalingGroupArn":{"type":"string"}, + "AutoScalingGroupArns":{ + "type":"list", + "member":{"shape":"AutoScalingGroupArn"} + }, + "AutoScalingGroupConfiguration":{ + "type":"structure", + "members":{ + "desiredCapacity":{ + "shape":"DesiredCapacity", + "documentation":"

    The desired capacity, or number of instances, for the Auto Scaling group.

    " + }, + "minSize":{ + "shape":"MinSize", + "documentation":"

    The minimum size, or minimum number of instances, for the Auto Scaling group.

    " + }, + "maxSize":{ + "shape":"MaxSize", + "documentation":"

    The maximum size, or maximum number of instances, for the Auto Scaling group.

    " + }, + "instanceType":{ + "shape":"InstanceType", + "documentation":"

    The instance type for the Auto Scaling group.

    " + } + }, + "documentation":"

    Describes the configuration of an Auto Scaling group.

    " + }, + "AutoScalingGroupName":{"type":"string"}, + "AutoScalingGroupRecommendation":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Auto Scaling group.

    " + }, + "autoScalingGroupArn":{ + "shape":"AutoScalingGroupArn", + "documentation":"

    The Amazon Resource Name (ARN) of the Auto Scaling group.

    " + }, + "autoScalingGroupName":{ + "shape":"AutoScalingGroupName", + "documentation":"

    The name of the Auto Scaling group.

    " + }, + "finding":{ + "shape":"Finding", + "documentation":"

    The finding classification for the Auto Scaling group.

    Findings for Auto Scaling groups include:

    • NotOptimized —An Auto Scaling group is considered not optimized when AWS Compute Optimizer identifies a recommendation that can provide better performance for your workload.

    • Optimized —An Auto Scaling group is considered optimized when Compute Optimizer determines that the group is correctly provisioned to run your workload based on the chosen instance type. For optimized resources, Compute Optimizer might recommend a new generation instance type.

    " + }, + "utilizationMetrics":{ + "shape":"UtilizationMetrics", + "documentation":"

    An array of objects that describe the utilization metrics of the Auto Scaling group.

    " + }, + "lookBackPeriodInDays":{ + "shape":"LookBackPeriodInDays", + "documentation":"

    The number of days for which utilization metrics were analyzed for the Auto Scaling group.

    " + }, + "currentConfiguration":{ + "shape":"AutoScalingGroupConfiguration", + "documentation":"

    An array of objects that describe the current configuration of the Auto Scaling group.

    " + }, + "recommendationOptions":{ + "shape":"AutoScalingGroupRecommendationOptions", + "documentation":"

    An array of objects that describe the recommendation options for the Auto Scaling group.

    " + }, + "lastRefreshTimestamp":{ + "shape":"LastRefreshTimestamp", + "documentation":"

    The time stamp of when the Auto Scaling group recommendation was last refreshed.

    " + } + }, + "documentation":"

    Describes an Auto Scaling group recommendation.

    " + }, + "AutoScalingGroupRecommendationOption":{ + "type":"structure", + "members":{ + "configuration":{ + "shape":"AutoScalingGroupConfiguration", + "documentation":"

    An array of objects that describe an Auto Scaling group configuration.

    " + }, + "projectedUtilizationMetrics":{ + "shape":"ProjectedUtilizationMetrics", + "documentation":"

    An array of objects that describe the projected utilization metrics of the Auto Scaling group recommendation option.

    The Cpu and Memory metrics are the only projected utilization metrics returned. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    " + }, + "performanceRisk":{ + "shape":"PerformanceRisk", + "documentation":"

    The performance risk of the Auto Scaling group configuration recommendation.

    Performance risk is the likelihood of the recommended instance type not meeting the performance requirement of your workload.

    The lowest performance risk is categorized as 0, and the highest as 5.

    " + }, + "rank":{ + "shape":"Rank", + "documentation":"

    The rank of the Auto Scaling group recommendation option.

    The top recommendation option is ranked as 1.

    " + } + }, + "documentation":"

    Describes a recommendation option for an Auto Scaling group.

    " + }, + "AutoScalingGroupRecommendationOptions":{ + "type":"list", + "member":{"shape":"AutoScalingGroupRecommendationOption"} + }, + "AutoScalingGroupRecommendations":{ + "type":"list", + "member":{"shape":"AutoScalingGroupRecommendation"} + }, + "Code":{"type":"string"}, + "CreationTimestamp":{"type":"timestamp"}, + "CurrentInstanceType":{"type":"string"}, + "DescribeRecommendationExportJobsRequest":{ + "type":"structure", + "members":{ + "jobIds":{ + "shape":"JobIds", + "documentation":"

    The identification numbers of the export jobs to return.

    An export job ID is returned when you create an export using the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions.

    All export jobs created in the last seven days are returned if this parameter is omitted.

    " + }, + "filters":{ + "shape":"JobFilters", + "documentation":"

    An array of objects that describe a filter to return a more specific list of export jobs.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to advance to the next page of export jobs.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of export jobs to return with a single request.

    To retrieve the remaining results, make another request with the returned NextToken value.

    " + } + } + }, + "DescribeRecommendationExportJobsResponse":{ + "type":"structure", + "members":{ + "recommendationExportJobs":{ + "shape":"RecommendationExportJobs", + "documentation":"

    An array of objects that describe recommendation export jobs.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to advance to the next page of export jobs.

    This value is null when there are no more pages of export jobs to return.

    " + } + } + }, + "DesiredCapacity":{"type":"integer"}, + "DestinationBucket":{"type":"string"}, + "DestinationKey":{"type":"string"}, + "DestinationKeyPrefix":{"type":"string"}, + "EBSFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"EBSFilterName", + "documentation":"

    The name of the filter.

    Specify Finding to return recommendations with a specific finding classification (e.g., Optimized).

    " + }, + "values":{ + "shape":"FilterValues", + "documentation":"

    The value of the filter.

    The valid values are Optimized, or NotOptimized.

    " + } + }, + "documentation":"

    Describes a filter that returns a more specific list of Amazon Elastic Block Store (Amazon EBS) volume recommendations.

    This filter is used with the GetEBSVolumeRecommendations action.

    " + }, + "EBSFilterName":{ + "type":"string", + "enum":["Finding"] + }, + "EBSFilters":{ + "type":"list", + "member":{"shape":"EBSFilter"} + }, + "EBSFinding":{ + "type":"string", + "enum":[ + "Optimized", + "NotOptimized" + ] + }, + "EBSMetricName":{ + "type":"string", + "enum":[ + "VolumeReadOpsPerSecond", + "VolumeWriteOpsPerSecond", + "VolumeReadBytesPerSecond", + "VolumeWriteBytesPerSecond" + ] + }, + "EBSUtilizationMetric":{ + "type":"structure", + "members":{ + "name":{ + "shape":"EBSMetricName", + "documentation":"

    The name of the utilization metric.

    The following utilization metrics are available:

    • VolumeReadOpsPerSecond - The completed read operations per second from the volume in a specified period of time.

      Unit: Count

    • VolumeWriteOpsPerSecond - The completed write operations per second to the volume in a specified period of time.

      Unit: Count

    • VolumeReadBytesPerSecond - The bytes read per second from the volume in a specified period of time.

      Unit: Bytes

    • VolumeWriteBytesPerSecond - The bytes written to the volume in a specified period of time.

      Unit: Bytes

    " + }, + "statistic":{ + "shape":"MetricStatistic", + "documentation":"

    The statistic of the utilization metric.

    The following statistics are available:

    • Average - This is the value of Sum / SampleCount during the specified period, or the average value observed during the specified period.

    • Maximum - The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

    " + }, + "value":{ + "shape":"MetricValue", + "documentation":"

    The value of the utilization metric.

    " + } + }, + "documentation":"

    Describes a utilization metric of an Amazon Elastic Block Store (Amazon EBS) volume.

    Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

    " + }, + "EBSUtilizationMetrics":{ + "type":"list", + "member":{"shape":"EBSUtilizationMetric"} + }, + "ErrorMessage":{"type":"string"}, + "ExportAutoScalingGroupRecommendationsRequest":{ + "type":"structure", + "required":["s3DestinationConfig"], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The IDs of the AWS accounts for which to export Auto Scaling group recommendations.

    If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

    This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

    Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

    You can specify multiple account IDs per request.

    " + }, + "filters":{ + "shape":"Filters", + "documentation":"

    An array of objects that describe a filter to export a more specific set of Auto Scaling group recommendations.

    " + }, + "fieldsToExport":{ + "shape":"ExportableAutoScalingGroupFields", + "documentation":"

    The recommendations data to include in the export file. For more information about the fields that can be exported, see Exported files in the Compute Optimizer User Guide.

    " + }, + "s3DestinationConfig":{ + "shape":"S3DestinationConfig", + "documentation":"

    An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for the export job.

    You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer user guide.

    " + }, + "fileFormat":{ + "shape":"FileFormat", + "documentation":"

    The format of the export file.

    The only export file format currently supported is Csv.

    " + }, + "includeMemberAccounts":{ + "shape":"IncludeMemberAccounts", + "documentation":"

    Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the management account of an organization.

    The member accounts must also be opted in to Compute Optimizer.

    Recommendations for member accounts of the organization are not included in the export file if this parameter is omitted.

    This parameter cannot be specified together with the account IDs parameter. The parameters are mutually exclusive.

    Recommendations for member accounts are not included in the export if this parameter, or the account IDs parameter, is omitted.

    " + } + } + }, + "ExportAutoScalingGroupRecommendationsResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

    The identification number of the export job.

    Use the DescribeRecommendationExportJobs action, and specify the job ID to view the status of an export job.

    " + }, + "s3Destination":{ + "shape":"S3Destination", + "documentation":"

    An object that describes the destination Amazon S3 bucket of a recommendations export file.

    " + } + } + }, + "ExportDestination":{ + "type":"structure", + "members":{ + "s3":{ + "shape":"S3Destination", + "documentation":"

    An object that describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and object keys of a recommendations export file, and its associated metadata file.

    " + } + }, + "documentation":"

    Describes the destination of the recommendations export and metadata files.

    " + }, + "ExportEC2InstanceRecommendationsRequest":{ + "type":"structure", + "required":["s3DestinationConfig"], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The IDs of the AWS accounts for which to export instance recommendations.

    If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

    This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

    Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

    You can specify multiple account IDs per request.

    " + }, + "filters":{ + "shape":"Filters", + "documentation":"

    An array of objects that describe a filter to export a more specific set of instance recommendations.

    " + }, + "fieldsToExport":{ + "shape":"ExportableInstanceFields", + "documentation":"

    The recommendations data to include in the export file. For more information about the fields that can be exported, see Exported files in the Compute Optimizer User Guide.

    " + }, + "s3DestinationConfig":{ + "shape":"S3DestinationConfig", + "documentation":"

    An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for the export job.

    You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer user guide.

    " + }, + "fileFormat":{ + "shape":"FileFormat", + "documentation":"

    The format of the export file.

    The only export file format currently supported is Csv.

    " + }, + "includeMemberAccounts":{ + "shape":"IncludeMemberAccounts", + "documentation":"

    Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the management account of an organization.

    The member accounts must also be opted in to Compute Optimizer.

    Recommendations for member accounts of the organization are not included in the export file if this parameter is omitted.

    Recommendations for member accounts are not included in the export if this parameter, or the account IDs parameter, is omitted.

    " + } + } + }, + "ExportEC2InstanceRecommendationsResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

    The identification number of the export job.

    Use the DescribeRecommendationExportJobs action, and specify the job ID to view the status of an export job.

    " + }, + "s3Destination":{ + "shape":"S3Destination", + "documentation":"

    An object that describes the destination Amazon S3 bucket of a recommendations export file.

    " + } + } + }, + "ExportableAutoScalingGroupField":{ + "type":"string", + "enum":[ + "AccountId", + "AutoScalingGroupArn", + "AutoScalingGroupName", + "Finding", + "UtilizationMetricsCpuMaximum", + "UtilizationMetricsMemoryMaximum", + "UtilizationMetricsEbsReadOpsPerSecondMaximum", + "UtilizationMetricsEbsWriteOpsPerSecondMaximum", + "UtilizationMetricsEbsReadBytesPerSecondMaximum", + "UtilizationMetricsEbsWriteBytesPerSecondMaximum", + "LookbackPeriodInDays", + "CurrentConfigurationInstanceType", + "CurrentConfigurationDesiredCapacity", + "CurrentConfigurationMinSize", + "CurrentConfigurationMaxSize", + "CurrentOnDemandPrice", + "CurrentStandardOneYearNoUpfrontReservedPrice", + "CurrentStandardThreeYearNoUpfrontReservedPrice", + "CurrentVCpus", + "CurrentMemory", + "CurrentStorage", + "CurrentNetwork", + "RecommendationOptionsConfigurationInstanceType", + "RecommendationOptionsConfigurationDesiredCapacity", + "RecommendationOptionsConfigurationMinSize", + "RecommendationOptionsConfigurationMaxSize", + "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum", + "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum", + "RecommendationOptionsPerformanceRisk", + "RecommendationOptionsOnDemandPrice", + "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice", + "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice", + "RecommendationOptionsVcpus", + "RecommendationOptionsMemory", + "RecommendationOptionsStorage", + "RecommendationOptionsNetwork", + "LastRefreshTimestamp" + ] + }, + "ExportableAutoScalingGroupFields":{ + "type":"list", + "member":{"shape":"ExportableAutoScalingGroupField"} + }, + "ExportableInstanceField":{ + "type":"string", + "enum":[ + "AccountId", + "InstanceArn", + "InstanceName", + "Finding", + "LookbackPeriodInDays", + "CurrentInstanceType", + "UtilizationMetricsCpuMaximum", + "UtilizationMetricsMemoryMaximum", + "UtilizationMetricsEbsReadOpsPerSecondMaximum", + "UtilizationMetricsEbsWriteOpsPerSecondMaximum", + "UtilizationMetricsEbsReadBytesPerSecondMaximum", + "UtilizationMetricsEbsWriteBytesPerSecondMaximum", + "CurrentOnDemandPrice", + "CurrentStandardOneYearNoUpfrontReservedPrice", + "CurrentStandardThreeYearNoUpfrontReservedPrice", + "CurrentVCpus", + "CurrentMemory", + "CurrentStorage", + "CurrentNetwork", + "RecommendationOptionsInstanceType", + "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum", + "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum", + "RecommendationOptionsPerformanceRisk", + "RecommendationOptionsVcpus", + "RecommendationOptionsMemory", + "RecommendationOptionsStorage", + "RecommendationOptionsNetwork", + "RecommendationOptionsOnDemandPrice", + "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice", + "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice", + "RecommendationsSourcesRecommendationSourceArn", + "RecommendationsSourcesRecommendationSourceType", + "LastRefreshTimestamp" + ] + }, + "ExportableInstanceFields":{ + "type":"list", + "member":{"shape":"ExportableInstanceField"} + }, + "FailureReason":{"type":"string"}, + "FileFormat":{ + "type":"string", + "enum":["Csv"] + }, + "Filter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"FilterName", + "documentation":"

    The name of the filter.

    Specify Finding to return recommendations with a specific finding classification (e.g., Overprovisioned).

    Specify RecommendationSourceType to return recommendations of a specific resource type (e.g., AutoScalingGroup).

    " + }, + "values":{ + "shape":"FilterValues", + "documentation":"

    The value of the filter.

    The valid values for this parameter are as follows, depending on what you specify for the name parameter and the resource type that you wish to filter results for:

    • Specify Optimized or NotOptimized if you specified the name parameter as Finding and you want to filter results for Auto Scaling groups.

    • Specify Underprovisioned, Overprovisioned, or Optimized if you specified the name parameter as Finding and you want to filter results for EC2 instances.

    • Specify Ec2Instance or AutoScalingGroup if you specified the name parameter as RecommendationSourceType.

    " + } + }, + "documentation":"

    Describes a filter that returns a more specific list of recommendations.

    This filter is used with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.
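    For instance, a hedged sketch of a Finding filter applied to Auto Scaling group recommendations (class and member names assumed from the SDK v2 codegen conventions):

        // Sketch only: names assumed from SDK v2 codegen conventions.
        import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
        import software.amazon.awssdk.services.computeoptimizer.model.Filter;
        import software.amazon.awssdk.services.computeoptimizer.model.GetAutoScalingGroupRecommendationsRequest;

        public class NotOptimizedAsgRecommendations {
            public static void main(String[] args) {
                try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                    // For Auto Scaling groups, valid Finding values are Optimized and NotOptimized.
                    GetAutoScalingGroupRecommendationsRequest request =
                            GetAutoScalingGroupRecommendationsRequest.builder()
                                    .filters(Filter.builder().name("Finding").values("NotOptimized").build())
                                    .build();
                    client.getAutoScalingGroupRecommendations(request)
                          .autoScalingGroupRecommendations()
                          .forEach(r -> System.out.println(r.autoScalingGroupName() + ": " + r.finding()));
                }
            }
        }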

    " + }, + "FilterName":{ + "type":"string", + "enum":[ + "Finding", + "RecommendationSourceType" + ] + }, + "FilterValue":{"type":"string"}, + "FilterValues":{ + "type":"list", + "member":{"shape":"FilterValue"} + }, + "Filters":{ + "type":"list", + "member":{"shape":"Filter"} + }, + "Finding":{ + "type":"string", + "enum":[ + "Underprovisioned", + "Overprovisioned", + "Optimized", + "NotOptimized" + ] + }, + "FindingReasonCode":{ + "type":"string", + "enum":[ + "MemoryOverprovisioned", + "MemoryUnderprovisioned" + ] + }, + "FunctionArn":{"type":"string"}, + "FunctionArns":{ + "type":"list", + "member":{"shape":"FunctionArn"} + }, + "FunctionVersion":{"type":"string"}, + "GetAutoScalingGroupRecommendationsRequest":{ + "type":"structure", + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The IDs of the AWS accounts for which to return Auto Scaling group recommendations.

    If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return Auto Scaling group recommendations.

    Only one account ID can be specified per request.

    " + }, + "autoScalingGroupArns":{ + "shape":"AutoScalingGroupArns", + "documentation":"

    The Amazon Resource Name (ARN) of the Auto Scaling groups for which to return recommendations.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to advance to the next page of Auto Scaling group recommendations.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of Auto Scaling group recommendations to return with a single request.

    To retrieve the remaining results, make another request with the returned NextToken value.

    " + }, + "filters":{ + "shape":"Filters", + "documentation":"

    An array of objects that describe a filter that returns a more specific list of Auto Scaling group recommendations.

    " + } + } + }, + "GetAutoScalingGroupRecommendationsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to advance to the next page of Auto Scaling group recommendations.

    This value is null when there are no more pages of Auto Scaling group recommendations to return.

    " + }, + "autoScalingGroupRecommendations":{ + "shape":"AutoScalingGroupRecommendations", + "documentation":"

    An array of objects that describe Auto Scaling group recommendations.

    " + }, + "errors":{ + "shape":"GetRecommendationErrors", + "documentation":"

    An array of objects that describe errors of the request.

    For example, an error is returned if you request recommendations for an unsupported Auto Scaling group.

    " + } + } + }, + "GetEBSVolumeRecommendationsRequest":{ + "type":"structure", + "members":{ + "volumeArns":{ + "shape":"VolumeArns", + "documentation":"

    The Amazon Resource Name (ARN) of the volumes for which to return recommendations.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to advance to the next page of volume recommendations.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of volume recommendations to return with a single request.

    To retrieve the remaining results, make another request with the returned NextToken value.

    " + }, + "filters":{ + "shape":"EBSFilters", + "documentation":"

    An array of objects that describe a filter that returns a more specific list of volume recommendations.

    " + }, + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The IDs of the AWS accounts for which to return volume recommendations.

    If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return volume recommendations.

    Only one account ID can be specified per request.

    " + } + } + }, + "GetEBSVolumeRecommendationsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to advance to the next page of volume recommendations.

    This value is null when there are no more pages of volume recommendations to return.

    " + }, + "volumeRecommendations":{ + "shape":"VolumeRecommendations", + "documentation":"

    An array of objects that describe volume recommendations.

    " + }, + "errors":{ + "shape":"GetRecommendationErrors", + "documentation":"

    An array of objects that describe errors of the request.

    For example, an error is returned if you request recommendations for an unsupported volume.

    " + } + } + }, + "GetEC2InstanceRecommendationsRequest":{ + "type":"structure", + "members":{ + "instanceArns":{ + "shape":"InstanceArns", + "documentation":"

    The Amazon Resource Name (ARN) of the instances for which to return recommendations.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to advance to the next page of instance recommendations.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of instance recommendations to return with a single request.

    To retrieve the remaining results, make another request with the returned NextToken value.

    " + }, + "filters":{ + "shape":"Filters", + "documentation":"

    An array of objects that describe a filter that returns a more specific list of instance recommendations.

    " + }, + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The IDs of the AWS accounts for which to return instance recommendations.

    If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return instance recommendations.

    Only one account ID can be specified per request.

    " + } + } + }, + "GetEC2InstanceRecommendationsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to advance to the next page of instance recommendations.

    This value is null when there are no more pages of instance recommendations to return.

    " + }, + "instanceRecommendations":{ + "shape":"InstanceRecommendations", + "documentation":"

    An array of objects that describe instance recommendations.

    " + }, + "errors":{ + "shape":"GetRecommendationErrors", + "documentation":"

    An array of objects that describe errors of the request.

    For example, an error is returned if you request recommendations for an instance of an unsupported instance family.

    " + } + } + }, + "GetEC2RecommendationProjectedMetricsRequest":{ + "type":"structure", + "required":[ + "instanceArn", + "stat", + "period", + "startTime", + "endTime" + ], + "members":{ + "instanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the instance for which to return recommendation projected metrics.

    " + }, + "stat":{ + "shape":"MetricStatistic", + "documentation":"

    The statistic of the projected metrics.

    " + }, + "period":{ + "shape":"Period", + "documentation":"

    The granularity, in seconds, of the projected metrics data points.

    " + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The time stamp of the first projected metrics data point to return.

    " + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The time stamp of the last projected metrics data point to return.
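    A hedged sketch of this request follows; the exact casing of "EC2" in the generated class and method names is an assumption here, and the instance ARN is a placeholder.

        // Sketch only: names (including the Ec2 casing) assumed from SDK v2 codegen conventions.
        import java.time.Duration;
        import java.time.Instant;
        import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
        import software.amazon.awssdk.services.computeoptimizer.model.GetEc2RecommendationProjectedMetricsRequest;

        public class ProjectedMetricsForInstance {
            public static void main(String[] args) {
                Instant end = Instant.now();
                Instant start = end.minus(Duration.ofDays(14));
                try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                    client.getEc2RecommendationProjectedMetrics(
                            GetEc2RecommendationProjectedMetricsRequest.builder()
                                    .instanceArn("arn:aws:ec2:us-east-1:111122223333:instance/i-0abcd1234example")
                                    .stat("Maximum")          // statistic of the projected metrics
                                    .period(3600)             // granularity, in seconds
                                    .startTime(start)
                                    .endTime(end)
                                    .build())
                          .recommendedOptionProjectedMetrics()
                          .forEach(m -> System.out.println(m));
                }
            }
        }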

    " + } + } + }, + "GetEC2RecommendationProjectedMetricsResponse":{ + "type":"structure", + "members":{ + "recommendedOptionProjectedMetrics":{ + "shape":"RecommendedOptionProjectedMetrics", + "documentation":"

    An array of objects that describe projected metrics.

    " + } + } + }, + "GetEnrollmentStatusRequest":{ + "type":"structure", + "members":{ + } + }, + "GetEnrollmentStatusResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"Status", + "documentation":"

    The enrollment status of the account.

    " + }, + "statusReason":{ + "shape":"StatusReason", + "documentation":"

    The reason for the enrollment status of the account.

    For example, an account might show a status of Pending because member accounts of an organization require more time to be enrolled in the service.

    " + }, + "memberAccountsEnrolled":{ + "shape":"MemberAccountsEnrolled", + "documentation":"

    Confirms the enrollment status of member accounts within the organization, if the account is a management account of an organization.

    " + } + } + }, + "GetLambdaFunctionRecommendationsRequest":{ + "type":"structure", + "members":{ + "functionArns":{ + "shape":"FunctionArns", + "documentation":"

    The Amazon Resource Name (ARN) of the functions for which to return recommendations.

    You can specify a qualified or unqualified ARN. If you specify an unqualified ARN without a function version suffix, Compute Optimizer will return recommendations for the latest ($LATEST) version of the function. If you specify a qualified ARN with a version suffix, Compute Optimizer will return recommendations for the specified function version. For more information about using function versions, see Using versions in the AWS Lambda Developer Guide.
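    As a hedged illustration, the sketch below requests recommendations for both the $LATEST version (unqualified ARN) and a specific published version (qualified ARN); model class names are assumed from the SDK v2 codegen conventions and the ARNs are placeholders.

        // Sketch only: names assumed from SDK v2 codegen conventions; ARNs are placeholders.
        import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
        import software.amazon.awssdk.services.computeoptimizer.model.GetLambdaFunctionRecommendationsRequest;

        public class LambdaRecommendations {
            public static void main(String[] args) {
                try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                    client.getLambdaFunctionRecommendations(GetLambdaFunctionRecommendationsRequest.builder()
                            .functionArns(
                                    // Unqualified ARN: recommendations for the $LATEST version.
                                    "arn:aws:lambda:us-east-1:111122223333:function:example-function",
                                    // Qualified ARN: recommendations for the specified version.
                                    "arn:aws:lambda:us-east-1:111122223333:function:example-function:12")
                            .build())
                          .lambdaFunctionRecommendations()
                          .forEach(r -> System.out.println(r));
                }
            }
        }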

    " + }, + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The IDs of the AWS accounts for which to return function recommendations.

    If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return function recommendations.

    Only one account ID can be specified per request.

    " + }, + "filters":{ + "shape":"LambdaFunctionRecommendationFilters", + "documentation":"

    An array of objects that describe a filter that returns a more specific list of function recommendations.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to advance to the next page of function recommendations.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of function recommendations to return with a single request.

    To retrieve the remaining results, make another request with the returned NextToken value.

    " + } + } + }, + "GetLambdaFunctionRecommendationsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to advance to the next page of function recommendations.

    This value is null when there are no more pages of function recommendations to return.

    " + }, + "lambdaFunctionRecommendations":{ + "shape":"LambdaFunctionRecommendations", + "documentation":"

    An array of objects that describe function recommendations.

    " + } + } + }, + "GetRecommendationError":{ + "type":"structure", + "members":{ + "identifier":{ + "shape":"Identifier", + "documentation":"

    The ID of the error.

    " + }, + "code":{ + "shape":"Code", + "documentation":"

    The error code.

    " + }, + "message":{ + "shape":"Message", + "documentation":"

    The message, or reason, for the error.

    " + } + }, + "documentation":"

    Describes an error experienced when getting recommendations.

    For example, an error is returned if you request recommendations for an unsupported Auto Scaling group, or if you request recommendations for an instance of an unsupported instance family.

    " + }, + "GetRecommendationErrors":{ + "type":"list", + "member":{"shape":"GetRecommendationError"} + }, + "GetRecommendationSummariesRequest":{ + "type":"structure", + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The IDs of the AWS accounts for which to return recommendation summaries.

    If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return recommendation summaries.

    Only one account ID can be specified per request.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to advance to the next page of recommendation summaries.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of recommendation summaries to return with a single request.

    To retrieve the remaining results, make another request with the returned NextToken value.
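    A hedged sketch of paging through all recommendation summaries with NextToken (names assumed from the SDK v2 codegen conventions):

        // Sketch only: names assumed from SDK v2 codegen conventions.
        import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
        import software.amazon.awssdk.services.computeoptimizer.model.GetRecommendationSummariesRequest;
        import software.amazon.awssdk.services.computeoptimizer.model.GetRecommendationSummariesResponse;

        public class AllRecommendationSummaries {
            public static void main(String[] args) {
                try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                    String nextToken = null;
                    do {
                        GetRecommendationSummariesResponse page = client.getRecommendationSummaries(
                                GetRecommendationSummariesRequest.builder()
                                        .maxResults(100)
                                        .nextToken(nextToken)   // null on the first request
                                        .build());
                        page.recommendationSummaries().forEach(s -> System.out.println(s));
                        nextToken = page.nextToken();            // null when there are no more pages
                    } while (nextToken != null);
                }
            }
        }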

    " + } + } + }, + "GetRecommendationSummariesResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to advance to the next page of recommendation summaries.

    This value is null when there are no more pages of recommendation summaries to return.

    " + }, + "recommendationSummaries":{ + "shape":"RecommendationSummaries", + "documentation":"

    An array of objects that summarize a recommendation.

    " + } + } + }, + "Identifier":{"type":"string"}, + "IncludeMemberAccounts":{"type":"boolean"}, + "InstanceArn":{"type":"string"}, + "InstanceArns":{ + "type":"list", + "member":{"shape":"InstanceArn"} + }, + "InstanceName":{"type":"string"}, + "InstanceRecommendation":{ + "type":"structure", + "members":{ + "instanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the current instance.

    " + }, + "accountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the instance.

    " + }, + "instanceName":{ + "shape":"InstanceName", + "documentation":"

    The name of the current instance.

    " + }, + "currentInstanceType":{ + "shape":"CurrentInstanceType", + "documentation":"

    The instance type of the current instance.

    " + }, + "finding":{ + "shape":"Finding", + "documentation":"

    The finding classification for the instance.

    Findings for instances include:

    • Underprovisioned —An instance is considered under-provisioned when at least one specification of your instance, such as CPU, memory, or network, does not meet the performance requirements of your workload. Under-provisioned instances may lead to poor application performance.

    • Overprovisioned —An instance is considered over-provisioned when at least one specification of your instance, such as CPU, memory, or network, can be sized down while still meeting the performance requirements of your workload, and no specification is under-provisioned. Over-provisioned instances may lead to unnecessary infrastructure cost.

    • Optimized —An instance is considered optimized when all specifications of your instance, such as CPU, memory, and network, meet the performance requirements of your workload and is not over provisioned. An optimized instance runs your workloads with optimal performance and infrastructure cost. For optimized resources, AWS Compute Optimizer might recommend a new generation instance type.

    " + }, + "utilizationMetrics":{ + "shape":"UtilizationMetrics", + "documentation":"

    An array of objects that describe the utilization metrics of the instance.

    " + }, + "lookBackPeriodInDays":{ + "shape":"LookBackPeriodInDays", + "documentation":"

    The number of days for which utilization metrics were analyzed for the instance.

    " + }, + "recommendationOptions":{ + "shape":"RecommendationOptions", + "documentation":"

    An array of objects that describe the recommendation options for the instance.

    " + }, + "recommendationSources":{ + "shape":"RecommendationSources", + "documentation":"

    An array of objects that describe the source resource of the recommendation.

    " + }, + "lastRefreshTimestamp":{ + "shape":"LastRefreshTimestamp", + "documentation":"

    The time stamp of when the instance recommendation was last refreshed.

    " + } + }, + "documentation":"

    Describes an Amazon EC2 instance recommendation.

    " + }, + "InstanceRecommendationOption":{ + "type":"structure", + "members":{ + "instanceType":{ + "shape":"InstanceType", + "documentation":"

    The instance type of the instance recommendation.

    " + }, + "projectedUtilizationMetrics":{ + "shape":"ProjectedUtilizationMetrics", + "documentation":"

    An array of objects that describe the projected utilization metrics of the instance recommendation option.

    The Cpu and Memory metrics are the only projected utilization metrics returned. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    " + }, + "performanceRisk":{ + "shape":"PerformanceRisk", + "documentation":"

    The performance risk of the instance recommendation option.

    Performance risk is the likelihood of the recommended instance type not meeting the performance requirement of your workload.

    The lowest performance risk is categorized as 0, and the highest as 5.

    " + }, + "rank":{ + "shape":"Rank", + "documentation":"

    The rank of the instance recommendation option.

    The top recommendation option is ranked as 1.

    " + } + }, + "documentation":"

    Describes a recommendation option for an Amazon EC2 instance.

    " + }, + "InstanceRecommendations":{ + "type":"list", + "member":{"shape":"InstanceRecommendation"} + }, + "InstanceType":{"type":"string"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An internal error has occurred. Try your call again.

    ", + "exception":true, + "fault":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An invalid or out-of-range value was supplied for the input parameter.

    ", + "exception":true, + "synthetic":true + }, + "JobFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"JobFilterName", + "documentation":"

    The name of the filter.

    Specify ResourceType to return export jobs of a specific resource type (e.g., Ec2Instance).

    Specify JobStatus to return export jobs with a specific status (e.g., Complete).

    " + }, + "values":{ + "shape":"FilterValues", + "documentation":"

    The value of the filter.

    The valid values for this parameter are as follows, depending on what you specify for the name parameter:

    • Specify Ec2Instance or AutoScalingGroup if you specified the name parameter as ResourceType. There is no filter for EBS volumes because volume recommendations cannot be exported at this time.

    • Specify Queued, InProgress, Complete, or Failed if you specified the name parameter as JobStatus.

    " + } + }, + "documentation":"

    Describes a filter that returns a more specific list of recommendation export jobs.

    This filter is used with the DescribeRecommendationExportJobs action.

    " + }, + "JobFilterName":{ + "type":"string", + "enum":[ + "ResourceType", + "JobStatus" + ] + }, + "JobFilters":{ + "type":"list", + "member":{"shape":"JobFilter"} + }, + "JobId":{"type":"string"}, + "JobIds":{ + "type":"list", + "member":{"shape":"JobId"} + }, + "JobStatus":{ + "type":"string", + "enum":[ + "Queued", + "InProgress", + "Complete", + "Failed" + ] + }, + "LambdaFunctionMemoryMetricName":{ + "type":"string", + "enum":["Duration"] + }, + "LambdaFunctionMemoryMetricStatistic":{ + "type":"string", + "enum":[ + "LowerBound", + "UpperBound", + "Expected" + ] + }, + "LambdaFunctionMemoryProjectedMetric":{ + "type":"structure", + "members":{ + "name":{ + "shape":"LambdaFunctionMemoryMetricName", + "documentation":"

    The name of the projected utilization metric.

    " + }, + "statistic":{ + "shape":"LambdaFunctionMemoryMetricStatistic", + "documentation":"

    The statistic of the projected utilization metric.

    " + }, + "value":{ + "shape":"MetricValue", + "documentation":"

    The values of the projected utilization metrics.

    " + } + }, + "documentation":"

    Describes a projected utilization metric of an AWS Lambda function recommendation option.

    " + }, + "LambdaFunctionMemoryProjectedMetrics":{ + "type":"list", + "member":{"shape":"LambdaFunctionMemoryProjectedMetric"} + }, + "LambdaFunctionMemoryRecommendationOption":{ + "type":"structure", + "members":{ + "rank":{ + "shape":"Rank", + "documentation":"

    The rank of the function recommendation option.

    The top recommendation option is ranked as 1.

    " + }, + "memorySize":{ + "shape":"MemorySize", + "documentation":"

    The memory size, in MB, of the function recommendation option.

    " + }, + "projectedUtilizationMetrics":{ + "shape":"LambdaFunctionMemoryProjectedMetrics", + "documentation":"

    An array of objects that describe the projected utilization metrics of the function recommendation option.

    " + } + }, + "documentation":"

    Describes a recommendation option for an AWS Lambda function.

    " + }, + "LambdaFunctionMemoryRecommendationOptions":{ + "type":"list", + "member":{"shape":"LambdaFunctionMemoryRecommendationOption"} + }, + "LambdaFunctionMetricName":{ + "type":"string", + "enum":[ + "Duration", + "Memory" + ] + }, + "LambdaFunctionMetricStatistic":{ + "type":"string", + "enum":[ + "Maximum", + "Average" + ] + }, + "LambdaFunctionRecommendation":{ + "type":"structure", + "members":{ + "functionArn":{ + "shape":"FunctionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the current function.

    " + }, + "functionVersion":{ + "shape":"FunctionVersion", + "documentation":"

    The version number of the current function.

    " + }, + "accountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the function.

    " + }, + "currentMemorySize":{ + "shape":"MemorySize", + "documentation":"

    The amount of memory, in MB, that's allocated to the current function.

    " + }, + "numberOfInvocations":{ + "shape":"NumberOfInvocations", + "documentation":"

    The number of times your function code was executed during the look-back period.

    " + }, + "utilizationMetrics":{ + "shape":"LambdaFunctionUtilizationMetrics", + "documentation":"

    An array of objects that describe the utilization metrics of the function.

    " + }, + "lookbackPeriodInDays":{ + "shape":"LookBackPeriodInDays", + "documentation":"

    The number of days for which utilization metrics were analyzed for the function.

    " + }, + "lastRefreshTimestamp":{ + "shape":"LastRefreshTimestamp", + "documentation":"

    The time stamp of when the function recommendation was last refreshed.

    " + }, + "finding":{ + "shape":"LambdaFunctionRecommendationFinding", + "documentation":"

    The finding classification for the function.

    Findings for functions include:

    • Optimized — The function is correctly provisioned to run your workload based on its current configuration and its utilization history. This finding classification does not include finding reason codes.

    • NotOptimized — The function is performing at a higher level (over-provisioned) or at a lower level (under-provisioned) than required for your workload because its current configuration is not optimal. Over-provisioned resources might lead to unnecessary infrastructure cost, and under-provisioned resources might lead to poor application performance. This finding classification can include the MemoryOverprovisioned and MemoryUnderprovisioned finding reason codes.

    • Unavailable — Compute Optimizer was unable to generate a recommendation for the function. This could be because the function has not accumulated sufficient metric data, or the function does not qualify for a recommendation. This finding classification can include the InsufficientData and Inconclusive finding reason codes.

      Functions with a finding of unavailable are not returned unless you specify the filter parameter with a value of Unavailable in your GetLambdaFunctionRecommendations request.

    " + }, + "findingReasonCodes":{ + "shape":"LambdaFunctionRecommendationFindingReasonCodes", + "documentation":"

    The reason for the finding classification of the function.

    Functions that have a finding classification of Optimized don't have a finding reason code.

    Reason codes include:

    • MemoryOverprovisioned — The function is over-provisioned when its memory configuration can be sized down while still meeting the performance requirements of your workload. An over-provisioned function might lead to unnecessary infrastructure cost. This finding reason code is part of the NotOptimized finding classification.

    • MemoryUnderprovisioned — The function is under-provisioned when its memory configuration doesn't meet the performance requirements of the workload. An under-provisioned function might lead to poor application performance. This finding reason code is part of the NotOptimized finding classification.

    • InsufficientData — The function does not have sufficient metric data for Compute Optimizer to generate a recommendation. For more information, see the Supported resources and requirements in the AWS Compute Optimizer User Guide. This finding reason code is part of the Unavailable finding classification.

    • Inconclusive — The function does not qualify for a recommendation, or there was an internal error. This finding reason code is part of the Unavailable finding classification.

    " + }, + "memorySizeRecommendationOptions":{ + "shape":"LambdaFunctionMemoryRecommendationOptions", + "documentation":"

    An array of objects that describe the memory configuration recommendation options for the function.

    " + } + }, + "documentation":"

    Describes an AWS Lambda function recommendation.

    " + }, + "LambdaFunctionRecommendationFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"LambdaFunctionRecommendationFilterName", + "documentation":"

    The name of the filter.

    Specify Finding to return recommendations with a specific finding classification (e.g., NotOptimized).

    Specify FindingReasonCode to return recommendations with a specific finding reason code (e.g., MemoryUnderprovisioned).

    " + }, + "values":{ + "shape":"FilterValues", + "documentation":"

    The value of the filter.

    The valid values for this parameter are as follows, depending on what you specify for the name parameter:

    • Specify Optimized, NotOptimized, or Unavailable if you specified the name parameter as Finding.

    • Specify MemoryOverprovisioned, MemoryUnderprovisioned, InsufficientData, or Inconclusive if you specified the name parameter as FindingReasonCode.
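
    For illustration only, a minimal AWS SDK for Java v2 sketch that applies this kind of filter when requesting Lambda function recommendations. The client, request, and accessor names are assumptions based on how the SDK is typically generated from this model, not a confirmed API surface:

        import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
        import software.amazon.awssdk.services.computeoptimizer.model.GetLambdaFunctionRecommendationsRequest;
        import software.amazon.awssdk.services.computeoptimizer.model.LambdaFunctionRecommendationFilter;

        public class NotOptimizedFunctions {
            public static void main(String[] args) {
                try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                    // Filter name "Finding" with value "NotOptimized", per the valid values above.
                    LambdaFunctionRecommendationFilter filter = LambdaFunctionRecommendationFilter.builder()
                            .name("Finding")
                            .values("NotOptimized")
                            .build();

                    client.getLambdaFunctionRecommendations(GetLambdaFunctionRecommendationsRequest.builder()
                                    .filters(filter)                    // request member name assumed from the model
                                    .build())
                          .lambdaFunctionRecommendations()              // response member name assumed
                          .forEach(rec -> System.out.println(rec.functionArn() + " " + rec.finding()));
                }
            }
        }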

    " + } + }, + "documentation":"

    Describes a filter that returns a more specific list of AWS Lambda function recommendations.

    " + }, + "LambdaFunctionRecommendationFilterName":{ + "type":"string", + "enum":[ + "Finding", + "FindingReasonCode" + ] + }, + "LambdaFunctionRecommendationFilters":{ + "type":"list", + "member":{"shape":"LambdaFunctionRecommendationFilter"} + }, + "LambdaFunctionRecommendationFinding":{ + "type":"string", + "enum":[ + "Optimized", + "NotOptimized", + "Unavailable" + ] + }, + "LambdaFunctionRecommendationFindingReasonCode":{ + "type":"string", + "enum":[ + "MemoryOverprovisioned", + "MemoryUnderprovisioned", + "InsufficientData", + "Inconclusive" + ] + }, + "LambdaFunctionRecommendationFindingReasonCodes":{ + "type":"list", + "member":{"shape":"LambdaFunctionRecommendationFindingReasonCode"} + }, + "LambdaFunctionRecommendations":{ + "type":"list", + "member":{"shape":"LambdaFunctionRecommendation"} + }, + "LambdaFunctionUtilizationMetric":{ + "type":"structure", + "members":{ + "name":{ + "shape":"LambdaFunctionMetricName", + "documentation":"

    The name of the utilization metric.

    " + }, + "statistic":{ + "shape":"LambdaFunctionMetricStatistic", + "documentation":"

    The statistic of the utilization metric.

    " + }, + "value":{ + "shape":"MetricValue", + "documentation":"

    The value of the utilization metric.

    " + } + }, + "documentation":"

    Describes a utilization metric of an AWS Lambda function.

    " + }, + "LambdaFunctionUtilizationMetrics":{ + "type":"list", + "member":{"shape":"LambdaFunctionUtilizationMetric"} + }, + "LastRefreshTimestamp":{"type":"timestamp"}, + "LastUpdatedTimestamp":{"type":"timestamp"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request exceeds a limit of the service.

    ", + "exception":true, + "synthetic":true + }, + "LookBackPeriodInDays":{"type":"double"}, + "MaxResults":{ + "type":"integer", + "box":true + }, + "MaxSize":{"type":"integer"}, + "MemberAccountsEnrolled":{"type":"boolean"}, + "MemorySize":{"type":"integer"}, + "Message":{"type":"string"}, + "MetadataKey":{"type":"string"}, + "MetricName":{ + "type":"string", + "enum":[ + "Cpu", + "Memory", + "EBS_READ_OPS_PER_SECOND", + "EBS_WRITE_OPS_PER_SECOND", + "EBS_READ_BYTES_PER_SECOND", + "EBS_WRITE_BYTES_PER_SECOND" + ] + }, + "MetricStatistic":{ + "type":"string", + "enum":[ + "Maximum", + "Average" + ] + }, + "MetricValue":{"type":"double"}, + "MetricValues":{ + "type":"list", + "member":{"shape":"MetricValue"} + }, + "MinSize":{"type":"integer"}, + "MissingAuthenticationToken":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request must contain either a valid (registered) AWS access key ID or X.509 certificate.

    ", + "exception":true, + "synthetic":true + }, + "NextToken":{"type":"string"}, + "NumberOfInvocations":{"type":"long"}, + "OptInRequiredException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The account is not opted in to AWS Compute Optimizer.

    ", + "exception":true, + "synthetic":true + }, + "PerformanceRisk":{ + "type":"double", + "max":5, + "min":0 + }, + "Period":{"type":"integer"}, + "ProjectedMetric":{ + "type":"structure", + "members":{ + "name":{ + "shape":"MetricName", + "documentation":"

    The name of the projected utilization metric.

    The following projected utilization metrics are returned:

    • Cpu - The projected percentage of allocated EC2 compute units that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the processing power required to run an application on the recommendation option.

      Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.

      Units: Percent

    • Memory - The percentage of memory that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the amount of memory required to run an application on the recommendation option.

      Units: Percent

      The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    " + }, + "timestamps":{ + "shape":"Timestamps", + "documentation":"

    The time stamps of the projected utilization metric.

    " + }, + "values":{ + "shape":"MetricValues", + "documentation":"

    The values of the projected utilization metrics.

    " + } + }, + "documentation":"

    Describes a projected utilization metric of a recommendation option, such as an Amazon EC2 instance. This represents the projected utilization of a recommendation option had you used that resource during the analyzed period.

    Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

    The Cpu and Memory metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    " + }, + "ProjectedMetrics":{ + "type":"list", + "member":{"shape":"ProjectedMetric"} + }, + "ProjectedUtilizationMetrics":{ + "type":"list", + "member":{"shape":"UtilizationMetric"} + }, + "Rank":{"type":"integer"}, + "ReasonCodeSummaries":{ + "type":"list", + "member":{"shape":"ReasonCodeSummary"} + }, + "ReasonCodeSummary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"FindingReasonCode", + "documentation":"

    The name of the finding reason code.

    " + }, + "value":{ + "shape":"SummaryValue", + "documentation":"

    The value of the finding reason code summary.

    " + } + }, + "documentation":"

    A summary of a finding reason code.

    " + }, + "RecommendationExportJob":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

    The identification number of the export job.

    " + }, + "destination":{ + "shape":"ExportDestination", + "documentation":"

    An object that describes the destination of the export file.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The resource type of the exported recommendations.

    " + }, + "status":{ + "shape":"JobStatus", + "documentation":"

    The status of the export job.

    " + }, + "creationTimestamp":{ + "shape":"CreationTimestamp", + "documentation":"

    The timestamp of when the export job was created.

    " + }, + "lastUpdatedTimestamp":{ + "shape":"LastUpdatedTimestamp", + "documentation":"

    The timestamp of when the export job was last updated.

    " + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

    The reason for an export job failure.

    " + } + }, + "documentation":"

    Describes a recommendation export job.

    Use the DescribeRecommendationExportJobs action to view your recommendation export jobs.

    Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your recommendations.

    " + }, + "RecommendationExportJobs":{ + "type":"list", + "member":{"shape":"RecommendationExportJob"} + }, + "RecommendationOptions":{ + "type":"list", + "member":{"shape":"InstanceRecommendationOption"} + }, + "RecommendationSource":{ + "type":"structure", + "members":{ + "recommendationSourceArn":{ + "shape":"RecommendationSourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the recommendation source.

    " + }, + "recommendationSourceType":{ + "shape":"RecommendationSourceType", + "documentation":"

    The resource type of the recommendation source.

    " + } + }, + "documentation":"

    Describes the source of a recommendation, such as an Amazon EC2 instance or Auto Scaling group.

    " + }, + "RecommendationSourceArn":{"type":"string"}, + "RecommendationSourceType":{ + "type":"string", + "enum":[ + "Ec2Instance", + "AutoScalingGroup", + "EbsVolume", + "LambdaFunction" + ] + }, + "RecommendationSources":{ + "type":"list", + "member":{"shape":"RecommendationSource"} + }, + "RecommendationSummaries":{ + "type":"list", + "member":{"shape":"RecommendationSummary"} + }, + "RecommendationSummary":{ + "type":"structure", + "members":{ + "summaries":{ + "shape":"Summaries", + "documentation":"

    An array of objects that describe a recommendation summary.

    " + }, + "recommendationResourceType":{ + "shape":"RecommendationSourceType", + "documentation":"

    The resource type of the recommendation.

    " + }, + "accountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the recommendation summary.

    " + } + }, + "documentation":"

    A summary of a recommendation.

    " + }, + "RecommendedInstanceType":{"type":"string"}, + "RecommendedOptionProjectedMetric":{ + "type":"structure", + "members":{ + "recommendedInstanceType":{ + "shape":"RecommendedInstanceType", + "documentation":"

    The recommended instance type.

    " + }, + "rank":{ + "shape":"Rank", + "documentation":"

    The rank of the recommendation option projected metric.

    The top recommendation option is ranked as 1.

    The projected metric rank correlates to the recommendation option rank. For example, the projected metric ranked as 1 is related to the recommendation option that is also ranked as 1 in the same response.

    " + }, + "projectedMetrics":{ + "shape":"ProjectedMetrics", + "documentation":"

    An array of objects that describe a projected utilization metric.

    " + } + }, + "documentation":"

    Describes a projected utilization metric of a recommendation option.

    The Cpu and Memory metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    " + }, + "RecommendedOptionProjectedMetrics":{ + "type":"list", + "member":{"shape":"RecommendedOptionProjectedMetric"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    A resource that is required for the action doesn't exist.

    ", + "exception":true, + "synthetic":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "Ec2Instance", + "AutoScalingGroup" + ] + }, + "S3Destination":{ + "type":"structure", + "members":{ + "bucket":{ + "shape":"DestinationBucket", + "documentation":"

    The name of the Amazon S3 bucket used as the destination of an export file.

    " + }, + "key":{ + "shape":"DestinationKey", + "documentation":"

    The Amazon S3 bucket key of an export file.

    The key uniquely identifies the object, or export file, in the S3 bucket.

    " + }, + "metadataKey":{ + "shape":"MetadataKey", + "documentation":"

    The Amazon S3 bucket key of a metadata file.

    The key uniquely identifies the object, or metadata file, in the S3 bucket.

    " + } + }, + "documentation":"

    Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and object keys of a recommendations export file, and its associated metadata file.

    " + }, + "S3DestinationConfig":{ + "type":"structure", + "members":{ + "bucket":{ + "shape":"DestinationBucket", + "documentation":"

    The name of the Amazon S3 bucket to use as the destination for an export job.

    " + }, + "keyPrefix":{ + "shape":"DestinationKeyPrefix", + "documentation":"

    The Amazon S3 bucket prefix for an export job.

    " + } + }, + "documentation":"

    Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for a recommendations export job.

    You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer user guide.
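
    As a sketch only (the class and member names are assumed from this model's S3DestinationConfig shape, and the bucket name and prefix are placeholders), the destination described above might be configured like this with the AWS SDK for Java v2:

        import software.amazon.awssdk.services.computeoptimizer.model.S3DestinationConfig;

        public class ExportDestinationSketch {
            // The bucket must already exist and carry the bucket policy that allows Compute
            // Optimizer to write the export file; if you use a key prefix, include it in the
            // policy as well, as described above.
            static S3DestinationConfig destination() {
                return S3DestinationConfig.builder()
                        .bucket("my-compute-optimizer-exports")   // placeholder bucket name
                        .keyPrefix("reports/ec2")                 // placeholder object prefix
                        .build();
            }
            // Pass the returned object as the S3 destination of an ExportEC2InstanceRecommendations
            // or ExportAutoScalingGroupRecommendations request, then track progress with
            // DescribeRecommendationExportJobs.
        }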

    " + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request has failed due to a temporary failure of the server.

    ", + "exception":true, + "fault":true + }, + "Status":{ + "type":"string", + "enum":[ + "Active", + "Inactive", + "Pending", + "Failed" + ] + }, + "StatusReason":{"type":"string"}, + "Summaries":{ + "type":"list", + "member":{"shape":"Summary"} + }, + "Summary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"Finding", + "documentation":"

    The finding classification of the recommendation.

    " + }, + "value":{ + "shape":"SummaryValue", + "documentation":"

    The value of the recommendation summary.

    " + }, + "reasonCodeSummaries":{ + "shape":"ReasonCodeSummaries", + "documentation":"

    An array of objects that summarize a finding reason code.

    " + } + }, + "documentation":"

    The summary of a recommendation.

    " + }, + "SummaryValue":{"type":"double"}, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "exception":true, + "synthetic":true + }, + "Timestamp":{"type":"timestamp"}, + "Timestamps":{ + "type":"list", + "member":{"shape":"Timestamp"} + }, + "UpdateEnrollmentStatusRequest":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"Status", + "documentation":"

    The new enrollment status of the account.

    Accepted options are Active or Inactive. You will get an error if Pending or Failed are specified.

    " + }, + "includeMemberAccounts":{ + "shape":"IncludeMemberAccounts", + "documentation":"

    Indicates whether to enroll member accounts of the organization if your account is the management account of an organization.
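
    A minimal sketch of opting an account in with the AWS SDK for Java v2; the client and generated names below are assumptions based on this model:

        import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
        import software.amazon.awssdk.services.computeoptimizer.model.UpdateEnrollmentStatusRequest;
        import software.amazon.awssdk.services.computeoptimizer.model.UpdateEnrollmentStatusResponse;

        public class OptInSketch {
            public static void main(String[] args) {
                try (ComputeOptimizerClient client = ComputeOptimizerClient.create()) {
                    // Only Active or Inactive are accepted; includeMemberAccounts enrolls the
                    // organization's member accounts when called from the management account.
                    UpdateEnrollmentStatusResponse response = client.updateEnrollmentStatus(
                            UpdateEnrollmentStatusRequest.builder()
                                    .status("Active")
                                    .includeMemberAccounts(true)
                                    .build());
                    System.out.println(response.status() + " " + response.statusReason());
                }
            }
        }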

    " + } + } + }, + "UpdateEnrollmentStatusResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"Status", + "documentation":"

    The enrollment status of the account.

    " + }, + "statusReason":{ + "shape":"StatusReason", + "documentation":"

    The reason for the enrollment status of the account. For example, an account might show a status of Pending because member accounts of an organization require more time to be enrolled in the service.

    " + } + } + }, + "UtilizationMetric":{ + "type":"structure", + "members":{ + "name":{ + "shape":"MetricName", + "documentation":"

    The name of the utilization metric.

    The following utilization metrics are available:

    • Cpu - The percentage of allocated EC2 compute units that are currently in use on the instance. This metric identifies the processing power required to run an application on the instance.

      Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.

      Units: Percent

    • Memory - The percentage of memory that is currently in use on the instance. This metric identifies the amount of memory required to run an application on the instance.

      Units: Percent

      The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    • EBS_READ_OPS_PER_SECOND - The completed read operations from all EBS volumes attached to the instance in a specified period of time.

      Unit: Count

    • EBS_WRITE_OPS_PER_SECOND - The completed write operations to all EBS volumes attached to the instance in a specified period of time.

      Unit: Count

    • EBS_READ_BYTES_PER_SECOND - The bytes read from all EBS volumes attached to the instance in a specified period of time.

      Unit: Bytes

    • EBS_WRITE_BYTES_PER_SECOND - The bytes written to all EBS volumes attached to the instance in a specified period of time.

      Unit: Bytes

    " + }, + "statistic":{ + "shape":"MetricStatistic", + "documentation":"

    The statistic of the utilization metric.

    The following statistics are available:

    • Average - This is the value of Sum / SampleCount during the specified period, or the average value observed during the specified period.

    • Maximum - The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

    " + }, + "value":{ + "shape":"MetricValue", + "documentation":"

    The value of the utilization metric.

    " + } + }, + "documentation":"

    Describes a utilization metric of a resource, such as an Amazon EC2 instance.

    Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

    " + }, + "UtilizationMetrics":{ + "type":"list", + "member":{"shape":"UtilizationMetric"} + }, + "VolumeArn":{"type":"string"}, + "VolumeArns":{ + "type":"list", + "member":{"shape":"VolumeArn"} + }, + "VolumeBaselineIOPS":{"type":"integer"}, + "VolumeBaselineThroughput":{"type":"integer"}, + "VolumeBurstIOPS":{"type":"integer"}, + "VolumeBurstThroughput":{"type":"integer"}, + "VolumeConfiguration":{ + "type":"structure", + "members":{ + "volumeType":{ + "shape":"VolumeType", + "documentation":"

    The volume type.

    This can be gp2 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

    " + }, + "volumeSize":{ + "shape":"VolumeSize", + "documentation":"

    The size of the volume, in GiB.

    " + }, + "volumeBaselineIOPS":{ + "shape":"VolumeBaselineIOPS", + "documentation":"

    The baseline IOPS of the volume.

    " + }, + "volumeBurstIOPS":{ + "shape":"VolumeBurstIOPS", + "documentation":"

    The burst IOPS of the volume.

    " + }, + "volumeBaselineThroughput":{ + "shape":"VolumeBaselineThroughput", + "documentation":"

    The baseline throughput of the volume.

    " + }, + "volumeBurstThroughput":{ + "shape":"VolumeBurstThroughput", + "documentation":"

    The burst throughput of the volume.

    " + } + }, + "documentation":"

    Describes the configuration of an Amazon Elastic Block Store (Amazon EBS) volume.

    " + }, + "VolumeRecommendation":{ + "type":"structure", + "members":{ + "volumeArn":{ + "shape":"VolumeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the current volume.

    " + }, + "accountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the volume.

    " + }, + "currentConfiguration":{ + "shape":"VolumeConfiguration", + "documentation":"

    An object that describes the current configuration of the volume.

    " + }, + "finding":{ + "shape":"EBSFinding", + "documentation":"

    The finding classification for the volume.

    Findings for volumes include:

    • NotOptimized —A volume is considered not optimized when AWS Compute Optimizer identifies a recommendation that can provide better performance for your workload.

    • Optimized —A volume is considered optimized when Compute Optimizer determines that the volume is correctly provisioned to run your workload based on the chosen volume type. For optimized resources, Compute Optimizer might recommend a new generation volume type.

    " + }, + "utilizationMetrics":{ + "shape":"EBSUtilizationMetrics", + "documentation":"

    An array of objects that describe the utilization metrics of the volume.

    " + }, + "lookBackPeriodInDays":{ + "shape":"LookBackPeriodInDays", + "documentation":"

    The number of days for which utilization metrics were analyzed for the volume.

    " + }, + "volumeRecommendationOptions":{ + "shape":"VolumeRecommendationOptions", + "documentation":"

    An array of objects that describe the recommendation options for the volume.

    " + }, + "lastRefreshTimestamp":{ + "shape":"LastRefreshTimestamp", + "documentation":"

    The time stamp of when the volume recommendation was last refreshed.

    " + } + }, + "documentation":"

    Describes an Amazon Elastic Block Store (Amazon EBS) volume recommendation.

    " + }, + "VolumeRecommendationOption":{ + "type":"structure", + "members":{ + "configuration":{ + "shape":"VolumeConfiguration", + "documentation":"

    An object that describes a volume configuration.

    " + }, + "performanceRisk":{ + "shape":"PerformanceRisk", + "documentation":"

    The performance risk of the volume recommendation option.

    Performance risk is the likelihood of the recommended volume type not meeting the performance requirement of your workload.

    The lowest performance risk is categorized as 0, and the highest as 5.

    " + }, + "rank":{ + "shape":"Rank", + "documentation":"

    The rank of the volume recommendation option.

    The top recommendation option is ranked as 1.

    " + } + }, + "documentation":"

    Describes a recommendation option for an Amazon Elastic Block Store (Amazon EBS) volume.

    " + }, + "VolumeRecommendationOptions":{ + "type":"list", + "member":{"shape":"VolumeRecommendationOption"} + }, + "VolumeRecommendations":{ + "type":"list", + "member":{"shape":"VolumeRecommendation"} + }, + "VolumeSize":{"type":"integer"}, + "VolumeType":{"type":"string"} + }, + "documentation":"

    AWS Compute Optimizer is a service that analyzes the configuration and utilization metrics of your AWS compute resources, such as EC2 instances, Auto Scaling groups, AWS Lambda functions, and Amazon EBS volumes. It reports whether your resources are optimal, and generates optimization recommendations to reduce the cost and improve the performance of your workloads. Compute Optimizer also provides recent utilization metric data, as well as projected utilization metric data for the recommendations, which you can use to evaluate which recommendation provides the best price-performance trade-off. The analysis of your usage patterns can help you decide when to move or resize your running resources, and still meet your performance and capacity requirements. For more information about Compute Optimizer, including the required permissions to use the service, see the AWS Compute Optimizer User Guide.

    " +} diff --git a/services/config/build.properties b/services/config/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/config/build.properties +++ b/services/config/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/config/pom.xml b/services/config/pom.xml index bcf0ad01e005..1bf0f31b69ff 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + connectcontactlens + AWS Java SDK :: Services :: Connect Contact Lens + The AWS Java SDK for Connect Contact Lens module holds the client classes that are used for + communicating with Connect Contact Lens. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.connectcontactlens + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/connectcontactlens/src/main/resources/codegen-resources/paginators-1.json b/services/connectcontactlens/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..d1d69dccd15d --- /dev/null +++ b/services/connectcontactlens/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,9 @@ +{ + "pagination": { + "ListRealtimeContactAnalysisSegments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/connectcontactlens/src/main/resources/codegen-resources/service-2.json b/services/connectcontactlens/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..08a0c4e78b5b --- /dev/null +++ b/services/connectcontactlens/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,361 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-08-21", + "endpointPrefix":"contact-lens", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon Connect Contact Lens", + "serviceFullName":"Amazon Connect Contact Lens", + "serviceId":"Connect Contact Lens", + "signatureVersion":"v4", + "signingName":"connect", + "uid":"connect-contact-lens-2020-08-21" + }, + "operations":{ + "ListRealtimeContactAnalysisSegments":{ + "name":"ListRealtimeContactAnalysisSegments", + "http":{ + "method":"POST", + "requestUri":"/realtime-contact-analysis/analysis-segments" + }, + "input":{"shape":"ListRealtimeContactAnalysisSegmentsRequest"}, + "output":{"shape":"ListRealtimeContactAnalysisSegmentsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Provides a list of analysis segments for a real-time analysis session.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "Categories":{ + "type":"structure", + "required":[ + "MatchedCategories", + "MatchedDetails" + ], + "members":{ + "MatchedCategories":{ + "shape":"MatchedCategories", + "documentation":"

    The category rules that have been matched in the analyzed segment.

    " + }, + "MatchedDetails":{ + "shape":"MatchedDetails", + "documentation":"

    The category rule that was matched and when it occurred in the transcript.

    " + } + }, + "documentation":"

    Provides the category rules that are used to automatically categorize contacts based on uttered keywords and phrases.

    " + }, + "CategoryDetails":{ + "type":"structure", + "required":["PointsOfInterest"], + "members":{ + "PointsOfInterest":{ + "shape":"PointsOfInterest", + "documentation":"

    The section of audio where the category rule was detected.

    " + } + }, + "documentation":"

    Provides information about the category rule that was matched.

    " + }, + "CategoryName":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "CharacterOffset":{ + "type":"integer", + "min":0 + }, + "CharacterOffsets":{ + "type":"structure", + "required":[ + "BeginOffsetChar", + "EndOffsetChar" + ], + "members":{ + "BeginOffsetChar":{ + "shape":"CharacterOffset", + "documentation":"

    The beginning of the issue.

    " + }, + "EndOffsetChar":{ + "shape":"CharacterOffset", + "documentation":"

    The end of the issue.

    " + } + }, + "documentation":"

    The character offsets in the transcript where issues were detected.

    " + }, + "ContactId":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "InstanceId":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    Request processing failed due to an error or failure with the service.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The request is not valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "IssueDetected":{ + "type":"structure", + "required":["CharacterOffsets"], + "members":{ + "CharacterOffsets":{ + "shape":"CharacterOffsets", + "documentation":"

    The offset for when the issue was detected in the segment.

    " + } + }, + "documentation":"

    Potential issues that are detected based on an artificial intelligence analysis of each turn in the conversation.

    " + }, + "IssuesDetected":{ + "type":"list", + "member":{"shape":"IssueDetected"}, + "max":20, + "min":0 + }, + "ListRealtimeContactAnalysisSegmentsRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ContactId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

    The identifier of the Amazon Connect instance.

    " + }, + "ContactId":{ + "shape":"ContactId", + "documentation":"

    The identifier of the contact.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return per page.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    " + } + } + }, + "ListRealtimeContactAnalysisSegmentsResponse":{ + "type":"structure", + "required":["Segments"], + "members":{ + "Segments":{ + "shape":"RealtimeContactAnalysisSegments", + "documentation":"

    An analyzed transcript or category.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If there are additional results, this is the token for the next set of results. If the response includes nextToken, there are two possible scenarios:

    • There are more segments so another call is required to get them.

    • There are no more segments at this time, but more may be available later (real-time analysis is in progress) so the client should call the operation again to get new segments.

    If the response does not include nextToken, the analysis is complete (whether it succeeded or failed) and there are no more segments to retrieve.
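
    As an illustrative sketch only (client and accessor names are assumed from this model and its paginator definition), a caller might page and poll like this with the AWS SDK for Java v2:

        import software.amazon.awssdk.services.connectcontactlens.ConnectContactLensClient;
        import software.amazon.awssdk.services.connectcontactlens.model.ListRealtimeContactAnalysisSegmentsRequest;
        import software.amazon.awssdk.services.connectcontactlens.model.ListRealtimeContactAnalysisSegmentsResponse;

        public class SegmentPoller {
            public static void main(String[] args) {
                try (ConnectContactLensClient client = ConnectContactLensClient.create()) {
                    String nextToken = null;
                    do {
                        ListRealtimeContactAnalysisSegmentsResponse response =
                                client.listRealtimeContactAnalysisSegments(
                                        ListRealtimeContactAnalysisSegmentsRequest.builder()
                                                .instanceId("your-connect-instance-id")   // placeholder
                                                .contactId("your-contact-id")             // placeholder
                                                .maxResults(100)
                                                .nextToken(nextToken)
                                                .build());
                        response.segments().forEach(System.out::println);
                        // A returned token means either more segments exist now, or analysis is
                        // still in progress and the same call should be repeated later.
                        nextToken = response.nextToken();
                    } while (nextToken != null);
                }
            }
        }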

    " + } + } + }, + "MatchedCategories":{ + "type":"list", + "member":{"shape":"CategoryName"}, + "max":150, + "min":0 + }, + "MatchedDetails":{ + "type":"map", + "key":{"shape":"CategoryName"}, + "value":{"shape":"CategoryDetails"}, + "max":150, + "min":0 + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "Message":{"type":"string"}, + "NextToken":{ + "type":"string", + "max":131070, + "min":1, + "pattern":".*\\S.*" + }, + "OffsetMillis":{ + "type":"integer", + "min":0 + }, + "ParticipantId":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "ParticipantRole":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "PointOfInterest":{ + "type":"structure", + "required":[ + "BeginOffsetMillis", + "EndOffsetMillis" + ], + "members":{ + "BeginOffsetMillis":{ + "shape":"OffsetMillis", + "documentation":"

    The beginning offset in milliseconds where the category rule was detected.

    " + }, + "EndOffsetMillis":{ + "shape":"OffsetMillis", + "documentation":"

    The ending offset in milliseconds where the category rule was detected.

    " + } + }, + "documentation":"

    The section of the contact audio where that category rule was detected.

    " + }, + "PointsOfInterest":{ + "type":"list", + "member":{"shape":"PointOfInterest"}, + "max":20, + "min":0 + }, + "RealtimeContactAnalysisSegment":{ + "type":"structure", + "members":{ + "Transcript":{ + "shape":"Transcript", + "documentation":"

    The analyzed transcript.

    " + }, + "Categories":{ + "shape":"Categories", + "documentation":"

    The matched category rules.

    " + } + }, + "documentation":"

    An analyzed segment for a real-time analysis session.

    " + }, + "RealtimeContactAnalysisSegments":{ + "type":"list", + "member":{"shape":"RealtimeContactAnalysisSegment"}, + "max":100, + "min":0 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The specified resource was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SentimentValue":{ + "type":"string", + "enum":[ + "POSITIVE", + "NEUTRAL", + "NEGATIVE" + ] + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The throttling limit has been exceeded.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Transcript":{ + "type":"structure", + "required":[ + "Id", + "ParticipantId", + "ParticipantRole", + "Content", + "BeginOffsetMillis", + "EndOffsetMillis", + "Sentiment" + ], + "members":{ + "Id":{ + "shape":"TranscriptId", + "documentation":"

    The identifier of the transcript.

    " + }, + "ParticipantId":{ + "shape":"ParticipantId", + "documentation":"

    The identifier of the participant.

    " + }, + "ParticipantRole":{ + "shape":"ParticipantRole", + "documentation":"

    The role of the participant. For example, whether it is a customer, agent, or system.

    " + }, + "Content":{ + "shape":"TranscriptContent", + "documentation":"

    The content of the transcript.

    " + }, + "BeginOffsetMillis":{ + "shape":"OffsetMillis", + "documentation":"

    The beginning offset in the contact for this transcript.

    " + }, + "EndOffsetMillis":{ + "shape":"OffsetMillis", + "documentation":"

    The end offset in the contact for this transcript.

    " + }, + "Sentiment":{ + "shape":"SentimentValue", + "documentation":"

    The sentiment detected for this piece of the transcript.

    " + }, + "IssuesDetected":{ + "shape":"IssuesDetected", + "documentation":"

    A list of positions where issues were detected in the transcript.

    " + } + }, + "documentation":"

    A list of messages in the session.

    " + }, + "TranscriptContent":{ + "type":"string", + "min":1, + "pattern":".*\\S.*" + }, + "TranscriptId":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + } + }, + "documentation":"

    Contact Lens for Amazon Connect enables you to analyze conversations between customers and agents by using speech transcription, natural language processing, and intelligent search capabilities. It performs sentiment analysis, detects issues, and enables you to automatically categorize contacts.

    Contact Lens for Amazon Connect provides both real-time and post-call analytics of customer-agent conversations. For more information, see Analyze conversations using Contact Lens in the Amazon Connect Administrator Guide.

    " +} diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml new file mode 100644 index 000000000000..fe3fa4eddc8d --- /dev/null +++ b/services/connectparticipant/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + connectparticipant + AWS Java SDK :: Services :: ConnectParticipant + The AWS Java SDK for ConnectParticipant module holds the client classes that are used for + communicating with ConnectParticipant. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.connectparticipant + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/connectparticipant/src/main/resources/codegen-resources/paginators-1.json b/services/connectparticipant/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..d37a1a26126f --- /dev/null +++ b/services/connectparticipant/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,9 @@ +{ + "pagination": { + "GetTranscript": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/connectparticipant/src/main/resources/codegen-resources/service-2.json b/services/connectparticipant/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..4a9707281586 --- /dev/null +++ b/services/connectparticipant/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,832 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-09-07", + "endpointPrefix":"participant.connect", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon Connect Participant", + "serviceFullName":"Amazon Connect Participant Service", + "serviceId":"ConnectParticipant", + "signatureVersion":"v4", + "signingName":"execute-api", + "uid":"connectparticipant-2018-09-07" + }, + "operations":{ + "CompleteAttachmentUpload":{ + "name":"CompleteAttachmentUpload", + "http":{ + "method":"POST", + "requestUri":"/participant/complete-attachment-upload" + }, + "input":{"shape":"CompleteAttachmentUploadRequest"}, + "output":{"shape":"CompleteAttachmentUploadResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in the StartAttachmentUpload API response.

    " + }, + "CreateParticipantConnection":{ + "name":"CreateParticipantConnection", + "http":{ + "method":"POST", + "requestUri":"/participant/connection" + }, + "input":{"shape":"CreateParticipantConnectionRequest"}, + "output":{"shape":"CreateParticipantConnectionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates the participant's connection. Note that ParticipantToken is used for invoking this API instead of ConnectionToken.

    The participant token is valid for the lifetime of the participant – until they are part of a contact.

    The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic.

    For chat, you need to publish the following on the established websocket connection:

    {\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}

    Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
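
    For illustration, a sketch of requesting a WEBSOCKET connection with the AWS SDK for Java v2. The generated class and member names are assumptions based on this model; after this call the client must still open the returned websocket URL itself and publish the aws/subscribe frame shown above:

        import software.amazon.awssdk.services.connectparticipant.ConnectParticipantClient;
        import software.amazon.awssdk.services.connectparticipant.model.ConnectionType;
        import software.amazon.awssdk.services.connectparticipant.model.CreateParticipantConnectionRequest;
        import software.amazon.awssdk.services.connectparticipant.model.CreateParticipantConnectionResponse;

        public class ParticipantConnectionSketch {
            public static void main(String[] args) {
                try (ConnectParticipantClient client = ConnectParticipantClient.create()) {
                    CreateParticipantConnectionResponse response = client.createParticipantConnection(
                            CreateParticipantConnectionRequest.builder()
                                    .type(ConnectionType.WEBSOCKET)
                                    .participantToken("participant-token-from-StartChatContact")   // placeholder
                                    .build());
                    // The websocket URL expires after roughly 100 seconds if unused; call this
                    // API again for a new URL, then subscribe to the aws/chat topic as above.
                    System.out.println(response.websocket());
                }
            }
        }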

    " + }, + "DisconnectParticipant":{ + "name":"DisconnectParticipant", + "http":{ + "method":"POST", + "requestUri":"/participant/disconnect" + }, + "input":{"shape":"DisconnectParticipantRequest"}, + "output":{"shape":"DisconnectParticipantResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Disconnects a participant. Note that ConnectionToken is used for invoking this API instead of ParticipantToken.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

    " + }, + "GetAttachment":{ + "name":"GetAttachment", + "http":{ + "method":"POST", + "requestUri":"/participant/attachment" + }, + "input":{"shape":"GetAttachmentRequest"}, + "output":{"shape":"GetAttachmentResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts.

    " + }, + "GetTranscript":{ + "name":"GetTranscript", + "http":{ + "method":"POST", + "requestUri":"/participant/transcript" + }, + "input":{"shape":"GetTranscriptRequest"}, + "output":{"shape":"GetTranscriptResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Retrieves a transcript of the session, including details about any attachments. Note that ConnectionToken is used for invoking this API instead of ParticipantToken.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

    " + }, + "SendEvent":{ + "name":"SendEvent", + "http":{ + "method":"POST", + "requestUri":"/participant/event" + }, + "input":{"shape":"SendEventRequest"}, + "output":{"shape":"SendEventResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Sends an event. Note that ConnectionToken is used for invoking this API instead of ParticipantToken.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

    " + }, + "SendMessage":{ + "name":"SendMessage", + "http":{ + "method":"POST", + "requestUri":"/participant/message" + }, + "input":{"shape":"SendMessageRequest"}, + "output":{"shape":"SendMessageResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Sends a message. Note that ConnectionToken is used for invoking this API instead of ParticipantToken.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

    " + }, + "StartAttachmentUpload":{ + "name":"StartAttachmentUpload", + "http":{ + "method":"POST", + "requestUri":"/participant/start-attachment-upload" + }, + "input":{"shape":"StartAttachmentUploadRequest"}, + "output":{"shape":"StartAttachmentUploadResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Provides a pre-signed Amazon S3 URL in the response for uploading the file directly to S3.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "ArtifactId":{ + "type":"string", + "max":256, + "min":1 + }, + "ArtifactStatus":{ + "type":"string", + "enum":[ + "APPROVED", + "REJECTED", + "IN_PROGRESS" + ] + }, + "AttachmentIdList":{ + "type":"list", + "member":{"shape":"ArtifactId"}, + "max":1, + "min":1 + }, + "AttachmentItem":{ + "type":"structure", + "members":{ + "ContentType":{ + "shape":"ContentType", + "documentation":"

    Describes the MIME file type of the attachment. For a list of supported file types, see Feature specifications in the Amazon Connect Administrator Guide.

    " + }, + "AttachmentId":{ + "shape":"ArtifactId", + "documentation":"

    A unique identifier for the attachment.

    " + }, + "AttachmentName":{ + "shape":"AttachmentName", + "documentation":"

    A case-sensitive name of the attachment being uploaded.

    " + }, + "Status":{ + "shape":"ArtifactStatus", + "documentation":"

    Status of the attachment.

    " + } + }, + "documentation":"

    The case-insensitive input to indicate the standard MIME type that describes the format of the file that will be uploaded.

    " + }, + "AttachmentName":{ + "type":"string", + "max":256, + "min":1 + }, + "AttachmentSizeInBytes":{ + "type":"long", + "min":1 + }, + "Attachments":{ + "type":"list", + "member":{"shape":"AttachmentItem"} + }, + "ChatContent":{ + "type":"string", + "max":1024, + "min":1 + }, + "ChatContentType":{ + "type":"string", + "max":100, + "min":1 + }, + "ChatItemId":{ + "type":"string", + "max":256, + "min":1 + }, + "ChatItemType":{ + "type":"string", + "enum":[ + "TYPING", + "PARTICIPANT_JOINED", + "PARTICIPANT_LEFT", + "CHAT_ENDED", + "TRANSFER_SUCCEEDED", + "TRANSFER_FAILED", + "MESSAGE", + "EVENT", + "ATTACHMENT", + "CONNECTION_ACK" + ] + }, + "ClientToken":{ + "type":"string", + "max":500 + }, + "CompleteAttachmentUploadRequest":{ + "type":"structure", + "required":[ + "AttachmentIds", + "ClientToken", + "ConnectionToken" + ], + "members":{ + "AttachmentIds":{ + "shape":"AttachmentIdList", + "documentation":"

    A list of unique identifiers for the attachments.

    " + }, + "ClientToken":{ + "shape":"NonEmptyClientToken", + "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

    The authentication token associated with the participant's connection.

    ", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "CompleteAttachmentUploadResponse":{ + "type":"structure", + "members":{ + } + }, + "ConflictException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Reason"} + }, + "documentation":"

    An attachment with that identifier is already being uploaded.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ConnectionCredentials":{ + "type":"structure", + "members":{ + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

    The connection token.

    " + }, + "Expiry":{ + "shape":"ISO8601Datetime", + "documentation":"

    The expiration of the token.

    It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.

    " + } + }, + "documentation":"

    Connection credentials.

    " + }, + "ConnectionType":{ + "type":"string", + "enum":[ + "WEBSOCKET", + "CONNECTION_CREDENTIALS" + ] + }, + "ConnectionTypeList":{ + "type":"list", + "member":{"shape":"ConnectionType"}, + "min":1 + }, + "ContactId":{ + "type":"string", + "max":256, + "min":1 + }, + "ContentType":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateParticipantConnectionRequest":{ + "type":"structure", + "required":[ + "Type", + "ParticipantToken" + ], + "members":{ + "Type":{ + "shape":"ConnectionTypeList", + "documentation":"

    Type of connection information required.

    " + }, + "ParticipantToken":{ + "shape":"ParticipantToken", + "documentation":"

    This is a header parameter.

    The Participant Token, as obtained from the StartChatContact API response.

    ", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "CreateParticipantConnectionResponse":{ + "type":"structure", + "members":{ + "Websocket":{ + "shape":"Websocket", + "documentation":"

    Creates the participant's websocket connection.

    " + }, + "ConnectionCredentials":{ + "shape":"ConnectionCredentials", + "documentation":"

    Creates the participant's connection credentials. The authentication token associated with the participant's connection.
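    As a sketch only (not an official sample), a caller might request both connection types with the generated Java v2 client; the participant token is a placeholder obtained from the Amazon Connect StartChatContact API.

    import software.amazon.awssdk.services.connectparticipant.ConnectParticipantClient;
    import software.amazon.awssdk.services.connectparticipant.model.ConnectionType;
    import software.amazon.awssdk.services.connectparticipant.model.CreateParticipantConnectionRequest;
    import software.amazon.awssdk.services.connectparticipant.model.CreateParticipantConnectionResponse;

    public class CreateConnectionSketch {
        public static void main(String[] args) {
            ConnectParticipantClient client = ConnectParticipantClient.create();
            CreateParticipantConnectionResponse conn = client.createParticipantConnection(
                CreateParticipantConnectionRequest.builder()
                    .type(ConnectionType.WEBSOCKET, ConnectionType.CONNECTION_CREDENTIALS)
                    .participantToken("<participant-token>") // placeholder from StartChatContact
                    .build());
            System.out.println("Websocket URL:    " + conn.websocket().url());
            System.out.println("Connection token: " + conn.connectionCredentials().connectionToken());
        }
    }

    The connection token returned here is what the other participant operations expect in the X-Amz-Bearer header.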

    " + } + } + }, + "DisconnectParticipantRequest":{ + "type":"structure", + "required":["ConnectionToken"], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

    The authentication token associated with the participant's connection.

    ", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "DisconnectParticipantResponse":{ + "type":"structure", + "members":{ + } + }, + "DisplayName":{ + "type":"string", + "max":256, + "min":1 + }, + "GetAttachmentRequest":{ + "type":"structure", + "required":[ + "AttachmentId", + "ConnectionToken" + ], + "members":{ + "AttachmentId":{ + "shape":"ArtifactId", + "documentation":"

    A unique identifier for the attachment.

    " + }, + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

    The authentication token associated with the participant's connection.

    ", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "GetAttachmentResponse":{ + "type":"structure", + "members":{ + "Url":{ + "shape":"PreSignedAttachmentUrl", + "documentation":"

    The pre-signed URL that the API caller uses to download the file from Amazon S3.

    " + }, + "UrlExpiry":{ + "shape":"ISO8601Datetime", + "documentation":"

    The expiration time of the URL. It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.
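    A minimal sketch, assuming the generated Java v2 client and placeholder identifiers, of fetching the pre-signed download URL; the actual download is then done with any HTTP client before the URL expires.

    import software.amazon.awssdk.services.connectparticipant.ConnectParticipantClient;
    import software.amazon.awssdk.services.connectparticipant.model.GetAttachmentRequest;
    import software.amazon.awssdk.services.connectparticipant.model.GetAttachmentResponse;

    public class GetAttachmentSketch {
        // Returns the pre-signed download URL for an attachment; both arguments are placeholders.
        static String downloadUrl(ConnectParticipantClient client, String attachmentId, String connectionToken) {
            GetAttachmentResponse response = client.getAttachment(GetAttachmentRequest.builder()
                .attachmentId(attachmentId)
                .connectionToken(connectionToken)
                .build());
            // Download from response.url() before response.urlExpiry().
            return response.url();
        }
    }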

    " + } + } + }, + "GetTranscriptRequest":{ + "type":"structure", + "required":["ConnectionToken"], + "members":{ + "ContactId":{ + "shape":"ContactId", + "documentation":"

    The contactId from the current contact chain for which the transcript is needed.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in the page. Default: 10.

    ", + "box":true + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token. Use the value returned previously in the next request to retrieve the next set of results.

    " + }, + "ScanDirection":{ + "shape":"ScanDirection", + "documentation":"

    The direction from StartPosition in which to retrieve messages. Default: BACKWARD when no StartPosition is provided; FORWARD when a StartPosition is provided.

    " + }, + "SortOrder":{ + "shape":"SortKey", + "documentation":"

    The sort order for the records. Default: DESCENDING.

    " + }, + "StartPosition":{ + "shape":"StartPosition", + "documentation":"

    A filtering option for where to start.

    " + }, + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

    The authentication token associated with the participant's connection.

    ", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "GetTranscriptResponse":{ + "type":"structure", + "members":{ + "InitialContactId":{ + "shape":"ContactId", + "documentation":"

    The initial contact ID for the contact.

    " + }, + "Transcript":{ + "shape":"Transcript", + "documentation":"

    The list of messages in the session.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token. Use the value returned previously in the next request to retrieve the next set of results.
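    A hedged sketch of paging through the transcript until no token is returned, assuming the generated Java v2 client and a connection token from CreateParticipantConnection.

    import software.amazon.awssdk.services.connectparticipant.ConnectParticipantClient;
    import software.amazon.awssdk.services.connectparticipant.model.GetTranscriptRequest;
    import software.amazon.awssdk.services.connectparticipant.model.GetTranscriptResponse;

    public class TranscriptPager {
        // Pages through the transcript until no pagination token is returned.
        static void printTranscript(ConnectParticipantClient client, String connectionToken) {
            String nextToken = null;
            do {
                GetTranscriptResponse page = client.getTranscript(GetTranscriptRequest.builder()
                    .connectionToken(connectionToken)
                    .maxResults(10)          // the default page size per this model
                    .nextToken(nextToken)    // null on the first request
                    .build());
                page.transcript().forEach(item ->
                    System.out.println(item.participantRole() + ": " + item.content()));
                nextToken = page.nextToken();
            } while (nextToken != null && !nextToken.isEmpty());
        }
    }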

    " + } + } + }, + "ISO8601Datetime":{"type":"string"}, + "Instant":{ + "type":"string", + "max":100, + "min":1 + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    This exception occurs when there is an internal failure in the Amazon Connect service.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "Item":{ + "type":"structure", + "members":{ + "AbsoluteTime":{ + "shape":"Instant", + "documentation":"

    The time when the message or event was sent.

    It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.

    " + }, + "Content":{ + "shape":"ChatContent", + "documentation":"

    The content of the message or event.

    " + }, + "ContentType":{ + "shape":"ChatContentType", + "documentation":"

    The type of content of the item.

    " + }, + "Id":{ + "shape":"ChatItemId", + "documentation":"

    The ID of the item.

    " + }, + "Type":{ + "shape":"ChatItemType", + "documentation":"

    Type of the item: message or event.

    " + }, + "ParticipantId":{ + "shape":"ParticipantId", + "documentation":"

    The ID of the sender in the session.

    " + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

    The chat display name of the sender.

    " + }, + "ParticipantRole":{ + "shape":"ParticipantRole", + "documentation":"

    The role of the sender: for example, a customer, agent, or system.

    " + }, + "Attachments":{ + "shape":"Attachments", + "documentation":"

    Provides information about the attachments.

    " + } + }, + "documentation":"

    An item - message or event - that has been sent.

    " + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":0 + }, + "Message":{"type":"string"}, + "MostRecent":{ + "type":"integer", + "max":100, + "min":0 + }, + "NextToken":{ + "type":"string", + "max":1000, + "min":1 + }, + "NonEmptyClientToken":{ + "type":"string", + "max":500, + "min":1 + }, + "ParticipantId":{ + "type":"string", + "max":256, + "min":1 + }, + "ParticipantRole":{ + "type":"string", + "enum":[ + "AGENT", + "CUSTOMER", + "SYSTEM" + ] + }, + "ParticipantToken":{ + "type":"string", + "max":1000, + "min":1 + }, + "PreSignedAttachmentUrl":{ + "type":"string", + "max":2000, + "min":1 + }, + "PreSignedConnectionUrl":{ + "type":"string", + "max":2000, + "min":1 + }, + "Reason":{ + "type":"string", + "max":2000, + "min":1 + }, + "ScanDirection":{ + "type":"string", + "enum":[ + "FORWARD", + "BACKWARD" + ] + }, + "SendEventRequest":{ + "type":"structure", + "required":[ + "ContentType", + "ConnectionToken" + ], + "members":{ + "ContentType":{ + "shape":"ChatContentType", + "documentation":"

    The content type of the request. Supported types are:

    • application/vnd.amazonaws.connect.event.typing

    • application/vnd.amazonaws.connect.event.connection.acknowledged
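    For example, a participant application might emit the typing event as in the sketch below; this is an illustrative fragment assuming the generated Java v2 client, not an official sample.

    import software.amazon.awssdk.services.connectparticipant.ConnectParticipantClient;
    import software.amazon.awssdk.services.connectparticipant.model.SendEventRequest;

    public class TypingEventSketch {
        // Emits a "participant is typing" event on an existing connection (token is a placeholder).
        static void sendTyping(ConnectParticipantClient client, String connectionToken) {
            client.sendEvent(SendEventRequest.builder()
                .contentType("application/vnd.amazonaws.connect.event.typing")
                .connectionToken(connectionToken)
                .build());
        }
    }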

    " + }, + "Content":{ + "shape":"ChatContent", + "documentation":"

    The content of the event to be sent (for example, message text). This is not yet supported.

    " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

    The authentication token associated with the participant's connection.

    ", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "SendEventResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ChatItemId", + "documentation":"

    The ID of the response.

    " + }, + "AbsoluteTime":{ + "shape":"Instant", + "documentation":"

    The time when the event was sent.

    It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.

    " + } + } + }, + "SendMessageRequest":{ + "type":"structure", + "required":[ + "ContentType", + "Content", + "ConnectionToken" + ], + "members":{ + "ContentType":{ + "shape":"ChatContentType", + "documentation":"

    The type of the content. The supported type is text/plain.
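    A minimal sketch of sending a plain-text message, assuming the generated Java v2 client and a connection token obtained earlier; the message text is a placeholder.

    import software.amazon.awssdk.services.connectparticipant.ConnectParticipantClient;
    import software.amazon.awssdk.services.connectparticipant.model.SendMessageRequest;
    import software.amazon.awssdk.services.connectparticipant.model.SendMessageResponse;

    public class SendMessageSketch {
        static void send(ConnectParticipantClient client, String connectionToken) {
            SendMessageResponse sent = client.sendMessage(SendMessageRequest.builder()
                .contentType("text/plain")
                .content("Hello from the participant")    // placeholder message body
                .connectionToken(connectionToken)
                .build());
            System.out.println("Message ID: " + sent.id() + " at " + sent.absoluteTime());
        }
    }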

    " + }, + "Content":{ + "shape":"ChatContent", + "documentation":"

    The content of the message.

    " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

    The authentication token associated with the connection.

    ", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "SendMessageResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ChatItemId", + "documentation":"

    The ID of the message.

    " + }, + "AbsoluteTime":{ + "shape":"Instant", + "documentation":"

    The time when the message was sent.

    It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.

    " + } + } + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The number of attachments per contact exceeds the quota.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "SortKey":{ + "type":"string", + "enum":[ + "DESCENDING", + "ASCENDING" + ] + }, + "StartAttachmentUploadRequest":{ + "type":"structure", + "required":[ + "ContentType", + "AttachmentSizeInBytes", + "AttachmentName", + "ClientToken", + "ConnectionToken" + ], + "members":{ + "ContentType":{ + "shape":"ContentType", + "documentation":"

    Describes the MIME file type of the attachment. For a list of supported file types, see Feature specifications in the Amazon Connect Administrator Guide.

    " + }, + "AttachmentSizeInBytes":{ + "shape":"AttachmentSizeInBytes", + "documentation":"

    The size of the attachment in bytes.

    " + }, + "AttachmentName":{ + "shape":"AttachmentName", + "documentation":"

    A case-sensitive name of the attachment being uploaded.

    " + }, + "ClientToken":{ + "shape":"NonEmptyClientToken", + "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

    The authentication token associated with the participant's connection.

    ", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "StartAttachmentUploadResponse":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"ArtifactId", + "documentation":"

    A unique identifier for the attachment.

    " + }, + "UploadMetadata":{ + "shape":"UploadMetadata", + "documentation":"

    Fields to be used while uploading the attachment.

    " + } + } + }, + "StartPosition":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ChatItemId", + "documentation":"

    The ID of the message or event at which to start.

    " + }, + "AbsoluteTime":{ + "shape":"Instant", + "documentation":"

    The time, in ISO 8601 format, at which to start.

    It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.

    " + }, + "MostRecent":{ + "shape":"MostRecent", + "documentation":"

    The start position, counted back from the most recent message, at which you want to start.

    " + } + }, + "documentation":"

    A filtering option for where to start. For example, if you sent 100 messages, start with message 50.

    " + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Transcript":{ + "type":"list", + "member":{"shape":"Item"} + }, + "UploadMetadata":{ + "type":"structure", + "members":{ + "Url":{ + "shape":"UploadMetadataUrl", + "documentation":"

    The pre-signed URL that the API caller uses to upload the file to Amazon S3.

    " + }, + "UrlExpiry":{ + "shape":"ISO8601Datetime", + "documentation":"

    The expiration time of the URL. It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.

    " + }, + "HeadersToInclude":{ + "shape":"UploadMetadataSignedHeaders", + "documentation":"

    The headers to be provided while uploading the file to the URL.
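    As an illustrative sketch only: an HTTP PUT of the local file to the pre-signed URL with the signed headers attached, using java.net.http (Java 11+ assumed). Skipping Content-Length is an assumption to accommodate clients that compute it themselves; the header names actually returned come from the service.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;
    import java.nio.file.Path;
    import software.amazon.awssdk.services.connectparticipant.model.UploadMetadata;

    public class AttachmentUploader {
        // PUTs the local file to the pre-signed URL, attaching the signed headers.
        static int upload(UploadMetadata metadata, Path file) throws Exception {
            HttpRequest.Builder put = HttpRequest.newBuilder(URI.create(metadata.url()))
                .PUT(HttpRequest.BodyPublishers.ofFile(file));
            metadata.headersToInclude().forEach((name, value) -> {
                if (!name.equalsIgnoreCase("Content-Length")) { // the HTTP client computes this itself
                    put.header(name, value);
                }
            });
            HttpResponse<Void> response = HttpClient.newHttpClient()
                .send(put.build(), HttpResponse.BodyHandlers.discarding());
            return response.statusCode();
        }
    }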

    " + } + }, + "documentation":"

    Fields to be used while uploading the attachment.

    " + }, + "UploadMetadataSignedHeaders":{ + "type":"map", + "key":{"shape":"UploadMetadataSignedHeadersKey"}, + "value":{"shape":"UploadMetadataSignedHeadersValue"} + }, + "UploadMetadataSignedHeadersKey":{ + "type":"string", + "max":128, + "min":1 + }, + "UploadMetadataSignedHeadersValue":{ + "type":"string", + "max":256, + "min":1 + }, + "UploadMetadataUrl":{ + "type":"string", + "max":2000, + "min":1 + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Reason"} + }, + "documentation":"

    The input fails to satisfy the constraints specified by Amazon Connect.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Websocket":{ + "type":"structure", + "members":{ + "Url":{ + "shape":"PreSignedConnectionUrl", + "documentation":"

    The URL of the websocket.

    " + }, + "ConnectionExpiry":{ + "shape":"ISO8601Datetime", + "documentation":"

    The URL expiration timestamp in ISO date format.

    It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.

    " + } + }, + "documentation":"

    The websocket for the participant's connection.

    " + } + }, + "documentation":"

    Amazon Connect is a cloud-based contact center solution that makes it easy to set up and manage a customer contact center and provide reliable customer engagement at any scale.

    Amazon Connect enables customer contacts through voice or chat.

    The APIs described here are used by chat participants, such as agents and customers.

    " +} diff --git a/services/costandusagereport/build.properties b/services/costandusagereport/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/costandusagereport/build.properties +++ b/services/costandusagereport/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index a1e02cdb0c4e..e3b73b8c8265 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + customerprofiles + AWS Java SDK :: Services :: Customer Profiles + The AWS Java SDK for Customer Profiles module holds the client classes that are used for + communicating with Customer Profiles. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.customerprofiles + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json b/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..83ca3c5325a6 --- /dev/null +++ b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2448 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-08-15", + "endpointPrefix":"profile", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Customer Profiles", + "serviceFullName":"Amazon Connect Customer Profiles", + "serviceId":"Customer Profiles", + "signatureVersion":"v4", + "signingName":"profile", + "uid":"customer-profiles-2020-08-15" + }, + "operations":{ + "AddProfileKey":{ + "name":"AddProfileKey", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/keys" + }, + "input":{"shape":"AddProfileKeyRequest"}, + "output":{"shape":"AddProfileKeyResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Associates a new key value with a specific profile, such as a Contact Trace Record (CTR) ContactId.

    A profile object can have a single unique key and any number of additional keys that can be used to identify the profile that it belongs to.
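    A minimal sketch, assuming the generated AWS SDK for Java v2 client; the "_contactId" key name and all argument values are hypothetical placeholders.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.AddProfileKeyRequest;

    public class AddProfileKeySketch {
        // Associates a hypothetical "_contactId" key with an existing profile.
        static void addContactIdKey(CustomerProfilesClient profiles, String domainName,
                                    String profileId, String contactId) {
            profiles.addProfileKey(AddProfileKeyRequest.builder()
                .domainName(domainName)
                .profileId(profileId)
                .keyName("_contactId")   // hypothetical key name; must match ^[a-zA-Z0-9_-]+$
                .values(contactId)
                .build());
        }
    }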

    " + }, + "CreateDomain":{ + "name":"CreateDomain", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}" + }, + "input":{"shape":"CreateDomainRequest"}, + "output":{"shape":"CreateDomainResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

    Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.
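    For illustration, a sketch of creating a domain with the generated Java v2 client; the domain name and expiration value are placeholders.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.CreateDomainRequest;
    import software.amazon.awssdk.services.customerprofiles.model.CreateDomainResponse;

    public class CreateDomainSketch {
        public static void main(String[] args) {
            CustomerProfilesClient profiles = CustomerProfilesClient.create();
            CreateDomainResponse domain = profiles.createDomain(CreateDomainRequest.builder()
                .domainName("example-domain")     // hypothetical domain name
                .defaultExpirationDays(366)       // days until profile data expires
                .build());
            System.out.println("Created " + domain.domainName() + " at " + domain.createdAt());
        }
    }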

    " + }, + "CreateProfile":{ + "name":"CreateProfile", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles" + }, + "input":{"shape":"CreateProfileRequest"}, + "output":{"shape":"CreateProfileResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a standard profile.

    A standard profile represents the standard set of attributes for a customer profile in a domain.
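    A hedged sketch of creating a profile with a few of the standard attributes, assuming the generated Java v2 client; all attribute values are placeholders.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.Address;
    import software.amazon.awssdk.services.customerprofiles.model.CreateProfileRequest;

    public class CreateProfileSketch {
        // Creates a standard profile and returns its generated ProfileId.
        static String createProfile(CustomerProfilesClient profiles, String domainName) {
            return profiles.createProfile(CreateProfileRequest.builder()
                .domainName(domainName)
                .firstName("Jane")
                .lastName("Doe")
                .phoneNumber("+15555550100")
                .address(Address.builder().city("Seattle").country("USA").build())
                .build())
                .profileId();
        }
    }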

    " + }, + "DeleteDomain":{ + "name":"DeleteDomain", + "http":{ + "method":"DELETE", + "requestUri":"/domains/{DomainName}" + }, + "input":{"shape":"DeleteDomainRequest"}, + "output":{"shape":"DeleteDomainResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes a specific domain and all of its customer data, such as customer profile attributes and their related objects.

    " + }, + "DeleteIntegration":{ + "name":"DeleteIntegration", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/integrations/delete" + }, + "input":{"shape":"DeleteIntegrationRequest"}, + "output":{"shape":"DeleteIntegrationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes an integration from a specific domain.

    " + }, + "DeleteProfile":{ + "name":"DeleteProfile", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/delete" + }, + "input":{"shape":"DeleteProfileRequest"}, + "output":{"shape":"DeleteProfileResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the standard customer profile and all data pertaining to the profile.

    " + }, + "DeleteProfileKey":{ + "name":"DeleteProfileKey", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/keys/delete" + }, + "input":{"shape":"DeleteProfileKeyRequest"}, + "output":{"shape":"DeleteProfileKeyResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes a searchable key from a customer profile.

    " + }, + "DeleteProfileObject":{ + "name":"DeleteProfileObject", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/objects/delete" + }, + "input":{"shape":"DeleteProfileObjectRequest"}, + "output":{"shape":"DeleteProfileObjectResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes an object associated with a profile of a given ProfileObjectType.

    " + }, + "DeleteProfileObjectType":{ + "name":"DeleteProfileObjectType", + "http":{ + "method":"DELETE", + "requestUri":"/domains/{DomainName}/object-types/{ObjectTypeName}" + }, + "input":{"shape":"DeleteProfileObjectTypeRequest"}, + "output":{"shape":"DeleteProfileObjectTypeResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes a ProfileObjectType from a specific domain, as well as all the ProfileObjects of that type. It also disables integrations from this specific ProfileObjectType. In addition, it scrubs all of the fields of the standard profile that were populated from this ProfileObjectType.

    " + }, + "GetDomain":{ + "name":"GetDomain", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}" + }, + "input":{"shape":"GetDomainRequest"}, + "output":{"shape":"GetDomainResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns information about a specific domain.

    " + }, + "GetIntegration":{ + "name":"GetIntegration", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/integrations" + }, + "input":{"shape":"GetIntegrationRequest"}, + "output":{"shape":"GetIntegrationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns an integration for a domain.

    " + }, + "GetProfileObjectType":{ + "name":"GetProfileObjectType", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/object-types/{ObjectTypeName}" + }, + "input":{"shape":"GetProfileObjectTypeRequest"}, + "output":{"shape":"GetProfileObjectTypeResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns the object type for a specific domain.

    " + }, + "GetProfileObjectTypeTemplate":{ + "name":"GetProfileObjectTypeTemplate", + "http":{ + "method":"GET", + "requestUri":"/templates/{TemplateId}" + }, + "input":{"shape":"GetProfileObjectTypeTemplateRequest"}, + "output":{"shape":"GetProfileObjectTypeTemplateResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns the template information for a specific object type.

    A template is a predefined ProfileObjectType, such as “Salesforce-Account” or “Salesforce-Contact.” When a user sends a ProfileObject, using the PutProfileObject API, with an ObjectTypeName that matches one of the TemplateIds, it uses the mappings from the template.

    " + }, + "ListAccountIntegrations":{ + "name":"ListAccountIntegrations", + "http":{ + "method":"POST", + "requestUri":"/integrations" + }, + "input":{"shape":"ListAccountIntegrationsRequest"}, + "output":{"shape":"ListAccountIntegrationsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all of the integrations associated with a specific URI in the AWS account.

    " + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"GET", + "requestUri":"/domains" + }, + "input":{"shape":"ListDomainsRequest"}, + "output":{"shape":"ListDomainsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of all the domains that have been created for an AWS account.

    " + }, + "ListIntegrations":{ + "name":"ListIntegrations", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/integrations" + }, + "input":{"shape":"ListIntegrationsRequest"}, + "output":{"shape":"ListIntegrationsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all of the integrations in your domain.

    " + }, + "ListProfileObjectTypeTemplates":{ + "name":"ListProfileObjectTypeTemplates", + "http":{ + "method":"GET", + "requestUri":"/templates" + }, + "input":{"shape":"ListProfileObjectTypeTemplatesRequest"}, + "output":{"shape":"ListProfileObjectTypeTemplatesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all of the template information for object types.

    " + }, + "ListProfileObjectTypes":{ + "name":"ListProfileObjectTypes", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/object-types" + }, + "input":{"shape":"ListProfileObjectTypesRequest"}, + "output":{"shape":"ListProfileObjectTypesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all of the object types for a specific domain.

    " + }, + "ListProfileObjects":{ + "name":"ListProfileObjects", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/objects" + }, + "input":{"shape":"ListProfileObjectsRequest"}, + "output":{"shape":"ListProfileObjectsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a list of objects associated with a profile of a given ProfileObjectType.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Displays the tags associated with an Amazon Connect Customer Profiles resource. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.

    " + }, + "PutIntegration":{ + "name":"PutIntegration", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/integrations" + }, + "input":{"shape":"PutIntegrationRequest"}, + "output":{"shape":"PutIntegrationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Adds an integration between the service and a third-party service, which includes Amazon AppFlow and Amazon Connect.

    An integration can belong to only one domain.

    " + }, + "PutProfileObject":{ + "name":"PutProfileObject", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/profiles/objects" + }, + "input":{"shape":"PutProfileObjectRequest"}, + "output":{"shape":"PutProfileObjectResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Adds additional objects to customer profiles of a given ObjectType.

    When adding a specific profile object, like a Contact Trace Record (CTR), an inferred profile can be created if it is not mapped to an existing profile. The resulting profile will only have a phone number populated in the standard ProfileObject. Any additional CTRs with the same phone number will be mapped to the same inferred profile.

    When a ProfileObject is created, if a ProfileObjectType already exists for that ProfileObject, it provides data to a standard profile according to the ProfileObjectType definition.

    PutProfileObject needs an ObjectType, which can be created using PutProfileObjectType.
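    A hypothetical sketch only: the request members used here (the domain name, the object type name, and the serialized object payload) and the "CTR" type name are assumptions, and the object type must already have been defined with PutProfileObjectType.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.PutProfileObjectRequest;

    public class PutProfileObjectSketch {
        static void ingestCtr(CustomerProfilesClient profiles, String domainName) {
            profiles.putProfileObject(PutProfileObjectRequest.builder()
                .domainName(domainName)
                .objectTypeName("CTR")                                                   // assumed, pre-created type
                .object("{\"ContactId\":\"abc-123\",\"PhoneNumber\":\"+15555550100\"}")  // assumed payload
                .build());
        }
    }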

    " + }, + "PutProfileObjectType":{ + "name":"PutProfileObjectType", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/object-types/{ObjectTypeName}" + }, + "input":{"shape":"PutProfileObjectTypeRequest"}, + "output":{"shape":"PutProfileObjectTypeResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Defines a ProfileObjectType.

    " + }, + "SearchProfiles":{ + "name":"SearchProfiles", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/search" + }, + "input":{"shape":"SearchProfilesRequest"}, + "output":{"shape":"SearchProfilesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Searches for profiles within a specific domain using a name, phone number, email address, account number, or a custom-defined index.
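    A hypothetical sketch of searching by phone number; the "_phone" key name, the request fields, and the response accessors are assumptions based on the SDK's standard code generation, not confirmed by this model excerpt.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.SearchProfilesRequest;
    import software.amazon.awssdk.services.customerprofiles.model.SearchProfilesResponse;

    public class SearchProfilesSketch {
        static void searchByPhone(CustomerProfilesClient profiles, String domainName) {
            SearchProfilesResponse matches = profiles.searchProfiles(SearchProfilesRequest.builder()
                .domainName(domainName)
                .keyName("_phone")            // assumed standard key name
                .values("+15555550100")       // placeholder phone number
                .build());
            matches.items().forEach(profile -> System.out.println(profile.profileId()));
        }
    }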

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Assigns one or more tags (key-value pairs) to the specified Amazon Connect Customer Profiles resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.

    Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

    You can use the TagResource action with a resource that already has tags. If you specify a new tag key, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

    You can associate as many as 50 tags with a resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes one or more tags from the specified Amazon Connect Customer Profiles resource. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.

    " + }, + "UpdateDomain":{ + "name":"UpdateDomain", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}" + }, + "input":{"shape":"UpdateDomainRequest"}, + "output":{"shape":"UpdateDomainResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.

    Once a domain is created, the name can’t be changed.

    " + }, + "UpdateProfile":{ + "name":"UpdateProfile", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/profiles" + }, + "input":{"shape":"UpdateProfileRequest"}, + "output":{"shape":"UpdateProfileResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the properties of a profile. The ProfileId is required for updating a customer profile.

    When calling the UpdateProfile API, specifying an empty string value means that any existing value will be removed. Not specifying a string value means that any value already there will be kept.

    " + } + }, + "shapes":{ + "name":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9_-]+$" + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"message"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AddProfileKeyRequest":{ + "type":"structure", + "required":[ + "ProfileId", + "KeyName", + "Values", + "DomainName" + ], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

    The unique identifier of a customer profile.

    " + }, + "KeyName":{ + "shape":"name", + "documentation":"

    A searchable identifier of a customer profile.

    " + }, + "Values":{ + "shape":"requestValueList", + "documentation":"

    A list of key values.

    " + }, + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "AddProfileKeyResponse":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"name", + "documentation":"

    A searchable identifier of a customer profile.

    " + }, + "Values":{ + "shape":"requestValueList", + "documentation":"

    A list of key values.

    " + } + } + }, + "Address":{ + "type":"structure", + "members":{ + "Address1":{ + "shape":"string1To255", + "documentation":"

    The first line of a customer address.

    " + }, + "Address2":{ + "shape":"string1To255", + "documentation":"

    The second line of a customer address.

    " + }, + "Address3":{ + "shape":"string1To255", + "documentation":"

    The third line of a customer address.

    " + }, + "Address4":{ + "shape":"string1To255", + "documentation":"

    The fourth line of a customer address.

    " + }, + "City":{ + "shape":"string1To255", + "documentation":"

    The city in which a customer lives.

    " + }, + "County":{ + "shape":"string1To255", + "documentation":"

    The county in which a customer lives.

    " + }, + "State":{ + "shape":"string1To255", + "documentation":"

    The state in which a customer lives.

    " + }, + "Province":{ + "shape":"string1To255", + "documentation":"

    The province in which a customer lives.

    " + }, + "Country":{ + "shape":"string1To255", + "documentation":"

    The country in which a customer lives.

    " + }, + "PostalCode":{ + "shape":"string1To255", + "documentation":"

    The postal code of a customer address.

    " + } + }, + "documentation":"

    A generic address associated with the customer that is not mailing, shipping, or billing.

    " + }, + "Attributes":{ + "type":"map", + "key":{"shape":"string1To255"}, + "value":{"shape":"string1To255"} + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"message"} + }, + "documentation":"

    The input you provided is invalid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateDomainRequest":{ + "type":"structure", + "required":[ + "DomainName", + "DefaultExpirationDays" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

    The default number of days until the data within the domain expires.

    " + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

    The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage.

    " + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

    The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications. You must set up a policy on the DeadLetterQueue for the SendMessage operation to enable Amazon Connect Customer Profiles to send messages to the DeadLetterQueue.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "CreateDomainResponse":{ + "type":"structure", + "required":[ + "DomainName", + "DefaultExpirationDays", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    " + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

    The default number of days until the data within the domain expires.

    " + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

    The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage.

    " + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

    The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was most recently edited.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "CreateProfileRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "AccountNumber":{ + "shape":"string1To255", + "documentation":"

    A unique account number that you have given to the customer.

    " + }, + "AdditionalInformation":{ + "shape":"string1To1000", + "documentation":"

    Any additional information relevant to the customer's profile.

    " + }, + "PartyType":{ + "shape":"PartyType", + "documentation":"

    The type of profile used to describe the customer.

    " + }, + "BusinessName":{ + "shape":"string1To255", + "documentation":"

    The name of the customer’s business.

    " + }, + "FirstName":{ + "shape":"string1To255", + "documentation":"

    The customer’s first name.

    " + }, + "MiddleName":{ + "shape":"string1To255", + "documentation":"

    The customer’s middle name.

    " + }, + "LastName":{ + "shape":"string1To255", + "documentation":"

    The customer’s last name.

    " + }, + "BirthDate":{ + "shape":"string1To255", + "documentation":"

    The customer’s birth date.

    " + }, + "Gender":{ + "shape":"Gender", + "documentation":"

    The gender with which the customer identifies.

    " + }, + "PhoneNumber":{ + "shape":"string1To255", + "documentation":"

    The customer's phone number, which has not been specified as a mobile, home, or business number.

    " + }, + "MobilePhoneNumber":{ + "shape":"string1To255", + "documentation":"

    The customer’s mobile phone number.

    " + }, + "HomePhoneNumber":{ + "shape":"string1To255", + "documentation":"

    The customer’s home phone number.

    " + }, + "BusinessPhoneNumber":{ + "shape":"string1To255", + "documentation":"

    The customer’s business phone number.

    " + }, + "EmailAddress":{ + "shape":"string1To255", + "documentation":"

    The customer's email address, which has not been specified as a personal or business address.

    " + }, + "PersonalEmailAddress":{ + "shape":"string1To255", + "documentation":"

    The customer’s personal email address.

    " + }, + "BusinessEmailAddress":{ + "shape":"string1To255", + "documentation":"

    The customer’s business email address.

    " + }, + "Address":{ + "shape":"Address", + "documentation":"

    A generic address associated with the customer that is not mailing, shipping, or billing.

    " + }, + "ShippingAddress":{ + "shape":"Address", + "documentation":"

    The customer’s shipping address.

    " + }, + "MailingAddress":{ + "shape":"Address", + "documentation":"

    The customer’s mailing address.

    " + }, + "BillingAddress":{ + "shape":"Address", + "documentation":"

    The customer’s billing address.

    " + }, + "Attributes":{ + "shape":"Attributes", + "documentation":"

    A key value pair of attributes of a customer profile.

    " + } + } + }, + "CreateProfileResponse":{ + "type":"structure", + "required":["ProfileId"], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

    The unique identifier of a customer profile.

    " + } + } + }, + "DeleteDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DeleteDomainResponse":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"message", + "documentation":"

    A message that indicates the delete request is done.

    " + } + } + }, + "DeleteIntegrationRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

    The URI of the S3 bucket or any other type of data source.

    " + } + } + }, + "DeleteIntegrationResponse":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"message", + "documentation":"

    A message that indicates the delete request is done.

    " + } + } + }, + "DeleteProfileKeyRequest":{ + "type":"structure", + "required":[ + "ProfileId", + "KeyName", + "Values", + "DomainName" + ], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

    The unique identifier of a customer profile.

    " + }, + "KeyName":{ + "shape":"name", + "documentation":"

    A searchable identifier of a customer profile.

    " + }, + "Values":{ + "shape":"requestValueList", + "documentation":"

    A list of key values.

    " + }, + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DeleteProfileKeyResponse":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"message", + "documentation":"

    A message that indicates the delete request is done.

    " + } + } + }, + "DeleteProfileObjectRequest":{ + "type":"structure", + "required":[ + "ProfileId", + "ProfileObjectUniqueKey", + "ObjectTypeName", + "DomainName" + ], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

    The unique identifier of a customer profile.

    " + }, + "ProfileObjectUniqueKey":{ + "shape":"string1To255", + "documentation":"

    The unique identifier of the profile object generated by the service.

    " + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    " + }, + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DeleteProfileObjectResponse":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"message", + "documentation":"

    A message that indicates the delete request is done.

    " + } + } + }, + "DeleteProfileObjectTypeRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ObjectTypeName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    ", + "location":"uri", + "locationName":"ObjectTypeName" + } + } + }, + "DeleteProfileObjectTypeResponse":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"message", + "documentation":"

    A message that indicates the delete request is done.

    " + } + } + }, + "DeleteProfileRequest":{ + "type":"structure", + "required":[ + "ProfileId", + "DomainName" + ], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

    The unique identifier of a customer profile.

    " + }, + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DeleteProfileResponse":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"message", + "documentation":"

    A message that indicates the delete request is done.

    " + } + } + }, + "DomainList":{ + "type":"list", + "member":{"shape":"ListDomainItem"} + }, + "DomainStats":{ + "type":"structure", + "members":{ + "ProfileCount":{ + "shape":"long", + "documentation":"

    The total number of profiles currently in the domain.

    " + }, + "MeteringProfileCount":{ + "shape":"long", + "documentation":"

    The number of profiles that you are currently paying for in the domain. If you have more than 100 objects associated with a single profile, that profile counts as two profiles. If you have more than 200 objects, that profile counts as three, and so on.

    " + }, + "ObjectCount":{ + "shape":"long", + "documentation":"

    The total number of objects in domain.

    " + }, + "TotalSize":{ + "shape":"long", + "documentation":"

    The total size, in bytes, of all objects in the domain.

    " + } + }, + "documentation":"

    Usage-specific statistics about the domain.

    " + }, + "FieldContentType":{ + "type":"string", + "enum":[ + "STRING", + "NUMBER", + "PHONE_NUMBER", + "EMAIL_ADDRESS", + "NAME" + ] + }, + "FieldMap":{ + "type":"map", + "key":{"shape":"name"}, + "value":{"shape":"ObjectTypeField"} + }, + "FieldNameList":{ + "type":"list", + "member":{"shape":"name"} + }, + "Gender":{ + "type":"string", + "enum":[ + "MALE", + "FEMALE", + "UNSPECIFIED" + ] + }, + "GetDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    A unique name for the domain.

    ", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "GetDomainResponse":{ + "type":"structure", + "required":[ + "DomainName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    " + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

    The default number of days until the data within the domain expires.

    " + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

    The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage.

    " + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

    The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications.

    " + }, + "Stats":{ + "shape":"DomainStats", + "documentation":"

    Usage-specific statistics about the domain.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was most recently edited.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "GetIntegrationRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

    The URI of the S3 bucket or any other type of data source.

    " + } + } + }, + "GetIntegrationResponse":{ + "type":"structure", + "required":[ + "DomainName", + "Uri", + "ObjectTypeName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    " + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

    The URI of the S3 bucket or any other type of data source.

    " + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was most recently edited.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "GetProfileObjectTypeRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ObjectTypeName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    ", + "location":"uri", + "locationName":"ObjectTypeName" + } + } + }, + "GetProfileObjectTypeResponse":{ + "type":"structure", + "required":[ + "ObjectTypeName", + "Description" + ], + "members":{ + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    " + }, + "Description":{ + "shape":"text", + "documentation":"

    The description of the profile object type.

    " + }, + "TemplateId":{ + "shape":"name", + "documentation":"

    A unique identifier for the object template.

    " + }, + "ExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

    The number of days until the data in the object expires.

    " + }, + "EncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

    The customer-provided key to encrypt the profile object that will be created in this profile object type.

    " + }, + "AllowProfileCreation":{ + "shape":"boolean", + "documentation":"

    Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

    " + }, + "Fields":{ + "shape":"FieldMap", + "documentation":"

    A map of the name and ObjectType field.

    " + }, + "Keys":{ + "shape":"KeyMap", + "documentation":"

    A list of unique keys that can be used to map data to the profile.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was most recently edited.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "GetProfileObjectTypeTemplateRequest":{ + "type":"structure", + "required":["TemplateId"], + "members":{ + "TemplateId":{ + "shape":"name", + "documentation":"

    A unique identifier for the object template.

    ", + "location":"uri", + "locationName":"TemplateId" + } + } + }, + "GetProfileObjectTypeTemplateResponse":{ + "type":"structure", + "members":{ + "TemplateId":{ + "shape":"name", + "documentation":"

    A unique identifier for the object template.

    " + }, + "SourceName":{ + "shape":"name", + "documentation":"

    The name of the source of the object template.

    " + }, + "SourceObject":{ + "shape":"name", + "documentation":"

    The source of the object template.

    " + }, + "AllowProfileCreation":{ + "shape":"boolean", + "documentation":"

    Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

    " + }, + "Fields":{ + "shape":"FieldMap", + "documentation":"

    A map of the name and ObjectType field.

    " + }, + "Keys":{ + "shape":"KeyMap", + "documentation":"

    A list of unique keys that can be used to map data to the profile.

    " + } + } + }, + "IntegrationList":{ + "type":"list", + "member":{"shape":"ListIntegrationItem"} + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"message"} + }, + "documentation":"

    An internal service error occurred.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "KeyMap":{ + "type":"map", + "key":{"shape":"name"}, + "value":{"shape":"ObjectTypeKeyList"} + }, + "ListAccountIntegrationsRequest":{ + "type":"structure", + "required":["Uri"], + "members":{ + "Uri":{ + "shape":"string1To255", + "documentation":"

    The URI of the S3 bucket or any other type of data source.

    " + }, + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous ListAccountIntegrations API call.

    ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

    The maximum number of objects returned per page.

    ", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListAccountIntegrationsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"IntegrationList", + "documentation":"

    The list of ListAccountIntegration instances.

    " + }, + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous ListAccountIntegrations API call.

    " + } + } + }, + "ListDomainItem":{ + "type":"structure", + "required":[ + "DomainName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was most recently edited.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + }, + "documentation":"

    An object in a list that represents a domain.

    " + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous ListDomains API call.

    ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

    The maximum number of objects returned per page.

    ", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDomainsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"DomainList", + "documentation":"

    The list of ListDomains instances.

    " + }, + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous ListDomains API call.

    " + } + } + }, + "ListIntegrationItem":{ + "type":"structure", + "required":[ + "DomainName", + "Uri", + "ObjectTypeName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    " + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

    The URI of the S3 bucket or any other type of data source.

    " + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was most recently edited.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + }, + "documentation":"

    An integration in list of integrations.

    " + }, + "ListIntegrationsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous ListIntegrations API call.

    ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

    The maximum number of objects returned per page.

    ", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListIntegrationsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"IntegrationList", + "documentation":"

    The list of ListIntegrations instances.

    " + }, + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous ListIntegrations API call.

    " + } + } + }, + "ListProfileObjectTypeItem":{ + "type":"structure", + "required":[ + "ObjectTypeName", + "Description" + ], + "members":{ + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    " + }, + "Description":{ + "shape":"text", + "documentation":"

    Description of the profile object type.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was most recently edited.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + }, + "documentation":"

    A ProfileObjectType instance.

    " + }, + "ListProfileObjectTypeTemplateItem":{ + "type":"structure", + "members":{ + "TemplateId":{ + "shape":"name", + "documentation":"

    A unique identifier for the object template.

    " + }, + "SourceName":{ + "shape":"name", + "documentation":"

    The name of the source of the object template.

    " + }, + "SourceObject":{ + "shape":"name", + "documentation":"

    The source of the object template.

    " + } + }, + "documentation":"

    A ProfileObjectTypeTemplate in a list of ProfileObjectTypeTemplates.

    " + }, + "ListProfileObjectTypeTemplatesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous ListObjectTypeTemplates API call.

    ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

    The maximum number of objects returned per page.

    ", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListProfileObjectTypeTemplatesResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ProfileObjectTypeTemplateList", + "documentation":"

    The list of ListProfileObjectType template instances.

    " + }, + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous ListObjectTypeTemplates API call.

    " + } + } + }, + "ListProfileObjectTypesRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "NextToken":{ + "shape":"token", + "documentation":"

    Identifies the next page of results to return.

    ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

    The maximum number of objects returned per page.

    ", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListProfileObjectTypesResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ProfileObjectTypeList", + "documentation":"

    The list of ListProfileObjectTypes instances.

    " + }, + "NextToken":{ + "shape":"token", + "documentation":"

    Identifies the next page of results to return.

    " + } + } + }, + "ListProfileObjectsItem":{ + "type":"structure", + "members":{ + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    Specifies the kind of object being added to a profile, such as \"Salesforce-Account.\"

    " + }, + "ProfileObjectUniqueKey":{ + "shape":"string1To255", + "documentation":"

    The unique identifier of the ProfileObject generated by the service.

    " + }, + "Object":{ + "shape":"stringifiedJson", + "documentation":"

    A JSON representation of a ProfileObject that belongs to a profile.

    " + } + }, + "documentation":"

    A ProfileObject in a list of ProfileObjects.

    " + }, + "ListProfileObjectsRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ObjectTypeName", + "ProfileId" + ], + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous call to ListProfileObjects.

    ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

    The maximum number of objects returned per page.

    ", + "location":"querystring", + "locationName":"max-results" + }, + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    " + }, + "ProfileId":{ + "shape":"uuid", + "documentation":"

    The unique identifier of a customer profile.

    " + } + } + }, + "ListProfileObjectsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ProfileObjectList", + "documentation":"

    The list of ListProfileObject instances.

    " + }, + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous call to ListProfileObjects.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"TagArn", + "documentation":"

    The ARN of the resource for which you want to view tags.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "ObjectTypeField":{ + "type":"structure", + "members":{ + "Source":{ + "shape":"text", + "documentation":"

    A field of a ProfileObject. For example: _source.FirstName, where “_source” is a ProfileObjectType of a Zendesk user and “FirstName” is a field in that ObjectType.

    " + }, + "Target":{ + "shape":"text", + "documentation":"

    The location of the data in the standard ProfileObject model. For example: _profile.Address.PostalCode.

    " + }, + "ContentType":{ + "shape":"FieldContentType", + "documentation":"

    The content type of the field. Used for determining equality when searching.

    " + } + }, + "documentation":"

    Represents a field in a ProfileObjectType.

    " + }, + "ObjectTypeKey":{ + "type":"structure", + "members":{ + "StandardIdentifiers":{ + "shape":"StandardIdentifierList", + "documentation":"

    The types of keys that a ProfileObject can have. Each ProfileObject can have only 1 UNIQUE key but multiple PROFILE keys. PROFILE means that this key can be used to tie an object to a PROFILE. UNIQUE means that it can be used to uniquely identify an object. If a key is marked as SECONDARY, it will be used to search for profiles after all other PROFILE keys have been searched. A LOOKUP_ONLY key is only used to match a profile but is not persisted to be used for searching of the profile. A NEW_ONLY key is only used if the profile does not already exist before the object is ingested; otherwise, it is only used for matching objects to profiles.

    " + }, + "FieldNames":{ + "shape":"FieldNameList", + "documentation":"

    The reference for the key name of the fields map.

    " + } + }, + "documentation":"

    An object that defines the Key element of a ProfileObject. A Key is a special element that can be used to search for a customer profile.

    " + }, + "ObjectTypeKeyList":{ + "type":"list", + "member":{"shape":"ObjectTypeKey"} + }, + "PartyType":{ + "type":"string", + "enum":[ + "INDIVIDUAL", + "BUSINESS", + "OTHER" + ] + }, + "Profile":{ + "type":"structure", + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

    The unique identifier of a customer profile.

    " + }, + "AccountNumber":{ + "shape":"string1To255", + "documentation":"

    A unique account number that you have given to the customer.

    " + }, + "AdditionalInformation":{ + "shape":"string1To1000", + "documentation":"

    Any additional information relevant to the customer's profile.

    " + }, + "PartyType":{ + "shape":"PartyType", + "documentation":"

    The type of profile used to describe the customer.

    " + }, + "BusinessName":{ + "shape":"string1To255", + "documentation":"

    The name of the customer’s business.

    " + }, + "FirstName":{ + "shape":"string1To255", + "documentation":"

    The customer’s first name.

    " + }, + "MiddleName":{ + "shape":"string1To255", + "documentation":"

    The customer’s middle name.

    " + }, + "LastName":{ + "shape":"string1To255", + "documentation":"

    The customer’s last name.

    " + }, + "BirthDate":{ + "shape":"string1To255", + "documentation":"

    The customer’s birth date.

    " + }, + "Gender":{ + "shape":"Gender", + "documentation":"

    The gender with which the customer identifies.

    " + }, + "PhoneNumber":{ + "shape":"string1To255", + "documentation":"

    The customer's phone number, which has not been specified as a mobile, home, or business number.

    " + }, + "MobilePhoneNumber":{ + "shape":"string1To255", + "documentation":"

    The customer’s mobile phone number.

    " + }, + "HomePhoneNumber":{ + "shape":"string1To255", + "documentation":"

    The customer’s home phone number.

    " + }, + "BusinessPhoneNumber":{ + "shape":"string1To255", + "documentation":"

    The customer’s business phone number.

    " + }, + "EmailAddress":{ + "shape":"string1To255", + "documentation":"

    The customer's email address, which has not been specified as a personal or business address.

    " + }, + "PersonalEmailAddress":{ + "shape":"string1To255", + "documentation":"

    The customer’s personal email address.

    " + }, + "BusinessEmailAddress":{ + "shape":"string1To255", + "documentation":"

    The customer’s business email address.

    " + }, + "Address":{ + "shape":"Address", + "documentation":"

    A generic address associated with the customer that is not mailing, shipping, or billing.

    " + }, + "ShippingAddress":{ + "shape":"Address", + "documentation":"

    The customer’s shipping address.

    " + }, + "MailingAddress":{ + "shape":"Address", + "documentation":"

    The customer’s mailing address.

    " + }, + "BillingAddress":{ + "shape":"Address", + "documentation":"

    The customer’s billing address.

    " + }, + "Attributes":{ + "shape":"Attributes", + "documentation":"

    A key value pair of attributes of a customer profile.

    " + } + }, + "documentation":"

    The standard profile of a customer.

    " + }, + "ProfileList":{ + "type":"list", + "member":{"shape":"Profile"} + }, + "ProfileObjectList":{ + "type":"list", + "member":{"shape":"ListProfileObjectsItem"} + }, + "ProfileObjectTypeList":{ + "type":"list", + "member":{"shape":"ListProfileObjectTypeItem"} + }, + "ProfileObjectTypeTemplateList":{ + "type":"list", + "member":{"shape":"ListProfileObjectTypeTemplateItem"} + }, + "PutIntegrationRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Uri", + "ObjectTypeName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

    The URI of the S3 bucket or any other type of data source.

    " + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "PutIntegrationResponse":{ + "type":"structure", + "required":[ + "DomainName", + "Uri", + "ObjectTypeName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    " + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

    The URI of the S3 bucket or any other type of data source.

    " + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was most recently edited.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "PutProfileObjectRequest":{ + "type":"structure", + "required":[ + "ObjectTypeName", + "Object", + "DomainName" + ], + "members":{ + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    " + }, + "Object":{ + "shape":"stringifiedJson", + "documentation":"

    A string that is serialized from a JSON object.

    " + }, + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "PutProfileObjectResponse":{ + "type":"structure", + "members":{ + "ProfileObjectUniqueKey":{ + "shape":"string1To255", + "documentation":"

    The unique identifier of the profile object generated by the service.

    " + } + } + }, + "PutProfileObjectTypeRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ObjectTypeName", + "Description" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    ", + "location":"uri", + "locationName":"ObjectTypeName" + }, + "Description":{ + "shape":"text", + "documentation":"

    Description of the profile object type.

    " + }, + "TemplateId":{ + "shape":"name", + "documentation":"

    A unique identifier for the object template.

    " + }, + "ExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

    The number of days until the data in the object expires.

    " + }, + "EncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

    The customer-provided key to encrypt the profile object that will be created in this profile object type.

    " + }, + "AllowProfileCreation":{ + "shape":"boolean", + "documentation":"

    Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

    " + }, + "Fields":{ + "shape":"FieldMap", + "documentation":"

    A map of the name and ObjectType field.

    " + }, + "Keys":{ + "shape":"KeyMap", + "documentation":"

    A list of unique keys that can be used to map data to the profile.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "PutProfileObjectTypeResponse":{ + "type":"structure", + "required":[ + "ObjectTypeName", + "Description" + ], + "members":{ + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

    The name of the profile object type.

    " + }, + "Description":{ + "shape":"text", + "documentation":"

    Description of the profile object type.

    " + }, + "TemplateId":{ + "shape":"name", + "documentation":"

    A unique identifier for the object template.

    " + }, + "ExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

    The number of days until the data in the object expires.

    " + }, + "EncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

    The customer-provided key to encrypt the profile object that will be created in this profile object type.

    " + }, + "AllowProfileCreation":{ + "shape":"boolean", + "documentation":"

    Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

    " + }, + "Fields":{ + "shape":"FieldMap", + "documentation":"

    A map of the name and ObjectType field.

    " + }, + "Keys":{ + "shape":"KeyMap", + "documentation":"

    A list of unique keys that can be used to map data to the profile.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was most recently edited.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"message"} + }, + "documentation":"

    The requested resource does not exist, or access was denied.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SearchProfilesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "KeyName", + "Values" + ], + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous SearchProfiles API call.

    ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

    The maximum number of objects returned per page.

    ", + "location":"querystring", + "locationName":"max-results" + }, + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "KeyName":{ + "shape":"name", + "documentation":"

    A searchable identifier of a customer profile. The predefined keys you can use to search include: _account, _profileId, _fullName, _phone, _email, _ctrContactId, _marketoLeadId, _salesforceAccountId, _salesforceContactId, _zendeskUserId, _zendeskExternalId, _serviceNowSystemId.

    " + }, + "Values":{ + "shape":"requestValueList", + "documentation":"

    A list of key values.

    " + } + } + }, + "SearchProfilesResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ProfileList", + "documentation":"

    The list of SearchProfiles instances.

    " + }, + "NextToken":{ + "shape":"token", + "documentation":"

    The pagination token from the previous SearchProfiles API call.

    " + } + } + }, + "StandardIdentifier":{ + "type":"string", + "enum":[ + "PROFILE", + "UNIQUE", + "SECONDARY", + "LOOKUP_ONLY", + "NEW_ONLY" + ] + }, + "StandardIdentifierList":{ + "type":"list", + "member":{"shape":"StandardIdentifier"} + }, + "TagArn":{ + "type":"string", + "max":256, + "pattern":"^arn:[a-z0-9]{1,10}:profile" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"TagArn", + "documentation":"

    The ARN of the resource that you're adding tags to.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"message"} + }, + "documentation":"

    You exceeded the maximum number of requests.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"TagArn", + "documentation":"

    The ARN of the resource from which you are removing tags.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The list of tag keys to remove from the resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAddress":{ + "type":"structure", + "members":{ + "Address1":{ + "shape":"string0To255", + "documentation":"

    The first line of a customer address.

    " + }, + "Address2":{ + "shape":"string0To255", + "documentation":"

    The second line of a customer address.

    " + }, + "Address3":{ + "shape":"string0To255", + "documentation":"

    The third line of a customer address.

    " + }, + "Address4":{ + "shape":"string0To255", + "documentation":"

    The fourth line of a customer address.

    " + }, + "City":{ + "shape":"string0To255", + "documentation":"

    The city in which a customer lives.

    " + }, + "County":{ + "shape":"string0To255", + "documentation":"

    The county in which a customer lives.

    " + }, + "State":{ + "shape":"string0To255", + "documentation":"

    The state in which a customer lives.

    " + }, + "Province":{ + "shape":"string0To255", + "documentation":"

    The province in which a customer lives.

    " + }, + "Country":{ + "shape":"string0To255", + "documentation":"

    The country in which a customer lives.

    " + }, + "PostalCode":{ + "shape":"string0To255", + "documentation":"

    The postal code of a customer address.

    " + } + }, + "documentation":"

    Updates associated with the address properties of a customer profile.

    " + }, + "UpdateAttributes":{ + "type":"map", + "key":{"shape":"string1To255"}, + "value":{"shape":"string0To255"} + }, + "UpdateDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name for the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

    The default number of days until the data within the domain expires.

    " + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

    The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage. If specified as an empty string, it will clear any existing value.

    " + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

    The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications. If specified as an empty string, it will clear any existing value. You must set up a policy on the DeadLetterQueue for the SendMessage operation to enable Amazon Connect Customer Profiles to send messages to the DeadLetterQueue.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "UpdateDomainResponse":{ + "type":"structure", + "required":[ + "DomainName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name for the domain.

    " + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

    The default number of days until the data within the domain expires.

    " + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

    The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage.

    " + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

    The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the domain was most recently edited.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "UpdateProfileRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ProfileId" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "ProfileId":{ + "shape":"uuid", + "documentation":"

    The unique identifier of a customer profile.

    " + }, + "AdditionalInformation":{ + "shape":"string0To1000", + "documentation":"

    Any additional information relevant to the customer's profile.

    " + }, + "AccountNumber":{ + "shape":"string0To255", + "documentation":"

    A unique account number that you have given to the customer.

    " + }, + "PartyType":{ + "shape":"PartyType", + "documentation":"

    The type of profile used to describe the customer.

    " + }, + "BusinessName":{ + "shape":"string0To255", + "documentation":"

    The name of the customer’s business.

    " + }, + "FirstName":{ + "shape":"string0To255", + "documentation":"

    The customer’s first name.

    " + }, + "MiddleName":{ + "shape":"string0To255", + "documentation":"

    The customer’s middle name.

    " + }, + "LastName":{ + "shape":"string0To255", + "documentation":"

    The customer’s last name.

    " + }, + "BirthDate":{ + "shape":"string0To255", + "documentation":"

    The customer’s birth date.

    " + }, + "Gender":{ + "shape":"Gender", + "documentation":"

    The gender with which the customer identifies.

    " + }, + "PhoneNumber":{ + "shape":"string0To255", + "documentation":"

    The customer's phone number, which has not been specified as a mobile, home, or business number.

    " + }, + "MobilePhoneNumber":{ + "shape":"string0To255", + "documentation":"

    The customer’s mobile phone number.

    " + }, + "HomePhoneNumber":{ + "shape":"string0To255", + "documentation":"

    The customer’s home phone number.

    " + }, + "BusinessPhoneNumber":{ + "shape":"string0To255", + "documentation":"

    The customer’s business phone number.

    " + }, + "EmailAddress":{ + "shape":"string0To255", + "documentation":"

    The customer's email address, which has not been specified as a personal or business address.

    " + }, + "PersonalEmailAddress":{ + "shape":"string0To255", + "documentation":"

    The customer’s personal email address.

    " + }, + "BusinessEmailAddress":{ + "shape":"string0To255", + "documentation":"

    The customer’s business email address.

    " + }, + "Address":{ + "shape":"UpdateAddress", + "documentation":"

    A generic address associated with the customer that is not mailing, shipping, or billing.

    " + }, + "ShippingAddress":{ + "shape":"UpdateAddress", + "documentation":"

    The customer’s shipping address.

    " + }, + "MailingAddress":{ + "shape":"UpdateAddress", + "documentation":"

    The customer’s mailing address.

    " + }, + "BillingAddress":{ + "shape":"UpdateAddress", + "documentation":"

    The customer’s billing address.

    " + }, + "Attributes":{ + "shape":"UpdateAttributes", + "documentation":"

    A key value pair of attributes of a customer profile.

    " + } + } + }, + "UpdateProfileResponse":{ + "type":"structure", + "required":["ProfileId"], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

    The unique identifier of a customer profile.

    " + } + } + }, + "boolean":{"type":"boolean"}, + "encryptionKey":{ + "type":"string", + "max":255, + "min":0 + }, + "expirationDaysInteger":{ + "type":"integer", + "max":1098, + "min":1 + }, + "long":{"type":"long"}, + "maxSize100":{ + "type":"integer", + "max":100, + "min":1 + }, + "message":{"type":"string"}, + "requestValueList":{ + "type":"list", + "member":{"shape":"string1To255"} + }, + "sqsQueueUrl":{ + "type":"string", + "max":255, + "min":0 + }, + "string0To1000":{ + "type":"string", + "max":1000, + "min":0 + }, + "string0To255":{ + "type":"string", + "max":255, + "min":0 + }, + "string1To1000":{ + "type":"string", + "max":1000, + "min":1 + }, + "string1To255":{ + "type":"string", + "max":255, + "min":1 + }, + "stringifiedJson":{ + "type":"string", + "max":256000, + "min":1 + }, + "text":{ + "type":"string", + "max":1000, + "min":1 + }, + "timestamp":{"type":"timestamp"}, + "token":{ + "type":"string", + "max":1024, + "min":1 + }, + "typeName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z_][a-zA-Z_0-9-]*$" + }, + "uuid":{ + "type":"string", + "pattern":"[a-f0-9]{32}" + } + }, + "documentation":"Amazon Connect Customer Profiles

    Welcome to the Amazon Connect Customer Profiles API Reference. This guide provides information about the Amazon Connect Customer Profiles API, including supported operations, data types, parameters, and schemas.

    Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and enterprise resource planning (ERP) systems, with contact history from your Amazon Connect contact center.

    If you're new to Amazon Connect, you might find it helpful to also review the Amazon Connect Administrator Guide.

    " +} diff --git a/services/databasemigration/build.properties b/services/databasemigration/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/databasemigration/build.properties +++ b/services/databasemigration/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 62d490a028e4..1d6b71020969 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + databrew + AWS Java SDK :: Services :: Data Brew + The AWS Java SDK for Data Brew module holds the client classes that are used for + communicating with Data Brew. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.databrew + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/databrew/src/main/resources/codegen-resources/paginators-1.json b/services/databrew/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..d9620b8c4cd3 --- /dev/null +++ b/services/databrew/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,46 @@ +{ + "pagination": { + "ListDatasets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Datasets" + }, + "ListJobRuns": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "JobRuns" + }, + "ListJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Jobs" + }, + "ListProjects": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Projects" + }, + "ListRecipeVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Recipes" + }, + "ListRecipes": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Recipes" + }, + "ListSchedules": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Schedules" + } + } +} \ No newline at end of file diff --git a/services/databrew/src/main/resources/codegen-resources/service-2.json b/services/databrew/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..7b61b3922202 --- /dev/null +++ b/services/databrew/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3099 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-07-25", + "endpointPrefix":"databrew", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS Glue DataBrew", + "serviceId":"DataBrew", + "signatureVersion":"v4", + "signingName":"databrew", + "uid":"databrew-2017-07-25" + }, + "operations":{ + "BatchDeleteRecipeVersion":{ + "name":"BatchDeleteRecipeVersion", + "http":{ + "method":"POST", + "requestUri":"/recipes/{name}/batchDeleteRecipeVersion" + }, + "input":{"shape":"BatchDeleteRecipeVersionRequest"}, + 
"output":{"shape":"BatchDeleteRecipeVersionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes one or more versions of a recipe at a time.

    " + }, + "CreateDataset":{ + "name":"CreateDataset", + "http":{ + "method":"POST", + "requestUri":"/datasets" + }, + "input":{"shape":"CreateDatasetRequest"}, + "output":{"shape":"CreateDatasetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates a new AWS Glue DataBrew dataset for this AWS account.

    " + }, + "CreateProfileJob":{ + "name":"CreateProfileJob", + "http":{ + "method":"POST", + "requestUri":"/profileJobs" + }, + "input":{"shape":"CreateProfileJobRequest"}, + "output":{"shape":"CreateProfileJobResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Creates a new job to profile an AWS Glue DataBrew dataset that exists in the current AWS account.

    " + }, + "CreateProject":{ + "name":"CreateProject", + "http":{ + "method":"POST", + "requestUri":"/projects" + }, + "input":{"shape":"CreateProjectRequest"}, + "output":{"shape":"CreateProjectResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates a new AWS Glue DataBrew project in the current AWS account.

    " + }, + "CreateRecipe":{ + "name":"CreateRecipe", + "http":{ + "method":"POST", + "requestUri":"/recipes" + }, + "input":{"shape":"CreateRecipeRequest"}, + "output":{"shape":"CreateRecipeResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates a new AWS Glue DataBrew recipe for the current AWS account.

    " + }, + "CreateRecipeJob":{ + "name":"CreateRecipeJob", + "http":{ + "method":"POST", + "requestUri":"/recipeJobs" + }, + "input":{"shape":"CreateRecipeJobRequest"}, + "output":{"shape":"CreateRecipeJobResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Creates a new job for an existing AWS Glue DataBrew recipe in the current AWS account. You can create a standalone job using either a project, or a combination of a recipe and a dataset.

    " + }, + "CreateSchedule":{ + "name":"CreateSchedule", + "http":{ + "method":"POST", + "requestUri":"/schedules" + }, + "input":{"shape":"CreateScheduleRequest"}, + "output":{"shape":"CreateScheduleResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Creates a new schedule for one or more AWS Glue DataBrew jobs. Jobs can be run at a specific date and time, or at regular intervals.

    " + }, + "DeleteDataset":{ + "name":"DeleteDataset", + "http":{ + "method":"DELETE", + "requestUri":"/datasets/{name}" + }, + "input":{"shape":"DeleteDatasetRequest"}, + "output":{"shape":"DeleteDatasetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes a dataset from AWS Glue DataBrew.

    " + }, + "DeleteJob":{ + "name":"DeleteJob", + "http":{ + "method":"DELETE", + "requestUri":"/jobs/{name}" + }, + "input":{"shape":"DeleteJobRequest"}, + "output":{"shape":"DeleteJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes the specified AWS Glue DataBrew job from the current AWS account. The job can be for a recipe or for a profile.

    " + }, + "DeleteProject":{ + "name":"DeleteProject", + "http":{ + "method":"DELETE", + "requestUri":"/projects/{name}" + }, + "input":{"shape":"DeleteProjectRequest"}, + "output":{"shape":"DeleteProjectResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes an existing AWS Glue DataBrew project from the current AWS account.

    " + }, + "DeleteRecipeVersion":{ + "name":"DeleteRecipeVersion", + "http":{ + "method":"DELETE", + "requestUri":"/recipes/{name}/recipeVersion/{recipeVersion}" + }, + "input":{"shape":"DeleteRecipeVersionRequest"}, + "output":{"shape":"DeleteRecipeVersionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes a single version of an AWS Glue DataBrew recipe.

    " + }, + "DeleteSchedule":{ + "name":"DeleteSchedule", + "http":{ + "method":"DELETE", + "requestUri":"/schedules/{name}" + }, + "input":{"shape":"DeleteScheduleRequest"}, + "output":{"shape":"DeleteScheduleResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes the specified AWS Glue DataBrew schedule from the current AWS account.

    " + }, + "DescribeDataset":{ + "name":"DescribeDataset", + "http":{ + "method":"GET", + "requestUri":"/datasets/{name}" + }, + "input":{"shape":"DescribeDatasetRequest"}, + "output":{"shape":"DescribeDatasetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the definition of a specific AWS Glue DataBrew dataset that is in the current AWS account.

    " + }, + "DescribeJob":{ + "name":"DescribeJob", + "http":{ + "method":"GET", + "requestUri":"/jobs/{name}" + }, + "input":{"shape":"DescribeJobRequest"}, + "output":{"shape":"DescribeJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the definition of a specific AWS Glue DataBrew job that is in the current AWS account.

    " + }, + "DescribeProject":{ + "name":"DescribeProject", + "http":{ + "method":"GET", + "requestUri":"/projects/{name}" + }, + "input":{"shape":"DescribeProjectRequest"}, + "output":{"shape":"DescribeProjectResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the definition of a specific AWS Glue DataBrew project that is in the current AWS account.

    " + }, + "DescribeRecipe":{ + "name":"DescribeRecipe", + "http":{ + "method":"GET", + "requestUri":"/recipes/{name}" + }, + "input":{"shape":"DescribeRecipeRequest"}, + "output":{"shape":"DescribeRecipeResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the definition of a specific AWS Glue DataBrew recipe that is in the current AWS account.

    " + }, + "DescribeSchedule":{ + "name":"DescribeSchedule", + "http":{ + "method":"GET", + "requestUri":"/schedules/{name}" + }, + "input":{"shape":"DescribeScheduleRequest"}, + "output":{"shape":"DescribeScheduleResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the definition of a specific AWS Glue DataBrew schedule that is in the current AWS account.

    " + }, + "ListDatasets":{ + "name":"ListDatasets", + "http":{ + "method":"GET", + "requestUri":"/datasets" + }, + "input":{"shape":"ListDatasetsRequest"}, + "output":{"shape":"ListDatasetsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Lists all of the AWS Glue DataBrew datasets for the current AWS account.

    " + }, + "ListJobRuns":{ + "name":"ListJobRuns", + "http":{ + "method":"GET", + "requestUri":"/jobs/{name}/jobRuns" + }, + "input":{"shape":"ListJobRunsRequest"}, + "output":{"shape":"ListJobRunsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Lists all of the previous runs of a particular AWS Glue DataBrew job in the current AWS account.

    " + }, + "ListJobs":{ + "name":"ListJobs", + "http":{ + "method":"GET", + "requestUri":"/jobs" + }, + "input":{"shape":"ListJobsRequest"}, + "output":{"shape":"ListJobsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Lists the AWS Glue DataBrew jobs in the current AWS account.

    " + }, + "ListProjects":{ + "name":"ListProjects", + "http":{ + "method":"GET", + "requestUri":"/projects" + }, + "input":{"shape":"ListProjectsRequest"}, + "output":{"shape":"ListProjectsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Lists all of the DataBrew projects in the current AWS account.

    " + }, + "ListRecipeVersions":{ + "name":"ListRecipeVersions", + "http":{ + "method":"GET", + "requestUri":"/recipeVersions" + }, + "input":{"shape":"ListRecipeVersionsRequest"}, + "output":{"shape":"ListRecipeVersionsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Lists all of the versions of a particular AWS Glue DataBrew recipe in the current AWS account.

    " + }, + "ListRecipes":{ + "name":"ListRecipes", + "http":{ + "method":"GET", + "requestUri":"/recipes" + }, + "input":{"shape":"ListRecipesRequest"}, + "output":{"shape":"ListRecipesResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Lists all of the AWS Glue DataBrew recipes in the current AWS account.

    " + }, + "ListSchedules":{ + "name":"ListSchedules", + "http":{ + "method":"GET", + "requestUri":"/schedules" + }, + "input":{"shape":"ListSchedulesRequest"}, + "output":{"shape":"ListSchedulesResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Lists the AWS Glue DataBrew schedules in the current AWS account.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Lists all the tags for an AWS Glue DataBrew resource.

    " + }, + "PublishRecipe":{ + "name":"PublishRecipe", + "http":{ + "method":"POST", + "requestUri":"/recipes/{name}/publishRecipe" + }, + "input":{"shape":"PublishRecipeRequest"}, + "output":{"shape":"PublishRecipeResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Publishes a new major version of an AWS Glue DataBrew recipe that exists in the current AWS account.

    " + }, + "SendProjectSessionAction":{ + "name":"SendProjectSessionAction", + "http":{ + "method":"PUT", + "requestUri":"/projects/{name}/sendProjectSessionAction" + }, + "input":{"shape":"SendProjectSessionActionRequest"}, + "output":{"shape":"SendProjectSessionActionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Performs a recipe step within an interactive AWS Glue DataBrew session that's currently open.

    " + }, + "StartJobRun":{ + "name":"StartJobRun", + "http":{ + "method":"POST", + "requestUri":"/jobs/{name}/startJobRun" + }, + "input":{"shape":"StartJobRunRequest"}, + "output":{"shape":"StartJobRunResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Runs an AWS Glue DataBrew job that exists in the current AWS account.

    " + }, + "StartProjectSession":{ + "name":"StartProjectSession", + "http":{ + "method":"PUT", + "requestUri":"/projects/{name}/startProjectSession" + }, + "input":{"shape":"StartProjectSessionRequest"}, + "output":{"shape":"StartProjectSessionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates an interactive session, enabling you to manipulate an AWS Glue DataBrew project.

    " + }, + "StopJobRun":{ + "name":"StopJobRun", + "http":{ + "method":"POST", + "requestUri":"/jobs/{name}/jobRun/{runId}/stopJobRun" + }, + "input":{"shape":"StopJobRunRequest"}, + "output":{"shape":"StopJobRunResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Stops the specified job from running in the current AWS account.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Adds metadata tags to an AWS Glue DataBrew resource, such as a dataset, job, project, or recipe.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Removes metadata tags from an AWS Glue DataBrew resource.

    " + }, + "UpdateDataset":{ + "name":"UpdateDataset", + "http":{ + "method":"PUT", + "requestUri":"/datasets/{name}" + }, + "input":{"shape":"UpdateDatasetRequest"}, + "output":{"shape":"UpdateDatasetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Modifies the definition of an existing AWS Glue DataBrew dataset in the current AWS account.

    " + }, + "UpdateProfileJob":{ + "name":"UpdateProfileJob", + "http":{ + "method":"PUT", + "requestUri":"/profileJobs/{name}" + }, + "input":{"shape":"UpdateProfileJobRequest"}, + "output":{"shape":"UpdateProfileJobResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Modifies the definition of an existing AWS Glue DataBrew job in the current AWS account.

    " + }, + "UpdateProject":{ + "name":"UpdateProject", + "http":{ + "method":"PUT", + "requestUri":"/projects/{name}" + }, + "input":{"shape":"UpdateProjectRequest"}, + "output":{"shape":"UpdateProjectResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Modifies the definition of an existing AWS Glue DataBrew project in the current AWS account.

    " + }, + "UpdateRecipe":{ + "name":"UpdateRecipe", + "http":{ + "method":"PUT", + "requestUri":"/recipes/{name}" + }, + "input":{"shape":"UpdateRecipeRequest"}, + "output":{"shape":"UpdateRecipeResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Modifies the definition of the latest working version of an AWS Glue DataBrew recipe in the current AWS account.

    " + }, + "UpdateRecipeJob":{ + "name":"UpdateRecipeJob", + "http":{ + "method":"PUT", + "requestUri":"/recipeJobs/{name}" + }, + "input":{"shape":"UpdateRecipeJobRequest"}, + "output":{"shape":"UpdateRecipeJobResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Modifies the definition of an existing AWS Glue DataBrew recipe job in the current AWS account.

    " + }, + "UpdateSchedule":{ + "name":"UpdateSchedule", + "http":{ + "method":"PUT", + "requestUri":"/schedules/{name}" + }, + "input":{"shape":"UpdateScheduleRequest"}, + "output":{"shape":"UpdateScheduleResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Modifies the definition of an existing AWS Glue DataBrew schedule in the current AWS account.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    Access to the specified resource was denied.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccountId":{ + "type":"string", + "max":255 + }, + "ActionId":{"type":"integer"}, + "Arn":{ + "type":"string", + "max":2048, + "min":20 + }, + "AssumeControl":{"type":"boolean"}, + "Attempt":{"type":"integer"}, + "BatchDeleteRecipeVersionRequest":{ + "type":"structure", + "required":[ + "Name", + "RecipeVersions" + ], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe to be modified.

    ", + "location":"uri", + "locationName":"name" + }, + "RecipeVersions":{ + "shape":"RecipeVersionList", + "documentation":"

    An array of version identifiers to be deleted.

    " + } + } + }, + "BatchDeleteRecipeVersionResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe that was modified.

    " + }, + "Errors":{ + "shape":"RecipeErrorList", + "documentation":"

    Errors, if any, that were encountered when deleting the recipe versions.

    " + } + } + }, + "Bucket":{ + "type":"string", + "max":63, + "min":3 + }, + "CatalogId":{ + "type":"string", + "max":255, + "min":1 + }, + "ClientSessionId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9-]*$" + }, + "ColumnName":{ + "type":"string", + "max":255, + "min":1 + }, + "ColumnNameList":{ + "type":"list", + "member":{"shape":"ColumnName"}, + "max":200 + }, + "ColumnRange":{ + "type":"integer", + "max":20, + "min":0 + }, + "CompressionFormat":{ + "type":"string", + "enum":[ + "GZIP", + "LZ4", + "SNAPPY", + "BZIP2", + "DEFLATE", + "LZO", + "BROTLI", + "ZSTD", + "ZLIB" + ] + }, + "Condition":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Z\\_]+$" + }, + "ConditionExpression":{ + "type":"structure", + "required":[ + "Condition", + "TargetColumn" + ], + "members":{ + "Condition":{ + "shape":"Condition", + "documentation":"

    A specific condition to apply to a recipe action. For more information, see Recipe structure in the AWS Glue DataBrew Developer Guide.

    " + }, + "Value":{ + "shape":"ConditionValue", + "documentation":"

    A value that the condition must evaluate to for the condition to succeed.

    " + }, + "TargetColumn":{ + "shape":"TargetColumn", + "documentation":"

    A column to apply this condition to, within an AWS Glue DataBrew dataset.

    " + } + }, + "documentation":"

    Represents an individual condition that evaluates to true or false.

    Conditions are used with recipe actions: The action is only performed for column values where the condition evaluates to true.

    If a recipe requires more than one condition, then the recipe must specify multiple ConditionExpression elements. Each condition is applied to the rows in a dataset first, before the recipe action is performed.

    " + }, + "ConditionExpressionList":{ + "type":"list", + "member":{"shape":"ConditionExpression"} + }, + "ConditionValue":{ + "type":"string", + "max":1024 + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    Updating or deleting a resource can cause an inconsistent state.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateDatasetRequest":{ + "type":"structure", + "required":[ + "Name", + "Input" + ], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset to be created.

    " + }, + "FormatOptions":{"shape":"FormatOptions"}, + "Input":{"shape":"Input"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags to apply to this dataset.

    " + } + } + }, + "CreateDatasetResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset that you created.

    " + } + } + }, + "CreateProfileJobRequest":{ + "type":"structure", + "required":[ + "DatasetName", + "Name", + "OutputLocation", + "RoleArn" + ], + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset that this job is to act upon.

    " + }, + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

    " + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

    The encryption mode for the job, which can be one of the following:

    • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

    • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

    " + }, + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job to be created.

    " + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

    A value that enables or disables Amazon CloudWatch logging for the current AWS account. If logging is enabled, CloudWatch writes one log stream for each job run.

    " + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

    The maximum number of nodes that DataBrew can use when the job processes data.

    " + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

    The maximum number of times to retry the job after a job run fails.

    " + }, + "OutputLocation":{"shape":"S3Location"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be assumed for this request.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags to apply to this job.

    " + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

    The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

    " + } + } + }, + "CreateProfileJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job that was created.

    " + } + } + }, + "CreateProjectRequest":{ + "type":"structure", + "required":[ + "DatasetName", + "Name", + "RecipeName", + "RoleArn" + ], + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset to associate this project with.

    " + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

    A unique name for the new project.

    " + }, + "RecipeName":{ + "shape":"RecipeName", + "documentation":"

    The name of an existing recipe to associate with the project.

    " + }, + "Sample":{"shape":"Sample"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be assumed for this request.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags to apply to this project.

    " + } + } + }, + "CreateProjectResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that you created.

    " + } + } + }, + "CreateRecipeJobRequest":{ + "type":"structure", + "required":[ + "Name", + "Outputs", + "RoleArn" + ], + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset that this job processes.

    " + }, + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

    " + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

    The encryption mode for the job, which can be one of the following:

    • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

    • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

    " + }, + "Name":{ + "shape":"JobName", + "documentation":"

    A unique name for the job.

    " + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

    A value that enables or disables Amazon CloudWatch logging for the current AWS account. If logging is enabled, CloudWatch writes one log stream for each job run.

    " + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

    The maximum number of nodes that DataBrew can consume when the job processes data.

    " + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

    The maximum number of times to retry the job after a job run fails.

    " + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

    One or more artifacts that represent the output from running the job.

    " + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    Either the name of an existing project, or a combination of a recipe and a dataset to associate with the recipe.

    " + }, + "RecipeReference":{"shape":"RecipeReference"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be assumed for this request.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags to apply to this job.

    " + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

    The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

    " + } + } + }, + "CreateRecipeJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job that you created.

    " + } + } + }, + "CreateRecipeRequest":{ + "type":"structure", + "required":[ + "Name", + "Steps" + ], + "members":{ + "Description":{ + "shape":"RecipeDescription", + "documentation":"

    A description for the recipe.

    " + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

    A unique name for the recipe.

    " + }, + "Steps":{ + "shape":"RecipeStepList", + "documentation":"

    An array containing the steps to be performed by the recipe. Each recipe step consists of one recipe action and (optionally) an array of condition expressions.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags to apply to this recipe.

    " + } + } + }, + "CreateRecipeResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe that you created.

    " + } + } + }, + "CreateScheduleRequest":{ + "type":"structure", + "required":[ + "CronExpression", + "Name" + ], + "members":{ + "JobNames":{ + "shape":"JobNameList", + "documentation":"

    The name or names of one or more jobs to be run.

    " + }, + "CronExpression":{ + "shape":"CronExpression", + "documentation":"

    The date or dates and time or times, in cron format, when the jobs are to be run.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags to apply to this schedule.

    " + }, + "Name":{ + "shape":"ScheduleName", + "documentation":"

    A unique name for the schedule.

    " + } + } + }, + "CreateScheduleResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

    The name of the schedule that was created.

    " + } + } + }, + "CreatedBy":{"type":"string"}, + "CronExpression":{ + "type":"string", + "max":512, + "min":1 + }, + "DataCatalogInputDefinition":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogId", + "documentation":"

    The unique identifier of the AWS account that holds the Data Catalog that stores the data.

    " + }, + "DatabaseName":{ + "shape":"DatabaseName", + "documentation":"

    The name of a database in the Data Catalog.

    " + }, + "TableName":{ + "shape":"TableName", + "documentation":"

    The name of a database table in the Data Catalog. This table corresponds to a DataBrew dataset.

    " + }, + "TempDirectory":{ + "shape":"S3Location", + "documentation":"

    An Amazon S3 location that the AWS Glue Data Catalog can use as a temporary directory.

    " + } + }, + "documentation":"

    Represents how metadata stored in the AWS Glue Data Catalog is defined in an AWS Glue DataBrew dataset.

    " + }, + "DatabaseName":{ + "type":"string", + "max":255, + "min":1 + }, + "Dataset":{ + "type":"structure", + "required":[ + "Name", + "Input" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The ID of the AWS account that owns the dataset.

    " + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

    The identifier (the user name) of the user who created the dataset.

    " + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

    The date and time that the dataset was created.

    " + }, + "Name":{ + "shape":"DatasetName", + "documentation":"

    The unique name of the dataset.

    " + }, + "FormatOptions":{ + "shape":"FormatOptions", + "documentation":"

    Options that define how DataBrew interprets the data in the dataset.

    " + }, + "Input":{ + "shape":"Input", + "documentation":"

    Information on how DataBrew can find the dataset, in either the AWS Glue Data Catalog or Amazon S3.

    " + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The last modification date and time of the dataset.

    " + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

    The identifier (the user name) of the user who last modified the dataset.

    " + }, + "Source":{ + "shape":"Source", + "documentation":"

    The location of the data for the dataset, either Amazon S3 or the AWS Glue Data Catalog.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags that have been applied to the dataset.

    " + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The unique Amazon Resource Name (ARN) for the dataset.

    " + } + }, + "documentation":"

    Represents a dataset that can be processed by AWS Glue DataBrew.

    " + }, + "DatasetList":{ + "type":"list", + "member":{"shape":"Dataset"} + }, + "DatasetName":{ + "type":"string", + "max":255, + "min":1 + }, + "Date":{"type":"timestamp"}, + "DeleteDatasetRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset to be deleted.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteDatasetResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset that you deleted.

    " + } + } + }, + "DeleteJobRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job to be deleted.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job that you deleted.

    " + } + } + }, + "DeleteProjectRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project to be deleted.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteProjectResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that you deleted.

    " + } + } + }, + "DeleteRecipeVersionRequest":{ + "type":"structure", + "required":[ + "Name", + "RecipeVersion" + ], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe to be deleted.

    ", + "location":"uri", + "locationName":"name" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

    The version of the recipe to be deleted.

    ", + "location":"uri", + "locationName":"recipeVersion" + } + } + }, + "DeleteRecipeVersionResponse":{ + "type":"structure", + "required":[ + "Name", + "RecipeVersion" + ], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe that was deleted.

    " + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

    The version of the recipe that was deleted.

    " + } + } + }, + "DeleteScheduleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

    The name of the schedule to be deleted.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteScheduleResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

    The name of the schedule that was deleted.

    " + } + } + }, + "DescribeDatasetRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset to be described.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeDatasetResponse":{ + "type":"structure", + "required":[ + "Name", + "Input" + ], + "members":{ + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

    The identifier (user name) of the user who created the dataset.

    " + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

    The date and time that the dataset was created.

    " + }, + "Name":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset.

    " + }, + "FormatOptions":{"shape":"FormatOptions"}, + "Input":{"shape":"Input"}, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The date and time that the dataset was last modified.

    " + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

    The identifier (user name) of the user who last modified the dataset.

    " + }, + "Source":{ + "shape":"Source", + "documentation":"

    The location of the data for this dataset, either Amazon S3 or the AWS Glue Data Catalog.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags associated with this dataset.

    " + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset.

    " + } + } + }, + "DescribeJobRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job to be described.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "CreateDate":{ + "shape":"Date", + "documentation":"

    The date and time that the job was created.

    " + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

    The identifier (user name) of the user associated with the creation of the job.

    " + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

    The dataset that the job acts upon.

    " + }, + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

    " + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

    The encryption mode for the job, which can be one of the following:

    • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

    • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

    " + }, + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job.

    " + }, + "Type":{ + "shape":"JobType", + "documentation":"

    The job type, which must be one of the following:

    • PROFILE - The job analyzes the dataset to determine its size, data types, data distribution, and more.

    • RECIPE - The job applies one or more transformations to a dataset.

    " + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

    The identifier (user name) of the user who last modified the job.

    " + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The date and time that the job was last modified.

    " + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

    A value that indicates whether Amazon CloudWatch logging is enabled for this job.

    " + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

    The maximum number of nodes that AWS Glue DataBrew can consume when the job processes data.

    " + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

    The maximum number of times to retry the job after a job run fails.

    " + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

    One or more artifacts that represent the output from running the job.

    " + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The DataBrew project associated with this job.

    " + }, + "RecipeReference":{"shape":"RecipeReference"}, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the job.

    " + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the AWS Identity and Access Management (IAM) role that was assumed for this request.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags associated with this job.

    " + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

    The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

    " + } + } + }, + "DescribeProjectRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project to be described.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeProjectResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "CreateDate":{ + "shape":"Date", + "documentation":"

    The date and time that the project was created.

    " + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

    The identifier (user name) of the user who created the project.

    " + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

    The dataset associated with the project.

    " + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The date and time that the project was last modified.

    " + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

    The identifier (user name) of the user who last modified the project.

    " + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project.

    " + }, + "RecipeName":{ + "shape":"RecipeName", + "documentation":"

    The recipe associated with this project.

    " + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the project.

    " + }, + "Sample":{"shape":"Sample"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the AWS Identity and Access Management (IAM) role that was assumed for this request.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags associated with this project.

    " + }, + "SessionStatus":{ + "shape":"SessionStatus", + "documentation":"

    Describes the current state of the session:

    • PROVISIONING - allocating resources for the session.

    • INITIALIZING - getting the session ready for first use.

    • ASSIGNED - the session is ready for use.

    " + }, + "OpenedBy":{ + "shape":"OpenedBy", + "documentation":"

    The identifier (user name) of the user that opened the project for use.

    " + }, + "OpenDate":{ + "shape":"Date", + "documentation":"

    The date and time when the project was opened.

    " + } + } + }, + "DescribeRecipeRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe to be described.

    ", + "location":"uri", + "locationName":"name" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

    The recipe version identifier. If this parameter isn't specified, then the latest published version is returned.

    ", + "location":"querystring", + "locationName":"recipeVersion" + } + } + }, + "DescribeRecipeResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

    The identifier (user name) of the user who created the recipe.

    " + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

    The date and time that the recipe was created.

    " + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

    The identifier (user name) of the user who last modified the recipe.

    " + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The date and time that the recipe was last modified.

    " + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project associated with this recipe.

    " + }, + "PublishedBy":{ + "shape":"PublishedBy", + "documentation":"

    The identifier (user name) of the user who last published the recipe.

    " + }, + "PublishedDate":{ + "shape":"Date", + "documentation":"

    The date and time when the recipe was last published.

    " + }, + "Description":{ + "shape":"RecipeDescription", + "documentation":"

    The description of the recipe.

    " + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe.

    " + }, + "Steps":{ + "shape":"RecipeStepList", + "documentation":"

    One or more steps to be performed by the recipe. Each step consists of an action, and the conditions under which the action should succeed.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags associated with this recipe.

    " + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the recipe.

    " + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

    The recipe version identifier.

    " + } + } + }, + "DescribeScheduleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

    The name of the schedule to be described.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeScheduleResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "CreateDate":{ + "shape":"Date", + "documentation":"

    The date and time that the schedule was created.

    " + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

    The identifier (user name) of the user who created the schedule.

    " + }, + "JobNames":{ + "shape":"JobNameList", + "documentation":"

    The name or names of one or more jobs to be run by using the schedule.

    " + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

    The identifier (user name) of the user who last modified the schedule.

    " + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The date and time that the schedule was last modified.

    " + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the schedule.

    " + }, + "CronExpression":{ + "shape":"CronExpression", + "documentation":"

    The date or dates and time or times, in cron format, when the jobs are to be run for the schedule.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags associated with this schedule.

    " + }, + "Name":{ + "shape":"ScheduleName", + "documentation":"

    The name of the schedule.

    " + } + } + }, + "EncryptionKeyArn":{ + "type":"string", + "max":2048, + "min":20 + }, + "EncryptionMode":{ + "type":"string", + "enum":[ + "SSE-KMS", + "SSE-S3" + ] + }, + "ErrorCode":{ + "type":"string", + "pattern":"^[1-5][0-9][0-9]$" + }, + "ExcelOptions":{ + "type":"structure", + "members":{ + "SheetNames":{ + "shape":"SheetNameList", + "documentation":"

    Specifies one or more named sheets in the Excel file, which will be included in the dataset.

    " + }, + "SheetIndexes":{ + "shape":"SheetIndexList", + "documentation":"

    Specifies one or more sheet numbers in the Excel file, which will be included in the dataset.

    " + } + }, + "documentation":"

    Options that define how DataBrew will interpret a Microsoft Excel file, when creating a dataset from that file.

    " + }, + "ExecutionTime":{"type":"integer"}, + "FormatOptions":{ + "type":"structure", + "members":{ + "Json":{ + "shape":"JsonOptions", + "documentation":"

    Options that define how JSON input is to be interpreted by DataBrew.

    " + }, + "Excel":{ + "shape":"ExcelOptions", + "documentation":"

    Options that define how Excel input is to be interpreted by DataBrew.

    " + } + }, + "documentation":"

    Options that define how JSON or Microsoft Excel input is to be interpreted by DataBrew.

    " + }, + "HiddenColumnList":{ + "type":"list", + "member":{"shape":"ColumnName"} + }, + "Input":{ + "type":"structure", + "members":{ + "S3InputDefinition":{ + "shape":"S3Location", + "documentation":"

    The Amazon S3 location where the data is stored.

    " + }, + "DataCatalogInputDefinition":{ + "shape":"DataCatalogInputDefinition", + "documentation":"

    The AWS Glue Data Catalog parameters for the data.

    " + } + }, + "documentation":"

    Information on how AWS Glue DataBrew can find data, in either the AWS Glue Data Catalog or Amazon S3.

    " + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    An internal service failure occurred.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "Job":{ + "type":"structure", + "required":["Name"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The ID of the AWS account that owns the job.

    " + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

    The identifier (the user name) of the user who created the job.

    " + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

    The date and time that the job was created.

    " + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

    A dataset that the job is to process.

    " + }, + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of an encryption key that is used to protect a job.

    " + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

    The encryption mode for the job, which can be one of the following:

    • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

    • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

    " + }, + "Name":{ + "shape":"JobName", + "documentation":"

    The unique name of the job.

    " + }, + "Type":{ + "shape":"JobType", + "documentation":"

    The job type, which must be one of the following:

    • PROFILE - A job to analyze a dataset, to determine its size, data types, data distribution, and more.

    • RECIPE - A job to apply one or more transformations to a dataset.

    " + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

    The identifier (the user name) of the user who last modified the job.

    " + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The modification date and time of the job.

    " + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

    The current status of Amazon CloudWatch logging for the job.

    " + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

    The maximum number of nodes that can be consumed when the job processes data.

    " + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

    The maximum number of times to retry the job after a job run fails.

    " + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

    One or more artifacts that represent output from running the job.

    " + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that the job is associated with.

    " + }, + "RecipeReference":{ + "shape":"RecipeReference", + "documentation":"

    A set of steps that the job runs.

    " + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The unique Amazon Resource Name (ARN) for the job.

    " + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the role that will be assumed for this job.

    " + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

    The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags that have been applied to the job.

    " + } + }, + "documentation":"

    Represents all of the attributes of an AWS Glue DataBrew job.

    " + }, + "JobList":{ + "type":"list", + "member":{"shape":"Job"} + }, + "JobName":{ + "type":"string", + "max":240, + "min":1 + }, + "JobNameList":{ + "type":"list", + "member":{"shape":"JobName"}, + "max":50 + }, + "JobRun":{ + "type":"structure", + "members":{ + "Attempt":{ + "shape":"Attempt", + "documentation":"

    The number of times that DataBrew has attempted to run the job.

    " + }, + "CompletedOn":{ + "shape":"Date", + "documentation":"

    The date and time when the job completed processing.

    " + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset for the job to process.

    " + }, + "ErrorMessage":{ + "shape":"JobRunErrorMessage", + "documentation":"

    A message indicating an error (if any) that was encountered when the job ran.

    " + }, + "ExecutionTime":{ + "shape":"ExecutionTime", + "documentation":"

    The amount of time, in seconds, during which a job run consumed resources.

    " + }, + "JobName":{ + "shape":"JobName", + "documentation":"

    The name of the job being processed during this run.

    " + }, + "RunId":{ + "shape":"JobRunId", + "documentation":"

    The unique identifier of the job run.

    " + }, + "State":{ + "shape":"JobRunState", + "documentation":"

    The current state of the job run entity itself.

    " + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

    The current status of Amazon CloudWatch logging for the job run.

    " + }, + "LogGroupName":{ + "shape":"LogGroupName", + "documentation":"

    The name of an Amazon CloudWatch log group, where the job writes diagnostic messages when it runs.

    " + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

    One or more output artifacts from a job run.

    " + }, + "RecipeReference":{ + "shape":"RecipeReference", + "documentation":"

    The set of steps processed by the job.

    " + }, + "StartedBy":{ + "shape":"StartedBy", + "documentation":"

    The identifier (the user name) of the user who initiated the job run.

    " + }, + "StartedOn":{ + "shape":"Date", + "documentation":"

    The date and time when the job run began.

    " + } + }, + "documentation":"

    Represents one run of an AWS Glue DataBrew job.

    " + }, + "JobRunErrorMessage":{"type":"string"}, + "JobRunId":{ + "type":"string", + "max":255, + "min":1 + }, + "JobRunList":{ + "type":"list", + "member":{"shape":"JobRun"} + }, + "JobRunState":{ + "type":"string", + "enum":[ + "STARTING", + "RUNNING", + "STOPPING", + "STOPPED", + "SUCCEEDED", + "FAILED", + "TIMEOUT" + ] + }, + "JobType":{ + "type":"string", + "enum":[ + "PROFILE", + "RECIPE" + ] + }, + "JsonOptions":{ + "type":"structure", + "members":{ + "MultiLine":{ + "shape":"MultiLine", + "documentation":"

    A value that specifies whether JSON input contains embedded new line characters.

    " + } + }, + "documentation":"

    Represents the JSON-specific options that define how input is to be interpreted by AWS Glue DataBrew.

    " + }, + "Key":{ + "type":"string", + "max":1280, + "min":1 + }, + "LastModifiedBy":{"type":"string"}, + "ListDatasetsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

    The maximum number of results to return in this request.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListDatasetsResponse":{ + "type":"structure", + "required":["Datasets"], + "members":{ + "Datasets":{ + "shape":"DatasetList", + "documentation":"

    A list of datasets that are defined in the current AWS account.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

    " + } + } + }, + "ListJobRunsRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job.

    ", + "location":"uri", + "locationName":"name" + }, + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

    The maximum number of results to return in this request.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token generated by AWS Glue DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListJobRunsResponse":{ + "type":"structure", + "required":["JobRuns"], + "members":{ + "JobRuns":{ + "shape":"JobRunList", + "documentation":"

    A list of job runs that have occurred for the specified job.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

    " + } + } + }, + "ListJobsRequest":{ + "type":"structure", + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

    The name of a dataset. Using this parameter indicates to return only those jobs that act on the specified dataset.

    ", + "location":"querystring", + "locationName":"datasetName" + }, + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

    The maximum number of results to return in this request.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of a project. Using this parameter indicates to return only those jobs that are associated with the specified project.

    ", + "location":"querystring", + "locationName":"projectName" + } + } + }, + "ListJobsResponse":{ + "type":"structure", + "required":["Jobs"], + "members":{ + "Jobs":{ + "shape":"JobList", + "documentation":"

    A list of jobs that are defined in the current AWS account.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

    " + } + } + }, + "ListProjectsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token that can be used in a subsequent request.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

    The maximum number of results to return in this request.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProjectsResponse":{ + "type":"structure", + "required":["Projects"], + "members":{ + "Projects":{ + "shape":"ProjectList", + "documentation":"

    A list of projects that are defined in the current AWS account.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

    " + } + } + }, + "ListRecipeVersionsRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

    The maximum number of results to return in this request.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token that can be used in a subsequent request.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe for which to return version information.

    ", + "location":"querystring", + "locationName":"name" + } + } + }, + "ListRecipeVersionsResponse":{ + "type":"structure", + "required":["Recipes"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

    " + }, + "Recipes":{ + "shape":"RecipeList", + "documentation":"

    A list of versions for the specified recipe.

    " + } + } + }, + "ListRecipesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

    The maximum number of results to return in this request.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token that can be used in a subsequent request.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

    A version identifier. Using this parameter indicates to return only those recipes that have this version identifier.

    ", + "location":"querystring", + "locationName":"recipeVersion" + } + } + }, + "ListRecipesResponse":{ + "type":"structure", + "required":["Recipes"], + "members":{ + "Recipes":{ + "shape":"RecipeList", + "documentation":"

    A list of recipes that are defined in the current AWS account.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

    " + } + } + }, + "ListSchedulesRequest":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

    The name of the job that these schedules apply to.

    ", + "location":"querystring", + "locationName":"jobName" + }, + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

    The maximum number of results to return in this request.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token that can be used in a subsequent request.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSchedulesResponse":{ + "type":"structure", + "required":["Schedules"], + "members":{ + "Schedules":{ + "shape":"ScheduleList", + "documentation":"

    A list of schedules in the current AWS account.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) string that uniquely identifies the DataBrew resource.

    ", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

    A list of tags associated with the DataBrew resource.

    " + } + } + }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1 + }, + "LogSubscription":{ + "type":"string", + "enum":[ + "ENABLE", + "DISABLE" + ] + }, + "MaxCapacity":{"type":"integer"}, + "MaxResults100":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxRetries":{ + "type":"integer", + "min":0 + }, + "Message":{"type":"string"}, + "MultiLine":{"type":"boolean"}, + "NextToken":{ + "type":"string", + "max":2000, + "min":1 + }, + "OpenedBy":{"type":"string"}, + "Operation":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Z\\_]+$" + }, + "Output":{ + "type":"structure", + "required":["Location"], + "members":{ + "CompressionFormat":{ + "shape":"CompressionFormat", + "documentation":"

    The compression algorithm used to compress the output text of the job.

    " + }, + "Format":{ + "shape":"OutputFormat", + "documentation":"

    The data format of the output of the job.

    " + }, + "PartitionColumns":{ + "shape":"ColumnNameList", + "documentation":"

    The names of one or more partition columns for the output of the job.

    " + }, + "Location":{ + "shape":"S3Location", + "documentation":"

    The location in Amazon S3 where the job writes its output.

    " + }, + "Overwrite":{ + "shape":"OverwriteOutput", + "documentation":"

    A value that, if true, means that any data in the location specified for output is overwritten with new output.

    " + } + }, + "documentation":"

    Represents individual output from a particular job run.

    " + }, + "OutputFormat":{ + "type":"string", + "enum":[ + "CSV", + "JSON", + "PARQUET", + "GLUEPARQUET", + "AVRO", + "ORC", + "XML" + ] + }, + "OutputList":{ + "type":"list", + "member":{"shape":"Output"}, + "min":1 + }, + "OverwriteOutput":{"type":"boolean"}, + "ParameterMap":{ + "type":"map", + "key":{"shape":"ParameterName"}, + "value":{"shape":"ParameterValue"} + }, + "ParameterName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Za-z0-9]+$" + }, + "ParameterValue":{ + "type":"string", + "max":8192, + "min":1 + }, + "Preview":{"type":"boolean"}, + "Project":{ + "type":"structure", + "required":[ + "Name", + "RecipeName" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The ID of the AWS account that owns the project.

    " + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

    The date and time that the project was created.

    " + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

    The identifier (the user name) of the user who created the project.

    " + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

    The dataset that the project is to act upon.

    " + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The last modification date and time for the project.

    " + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

    The identifier (user name) of the user who last modified the project.

    " + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

    The unique name of a project.

    " + }, + "RecipeName":{ + "shape":"RecipeName", + "documentation":"

    The name of a recipe that will be developed during a project session.

    " + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the project.

    " + }, + "Sample":{ + "shape":"Sample", + "documentation":"

    The sample size and sampling type to apply to the data. If this parameter isn't specified, then the sample consists of the first 500 rows from the dataset.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags that have been applied to the project.

    " + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the role that will be assumed for this project.

    " + }, + "OpenedBy":{ + "shape":"OpenedBy", + "documentation":"

    The identifier (the user name) of the user that opened the project for use.

    " + }, + "OpenDate":{ + "shape":"Date", + "documentation":"

    The date and time when the project was opened.

    " + } + }, + "documentation":"

    Represents all of the attributes of an AWS Glue DataBrew project.

    " + }, + "ProjectList":{ + "type":"list", + "member":{"shape":"Project"} + }, + "ProjectName":{ + "type":"string", + "max":255, + "min":1 + }, + "PublishRecipeRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Description":{ + "shape":"RecipeDescription", + "documentation":"

    A description of the recipe to be published, for this version of the recipe.

    " + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe to be published.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "PublishRecipeResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe that you published.

    " + } + } + }, + "PublishedBy":{"type":"string"}, + "Recipe":{ + "type":"structure", + "required":["Name"], + "members":{ + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

    The identifier (the user name) of the user who created the recipe.

    " + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

    The date and time that the recipe was created.

    " + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

    The identifier (user name) of the user who last modified the recipe.

    " + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The last modification date and time of the recipe.

    " + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that the recipe is associated with.

    " + }, + "PublishedBy":{ + "shape":"PublishedBy", + "documentation":"

    The identifier (the user name) of the user who published the recipe.

    " + }, + "PublishedDate":{ + "shape":"Date", + "documentation":"

    The date and time when the recipe was published.

    " + }, + "Description":{ + "shape":"RecipeDescription", + "documentation":"

    The description of the recipe.

    " + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

    The unique name for the recipe.

    " + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the recipe.

    " + }, + "Steps":{ + "shape":"RecipeStepList", + "documentation":"

    A list of steps that are defined by the recipe.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags that have been applied to the recipe.

    " + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

    The identifier for the version for the recipe.

    " + } + }, + "documentation":"

    Represents one or more actions to be performed on an AWS Glue DataBrew dataset.

    " + }, + "RecipeAction":{ + "type":"structure", + "required":["Operation"], + "members":{ + "Operation":{ + "shape":"Operation", + "documentation":"

    The name of a valid DataBrew transformation to be performed on the data.

    " + }, + "Parameters":{ + "shape":"ParameterMap", + "documentation":"

    Contextual parameters for the transformation.

    " + } + }, + "documentation":"

    Represents a transformation and associated parameters that are used to apply a change to an AWS Glue DataBrew dataset. For more information, see Recipe structure and Recipe actions reference.

    " + }, + "RecipeDescription":{ + "type":"string", + "max":1024 + }, + "RecipeErrorList":{ + "type":"list", + "member":{"shape":"RecipeVersionErrorDetail"} + }, + "RecipeErrorMessage":{"type":"string"}, + "RecipeList":{ + "type":"list", + "member":{"shape":"Recipe"} + }, + "RecipeName":{ + "type":"string", + "max":255, + "min":1 + }, + "RecipeReference":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe.

    " + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

    The identifier for the version for the recipe.

    " + } + }, + "documentation":"

    Represents all of the attributes of an AWS Glue DataBrew recipe.

    " + }, + "RecipeStep":{ + "type":"structure", + "required":["Action"], + "members":{ + "Action":{ + "shape":"RecipeAction", + "documentation":"

    The particular action to be performed in the recipe step.

    " + }, + "ConditionExpressions":{ + "shape":"ConditionExpressionList", + "documentation":"

    One or more conditions that must be met, in order for the recipe step to succeed.

    All of the conditions in the array must be met; in other words, the conditions are combined using a logical AND operation.

    " + } + }, + "documentation":"

    Represents a single step to be performed in an AWS Glue DataBrew recipe.

    " + }, + "RecipeStepList":{ + "type":"list", + "member":{"shape":"RecipeStep"} + }, + "RecipeVersion":{ + "type":"string", + "max":16, + "min":1 + }, + "RecipeVersionErrorDetail":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

    The HTTP status code for the error.

    " + }, + "ErrorMessage":{ + "shape":"RecipeErrorMessage", + "documentation":"

    The text of the error message.

    " + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

    The identifier for the recipe version associated with this error.

    " + } + }, + "documentation":"

    Represents any errors encountered when attempting to delete multiple recipe versions.

    " + }, + "RecipeVersionList":{ + "type":"list", + "member":{"shape":"RecipeVersion"}, + "max":50, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    One or more resources can't be found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "Result":{"type":"string"}, + "S3Location":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"Bucket", + "documentation":"

    The S3 bucket name.

    " + }, + "Key":{ + "shape":"Key", + "documentation":"

    The unique name of the object in the bucket.

    " + } + }, + "documentation":"

    An Amazon S3 location (bucket name and object key) where DataBrew can read input data, or write output from a job.

    " + }, + "Sample":{ + "type":"structure", + "required":["Type"], + "members":{ + "Size":{ + "shape":"SampleSize", + "documentation":"

    The number of rows in the sample.

    " + }, + "Type":{ + "shape":"SampleType", + "documentation":"

    The way in which DataBrew obtains rows from a dataset.

    " + } + }, + "documentation":"

    Represents the sample size and sampling type for AWS Glue DataBrew to use for interactive data analysis.

    " + }, + "SampleSize":{ + "type":"integer", + "max":5000, + "min":1 + }, + "SampleType":{ + "type":"string", + "enum":[ + "FIRST_N", + "LAST_N", + "RANDOM" + ] + }, + "Schedule":{ + "type":"structure", + "required":["Name"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The ID of the AWS account that owns the schedule.

    " + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

    The identifier (the user name) of the user who created the schedule.

    " + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

    The date and time that the schedule was created.

    " + }, + "JobNames":{ + "shape":"JobNameList", + "documentation":"

    A list of jobs to be run, according to the schedule.

    " + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

    The identifier (the user name) of the user who last modified the schedule.

    " + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The date and time when the schedule was last modified.

    " + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the schedule.

    " + }, + "CronExpression":{ + "shape":"CronExpression", + "documentation":"

    The date(s) and time(s), in cron format, when the job will run.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    Metadata tags that have been applied to the schedule.

    " + }, + "Name":{ + "shape":"ScheduleName", + "documentation":"

    The name of the schedule.

    " + } + }, + "documentation":"

    Represents one or more dates and times when a job is to run.

    " + }, + "ScheduleList":{ + "type":"list", + "member":{"shape":"Schedule"} + }, + "ScheduleName":{ + "type":"string", + "max":255, + "min":1 + }, + "SendProjectSessionActionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Preview":{ + "shape":"Preview", + "documentation":"

    Returns the result of the recipe step, without applying it. The result isn't added to the view frame stack.

    " + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project to apply the action to.

    ", + "location":"uri", + "locationName":"name" + }, + "RecipeStep":{"shape":"RecipeStep"}, + "StepIndex":{ + "shape":"StepIndex", + "documentation":"

    The index from which to preview a step. This index is used to preview the result of steps that have already been applied, so that the resulting view frame is from earlier in the view frame stack.

    " + }, + "ClientSessionId":{ + "shape":"ClientSessionId", + "documentation":"

    A unique identifier for an interactive session that's currently open and ready for work. The action will be performed on this session.

    " + }, + "ViewFrame":{"shape":"ViewFrame"} + } + }, + "SendProjectSessionActionResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Result":{ + "shape":"Result", + "documentation":"

    A message indicating the result of performing the action.

    " + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that was affected by the action.

    " + }, + "ActionId":{ + "shape":"ActionId", + "documentation":"

    A unique identifier for the action that was performed.

    " + } + } + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    A service quota is exceeded.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "SessionStatus":{ + "type":"string", + "enum":[ + "ASSIGNED", + "FAILED", + "INITIALIZING", + "PROVISIONING", + "READY", + "RECYCLING", + "ROTATING", + "TERMINATED", + "TERMINATING", + "UPDATING" + ] + }, + "SheetIndex":{ + "type":"integer", + "max":200, + "min":0 + }, + "SheetIndexList":{ + "type":"list", + "member":{"shape":"SheetIndex"}, + "max":1, + "min":1 + }, + "SheetName":{ + "type":"string", + "max":31, + "min":1 + }, + "SheetNameList":{ + "type":"list", + "member":{"shape":"SheetName"}, + "max":1, + "min":1 + }, + "Source":{ + "type":"string", + "enum":[ + "S3", + "DATA-CATALOG" + ] + }, + "StartColumnIndex":{ + "type":"integer", + "min":0 + }, + "StartJobRunRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job to be run.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "StartJobRunResponse":{ + "type":"structure", + "required":["RunId"], + "members":{ + "RunId":{ + "shape":"JobRunId", + "documentation":"

    A system-generated identifier for this particular job run.

    " + } + } + }, + "StartProjectSessionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project to act upon.

    ", + "location":"uri", + "locationName":"name" + }, + "AssumeControl":{ + "shape":"AssumeControl", + "documentation":"

    A value that, if true, enables you to take control of a session, even if a different client is currently accessing the project.

    " + } + } + }, + "StartProjectSessionResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project to be acted upon.

    " + }, + "ClientSessionId":{ + "shape":"ClientSessionId", + "documentation":"

    A system-generated identifier for the session.

    " + } + } + }, + "StartedBy":{"type":"string"}, + "StepIndex":{ + "type":"integer", + "min":0 + }, + "StopJobRunRequest":{ + "type":"structure", + "required":[ + "Name", + "RunId" + ], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job to be stopped.

    ", + "location":"uri", + "locationName":"name" + }, + "RunId":{ + "shape":"JobRunId", + "documentation":"

    The ID of the job run to be stopped.

    ", + "location":"uri", + "locationName":"runId" + } + } + }, + "StopJobRunResponse":{ + "type":"structure", + "required":["RunId"], + "members":{ + "RunId":{ + "shape":"JobRunId", + "documentation":"

    The ID of the job run that you stopped.

    " + } + } + }, + "TableName":{ + "type":"string", + "max":255, + "min":1 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The DataBrew resource to which tags should be added. The value for this parameter is an Amazon Resource Name (ARN). For DataBrew, you can tag a dataset, a job, a project, or a recipe.

    ", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    One or more tags to be assigned to the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "TargetColumn":{ + "type":"string", + "max":1024, + "min":1 + }, + "Timeout":{ + "type":"integer", + "min":0 + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    A DataBrew resource from which you want to remove a tag or tags. The value for this parameter is an Amazon Resource Name (ARN).

    ", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag keys (names) of one or more tags to be removed.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDatasetRequest":{ + "type":"structure", + "required":[ + "Name", + "Input" + ], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset to be updated.

    ", + "location":"uri", + "locationName":"name" + }, + "FormatOptions":{"shape":"FormatOptions"}, + "Input":{"shape":"Input"} + } + }, + "UpdateDatasetResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

    The name of the dataset that you updated.

    " + } + } + }, + "UpdateProfileJobRequest":{ + "type":"structure", + "required":[ + "Name", + "OutputLocation", + "RoleArn" + ], + "members":{ + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

    " + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

    The encryption mode for the job, which can be one of the following:

    • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

    • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

    " + }, + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job to be updated.

    ", + "location":"uri", + "locationName":"name" + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

    A value that enables or disables Amazon CloudWatch logging for the current AWS account. If logging is enabled, CloudWatch writes one log stream for each job run.

    " + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

    The maximum number of nodes that DataBrew can use when the job processes data.

    " + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

    The maximum number of times to retry the job after a job run fails.

    " + }, + "OutputLocation":{"shape":"S3Location"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be assumed for this request.

    " + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

    The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

    " + } + } + }, + "UpdateProfileJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job that was updated.

    " + } + } + }, + "UpdateProjectRequest":{ + "type":"structure", + "required":[ + "RoleArn", + "Name" + ], + "members":{ + "Sample":{"shape":"Sample"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed for this request.

    " + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project to be updated.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "UpdateProjectResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

    The date and time that the project was last modified.

    " + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that you updated.

    " + } + } + }, + "UpdateRecipeJobRequest":{ + "type":"structure", + "required":[ + "Name", + "Outputs", + "RoleArn" + ], + "members":{ + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

    " + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

    The encryption mode for the job, which can be one of the following:

    • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

    • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

    " + }, + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job to update.

    ", + "location":"uri", + "locationName":"name" + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

    A value that enables or disables Amazon CloudWatch logging for the current AWS account. If logging is enabled, CloudWatch writes one log stream for each job run.

    " + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

    The maximum number of nodes that DataBrew can consume when the job processes data.

    " + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

    The maximum number of times to retry the job after a job run fails.

    " + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

    One or more artifacts that represent the output from running the job.

    " + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be assumed for this request.

    " + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

    The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

    " + } + } + }, + "UpdateRecipeJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

    The name of the job that you updated.

    " + } + } + }, + "UpdateRecipeRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Description":{ + "shape":"RecipeDescription", + "documentation":"

    A description of the recipe.

    " + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe to be updated.

    ", + "location":"uri", + "locationName":"name" + }, + "Steps":{ + "shape":"RecipeStepList", + "documentation":"

    One or more steps to be performed by the recipe. Each step consists of an action, and the conditions under which the action should succeed.

    " + } + } + }, + "UpdateRecipeResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

    The name of the recipe that was updated.

    " + } + } + }, + "UpdateScheduleRequest":{ + "type":"structure", + "required":[ + "CronExpression", + "Name" + ], + "members":{ + "JobNames":{ + "shape":"JobNameList", + "documentation":"

    The name or names of one or more jobs to be run for this schedule.

    " + }, + "CronExpression":{ + "shape":"CronExpression", + "documentation":"

    The date or dates and time or times, in cron format, when the jobs are to be run.

    " + }, + "Name":{ + "shape":"ScheduleName", + "documentation":"

    The name of the schedule to update.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "UpdateScheduleResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

    The name of the schedule that was updated.

    " + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The input parameters for this request failed validation.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ViewFrame":{ + "type":"structure", + "required":["StartColumnIndex"], + "members":{ + "StartColumnIndex":{ + "shape":"StartColumnIndex", + "documentation":"

    The starting index for the range of columns to return in the view frame.

    " + }, + "ColumnRange":{ + "shape":"ColumnRange", + "documentation":"

    The number of columns to include in the view frame, beginning with the StartColumnIndex value and ignoring any columns in the HiddenColumns list.

    " + }, + "HiddenColumns":{ + "shape":"HiddenColumnList", + "documentation":"

    A list of columns to hide in the view frame.

    " + } + }, + "documentation":"

    Represents the data being transformed during an AWS Glue DataBrew project session.

    " + } + }, + "documentation":"

    AWS Glue DataBrew is a visual, cloud-scale data-preparation service. DataBrew simplifies data preparation tasks, targeting data issues that are hard to spot and time-consuming to fix. DataBrew empowers users of all technical levels to visualize the data and perform one-click data transformations, with no coding required.

    " +} diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml new file mode 100644 index 000000000000..bddb3844909b --- /dev/null +++ b/services/dataexchange/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + dataexchange + AWS Java SDK :: Services :: DataExchange + The AWS Java SDK for DataExchange module holds the client classes that are used for + communicating with DataExchange. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.dataexchange + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/dataexchange/src/main/resources/codegen-resources/paginators-1.json b/services/dataexchange/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..d76ccf7f38be --- /dev/null +++ b/services/dataexchange/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListDataSetRevisions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Revisions" + }, + "ListDataSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DataSets" + }, + "ListJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Jobs" + }, + "ListRevisionAssets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Assets" + } + } +} \ No newline at end of file diff --git a/services/dataexchange/src/main/resources/codegen-resources/service-2.json b/services/dataexchange/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..7074c6b0b29f --- /dev/null +++ b/services/dataexchange/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2711 @@ +{ + "metadata": { + "apiVersion": "2017-07-25", + "endpointPrefix": "dataexchange", + "signingName": "dataexchange", + "serviceFullName": "AWS Data Exchange", + "serviceId": "DataExchange", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "dataexchange-2017-07-25", + "signatureVersion": "v4" + }, + "operations": { + "CancelJob": { + "name": "CancelJob", + "http": { + "method": "DELETE", + "requestUri": "/v1/jobs/{JobId}", + "responseCode": 204 + }, + "input": { + "shape": "CancelJobRequest" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "ConflictException", + "documentation": "

    409 response

    " + } + ], + "documentation": "

    This operation cancels a job. Jobs can be cancelled only when they are in the WAITING state.

    " + }, + "CreateDataSet": { + "name": "CreateDataSet", + "http": { + "method": "POST", + "requestUri": "/v1/data-sets", + "responseCode": 201 + }, + "input": { + "shape": "CreateDataSetRequest" + }, + "output": { + "shape": "CreateDataSetResponse", + "documentation": "

    201 response

    " + }, + "errors": [ + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "ServiceLimitExceededException", + "documentation": "

    402 response

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    This operation creates a data set.

    " + }, + "CreateJob": { + "name": "CreateJob", + "http": { + "method": "POST", + "requestUri": "/v1/jobs", + "responseCode": 201 + }, + "input": { + "shape": "CreateJobRequest" + }, + "output": { + "shape": "CreateJobResponse", + "documentation": "

    201 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    This operation creates a job.

    " + }, + "CreateRevision": { + "name": "CreateRevision", + "http": { + "method": "POST", + "requestUri": "/v1/data-sets/{DataSetId}/revisions", + "responseCode": 201 + }, + "input": { + "shape": "CreateRevisionRequest" + }, + "output": { + "shape": "CreateRevisionResponse", + "documentation": "

    201 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    This operation creates a revision for a data set.

    " + }, + "DeleteAsset": { + "name": "DeleteAsset", + "http": { + "method": "DELETE", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteAssetRequest" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    403 response

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ConflictException", + "documentation": "

    409 response

    " + } + ], + "documentation": "

    This operation deletes an asset.

    " + }, + "DeleteDataSet": { + "name": "DeleteDataSet", + "http": { + "method": "DELETE", + "requestUri": "/v1/data-sets/{DataSetId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteDataSetRequest" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    403 response

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ConflictException", + "documentation": "

    409 response

    " + } + ], + "documentation": "

    This operation deletes a data set.

    " + }, + "DeleteRevision": { + "name": "DeleteRevision", + "http": { + "method": "DELETE", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteRevisionRequest" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    403 response

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ConflictException", + "documentation": "

    409 response

    " + } + ], + "documentation": "

    This operation deletes a revision.

    " + }, + "GetAsset": { + "name": "GetAsset", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + "responseCode": 200 + }, + "input": { + "shape": "GetAssetRequest" + }, + "output": { + "shape": "GetAssetResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + } + ], + "documentation": "

    This operation returns information about an asset.

    " + }, + "GetDataSet": { + "name": "GetDataSet", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}", + "responseCode": 200 + }, + "input": { + "shape": "GetDataSetRequest" + }, + "output": { + "shape": "GetDataSetResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + } + ], + "documentation": "

    This operation returns information about a data set.

    " + }, + "GetJob": { + "name": "GetJob", + "http": { + "method": "GET", + "requestUri": "/v1/jobs/{JobId}", + "responseCode": 200 + }, + "input": { + "shape": "GetJobRequest" + }, + "output": { + "shape": "GetJobResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + } + ], + "documentation": "

    This operation returns information about a job.

    " + }, + "GetRevision": { + "name": "GetRevision", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + "responseCode": 200 + }, + "input": { + "shape": "GetRevisionRequest" + }, + "output": { + "shape": "GetRevisionResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + } + ], + "documentation": "

    This operation returns information about a revision.

    " + }, + "ListDataSetRevisions": { + "name": "ListDataSetRevisions", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions", + "responseCode": 200 + }, + "input": { + "shape": "ListDataSetRevisionsRequest" + }, + "output": { + "shape": "ListDataSetRevisionsResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + } + ], + "documentation": "

    This operation lists a data set's revisions sorted by CreatedAt in descending order.

    " + }, + "ListDataSets": { + "name": "ListDataSets", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets", + "responseCode": 200 + }, + "input": { + "shape": "ListDataSetsRequest" + }, + "output": { + "shape": "ListDataSetsResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + } + ], + "documentation": "

    This operation lists your data sets. When listing by origin OWNED, results are sorted by CreatedAt in descending order. When listing by origin ENTITLED, there is no order and the maxResults parameter is ignored.

    " + }, + "ListJobs": { + "name": "ListJobs", + "http": { + "method": "GET", + "requestUri": "/v1/jobs", + "responseCode": 200 + }, + "input": { + "shape": "ListJobsRequest" + }, + "output": { + "shape": "ListJobsResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + } + ], + "documentation": "

    This operation lists your jobs sorted by CreatedAt in descending order.

    " + }, + "ListRevisionAssets": { + "name": "ListRevisionAssets", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets", + "responseCode": 200 + }, + "input": { + "shape": "ListRevisionAssetsRequest" + }, + "output": { + "shape": "ListRevisionAssetsResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + } + ], + "documentation": "

    This operation lists a revision's assets sorted alphabetically in descending order.

    " + }, + "ListTagsForResource": { + "name": "ListTagsForResource", + "http": { + "method": "GET", + "requestUri": "/tags/{resource-arn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceRequest" + }, + "output": { + "shape": "ListTagsForResourceResponse", + "documentation": "

    200 response

    " + }, + "errors": [], + "documentation": "

    This operation lists the tags on the resource.

    " + }, + "StartJob": { + "name": "StartJob", + "http": { + "method": "PATCH", + "requestUri": "/v1/jobs/{JobId}", + "responseCode": 202 + }, + "input": { + "shape": "StartJobRequest" + }, + "output": { + "shape": "StartJobResponse", + "documentation": "

    202 response

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    403 response

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ConflictException", + "documentation": "

    409 response

    " + } + ], + "documentation": "

    This operation starts a job.

    " + }, + "TagResource": { + "name": "TagResource", + "http": { + "method": "POST", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "TagResourceRequest" + }, + "errors": [], + "documentation": "

    This operation tags a resource.

    " + }, + "UntagResource": { + "name": "UntagResource", + "http": { + "method": "DELETE", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "errors": [], + "documentation": "

    This operation removes one or more tags from a resource.

    " + }, + "UpdateAsset": { + "name": "UpdateAsset", + "http": { + "method": "PATCH", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateAssetRequest" + }, + "output": { + "shape": "UpdateAssetResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    403 response

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ConflictException", + "documentation": "

    409 response

    " + } + ], + "documentation": "

    This operation updates an asset.

    " + }, + "UpdateDataSet": { + "name": "UpdateDataSet", + "http": { + "method": "PATCH", + "requestUri": "/v1/data-sets/{DataSetId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateDataSetRequest" + }, + "output": { + "shape": "UpdateDataSetResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    This operation updates a data set.

    " + }, + "UpdateRevision": { + "name": "UpdateRevision", + "http": { + "method": "PATCH", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateRevisionRequest" + }, + "output": { + "shape": "UpdateRevisionResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    500 response

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    403 response

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    429 response

    " + }, + { + "shape": "ConflictException", + "documentation": "

    409 response

    " + } + ], + "documentation": "

    This operation updates a revision.

    " + } + }, + "shapes": { + "AccessDeniedException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "documentation": "

    Access to the resource is denied.

    " + } + }, + "documentation": "

    Access to the resource is denied.

    ", + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 403 + } + }, + "Arn": { + "type": "string", + "documentation": "

    An Amazon Resource Name (ARN) that uniquely identifies an AWS resource.

    " + }, + "AssetDestinationEntry": { + "type": "structure", + "members": { + "AssetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the asset.

    " + }, + "Bucket": { + "shape": "__string", + "documentation": "

    The S3 bucket that is the destination for the asset.

    " + }, + "Key": { + "shape": "__string", + "documentation": "

    The name of the object in Amazon S3 for the asset.

    " + } + }, + "documentation": "

    The destination for the asset.

    ", + "required": [ + "Bucket", + "AssetId" + ] + }, + "AssetDetails": { + "type": "structure", + "members": { + "S3SnapshotAsset": { + "shape": "S3SnapshotAsset" + } + } + }, + "AssetEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the asset.

    " + }, + "AssetDetails": { + "shape": "AssetDetails", + "documentation": "

    Information about the asset, including its size.

    " + }, + "AssetType": { + "shape": "AssetType", + "documentation": "

    The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the asset was created, in ISO 8601 format.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this asset.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the asset.

    " + }, + "Name": { + "shape": "AssetName", + "documentation": "

    The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as the default target S3 object key.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this asset.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The asset ID of the owned asset corresponding to the entitled asset being viewed. This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the asset was last updated, in ISO 8601 format.

    " + } + }, + "documentation": "

    An asset in AWS Data Exchange is a piece of data that can be stored as an S3 object. The asset can be a structured data file, an image file, or some other data file. When you create an import job for your files, you create an asset in AWS Data Exchange for each of those files.

    ", + "required": [ + "AssetType", + "CreatedAt", + "DataSetId", + "Id", + "Arn", + "AssetDetails", + "UpdatedAt", + "RevisionId", + "Name" + ] + }, + "AssetName": { + "type": "string", + "documentation": "

    The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as the default target S3 object key.

    " + }, + "AssetSourceEntry": { + "type": "structure", + "members": { + "Bucket": { + "shape": "__string", + "documentation": "

    The S3 bucket that's part of the source of the asset.

    " + }, + "Key": { + "shape": "__string", + "documentation": "

    The name of the object in Amazon S3 for the asset.

    " + } + }, + "documentation": "

    The source of the assets.

    ", + "required": [ + "Bucket", + "Key" + ] + }, + "AssetType": { + "type": "string", + "documentation": "

    The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

    ", + "enum": [ + "S3_SNAPSHOT" + ] + }, + "CancelJobRequest": { + "type": "structure", + "members": { + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "JobId", + "documentation": "

    The unique identifier for a job.

    " + } + }, + "required": [ + "JobId" + ] + }, + "Code": { + "type": "string", + "enum": [ + "ACCESS_DENIED_EXCEPTION", + "INTERNAL_SERVER_EXCEPTION", + "MALWARE_DETECTED", + "RESOURCE_NOT_FOUND_EXCEPTION", + "SERVICE_QUOTA_EXCEEDED_EXCEPTION", + "VALIDATION_EXCEPTION", + "MALWARE_SCAN_ENCRYPTED_FILE" + ] + }, + "ConflictException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "documentation": "

    The request couldn't be completed because it conflicted with the current state of the resource.

    " + }, + "ResourceId": { + "shape": "__string", + "documentation": "

    The unique identifier for the resource with the conflict.

    " + }, + "ResourceType": { + "shape": "ResourceType", + "documentation": "

    The type of the resource with the conflict.

    " + } + }, + "documentation": "

    The request couldn't be completed because it conflicted with the current state of the resource.

    ", + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 409 + } + }, + "CreateDataSetRequest": { + "type": "structure", + "members": { + "AssetType": { + "shape": "AssetType", + "documentation": "

    The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

    " + }, + "Description": { + "shape": "Description", + "documentation": "

    A description for the data set. This value can be up to 16,348 characters long.

    " + }, + "Name": { + "shape": "Name", + "documentation": "

    The name of the data set.

    " + }, + "Tags": { + "shape": "MapOf__string", + "documentation": "

    A data set tag is an optional label that you can assign to a data set when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to these data sets and revisions.

    " + } + }, + "documentation": "

    The request body for CreateDataSet.

    ", + "required": [ + "AssetType", + "Description", + "Name" + ] + }, + "CreateDataSetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the data set.

    " + }, + "AssetType": { + "shape": "AssetType", + "documentation": "

    The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the data set was created, in ISO 8601 format.

    " + }, + "Description": { + "shape": "Description", + "documentation": "

    The description for the data set.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set.

    " + }, + "Name": { + "shape": "Name", + "documentation": "

    The name of the data set.

    " + }, + "Origin": { + "shape": "Origin", + "documentation": "

    A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).

    " + }, + "OriginDetails": { + "shape": "OriginDetails", + "documentation": "

    If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.

    " + }, + "Tags": { + "shape": "MapOf__string", + "documentation": "

    The tags for the data set.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the data set was last updated, in ISO 8601 format.

    " + } + } + }, + "CreateJobRequest": { + "type": "structure", + "members": { + "Details": { + "shape": "RequestDetails", + "documentation": "

    The details for the CreateJob request.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The type of job to be created.

    " + } + }, + "documentation": "

    The request body for CreateJob.

    ", + "required": [ + "Type", + "Details" + ] + }, + "CreateJobResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the job.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the job was created, in ISO 8601 format.

    " + }, + "Details": { + "shape": "ResponseDetails", + "documentation": "

    Details about the job.

    " + }, + "Errors": { + "shape": "ListOfJobError", + "documentation": "

    The errors associated with jobs.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the job.

    " + }, + "State": { + "shape": "State", + "documentation": "

    The state of the job.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The job type.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the job was last updated, in ISO 8601 format.

    " + } + } + }, + "CreateRevisionRequest": { + "type": "structure", + "members": { + "Comment": { + "shape": "__stringMin0Max16384", + "documentation": "

    An optional comment about the revision.

    " + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "Tags": { + "shape": "MapOf__string", + "documentation": "

    A revision tag is an optional label that you can assign to a revision when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to these data sets and revisions.

    " + } + }, + "documentation": "

    The request body for CreateRevision.

    ", + "required": [ + "DataSetId" + ] + }, + "CreateRevisionResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the revision.

    " + }, + "Comment": { + "shape": "__stringMin0Max16384", + "documentation": "

    An optional comment about the revision.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the revision was created, in ISO 8601 format.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this revision.

    " + }, + "Finalized": { + "shape": "__boolean", + "documentation": "

    To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.

    Finalized revisions can be published through the AWS Data Exchange console or through the AWS Marketplace Catalog API, using its StartChangeSet action. When using the API, revisions are uniquely identified by their ARN.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.

    " + }, + "Tags": { + "shape": "MapOf__string", + "documentation": "

    The tags for the revision.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the revision was last updated, in ISO 8601 format.

    " + } + } + }, + "DataSetEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the data set.

    " + }, + "AssetType": { + "shape": "AssetType", + "documentation": "

    The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the data set was created, in ISO 8601 format.

    " + }, + "Description": { + "shape": "Description", + "documentation": "

    The description for the data set.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set.

    " + }, + "Name": { + "shape": "Name", + "documentation": "

    The name of the data set.

    " + }, + "Origin": { + "shape": "Origin", + "documentation": "

    A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).

    " + }, + "OriginDetails": { + "shape": "OriginDetails", + "documentation": "

    If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the data set was last updated, in ISO 8601 format.

    " + } + }, + "documentation": "

    A data set is an AWS resource with one or more revisions.

    ", + "required": [ + "Origin", + "AssetType", + "Description", + "CreatedAt", + "Id", + "Arn", + "UpdatedAt", + "Name" + ] + }, + "DeleteAssetRequest": { + "type": "structure", + "members": { + "AssetId": { + "shape": "__string", + "location": "uri", + "locationName": "AssetId", + "documentation": "

    The unique identifier for an asset.

    " + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId", + "documentation": "

    The unique identifier for a revision.

    " + } + }, + "required": [ + "RevisionId", + "AssetId", + "DataSetId" + ] + }, + "DeleteDataSetRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + } + }, + "required": [ + "DataSetId" + ] + }, + "DeleteRevisionRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId", + "documentation": "

    The unique identifier for a revision.

    " + } + }, + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "Description": { + "type": "string", + "documentation": "

    A description of a resource.

    " + }, + "Details": { + "type": "structure", + "members": { + "ImportAssetFromSignedUrlJobErrorDetails": { + "shape": "ImportAssetFromSignedUrlJobErrorDetails" + }, + "ImportAssetsFromS3JobErrorDetails": { + "shape": "ListOfAssetSourceEntry" + } + } + }, + "ExportAssetToSignedUrlRequestDetails": { + "type": "structure", + "members": { + "AssetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the asset that is exported to a signed URL.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this export job.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this export request.

    " + } + }, + "documentation": "

    Details of the operation to be performed by the job.

    ", + "required": [ + "DataSetId", + "AssetId", + "RevisionId" + ] + }, + "ExportAssetToSignedUrlResponseDetails": { + "type": "structure", + "members": { + "AssetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the asset associated with this export job.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this export job.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this export response.

    " + }, + "SignedUrl": { + "shape": "__string", + "documentation": "

    The signed URL for the export request.

    " + }, + "SignedUrlExpiresAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the signed URL expires, in ISO 8601 format.

    " + } + }, + "documentation": "

    The details of the export to signed URL response.

    ", + "required": [ + "DataSetId", + "AssetId", + "RevisionId" + ] + }, + "ExportAssetsToS3RequestDetails": { + "type": "structure", + "members": { + "AssetDestinations": { + "shape": "ListOfAssetDestinationEntry", + "documentation": "

    The destination for the asset.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this export job.

    " + }, + "Encryption": { + "shape": "ExportServerSideEncryption", + "documentation": "

    Encryption configuration for the export job.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this export request.

    " + } + }, + "documentation": "

    Details of the operation to be performed by the job.

    ", + "required": [ + "AssetDestinations", + "DataSetId", + "RevisionId" + ] + }, + "ExportAssetsToS3ResponseDetails": { + "type": "structure", + "members": { + "AssetDestinations": { + "shape": "ListOfAssetDestinationEntry", + "documentation": "

    The destination in Amazon S3 where the asset is exported.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this export job.

    " + }, + "Encryption": { + "shape": "ExportServerSideEncryption", + "documentation": "

    Encryption configuration of the export job.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this export response.

    " + } + }, + "documentation": "

    Details about the export to Amazon S3 response.

    ", + "required": [ + "AssetDestinations", + "DataSetId", + "RevisionId" + ] + }, + "ExportServerSideEncryption": { + "type": "structure", + "members": { + "KmsKeyArn": { + "shape": "__string", + "documentation": "

    The Amazon Resource Name (ARN) of the AWS KMS key you want to use to encrypt the Amazon S3 objects. This parameter is required if you choose aws:kms as an encryption type.

    " + }, + "Type": { + "shape": "ServerSideEncryptionTypes", + "documentation": "

    The type of server side encryption used for encrypting the objects in Amazon S3.

    " + } + }, + "documentation": "

    Encryption configuration of the export job. Includes the encryption type as well as the AWS KMS key. The KMS key is only necessary if you chose the KMS encryption type.

    ", + "required": [ + "Type" + ] + }, + "GetAssetRequest": { + "type": "structure", + "members": { + "AssetId": { + "shape": "__string", + "location": "uri", + "locationName": "AssetId", + "documentation": "

    The unique identifier for an asset.

    " + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId", + "documentation": "

    The unique identifier for a revision.

    " + } + }, + "required": [ + "RevisionId", + "AssetId", + "DataSetId" + ] + }, + "GetAssetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the asset.

    " + }, + "AssetDetails": { + "shape": "AssetDetails", + "documentation": "

    Information about the asset, including its size.

    " + }, + "AssetType": { + "shape": "AssetType", + "documentation": "

    The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the asset was created, in ISO 8601 format.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this asset.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the asset.

    " + }, + "Name": { + "shape": "AssetName", + "documentation": "

    The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as the default target S3 object key.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this asset.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The asset ID of the owned asset corresponding to the entitled asset being viewed. This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the asset was last updated, in ISO 8601 format.

    " + } + } + }, + "GetDataSetRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + } + }, + "required": [ + "DataSetId" + ] + }, + "GetDataSetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the data set.

    " + }, + "AssetType": { + "shape": "AssetType", + "documentation": "

    The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the data set was created, in ISO 8601 format.

    " + }, + "Description": { + "shape": "Description", + "documentation": "

    The description for the data set.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set.

    " + }, + "Name": { + "shape": "Name", + "documentation": "

    The name of the data set.

    " + }, + "Origin": { + "shape": "Origin", + "documentation": "

    A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).

    " + }, + "OriginDetails": { + "shape": "OriginDetails", + "documentation": "

    If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.

    " + }, + "Tags": { + "shape": "MapOf__string", + "documentation": "

    The tags for the data set.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the data set was last updated, in ISO 8601 format.

    " + } + } + }, + "GetJobRequest": { + "type": "structure", + "members": { + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "JobId", + "documentation": "

    The unique identifier for a job.

    " + } + }, + "required": [ + "JobId" + ] + }, + "GetJobResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the job.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the job was created, in ISO 8601 format.

    " + }, + "Details": { + "shape": "ResponseDetails", + "documentation": "

    Details about the job.

    " + }, + "Errors": { + "shape": "ListOfJobError", + "documentation": "

    The errors associated with jobs.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the job.

    " + }, + "State": { + "shape": "State", + "documentation": "

    The state of the job.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The job type.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the job was last updated, in ISO 8601 format.

    " + } + } + }, + "GetRevisionRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId", + "documentation": "

    The unique identifier for a revision.

    " + } + }, + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "GetRevisionResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the revision.

    " + }, + "Comment": { + "shape": "__stringMin0Max16384", + "documentation": "

    An optional comment about the revision.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the revision was created, in ISO 8601 format.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this revision.

    " + }, + "Finalized": { + "shape": "__boolean", + "documentation": "

    To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.

    Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.

    " + }, + "Tags": { + "shape": "MapOf__string", + "documentation": "

    The tags for the revision.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the revision was last updated, in ISO 8601 format.

    " + } + } + }, + "Id": { + "type": "string", + "documentation": "

    A unique identifier.

    " + }, + "ImportAssetFromSignedUrlJobErrorDetails": { + "type": "structure", + "members": { + "AssetName": { + "shape": "AssetName" + } + }, + "required": [ + "AssetName" + ] + }, + "ImportAssetFromSignedUrlRequestDetails": { + "type": "structure", + "members": { + "AssetName": { + "shape": "AssetName", + "documentation": "

    The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this import job.

    " + }, + "Md5Hash": { + "shape": "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093", + "documentation": "

    The Base64-encoded MD5 hash for the asset, used to ensure the integrity of the file at that location.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this import request.

    " + } + }, + "documentation": "

    Details of the operation to be performed by the job.

    ", + "required": [ + "DataSetId", + "Md5Hash", + "RevisionId", + "AssetName" + ] + }, + "ImportAssetFromSignedUrlResponseDetails": { + "type": "structure", + "members": { + "AssetName": { + "shape": "AssetName", + "documentation": "

    The name for the asset associated with this import response.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this import job.

    " + }, + "Md5Hash": { + "shape": "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093", + "documentation": "

    The Base64-encoded MD5 hash for the asset, used to ensure the integrity of the file at that location.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this import response.

    " + }, + "SignedUrl": { + "shape": "__string", + "documentation": "

    The signed URL.

    " + }, + "SignedUrlExpiresAt": { + "shape": "Timestamp", + "documentation": "

    The time and date at which the signed URL expires, in ISO 8601 format.

    " + } + }, + "documentation": "

    The details in the response for an import request, including the signed URL and other information.
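
    As a rough illustration of the Md5Hash field described above, the following sketch computes the Base64-encoded MD5 digest of a local file using only the standard Java library, before the file is uploaded to the returned SignedUrl. The file name is a placeholder.

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.util.Base64;

public class Md5HashExample {
    public static void main(String[] args) throws Exception {
        // Read the file that will later be uploaded to the signed URL (placeholder name).
        byte[] content = Files.readAllBytes(Path.of("my-asset.csv"));

        // Compute the MD5 digest and Base64-encode it; the result is the 24-character
        // value expected by the Md5Hash field of ImportAssetFromSignedUrlRequestDetails.
        byte[] digest = MessageDigest.getInstance("MD5").digest(content);
        String md5Hash = Base64.getEncoder().encodeToString(digest);

        System.out.println(md5Hash);
    }
}
```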

    ", + "required": [ + "DataSetId", + "AssetName", + "RevisionId" + ] + }, + "ImportAssetsFromS3RequestDetails": { + "type": "structure", + "members": { + "AssetSources": { + "shape": "ListOfAssetSourceEntry", + "documentation": "

    The list of Amazon S3 bucket and object key pairs.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this import job.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this import request.

    " + } + }, + "documentation": "

    Details of the operation to be performed by the job.

    ", + "required": [ + "DataSetId", + "AssetSources", + "RevisionId" + ] + }, + "ImportAssetsFromS3ResponseDetails": { + "type": "structure", + "members": { + "AssetSources": { + "shape": "ListOfAssetSourceEntry", + "documentation": "

    The list of Amazon S3 bucket and object key pairs.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this import job.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this import response.

    " + } + }, + "documentation": "

    Details from an import from Amazon S3 response.

    ", + "required": [ + "DataSetId", + "AssetSources", + "RevisionId" + ] + }, + "InternalServerException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "documentation": "The message identifying the service exception that occurred." + } + }, + "documentation": "An exception occurred with the service.", + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 500 + } + }, + "JobEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the job.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the job was created, in ISO 8601 format.

    " + }, + "Details": { + "shape": "ResponseDetails", + "documentation": "

    Details of the operation to be performed by the job, such as export destination details or import source details.

    " + }, + "Errors": { + "shape": "ListOfJobError", + "documentation": "

    Errors for jobs.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the job.

    " + }, + "State": { + "shape": "State", + "documentation": "

    The state of the job.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The job type.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the job was last updated, in ISO 8601 format.

    " + } + }, + "documentation": "AWS Data Exchange Jobs are asynchronous import or export operations used to create or copy assets. A data set owner can both import and export as they see fit. Someone with an entitlement to a data set can only export. Jobs are deleted 90 days after they are created.", + "required": [ + "Type", + "Details", + "State", + "CreatedAt", + "Id", + "Arn", + "UpdatedAt" + ] + }, + "JobError": { + "type": "structure", + "members": { + "Code": { + "shape": "Code", + "documentation": "The code for the job error." + }, + "Details": { + "shape": "Details" + }, + "LimitName": { + "shape": "JobErrorLimitName", + "documentation": "

    The name of the limit that was reached.

    " + }, + "LimitValue": { + "shape": "__double", + "documentation": "The value of the exceeded limit." + }, + "Message": { + "shape": "__string", + "documentation": "The message related to the job error." + }, + "ResourceId": { + "shape": "__string", + "documentation": "The unique identifier for the resource related to the error." + }, + "ResourceType": { + "shape": "JobErrorResourceTypes", + "documentation": "The type of resource related to the error." + } + }, + "documentation": "An error that occurred with the job request.", + "required": [ + "Message", + "Code" + ] + }, + "JobErrorLimitName": { + "type": "string", + "documentation": "The name of the limit that was reached.", + "enum": [ + "Assets per revision", + "Asset size in GB" + ] + }, + "JobErrorResourceTypes": { + "type": "string", + "documentation": "The types of resource which the job error can apply to.", + "enum": [ + "REVISION", + "ASSET" + ] + }, + "LimitName": { + "type": "string", + "enum": [ + "Products per account", + "Data sets per account", + "Data sets per product", + "Revisions per data set", + "Assets per revision", + "Assets per import job from Amazon S3", + "Asset per export job from Amazon S3", + "Asset size in GB", + "Concurrent in progress jobs to import assets from Amazon S3", + "Concurrent in progress jobs to import assets from a signed URL", + "Concurrent in progress jobs to export assets to Amazon S3", + "Concurrent in progress jobs to export assets to a signed URL" + ] + }, + "ListDataSetRevisionsRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

    The maximum number of results returned by a single call.

    " + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The token value retrieved from a previous call to access the next page of results.

    " + } + }, + "required": [ + "DataSetId" + ] + }, + "ListDataSetRevisionsResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "NextToken", + "documentation": "

    The token value retrieved from a previous call to access the next page of results.

    " + }, + "Revisions": { + "shape": "ListOfRevisionEntry", + "documentation": "

    The revision objects listed by the request.

    " + } + } + }, + "ListDataSetsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

    The maximum number of results returned by a single call.

    " + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The token value retrieved from a previous call to access the next page of results.

    " + }, + "Origin": { + "shape": "__string", + "location": "querystring", + "locationName": "origin", + "documentation": "

    A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).

    " + } + } + }, + "ListDataSetsResponse": { + "type": "structure", + "members": { + "DataSets": { + "shape": "ListOfDataSetEntry", + "documentation": "

    The data set objects listed by the request.

    " + }, + "NextToken": { + "shape": "NextToken", + "documentation": "

    The token value retrieved from a previous call to access the next page of results.
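
    For illustration, a minimal pagination sketch over ListDataSets, assuming the standard SDK v2 codegen naming for the generated client and model classes (DataExchangeClient, ListDataSetsRequest, and so on); it keeps calling the operation until NextToken is no longer returned.

```java
import software.amazon.awssdk.services.dataexchange.DataExchangeClient;
import software.amazon.awssdk.services.dataexchange.model.ListDataSetsRequest;
import software.amazon.awssdk.services.dataexchange.model.ListDataSetsResponse;

public class ListDataSetsExample {
    public static void main(String[] args) {
        DataExchangeClient dataExchange = DataExchangeClient.create();

        String nextToken = null;
        do {
            // MaxResults is capped at 25 per call; NextToken drives pagination.
            ListDataSetsResponse page = dataExchange.listDataSets(ListDataSetsRequest.builder()
                    .origin("OWNED")
                    .maxResults(25)
                    .nextToken(nextToken)
                    .build());

            page.dataSets().forEach(dataSet ->
                    System.out.println(dataSet.id() + " : " + dataSet.name()));

            nextToken = page.nextToken();
        } while (nextToken != null);
    }
}
```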

    " + } + } + }, + "ListJobsRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "querystring", + "locationName": "dataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

    The maximum number of results returned by a single call.

    " + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The token value retrieved from a previous call to access the next page of results.

    " + }, + "RevisionId": { + "shape": "__string", + "location": "querystring", + "locationName": "revisionId", + "documentation": "

    The unique identifier for a revision.

    " + } + } + }, + "ListJobsResponse": { + "type": "structure", + "members": { + "Jobs": { + "shape": "ListOfJobEntry", + "documentation": "

    The jobs listed by the request.

    " + }, + "NextToken": { + "shape": "NextToken", + "documentation": "

    The token value retrieved from a previous call to access the next page of results.

    " + } + } + }, + "ListOfAssetDestinationEntry": { + "type": "list", + "documentation": "

    The destination where the assets will be exported.

    ", + "member": { + "shape": "AssetDestinationEntry" + } + }, + "ListOfAssetSourceEntry": { + "type": "list", + "documentation": "

    The list of sources for the assets.

    ", + "member": { + "shape": "AssetSourceEntry" + } + }, + "ListRevisionAssetsRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

    The maximum number of results returned by a single call.

    " + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The token value retrieved from a previous call to access the next page of results.

    " + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId", + "documentation": "

    The unique identifier for a revision.

    " + } + }, + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "ListRevisionAssetsResponse": { + "type": "structure", + "members": { + "Assets": { + "shape": "ListOfAssetEntry", + "documentation": "

    The asset objects listed by the request.

    " + }, + "NextToken": { + "shape": "NextToken", + "documentation": "

    The token value retrieved from a previous call to access the next page of results.

    " + } + } + }, + "ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "

    An Amazon Resource Name (ARN) that uniquely identifies an AWS resource.

    " + } + }, + "required": [ + "ResourceArn" + ] + }, + "ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "shape": "MapOf__string", + "locationName": "tags", + "documentation": "A label that consists of a customer-defined key and an optional value." + } + } + }, + "MaxResults": { + "type": "integer", + "min": 1, + "max": 25 + }, + "Name": { + "type": "string", + "documentation": "The name of the model." + }, + "NextToken": { + "type": "string", + "documentation": "

    The token value retrieved from a previous call to access the next page of results.

    " + }, + "Origin": { + "type": "string", + "documentation": "

    A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers). When an owned data set is published in a product, AWS Data Exchange creates a copy of the data set. Subscribers can access that copy of the data set as an entitled data set.

    ", + "enum": [ + "OWNED", + "ENTITLED" + ] + }, + "OriginDetails": { + "type": "structure", + "members": { + "ProductId": { + "shape": "__string" + } + }, + "required": [ + "ProductId" + ] + }, + "RequestDetails": { + "type": "structure", + "members": { + "ExportAssetToSignedUrl": { + "shape": "ExportAssetToSignedUrlRequestDetails", + "documentation": "

    Details about the export to signed URL request.

    " + }, + "ExportAssetsToS3": { + "shape": "ExportAssetsToS3RequestDetails", + "documentation": "

    Details about the export to Amazon S3 request.

    " + }, + "ImportAssetFromSignedUrl": { + "shape": "ImportAssetFromSignedUrlRequestDetails", + "documentation": "

    Details about the import from signed URL request.

    " + }, + "ImportAssetsFromS3": { + "shape": "ImportAssetsFromS3RequestDetails", + "documentation": "

    Details about the import from Amazon S3 request.

    " + } + }, + "documentation": "

    The details for the request.
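
    A hedged sketch of how RequestDetails might be assembled for an import-from-Amazon-S3 job, assuming the CreateJob and StartJob operations defined elsewhere in this model and the usual SDK v2 codegen naming; the bucket, key, and identifiers are placeholders.

```java
import software.amazon.awssdk.services.dataexchange.DataExchangeClient;
import software.amazon.awssdk.services.dataexchange.model.AssetSourceEntry;
import software.amazon.awssdk.services.dataexchange.model.CreateJobRequest;
import software.amazon.awssdk.services.dataexchange.model.CreateJobResponse;
import software.amazon.awssdk.services.dataexchange.model.ImportAssetsFromS3RequestDetails;
import software.amazon.awssdk.services.dataexchange.model.RequestDetails;
import software.amazon.awssdk.services.dataexchange.model.StartJobRequest;
import software.amazon.awssdk.services.dataexchange.model.Type;

public class ImportFromS3Example {
    public static void main(String[] args) {
        DataExchangeClient dataExchange = DataExchangeClient.create();

        // Placeholder identifiers; a real caller supplies its own data set and revision IDs.
        String dataSetId = "example-data-set-id";
        String revisionId = "example-revision-id";

        // The import source is a list of S3 bucket and object key pairs, as described above.
        RequestDetails details = RequestDetails.builder()
                .importAssetsFromS3(ImportAssetsFromS3RequestDetails.builder()
                        .dataSetId(dataSetId)
                        .revisionId(revisionId)
                        .assetSources(AssetSourceEntry.builder()
                                .bucket("my-source-bucket")
                                .key("data/asset-1.csv")
                                .build())
                        .build())
                .build();

        // A created job must then be started explicitly with StartJob.
        CreateJobResponse job = dataExchange.createJob(CreateJobRequest.builder()
                .type(Type.IMPORT_ASSETS_FROM_S3)
                .details(details)
                .build());

        dataExchange.startJob(StartJobRequest.builder().jobId(job.id()).build());
    }
}
```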

    " + }, + "ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "documentation": "

    The resource couldn't be found.

    " + }, + "ResourceId": { + "shape": "__string", + "documentation": "

    The unique identifier for the resource that couldn't be found.

    " + }, + "ResourceType": { + "shape": "ResourceType", + "documentation": "

    The type of resource that couldn't be found.

    " + } + }, + "documentation": "

    The resource couldn't be found.

    ", + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 404 + } + }, + "ResourceType": { + "type": "string", + "enum": [ + "DATA_SET", + "REVISION", + "ASSET", + "JOB" + ] + }, + "ResponseDetails": { + "type": "structure", + "members": { + "ExportAssetToSignedUrl": { + "shape": "ExportAssetToSignedUrlResponseDetails", + "documentation": "

    Details for the export to signed URL response.

    " + }, + "ExportAssetsToS3": { + "shape": "ExportAssetsToS3ResponseDetails", + "documentation": "

    Details for the export to Amazon S3 response.

    " + }, + "ImportAssetFromSignedUrl": { + "shape": "ImportAssetFromSignedUrlResponseDetails", + "documentation": "

    Details for the import from signed URL response.

    " + }, + "ImportAssetsFromS3": { + "shape": "ImportAssetsFromS3ResponseDetails", + "documentation": "

    Details for the import from Amazon S3 response.

    " + } + }, + "documentation": "

    Details for the response.

    " + }, + "RevisionEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the revision.

    " + }, + "Comment": { + "shape": "__stringMin0Max16384", + "documentation": "

    An optional comment about the revision.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the revision was created, in ISO 8601 format.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this revision.

    " + }, + "Finalized": { + "shape": "__boolean", + "documentation": "

    To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.

    Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the revision was last updated, in ISO 8601 format.

    " + } + }, + "documentation": "

    A revision is a container for one or more assets.

    ", + "required": [ + "CreatedAt", + "DataSetId", + "Id", + "Arn", + "UpdatedAt" + ] + }, + "S3SnapshotAsset": { + "type": "structure", + "members": { + "Size": { + "shape": "__doubleMin0", + "documentation": "

    The size of the Amazon S3 object that is the asset.

    " + } + }, + "documentation": "

    The S3 object that is the asset.

    ", + "required": [ + "Size" + ] + }, + "ServerSideEncryptionTypes": { + "type": "string", + "documentation": "

    The types of encryption supported in export jobs to Amazon S3.

    ", + "enum": [ + "aws:kms", + "AES256" + ] + }, + "ServiceLimitExceededException": { + "type": "structure", + "members": { + "LimitName": { + "shape": "LimitName", + "documentation": "

    The name of the quota that was exceeded.

    " + }, + "LimitValue": { + "shape": "__double", + "documentation": "

    The maximum value for the service-specific limit.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The request has exceeded the quotas imposed by the service.

    " + } + }, + "documentation": "

    The request has exceeded the quotas imposed by the service.

    ", + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 402 + } + }, + "StartJobRequest": { + "type": "structure", + "members": { + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "JobId", + "documentation": "

    The unique identifier for a job.

    " + } + }, + "required": [ + "JobId" + ] + }, + "StartJobResponse": { + "type": "structure", + "members": {} + }, + "State": { + "type": "string", + "enum": [ + "WAITING", + "IN_PROGRESS", + "ERROR", + "COMPLETED", + "CANCELLED", + "TIMED_OUT" + ] + }, + "TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "

    An Amazon Resource Name (ARN) that uniquely identifies an AWS resource.

    " + }, + "Tags": { + "shape": "MapOf__string", + "locationName": "tags", + "documentation": "A label that consists of a customer-defined key and an optional value." + } + }, + "documentation": "

    The request body for TagResource.

    ", + "required": [ + "ResourceArn", + "Tags" + ] + }, + "ThrottlingException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "documentation": "

    The limit on the number of requests per second was exceeded.

    " + } + }, + "documentation": "

    The limit on the number of requests per second was exceeded.

    ", + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 429 + } + }, + "Timestamp": { + "type": "timestamp", + "documentation": "

    Dates and times in AWS Data Exchange are recorded in ISO 8601 format.

    ", + "timestampFormat": "iso8601" + }, + "Type": { + "type": "string", + "enum": [ + "IMPORT_ASSETS_FROM_S3", + "IMPORT_ASSET_FROM_SIGNED_URL", + "EXPORT_ASSETS_TO_S3", + "EXPORT_ASSET_TO_SIGNED_URL" + ] + }, + "UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "

    An Amazon Resource Name (ARN) that uniquely identifies an AWS resource.

    " + }, + "TagKeys": { + "shape": "ListOf__string", + "location": "querystring", + "locationName": "tagKeys", + "documentation": "The key tags." + } + }, + "required": [ + "TagKeys", + "ResourceArn" + ] + }, + "UpdateAssetRequest": { + "type": "structure", + "members": { + "AssetId": { + "shape": "__string", + "location": "uri", + "locationName": "AssetId", + "documentation": "

    The unique identifier for an asset.

    " + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "Name": { + "shape": "AssetName", + "documentation": "

    The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as the default target S3 object key.

    " + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId", + "documentation": "

    The unique identifier for a revision.

    " + } + }, + "documentation": "

    The request body for UpdateAsset.

    ", + "required": [ + "RevisionId", + "AssetId", + "DataSetId", + "Name" + ] + }, + "UpdateAssetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the asset.

    " + }, + "AssetDetails": { + "shape": "AssetDetails", + "documentation": "

    Information about the asset, including its size.

    " + }, + "AssetType": { + "shape": "AssetType", + "documentation": "

    The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the asset was created, in ISO 8601 format.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this asset.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the asset.

    " + }, + "Name": { + "shape": "AssetName", + "documentation": "

    The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as the default target S3 object key.

    " + }, + "RevisionId": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision associated with this asset.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The asset ID of the owned asset corresponding to the entitled asset being viewed. This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the asset was last updated, in ISO 8601 format.

    " + } + } + }, + "UpdateDataSetRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "Description": { + "shape": "Description", + "documentation": "

    The description for the data set.

    " + }, + "Name": { + "shape": "Name", + "documentation": "

    The name of the data set.

    " + } + }, + "documentation": "

    The request body for UpdateDataSet.

    ", + "required": [ + "DataSetId" + ] + }, + "UpdateDataSetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the data set.

    " + }, + "AssetType": { + "shape": "AssetType", + "documentation": "

    The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the data set was created, in ISO 8601 format.

    " + }, + "Description": { + "shape": "Description", + "documentation": "

    The description for the data set.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set.

    " + }, + "Name": { + "shape": "Name", + "documentation": "

    The name of the data set.

    " + }, + "Origin": { + "shape": "Origin", + "documentation": "

    A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).

    " + }, + "OriginDetails": { + "shape": "OriginDetails", + "documentation": "

    If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the data set was last updated, in ISO 8601 format.

    " + } + } + }, + "UpdateRevisionRequest": { + "type": "structure", + "members": { + "Comment": { + "shape": "__stringMin0Max16384", + "documentation": "

    An optional comment about the revision.

    " + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId", + "documentation": "

    The unique identifier for a data set.

    " + }, + "Finalized": { + "shape": "__boolean", + "documentation": "

    Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.
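
    A minimal sketch of finalizing a revision through UpdateRevision, assuming the standard SDK v2 codegen naming for the generated DataExchange client; the identifiers are placeholders.

```java
import software.amazon.awssdk.services.dataexchange.DataExchangeClient;
import software.amazon.awssdk.services.dataexchange.model.UpdateRevisionRequest;
import software.amazon.awssdk.services.dataexchange.model.UpdateRevisionResponse;

public class FinalizeRevisionExample {
    public static void main(String[] args) {
        DataExchangeClient dataExchange = DataExchangeClient.create();

        // Placeholder identifiers for the data set and the revision to finalize.
        UpdateRevisionResponse revision = dataExchange.updateRevision(UpdateRevisionRequest.builder()
                .dataSetId("example-data-set-id")
                .revisionId("example-revision-id")
                .comment("Initial delivery")
                .finalized(true)   // marks the revision read-only so it can be published
                .build());

        System.out.println("Finalized: " + revision.finalized() + ", ARN: " + revision.arn());
    }
}
```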

    " + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId", + "documentation": "

    The unique identifier for a revision.

    " + } + }, + "documentation": "

    The request body for UpdateRevision.

    ", + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "UpdateRevisionResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn", + "documentation": "

    The ARN for the revision.

    " + }, + "Comment": { + "shape": "__stringMin0Max16384", + "documentation": "

    An optional comment about the revision.

    " + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the revision was created, in ISO 8601 format.

    " + }, + "DataSetId": { + "shape": "Id", + "documentation": "

    The unique identifier for the data set associated with this revision.

    " + }, + "Finalized": { + "shape": "__boolean", + "documentation": "

    To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.

    Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.

    " + }, + "Id": { + "shape": "Id", + "documentation": "

    The unique identifier for the revision.

    " + }, + "SourceId": { + "shape": "Id", + "documentation": "

    The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.

    " + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

    The date and time that the revision was last updated, in ISO 8601 format.

    " + } + } + }, + "ValidationException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "documentation": "

    The message that informs you about what was invalid about the request.

    " + } + }, + "documentation": "

    The request was invalid.

    ", + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 400 + } + }, + "__boolean": { + "type": "boolean" + }, + "__double": { + "type": "double" + }, + "__doubleMin0": { + "type": "double" + }, + "ListOfAssetEntry": { + "type": "list", + "member": { + "shape": "AssetEntry" + } + }, + "ListOfDataSetEntry": { + "type": "list", + "member": { + "shape": "DataSetEntry" + } + }, + "ListOfJobEntry": { + "type": "list", + "member": { + "shape": "JobEntry" + } + }, + "ListOfJobError": { + "type": "list", + "member": { + "shape": "JobError" + } + }, + "ListOfRevisionEntry": { + "type": "list", + "member": { + "shape": "RevisionEntry" + } + }, + "ListOf__string": { + "type": "list", + "member": { + "shape": "__string" + } + }, + "MapOf__string": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "__string" + } + }, + "__string": { + "type": "string" + }, + "__stringMin0Max16384": { + "type": "string", + "min": 0, + "max": 16384 + }, + "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093": { + "type": "string", + "min": 24, + "max": 24, + "pattern": "/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/" + } + }, + "documentation": "

    AWS Data Exchange is a service that makes it easy for AWS customers to exchange data in the cloud. You can use the AWS Data Exchange APIs to create, update, manage, and access file-based data sets in the AWS Cloud.

    As a subscriber, you can view and access the data sets that you have an entitlement to through a subscription. You can use the APIs to download or copy your entitled data sets to Amazon S3 for use across a variety of AWS analytics and machine learning services.

    As a provider, you can create and manage your data sets that you would like to publish to a product. Being able to package and provide your data sets into products requires a few steps to determine eligibility. For more information, visit the AWS Data Exchange User Guide.

    A data set is a collection of data that can be changed or updated over time. Data sets can be updated using revisions, which represent a new version or incremental change to a data set. A revision contains one or more assets. An asset in AWS Data Exchange is a piece of data that can be stored as an Amazon S3 object. The asset can be a structured data file, an image file, or some other data file. Jobs are asynchronous import or export operations used to create or copy assets.

    " +} \ No newline at end of file diff --git a/services/datapipeline/build.properties b/services/datapipeline/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/datapipeline/build.properties +++ b/services/datapipeline/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index 4fc403581b11..777ebe898a35 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + detective + AWS Java SDK :: Services :: Detective + The AWS Java SDK for Detective module holds the client classes that are used for + communicating with Detective. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.detective + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/detective/src/main/resources/codegen-resources/paginators-1.json b/services/detective/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..8ead136f5ee8 --- /dev/null +++ b/services/detective/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,19 @@ +{ + "pagination": { + "ListGraphs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListInvitations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListMembers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/detective/src/main/resources/codegen-resources/service-2.json b/services/detective/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..67d527b795c3 --- /dev/null +++ b/services/detective/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,655 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-10-26", + "endpointPrefix":"api.detective", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Detective", + "serviceId":"Detective", + "signatureVersion":"v4", + "signingName":"detective", + "uid":"detective-2018-10-26" + }, + "operations":{ + "AcceptInvitation":{ + "name":"AcceptInvitation", + "http":{ + "method":"PUT", + "requestUri":"/invitation" + }, + "input":{"shape":"AcceptInvitationRequest"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Accepts an invitation for the member account to contribute data to a behavior graph. This operation can only be called by an invited member account.

    The request provides the ARN of the behavior graph.

    The member account status in the graph must be INVITED.

    " + }, + "CreateGraph":{ + "name":"CreateGraph", + "http":{ + "method":"POST", + "requestUri":"/graph" + }, + "output":{"shape":"CreateGraphResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new behavior graph for the calling account, and sets that account as the master account. This operation is called by the account that is enabling Detective.

    Before you try to enable Detective, make sure that your account has been enrolled in Amazon GuardDuty for at least 48 hours. If you do not meet this requirement, you cannot enable Detective. If you do meet the GuardDuty prerequisite, then when you make the request to enable Detective, it checks whether your data volume is within the Detective quota. If it exceeds the quota, then you cannot enable Detective.

    The operation also enables Detective for the calling account in the currently selected Region. It returns the ARN of the new behavior graph.

    CreateGraph triggers a process to create the corresponding data tables for the new behavior graph.

    An account can only be the master account for one behavior graph within a Region. If the same account calls CreateGraph with the same master account, it always returns the same behavior graph ARN. It does not create a new behavior graph.
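
    A minimal sketch of enabling Detective by calling CreateGraph, assuming the standard SDK v2 codegen naming for the generated Detective client (DetectiveClient, CreateGraphRequest).

```java
import software.amazon.awssdk.services.detective.DetectiveClient;
import software.amazon.awssdk.services.detective.model.CreateGraphRequest;
import software.amazon.awssdk.services.detective.model.CreateGraphResponse;

public class CreateGraphExample {
    public static void main(String[] args) {
        DetectiveClient detective = DetectiveClient.create();

        // Enables Detective for the calling account in the current Region and
        // returns the ARN of the (possibly pre-existing) behavior graph.
        CreateGraphResponse response = detective.createGraph(CreateGraphRequest.builder().build());
        System.out.println("Behavior graph ARN: " + response.graphArn());
    }
}
```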

    " + }, + "CreateMembers":{ + "name":"CreateMembers", + "http":{ + "method":"POST", + "requestUri":"/graph/members" + }, + "input":{"shape":"CreateMembersRequest"}, + "output":{"shape":"CreateMembersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Sends a request to invite the specified AWS accounts to be member accounts in the behavior graph. This operation can only be called by the master account for a behavior graph.

    CreateMembers verifies the accounts and then sends invitations to the verified accounts.

    The request provides the behavior graph ARN and the list of accounts to invite.

    The response separates the requested accounts into two lists:

    • The accounts that CreateMembers was able to start the verification for. This list includes member accounts that are being verified, that have passed verification and are being sent an invitation, and that have failed verification.

    • The accounts that CreateMembers was unable to process. This list includes accounts that were already invited to be member accounts in the behavior graph.
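
    A hedged sketch of inviting a member account with CreateMembers, assuming the standard SDK v2 codegen naming for the generated client and model classes; the graph ARN, account ID, and email address are placeholders.

```java
import software.amazon.awssdk.services.detective.DetectiveClient;
import software.amazon.awssdk.services.detective.model.Account;
import software.amazon.awssdk.services.detective.model.CreateMembersRequest;
import software.amazon.awssdk.services.detective.model.CreateMembersResponse;

public class CreateMembersExample {
    public static void main(String[] args) {
        DetectiveClient detective = DetectiveClient.create();

        // Placeholder graph ARN, account ID, and email address.
        CreateMembersResponse response = detective.createMembers(CreateMembersRequest.builder()
                .graphArn("arn:aws:detective:us-east-1:111122223333:graph:0123456789abcdef0123456789abcdef")
                .message("Please join our behavior graph.")
                .accounts(Account.builder()
                        .accountId("444455556666")
                        .emailAddress("security@example.com")
                        .build())
                .build());

        // Accounts Detective could start verifying, and accounts it could not process.
        response.members().forEach(member ->
                System.out.println(member.accountId() + " -> " + member.statusAsString()));
        response.unprocessedAccounts().forEach(unprocessed ->
                System.out.println("Unprocessed: " + unprocessed.accountId()));
    }
}
```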

    " + }, + "DeleteGraph":{ + "name":"DeleteGraph", + "http":{ + "method":"POST", + "requestUri":"/graph/removal" + }, + "input":{"shape":"DeleteGraphRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Disables the specified behavior graph and queues it to be deleted. This operation removes the graph from each member account's list of behavior graphs.

    DeleteGraph can only be called by the master account for a behavior graph.

    " + }, + "DeleteMembers":{ + "name":"DeleteMembers", + "http":{ + "method":"POST", + "requestUri":"/graph/members/removal" + }, + "input":{"shape":"DeleteMembersRequest"}, + "output":{"shape":"DeleteMembersResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes one or more member accounts from the master account behavior graph. This operation can only be called by a Detective master account. That account cannot use DeleteMembers to delete their own account from the behavior graph. To disable a behavior graph, the master account uses the DeleteGraph API method.

    " + }, + "DisassociateMembership":{ + "name":"DisassociateMembership", + "http":{ + "method":"POST", + "requestUri":"/membership/removal" + }, + "input":{"shape":"DisassociateMembershipRequest"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Removes the member account from the specified behavior graph. This operation can only be called by a member account that has the ENABLED status.

    " + }, + "GetMembers":{ + "name":"GetMembers", + "http":{ + "method":"POST", + "requestUri":"/graph/members/get" + }, + "input":{"shape":"GetMembersRequest"}, + "output":{"shape":"GetMembersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the membership details for specified member accounts for a behavior graph.

    " + }, + "ListGraphs":{ + "name":"ListGraphs", + "http":{ + "method":"POST", + "requestUri":"/graphs/list" + }, + "input":{"shape":"ListGraphsRequest"}, + "output":{"shape":"ListGraphsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the list of behavior graphs that the calling account is a master of. This operation can only be called by a master account.

    Because an account can currently only be the master of one behavior graph within a Region, the results always contain a single graph.

    " + }, + "ListInvitations":{ + "name":"ListInvitations", + "http":{ + "method":"POST", + "requestUri":"/invitations/list" + }, + "input":{"shape":"ListInvitationsRequest"}, + "output":{"shape":"ListInvitationsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Retrieves the list of open and accepted behavior graph invitations for the member account. This operation can only be called by a member account.

    Open invitations are invitations that the member account has not responded to.

    The results do not include behavior graphs for which the member account declined the invitation. The results also do not include behavior graphs that the member account resigned from or was removed from.

    " + }, + "ListMembers":{ + "name":"ListMembers", + "http":{ + "method":"POST", + "requestUri":"/graph/members/list" + }, + "input":{"shape":"ListMembersRequest"}, + "output":{"shape":"ListMembersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Retrieves the list of member accounts for a behavior graph. Does not return member accounts that were removed from the behavior graph.

    " + }, + "RejectInvitation":{ + "name":"RejectInvitation", + "http":{ + "method":"POST", + "requestUri":"/invitation/removal" + }, + "input":{"shape":"RejectInvitationRequest"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Rejects an invitation to contribute the account data to a behavior graph. This operation must be called by a member account that has the INVITED status.

    " + }, + "StartMonitoringMember":{ + "name":"StartMonitoringMember", + "http":{ + "method":"POST", + "requestUri":"/graph/member/monitoringstate" + }, + "input":{"shape":"StartMonitoringMemberRequest"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Sends a request to enable data ingest for a member account that has a status of ACCEPTED_BUT_DISABLED.

    For valid member accounts, the status is updated as follows.

    • If Detective enabled the member account, then the new status is ENABLED.

    • If Detective cannot enable the member account, the status remains ACCEPTED_BUT_DISABLED.

    " + } + }, + "shapes":{ + "AcceptInvitationRequest":{ + "type":"structure", + "required":["GraphArn"], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph that the member account is accepting the invitation for.

    The member account status in the behavior graph must be INVITED.

    " + } + } + }, + "Account":{ + "type":"structure", + "required":[ + "AccountId", + "EmailAddress" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account identifier of the AWS account.

    " + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The AWS account root user email address for the AWS account.

    " + } + }, + "documentation":"

    An AWS account that is the master of or a member of a behavior graph.

    " + }, + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^[0-9]+$" + }, + "AccountIdList":{ + "type":"list", + "member":{"shape":"AccountId"}, + "max":50, + "min":1 + }, + "AccountList":{ + "type":"list", + "member":{"shape":"Account"}, + "max":50, + "min":1 + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request attempted an invalid action.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateGraphResponse":{ + "type":"structure", + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the new behavior graph.

    " + } + } + }, + "CreateMembersRequest":{ + "type":"structure", + "required":[ + "GraphArn", + "Accounts" + ], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph to invite the member accounts to contribute their data to.

    " + }, + "Message":{ + "shape":"EmailMessage", + "documentation":"

    Customized message text to include in the invitation email message to the invited member accounts.

    " + }, + "Accounts":{ + "shape":"AccountList", + "documentation":"

    The list of AWS accounts to invite to become member accounts in the behavior graph. For each invited account, the account list contains the account identifier and the AWS account root user email address.

    " + } + } + }, + "CreateMembersResponse":{ + "type":"structure", + "members":{ + "Members":{ + "shape":"MemberDetailList", + "documentation":"

    The set of member account invitation requests that Detective was able to process. This includes accounts that are being verified, that failed verification, and that passed verification and are being sent an invitation.

    " + }, + "UnprocessedAccounts":{ + "shape":"UnprocessedAccountList", + "documentation":"

    The list of accounts for which Detective was unable to process the invitation request. For each account, the list provides the reason why the request could not be processed. The list includes accounts that are already member accounts in the behavior graph.

    " + } + } + }, + "DeleteGraphRequest":{ + "type":"structure", + "required":["GraphArn"], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph to disable.

    " + } + } + }, + "DeleteMembersRequest":{ + "type":"structure", + "required":[ + "GraphArn", + "AccountIds" + ], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph to delete members from.

    " + }, + "AccountIds":{ + "shape":"AccountIdList", + "documentation":"

    The list of AWS account identifiers for the member accounts to delete from the behavior graph.

    " + } + } + }, + "DeleteMembersResponse":{ + "type":"structure", + "members":{ + "AccountIds":{ + "shape":"AccountIdList", + "documentation":"

    The list of AWS account identifiers for the member accounts that Detective successfully deleted from the behavior graph.

    " + }, + "UnprocessedAccounts":{ + "shape":"UnprocessedAccountList", + "documentation":"

    The list of member accounts that Detective was not able to delete from the behavior graph. For each member account, provides the reason that the deletion could not be processed.

    " + } + } + }, + "DisassociateMembershipRequest":{ + "type":"structure", + "required":["GraphArn"], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph to remove the member account from.

    The member account's member status in the behavior graph must be ENABLED.

    " + } + } + }, + "EmailAddress":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^.+@.+$" + }, + "EmailMessage":{ + "type":"string", + "max":1000, + "min":1 + }, + "ErrorMessage":{"type":"string"}, + "GetMembersRequest":{ + "type":"structure", + "required":[ + "GraphArn", + "AccountIds" + ], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph for which to request the member details.

    " + }, + "AccountIds":{ + "shape":"AccountIdList", + "documentation":"

    The list of AWS account identifiers for the member account for which to return member details.

    You cannot use GetMembers to retrieve information about member accounts that were removed from the behavior graph.

    " + } + } + }, + "GetMembersResponse":{ + "type":"structure", + "members":{ + "MemberDetails":{ + "shape":"MemberDetailList", + "documentation":"

    The member account details that Detective is returning in response to the request.

    " + }, + "UnprocessedAccounts":{ + "shape":"UnprocessedAccountList", + "documentation":"

    The requested member accounts for which Detective was unable to return member details.

    For each account, provides the reason why the request could not be processed.

    " + } + } + }, + "Graph":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph.

    " + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the behavior graph was created. The value is in milliseconds since the epoch.

    " + } + }, + "documentation":"

    A behavior graph in Detective.

    " + }, + "GraphArn":{ + "type":"string", + "pattern":"^arn:aws[-\\w]{0,10}?:detective:[-\\w]{2,20}?:\\d{12}?:graph:[abcdef\\d]{32}?$" + }, + "GraphList":{ + "type":"list", + "member":{"shape":"Graph"} + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request was valid but failed because of a problem with the service.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "ListGraphsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    For requests to get the next page of results, the pagination token that was returned with the previous set of results. The initial request does not include a pagination token.

    " + }, + "MaxResults":{ + "shape":"MemberResultsLimit", + "documentation":"

    The maximum number of graphs to return at a time. The total must be less than the overall limit on the number of results to return, which is currently 200.

    " + } + } + }, + "ListGraphsResponse":{ + "type":"structure", + "members":{ + "GraphList":{ + "shape":"GraphList", + "documentation":"

    A list of behavior graphs that the account is a master for.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are more behavior graphs remaining in the results, then this is the pagination token to use to request the next page of behavior graphs.

    " + } + } + }, + "ListInvitationsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    For requests to retrieve the next page of results, the pagination token that was returned with the previous page of results. The initial request does not include a pagination token.

    " + }, + "MaxResults":{ + "shape":"MemberResultsLimit", + "documentation":"

    The maximum number of behavior graph invitations to return in the response. The total must be less than the overall limit on the number of results to return, which is currently 200.

    " + } + } + }, + "ListInvitationsResponse":{ + "type":"structure", + "members":{ + "Invitations":{ + "shape":"MemberDetailList", + "documentation":"

    The list of behavior graphs for which the member account has open or accepted invitations.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are more behavior graphs remaining in the results, then this is the pagination token to use to request the next page of behavior graphs.

    " + } + } + }, + "ListMembersRequest":{ + "type":"structure", + "required":["GraphArn"], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph for which to retrieve the list of member accounts.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    For requests to retrieve the next page of member account results, the pagination token that was returned with the previous page of results. The initial request does not include a pagination token.

    " + }, + "MaxResults":{ + "shape":"MemberResultsLimit", + "documentation":"

    The maximum number of member accounts to include in the response. The total must be less than the overall limit on the number of results to return, which is currently 200.

    " + } + } + }, + "ListMembersResponse":{ + "type":"structure", + "members":{ + "MemberDetails":{ + "shape":"MemberDetailList", + "documentation":"

    The list of member accounts in the behavior graph.

    The results include member accounts that did not pass verification and member accounts that have not yet accepted the invitation to the behavior graph. The results do not include member accounts that were removed from the behavior graph.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are more member accounts remaining in the results, then this is the pagination token to use to request the next page of member accounts.

    " + } + } + }, + "MemberDetail":{ + "type":"structure", + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account identifier for the member account.

    " + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The AWS account root user email address for the member account.

    " + }, + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph that the member account was invited to.

    " + }, + "MasterId":{ + "shape":"AccountId", + "documentation":"

    The AWS account identifier of the master account for the behavior graph.

    " + }, + "Status":{ + "shape":"MemberStatus", + "documentation":"

    The current membership status of the member account. The status can have one of the following values:

    • INVITED - Indicates that the member was sent an invitation but has not yet responded.

    • VERIFICATION_IN_PROGRESS - Indicates that Detective is verifying that the account identifier and email address provided for the member account match. If they do match, then Detective sends the invitation. If the email address and account identifier don't match, then the member cannot be added to the behavior graph.

    • VERIFICATION_FAILED - Indicates that the account and email address provided for the member account do not match, and Detective did not send an invitation to the account.

    • ENABLED - Indicates that the member account accepted the invitation to contribute to the behavior graph.

    • ACCEPTED_BUT_DISABLED - Indicates that the member account accepted the invitation but is prevented from contributing data to the behavior graph. DisabledReason provides the reason why the member account is not enabled.

    Member accounts that declined an invitation or that were removed from the behavior graph are not included.

    " + }, + "DisabledReason":{ + "shape":"MemberDisabledReason", + "documentation":"

    For member accounts with a status of ACCEPTED_BUT_DISABLED, the reason that the member account is not enabled.

    The reason can have one of the following values:

    • VOLUME_TOO_HIGH - Indicates that adding the member account would cause the data volume for the behavior graph to be too high.

    • VOLUME_UNKNOWN - Indicates that Detective is unable to verify the data volume for the member account. This is usually because the member account is not enrolled in Amazon GuardDuty.

    " + }, + "InvitedTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time that Detective sent the invitation to the member account. The value is in milliseconds since the epoch.

    " + }, + "UpdatedTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the member account was last updated. The value is in milliseconds since the epoch.

    " + }, + "PercentOfGraphUtilization":{ + "shape":"Percentage", + "documentation":"

    The member account data volume as a percentage of the maximum allowed data volume. 0 indicates 0 percent, and 100 indicates 100 percent.

    Note that this is not the percentage of the behavior graph data volume.

    For example, the data volume for the behavior graph is 80 GB per day. The maximum data volume is 160 GB per day. If the data volume for the member account is 40 GB per day, then PercentOfGraphUtilization is 25. It represents 25% of the maximum allowed data volume.

    " + }, + "PercentOfGraphUtilizationUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time when the graph utilization percentage was last updated.

    " + } + }, + "documentation":"

    Details about a member account that was invited to contribute to a behavior graph.

    " + }, + "MemberDetailList":{ + "type":"list", + "member":{"shape":"MemberDetail"} + }, + "MemberDisabledReason":{ + "type":"string", + "enum":[ + "VOLUME_TOO_HIGH", + "VOLUME_UNKNOWN" + ] + }, + "MemberResultsLimit":{ + "type":"integer", + "box":true, + "max":200, + "min":1 + }, + "MemberStatus":{ + "type":"string", + "enum":[ + "INVITED", + "VERIFICATION_IN_PROGRESS", + "VERIFICATION_FAILED", + "ENABLED", + "ACCEPTED_BUT_DISABLED" + ] + }, + "PaginationToken":{ + "type":"string", + "max":1024, + "min":1 + }, + "Percentage":{"type":"double"}, + "RejectInvitationRequest":{ + "type":"structure", + "required":["GraphArn"], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph to reject the invitation to.

    The member account's current member status in the behavior graph must be INVITED.

    " + } + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request refers to a nonexistent resource.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    This request cannot be completed for one of the following reasons.

    • The request would cause the number of member accounts in the behavior graph to exceed the maximum allowed. A behavior graph cannot have more than 1000 member accounts.

    • The request would cause the data rate for the behavior graph to exceed the maximum allowed.

    • Detective is unable to verify the data rate for the member account. This is usually because the member account is not enrolled in Amazon GuardDuty.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "StartMonitoringMemberRequest":{ + "type":"structure", + "required":[ + "GraphArn", + "AccountId" + ], + "members":{ + "GraphArn":{ + "shape":"GraphArn", + "documentation":"

    The ARN of the behavior graph.

    " + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID of the member account to try to enable.

    The account must be an invited member account with a status of ACCEPTED_BUT_DISABLED.

    " + } + } + }, + "Timestamp":{"type":"timestamp"}, + "UnprocessedAccount":{ + "type":"structure", + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account identifier of the member account that was not processed.

    " + }, + "Reason":{ + "shape":"UnprocessedReason", + "documentation":"

    The reason that the member account request could not be processed.

    " + } + }, + "documentation":"

    A member account that was included in a request but for which the request could not be processed.

    " + }, + "UnprocessedAccountList":{ + "type":"list", + "member":{"shape":"UnprocessedAccount"} + }, + "UnprocessedReason":{"type":"string"}, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request parameters are invalid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

    Detective uses machine learning and purpose-built visualizations to help you analyze and investigate security issues across your Amazon Web Services (AWS) workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from AWS CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.

    The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by a master account.

    Every behavior graph is specific to a Region. You can only use the API to manage graphs that belong to the Region that is associated with the currently selected endpoint.

    A Detective master account can use the Detective API to do the following:

    • Enable and disable Detective. Enabling Detective creates a new behavior graph.

    • View the list of member accounts in a behavior graph.

    • Add member accounts to a behavior graph.

    • Remove member accounts from a behavior graph.

    A member account can use the Detective API to do the following:

    • View the list of behavior graphs that they are invited to.

    • Accept an invitation to contribute to a behavior graph.

    • Decline an invitation to contribute to a behavior graph.

    • Remove their account from a behavior graph.

    All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

    " +} diff --git a/services/devicefarm/build.properties b/services/devicefarm/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/devicefarm/build.properties +++ b/services/devicefarm/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index 506c7d2f02bf..8423690cc9de 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + devopsguru + AWS Java SDK :: Services :: Dev Ops Guru + The AWS Java SDK for Dev Ops Guru module holds the client classes that are used for + communicating with Dev Ops Guru. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.devopsguru + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/devopsguru/src/main/resources/codegen-resources/paginators-1.json b/services/devopsguru/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..6e9b69977bec --- /dev/null +++ b/services/devopsguru/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,64 @@ +{ + "pagination": { + "DescribeResourceCollectionHealth": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": [ + "CloudFormation" + ] + }, + "GetResourceCollection": { + "input_token": "NextToken", + "non_aggregate_keys": [ + "ResourceCollection" + ], + "output_token": "NextToken", + "result_key": [ + "ResourceCollection.CloudFormation.StackNames" + ] + }, + "ListAnomaliesForInsight": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": [ + "ReactiveAnomalies", + "ProactiveAnomalies" + ] + }, + "ListEvents": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Events" + }, + "ListInsights": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": [ + "ProactiveInsights", + "ReactiveInsights" + ] + }, + "ListNotificationChannels": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Channels" + }, + "ListRecommendations": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Recommendations" + }, + "SearchInsights": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": [ + "ProactiveInsights", + "ReactiveInsights" + ] + } + } +} \ No newline at end of file diff --git a/services/devopsguru/src/main/resources/codegen-resources/service-2.json b/services/devopsguru/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..d4125f049768 --- /dev/null +++ b/services/devopsguru/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2125 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-12-01", + "endpointPrefix":"devops-guru", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon DevOps Guru", + "serviceId":"DevOps Guru", + "signatureVersion":"v4", + 
"signingName":"devops-guru", + "uid":"devops-guru-2020-12-01" + }, + "operations":{ + "AddNotificationChannel":{ + "name":"AddNotificationChannel", + "http":{ + "method":"PUT", + "requestUri":"/channels", + "responseCode":200 + }, + "input":{"shape":"AddNotificationChannelRequest"}, + "output":{"shape":"AddNotificationChannelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Adds a notification channel to DevOps Guru. A notification channel is used to notify you about important DevOps Guru events, such as when an insight is generated.

    If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to send it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. For more information, see Permissions for cross account Amazon SNS topics.

    If you use an Amazon SNS topic that is encrypted by an AWS Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for AWS KMS–encrypted Amazon SNS topics.

    " + }, + "DescribeAccountHealth":{ + "name":"DescribeAccountHealth", + "http":{ + "method":"GET", + "requestUri":"/accounts/health", + "responseCode":200 + }, + "input":{"shape":"DescribeAccountHealthRequest"}, + "output":{"shape":"DescribeAccountHealthResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the number of open reactive insights, the number of open proactive insights, and the number of metrics analyzed in your AWS account. Use these numbers to gauge the health of operations in your AWS account.

    " + }, + "DescribeAccountOverview":{ + "name":"DescribeAccountOverview", + "http":{ + "method":"POST", + "requestUri":"/accounts/overview", + "responseCode":200 + }, + "input":{"shape":"DescribeAccountOverviewRequest"}, + "output":{"shape":"DescribeAccountOverviewResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    For the time range passed in, returns the number of open reactive insights that were created, the number of open proactive insights that were created, and the Mean Time to Recover (MTTR) for all closed reactive insights.

    " + }, + "DescribeAnomaly":{ + "name":"DescribeAnomaly", + "http":{ + "method":"GET", + "requestUri":"/anomalies/{Id}", + "responseCode":200 + }, + "input":{"shape":"DescribeAnomalyRequest"}, + "output":{"shape":"DescribeAnomalyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns details about an anomaly that you specify using its ID.

    " + }, + "DescribeInsight":{ + "name":"DescribeInsight", + "http":{ + "method":"GET", + "requestUri":"/insights/{Id}", + "responseCode":200 + }, + "input":{"shape":"DescribeInsightRequest"}, + "output":{"shape":"DescribeInsightResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns details about an insight that you specify using its ID.

    " + }, + "DescribeResourceCollectionHealth":{ + "name":"DescribeResourceCollectionHealth", + "http":{ + "method":"GET", + "requestUri":"/accounts/health/resource-collection/{ResourceCollectionType}", + "responseCode":200 + }, + "input":{"shape":"DescribeResourceCollectionHealthRequest"}, + "output":{"shape":"DescribeResourceCollectionHealthResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the number of open proactive insights, open reactive insights, and the Mean Time to Recover (MTTR) for all closed insights in resource collections in your account. You specify the type of AWS resource collection. The one type of AWS resource collection supported is AWS CloudFormation stacks. DevOps Guru can be configured to analyze only the AWS resources that are defined in the stacks.

    " + }, + "DescribeServiceIntegration":{ + "name":"DescribeServiceIntegration", + "http":{ + "method":"GET", + "requestUri":"/service-integrations", + "responseCode":200 + }, + "input":{"shape":"DescribeServiceIntegrationRequest"}, + "output":{"shape":"DescribeServiceIntegrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the integration status of services that are integrated with DevOps Guru. The one service that can be integrated with DevOps Guru is AWS Systems Manager, which can be used to create an OpsItem for each generated insight.

    " + }, + "GetResourceCollection":{ + "name":"GetResourceCollection", + "http":{ + "method":"GET", + "requestUri":"/resource-collections/{ResourceCollectionType}", + "responseCode":200 + }, + "input":{"shape":"GetResourceCollectionRequest"}, + "output":{"shape":"GetResourceCollectionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns lists of the AWS resources that are of the specified resource collection type. The one type of AWS resource collection supported is AWS CloudFormation stacks. DevOps Guru can be configured to analyze only the AWS resources that are defined in the stacks.

    " + }, + "ListAnomaliesForInsight":{ + "name":"ListAnomaliesForInsight", + "http":{ + "method":"POST", + "requestUri":"/anomalies/insight/{InsightId}", + "responseCode":200 + }, + "input":{"shape":"ListAnomaliesForInsightRequest"}, + "output":{"shape":"ListAnomaliesForInsightResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of the anomalies that belong to an insight that you specify using its ID.

    " + }, + "ListEvents":{ + "name":"ListEvents", + "http":{ + "method":"POST", + "requestUri":"/events", + "responseCode":200 + }, + "input":{"shape":"ListEventsRequest"}, + "output":{"shape":"ListEventsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of the events emitted by the resources that are evaluated by DevOps Guru. You can use filters to specify which events are returned.

    " + }, + "ListInsights":{ + "name":"ListInsights", + "http":{ + "method":"POST", + "requestUri":"/insights", + "responseCode":200 + }, + "input":{"shape":"ListInsightsRequest"}, + "output":{"shape":"ListInsightsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of insights in your AWS account. You can specify which insights are returned by their start time and status (ONGOING, CLOSED, or ANY).

    " + }, + "ListNotificationChannels":{ + "name":"ListNotificationChannels", + "http":{ + "method":"POST", + "requestUri":"/channels", + "responseCode":200 + }, + "input":{"shape":"ListNotificationChannelsRequest"}, + "output":{"shape":"ListNotificationChannelsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of notification channels configured for DevOps Guru. Each notification channel is used to notify you when DevOps Guru generates an insight that contains information about how to improve your operations. The one supported notification channel is Amazon Simple Notification Service (Amazon SNS).

    " + }, + "ListRecommendations":{ + "name":"ListRecommendations", + "http":{ + "method":"POST", + "requestUri":"/recommendations", + "responseCode":200 + }, + "input":{"shape":"ListRecommendationsRequest"}, + "output":{"shape":"ListRecommendationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of a specified insight's recommendations. Each recommendation includes a list of related metrics and a list of related events.

    " + }, + "PutFeedback":{ + "name":"PutFeedback", + "http":{ + "method":"PUT", + "requestUri":"/feedback", + "responseCode":200 + }, + "input":{"shape":"PutFeedbackRequest"}, + "output":{"shape":"PutFeedbackResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Collects customer feedback about the specified insight.

    " + }, + "RemoveNotificationChannel":{ + "name":"RemoveNotificationChannel", + "http":{ + "method":"DELETE", + "requestUri":"/channels/{Id}", + "responseCode":200 + }, + "input":{"shape":"RemoveNotificationChannelRequest"}, + "output":{"shape":"RemoveNotificationChannelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Removes a notification channel from DevOps Guru. A notification channel is used to notify you when DevOps Guru generates an insight that contains information about how to improve your operations.

    " + }, + "SearchInsights":{ + "name":"SearchInsights", + "http":{ + "method":"POST", + "requestUri":"/insights/search", + "responseCode":200 + }, + "input":{"shape":"SearchInsightsRequest"}, + "output":{"shape":"SearchInsightsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of insights in your AWS account. You can specify which insights are returned by their start time, one or more statuses (ONGOING and CLOSED), one or more severities (LOW, MEDIUM, and HIGH), and type (REACTIVE or PROACTIVE).

    Use the Filters parameter to specify status and severity search parameters. Use the Type parameter to specify REACTIVE or PROACTIVE in your search.

    " + }, + "UpdateResourceCollection":{ + "name":"UpdateResourceCollection", + "http":{ + "method":"PUT", + "requestUri":"/resource-collections", + "responseCode":200 + }, + "input":{"shape":"UpdateResourceCollectionRequest"}, + "output":{"shape":"UpdateResourceCollectionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Updates the collection of resources that DevOps Guru analyzes. The one type of AWS resource collection supported is AWS CloudFormation stacks. DevOps Guru can be configured to analyze only the AWS resources that are defined in the stacks. This method also creates the IAM role required for you to use DevOps Guru.

    " + }, + "UpdateServiceIntegration":{ + "name":"UpdateServiceIntegration", + "http":{ + "method":"PUT", + "requestUri":"/service-integrations", + "responseCode":200 + }, + "input":{"shape":"UpdateServiceIntegrationRequest"}, + "output":{"shape":"UpdateServiceIntegrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Enables or disables integration with a service that can be integrated with DevOps Guru. The one service that can be integrated with DevOps Guru is AWS Systems Manager, which can be used to create an OpsItem for each generated insight.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ErrorMessageString"} + }, + "documentation":"

    You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AddNotificationChannelRequest":{ + "type":"structure", + "required":["Config"], + "members":{ + "Config":{ + "shape":"NotificationChannelConfig", + "documentation":"

    A NotificationChannelConfig object that specifies what type of notification channel to add. The one supported notification channel is Amazon Simple Notification Service (Amazon SNS).

    " + } + } + }, + "AddNotificationChannelResponse":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"NotificationChannelId", + "documentation":"

    The ID of the added notification channel.

    " + } + } + }, + "AnomalyId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[\\w-]*$" + }, + "AnomalyLimit":{ + "type":"double", + "box":true + }, + "AnomalySeverity":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH" + ] + }, + "AnomalySourceDetails":{ + "type":"structure", + "members":{ + "CloudWatchMetrics":{ + "shape":"CloudWatchMetricsDetails", + "documentation":"

    An array of CloudWatchMetricsDetail objects that contain information about the analyzed metrics that displayed anomalous behavior.

    " + } + }, + "documentation":"

    Details about the source of the anomalous operational data that triggered the anomaly. The one supported source is Amazon CloudWatch metrics.

    " + }, + "AnomalyStatus":{ + "type":"string", + "enum":[ + "ONGOING", + "CLOSED" + ] + }, + "AnomalyTimeRange":{ + "type":"structure", + "required":["StartTime"], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

    The time when the anomalous behavior started.

    " + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

    The time when the anomalous behavior ended.

    " + } + }, + "documentation":"

    A time range that specifies when the observed unusual behavior in an anomaly started and ended.

    " + }, + "Channels":{ + "type":"list", + "member":{"shape":"NotificationChannel"} + }, + "CloudFormationCollection":{ + "type":"structure", + "members":{ + "StackNames":{ + "shape":"StackNames", + "documentation":"

    An array of CloudFormation stack names.

    " + } + }, + "documentation":"

    Information about AWS CloudFormation stacks. You can use stacks to specify which AWS resources in your account to analyze. For more information, see Stacks in the AWS CloudFormation User Guide.

    " + }, + "CloudFormationCollectionFilter":{ + "type":"structure", + "members":{ + "StackNames":{ + "shape":"StackNames", + "documentation":"

    An array of CloudFormation stack names.

    " + } + }, + "documentation":"

    Information about AWS CloudFormation stacks. You can use stacks to specify which AWS resources in your account to analyze. For more information, see Stacks in the AWS CloudFormation User Guide.

    " + }, + "CloudFormationHealth":{ + "type":"structure", + "members":{ + "StackName":{ + "shape":"StackName", + "documentation":"

    The name of the CloudFormation stack.

    " + }, + "Insight":{ + "shape":"InsightHealth", + "documentation":"

    Information about the health of the AWS resources in your account that are specified by an AWS CloudFormation stack, including the number of open proactive insights, the number of open reactive insights, and the Mean Time to Recover (MTTR) of closed insights.

    " + } + }, + "documentation":"

    Information about the health of AWS resources in your account that are specified by an AWS CloudFormation stack.

    " + }, + "CloudFormationHealths":{ + "type":"list", + "member":{"shape":"CloudFormationHealth"} + }, + "CloudWatchMetricsDetail":{ + "type":"structure", + "members":{ + "MetricName":{ + "shape":"CloudWatchMetricsMetricName", + "documentation":"

    The name of the CloudWatch metric.

    " + }, + "Namespace":{ + "shape":"CloudWatchMetricsNamespace", + "documentation":"

    The namespace of the CloudWatch metric. A namespace is a container for CloudWatch metrics.

    " + }, + "Dimensions":{ + "shape":"CloudWatchMetricsDimensions", + "documentation":"

    An array of CloudWatch dimensions associated with the CloudWatch metric.

    " + }, + "Stat":{ + "shape":"CloudWatchMetricsStat", + "documentation":"

    The type of statistic associated with the CloudWatch metric. For more information, see Statistics in the Amazon CloudWatch User Guide.

    " + }, + "Unit":{ + "shape":"CloudWatchMetricsUnit", + "documentation":"

    The unit of measure used for the CloudWatch metric. For example, Bytes, Seconds, Count, and Percent.

    " + }, + "Period":{ + "shape":"CloudWatchMetricsPeriod", + "documentation":"

    The length of time associated with the CloudWatch metric in number of seconds.

    " + } + }, + "documentation":"

    Information about an Amazon CloudWatch metric.

    " + }, + "CloudWatchMetricsDetails":{ + "type":"list", + "member":{"shape":"CloudWatchMetricsDetail"} + }, + "CloudWatchMetricsDimension":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"CloudWatchMetricsDimensionName", + "documentation":"

    The name of the CloudWatch dimension.

    " + }, + "Value":{ + "shape":"CloudWatchMetricsDimensionValue", + "documentation":"

    The value of the CloudWatch dimension.

    " + } + }, + "documentation":"

    The dimension of an Amazon CloudWatch metric that is used when DevOps Guru analyzes the resources in your account for operational problems and anomalous behavior. A dimension is a name/value pair that is part of the identity of a metric. A metric can have up to 10 dimensions. For more information, see Dimensions in the Amazon CloudWatch User Guide.

    " + }, + "CloudWatchMetricsDimensionName":{"type":"string"}, + "CloudWatchMetricsDimensionValue":{"type":"string"}, + "CloudWatchMetricsDimensions":{ + "type":"list", + "member":{"shape":"CloudWatchMetricsDimension"} + }, + "CloudWatchMetricsMetricName":{"type":"string"}, + "CloudWatchMetricsNamespace":{"type":"string"}, + "CloudWatchMetricsPeriod":{"type":"integer"}, + "CloudWatchMetricsStat":{ + "type":"string", + "enum":[ + "Sum", + "Average", + "SampleCount", + "Minimum", + "Maximum", + "p99", + "p90", + "p50" + ] + }, + "CloudWatchMetricsUnit":{"type":"string"}, + "ConflictException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"ErrorMessageString"}, + "ResourceId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the AWS resource in which a conflict occurred.

    " + }, + "ResourceType":{ + "shape":"ResourceIdType", + "documentation":"

    The type of the AWS resource in which a conflict occurred.

    " + } + }, + "documentation":"

    An exception that is thrown when a conflict occurs.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "DescribeAccountHealthRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeAccountHealthResponse":{ + "type":"structure", + "required":[ + "OpenReactiveInsights", + "OpenProactiveInsights", + "MetricsAnalyzed", + "ResourceHours" + ], + "members":{ + "OpenReactiveInsights":{ + "shape":"NumOpenReactiveInsights", + "documentation":"

    An integer that specifies the number of open reactive insights in your AWS account.

    " + }, + "OpenProactiveInsights":{ + "shape":"NumOpenProactiveInsights", + "documentation":"

    An integer that specifies the number of open proactive insights in your AWS account.

    " + }, + "MetricsAnalyzed":{ + "shape":"NumMetricsAnalyzed", + "documentation":"

    An integer that specifies the number of metrics that have been analyzed in your AWS account.

    " + }, + "ResourceHours":{ + "shape":"ResourceHours", + "documentation":"

    The number of Amazon DevOps Guru resource analysis hours billed to the current AWS account in the last hour.

    " + } + } + }, + "DescribeAccountOverviewRequest":{ + "type":"structure", + "required":["FromTime"], + "members":{ + "FromTime":{ + "shape":"Timestamp", + "documentation":"

    The start of the time range passed in. The start time granularity is at the day level. The floor of the start time is used. Returned information occurred after this day.

    " + }, + "ToTime":{ + "shape":"Timestamp", + "documentation":"

    The end of the time range passed in. The end time granularity is at the day level. The floor of the end time is used. Returned information occurred before this day. If this is not specified, then the current day is used.

    " + } + } + }, + "DescribeAccountOverviewResponse":{ + "type":"structure", + "required":[ + "ReactiveInsights", + "ProactiveInsights", + "MeanTimeToRecoverInMilliseconds" + ], + "members":{ + "ReactiveInsights":{ + "shape":"NumReactiveInsights", + "documentation":"

    An integer that specifies the number of open reactive insights in your AWS account that were created during the time range passed in.

    " + }, + "ProactiveInsights":{ + "shape":"NumProactiveInsights", + "documentation":"

    An integer that specifies the number of open proactive insights in your AWS account that were created during the time range passed in.

    " + }, + "MeanTimeToRecoverInMilliseconds":{ + "shape":"MeanTimeToRecoverInMilliseconds", + "documentation":"

    The Mean Time to Recover (MTTR) for all closed insights that were created during the time range passed in.

    " + } + } + }, + "DescribeAnomalyRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"AnomalyId", + "documentation":"

    The ID of the anomaly.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DescribeAnomalyResponse":{ + "type":"structure", + "members":{ + "ProactiveAnomaly":{ + "shape":"ProactiveAnomaly", + "documentation":"

    A ProactiveAnomaly object that represents the requested anomaly.

    " + }, + "ReactiveAnomaly":{ + "shape":"ReactiveAnomaly", + "documentation":"

    A ReactiveAnomaly object that represents the requested anomaly.

    " + } + } + }, + "DescribeInsightRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"InsightId", + "documentation":"

    The ID of the insight.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DescribeInsightResponse":{ + "type":"structure", + "members":{ + "ProactiveInsight":{ + "shape":"ProactiveInsight", + "documentation":"

    A ProactiveInsight object that represents the requested insight.

    " + }, + "ReactiveInsight":{ + "shape":"ReactiveInsight", + "documentation":"

    A ReactiveInsight object that represents the requested insight.

    " + } + } + }, + "DescribeResourceCollectionHealthRequest":{ + "type":"structure", + "required":["ResourceCollectionType"], + "members":{ + "ResourceCollectionType":{ + "shape":"ResourceCollectionType", + "documentation":"

    An AWS resource collection type. This type specifies how analyzed AWS resources are defined. The one type of AWS resource collection supported is AWS CloudFormation stacks. DevOps Guru can be configured to analyze only the AWS resources that are defined in the stacks.

    ", + "location":"uri", + "locationName":"ResourceCollectionType" + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "DescribeResourceCollectionHealthResponse":{ + "type":"structure", + "required":["CloudFormation"], + "members":{ + "CloudFormation":{ + "shape":"CloudFormationHealths", + "documentation":"

    An array of CloudFormationHealth objects, each of which contains an InsightHealth object with the requested system health information.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    " + } + } + }, + "DescribeServiceIntegrationRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeServiceIntegrationResponse":{ + "type":"structure", + "members":{ + "ServiceIntegration":{"shape":"ServiceIntegrationConfig"} + } + }, + "EndTimeRange":{ + "type":"structure", + "members":{ + "FromTime":{ + "shape":"Timestamp", + "documentation":"

    The earliest end time in the time range.

    " + }, + "ToTime":{ + "shape":"Timestamp", + "documentation":"

    The latest end time in the time range.

    " + } + }, + "documentation":"

    A range of time that specifies when anomalous behavior in an anomaly or insight ended.

    " + }, + "ErrorMessageString":{"type":"string"}, + "ErrorNameString":{"type":"string"}, + "ErrorQuotaCodeString":{"type":"string"}, + "ErrorServiceCodeString":{"type":"string"}, + "Event":{ + "type":"structure", + "members":{ + "ResourceCollection":{"shape":"ResourceCollection"}, + "Id":{ + "shape":"EventId", + "documentation":"

    The ID of the event.

    " + }, + "Time":{ + "shape":"Timestamp", + "documentation":"

    A Timestamp that specifies the time the event occurred.

    " + }, + "EventSource":{ + "shape":"EventSource", + "documentation":"

    The AWS source that emitted the event.

    " + }, + "Name":{ + "shape":"EventName", + "documentation":"

    The name of the event.

    " + }, + "DataSource":{ + "shape":"EventDataSource", + "documentation":"

    The source, AWS_CLOUD_TRAIL or AWS_CODE_DEPLOY, where DevOps Guru analysis found the event.

    " + }, + "EventClass":{ + "shape":"EventClass", + "documentation":"

    The class of the event. The class specifies what the event is related to, such as an infrastructure change, a deployment, or a schema change.

    " + }, + "Resources":{ + "shape":"EventResources", + "documentation":"

    An array of EventResource objects that contain information about the resources that emitted the event.

    " + } + }, + "documentation":"

    An AWS resource event. AWS resource events and metrics are analyzed by DevOps Guru to find anomalous behavior and provide recommendations to improve your operational solutions.

    " + }, + "EventClass":{ + "type":"string", + "enum":[ + "INFRASTRUCTURE", + "DEPLOYMENT", + "SECURITY_CHANGE", + "CONFIG_CHANGE", + "SCHEMA_CHANGE" + ] + }, + "EventDataSource":{ + "type":"string", + "enum":[ + "AWS_CLOUD_TRAIL", + "AWS_CODE_DEPLOY" + ] + }, + "EventId":{"type":"string"}, + "EventName":{ + "type":"string", + "max":50, + "min":0 + }, + "EventResource":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"EventResourceType", + "documentation":"

    The type of resource that emitted an event.

    " + }, + "Name":{ + "shape":"EventResourceName", + "documentation":"

    The name of the resource that emitted an event.

    " + }, + "Arn":{ + "shape":"EventResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that emitted an event.

    " + } + }, + "documentation":"

    The AWS resource that emitted an event. AWS resource events and metrics are analyzed by DevOps Guru to find anomalous behavior and provide recommendations to improve your operational solutions.

    " + }, + "EventResourceArn":{ + "type":"string", + "max":2048, + "min":36, + "pattern":"^arn:aws[-a-z]*:[a-z0-9-]*:[a-z0-9-]*:\\d{12}:.*$" + }, + "EventResourceName":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^.*$" + }, + "EventResourceType":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^.*$" + }, + "EventResources":{ + "type":"list", + "member":{"shape":"EventResource"} + }, + "EventSource":{ + "type":"string", + "max":50, + "min":10, + "pattern":"^[a-z]+[a-z0-9]*\\.amazonaws\\.com|aws\\.events$" + }, + "EventTimeRange":{ + "type":"structure", + "required":[ + "FromTime", + "ToTime" + ], + "members":{ + "FromTime":{ + "shape":"Timestamp", + "documentation":"

    The time when the event started.

    " + }, + "ToTime":{ + "shape":"Timestamp", + "documentation":"

    The time when the event ended.

    " + } + }, + "documentation":"

    The time range during which an AWS event occurred. AWS resource events and metrics are analyzed by DevOps Guru to find anomalous behavior and provide recommendations to improve your operational solutions.

    " + }, + "Events":{ + "type":"list", + "member":{"shape":"Event"} + }, + "GetResourceCollectionRequest":{ + "type":"structure", + "required":["ResourceCollectionType"], + "members":{ + "ResourceCollectionType":{ + "shape":"ResourceCollectionType", + "documentation":"

    The type of AWS resource collections to return. The one valid value is CLOUD_FORMATION for AWS CloudFormation stacks.

    ", + "location":"uri", + "locationName":"ResourceCollectionType" + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "GetResourceCollectionResponse":{ + "type":"structure", + "members":{ + "ResourceCollection":{ + "shape":"ResourceCollectionFilter", + "documentation":"

    The requested list of AWS resource collections. The one type of AWS resource collection supported is AWS CloudFormation stacks. DevOps Guru can be configured to analyze only the AWS resources that are defined in the stacks.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    " + } + } + }, + "InsightFeedback":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"InsightId", + "documentation":"

    The insight feedback ID.

    " + }, + "Feedback":{ + "shape":"InsightFeedbackOption", + "documentation":"

    The feedback provided by the customer.

    " + } + }, + "documentation":"

    Information about insight feedback received from a customer.

    " + }, + "InsightFeedbackOption":{ + "type":"string", + "enum":[ + "VALID_COLLECTION", + "RECOMMENDATION_USEFUL", + "ALERT_TOO_SENSITIVE", + "DATA_NOISY_ANOMALY", + "DATA_INCORRECT" + ] + }, + "InsightHealth":{ + "type":"structure", + "members":{ + "OpenProactiveInsights":{ + "shape":"NumOpenProactiveInsights", + "documentation":"

    The number of open proactive insights.

    " + }, + "OpenReactiveInsights":{ + "shape":"NumOpenReactiveInsights", + "documentation":"

    The number of open reactive insights.

    " + }, + "MeanTimeToRecoverInMilliseconds":{ + "shape":"MeanTimeToRecoverInMilliseconds", + "documentation":"

    The Mean Time to Recover (MTTR) for the insight.

    " + } + }, + "documentation":"

    Information about the number of open reactive and proactive insights that can be used to gauge the health of your system.

    " + }, + "InsightId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[\\w-]*$" + }, + "InsightName":{ + "type":"string", + "max":530, + "min":1, + "pattern":"^[\\s\\S]*$" + }, + "InsightSeverities":{ + "type":"list", + "member":{"shape":"InsightSeverity"}, + "max":3, + "min":0 + }, + "InsightSeverity":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH" + ] + }, + "InsightStatus":{ + "type":"string", + "enum":[ + "ONGOING", + "CLOSED" + ] + }, + "InsightStatuses":{ + "type":"list", + "member":{"shape":"InsightStatus"}, + "max":2, + "min":0 + }, + "InsightTimeRange":{ + "type":"structure", + "required":["StartTime"], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

    The time when the behavior described in an insight started.

    " + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

    The time when the behavior described in an insight ended.

    " + } + }, + "documentation":"

    A time range that specifies when the observed behavior in an insight started and ended.

    " + }, + "InsightType":{ + "type":"string", + "enum":[ + "REACTIVE", + "PROACTIVE" + ] + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ErrorMessageString"}, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

    The number of seconds after which the action that caused the internal server exception can be retried.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    An internal failure in an Amazon service occurred.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListAnomaliesForInsightMaxResults":{ + "type":"integer", + "max":500, + "min":1 + }, + "ListAnomaliesForInsightRequest":{ + "type":"structure", + "required":["InsightId"], + "members":{ + "InsightId":{ + "shape":"InsightId", + "documentation":"

    The ID of the insight. The returned anomalies belong to this insight.

    ", + "location":"uri", + "locationName":"InsightId" + }, + "StartTimeRange":{ + "shape":"StartTimeRange", + "documentation":"

    A time range used to specify when the requested anomalies started. All returned anomalies started during this time range.

    " + }, + "MaxResults":{ + "shape":"ListAnomaliesForInsightMaxResults", + "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

    " + } + } + }, + "ListAnomaliesForInsightResponse":{ + "type":"structure", + "members":{ + "ProactiveAnomalies":{ + "shape":"ProactiveAnomalies", + "documentation":"

    An array of ProactiveAnomalySummary objects that represent the requested anomalies.

    " + }, + "ReactiveAnomalies":{ + "shape":"ReactiveAnomalies", + "documentation":"

    An array of ReactiveAnomalySummary objects that represent the requested anomalies.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    " + } + } + }, + "ListEventsFilters":{ + "type":"structure", + "members":{ + "InsightId":{ + "shape":"InsightId", + "documentation":"

    An ID of an insight that is related to the events you want to filter for.

    " + }, + "EventTimeRange":{ + "shape":"EventTimeRange", + "documentation":"

    A time range during which you want the filtered events to have occurred.

    " + }, + "EventClass":{ + "shape":"EventClass", + "documentation":"

    The class of the events you want to filter for, such as an infrastructure change, a deployment, or a schema change.

    " + }, + "EventSource":{ + "shape":"EventSource", + "documentation":"

    The AWS source that emitted the events you want to filter for.

    " + }, + "DataSource":{ + "shape":"EventDataSource", + "documentation":"

    The source, AWS_CLOUD_TRAIL or AWS_CODE_DEPLOY, of the events you want returned.

    " + }, + "ResourceCollection":{"shape":"ResourceCollection"} + }, + "documentation":"

    Filters you can use to specify which events are returned when ListEvents is called.

    " + }, + "ListEventsMaxResults":{ + "type":"integer", + "max":200, + "min":1 + }, + "ListEventsRequest":{ + "type":"structure", + "required":["Filters"], + "members":{ + "Filters":{ + "shape":"ListEventsFilters", + "documentation":"

    A ListEventsFilters object used to specify which events to return.

    " + }, + "MaxResults":{ + "shape":"ListEventsMaxResults", + "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

    " + } + } + }, + "ListEventsResponse":{ + "type":"structure", + "required":["Events"], + "members":{ + "Events":{ + "shape":"Events", + "documentation":"

    A list of the requested events.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    " + } + } + }, + "ListInsightsAnyStatusFilter":{ + "type":"structure", + "required":[ + "Type", + "StartTimeRange" + ], + "members":{ + "Type":{ + "shape":"InsightType", + "documentation":"

    Use to filter for either REACTIVE or PROACTIVE insights.

    " + }, + "StartTimeRange":{ + "shape":"StartTimeRange", + "documentation":"

    A time range used to specify when the behavior of the filtered insights started.

    " + } + }, + "documentation":"

    Used to filter for insights that have any status.

    " + }, + "ListInsightsClosedStatusFilter":{ + "type":"structure", + "required":[ + "Type", + "EndTimeRange" + ], + "members":{ + "Type":{ + "shape":"InsightType", + "documentation":"

    Use to filter for either REACTIVE or PROACTIVE insights.

    " + }, + "EndTimeRange":{ + "shape":"EndTimeRange", + "documentation":"

    A time range used to specify when the behavior of the filtered insights ended.

    " + } + }, + "documentation":"

    Used to filter for insights that have the status CLOSED.

    " + }, + "ListInsightsMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListInsightsOngoingStatusFilter":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"InsightType", + "documentation":"

    Use to filter for either REACTIVE or PROACTIVE insights.

    " + } + }, + "documentation":"

    Used to filter for insights that have the status ONGOING.

    " + }, + "ListInsightsRequest":{ + "type":"structure", + "required":["StatusFilter"], + "members":{ + "StatusFilter":{ + "shape":"ListInsightsStatusFilter", + "documentation":"

    A filter used to filter the returned insights by their status. You can specify one status filter.

    " + }, + "MaxResults":{ + "shape":"ListInsightsMaxResults", + "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

    " + } + } + }, + "ListInsightsResponse":{ + "type":"structure", + "members":{ + "ProactiveInsights":{ + "shape":"ProactiveInsights", + "documentation":"

    The returned list of proactive insights.

    " + }, + "ReactiveInsights":{ + "shape":"ReactiveInsights", + "documentation":"

    The returned list of reactive insights.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    " + } + } + }, + "ListInsightsStatusFilter":{ + "type":"structure", + "members":{ + "Ongoing":{ + "shape":"ListInsightsOngoingStatusFilter", + "documentation":"

    A ListInsightsOngoingStatusFilter that specifies ongoing insights that are either REACTIVE or PROACTIVE.

    " + }, + "Closed":{ + "shape":"ListInsightsClosedStatusFilter", + "documentation":"

    A ListInsightsClosedStatusFilter that specifies closed insights that are either REACTIVE or PROACTIVE.

    " + }, + "Any":{ + "shape":"ListInsightsAnyStatusFilter", + "documentation":"

    A ListInsightsAnyStatusFilter that specifies insights of any status that are either REACTIVE or PROACTIVE.

    " + } + }, + "documentation":"

    A filter used by ListInsights to specify which insights to return.

    " + }, + "ListNotificationChannelsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

    " + } + } + }, + "ListNotificationChannelsResponse":{ + "type":"structure", + "members":{ + "Channels":{ + "shape":"Channels", + "documentation":"

    An array that contains the requested notification channels.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    " + } + } + }, + "ListRecommendationsRequest":{ + "type":"structure", + "required":["InsightId"], + "members":{ + "InsightId":{ + "shape":"InsightId", + "documentation":"

    The ID of the requested insight.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

    " + } + } + }, + "ListRecommendationsResponse":{ + "type":"structure", + "members":{ + "Recommendations":{ + "shape":"Recommendations", + "documentation":"

    An array of the requested recommendations.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    " + } + } + }, + "MeanTimeToRecoverInMilliseconds":{"type":"long"}, + "NotificationChannel":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"NotificationChannelId", + "documentation":"

    The ID of a notification channel.

    " + }, + "Config":{ + "shape":"NotificationChannelConfig", + "documentation":"

    A NotificationChannelConfig object that contains information about configured notification channels.

    " + } + }, + "documentation":"

    Information about a notification channel. A notification channel is used to notify you when DevOps Guru creates an insight. The one supported notification channel is Amazon Simple Notification Service (Amazon SNS).

    If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to send it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. For more information, see Permissions for cross account Amazon SNS topics.

    If you use an Amazon SNS topic that is encrypted by an AWS Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for AWS KMS–encrypted Amazon SNS topics.

    " + }, + "NotificationChannelConfig":{ + "type":"structure", + "required":["Sns"], + "members":{ + "Sns":{ + "shape":"SnsChannelConfig", + "documentation":"

    Information about a notification channel configured in DevOps Guru to send notifications when insights are created.

    If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to send it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. For more information, see Permissions for cross account Amazon SNS topics.

    If you use an Amazon SNS topic that is encrypted by an AWS Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for AWS KMS–encrypted Amazon SNS topics.

    " + } + }, + "documentation":"

    Information about notification channels you have configured with DevOps Guru. The one supported notification channel is Amazon Simple Notification Service (Amazon SNS).

    " + }, + "NotificationChannelId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "NumMetricsAnalyzed":{"type":"integer"}, + "NumOpenProactiveInsights":{"type":"integer"}, + "NumOpenReactiveInsights":{"type":"integer"}, + "NumProactiveInsights":{"type":"integer"}, + "NumReactiveInsights":{"type":"integer"}, + "OpsCenterIntegration":{ + "type":"structure", + "members":{ + "OptInStatus":{ + "shape":"OptInStatus", + "documentation":"

    Specifies if DevOps Guru is enabled to create an AWS Systems Manager OpsItem for each created insight.

    " + } + }, + "documentation":"

    Information about whether DevOps Guru is configured to create an OpsItem in AWS Systems Manager OpsCenter for each created insight.

    " + }, + "OpsCenterIntegrationConfig":{ + "type":"structure", + "members":{ + "OptInStatus":{ + "shape":"OptInStatus", + "documentation":"

    Specifies if DevOps Guru is enabled to create an AWS Systems Manager OpsItem for each created insight.

    " + } + }, + "documentation":"

    Information about whether DevOps Guru is configured to create an OpsItem in AWS Systems Manager OpsCenter for each created insight.

    " + }, + "OptInStatus":{ + "type":"string", + "documentation":"

    Specifies if DevOps Guru is enabled to create an AWS Systems Manager OpsItem for each created insight.

    ", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "PredictionTimeRange":{ + "type":"structure", + "required":["StartTime"], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

    The time range during which a metric limit is expected to be exceeded. This applies to proactive insights only.

    " + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

    The time when the behavior in a proactive insight is expected to end.

    " + } + }, + "documentation":"

    The time range during which anomalous behavior in a proactive anomaly or an insight is expected to occur.

    " + }, + "ProactiveAnomalies":{ + "type":"list", + "member":{"shape":"ProactiveAnomalySummary"} + }, + "ProactiveAnomaly":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"AnomalyId", + "documentation":"

    The ID of a proactive anomaly.

    " + }, + "Severity":{ + "shape":"AnomalySeverity", + "documentation":"

    The severity of a proactive anomaly.

    " + }, + "Status":{ + "shape":"AnomalyStatus", + "documentation":"

    The status of a proactive anomaly.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The time of the anomaly's most recent update.

    " + }, + "AnomalyTimeRange":{"shape":"AnomalyTimeRange"}, + "PredictionTimeRange":{"shape":"PredictionTimeRange"}, + "SourceDetails":{ + "shape":"AnomalySourceDetails", + "documentation":"

    Details about the source of the analyzed operational data that triggered the anomaly. The one supported source is Amazon CloudWatch metrics.

    " + }, + "AssociatedInsightId":{ + "shape":"InsightId", + "documentation":"

    The ID of the insight that contains this anomaly. An insight is composed of related anomalies.

    " + }, + "ResourceCollection":{"shape":"ResourceCollection"}, + "Limit":{ + "shape":"AnomalyLimit", + "documentation":"

    A threshold that was exceeded by behavior in analyzed resources. Exceeding this threshold is related to the anomalous behavior that generated this anomaly.

    " + } + }, + "documentation":"

    Information about an anomaly. This object is returned by ListAnomalies.

    " + }, + "ProactiveAnomalySummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"AnomalyId", + "documentation":"

    The ID of the anomaly.

    " + }, + "Severity":{ + "shape":"AnomalySeverity", + "documentation":"

    The severity of the anomaly.

    " + }, + "Status":{ + "shape":"AnomalyStatus", + "documentation":"

    The status of the anomaly.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The time of the anomaly's most recent update.

    " + }, + "AnomalyTimeRange":{"shape":"AnomalyTimeRange"}, + "PredictionTimeRange":{"shape":"PredictionTimeRange"}, + "SourceDetails":{ + "shape":"AnomalySourceDetails", + "documentation":"

    Details about the source of the analyzed operational data that triggered the anomaly. The one supported source is Amazon CloudWatch metrics.

    " + }, + "AssociatedInsightId":{ + "shape":"InsightId", + "documentation":"

    The ID of the insight that contains this anomaly. An insight is composed of related anomalies.

    " + }, + "ResourceCollection":{"shape":"ResourceCollection"}, + "Limit":{ + "shape":"AnomalyLimit", + "documentation":"

    A threshold that was exceeded by behavior in analyzed resources. Exceeding this threshold is related to the anomalous behavior that generated this anomaly.

    " + } + }, + "documentation":"

    Details about a proactive anomaly. This object is returned by DescribeAnomaly.

    " + }, + "ProactiveInsight":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"InsightId", + "documentation":"

    The ID of the proactive insight.

    " + }, + "Name":{ + "shape":"InsightName", + "documentation":"

    The name of the proactive insight.

    " + }, + "Severity":{ + "shape":"InsightSeverity", + "documentation":"

    The severity of the proactive insight.

    " + }, + "Status":{ + "shape":"InsightStatus", + "documentation":"

    The status of the proactive insight.

    " + }, + "InsightTimeRange":{"shape":"InsightTimeRange"}, + "PredictionTimeRange":{"shape":"PredictionTimeRange"}, + "ResourceCollection":{"shape":"ResourceCollection"}, + "SsmOpsItemId":{ + "shape":"SsmOpsItemId", + "documentation":"

    The ID of the AWS Systems Manager OpsItem created for this insight. You must enable the creation of OpsItems for insights before they are created for each insight.

    " + } + }, + "documentation":"

    Details about a proactive insight. This object is returned by ListInsights.

    " + }, + "ProactiveInsightSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"InsightId", + "documentation":"

    The ID of the proactive insight.

    " + }, + "Name":{ + "shape":"InsightName", + "documentation":"

    The name of the proactive insight.

    " + }, + "Severity":{ + "shape":"InsightSeverity", + "documentation":"

    The severity of the proactive insight.

    " + }, + "Status":{ + "shape":"InsightStatus", + "documentation":"

    The status of the proactive insight.

    " + }, + "InsightTimeRange":{"shape":"InsightTimeRange"}, + "PredictionTimeRange":{"shape":"PredictionTimeRange"}, + "ResourceCollection":{"shape":"ResourceCollection"} + }, + "documentation":"

    Details about a proactive insight. This object is returned by DescribeInsight.

    " + }, + "ProactiveInsights":{ + "type":"list", + "member":{"shape":"ProactiveInsightSummary"} + }, + "PutFeedbackRequest":{ + "type":"structure", + "members":{ + "InsightFeedback":{ + "shape":"InsightFeedback", + "documentation":"

    Feedback from customers about the recommendations in this insight.

    " + } + } + }, + "PutFeedbackResponse":{ + "type":"structure", + "members":{ + } + }, + "ReactiveAnomalies":{ + "type":"list", + "member":{"shape":"ReactiveAnomalySummary"} + }, + "ReactiveAnomaly":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"AnomalyId", + "documentation":"

    The ID of the reactive anomaly.

    " + }, + "Severity":{ + "shape":"AnomalySeverity", + "documentation":"

    The severity of the anomaly.

    " + }, + "Status":{ + "shape":"AnomalyStatus", + "documentation":"

    The status of the anomaly.

    " + }, + "AnomalyTimeRange":{"shape":"AnomalyTimeRange"}, + "SourceDetails":{ + "shape":"AnomalySourceDetails", + "documentation":"

    Details about the source of the analyzed operational data that triggered the anomaly. The one supported source is Amazon CloudWatch metrics.

    " + }, + "AssociatedInsightId":{ + "shape":"InsightId", + "documentation":"

    The ID of the insight that contains this anomaly. An insight is composed of related anomalies.

    " + }, + "ResourceCollection":{"shape":"ResourceCollection"} + }, + "documentation":"

    Details about a reactive anomaly. This object is returned by ListAnomalies.

    " + }, + "ReactiveAnomalySummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"AnomalyId", + "documentation":"

    The ID of the reactive anomaly.

    " + }, + "Severity":{ + "shape":"AnomalySeverity", + "documentation":"

    The severity of the reactive anomaly.

    " + }, + "Status":{ + "shape":"AnomalyStatus", + "documentation":"

    The status of the reactive anomaly.

    " + }, + "AnomalyTimeRange":{"shape":"AnomalyTimeRange"}, + "SourceDetails":{ + "shape":"AnomalySourceDetails", + "documentation":"

    Details about the source of the analyzed operational data that triggered the anomaly. The one supported source is Amazon CloudWatch metrics.

    " + }, + "AssociatedInsightId":{ + "shape":"InsightId", + "documentation":"

    The ID of the insight that contains this anomaly. An insight is composed of related anomalies.

    " + }, + "ResourceCollection":{"shape":"ResourceCollection"} + }, + "documentation":"

    Details about a reactive anomaly. This object is returned by DescribeAnomaly.

    " + }, + "ReactiveInsight":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"InsightId", + "documentation":"

    The ID of a reactive insight.

    " + }, + "Name":{ + "shape":"InsightName", + "documentation":"

    The name of a reactive insight.

    " + }, + "Severity":{ + "shape":"InsightSeverity", + "documentation":"

    The severity of a reactive insight.

    " + }, + "Status":{ + "shape":"InsightStatus", + "documentation":"

    The status of a reactive insight.

    " + }, + "InsightTimeRange":{"shape":"InsightTimeRange"}, + "ResourceCollection":{"shape":"ResourceCollection"}, + "SsmOpsItemId":{ + "shape":"SsmOpsItemId", + "documentation":"

    The ID of the AWS Systems Manager OpsItem created for this insight. You must enable the creation of OpsItems for insights before they are created for each insight.

    " + } + }, + "documentation":"

    Information about a reactive insight. This object is returned by ListInsights.

    " + }, + "ReactiveInsightSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"InsightId", + "documentation":"

    The ID of a reactive insight.

    " + }, + "Name":{ + "shape":"InsightName", + "documentation":"

    The name of a reactive insight.

    " + }, + "Severity":{ + "shape":"InsightSeverity", + "documentation":"

    The severity of a reactive insight.

    " + }, + "Status":{ + "shape":"InsightStatus", + "documentation":"

    The status of a reactive insight.

    " + }, + "InsightTimeRange":{"shape":"InsightTimeRange"}, + "ResourceCollection":{"shape":"ResourceCollection"} + }, + "documentation":"

    Information about a reactive insight. This object is returned by DescribeInsight.

    " + }, + "ReactiveInsights":{ + "type":"list", + "member":{"shape":"ReactiveInsightSummary"} + }, + "Recommendation":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"RecommendationDescription", + "documentation":"

    A description of the problem.

    " + }, + "Link":{ + "shape":"RecommendationLink", + "documentation":"

    A hyperlink to information to help you address the problem.

    " + }, + "Name":{ + "shape":"RecommendationName", + "documentation":"

    The name of the recommendation.

    " + }, + "Reason":{ + "shape":"RecommendationReason", + "documentation":"

    The reason DevOps Guru flagged the anomalous behavior as a problem.

    " + }, + "RelatedEvents":{ + "shape":"RecommendationRelatedEvents", + "documentation":"

    Events that are related to the problem. Use these events to learn more about what's happening and to help address the issue.

    " + }, + "RelatedAnomalies":{ + "shape":"RecommendationRelatedAnomalies", + "documentation":"

    Anomalies that are related to the problem. Use these anomalies to learn more about what's happening and to help address the issue.

    " + } + }, + "documentation":"

    Recommendation information to help you remediate detected anomalous behavior that generated an insight.

    " + }, + "RecommendationDescription":{"type":"string"}, + "RecommendationLink":{"type":"string"}, + "RecommendationName":{"type":"string"}, + "RecommendationReason":{"type":"string"}, + "RecommendationRelatedAnomalies":{ + "type":"list", + "member":{"shape":"RecommendationRelatedAnomaly"} + }, + "RecommendationRelatedAnomaly":{ + "type":"structure", + "members":{ + "Resources":{ + "shape":"RecommendationRelatedAnomalyResources", + "documentation":"

    An array of objects that represent resources in which DevOps Guru detected anomalous behavior. Each object contains the name and type of the resource.

    " + }, + "SourceDetails":{ + "shape":"RelatedAnomalySourceDetails", + "documentation":"

    Information about where the anomalous behavior related to the recommendation was found. For example, details in Amazon CloudWatch metrics.

    " + } + }, + "documentation":"

    Information about an anomaly that is related to a recommendation.

    " + }, + "RecommendationRelatedAnomalyResource":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"RecommendationRelatedAnomalyResourceName", + "documentation":"

    The name of the resource.

    " + }, + "Type":{ + "shape":"RecommendationRelatedAnomalyResourceType", + "documentation":"

    The type of the resource.

    " + } + }, + "documentation":"

    Information about a resource in which DevOps Guru detected anomalous behavior.

    " + }, + "RecommendationRelatedAnomalyResourceName":{"type":"string"}, + "RecommendationRelatedAnomalyResourceType":{"type":"string"}, + "RecommendationRelatedAnomalyResources":{ + "type":"list", + "member":{"shape":"RecommendationRelatedAnomalyResource"} + }, + "RecommendationRelatedAnomalySourceDetail":{ + "type":"structure", + "members":{ + "CloudWatchMetrics":{ + "shape":"RecommendationRelatedCloudWatchMetricsSourceDetails", + "documentation":"

    An array of CloudWatchMetricsDetail objects that contains information about the analyzed metrics that displayed anomalous behavior.

    " + } + }, + "documentation":"

    Contains an array of RecommendationRelatedCloudWatchMetricsSourceDetail objects that contain the name and namespace of an Amazon CloudWatch metric.

    " + }, + "RecommendationRelatedCloudWatchMetricsSourceDetail":{ + "type":"structure", + "members":{ + "MetricName":{ + "shape":"RecommendationRelatedCloudWatchMetricsSourceMetricName", + "documentation":"

    The name of the CloudWatch metric.

    " + }, + "Namespace":{ + "shape":"RecommendationRelatedCloudWatchMetricsSourceNamespace", + "documentation":"

    The namespace of the CloudWatch metric. A namespace is a container for CloudWatch metrics.

    " + } + }, + "documentation":"

    Information about an Amazon CloudWatch metric that is analyzed by DevOps Guru. It is one of many analyzed metrics that are used to generate insights.

    " + }, + "RecommendationRelatedCloudWatchMetricsSourceDetails":{ + "type":"list", + "member":{"shape":"RecommendationRelatedCloudWatchMetricsSourceDetail"} + }, + "RecommendationRelatedCloudWatchMetricsSourceMetricName":{"type":"string"}, + "RecommendationRelatedCloudWatchMetricsSourceNamespace":{"type":"string"}, + "RecommendationRelatedEvent":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"RecommendationRelatedEventName", + "documentation":"

    The name of the event. This corresponds to the Name field in an Event object.

    " + }, + "Resources":{ + "shape":"RecommendationRelatedEventResources", + "documentation":"

    A ResourceCollection object that contains arrays of the names of AWS CloudFormation stacks.

    " + } + }, + "documentation":"

    Information about an event that is related to a recommendation.

    " + }, + "RecommendationRelatedEventName":{"type":"string"}, + "RecommendationRelatedEventResource":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"RecommendationRelatedEventResourceName", + "documentation":"

    The name of the resource that emitted the event. This corresponds to the Name field in an EventResource object.

    " + }, + "Type":{ + "shape":"RecommendationRelatedEventResourceType", + "documentation":"

    The type of the resource that emitted the event. This corresponds to the Type field in an EventResource object.

    " + } + }, + "documentation":"

    Information about an AWS resource that emitted an event that is related to a recommendation in an insight.

    " + }, + "RecommendationRelatedEventResourceName":{"type":"string"}, + "RecommendationRelatedEventResourceType":{"type":"string"}, + "RecommendationRelatedEventResources":{ + "type":"list", + "member":{"shape":"RecommendationRelatedEventResource"} + }, + "RecommendationRelatedEvents":{ + "type":"list", + "member":{"shape":"RecommendationRelatedEvent"} + }, + "Recommendations":{ + "type":"list", + "member":{"shape":"Recommendation"}, + "max":10, + "min":0 + }, + "RelatedAnomalySourceDetails":{ + "type":"list", + "member":{"shape":"RecommendationRelatedAnomalySourceDetail"} + }, + "RemoveNotificationChannelRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"NotificationChannelId", + "documentation":"

    The ID of the notification channel to be removed.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "RemoveNotificationChannelResponse":{ + "type":"structure", + "members":{ + } + }, + "ResourceCollection":{ + "type":"structure", + "members":{ + "CloudFormation":{ + "shape":"CloudFormationCollection", + "documentation":"

    An array of the names of AWS CloudFormation stacks. The stacks define AWS resources that DevOps Guru analyzes.

    " + } + }, + "documentation":"

    A collection of AWS resources supported by DevOps Guru. The one type of AWS resource collection supported is AWS CloudFormation stacks. DevOps Guru can be configured to analyze only the AWS resources that are defined in the stacks.

    " + }, + "ResourceCollectionFilter":{ + "type":"structure", + "members":{ + "CloudFormation":{ + "shape":"CloudFormationCollectionFilter", + "documentation":"

    Information about AWS CloudFormation stacks. You can use stacks to specify which AWS resources in your account to analyze. For more information, see Stacks in the AWS CloudFormation User Guide.

    " + } + }, + "documentation":"

    Information about a filter used to specify which AWS resources are analyzed for anomalous behavior by DevOps Guru.

    " + }, + "ResourceCollectionType":{ + "type":"string", + "enum":["AWS_CLOUD_FORMATION"] + }, + "ResourceHours":{"type":"long"}, + "ResourceIdString":{"type":"string"}, + "ResourceIdType":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"ErrorMessageString"}, + "ResourceId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the AWS resource that could not be found.

    " + }, + "ResourceType":{ + "shape":"ResourceIdType", + "documentation":"

    The type of the AWS resource that could not be found.

    " + } + }, + "documentation":"

    A requested resource could not be found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "RetryAfterSeconds":{"type":"integer"}, + "SearchInsightsFilters":{ + "type":"structure", + "members":{ + "Severities":{ + "shape":"InsightSeverities", + "documentation":"

    An array of severity values used to search for insights.

    " + }, + "Statuses":{ + "shape":"InsightStatuses", + "documentation":"

    An array of status values used to search for insights.

    " + }, + "ResourceCollection":{"shape":"ResourceCollection"} + }, + "documentation":"

    Specifies one or more severity values and one or more status values that are used to search for insights.

    " + }, + "SearchInsightsMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "SearchInsightsRequest":{ + "type":"structure", + "required":[ + "StartTimeRange", + "Type" + ], + "members":{ + "StartTimeRange":{ + "shape":"StartTimeRange", + "documentation":"

    The start of the time range passed in. Returned insights occurred after this time.

    " + }, + "Filters":{ + "shape":"SearchInsightsFilters", + "documentation":"

    A SearchInsightsFilters object that is used to set the severity and status filters on your insight search.

    " + }, + "MaxResults":{ + "shape":"SearchInsightsMaxResults", + "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

    " + }, + "Type":{ + "shape":"InsightType", + "documentation":"

    The type of insights you are searching for (REACTIVE or PROACTIVE).

    " + } + } + }, + "SearchInsightsResponse":{ + "type":"structure", + "members":{ + "ProactiveInsights":{ + "shape":"ProactiveInsights", + "documentation":"

    The returned proactive insights.

    " + }, + "ReactiveInsights":{ + "shape":"ReactiveInsights", + "documentation":"

    The returned reactive insights.

    " + }, + "NextToken":{ + "shape":"UuidNextToken", + "documentation":"

    The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.

    " + } + } + }, + "ServiceIntegrationConfig":{ + "type":"structure", + "members":{ + "OpsCenter":{ + "shape":"OpsCenterIntegration", + "documentation":"

    Information about whether DevOps Guru is configured to create an OpsItem in AWS Systems Manager OpsCenter for each created insight.

    " + } + }, + "documentation":"

    Information about the integration of DevOps Guru with another AWS service, such as AWS Systems Manager.

    " + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessageString"} + }, + "documentation":"

    The request contains a value that exceeds a maximum quota.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "SnsChannelConfig":{ + "type":"structure", + "members":{ + "TopicArn":{ + "shape":"TopicArn", + "documentation":"

    The Amazon Resource Name (ARN) of an Amazon Simple Notification Service topic.

    " + } + }, + "documentation":"

    Contains the Amazon Resource Name (ARN) of an Amazon Simple Notification Service topic.

    If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to send it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. For more information, see Permissions for cross account Amazon SNS topics.

    If you use an Amazon SNS topic that is encrypted by an AWS Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for AWS KMS–encrypted Amazon SNS topics.

    " + }, + "SsmOpsItemId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^.*$" + }, + "StackName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z*]+[a-zA-Z0-9-]*$" + }, + "StackNames":{ + "type":"list", + "member":{"shape":"StackName"} + }, + "StartTimeRange":{ + "type":"structure", + "members":{ + "FromTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the time range.

    " + }, + "ToTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the time range.

    " + } + }, + "documentation":"

    A time range used to specify when the behavior of an insight or anomaly started.

    " + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ErrorMessageString"}, + "QuotaCode":{ + "shape":"ErrorQuotaCodeString", + "documentation":"

    The code of the quota that was exceeded, causing the throttling exception.

    " + }, + "ServiceCode":{ + "shape":"ErrorServiceCodeString", + "documentation":"

    The code of the service that caused the throttling exception.

    " + }, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

    The number of seconds after which the action that caused the throttling exception can be retried.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "TopicArn":{ + "type":"string", + "max":1024, + "min":36, + "pattern":"^arn:aws[a-z0-9-]*:sns:[a-z0-9-]+:\\d{12}:[^:]+$" + }, + "UpdateCloudFormationCollectionFilter":{ + "type":"structure", + "members":{ + "StackNames":{ + "shape":"UpdateStackNames", + "documentation":"

    An array of the names of stacks to update.

    " + } + }, + "documentation":"

    Contains the names of AWS CloudFormation stacks used to update a collection of stacks.

    " + }, + "UpdateResourceCollectionAction":{ + "type":"string", + "enum":[ + "ADD", + "REMOVE" + ] + }, + "UpdateResourceCollectionFilter":{ + "type":"structure", + "members":{ + "CloudFormation":{ + "shape":"UpdateCloudFormationCollectionFilter", + "documentation":"

    A collection of AWS CloudFormation stacks.

    " + } + }, + "documentation":"

    Contains information used to update a collection of AWS resources.

    " + }, + "UpdateResourceCollectionRequest":{ + "type":"structure", + "required":[ + "Action", + "ResourceCollection" + ], + "members":{ + "Action":{ + "shape":"UpdateResourceCollectionAction", + "documentation":"

    Specifies whether the resource collection in the request is added to or deleted from the resource collection.

    " + }, + "ResourceCollection":{"shape":"UpdateResourceCollectionFilter"} + } + }, + "UpdateResourceCollectionResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateServiceIntegrationConfig":{ + "type":"structure", + "members":{ + "OpsCenter":{"shape":"OpsCenterIntegrationConfig"} + }, + "documentation":"

    Information about updating the integration status of an AWS service, such as AWS Systems Manager, with DevOps Guru.

    " + }, + "UpdateServiceIntegrationRequest":{ + "type":"structure", + "required":["ServiceIntegration"], + "members":{ + "ServiceIntegration":{ + "shape":"UpdateServiceIntegrationConfig", + "documentation":"

    An IntegratedServiceConfig object used to specify the integrated service you want to update, and whether you want to enable or disable it.

    " + } + } + }, + "UpdateServiceIntegrationResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateStackNames":{ + "type":"list", + "member":{"shape":"StackName"}, + "max":100, + "min":0 + }, + "UuidNextToken":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"ErrorMessageString", + "documentation":"

    A message that describes the validation exception.

    " + }, + "Reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason the validation exception was thrown.

    " + }, + "Fields":{"shape":"ValidationExceptionFields"} + }, + "documentation":"

    Contains information about data passed in to a field during a request that is not valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "Name", + "Message" + ], + "members":{ + "Name":{ + "shape":"ErrorNameString", + "documentation":"

    The name of the field.

    " + }, + "Message":{ + "shape":"ErrorMessageString", + "documentation":"

    The message associated with the validation exception with information to help determine its cause.

    " + } + }, + "documentation":"

    The field associated with the validation exception.

    " + }, + "ValidationExceptionFields":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"}, + "documentation":"

    An array of fields that are associated with the validation exception.

    " + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "UNKNOWN_OPERATION", + "CANNOT_PARSE", + "FIELD_VALIDATION_FAILED", + "OTHER" + ] + } + }, + "documentation":"

    Amazon DevOps Guru is a fully managed service that helps you identify anomalous behavior in business-critical operational applications. You specify the AWS resources that you want DevOps Guru to cover, then the Amazon CloudWatch metrics and AWS CloudTrail events related to those resources are analyzed. When anomalous behavior is detected, DevOps Guru creates an insight that includes recommendations, related events, and related metrics that can help you improve your operational applications. For more information, see What is Amazon DevOps Guru.

    You can specify 1 or 2 Amazon Simple Notification Service topics so you are notified every time a new insight is created. You can also enable DevOps Guru to generate an OpsItem in AWS Systems Manager for each insight to help you manage and track your work addressing insights.

    To learn about the DevOps Guru workflow, see How DevOps Guru works. To learn about DevOps Guru concepts, see Concepts in DevOps Guru.
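    For illustration only (not part of the service model), a minimal Java sketch of registering an SNS notification channel with the client generated from this model. It assumes the devopsguru module, its AddNotificationChannel operation, and the builders implied by the NotificationChannelConfig and SnsChannelConfig shapes above; the topic ARN is a placeholder.

    import software.amazon.awssdk.services.devopsguru.DevOpsGuruClient;
    import software.amazon.awssdk.services.devopsguru.model.AddNotificationChannelRequest;
    import software.amazon.awssdk.services.devopsguru.model.NotificationChannelConfig;
    import software.amazon.awssdk.services.devopsguru.model.SnsChannelConfig;

    public class AddNotificationChannelSketch {
        public static void main(String[] args) {
            // Placeholder ARN; use a topic that DevOps Guru is allowed to publish to.
            String topicArn = "arn:aws:sns:us-east-1:123456789012:devops-guru-notifications";
            try (DevOpsGuruClient client = DevOpsGuruClient.create()) {
                client.addNotificationChannel(AddNotificationChannelRequest.builder()
                        .config(NotificationChannelConfig.builder()
                                .sns(SnsChannelConfig.builder().topicArn(topicArn).build())
                                .build())
                        .build());
            }
        }
    }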

    " +} diff --git a/services/directconnect/build.properties b/services/directconnect/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/directconnect/build.properties +++ b/services/directconnect/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index eea88ec44172..7e2f91cda1dc 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + ebs + AWS Java SDK :: Services :: EBS + The AWS Java SDK for EBS module holds the client classes that are used for + communicating with EBS. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.ebs + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/ebs/src/main/resources/codegen-resources/paginators-1.json b/services/ebs/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..e8595e4f3c15 --- /dev/null +++ b/services/ebs/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,14 @@ +{ + "pagination": { + "ListChangedBlocks": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListSnapshotBlocks": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/ebs/src/main/resources/codegen-resources/service-2.json b/services/ebs/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..4e76af26d805 --- /dev/null +++ b/services/ebs/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,818 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-11-02", + "endpointPrefix":"ebs", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon EBS", + "serviceFullName":"Amazon Elastic Block Store", + "serviceId":"EBS", + "signatureVersion":"v4", + "uid":"ebs-2019-11-02" + }, + "operations":{ + "CompleteSnapshot":{ + "name":"CompleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/snapshots/completion/{snapshotId}", + "responseCode":202 + }, + "input":{"shape":"CompleteSnapshotRequest"}, + "output":{"shape":"CompleteSnapshotResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Seals and completes the snapshot after all of the required blocks of data have been written to it. Completing the snapshot changes the status to completed. You cannot write new blocks to a snapshot after it has been completed.

    " + }, + "GetSnapshotBlock":{ + "name":"GetSnapshotBlock", + "http":{ + "method":"GET", + "requestUri":"/snapshots/{snapshotId}/blocks/{blockIndex}" + }, + "input":{"shape":"GetSnapshotBlockRequest"}, + "output":{"shape":"GetSnapshotBlockResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns the data in a block in an Amazon Elastic Block Store snapshot.

    " + }, + "ListChangedBlocks":{ + "name":"ListChangedBlocks", + "http":{ + "method":"GET", + "requestUri":"/snapshots/{secondSnapshotId}/changedblocks" + }, + "input":{"shape":"ListChangedBlocksRequest"}, + "output":{"shape":"ListChangedBlocksResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns information about the blocks that are different between two Amazon Elastic Block Store snapshots of the same volume/snapshot lineage.

    " + }, + "ListSnapshotBlocks":{ + "name":"ListSnapshotBlocks", + "http":{ + "method":"GET", + "requestUri":"/snapshots/{snapshotId}/blocks" + }, + "input":{"shape":"ListSnapshotBlocksRequest"}, + "output":{"shape":"ListSnapshotBlocksResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns information about the blocks in an Amazon Elastic Block Store snapshot.

    " + }, + "PutSnapshotBlock":{ + "name":"PutSnapshotBlock", + "http":{ + "method":"PUT", + "requestUri":"/snapshots/{snapshotId}/blocks/{blockIndex}", + "responseCode":201 + }, + "input":{"shape":"PutSnapshotBlockRequest"}, + "output":{"shape":"PutSnapshotBlockResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Writes a block of data to a snapshot. If the specified block contains data, the existing data is overwritten. The target snapshot must be in the pending state.

    Data written to a snapshot must be aligned with 512-byte sectors.

    ", + "authtype":"v4-unsigned-body" + }, + "StartSnapshot":{ + "name":"StartSnapshot", + "http":{ + "method":"POST", + "requestUri":"/snapshots", + "responseCode":201 + }, + "input":{"shape":"StartSnapshotRequest"}, + "output":{"shape":"StartSnapshotResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConcurrentLimitExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Creates a new Amazon EBS snapshot. The new snapshot enters the pending state after the request completes.

    After creating the snapshot, use PutSnapshotBlock to write blocks of data to the snapshot.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Reason"], + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Reason":{ + "shape":"AccessDeniedExceptionReason", + "documentation":"

    The reason for the exception.

    " + } + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccessDeniedExceptionReason":{ + "type":"string", + "enum":[ + "UNAUTHORIZED_ACCOUNT", + "DEPENDENCY_ACCESS_DENIED" + ] + }, + "Block":{ + "type":"structure", + "members":{ + "BlockIndex":{ + "shape":"BlockIndex", + "documentation":"

    The block index.

    " + }, + "BlockToken":{ + "shape":"BlockToken", + "documentation":"

    The block token for the block index.

    " + } + }, + "documentation":"

    A block of data in an Amazon Elastic Block Store snapshot.

    " + }, + "BlockData":{ + "type":"blob", + "sensitive":true, + "streaming":true + }, + "BlockIndex":{ + "type":"integer", + "min":0 + }, + "BlockSize":{"type":"integer"}, + "BlockToken":{ + "type":"string", + "max":256, + "pattern":"^[A-Za-z0-9+/=]+$" + }, + "Blocks":{ + "type":"list", + "member":{"shape":"Block"}, + "sensitive":true + }, + "Boolean":{"type":"boolean"}, + "ChangedBlock":{ + "type":"structure", + "members":{ + "BlockIndex":{ + "shape":"BlockIndex", + "documentation":"

    The block index.

    " + }, + "FirstBlockToken":{ + "shape":"BlockToken", + "documentation":"

    The block token for the block index of the FirstSnapshotId specified in the ListChangedBlocks operation. This value is absent if the first snapshot does not have the changed block that is on the second snapshot.

    " + }, + "SecondBlockToken":{ + "shape":"BlockToken", + "documentation":"

    The block token for the block index of the SecondSnapshotId specified in the ListChangedBlocks operation.

    " + } + }, + "documentation":"

    A block of data in an Amazon Elastic Block Store snapshot that is different from another snapshot of the same volume/snapshot lineage.

    ", + "sensitive":true + }, + "ChangedBlocks":{ + "type":"list", + "member":{"shape":"ChangedBlock"} + }, + "ChangedBlocksCount":{ + "type":"integer", + "min":0 + }, + "Checksum":{ + "type":"string", + "max":64, + "pattern":"^[A-Za-z0-9+/=]+$" + }, + "ChecksumAggregationMethod":{ + "type":"string", + "enum":["LINEAR"], + "max":32, + "pattern":"^[A-Za-z0-9]+$" + }, + "ChecksumAlgorithm":{ + "type":"string", + "enum":["SHA256"], + "max":32, + "pattern":"^[A-Za-z0-9]+$" + }, + "CompleteSnapshotRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "ChangedBlocksCount" + ], + "members":{ + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the snapshot.

    ", + "location":"uri", + "locationName":"snapshotId" + }, + "ChangedBlocksCount":{ + "shape":"ChangedBlocksCount", + "documentation":"

    The number of blocks that were written to the snapshot.

    ", + "location":"header", + "locationName":"x-amz-ChangedBlocksCount" + }, + "Checksum":{ + "shape":"Checksum", + "documentation":"

    An aggregated Base-64 SHA256 checksum based on the checksums of each written block.

    To generate the aggregated checksum using the linear aggregation method, arrange the checksums for each written block in ascending order of their block index, concatenate them to form a single string, and then generate the checksum on the entire string using the SHA256 algorithm.
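    As a rough sketch (not part of the model), the linear aggregation described above in Java; it assumes the concatenated Base64 checksum strings are hashed as UTF-8 bytes.

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.util.Base64;
    import java.util.TreeMap;

    public final class LinearChecksumAggregator {
        /** blockChecksums maps block index to the Base64 SHA256 checksum of that block's data. */
        public static String aggregate(TreeMap<Integer, String> blockChecksums) throws Exception {
            StringBuilder concatenated = new StringBuilder();
            // TreeMap iterates in ascending block-index order, as the aggregation method requires.
            blockChecksums.values().forEach(concatenated::append);
            MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
            byte[] digest = sha256.digest(concatenated.toString().getBytes(StandardCharsets.UTF_8));
            return Base64.getEncoder().encodeToString(digest);
        }
    }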

    ", + "location":"header", + "locationName":"x-amz-Checksum" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "documentation":"

    The algorithm used to generate the checksum. Currently, the only supported algorithm is SHA256.

    ", + "location":"header", + "locationName":"x-amz-Checksum-Algorithm" + }, + "ChecksumAggregationMethod":{ + "shape":"ChecksumAggregationMethod", + "documentation":"

    The aggregation method used to generate the checksum. Currently, the only supported aggregation method is LINEAR.

    ", + "location":"header", + "locationName":"x-amz-Checksum-Aggregation-Method" + } + } + }, + "CompleteSnapshotResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"Status", + "documentation":"

    The status of the snapshot.

    " + } + } + }, + "ConcurrentLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You have reached the limit for concurrent API requests. For more information, see Optimizing performance of the EBS direct APIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request uses the same client token as a previous, but non-identical request.

    ", + "error":{"httpStatusCode":503}, + "exception":true + }, + "DataLength":{"type":"integer"}, + "Description":{ + "type":"string", + "max":255, + "pattern":"^[\\S\\s]+$" + }, + "ErrorMessage":{ + "type":"string", + "max":256 + }, + "GetSnapshotBlockRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "BlockIndex", + "BlockToken" + ], + "members":{ + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the snapshot containing the block from which to get data.

    ", + "location":"uri", + "locationName":"snapshotId" + }, + "BlockIndex":{ + "shape":"BlockIndex", + "documentation":"

    The block index of the block from which to get data.

    Obtain the BlockIndex by running the ListChangedBlocks or ListSnapshotBlocks operations.

    ", + "location":"uri", + "locationName":"blockIndex" + }, + "BlockToken":{ + "shape":"BlockToken", + "documentation":"

    The block token of the block from which to get data.

    Obtain the BlockToken by running the ListChangedBlocks or ListSnapshotBlocks operations.

    ", + "location":"querystring", + "locationName":"blockToken" + } + } + }, + "GetSnapshotBlockResponse":{ + "type":"structure", + "members":{ + "DataLength":{ + "shape":"DataLength", + "documentation":"

    The size of the data in the block.

    ", + "location":"header", + "locationName":"x-amz-Data-Length" + }, + "BlockData":{ + "shape":"BlockData", + "documentation":"

    The data content of the block.

    " + }, + "Checksum":{ + "shape":"Checksum", + "documentation":"

    The checksum generated for the block, which is Base64 encoded.

    ", + "location":"header", + "locationName":"x-amz-Checksum" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "documentation":"

    The algorithm used to generate the checksum for the block, such as SHA256.

    ", + "location":"header", + "locationName":"x-amz-Checksum-Algorithm" + } + }, + "payload":"BlockData" + }, + "IdempotencyToken":{ + "type":"string", + "max":255, + "pattern":"^[\\S]+$" + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An internal error has occurred.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "KmsKeyArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:aws[a-z\\-]*:kms:.*:[0-9]{12}:key/.*", + "sensitive":true + }, + "ListChangedBlocksRequest":{ + "type":"structure", + "required":["SecondSnapshotId"], + "members":{ + "FirstSnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the first snapshot to use for the comparison.

    The FirstSnapshotId parameter must be specified with a SecondSnapshotId parameter; otherwise, an error occurs.

    ", + "location":"querystring", + "locationName":"firstSnapshotId" + }, + "SecondSnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the second snapshot to use for the comparison.

    The SecondSnapshotId parameter must be specified with a FirstSnapshotId parameter; otherwise, an error occurs.

    ", + "location":"uri", + "locationName":"secondSnapshotId" + }, + "NextToken":{ + "shape":"PageToken", + "documentation":"

    The token to request the next page of results.

    ", + "location":"querystring", + "locationName":"pageToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "StartingBlockIndex":{ + "shape":"BlockIndex", + "documentation":"

    The block index from which the comparison should start.

    The list in the response will start from this block index or the next valid block index in the snapshots.

    ", + "location":"querystring", + "locationName":"startingBlockIndex" + } + } + }, + "ListChangedBlocksResponse":{ + "type":"structure", + "members":{ + "ChangedBlocks":{ + "shape":"ChangedBlocks", + "documentation":"

    An array of objects containing information about the changed blocks.

    " + }, + "ExpiryTime":{ + "shape":"TimeStamp", + "documentation":"

    The time when the BlockToken expires.

    " + }, + "VolumeSize":{ + "shape":"VolumeSize", + "documentation":"

    The size of the volume, in GiB.

    " + }, + "BlockSize":{ + "shape":"BlockSize", + "documentation":"

    The size of the block.

    " + }, + "NextToken":{ + "shape":"PageToken", + "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "ListSnapshotBlocksRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the snapshot from which to get block indexes and block tokens.

    ", + "location":"uri", + "locationName":"snapshotId" + }, + "NextToken":{ + "shape":"PageToken", + "documentation":"

    The token to request the next page of results.

    ", + "location":"querystring", + "locationName":"pageToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "StartingBlockIndex":{ + "shape":"BlockIndex", + "documentation":"

    The block index from which the list should start. The list in the response will start from this block index or the next valid block index in the snapshot.

    ", + "location":"querystring", + "locationName":"startingBlockIndex" + } + } + }, + "ListSnapshotBlocksResponse":{ + "type":"structure", + "members":{ + "Blocks":{ + "shape":"Blocks", + "documentation":"

    An array of objects containing information about the blocks.

    " + }, + "ExpiryTime":{ + "shape":"TimeStamp", + "documentation":"

    The time when the BlockToken expires.

    " + }, + "VolumeSize":{ + "shape":"VolumeSize", + "documentation":"

    The size of the volume, in GiB.

    " + }, + "BlockSize":{ + "shape":"BlockSize", + "documentation":"

    The size of the block.

    " + }, + "NextToken":{ + "shape":"PageToken", + "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":10000, + "min":100 + }, + "OwnerId":{ + "type":"string", + "max":24, + "min":1, + "pattern":"\\S+" + }, + "PageToken":{ + "type":"string", + "max":256, + "pattern":"^[A-Za-z0-9+/=]+$" + }, + "Progress":{ + "type":"integer", + "max":100, + "min":0 + }, + "PutSnapshotBlockRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "BlockIndex", + "BlockData", + "DataLength", + "Checksum", + "ChecksumAlgorithm" + ], + "members":{ + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the snapshot.

    ", + "location":"uri", + "locationName":"snapshotId" + }, + "BlockIndex":{ + "shape":"BlockIndex", + "documentation":"

    The block index of the block in which to write the data. A block index is a logical index in units of 512 KiB blocks. To identify the block index, divide the logical offset of the data in the logical volume by the block size (logical offset of data/524288). The logical offset of the data must be 512 KiB aligned.
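    A small worked example of that arithmetic (illustrative values only):

    // Deriving a block index from a 512 KiB-aligned logical offset.
    long blockSizeBytes = 524_288L;            // 512 KiB
    long logicalOffset  = 5_242_880L;          // example offset; must be a multiple of 524288
    if (logicalOffset % blockSizeBytes != 0) {
        throw new IllegalArgumentException("offset is not 512 KiB aligned");
    }
    long blockIndex = logicalOffset / blockSizeBytes;   // -> 10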

    ", + "location":"uri", + "locationName":"blockIndex" + }, + "BlockData":{ + "shape":"BlockData", + "documentation":"

    The data to write to the block.

    The block data is not signed as part of the Signature Version 4 signing process. As a result, you must generate and provide a Base64-encoded SHA256 checksum for the block data using the x-amz-Checksum header. Also, you must specify the checksum algorithm using the x-amz-Checksum-Algorithm header. The checksum that you provide is part of the Signature Version 4 signing process. It is validated against a checksum generated by Amazon EBS to ensure the validity and authenticity of the data. If the checksums do not correspond, the request fails. For more information, see Using checksums with the EBS direct APIs in the Amazon Elastic Compute Cloud User Guide.

    " + }, + "DataLength":{ + "shape":"DataLength", + "documentation":"

    The size of the data to write to the block, in bytes. Currently, the only supported size is 524288.

    Valid values: 524288

    ", + "location":"header", + "locationName":"x-amz-Data-Length" + }, + "Progress":{ + "shape":"Progress", + "documentation":"

    The progress of the write process, as a percentage.

    ", + "location":"header", + "locationName":"x-amz-Progress" + }, + "Checksum":{ + "shape":"Checksum", + "documentation":"

    A Base64-encoded SHA256 checksum of the data. Only SHA256 checksums are supported.

    ", + "location":"header", + "locationName":"x-amz-Checksum" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "documentation":"

    The algorithm used to generate the checksum. Currently, the only supported algorithm is SHA256.

    ", + "location":"header", + "locationName":"x-amz-Checksum-Algorithm" + } + }, + "payload":"BlockData" + }, + "PutSnapshotBlockResponse":{ + "type":"structure", + "members":{ + "Checksum":{ + "shape":"Checksum", + "documentation":"

    The SHA256 checksum generated for the block data by Amazon EBS.

    ", + "location":"header", + "locationName":"x-amz-Checksum" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "documentation":"

    The algorithm used by Amazon EBS to generate the checksum.

    ", + "location":"header", + "locationName":"x-amz-Checksum-Algorithm" + } + } + }, + "RequestThrottledException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Reason":{ + "shape":"RequestThrottledExceptionReason", + "documentation":"

    The reason for the exception.

    " + } + }, + "documentation":"

    The number of API requests has exceeded the maximum allowed API request throttling limit.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "RequestThrottledExceptionReason":{ + "type":"string", + "enum":[ + "ACCOUNT_THROTTLED", + "DEPENDENCY_REQUEST_THROTTLED" + ] + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Reason":{ + "shape":"ResourceNotFoundExceptionReason", + "documentation":"

    The reason for the exception.

    " + } + }, + "documentation":"

    The specified resource does not exist.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceNotFoundExceptionReason":{ + "type":"string", + "enum":[ + "SNAPSHOT_NOT_FOUND", + "DEPENDENCY_RESOURCE_NOT_FOUND" + ] + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Reason":{ + "shape":"ServiceQuotaExceededExceptionReason", + "documentation":"

    The reason for the exception.

    " + } + }, + "documentation":"

    Your current service quotas do not allow you to perform this action.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "ServiceQuotaExceededExceptionReason":{ + "type":"string", + "enum":["DEPENDENCY_SERVICE_QUOTA_EXCEEDED"] + }, + "SnapshotId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^snap-[0-9a-f]+$" + }, + "StartSnapshotRequest":{ + "type":"structure", + "required":["VolumeSize"], + "members":{ + "VolumeSize":{ + "shape":"VolumeSize", + "documentation":"

    The size of the volume, in GiB. The maximum size is 16384 GiB (16 TiB).

    " + }, + "ParentSnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the parent snapshot. If there is no parent snapshot, or if you are creating the first snapshot for an on-premises volume, omit this parameter.

    If your account is enabled for encryption by default, you cannot use an unencrypted snapshot as a parent snapshot. You must first create an encrypted copy of the parent snapshot using CopySnapshot.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The tags to apply to the snapshot.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description for the snapshot.

    " + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request, and they have no additional effect.

    If you do not specify a client token, one is automatically generated by the AWS SDK.

    For more information, see Idempotency for StartSnapshot API in the Amazon Elastic Compute Cloud User Guide.
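    A minimal sketch of pinning the client token explicitly, assuming the StartSnapshotRequest builder generated from this model; the values are placeholders.

    // Retrying StartSnapshot safely by supplying the same client token on every attempt.
    String clientToken = java.util.UUID.randomUUID().toString();
    StartSnapshotRequest request = StartSnapshotRequest.builder()
            .volumeSize(8L)                  // GiB; example value
            .description("example snapshot")
            .clientToken(clientToken)        // identical retries return the original result
            .build();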

    ", + "idempotencyToken":true + }, + "Encrypted":{ + "shape":"Boolean", + "documentation":"

    Indicates whether to encrypt the snapshot. To create an encrypted snapshot, specify true. To create an unencrypted snapshot, omit this parameter.

    If you specify a value for ParentSnapshotId, omit this parameter.

    If you specify true, the snapshot is encrypted using the CMK specified using the KmsKeyArn parameter. If no value is specified for KmsKeyArn, the default CMK for your account is used. If no default CMK has been specified for your account, the AWS managed CMK is used. To set a default CMK for your account, use ModifyEbsDefaultKmsKeyId.

    If your account is enabled for encryption by default, you cannot set this parameter to false. In this case, you can omit this parameter.

    For more information, see Using encryption in the Amazon Elastic Compute Cloud User Guide.

    " + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer master key (CMK) to be used to encrypt the snapshot. If you do not specify a CMK, the default AWS managed CMK is used.

    If you specify a ParentSnapshotId, omit this parameter; the snapshot will be encrypted using the same CMK that was used to encrypt the parent snapshot.

    If Encrypted is set to true, you must specify a CMK ARN.

    " + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

    The amount of time (in minutes) after which the snapshot is automatically cancelled if:

    • No blocks are written to the snapshot.

    • The snapshot is not completed after writing the last block of data.

    If no value is specified, the timeout defaults to 60 minutes.

    " + } + } + }, + "StartSnapshotResponse":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"Description", + "documentation":"

    The description of the snapshot.

    " + }, + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the snapshot.

    " + }, + "OwnerId":{ + "shape":"OwnerId", + "documentation":"

    The AWS account ID of the snapshot owner.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the snapshot.

    " + }, + "StartTime":{ + "shape":"TimeStamp", + "documentation":"

    The timestamp when the snapshot was created.

    " + }, + "VolumeSize":{ + "shape":"VolumeSize", + "documentation":"

    The size of the volume, in GiB.

    " + }, + "BlockSize":{ + "shape":"BlockSize", + "documentation":"

    The size of the blocks in the snapshot, in bytes.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The tags applied to the snapshot. You can specify up to 50 tags per snapshot. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

    " + }, + "ParentSnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the parent snapshot.

    " + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer master key (CMK) used to encrypt the snapshot.

    " + } + } + }, + "Status":{ + "type":"string", + "enum":[ + "completed", + "pending", + "error" + ], + "max":32 + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    The key of the tag.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    The value of the tag.

    " + } + }, + "documentation":"

    Describes a tag.

    " + }, + "TagKey":{ + "type":"string", + "max":127, + "pattern":"^[\\S\\s]+$" + }, + "TagValue":{ + "type":"string", + "max":255, + "pattern":"^[\\S\\s]+$" + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TimeStamp":{"type":"timestamp"}, + "Timeout":{ + "type":"integer", + "max":60, + "min":10 + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason for the validation exception.

    " + } + }, + "documentation":"

    The input fails to satisfy the constraints of the EBS direct APIs.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "INVALID_CUSTOMER_KEY", + "INVALID_PAGE_TOKEN", + "INVALID_BLOCK_TOKEN", + "INVALID_SNAPSHOT_ID", + "UNRELATED_SNAPSHOTS", + "INVALID_BLOCK", + "INVALID_CONTENT_ENCODING", + "INVALID_TAG", + "INVALID_DEPENDENCY_REQUEST", + "INVALID_PARAMETER_VALUE", + "INVALID_VOLUME_SIZE" + ] + }, + "VolumeSize":{ + "type":"long", + "min":1 + } + }, + "documentation":"

    You can use the Amazon Elastic Block Store (Amazon EBS) direct APIs to create EBS snapshots, write data directly to your snapshots, read data on your snapshots, and identify the differences or changes between two snapshots. If you’re an independent software vendor (ISV) who offers backup services for Amazon EBS, the EBS direct APIs make it more efficient and cost-effective to track incremental changes on your EBS volumes through snapshots. This can be done without having to create new volumes from snapshots, and then use Amazon Elastic Compute Cloud (Amazon EC2) instances to compare the differences.

    You can create incremental snapshots directly from on-premises data into EBS volumes in the cloud to use for quick disaster recovery. With the ability to write and read snapshots, you can write your on-premises data to an EBS snapshot during a disaster. Then, after recovery, you can restore it back to AWS or on premises from the snapshot. You no longer need to build and maintain complex mechanisms to copy data to and from Amazon EBS.

    This API reference provides detailed information about the actions, data types, parameters, and errors of the EBS direct APIs. For more information about the elements that make up the EBS direct APIs, and examples of how to use them effectively, see Accessing the Contents of an EBS Snapshot in the Amazon Elastic Compute Cloud User Guide. For more information about the supported AWS Regions, endpoints, and service quotas for the EBS direct APIs, see Amazon Elastic Block Store Endpoints and Quotas in the AWS General Reference.
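    As an illustration of the "identify the differences between two snapshots" use case described above, here is a hedged sketch using the ListChangedBlocks operation of the same EBS direct APIs client. The ListChangedBlocks shapes are not part of this excerpt, so the member names are assumed from the published API, and the snapshot IDs are placeholders.

```java
import software.amazon.awssdk.services.ebs.EbsClient;
import software.amazon.awssdk.services.ebs.model.ChangedBlock;
import software.amazon.awssdk.services.ebs.model.ListChangedBlocksRequest;
import software.amazon.awssdk.services.ebs.model.ListChangedBlocksResponse;

public class CompareSnapshots {
    public static void main(String[] args) {
        try (EbsClient ebs = EbsClient.create()) {
            ListChangedBlocksResponse diff = ebs.listChangedBlocks(ListChangedBlocksRequest.builder()
                    .firstSnapshotId("snap-1111111111111111")   // older snapshot (placeholder)
                    .secondSnapshotId("snap-2222222222222222")  // newer snapshot (placeholder)
                    .build());

            // Each ChangedBlock identifies a block index that differs between the two snapshots.
            for (ChangedBlock block : diff.changedBlocks()) {
                System.out.println("Changed block index: " + block.blockIndex());
            }
        }
    }
}
```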

    " +} diff --git a/services/ec2/build.properties b/services/ec2/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/ec2/build.properties +++ b/services/ec2/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index 6b63622713d7..a77580cc5c4d 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + ecrpublic + AWS Java SDK :: Services :: ECR PUBLIC + The AWS Java SDK for ECR PUBLIC module holds the client classes that are used for + communicating with ECR PUBLIC. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.ecrpublic + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/ecrpublic/src/main/resources/codegen-resources/paginators-1.json b/services/ecrpublic/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..bcdd4c61078a --- /dev/null +++ b/services/ecrpublic/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "DescribeImageTags": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "imageTagDetails" + }, + "DescribeImages": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "imageDetails" + }, + "DescribeRegistries": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "registries" + }, + "DescribeRepositories": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "repositories" + } + } +} \ No newline at end of file diff --git a/services/ecrpublic/src/main/resources/codegen-resources/service-2.json b/services/ecrpublic/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..6901381fcc76 --- /dev/null +++ b/services/ecrpublic/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1744 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-10-30", + "endpointPrefix":"api.ecr-public", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon ECR Public", + "serviceFullName":"Amazon Elastic Container Registry Public", + "serviceId":"ECR PUBLIC", + "signatureVersion":"v4", + "signingName":"ecr-public", + "targetPrefix":"SpencerFrontendService", + "uid":"ecr-public-2020-10-30" + }, + "operations":{ + "BatchCheckLayerAvailability":{ + "name":"BatchCheckLayerAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchCheckLayerAvailabilityRequest"}, + "output":{"shape":"BatchCheckLayerAvailabilityResponse"}, + "errors":[ + {"shape":"RepositoryNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServerException"}, + {"shape":"RegistryNotFoundException"} + ], + "documentation":"

    Checks the availability of one or more image layers within a repository in a public registry. When an image is pushed to a repository, each image layer is checked to verify if it has been uploaded before. If it has been uploaded, then the image layer is skipped.

    This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

    " + }, + "BatchDeleteImage":{ + "name":"BatchDeleteImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteImageRequest"}, + "output":{"shape":"BatchDeleteImageResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

    Deletes a list of specified images within a repository in a public registry. Images are specified with either an imageTag or imageDigest.

    You can remove a tag from an image by specifying the image's tag in your request. When you remove the last tag from an image, the image is deleted from your repository.

    You can completely delete an image (and all of its tags) by specifying the image's digest in your request.
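    A hedged sketch of deleting an image by tag with the client generated for this model. The module name ecrpublic and the EcrPublicClient class are assumptions based on the pom.xml added in this diff; the repository name and tag are placeholders.

```java
import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
import software.amazon.awssdk.services.ecrpublic.model.BatchDeleteImageRequest;
import software.amazon.awssdk.services.ecrpublic.model.BatchDeleteImageResponse;
import software.amazon.awssdk.services.ecrpublic.model.ImageIdentifier;

public class DeleteImageByTag {
    public static void main(String[] args) {
        try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
            BatchDeleteImageResponse response = ecrPublic.batchDeleteImage(BatchDeleteImageRequest.builder()
                    .repositoryName("project-a/nginx-web-app")                       // placeholder repository
                    .imageIds(ImageIdentifier.builder().imageTag("v1.0.0").build())  // delete by tag
                    .build());

            response.imageIds().forEach(id -> System.out.println("Deleted: " + id));
            response.failures().forEach(f -> System.out.println("Failed: " + f.failureReason()));
        }
    }
}
```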

    " + }, + "CompleteLayerUpload":{ + "name":"CompleteLayerUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CompleteLayerUploadRequest"}, + "output":{"shape":"CompleteLayerUploadResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"UploadNotFoundException"}, + {"shape":"InvalidLayerException"}, + {"shape":"LayerPartTooSmallException"}, + {"shape":"LayerAlreadyExistsException"}, + {"shape":"EmptyUploadException"}, + {"shape":"RegistryNotFoundException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

    Informs Amazon ECR that the image layer upload has completed for a specified public registry, repository name, and upload ID. You can optionally provide a sha256 digest of the image layer for data validation purposes.

    When an image is pushed, the CompleteLayerUpload API is called once per each new image layer to verify that the upload has completed.

    This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

    " + }, + "CreateRepository":{ + "name":"CreateRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRepositoryRequest"}, + "output":{"shape":"CreateRepositoryResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryAlreadyExistsException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a repository in a public registry. For more information, see Amazon ECR repositories in the Amazon Elastic Container Registry User Guide.
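    A minimal sketch of creating a public repository with catalog data, assuming the generated EcrPublicClient and RepositoryCatalogDataInput classes. The catalog-data member name description comes from the published ECR Public API rather than this excerpt, and the repository name is a placeholder.

```java
import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
import software.amazon.awssdk.services.ecrpublic.model.CreateRepositoryRequest;
import software.amazon.awssdk.services.ecrpublic.model.CreateRepositoryResponse;
import software.amazon.awssdk.services.ecrpublic.model.RepositoryCatalogDataInput;

public class CreatePublicRepository {
    public static void main(String[] args) {
        try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
            CreateRepositoryResponse response = ecrPublic.createRepository(CreateRepositoryRequest.builder()
                    .repositoryName("project-a/nginx-web-app")     // may include a namespace, as noted above
                    .catalogData(RepositoryCatalogDataInput.builder()
                            .description("Example web app image")  // shown publicly in the Gallery
                            .build())
                    .build());

            System.out.println("Repository URI: " + response.repository().repositoryUri());
        }
    }
}
```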

    " + }, + "DeleteRepository":{ + "name":"DeleteRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryRequest"}, + "output":{"shape":"DeleteRepositoryResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryNotEmptyException"} + ], + "documentation":"

    Deletes a repository in a public registry. If the repository contains images, you must either delete all images in the repository or use the force option which deletes all images on your behalf before deleting the repository.

    " + }, + "DeleteRepositoryPolicy":{ + "name":"DeleteRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryPolicyRequest"}, + "output":{"shape":"DeleteRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryPolicyNotFoundException"} + ], + "documentation":"

    Deletes the repository policy associated with the specified repository.

    " + }, + "DescribeImageTags":{ + "name":"DescribeImageTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImageTagsRequest"}, + "output":{"shape":"DescribeImageTagsResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

    Returns the image tag details for a repository in a public registry.

    " + }, + "DescribeImages":{ + "name":"DescribeImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImagesRequest"}, + "output":{"shape":"DescribeImagesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"ImageNotFoundException"} + ], + "documentation":"

    Returns metadata about the images in a repository in a public registry.

    Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

    " + }, + "DescribeRegistries":{ + "name":"DescribeRegistries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRegistriesRequest"}, + "output":{"shape":"DescribeRegistriesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedCommandException"}, + {"shape":"ServerException"} + ], + "documentation":"

    Returns details for a public registry.

    " + }, + "DescribeRepositories":{ + "name":"DescribeRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRepositoriesRequest"}, + "output":{"shape":"DescribeRepositoriesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

    Describes repositories in a public registry.

    " + }, + "GetAuthorizationToken":{ + "name":"GetAuthorizationToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAuthorizationTokenRequest"}, + "output":{"shape":"GetAuthorizationTokenResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

    Retrieves an authorization token. An authorization token represents your IAM authentication credentials and can be used to access any Amazon ECR registry that your IAM principal has access to. The authorization token is valid for 12 hours. This API requires the ecr-public:GetAuthorizationToken and sts:GetServiceBearerToken permissions.
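    A hedged sketch of fetching and decoding the authorization token for use with docker login. The user:password layout follows the documentation above; the AuthorizationData accessors are assumed from the shapes defined later in this model.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
import software.amazon.awssdk.services.ecrpublic.model.GetAuthorizationTokenRequest;
import software.amazon.awssdk.services.ecrpublic.model.GetAuthorizationTokenResponse;

public class GetPublicRegistryToken {
    public static void main(String[] args) {
        try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
            GetAuthorizationTokenResponse response =
                    ecrPublic.getAuthorizationToken(GetAuthorizationTokenRequest.builder().build());

            // The token is base64 encoded and decodes to "user:password"; it is valid for 12 hours.
            String decoded = new String(
                    Base64.getDecoder().decode(response.authorizationData().authorizationToken()),
                    StandardCharsets.UTF_8);
            String[] parts = decoded.split(":", 2);
            System.out.println("User: " + parts[0] + " (password elided; pass it to docker login)");
        }
    }
}
```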

    " + }, + "GetRegistryCatalogData":{ + "name":"GetRegistryCatalogData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRegistryCatalogDataRequest"}, + "output":{"shape":"GetRegistryCatalogDataResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

    Retrieves catalog metadata for a public registry.

    " + }, + "GetRepositoryCatalogData":{ + "name":"GetRepositoryCatalogData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositoryCatalogDataRequest"}, + "output":{"shape":"GetRepositoryCatalogDataResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

    Retrieves catalog metadata for a repository in a public registry. This metadata is displayed publicly in the Amazon ECR Public Gallery.

    " + }, + "GetRepositoryPolicy":{ + "name":"GetRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositoryPolicyRequest"}, + "output":{"shape":"GetRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryPolicyNotFoundException"} + ], + "documentation":"

    Retrieves the repository policy for the specified repository.

    " + }, + "InitiateLayerUpload":{ + "name":"InitiateLayerUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"InitiateLayerUploadRequest"}, + "output":{"shape":"InitiateLayerUploadResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RegistryNotFoundException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

    Notifies Amazon ECR that you intend to upload an image layer.

    When an image is pushed, the InitiateLayerUpload API is called once per image layer that has not already been uploaded. Whether or not an image layer has been uploaded is determined by the BatchCheckLayerAvailability API action.

    This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

    " + }, + "PutImage":{ + "name":"PutImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutImageRequest"}, + "output":{"shape":"PutImageResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"ImageAlreadyExistsException"}, + {"shape":"LayersNotFoundException"}, + {"shape":"ReferencedImagesNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ImageTagAlreadyExistsException"}, + {"shape":"ImageDigestDoesNotMatchException"}, + {"shape":"RegistryNotFoundException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

    Creates or updates the image manifest and tags associated with an image.

    When an image is pushed and all new image layers have been uploaded, the PutImage API is called once to create or update the image manifest and the tags associated with the image.

    This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.
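    A hedged sketch of calling PutImage directly with a manifest document. In normal use the docker CLI performs this step; the manifest string, tag, and repository name here are placeholders.

```java
import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
import software.amazon.awssdk.services.ecrpublic.model.PutImageRequest;
import software.amazon.awssdk.services.ecrpublic.model.PutImageResponse;

public class PutImageExample {
    public static void main(String[] args) {
        String manifestJson = "{...}"; // a Docker Image Manifest V2 Schema 2 or OCI manifest (placeholder)

        try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
            PutImageResponse response = ecrPublic.putImage(PutImageRequest.builder()
                    .repositoryName("project-a/nginx-web-app")
                    .imageManifest(manifestJson)
                    .imageManifestMediaType("application/vnd.docker.distribution.manifest.v2+json")
                    .imageTag("v1.0.0") // required for V2 Schema 2 / OCI manifests, per the docs above
                    .build());

            System.out.println("Pushed digest: " + response.image().imageId().imageDigest());
        }
    }
}
```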

    " + }, + "PutRegistryCatalogData":{ + "name":"PutRegistryCatalogData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRegistryCatalogDataRequest"}, + "output":{"shape":"PutRegistryCatalogDataResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

    Creates or updates the catalog data for a public registry.
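    A one-call sketch of setting the registry display name with the generated client; per the displayName documentation later in this model, the name is only shown publicly for verified accounts, and the value used here is a placeholder.

```java
import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
import software.amazon.awssdk.services.ecrpublic.model.PutRegistryCatalogDataRequest;

public class SetRegistryDisplayName {
    public static void main(String[] args) {
        try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
            // The display name appears as the repository author in the Amazon ECR Public Gallery.
            ecrPublic.putRegistryCatalogData(PutRegistryCatalogDataRequest.builder()
                    .displayName("Example Corp") // placeholder display name
                    .build());
        }
    }
}
```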

    " + }, + "PutRepositoryCatalogData":{ + "name":"PutRepositoryCatalogData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRepositoryCatalogDataRequest"}, + "output":{"shape":"PutRepositoryCatalogDataResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

    Creates or updates the catalog data for a repository in a public registry.

    " + }, + "SetRepositoryPolicy":{ + "name":"SetRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetRepositoryPolicyRequest"}, + "output":{"shape":"SetRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

    Applies a repository policy to the specified public repository to control access permissions. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.
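    A hedged sketch of applying a repository policy. The SetRepositoryPolicyRequest members repositoryName and policyText are assumed from the published API, since that request shape lies outside this excerpt, and the policy document itself is purely illustrative.

```java
import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
import software.amazon.awssdk.services.ecrpublic.model.SetRepositoryPolicyRequest;

public class ApplyRepositoryPolicy {
    public static void main(String[] args) {
        // Illustrative policy allowing a specific account to push to this repository.
        String policyText = "{\n"
                + "  \"Version\": \"2012-10-17\",\n"
                + "  \"Statement\": [{\n"
                + "    \"Sid\": \"AllowPush\",\n"
                + "    \"Effect\": \"Allow\",\n"
                + "    \"Principal\": {\"AWS\": \"arn:aws:iam::123456789012:root\"},\n"
                + "    \"Action\": [\"ecr-public:InitiateLayerUpload\", \"ecr-public:PutImage\"]\n"
                + "  }]\n"
                + "}";

        try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
            ecrPublic.setRepositoryPolicy(SetRepositoryPolicyRequest.builder()
                    .repositoryName("project-a/nginx-web-app")
                    .policyText(policyText)
                    .build());
        }
    }
}
```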

    " + }, + "UploadLayerPart":{ + "name":"UploadLayerPart", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UploadLayerPartRequest"}, + "output":{"shape":"UploadLayerPartResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidLayerPartException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"UploadNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"RegistryNotFoundException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

    Uploads an image layer part to Amazon ECR.

    When an image is pushed, each new image layer is uploaded in parts. The maximum size of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once per each new image layer part.

    This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.
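    For completeness, a hedged sketch of the three-step layer upload flow described above (InitiateLayerUpload, UploadLayerPart, CompleteLayerUpload). The UploadLayerPart member names partFirstByte, partLastByte, and layerPartBlob are assumed from the published API, since that request shape lies outside this excerpt; in practice the docker CLI drives these calls, and the layer bytes and digest below are placeholders.

```java
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
import software.amazon.awssdk.services.ecrpublic.model.CompleteLayerUploadRequest;
import software.amazon.awssdk.services.ecrpublic.model.InitiateLayerUploadRequest;
import software.amazon.awssdk.services.ecrpublic.model.InitiateLayerUploadResponse;
import software.amazon.awssdk.services.ecrpublic.model.UploadLayerPartRequest;

public class UploadLayerFlow {
    public static void main(String[] args) {
        byte[] layerBytes = new byte[1024];          // the compressed layer content (placeholder)
        String layerDigest = "sha256:<hex-digest>";  // sha256 of layerBytes (placeholder)
        String repositoryName = "project-a/nginx-web-app";

        try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
            // 1. Ask Amazon ECR for an upload ID for the new layer.
            InitiateLayerUploadResponse upload = ecrPublic.initiateLayerUpload(
                    InitiateLayerUploadRequest.builder().repositoryName(repositoryName).build());

            // 2. Upload the layer in one part (each part may be at most 20971520 bytes).
            ecrPublic.uploadLayerPart(UploadLayerPartRequest.builder()
                    .repositoryName(repositoryName)
                    .uploadId(upload.uploadId())
                    .partFirstByte(0L)
                    .partLastByte((long) layerBytes.length - 1)
                    .layerPartBlob(SdkBytes.fromByteArray(layerBytes))
                    .build());

            // 3. Tell Amazon ECR the upload is complete, providing the digest for validation.
            ecrPublic.completeLayerUpload(CompleteLayerUploadRequest.builder()
                    .repositoryName(repositoryName)
                    .uploadId(upload.uploadId())
                    .layerDigests(layerDigest)
                    .build());
        }
    }
}
```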

    " + } + }, + "shapes":{ + "AboutText":{ + "type":"string", + "max":10240 + }, + "Architecture":{ + "type":"string", + "max":50, + "min":1 + }, + "ArchitectureList":{ + "type":"list", + "member":{"shape":"Architecture"}, + "max":50 + }, + "Arn":{"type":"string"}, + "AuthorizationData":{ + "type":"structure", + "members":{ + "authorizationToken":{ + "shape":"Base64", + "documentation":"

    A base64-encoded string that contains authorization data for a public Amazon ECR registry. When the string is decoded, it is presented in the format user:password for public registry authentication using docker login.

    " + }, + "expiresAt":{ + "shape":"ExpirationTimestamp", + "documentation":"

    The Unix time in seconds and milliseconds when the authorization token expires. Authorization tokens are valid for 12 hours.

    " + } + }, + "documentation":"

    An authorization token data object that corresponds to a public registry.

    " + }, + "Base64":{ + "type":"string", + "pattern":"^\\S+$" + }, + "BatchCheckLayerAvailabilityRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "layerDigests" + ], + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

    The AWS account ID associated with the public registry that contains the image layers to check. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that is associated with the image layers to check.

    " + }, + "layerDigests":{ + "shape":"BatchedOperationLayerDigestList", + "documentation":"

    The digests of the image layers to check.

    " + } + } + }, + "BatchCheckLayerAvailabilityResponse":{ + "type":"structure", + "members":{ + "layers":{ + "shape":"LayerList", + "documentation":"

    A list of image layer objects corresponding to the image layer references in the request.

    " + }, + "failures":{ + "shape":"LayerFailureList", + "documentation":"

    Any failures associated with the call.

    " + } + } + }, + "BatchDeleteImageRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "imageIds" + ], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the registry that contains the image to delete. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The repository in a public registry that contains the image to delete.

    " + }, + "imageIds":{ + "shape":"ImageIdentifierList", + "documentation":"

    A list of image ID references that correspond to images to delete. The format of the imageIds reference is imageTag=tag or imageDigest=digest.

    " + } + } + }, + "BatchDeleteImageResponse":{ + "type":"structure", + "members":{ + "imageIds":{ + "shape":"ImageIdentifierList", + "documentation":"

    The image IDs of the deleted images.

    " + }, + "failures":{ + "shape":"ImageFailureList", + "documentation":"

    Any failures associated with the call.

    " + } + } + }, + "BatchedOperationLayerDigest":{ + "type":"string", + "max":1000, + "min":0 + }, + "BatchedOperationLayerDigestList":{ + "type":"list", + "member":{"shape":"BatchedOperationLayerDigest"}, + "max":100, + "min":1 + }, + "CompleteLayerUploadRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "uploadId", + "layerDigests" + ], + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

    The AWS account ID associated with the registry to which to upload layers. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository in a public registry to associate with the image layer.

    " + }, + "uploadId":{ + "shape":"UploadId", + "documentation":"

    The upload ID from a previous InitiateLayerUpload operation to associate with the image layer.

    " + }, + "layerDigests":{ + "shape":"LayerDigestList", + "documentation":"

    The sha256 digest of the image layer.

    " + } + } + }, + "CompleteLayerUploadResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The public registry ID associated with the request.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The repository name associated with the request.

    " + }, + "uploadId":{ + "shape":"UploadId", + "documentation":"

    The upload ID associated with the layer.

    " + }, + "layerDigest":{ + "shape":"LayerDigest", + "documentation":"

    The sha256 digest of the image layer.

    " + } + } + }, + "CreateRepositoryRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name to use for the repository. This appears publicly in the Amazon ECR Public Gallery. The repository name may be specified on its own (such as nginx-web-app) or it can be prepended with a namespace to group the repository into a category (such as project-a/nginx-web-app).

    " + }, + "catalogData":{ + "shape":"RepositoryCatalogDataInput", + "documentation":"

    The details about the repository that are publicly visible in the Amazon ECR Public Gallery.

    " + } + } + }, + "CreateRepositoryResponse":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"Repository", + "documentation":"

    The repository that was created.

    " + }, + "catalogData":{"shape":"RepositoryCatalogData"} + } + }, + "CreationTimestamp":{"type":"timestamp"}, + "DefaultRegistryAliasFlag":{"type":"boolean"}, + "DeleteRepositoryPolicyRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the public registry that contains the repository policy to delete. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that is associated with the repository policy to delete.

    " + } + } + }, + "DeleteRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The registry ID associated with the request.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The repository name associated with the request.

    " + }, + "policyText":{ + "shape":"RepositoryPolicyText", + "documentation":"

    The JSON repository policy that was deleted from the repository.

    " + } + } + }, + "DeleteRepositoryRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the public registry that contains the repository to delete. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to delete.

    " + }, + "force":{ + "shape":"ForceFlag", + "documentation":"

    If a repository contains images, forces the deletion.

    " + } + } + }, + "DeleteRepositoryResponse":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"Repository", + "documentation":"

    The repository that was deleted.

    " + } + } + }, + "DescribeImageTagsRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the public registry that contains the repository in which to describe images. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that contains the image tag details to describe.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The nextToken value returned from a previous paginated DescribeImageTags request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of image tag results returned by DescribeImageTags in paginated output. When this parameter is used, DescribeImageTags only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeImageTags request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeImageTags returns up to 100 results and a nextToken value, if applicable.

    " + } + } + }, + "DescribeImageTagsResponse":{ + "type":"structure", + "members":{ + "imageTagDetails":{ + "shape":"ImageTagDetailList", + "documentation":"

    The image tag details for the images in the requested repository.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The nextToken value to include in a future DescribeImageTags request. When the results of a DescribeImageTags request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "DescribeImagesRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the public registry that contains the repository in which to describe images. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The repository that contains the images to describe.

    " + }, + "imageIds":{ + "shape":"ImageIdentifierList", + "documentation":"

    The list of image IDs for the requested repository.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The nextToken value returned from a previous paginated DescribeImages request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return. This option cannot be used when you specify images with imageIds.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of repository results returned by DescribeImages in paginated output. When this parameter is used, DescribeImages only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeImages request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeImages returns up to 100 results and a nextToken value, if applicable. This option cannot be used when you specify images with imageIds.

    " + } + } + }, + "DescribeImagesResponse":{ + "type":"structure", + "members":{ + "imageDetails":{ + "shape":"ImageDetailList", + "documentation":"

    A list of ImageDetail objects that contain data about the image.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The nextToken value to include in a future DescribeImages request. When the results of a DescribeImages request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "DescribeRegistriesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The nextToken value returned from a previous paginated DescribeRegistries request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of registry results returned by DescribeRegistries in paginated output. When this parameter is used, DescribeRegistries only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeRegistries request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeRegistries returns up to 100 results and a nextToken value, if applicable.

    " + } + } + }, + "DescribeRegistriesResponse":{ + "type":"structure", + "required":["registries"], + "members":{ + "registries":{ + "shape":"RegistryList", + "documentation":"

    An object containing the details for a public registry.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The nextToken value to include in a future DescribeRegistries request. When the results of a DescribeRegistries request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "DescribeRepositoriesRequest":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the registry that contains the repositories to be described. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryNames":{ + "shape":"RepositoryNameList", + "documentation":"

    A list of repositories to describe. If this parameter is omitted, then all repositories in a registry are described.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The nextToken value returned from a previous paginated DescribeRepositories request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return. This option cannot be used when you specify repositories with repositoryNames.

    This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of repository results returned by DescribeRepositories in paginated output. When this parameter is used, DescribeRepositories only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeRepositories request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeRepositories returns up to 100 results and a nextToken value, if applicable. This option cannot be used when you specify repositories with repositoryNames.
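    Because paginators-1.json in this diff defines pagination for DescribeRepositories, the generated client should expose an iterable that handles the nextToken handoff described here. A hedged sketch follows; the paginator method name assumes the SDK's usual describe<Operation>Paginator convention.

```java
import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
import software.amazon.awssdk.services.ecrpublic.model.DescribeRepositoriesRequest;
import software.amazon.awssdk.services.ecrpublic.model.DescribeRepositoriesResponse;

public class ListAllRepositories {
    public static void main(String[] args) {
        try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
            DescribeRepositoriesRequest request = DescribeRepositoriesRequest.builder()
                    .maxResults(100) // page size; the paginator keeps requesting pages until nextToken is null
                    .build();

            // Each iteration is one page; the nextToken bookkeeping is handled by the paginator.
            for (DescribeRepositoriesResponse page : ecrPublic.describeRepositoriesPaginator(request)) {
                page.repositories().forEach(repo -> System.out.println(repo.repositoryName()));
            }
        }
    }
}
```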

    " + } + } + }, + "DescribeRepositoriesResponse":{ + "type":"structure", + "members":{ + "repositories":{ + "shape":"RepositoryList", + "documentation":"

    A list of repository objects corresponding to valid repositories.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The nextToken value to include in a future DescribeRepositories request. When the results of a DescribeRepositories request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "EmptyUploadException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The specified layer upload does not contain any layer parts.

    ", + "exception":true + }, + "ExceptionMessage":{"type":"string"}, + "ExpirationTimestamp":{"type":"timestamp"}, + "ForceFlag":{"type":"boolean"}, + "GetAuthorizationTokenRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAuthorizationTokenResponse":{ + "type":"structure", + "members":{ + "authorizationData":{ + "shape":"AuthorizationData", + "documentation":"

    An authorization token data object that corresponds to a public registry.

    " + } + } + }, + "GetRegistryCatalogDataRequest":{ + "type":"structure", + "members":{ + } + }, + "GetRegistryCatalogDataResponse":{ + "type":"structure", + "required":["registryCatalogData"], + "members":{ + "registryCatalogData":{ + "shape":"RegistryCatalogData", + "documentation":"

    The catalog metadata for the public registry.

    " + } + } + }, + "GetRepositoryCatalogDataRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the registry that contains the repositories to be described. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to retrieve the catalog metadata for.

    " + } + } + }, + "GetRepositoryCatalogDataResponse":{ + "type":"structure", + "members":{ + "catalogData":{ + "shape":"RepositoryCatalogData", + "documentation":"

    The catalog metadata for the repository.

    " + } + } + }, + "GetRepositoryPolicyRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the public registry that contains the repository. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository with the policy to retrieve.

    " + } + } + }, + "GetRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The registry ID associated with the request.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The repository name associated with the request.

    " + }, + "policyText":{ + "shape":"RepositoryPolicyText", + "documentation":"

    The repository policy text associated with the repository. The policy text will be in JSON format.

    " + } + } + }, + "Image":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

    The AWS account ID associated with the registry containing the image.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository associated with the image.

    " + }, + "imageId":{ + "shape":"ImageIdentifier", + "documentation":"

    An object containing the image tag and image digest associated with an image.

    " + }, + "imageManifest":{ + "shape":"ImageManifest", + "documentation":"

    The image manifest associated with the image.

    " + }, + "imageManifestMediaType":{ + "shape":"MediaType", + "documentation":"

    The manifest media type of the image.

    " + } + }, + "documentation":"

    An object representing an Amazon ECR image.

    " + }, + "ImageAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The specified image has already been pushed, and there were no changes to the manifest or image tag after the last push.

    ", + "exception":true + }, + "ImageDetail":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the public registry to which this image belongs.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to which this image belongs.

    " + }, + "imageDigest":{ + "shape":"ImageDigest", + "documentation":"

    The sha256 digest of the image manifest.

    " + }, + "imageTags":{ + "shape":"ImageTagList", + "documentation":"

    The list of tags associated with this image.

    " + }, + "imageSizeInBytes":{ + "shape":"ImageSizeInBytes", + "documentation":"

    The size, in bytes, of the image in the repository.

    If the image is a manifest list, this will be the max size of all manifests in the list.

    Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

    " + }, + "imagePushedAt":{ + "shape":"PushTimestamp", + "documentation":"

    The date and time, expressed in standard JavaScript date format, at which the current image was pushed to the repository.

    " + }, + "imageManifestMediaType":{ + "shape":"MediaType", + "documentation":"

    The media type of the image manifest.

    " + }, + "artifactMediaType":{ + "shape":"MediaType", + "documentation":"

    The artifact media type of the image.

    " + } + }, + "documentation":"

    An object that describes an image returned by a DescribeImages operation.

    " + }, + "ImageDetailList":{ + "type":"list", + "member":{"shape":"ImageDetail"} + }, + "ImageDigest":{"type":"string"}, + "ImageDigestDoesNotMatchException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The specified image digest does not match the digest that Amazon ECR calculated for the image.

    ", + "exception":true + }, + "ImageFailure":{ + "type":"structure", + "members":{ + "imageId":{ + "shape":"ImageIdentifier", + "documentation":"

    The image ID associated with the failure.

    " + }, + "failureCode":{ + "shape":"ImageFailureCode", + "documentation":"

    The code associated with the failure.

    " + }, + "failureReason":{ + "shape":"ImageFailureReason", + "documentation":"

    The reason for the failure.

    " + } + }, + "documentation":"

    An object representing an Amazon ECR image failure.

    " + }, + "ImageFailureCode":{ + "type":"string", + "enum":[ + "InvalidImageDigest", + "InvalidImageTag", + "ImageTagDoesNotMatchDigest", + "ImageNotFound", + "MissingDigestAndTag", + "ImageReferencedByManifestList", + "KmsError" + ] + }, + "ImageFailureList":{ + "type":"list", + "member":{"shape":"ImageFailure"} + }, + "ImageFailureReason":{"type":"string"}, + "ImageIdentifier":{ + "type":"structure", + "members":{ + "imageDigest":{ + "shape":"ImageDigest", + "documentation":"

    The sha256 digest of the image manifest.

    " + }, + "imageTag":{ + "shape":"ImageTag", + "documentation":"

    The tag used for the image.

    " + } + }, + "documentation":"

    An object with identifying information for an Amazon ECR image.

    " + }, + "ImageIdentifierList":{ + "type":"list", + "member":{"shape":"ImageIdentifier"}, + "max":100, + "min":1 + }, + "ImageManifest":{ + "type":"string", + "max":4194304, + "min":1 + }, + "ImageNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The image requested does not exist in the specified repository.

    ", + "exception":true + }, + "ImageSizeInBytes":{"type":"long"}, + "ImageTag":{ + "type":"string", + "max":300, + "min":1 + }, + "ImageTagAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The specified image is tagged with a tag that already exists. The repository is configured for tag immutability.

    ", + "exception":true + }, + "ImageTagDetail":{ + "type":"structure", + "members":{ + "imageTag":{ + "shape":"ImageTag", + "documentation":"

    The tag associated with the image.

    " + }, + "createdAt":{ + "shape":"CreationTimestamp", + "documentation":"

    The time stamp indicating when the image tag was created.

    " + }, + "imageDetail":{ + "shape":"ReferencedImageDetail", + "documentation":"

    An object that describes the details of an image.

    " + } + }, + "documentation":"

    An object representing the image tag details for an image.

    " + }, + "ImageTagDetailList":{ + "type":"list", + "member":{"shape":"ImageTagDetail"} + }, + "ImageTagList":{ + "type":"list", + "member":{"shape":"ImageTag"} + }, + "InitiateLayerUploadRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

    The AWS account ID associated with the registry to which you intend to upload layers. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to which you intend to upload layers.

    " + } + } + }, + "InitiateLayerUploadResponse":{ + "type":"structure", + "members":{ + "uploadId":{ + "shape":"UploadId", + "documentation":"

    The upload ID for the layer upload. This parameter is passed to further UploadLayerPart and CompleteLayerUpload operations.

    " + }, + "partSize":{ + "shape":"PartSize", + "documentation":"

    The size, in bytes, that Amazon ECR expects future layer part uploads to be.

    " + } + } + }, + "InvalidLayerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The layer digest calculation performed by Amazon ECR upon receipt of the image layer does not match the digest specified.

    ", + "exception":true + }, + "InvalidLayerPartException":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the layer part.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository.

    " + }, + "uploadId":{ + "shape":"UploadId", + "documentation":"

    The upload ID associated with the layer part.

    " + }, + "lastValidByteReceived":{ + "shape":"PartSize", + "documentation":"

    The position of the last byte of the layer part.

    " + }, + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The layer part size is not valid, or the first byte specified is not consecutive to the last byte of a previous layer part upload.

    ", + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The specified parameter is invalid. Review the available parameters for the API request.

    ", + "exception":true + }, + "Layer":{ + "type":"structure", + "members":{ + "layerDigest":{ + "shape":"LayerDigest", + "documentation":"

    The sha256 digest of the image layer.

    " + }, + "layerAvailability":{ + "shape":"LayerAvailability", + "documentation":"

    The availability status of the image layer.

    " + }, + "layerSize":{ + "shape":"LayerSizeInBytes", + "documentation":"

    The size, in bytes, of the image layer.

    " + }, + "mediaType":{ + "shape":"MediaType", + "documentation":"

    The media type of the layer, such as application/vnd.docker.image.rootfs.diff.tar.gzip or application/vnd.oci.image.layer.v1.tar+gzip.

    " + } + }, + "documentation":"

    An object representing an Amazon ECR image layer.

    " + }, + "LayerAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The image layer already exists in the associated repository.

    ", + "exception":true + }, + "LayerAvailability":{ + "type":"string", + "enum":[ + "AVAILABLE", + "UNAVAILABLE" + ] + }, + "LayerDigest":{ + "type":"string", + "pattern":"[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+" + }, + "LayerDigestList":{ + "type":"list", + "member":{"shape":"LayerDigest"}, + "max":100, + "min":1 + }, + "LayerFailure":{ + "type":"structure", + "members":{ + "layerDigest":{ + "shape":"BatchedOperationLayerDigest", + "documentation":"

    The layer digest associated with the failure.

    " + }, + "failureCode":{ + "shape":"LayerFailureCode", + "documentation":"

    The failure code associated with the failure.

    " + }, + "failureReason":{ + "shape":"LayerFailureReason", + "documentation":"

    The reason for the failure.

    " + } + }, + "documentation":"

    An object representing an Amazon ECR image layer failure.

    " + }, + "LayerFailureCode":{ + "type":"string", + "enum":[ + "InvalidLayerDigest", + "MissingLayerDigest" + ] + }, + "LayerFailureList":{ + "type":"list", + "member":{"shape":"LayerFailure"} + }, + "LayerFailureReason":{"type":"string"}, + "LayerList":{ + "type":"list", + "member":{"shape":"Layer"} + }, + "LayerPartBlob":{ + "type":"blob", + "max":20971520, + "min":0 + }, + "LayerPartTooSmallException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    Layer parts must be at least 5 MiB in size.

    ", + "exception":true + }, + "LayerSizeInBytes":{"type":"long"}, + "LayersNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The specified layers could not be found, or the specified layer is not valid for this repository.

    ", + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Service Quotas in the Amazon Elastic Container Registry User Guide.

    ", + "exception":true + }, + "LogoImageBlob":{ + "type":"blob", + "max":512000, + "min":0 + }, + "MarketplaceCertified":{"type":"boolean"}, + "MaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "MediaType":{"type":"string"}, + "NextToken":{"type":"string"}, + "OperatingSystem":{ + "type":"string", + "max":50, + "min":1 + }, + "OperatingSystemList":{ + "type":"list", + "member":{"shape":"OperatingSystem"}, + "max":50 + }, + "PartSize":{ + "type":"long", + "min":0 + }, + "PrimaryRegistryAliasFlag":{"type":"boolean"}, + "PushTimestamp":{"type":"timestamp"}, + "PutImageRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "imageManifest" + ], + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

    The AWS account ID associated with the public registry that contains the repository in which to put the image. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository in which to put the image.

    " + }, + "imageManifest":{ + "shape":"ImageManifest", + "documentation":"

    The image manifest corresponding to the image to be uploaded.

    " + }, + "imageManifestMediaType":{ + "shape":"MediaType", + "documentation":"

    The media type of the image manifest. If you push an image manifest that does not contain the mediaType field, you must specify the imageManifestMediaType in the request.

    " + }, + "imageTag":{ + "shape":"ImageTag", + "documentation":"

    The tag to associate with the image. This parameter is required for images that use the Docker Image Manifest V2 Schema 2 or Open Container Initiative (OCI) formats.

    " + }, + "imageDigest":{ + "shape":"ImageDigest", + "documentation":"

    The image digest of the image manifest corresponding to the image.

    " + } + } + }, + "PutImageResponse":{ + "type":"structure", + "members":{ + "image":{ + "shape":"Image", + "documentation":"

    Details of the image uploaded.

    " + } + } + }, + "PutRegistryCatalogDataRequest":{ + "type":"structure", + "members":{ + "displayName":{ + "shape":"RegistryDisplayName", + "documentation":"

    The display name for a public registry. The display name is shown as the repository author in the Amazon ECR Public Gallery.

    The registry display name is only publicly visible in the Amazon ECR Public Gallery for verified accounts.

    " + } + } + }, + "PutRegistryCatalogDataResponse":{ + "type":"structure", + "required":["registryCatalogData"], + "members":{ + "registryCatalogData":{ + "shape":"RegistryCatalogData", + "documentation":"

    The catalog data for the public registry.

    " + } + } + }, + "PutRepositoryCatalogDataRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "catalogData" + ], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the public registry the repository is in. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to create or update the catalog data for.

    " + }, + "catalogData":{ + "shape":"RepositoryCatalogDataInput", + "documentation":"

    An object containing the catalog data for a repository. This data is publicly visible in the Amazon ECR Public Gallery.

    " + } + } + }, + "PutRepositoryCatalogDataResponse":{ + "type":"structure", + "members":{ + "catalogData":{ + "shape":"RepositoryCatalogData", + "documentation":"

    The catalog data for the repository.

    " + } + } + }, + "ReferencedImageDetail":{ + "type":"structure", + "members":{ + "imageDigest":{ + "shape":"ImageDigest", + "documentation":"

    The sha256 digest of the image manifest.

    " + }, + "imageSizeInBytes":{ + "shape":"ImageSizeInBytes", + "documentation":"

    The size, in bytes, of the image in the repository.

    If the image is a manifest list, this will be the max size of all manifests in the list.

    Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

    " + }, + "imagePushedAt":{ + "shape":"PushTimestamp", + "documentation":"

    The date and time, expressed in standard JavaScript date format, at which the current image tag was pushed to the repository.

    " + }, + "imageManifestMediaType":{ + "shape":"MediaType", + "documentation":"

    The media type of the image manifest.

    " + }, + "artifactMediaType":{ + "shape":"MediaType", + "documentation":"

    The artifact media type of the image.

    " + } + }, + "documentation":"

    An object that describes the image tag details returned by a DescribeImageTags action.

    " + }, + "ReferencedImagesNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The manifest list is referencing an image that does not exist.

    ", + "exception":true + }, + "Registry":{ + "type":"structure", + "required":[ + "registryId", + "registryArn", + "registryUri", + "verified", + "aliases" + ], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the registry. If you do not specify a registry, the default public registry is assumed.

    " + }, + "registryArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the public registry.

    " + }, + "registryUri":{ + "shape":"Url", + "documentation":"

    The URI of a public registry. The URI contains a universal prefix and the registry alias.

    " + }, + "verified":{ + "shape":"RegistryVerified", + "documentation":"

    Whether the account is verified. This indicates whether the account is an AWS Marketplace vendor. If an account is verified, each public repository will receive a verified account badge on the Amazon ECR Public Gallery.

    " + }, + "aliases":{ + "shape":"RegistryAliasList", + "documentation":"

    An array of objects representing the aliases for a public registry.

    " + } + }, + "documentation":"

    The details of a public registry.

    " + }, + "RegistryAlias":{ + "type":"structure", + "required":[ + "name", + "status", + "primaryRegistryAlias", + "defaultRegistryAlias" + ], + "members":{ + "name":{ + "shape":"RegistryAliasName", + "documentation":"

    The name of the registry alias.

    " + }, + "status":{ + "shape":"RegistryAliasStatus", + "documentation":"

    The status of the registry alias.

    " + }, + "primaryRegistryAlias":{ + "shape":"PrimaryRegistryAliasFlag", + "documentation":"

    Whether or not the registry alias is the primary alias for the registry. If true, the alias is the primary registry alias and is displayed in both the repository URL and the image URI used in the docker pull commands on the Amazon ECR Public Gallery.

    A registry alias that is not the primary registry alias can be used in the repository URI in a docker pull command.

    " + }, + "defaultRegistryAlias":{ + "shape":"DefaultRegistryAliasFlag", + "documentation":"

    Whether or not the registry alias is the default alias for the registry. When the first public repository is created, your public registry is assigned a default registry alias.

    " + } + }, + "documentation":"

    An object representing the aliases for a public registry. A public registry is given an alias upon creation but a custom alias can be set using the Amazon ECR console. For more information, see Registries in the Amazon Elastic Container Registry User Guide.

    " + }, + "RegistryAliasList":{ + "type":"list", + "member":{"shape":"RegistryAlias"} + }, + "RegistryAliasName":{ + "type":"string", + "max":50, + "min":2, + "pattern":"[a-z][a-z0-9]+(?:[._-][a-z0-9]+)*" + }, + "RegistryAliasStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "PENDING", + "REJECTED" + ] + }, + "RegistryCatalogData":{ + "type":"structure", + "members":{ + "displayName":{ + "shape":"RegistryDisplayName", + "documentation":"

    The display name for a public registry. This appears on the Amazon ECR Public Gallery.

    Only accounts that have the verified account badge can have a registry display name.

    " + } + }, + "documentation":"

    The metadata for a public registry.

    " + }, + "RegistryDisplayName":{ + "type":"string", + "max":100, + "min":0 + }, + "RegistryId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "RegistryIdOrAlias":{ + "type":"string", + "max":256, + "min":1 + }, + "RegistryList":{ + "type":"list", + "member":{"shape":"Registry"} + }, + "RegistryNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The registry does not exist.

    ", + "exception":true + }, + "RegistryVerified":{"type":"boolean"}, + "Repository":{ + "type":"structure", + "members":{ + "repositoryArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the repository. The ARN contains the arn:aws:ecr namespace, followed by the region of the repository, AWS account ID of the repository owner, repository namespace, and repository name. For example, arn:aws:ecr:region:012345678910:repository/test.

    " + }, + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the public registry that contains the repository.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository.

    " + }, + "repositoryUri":{ + "shape":"Url", + "documentation":"

    The URI for the repository. You can use this URI for container image push and pull operations.

    " + }, + "createdAt":{ + "shape":"CreationTimestamp", + "documentation":"

    The date and time, in JavaScript date format, when the repository was created.

    " + } + }, + "documentation":"

    An object representing a repository.

    " + }, + "RepositoryAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The specified repository already exists in the specified registry.

    ", + "exception":true + }, + "RepositoryCatalogData":{ + "type":"structure", + "members":{ + "description":{ + "shape":"RepositoryDescription", + "documentation":"

    The short description of the repository.

    " + }, + "architectures":{ + "shape":"ArchitectureList", + "documentation":"

    The architecture tags that are associated with the repository.

    Only supported architecture tags appear publicly in the Amazon ECR Public Gallery. For more information, see RepositoryCatalogDataInput.

    " + }, + "operatingSystems":{ + "shape":"OperatingSystemList", + "documentation":"

    The operating system tags that are associated with the repository.

    Only supported operating system tags appear publicly in the Amazon ECR Public Gallery. For more information, see RepositoryCatalogDataInput.

    " + }, + "logoUrl":{ + "shape":"ResourceUrl", + "documentation":"

    The URL containing the logo associated with the repository.

    " + }, + "aboutText":{ + "shape":"AboutText", + "documentation":"

    The longform description of the contents of the repository. This text appears in the repository details on the Amazon ECR Public Gallery.

    " + }, + "usageText":{ + "shape":"UsageText", + "documentation":"

    The longform usage details of the contents of the repository. The usage text provides context for users of the repository.

    " + }, + "marketplaceCertified":{ + "shape":"MarketplaceCertified", + "documentation":"

    Whether or not the repository is certified by AWS Marketplace.

    " + } + }, + "documentation":"

    The catalog data for a repository. This data is publicly visible in the Amazon ECR Public Gallery.

    " + }, + "RepositoryCatalogDataInput":{ + "type":"structure", + "members":{ + "description":{ + "shape":"RepositoryDescription", + "documentation":"

    A short description of the contents of the repository. This text appears both in the image details and when searching for repositories on the Amazon ECR Public Gallery.

    " + }, + "architectures":{ + "shape":"ArchitectureList", + "documentation":"

    The system architecture that the images in the repository are compatible with. On the Amazon ECR Public Gallery, the following supported architectures will appear as badges on the repository and are used as search filters.

    • ARM

    • ARM 64

    • x86

    • x86-64

    If an unsupported tag is added to your repository catalog data, it will be associated with the repository and can be retrieved using the API but will not be discoverable in the Amazon ECR Public Gallery.

    " + }, + "operatingSystems":{ + "shape":"OperatingSystemList", + "documentation":"

    The operating systems that the images in the repository are compatible with. On the Amazon ECR Public Gallery, the following supported operating systems will appear as badges on the repository and are used as search filters.

    • Linux

    • Windows

    If an unsupported tag is added to your repository catalog data, it will be associated with the repository and can be retrieved using the API but will not be discoverable in the Amazon ECR Public Gallery.

    " + }, + "logoImageBlob":{ + "shape":"LogoImageBlob", + "documentation":"

    The base64-encoded repository logo payload.

    The repository logo is only publicly visible in the Amazon ECR Public Gallery for verified accounts.

    " + }, + "aboutText":{ + "shape":"AboutText", + "documentation":"

    A detailed description of the contents of the repository. It is publicly visible in the Amazon ECR Public Gallery. The text must be in markdown format.

    " + }, + "usageText":{ + "shape":"UsageText", + "documentation":"

    Detailed information on how to use the contents of the repository. It is publicly visible in the Amazon ECR Public Gallery. The usage text provides context, support information, and additional usage details for users of the repository. The text must be in markdown format.

    " + } + }, + "documentation":"

    An object containing the catalog data for a repository. This data is publicly visible in the Amazon ECR Public Gallery.

    " + }, + "RepositoryDescription":{ + "type":"string", + "max":1024 + }, + "RepositoryList":{ + "type":"list", + "member":{"shape":"Repository"} + }, + "RepositoryName":{ + "type":"string", + "max":205, + "min":2, + "pattern":"(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*" + }, + "RepositoryNameList":{ + "type":"list", + "member":{"shape":"RepositoryName"}, + "max":100, + "min":1 + }, + "RepositoryNotEmptyException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The specified repository contains images. To delete a repository that contains images, you must force the deletion with the force parameter.

    ", + "exception":true + }, + "RepositoryNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The specified repository could not be found. Check the spelling of the specified repository and ensure that you are performing operations on the correct registry.

    ", + "exception":true + }, + "RepositoryPolicyNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The specified repository and registry combination does not have an associated repository policy.

    ", + "exception":true + }, + "RepositoryPolicyText":{ + "type":"string", + "max":10240, + "min":0 + }, + "ResourceUrl":{ + "type":"string", + "max":2048 + }, + "ServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    These errors are usually caused by a server-side issue.

    ", + "exception":true, + "fault":true + }, + "SetRepositoryPolicyRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "policyText" + ], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The AWS account ID associated with the registry that contains the repository. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to receive the policy.

    " + }, + "policyText":{ + "shape":"RepositoryPolicyText", + "documentation":"

    The JSON repository policy text to apply to the repository. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

    " + }, + "force":{ + "shape":"ForceFlag", + "documentation":"

    If the policy that you are attempting to set on a repository would prevent you from setting another policy in the future, you must force the SetRepositoryPolicy operation. This is intended to prevent accidental repository lockouts.
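    For illustration, a minimal sketch of assembling this request with the generated AWS SDK for Java v2 client. The EcrPublicClient class and builder methods are assumed to follow the standard codegen conventions for this model; the repository name, policy JSON, and region choice are hypothetical.

        import software.amazon.awssdk.regions.Region;
        import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
        import software.amazon.awssdk.services.ecrpublic.model.SetRepositoryPolicyRequest;
        import software.amazon.awssdk.services.ecrpublic.model.SetRepositoryPolicyResponse;

        public class SetRepositoryPolicyExample {
            public static void main(String[] args) {
                // The region is an assumption for this sketch; ECR Public is typically addressed through us-east-1.
                EcrPublicClient ecrPublic = EcrPublicClient.builder().region(Region.US_EAST_1).build();

                String policyJson = "{\"Version\":\"2012-10-17\",\"Statement\":[]}"; // hypothetical policy text

                SetRepositoryPolicyResponse response = ecrPublic.setRepositoryPolicy(
                        SetRepositoryPolicyRequest.builder()
                                .repositoryName("my-public-repo") // hypothetical repository name
                                .policyText(policyJson)
                                .force(false)                     // do not force; see the force flag described above
                                .build());

                System.out.println("Applied policy to " + response.repositoryName());
                ecrPublic.close();
            }
        }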

    " + } + } + }, + "SetRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The registry ID associated with the request.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The repository name associated with the request.

    " + }, + "policyText":{ + "shape":"RepositoryPolicyText", + "documentation":"

    The JSON repository policy text applied to the repository.

    " + } + } + }, + "UnsupportedCommandException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The action is not supported in this Region.

    ", + "exception":true + }, + "UploadId":{ + "type":"string", + "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + }, + "UploadLayerPartRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "uploadId", + "partFirstByte", + "partLastByte", + "layerPartBlob" + ], + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

    The AWS account ID associated with the registry to which you are uploading layer parts. If you do not specify a registry, the default public registry is assumed.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository to which you are uploading layer parts.

    " + }, + "uploadId":{ + "shape":"UploadId", + "documentation":"

    The upload ID from a previous InitiateLayerUpload operation to associate with the layer part upload.

    " + }, + "partFirstByte":{ + "shape":"PartSize", + "documentation":"

    The position of the first byte of the layer part within the overall image layer.

    " + }, + "partLastByte":{ + "shape":"PartSize", + "documentation":"

    The position of the last byte of the layer part within the overall image layer.

    " + }, + "layerPartBlob":{ + "shape":"LayerPartBlob", + "documentation":"

    The base64-encoded layer part payload.

    " + } + } + }, + "UploadLayerPartResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

    The registry ID associated with the request.

    " + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The repository name associated with the request.

    " + }, + "uploadId":{ + "shape":"UploadId", + "documentation":"

    The upload ID associated with the request.

    " + }, + "lastByteReceived":{ + "shape":"PartSize", + "documentation":"

    The integer value of the last byte received in the request.

    " + } + } + }, + "UploadNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    The upload could not be found, or the specified upload ID is not valid for this repository.

    ", + "exception":true + }, + "Url":{"type":"string"}, + "UsageText":{ + "type":"string", + "max":10240 + } + }, + "documentation":"Amazon Elastic Container Registry Public

    Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Amazon ECR provides both public and private registries to host your container images. You can use the familiar Docker CLI, or your preferred client, to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry for your Docker or Open Container Initiative (OCI) images. Amazon ECR supports public repositories with this API. For information about the Amazon ECR API for private repositories, see Amazon Elastic Container Registry API Reference.

    " +} diff --git a/services/ecs/build.properties b/services/ecs/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/ecs/build.properties +++ b/services/ecs/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index 4ec64394993d..1f3525c0333a 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + elasticinference + AWS Java SDK :: Services :: Elastic Inference + The AWS Java SDK for Elastic Inference module holds the client classes that are used for + communicating with Elastic Inference. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.elasticinference + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/elasticinference/src/main/resources/codegen-resources/paginators-1.json b/services/elasticinference/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..909b792bacb6 --- /dev/null +++ b/services/elasticinference/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "DescribeAccelerators": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "acceleratorSet" + } + } +} diff --git a/services/elasticinference/src/main/resources/codegen-resources/service-2.json b/services/elasticinference/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..6a469fc8c453 --- /dev/null +++ b/services/elasticinference/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,534 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-07-25", + "endpointPrefix":"api.elastic-inference", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon Elastic Inference", + "serviceFullName":"Amazon Elastic Inference", + "serviceId":"Elastic Inference", + "signatureVersion":"v4", + "signingName":"elastic-inference", + "uid":"elastic-inference-2017-07-25" + }, + "operations":{ + "DescribeAcceleratorOfferings":{ + "name":"DescribeAcceleratorOfferings", + "http":{ + "method":"POST", + "requestUri":"/describe-accelerator-offerings" + }, + "input":{"shape":"DescribeAcceleratorOfferingsRequest"}, + "output":{"shape":"DescribeAcceleratorOfferingsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes the locations in which a given accelerator type or set of types is present in a given region.

    " + }, + "DescribeAcceleratorTypes":{ + "name":"DescribeAcceleratorTypes", + "http":{ + "method":"GET", + "requestUri":"/describe-accelerator-types" + }, + "input":{"shape":"DescribeAcceleratorTypesRequest"}, + "output":{"shape":"DescribeAcceleratorTypesResponse"}, + "errors":[ + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput.

    " + }, + "DescribeAccelerators":{ + "name":"DescribeAccelerators", + "http":{ + "method":"POST", + "requestUri":"/describe-accelerators" + }, + "input":{"shape":"DescribeAcceleratorsRequest"}, + "output":{"shape":"DescribeAcceleratorsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes information about a provided set of accelerators belonging to an account.
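    As a hedged sketch of what the corresponding call looks like with the generated AWS SDK for Java v2 client (class and method names assumed from the usual codegen conventions; the instance ID is hypothetical):

        import software.amazon.awssdk.services.elasticinference.ElasticInferenceClient;
        import software.amazon.awssdk.services.elasticinference.model.DescribeAcceleratorsRequest;
        import software.amazon.awssdk.services.elasticinference.model.DescribeAcceleratorsResponse;
        import software.amazon.awssdk.services.elasticinference.model.Filter;

        public class DescribeAcceleratorsExample {
            public static void main(String[] args) {
                ElasticInferenceClient client = ElasticInferenceClient.create();

                // Filter to the accelerators attached to a single EC2 instance.
                DescribeAcceleratorsResponse response = client.describeAccelerators(
                        DescribeAcceleratorsRequest.builder()
                                .filters(Filter.builder()
                                        .name("instance-id")
                                        .values("i-0123456789abcdef0")
                                        .build())
                                .maxResults(50)
                                .build());

                response.acceleratorSet().forEach(accelerator ->
                        System.out.println(accelerator.acceleratorId() + " -> " + accelerator.acceleratorType()));

                client.close();
            }
        }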

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResult"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns all tags of an Elastic Inference Accelerator.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResult"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Adds the specified tags to an Elastic Inference Accelerator.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResult"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes the specified tags from an Elastic Inference Accelerator.

    " + } + }, + "shapes":{ + "AcceleratorHealthStatus":{ + "type":"string", + "max":256, + "min":1 + }, + "AcceleratorId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^eia-[0-9a-f]+$" + }, + "AcceleratorIdList":{ + "type":"list", + "member":{"shape":"AcceleratorId"}, + "max":1000, + "min":0 + }, + "AcceleratorType":{ + "type":"structure", + "members":{ + "acceleratorTypeName":{ + "shape":"AcceleratorTypeName", + "documentation":"

    The name of the Elastic Inference Accelerator type.

    " + }, + "memoryInfo":{ + "shape":"MemoryInfo", + "documentation":"

    The memory information of the Elastic Inference Accelerator type.

    " + }, + "throughputInfo":{ + "shape":"ThroughputInfoList", + "documentation":"

    The throughput information of the Elastic Inference Accelerator type.

    " + } + }, + "documentation":"

    The details of an Elastic Inference Accelerator type.

    " + }, + "AcceleratorTypeList":{ + "type":"list", + "member":{"shape":"AcceleratorType"}, + "max":100, + "min":0 + }, + "AcceleratorTypeName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^\\S+$" + }, + "AcceleratorTypeNameList":{ + "type":"list", + "member":{"shape":"AcceleratorTypeName"}, + "max":100, + "min":0 + }, + "AcceleratorTypeOffering":{ + "type":"structure", + "members":{ + "acceleratorType":{ + "shape":"AcceleratorTypeName", + "documentation":"

    The name of the Elastic Inference Accelerator type.

    " + }, + "locationType":{ + "shape":"LocationType", + "documentation":"

    The location type for the offering. It can assume the following values:

    • region: defines that the offering is at the regional level.

    • availability-zone: defines that the offering is at the availability zone level.

    • availability-zone-id: defines that the offering is at the availability zone level, defined by the availability zone id.

    " + }, + "location":{ + "shape":"Location", + "documentation":"

    The location for the offering. It will return either the region, availability zone or availability zone id for the offering depending on the locationType value.

    " + } + }, + "documentation":"

    The offering for an Elastic Inference Accelerator type.

    " + }, + "AcceleratorTypeOfferingList":{ + "type":"list", + "member":{"shape":"AcceleratorTypeOffering"}, + "max":100, + "min":0 + }, + "AvailabilityZone":{ + "type":"string", + "max":256, + "min":1 + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    Raised when a malformed input has been provided to the API.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "DescribeAcceleratorOfferingsRequest":{ + "type":"structure", + "required":["locationType"], + "members":{ + "locationType":{ + "shape":"LocationType", + "documentation":"

    The location type that you want to describe accelerator type offerings for. It can assume the following values:

    • region: will return the accelerator type offering at the regional level.

    • availability-zone: will return the accelerator type offering at the availability zone level.

    • availability-zone-id: will return the accelerator type offering at the availability zone level, returning the availability zone id.

    " + }, + "acceleratorTypes":{ + "shape":"AcceleratorTypeNameList", + "documentation":"

    The list of accelerator types to describe.

    " + } + } + }, + "DescribeAcceleratorOfferingsResponse":{ + "type":"structure", + "members":{ + "acceleratorTypeOfferings":{ + "shape":"AcceleratorTypeOfferingList", + "documentation":"

    The list of accelerator type offerings for a specific location.

    " + } + } + }, + "DescribeAcceleratorTypesRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeAcceleratorTypesResponse":{ + "type":"structure", + "members":{ + "acceleratorTypes":{ + "shape":"AcceleratorTypeList", + "documentation":"

    The available accelerator types.

    " + } + } + }, + "DescribeAcceleratorsRequest":{ + "type":"structure", + "members":{ + "acceleratorIds":{ + "shape":"AcceleratorIdList", + "documentation":"

    The IDs of the accelerators to describe.

    " + }, + "filters":{ + "shape":"FilterList", + "documentation":"

    One or more filters. Filter names and values are case-sensitive. Valid filter names are:

    • accelerator-types: a list of accelerator type names to filter for.

    • instance-id: a list of EC2 instance IDs to filter for.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The total number of items to return in the command's output. If the total number of items available is more than the value specified, a NextToken is provided in the command's output. To resume pagination, provide the NextToken value in the starting-token argument of a subsequent command. Do not use the NextToken response element directly outside of the AWS CLI.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + } + } + }, + "DescribeAcceleratorsResponse":{ + "type":"structure", + "members":{ + "acceleratorSet":{ + "shape":"ElasticInferenceAcceleratorSet", + "documentation":"

    The details of the Elastic Inference Accelerators.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + } + } + }, + "ElasticInferenceAccelerator":{ + "type":"structure", + "members":{ + "acceleratorHealth":{ + "shape":"ElasticInferenceAcceleratorHealth", + "documentation":"

    The health of the Elastic Inference Accelerator.

    " + }, + "acceleratorType":{ + "shape":"AcceleratorTypeName", + "documentation":"

    The type of the Elastic Inference Accelerator.

    " + }, + "acceleratorId":{ + "shape":"AcceleratorId", + "documentation":"

    The ID of the Elastic Inference Accelerator.

    " + }, + "availabilityZone":{ + "shape":"AvailabilityZone", + "documentation":"

    The availability zone where the Elastic Inference Accelerator is present.

    " + }, + "attachedResource":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource that the Elastic Inference Accelerator is attached to.

    " + } + }, + "documentation":"

    The details of an Elastic Inference Accelerator.

    " + }, + "ElasticInferenceAcceleratorHealth":{ + "type":"structure", + "members":{ + "status":{ + "shape":"AcceleratorHealthStatus", + "documentation":"

    The health status of the Elastic Inference Accelerator.

    " + } + }, + "documentation":"

    The health details of an Elastic Inference Accelerator.

    " + }, + "ElasticInferenceAcceleratorSet":{ + "type":"list", + "member":{"shape":"ElasticInferenceAccelerator"} + }, + "Filter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"FilterName", + "documentation":"

    The filter name for the Elastic Inference Accelerator list. It can assume the following values:

    • accelerator-type: the type of Elastic Inference Accelerator to filter for.

    • instance-id: an EC2 instance ID to filter for.

    " + }, + "values":{ + "shape":"ValueStringList", + "documentation":"

    The values for the filter of the Elastic Inference Accelerator list.

    " + } + }, + "documentation":"

    A filter expression for the Elastic Inference Accelerator list.

    " + }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"}, + "max":100, + "min":0 + }, + "FilterName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^\\S+$" + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    Raised when an unexpected error occurred during request processing.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "Key":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^\\S+$" + }, + "KeyValuePair":{ + "type":"structure", + "members":{ + "key":{ + "shape":"Key", + "documentation":"

    The throughput value of the Elastic Inference Accelerator type. It can assume the following values: TFLOPS16bit: the throughput expressed in 16bit TeraFLOPS. TFLOPS32bit: the throughput expressed in 32bit TeraFLOPS.

    " + }, + "value":{ + "shape":"Value", + "documentation":"

    The throughput value of the Elastic Inference Accelerator type.

    " + } + }, + "documentation":"

    A throughput entry for an Elastic Inference Accelerator type.

    " + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ResourceARN", + "documentation":"

    The ARN of the Elastic Inference Accelerator to list the tags for.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResult":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the Elastic Inference Accelerator.

    " + } + } + }, + "Location":{ + "type":"string", + "max":256, + "min":1 + }, + "LocationType":{ + "type":"string", + "enum":[ + "region", + "availability-zone", + "availability-zone-id" + ], + "max":256, + "min":1 + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":0 + }, + "MemoryInfo":{ + "type":"structure", + "members":{ + "sizeInMiB":{ + "shape":"Integer", + "documentation":"

    The size in mebibytes of the Elastic Inference Accelerator type.

    " + } + }, + "documentation":"

    The memory information of an Elastic Inference Accelerator type.

    " + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[A-Za-z0-9+/]+={0,2}$" + }, + "ResourceARN":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"^arn:aws\\S*:elastic-inference:\\S+:\\d{12}:elastic-inference-accelerator/eia-[0-9a-f]+$" + }, + "ResourceArn":{ + "type":"string", + "max":1283, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    Raised when the requested resource cannot be found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "String":{ + "type":"string", + "max":500000, + "pattern":"^.*$" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^\\S$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceARN", + "documentation":"

    The ARN of the Elastic Inference Accelerator to tag.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags to add to the Elastic Inference Accelerator.

    " + } + } + }, + "TagResourceResult":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "ThroughputInfoList":{ + "type":"list", + "member":{"shape":"KeyValuePair"}, + "max":100, + "min":0 + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceARN", + "documentation":"

    The ARN of the Elastic Inference Accelerator to untag.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The list of tags to remove from the Elastic Inference Accelerator.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResult":{ + "type":"structure", + "members":{ + } + }, + "Value":{"type":"integer"}, + "ValueStringList":{ + "type":"list", + "member":{"shape":"String"}, + "max":100, + "min":0 + } + }, + "documentation":"

    Elastic Inference public APIs.

    " +} diff --git a/services/elasticloadbalancing/build.properties b/services/elasticloadbalancing/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/elasticloadbalancing/build.properties +++ b/services/elasticloadbalancing/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 409a1d4a2ddf..f0ca2064a203 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + emrcontainers + AWS Java SDK :: Services :: EMR Containers + The AWS Java SDK for EMR Containers module holds the client classes that are used for + communicating with EMR Containers. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.emrcontainers + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/emrcontainers/src/main/resources/codegen-resources/paginators-1.json b/services/emrcontainers/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..48c53c8f286d --- /dev/null +++ b/services/emrcontainers/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListJobRuns": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "jobRuns" + }, + "ListManagedEndpoints": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "endpoints" + }, + "ListVirtualClusters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "virtualClusters" + } + } +} diff --git a/services/emrcontainers/src/main/resources/codegen-resources/service-2.json b/services/emrcontainers/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..af1a36f99b54 --- /dev/null +++ b/services/emrcontainers/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1446 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-10-01", + "endpointPrefix":"emr-containers", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon EMR Containers", + "serviceId":"EMR containers", + "signatureVersion":"v4", + "signingName":"emr-containers", + "uid":"emr-containers-2020-10-01" + }, + "operations":{ + "CancelJobRun":{ + "name":"CancelJobRun", + "http":{ + "method":"DELETE", + "requestUri":"/virtualclusters/{virtualClusterId}/jobruns/{jobRunId}" + }, + "input":{"shape":"CancelJobRunRequest"}, + "output":{"shape":"CancelJobRunResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Cancels a job run. A job run is a unit of work, such as a Spark jar, PySpark script, or SparkSQL query, that you submit to Amazon EMR on EKS.

    " + }, + "CreateManagedEndpoint":{ + "name":"CreateManagedEndpoint", + "http":{ + "method":"POST", + "requestUri":"/virtualclusters/{virtualClusterId}/endpoints" + }, + "input":{"shape":"CreateManagedEndpointRequest"}, + "output":{"shape":"CreateManagedEndpointResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a managed endpoint. A managed endpoint is a gateway that connects EMR Studio to Amazon EMR on EKS so that EMR Studio can communicate with your virtual cluster.

    " + }, + "CreateVirtualCluster":{ + "name":"CreateVirtualCluster", + "http":{ + "method":"POST", + "requestUri":"/virtualclusters" + }, + "input":{"shape":"CreateVirtualClusterRequest"}, + "output":{"shape":"CreateVirtualClusterResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a virtual cluster. A virtual cluster is a managed entity on Amazon EMR on EKS. You can create, describe, list, and delete virtual clusters. They do not consume any additional resources in your system. A single virtual cluster maps to a single Kubernetes namespace. Given this relationship, you can model virtual clusters the same way you model Kubernetes namespaces to meet your requirements.
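    A minimal sketch of creating a virtual cluster with the generated AWS SDK for Java v2 client, mapping it to one Kubernetes namespace on an existing EKS cluster. Class names follow the standard codegen conventions for this model; the cluster name, namespace, and virtual cluster name are hypothetical, and the required clientToken is filled in automatically because it is modeled as an idempotency token.

        import software.amazon.awssdk.services.emrcontainers.EmrContainersClient;
        import software.amazon.awssdk.services.emrcontainers.model.ContainerInfo;
        import software.amazon.awssdk.services.emrcontainers.model.ContainerProvider;
        import software.amazon.awssdk.services.emrcontainers.model.ContainerProviderType;
        import software.amazon.awssdk.services.emrcontainers.model.CreateVirtualClusterRequest;
        import software.amazon.awssdk.services.emrcontainers.model.CreateVirtualClusterResponse;
        import software.amazon.awssdk.services.emrcontainers.model.EksInfo;

        public class CreateVirtualClusterExample {
            public static void main(String[] args) {
                EmrContainersClient emr = EmrContainersClient.create();

                CreateVirtualClusterResponse response = emr.createVirtualCluster(
                        CreateVirtualClusterRequest.builder()
                                .name("analytics-virtual-cluster")
                                .containerProvider(ContainerProvider.builder()
                                        .type(ContainerProviderType.EKS)   // EKS is the only supported provider type
                                        .id("my-eks-cluster")              // name of the EKS cluster
                                        .info(ContainerInfo.builder()
                                                .eksInfo(EksInfo.builder().namespace("emr-jobs").build())
                                                .build())
                                        .build())
                                .build());

                System.out.println("Created virtual cluster " + response.id());
                emr.close();
            }
        }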

    " + }, + "DeleteManagedEndpoint":{ + "name":"DeleteManagedEndpoint", + "http":{ + "method":"DELETE", + "requestUri":"/virtualclusters/{virtualClusterId}/endpoints/{endpointId}" + }, + "input":{"shape":"DeleteManagedEndpointRequest"}, + "output":{"shape":"DeleteManagedEndpointResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes a managed endpoint. A managed endpoint is a gateway that connects EMR Studio to Amazon EMR on EKS so that EMR Studio can communicate with your virtual cluster.

    " + }, + "DeleteVirtualCluster":{ + "name":"DeleteVirtualCluster", + "http":{ + "method":"DELETE", + "requestUri":"/virtualclusters/{virtualClusterId}" + }, + "input":{"shape":"DeleteVirtualClusterRequest"}, + "output":{"shape":"DeleteVirtualClusterResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes a virtual cluster. A virtual cluster is a managed entity on Amazon EMR on EKS. You can create, describe, list, and delete virtual clusters. They do not consume any additional resources in your system. A single virtual cluster maps to a single Kubernetes namespace. Given this relationship, you can model virtual clusters the same way you model Kubernetes namespaces to meet your requirements.

    " + }, + "DescribeJobRun":{ + "name":"DescribeJobRun", + "http":{ + "method":"GET", + "requestUri":"/virtualclusters/{virtualClusterId}/jobruns/{jobRunId}" + }, + "input":{"shape":"DescribeJobRunRequest"}, + "output":{"shape":"DescribeJobRunResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Displays detailed information about a job run. A job run is a unit of work, such as a Spark jar, PySpark script, or SparkSQL query, that you submit to Amazon EMR on EKS.

    " + }, + "DescribeManagedEndpoint":{ + "name":"DescribeManagedEndpoint", + "http":{ + "method":"GET", + "requestUri":"/virtualclusters/{virtualClusterId}/endpoints/{endpointId}" + }, + "input":{"shape":"DescribeManagedEndpointRequest"}, + "output":{"shape":"DescribeManagedEndpointResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Displays detailed information about a managed endpoint. A managed endpoint is a gateway that connects EMR Studio to Amazon EMR on EKS so that EMR Studio can communicate with your virtual cluster.

    " + }, + "DescribeVirtualCluster":{ + "name":"DescribeVirtualCluster", + "http":{ + "method":"GET", + "requestUri":"/virtualclusters/{virtualClusterId}" + }, + "input":{"shape":"DescribeVirtualClusterRequest"}, + "output":{"shape":"DescribeVirtualClusterResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Displays detailed information about a specified virtual cluster. A virtual cluster is a managed entity on Amazon EMR on EKS. You can create, describe, list, and delete virtual clusters. They do not consume any additional resources in your system. A single virtual cluster maps to a single Kubernetes namespace. Given this relationship, you can model virtual clusters the same way you model Kubernetes namespaces to meet your requirements.

    " + }, + "ListJobRuns":{ + "name":"ListJobRuns", + "http":{ + "method":"GET", + "requestUri":"/virtualclusters/{virtualClusterId}/jobruns" + }, + "input":{"shape":"ListJobRunsRequest"}, + "output":{"shape":"ListJobRunsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists job runs based on a set of parameters. A job run is a unit of work, such as a Spark jar, PySpark script, or SparkSQL query, that you submit to Amazon EMR on EKS.
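    The ListJobRuns operation is paginated (nextToken/maxResults, per the paginator definition above), so the generated Java v2 client also exposes a paginator that follows nextToken automatically. A short sketch, with a hypothetical virtual cluster ID:

        import software.amazon.awssdk.services.emrcontainers.EmrContainersClient;
        import software.amazon.awssdk.services.emrcontainers.model.JobRunState;
        import software.amazon.awssdk.services.emrcontainers.model.ListJobRunsRequest;
        import software.amazon.awssdk.services.emrcontainers.model.ListJobRunsResponse;

        public class ListJobRunsExample {
            public static void main(String[] args) {
                EmrContainersClient emr = EmrContainersClient.create();

                ListJobRunsRequest request = ListJobRunsRequest.builder()
                        .virtualClusterId("abcd1234efgh5678")              // hypothetical virtual cluster ID
                        .states(JobRunState.PENDING, JobRunState.RUNNING)  // only in-flight job runs
                        .build();

                // Each iteration is one page; the paginator requests the next page as needed.
                for (ListJobRunsResponse page : emr.listJobRunsPaginator(request)) {
                    page.jobRuns().forEach(jobRun ->
                            System.out.println(jobRun.id() + " " + jobRun.stateAsString()));
                }

                emr.close();
            }
        }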

    " + }, + "ListManagedEndpoints":{ + "name":"ListManagedEndpoints", + "http":{ + "method":"GET", + "requestUri":"/virtualclusters/{virtualClusterId}/endpoints" + }, + "input":{"shape":"ListManagedEndpointsRequest"}, + "output":{"shape":"ListManagedEndpointsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists managed endpoints based on a set of parameters. A managed endpoint is a gateway that connects EMR Studio to Amazon EMR on EKS so that EMR Studio can communicate with your virtual cluster.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists the tags assigned to the resources.

    " + }, + "ListVirtualClusters":{ + "name":"ListVirtualClusters", + "http":{ + "method":"GET", + "requestUri":"/virtualclusters" + }, + "input":{"shape":"ListVirtualClustersRequest"}, + "output":{"shape":"ListVirtualClustersResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists information about the specified virtual cluster. A virtual cluster is a managed entity on Amazon EMR on EKS. You can create, describe, list, and delete virtual clusters. They do not consume any additional resources in your system. A single virtual cluster maps to a single Kubernetes namespace. Given this relationship, you can model virtual clusters the same way you model Kubernetes namespaces to meet your requirements.

    " + }, + "StartJobRun":{ + "name":"StartJobRun", + "http":{ + "method":"POST", + "requestUri":"/virtualclusters/{virtualClusterId}/jobruns" + }, + "input":{"shape":"StartJobRunRequest"}, + "output":{"shape":"StartJobRunResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Starts a job run. A job run is a unit of work, such as a Spark jar, PySpark script, or SparkSQL query, that you submit to Amazon EMR on EKS.
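    A hedged sketch of submitting a Spark job with the generated Java v2 client. The SparkSubmitJobDriver member names (entryPoint, sparkSubmitParameters) are assumed from the EMR on EKS StartJobRun API and are not spelled out in this excerpt; all ARNs, IDs, the release label, and the script location are hypothetical.

        import software.amazon.awssdk.services.emrcontainers.EmrContainersClient;
        import software.amazon.awssdk.services.emrcontainers.model.JobDriver;
        import software.amazon.awssdk.services.emrcontainers.model.SparkSubmitJobDriver;
        import software.amazon.awssdk.services.emrcontainers.model.StartJobRunRequest;
        import software.amazon.awssdk.services.emrcontainers.model.StartJobRunResponse;

        public class StartJobRunExample {
            public static void main(String[] args) {
                EmrContainersClient emr = EmrContainersClient.create();

                StartJobRunResponse response = emr.startJobRun(
                        StartJobRunRequest.builder()
                                .virtualClusterId("abcd1234efgh5678")
                                .name("nightly-spark-job")
                                .executionRoleArn("arn:aws:iam::123456789012:role/emr-containers-job-role")
                                .releaseLabel("emr-6.2.0-latest")
                                .jobDriver(JobDriver.builder()
                                        .sparkSubmitJobDriver(SparkSubmitJobDriver.builder()
                                                .entryPoint("s3://my-bucket/scripts/job.py")
                                                .sparkSubmitParameters("--conf spark.executor.instances=2")
                                                .build())
                                        .build())
                                .build());

                System.out.println("Started job run " + response.id());
                emr.close();
            }
        }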

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Assigns tags to resources. A tag is a label that you assign to an AWS resource. Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize your AWS resources by attributes such as purpose, owner, or environment. When you have many resources of the same type, you can quickly identify a specific resource based on the tags you've assigned to it. For example, you can define a set of tags for your Amazon EMR on EKS clusters to help you track each cluster's owner and stack level. We recommend that you devise a consistent set of tag keys for each resource type. You can then search and filter the resources based on the tags that you add.
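    Following the owner/stack example above, a minimal sketch of tagging a virtual cluster with the generated Java v2 client (the ARN and tag values are hypothetical):

        import java.util.Map;

        import software.amazon.awssdk.services.emrcontainers.EmrContainersClient;
        import software.amazon.awssdk.services.emrcontainers.model.TagResourceRequest;

        public class TagResourceExample {
            public static void main(String[] args) {
                EmrContainersClient emr = EmrContainersClient.create();

                String virtualClusterArn =
                        "arn:aws:emr-containers:us-east-1:123456789012:/virtualclusters/abcd1234efgh5678";

                emr.tagResource(TagResourceRequest.builder()
                        .resourceArn(virtualClusterArn)
                        .tags(Map.of("owner", "data-platform", "stack", "prod"))
                        .build());

                emr.close();
            }
        }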

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes tags from resources.

    " + } + }, + "shapes":{ + "ACMCertArn":{ + "type":"string", + "max":2048, + "min":44, + "pattern":"^arn:(aws[a-zA-Z0-9-]*):acm:.+:(\\d{12}):certificate/.+$" + }, + "CancelJobRunRequest":{ + "type":"structure", + "required":[ + "id", + "virtualClusterId" + ], + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the job run to cancel.

    ", + "location":"uri", + "locationName":"jobRunId" + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the virtual cluster for which the job run will be canceled.

    ", + "location":"uri", + "locationName":"virtualClusterId" + } + } + }, + "CancelJobRunResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The output contains the ID of the cancelled job run.

    " + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The output contains the virtual cluster ID for which the job run is cancelled.

    " + } + } + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":".*\\S.*" + }, + "CloudWatchMonitoringConfiguration":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{ + "shape":"LogGroupName", + "documentation":"

    The name of the log group for log publishing.

    " + }, + "logStreamNamePrefix":{ + "shape":"String256", + "documentation":"

    The specified name prefix for log streams.

    " + } + }, + "documentation":"

    A configuration for CloudWatch monitoring. You can configure your jobs to send log information to CloudWatch Logs.

    " + }, + "Configuration":{ + "type":"structure", + "required":["classification"], + "members":{ + "classification":{ + "shape":"String1024", + "documentation":"

    The classification within a configuration.

    " + }, + "properties":{ + "shape":"SensitivePropertiesMap", + "documentation":"

    A set of properties specified within a configuration classification.

    " + }, + "configurations":{ + "shape":"ConfigurationList", + "documentation":"

    A list of additional configurations to apply within a configuration object.

    " + } + }, + "documentation":"

    A configuration specification to be used when provisioning virtual clusters, which can include configurations for applications and software bundled with Amazon EMR on EKS. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file.

    " + }, + "ConfigurationList":{ + "type":"list", + "member":{"shape":"Configuration"}, + "max":100 + }, + "ConfigurationOverrides":{ + "type":"structure", + "members":{ + "applicationConfiguration":{ + "shape":"ConfigurationList", + "documentation":"

    The configurations for the application running by the job run.

    " + }, + "monitoringConfiguration":{ + "shape":"MonitoringConfiguration", + "documentation":"

    The configurations for monitoring.

    " + } + }, + "documentation":"

    A configuration specification to be used to override existing configurations.

    " + }, + "ContainerInfo":{ + "type":"structure", + "members":{ + "eksInfo":{ + "shape":"EksInfo", + "documentation":"

    The information about the EKS cluster.

    " + } + }, + "documentation":"

    The information about the container used for a job run or a managed endpoint.

    ", + "union":true + }, + "ContainerProvider":{ + "type":"structure", + "required":[ + "type", + "id" + ], + "members":{ + "type":{ + "shape":"ContainerProviderType", + "documentation":"

    The type of the container provider. Currently, EKS is the only supported type.

    " + }, + "id":{ + "shape":"String256", + "documentation":"

    The ID of the container cluster.

    " + }, + "info":{ + "shape":"ContainerInfo", + "documentation":"

    The information about the container cluster.

    " + } + }, + "documentation":"

    The information about the container provider.

    " + }, + "ContainerProviderType":{ + "type":"string", + "enum":["EKS"] + }, + "CreateManagedEndpointRequest":{ + "type":"structure", + "required":[ + "name", + "virtualClusterId", + "type", + "releaseLabel", + "executionRoleArn", + "certificateArn", + "clientToken" + ], + "members":{ + "name":{ + "shape":"ResourceNameString", + "documentation":"

    The name of the managed endpoint.

    " + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the virtual cluster for which a managed endpoint is created.

    ", + "location":"uri", + "locationName":"virtualClusterId" + }, + "type":{ + "shape":"EndpointType", + "documentation":"

    The type of the managed endpoint.

    " + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"

    The Amazon EMR release version.

    " + }, + "executionRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

    The ARN of the execution role.

    " + }, + "certificateArn":{ + "shape":"ACMCertArn", + "documentation":"

    The certificate ARN of the managed endpoint.

    " + }, + "configurationOverrides":{ + "shape":"ConfigurationOverrides", + "documentation":"

    The configuration settings that will be used to override existing configurations.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The client idempotency token for this create call.

    ", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the managed endpoint.

    " + } + } + }, + "CreateManagedEndpointResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The output contains the ID of the managed endpoint.

    " + }, + "name":{ + "shape":"ResourceNameString", + "documentation":"

    The output contains the name of the managed endpoint.

    " + }, + "arn":{ + "shape":"EndpointArn", + "documentation":"

    The output contains the ARN of the managed endpoint.

    " + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The output contains the ID of the virtual cluster.

    " + } + } + }, + "CreateVirtualClusterRequest":{ + "type":"structure", + "required":[ + "name", + "containerProvider", + "clientToken" + ], + "members":{ + "name":{ + "shape":"ResourceNameString", + "documentation":"

    The specified name of the virtual cluster.

    " + }, + "containerProvider":{ + "shape":"ContainerProvider", + "documentation":"

    The container provider of the virtual cluster.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The client token of the virtual cluster.

    ", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags assigned to the virtual cluster.

    " + } + } + }, + "CreateVirtualClusterResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    This output contains the virtual cluster ID.

    " + }, + "name":{ + "shape":"ResourceNameString", + "documentation":"

    This output contains the name of the virtual cluster.

    " + }, + "arn":{ + "shape":"VirtualClusterArn", + "documentation":"

    This output contains the ARN of the virtual cluster.

    " + } + } + }, + "Date":{"type":"timestamp"}, + "DeleteManagedEndpointRequest":{ + "type":"structure", + "required":[ + "id", + "virtualClusterId" + ], + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the managed endpoint.

    ", + "location":"uri", + "locationName":"endpointId" + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the endpoint's virtual cluster.

    ", + "location":"uri", + "locationName":"virtualClusterId" + } + } + }, + "DeleteManagedEndpointResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The output displays the ID of the managed endpoint.

    " + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The output displays the ID of the endpoint's virtual cluster.

    " + } + } + }, + "DeleteVirtualClusterRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the virtual cluster that will be deleted.

    ", + "location":"uri", + "locationName":"virtualClusterId" + } + } + }, + "DeleteVirtualClusterResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    This output contains the ID of the virtual cluster that will be deleted.

    " + } + } + }, + "DescribeJobRunRequest":{ + "type":"structure", + "required":[ + "id", + "virtualClusterId" + ], + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the job run request.

    ", + "location":"uri", + "locationName":"jobRunId" + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the virtual cluster for which the job run is submitted.

    ", + "location":"uri", + "locationName":"virtualClusterId" + } + } + }, + "DescribeJobRunResponse":{ + "type":"structure", + "members":{ + "jobRun":{ + "shape":"JobRun", + "documentation":"

    The output displays information about a job run.

    " + } + } + }, + "DescribeManagedEndpointRequest":{ + "type":"structure", + "required":[ + "id", + "virtualClusterId" + ], + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the managed endpoint that will be described.

    ", + "location":"uri", + "locationName":"endpointId" + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the endpoint's virtual cluster.

    ", + "location":"uri", + "locationName":"virtualClusterId" + } + } + }, + "DescribeManagedEndpointResponse":{ + "type":"structure", + "members":{ + "endpoint":{ + "shape":"Endpoint", + "documentation":"

    This output displays information about a managed endpoint.

    " + } + } + }, + "DescribeVirtualClusterRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the virtual cluster that will be described.

    ", + "location":"uri", + "locationName":"virtualClusterId" + } + } + }, + "DescribeVirtualClusterResponse":{ + "type":"structure", + "members":{ + "virtualCluster":{ + "shape":"VirtualCluster", + "documentation":"

    This output displays information about the specified virtual cluster.

    " + } + } + }, + "EksInfo":{ + "type":"structure", + "members":{ + "namespace":{ + "shape":"String256", + "documentation":"

    The namespace of the EKS cluster.

    " + } + }, + "documentation":"

    The information about the EKS cluster.

    " + }, + "Endpoint":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the endpoint.

    " + }, + "name":{ + "shape":"ResourceNameString", + "documentation":"

    The name of the endpoint.

    " + }, + "arn":{ + "shape":"EndpointArn", + "documentation":"

    The ARN of the endpoint.

    " + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the endpoint's virtual cluster.

    " + }, + "type":{ + "shape":"EndpointType", + "documentation":"

    The type of the endpoint.

    " + }, + "state":{ + "shape":"EndpointState", + "documentation":"

    The state of the endpoint.

    " + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"

    The EMR release version to be used for the endpoint.

    " + }, + "executionRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

    The execution role ARN of the endpoint.

    " + }, + "certificateArn":{ + "shape":"ACMCertArn", + "documentation":"

    The certificate ARN of the endpoint.

    " + }, + "configurationOverrides":{ + "shape":"ConfigurationOverrides", + "documentation":"

    The configuration settings that are used to override existing configurations for endpoints.

    " + }, + "serverUrl":{ + "shape":"UriString", + "documentation":"

    The server URL of the endpoint.

    " + }, + "createdAt":{ + "shape":"Date", + "documentation":"

    The date and time when the endpoint was created.

    " + }, + "securityGroup":{ + "shape":"String256", + "documentation":"

    The security group configuration of the endpoint.

    " + }, + "subnetIds":{ + "shape":"SubnetIds", + "documentation":"

    The subnet IDs of the endpoint.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the endpoint.

    " + } + }, + "documentation":"

    This entity represents the endpoint that is managed by Amazon EMR on EKS.

    " + }, + "EndpointArn":{ + "type":"string", + "max":1024, + "min":60, + "pattern":"^arn:(aws[a-zA-Z0-9-]*):emr-containers:.+:(\\d{12}):\\/virtualclusters\\/[0-9a-zA-Z]+\\/endpoints\\/[0-9a-zA-Z]+$" + }, + "EndpointState":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "TERMINATING", + "TERMINATED", + "TERMINATED_WITH_ERRORS" + ] + }, + "EndpointStates":{ + "type":"list", + "member":{"shape":"EndpointState"}, + "max":10 + }, + "EndpointType":{ + "type":"string", + "max":64, + "min":1, + "pattern":".*\\S.*" + }, + "EndpointTypes":{ + "type":"list", + "member":{"shape":"EndpointType"}, + "max":10 + }, + "Endpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, + "EntryPointArgument":{ + "type":"string", + "max":10280, + "min":1, + "pattern":"(?!\\s*$)(^[^';|\\u0026\\u003C\\u003E*?`$(){}\\[\\]!#\\\\]*$)", + "sensitive":true + }, + "EntryPointArguments":{ + "type":"list", + "member":{"shape":"EntryPointArgument"} + }, + "EntryPointPath":{ + "type":"string", + "max":256, + "min":1, + "pattern":"(?!\\s*$)(^[^';|\\u0026\\u003C\\u003E*?`$(){}\\[\\]!#\\\\]*$)", + "sensitive":true + }, + "FailureReason":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "USER_ERROR", + "VALIDATION_ERROR", + "CLUSTER_UNAVAILABLE" + ] + }, + "IAMRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:(aws[a-zA-Z0-9-]*):iam::(\\d{12})?:(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)$" + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String1024"} + }, + "documentation":"

    This is an internal server exception.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "JavaInteger":{"type":"integer"}, + "JobArn":{ + "type":"string", + "max":1024, + "min":60, + "pattern":"^arn:(aws[a-zA-Z0-9-]*):emr-containers:.+:(\\d{12}):\\/virtualclusters\\/[0-9a-zA-Z]+\\/jobruns\\/[0-9a-zA-Z]+$" + }, + "JobDriver":{ + "type":"structure", + "members":{ + "sparkSubmitJobDriver":{ + "shape":"SparkSubmitJobDriver", + "documentation":"

    The job driver parameters specified for Spark submit.

    " + } + }, + "documentation":"

    Specify the driver that the job runs on.

    " + }, + "JobRun":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the job run.

    " + }, + "name":{ + "shape":"ResourceNameString", + "documentation":"

    The name of the job run.

    " + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the job run's virtual cluster.

    " + }, + "arn":{ + "shape":"JobArn", + "documentation":"

    The ARN of the job run.

    " + }, + "state":{ + "shape":"JobRunState", + "documentation":"

    The state of the job run.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The client token used to start a job run.

    " + }, + "executionRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

    The execution role ARN of the job run.

    " + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"

    The release version of Amazon EMR.

    " + }, + "configurationOverrides":{ + "shape":"ConfigurationOverrides", + "documentation":"

    The configuration settings that are used to override the default configuration.

    " + }, + "jobDriver":{ + "shape":"JobDriver", + "documentation":"

    The parameters of the job driver for the job run.

    " + }, + "createdAt":{ + "shape":"Date", + "documentation":"

    The date and time when the job run was created.

    " + }, + "createdBy":{ + "shape":"RequestIdentityUserArn", + "documentation":"

    The user who created the job run.

    " + }, + "finishedAt":{ + "shape":"Date", + "documentation":"

    The date and time when the job run finished.

    " + }, + "stateDetails":{ + "shape":"String256", + "documentation":"

    Additional details of the job run state.

    " + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

    The reason why the job run has failed.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The assigned tags of the job run.

    " + } + }, + "documentation":"

    This entity describes a job run. A job run is a unit of work, such as a Spark jar, PySpark script, or SparkSQL query, that you submit to Amazon EMR on EKS.
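    As a hedged illustration of how a JobRun surfaces through the AWS SDK for Java v2, the sketch below lists job runs for a virtual cluster and prints a few JobRun fields. The client and model class names (EmrContainersClient, ListJobRunsRequest, and so on) are assumed to follow the SDK's usual code-generation conventions, and the virtual cluster ID is a placeholder.

        import software.amazon.awssdk.services.emrcontainers.EmrContainersClient;
        import software.amazon.awssdk.services.emrcontainers.model.JobRun;
        import software.amazon.awssdk.services.emrcontainers.model.JobRunState;
        import software.amazon.awssdk.services.emrcontainers.model.ListJobRunsRequest;
        import software.amazon.awssdk.services.emrcontainers.model.ListJobRunsResponse;

        public class ListJobRunsSketch {
            public static void main(String[] args) {
                // Names below are assumed from the SDK's codegen conventions; the ID is a placeholder.
                try (EmrContainersClient emr = EmrContainersClient.create()) {
                    ListJobRunsRequest request = ListJobRunsRequest.builder()
                            .virtualClusterId("YOUR_VIRTUAL_CLUSTER_ID")
                            .states(JobRunState.RUNNING)               // optional filter on job run state
                            .build();
                    ListJobRunsResponse response = emr.listJobRuns(request);
                    for (JobRun run : response.jobRuns()) {
                        System.out.printf("%s  %s  %s%n", run.id(), run.name(), run.stateAsString());
                    }
                }
            }
        }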

    " + }, + "JobRunState":{ + "type":"string", + "enum":[ + "PENDING", + "SUBMITTED", + "RUNNING", + "FAILED", + "CANCELLED", + "CANCEL_PENDING", + "COMPLETED" + ] + }, + "JobRunStates":{ + "type":"list", + "member":{"shape":"JobRunState"}, + "max":10 + }, + "JobRuns":{ + "type":"list", + "member":{"shape":"JobRun"} + }, + "ListJobRunsRequest":{ + "type":"structure", + "required":["virtualClusterId"], + "members":{ + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the virtual cluster for which to list job runs.

    ", + "location":"uri", + "locationName":"virtualClusterId" + }, + "createdBefore":{ + "shape":"Date", + "documentation":"

    The date and time before which the job runs were submitted.

    ", + "location":"querystring", + "locationName":"createdBefore" + }, + "createdAfter":{ + "shape":"Date", + "documentation":"

    The date and time after which the job runs were submitted.

    ", + "location":"querystring", + "locationName":"createdAfter" + }, + "name":{ + "shape":"ResourceNameString", + "documentation":"

    The name of the job run.

    ", + "location":"querystring", + "locationName":"name" + }, + "states":{ + "shape":"JobRunStates", + "documentation":"

    The states of the job run.

    ", + "location":"querystring", + "locationName":"states" + }, + "maxResults":{ + "shape":"JavaInteger", + "documentation":"

    The maximum number of job runs that can be listed.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of job runs to return.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListJobRunsResponse":{ + "type":"structure", + "members":{ + "jobRuns":{ + "shape":"JobRuns", + "documentation":"

    This output lists information about the specified job runs.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    This output displays the token for the next set of job runs.

    " + } + } + }, + "ListManagedEndpointsRequest":{ + "type":"structure", + "required":["virtualClusterId"], + "members":{ + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the virtual cluster.

    ", + "location":"uri", + "locationName":"virtualClusterId" + }, + "createdBefore":{ + "shape":"Date", + "documentation":"

    The date and time before which the endpoints were created.

    ", + "location":"querystring", + "locationName":"createdBefore" + }, + "createdAfter":{ + "shape":"Date", + "documentation":"

    The date and time after which the endpoints were created.

    ", + "location":"querystring", + "locationName":"createdAfter" + }, + "types":{ + "shape":"EndpointTypes", + "documentation":"

    The types of the managed endpoints.

    ", + "location":"querystring", + "locationName":"types" + }, + "states":{ + "shape":"EndpointStates", + "documentation":"

    The states of the managed endpoints.

    ", + "location":"querystring", + "locationName":"states" + }, + "maxResults":{ + "shape":"JavaInteger", + "documentation":"

    The maximum number of managed endpoints that can be listed.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of managed endpoints to return.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListManagedEndpointsResponse":{ + "type":"structure", + "members":{ + "endpoints":{ + "shape":"Endpoints", + "documentation":"

    The managed endpoints to be listed.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of endpoints to return.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"RsiArn", + "documentation":"

    The ARN of the tagged resource.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags assigned to resources.

    " + } + } + }, + "ListVirtualClustersRequest":{ + "type":"structure", + "members":{ + "containerProviderId":{ + "shape":"String1024", + "documentation":"

    The container provider ID of the virtual cluster.

    ", + "location":"querystring", + "locationName":"containerProviderId" + }, + "containerProviderType":{ + "shape":"ContainerProviderType", + "documentation":"

    The container provider type of the virtual cluster. EKS is the only supported type as of now.

    ", + "location":"querystring", + "locationName":"containerProviderType" + }, + "createdAfter":{ + "shape":"Date", + "documentation":"

    The date and time after which the virtual clusters were created.

    ", + "location":"querystring", + "locationName":"createdAfter" + }, + "createdBefore":{ + "shape":"Date", + "documentation":"

    The date and time before which the virtual clusters were created.

    ", + "location":"querystring", + "locationName":"createdBefore" + }, + "states":{ + "shape":"VirtualClusterStates", + "documentation":"

    The states of the requested virtual clusters.

    ", + "location":"querystring", + "locationName":"states" + }, + "maxResults":{ + "shape":"JavaInteger", + "documentation":"

    The maximum number of virtual clusters that can be listed.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of virtual clusters to return.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListVirtualClustersResponse":{ + "type":"structure", + "members":{ + "virtualClusters":{ + "shape":"VirtualClusters", + "documentation":"

    This output lists the specified virtual clusters.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    This output displays the token for the next set of virtual clusters.

    " + } + } + }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "MonitoringConfiguration":{ + "type":"structure", + "members":{ + "persistentAppUI":{ + "shape":"PersistentAppUI", + "documentation":"

    Monitoring configurations for the persistent application UI.

    " + }, + "cloudWatchMonitoringConfiguration":{ + "shape":"CloudWatchMonitoringConfiguration", + "documentation":"

    Monitoring configurations for CloudWatch.

    " + }, + "s3MonitoringConfiguration":{ + "shape":"S3MonitoringConfiguration", + "documentation":"

    Amazon S3 configuration for monitoring log publishing.

    " + } + }, + "documentation":"

    Configuration setting for monitoring.

    " + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*\\S.*" + }, + "PersistentAppUI":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "ReleaseLabel":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\.\\-_/A-Za-z0-9]+" + }, + "RequestIdentityUserArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:(aws[a-zA-Z0-9-]*):(iam|sts)::(\\d{12})?:[\\w/+=,.@-]+$" + }, + "ResourceIdString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[0-9a-z]+" + }, + "ResourceNameString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String1024"} + }, + "documentation":"

    The specified resource was not found.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "RsiArn":{ + "type":"string", + "max":500, + "min":60, + "pattern":"^arn:(aws[a-zA-Z0-9-]*):emr-containers:.+:(\\d{12}):/virtualclusters/.+$" + }, + "S3MonitoringConfiguration":{ + "type":"structure", + "required":["logUri"], + "members":{ + "logUri":{ + "shape":"UriString", + "documentation":"

    Amazon S3 destination URI for log publishing.

    " + } + }, + "documentation":"

    Amazon S3 configuration for monitoring log publishing. You can configure your jobs to send log information to Amazon S3.

    " + }, + "SensitivePropertiesMap":{ + "type":"map", + "key":{"shape":"String1024"}, + "value":{"shape":"String1024"}, + "max":100, + "sensitive":true + }, + "SparkSubmitJobDriver":{ + "type":"structure", + "required":["entryPoint"], + "members":{ + "entryPoint":{ + "shape":"EntryPointPath", + "documentation":"

    The entry point of the job application.

    " + }, + "entryPointArguments":{ + "shape":"EntryPointArguments", + "documentation":"

    The arguments for the job application.

    " + }, + "sparkSubmitParameters":{ + "shape":"SparkSubmitParameters", + "documentation":"

    The Spark submit parameters that are used for job runs.

    " + } + }, + "documentation":"

    The information about the job driver for Spark submit.

    " + }, + "SparkSubmitParameters":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"(?!\\s*$)(^[^';|\\u0026\\u003C\\u003E*?`$(){}\\[\\]!#\\\\]*$)", + "sensitive":true + }, + "StartJobRunRequest":{ + "type":"structure", + "required":[ + "virtualClusterId", + "clientToken", + "executionRoleArn", + "releaseLabel", + "jobDriver" + ], + "members":{ + "name":{ + "shape":"ResourceNameString", + "documentation":"

    The name of the job run.

    " + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    The virtual cluster ID for which the job run request is submitted.

    ", + "location":"uri", + "locationName":"virtualClusterId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The client idempotency token of the job run request.

    ", + "idempotencyToken":true + }, + "executionRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

    The execution role ARN for the job run.

    " + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"

    The Amazon EMR release version to use for the job run.

    " + }, + "jobDriver":{ + "shape":"JobDriver", + "documentation":"

    The job driver for the job run.

    " + }, + "configurationOverrides":{ + "shape":"ConfigurationOverrides", + "documentation":"

    The configuration overrides for the job run.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags assigned to job runs.

    " + } + } + }, + "StartJobRunResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    This output displays the started job run ID.

    " + }, + "name":{ + "shape":"ResourceNameString", + "documentation":"

    This output displays the name of the started job run.

    " + }, + "arn":{ + "shape":"JobArn", + "documentation":"

    This output lists the ARN of the job run.

    " + }, + "virtualClusterId":{ + "shape":"ResourceIdString", + "documentation":"

    This output displays the virtual cluster ID for which the job run was submitted.

    " + } + } + }, + "String1024":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*\\S.*" + }, + "String128":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*\\S.*" + }, + "String256":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "StringEmpty256":{ + "type":"string", + "max":256, + "min":0, + "pattern":".*\\S.*" + }, + "SubnetIds":{ + "type":"list", + "member":{"shape":"String256"} + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"String128"}, + "max":50, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"String128"}, + "value":{"shape":"StringEmpty256"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"RsiArn", + "documentation":"

    The ARN of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags assigned to resources.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"RsiArn", + "documentation":"

    The ARN of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag keys of the resources.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UriString":{ + "type":"string", + "max":10280, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\r\\n\\t]*" + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String1024"} + }, + "documentation":"

    There are invalid parameters in the client request.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "VirtualCluster":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ResourceIdString", + "documentation":"

    The ID of the virtual cluster.

    " + }, + "name":{ + "shape":"ResourceNameString", + "documentation":"

    The name of the virtual cluster.

    " + }, + "arn":{ + "shape":"VirtualClusterArn", + "documentation":"

    The ARN of the virtual cluster.

    " + }, + "state":{ + "shape":"VirtualClusterState", + "documentation":"

    The state of the virtual cluster.

    " + }, + "containerProvider":{ + "shape":"ContainerProvider", + "documentation":"

    The container provider of the virtual cluster.

    " + }, + "createdAt":{ + "shape":"Date", + "documentation":"

    The date and time when the virtual cluster was created.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The assigned tags of the virtual cluster.

    " + } + }, + "documentation":"

    This entity describes a virtual cluster. A virtual cluster is a Kubernetes namespace that Amazon EMR is registered with. Amazon EMR uses virtual clusters to run jobs and host endpoints. Multiple virtual clusters can be backed by the same physical cluster. However, each virtual cluster maps to one namespace on an EKS cluster. Virtual clusters do not create any active resources that contribute to your bill or that require lifecycle management outside the service.

    " + }, + "VirtualClusterArn":{ + "type":"string", + "max":1024, + "min":60, + "pattern":"^arn:(aws[a-zA-Z0-9-]*):emr-containers:.+:(\\d{12}):\\/virtualclusters\\/[0-9a-zA-Z]+$" + }, + "VirtualClusterState":{ + "type":"string", + "enum":[ + "RUNNING", + "TERMINATING", + "TERMINATED", + "ARRESTED" + ] + }, + "VirtualClusterStates":{ + "type":"list", + "member":{"shape":"VirtualClusterState"}, + "max":10 + }, + "VirtualClusters":{ + "type":"list", + "member":{"shape":"VirtualCluster"} + } + }, + "documentation":"

    Amazon EMR on EKS provides a deployment option for Amazon EMR that allows you to run open-source big data frameworks on Amazon Elastic Kubernetes Service (Amazon EKS). With this deployment option, you can focus on running analytics workloads while Amazon EMR on EKS builds, configures, and manages containers for open-source applications. For more information about Amazon EMR on EKS concepts and tasks, see What is Amazon EMR on EKS.

    Amazon EMR containers is the API name for Amazon EMR on EKS. The emr-containers prefix is used in the following scenarios:

    • It is the prefix in the CLI commands for Amazon EMR on EKS. For example, aws emr-containers start-job-run.

    • It is the prefix before IAM policy actions for Amazon EMR on EKS. For example, \"Action\": [ \"emr-containers:StartJobRun\"]. For more information, see Policy actions for Amazon EMR on EKS.

    • It is the prefix used in Amazon EMR on EKS service endpoints. For example, emr-containers.us-east-2.amazonaws.com. For more information, see Amazon EMR on EKS Service Endpoints.
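    To mirror the CLI example above in this SDK, a minimal, non-authoritative sketch of starting a job run might look like the following. The emr-containers client and shape names are assumed from the SDK's code-generation conventions, and every ID, ARN, path, and release label is a placeholder.

        import software.amazon.awssdk.services.emrcontainers.EmrContainersClient;
        import software.amazon.awssdk.services.emrcontainers.model.JobDriver;
        import software.amazon.awssdk.services.emrcontainers.model.SparkSubmitJobDriver;
        import software.amazon.awssdk.services.emrcontainers.model.StartJobRunRequest;
        import software.amazon.awssdk.services.emrcontainers.model.StartJobRunResponse;

        public class StartJobRunSketch {
            public static void main(String[] args) {
                try (EmrContainersClient emr = EmrContainersClient.create()) {
                    // All identifiers below are hypothetical placeholders.
                    SparkSubmitJobDriver sparkSubmit = SparkSubmitJobDriver.builder()
                            .entryPoint("s3://my-bucket/scripts/pi.py")
                            .sparkSubmitParameters("--conf spark.executor.instances=2")
                            .build();
                    StartJobRunRequest request = StartJobRunRequest.builder()
                            .name("sample-job-run")
                            .virtualClusterId("YOUR_VIRTUAL_CLUSTER_ID")
                            .executionRoleArn("arn:aws:iam::111122223333:role/EmrContainersJobRole")
                            .releaseLabel("emr-6.2.0-latest")
                            .jobDriver(JobDriver.builder().sparkSubmitJobDriver(sparkSubmit).build())
                            .build();
                    // clientToken is an idempotency token, so the SDK fills it in when not set explicitly.
                    StartJobRunResponse response = emr.startJobRun(request);
                    System.out.println("Started job run: " + response.arn());
                }
            }
        }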

    " +} diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index 2c6b6df72a6d..f1fea8f2ee8c 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + forecast + AWS Java SDK :: Services :: Forecast + The AWS Java SDK for Forecast module holds the client classes that are used for + communicating with Forecast. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.forecast + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/forecast/src/main/resources/codegen-resources/paginators-1.json b/services/forecast/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..da6de7d8f44d --- /dev/null +++ b/services/forecast/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,46 @@ +{ + "pagination": { + "ListDatasetGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DatasetGroups" + }, + "ListDatasetImportJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DatasetImportJobs" + }, + "ListDatasets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Datasets" + }, + "ListForecastExportJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ForecastExportJobs" + }, + "ListForecasts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Forecasts" + }, + "ListPredictorBacktestExportJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PredictorBacktestExportJobs" + }, + "ListPredictors": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Predictors" + } + } +} \ No newline at end of file diff --git a/services/forecast/src/main/resources/codegen-resources/service-2.json b/services/forecast/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..737ddd3679a8 --- /dev/null +++ b/services/forecast/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2691 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-06-26", + "endpointPrefix":"forecast", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Forecast Service", + "serviceId":"forecast", + "signatureVersion":"v4", + "signingName":"forecast", + "targetPrefix":"AmazonForecast", + "uid":"forecast-2018-06-26" + }, + "operations":{ + "CreateDataset":{ + "name":"CreateDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDatasetRequest"}, + "output":{"shape":"CreateDatasetResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:

    • DataFrequency - How frequently your historical time-series data is collected.

    • Domain and DatasetType - Each dataset has an associated dataset domain and a type within the domain. Amazon Forecast provides a list of predefined domains and types within each domain. For each unique dataset domain and type within the domain, Amazon Forecast requires your data to include a minimum set of predefined fields.

    • Schema - A schema specifies the fields in the dataset, including the field name and data type.

    After creating a dataset, you import your training data into it and add the dataset to a dataset group. You use the dataset group to create a predictor. For more information, see howitworks-datasets-groups.

    To get a list of all your datasets, use the ListDatasets operation.

    For example Forecast datasets, see the Amazon Forecast Sample GitHub repository.

    The Status of a dataset must be ACTIVE before you can import training data. Use the DescribeDataset operation to get the status.
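    As a rough sketch of these parameters with the AWS SDK for Java v2, the example below creates a TARGET_TIME_SERIES dataset in the RETAIL domain. The ForecastClient and model class names are assumed from the SDK's code-generation conventions, and the dataset name, frequency, and schema fields are illustrative values only.

        import software.amazon.awssdk.services.forecast.ForecastClient;
        import software.amazon.awssdk.services.forecast.model.AttributeType;
        import software.amazon.awssdk.services.forecast.model.CreateDatasetRequest;
        import software.amazon.awssdk.services.forecast.model.CreateDatasetResponse;
        import software.amazon.awssdk.services.forecast.model.DatasetType;
        import software.amazon.awssdk.services.forecast.model.Domain;
        import software.amazon.awssdk.services.forecast.model.Schema;
        import software.amazon.awssdk.services.forecast.model.SchemaAttribute;

        public class CreateDatasetSketch {
            public static void main(String[] args) {
                try (ForecastClient forecast = ForecastClient.create()) {
                    // The schema lists the minimum fields expected for RETAIL / TARGET_TIME_SERIES.
                    Schema schema = Schema.builder()
                            .attributes(
                                    SchemaAttribute.builder().attributeName("item_id").attributeType(AttributeType.STRING).build(),
                                    SchemaAttribute.builder().attributeName("timestamp").attributeType(AttributeType.TIMESTAMP).build(),
                                    SchemaAttribute.builder().attributeName("demand").attributeType(AttributeType.FLOAT).build())
                            .build();
                    CreateDatasetRequest request = CreateDatasetRequest.builder()
                            .datasetName("retail_demand_ts")
                            .domain(Domain.RETAIL)
                            .datasetType(DatasetType.TARGET_TIME_SERIES)
                            .dataFrequency("D")          // daily historical data (DataFrequency)
                            .schema(schema)
                            .build();
                    CreateDatasetResponse response = forecast.createDataset(request);
                    System.out.println("Dataset ARN: " + response.datasetArn());
                }
            }
        }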

    " + }, + "CreateDatasetGroup":{ + "name":"CreateDatasetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDatasetGroupRequest"}, + "output":{"shape":"CreateDatasetGroupResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or later by using the UpdateDatasetGroup operation.

    After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see howitworks-datasets-groups.

    To get a list of all your datasets groups, use the ListDatasetGroups operation.

    The Status of a dataset group must be ACTIVE before you can use the dataset group to create a predictor. To get the status, use the DescribeDatasetGroup operation.

    " + }, + "CreateDatasetImportJob":{ + "name":"CreateDatasetImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDatasetImportJobRequest"}, + "output":{"shape":"CreateDatasetImportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.

    You must specify a DataSource object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data, as Amazon Forecast makes a copy of your data and processes it in an internal AWS system. For more information, see aws-forecast-iam-roles.

    The training data must be in CSV format. The delimiter must be a comma (,).

    You can specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to the limit of 10,000 files.

    Because dataset imports are not aggregated, your most recent dataset import is the one that is used when training a predictor or generating a forecast. Make sure that your most recent dataset import contains all of the data you want to model off of, and not just the new data collected since the previous import.

    To get a list of all your dataset import jobs, filtered by specified criteria, use the ListDatasetImportJobs operation.

    " + }, + "CreateForecast":{ + "name":"CreateForecast", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateForecastRequest"}, + "output":{"shape":"CreateForecastResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a forecast for each item in the TARGET_TIME_SERIES dataset that was used to train the predictor. This is known as inference. To retrieve the forecast for a single item at low latency, use the operation. To export the complete forecast into your Amazon Simple Storage Service (Amazon S3) bucket, use the CreateForecastExportJob operation.

    The range of the forecast is determined by the ForecastHorizon value, which you specify in the CreatePredictor request. When you query a forecast, you can request a specific date range within the forecast.

    To get a list of all your forecasts, use the ListForecasts operation.

    The forecasts generated by Amazon Forecast are in the same time zone as the dataset that was used to create the predictor.

    For more information, see howitworks-forecast.

    The Status of the forecast must be ACTIVE before you can query or export the forecast. Use the DescribeForecast operation to get the status.
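    A minimal sketch of this call with the AWS SDK for Java v2 could look like the following; the ForecastClient and request class names are assumed from the SDK's code-generation conventions, and the names and ARN are placeholders.

        import software.amazon.awssdk.services.forecast.ForecastClient;
        import software.amazon.awssdk.services.forecast.model.CreateForecastRequest;
        import software.amazon.awssdk.services.forecast.model.CreateForecastResponse;

        public class CreateForecastSketch {
            public static void main(String[] args) {
                try (ForecastClient forecast = ForecastClient.create()) {
                    CreateForecastRequest request = CreateForecastRequest.builder()
                            .forecastName("retail_demand_forecast")
                            // Placeholder ARN of an ACTIVE predictor created with CreatePredictor.
                            .predictorArn("arn:aws:forecast:us-west-2:111122223333:predictor/retail_demand_predictor")
                            .build();
                    CreateForecastResponse response = forecast.createForecast(request);
                    // The forecast must reach ACTIVE status before it can be queried or exported.
                    System.out.println("Forecast ARN: " + response.forecastArn());
                }
            }
        }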

    " + }, + "CreateForecastExportJob":{ + "name":"CreateForecastExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateForecastExportJobRequest"}, + "output":{"shape":"CreateForecastExportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Exports a forecast created by the CreateForecast operation to your Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will match the following conventions:

    <ForecastExportJobName>_<ExportTimestamp>_<PartNumber>

    where the <ExportTimestamp> component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).

    You must specify a DataDestination object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.

    For more information, see howitworks-forecast.

    To get a list of all your forecast export jobs, use the ListForecastExportJobs operation.

    The Status of the forecast export job must be ACTIVE before you can access the forecast in your Amazon S3 bucket. To get the status, use the DescribeForecastExportJob operation.

    " + }, + "CreatePredictor":{ + "name":"CreatePredictor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePredictorRequest"}, + "output":{"shape":"CreatePredictorResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates an Amazon Forecast predictor.

    In the request, provide a dataset group and either specify an algorithm or let Amazon Forecast choose an algorithm for you using AutoML. If you specify an algorithm, you also can override algorithm-specific hyperparameters.

    Amazon Forecast uses the algorithm to train a predictor using the latest version of the datasets in the specified dataset group. You can then generate a forecast using the CreateForecast operation.

    To see the evaluation metrics, use the GetAccuracyMetrics operation.

    You can specify a featurization configuration to fill and aggregate the data fields in the TARGET_TIME_SERIES dataset to improve model training. For more information, see FeaturizationConfig.

    For RELATED_TIME_SERIES datasets, CreatePredictor verifies that the DataFrequency specified when the dataset was created matches the ForecastFrequency. TARGET_TIME_SERIES datasets don't have this restriction. Amazon Forecast also verifies the delimiter and timestamp format. For more information, see howitworks-datasets-groups.

    By default, predictors are trained and evaluated at the 0.1 (P10), 0.5 (P50), and 0.9 (P90) quantiles. You can choose custom forecast types to train and evaluate your predictor by setting the ForecastTypes.

    AutoML

    If you want Amazon Forecast to evaluate each algorithm and choose the one that minimizes the objective function, set PerformAutoML to true. The objective function is defined as the mean of the weighted losses over the forecast types. By default, these are the p10, p50, and p90 quantile losses. For more information, see EvaluationResult.

    When AutoML is enabled, the following properties are disallowed:

    • AlgorithmArn

    • HPOConfig

    • PerformHPO

    • TrainingParameters

    To get a list of all of your predictors, use the ListPredictors operation.

    Before you can use the predictor to create a forecast, the Status of the predictor must be ACTIVE, signifying that training has completed. To get the status, use the DescribePredictor operation.
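    The sketch below shows one plausible shape of this request in the AWS SDK for Java v2 with AutoML enabled; the class and member names are assumed from the SDK's code-generation conventions, and the predictor name, horizon, frequency, and dataset group ARN are example values, not recommendations.

        import software.amazon.awssdk.services.forecast.ForecastClient;
        import software.amazon.awssdk.services.forecast.model.CreatePredictorRequest;
        import software.amazon.awssdk.services.forecast.model.CreatePredictorResponse;
        import software.amazon.awssdk.services.forecast.model.FeaturizationConfig;
        import software.amazon.awssdk.services.forecast.model.InputDataConfig;

        public class CreatePredictorSketch {
            public static void main(String[] args) {
                try (ForecastClient forecast = ForecastClient.create()) {
                    CreatePredictorRequest request = CreatePredictorRequest.builder()
                            .predictorName("retail_demand_predictor")
                            .performAutoML(true)      // AutoML chooses the algorithm, so AlgorithmArn is omitted
                            .forecastHorizon(14)      // predict 14 time steps ahead
                            .inputDataConfig(InputDataConfig.builder()
                                    // Placeholder dataset group ARN.
                                    .datasetGroupArn("arn:aws:forecast:us-west-2:111122223333:dataset-group/retail_demand")
                                    .build())
                            .featurizationConfig(FeaturizationConfig.builder()
                                    .forecastFrequency("D")    // frequency of the generated forecasts
                                    .build())
                            .build();
                    CreatePredictorResponse response = forecast.createPredictor(request);
                    System.out.println("Predictor ARN: " + response.predictorArn());
                }
            }
        }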

    " + }, + "CreatePredictorBacktestExportJob":{ + "name":"CreatePredictorBacktestExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePredictorBacktestExportJobRequest"}, + "output":{"shape":"CreatePredictorBacktestExportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Exports backtest forecasts and accuracy metrics generated by the CreatePredictor operation. Two folders containing CSV files are exported to your specified S3 bucket.

    The export file names will match the following conventions:

    <ExportJobName>_<ExportTimestamp>_<PartNumber>.csv

    The <ExportTimestamp> component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).

    You must specify a DataDestination object that includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.

    The Status of the export job must be ACTIVE before you can access the export in your Amazon S3 bucket. To get the status, use the DescribePredictorBacktestExportJob operation.

    " + }, + "DeleteDataset":{ + "name":"DeleteDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDatasetRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Deletes an Amazon Forecast dataset that was created using the CreateDataset operation. You can only delete datasets that have a status of ACTIVE or CREATE_FAILED. To get the status use the DescribeDataset operation.

    Forecast does not automatically update any dataset groups that contain the deleted dataset. In order to update the dataset group, use the operation, omitting the deleted dataset's ARN.

    ", + "idempotent":true + }, + "DeleteDatasetGroup":{ + "name":"DeleteDatasetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDatasetGroupRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Deletes a dataset group created using the CreateDatasetGroup operation. You can only delete dataset groups that have a status of ACTIVE, CREATE_FAILED, or UPDATE_FAILED. To get the status, use the DescribeDatasetGroup operation.

    This operation deletes only the dataset group, not the datasets in the group.

    ", + "idempotent":true + }, + "DeleteDatasetImportJob":{ + "name":"DeleteDatasetImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDatasetImportJobRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Deletes a dataset import job created using the CreateDatasetImportJob operation. You can delete only dataset import jobs that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribeDatasetImportJob operation.

    ", + "idempotent":true + }, + "DeleteForecast":{ + "name":"DeleteForecast", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteForecastRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Deletes a forecast created using the CreateForecast operation. You can delete only forecasts that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribeForecast operation.

    You can't delete a forecast while it is being exported. After a forecast is deleted, you can no longer query the forecast.

    ", + "idempotent":true + }, + "DeleteForecastExportJob":{ + "name":"DeleteForecastExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteForecastExportJobRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Deletes a forecast export job created using the CreateForecastExportJob operation. You can delete only export jobs that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribeForecastExportJob operation.

    ", + "idempotent":true + }, + "DeletePredictor":{ + "name":"DeletePredictor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePredictorRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Deletes a predictor created using the CreatePredictor operation. You can delete only predictors that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribePredictor operation.

    ", + "idempotent":true + }, + "DeletePredictorBacktestExportJob":{ + "name":"DeletePredictorBacktestExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePredictorBacktestExportJobRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Deletes a predictor backtest export job.

    ", + "idempotent":true + }, + "DescribeDataset":{ + "name":"DescribeDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDatasetRequest"}, + "output":{"shape":"DescribeDatasetResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Describes an Amazon Forecast dataset created using the CreateDataset operation.

    In addition to listing the parameters specified in the CreateDataset request, this operation includes the following dataset properties:

    • CreationTime

    • LastModificationTime

    • Status

    ", + "idempotent":true + }, + "DescribeDatasetGroup":{ + "name":"DescribeDatasetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDatasetGroupRequest"}, + "output":{"shape":"DescribeDatasetGroupResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Describes a dataset group created using the CreateDatasetGroup operation.

    In addition to listing the parameters provided in the CreateDatasetGroup request, this operation includes the following properties:

    • DatasetArns - The datasets belonging to the group.

    • CreationTime

    • LastModificationTime

    • Status

    ", + "idempotent":true + }, + "DescribeDatasetImportJob":{ + "name":"DescribeDatasetImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDatasetImportJobRequest"}, + "output":{"shape":"DescribeDatasetImportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Describes a dataset import job created using the CreateDatasetImportJob operation.

    In addition to listing the parameters provided in the CreateDatasetImportJob request, this operation includes the following properties:

    • CreationTime

    • LastModificationTime

    • DataSize

    • FieldStatistics

    • Status

    • Message - If an error occurred, information about the error.

    ", + "idempotent":true + }, + "DescribeForecast":{ + "name":"DescribeForecast", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeForecastRequest"}, + "output":{"shape":"DescribeForecastResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Describes a forecast created using the CreateForecast operation.

    In addition to listing the properties provided in the CreateForecast request, this operation lists the following properties:

    • DatasetGroupArn - The dataset group that provided the training data.

    • CreationTime

    • LastModificationTime

    • Status

    • Message - If an error occurred, information about the error.

    ", + "idempotent":true + }, + "DescribeForecastExportJob":{ + "name":"DescribeForecastExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeForecastExportJobRequest"}, + "output":{"shape":"DescribeForecastExportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Describes a forecast export job created using the CreateForecastExportJob operation.

    In addition to listing the properties provided by the user in the CreateForecastExportJob request, this operation lists the following properties:

    • CreationTime

    • LastModificationTime

    • Status

    • Message - If an error occurred, information about the error.

    ", + "idempotent":true + }, + "DescribePredictor":{ + "name":"DescribePredictor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePredictorRequest"}, + "output":{"shape":"DescribePredictorResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Describes a predictor created using the CreatePredictor operation.

    In addition to listing the properties provided in the CreatePredictor request, this operation lists the following properties:

    • DatasetImportJobArns - The dataset import jobs used to import training data.

    • AutoMLAlgorithmArns - If AutoML is performed, the algorithms that were evaluated.

    • CreationTime

    • LastModificationTime

    • Status

    • Message - If an error occurred, information about the error.

    ", + "idempotent":true + }, + "DescribePredictorBacktestExportJob":{ + "name":"DescribePredictorBacktestExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePredictorBacktestExportJobRequest"}, + "output":{"shape":"DescribePredictorBacktestExportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Describes a predictor backtest export job created using the CreatePredictorBacktestExportJob operation.

    In addition to listing the properties provided by the user in the CreatePredictorBacktestExportJob request, this operation lists the following properties:

    • CreationTime

    • LastModificationTime

    • Status

    • Message (if an error occurred)

    ", + "idempotent":true + }, + "GetAccuracyMetrics":{ + "name":"GetAccuracyMetrics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccuracyMetricsRequest"}, + "output":{"shape":"GetAccuracyMetricsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Provides metrics on the accuracy of the models that were trained by the CreatePredictor operation. Use metrics to see how well the model performed and to decide whether to use the predictor to generate a forecast. For more information, see Predictor Metrics.

    This operation generates metrics for each backtest window that was evaluated. The number of backtest windows (NumberOfBacktestWindows) is specified using the EvaluationParameters object, which is optionally included in the CreatePredictor request. If NumberOfBacktestWindows isn't specified, the number defaults to one.

    The parameters of the filling method determine which items contribute to the metrics. If you want all items to contribute, specify zero. If you want only those items that have complete data in the range being evaluated to contribute, specify nan. For more information, see FeaturizationMethod.

    Before you can get accuracy metrics, the Status of the predictor must be ACTIVE, signifying that training has completed. To get the status, use the DescribePredictor operation.

    ", + "idempotent":true + }, + "ListDatasetGroups":{ + "name":"ListDatasetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatasetGroupsRequest"}, + "output":{"shape":"ListDatasetGroupsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

    Returns a list of dataset groups created using the CreateDatasetGroup operation. For each dataset group, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the dataset group ARN with the DescribeDatasetGroup operation.
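    Because the response is paginated through NextToken and MaxResults, a hedged sketch of walking every page with the SDK's generated paginator could look like this; the paginator method and summary accessor names are assumed from the SDK's code-generation conventions.

        import software.amazon.awssdk.services.forecast.ForecastClient;
        import software.amazon.awssdk.services.forecast.model.ListDatasetGroupsRequest;

        public class ListDatasetGroupsSketch {
            public static void main(String[] args) {
                try (ForecastClient forecast = ForecastClient.create()) {
                    // The paginator follows NextToken automatically, page by page.
                    forecast.listDatasetGroupsPaginator(ListDatasetGroupsRequest.builder().maxResults(25).build())
                            .stream()
                            .flatMap(page -> page.datasetGroups().stream())
                            .forEach(group -> System.out.println(group.datasetGroupArn()));
                }
            }
        }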

    ", + "idempotent":true + }, + "ListDatasetImportJobs":{ + "name":"ListDatasetImportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatasetImportJobsRequest"}, + "output":{"shape":"ListDatasetImportJobsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Returns a list of dataset import jobs created using the CreateDatasetImportJob operation. For each import job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the DescribeDatasetImportJob operation. You can filter the list by providing an array of Filter objects.

    ", + "idempotent":true + }, + "ListDatasets":{ + "name":"ListDatasets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatasetsRequest"}, + "output":{"shape":"ListDatasetsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

    Returns a list of datasets created using the CreateDataset operation. For each dataset, a summary of its properties, including its Amazon Resource Name (ARN), is returned. To retrieve the complete set of properties, use the ARN with the DescribeDataset operation.

    ", + "idempotent":true + }, + "ListForecastExportJobs":{ + "name":"ListForecastExportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListForecastExportJobsRequest"}, + "output":{"shape":"ListForecastExportJobsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Returns a list of forecast export jobs created using the CreateForecastExportJob operation. For each forecast export job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, use the ARN with the DescribeForecastExportJob operation. You can filter the list using an array of Filter objects.

    ", + "idempotent":true + }, + "ListForecasts":{ + "name":"ListForecasts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListForecastsRequest"}, + "output":{"shape":"ListForecastsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Returns a list of forecasts created using the CreateForecast operation. For each forecast, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, specify the ARN with the DescribeForecast operation. You can filter the list using an array of Filter objects.

    ", + "idempotent":true + }, + "ListPredictorBacktestExportJobs":{ + "name":"ListPredictorBacktestExportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPredictorBacktestExportJobsRequest"}, + "output":{"shape":"ListPredictorBacktestExportJobsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Returns a list of predictor backtest export jobs created using the CreatePredictorBacktestExportJob operation. This operation returns a summary for each backtest export job. You can filter the list using an array of Filter objects.

    To retrieve the complete set of properties for a particular backtest export job, use the ARN with the DescribePredictorBacktestExportJob operation.

    ", + "idempotent":true + }, + "ListPredictors":{ + "name":"ListPredictors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPredictorsRequest"}, + "output":{"shape":"ListPredictorsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Returns a list of predictors created using the CreatePredictor operation. For each predictor, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the DescribePredictor operation. You can filter the list using an array of Filter objects.

    ", + "idempotent":true + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Lists the tags for an Amazon Forecast resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Associates the specified tags with the resource identified by the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are also deleted.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Deletes the specified tags from a resource.

    " + }, + "UpdateDatasetGroup":{ + "name":"UpdateDatasetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDatasetGroupRequest"}, + "output":{"shape":"UpdateDatasetGroupResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Replaces the datasets in a dataset group with the specified datasets.

    The Status of the dataset group must be ACTIVE before you can use the dataset group to create a predictor. Use the DescribeDatasetGroup operation to get the status.

    ", + "idempotent":true + } + }, + "shapes":{ + "Arn":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\-\\_\\.\\/\\:]+$" + }, + "ArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "AttributeType":{ + "type":"string", + "enum":[ + "string", + "integer", + "float", + "timestamp", + "geolocation" + ] + }, + "Boolean":{"type":"boolean"}, + "CategoricalParameterRange":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the categorical hyperparameter to tune.

    " + }, + "Values":{ + "shape":"Values", + "documentation":"

    A list of the tunable categories for the hyperparameter.

    " + } + }, + "documentation":"

    Specifies a categorical hyperparameter and its range of tunable values. This object is part of the ParameterRanges object.

    " + }, + "CategoricalParameterRanges":{ + "type":"list", + "member":{"shape":"CategoricalParameterRange"}, + "max":20, + "min":1 + }, + "ContinuousParameterRange":{ + "type":"structure", + "required":[ + "Name", + "MaxValue", + "MinValue" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the hyperparameter to tune.

    " + }, + "MaxValue":{ + "shape":"Double", + "documentation":"

    The maximum tunable value of the hyperparameter.

    " + }, + "MinValue":{ + "shape":"Double", + "documentation":"

    The minimum tunable value of the hyperparameter.

    " + }, + "ScalingType":{ + "shape":"ScalingType", + "documentation":"

    The scale that hyperparameter tuning uses to search the hyperparameter range. Valid values:

    Auto

    Amazon Forecast hyperparameter tuning chooses the best scale for the hyperparameter.

    Linear

    Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

    Logarithmic

    Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

    Logarithmic scaling works only for ranges that have values greater than 0.

    ReverseLogarithmic

    Hyperparameter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.

    Reverse logarithmic scaling works only for ranges that are entirely within the range 0 <= x < 1.0.

    For information about choosing a hyperparameter scale, see Hyperparameter Scaling.

    " + } + }, + "documentation":"

    Specifies a continuous hyperparameter and its range of tunable values. This object is part of the ParameterRanges object.

    " + }, + "ContinuousParameterRanges":{ + "type":"list", + "member":{"shape":"ContinuousParameterRange"}, + "max":20, + "min":1 + }, + "CreateDatasetGroupRequest":{ + "type":"structure", + "required":[ + "DatasetGroupName", + "Domain" + ], + "members":{ + "DatasetGroupName":{ + "shape":"Name", + "documentation":"

    A name for the dataset group.

    " + }, + "Domain":{ + "shape":"Domain", + "documentation":"

    The domain associated with the dataset group. When you add a dataset to a dataset group, this value and the value specified for the Domain parameter of the CreateDataset operation must match.

    The Domain and DatasetType that you choose determine the fields that must be present in training data that you import to a dataset. For example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires that item_id, timestamp, and demand fields are present in your data. For more information, see howitworks-datasets-groups.

    " + }, + "DatasetArns":{ + "shape":"ArnList", + "documentation":"

    An array of Amazon Resource Names (ARNs) of the datasets that you want to include in the dataset group.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The optional metadata that you apply to the dataset group to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

    The following basic restrictions apply to tags:

    • Maximum number of tags per resource - 50.

    • For each resource, each tag key must be unique, and each tag key can have only one value.

    • Maximum key length - 128 Unicode characters in UTF-8.

    • Maximum value length - 256 Unicode characters in UTF-8.

    • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

    • Tag keys and values are case sensitive.

    • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys, as they are reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

    " + } + } + }, + "CreateDatasetGroupResponse":{ + "type":"structure", + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset group.

    " + } + } + }, + "CreateDatasetImportJobRequest":{ + "type":"structure", + "required":[ + "DatasetImportJobName", + "DatasetArn", + "DataSource" + ], + "members":{ + "DatasetImportJobName":{ + "shape":"Name", + "documentation":"

    The name for the dataset import job. We recommend including the current timestamp in the name, for example, 20190721DatasetImport. This can help you avoid getting a ResourceAlreadyExistsException exception.

    " + }, + "DatasetArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want to import data to.

    " + }, + "DataSource":{ + "shape":"DataSource", + "documentation":"

    The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.

    If encryption is used, DataSource must include an AWS Key Management Service (KMS) key and the IAM role must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in the EncryptionConfig parameter of the CreateDataset operation.

    " + }, + "TimestampFormat":{ + "shape":"TimestampFormat", + "documentation":"

    The format of timestamps in the dataset. The format that you specify depends on the DataFrequency specified when the dataset was created. The following formats are supported:

    • \"yyyy-MM-dd\"

      For the following data frequencies: Y, M, W, and D

    • \"yyyy-MM-dd HH:mm:ss\"

      For the following data frequencies: H, 30min, 15min, and 1min; and optionally, for: Y, M, W, and D

    If the format isn't specified, Amazon Forecast expects the format to be \"yyyy-MM-dd HH:mm:ss\".
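
    (Illustrative only; not part of the service model.) Assuming the generated SDK for Java v2 builder names mirror the members above, an import job for an hourly dataset might be created as sketched below; the timestamp format therefore includes the time component, and the ARNs and S3 path are placeholders.

    import software.amazon.awssdk.services.forecast.ForecastClient;
    import software.amazon.awssdk.services.forecast.model.*;

    public class DatasetImportJobSketch {
        public static void main(String[] args) {
            ForecastClient forecast = ForecastClient.create();

            // Training data is a CSV in S3; the role must allow Forecast to read it.
            DataSource source = DataSource.builder()
                    .s3Config(S3Config.builder()
                            .path("s3://my-forecast-bucket/retail_demand.csv")
                            .roleArn("arn:aws:iam::123456789012:role/ForecastS3AccessRole")
                            .build())
                    .build();

            CreateDatasetImportJobRequest request = CreateDatasetImportJobRequest.builder()
                    .datasetImportJobName("retail_demand_import_20190721")
                    .datasetArn("arn:aws:forecast:us-east-1:123456789012:dataset/retail_demand")
                    .dataSource(source)
                    // Hourly data, so the format must include hours, minutes, and seconds.
                    .timestampFormat("yyyy-MM-dd HH:mm:ss")
                    .build();

            System.out.println(forecast.createDatasetImportJob(request).datasetImportJobArn());
        }
    }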

    " + }, + "TimeZone":{ + "shape":"TimeZone", + "documentation":"

    A single time zone for every item in your dataset. This option is ideal for datasets with all timestamps within a single time zone, or if all timestamps are normalized to a single time zone.

    Refer to the Joda-Time API for a complete list of valid time zone names.

    " + }, + "UseGeolocationForTimeZone":{ + "shape":"UseGeolocationForTimeZone", + "documentation":"

    Automatically derive time zone information from the geolocation attribute. This option is ideal for datasets that contain timestamps in multiple time zones and those timestamps are expressed in local time.

    " + }, + "GeolocationFormat":{ + "shape":"GeolocationFormat", + "documentation":"

    The format of the geolocation attribute. The geolocation attribute can be formatted in one of two ways:

    • LAT_LONG - the latitude and longitude in decimal format (Example: 47.61_-122.33).

    • CC_POSTALCODE (US Only) - the country code (US), followed by the 5-digit ZIP code (Example: US_98121).

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The optional metadata that you apply to the dataset import job to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

    The following basic restrictions apply to tags:

    • Maximum number of tags per resource - 50.

    • For each resource, each tag key must be unique, and each tag key can have only one value.

    • Maximum key length - 128 Unicode characters in UTF-8.

    • Maximum value length - 256 Unicode characters in UTF-8.

    • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

    • Tag keys and values are case sensitive.

    • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys; this prefix is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

    " + } + } + }, + "CreateDatasetImportJobResponse":{ + "type":"structure", + "members":{ + "DatasetImportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset import job.

    " + } + } + }, + "CreateDatasetRequest":{ + "type":"structure", + "required":[ + "DatasetName", + "Domain", + "DatasetType", + "Schema" + ], + "members":{ + "DatasetName":{ + "shape":"Name", + "documentation":"

    A name for the dataset.

    " + }, + "Domain":{ + "shape":"Domain", + "documentation":"

    The domain associated with the dataset. When you add a dataset to a dataset group, this value and the value specified for the Domain parameter of the CreateDatasetGroup operation must match.

    The Domain and DatasetType that you choose determine the fields that must be present in the training data that you import to the dataset. For example, if you choose the RETAIL domain and TARGET_TIME_SERIES as the DatasetType, Amazon Forecast requires item_id, timestamp, and demand fields to be present in your data. For more information, see howitworks-datasets-groups.

    " + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

    The dataset type. Valid values depend on the chosen Domain.

    " + }, + "DataFrequency":{ + "shape":"Frequency", + "documentation":"

    The frequency of data collection. This parameter is required for RELATED_TIME_SERIES datasets.

    Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, \"D\" indicates every day and \"15min\" indicates every 15 minutes.

    " + }, + "Schema":{ + "shape":"Schema", + "documentation":"

    The schema for the dataset. The schema attributes and their order must match the fields in your data. The dataset Domain and DatasetType that you choose determine the minimum required fields in your training data. For information about the required fields for a specific dataset domain and type, see howitworks-domains-ds-types.
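
    (Illustrative only; not part of the service model.) Building on the RETAIL example above, a minimal TARGET_TIME_SERIES dataset might be defined as in the following SDK for Java v2 sketch. The builder method names and the AttributeType enum constants are assumed from the member names in this model.

    import software.amazon.awssdk.services.forecast.ForecastClient;
    import software.amazon.awssdk.services.forecast.model.*;

    public class CreateDatasetSketch {
        public static void main(String[] args) {
            ForecastClient forecast = ForecastClient.create();

            // RETAIL + TARGET_TIME_SERIES requires item_id, timestamp, and demand,
            // in the same order as the columns of the training data.
            Schema schema = Schema.builder()
                    .attributes(
                            SchemaAttribute.builder().attributeName("item_id").attributeType(AttributeType.STRING).build(),
                            SchemaAttribute.builder().attributeName("timestamp").attributeType(AttributeType.TIMESTAMP).build(),
                            SchemaAttribute.builder().attributeName("demand").attributeType(AttributeType.FLOAT).build())
                    .build();

            CreateDatasetRequest request = CreateDatasetRequest.builder()
                    .datasetName("retail_demand")
                    .domain(Domain.RETAIL)
                    .datasetType(DatasetType.TARGET_TIME_SERIES)
                    .dataFrequency("D")   // daily data collection
                    .schema(schema)
                    .build();

            System.out.println(forecast.createDataset(request).datasetArn());
        }
    }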

    " + }, + "EncryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

    An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The optional metadata that you apply to the dataset to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

    The following basic restrictions apply to tags:

    • Maximum number of tags per resource - 50.

    • For each resource, each tag key must be unique, and each tag key can have only one value.

    • Maximum key length - 128 Unicode characters in UTF-8.

    • Maximum value length - 256 Unicode characters in UTF-8.

    • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

    • Tag keys and values are case sensitive.

    • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys; this prefix is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

    " + } + } + }, + "CreateDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset.

    " + } + } + }, + "CreateForecastExportJobRequest":{ + "type":"structure", + "required":[ + "ForecastExportJobName", + "ForecastArn", + "Destination" + ], + "members":{ + "ForecastExportJobName":{ + "shape":"Name", + "documentation":"

    The name for the forecast export job.

    " + }, + "ForecastArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the forecast that you want to export.

    " + }, + "Destination":{ + "shape":"DataDestination", + "documentation":"

    The location where you want to save the forecast and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the location. The forecast must be exported to an Amazon S3 bucket.

    If encryption is used, Destination must include an AWS Key Management Service (KMS) key. The IAM role must allow Amazon Forecast permission to access the key.
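
    (Illustrative only; not part of the service model.) Assuming the generated SDK for Java v2 builder names mirror the members above, exporting a forecast to S3 might look like the following sketch; the ARNs and S3 path are placeholders, and no KMS key is used.

    import software.amazon.awssdk.services.forecast.ForecastClient;
    import software.amazon.awssdk.services.forecast.model.*;

    public class ForecastExportSketch {
        public static void main(String[] args) {
            ForecastClient forecast = ForecastClient.create();

            // The role must allow Forecast to write to this bucket.
            DataDestination destination = DataDestination.builder()
                    .s3Config(S3Config.builder()
                            .path("s3://my-forecast-bucket/exports/")
                            .roleArn("arn:aws:iam::123456789012:role/ForecastS3AccessRole")
                            .build())
                    .build();

            CreateForecastExportJobRequest request = CreateForecastExportJobRequest.builder()
                    .forecastExportJobName("retail_demand_export")
                    .forecastArn("arn:aws:forecast:us-east-1:123456789012:forecast/retail_demand_forecast")
                    .destination(destination)
                    .build();

            System.out.println(forecast.createForecastExportJob(request).forecastExportJobArn());
        }
    }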

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The optional metadata that you apply to the forecast export job to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

    The following basic restrictions apply to tags:

    • Maximum number of tags per resource - 50.

    • For each resource, each tag key must be unique, and each tag key can have only one value.

    • Maximum key length - 128 Unicode characters in UTF-8.

    • Maximum value length - 256 Unicode characters in UTF-8.

    • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

    • Tag keys and values are case sensitive.

    • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys; this prefix is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

    " + } + } + }, + "CreateForecastExportJobResponse":{ + "type":"structure", + "members":{ + "ForecastExportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the export job.

    " + } + } + }, + "CreateForecastRequest":{ + "type":"structure", + "required":[ + "ForecastName", + "PredictorArn" + ], + "members":{ + "ForecastName":{ + "shape":"Name", + "documentation":"

    A name for the forecast.

    " + }, + "PredictorArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor to use to generate the forecast.

    " + }, + "ForecastTypes":{ + "shape":"ForecastTypes", + "documentation":"

    The quantiles at which probabilistic forecasts are generated. You can currently specify up to 5 quantiles per forecast. Accepted values include 0.01 to 0.99 (increments of .01 only) and mean. The mean forecast is different from the median (0.50) when the distribution is not symmetric (for example, Beta and Negative Binomial). The default value is [\"0.1\", \"0.5\", \"0.9\"].
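
    (Illustrative only; not part of the service model.) A hedged SDK for Java v2 sketch of requesting specific quantiles follows; omitting forecastTypes would fall back to the default quantiles described above, and the predictor ARN is a placeholder.

    import software.amazon.awssdk.services.forecast.ForecastClient;
    import software.amazon.awssdk.services.forecast.model.*;

    public class CreateForecastSketch {
        public static void main(String[] args) {
            ForecastClient forecast = ForecastClient.create();

            CreateForecastRequest request = CreateForecastRequest.builder()
                    .forecastName("retail_demand_forecast")
                    .predictorArn("arn:aws:forecast:us-east-1:123456789012:predictor/retail_demand_predictor")
                    // Ask for the median plus the mean forecast.
                    .forecastTypes("0.5", "mean")
                    .build();

            System.out.println(forecast.createForecast(request).forecastArn());
        }
    }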

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The optional metadata that you apply to the forecast to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

    The following basic restrictions apply to tags:

    • Maximum number of tags per resource - 50.

    • For each resource, each tag key must be unique, and each tag key can have only one value.

    • Maximum key length - 128 Unicode characters in UTF-8.

    • Maximum value length - 256 Unicode characters in UTF-8.

    • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

    • Tag keys and values are case sensitive.

    • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys; this prefix is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

    " + } + } + }, + "CreateForecastResponse":{ + "type":"structure", + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the forecast.

    " + } + } + }, + "CreatePredictorBacktestExportJobRequest":{ + "type":"structure", + "required":[ + "PredictorBacktestExportJobName", + "PredictorArn", + "Destination" + ], + "members":{ + "PredictorBacktestExportJobName":{ + "shape":"Name", + "documentation":"

    The name for the backtest export job.

    " + }, + "PredictorArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor that you want to export.

    " + }, + "Destination":{"shape":"DataDestination"}, + "Tags":{ + "shape":"Tags", + "documentation":"

    Optional metadata to help you categorize and organize your backtests. Each tag consists of a key and an optional value, both of which you define. Tag keys and values are case sensitive.

    The following restrictions apply to tags:

    • For each resource, each tag key must be unique and each tag key must have one value.

    • Maximum number of tags per resource: 50.

    • Maximum key length: 128 Unicode characters in UTF-8.

    • Maximum value length: 256 Unicode characters in UTF-8.

    • Accepted characters: all letters and numbers, spaces representable in UTF-8, and + - = . _ : / @. If your tagging schema is used across other services and resources, the character restrictions of those services also apply.

    • Key prefixes cannot include any upper or lowercase combination of aws: or AWS:. Values can have this prefix. If a tag value has aws as its prefix but the key does not, Forecast considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit. You cannot edit or delete tag keys with this prefix.

    " + } + } + }, + "CreatePredictorBacktestExportJobResponse":{ + "type":"structure", + "members":{ + "PredictorBacktestExportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor backtest export job that you want to export.

    " + } + } + }, + "CreatePredictorRequest":{ + "type":"structure", + "required":[ + "PredictorName", + "ForecastHorizon", + "InputDataConfig", + "FeaturizationConfig" + ], + "members":{ + "PredictorName":{ + "shape":"Name", + "documentation":"

    A name for the predictor.

    " + }, + "AlgorithmArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the algorithm to use for model training. Required if PerformAutoML is not set to true.

    Supported algorithms:

    • arn:aws:forecast:::algorithm/ARIMA

    • arn:aws:forecast:::algorithm/CNN-QR

    • arn:aws:forecast:::algorithm/Deep_AR_Plus

    • arn:aws:forecast:::algorithm/ETS

    • arn:aws:forecast:::algorithm/NPTS

    • arn:aws:forecast:::algorithm/Prophet

    " + }, + "ForecastHorizon":{ + "shape":"Integer", + "documentation":"

    Specifies the number of time-steps that the model is trained to predict. The forecast horizon is also called the prediction length.

    For example, if you configure a dataset for daily data collection (using the DataFrequency parameter of the CreateDataset operation) and set the forecast horizon to 10, the model returns predictions for 10 days.

    The maximum forecast horizon is the lesser of 500 time-steps or 1/3 of the TARGET_TIME_SERIES dataset length.
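
    (Illustrative only; not part of the service model.) Assuming the generated SDK for Java v2 builder names mirror the members above, a predictor trained with an explicitly chosen algorithm might be created as sketched below. With daily data, a horizon of 10 yields 10 days of predictions and must satisfy 10 <= min(500, dataset length / 3); the dataset group ARN is a placeholder.

    import software.amazon.awssdk.services.forecast.ForecastClient;
    import software.amazon.awssdk.services.forecast.model.*;

    public class CreatePredictorSketch {
        public static void main(String[] args) {
            ForecastClient forecast = ForecastClient.create();

            CreatePredictorRequest request = CreatePredictorRequest.builder()
                    .predictorName("retail_demand_predictor")
                    .algorithmArn("arn:aws:forecast:::algorithm/ETS")
                    .forecastHorizon(10)   // 10 time-steps = 10 days for daily data
                    .inputDataConfig(InputDataConfig.builder()
                            .datasetGroupArn("arn:aws:forecast:us-east-1:123456789012:dataset-group/retail_demand_group")
                            .build())
                    .featurizationConfig(FeaturizationConfig.builder()
                            .forecastFrequency("D")
                            .build())
                    .build();

            System.out.println(forecast.createPredictor(request).predictorArn());
        }
    }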

    " + }, + "ForecastTypes":{ + "shape":"ForecastTypes", + "documentation":"

    Specifies the forecast types used to train a predictor. You can specify up to five forecast types. Forecast types can be quantiles from 0.01 to 0.99, by increments of 0.01 or higher. You can also specify the mean forecast with mean.

    The default value is [\"0.10\", \"0.50\", \"0.9\"].

    " + }, + "PerformAutoML":{ + "shape":"Boolean", + "documentation":"

    Whether to perform AutoML. When Amazon Forecast performs AutoML, it evaluates the algorithms it provides and chooses the best algorithm and configuration for your training dataset.

    The default value is false. In this case, you are required to specify an algorithm.

    Set PerformAutoML to true to have Amazon Forecast perform AutoML. This is a good option if you aren't sure which algorithm is suitable for your training data. In this case, PerformHPO must be false.

    " + }, + "PerformHPO":{ + "shape":"Boolean", + "documentation":"

    Whether to perform hyperparameter optimization (HPO). HPO finds optimal hyperparameter values for your training data. The process of performing HPO is known as running a hyperparameter tuning job.

    The default value is false. In this case, Amazon Forecast uses default hyperparameter values from the chosen algorithm.

    To override the default values, set PerformHPO to true and, optionally, supply the HyperParameterTuningJobConfig object. The tuning job specifies a metric to optimize, which hyperparameters participate in tuning, and the valid range for each tunable hyperparameter. In this case, you are required to specify an algorithm and PerformAutoML must be false.

    The following algorithms support HPO:

    • DeepAR+

    • CNN-QR
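
    (Illustrative only; not part of the service model.) Per the PerformHPO description above, tuning requires an explicit algorithm that supports HPO and PerformAutoML set to false. A hedged variant of the previous sketch, with placeholder ARNs and assumed builder names:

    import software.amazon.awssdk.services.forecast.ForecastClient;
    import software.amazon.awssdk.services.forecast.model.*;

    public class HpoPredictorSketch {
        public static void main(String[] args) {
            ForecastClient forecast = ForecastClient.create();

            CreatePredictorRequest request = CreatePredictorRequest.builder()
                    .predictorName("retail_demand_tuned")
                    .algorithmArn("arn:aws:forecast:::algorithm/CNN-QR")  // HPO needs an explicit algorithm
                    .performHPO(true)
                    .performAutoML(false)
                    .forecastHorizon(10)
                    .inputDataConfig(InputDataConfig.builder()
                            .datasetGroupArn("arn:aws:forecast:us-east-1:123456789012:dataset-group/retail_demand_group")
                            .build())
                    .featurizationConfig(FeaturizationConfig.builder()
                            .forecastFrequency("D")
                            .build())
                    .build();

            System.out.println(forecast.createPredictor(request).predictorArn());
        }
    }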

    " + }, + "TrainingParameters":{ + "shape":"TrainingParameters", + "documentation":"

    The hyperparameters to override for model training. The hyperparameters that you can override are listed in the individual algorithms. For the list of supported algorithms, see aws-forecast-choosing-recipes.

    " + }, + "EvaluationParameters":{ + "shape":"EvaluationParameters", + "documentation":"

    Used to override the default evaluation parameters of the specified algorithm. Amazon Forecast evaluates a predictor by splitting a dataset into training data and testing data. The evaluation parameters define how to perform the split and the number of iterations.

    " + }, + "HPOConfig":{ + "shape":"HyperParameterTuningJobConfig", + "documentation":"

    Provides hyperparameter override values for the algorithm. If you don't provide this parameter, Amazon Forecast uses default values. The individual algorithms specify which hyperparameters support hyperparameter optimization (HPO). For more information, see aws-forecast-choosing-recipes.

    If you included the HPOConfig object, you must set PerformHPO to true.

    " + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

    Describes the dataset group that contains the data to use to train the predictor.

    " + }, + "FeaturizationConfig":{ + "shape":"FeaturizationConfig", + "documentation":"

    The featurization configuration.

    " + }, + "EncryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

    An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The optional metadata that you apply to the predictor to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

    The following basic restrictions apply to tags:

    • Maximum number of tags per resource - 50.

    • For each resource, each tag key must be unique, and each tag key can have only one value.

    • Maximum key length - 128 Unicode characters in UTF-8.

    • Maximum value length - 256 Unicode characters in UTF-8.

    • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

    • Tag keys and values are case sensitive.

    • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys; this prefix is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

    " + } + } + }, + "CreatePredictorResponse":{ + "type":"structure", + "members":{ + "PredictorArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor.

    " + } + } + }, + "DataDestination":{ + "type":"structure", + "required":["S3Config"], + "members":{ + "S3Config":{ + "shape":"S3Config", + "documentation":"

    The path to an Amazon Simple Storage Service (Amazon S3) bucket along with the credentials to access the bucket.

    " + } + }, + "documentation":"

    The destination for an export job. Provide an S3 path, an AWS Identity and Access Management (IAM) role that allows Amazon Forecast to access the location, and an AWS Key Management Service (KMS) key (optional).

    " + }, + "DataSource":{ + "type":"structure", + "required":["S3Config"], + "members":{ + "S3Config":{ + "shape":"S3Config", + "documentation":"

    The path to the training data stored in an Amazon Simple Storage Service (Amazon S3) bucket along with the credentials to access the data.

    " + } + }, + "documentation":"

    The source of your training data, an AWS Identity and Access Management (IAM) role that allows Amazon Forecast to access the data and, optionally, an AWS Key Management Service (KMS) key. This object is submitted in the CreateDatasetImportJob request.

    " + }, + "DatasetGroupSummary":{ + "type":"structure", + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset group.

    " + }, + "DatasetGroupName":{ + "shape":"Name", + "documentation":"

    The name of the dataset group.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the dataset group was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime is the current time of the ListDatasetGroups call.

    " + } + }, + "documentation":"

    Provides a summary of the dataset group properties used in the ListDatasetGroups operation. To get the complete set of properties, call the DescribeDatasetGroup operation, and provide the DatasetGroupArn.

    " + }, + "DatasetGroups":{ + "type":"list", + "member":{"shape":"DatasetGroupSummary"} + }, + "DatasetImportJobSummary":{ + "type":"structure", + "members":{ + "DatasetImportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset import job.

    " + }, + "DatasetImportJobName":{ + "shape":"Name", + "documentation":"

    The name of the dataset import job.

    " + }, + "DataSource":{ + "shape":"DataSource", + "documentation":"

    The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.

    If encryption is used, DataSource includes an AWS Key Management Service (KMS) key.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the dataset import job. The status is reflected in the status of the dataset. For example, when the import job status is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS. States include:

    • ACTIVE

    • CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED

    • DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED

    " + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

    If an error occurred, an informational message about the error.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the dataset import job was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    The last time that the dataset was modified. The time depends on the status of the job, as follows:

    • CREATE_PENDING - The same time as CreationTime.

    • CREATE_IN_PROGRESS - The current timestamp.

    • ACTIVE or CREATE_FAILED - When the job finished or failed.

    " + } + }, + "documentation":"

    Provides a summary of the dataset import job properties used in the ListDatasetImportJobs operation. To get the complete set of properties, call the DescribeDatasetImportJob operation, and provide the DatasetImportJobArn.

    " + }, + "DatasetImportJobs":{ + "type":"list", + "member":{"shape":"DatasetImportJobSummary"} + }, + "DatasetSummary":{ + "type":"structure", + "members":{ + "DatasetArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset.

    " + }, + "DatasetName":{ + "shape":"Name", + "documentation":"

    The name of the dataset.

    " + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

    The dataset type.

    " + }, + "Domain":{ + "shape":"Domain", + "documentation":"

    The domain associated with the dataset.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the dataset was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    When you create a dataset, LastModificationTime is the same as CreationTime. While data is being imported to the dataset, LastModificationTime is the current time of the ListDatasets call. After a CreateDatasetImportJob operation has finished, LastModificationTime is when the import job completed or failed.

    " + } + }, + "documentation":"

    Provides a summary of the dataset properties used in the ListDatasets operation. To get the complete set of properties, call the DescribeDataset operation, and provide the DatasetArn.

    " + }, + "DatasetType":{ + "type":"string", + "enum":[ + "TARGET_TIME_SERIES", + "RELATED_TIME_SERIES", + "ITEM_METADATA" + ] + }, + "Datasets":{ + "type":"list", + "member":{"shape":"DatasetSummary"} + }, + "DeleteDatasetGroupRequest":{ + "type":"structure", + "required":["DatasetGroupArn"], + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset group to delete.

    " + } + } + }, + "DeleteDatasetImportJobRequest":{ + "type":"structure", + "required":["DatasetImportJobArn"], + "members":{ + "DatasetImportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset import job to delete.

    " + } + } + }, + "DeleteDatasetRequest":{ + "type":"structure", + "required":["DatasetArn"], + "members":{ + "DatasetArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset to delete.

    " + } + } + }, + "DeleteForecastExportJobRequest":{ + "type":"structure", + "required":["ForecastExportJobArn"], + "members":{ + "ForecastExportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the forecast export job to delete.

    " + } + } + }, + "DeleteForecastRequest":{ + "type":"structure", + "required":["ForecastArn"], + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the forecast to delete.

    " + } + } + }, + "DeletePredictorBacktestExportJobRequest":{ + "type":"structure", + "required":["PredictorBacktestExportJobArn"], + "members":{ + "PredictorBacktestExportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor backtest export job to delete.

    " + } + } + }, + "DeletePredictorRequest":{ + "type":"structure", + "required":["PredictorArn"], + "members":{ + "PredictorArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor to delete.

    " + } + } + }, + "DescribeDatasetGroupRequest":{ + "type":"structure", + "required":["DatasetGroupArn"], + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset group.

    " + } + } + }, + "DescribeDatasetGroupResponse":{ + "type":"structure", + "members":{ + "DatasetGroupName":{ + "shape":"Name", + "documentation":"

    The name of the dataset group.

    " + }, + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the dataset group.

    " + }, + "DatasetArns":{ + "shape":"ArnList", + "documentation":"

    An array of Amazon Resource Names (ARNs) of the datasets contained in the dataset group.

    " + }, + "Domain":{ + "shape":"Domain", + "documentation":"

    The domain associated with the dataset group.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the dataset group. States include:

    • ACTIVE

    • CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED

    • DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED

    • UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED

    The UPDATE states apply when you call the UpdateDatasetGroup operation.

    The Status of the dataset group must be ACTIVE before you can use the dataset group to create a predictor.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the dataset group was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    When the dataset group was created or last updated from a call to the UpdateDatasetGroup operation. While the dataset group is being updated, LastModificationTime is the current time of the DescribeDatasetGroup call.

    " + } + } + }, + "DescribeDatasetImportJobRequest":{ + "type":"structure", + "required":["DatasetImportJobArn"], + "members":{ + "DatasetImportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset import job.

    " + } + } + }, + "DescribeDatasetImportJobResponse":{ + "type":"structure", + "members":{ + "DatasetImportJobName":{ + "shape":"Name", + "documentation":"

    The name of the dataset import job.

    " + }, + "DatasetImportJobArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the dataset import job.

    " + }, + "DatasetArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset that the training data was imported to.

    " + }, + "TimestampFormat":{ + "shape":"TimestampFormat", + "documentation":"

    The format of timestamps in the dataset. The format that you specify depends on the DataFrequency specified when the dataset was created. The following formats are supported:

    • \"yyyy-MM-dd\"

      For the following data frequencies: Y, M, W, and D

    • \"yyyy-MM-dd HH:mm:ss\"

      For the following data frequencies: H, 30min, 15min, and 1min; and optionally, for: Y, M, W, and D

    " + }, + "TimeZone":{ + "shape":"TimeZone", + "documentation":"

    The single time zone applied to every item in the dataset.

    " + }, + "UseGeolocationForTimeZone":{ + "shape":"UseGeolocationForTimeZone", + "documentation":"

    Whether TimeZone is automatically derived from the geolocation attribute.

    " + }, + "GeolocationFormat":{ + "shape":"GeolocationFormat", + "documentation":"

    The format of the geolocation attribute. Valid Values: \"LAT_LONG\" and \"CC_POSTALCODE\".

    " + }, + "DataSource":{ + "shape":"DataSource", + "documentation":"

    The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data.

    If encryption is used, DataSource includes an AWS Key Management Service (KMS) key.

    " + }, + "FieldStatistics":{ + "shape":"FieldStatistics", + "documentation":"

    Statistical information about each field in the input data.

    " + }, + "DataSize":{ + "shape":"Double", + "documentation":"

    The size of the dataset in gigabytes (GB) after the import job has finished.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the dataset import job. The status is reflected in the status of the dataset. For example, when the import job status is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS. States include:

    • ACTIVE

    • CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED

    • DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED

    " + }, + "Message":{ + "shape":"Message", + "documentation":"

    If an error occurred, an informational message about the error.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the dataset import job was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    The last time that the dataset was modified. The time depends on the status of the job, as follows:

    • CREATE_PENDING - The same time as CreationTime.

    • CREATE_IN_PROGRESS - The current timestamp.

    • ACTIVE or CREATE_FAILED - When the job finished or failed.

    " + } + } + }, + "DescribeDatasetRequest":{ + "type":"structure", + "required":["DatasetArn"], + "members":{ + "DatasetArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset.

    " + } + } + }, + "DescribeDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset.

    " + }, + "DatasetName":{ + "shape":"Name", + "documentation":"

    The name of the dataset.

    " + }, + "Domain":{ + "shape":"Domain", + "documentation":"

    The domain associated with the dataset.

    " + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

    The dataset type.

    " + }, + "DataFrequency":{ + "shape":"Frequency", + "documentation":"

    The frequency of data collection.

    Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, \"M\" indicates every month and \"30min\" indicates every 30 minutes.

    " + }, + "Schema":{ + "shape":"Schema", + "documentation":"

    An array of SchemaAttribute objects that specify the dataset fields. Each SchemaAttribute specifies the name and data type of a field.

    " + }, + "EncryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

    The AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the dataset. States include:

    • ACTIVE

    • CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED

    • DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED

    • UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED

    The UPDATE states apply while data is imported to the dataset from a call to the CreateDatasetImportJob operation and reflect the status of the dataset import job. For example, when the import job status is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS.

    The Status of the dataset must be ACTIVE before you can import training data.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the dataset was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    When you create a dataset, LastModificationTime is the same as CreationTime. While data is being imported to the dataset, LastModificationTime is the current time of the DescribeDataset call. After a CreateDatasetImportJob operation has finished, LastModificationTime is when the import job completed or failed.

    " + } + } + }, + "DescribeForecastExportJobRequest":{ + "type":"structure", + "required":["ForecastExportJobArn"], + "members":{ + "ForecastExportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the forecast export job.

    " + } + } + }, + "DescribeForecastExportJobResponse":{ + "type":"structure", + "members":{ + "ForecastExportJobArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the forecast export job.

    " + }, + "ForecastExportJobName":{ + "shape":"Name", + "documentation":"

    The name of the forecast export job.

    " + }, + "ForecastArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the exported forecast.

    " + }, + "Destination":{ + "shape":"DataDestination", + "documentation":"

    The path to the Amazon Simple Storage Service (Amazon S3) bucket where the forecast is exported.

    " + }, + "Message":{ + "shape":"Message", + "documentation":"

    If an error occurred, an informational message about the error.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the forecast export job. States include:

    • ACTIVE

    • CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED

    • DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED

    The Status of the forecast export job must be ACTIVE before you can access the forecast in your S3 bucket.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the forecast export job was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    When the last successful export job finished.

    " + } + } + }, + "DescribeForecastRequest":{ + "type":"structure", + "required":["ForecastArn"], + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the forecast.

    " + } + } + }, + "DescribeForecastResponse":{ + "type":"structure", + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

    The forecast ARN as specified in the request.

    " + }, + "ForecastName":{ + "shape":"Name", + "documentation":"

    The name of the forecast.

    " + }, + "ForecastTypes":{ + "shape":"ForecastTypes", + "documentation":"

    The quantiles at which probabilistic forecasts were generated.

    " + }, + "PredictorArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the predictor used to generate the forecast.

    " + }, + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the dataset group that provided the data used to train the predictor.

    " + }, + "Status":{ + "shape":"String", + "documentation":"

    The status of the forecast. States include:

    • ACTIVE

    • CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED

    • DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED

    The Status of the forecast must be ACTIVE before you can query or export the forecast.

    " + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

    If an error occurred, an informational message about the error.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the forecast creation task was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    Initially, the same as CreationTime (status is CREATE_PENDING). Updated when inference (creating the forecast) starts (status changed to CREATE_IN_PROGRESS), and when inference is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED).

    " + } + } + }, + "DescribePredictorBacktestExportJobRequest":{ + "type":"structure", + "required":["PredictorBacktestExportJobArn"], + "members":{ + "PredictorBacktestExportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor backtest export job.

    " + } + } + }, + "DescribePredictorBacktestExportJobResponse":{ + "type":"structure", + "members":{ + "PredictorBacktestExportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor backtest export job.

    " + }, + "PredictorBacktestExportJobName":{ + "shape":"Name", + "documentation":"

    The name of the predictor backtest export job.

    " + }, + "PredictorArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor.

    " + }, + "Destination":{"shape":"DataDestination"}, + "Message":{ + "shape":"Message", + "documentation":"

    Information about any errors that may have occurred during the backtest export.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the predictor backtest export job. States include:

    • ACTIVE

    • CREATE_PENDING

    • CREATE_IN_PROGRESS

    • CREATE_FAILED

    • DELETE_PENDING

    • DELETE_IN_PROGRESS

    • DELETE_FAILED

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the predictor backtest export job was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    When the last successful export job finished.

    " + } + } + }, + "DescribePredictorRequest":{ + "type":"structure", + "required":["PredictorArn"], + "members":{ + "PredictorArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor that you want information about.

    " + } + } + }, + "DescribePredictorResponse":{ + "type":"structure", + "members":{ + "PredictorArn":{ + "shape":"Name", + "documentation":"

    The ARN of the predictor.

    " + }, + "PredictorName":{ + "shape":"Name", + "documentation":"

    The name of the predictor.

    " + }, + "AlgorithmArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the algorithm used for model training.

    " + }, + "ForecastHorizon":{ + "shape":"Integer", + "documentation":"

    The number of time-steps of the forecast. The forecast horizon is also called the prediction length.

    " + }, + "ForecastTypes":{ + "shape":"ForecastTypes", + "documentation":"

    The forecast types used during predictor training. The default value is [\"0.1\",\"0.5\",\"0.9\"].

    " + }, + "PerformAutoML":{ + "shape":"Boolean", + "documentation":"

    Whether the predictor is set to perform AutoML.

    " + }, + "PerformHPO":{ + "shape":"Boolean", + "documentation":"

    Whether the predictor is set to perform hyperparameter optimization (HPO).

    " + }, + "TrainingParameters":{ + "shape":"TrainingParameters", + "documentation":"

    The default training parameters or overrides selected during model training. When running AutoML or choosing HPO with CNN-QR or DeepAR+, the optimized values for the chosen hyperparameters are returned. For more information, see aws-forecast-choosing-recipes.

    " + }, + "EvaluationParameters":{ + "shape":"EvaluationParameters", + "documentation":"

    Used to override the default evaluation parameters of the specified algorithm. Amazon Forecast evaluates a predictor by splitting a dataset into training data and testing data. The evaluation parameters define how to perform the split and the number of iterations.

    " + }, + "HPOConfig":{ + "shape":"HyperParameterTuningJobConfig", + "documentation":"

    The hyperparameter override values for the algorithm.

    " + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

    Describes the dataset group that contains the data to use to train the predictor.

    " + }, + "FeaturizationConfig":{ + "shape":"FeaturizationConfig", + "documentation":"

    The featurization configuration.

    " + }, + "EncryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

    An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.

    " + }, + "PredictorExecutionDetails":{ + "shape":"PredictorExecutionDetails", + "documentation":"

    Details on the status and results of the backtests performed to evaluate the accuracy of the predictor. You specify the number of backtests to perform when you call the operation.

    " + }, + "DatasetImportJobArns":{ + "shape":"ArnList", + "documentation":"

    An array of the ARNs of the dataset import jobs used to import training data for the predictor.

    " + }, + "AutoMLAlgorithmArns":{ + "shape":"ArnList", + "documentation":"

    When PerformAutoML is specified, the ARN of the chosen algorithm.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the predictor. States include:

    • ACTIVE

    • CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED

    • DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED

    • UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED

    The Status of the predictor must be ACTIVE before you can use the predictor to create a forecast.

    " + }, + "Message":{ + "shape":"Message", + "documentation":"

    If an error occurred, an informational message about the error.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the model training task was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    Initially, the same as CreationTime (when the status is CREATE_PENDING). This value is updated when training starts (when the status changes to CREATE_IN_PROGRESS), and when training has completed (when the status changes to ACTIVE) or fails (when the status changes to CREATE_FAILED).

    " + } + } + }, + "Domain":{ + "type":"string", + "enum":[ + "RETAIL", + "CUSTOM", + "INVENTORY_PLANNING", + "EC2_CAPACITY", + "WORK_FORCE", + "WEB_TRAFFIC", + "METRICS" + ] + }, + "Double":{"type":"double"}, + "EncryptionConfig":{ + "type":"structure", + "required":[ + "RoleArn", + "KMSKeyArn" + ], + "members":{ + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the IAM role that Amazon Forecast can assume to access the AWS KMS key.

    Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an InvalidInputException error.

    " + }, + "KMSKeyArn":{ + "shape":"KMSKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of the KMS key.

    " + } + }, + "documentation":"

    An AWS Key Management Service (KMS) key and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key. You can specify this optional object in the CreateDataset and CreatePredictor requests.

    " + }, + "ErrorMessage":{"type":"string"}, + "ErrorMetric":{ + "type":"structure", + "members":{ + "ForecastType":{ + "shape":"ForecastType", + "documentation":"

    The Forecast type used to compute WAPE and RMSE.

    " + }, + "WAPE":{ + "shape":"Double", + "documentation":"

    The weighted absolute percentage error (WAPE).

    " + }, + "RMSE":{ + "shape":"Double", + "documentation":"

    The root-mean-square error (RMSE).

    " + } + }, + "documentation":"

    Provides detailed error metrics to evaluate the performance of a predictor. This object is part of the Metrics object.

    " + }, + "ErrorMetrics":{ + "type":"list", + "member":{"shape":"ErrorMetric"} + }, + "EvaluationParameters":{ + "type":"structure", + "members":{ + "NumberOfBacktestWindows":{ + "shape":"Integer", + "documentation":"

    The number of times to split the input data. The default is 1. Valid values are 1 through 5.

    " + }, + "BackTestWindowOffset":{ + "shape":"Integer", + "documentation":"

    The point from the end of the dataset where you want to split the data for model training and testing (evaluation). Specify the value as the number of data points. The default is the value of the forecast horizon. BackTestWindowOffset can be used to mimic a past virtual forecast start date. This value must be greater than or equal to the forecast horizon and less than half of the TARGET_TIME_SERIES dataset length.

    ForecastHorizon <= BackTestWindowOffset < 1/2 * TARGET_TIME_SERIES dataset length
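
    (Illustrative only; not part of the service model.) Assuming the generated SDK for Java v2 builder names mirror the members above, a three-window backtest could be configured as below. With a forecast horizon of 10 and, say, 365 daily data points per item, an offset of 30 satisfies 10 <= 30 < 365 / 2.

    import software.amazon.awssdk.services.forecast.model.EvaluationParameters;

    public class BacktestParametersSketch {
        public static void main(String[] args) {
            EvaluationParameters evaluation = EvaluationParameters.builder()
                    .numberOfBacktestWindows(3)   // split the input data three times
                    .backTestWindowOffset(30)     // evaluate over the last 30 data points of each split
                    .build();

            // Passed to CreatePredictor through its EvaluationParameters member.
            System.out.println(evaluation);
        }
    }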

    " + } + }, + "documentation":"

    Parameters that define how to split a dataset into training data and testing data, and the number of iterations to perform. These parameters are specified in the predefined algorithms but you can override them in the CreatePredictor request.

    " + }, + "EvaluationResult":{ + "type":"structure", + "members":{ + "AlgorithmArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the algorithm that was evaluated.

    " + }, + "TestWindows":{ + "shape":"TestWindows", + "documentation":"

    The array of test windows used for evaluating the algorithm. The NumberOfBacktestWindows from the EvaluationParameters object determines the number of windows in the array.

    " + } + }, + "documentation":"

    The results of evaluating an algorithm. Returned as part of the GetAccuracyMetrics response.

    " + }, + "EvaluationType":{ + "type":"string", + "enum":[ + "SUMMARY", + "COMPUTED" + ] + }, + "Featurization":{ + "type":"structure", + "required":["AttributeName"], + "members":{ + "AttributeName":{ + "shape":"Name", + "documentation":"

    The name of the schema attribute that specifies the data field to be featurized. Amazon Forecast supports the target field of the TARGET_TIME_SERIES and the RELATED_TIME_SERIES datasets. For example, for the RETAIL domain, the target is demand, and for the CUSTOM domain, the target is target_value. For more information, see howitworks-missing-values.

    " + }, + "FeaturizationPipeline":{ + "shape":"FeaturizationPipeline", + "documentation":"

    An array of one FeaturizationMethod object that specifies the feature transformation method.

    " + } + }, + "documentation":"

    Provides featurization (transformation) information for a dataset field. This object is part of the FeaturizationConfig object.

    For example:

    {

    \"AttributeName\": \"demand\",

    \"FeaturizationPipeline\": [ {

    \"FeaturizationMethodName\": \"filling\",

    \"FeaturizationMethodParameters\": {\"aggregation\": \"avg\", \"backfill\": \"nan\"}

    } ]

    }

    " + }, + "FeaturizationConfig":{ + "type":"structure", + "required":["ForecastFrequency"], + "members":{ + "ForecastFrequency":{ + "shape":"Frequency", + "documentation":"

    The frequency of predictions in a forecast.

    Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, \"Y\" indicates every year and \"5min\" indicates every five minutes.

    The frequency must be greater than or equal to the TARGET_TIME_SERIES dataset frequency.

    When a RELATED_TIME_SERIES dataset is provided, the frequency must be equal to the RELATED_TIME_SERIES dataset frequency.

    " + }, + "ForecastDimensions":{ + "shape":"ForecastDimensions", + "documentation":"

    An array of dimension (field) names that specify how to group the generated forecast.

    For example, suppose that you are generating a forecast for item sales across all of your stores, and your dataset contains a store_id field. If you want the sales forecast for each item by store, you would specify store_id as the dimension.

    All forecast dimensions specified in the TARGET_TIME_SERIES dataset don't need to be specified in the CreatePredictor request. All forecast dimensions specified in the RELATED_TIME_SERIES dataset must be specified in the CreatePredictor request.

    " + }, + "Featurizations":{ + "shape":"Featurizations", + "documentation":"

    An array of featurization (transformation) information for the fields of a dataset.

    " + } + }, + "documentation":"

    In a CreatePredictor operation, the specified algorithm trains a model using the specified dataset group. You can optionally tell the operation to modify data fields prior to training a model. These modifications are referred to as featurization.

    You define featurization using the FeaturizationConfig object. You specify an array of transformations, one for each field that you want to featurize. You then include the FeaturizationConfig object in your CreatePredictor request. Amazon Forecast applies the featurization to the TARGET_TIME_SERIES and RELATED_TIME_SERIES datasets before model training.

    You can create multiple featurization configurations. For example, you might call the CreatePredictor operation twice by specifying different featurization configurations.
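
    (Illustrative only; not part of the service model.) A hedged SDK for Java v2 sketch of the featurization described above follows, mirroring the JSON examples in this section; the builder names, the FeaturizationMethodName enum constant, and the store_id dimension are assumptions.

    import java.util.Map;
    import software.amazon.awssdk.services.forecast.model.*;

    public class FeaturizationConfigSketch {
        public static void main(String[] args) {
            FeaturizationConfig config = FeaturizationConfig.builder()
                    .forecastFrequency("D")
                    .forecastDimensions("store_id")          // forecast each item by store
                    .featurizations(Featurization.builder()
                            .attributeName("demand")         // the RETAIL target field
                            .featurizationPipeline(FeaturizationMethod.builder()
                                    .featurizationMethodName(FeaturizationMethodName.FILLING)
                                    .featurizationMethodParameters(Map.of(
                                            "aggregation", "sum",
                                            "middlefill", "zero",
                                            "backfill", "zero"))
                                    .build())
                            .build())
                    .build();

            // Passed to CreatePredictor through its FeaturizationConfig member.
            System.out.println(config);
        }
    }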

    " + }, + "FeaturizationMethod":{ + "type":"structure", + "required":["FeaturizationMethodName"], + "members":{ + "FeaturizationMethodName":{ + "shape":"FeaturizationMethodName", + "documentation":"

    The name of the method. The \"filling\" method is the only supported method.

    " + }, + "FeaturizationMethodParameters":{ + "shape":"FeaturizationMethodParameters", + "documentation":"

    The method parameters (key-value pairs), which are a map of override parameters. Specify these parameters to override the default values. Related Time Series attributes do not accept aggregation parameters.

    The following list shows the parameters and their valid values for the \"filling\" featurization method for a Target Time Series dataset. The first value listed for each parameter is its default.

    • aggregation: sum, avg, first, min, max

    • frontfill: none

    • middlefill: zero, nan (not a number), value, median, mean, min, max

    • backfill: zero, nan, value, median, mean, min, max

    The following list shows the parameters and their valid values for a Related Time Series featurization method (there are no defaults):

    • middlefill: zero, value, median, mean, min, max

    • backfill: zero, value, median, mean, min, max

    • futurefill: zero, value, median, mean, min, max

    To set a filling method to a specific value, set the fill parameter to value and define the value in a corresponding _value parameter. For example, to set backfilling to a value of 2, include the following: \"backfill\": \"value\" and \"backfill_value\":\"2\".

    " + } + }, + "documentation":"

    Provides information about the method that featurizes (transforms) a dataset field. The method is part of the FeaturizationPipeline of the Featurization object.

    The following is an example of how you specify a FeaturizationMethod object.

    {

    \"FeaturizationMethodName\": \"filling\",

    \"FeaturizationMethodParameters\": {\"aggregation\": \"sum\", \"middlefill\": \"zero\", \"backfill\": \"zero\"}

    }

    " + }, + "FeaturizationMethodName":{ + "type":"string", + "enum":["filling"] + }, + "FeaturizationMethodParameters":{ + "type":"map", + "key":{"shape":"ParameterKey"}, + "value":{"shape":"ParameterValue"}, + "max":20, + "min":1 + }, + "FeaturizationPipeline":{ + "type":"list", + "member":{"shape":"FeaturizationMethod"}, + "max":1, + "min":1 + }, + "Featurizations":{ + "type":"list", + "member":{"shape":"Featurization"}, + "max":50, + "min":1 + }, + "FieldStatistics":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Statistics"} + }, + "Filter":{ + "type":"structure", + "required":[ + "Key", + "Value", + "Condition" + ], + "members":{ + "Key":{ + "shape":"String", + "documentation":"

    The name of the parameter to filter on.

    " + }, + "Value":{ + "shape":"Arn", + "documentation":"

    The value to match.

    " + }, + "Condition":{ + "shape":"FilterConditionString", + "documentation":"

    The condition to apply. To include the objects that match the statement, specify IS. To exclude matching objects, specify IS_NOT.

    " + } + }, + "documentation":"

    Describes a filter for choosing a subset of objects. Each filter consists of a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the objects that match the statement, respectively. The match statement consists of a key and a value.
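
    (Illustrative only; not part of the service model.) A minimal sketch of constructing such a filter with the SDK for Java v2, assuming the generated names mirror the members above; the dataset group ARN is a placeholder, and the filter would be supplied to a List operation through its Filters member.

    import software.amazon.awssdk.services.forecast.model.Filter;
    import software.amazon.awssdk.services.forecast.model.FilterConditionString;

    public class FilterSketch {
        public static void main(String[] args) {
            // Include only objects tied to one dataset group.
            Filter byDatasetGroup = Filter.builder()
                    .key("DatasetGroupArn")
                    .value("arn:aws:forecast:us-east-1:123456789012:dataset-group/retail_demand_group")
                    .condition(FilterConditionString.IS)
                    .build();

            System.out.println(byDatasetGroup);
        }
    }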

    " + }, + "FilterConditionString":{ + "type":"string", + "enum":[ + "IS", + "IS_NOT" + ] + }, + "Filters":{ + "type":"list", + "member":{"shape":"Filter"} + }, + "ForecastDimensions":{ + "type":"list", + "member":{"shape":"Name"}, + "max":5, + "min":1 + }, + "ForecastExportJobSummary":{ + "type":"structure", + "members":{ + "ForecastExportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the forecast export job.

    " + }, + "ForecastExportJobName":{ + "shape":"Name", + "documentation":"

    The name of the forecast export job.

    " + }, + "Destination":{ + "shape":"DataDestination", + "documentation":"

    The path to the Amazon Simple Storage Service (Amazon S3) bucket where the forecast is exported.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the forecast export job. States include:

    • ACTIVE

    • CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED

    • DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED

    The Status of the forecast export job must be ACTIVE before you can access the forecast in your S3 bucket.

    " + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

    If an error occurred, an informational message about the error.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the forecast export job was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    When the last successful export job finished.

    " + } + }, + "documentation":"

    Provides a summary of the forecast export job properties used in the ListForecastExportJobs operation. To get the complete set of properties, call the DescribeForecastExportJob operation, and provide the listed ForecastExportJobArn.

    " + }, + "ForecastExportJobs":{ + "type":"list", + "member":{"shape":"ForecastExportJobSummary"} + }, + "ForecastSummary":{ + "type":"structure", + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the forecast.

    " + }, + "ForecastName":{ + "shape":"Name", + "documentation":"

    The name of the forecast.

    " + }, + "PredictorArn":{ + "shape":"String", + "documentation":"

    The ARN of the predictor used to generate the forecast.

    " + }, + "DatasetGroupArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset group that provided the data used to train the predictor.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the forecast. States include:

    • ACTIVE

    • CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED

    • DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED

    The Status of the forecast must be ACTIVE before you can query or export the forecast.

    " + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

    If an error occurred, an informational message about the error.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the forecast creation task was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    Initially, the same as CreationTime (status is CREATE_PENDING). Updated when inference (creating the forecast) starts (status changed to CREATE_IN_PROGRESS), and when inference is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED).

    " + } + }, + "documentation":"

    Provides a summary of the forecast properties used in the ListForecasts operation. To get the complete set of properties, call the DescribeForecast operation, and provide the ForecastArn that is listed in the summary.

    " + }, + "ForecastType":{ + "type":"string", + "pattern":"(^0?\\.\\d\\d?$|^mean$)" + }, + "ForecastTypes":{ + "type":"list", + "member":{"shape":"ForecastType"}, + "max":20, + "min":1 + }, + "Forecasts":{ + "type":"list", + "member":{"shape":"ForecastSummary"} + }, + "Frequency":{ + "type":"string", + "pattern":"^Y|M|W|D|H|30min|15min|10min|5min|1min$" + }, + "GeolocationFormat":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9_]+$" + }, + "GetAccuracyMetricsRequest":{ + "type":"structure", + "required":["PredictorArn"], + "members":{ + "PredictorArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor to get metrics for.

    " + } + } + }, + "GetAccuracyMetricsResponse":{ + "type":"structure", + "members":{ + "PredictorEvaluationResults":{ + "shape":"PredictorEvaluationResults", + "documentation":"

    An array of results from evaluating the predictor.
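    As a purely illustrative sketch (not part of the service model), retrieving these metrics through the AWS SDK for Java v2 client generated from this model might look like the following; the predictor ARN is a placeholder and the client/builder names assume the standard generated v2 API.

        import software.amazon.awssdk.services.forecast.ForecastClient;
        import software.amazon.awssdk.services.forecast.model.GetAccuracyMetricsRequest;
        import software.amazon.awssdk.services.forecast.model.GetAccuracyMetricsResponse;

        ForecastClient forecast = ForecastClient.create();
        GetAccuracyMetricsResponse metrics = forecast.getAccuracyMetrics(
            GetAccuracyMetricsRequest.builder()
                .predictorArn("arn:aws:forecast:us-west-2:123456789012:predictor/my_predictor") // placeholder ARN
                .build());
        // Each EvaluationResult pairs an algorithm with the backtest windows and their metrics.
        metrics.predictorEvaluationResults().forEach(result ->
            System.out.println(result.algorithmArn() + ": " + result.testWindows()));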

    " + } + } + }, + "HyperParameterTuningJobConfig":{ + "type":"structure", + "members":{ + "ParameterRanges":{ + "shape":"ParameterRanges", + "documentation":"

    Specifies the ranges of valid values for the hyperparameters.

    " + } + }, + "documentation":"

    Configuration information for a hyperparameter tuning job. You specify this object in the CreatePredictor request.

    A hyperparameter is a parameter that governs the model training process. You set hyperparameters before training starts, unlike model parameters, which are determined during training. The values of the hyperparameters affect which values are chosen for the model parameters.

    In a hyperparameter tuning job, Amazon Forecast chooses the set of hyperparameter values that optimize a specified metric. Forecast accomplishes this by running many training jobs over a range of hyperparameter values. The optimum set of values depends on the algorithm, the training data, and the specified metric objective.
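    A minimal sketch of how this object might be populated with the AWS SDK for Java v2 follows; the hyperparameter name "epochs" and the value range are illustrative assumptions, not service-defined defaults.

        import software.amazon.awssdk.services.forecast.model.HyperParameterTuningJobConfig;
        import software.amazon.awssdk.services.forecast.model.IntegerParameterRange;
        import software.amazon.awssdk.services.forecast.model.ParameterRanges;
        import software.amazon.awssdk.services.forecast.model.ScalingType;

        // Tune a single integer hyperparameter over 10..500 and let Forecast pick the search scale.
        HyperParameterTuningJobConfig hpoConfig = HyperParameterTuningJobConfig.builder()
            .parameterRanges(ParameterRanges.builder()
                .integerParameterRanges(IntegerParameterRange.builder()
                    .name("epochs")                // illustrative hyperparameter name
                    .minValue(10)
                    .maxValue(500)
                    .scalingType(ScalingType.AUTO)
                    .build())
                .build())
            .build();
        // The config is then supplied as the HPOConfig member of the CreatePredictor request.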

    " + }, + "InputDataConfig":{ + "type":"structure", + "required":["DatasetGroupArn"], + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset group.

    " + }, + "SupplementaryFeatures":{ + "shape":"SupplementaryFeatures", + "documentation":"

    An array of supplementary features. The only supported feature is a holiday calendar.

    " + } + }, + "documentation":"

    The data used to train a predictor. The data includes a dataset group and any supplementary features. You specify this object in the CreatePredictor request.

    " + }, + "Integer":{"type":"integer"}, + "IntegerParameterRange":{ + "type":"structure", + "required":[ + "Name", + "MaxValue", + "MinValue" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the hyperparameter to tune.

    " + }, + "MaxValue":{ + "shape":"Integer", + "documentation":"

    The maximum tunable value of the hyperparameter.

    " + }, + "MinValue":{ + "shape":"Integer", + "documentation":"

    The minimum tunable value of the hyperparameter.

    " + }, + "ScalingType":{ + "shape":"ScalingType", + "documentation":"

    The scale that hyperparameter tuning uses to search the hyperparameter range. Valid values:

    Auto

    Amazon Forecast hyperparameter tuning chooses the best scale for the hyperparameter.

    Linear

    Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

    Logarithmic

    Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

    Logarithmic scaling works only for ranges that have values greater than 0.

    ReverseLogarithmic

    Not supported for IntegerParameterRange.

    Reverse logarithmic scaling works only for ranges that are entirely within the range 0 <= x < 1.0.

    For information about choosing a hyperparameter scale, see Hyperparameter Scaling.

    " + } + }, + "documentation":"

    Specifies an integer hyperparameter and its range of tunable values. This object is part of the ParameterRanges object.

    " + }, + "IntegerParameterRanges":{ + "type":"list", + "member":{"shape":"IntegerParameterRange"}, + "max":20, + "min":1 + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    We can't process the request because it includes an invalid value or a value that exceeds the valid range.

    ", + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The token is not valid. Tokens expire after 24 hours.

    ", + "exception":true + }, + "KMSKeyArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws:kms:.*:key/.*" + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The limit on the number of resources per account has been exceeded.

    ", + "exception":true + }, + "ListDatasetGroupsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of items to return in the response.

    " + } + } + }, + "ListDatasetGroupsResponse":{ + "type":"structure", + "members":{ + "DatasetGroups":{ + "shape":"DatasetGroups", + "documentation":"

    An array of objects that summarize each dataset group's properties.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.
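    A minimal manual pagination loop with the AWS SDK for Java v2 might look like the sketch below; the page size and output handling are illustrative.

        import software.amazon.awssdk.services.forecast.ForecastClient;
        import software.amazon.awssdk.services.forecast.model.ListDatasetGroupsRequest;
        import software.amazon.awssdk.services.forecast.model.ListDatasetGroupsResponse;

        ForecastClient forecast = ForecastClient.create();
        String nextToken = null;
        do {
            // Request up to 25 dataset groups per page, passing the previous token if there was one.
            ListDatasetGroupsResponse page = forecast.listDatasetGroups(
                ListDatasetGroupsRequest.builder()
                    .maxResults(25)
                    .nextToken(nextToken)
                    .build());
            page.datasetGroups().forEach(group -> System.out.println(group.datasetGroupArn()));
            nextToken = page.nextToken();
        } while (nextToken != null);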

    " + } + } + }, + "ListDatasetImportJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of items to return in the response.

    " + }, + "Filters":{ + "shape":"Filters", + "documentation":"

    An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the datasets that match the statement from the list, respectively. The match statement consists of a key and a value.

    Filter properties

    • Condition - The condition to apply. Valid values are IS and IS_NOT. To include the datasets that match the statement, specify IS. To exclude matching datasets, specify IS_NOT.

    • Key - The name of the parameter to filter on. Valid values are DatasetArn and Status.

    • Value - The value to match.

    For example, to list all dataset import jobs whose status is ACTIVE, you specify the following filter:

    \"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]

    " + } + } + }, + "ListDatasetImportJobsResponse":{ + "type":"structure", + "members":{ + "DatasetImportJobs":{ + "shape":"DatasetImportJobs", + "documentation":"

    An array of objects that summarize each dataset import job's properties.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

    " + } + } + }, + "ListDatasetsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of items to return in the response.

    " + } + } + }, + "ListDatasetsResponse":{ + "type":"structure", + "members":{ + "Datasets":{ + "shape":"Datasets", + "documentation":"

    An array of objects that summarize each dataset's properties.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

    " + } + } + }, + "ListForecastExportJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of items to return in the response.

    " + }, + "Filters":{ + "shape":"Filters", + "documentation":"

    An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the forecast export jobs that match the statement from the list, respectively. The match statement consists of a key and a value.

    Filter properties

    • Condition - The condition to apply. Valid values are IS and IS_NOT. To include the forecast export jobs that match the statement, specify IS. To exclude matching forecast export jobs, specify IS_NOT.

    • Key - The name of the parameter to filter on. Valid values are ForecastArn and Status.

    • Value - The value to match.

    For example, to list all jobs that export a forecast named electricityforecast, specify the following filter:

    \"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"ForecastArn\", \"Value\": \"arn:aws:forecast:us-west-2:<acct-id>:forecast/electricityforecast\" } ]

    " + } + } + }, + "ListForecastExportJobsResponse":{ + "type":"structure", + "members":{ + "ForecastExportJobs":{ + "shape":"ForecastExportJobs", + "documentation":"

    An array of objects that summarize each export job's properties.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

    " + } + } + }, + "ListForecastsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of items to return in the response.

    " + }, + "Filters":{ + "shape":"Filters", + "documentation":"

    An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the forecasts that match the statement from the list, respectively. The match statement consists of a key and a value.

    Filter properties

    • Condition - The condition to apply. Valid values are IS and IS_NOT. To include the forecasts that match the statement, specify IS. To exclude matching forecasts, specify IS_NOT.

    • Key - The name of the parameter to filter on. Valid values are DatasetGroupArn, PredictorArn, and Status.

    • Value - The value to match.

    For example, to list all forecasts whose status is not ACTIVE, you would specify:

    \"Filters\": [ { \"Condition\": \"IS_NOT\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]

    " + } + } + }, + "ListForecastsResponse":{ + "type":"structure", + "members":{ + "Forecasts":{ + "shape":"Forecasts", + "documentation":"

    An array of objects that summarize each forecast's properties.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

    " + } + } + }, + "ListPredictorBacktestExportJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of items to return in the response.

    " + }, + "Filters":{ + "shape":"Filters", + "documentation":"

    An array of filters. For each filter, provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the predictor backtest export jobs that match the statement from the list. The match statement consists of a key and a value.

    Filter properties

    • Condition - The condition to apply. Valid values are IS and IS_NOT. To include the predictor backtest export jobs that match the statement, specify IS. To exclude matching predictor backtest export jobs, specify IS_NOT.

    • Key - The name of the parameter to filter on. Valid values are PredictorBacktestExportJobArn and Status.

    • Value - The value to match.

    " + } + } + }, + "ListPredictorBacktestExportJobsResponse":{ + "type":"structure", + "members":{ + "PredictorBacktestExportJobs":{ + "shape":"PredictorBacktestExportJobs", + "documentation":"

    An array of objects that summarize the properties of each predictor backtest export job.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Returns this token if the response is truncated. To retrieve the next set of results, use the token in the next request.

    " + } + } + }, + "ListPredictorsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of items to return in the response.

    " + }, + "Filters":{ + "shape":"Filters", + "documentation":"

    An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the predictors that match the statement from the list, respectively. The match statement consists of a key and a value.

    Filter properties

    • Condition - The condition to apply. Valid values are IS and IS_NOT. To include the predictors that match the statement, specify IS. To exclude matching predictors, specify IS_NOT.

    • Key - The name of the parameter to filter on. Valid values are DatasetGroupArn and Status.

    • Value - The value to match.

    For example, to list all predictors whose status is ACTIVE, you would specify:

    \"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]

    " + } + } + }, + "ListPredictorsResponse":{ + "type":"structure", + "members":{ + "Predictors":{ + "shape":"Predictors", + "documentation":"

    An array of objects that summarize each predictor's properties.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast export jobs.

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

    The tags for the resource.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "Message":{"type":"string"}, + "Metrics":{ + "type":"structure", + "members":{ + "RMSE":{ + "shape":"Double", + "documentation":"

    The root-mean-square error (RMSE).

    ", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, please refer to ErrorMetrics for both RMSE and WAPE" + }, + "WeightedQuantileLosses":{ + "shape":"WeightedQuantileLosses", + "documentation":"

    An array of weighted quantile losses. Quantiles divide a probability distribution into regions of equal probability. The distribution in this case is the loss function.

    " + }, + "ErrorMetrics":{ + "shape":"ErrorMetrics", + "documentation":"

    Provides detailed error metrics on forecast type, root-mean-square error (RMSE), and weighted average percentage error (WAPE).

    " + } + }, + "documentation":"

    Provides metrics that are used to evaluate the performance of a predictor. This object is part of the WindowSummary object.

    " + }, + "Name":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[a-zA-Z][a-zA-Z0-9_]*" + }, + "NextToken":{ + "type":"string", + "max":3000, + "min":1 + }, + "ParameterKey":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\-\\_\\.\\/\\[\\]\\,\\\\]+$" + }, + "ParameterRanges":{ + "type":"structure", + "members":{ + "CategoricalParameterRanges":{ + "shape":"CategoricalParameterRanges", + "documentation":"

    Specifies the tunable range for each categorical hyperparameter.

    " + }, + "ContinuousParameterRanges":{ + "shape":"ContinuousParameterRanges", + "documentation":"

    Specifies the tunable range for each continuous hyperparameter.

    " + }, + "IntegerParameterRanges":{ + "shape":"IntegerParameterRanges", + "documentation":"

    Specifies the tunable range for each integer hyperparameter.

    " + } + }, + "documentation":"

    Specifies the categorical, continuous, and integer hyperparameters, and their ranges of tunable values. The range of tunable values determines which values that a hyperparameter tuning job can choose for the specified hyperparameter. This object is part of the HyperParameterTuningJobConfig object.

    " + }, + "ParameterValue":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\-\\_\\.\\/\\[\\]\\,\\\"\\\\\\s]+$" + }, + "PredictorBacktestExportJobSummary":{ + "type":"structure", + "members":{ + "PredictorBacktestExportJobArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the predictor backtest export job.

    " + }, + "PredictorBacktestExportJobName":{ + "shape":"Name", + "documentation":"

    The name of the predictor backtest export job.

    " + }, + "Destination":{"shape":"DataDestination"}, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the predictor backtest export job. States include:

    • ACTIVE

    • CREATE_PENDING

    • CREATE_IN_PROGRESS

    • CREATE_FAILED

    • DELETE_PENDING

    • DELETE_IN_PROGRESS

    • DELETE_FAILED

    " + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

    Information about any errors that may have occurred during the backtest export.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the predictor backtest export job was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    When the last successful export job finished.

    " + } + }, + "documentation":"

    Provides a summary of the predictor backtest export job properties used in the ListPredictorBacktestExportJobs operation. To get a complete set of properties, call the DescribePredictorBacktestExportJob operation, and provide the listed PredictorBacktestExportJobArn.

    " + }, + "PredictorBacktestExportJobs":{ + "type":"list", + "member":{"shape":"PredictorBacktestExportJobSummary"} + }, + "PredictorEvaluationResults":{ + "type":"list", + "member":{"shape":"EvaluationResult"} + }, + "PredictorExecution":{ + "type":"structure", + "members":{ + "AlgorithmArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the algorithm used to test the predictor.

    " + }, + "TestWindows":{ + "shape":"TestWindowDetails", + "documentation":"

    An array of test windows used to evaluate the algorithm. The NumberOfBacktestWindows from the EvaluationParameters object determines the number of windows in the array.

    " + } + }, + "documentation":"

    The algorithm used to perform a backtest and the status of those tests.

    " + }, + "PredictorExecutionDetails":{ + "type":"structure", + "members":{ + "PredictorExecutions":{ + "shape":"PredictorExecutions", + "documentation":"

    An array of the backtests performed to evaluate the accuracy of the predictor against a particular algorithm. The NumberOfBacktestWindows from the EvaluationParameters object determines the number of windows in the array.

    " + } + }, + "documentation":"

    Contains details on the backtests performed to evaluate the accuracy of the predictor. The tests are returned in descending order of accuracy, with the most accurate backtest appearing first. You specify the number of backtests to perform when you call the operation.

    " + }, + "PredictorExecutions":{ + "type":"list", + "member":{"shape":"PredictorExecution"}, + "max":5, + "min":1 + }, + "PredictorSummary":{ + "type":"structure", + "members":{ + "PredictorArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the predictor.

    " + }, + "PredictorName":{ + "shape":"Name", + "documentation":"

    The name of the predictor.

    " + }, + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the dataset group that contains the data used to train the predictor.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the predictor. States include:

    • ACTIVE

    • CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED

    • DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED

    • UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED

    The Status of the predictor must be ACTIVE before you can use the predictor to create a forecast.

    " + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

    If an error occurred, an informational message about the error.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When the model training task was created.

    " + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

    Initially, the same as CreationTime (status is CREATE_PENDING). Updated when training starts (status changed to CREATE_IN_PROGRESS), and when training is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED).

    " + } + }, + "documentation":"

    Provides a summary of the predictor properties that are used in the ListPredictors operation. To get the complete set of properties, call the DescribePredictor operation, and provide the listed PredictorArn.

    " + }, + "Predictors":{ + "type":"list", + "member":{"shape":"PredictorSummary"} + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    There is already a resource with this name. Try again with a different name.

    ", + "exception":true + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The specified resource is in use.

    ", + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.

    ", + "exception":true + }, + "S3Config":{ + "type":"structure", + "required":[ + "Path", + "RoleArn" + ], + "members":{ + "Path":{ + "shape":"S3Path", + "documentation":"

    The path to an Amazon Simple Storage Service (Amazon S3) bucket or file(s) in an Amazon S3 bucket.

    " + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket or files. If you provide a value for the KMSKeyArn key, the role must allow access to the key.

    Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an InvalidInputException error.

    " + }, + "KMSKeyArn":{ + "shape":"KMSKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key.

    " + } + }, + "documentation":"

    The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the file(s). Optionally, includes an AWS Key Management Service (KMS) key. This object is part of the DataSource object that is submitted in the CreateDatasetImportJob request, and part of the DataDestination object.
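    As an illustration only, building this object with the AWS SDK for Java v2 might look like the sketch below; the bucket, object key, and IAM role ARN are placeholders.

        import software.amazon.awssdk.services.forecast.model.DataSource;
        import software.amazon.awssdk.services.forecast.model.S3Config;

        // Point an import at a CSV file in S3; the role must let Forecast read the bucket.
        DataSource dataSource = DataSource.builder()
            .s3Config(S3Config.builder()
                .path("s3://my-forecast-bucket/target_time_series.csv")    // placeholder path
                .roleArn("arn:aws:iam::123456789012:role/MyForecastRole")  // placeholder role ARN
                .build())
            .build();
        // dataSource is then supplied on the CreateDatasetImportJob request.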

    " + }, + "S3Path":{ + "type":"string", + "pattern":"^s3://[a-z0-9].+$" + }, + "ScalingType":{ + "type":"string", + "enum":[ + "Auto", + "Linear", + "Logarithmic", + "ReverseLogarithmic" + ] + }, + "Schema":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"SchemaAttributes", + "documentation":"

    An array of attributes specifying the name and type of each field in a dataset.

    " + } + }, + "documentation":"

    Defines the fields of a dataset. You specify this object in the CreateDataset request.

    " + }, + "SchemaAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"Name", + "documentation":"

    The name of the dataset field.

    " + }, + "AttributeType":{ + "shape":"AttributeType", + "documentation":"

    The data type of the field.

    " + } + }, + "documentation":"

    An attribute of a schema, which defines a dataset field. A schema attribute is required for every field in a dataset. The Schema object contains an array of SchemaAttribute objects.
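    For illustration, a schema for a simple target time series dataset might be built with the AWS SDK for Java v2 as sketched below; the field names are assumptions chosen for the example, not values required by the service model.

        import software.amazon.awssdk.services.forecast.model.AttributeType;
        import software.amazon.awssdk.services.forecast.model.Schema;
        import software.amazon.awssdk.services.forecast.model.SchemaAttribute;

        // A three-field schema: the timestamp, the item identifier, and the value to forecast.
        Schema schema = Schema.builder()
            .attributes(
                SchemaAttribute.builder().attributeName("timestamp").attributeType(AttributeType.TIMESTAMP).build(),
                SchemaAttribute.builder().attributeName("item_id").attributeType(AttributeType.STRING).build(),
                SchemaAttribute.builder().attributeName("target_value").attributeType(AttributeType.FLOAT).build())
            .build();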

    " + }, + "SchemaAttributes":{ + "type":"list", + "member":{"shape":"SchemaAttribute"}, + "max":100, + "min":1 + }, + "Statistics":{ + "type":"structure", + "members":{ + "Count":{ + "shape":"Integer", + "documentation":"

    The number of values in the field.

    " + }, + "CountDistinct":{ + "shape":"Integer", + "documentation":"

    The number of distinct values in the field.

    " + }, + "CountNull":{ + "shape":"Integer", + "documentation":"

    The number of null values in the field.

    " + }, + "CountNan":{ + "shape":"Integer", + "documentation":"

    The number of NAN (not a number) values in the field.

    " + }, + "Min":{ + "shape":"String", + "documentation":"

    For a numeric field, the minimum value in the field.

    " + }, + "Max":{ + "shape":"String", + "documentation":"

    For a numeric field, the maximum value in the field.

    " + }, + "Avg":{ + "shape":"Double", + "documentation":"

    For a numeric field, the average value in the field.

    " + }, + "Stddev":{ + "shape":"Double", + "documentation":"

    For a numeric field, the standard deviation.

    " + } + }, + "documentation":"

    Provides statistics for each data field imported into an Amazon Forecast dataset with the CreateDatasetImportJob operation.

    " + }, + "Status":{ + "type":"string", + "max":256 + }, + "String":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\_]+$" + }, + "SupplementaryFeature":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the feature. Valid values: \"holiday\" and \"weather\".

    " + }, + "Value":{ + "shape":"Value", + "documentation":"

    Weather Index

    To enable the Weather Index, set the value to \"true\"

    Holidays

    To enable Holidays, specify a country with one of the following two-letter country codes:

    • \"AL\" - ALBANIA

    • \"AR\" - ARGENTINA

    • \"AT\" - AUSTRIA

    • \"AU\" - AUSTRALIA

    • \"BA\" - BOSNIA HERZEGOVINA

    • \"BE\" - BELGIUM

    • \"BG\" - BULGARIA

    • \"BO\" - BOLIVIA

    • \"BR\" - BRAZIL

    • \"BY\" - BELARUS

    • \"CA\" - CANADA

    • \"CL\" - CHILE

    • \"CO\" - COLOMBIA

    • \"CR\" - COSTA RICA

    • \"HR\" - CROATIA

    • \"CZ\" - CZECH REPUBLIC

    • \"DK\" - DENMARK

    • \"EC\" - ECUADOR

    • \"EE\" - ESTONIA

    • \"ET\" - ETHIOPIA

    • \"FI\" - FINLAND

    • \"FR\" - FRANCE

    • \"DE\" - GERMANY

    • \"GR\" - GREECE

    • \"HU\" - HUNGARY

    • \"IS\" - ICELAND

    • \"IN\" - INDIA

    • \"IE\" - IRELAND

    • \"IT\" - ITALY

    • \"JP\" - JAPAN

    • \"KZ\" - KAZAKHSTAN

    • \"KR\" - KOREA

    • \"LV\" - LATVIA

    • \"LI\" - LIECHTENSTEIN

    • \"LT\" - LITHUANIA

    • \"LU\" - LUXEMBOURG

    • \"MK\" - MACEDONIA

    • \"MT\" - MALTA

    • \"MX\" - MEXICO

    • \"MD\" - MOLDOVA

    • \"ME\" - MONTENEGRO

    • \"NL\" - NETHERLANDS

    • \"NZ\" - NEW ZEALAND

    • \"NI\" - NICARAGUA

    • \"NG\" - NIGERIA

    • \"NO\" - NORWAY

    • \"PA\" - PANAMA

    • \"PY\" - PARAGUAY

    • \"PE\" - PERU

    • \"PL\" - POLAND

    • \"PT\" - PORTUGAL

    • \"RO\" - ROMANIA

    • \"RU\" - RUSSIA

    • \"RS\" - SERBIA

    • \"SK\" - SLOVAKIA

    • \"SI\" - SLOVENIA

    • \"ZA\" - SOUTH AFRICA

    • \"ES\" - SPAIN

    • \"SE\" - SWEDEN

    • \"CH\" - SWITZERLAND

    • \"UA\" - UKRAINE

    • \"AE\" - UNITED ARAB EMIRATES

    • \"US\" - UNITED STATES

    • \"UK\" - UNITED KINGDOM

    • \"UY\" - URUGUAY

    • \"VE\" - VENEZUELA

    " + } + }, + "documentation":"

    Describes a supplementary feature of a dataset group. This object is part of the InputDataConfig object. Forecast supports the Weather Index and Holidays built-in featurizations.

    Weather Index

    The Amazon Forecast Weather Index is a built-in featurization that incorporates historical and projected weather information into your model. The Weather Index supplements your datasets with over two years of historical weather data and up to 14 days of projected weather data. For more information, see Amazon Forecast Weather Index.

    Holidays

    Holidays is a built-in featurization that incorporates a feature-engineered dataset of national holiday information into your model. It provides native support for the holiday calendars of 66 countries. To view the holiday calendars, refer to the Jollyday library. For more information, see Holidays Featurization.
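    A hedged sketch of enabling both built-in featurizations with the AWS SDK for Java v2 follows; the dataset group ARN is a placeholder, and the name/value pairs follow the valid values listed above.

        import software.amazon.awssdk.services.forecast.model.InputDataConfig;
        import software.amazon.awssdk.services.forecast.model.SupplementaryFeature;

        // Enable the Holidays featurization for US holidays and the Weather Index.
        InputDataConfig inputDataConfig = InputDataConfig.builder()
            .datasetGroupArn("arn:aws:forecast:us-west-2:123456789012:dataset-group/my_dataset_group") // placeholder
            .supplementaryFeatures(
                SupplementaryFeature.builder().name("holiday").value("US").build(),
                SupplementaryFeature.builder().name("weather").value("true").build())
            .build();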

    " + }, + "SupplementaryFeatures":{ + "type":"list", + "member":{"shape":"SupplementaryFeature"}, + "max":2, + "min":1 + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    One part of a key-value pair that makes up a tag. A key is a general label that acts like a category for more specific tag values.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

    " + } + }, + "documentation":"

    The optional metadata that you apply to a resource to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

    The following basic restrictions apply to tags:

    • Maximum number of tags per resource - 50.

    • For each resource, each tag key must be unique, and each tag key can have only one value.

    • Maximum key length - 128 Unicode characters in UTF-8.

    • Maximum value length - 256 Unicode characters in UTF-8.

    • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

    • Tag keys and values are case sensitive.

    • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys; this prefix is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, Forecast considers it to be a user tag and it counts against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$", + "sensitive":true + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the resource to which to add tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast export jobs.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The tags to add to the resource. A tag is a key-value pair.

    The following basic restrictions apply to tags:

    • Maximum number of tags per resource - 50.

    • For each resource, each tag key must be unique, and each tag key can have only one value.

    • Maximum key length - 128 Unicode characters in UTF-8.

    • Maximum value length - 256 Unicode characters in UTF-8.

    • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

    • Tag keys and values are case sensitive.

    • Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys; this prefix is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, Forecast considers it to be a user tag and it counts against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$", + "sensitive":true + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TestWindowDetails":{ + "type":"list", + "member":{"shape":"TestWindowSummary"} + }, + "TestWindowSummary":{ + "type":"structure", + "members":{ + "TestWindowStart":{ + "shape":"Timestamp", + "documentation":"

    The time at which the test began.

    " + }, + "TestWindowEnd":{ + "shape":"Timestamp", + "documentation":"

    The time at which the test ended.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    The status of the test. Possible status values are:

    • ACTIVE

    • CREATE_IN_PROGRESS

    • CREATE_FAILED

    " + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

    If the test failed, the reason why it failed.

    " + } + }, + "documentation":"

    The status, start time, and end time of a backtest, as well as a failure reason if applicable.

    " + }, + "TestWindows":{ + "type":"list", + "member":{"shape":"WindowSummary"} + }, + "TimeZone":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\/\\+\\-\\_]+$" + }, + "Timestamp":{"type":"timestamp"}, + "TimestampFormat":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\-\\:\\.\\,\\'\\s]+$" + }, + "TrainingParameters":{ + "type":"map", + "key":{"shape":"ParameterKey"}, + "value":{"shape":"ParameterValue"}, + "max":100, + "min":0 + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the resource from which to remove tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast export jobs.

    " + }, + "TagKeys":{ + "shape":"TagKeys", + "documentation":"

    The keys of the tags to be removed.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDatasetGroupRequest":{ + "type":"structure", + "required":[ + "DatasetGroupArn", + "DatasetArns" + ], + "members":{ + "DatasetGroupArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the dataset group.

    " + }, + "DatasetArns":{ + "shape":"ArnList", + "documentation":"

    An array of the Amazon Resource Names (ARNs) of the datasets to add to the dataset group.

    " + } + } + }, + "UpdateDatasetGroupResponse":{ + "type":"structure", + "members":{ + } + }, + "UseGeolocationForTimeZone":{"type":"boolean"}, + "Value":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\_\\-]+$" + }, + "Values":{ + "type":"list", + "member":{"shape":"Value"}, + "max":20, + "min":1 + }, + "WeightedQuantileLoss":{ + "type":"structure", + "members":{ + "Quantile":{ + "shape":"Double", + "documentation":"

    The quantile. Quantiles divide a probability distribution into regions of equal probability. For example, if the distribution was divided into 5 regions of equal probability, the quantiles would be 0.2, 0.4, 0.6, and 0.8.

    " + }, + "LossValue":{ + "shape":"Double", + "documentation":"

    The difference between the predicted value and the actual value over the quantile, weighted (normalized) by dividing by the sum over all quantiles.

    " + } + }, + "documentation":"

    The weighted loss value for a quantile. This object is part of the Metrics object.

    " + }, + "WeightedQuantileLosses":{ + "type":"list", + "member":{"shape":"WeightedQuantileLoss"} + }, + "WindowSummary":{ + "type":"structure", + "members":{ + "TestWindowStart":{ + "shape":"Timestamp", + "documentation":"

    The timestamp that defines the start of the window.

    " + }, + "TestWindowEnd":{ + "shape":"Timestamp", + "documentation":"

    The timestamp that defines the end of the window.

    " + }, + "ItemCount":{ + "shape":"Integer", + "documentation":"

    The number of data points within the window.

    " + }, + "EvaluationType":{ + "shape":"EvaluationType", + "documentation":"

    The type of evaluation.

    • SUMMARY - The average metrics across all windows.

    • COMPUTED - The metrics for the specified window.

    " + }, + "Metrics":{ + "shape":"Metrics", + "documentation":"

    Provides metrics used to evaluate the performance of a predictor.

    " + } + }, + "documentation":"

    The metrics for a time range within the evaluation portion of a dataset. This object is part of the EvaluationResult object.

    The TestWindowStart and TestWindowEnd parameters are determined by the BackTestWindowOffset parameter of the EvaluationParameters object.

    " + } + }, + "documentation":"

    Provides APIs for creating and managing Amazon Forecast resources.

    " +} diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml new file mode 100644 index 000000000000..a42cdcd75a4d --- /dev/null +++ b/services/forecastquery/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + forecastquery + AWS Java SDK :: Services :: Forecastquery + The AWS Java SDK for Forecastquery module holds the client classes that are used for + communicating with Forecastquery. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.forecastquery + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/forecastquery/src/main/resources/codegen-resources/paginators-1.json b/services/forecastquery/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/forecastquery/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/forecastquery/src/main/resources/codegen-resources/service-2.json b/services/forecastquery/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..67f3c0cb6ee0 --- /dev/null +++ b/services/forecastquery/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,182 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-06-26", + "endpointPrefix":"forecastquery", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Forecast Query Service", + "serviceId":"forecastquery", + "signatureVersion":"v4", + "signingName":"forecast", + "targetPrefix":"AmazonForecastRuntime", + "uid":"forecastquery-2018-06-26" + }, + "operations":{ + "QueryForecast":{ + "name":"QueryForecast", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"QueryForecastRequest"}, + "output":{"shape":"QueryForecastResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

    Retrieves a forecast for a single item, filtered by the supplied criteria.

    The criteria is a key-value pair. The key is either item_id (or the equivalent non-timestamp, non-target field) from the TARGET_TIME_SERIES dataset, or one of the forecast dimensions specified as part of the FeaturizationConfig object.

    By default, QueryForecast returns the complete date range for the filtered forecast. You can request a specific date range.

    To get the full forecast, use the CreateForecastExportJob operation.

    The forecasts generated by Amazon Forecast are in the same timezone as the dataset that was used to create the predictor.

    " + } + }, + "shapes":{ + "Arn":{ + "type":"string", + "max":256, + "pattern":"arn:([a-z\\d-]+):forecast:.*:.*:.+" + }, + "AttributeName":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\_\\-]+$" + }, + "AttributeValue":{ + "type":"string", + "max":256 + }, + "DataPoint":{ + "type":"structure", + "members":{ + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of the specific forecast.

    " + }, + "Value":{ + "shape":"Double", + "documentation":"

    The forecast value.

    " + } + }, + "documentation":"

    The forecast value for a specific date. Part of the Forecast object.

    " + }, + "DateTime":{"type":"string"}, + "Double":{"type":"double"}, + "ErrorMessage":{"type":"string"}, + "Filters":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"}, + "max":50, + "min":1 + }, + "Forecast":{ + "type":"structure", + "members":{ + "Predictions":{ + "shape":"Predictions", + "documentation":"

    The forecast.

    The key of the string-to-array map is one of the following values:

    • p10

    • p50

    • p90

    " + } + }, + "documentation":"

    Provides information about a forecast. Returned as part of the QueryForecast response.

    " + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The value is invalid or is too long.

    ", + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The token is not valid. Tokens expire after 24 hours.

    ", + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The limit on the number of requests per second has been exceeded.

    ", + "exception":true + }, + "NextToken":{ + "type":"string", + "max":3000, + "min":1 + }, + "Predictions":{ + "type":"map", + "key":{"shape":"Statistic"}, + "value":{"shape":"TimeSeries"} + }, + "QueryForecastRequest":{ + "type":"structure", + "required":[ + "ForecastArn", + "Filters" + ], + "members":{ + "ForecastArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the forecast to query.

    " + }, + "StartDate":{ + "shape":"DateTime", + "documentation":"

    The start date for the forecast. Specify the date using this format: yyyy-MM-dd'T'HH:mm:ss (ISO 8601 format). For example, 2015-01-01T08:00:00.

    " + }, + "EndDate":{ + "shape":"DateTime", + "documentation":"

    The end date for the forecast. Specify the date using this format: yyyy-MM-dd'T'HH:mm:ss (ISO 8601 format). For example, 2015-01-01T20:00:00.

    " + }, + "Filters":{ + "shape":"Filters", + "documentation":"

    The filtering criteria to apply when retrieving the forecast. For example, to get the forecast for client_21 in the electricity usage dataset, specify the following:

    {\"item_id\" : \"client_21\"}

    To get the full forecast, use the CreateForecastExportJob operation.
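    As a non-authoritative sketch, querying this forecast with the AWS SDK for Java v2 client generated for the forecastquery service might look like the following; the forecast ARN is a placeholder, and the client class name assumes the standard v2 naming for this module.

        import java.util.Collections;
        import software.amazon.awssdk.services.forecastquery.ForecastqueryClient;
        import software.amazon.awssdk.services.forecastquery.model.QueryForecastRequest;
        import software.amazon.awssdk.services.forecastquery.model.QueryForecastResponse;

        ForecastqueryClient query = ForecastqueryClient.create();
        // Retrieve the forecast for item client_21; the forecast ARN is a placeholder.
        QueryForecastResponse response = query.queryForecast(QueryForecastRequest.builder()
            .forecastArn("arn:aws:forecast:us-west-2:123456789012:forecast/electricityforecast")
            .filters(Collections.singletonMap("item_id", "client_21"))
            .build());
        // Predictions are keyed by quantile (for example p10, p50, p90).
        response.forecast().predictions().get("p50")
            .forEach(point -> System.out.println(point.timestamp() + " -> " + point.value()));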

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

    " + } + } + }, + "QueryForecastResponse":{ + "type":"structure", + "members":{ + "Forecast":{ + "shape":"Forecast", + "documentation":"

    The forecast.

    " + } + } + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The specified resource is in use.

    ", + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    We can't find that resource. Check the information that you've provided and try again.

    ", + "exception":true + }, + "Statistic":{ + "type":"string", + "max":4 + }, + "TimeSeries":{ + "type":"list", + "member":{"shape":"DataPoint"} + }, + "Timestamp":{"type":"string"} + }, + "documentation":"

    Provides APIs for querying forecasts created with Amazon Forecast.

    " +} diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml new file mode 100644 index 000000000000..4912570813ba --- /dev/null +++ b/services/frauddetector/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + frauddetector + AWS Java SDK :: Services :: FraudDetector + The AWS Java SDK for FraudDetector module holds the client classes that are used for + communicating with FraudDetector. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.frauddetector + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/frauddetector/src/main/resources/codegen-resources/paginators-1.json b/services/frauddetector/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..ac4b7cf14ed3 --- /dev/null +++ b/services/frauddetector/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,59 @@ +{ + "pagination": { + "DescribeModelVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetDetectors": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetEntityTypes": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetEventTypes": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetExternalModels": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetLabels": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetModels": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetOutcomes": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetRules": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "GetVariables": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListTagsForResource": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + } + } +} diff --git a/services/frauddetector/src/main/resources/codegen-resources/service-2.json b/services/frauddetector/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..f7dc9352b732 --- /dev/null +++ b/services/frauddetector/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3731 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-11-15", + "endpointPrefix":"frauddetector", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Fraud Detector", + "serviceId":"FraudDetector", + "signatureVersion":"v4", + "targetPrefix":"AWSHawksNestServiceFacade", + "uid":"frauddetector-2019-11-15" + }, + "operations":{ + "BatchCreateVariable":{ + "name":"BatchCreateVariable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchCreateVariableRequest"}, + "output":{"shape":"BatchCreateVariableResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates a batch of variables.

    " + }, + "BatchGetVariable":{ + "name":"BatchGetVariable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetVariableRequest"}, + "output":{"shape":"BatchGetVariableResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets a batch of variables.

    " + }, + "CreateDetectorVersion":{ + "name":"CreateDetectorVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDetectorVersionRequest"}, + "output":{"shape":"CreateDetectorVersionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates a detector version. The detector version starts in a DRAFT status.

    " + }, + "CreateModel":{ + "name":"CreateModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateModelRequest"}, + "output":{"shape":"CreateModelResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates a model using the specified model type.

    " + }, + "CreateModelVersion":{ + "name":"CreateModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateModelVersionRequest"}, + "output":{"shape":"CreateModelVersionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a version of the model using the specified model type and model id.

    " + }, + "CreateRule":{ + "name":"CreateRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRuleRequest"}, + "output":{"shape":"CreateRuleResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates a rule for use with the specified detector.

    " + }, + "CreateVariable":{ + "name":"CreateVariable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVariableRequest"}, + "output":{"shape":"CreateVariableResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates a variable.

    " + }, + "DeleteDetector":{ + "name":"DeleteDetector", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDetectorRequest"}, + "output":{"shape":"DeleteDetectorResult"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes the detector. Before deleting a detector, you must first delete all detector versions and rule versions associated with the detector.

    When you delete a detector, Amazon Fraud Detector permanently deletes the detector and the data is no longer stored in Amazon Fraud Detector.

    " + }, + "DeleteDetectorVersion":{ + "name":"DeleteDetectorVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDetectorVersionRequest"}, + "output":{"shape":"DeleteDetectorVersionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes the detector version. You cannot delete detector versions that are in ACTIVE status.

    When you delete a detector version, Amazon Fraud Detector permanently deletes the detector version and the data is no longer stored in Amazon Fraud Detector.

    " + }, + "DeleteEntityType":{ + "name":"DeleteEntityType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEntityTypeRequest"}, + "output":{"shape":"DeleteEntityTypeResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes an entity type.

    You cannot delete an entity type that is included in an event type.

    When you delete an entity type, Amazon Fraud Detector permanently deletes that entity type from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

    " + }, + "DeleteEvent":{ + "name":"DeleteEvent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventRequest"}, + "output":{"shape":"DeleteEventResult"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Deletes the specified event.

    When you delete an event, Amazon Fraud Detector permanently deletes that event from the evaluation history, and the event data is no longer stored in Amazon Fraud Detector.

    " + }, + "DeleteEventType":{ + "name":"DeleteEventType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventTypeRequest"}, + "output":{"shape":"DeleteEventTypeResult"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes an event type.

    You cannot delete an event type that is used in a detector or a model.

    When you delete an event type, Amazon Fraud Detector permanently deletes that event type from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

    " + }, + "DeleteExternalModel":{ + "name":"DeleteExternalModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteExternalModelRequest"}, + "output":{"shape":"DeleteExternalModelResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Removes a SageMaker model from Amazon Fraud Detector.

    You can remove an Amazon SageMaker model if it is not associated with a detector version. Removing a SageMaker model disconnects it from Amazon Fraud Detector, but the model remains available in SageMaker.

    " + }, + "DeleteLabel":{ + "name":"DeleteLabel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLabelRequest"}, + "output":{"shape":"DeleteLabelResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes a label.

    You cannot delete labels that are included in an event type in Amazon Fraud Detector.

    You cannot delete a label assigned to an event ID. You must first delete the relevant event ID.

    When you delete a label, Amazon Fraud Detector permanently deletes that label from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

    " + }, + "DeleteModel":{ + "name":"DeleteModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteModelRequest"}, + "output":{"shape":"DeleteModelResult"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes a model.

    You can delete models and model versions in Amazon Fraud Detector, provided that they are not associated with a detector version.

    When you delete a model, Amazon Fraud Detector permanently deletes that model from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

    " + }, + "DeleteModelVersion":{ + "name":"DeleteModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteModelVersionRequest"}, + "output":{"shape":"DeleteModelVersionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes a model version.

    You can delete models and model versions in Amazon Fraud Detector, provided that they are not associated with a detector version.

    When you delete a model version, Amazon Fraud Detector permanently deletes that model version from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

    " + }, + "DeleteOutcome":{ + "name":"DeleteOutcome", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOutcomeRequest"}, + "output":{"shape":"DeleteOutcomeResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes an outcome.

    You cannot delete an outcome that is used in a rule version.

    When you delete an outcome, Amazon Fraud Detector permanently deletes that outcome from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

    " + }, + "DeleteRule":{ + "name":"DeleteRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleRequest"}, + "output":{"shape":"DeleteRuleResult"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes the rule. You cannot delete a rule if it is used by an ACTIVE or INACTIVE detector version.

    When you delete a rule, Amazon Fraud Detector permanently deletes that rule from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

    " + }, + "DeleteVariable":{ + "name":"DeleteVariable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVariableRequest"}, + "output":{"shape":"DeleteVariableResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes a variable.

    You can't delete variables that are included in an event type in Amazon Fraud Detector.

    Amazon Fraud Detector automatically deletes model output variables and SageMaker model output variables when you delete the model. You can't delete these variables manually.

    When you delete a variable, Amazon Fraud Detector permanently deletes that variable from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

    " + }, + "DescribeDetector":{ + "name":"DescribeDetector", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDetectorRequest"}, + "output":{"shape":"DescribeDetectorResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets all versions for a specified detector.

    " + }, + "DescribeModelVersions":{ + "name":"DescribeModelVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeModelVersionsRequest"}, + "output":{"shape":"DescribeModelVersionsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets all of the model versions for the specified model type or for the specified model type and model ID. You can also get details for a single, specified model version.

    " + }, + "GetDetectorVersion":{ + "name":"GetDetectorVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDetectorVersionRequest"}, + "output":{"shape":"GetDetectorVersionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets a particular detector version.

    " + }, + "GetDetectors":{ + "name":"GetDetectors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDetectorsRequest"}, + "output":{"shape":"GetDetectorsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets all detectors or a single detector if a detectorId is specified. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 5 and 10. To get the next page results, provide the pagination token from the GetDetectorsResponse as part of your request. A null pagination token fetches the records from the beginning.
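    As a rough illustration of this pagination contract, the following AWS SDK for Java v2 sketch pages through all detectors by feeding the returned token back into the next request; the client setup is an assumption and not part of this model.

    ```java
    import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
    import software.amazon.awssdk.services.frauddetector.model.GetDetectorsRequest;
    import software.amazon.awssdk.services.frauddetector.model.GetDetectorsResponse;

    public class ListDetectors {
        public static void main(String[] args) {
            try (FraudDetectorClient client = FraudDetectorClient.create()) {
                String nextToken = null;                    // a null token fetches records from the beginning
                do {
                    GetDetectorsResponse page = client.getDetectors(
                            GetDetectorsRequest.builder()
                                    .maxResults(10)         // must be between 5 and 10 when provided
                                    .nextToken(nextToken)
                                    .build());
                    page.detectors().forEach(d -> System.out.println(d.detectorId()));
                    nextToken = page.nextToken();           // null once there are no more pages
                } while (nextToken != null);
            }
        }
    }
    ```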

    " + }, + "GetEntityTypes":{ + "name":"GetEntityTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEntityTypesRequest"}, + "output":{"shape":"GetEntityTypesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets all entity types or a specific entity type if a name is specified. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 5 and 10. To get the next page results, provide the pagination token from the GetEntityTypesResponse as part of your request. A null pagination token fetches the records from the beginning.

    " + }, + "GetEventPrediction":{ + "name":"GetEventPrediction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEventPredictionRequest"}, + "output":{"shape":"GetEventPredictionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Evaluates an event against a detector version. If a detector version ID is not provided, the detector's ACTIVE version is used.
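    A minimal sketch of calling this operation with the AWS SDK for Java v2 is shown below; the detector name, entity values, and event variable names (for example email_address and ip_address) are placeholder assumptions that would need to match variables you have actually defined.

    ```java
    import java.util.Map;
    import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
    import software.amazon.awssdk.services.frauddetector.model.Entity;
    import software.amazon.awssdk.services.frauddetector.model.GetEventPredictionRequest;
    import software.amazon.awssdk.services.frauddetector.model.GetEventPredictionResponse;

    public class PredictEvent {
        public static void main(String[] args) {
            try (FraudDetectorClient client = FraudDetectorClient.create()) {
                GetEventPredictionResponse prediction = client.getEventPrediction(
                        GetEventPredictionRequest.builder()
                                .detectorId("sample_detector")                // hypothetical detector ID
                                .eventId("802454d3-f7d8-482d-97e8-c4b6db9a0428")
                                .eventTypeName("sample_registration")         // hypothetical event type
                                .eventTimestamp("2020-07-13T23:18:21Z")
                                .entities(Entity.builder()
                                        .entityType("customer")
                                        .entityId("12345")                    // pass "unknown" if the ID is not available
                                        .build())
                                .eventVariables(Map.of(
                                        "email_address", "user@example.com",  // assumed variable names
                                        "ip_address", "192.0.2.1"))
                                .build());
                System.out.println(prediction.modelScores());
                System.out.println(prediction.ruleResults());
            }
        }
    }
    ```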

    " + }, + "GetEventTypes":{ + "name":"GetEventTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEventTypesRequest"}, + "output":{"shape":"GetEventTypesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets all event types or a specific event type if name is provided. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 5 and 10. To get the next page results, provide the pagination token from the GetEventTypesResponse as part of your request. A null pagination token fetches the records from the beginning.

    " + }, + "GetExternalModels":{ + "name":"GetExternalModels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetExternalModelsRequest"}, + "output":{"shape":"GetExternalModelsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets the details for one or more Amazon SageMaker models that have been imported into the service. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 5 and 10. To get the next page results, provide the pagination token from the GetExternalModelsResult as part of your request. A null pagination token fetches the records from the beginning.

    " + }, + "GetKMSEncryptionKey":{ + "name":"GetKMSEncryptionKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"GetKMSEncryptionKeyResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets the encryption key if a Key Management Service (KMS) customer master key (CMK) has been specified to be used to encrypt content in Amazon Fraud Detector.

    " + }, + "GetLabels":{ + "name":"GetLabels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLabelsRequest"}, + "output":{"shape":"GetLabelsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets all labels or a specific label if name is provided. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 50 records per page. If you provide a maxResults, the value must be between 10 and 50. To get the next page results, provide the pagination token from the GetLabelsResponse as part of your request. A null pagination token fetches the records from the beginning.

    " + }, + "GetModelVersion":{ + "name":"GetModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetModelVersionRequest"}, + "output":{"shape":"GetModelVersionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets the details of the specified model version.

    " + }, + "GetModels":{ + "name":"GetModels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetModelsRequest"}, + "output":{"shape":"GetModelsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets one or more models. Gets all models for the AWS account if no model type and no model ID are provided. Gets all models for the AWS account and model type if the model type is specified but no model ID is provided. Gets a specific model if the (model type, model ID) tuple is specified.

    This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 1 and 10. To get the next page results, provide the pagination token from the response as part of your request. A null pagination token fetches the records from the beginning.

    " + }, + "GetOutcomes":{ + "name":"GetOutcomes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOutcomesRequest"}, + "output":{"shape":"GetOutcomesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets one or more outcomes. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 100 records per page. If you provide a maxResults, the value must be between 50 and 100. To get the next page results, provide the pagination token from the GetOutcomesResult as part of your request. A null pagination token fetches the records from the beginning.

    " + }, + "GetRules":{ + "name":"GetRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRulesRequest"}, + "output":{"shape":"GetRulesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets all rules for a detector (paginated) if ruleId and ruleVersion are not specified. Gets all rules for the detector and the ruleId if present (paginated). Gets a specific rule if both the ruleId and the ruleVersion are specified.

    This is a paginated API. Providing a null maxResults retrieves a maximum of 100 records per page. If you provide maxResults, the value must be between 50 and 100. To get the next page results, provide the pagination token from the GetRulesResult as part of your request. A null pagination token fetches the records from the beginning.

    " + }, + "GetVariables":{ + "name":"GetVariables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetVariablesRequest"}, + "output":{"shape":"GetVariablesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets all of the variables or the specific variable. This is a paginated API. Providing a null maxSizePerPage retrieves a maximum of 100 records per page. If you provide maxSizePerPage, the value must be between 50 and 100. To get the next page results, provide the pagination token from the GetVariablesResult as part of your request. A null pagination token fetches the records from the beginning.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists all tags associated with the resource. This is a paginated API. To get the next page results, provide the pagination token from the response as part of your request. A null pagination token fetches the records from the beginning.

    " + }, + "PutDetector":{ + "name":"PutDetector", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDetectorRequest"}, + "output":{"shape":"PutDetectorResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates or updates a detector.

    " + }, + "PutEntityType":{ + "name":"PutEntityType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutEntityTypeRequest"}, + "output":{"shape":"PutEntityTypeResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates or updates an entity type. An entity represents who is performing the event. As part of a fraud prediction, you pass the entity ID to indicate the specific entity who performed the event. An entity type classifies the entity. Example classifications include customer, merchant, or account.

    " + }, + "PutEventType":{ + "name":"PutEventType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutEventTypeRequest"}, + "output":{"shape":"PutEventTypeResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates or updates an event type. An event is a business activity that is evaluated for fraud risk. With Amazon Fraud Detector, you generate fraud predictions for events. An event type defines the structure for an event sent to Amazon Fraud Detector. This includes the variables sent as part of the event, the entity performing the event (such as a customer), and the labels that classify the event. Example event types include online payment transactions, account registrations, and authentications.
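    For illustration, a hedged AWS SDK for Java v2 sketch of defining such an event type might look like the following; the variable, label, and entity type names are assumptions and must already exist in your account.

    ```java
    import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
    import software.amazon.awssdk.services.frauddetector.model.PutEventTypeRequest;

    public class DefineEventType {
        public static void main(String[] args) {
            try (FraudDetectorClient client = FraudDetectorClient.create()) {
                client.putEventType(PutEventTypeRequest.builder()
                        .name("sample_registration")                    // hypothetical event type name
                        .description("Account registration events")
                        .eventVariables("email_address", "ip_address") // previously created variables
                        .labels("fraud", "legit")                      // previously created labels
                        .entityTypes("customer")                       // previously created entity type
                        .build());
            }
        }
    }
    ```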

    " + }, + "PutExternalModel":{ + "name":"PutExternalModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutExternalModelRequest"}, + "output":{"shape":"PutExternalModelResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates or updates an Amazon SageMaker model endpoint. You can also use this action to update the configuration of the model endpoint, including the IAM role and/or the mapped variables.

    " + }, + "PutKMSEncryptionKey":{ + "name":"PutKMSEncryptionKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutKMSEncryptionKeyRequest"}, + "output":{"shape":"PutKMSEncryptionKeyResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Specifies the Key Management Service (KMS) customer master key (CMK) to be used to encrypt content in Amazon Fraud Detector.

    " + }, + "PutLabel":{ + "name":"PutLabel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutLabelRequest"}, + "output":{"shape":"PutLabelResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates or updates a label. A label classifies an event as fraudulent or legitimate. Labels are associated with event types and used to train supervised machine learning models in Amazon Fraud Detector.

    " + }, + "PutOutcome":{ + "name":"PutOutcome", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutOutcomeRequest"}, + "output":{"shape":"PutOutcomeResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates or updates an outcome.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Assigns tags to a resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Removes tags from a resource.

    " + }, + "UpdateDetectorVersion":{ + "name":"UpdateDetectorVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDetectorVersionRequest"}, + "output":{"shape":"UpdateDetectorVersionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates a detector version. The detector version attributes that you can update include models, external model endpoints, rules, rule execution mode, and description. You can only update a DRAFT detector version.

    " + }, + "UpdateDetectorVersionMetadata":{ + "name":"UpdateDetectorVersionMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDetectorVersionMetadataRequest"}, + "output":{"shape":"UpdateDetectorVersionMetadataResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates the detector version's description. You can update the metadata for any detector version (DRAFT, ACTIVE, or INACTIVE).

    " + }, + "UpdateDetectorVersionStatus":{ + "name":"UpdateDetectorVersionStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDetectorVersionStatusRequest"}, + "output":{"shape":"UpdateDetectorVersionStatusResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates the detector version’s status. You can perform the following promotions or demotions using UpdateDetectorVersionStatus: DRAFT to ACTIVE, ACTIVE to INACTIVE, and INACTIVE to ACTIVE.
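    As a rough example of the DRAFT to ACTIVE promotion, a sketch using the AWS SDK for Java v2 could look like the following; the detector and version IDs are placeholders and the request member names are assumptions not shown in this part of the model.

    ```java
    import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
    import software.amazon.awssdk.services.frauddetector.model.UpdateDetectorVersionStatusRequest;

    public class ActivateDetectorVersion {
        public static void main(String[] args) {
            try (FraudDetectorClient client = FraudDetectorClient.create()) {
                client.updateDetectorVersionStatus(UpdateDetectorVersionStatusRequest.builder()
                        .detectorId("sample_detector")  // hypothetical detector ID
                        .detectorVersionId("1")         // the DRAFT version to promote
                        .status("ACTIVE")               // DRAFT -> ACTIVE
                        .build());
            }
        }
    }
    ```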

    " + }, + "UpdateModel":{ + "name":"UpdateModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateModelRequest"}, + "output":{"shape":"UpdateModelResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates a model. You can update the description attribute using this action.

    " + }, + "UpdateModelVersion":{ + "name":"UpdateModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateModelVersionRequest"}, + "output":{"shape":"UpdateModelVersionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates a model version. Updating a model version retrains an existing model version using updated training data and produces a new minor version of the model. You can update the training data set location and data access role attributes using this action. This action creates and trains a new minor version of the model, for example version 1.01, 1.02, 1.03.

    " + }, + "UpdateModelVersionStatus":{ + "name":"UpdateModelVersionStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateModelVersionStatusRequest"}, + "output":{"shape":"UpdateModelVersionStatusResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates the status of a model version.

    You can perform the following status updates:

    1. Change the TRAINING_COMPLETE status to ACTIVE.

    2. Change ACTIVE to INACTIVE (see the sketch below).
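    Assuming the request carries the model identifiers plus the target status (a sketch under that assumption, with placeholder IDs and an assumed ONLINE_FRAUD_INSIGHTS model type), activating a trained model version with the AWS SDK for Java v2 might look like:

    ```java
    import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
    import software.amazon.awssdk.services.frauddetector.model.UpdateModelVersionStatusRequest;

    public class ActivateModelVersion {
        public static void main(String[] args) {
            try (FraudDetectorClient client = FraudDetectorClient.create()) {
                client.updateModelVersionStatus(UpdateModelVersionStatusRequest.builder()
                        .modelId("sample_fraud_model")       // hypothetical model ID
                        .modelType("ONLINE_FRAUD_INSIGHTS")  // assumed model type value
                        .modelVersionNumber("1.01")
                        .status("ACTIVE")                    // TRAINING_COMPLETE -> ACTIVE
                        .build());
            }
        }
    }
    ```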

    " + }, + "UpdateRuleMetadata":{ + "name":"UpdateRuleMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRuleMetadataRequest"}, + "output":{"shape":"UpdateRuleMetadataResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates a rule's metadata. The description attribute can be updated.

    " + }, + "UpdateRuleVersion":{ + "name":"UpdateRuleVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRuleVersionRequest"}, + "output":{"shape":"UpdateRuleVersionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates a rule version, resulting in a new rule version (version 1, 2, 3 ...).

    " + }, + "UpdateVariable":{ + "name":"UpdateVariable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateVariableRequest"}, + "output":{"shape":"UpdateVariableResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates a variable.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"string"} + }, + "documentation":"

    An exception indicating Amazon Fraud Detector does not have the needed permissions. This can occur if you submit a request, such as PutExternalModel, that specifies a role that is not in your account.

    ", + "exception":true + }, + "BatchCreateVariableError":{ + "type":"structure", + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The name.

    " + }, + "code":{ + "shape":"integer", + "documentation":"

    The error code.

    " + }, + "message":{ + "shape":"string", + "documentation":"

    The error message.

    " + } + }, + "documentation":"

    Provides the error of the batch create variable API.

    " + }, + "BatchCreateVariableErrorList":{ + "type":"list", + "member":{"shape":"BatchCreateVariableError"} + }, + "BatchCreateVariableRequest":{ + "type":"structure", + "required":["variableEntries"], + "members":{ + "variableEntries":{ + "shape":"VariableEntryList", + "documentation":"

    The list of variables for the batch create variable request.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + } + } + }, + "BatchCreateVariableResult":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"BatchCreateVariableErrorList", + "documentation":"

    Provides the errors for the BatchCreateVariable request.

    " + } + } + }, + "BatchGetVariableError":{ + "type":"structure", + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The error name.

    " + }, + "code":{ + "shape":"integer", + "documentation":"

    The error code.

    " + }, + "message":{ + "shape":"string", + "documentation":"

    The error message.

    " + } + }, + "documentation":"

    Provides the error of the batch get variable API.

    " + }, + "BatchGetVariableErrorList":{ + "type":"list", + "member":{"shape":"BatchGetVariableError"} + }, + "BatchGetVariableRequest":{ + "type":"structure", + "required":["names"], + "members":{ + "names":{ + "shape":"NameList", + "documentation":"

    The list of variable names to get.

    " + } + } + }, + "BatchGetVariableResult":{ + "type":"structure", + "members":{ + "variables":{ + "shape":"VariableList", + "documentation":"

    The returned variables.

    " + }, + "errors":{ + "shape":"BatchGetVariableErrorList", + "documentation":"

    The errors from the request.

    " + } + } + }, + "ConflictException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"string"} + }, + "documentation":"

    An exception indicating there was a conflict during a delete operation. The following delete operations can cause a conflict exception:

    • DeleteDetector: A conflict exception will occur if the detector has associated Rules or DetectorVersions. You can only delete a detector if it has no Rules or DetectorVersions.

    • DeleteDetectorVersion: A conflict exception will occur if the DetectorVersion status is ACTIVE.

    • DeleteRule: A conflict exception will occur if the RuleVersion is in use by an associated ACTIVE or INACTIVE DetectorVersion.

    ", + "exception":true + }, + "CreateDetectorVersionRequest":{ + "type":"structure", + "required":[ + "detectorId", + "rules" + ], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The ID of the detector under which you want to create a new version.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The description of the detector version.

    " + }, + "externalModelEndpoints":{ + "shape":"ListOfStrings", + "documentation":"

    The Amazon SageMaker model endpoints to include in the detector version.

    " + }, + "rules":{ + "shape":"RuleList", + "documentation":"

    The rules to include in the detector version.

    " + }, + "modelVersions":{ + "shape":"ListOfModelVersions", + "documentation":"

    The model versions to include in the detector version.

    " + }, + "ruleExecutionMode":{ + "shape":"RuleExecutionMode", + "documentation":"

    The rule execution mode for the rules included in the detector version.

    You can define and edit the rule mode at the detector version level, when it is in draft status.

    If you specify FIRST_MATCHED, Amazon Fraud Detector evaluates rules sequentially, first to last, stopping at the first matched rule. Amazon Fraud Detector then provides the outcomes for that single rule.

    If you specify ALL_MATCHED, Amazon Fraud Detector evaluates all rules and returns the outcomes for all matched rules.

    The default behavior is FIRST_MATCHED.
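    To make the rule execution mode choice concrete, here is a hedged AWS SDK for Java v2 sketch that creates a detector version running its rules in FIRST_MATCHED mode; the detector ID, rule ID, and rule version are placeholders, and the Rule member names are assumptions not shown in this part of the model.

    ```java
    import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
    import software.amazon.awssdk.services.frauddetector.model.CreateDetectorVersionRequest;
    import software.amazon.awssdk.services.frauddetector.model.CreateDetectorVersionResponse;
    import software.amazon.awssdk.services.frauddetector.model.Rule;

    public class CreateDraftDetectorVersion {
        public static void main(String[] args) {
            try (FraudDetectorClient client = FraudDetectorClient.create()) {
                CreateDetectorVersionResponse created = client.createDetectorVersion(
                        CreateDetectorVersionRequest.builder()
                                .detectorId("sample_detector")       // hypothetical detector ID
                                .rules(Rule.builder()                // a rule created beforehand
                                        .detectorId("sample_detector")
                                        .ruleId("high_risk_rule")
                                        .ruleVersion("1")
                                        .build())
                                .ruleExecutionMode("FIRST_MATCHED")  // stop at the first matched rule
                                .build());
                System.out.println(created.detectorVersionId() + " " + created.status());
            }
        }
    }
    ```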

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + } + } + }, + "CreateDetectorVersionResult":{ + "type":"structure", + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The ID for the created version's parent detector.

    " + }, + "detectorVersionId":{ + "shape":"nonEmptyString", + "documentation":"

    The ID for the created detector version.

    " + }, + "status":{ + "shape":"DetectorVersionStatus", + "documentation":"

    The status of the detector version.

    " + } + } + }, + "CreateModelRequest":{ + "type":"structure", + "required":[ + "modelId", + "modelType", + "eventTypeName" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The model description.

    " + }, + "eventTypeName":{ + "shape":"string", + "documentation":"

    The name of the event type.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + } + } + }, + "CreateModelResult":{ + "type":"structure", + "members":{ + } + }, + "CreateModelVersionRequest":{ + "type":"structure", + "required":[ + "modelId", + "modelType", + "trainingDataSource", + "trainingDataSchema" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "trainingDataSource":{ + "shape":"TrainingDataSourceEnum", + "documentation":"

    The training data source location in Amazon S3.

    " + }, + "trainingDataSchema":{ + "shape":"TrainingDataSchema", + "documentation":"

    The training data schema.

    " + }, + "externalEventsDetail":{ + "shape":"ExternalEventsDetail", + "documentation":"

    Details for the external events data used for model version training. Required if trainingDataSource is EXTERNAL_EVENTS.
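    For orientation only, a sketch of creating a model version from external events with the AWS SDK for Java v2 might look like the following; the schema member names (modelVariables, labelMapper), the model type value, the bucket, and the role ARN are assumptions rather than details taken from this model file.

    ```java
    import java.util.List;
    import java.util.Map;
    import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
    import software.amazon.awssdk.services.frauddetector.model.CreateModelVersionRequest;
    import software.amazon.awssdk.services.frauddetector.model.ExternalEventsDetail;
    import software.amazon.awssdk.services.frauddetector.model.LabelSchema;
    import software.amazon.awssdk.services.frauddetector.model.TrainingDataSchema;

    public class TrainModelVersion {
        public static void main(String[] args) {
            try (FraudDetectorClient client = FraudDetectorClient.create()) {
                client.createModelVersion(CreateModelVersionRequest.builder()
                        .modelId("sample_fraud_model")           // hypothetical model ID
                        .modelType("ONLINE_FRAUD_INSIGHTS")      // assumed model type value
                        .trainingDataSource("EXTERNAL_EVENTS")   // training data lives in Amazon S3
                        .trainingDataSchema(TrainingDataSchema.builder()
                                .modelVariables("email_address", "ip_address")
                                .labelSchema(LabelSchema.builder()
                                        .labelMapper(Map.of(
                                                "FRAUD", List.of("fraud"),
                                                "LEGIT", List.of("legit")))
                                        .build())
                                .build())
                        .externalEventsDetail(ExternalEventsDetail.builder()
                                .dataLocation("s3://amzn-s3-demo-bucket/training-events.csv")
                                .dataAccessRoleArn("arn:aws:iam::123456789012:role/FraudDetectorDataAccess")
                                .build())
                        .build());
            }
        }
    }
    ```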

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + } + } + }, + "CreateModelVersionResult":{ + "type":"structure", + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "modelVersionNumber":{ + "shape":"nonEmptyString", + "documentation":"

    The model version number of the model version created.

    " + }, + "status":{ + "shape":"string", + "documentation":"

    The model version status.

    " + } + } + }, + "CreateRuleRequest":{ + "type":"structure", + "required":[ + "ruleId", + "detectorId", + "expression", + "language", + "outcomes" + ], + "members":{ + "ruleId":{ + "shape":"identifier", + "documentation":"

    The rule ID.

    " + }, + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID for the rule's parent detector.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The rule description.

    " + }, + "expression":{ + "shape":"ruleExpression", + "documentation":"

    The rule expression.

    " + }, + "language":{ + "shape":"Language", + "documentation":"

    The language of the rule.

    " + }, + "outcomes":{ + "shape":"NonEmptyListOfStrings", + "documentation":"

    The outcome or outcomes returned when the rule expression matches.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + } + } + }, + "CreateRuleResult":{ + "type":"structure", + "members":{ + "rule":{ + "shape":"Rule", + "documentation":"

    The created rule.

    " + } + } + }, + "CreateVariableRequest":{ + "type":"structure", + "required":[ + "name", + "dataType", + "dataSource", + "defaultValue" + ], + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The name of the variable.

    " + }, + "dataType":{ + "shape":"DataType", + "documentation":"

    The data type.

    " + }, + "dataSource":{ + "shape":"DataSource", + "documentation":"

    The source of the data.

    " + }, + "defaultValue":{ + "shape":"string", + "documentation":"

    The default value for the variable when no value is received.

    " + }, + "description":{ + "shape":"string", + "documentation":"

    The description.

    " + }, + "variableType":{ + "shape":"string", + "documentation":"

    The variable type. For more information see Variable types.

    Valid Values: AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE | BILLING_ZIP | CARD_BIN | CATEGORICAL | CURRENCY_CODE | EMAIL_ADDRESS | FINGERPRINT | FRAUD_LABEL | FREE_FORM_TEXT | IP_ADDRESS | NUMERIC | ORDER_ID | PAYMENT_TYPE | PHONE_NUMBER | PRICE | PRODUCT_CATEGORY | SHIPPING_ADDRESS_L1 | SHIPPING_ADDRESS_L2 | SHIPPING_CITY | SHIPPING_COUNTRY | SHIPPING_NAME | SHIPPING_PHONE | SHIPPING_STATE | SHIPPING_ZIP | USERAGENT
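    A hedged AWS SDK for Java v2 sketch of creating one such variable follows; the variable name and values are illustrative.

    ```java
    import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
    import software.amazon.awssdk.services.frauddetector.model.CreateVariableRequest;

    public class CreateEmailVariable {
        public static void main(String[] args) {
            try (FraudDetectorClient client = FraudDetectorClient.create()) {
                client.createVariable(CreateVariableRequest.builder()
                        .name("email_address")          // hypothetical variable name
                        .dataType("STRING")
                        .dataSource("EVENT")
                        .defaultValue("unknown")        // used when no value is received
                        .variableType("EMAIL_ADDRESS")  // one of the valid values listed above
                        .description("Customer email address sent with each event")
                        .build());
            }
        }
    }
    ```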

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + } + } + }, + "CreateVariableResult":{ + "type":"structure", + "members":{ + } + }, + "CsvIndexToVariableMap":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"string"} + }, + "DataSource":{ + "type":"string", + "enum":[ + "EVENT", + "MODEL_SCORE", + "EXTERNAL_MODEL_SCORE" + ] + }, + "DataType":{ + "type":"string", + "enum":[ + "STRING", + "INTEGER", + "FLOAT", + "BOOLEAN" + ] + }, + "DataValidationMetrics":{ + "type":"structure", + "members":{ + "fileLevelMessages":{ + "shape":"fileValidationMessageList", + "documentation":"

    The file-specific model training validation messages.

    " + }, + "fieldLevelMessages":{ + "shape":"fieldValidationMessageList", + "documentation":"

    The field-specific model training validation messages.

    " + } + }, + "documentation":"

    The model training validation messages.

    " + }, + "DeleteDetectorRequest":{ + "type":"structure", + "required":["detectorId"], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The ID of the detector to delete.

    " + } + } + }, + "DeleteDetectorResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteDetectorVersionRequest":{ + "type":"structure", + "required":[ + "detectorId", + "detectorVersionId" + ], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The ID of the parent detector for the detector version to delete.

    " + }, + "detectorVersionId":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The ID of the detector version to delete.

    " + } + } + }, + "DeleteDetectorVersionResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteEntityTypeRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name of the entity type to delete.

    " + } + } + }, + "DeleteEntityTypeResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteEventRequest":{ + "type":"structure", + "required":[ + "eventId", + "eventTypeName" + ], + "members":{ + "eventId":{ + "shape":"identifier", + "documentation":"

    The ID of the event to delete.

    " + }, + "eventTypeName":{ + "shape":"identifier", + "documentation":"

    The name of the event type.

    " + } + } + }, + "DeleteEventResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteEventTypeRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name of the event type to delete.

    " + } + } + }, + "DeleteEventTypeResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteExternalModelRequest":{ + "type":"structure", + "required":["modelEndpoint"], + "members":{ + "modelEndpoint":{ + "shape":"sageMakerEndpointIdentifier", + "documentation":"

    The endpoint of the Amazon SageMaker model to delete.

    " + } + } + }, + "DeleteExternalModelResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteLabelRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name of the label to delete.

    " + } + } + }, + "DeleteLabelResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteModelRequest":{ + "type":"structure", + "required":[ + "modelId", + "modelType" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID of the model to delete.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type of the model to delete.

    " + } + } + }, + "DeleteModelResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteModelVersionRequest":{ + "type":"structure", + "required":[ + "modelId", + "modelType", + "modelVersionNumber" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID of the model version to delete.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type of the model version to delete.

    " + }, + "modelVersionNumber":{ + "shape":"floatVersionString", + "documentation":"

    The model version number of the model version to delete.

    " + } + } + }, + "DeleteModelVersionResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteOutcomeRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name of the outcome to delete.

    " + } + } + }, + "DeleteOutcomeResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteRuleRequest":{ + "type":"structure", + "required":["rule"], + "members":{ + "rule":{"shape":"Rule"} + } + }, + "DeleteRuleResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteVariableRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The name of the variable to delete.

    " + } + } + }, + "DeleteVariableResult":{ + "type":"structure", + "members":{ + } + }, + "DescribeDetectorRequest":{ + "type":"structure", + "required":["detectorId"], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token from the previous response.

    " + }, + "maxResults":{ + "shape":"DetectorVersionMaxResults", + "documentation":"

    The maximum number of results to return for the request.

    " + } + } + }, + "DescribeDetectorResult":{ + "type":"structure", + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID.

    " + }, + "detectorVersionSummaries":{ + "shape":"DetectorVersionSummaryList", + "documentation":"

    The status and description for each detector version.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token to be used for subsequent requests.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The detector ARN.

    " + } + } + }, + "DescribeModelVersionsRequest":{ + "type":"structure", + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelVersionNumber":{ + "shape":"floatVersionString", + "documentation":"

    The model version number.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token from the previous results.

    " + }, + "maxResults":{ + "shape":"modelsMaxPageSize", + "documentation":"

    The maximum number of results to return.

    " + } + } + }, + "DescribeModelVersionsResult":{ + "type":"structure", + "members":{ + "modelVersionDetails":{ + "shape":"modelVersionDetailList", + "documentation":"

    The model version details.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token.

    " + } + } + }, + "Detector":{ + "type":"structure", + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The detector description.

    " + }, + "eventTypeName":{ + "shape":"identifier", + "documentation":"

    The name of the event type.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the detector was last updated.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the detector was created.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The detector ARN.

    " + } + }, + "documentation":"

    The detector.

    " + }, + "DetectorList":{ + "type":"list", + "member":{"shape":"Detector"} + }, + "DetectorVersionMaxResults":{ + "type":"integer", + "box":true, + "max":2500, + "min":1000 + }, + "DetectorVersionStatus":{ + "type":"string", + "enum":[ + "DRAFT", + "ACTIVE", + "INACTIVE" + ] + }, + "DetectorVersionSummary":{ + "type":"structure", + "members":{ + "detectorVersionId":{ + "shape":"nonEmptyString", + "documentation":"

    The detector version ID.

    " + }, + "status":{ + "shape":"DetectorVersionStatus", + "documentation":"

    The detector version status.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The detector version description.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the detector version was last updated.

    " + } + }, + "documentation":"

    The summary of the detector version.

    " + }, + "DetectorVersionSummaryList":{ + "type":"list", + "member":{"shape":"DetectorVersionSummary"} + }, + "DetectorsMaxResults":{ + "type":"integer", + "box":true, + "max":10, + "min":5 + }, + "Entity":{ + "type":"structure", + "required":[ + "entityType", + "entityId" + ], + "members":{ + "entityType":{ + "shape":"string", + "documentation":"

    The entity type.

    " + }, + "entityId":{ + "shape":"identifier", + "documentation":"

    The entity ID. If you do not know the entityId, you can pass unknown, which is a reserved string literal.

    " + } + }, + "documentation":"

    The entity details.

    " + }, + "EntityType":{ + "type":"structure", + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The entity type name.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The entity type description.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the entity type was last updated.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the entity type was created.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The entity type ARN.

    " + } + }, + "documentation":"

    The entity type details.

    " + }, + "EventType":{ + "type":"structure", + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The event type name.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The event type description.

    " + }, + "eventVariables":{ + "shape":"ListOfStrings", + "documentation":"

    The event type event variables.

    " + }, + "labels":{ + "shape":"ListOfStrings", + "documentation":"

    The event type labels.

    " + }, + "entityTypes":{ + "shape":"NonEmptyListOfStrings", + "documentation":"

    The event type entity types.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the event type was last updated.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the event type was created.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The entity type ARN.

    " + } + }, + "documentation":"

    The event type details.

    " + }, + "EventVariableMap":{ + "type":"map", + "key":{"shape":"variableName"}, + "value":{"shape":"variableValue"}, + "min":1 + }, + "ExternalEventsDetail":{ + "type":"structure", + "required":[ + "dataLocation", + "dataAccessRoleArn" + ], + "members":{ + "dataLocation":{ + "shape":"s3BucketLocation", + "documentation":"

    The Amazon S3 bucket location for the data.

    " + }, + "dataAccessRoleArn":{ + "shape":"iamRoleArn", + "documentation":"

    The ARN of the role that provides Amazon Fraud Detector access to the data location.

    " + } + }, + "documentation":"

    Details for the external events data used for model version training.

    " + }, + "ExternalModel":{ + "type":"structure", + "members":{ + "modelEndpoint":{ + "shape":"string", + "documentation":"

    The Amazon SageMaker model endpoint.

    " + }, + "modelSource":{ + "shape":"ModelSource", + "documentation":"

    The source of the model.

    " + }, + "invokeModelEndpointRoleArn":{ + "shape":"string", + "documentation":"

    The role used to invoke the model.

    " + }, + "inputConfiguration":{ + "shape":"ModelInputConfiguration", + "documentation":"

    The input configuration.

    " + }, + "outputConfiguration":{ + "shape":"ModelOutputConfiguration", + "documentation":"

    The output configuration.

    " + }, + "modelEndpointStatus":{ + "shape":"ModelEndpointStatus", + "documentation":"

    The Amazon Fraud Detector status for the external model endpoint.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the model was last updated.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the model was created.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The model ARN.

    " + } + }, + "documentation":"

    The Amazon SageMaker model.

    " + }, + "ExternalModelEndpointDataBlobMap":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"ModelEndpointDataBlob"}, + "sensitive":true + }, + "ExternalModelList":{ + "type":"list", + "member":{"shape":"ExternalModel"} + }, + "ExternalModelsMaxResults":{ + "type":"integer", + "box":true, + "max":10, + "min":5 + }, + "FieldValidationMessage":{ + "type":"structure", + "members":{ + "fieldName":{ + "shape":"string", + "documentation":"

    The field name.

    " + }, + "identifier":{ + "shape":"string", + "documentation":"

    The message ID.

    " + }, + "title":{ + "shape":"string", + "documentation":"

    The message title.

    " + }, + "content":{ + "shape":"string", + "documentation":"

    The message content.

    " + }, + "type":{ + "shape":"string", + "documentation":"

    The message type.

    " + } + }, + "documentation":"

    The message details.

    " + }, + "FileValidationMessage":{ + "type":"structure", + "members":{ + "title":{ + "shape":"string", + "documentation":"

    The message title.

    " + }, + "content":{ + "shape":"string", + "documentation":"

    The message content.

    " + }, + "type":{ + "shape":"string", + "documentation":"

    The message type.

    " + } + }, + "documentation":"

    The message details.

    " + }, + "GetDetectorVersionRequest":{ + "type":"structure", + "required":[ + "detectorId", + "detectorVersionId" + ], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID.

    " + }, + "detectorVersionId":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The detector version ID.

    " + } + } + }, + "GetDetectorVersionResult":{ + "type":"structure", + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID.

    " + }, + "detectorVersionId":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The detector version ID.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The detector version description.

    " + }, + "externalModelEndpoints":{ + "shape":"ListOfStrings", + "documentation":"

    The Amazon SageMaker model endpoints included in the detector version.

    " + }, + "modelVersions":{ + "shape":"ListOfModelVersions", + "documentation":"

    The model versions included in the detector version.

    " + }, + "rules":{ + "shape":"RuleList", + "documentation":"

    The rules included in the detector version.

    " + }, + "status":{ + "shape":"DetectorVersionStatus", + "documentation":"

    The status of the detector version.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    The timestamp when the detector version was last updated.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    The timestamp when the detector version was created.

    " + }, + "ruleExecutionMode":{ + "shape":"RuleExecutionMode", + "documentation":"

    The execution mode of the rules in the detector version.

    FIRST_MATCHED indicates that Amazon Fraud Detector evaluates rules sequentially, first to last, stopping at the first matched rule. Amazon Fraud Detector then provides the outcomes for that single rule.

    ALL_MATCHED indicates that Amazon Fraud Detector evaluates all rules and returns the outcomes for all matched rules. You can define and edit the rule mode at the detector version level, when it is in draft status.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The detector version ARN.

    " + } + } + }, + "GetDetectorsRequest":{ + "type":"structure", + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token for the subsequent request.

    " + }, + "maxResults":{ + "shape":"DetectorsMaxResults", + "documentation":"

    The maximum number of objects to return for the request.

    " + } + } + }, + "GetDetectorsResult":{ + "type":"structure", + "members":{ + "detectors":{ + "shape":"DetectorList", + "documentation":"

    The detectors.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token.

    " + } + } + }, + "GetEntityTypesRequest":{ + "type":"structure", + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token for the subsequent request.

    " + }, + "maxResults":{ + "shape":"entityTypesMaxResults", + "documentation":"

    The maximum number of objects to return for the request.

    " + } + } + }, + "GetEntityTypesResult":{ + "type":"structure", + "members":{ + "entityTypes":{ + "shape":"entityTypeList", + "documentation":"

    An array of entity types.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token.

    " + } + } + }, + "GetEventPredictionRequest":{ + "type":"structure", + "required":[ + "detectorId", + "eventId", + "eventTypeName", + "entities", + "eventTimestamp", + "eventVariables" + ], + "members":{ + "detectorId":{ + "shape":"string", + "documentation":"

    The detector ID.

    " + }, + "detectorVersionId":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The detector version ID.

    " + }, + "eventId":{ + "shape":"string", + "documentation":"

    The unique ID used to identify the event.

    " + }, + "eventTypeName":{ + "shape":"string", + "documentation":"

    The event type associated with the detector specified for the prediction.

    " + }, + "entities":{ + "shape":"listOfEntities", + "documentation":"

    The entity type (associated with the detector's event type) and specific entity ID representing who performed the event. If an entity ID is not available, use \"UNKNOWN.\"

    " + }, + "eventTimestamp":{ + "shape":"string", + "documentation":"

    Timestamp that defines when the event under evaluation occurred.

    " + }, + "eventVariables":{ + "shape":"EventVariableMap", + "documentation":"

    Names of the event type's variables you defined in Amazon Fraud Detector to represent data elements and their corresponding values for the event you are sending for evaluation.

    " + }, + "externalModelEndpointDataBlobs":{ + "shape":"ExternalModelEndpointDataBlobMap", + "documentation":"

    The Amazon SageMaker model endpoint input data blobs.

    " + } + } + }, + "GetEventPredictionResult":{ + "type":"structure", + "members":{ + "modelScores":{ + "shape":"ListOfModelScores", + "documentation":"

    The model scores. Amazon Fraud Detector generates model scores between 0 and 1000, where 0 is low fraud risk and 1000 is high fraud risk. Model scores are directly related to the false positive rate (FPR). For example, a score of 600 corresponds to an estimated 10% false positive rate whereas a score of 900 corresponds to an estimated 2% false positive rate.

    " + }, + "ruleResults":{ + "shape":"ListOfRuleResults", + "documentation":"

    The results.

    " + } + } + }, + "GetEventTypesRequest":{ + "type":"structure", + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token for the subsequent request.

    " + }, + "maxResults":{ + "shape":"eventTypesMaxResults", + "documentation":"

    The maximum number of objects to return for the request.

    " + } + } + }, + "GetEventTypesResult":{ + "type":"structure", + "members":{ + "eventTypes":{ + "shape":"eventTypeList", + "documentation":"

    An array of event types.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token.

    " + } + } + }, + "GetExternalModelsRequest":{ + "type":"structure", + "members":{ + "modelEndpoint":{ + "shape":"string", + "documentation":"

    The Amazon SageMaker model endpoint.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token for the request.

    " + }, + "maxResults":{ + "shape":"ExternalModelsMaxResults", + "documentation":"

    The maximum number of objects to return for the request.

    " + } + } + }, + "GetExternalModelsResult":{ + "type":"structure", + "members":{ + "externalModels":{ + "shape":"ExternalModelList", + "documentation":"

    The Amazon SageMaker models.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token to be used in subsequent requests.

    " + } + } + }, + "GetKMSEncryptionKeyResult":{ + "type":"structure", + "members":{ + "kmsKey":{ + "shape":"KMSKey", + "documentation":"

    The KMS encryption key.

    " + } + } + }, + "GetLabelsRequest":{ + "type":"structure", + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name of the label or labels to get.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token for the subsequent request.

    " + }, + "maxResults":{ + "shape":"labelsMaxResults", + "documentation":"

    The maximum number of objects to return for the request.

    " + } + } + }, + "GetLabelsResult":{ + "type":"structure", + "members":{ + "labels":{ + "shape":"labelList", + "documentation":"

    An array of labels.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token.

    " + } + } + }, + "GetModelVersionRequest":{ + "type":"structure", + "required":[ + "modelId", + "modelType", + "modelVersionNumber" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "modelVersionNumber":{ + "shape":"floatVersionString", + "documentation":"

    The model version number.

    " + } + } + }, + "GetModelVersionResult":{ + "type":"structure", + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "modelVersionNumber":{ + "shape":"floatVersionString", + "documentation":"

    The model version number.

    " + }, + "trainingDataSource":{ + "shape":"TrainingDataSourceEnum", + "documentation":"

    The training data source.

    " + }, + "trainingDataSchema":{ + "shape":"TrainingDataSchema", + "documentation":"

    The training data schema.

    " + }, + "externalEventsDetail":{ + "shape":"ExternalEventsDetail", + "documentation":"

    The event details.

    " + }, + "status":{ + "shape":"string", + "documentation":"

    The model version status.

    Possible values are:

    • TRAINING_IN_PROGRESS

    • TRAINING_COMPLETE

    • ACTIVATE_REQUESTED

    • ACTIVATE_IN_PROGRESS

    • ACTIVE

    • INACTIVATE_REQUESTED

    • INACTIVATE_IN_PROGRESS

    • INACTIVE

    • ERROR
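
    A model version typically moves from TRAINING_IN_PROGRESS to TRAINING_COMPLETE before it can be activated. A simple polling sketch; the model ID, type, and version number are placeholders:

        import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
        import software.amazon.awssdk.services.frauddetector.model.GetModelVersionRequest;
        import software.amazon.awssdk.services.frauddetector.model.ModelTypeEnum;

        public class WaitForTraining {
            public static void main(String[] args) throws InterruptedException {
                FraudDetectorClient client = FraudDetectorClient.create();
                GetModelVersionRequest request = GetModelVersionRequest.builder()
                        .modelId("sample_fraud_model")
                        .modelType(ModelTypeEnum.ONLINE_FRAUD_INSIGHTS)
                        .modelVersionNumber("1.0")
                        .build();
                String status = client.getModelVersion(request).status();
                while ("TRAINING_IN_PROGRESS".equals(status)) {
                    Thread.sleep(60_000);                     // poll roughly once a minute
                    status = client.getModelVersion(request).status();
                    System.out.println("Model version status: " + status);
                }
            }
        }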

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The model version ARN.

    " + } + } + }, + "GetModelsRequest":{ + "type":"structure", + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token for the subsequent request.

    " + }, + "maxResults":{ + "shape":"modelsMaxPageSize", + "documentation":"

    The maximum number of objects to return for the request.

    " + } + } + }, + "GetModelsResult":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token to be used in subsequent requests.

    " + }, + "models":{ + "shape":"modelList", + "documentation":"

    The array of models.

    " + } + } + }, + "GetOutcomesRequest":{ + "type":"structure", + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name of the outcome or outcomes to get.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token for the request.

    " + }, + "maxResults":{ + "shape":"OutcomesMaxResults", + "documentation":"

    The maximum number of objects to return for the request.

    " + } + } + }, + "GetOutcomesResult":{ + "type":"structure", + "members":{ + "outcomes":{ + "shape":"OutcomeList", + "documentation":"

    The outcomes.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token for subsequent requests.

    " + } + } + }, + "GetRulesRequest":{ + "type":"structure", + "required":["detectorId"], + "members":{ + "ruleId":{ + "shape":"identifier", + "documentation":"

    The rule ID.

    " + }, + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID.

    " + }, + "ruleVersion":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The rule version.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token.

    " + }, + "maxResults":{ + "shape":"RulesMaxResults", + "documentation":"

    The maximum number of rules to return for the request.

    " + } + } + }, + "GetRulesResult":{ + "type":"structure", + "members":{ + "ruleDetails":{ + "shape":"RuleDetailList", + "documentation":"

    The details of the requested rule.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token to be used in subsequent requests.

    " + } + } + }, + "GetVariablesRequest":{ + "type":"structure", + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The name of the variable.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token of the get variable request.

    " + }, + "maxResults":{ + "shape":"VariablesMaxResults", + "documentation":"

    The maximum page size for the get variable request.

    " + } + } + }, + "GetVariablesResult":{ + "type":"structure", + "members":{ + "variables":{ + "shape":"VariableList", + "documentation":"

    The variables returned by the request.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next page token to be used in subsequent requests.

    " + } + } + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"string"} + }, + "documentation":"

    An exception indicating an internal server error.

    ", + "exception":true, + "fault":true + }, + "JsonKeyToVariableMap":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"string"} + }, + "KMSKey":{ + "type":"structure", + "members":{ + "kmsEncryptionKeyArn":{ + "shape":"KmsEncryptionKeyArn", + "documentation":"

    The encryption key ARN.

    " + } + }, + "documentation":"

    The KMS key details.

    " + }, + "KmsEncryptionKeyArn":{ + "type":"string", + "max":90, + "min":7, + "pattern":"^DEFAULT|arn:[a-zA-Z0-9-]+:kms:[a-zA-Z0-9-]+:\\d{12}:key\\/\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12}$" + }, + "Label":{ + "type":"structure", + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The label name.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The label description.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the label was last updated.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the event type was created.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The label ARN.

    " + } + }, + "documentation":"

    The label details.

    " + }, + "LabelSchema":{ + "type":"structure", + "required":["labelMapper"], + "members":{ + "labelMapper":{ + "shape":"labelMapper", + "documentation":"

    The label mapper maps the Amazon Fraud Detector supported model classification labels (FRAUD, LEGIT) to the appropriate event type labels. For example, if \"FRAUD\" and \"LEGIT\" are Amazon Fraud Detector supported labels, this mapper could be: {\"FRAUD\" => [\"0\"], \"LEGIT\" => [\"1\"]} or {\"FRAUD\" => [\"false\"], \"LEGIT\" => [\"true\"]} or {\"FRAUD\" => [\"fraud\", \"abuse\"], \"LEGIT\" => [\"legit\", \"safe\"]}. The value part of the mapper is a list, because you may have multiple label variants from your event type for a single Amazon Fraud Detector label.
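
    A sketch of building the mapper described above with the AWS SDK for Java v2; the event-type label values (fraud, legit) and the model variable names are hypothetical:

        import java.util.List;
        import java.util.Map;
        import software.amazon.awssdk.services.frauddetector.model.LabelSchema;
        import software.amazon.awssdk.services.frauddetector.model.TrainingDataSchema;

        public class BuildTrainingSchema {
            public static void main(String[] args) {
                // Map Amazon Fraud Detector's FRAUD/LEGIT classes to the labels used by the event type.
                LabelSchema labelSchema = LabelSchema.builder()
                        .labelMapper(Map.of(
                                "FRAUD", List.of("fraud"),
                                "LEGIT", List.of("legit")))
                        .build();
                TrainingDataSchema schema = TrainingDataSchema.builder()
                        .modelVariables("email_address", "ip_address")   // hypothetical variables
                        .labelSchema(labelSchema)
                        .build();
                System.out.println(schema);
            }
        }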

    " + } + }, + "documentation":"

    The label schema.

    " + }, + "Language":{ + "type":"string", + "enum":["DETECTORPL"] + }, + "ListOfModelScores":{ + "type":"list", + "member":{"shape":"ModelScores"} + }, + "ListOfModelVersions":{ + "type":"list", + "member":{"shape":"ModelVersion"} + }, + "ListOfRuleResults":{ + "type":"list", + "member":{"shape":"RuleResult"} + }, + "ListOfStrings":{ + "type":"list", + "member":{"shape":"string"} + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceARN"], + "members":{ + "resourceARN":{ + "shape":"fraudDetectorArn", + "documentation":"

    The ARN that specifies the resource whose tags you want to list.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token from the previous results.

    " + }, + "maxResults":{ + "shape":"TagsMaxResults", + "documentation":"

    The maximum number of objects to return for the request.

    " + } + } + }, + "ListTagsForResourceResult":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + }, + "nextToken":{ + "shape":"string", + "documentation":"

    The next token for subsequent requests.

    " + } + } + }, + "MetricDataPoint":{ + "type":"structure", + "members":{ + "fpr":{ + "shape":"float", + "documentation":"

    The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.

    " + }, + "precision":{ + "shape":"float", + "documentation":"

    The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.

    " + }, + "tpr":{ + "shape":"float", + "documentation":"

    The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.

    " + }, + "threshold":{ + "shape":"float", + "documentation":"

    The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.
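
    One common use of these data points is to pick the lowest threshold that keeps the estimated false positive rate at or below a target. A sketch, assuming fpr() and threshold() are exposed as boxed floats on the generated MetricDataPoint class:

        import java.util.Comparator;
        import java.util.Optional;
        import software.amazon.awssdk.services.frauddetector.model.MetricDataPoint;
        import software.amazon.awssdk.services.frauddetector.model.TrainingResult;

        public class ThresholdPicker {
            // Lowest threshold whose estimated false positive rate stays at or below the target.
            static Optional<Float> thresholdForFpr(TrainingResult result, float targetFpr) {
                return result.trainingMetrics().metricDataPoints().stream()
                        .filter(p -> p.fpr() != null && p.fpr() <= targetFpr)
                        .min(Comparator.comparing(MetricDataPoint::threshold))
                        .map(MetricDataPoint::threshold);
            }
        }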

    " + } + }, + "documentation":"

    Model performance metrics data points.

    " + }, + "Model":{ + "type":"structure", + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The model description.

    " + }, + "eventTypeName":{ + "shape":"string", + "documentation":"

    The name of the event type.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    Timestamp of when the model was created.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    Timestamp of last time the model was updated.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The ARN of the model.

    " + } + }, + "documentation":"

    The model.

    " + }, + "ModelEndpointDataBlob":{ + "type":"structure", + "members":{ + "byteBuffer":{ + "shape":"blob", + "documentation":"

    The byte buffer of the Amazon SageMaker model endpoint input data blob.

    " + }, + "contentType":{ + "shape":"contentType", + "documentation":"

    The content type of the Amazon SageMaker model endpoint input data blob.

    " + } + }, + "documentation":"

    A pre-formed Amazon SageMaker model input you can include if your detector version includes an imported Amazon SageMaker model endpoint with pass-through input configuration.

    " + }, + "ModelEndpointStatus":{ + "type":"string", + "enum":[ + "ASSOCIATED", + "DISSOCIATED" + ] + }, + "ModelInputConfiguration":{ + "type":"structure", + "required":["useEventVariables"], + "members":{ + "eventTypeName":{ + "shape":"identifier", + "documentation":"

    The event type name.

    " + }, + "format":{ + "shape":"ModelInputDataFormat", + "documentation":"

    The format of the model input configuration. The format differs depending on if it is passed through to SageMaker or constructed by Amazon Fraud Detector.

    " + }, + "useEventVariables":{ + "shape":"UseEventVariables", + "documentation":"

    The event variables.

    " + }, + "jsonInputTemplate":{ + "shape":"string", + "documentation":"

    Template for constructing the JSON input data sent to SageMaker. At event evaluation, the placeholders for variable names in the template will be replaced with the variable values before being sent to SageMaker.

    " + }, + "csvInputTemplate":{ + "shape":"string", + "documentation":"

    Template for constructing the CSV input data sent to SageMaker. At event evaluation, the placeholders for variable names in the template will be replaced with the variable values before being sent to SageMaker.

    " + } + }, + "documentation":"

    The Amazon SageMaker model input configuration.

    " + }, + "ModelInputDataFormat":{ + "type":"string", + "enum":[ + "TEXT_CSV", + "APPLICATION_JSON" + ] + }, + "ModelOutputConfiguration":{ + "type":"structure", + "required":["format"], + "members":{ + "format":{ + "shape":"ModelOutputDataFormat", + "documentation":"

    The format of the model output configuration.

    " + }, + "jsonKeyToVariableMap":{ + "shape":"JsonKeyToVariableMap", + "documentation":"

    A map of JSON keys in response from SageMaker to the Amazon Fraud Detector variables.

    " + }, + "csvIndexToVariableMap":{ + "shape":"CsvIndexToVariableMap", + "documentation":"

    A map of CSV index values in the SageMaker response to the Amazon Fraud Detector variables.

    " + } + }, + "documentation":"

    Provides the Amazon SageMaker model output configuration.

    " + }, + "ModelOutputDataFormat":{ + "type":"string", + "enum":[ + "TEXT_CSV", + "APPLICATION_JSONLINES" + ] + }, + "ModelPredictionMap":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"float"} + }, + "ModelScores":{ + "type":"structure", + "members":{ + "modelVersion":{ + "shape":"ModelVersion", + "documentation":"

    The model version.

    " + }, + "scores":{ + "shape":"ModelPredictionMap", + "documentation":"

    The model's fraud prediction scores.

    " + } + }, + "documentation":"

    The fraud prediction scores.

    " + }, + "ModelSource":{ + "type":"string", + "enum":["SAGEMAKER"] + }, + "ModelTypeEnum":{ + "type":"string", + "enum":["ONLINE_FRAUD_INSIGHTS"] + }, + "ModelVersion":{ + "type":"structure", + "required":[ + "modelId", + "modelType", + "modelVersionNumber" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "modelVersionNumber":{ + "shape":"nonEmptyString", + "documentation":"

    The model version number.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The model version ARN.

    " + } + }, + "documentation":"

    The model version.

    " + }, + "ModelVersionDetail":{ + "type":"structure", + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "modelVersionNumber":{ + "shape":"floatVersionString", + "documentation":"

    The model version number.

    " + }, + "status":{ + "shape":"string", + "documentation":"

    The status of the model version.

    " + }, + "trainingDataSource":{ + "shape":"TrainingDataSourceEnum", + "documentation":"

    The model version training data source.

    " + }, + "trainingDataSchema":{ + "shape":"TrainingDataSchema", + "documentation":"

    The training data schema.

    " + }, + "externalEventsDetail":{ + "shape":"ExternalEventsDetail", + "documentation":"

    The event details.

    " + }, + "trainingResult":{ + "shape":"TrainingResult", + "documentation":"

    The training results.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    The timestamp when the model was last updated.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    The timestamp when the model was created.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The model version ARN.

    " + } + }, + "documentation":"

    The details of the model version.

    " + }, + "ModelVersionStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE" + ] + }, + "NameList":{ + "type":"list", + "member":{"shape":"string"}, + "max":100, + "min":1 + }, + "NonEmptyListOfStrings":{ + "type":"list", + "member":{"shape":"string"}, + "min":1 + }, + "Outcome":{ + "type":"structure", + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The outcome name.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The outcome description.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    The timestamp when the outcome was last updated.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    The timestamp when the outcome was created.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The outcome ARN.

    " + } + }, + "documentation":"

    The outcome.

    " + }, + "OutcomeList":{ + "type":"list", + "member":{"shape":"Outcome"} + }, + "OutcomesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":50 + }, + "PutDetectorRequest":{ + "type":"structure", + "required":[ + "detectorId", + "eventTypeName" + ], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The description of the detector.

    " + }, + "eventTypeName":{ + "shape":"identifier", + "documentation":"

    The name of the event type.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + } + } + }, + "PutDetectorResult":{ + "type":"structure", + "members":{ + } + }, + "PutEntityTypeRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name of the entity type.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The description.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + } + } + }, + "PutEntityTypeResult":{ + "type":"structure", + "members":{ + } + }, + "PutEventTypeRequest":{ + "type":"structure", + "required":[ + "name", + "eventVariables", + "entityTypes" + ], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The description of the event type.

    " + }, + "eventVariables":{ + "shape":"NonEmptyListOfStrings", + "documentation":"

    The event type variables.

    " + }, + "labels":{ + "shape":"ListOfStrings", + "documentation":"

    The event type labels.

    " + }, + "entityTypes":{ + "shape":"NonEmptyListOfStrings", + "documentation":"

    The entity type for the event type. Example entity types: customer, merchant, account.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.
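
    A minimal sketch of registering an event type with these members via the AWS SDK for Java v2; every name below is hypothetical, and the referenced variables and labels must already exist in your account:

        import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
        import software.amazon.awssdk.services.frauddetector.model.PutEventTypeRequest;

        public class RegisterEventType {
            public static void main(String[] args) {
                FraudDetectorClient client = FraudDetectorClient.create();
                client.putEventType(PutEventTypeRequest.builder()
                        .name("sample_registration")
                        .description("Account registration events")
                        .eventVariables("email_address", "ip_address")   // existing variables
                        .labels("fraud", "legit")                        // existing labels
                        .entityTypes("customer")                         // existing entity type
                        .build());
            }
        }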

    " + } + } + }, + "PutEventTypeResult":{ + "type":"structure", + "members":{ + } + }, + "PutExternalModelRequest":{ + "type":"structure", + "required":[ + "modelEndpoint", + "modelSource", + "invokeModelEndpointRoleArn", + "inputConfiguration", + "outputConfiguration", + "modelEndpointStatus" + ], + "members":{ + "modelEndpoint":{ + "shape":"sageMakerEndpointIdentifier", + "documentation":"

    The model endpoint's name.

    " + }, + "modelSource":{ + "shape":"ModelSource", + "documentation":"

    The source of the model.

    " + }, + "invokeModelEndpointRoleArn":{ + "shape":"string", + "documentation":"

    The IAM role used to invoke the model endpoint.

    " + }, + "inputConfiguration":{ + "shape":"ModelInputConfiguration", + "documentation":"

    The model endpoint input configuration.

    " + }, + "outputConfiguration":{ + "shape":"ModelOutputConfiguration", + "documentation":"

    The model endpoint output configuration.

    " + }, + "modelEndpointStatus":{ + "shape":"ModelEndpointStatus", + "documentation":"

    The model endpoint’s status in Amazon Fraud Detector.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.
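
    A sketch of importing a SageMaker endpoint with these members; the endpoint name, role ARN, event type, and response-key mapping are hypothetical, and a jsonInputTemplate can also be set to shape the request payload:

        import java.util.Map;
        import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
        import software.amazon.awssdk.services.frauddetector.model.ModelEndpointStatus;
        import software.amazon.awssdk.services.frauddetector.model.ModelInputConfiguration;
        import software.amazon.awssdk.services.frauddetector.model.ModelInputDataFormat;
        import software.amazon.awssdk.services.frauddetector.model.ModelOutputConfiguration;
        import software.amazon.awssdk.services.frauddetector.model.ModelOutputDataFormat;
        import software.amazon.awssdk.services.frauddetector.model.ModelSource;
        import software.amazon.awssdk.services.frauddetector.model.PutExternalModelRequest;

        public class ImportSageMakerEndpoint {
            public static void main(String[] args) {
                FraudDetectorClient client = FraudDetectorClient.create();
                client.putExternalModel(PutExternalModelRequest.builder()
                        .modelEndpoint("sample-sagemaker-endpoint")                        // hypothetical endpoint
                        .modelSource(ModelSource.SAGEMAKER)
                        .invokeModelEndpointRoleArn("arn:aws:iam::123456789012:role/sample-invoke-role")
                        .inputConfiguration(ModelInputConfiguration.builder()
                                .eventTypeName("sample_registration")
                                .useEventVariables(true)                                   // send event variables
                                .format(ModelInputDataFormat.APPLICATION_JSON)
                                .build())
                        .outputConfiguration(ModelOutputConfiguration.builder()
                                .format(ModelOutputDataFormat.APPLICATION_JSONLINES)
                                .jsonKeyToVariableMap(Map.of("score", "sagemaker_score"))  // hypothetical mapping
                                .build())
                        .modelEndpointStatus(ModelEndpointStatus.ASSOCIATED)
                        .build());
            }
        }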

    " + } + } + }, + "PutExternalModelResult":{ + "type":"structure", + "members":{ + } + }, + "PutKMSEncryptionKeyRequest":{ + "type":"structure", + "required":["kmsEncryptionKeyArn"], + "members":{ + "kmsEncryptionKeyArn":{ + "shape":"KmsEncryptionKeyArn", + "documentation":"

    The KMS encryption key ARN.

    " + } + } + }, + "PutKMSEncryptionKeyResult":{ + "type":"structure", + "members":{ + } + }, + "PutLabelRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The label name.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The label description.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    " + } + } + }, + "PutLabelResult":{ + "type":"structure", + "members":{ + } + }, + "PutOutcomeRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

    The name of the outcome.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The outcome description.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + } + } + }, + "PutOutcomeResult":{ + "type":"structure", + "members":{ + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"string"} + }, + "documentation":"

    An exception indicating the specified resource was not found.

    ", + "exception":true + }, + "Rule":{ + "type":"structure", + "required":[ + "detectorId", + "ruleId", + "ruleVersion" + ], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector for which the rule is defined.

    " + }, + "ruleId":{ + "shape":"identifier", + "documentation":"

    The rule ID.

    " + }, + "ruleVersion":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The rule version.

    " + } + }, + "documentation":"

    A rule.

    " + }, + "RuleDetail":{ + "type":"structure", + "members":{ + "ruleId":{ + "shape":"identifier", + "documentation":"

    The rule ID.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The rule description.

    " + }, + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector for which the rule is defined.

    " + }, + "ruleVersion":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The rule version.

    " + }, + "expression":{ + "shape":"ruleExpression", + "documentation":"

    The rule expression.

    " + }, + "language":{ + "shape":"Language", + "documentation":"

    The rule language.

    " + }, + "outcomes":{ + "shape":"NonEmptyListOfStrings", + "documentation":"

    The rule outcomes.

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    Timestamp of the last time the rule was updated.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    The timestamp of when the rule was created.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The rule ARN.

    " + } + }, + "documentation":"

    The details of the rule.

    " + }, + "RuleDetailList":{ + "type":"list", + "member":{"shape":"RuleDetail"} + }, + "RuleExecutionMode":{ + "type":"string", + "enum":[ + "ALL_MATCHED", + "FIRST_MATCHED" + ] + }, + "RuleList":{ + "type":"list", + "member":{"shape":"Rule"} + }, + "RuleResult":{ + "type":"structure", + "members":{ + "ruleId":{ + "shape":"string", + "documentation":"

    The rule ID that was matched, based on the rule execution mode.

    " + }, + "outcomes":{ + "shape":"ListOfStrings", + "documentation":"

    The outcomes of the matched rule, based on the rule execution mode.

    " + } + }, + "documentation":"

    The rule results.

    " + }, + "RulesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":50 + }, + "Tag":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"tagKey", + "documentation":"

    A tag key.

    " + }, + "value":{ + "shape":"tagValue", + "documentation":"

    A value assigned to a tag key.

    " + } + }, + "documentation":"

    A key and value pair.

    " + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceARN", + "tags" + ], + "members":{ + "resourceARN":{ + "shape":"fraudDetectorArn", + "documentation":"

    The resource ARN.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    The tags to assign to the resource.

    " + } + } + }, + "TagResourceResult":{ + "type":"structure", + "members":{ + } + }, + "TagsMaxResults":{ + "type":"integer", + "box":true, + "max":50, + "min":50 + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"string"} + }, + "documentation":"

    An exception indicating a throttling error.

    ", + "exception":true + }, + "TrainingDataSchema":{ + "type":"structure", + "required":[ + "modelVariables", + "labelSchema" + ], + "members":{ + "modelVariables":{ + "shape":"ListOfStrings", + "documentation":"

    The training data schema variables.

    " + }, + "labelSchema":{"shape":"LabelSchema"} + }, + "documentation":"

    The training data schema.

    " + }, + "TrainingDataSourceEnum":{ + "type":"string", + "enum":["EXTERNAL_EVENTS"] + }, + "TrainingMetrics":{ + "type":"structure", + "members":{ + "auc":{ + "shape":"float", + "documentation":"

    The area under the curve. This summarizes true positive rate (TPR) and false positive rate (FPR) across all possible model score thresholds. A model with no predictive power has an AUC of 0.5, whereas a perfect model has a score of 1.0.

    " + }, + "metricDataPoints":{ + "shape":"metricDataPointsList", + "documentation":"

    The data points details.

    " + } + }, + "documentation":"

    The training metric details.

    " + }, + "TrainingResult":{ + "type":"structure", + "members":{ + "dataValidationMetrics":{ + "shape":"DataValidationMetrics", + "documentation":"

    The validation metrics.

    " + }, + "trainingMetrics":{ + "shape":"TrainingMetrics", + "documentation":"

    The training metric details.

    " + } + }, + "documentation":"

    The training result details.

    " + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceARN", + "tagKeys" + ], + "members":{ + "resourceARN":{ + "shape":"fraudDetectorArn", + "documentation":"

    The ARN of the resource from which to remove the tag.

    " + }, + "tagKeys":{ + "shape":"tagKeyList", + "documentation":"

    The tag keys of the tags to remove from the resource.

    " + } + } + }, + "UntagResourceResult":{ + "type":"structure", + "members":{ + } + }, + "UpdateDetectorVersionMetadataRequest":{ + "type":"structure", + "required":[ + "detectorId", + "detectorVersionId", + "description" + ], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID.

    " + }, + "detectorVersionId":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The detector version ID.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The description.

    " + } + } + }, + "UpdateDetectorVersionMetadataResult":{ + "type":"structure", + "members":{ + } + }, + "UpdateDetectorVersionRequest":{ + "type":"structure", + "required":[ + "detectorId", + "detectorVersionId", + "externalModelEndpoints", + "rules" + ], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The parent detector ID for the detector version you want to update.

    " + }, + "detectorVersionId":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The detector version ID.

    " + }, + "externalModelEndpoints":{ + "shape":"ListOfStrings", + "documentation":"

    The Amazon SageMaker model endpoints to include in the detector version.

    " + }, + "rules":{ + "shape":"RuleList", + "documentation":"

    The rules to include in the detector version.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The detector version description.

    " + }, + "modelVersions":{ + "shape":"ListOfModelVersions", + "documentation":"

    The model versions to include in the detector version.

    " + }, + "ruleExecutionMode":{ + "shape":"RuleExecutionMode", + "documentation":"

    The rule execution mode to add to the detector.

    If you specify FIRST_MATCHED, Amazon Fraud Detector evaluates rules sequentially, first to last, stopping at the first matched rule. Amazon Fraud Detector then provides the outcomes for that single rule.

    If you specify ALL_MATCHED, Amazon Fraud Detector evaluates all rules and returns the outcomes for all matched rules. You can define and edit the rule mode at the detector version level, when it is in draft status.

    The default behavior is FIRST_MATCHED.
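
    A sketch of switching the rule execution mode on a draft detector version with the AWS SDK for Java v2; the detector, version, and rule identifiers are hypothetical:

        import java.util.Collections;
        import software.amazon.awssdk.services.frauddetector.FraudDetectorClient;
        import software.amazon.awssdk.services.frauddetector.model.Rule;
        import software.amazon.awssdk.services.frauddetector.model.RuleExecutionMode;
        import software.amazon.awssdk.services.frauddetector.model.UpdateDetectorVersionRequest;

        public class SwitchRuleExecutionMode {
            public static void main(String[] args) {
                FraudDetectorClient client = FraudDetectorClient.create();
                client.updateDetectorVersion(UpdateDetectorVersionRequest.builder()
                        .detectorId("sample_detector")
                        .detectorVersionId("1")                       // must be in draft status to edit
                        .externalModelEndpoints(Collections.emptyList())
                        .rules(Rule.builder()
                                .detectorId("sample_detector")
                                .ruleId("high_risk_rule")             // hypothetical rule
                                .ruleVersion("1")
                                .build())
                        .ruleExecutionMode(RuleExecutionMode.ALL_MATCHED)
                        .build());
            }
        }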

    " + } + } + }, + "UpdateDetectorVersionResult":{ + "type":"structure", + "members":{ + } + }, + "UpdateDetectorVersionStatusRequest":{ + "type":"structure", + "required":[ + "detectorId", + "detectorVersionId", + "status" + ], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

    The detector ID.

    " + }, + "detectorVersionId":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The detector version ID.

    " + }, + "status":{ + "shape":"DetectorVersionStatus", + "documentation":"

    The new status.

    " + } + } + }, + "UpdateDetectorVersionStatusResult":{ + "type":"structure", + "members":{ + } + }, + "UpdateModelRequest":{ + "type":"structure", + "required":[ + "modelId", + "modelType" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The new model description.

    " + } + } + }, + "UpdateModelResult":{ + "type":"structure", + "members":{ + } + }, + "UpdateModelVersionRequest":{ + "type":"structure", + "required":[ + "modelId", + "modelType", + "majorVersionNumber" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "majorVersionNumber":{ + "shape":"wholeNumberVersionString", + "documentation":"

    The major version number.

    " + }, + "externalEventsDetail":{ + "shape":"ExternalEventsDetail", + "documentation":"

    The event details.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    A collection of key and value pairs.

    " + } + } + }, + "UpdateModelVersionResult":{ + "type":"structure", + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "modelVersionNumber":{ + "shape":"floatVersionString", + "documentation":"

    The model version number of the model version updated.

    " + }, + "status":{ + "shape":"string", + "documentation":"

    The status of the updated model version.

    " + } + } + }, + "UpdateModelVersionStatusRequest":{ + "type":"structure", + "required":[ + "modelId", + "modelType", + "modelVersionNumber", + "status" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

    The model ID of the model version to update.

    " + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

    The model type.

    " + }, + "modelVersionNumber":{ + "shape":"floatVersionString", + "documentation":"

    The model version number.

    " + }, + "status":{ + "shape":"ModelVersionStatus", + "documentation":"

    The model version status.

    " + } + } + }, + "UpdateModelVersionStatusResult":{ + "type":"structure", + "members":{ + } + }, + "UpdateRuleMetadataRequest":{ + "type":"structure", + "required":[ + "rule", + "description" + ], + "members":{ + "rule":{ + "shape":"Rule", + "documentation":"

    The rule to update.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The rule description.

    " + } + } + }, + "UpdateRuleMetadataResult":{ + "type":"structure", + "members":{ + } + }, + "UpdateRuleVersionRequest":{ + "type":"structure", + "required":[ + "rule", + "expression", + "language", + "outcomes" + ], + "members":{ + "rule":{ + "shape":"Rule", + "documentation":"

    The rule to update.

    " + }, + "description":{ + "shape":"description", + "documentation":"

    The description.

    " + }, + "expression":{ + "shape":"ruleExpression", + "documentation":"

    The rule expression.

    " + }, + "language":{ + "shape":"Language", + "documentation":"

    The language.

    " + }, + "outcomes":{ + "shape":"NonEmptyListOfStrings", + "documentation":"

    The outcomes.

    " + }, + "tags":{ + "shape":"tagList", + "documentation":"

    The tags to assign to the rule version.

    " + } + } + }, + "UpdateRuleVersionResult":{ + "type":"structure", + "members":{ + "rule":{ + "shape":"Rule", + "documentation":"

    The new rule version that was created.

    " + } + } + }, + "UpdateVariableRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The name of the variable.

    " + }, + "defaultValue":{ + "shape":"string", + "documentation":"

    The new default value of the variable.

    " + }, + "description":{ + "shape":"string", + "documentation":"

    The new description.

    " + }, + "variableType":{ + "shape":"string", + "documentation":"

    The variable type. For more information see Variable types.

    " + } + } + }, + "UpdateVariableResult":{ + "type":"structure", + "members":{ + } + }, + "UseEventVariables":{"type":"boolean"}, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"string"} + }, + "documentation":"

    An exception indicating a specified value is not allowed.

    ", + "exception":true + }, + "Variable":{ + "type":"structure", + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The name of the variable.

    " + }, + "dataType":{ + "shape":"DataType", + "documentation":"

    The data type of the variable. For more information see Variable types.

    " + }, + "dataSource":{ + "shape":"DataSource", + "documentation":"

    The data source of the variable.

    " + }, + "defaultValue":{ + "shape":"string", + "documentation":"

    The default value of the variable.

    " + }, + "description":{ + "shape":"string", + "documentation":"

    The description of the variable.

    " + }, + "variableType":{ + "shape":"string", + "documentation":"

    The variable type of the variable.

    Valid Values: AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE | BILLING_ZIP | CARD_BIN | CATEGORICAL | CURRENCY_CODE | EMAIL_ADDRESS | FINGERPRINT | FRAUD_LABEL | FREE_FORM_TEXT | IP_ADDRESS | NUMERIC | ORDER_ID | PAYMENT_TYPE | PHONE_NUMBER | PRICE | PRODUCT_CATEGORY | SHIPPING_ADDRESS_L1 | SHIPPING_ADDRESS_L2 | SHIPPING_CITY | SHIPPING_COUNTRY | SHIPPING_NAME | SHIPPING_PHONE | SHIPPING_STATE | SHIPPING_ZIP | USERAGENT

    " + }, + "lastUpdatedTime":{ + "shape":"time", + "documentation":"

    The time when variable was last updated.

    " + }, + "createdTime":{ + "shape":"time", + "documentation":"

    The time when the variable was created.

    " + }, + "arn":{ + "shape":"fraudDetectorArn", + "documentation":"

    The ARN of the variable.

    " + } + }, + "documentation":"

    The variable.

    " + }, + "VariableEntry":{ + "type":"structure", + "members":{ + "name":{ + "shape":"string", + "documentation":"

    The name of the variable.

    " + }, + "dataType":{ + "shape":"string", + "documentation":"

    The data type of the variable.

    " + }, + "dataSource":{ + "shape":"string", + "documentation":"

    The data source of the variable.

    " + }, + "defaultValue":{ + "shape":"string", + "documentation":"

    The default value of the variable.

    " + }, + "description":{ + "shape":"string", + "documentation":"

    The description of the variable.

    " + }, + "variableType":{ + "shape":"string", + "documentation":"

    The type of the variable. For more information see Variable types.

    Valid Values: AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE | BILLING_ZIP | CARD_BIN | CATEGORICAL | CURRENCY_CODE | EMAIL_ADDRESS | FINGERPRINT | FRAUD_LABEL | FREE_FORM_TEXT | IP_ADDRESS | NUMERIC | ORDER_ID | PAYMENT_TYPE | PHONE_NUMBER | PRICE | PRODUCT_CATEGORY | SHIPPING_ADDRESS_L1 | SHIPPING_ADDRESS_L2 | SHIPPING_CITY | SHIPPING_COUNTRY | SHIPPING_NAME | SHIPPING_PHONE | SHIPPING_STATE | SHIPPING_ZIP | USERAGENT

    " + } + }, + "documentation":"

    A variable in the list of variables for the batch create variable request.

    " + }, + "VariableEntryList":{ + "type":"list", + "member":{"shape":"VariableEntry"}, + "max":25, + "min":1 + }, + "VariableList":{ + "type":"list", + "member":{"shape":"Variable"} + }, + "VariablesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":50 + }, + "blob":{"type":"blob"}, + "contentType":{ + "type":"string", + "max":1024, + "min":1 + }, + "description":{ + "type":"string", + "max":128, + "min":1 + }, + "entityTypeList":{ + "type":"list", + "member":{"shape":"EntityType"} + }, + "entityTypesMaxResults":{ + "type":"integer", + "box":true, + "max":10, + "min":5 + }, + "eventTypeList":{ + "type":"list", + "member":{"shape":"EventType"} + }, + "eventTypesMaxResults":{ + "type":"integer", + "box":true, + "max":10, + "min":5 + }, + "fieldValidationMessageList":{ + "type":"list", + "member":{"shape":"FieldValidationMessage"} + }, + "fileValidationMessageList":{ + "type":"list", + "member":{"shape":"FileValidationMessage"} + }, + "float":{"type":"float"}, + "floatVersionString":{ + "type":"string", + "max":7, + "min":3, + "pattern":"^[1-9][0-9]{0,3}\\.[0-9]{1,2}$" + }, + "fraudDetectorArn":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^arn\\:aws[a-z-]{0,15}\\:frauddetector\\:[a-z0-9-]{3,20}\\:[0-9]{12}\\:[^\\s]{2,128}$" + }, + "iamRoleArn":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^arn\\:aws[a-z-]{0,15}\\:iam\\:\\:[0-9]{12}\\:role\\/[^\\s]{2,64}$" + }, + "identifier":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[0-9a-z_-]+$" + }, + "integer":{"type":"integer"}, + "labelList":{ + "type":"list", + "member":{"shape":"Label"} + }, + "labelMapper":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"ListOfStrings"} + }, + "labelsMaxResults":{ + "type":"integer", + "box":true, + "max":50, + "min":10 + }, + "listOfEntities":{ + "type":"list", + "member":{"shape":"Entity"} + }, + "metricDataPointsList":{ + "type":"list", + "member":{"shape":"MetricDataPoint"} + }, + "modelIdentifier":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[0-9a-z_]+$" + }, + "modelList":{ + "type":"list", + "member":{"shape":"Model"} + }, + "modelVersionDetailList":{ + "type":"list", + "member":{"shape":"ModelVersionDetail"} + }, + "modelsMaxPageSize":{ + "type":"integer", + "box":true, + "max":10, + "min":1 + }, + "nonEmptyString":{ + "type":"string", + "min":1 + }, + "ruleExpression":{ + "type":"string", + "max":4096, + "min":1, + "sensitive":true + }, + "s3BucketLocation":{ + "type":"string", + "max":512, + "min":1, + "pattern":"^s3:\\/\\/(.+)$" + }, + "sageMakerEndpointIdentifier":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[0-9A-Za-z_-]+$" + }, + "string":{"type":"string"}, + "tagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "tagKeyList":{ + "type":"list", + "member":{"shape":"tagKey"}, + "max":50, + "min":0 + }, + "tagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "tagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "time":{"type":"string"}, + "variableName":{ + "type":"string", + "max":64, + "min":1 + }, + "variableValue":{ + "type":"string", + "max":1024, + "min":1, + "sensitive":true + }, + "wholeNumberVersionString":{ + "type":"string", + "max":5, + "min":1, + "pattern":"^([1-9][0-9]*)$" + } + }, + "documentation":"

    This is the Amazon Fraud Detector API Reference. This guide is for developers who need detailed information about Amazon Fraud Detector API actions, data types, and errors. For more information about Amazon Fraud Detector features, see the Amazon Fraud Detector User Guide.

    " +} diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index 8b8380873db6..f06659a056f2 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + greengrassv2 + AWS Java SDK :: Services :: Greengrass V2 + The AWS Java SDK for Greengrass V2 module holds the client classes that are used for + communicating with Greengrass V2. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.greengrassv2 + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/greengrassv2/src/main/resources/codegen-resources/paginators-1.json b/services/greengrassv2/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..8f32216e19e9 --- /dev/null +++ b/services/greengrassv2/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,40 @@ +{ + "pagination": { + "ListComponentVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "componentVersions" + }, + "ListComponents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "components" + }, + "ListCoreDevices": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "coreDevices" + }, + "ListDeployments": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "deployments" + }, + "ListEffectiveDeployments": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "effectiveDeployments" + }, + "ListInstalledComponents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "installedComponents" + } + } +} diff --git a/services/greengrassv2/src/main/resources/codegen-resources/service-2.json b/services/greengrassv2/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..3461e8942228 --- /dev/null +++ b/services/greengrassv2/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2319 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-11-30", + "endpointPrefix":"greengrass", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"AWS GreengrassV2", + "serviceFullName":"AWS IoT Greengrass V2", + "serviceId":"GreengrassV2", + "signatureVersion":"v4", + "uid":"greengrassv2-2020-11-30" + }, + "operations":{ + "CancelDeployment":{ + "name":"CancelDeployment", + "http":{ + "method":"POST", + "requestUri":"/greengrass/v2/deployments/{deploymentId}/cancel" + }, + "input":{"shape":"CancelDeploymentRequest"}, + "output":{"shape":"CancelDeploymentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Cancels a deployment. This operation cancels the deployment for devices that haven't yet received it. If a device already received the deployment, this operation doesn't change anything for that device.

    " + }, + "CreateComponentVersion":{ + "name":"CreateComponentVersion", + "http":{ + "method":"POST", + "requestUri":"/greengrass/v2/createComponentVersion", + "responseCode":201 + }, + "input":{"shape":"CreateComponentVersionRequest"}, + "output":{"shape":"CreateComponentVersionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a component. Components are software that run on AWS IoT Greengrass core devices. After you develop and test a component on your core device, you can use this operation to upload your component to AWS IoT Greengrass. Then, you can deploy the component to other core devices.

    You can use this operation to do the following:

    • Create components from recipes

      Create a component from a recipe, which is a file that defines the component's metadata, parameters, dependencies, lifecycle, artifacts, and platform capability. For more information, see AWS IoT Greengrass component recipe reference in the AWS IoT Greengrass V2 Developer Guide.

      To create a component from a recipe, specify inlineRecipe when you call this operation.

    • Create components from Lambda functions

      Create a component from an AWS Lambda function that runs on AWS IoT Greengrass. This creates a recipe and artifacts from the Lambda function's deployment package. You can use this operation to migrate Lambda functions from AWS IoT Greengrass V1 to AWS IoT Greengrass V2.

      This function only accepts Lambda functions that use the following runtimes:

      • Python 2.7 – python2.7

      • Python 3.7 – python3.7

      • Python 3.8 – python3.8

      • Java 8 – java8

      • Node.js 10 – nodejs10.x

      • Node.js 12 – nodejs12.x

      To create a component from a Lambda function, specify lambdaFunction when you call this operation.
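
      A minimal recipe-based sketch with the AWS SDK for Java v2; the recipe is trimmed and hypothetical (real recipes also declare lifecycle, artifacts, and platforms), and the inlineRecipe bytes parameter and the arn() accessor on the response are assumed from the generated client:

        import software.amazon.awssdk.core.SdkBytes;
        import software.amazon.awssdk.services.greengrassv2.GreengrassV2Client;
        import software.amazon.awssdk.services.greengrassv2.model.CreateComponentVersionRequest;
        import software.amazon.awssdk.services.greengrassv2.model.CreateComponentVersionResponse;

        public class PublishComponent {
            public static void main(String[] args) {
                GreengrassV2Client client = GreengrassV2Client.create();
                String recipe = "{\n"
                        + "  \"RecipeFormatVersion\": \"2020-01-25\",\n"
                        + "  \"ComponentName\": \"com.example.HelloWorld\",\n"
                        + "  \"ComponentVersion\": \"1.0.0\"\n"
                        + "}";
                CreateComponentVersionResponse response = client.createComponentVersion(
                        CreateComponentVersionRequest.builder()
                                .inlineRecipe(SdkBytes.fromUtf8String(recipe))
                                .build());
                System.out.println("Created component version: " + response.arn());
            }
        }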

    " + }, + "CreateDeployment":{ + "name":"CreateDeployment", + "http":{ + "method":"POST", + "requestUri":"/greengrass/v2/deployments", + "responseCode":201 + }, + "input":{"shape":"CreateDeploymentRequest"}, + "output":{"shape":"CreateDeploymentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a continuous deployment for a target, which is an AWS IoT Greengrass core device or group of core devices. When you add a new core device to a group of core devices that has a deployment, AWS IoT Greengrass deploys that group's deployment to the new device.

    You can define one deployment for each target. When you create a new deployment for a target that has an existing deployment, you replace the previous deployment. AWS IoT Greengrass applies the new deployment to the target devices.

    Every deployment has a revision number that indicates how many deployment revisions you define for a target. Use this operation to create a new revision of an existing deployment. This operation returns the revision number of the new deployment when you create it.

    For more information, see Create deployments in the AWS IoT Greengrass V2 Developer Guide.
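
    A rough sketch of creating a deployment for a thing group with the AWS SDK for Java v2; the targetArn, deploymentName, and components members, and the ComponentDeploymentSpecification shape, are not shown in this excerpt and are assumed from the generated client, and all names and ARNs are hypothetical:

        import java.util.Map;
        import software.amazon.awssdk.services.greengrassv2.GreengrassV2Client;
        import software.amazon.awssdk.services.greengrassv2.model.ComponentDeploymentSpecification;
        import software.amazon.awssdk.services.greengrassv2.model.CreateDeploymentRequest;
        import software.amazon.awssdk.services.greengrassv2.model.CreateDeploymentResponse;

        public class DeployToThingGroup {
            public static void main(String[] args) {
                GreengrassV2Client client = GreengrassV2Client.create();
                CreateDeploymentResponse response = client.createDeployment(CreateDeploymentRequest.builder()
                        // Target an AWS IoT thing group so new members pick the deployment up automatically.
                        .targetArn("arn:aws:iot:us-east-1:123456789012:thinggroup/SampleGroup")
                        .deploymentName("sample-deployment")
                        .components(Map.of("com.example.HelloWorld",
                                ComponentDeploymentSpecification.builder().componentVersion("1.0.0").build()))
                        .build());
                System.out.println("Deployment ID: " + response.deploymentId());
            }
        }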

    " + }, + "DeleteComponent":{ + "name":"DeleteComponent", + "http":{ + "method":"DELETE", + "requestUri":"/greengrass/v2/components/{arn}", + "responseCode":204 + }, + "input":{"shape":"DeleteComponentRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes a version of a component from AWS IoT Greengrass.

    This operation deletes the component's recipe and artifacts. As a result, deployments that refer to this component version will fail. If you have deployments that use this component version, you can remove the component from the deployment or update the deployment to use a valid version.

    " + }, + "DeleteCoreDevice":{ + "name":"DeleteCoreDevice", + "http":{ + "method":"DELETE", + "requestUri":"/greengrass/v2/coreDevices/{coreDeviceThingName}", + "responseCode":204 + }, + "input":{"shape":"DeleteCoreDeviceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes an AWS IoT Greengrass core device, which is an AWS IoT thing. This operation removes the core device from the list of core devices. This operation doesn't delete the AWS IoT thing. For more information about how to delete the AWS IoT thing, see DeleteThing in the AWS IoT API Reference.

    " + }, + "DescribeComponent":{ + "name":"DescribeComponent", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/components/{arn}/metadata" + }, + "input":{"shape":"DescribeComponentRequest"}, + "output":{"shape":"DescribeComponentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves metadata for a version of a component.

    " + }, + "GetComponent":{ + "name":"GetComponent", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/components/{arn}" + }, + "input":{"shape":"GetComponentRequest"}, + "output":{"shape":"GetComponentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets the recipe for a version of a component. Core devices can call this operation to identify the artifacts and requirements to install a component.

    " + }, + "GetComponentVersionArtifact":{ + "name":"GetComponentVersionArtifact", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/components/{arn}/artifacts/{artifactName+}" + }, + "input":{"shape":"GetComponentVersionArtifactRequest"}, + "output":{"shape":"GetComponentVersionArtifactResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets the pre-signed URL to a component artifact in an S3 bucket. Core devices can call this operation to identify the URL that they can use to download an artifact to install.

    " + }, + "GetCoreDevice":{ + "name":"GetCoreDevice", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/coreDevices/{coreDeviceThingName}" + }, + "input":{"shape":"GetCoreDeviceRequest"}, + "output":{"shape":"GetCoreDeviceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves metadata for an AWS IoT Greengrass core device.

    " + }, + "GetDeployment":{ + "name":"GetDeployment", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/deployments/{deploymentId}" + }, + "input":{"shape":"GetDeploymentRequest"}, + "output":{"shape":"GetDeploymentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets a deployment. Deployments define the components that run on AWS IoT Greengrass core devices.

    " + }, + "ListComponentVersions":{ + "name":"ListComponentVersions", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/components/{arn}/versions" + }, + "input":{"shape":"ListComponentVersionsRequest"}, + "output":{"shape":"ListComponentVersionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves a paginated list of all versions for a component.

    " + }, + "ListComponents":{ + "name":"ListComponents", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/components" + }, + "input":{"shape":"ListComponentsRequest"}, + "output":{"shape":"ListComponentsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves a paginated list of component summaries. This list includes components that you have permission to view.

    " + }, + "ListCoreDevices":{ + "name":"ListCoreDevices", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/coreDevices" + }, + "input":{"shape":"ListCoreDevicesRequest"}, + "output":{"shape":"ListCoreDevicesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of AWS IoT Greengrass core devices.

    " + }, + "ListDeployments":{ + "name":"ListDeployments", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/deployments" + }, + "input":{"shape":"ListDeploymentsRequest"}, + "output":{"shape":"ListDeploymentsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of deployments.

    " + }, + "ListEffectiveDeployments":{ + "name":"ListEffectiveDeployments", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/coreDevices/{coreDeviceThingName}/effectiveDeployments" + }, + "input":{"shape":"ListEffectiveDeploymentsRequest"}, + "output":{"shape":"ListEffectiveDeploymentsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of deployment jobs that AWS IoT Greengrass sends to AWS IoT Greengrass core devices.

    " + }, + "ListInstalledComponents":{ + "name":"ListInstalledComponents", + "http":{ + "method":"GET", + "requestUri":"/greengrass/v2/coreDevices/{coreDeviceThingName}/installedComponents" + }, + "input":{"shape":"ListInstalledComponentsRequest"}, + "output":{"shape":"ListInstalledComponentsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of the components that an AWS IoT Greengrass core device runs.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieves the list of tags for an AWS IoT Greengrass resource.

    " + }, + "ResolveComponentCandidates":{ + "name":"ResolveComponentCandidates", + "http":{ + "method":"POST", + "requestUri":"/greengrass/v2/resolveComponentCandidates" + }, + "input":{"shape":"ResolveComponentCandidatesRequest"}, + "output":{"shape":"ResolveComponentCandidatesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Retrieves a list of components that meet the component, version, and platform requirements of a deployment. AWS IoT Greengrass core devices call this operation when they receive a deployment to identify the components to install.

    This operation identifies components that meet all dependency requirements for a deployment. If the requirements conflict, then this operation returns an error and the deployment fails. For example, this occurs if component A requires version >2.0.0 and component B requires version <2.0.0 of a component dependency.

    When you specify the component candidates to resolve, AWS IoT Greengrass compares each component's digest from the core device with the component's digest in the AWS Cloud. If the digests don't match, then AWS IoT Greengrass specifies to use the version from the AWS Cloud.

    To use this operation, you must use the data plane API endpoint and authenticate with an AWS IoT device certificate. For more information, see AWS IoT Greengrass endpoints and quotas.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Adds tags to an AWS IoT Greengrass resource. If a tag already exists for the resource, this operation updates the tag's value.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes a tag from an AWS IoT Greengrass resource.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    You don't have permission to perform the action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "CancelDeploymentRequest":{ + "type":"structure", + "required":["deploymentId"], + "members":{ + "deploymentId":{ + "shape":"NonEmptyString", + "documentation":"

    The ID of the deployment.

    ", + "location":"uri", + "locationName":"deploymentId" + } + } + }, + "CancelDeploymentResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"NonEmptyString", + "documentation":"

    A message that communicates whether the cancellation was successful.

    " + } + } + }, + "CloudComponentState":{ + "type":"string", + "enum":[ + "REQUESTED", + "INITIATED", + "DEPLOYABLE", + "FAILED", + "DEPRECATED" + ] + }, + "CloudComponentStatus":{ + "type":"structure", + "members":{ + "componentState":{ + "shape":"CloudComponentState", + "documentation":"

    The state of the component.

    " + }, + "message":{ + "shape":"NonEmptyString", + "documentation":"

    A message that communicates details, such as errors, about the status of the component.

    " + }, + "errors":{ + "shape":"StringMap", + "documentation":"

    A dictionary of errors that communicate why the component is in an error state. For example, if AWS IoT Greengrass can't access an artifact for the component, then errors contains the artifact's URI as a key, and the error message as the value for that key.

    " + } + }, + "documentation":"

    Contains the status of a component in the AWS IoT Greengrass service.

    " + }, + "Component":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ComponentARN", + "documentation":"

    The ARN of the component version.

    " + }, + "componentName":{ + "shape":"ComponentNameString", + "documentation":"

    The name of the component.

    " + }, + "latestVersion":{ + "shape":"ComponentLatestVersion", + "documentation":"

    The latest version of the component and its details.

    " + } + }, + "documentation":"

    Contains information about a component.

    " + }, + "ComponentARN":{ + "type":"string", + "pattern":"arn:aws(-cn|-us-gov)?:greengrass:[^:]+:(aws|[0-9]+):components:[^:]+" + }, + "ComponentCandidate":{ + "type":"structure", + "members":{ + "componentName":{ + "shape":"ComponentNameString", + "documentation":"

    The name of the component.

    " + }, + "componentVersion":{ + "shape":"ComponentVersionString", + "documentation":"

    The version of the component.

    " + }, + "versionRequirements":{ + "shape":"ComponentVersionRequirementMap", + "documentation":"

    The version requirements for the component's dependencies. AWS IoT Greengrass core devices get the version requirements from component recipes.

    AWS IoT Greengrass V2 uses semantic version constraints. For more information, see Semantic Versioning.

    " + } + }, + "documentation":"

    Contains information about a component that is a candidate to deploy to an AWS IoT Greengrass core device.

    " + }, + "ComponentCandidateList":{ + "type":"list", + "member":{"shape":"ComponentCandidate"} + }, + "ComponentConfigurationPath":{ + "type":"string", + "max":256, + "min":0 + }, + "ComponentConfigurationPathList":{ + "type":"list", + "member":{"shape":"ComponentConfigurationPath"} + }, + "ComponentConfigurationString":{ + "type":"string", + "max":65536, + "min":1 + }, + "ComponentConfigurationUpdate":{ + "type":"structure", + "members":{ + "merge":{ + "shape":"ComponentConfigurationString", + "documentation":"

    A serialized JSON string that contains the configuration object to merge to target devices. The core device merges this configuration with the component's existing configuration. If this is the first time a component deploys on a device, the core device merges this configuration with the component's default configuration. This means that the core device keeps its existing configuration for keys and values that you don't specify in this object. For more information, see Merge configuration updates in the AWS IoT Greengrass V2 Developer Guide.

    " + }, + "reset":{ + "shape":"ComponentConfigurationPathList", + "documentation":"

    The list of configuration nodes to reset to default values on target devices. Use JSON pointers to specify each node to reset. JSON pointers start with a forward slash (/) and use forward slashes to separate the key for each level in the object. For more information, see the JSON pointer specification and Reset configuration updates in the AWS IoT Greengrass V2 Developer Guide.

    " + } + }, + "documentation":"

    Contains information about a deployment's update to a component's configuration on Greengrass core devices. For more information, see Update component configurations in the AWS IoT Greengrass V2 Developer Guide.

    " + }, + "ComponentDependencyMap":{ + "type":"map", + "key":{"shape":"NonEmptyString"}, + "value":{"shape":"ComponentDependencyRequirement"} + }, + "ComponentDependencyRequirement":{ + "type":"structure", + "members":{ + "versionRequirement":{ + "shape":"NonEmptyString", + "documentation":"

    The component version requirement for the component dependency.

    AWS IoT Greengrass V2 uses semantic version constraints. For more information, see Semantic Versioning.

    " + }, + "dependencyType":{ + "shape":"ComponentDependencyType", + "documentation":"

    The type of this dependency. Choose from the following options:

    • SOFT – The component doesn't restart if the dependency changes state.

    • HARD – The component restarts if the dependency changes state.

    Default: HARD

    " + } + }, + "documentation":"

    Contains information about a component dependency for a Lambda function component.

    " + }, + "ComponentDependencyType":{ + "type":"string", + "enum":[ + "HARD", + "SOFT" + ] + }, + "ComponentDeploymentSpecification":{ + "type":"structure", + "members":{ + "componentVersion":{ + "shape":"ComponentVersionString", + "documentation":"

    The version of the component.

    " + }, + "configurationUpdate":{ + "shape":"ComponentConfigurationUpdate", + "documentation":"

    The configuration updates to deploy for the component. You can define reset updates and merge updates. A reset updates the keys that you specify to the default configuration for the component. A merge updates the core device's component configuration with the keys and values that you specify. The AWS IoT Greengrass Core software applies reset updates before it applies merge updates. For more information, see Update component configurations in the AWS IoT Greengrass V2 Developer Guide.

    " + }, + "runWith":{ + "shape":"ComponentRunWith", + "documentation":"

    The system user and group that the AWS IoT Greengrass Core software uses to run component processes on the core device. If you omit this parameter, the AWS IoT Greengrass Core software uses the system user and group that you configure for the core device. For more information, see Configure the user and group that run components in the AWS IoT Greengrass V2 Developer Guide.

    " + } + }, + "documentation":"

    Contains information about a component to deploy.

    " + }, + "ComponentDeploymentSpecifications":{ + "type":"map", + "key":{"shape":"NonEmptyString"}, + "value":{"shape":"ComponentDeploymentSpecification"} + }, + "ComponentLatestVersion":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ComponentVersionARN", + "documentation":"

    The ARN of the component version.

    " + }, + "componentVersion":{ + "shape":"ComponentVersionString", + "documentation":"

    The version of the component.

    " + }, + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time at which the component was created, expressed in ISO 8601 format.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the component version.

    " + }, + "publisher":{ + "shape":"NonEmptyString", + "documentation":"

    The publisher of the component version.

    " + }, + "platforms":{ + "shape":"ComponentPlatformList", + "documentation":"

    The platforms that the component version supports.

    " + } + }, + "documentation":"

    Contains information about the latest version of a component.

    " + }, + "ComponentList":{ + "type":"list", + "member":{"shape":"Component"} + }, + "ComponentNameString":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9-_.]+" + }, + "ComponentPlatform":{ + "type":"structure", + "members":{ + "name":{ + "shape":"NonEmptyString", + "documentation":"

    The friendly name of the platform. This name helps you identify the platform.

    If you omit this parameter, AWS IoT Greengrass creates a friendly name from the os and architecture of the platform.

    " + }, + "attributes":{ + "shape":"PlatformAttributesMap", + "documentation":"

    A dictionary of attributes for the platform. The AWS IoT Greengrass Core software defines the os and platform by default. You can specify additional platform attributes for a core device when you deploy the AWS IoT Greengrass nucleus component. For more information, see the AWS IoT Greengrass nucleus component in the AWS IoT Greengrass V2 Developer Guide.

    " + } + }, + "documentation":"

    Contains information about a platform that a component supports.

    " + }, + "ComponentPlatformList":{ + "type":"list", + "member":{"shape":"ComponentPlatform"} + }, + "ComponentRunWith":{ + "type":"structure", + "members":{ + "posixUser":{ + "shape":"NonEmptyString", + "documentation":"

    The POSIX system user and (optional) group to use to run this component. Specify the user and group separated by a colon (:) in the following format: user:group. The group is optional. If you don't specify a group, the AWS IoT Greengrass Core software uses the primary user for the group.

    " + } + }, + "documentation":"

    Contains information about the system user and group that the AWS IoT Greengrass Core software uses to run component processes on the core device. For more information, see Configure the user and group that run components in the AWS IoT Greengrass V2 Developer Guide.

    " + }, + "ComponentVersionARN":{ + "type":"string", + "pattern":"arn:aws(-cn|-us-gov)?:greengrass:[^:]+:(aws|[0-9]+):components:[^:]+:versions:[^:]+" + }, + "ComponentVersionList":{ + "type":"list", + "member":{"shape":"ComponentVersionListItem"} + }, + "ComponentVersionListItem":{ + "type":"structure", + "members":{ + "componentName":{ + "shape":"ComponentNameString", + "documentation":"

    The name of the component.

    " + }, + "componentVersion":{ + "shape":"ComponentVersionString", + "documentation":"

    The version of the component.

    " + }, + "arn":{ + "shape":"NonEmptyString", + "documentation":"

    The ARN of the component version.

    " + } + }, + "documentation":"

    Contains information about a component version in a list.

    " + }, + "ComponentVersionRequirementMap":{ + "type":"map", + "key":{"shape":"NonEmptyString"}, + "value":{"shape":"NonEmptyString"} + }, + "ComponentVersionString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[0-9a-zA-Z-.+]+" + }, + "ComponentVisibilityScope":{ + "type":"string", + "enum":[ + "PRIVATE", + "PUBLIC" + ] + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource that conflicts with the request.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of the resource that conflicts with the request.

    " + } + }, + "documentation":"

    Your request has conflicting operations. This can occur if you're trying to perform more than one operation on the same resource at the same time.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CoreDevice":{ + "type":"structure", + "members":{ + "coreDeviceThingName":{ + "shape":"CoreDeviceThingName", + "documentation":"

    The name of the core device. This is also the name of the AWS IoT thing.

    " + }, + "status":{ + "shape":"CoreDeviceStatus", + "documentation":"

    The status of the core device. Core devices can have the following statuses:

    • HEALTHY – The AWS IoT Greengrass Core software and all components run on the core device without issue.

    • UNHEALTHY – The AWS IoT Greengrass Core software or a component is in a failed state on the core device.

    " + }, + "lastStatusUpdateTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time at which the core device's status last updated, expressed in ISO 8601 format.

    " + } + }, + "documentation":"

    Contains information about an AWS IoT Greengrass core device, which is an AWS IoT thing that runs the AWS IoT Greengrass Core software.

    " + }, + "CoreDeviceArchitectureString":{ + "type":"string", + "max":255, + "min":1 + }, + "CoreDevicePlatformString":{ + "type":"string", + "max":255, + "min":1 + }, + "CoreDeviceStatus":{ + "type":"string", + "enum":[ + "HEALTHY", + "UNHEALTHY" + ] + }, + "CoreDeviceThingName":{ + "type":"string", + "max":128, + "min":1 + }, + "CoreDevicesList":{ + "type":"list", + "member":{"shape":"CoreDevice"} + }, + "CreateComponentVersionRequest":{ + "type":"structure", + "members":{ + "inlineRecipe":{ + "shape":"RecipeBlob", + "documentation":"

    The recipe to use to create the component. The recipe defines the component's metadata, parameters, dependencies, lifecycle, artifacts, and platform compatibility.

    You must specify either inlineRecipe or lambdaFunction.

    " + }, + "lambdaFunction":{ + "shape":"LambdaFunctionRecipeSource", + "documentation":"

    The parameters to create a component from a Lambda function.

    You must specify either inlineRecipe or lambdaFunction.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

    " + } + } + }, + "CreateComponentVersionResponse":{ + "type":"structure", + "required":[ + "componentName", + "componentVersion", + "creationTimestamp", + "status" + ], + "members":{ + "arn":{ + "shape":"ComponentVersionARN", + "documentation":"

    The ARN of the component version.

    " + }, + "componentName":{ + "shape":"ComponentNameString", + "documentation":"

    The name of the component.

    " + }, + "componentVersion":{ + "shape":"ComponentVersionString", + "documentation":"

    The version of the component.

    " + }, + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time at which the component was created, expressed in ISO 8601 format.

    " + }, + "status":{ + "shape":"CloudComponentStatus", + "documentation":"

    The status of the component version in AWS IoT Greengrass V2. This status is different from the status of the component on a core device.

    " + } + } + }, + "CreateDeploymentRequest":{ + "type":"structure", + "required":["targetArn"], + "members":{ + "targetArn":{ + "shape":"TargetARN", + "documentation":"

    The ARN of the target AWS IoT thing or thing group.

    " + }, + "deploymentName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the deployment.

    You can create deployments without names. If you create a deployment without a name, the AWS IoT Greengrass V2 console shows the deployment name as <targetType>:<targetName>, where targetType and targetName are the type and name of the deployment target.

    " + }, + "components":{ + "shape":"ComponentDeploymentSpecifications", + "documentation":"

    The components to deploy. This is a dictionary, where each key is the name of a component, and each key's value is the version and configuration to deploy for that component.

    " + }, + "iotJobConfiguration":{ + "shape":"DeploymentIoTJobConfiguration", + "documentation":"

    The job configuration for the deployment configuration. The job configuration specifies the rollout, timeout, and stop configurations for the deployment configuration.

    " + }, + "deploymentPolicies":{ + "shape":"DeploymentPolicies", + "documentation":"

    The deployment policies for the deployment. These policies define how the deployment updates components and handles failure.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

    " + } + } + }, + "CreateDeploymentResponse":{ + "type":"structure", + "members":{ + "deploymentId":{ + "shape":"NonEmptyString", + "documentation":"

    The ID of the deployment.

    " + }, + "iotJobId":{ + "shape":"NonEmptyString", + "documentation":"

    The ID of the AWS IoT job that applies the deployment to target devices.

    " + }, + "iotJobArn":{ + "shape":"IoTJobARN", + "documentation":"

    The ARN of the AWS IoT job that applies the deployment to target devices.

    " + } + } + }, + "DefaultMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "DeleteComponentRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ComponentVersionARN", + "documentation":"

    The ARN of the component version.

    ", + "location":"uri", + "locationName":"arn" + } + } + }, + "DeleteCoreDeviceRequest":{ + "type":"structure", + "required":["coreDeviceThingName"], + "members":{ + "coreDeviceThingName":{ + "shape":"CoreDeviceThingName", + "documentation":"

    The name of the core device. This is also the name of the AWS IoT thing.

    ", + "location":"uri", + "locationName":"coreDeviceThingName" + } + } + }, + "Deployment":{ + "type":"structure", + "members":{ + "targetArn":{ + "shape":"TargetARN", + "documentation":"

    The ARN of the target AWS IoT thing or thing group.

    " + }, + "revisionId":{ + "shape":"NonEmptyString", + "documentation":"

    The revision number of the deployment.

    " + }, + "deploymentId":{ + "shape":"NonEmptyString", + "documentation":"

    The ID of the deployment.

    " + }, + "deploymentName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the deployment.

    You can create deployments without names. If you create a deployment without a name, the AWS IoT Greengrass V2 console shows the deployment name as <targetType>:<targetName>, where targetType and targetName are the type and name of the deployment target.

    " + }, + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time at which the deployment was created, expressed in ISO 8601 format.

    " + }, + "deploymentStatus":{ + "shape":"DeploymentStatus", + "documentation":"

    The status of the deployment.

    " + }, + "isLatestForTarget":{ + "shape":"IsLatestForTarget", + "documentation":"

    Whether or not the deployment is the latest revision for its target.

    " + } + }, + "documentation":"

    Contains information about a deployment.

    " + }, + "DeploymentComponentUpdatePolicy":{ + "type":"structure", + "members":{ + "timeoutInSeconds":{ + "shape":"OptionalInteger", + "documentation":"

    The amount of time in seconds that each component on a device has to report that it's safe to update. If the component waits for longer than this timeout, then the deployment proceeds on the device.

    Default: 60

    ", + "box":true + }, + "action":{ + "shape":"DeploymentComponentUpdatePolicyAction", + "documentation":"

    Whether or not to notify components and wait for components to become safe to update. Choose from the following options:

    • NOTIFY_COMPONENTS – The deployment notifies each component before it stops and updates that component. Components can use the SubscribeToComponentUpdates IPC operation to receive these notifications. Then, components can respond with the DeferComponentUpdate IPC operation. For more information, see Create deployments in the AWS IoT Greengrass V2 Developer Guide.

    • SKIP_NOTIFY_COMPONENTS – The deployment doesn't notify components or wait for them to be safe to update.

    Default: NOTIFY_COMPONENTS

    " + } + }, + "documentation":"

    Contains information about a deployment's policy that defines when components are safe to update.

    Each component on a device can report whether or not it's ready to update. After a component and its dependencies are ready, they can apply the update in the deployment. You can configure whether or not the deployment notifies components of an update and waits for a response. You specify the amount of time each component has to respond to the update notification.

    " + }, + "DeploymentComponentUpdatePolicyAction":{ + "type":"string", + "enum":[ + "NOTIFY_COMPONENTS", + "SKIP_NOTIFY_COMPONENTS" + ] + }, + "DeploymentConfigurationValidationPolicy":{ + "type":"structure", + "members":{ + "timeoutInSeconds":{ + "shape":"OptionalInteger", + "documentation":"

    The amount of time in seconds that a component can validate its configuration updates. If the validation time exceeds this timeout, then the deployment proceeds for the device.

    Default: 30

    ", + "box":true + } + }, + "documentation":"

    Contains information about how long a component on a core device can validate its configuration updates before it times out. Components can use the SubscribeToValidateConfigurationUpdates IPC operation to receive notifications when a deployment specifies a configuration update. Then, components can respond with the SendConfigurationValidityReport IPC operation. For more information, see Create deployments in the AWS IoT Greengrass V2 Developer Guide.

    " + }, + "DeploymentFailureHandlingPolicy":{ + "type":"string", + "enum":[ + "ROLLBACK", + "DO_NOTHING" + ] + }, + "DeploymentHistoryFilter":{ + "type":"string", + "enum":[ + "ALL", + "LATEST_ONLY" + ] + }, + "DeploymentID":{"type":"string"}, + "DeploymentIoTJobConfiguration":{ + "type":"structure", + "members":{ + "jobExecutionsRolloutConfig":{ + "shape":"IoTJobExecutionsRolloutConfig", + "documentation":"

    The rollout configuration for the job. This configuration defines the rate at which the job rolls out to the fleet of target devices.

    " + }, + "abortConfig":{ + "shape":"IoTJobAbortConfig", + "documentation":"

    The stop configuration for the job. This configuration defines when and how to stop a job rollout.

    " + }, + "timeoutConfig":{ + "shape":"IoTJobTimeoutConfig", + "documentation":"

    The timeout configuration for the job. This configuration defines the amount of time each device has to complete the job.

    " + } + }, + "documentation":"

    Contains information about an AWS IoT job configuration.

    " + }, + "DeploymentList":{ + "type":"list", + "member":{"shape":"Deployment"} + }, + "DeploymentName":{"type":"string"}, + "DeploymentPolicies":{ + "type":"structure", + "members":{ + "failureHandlingPolicy":{ + "shape":"DeploymentFailureHandlingPolicy", + "documentation":"

    The failure handling policy for the configuration deployment. This policy defines what to do if the deployment fails.

    Default: ROLLBACK

    " + }, + "componentUpdatePolicy":{ + "shape":"DeploymentComponentUpdatePolicy", + "documentation":"

    The component update policy for the configuration deployment. This policy defines when it's safe to deploy the configuration to devices.

    " + }, + "configurationValidationPolicy":{ + "shape":"DeploymentConfigurationValidationPolicy", + "documentation":"

    The configuration validation policy for the configuration deployment. This policy defines how long each component has to validate its configuration updates.

    " + } + }, + "documentation":"

    Contains information about policies that define how a deployment updates components and handles failure.

    " + }, + "DeploymentStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "COMPLETED", + "CANCELED", + "FAILED", + "INACTIVE" + ] + }, + "DescribeComponentRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ComponentVersionARN", + "documentation":"

    The ARN of the component version.

    ", + "location":"uri", + "locationName":"arn" + } + } + }, + "DescribeComponentResponse":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ComponentVersionARN", + "documentation":"

    The ARN of the component version.

    " + }, + "componentName":{ + "shape":"ComponentNameString", + "documentation":"

    The name of the component.

    " + }, + "componentVersion":{ + "shape":"ComponentVersionString", + "documentation":"

    The version of the component.

    " + }, + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time at which the component was created, expressed in ISO 8601 format.

    " + }, + "publisher":{ + "shape":"PublisherString", + "documentation":"

    The publisher of the component version.

    " + }, + "description":{ + "shape":"DescriptionString", + "documentation":"

    The description of the component version.

    " + }, + "status":{ + "shape":"CloudComponentStatus", + "documentation":"

    The status of the component version in AWS IoT Greengrass V2. This status is different from the status of the component on a core device.

    " + }, + "platforms":{ + "shape":"ComponentPlatformList", + "documentation":"

    The platforms that the component version supports.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

    " + } + } + }, + "Description":{ + "type":"string", + "max":500, + "min":1 + }, + "DescriptionString":{"type":"string"}, + "EffectiveDeployment":{ + "type":"structure", + "required":[ + "deploymentId", + "deploymentName", + "targetArn", + "coreDeviceExecutionStatus", + "creationTimestamp", + "modifiedTimestamp" + ], + "members":{ + "deploymentId":{ + "shape":"DeploymentID", + "documentation":"

    The ID of the deployment.

    " + }, + "deploymentName":{ + "shape":"DeploymentName", + "documentation":"

    The name of the deployment.

    You can create deployments without names. If you create a deployment without a name, the AWS IoT Greengrass V2 console shows the deployment name as <targetType>:<targetName>, where targetType and targetName are the type and name of the deployment target.

    " + }, + "iotJobId":{ + "shape":"IoTJobId", + "documentation":"

    The ID of the AWS IoT job that applies the deployment to target devices.

    " + }, + "iotJobArn":{ + "shape":"IoTJobARN", + "documentation":"

    The ARN of the AWS IoT job that applies the deployment to target devices.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the deployment job.

    " + }, + "targetArn":{ + "shape":"TargetARN", + "documentation":"

    The ARN of the target AWS IoT thing or thing group.

    " + }, + "coreDeviceExecutionStatus":{ + "shape":"EffectiveDeploymentExecutionStatus", + "documentation":"

    The status of the deployment job on the AWS IoT Greengrass core device.

    " + }, + "reason":{ + "shape":"Reason", + "documentation":"

    The reason code for the update, if the job was updated.

    " + }, + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time at which the deployment was created, expressed in ISO 8601 format.

    " + }, + "modifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time at which the deployment job was last modified, expressed in ISO 8601 format.

    " + } + }, + "documentation":"

    Contains information about a deployment job that AWS IoT Greengrass sends to an AWS IoT Greengrass core device.

    " + }, + "EffectiveDeploymentExecutionStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "QUEUED", + "FAILED", + "COMPLETED", + "TIMED_OUT", + "CANCELED", + "REJECTED" + ] + }, + "EffectiveDeploymentsList":{ + "type":"list", + "member":{"shape":"EffectiveDeployment"} + }, + "FileSystemPath":{"type":"string"}, + "GGCVersion":{ + "type":"string", + "max":255, + "min":1 + }, + "GenericV2ARN":{ + "type":"string", + "pattern":"arn:aws(-cn|-us-gov)?:greengrass:[^:]+:(aws|[0-9]+):(components|deployments|coreDevices):.+" + }, + "GetComponentRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "recipeOutputFormat":{ + "shape":"RecipeOutputFormat", + "documentation":"

    The format of the recipe.

    ", + "location":"querystring", + "locationName":"recipeOutputFormat" + }, + "arn":{ + "shape":"ComponentVersionARN", + "documentation":"

    The ARN of the component version.

    ", + "location":"uri", + "locationName":"arn" + } + } + }, + "GetComponentResponse":{ + "type":"structure", + "required":[ + "recipeOutputFormat", + "recipe" + ], + "members":{ + "recipeOutputFormat":{ + "shape":"RecipeOutputFormat", + "documentation":"

    The format of the recipe.

    " + }, + "recipe":{ + "shape":"RecipeBlob", + "documentation":"

    The recipe of the component version.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

    " + } + } + }, + "GetComponentVersionArtifactRequest":{ + "type":"structure", + "required":[ + "arn", + "artifactName" + ], + "members":{ + "arn":{ + "shape":"ComponentVersionARN", + "documentation":"

    The ARN of the component version.

    ", + "location":"uri", + "locationName":"arn" + }, + "artifactName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the artifact.

    ", + "location":"uri", + "locationName":"artifactName" + } + } + }, + "GetComponentVersionArtifactResponse":{ + "type":"structure", + "required":["preSignedUrl"], + "members":{ + "preSignedUrl":{ + "shape":"NonEmptyString", + "documentation":"

    The URL to the artifact.

    " + } + } + }, + "GetCoreDeviceRequest":{ + "type":"structure", + "required":["coreDeviceThingName"], + "members":{ + "coreDeviceThingName":{ + "shape":"CoreDeviceThingName", + "documentation":"

    The name of the core device. This is also the name of the AWS IoT thing.

    ", + "location":"uri", + "locationName":"coreDeviceThingName" + } + } + }, + "GetCoreDeviceResponse":{ + "type":"structure", + "members":{ + "coreDeviceThingName":{ + "shape":"CoreDeviceThingName", + "documentation":"

    The name of the core device. This is also the name of the AWS IoT thing.

    " + }, + "coreVersion":{ + "shape":"GGCVersion", + "documentation":"

    The version of the AWS IoT Greengrass Core software that the core device runs. This version is equivalent to the version of the AWS IoT Greengrass nucleus component that runs on the core device. For more information, see the AWS IoT Greengrass nucleus component in the AWS IoT Greengrass V2 Developer Guide.

    " + }, + "platform":{ + "shape":"CoreDevicePlatformString", + "documentation":"

    The operating system platform that the core device runs.

    " + }, + "architecture":{ + "shape":"CoreDeviceArchitectureString", + "documentation":"

    The computer architecture of the core device.

    " + }, + "status":{ + "shape":"CoreDeviceStatus", + "documentation":"

    The status of the core device. The core device status can be:

    • HEALTHY – The AWS IoT Greengrass Core software and all components run on the core device without issue.

    • UNHEALTHY – The AWS IoT Greengrass Core software or a component is in a failed state on the core device.

    " + }, + "lastStatusUpdateTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time at which the core device's status last updated, expressed in ISO 8601 format.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

    " + } + } + }, + "GetDeploymentRequest":{ + "type":"structure", + "required":["deploymentId"], + "members":{ + "deploymentId":{ + "shape":"NonEmptyString", + "documentation":"

    The ID of the deployment.

    ", + "location":"uri", + "locationName":"deploymentId" + } + } + }, + "GetDeploymentResponse":{ + "type":"structure", + "members":{ + "targetArn":{ + "shape":"TargetARN", + "documentation":"

    The ARN of the target AWS IoT thing or thing group.

    " + }, + "revisionId":{ + "shape":"NonEmptyString", + "documentation":"

    The revision number of the deployment.

    " + }, + "deploymentId":{ + "shape":"NonEmptyString", + "documentation":"

    The ID of the deployment.

    " + }, + "deploymentName":{ + "shape":"NullableString", + "documentation":"

    The name of the deployment.

    You can create deployments without names. If you create a deployment without a name, the AWS IoT Greengrass V2 console shows the deployment name as <targetType>:<targetName>, where targetType and targetName are the type and name of the deployment target.

    " + }, + "deploymentStatus":{ + "shape":"DeploymentStatus", + "documentation":"

    The status of the deployment.

    " + }, + "iotJobId":{ + "shape":"NullableString", + "documentation":"

    The ID of the AWS IoT job that applies the deployment to target devices.

    " + }, + "iotJobArn":{ + "shape":"IoTJobARN", + "documentation":"

    The ARN of the AWS IoT job that applies the deployment to target devices.

    " + }, + "components":{ + "shape":"ComponentDeploymentSpecifications", + "documentation":"

    The components to deploy. This is a dictionary, where each key is the name of a component, and each key's value is the version and configuration to deploy for that component.

    " + }, + "deploymentPolicies":{ + "shape":"DeploymentPolicies", + "documentation":"

    The deployment policies for the deployment. These policies define how the deployment updates components and handles failure.

    " + }, + "iotJobConfiguration":{ + "shape":"DeploymentIoTJobConfiguration", + "documentation":"

    The job configuration for the deployment configuration. The job configuration specifies the rollout, timeout, and stop configurations for the deployment configuration.

    " + }, + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time at which the deployment was created, expressed in ISO 8601 format.

    " + }, + "isLatestForTarget":{ + "shape":"IsLatestForTarget", + "documentation":"

    Whether or not the deployment is the latest revision for its target.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

    " + } + } + }, + "InstalledComponent":{ + "type":"structure", + "members":{ + "componentName":{ + "shape":"ComponentNameString", + "documentation":"

    The name of the component.

    " + }, + "componentVersion":{ + "shape":"ComponentVersionString", + "documentation":"

    The version of the component.

    " + }, + "lifecycleState":{ + "shape":"InstalledComponentLifecycleState", + "documentation":"

    The lifecycle state of the component.

    " + }, + "lifecycleStateDetails":{ + "shape":"LifecycleStateDetails", + "documentation":"

    The details about the lifecycle state of the component.

    " + }, + "isRoot":{ + "shape":"IsRoot", + "documentation":"

    Whether or not the component is a root component.

    " + } + }, + "documentation":"

    Contains information about a component on an AWS IoT Greengrass core device.

    " + }, + "InstalledComponentLifecycleState":{ + "type":"string", + "enum":[ + "NEW", + "INSTALLED", + "STARTING", + "RUNNING", + "STOPPING", + "ERRORED", + "BROKEN", + "FINISHED" + ] + }, + "InstalledComponentList":{ + "type":"list", + "member":{"shape":"InstalledComponent"} + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

    The amount of time to wait before you retry the request.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    AWS IoT Greengrass can't process your request right now. Try again later.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "IoTJobARN":{ + "type":"string", + "pattern":"arn:aws(-cn|-us-gov)?:iot:[^:]+:[0-9]+:job/.+" + }, + "IoTJobAbortAction":{ + "type":"string", + "enum":["CANCEL"] + }, + "IoTJobAbortConfig":{ + "type":"structure", + "required":["criteriaList"], + "members":{ + "criteriaList":{ + "shape":"IoTJobAbortCriteriaList", + "documentation":"

    The list of criteria that define when and how to cancel the configuration deployment.

    " + } + }, + "documentation":"

    Contains a list of criteria that define when and how to cancel a configuration deployment.

    " + }, + "IoTJobAbortCriteria":{ + "type":"structure", + "required":[ + "failureType", + "action", + "thresholdPercentage", + "minNumberOfExecutedThings" + ], + "members":{ + "failureType":{ + "shape":"IoTJobExecutionFailureType", + "documentation":"

    The type of job deployment failure that can cancel a job.

    " + }, + "action":{ + "shape":"IoTJobAbortAction", + "documentation":"

    The action to perform when the criteria are met.

    " + }, + "thresholdPercentage":{ + "shape":"IoTJobAbortThresholdPercentage", + "documentation":"

    The minimum percentage of failureType failures that occur before the job can cancel.

    This parameter supports up to two digits after the decimal (for example, you can specify 10.9 or 10.99, but not 10.999).

    " + }, + "minNumberOfExecutedThings":{ + "shape":"IoTJobMinimumNumberOfExecutedThings", + "documentation":"

    The minimum number of things that receive the configuration before the job can cancel.

    " + } + }, + "documentation":"

    Contains criteria that define when and how to cancel a job.

    The deployment stops if the following conditions are true:

    1. The number of things that receive the deployment exceeds the minNumberOfExecutedThings.

    2. The percentage of failures with type failureType exceeds the thresholdPercentage.

    " + }, + "IoTJobAbortCriteriaList":{ + "type":"list", + "member":{"shape":"IoTJobAbortCriteria"}, + "min":1 + }, + "IoTJobAbortThresholdPercentage":{ + "type":"double", + "max":100 + }, + "IoTJobExecutionFailureType":{ + "type":"string", + "enum":[ + "FAILED", + "REJECTED", + "TIMED_OUT", + "ALL" + ] + }, + "IoTJobExecutionsRolloutConfig":{ + "type":"structure", + "members":{ + "exponentialRate":{ + "shape":"IoTJobExponentialRolloutRate", + "documentation":"

    The exponential rate to increase the job rollout rate.

    " + }, + "maximumPerMinute":{ + "shape":"IoTJobMaxExecutionsPerMin", + "documentation":"

    The maximum number of devices that receive a pending job notification, per minute.

    ", + "box":true + } + }, + "documentation":"

    Contains information about the rollout configuration for a job. This configuration defines the rate at which the job deploys a configuration to a fleet of target devices.

    " + }, + "IoTJobExponentialRolloutRate":{ + "type":"structure", + "required":[ + "baseRatePerMinute", + "incrementFactor", + "rateIncreaseCriteria" + ], + "members":{ + "baseRatePerMinute":{ + "shape":"IoTJobRolloutBaseRatePerMinute", + "documentation":"

    The minimum number of devices that receive a pending job notification, per minute, when the job starts. This parameter defines the initial rollout rate of the job.

    " + }, + "incrementFactor":{ + "shape":"IoTJobRolloutIncrementFactor", + "documentation":"

    The exponential factor to increase the rollout rate for the job.

    This parameter supports up to one digit after the decimal (for example, you can specify 1.5, but not 1.55).

    " + }, + "rateIncreaseCriteria":{ + "shape":"IoTJobRateIncreaseCriteria", + "documentation":"

    The criteria to increase the rollout rate for the job.

    " + } + }, + "documentation":"

    Contains information about an exponential rollout rate for a configuration deployment job.

    " + }, + "IoTJobId":{ + "type":"string", + "max":255, + "min":1 + }, + "IoTJobInProgressTimeoutInMinutes":{"type":"long"}, + "IoTJobMaxExecutionsPerMin":{ + "type":"integer", + "max":1000, + "min":1 + }, + "IoTJobMinimumNumberOfExecutedThings":{ + "type":"integer", + "min":1 + }, + "IoTJobNumberOfThings":{ + "type":"integer", + "min":1 + }, + "IoTJobRateIncreaseCriteria":{ + "type":"structure", + "members":{ + "numberOfNotifiedThings":{ + "shape":"IoTJobNumberOfThings", + "documentation":"

    The number of devices to receive the job notification before the rollout rate increases.

    ", + "box":true + }, + "numberOfSucceededThings":{ + "shape":"IoTJobNumberOfThings", + "documentation":"

    The number of devices to successfully run the configuration job before the rollout rate increases.

    ", + "box":true + } + }, + "documentation":"

    Contains information about criteria to meet before a job increases its rollout rate. Specify either numberOfNotifiedThings or numberOfSucceededThings.

    " + }, + "IoTJobRolloutBaseRatePerMinute":{ + "type":"integer", + "max":1000, + "min":1 + }, + "IoTJobRolloutIncrementFactor":{ + "type":"double", + "max":5, + "min":1 + }, + "IoTJobTimeoutConfig":{ + "type":"structure", + "members":{ + "inProgressTimeoutInMinutes":{ + "shape":"IoTJobInProgressTimeoutInMinutes", + "documentation":"

    The amount of time, in minutes, that devices have to complete the job. The timer starts when the job status is set to IN_PROGRESS. If the job status doesn't change to a terminal state before the time expires, then the job status is set to TIMED_OUT.

    The timeout interval must be between 1 minute and 7 days (10080 minutes).

    ", + "box":true + } + }, + "documentation":"

    Contains information about the timeout configuration for a job.

    " + }, + "IsLatestForTarget":{"type":"boolean"}, + "IsRoot":{"type":"boolean"}, + "LambdaContainerParams":{ + "type":"structure", + "members":{ + "memorySizeInKB":{ + "shape":"OptionalInteger", + "documentation":"

    The memory size of the container, expressed in kilobytes.

    Default: 16384 (16 MB)

    ", + "box":true + }, + "mountROSysfs":{ + "shape":"OptionalBoolean", + "documentation":"

    Whether or not the container can read information from the device's /sys folder.

    Default: false

    ", + "box":true + }, + "volumes":{ + "shape":"LambdaVolumeList", + "documentation":"

    The list of volumes that the container can access.

    " + }, + "devices":{ + "shape":"LambdaDeviceList", + "documentation":"

    The list of system devices that the container can access.

    " + } + }, + "documentation":"

    Contains information about a container in which AWS Lambda functions run on AWS IoT Greengrass core devices.

    " + }, + "LambdaDeviceList":{ + "type":"list", + "member":{"shape":"LambdaDeviceMount"} + }, + "LambdaDeviceMount":{ + "type":"structure", + "required":["path"], + "members":{ + "path":{ + "shape":"FileSystemPath", + "documentation":"

    The mount path for the device in the file system.

    " + }, + "permission":{ + "shape":"LambdaFilesystemPermission", + "documentation":"

    The permission to access the device: read/only (ro) or read/write (rw).

    Default: ro

    " + }, + "addGroupOwner":{ + "shape":"OptionalBoolean", + "documentation":"

    Whether or not to add the component's system user as an owner of the device.

    Default: false

    ", + "box":true + } + }, + "documentation":"

    Contains information about a device that Linux processes in a container can access.

    " + }, + "LambdaEnvironmentVariables":{ + "type":"map", + "key":{"shape":"NonEmptyString"}, + "value":{"shape":"String"} + }, + "LambdaEventSource":{ + "type":"structure", + "required":[ + "topic", + "type" + ], + "members":{ + "topic":{ + "shape":"TopicString", + "documentation":"

    The topic to which to subscribe to receive event messages.

    " + }, + "type":{ + "shape":"LambdaEventSourceType", + "documentation":"

    The type of event source. Choose from the following options:

    • PUB_SUB – Subscribe to local publish/subscribe messages. This event source type doesn't support MQTT wildcards (+ and #) in the event source topic.

    • IOT_CORE – Subscribe to AWS IoT Core MQTT messages. This event source type supports MQTT wildcards (+ and #) in the event source topic.

    " + } + }, + "documentation":"

    Contains information about an event source for an AWS Lambda function. The event source defines the topics on which this Lambda function subscribes to receive messages that run the function.

    " + }, + "LambdaEventSourceList":{ + "type":"list", + "member":{"shape":"LambdaEventSource"} + }, + "LambdaEventSourceType":{ + "type":"string", + "enum":[ + "PUB_SUB", + "IOT_CORE" + ] + }, + "LambdaExecArg":{"type":"string"}, + "LambdaExecArgsList":{ + "type":"list", + "member":{"shape":"LambdaExecArg"} + }, + "LambdaExecutionParameters":{ + "type":"structure", + "members":{ + "eventSources":{ + "shape":"LambdaEventSourceList", + "documentation":"

    The list of event sources to which to subscribe to receive work messages. The Lambda function runs when it receives a message from an event source. You can subscribe this function to local publish/subscribe messages and AWS IoT Core MQTT messages.

    " + }, + "maxQueueSize":{ + "shape":"OptionalInteger", + "documentation":"

    The maximum size of the message queue for the Lambda function component. The AWS IoT Greengrass core stores messages in a FIFO (first-in-first-out) queue until it can run the Lambda function to consume each message.

    ", + "box":true + }, + "maxInstancesCount":{ + "shape":"OptionalInteger", + "documentation":"

    The maximum number of instances that a non-pinned Lambda function can run at the same time.

    ", + "box":true + }, + "maxIdleTimeInSeconds":{ + "shape":"OptionalInteger", + "documentation":"

    The maximum amount of time in seconds that a non-pinned Lambda function can idle before the AWS IoT Greengrass Core software stops its process.

    ", + "box":true + }, + "timeoutInSeconds":{ + "shape":"OptionalInteger", + "documentation":"

    The maximum amount of time in seconds that the Lambda function can process a work item.

    ", + "box":true + }, + "statusTimeoutInSeconds":{ + "shape":"OptionalInteger", + "documentation":"

    The interval in seconds at which a pinned (also known as long-lived) Lambda function component sends status updates to the Lambda manager component.

    ", + "box":true + }, + "pinned":{ + "shape":"OptionalBoolean", + "documentation":"

    Whether or not the Lambda function is pinned, or long-lived.

    • A pinned Lambda function starts when AWS IoT Greengrass starts and keeps running in its own container.

    • A non-pinned Lambda function starts only when it receives a work item and exits after it idles for maxIdleTimeInSeconds. If the function has multiple work items, the AWS IoT Greengrass Core software creates multiple instances of the function.

    Default: true

    ", + "box":true + }, + "inputPayloadEncodingType":{ + "shape":"LambdaInputPayloadEncodingType", + "documentation":"

    The encoding type that the Lambda function supports.

    Default: json

    " + }, + "execArgs":{ + "shape":"LambdaExecArgsList", + "documentation":"

    The list of arguments to pass to the Lambda function when it runs.

    " + }, + "environmentVariables":{ + "shape":"LambdaEnvironmentVariables", + "documentation":"

    The map of environment variables that are available to the Lambda function when it runs.

    " + }, + "linuxProcessParams":{ + "shape":"LambdaLinuxProcessParams", + "documentation":"

    The parameters for the Linux process that contains the Lambda function.

    " + } + }, + "documentation":"

    Contains parameters for a Lambda function that runs on AWS IoT Greengrass.

    " + }, + "LambdaFilesystemPermission":{ + "type":"string", + "enum":[ + "ro", + "rw" + ] + }, + "LambdaFunctionARNWithVersionNumber":{ + "type":"string", + "pattern":"arn:aws(-cn|-us-gov)?:lambda:[^:]+:[0-9]+:function:[a-zA-Z0-9-_]+:[0-9]+" + }, + "LambdaFunctionRecipeSource":{ + "type":"structure", + "required":["lambdaArn"], + "members":{ + "lambdaArn":{ + "shape":"LambdaFunctionARNWithVersionNumber", + "documentation":"

    The ARN of the Lambda function. The ARN must include the version of the function to import. You can't use version aliases like $LATEST.

    " + }, + "componentName":{ + "shape":"ComponentNameString", + "documentation":"

    The name of the component.

    Defaults to the name of the Lambda function.

    " + }, + "componentVersion":{ + "shape":"ComponentVersionString", + "documentation":"

    The version of the component.

    Defaults to the version of the Lambda function as a semantic version. For example, if your function version is 3, the component version becomes 3.0.0.

    " + }, + "componentPlatforms":{ + "shape":"ComponentPlatformList", + "documentation":"

    The platforms that the component version supports.

    " + }, + "componentDependencies":{ + "shape":"ComponentDependencyMap", + "documentation":"

    The component versions on which this Lambda function component depends.

    " + }, + "componentLambdaParameters":{ + "shape":"LambdaExecutionParameters", + "documentation":"

    The system and runtime parameters for the Lambda function as it runs on the AWS IoT Greengrass core device.

    " + } + }, + "documentation":"

    Contains information about an AWS Lambda function to import to create a component.

    " + }, + "LambdaInputPayloadEncodingType":{ + "type":"string", + "enum":[ + "json", + "binary" + ] + }, + "LambdaIsolationMode":{ + "type":"string", + "enum":[ + "GreengrassContainer", + "NoContainer" + ] + }, + "LambdaLinuxProcessParams":{ + "type":"structure", + "members":{ + "isolationMode":{ + "shape":"LambdaIsolationMode", + "documentation":"

    The isolation mode for the process that contains the Lambda function. The process can run in an isolated runtime environment inside the AWS IoT Greengrass container, or as a regular process outside any container.

    Default: GreengrassContainer

    " + }, + "containerParams":{ + "shape":"LambdaContainerParams", + "documentation":"

    The parameters for the container in which the Lambda function runs.

    " + } + }, + "documentation":"

    Contains parameters for a Linux process that contains an AWS Lambda function.

    " + }, + "LambdaVolumeList":{ + "type":"list", + "member":{"shape":"LambdaVolumeMount"} + }, + "LambdaVolumeMount":{ + "type":"structure", + "required":[ + "sourcePath", + "destinationPath" + ], + "members":{ + "sourcePath":{ + "shape":"FileSystemPath", + "documentation":"

    The path to the physical volume in the file system.

    " + }, + "destinationPath":{ + "shape":"FileSystemPath", + "documentation":"

    The path to the logical volume in the file system.

    " + }, + "permission":{ + "shape":"LambdaFilesystemPermission", + "documentation":"

    The permission to access the volume: read/only (ro) or read/write (rw).

    Default: ro

    " + }, + "addGroupOwner":{ + "shape":"OptionalBoolean", + "documentation":"

    Whether or not to add the AWS IoT Greengrass user group as an owner of the volume.

    Default: false

    ", + "box":true + } + }, + "documentation":"

    Contains information about a volume that Linux processes in a container can access. When you define a volume, the AWS IoT Greengrass Core software mounts the source files to the destination inside the container.

    " + }, + "LifecycleStateDetails":{ + "type":"string", + "max":1000, + "min":1 + }, + "ListComponentVersionsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ComponentARN", + "documentation":"

    The ARN of the component.

    ", + "location":"uri", + "locationName":"arn" + }, + "maxResults":{ + "shape":"DefaultMaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "box":true, + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListComponentVersionsResponse":{ + "type":"structure", + "members":{ + "componentVersions":{ + "shape":"ComponentVersionList", + "documentation":"

    A list of versions that exist for the component.

    " + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListComponentsRequest":{ + "type":"structure", + "members":{ + "scope":{ + "shape":"ComponentVisibilityScope", + "documentation":"

    The scope of the components to list.

    Default: PRIVATE

    ", + "location":"querystring", + "locationName":"scope" + }, + "maxResults":{ + "shape":"DefaultMaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "box":true, + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListComponentsResponse":{ + "type":"structure", + "members":{ + "components":{ + "shape":"ComponentList", + "documentation":"

    A list that summarizes each component.

    " + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token for the next set of results, or null if there are no additional results.
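    A minimal pagination sketch over ListComponents with the generated GreengrassV2Client, looping on nextToken until it comes back null; the PRIVATE scope and page size are illustrative values.

```java
import software.amazon.awssdk.services.greengrassv2.GreengrassV2Client;
import software.amazon.awssdk.services.greengrassv2.model.Component;
import software.amazon.awssdk.services.greengrassv2.model.ComponentVisibilityScope;
import software.amazon.awssdk.services.greengrassv2.model.ListComponentsRequest;
import software.amazon.awssdk.services.greengrassv2.model.ListComponentsResponse;

public class ListComponentsExample {
    public static void main(String[] args) {
        try (GreengrassV2Client client = GreengrassV2Client.create()) {
            String nextToken = null;
            do {
                ListComponentsResponse page = client.listComponents(ListComponentsRequest.builder()
                        .scope(ComponentVisibilityScope.PRIVATE) // the default scope; shown for clarity
                        .maxResults(50)                          // illustrative page size
                        .nextToken(nextToken)
                        .build());
                for (Component component : page.components()) {
                    System.out.println(component.componentName());
                }
                nextToken = page.nextToken(); // null when there are no additional results
            } while (nextToken != null);
        }
    }
}
```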

    " + } + } + }, + "ListCoreDevicesRequest":{ + "type":"structure", + "members":{ + "thingGroupArn":{ + "shape":"ThingGroupARN", + "documentation":"

    The ARN of the AWS IoT thing group by which to filter. If you specify this parameter, the list includes only core devices that are members of this thing group.

    ", + "location":"querystring", + "locationName":"thingGroupArn" + }, + "status":{ + "shape":"CoreDeviceStatus", + "documentation":"

    The core device status by which to filter. If you specify this parameter, the list includes only core devices that have this status. Choose one of the following options:

    • HEALTHY – The AWS IoT Greengrass Core software and all components run on the core device without issue.

    • UNHEALTHY – The AWS IoT Greengrass Core software or a component is in a failed state on the core device.

    ", + "location":"querystring", + "locationName":"status" + }, + "maxResults":{ + "shape":"DefaultMaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "box":true, + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListCoreDevicesResponse":{ + "type":"structure", + "members":{ + "coreDevices":{ + "shape":"CoreDevicesList", + "documentation":"

    A list that summarizes each core device.

    " + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListDeploymentsRequest":{ + "type":"structure", + "members":{ + "targetArn":{ + "shape":"TargetARN", + "documentation":"

    The ARN of the target AWS IoT thing or thing group.

    ", + "location":"querystring", + "locationName":"targetArn" + }, + "historyFilter":{ + "shape":"DeploymentHistoryFilter", + "documentation":"

    The filter for the list of deployments. Choose one of the following options:

    • ALL – The list includes all deployments.

    • LATEST_ONLY – The list includes only the latest revision of each deployment.

    Default: LATEST_ONLY
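    A short sketch of listing only the latest revision of each deployment for a target thing group; the target ARN is a placeholder.

```java
import software.amazon.awssdk.services.greengrassv2.GreengrassV2Client;
import software.amazon.awssdk.services.greengrassv2.model.Deployment;
import software.amazon.awssdk.services.greengrassv2.model.DeploymentHistoryFilter;
import software.amazon.awssdk.services.greengrassv2.model.ListDeploymentsRequest;
import software.amazon.awssdk.services.greengrassv2.model.ListDeploymentsResponse;

public class ListDeploymentsExample {
    public static void main(String[] args) {
        // Placeholder thing-group ARN; substitute one of your own deployment targets.
        String targetArn = "arn:aws:iot:us-east-1:123456789012:thinggroup/MyGreengrassGroup";
        try (GreengrassV2Client client = GreengrassV2Client.create()) {
            ListDeploymentsResponse response = client.listDeployments(ListDeploymentsRequest.builder()
                    .targetArn(targetArn)
                    .historyFilter(DeploymentHistoryFilter.LATEST_ONLY) // the default; ALL returns every revision
                    .build());
            for (Deployment deployment : response.deployments()) {
                System.out.println(deployment.deploymentId());
            }
        }
    }
}
```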

    ", + "location":"querystring", + "locationName":"historyFilter" + }, + "maxResults":{ + "shape":"DefaultMaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "box":true, + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListDeploymentsResponse":{ + "type":"structure", + "members":{ + "deployments":{ + "shape":"DeploymentList", + "documentation":"

    A list that summarizes each deployment.

    " + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListEffectiveDeploymentsRequest":{ + "type":"structure", + "required":["coreDeviceThingName"], + "members":{ + "coreDeviceThingName":{ + "shape":"CoreDeviceThingName", + "documentation":"

    The name of the core device. This is also the name of the AWS IoT thing.

    ", + "location":"uri", + "locationName":"coreDeviceThingName" + }, + "maxResults":{ + "shape":"DefaultMaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "box":true, + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListEffectiveDeploymentsResponse":{ + "type":"structure", + "members":{ + "effectiveDeployments":{ + "shape":"EffectiveDeploymentsList", + "documentation":"

    A list that summarizes each deployment on the core device.

    " + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListInstalledComponentsRequest":{ + "type":"structure", + "required":["coreDeviceThingName"], + "members":{ + "coreDeviceThingName":{ + "shape":"CoreDeviceThingName", + "documentation":"

    The name of the core device. This is also the name of the AWS IoT thing.

    ", + "location":"uri", + "locationName":"coreDeviceThingName" + }, + "maxResults":{ + "shape":"DefaultMaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "box":true, + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListInstalledComponentsResponse":{ + "type":"structure", + "members":{ + "installedComponents":{ + "shape":"InstalledComponentList", + "documentation":"

    A list that summarizes each component on the core device.

    " + }, + "nextToken":{ + "shape":"NextTokenString", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"GenericV2ARN", + "documentation":"

    The ARN of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.

    " + } + } + }, + "NextTokenString":{"type":"string"}, + "NonEmptyString":{ + "type":"string", + "min":1 + }, + "NullableString":{"type":"string"}, + "OptionalBoolean":{"type":"boolean"}, + "OptionalInteger":{"type":"integer"}, + "PlatformAttributesMap":{ + "type":"map", + "key":{"shape":"NonEmptyString"}, + "value":{"shape":"NonEmptyString"} + }, + "PublisherString":{"type":"string"}, + "Reason":{"type":"string"}, + "RecipeBlob":{"type":"blob"}, + "RecipeOutputFormat":{ + "type":"string", + "enum":[ + "JSON", + "YAML" + ] + }, + "ResolveComponentCandidatesRequest":{ + "type":"structure", + "required":[ + "platform", + "componentCandidates" + ], + "members":{ + "platform":{ + "shape":"ComponentPlatform", + "documentation":"

    The platform to use to resolve compatible components.

    " + }, + "componentCandidates":{ + "shape":"ComponentCandidateList", + "documentation":"

    The list of components to resolve.

    " + } + } + }, + "ResolveComponentCandidatesResponse":{ + "type":"structure", + "members":{ + "resolvedComponentVersions":{ + "shape":"ResolvedComponentVersionsList", + "documentation":"

    A list of components that meet the requirements that you specify in the request. This list includes each component's recipe that you can use to install the component.

    " + } + } + }, + "ResolvedComponentVersion":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ComponentVersionARN", + "documentation":"

    The ARN of the component version.

    " + }, + "componentName":{ + "shape":"ComponentNameString", + "documentation":"

    The name of the component.

    " + }, + "componentVersion":{ + "shape":"ComponentVersionString", + "documentation":"

    The version of the component.

    " + }, + "recipe":{ + "shape":"RecipeBlob", + "documentation":"

    The recipe of the component version.

    " + } + }, + "documentation":"

    Contains information about a component version that is compatible to run on an AWS IoT Greengrass core device.

    " + }, + "ResolvedComponentVersionsList":{ + "type":"list", + "member":{"shape":"ResolvedComponentVersion"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource that isn't found.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of the resource that isn't found.

    " + } + }, + "documentation":"

    The requested resource can't be found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "RetryAfterSeconds":{"type":"integer"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "quotaCode", + "serviceCode" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource that exceeds the service quota.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of the resource that exceeds the service quota.

    " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

    The code for the quota in Service Quotas.

    " + }, + "serviceCode":{ + "shape":"String", + "documentation":"

    The code for the service in Service Quotas.

    " + } + }, + "documentation":"

    Your request exceeds a service quota. For example, you might have reached the maximum number of components that you can create.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "String":{"type":"string"}, + "StringMap":{ + "type":"map", + "key":{"shape":"NonEmptyString"}, + "value":{"shape":"NonEmptyString"} + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"GenericV2ARN", + "documentation":"

    The ARN of the resource to tag.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the AWS IoT Greengrass V2 Developer Guide.
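    A hedged sketch of tagging a Greengrass V2 resource through the generated client; the resource ARN and tag keys/values are placeholders.

```java
import java.util.Map;
import software.amazon.awssdk.services.greengrassv2.GreengrassV2Client;
import software.amazon.awssdk.services.greengrassv2.model.TagResourceRequest;

public class TagResourceExample {
    public static void main(String[] args) {
        // Placeholder component-version ARN; any taggable Greengrass V2 resource ARN works here.
        String resourceArn = "arn:aws:greengrass:us-east-1:123456789012:components:com.example.HelloWorld:versions:1.0.0";
        try (GreengrassV2Client client = GreengrassV2Client.create()) {
            client.tagResource(TagResourceRequest.builder()
                    .resourceArn(resourceArn)
                    .tags(Map.of("team", "edge-platform", "env", "production")) // up to 50 tags per resource
                    .build());
        }
    }
}
```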

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "TargetARN":{ + "type":"string", + "pattern":"arn:aws(-cn|-us-gov)?:iot:[^:]+:[0-9]+:(thing|thinggroup)/.+" + }, + "ThingGroupARN":{ + "type":"string", + "pattern":"arn:aws(-cn|-us-gov)?:iot:[^:]+:[0-9]+:thinggroup/.+" + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "quotaCode":{ + "shape":"String", + "documentation":"

    The code for the quota in Service Quotas.

    " + }, + "serviceCode":{ + "shape":"String", + "documentation":"

    The code for the service in Service Quotas.

    " + }, + "retryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

    The amount of time to wait before you retry the request.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    Your request exceeded a request rate quota. For example, you might have exceeded the number of times that you can retrieve device or deployment status per second.
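    Where this exception surfaces in the generated Java client, the retryAfterSeconds member above maps to the Retry-After header. The sketch below assumes that accessor name; note that the SDK's default retry policy already retries throttled calls, so this only illustrates where the hint is exposed.

```java
import software.amazon.awssdk.services.greengrassv2.GreengrassV2Client;
import software.amazon.awssdk.services.greengrassv2.model.ListCoreDevicesRequest;
import software.amazon.awssdk.services.greengrassv2.model.ThrottlingException;

public class ThrottlingRetryExample {
    public static void main(String[] args) throws InterruptedException {
        try (GreengrassV2Client client = GreengrassV2Client.create()) {
            try {
                client.listCoreDevices(ListCoreDevicesRequest.builder().build());
            } catch (ThrottlingException e) {
                // retryAfterSeconds carries the Retry-After value; it may be null if the service omits it.
                Integer waitSeconds = e.retryAfterSeconds();
                Thread.sleep(1000L * (waitSeconds != null ? waitSeconds : 1));
                client.listCoreDevices(ListCoreDevicesRequest.builder().build()); // single retry, for illustration only
            }
        }
    }
}
```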

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "TopicString":{"type":"string"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"GenericV2ARN", + "documentation":"

    The ARN of the resource to untag.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    A list of keys for tags to remove from the resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason for the validation exception.

    " + }, + "fields":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    The list of fields that failed to validate.

    " + } + }, + "documentation":"

    The request isn't valid. This can occur if your request contains malformed JSON or unsupported characters.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name of the exception field.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    The message of the exception field.

    " + } + }, + "documentation":"

    Contains information about a validation exception field.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "UNKNOWN_OPERATION", + "CANNOT_PARSE", + "FIELD_VALIDATION_FAILED", + "OTHER" + ] + } + }, + "documentation":"

    AWS IoT Greengrass brings local compute, messaging, data management, sync, and ML inference capabilities to edge devices. This enables devices to collect and analyze data closer to the source of information, react autonomously to local events, and communicate securely with each other on local networks. Local devices can also communicate securely with AWS IoT Core and export IoT data to the AWS Cloud. AWS IoT Greengrass developers can use AWS Lambda functions and components to create and deploy applications to fleets of edge devices for local operation.

    AWS IoT Greengrass Version 2 provides a new major version of the AWS IoT Greengrass Core software, new APIs, and a new console. Use this API reference to learn how to use the AWS IoT Greengrass V2 API operations to manage components, deployments, and core devices.

    For more information, see What is AWS IoT Greengrass? in the AWS IoT Greengrass V2 Developer Guide.
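    A minimal sketch of constructing the client that would be generated from this model and issuing a first call; the region is an illustrative choice and credentials come from the SDK's usual resolution rules.

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.greengrassv2.GreengrassV2Client;
import software.amazon.awssdk.services.greengrassv2.model.ListCoreDevicesRequest;

public class GreengrassV2ClientExample {
    public static void main(String[] args) {
        // Credentials are resolved from the default provider chain (environment, profile, instance role, ...).
        try (GreengrassV2Client greengrass = GreengrassV2Client.builder()
                .region(Region.US_EAST_1)
                .build()) {
            greengrass.listCoreDevices(ListCoreDevicesRequest.builder().build())
                    .coreDevices()
                    .forEach(device -> System.out.println(device.coreDeviceThingName()));
        }
    }
}
```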

    " +} diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index 128bc11942d2..b23271c50f4c 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + healthlake + AWS Java SDK :: Services :: Health Lake + The AWS Java SDK for Health Lake module holds the client classes that are used for + communicating with Health Lake. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.healthlake + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/healthlake/src/main/resources/codegen-resources/paginators-1.json b/services/healthlake/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..c134891ac251 --- /dev/null +++ b/services/healthlake/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,9 @@ +{ + "pagination": { + "ListFHIRDatastores": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/healthlake/src/main/resources/codegen-resources/service-2.json b/services/healthlake/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..2a8fffc7e263 --- /dev/null +++ b/services/healthlake/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,797 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-07-01", + "endpointPrefix":"healthlake", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"HealthLake", + "serviceFullName":"Amazon HealthLake", + "serviceId":"HealthLake", + "signatureVersion":"v4", + "signingName":"healthlake", + "targetPrefix":"HealthLake", + "uid":"healthlake-2017-07-01" + }, + "operations":{ + "CreateFHIRDatastore":{ + "name":"CreateFHIRDatastore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFHIRDatastoreRequest"}, + "output":{"shape":"CreateFHIRDatastoreResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a Data Store that can ingest and export FHIR formatted data.
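    A hedged sketch of creating a Data Store with the Java client generated from this model. The class and method names (HealthLakeClient, createFHIRDatastore, CreateFhirDatastoreRequest, FHIRVersion) are assumptions based on the SDK's usual codegen conventions; in particular, the casing of "FHIR" in the generated names may differ.

```java
import software.amazon.awssdk.services.healthlake.HealthLakeClient;
import software.amazon.awssdk.services.healthlake.model.CreateFhirDatastoreRequest;
import software.amazon.awssdk.services.healthlake.model.CreateFhirDatastoreResponse;
import software.amazon.awssdk.services.healthlake.model.FHIRVersion;

public class CreateDatastoreExample {
    public static void main(String[] args) {
        try (HealthLakeClient healthLake = HealthLakeClient.create()) {
            // NOTE: class/method names are assumed from the service model; verify against the generated SDK.
            CreateFhirDatastoreResponse response = healthLake.createFHIRDatastore(CreateFhirDatastoreRequest.builder()
                    .datastoreName("my-fhir-datastore")   // optional, user-generated name
                    .datastoreTypeVersion(FHIRVersion.R4) // R4 is the only supported FHIR version
                    .build());
            System.out.println(response.datastoreId() + " -> " + response.datastoreStatus());
        }
    }
}
```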

    " + }, + "DeleteFHIRDatastore":{ + "name":"DeleteFHIRDatastore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFHIRDatastoreRequest"}, + "output":{"shape":"DeleteFHIRDatastoreResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes a Data Store.

    " + }, + "DescribeFHIRDatastore":{ + "name":"DescribeFHIRDatastore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFHIRDatastoreRequest"}, + "output":{"shape":"DescribeFHIRDatastoreResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets the properties associated with the FHIR Data Store, including the Data Store ID, Data Store ARN, Data Store name, Data Store status, creation time, Data Store type version, and Data Store endpoint.

    " + }, + "DescribeFHIRExportJob":{ + "name":"DescribeFHIRExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFHIRExportJobRequest"}, + "output":{"shape":"DescribeFHIRExportJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Displays the properties of a FHIR export job, including the ID, ARN, name, and the status of the job.

    " + }, + "DescribeFHIRImportJob":{ + "name":"DescribeFHIRImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFHIRImportJobRequest"}, + "output":{"shape":"DescribeFHIRImportJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Displays the properties of a FHIR import job, including the ID, ARN, name, and the status of the job.

    " + }, + "ListFHIRDatastores":{ + "name":"ListFHIRDatastores", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFHIRDatastoresRequest"}, + "output":{"shape":"ListFHIRDatastoresResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all FHIR Data Stores that are in the user’s account, regardless of Data Store status.

    " + }, + "StartFHIRExportJob":{ + "name":"StartFHIRExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartFHIRExportJobRequest"}, + "output":{"shape":"StartFHIRExportJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Begins a FHIR export job.

    " + }, + "StartFHIRImportJob":{ + "name":"StartFHIRImportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartFHIRImportJobRequest"}, + "output":{"shape":"StartFHIRImportJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Begins a FHIR Import job.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    Access is denied. Your account is not authorized to perform this operation.

    ", + "exception":true + }, + "BoundedLengthString":{ + "type":"string", + "max":5000, + "min":1, + "pattern":"[\\P{M}\\p{M}]{1,5000}" + }, + "ClientTokenString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    The Data Store is in a transition state and the user-requested action cannot be performed.

    ", + "exception":true + }, + "CreateFHIRDatastoreRequest":{ + "type":"structure", + "required":["DatastoreTypeVersion"], + "members":{ + "DatastoreName":{ + "shape":"DatastoreName", + "documentation":"

    The user generated name for the Data Store.

    " + }, + "DatastoreTypeVersion":{ + "shape":"FHIRVersion", + "documentation":"

    The FHIR version of the Data Store. The only supported version is R4.

    " + }, + "PreloadDataConfig":{ + "shape":"PreloadDataConfig", + "documentation":"

    Optional parameter to preload data upon creation of the Data Store. Currently, the only supported preloaded data is synthetic data generated from Synthea.

    " + }, + "ClientToken":{ + "shape":"ClientTokenString", + "documentation":"

    Optional user provided token used for ensuring idempotency.

    ", + "idempotencyToken":true + } + } + }, + "CreateFHIRDatastoreResponse":{ + "type":"structure", + "required":[ + "DatastoreId", + "DatastoreArn", + "DatastoreStatus", + "DatastoreEndpoint" + ], + "members":{ + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS-generated Data Store id. This id is in the output from the initial Data Store creation call.

    " + }, + "DatastoreArn":{ + "shape":"DatastoreArn", + "documentation":"

    The datastore ARN is generated during the creation of the Data Store and can be found in the output from the initial Data Store creation call.

    " + }, + "DatastoreStatus":{ + "shape":"DatastoreStatus", + "documentation":"

    The status of the FHIR Data Store. Possible statuses are ‘CREATING’, ‘ACTIVE’, ‘DELETING’, ‘DELETED’.

    " + }, + "DatastoreEndpoint":{ + "shape":"BoundedLengthString", + "documentation":"

    The AWS endpoint for the created Data Store. For preview, only us-east-1 endpoints are supported.

    " + } + } + }, + "DatastoreArn":{ + "type":"string", + "pattern":"^arn:aws((-us-gov)|(-iso)|(-iso-b)|(-cn))?:healthlake:[a-zA-Z0-9-]+:[0-9]{12}:datastore/.+?" + }, + "DatastoreFilter":{ + "type":"structure", + "members":{ + "DatastoreName":{ + "shape":"DatastoreName", + "documentation":"

    Allows the user to filter Data Store results by name.

    " + }, + "DatastoreStatus":{ + "shape":"DatastoreStatus", + "documentation":"

    Allows the user to filter Data Store results by status.

    " + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

    A filter that allows the user to set cutoff dates for records. All Data Stores created before the specified date will be included in the results.

    " + }, + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

    A filter that allows the user to set cutoff dates for records. All Data Stores created after the specified date will be included in the results.

    " + } + }, + "documentation":"

    The filters applied to a Data Store query.
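    A sketch of applying this filter when listing Data Stores; as above, the generated class names (DatastoreFilter, ListFhirDatastoresRequest) are assumed from the model, and the cutoff date is illustrative.

```java
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import software.amazon.awssdk.services.healthlake.HealthLakeClient;
import software.amazon.awssdk.services.healthlake.model.DatastoreFilter;
import software.amazon.awssdk.services.healthlake.model.DatastoreStatus;
import software.amazon.awssdk.services.healthlake.model.ListFhirDatastoresRequest;

public class ListDatastoresExample {
    public static void main(String[] args) {
        try (HealthLakeClient healthLake = HealthLakeClient.create()) {
            // NOTE: generated class names are assumed from the model and may differ in casing.
            DatastoreFilter filter = DatastoreFilter.builder()
                    .datastoreStatus(DatastoreStatus.ACTIVE)
                    .createdAfter(Instant.now().minus(30, ChronoUnit.DAYS)) // only recently created Data Stores
                    .build();
            healthLake.listFHIRDatastores(ListFhirDatastoresRequest.builder()
                            .filter(filter)
                            .maxResults(10)
                            .build())
                    .datastorePropertiesList()
                    .forEach(d -> System.out.println(d.datastoreName() + " " + d.datastoreEndpoint()));
        }
    }
}
```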

    " + }, + "DatastoreId":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "DatastoreName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "DatastoreProperties":{ + "type":"structure", + "required":[ + "DatastoreId", + "DatastoreArn", + "DatastoreStatus", + "DatastoreTypeVersion", + "DatastoreEndpoint" + ], + "members":{ + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS-generated ID number for the Data Store.

    " + }, + "DatastoreArn":{ + "shape":"DatastoreArn", + "documentation":"

    The Amazon Resource Name used in the creation of the Data Store.

    " + }, + "DatastoreName":{ + "shape":"DatastoreName", + "documentation":"

    The user-generated name for the Data Store.

    " + }, + "DatastoreStatus":{ + "shape":"DatastoreStatus", + "documentation":"

    The status of the Data Store. Possible statuses are 'CREATING', 'ACTIVE', 'DELETING', or 'DELETED'.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The time that a Data Store was created.

    " + }, + "DatastoreTypeVersion":{ + "shape":"FHIRVersion", + "documentation":"

    The FHIR version. Only R4 version data is supported.

    " + }, + "DatastoreEndpoint":{ + "shape":"String", + "documentation":"

    The AWS endpoint for the Data Store. Each Data Store will have its own endpoint, with the Data Store ID in the endpoint URL.

    " + }, + "PreloadDataConfig":{ + "shape":"PreloadDataConfig", + "documentation":"

    The preloaded data configuration for the Data Store. Only data preloaded from Synthea is supported.

    " + } + }, + "documentation":"

    Displays the properties of the Data Store, including the ID, Arn, name, and the status of the Data Store.

    " + }, + "DatastorePropertiesList":{ + "type":"list", + "member":{"shape":"DatastoreProperties"} + }, + "DatastoreStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "DELETED" + ] + }, + "DeleteFHIRDatastoreRequest":{ + "type":"structure", + "members":{ + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS-generated ID for the Data Store to be deleted.

    " + } + } + }, + "DeleteFHIRDatastoreResponse":{ + "type":"structure", + "required":[ + "DatastoreId", + "DatastoreArn", + "DatastoreStatus", + "DatastoreEndpoint" + ], + "members":{ + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS-generated ID for the Data Store to be deleted.

    " + }, + "DatastoreArn":{ + "shape":"DatastoreArn", + "documentation":"

    The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission.

    " + }, + "DatastoreStatus":{ + "shape":"DatastoreStatus", + "documentation":"

    The status of the Data Store that the user has requested to be deleted.

    " + }, + "DatastoreEndpoint":{ + "shape":"BoundedLengthString", + "documentation":"

    The AWS endpoint for the Data Store the user has requested to be deleted.

    " + } + } + }, + "DescribeFHIRDatastoreRequest":{ + "type":"structure", + "members":{ + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS-generated Data Store id. This is part of the ‘CreateFHIRDatastore’ output.

    " + } + } + }, + "DescribeFHIRDatastoreResponse":{ + "type":"structure", + "required":["DatastoreProperties"], + "members":{ + "DatastoreProperties":{ + "shape":"DatastoreProperties", + "documentation":"

    All properties associated with a Data Store, including the Data Store ID, Data Store ARN, Data Store name, Data Store status, creation time, Data Store type version, and Data Store endpoint.

    " + } + } + }, + "DescribeFHIRExportJobRequest":{ + "type":"structure", + "required":[ + "DatastoreId", + "JobId" + ], + "members":{ + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS generated ID for the Data Store from which files are being exported for an export job.

    " + }, + "JobId":{ + "shape":"JobId", + "documentation":"

    The AWS generated ID for an export job.

    " + } + } + }, + "DescribeFHIRExportJobResponse":{ + "type":"structure", + "required":["ExportJobProperties"], + "members":{ + "ExportJobProperties":{ + "shape":"ExportJobProperties", + "documentation":"

    Displays the properties of the export job, including the ID, Arn, Name, and the status of the job.

    " + } + } + }, + "DescribeFHIRImportJobRequest":{ + "type":"structure", + "required":[ + "DatastoreId", + "JobId" + ], + "members":{ + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS-generated ID of the Data Store.

    " + }, + "JobId":{ + "shape":"JobId", + "documentation":"

    The AWS-generated job ID.

    " + } + } + }, + "DescribeFHIRImportJobResponse":{ + "type":"structure", + "required":["ImportJobProperties"], + "members":{ + "ImportJobProperties":{ + "shape":"ImportJobProperties", + "documentation":"

    The properties of the Import job request, including the ID, ARN, name, and the status of the job.

    " + } + } + }, + "ExportJobProperties":{ + "type":"structure", + "required":[ + "JobId", + "JobStatus", + "SubmitTime", + "DatastoreId", + "OutputDataConfig" + ], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The AWS generated ID for an export job.

    " + }, + "JobName":{ + "shape":"JobName", + "documentation":"

    The user generated name for an export job.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    The status of a FHIR export job. Possible statuses are SUBMITTED, IN_PROGRESS, COMPLETED, or FAILED.

    " + }, + "SubmitTime":{ + "shape":"Timestamp", + "documentation":"

    The time an export job was initiated.

    " + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

    The time an export job completed.

    " + }, + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS generated ID for the Data Store from which files are being exported for an export job.

    " + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

    The output data configuration that was supplied when the export job was created.

    " + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

    The Amazon Resource Name used during the initiation of the job.

    " + }, + "Message":{ + "shape":"Message", + "documentation":"

    An explanation of any errors that may have occurred during the export job.

    " + } + }, + "documentation":"

    The properties of a FHIR export job, including the ID, ARN, name, and the status of the job.

    " + }, + "FHIRVersion":{ + "type":"string", + "enum":["R4"] + }, + "IamRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+" + }, + "ImportJobProperties":{ + "type":"structure", + "required":[ + "JobId", + "JobStatus", + "SubmitTime", + "DatastoreId", + "InputDataConfig" + ], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The AWS-generated id number for the Import job.

    " + }, + "JobName":{ + "shape":"JobName", + "documentation":"

    The user-generated name for an Import job.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    The job status for an Import job. Possible statuses are SUBMITTED, IN_PROGRESS, COMPLETED, or FAILED.

    " + }, + "SubmitTime":{ + "shape":"Timestamp", + "documentation":"

    The time that the Import job was submitted for processing.

    " + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

    The time that the Import job was completed.

    " + }, + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The datastore id used when the Import job was created.

    " + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

    The input data configuration that was supplied when the Import job was created.

    " + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

    The Amazon Resource Name (ARN) that gives Amazon HealthLake access to your input data.

    " + }, + "Message":{ + "shape":"Message", + "documentation":"

    An explanation of any errors that may have occurred during the FHIR import job.

    " + } + }, + "documentation":"

    Displays the properties of the import job, including the ID, Arn, Name, and the status of the job.

    " + }, + "InputDataConfig":{ + "type":"structure", + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

    The S3Uri is the user specified S3 location of the FHIR data to be imported into Amazon HealthLake.

    " + } + }, + "documentation":"

    The input properties for an import job.

    ", + "union":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    An unknown error occurred in the service.

    ", + "exception":true, + "fault":true + }, + "JobId":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "JobName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "JobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "COMPLETED", + "FAILED" + ] + }, + "ListFHIRDatastoresRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"DatastoreFilter", + "documentation":"

    Lists all filters associated with a FHIR Data Store request.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Fetches the next page of Data Stores when results are paginated.

    " + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

    The maximum number of Data Stores returned in a single page of a ListFHIRDatastoresRequest call.

    " + } + } + }, + "ListFHIRDatastoresResponse":{ + "type":"structure", + "required":["DatastorePropertiesList"], + "members":{ + "DatastorePropertiesList":{ + "shape":"DatastorePropertiesList", + "documentation":"

    All properties associated with the listed Data Stores.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Pagination token that can be used to retrieve the next page of results.

    " + } + } + }, + "MaxResultsInteger":{ + "type":"integer", + "max":500, + "min":1 + }, + "Message":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "NextToken":{ + "type":"string", + "max":8192, + "pattern":"\\p{ASCII}{0,8192}" + }, + "OutputDataConfig":{ + "type":"structure", + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

    The S3Uri is the user specified S3 location to which data will be exported from a FHIR Data Store.

    " + } + }, + "documentation":"

    The output data configuration that was supplied when the export job was created.

    ", + "union":true + }, + "PreloadDataConfig":{ + "type":"structure", + "required":["PreloadDataType"], + "members":{ + "PreloadDataType":{ + "shape":"PreloadDataType", + "documentation":"

    The type of preloaded data. Only Synthea preloaded data is supported.

    " + } + }, + "documentation":"

    The input properties for the preloaded Data Store. Only data preloaded from Synthea is supported.

    " + }, + "PreloadDataType":{ + "type":"string", + "enum":["SYNTHEA"] + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    The requested Data Store was not found.

    ", + "exception":true + }, + "S3Uri":{ + "type":"string", + "max":1024, + "pattern":"s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?" + }, + "StartFHIRExportJobRequest":{ + "type":"structure", + "required":[ + "OutputDataConfig", + "DatastoreId", + "DataAccessRoleArn", + "ClientToken" + ], + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

    The user generated name for an export job.

    " + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

    The output data configuration that was supplied when the export job was created.

    " + }, + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS generated ID for the Data Store from which files are being exported for an export job.

    " + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

    The Amazon Resource Name used during the initiation of the job.

    " + }, + "ClientToken":{ + "shape":"ClientTokenString", + "documentation":"

    An optional user provided token used for ensuring idempotency.

    ", + "idempotencyToken":true + } + } + }, + "StartFHIRExportJobResponse":{ + "type":"structure", + "required":[ + "JobId", + "JobStatus" + ], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The AWS generated ID for an export job.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    The status of a FHIR export job. Possible statuses are SUBMITTED, IN_PROGRESS, COMPLETED, or FAILED.

    " + }, + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS generated ID for the Data Store from which files are being exported for an export job.

    " + } + } + }, + "StartFHIRImportJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "DatastoreId", + "DataAccessRoleArn", + "ClientToken" + ], + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

    The name of the FHIR Import job in the StartFHIRImportJob request.

    " + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

    The input properties of the FHIR Import job in the StartFHIRImportJob request.

    " + }, + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS-generated Data Store ID.

    " + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

    The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission.

    " + }, + "ClientToken":{ + "shape":"ClientTokenString", + "documentation":"

    Optional user provided token used for ensuring idempotency.

    ", + "idempotencyToken":true + } + } + }, + "StartFHIRImportJobResponse":{ + "type":"structure", + "required":[ + "JobId", + "JobStatus" + ], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The AWS-generated job ID.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    The status of an import job.

    " + }, + "DatastoreId":{ + "shape":"DatastoreId", + "documentation":"

    The AWS-generated Data Store ID.

    " + } + } + }, + "String":{ + "type":"string", + "max":10000, + "pattern":"[\\P{M}\\p{M}]{0,10000}" + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    The user has exceeded their maximum number of allowed calls to the given API.

    ", + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    The user input parameter was invalid.

    ", + "exception":true + } + }, + "documentation":"

    Amazon HealthLake is a HIPAA eligible service that allows customers to store, transform, query, and analyze their FHIR-formatted data in a consistent fashion in the cloud.

    " +} diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml new file mode 100644 index 000000000000..c88c55942396 --- /dev/null +++ b/services/honeycode/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + honeycode + AWS Java SDK :: Services :: Honeycode + The AWS Java SDK for Honeycode module holds the client classes that are used for + communicating with Honeycode. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.honeycode + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/honeycode/src/main/resources/codegen-resources/paginators-1.json b/services/honeycode/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..19ba884c5e5c --- /dev/null +++ b/services/honeycode/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,27 @@ +{ + "pagination": { + "ListTableColumns": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "tableColumns" + }, + "ListTableRows": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "rows" + }, + "ListTables": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "tables" + }, + "QueryTableRows": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "rows" + } + } +} diff --git a/services/honeycode/src/main/resources/codegen-resources/service-2.json b/services/honeycode/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..dfca581ebdd0 --- /dev/null +++ b/services/honeycode/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1623 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-03-01", + "endpointPrefix":"honeycode", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Honeycode", + "serviceFullName":"Amazon Honeycode", + "serviceId":"Honeycode", + "signatureVersion":"v4", + "signingName":"honeycode", + "uid":"honeycode-2020-03-01" + }, + "operations":{ + "BatchCreateTableRows":{ + "name":"BatchCreateTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchcreate" + }, + "input":{"shape":"BatchCreateTableRowsRequest"}, + "output":{"shape":"BatchCreateTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    The BatchCreateTableRows API allows you to create one or more rows at the end of a table in a workbook. The API allows you to specify the values to set in some or all of the columns in the new rows.

    If a column is not explicitly set in a specific row, then the column level formula specified in the table will be applied to the new row. If there is no column level formula but the last row of the table has a formula, then that formula will be copied down to the new row. If there is no column level formula and no formula in the last row of the table, then that column will be left blank for the new rows.
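    A minimal sketch of adding one row through the generated Honeycode client; the workbook, table, and column IDs are placeholders, and the CreateRowData/CellInput member names (cellsToCreate, fact) are assumptions based on this model's conventions.

```java
import java.util.List;
import java.util.Map;
import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.BatchCreateTableRowsRequest;
import software.amazon.awssdk.services.honeycode.model.BatchCreateTableRowsResponse;
import software.amazon.awssdk.services.honeycode.model.CellInput;
import software.amazon.awssdk.services.honeycode.model.CreateRowData;

public class CreateRowsExample {
    public static void main(String[] args) {
        // Placeholder IDs; use the workbook, table, and column IDs from your own workbook.
        String workbookId = "11111111-2222-3333-4444-555555555555";
        String tableId = "66666666-7777-8888-9999-000000000000";
        String nameColumnId = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee";
        try (HoneycodeClient honeycode = HoneycodeClient.create()) {
            CreateRowData row = CreateRowData.builder()
                    .batchItemId("row-1") // unique within this request; echoed back in createdRows/failedBatchItems
                    .cellsToCreate(Map.of(nameColumnId, CellInput.builder().fact("First item").build()))
                    .build();
            // An optional clientRequestToken can also be supplied on the request for idempotency.
            BatchCreateTableRowsResponse response = honeycode.batchCreateTableRows(BatchCreateTableRowsRequest.builder()
                    .workbookId(workbookId)
                    .tableId(tableId)
                    .rowsToCreate(List.of(row))
                    .build());
            System.out.println("Created row ids: " + response.createdRows());
        }
    }
}
```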

    " + }, + "BatchDeleteTableRows":{ + "name":"BatchDeleteTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchdelete" + }, + "input":{"shape":"BatchDeleteTableRowsRequest"}, + "output":{"shape":"BatchDeleteTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    The BatchDeleteTableRows API allows you to delete one or more rows from a table in a workbook. You need to specify the ids of the rows that you want to delete from the table.

    " + }, + "BatchUpdateTableRows":{ + "name":"BatchUpdateTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchupdate" + }, + "input":{"shape":"BatchUpdateTableRowsRequest"}, + "output":{"shape":"BatchUpdateTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    The BatchUpdateTableRows API allows you to update one or more rows in a table in a workbook.

    You can specify the values to set in some or all of the columns in the table for the specified rows. If a column is not explicitly specified in a particular row, then that column will not be updated for that row. To clear out the data in a specific cell, you need to set the value as an empty string (\"\").

    " + }, + "BatchUpsertTableRows":{ + "name":"BatchUpsertTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchupsert" + }, + "input":{"shape":"BatchUpsertTableRowsRequest"}, + "output":{"shape":"BatchUpsertTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    The BatchUpsertTableRows API allows you to upsert one or more rows in a table. The upsert operation takes a filter expression as input and evaluates it to find matching rows on the destination table. If matching rows are found, it will update the cells in the matching rows to new values specified in the request. If no matching rows are found, a new row is added at the end of the table and the cells in that row are set to the new values specified in the request.

    You can specify the values to set in some or all of the columns in the table for the matching or newly appended rows. If a column is not explicitly specified for a particular row, then that column will not be updated for that row. To clear out the data in a specific cell, you need to set the value as an empty string (\"\").

    " + }, + "DescribeTableDataImportJob":{ + "name":"DescribeTableDataImportJob", + "http":{ + "method":"GET", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/import/{jobId}" + }, + "input":{"shape":"DescribeTableDataImportJobRequest"}, + "output":{"shape":"DescribeTableDataImportJobResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    The DescribeTableDataImportJob API allows you to retrieve the status and details of a table data import job.

    " + }, + "GetScreenData":{ + "name":"GetScreenData", + "http":{ + "method":"POST", + "requestUri":"/screendata" + }, + "input":{"shape":"GetScreenDataRequest"}, + "output":{"shape":"GetScreenDataResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    The GetScreenData API allows retrieval of data from a screen in a Honeycode app. The API allows setting local variables in the screen to filter, sort or otherwise affect what will be displayed on the screen.

    " + }, + "InvokeScreenAutomation":{ + "name":"InvokeScreenAutomation", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/apps/{appId}/screens/{screenId}/automations/{automationId}" + }, + "input":{"shape":"InvokeScreenAutomationRequest"}, + "output":{"shape":"InvokeScreenAutomationResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AutomationExecutionException"}, + {"shape":"AutomationExecutionTimeoutException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

    The InvokeScreenAutomation API allows invoking an action defined in a screen in a Honeycode app. The API allows setting local variables, which can then be used in the automation being invoked. This allows automating the Honeycode app interactions to write, update or delete data in the workbook.

    " + }, + "ListTableColumns":{ + "name":"ListTableColumns", + "http":{ + "method":"GET", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/columns" + }, + "input":{"shape":"ListTableColumnsRequest"}, + "output":{"shape":"ListTableColumnsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    The ListTableColumns API allows you to retrieve a list of all the columns in a table in a workbook.

    " + }, + "ListTableRows":{ + "name":"ListTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/list" + }, + "input":{"shape":"ListTableRowsRequest"}, + "output":{"shape":"ListTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    The ListTableRows API allows you to retrieve a list of all the rows in a table in a workbook.

    " + }, + "ListTables":{ + "name":"ListTables", + "http":{ + "method":"GET", + "requestUri":"/workbooks/{workbookId}/tables" + }, + "input":{"shape":"ListTablesRequest"}, + "output":{"shape":"ListTablesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    The ListTables API allows you to retrieve a list of all the tables in a workbook.

    " + }, + "QueryTableRows":{ + "name":"QueryTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/query" + }, + "input":{"shape":"QueryTableRowsRequest"}, + "output":{"shape":"QueryTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    The QueryTableRows API allows you to use a filter formula to query for specific rows in a table.
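    A hedged sketch of querying rows with a filter formula; the IDs are placeholders, the formula is hypothetical (consult the Honeycode formula reference for exact syntax), and the filterFormula/Filter.formula names are assumed from this model's conventions.

```java
import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.Filter;
import software.amazon.awssdk.services.honeycode.model.QueryTableRowsRequest;
import software.amazon.awssdk.services.honeycode.model.QueryTableRowsResponse;

public class QueryRowsExample {
    public static void main(String[] args) {
        // Placeholder IDs for a workbook and a table assumed to be named "Tasks".
        String workbookId = "11111111-2222-3333-4444-555555555555";
        String tableId = "66666666-7777-8888-9999-000000000000";
        try (HoneycodeClient honeycode = HoneycodeClient.create()) {
            QueryTableRowsResponse response = honeycode.queryTableRows(QueryTableRowsRequest.builder()
                    .workbookId(workbookId)
                    .tableId(tableId)
                    .filterFormula(Filter.builder()
                            .formula("=FILTER(Tasks, \"Tasks[Status] = %\", \"Open\")") // hypothetical formula
                            .build())
                    .maxResults(50)
                    .build());
            response.rows().forEach(row -> System.out.println(row.rowId()));
        }
    }
}
```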

    " + }, + "StartTableDataImportJob":{ + "name":"StartTableDataImportJob", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/import" + }, + "input":{"shape":"StartTableDataImportJobRequest"}, + "output":{"shape":"StartTableDataImportJobResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    The StartTableDataImportJob API allows you to start an import job on a table. This API will only return the id of the job that was started. To find out the status of the import request, you need to call the DescribeTableDataImportJob API.
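    Since StartTableDataImportJob returns only the job ID, a typical pattern is to poll DescribeTableDataImportJob until the job leaves the SUBMITTED and IN_PROGRESS states. The sketch below assumes the response exposes a jobStatus member with those values; the IDs are placeholders, and jobId is the value returned by a previous StartTableDataImportJob call.

```java
import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.DescribeTableDataImportJobRequest;
import software.amazon.awssdk.services.honeycode.model.DescribeTableDataImportJobResponse;

public class ImportJobPollingExample {
    public static void main(String[] args) throws InterruptedException {
        // Placeholder IDs; jobId comes from an earlier StartTableDataImportJob response.
        String workbookId = "11111111-2222-3333-4444-555555555555";
        String tableId = "66666666-7777-8888-9999-000000000000";
        String jobId = "abcdefgh12345678";
        try (HoneycodeClient honeycode = HoneycodeClient.create()) {
            DescribeTableDataImportJobResponse status;
            do {
                Thread.sleep(5_000); // simple fixed delay between polls, for illustration
                status = honeycode.describeTableDataImportJob(DescribeTableDataImportJobRequest.builder()
                        .workbookId(workbookId)
                        .tableId(tableId)
                        .jobId(jobId)
                        .build());
                // jobStatus is assumed to expose the SUBMITTED / IN_PROGRESS / COMPLETED / FAILED states.
                System.out.println("Import job status: " + status.jobStatusAsString());
            } while ("SUBMITTED".equals(status.jobStatusAsString())
                    || "IN_PROGRESS".equals(status.jobStatusAsString()));
        }
    }
}
```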

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You do not have sufficient access to perform this action. Check that the workbook is owned by you and your IAM policy allows access to the resource in the request.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AutomationExecutionException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The automation execution did not end successfully.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "AutomationExecutionTimeoutException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The automation execution timed out.

    ", + "error":{ + "httpStatusCode":504, + "senderFault":true + }, + "exception":true + }, + "AwsUserArn":{ + "type":"string", + "max":2048, + "min":20 + }, + "BatchCreateTableRowsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId", + "rowsToCreate" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook where the new rows are being added.

    If a workbook with the specified ID could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the table where the new rows are being added.

    If a table with the specified ID could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"tableId" + }, + "rowsToCreate":{ + "shape":"CreateRowDataList", + "documentation":"

    The list of rows to create at the end of the table. Each item in this list needs to have a batch item id to uniquely identify the element in the request and the cells to create for that row. You need to specify at least one item in this list.

    Note that if one of the column ids in any of the rows in the request does not exist in the table, then the request fails and no updates are made to the table.

    " + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    The request token for performing the batch create operation. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the operation again.

    Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

    " + } + } + }, + "BatchCreateTableRowsResult":{ + "type":"structure", + "required":[ + "workbookCursor", + "createdRows" + ], + "members":{ + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

    The updated workbook cursor after adding the new rows at the end of the table.

    " + }, + "createdRows":{ + "shape":"CreatedRowsMap", + "documentation":"

    The map of batch item id to the row id that was created for that item.

    " + }, + "failedBatchItems":{ + "shape":"FailedBatchItems", + "documentation":"

    The list of batch items in the request that could not be added to the table. Each element in this list contains one item from the request that could not be added to the table along with the reason why that item could not be added.

    " + } + } + }, + "BatchDeleteTableRowsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId", + "rowIds" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook where the rows are being deleted.

    If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the table where the rows are being deleted.

    If a table with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"tableId" + }, + "rowIds":{ + "shape":"RowIdList", + "documentation":"

    The list of row ids to delete from the table. You need to specify at least one row id in this list.

    Note that if one of the row ids provided in the request does not exist in the table, then the request fails and no rows are deleted from the table.

    " + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    The request token for performing the delete action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

    Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

    " + } + } + }, + "BatchDeleteTableRowsResult":{ + "type":"structure", + "required":["workbookCursor"], + "members":{ + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

    The updated workbook cursor after deleting the rows from the table.

    " + }, + "failedBatchItems":{ + "shape":"FailedBatchItems", + "documentation":"

    The list of row ids in the request that could not be deleted from the table. Each element in this list contains one row id from the request that could not be deleted along with the reason why that item could not be deleted.

    " + } + } + }, + "BatchErrorMessage":{ + "type":"string", + "pattern":"^(?!\\s*$).+" + }, + "BatchItemId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^(?!\\s*$).+" + }, + "BatchUpdateTableRowsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId", + "rowsToUpdate" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook where the rows are being updated.

    If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the table where the rows are being updated.

    If a table with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"tableId" + }, + "rowsToUpdate":{ + "shape":"UpdateRowDataList", + "documentation":"

    The list of rows to update in the table. Each item in this list needs to contain the row id to update along with the map of column id to cell values for each column in that row that needs to be updated. You need to specify at least one row in this list, and for each row, you need to specify at least one column to update.

    Note that if one of the row or column ids in the request does not exist in the table, then the request fails and no updates are made to the table.

    " + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

    Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

    " + } + } + }, + "BatchUpdateTableRowsResult":{ + "type":"structure", + "required":["workbookCursor"], + "members":{ + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

    The updated workbook cursor after updating the rows in the table.

    " + }, + "failedBatchItems":{ + "shape":"FailedBatchItems", + "documentation":"

    The list of batch items in the request that could not be updated in the table. Each element in this list contains one item from the request that could not be updated in the table along with the reason why that item could not be updated.

    " + } + } + }, + "BatchUpsertTableRowsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId", + "rowsToUpsert" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook where the rows are being upserted.

    If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the table where the rows are being upserted.

    If a table with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"tableId" + }, + "rowsToUpsert":{ + "shape":"UpsertRowDataList", + "documentation":"

    The list of rows to upsert in the table. Each item in this list needs to have a batch item id to uniquely identify the element in the request, a filter expression to find the rows to update for that element and the cell values to set for each column in the upserted rows. You need to specify at least one item in this list.

    Note that if one of the filter formulas in the request fails to evaluate because of an error or one of the column ids in any of the rows does not exist in the table, then the request fails and no updates are made to the table.

    " + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

    Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

    " + } + } + }, + "BatchUpsertTableRowsResult":{ + "type":"structure", + "required":[ + "rows", + "workbookCursor" + ], + "members":{ + "rows":{ + "shape":"UpsertRowsResultMap", + "documentation":"

    A map with the batch item id as the key and the result of the upsert operation as the value. The result of the upsert operation specifies whether existing rows were updated or a new row was appended, along with the list of row ids that were affected.

    " + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

    The updated workbook cursor after updating or appending rows in the table.

    " + }, + "failedBatchItems":{ + "shape":"FailedBatchItems", + "documentation":"

    The list of batch items in the request that could not be updated or appended in the table. Each element in this list contains one item from the request that could not be updated in the table along with the reason why that item could not be updated or appended.

    " + } + } + }, + "Cell":{ + "type":"structure", + "members":{ + "formula":{ + "shape":"Formula", + "documentation":"

    The formula contained in the cell. This field is empty if a cell does not have a formula.

    " + }, + "format":{ + "shape":"Format", + "documentation":"

    The format of the cell. If this field is empty, then the format is either not specified in the workbook or the format is set to AUTO.

    " + }, + "rawValue":{ + "shape":"RawValue", + "documentation":"

    The raw value of the data contained in the cell. The raw value depends on the format of the data in the cell. However, the attribute in the API return value is always a string containing the raw value.

    Cells with format DATE, DATE_TIME or TIME have the raw value as a floating point number where the whole number represents the number of days since 1/1/1900 and the fractional part represents the fraction of the day since midnight. For example, a cell with date 11/3/2020 has the raw value \"44138\". A cell with the time 9:00 AM has the raw value \"0.375\" and a cell with date/time value of 11/3/2020 9:00 AM has the raw value \"44138.375\". Notice that even though the raw value is a number in all three cases, it is still represented as a string.

    Cells with format NUMBER, CURRENCY, PERCENTAGE and ACCOUNTING have the raw value of the data as the number representing the data being displayed. For example, the number 1.325 with two decimal places in the format will have its raw value as \"1.325\" and formatted value as \"1.33\". A currency value for $10 will have the raw value as \"10\" and formatted value as \"$10.00\". A value representing 20% with two decimal places in the format will have its raw value as \"0.2\" and the formatted value as \"20.00%\". An accounting value of -$25 will have \"-25\" as the raw value and \"$ (25.00)\" as the formatted value.

    Cells with format TEXT will have the raw text as the raw value. For example, a cell with text \"John Smith\" will have \"John Smith\" as both the raw value and the formatted value.

    Cells with format CONTACT will have the name of the contact as a formatted value and the email address of the contact as the raw value. For example, a contact for John Smith will have \"John Smith\" as the formatted value and \"john.smith@example.com\" as the raw value.

    Cells with format ROWLINK (aka picklist) will have the first column of the linked row as the formatted value and the row id of the linked row as the raw value. For example, a cell containing a picklist to a table that displays task status might have \"Completed\" as the formatted value and \"row:dfcefaee-5b37-4355-8f28-40c3e4ff5dd4/ca432b2f-b8eb-431d-9fb5-cbe0342f9f03\" as the raw value.

    Cells with format AUTO or cells without any format that are auto-detected as one of the formats above will contain the raw and formatted values as mentioned above, based on the auto-detected formats. If there is no auto-detected format, the raw and formatted values will be the same as the data in the cell.

    " + }, + "formattedValue":{ + "shape":"FormattedValue", + "documentation":"

    The formatted value of the cell. This is the value that you see displayed in the cell in the UI.

    Note that the formatted value of a cell is always represented as a string irrespective of the data that is stored in the cell. For example, if a cell contains a date, the formatted value of the cell is the string representation of the formatted date being shown in the cell in the UI. See details in the rawValue field below for how cells of different formats will have different raw and formatted values.

    " + } + }, + "documentation":"

    An object that represents a single cell in a table.

    ", + "sensitive":true + }, + "CellInput":{ + "type":"structure", + "members":{ + "fact":{ + "shape":"Fact", + "documentation":"

    Fact represents the data that is entered into a cell. This data can be free text or a formula. Formulas need to start with the equals (=) sign.

    " + } + }, + "documentation":"

    CellInput object contains the data needed to create or update cells in a table.

    " + }, + "Cells":{ + "type":"list", + "member":{"shape":"Cell"} + }, + "ClientRequestToken":{ + "type":"string", + "max":64, + "min":32, + "pattern":"^(?!\\s*$).+" + }, + "ColumnMetadata":{ + "type":"structure", + "required":[ + "name", + "format" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

    The name of the column.

    " + }, + "format":{ + "shape":"Format", + "documentation":"

    The format of the column.

    " + } + }, + "documentation":"

    Metadata for a column in the table.

    " + }, + "CreateRowData":{ + "type":"structure", + "required":[ + "batchItemId", + "cellsToCreate" + ], + "members":{ + "batchItemId":{ + "shape":"BatchItemId", + "documentation":"

    An external identifier that represents the single row that is being created as part of the BatchCreateTableRows request. This can be any string that you can use to identify the row in the request. The BatchCreateTableRows API puts the batch item id in the results to allow you to link data in the request to data in the results.

    " + }, + "cellsToCreate":{ + "shape":"RowDataInput", + "documentation":"

    A map representing the cells to create in the new row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell.

    " + } + }, + "documentation":"

    Data needed to create a single row in a table as part of the BatchCreateTableRows request.

    " + }, + "CreateRowDataList":{ + "type":"list", + "member":{"shape":"CreateRowData"}, + "max":100, + "min":1 + }, + "CreatedRowsMap":{ + "type":"map", + "key":{"shape":"BatchItemId"}, + "value":{"shape":"RowId"} + }, + "DataItem":{ + "type":"structure", + "members":{ + "overrideFormat":{ + "shape":"Format", + "documentation":"

    The overrideFormat is optional and is specified only if a particular row of data has a different format for the data than the default format defined on the screen or the table.

    " + }, + "rawValue":{ + "shape":"RawValue", + "documentation":"

    The raw value of the data, e.g. jsmith@example.com.

    " + }, + "formattedValue":{ + "shape":"FormattedValue", + "documentation":"

    The formatted value of the data, e.g. John Smith.

    " + } + }, + "documentation":"

    The data in a particular data cell defined on the screen.

    ", + "sensitive":true + }, + "DataItems":{ + "type":"list", + "member":{"shape":"DataItem"} + }, + "DelimitedTextDelimiter":{ + "type":"string", + "max":1, + "min":1, + "pattern":"^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]?$" + }, + "DelimitedTextImportOptions":{ + "type":"structure", + "required":["delimiter"], + "members":{ + "delimiter":{ + "shape":"DelimitedTextDelimiter", + "documentation":"

    The delimiter to use for separating columns in a single row of the input.

    " + }, + "hasHeaderRow":{ + "shape":"HasHeaderRow", + "documentation":"

    Indicates whether the input file has a header row at the top containing the column names.

    " + }, + "ignoreEmptyRows":{ + "shape":"IgnoreEmptyRows", + "documentation":"

    A parameter to indicate whether empty rows should be ignored or be included in the import.

    " + }, + "dataCharacterEncoding":{ + "shape":"ImportDataCharacterEncoding", + "documentation":"

    The encoding of the data in the input file.

    " + } + }, + "documentation":"

    An object that contains the options relating to parsing delimited text as part of an import request.

    " + }, + "DescribeTableDataImportJobRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId", + "jobId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook into which data was imported.

    If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the table into which data was imported.

    If a table with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"tableId" + }, + "jobId":{ + "shape":"JobId", + "documentation":"

    The ID of the job that was returned by the StartTableDataImportJob request.

    If a job with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"jobId" + } + } + }, + "DescribeTableDataImportJobResult":{ + "type":"structure", + "required":[ + "jobStatus", + "message", + "jobMetadata" + ], + "members":{ + "jobStatus":{ + "shape":"TableDataImportJobStatus", + "documentation":"

    The current status of the import job.

    " + }, + "message":{ + "shape":"TableDataImportJobMessage", + "documentation":"

    A message providing more details about the current status of the import job.

    " + }, + "jobMetadata":{ + "shape":"TableDataImportJobMetadata", + "documentation":"

    The metadata about the job that was submitted for import.

    " + } + } + }, + "DestinationOptions":{ + "type":"structure", + "members":{ + "columnMap":{ + "shape":"ImportColumnMap", + "documentation":"

    A map of the column id to the import properties for each column.

    " + } + }, + "documentation":"

    An object that contains the options relating to the destination of the import request.

    " + }, + "Email":{ + "type":"string", + "max":254, + "min":3, + "pattern":"^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$", + "sensitive":true + }, + "ErrorMessage":{"type":"string"}, + "Fact":{ + "type":"string", + "max":8192, + "min":0, + "pattern":"[\\s\\S]*", + "sensitive":true + }, + "FailedBatchItem":{ + "type":"structure", + "required":[ + "id", + "errorMessage" + ], + "members":{ + "id":{ + "shape":"BatchItemId", + "documentation":"

    The id of the batch item that failed. This is the batch item id for the BatchCreateTableRows and BatchUpsertTableRows operations and the row id for the BatchUpdateTableRows and BatchDeleteTableRows operations.

    " + }, + "errorMessage":{ + "shape":"BatchErrorMessage", + "documentation":"

    The error message that indicates why the batch item failed.

    " + } + }, + "documentation":"

    A single item in a batch that failed to perform the intended action because of an error preventing it from succeeding.

    " + }, + "FailedBatchItems":{ + "type":"list", + "member":{"shape":"FailedBatchItem"}, + "max":100, + "min":0 + }, + "Filter":{ + "type":"structure", + "required":["formula"], + "members":{ + "formula":{ + "shape":"Formula", + "documentation":"

    A formula representing a filter function that returns zero or more matching rows from a table. Valid formulas in this field return a list of rows from a table. The most common ways of writing a formula to return a list of rows are to use the FindRow() or Filter() functions. Any other formula that returns zero or more rows is also acceptable. For example, you can use a formula that points to a cell that contains a filter function.

    " + }, + "contextRowId":{ + "shape":"RowId", + "documentation":"

    The optional contextRowId attribute can be used to specify the row id of the context row if the filter formula contains unqualified references to table columns and needs a context row to evaluate them successfully.

    " + } + }, + "documentation":"

    An object that represents a filter formula along with the id of the context row under which the filter function needs to evaluate.

    " + }, + "Format":{ + "type":"string", + "enum":[ + "AUTO", + "NUMBER", + "CURRENCY", + "DATE", + "TIME", + "DATE_TIME", + "PERCENTAGE", + "TEXT", + "ACCOUNTING", + "CONTACT", + "ROWLINK" + ] + }, + "FormattedValue":{ + "type":"string", + "max":8192, + "min":0, + "pattern":"[\\s\\S]*" + }, + "Formula":{ + "type":"string", + "max":8192, + "min":0, + "pattern":"^=.*", + "sensitive":true + }, + "GetScreenDataRequest":{ + "type":"structure", + "required":[ + "workbookId", + "appId", + "screenId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook that contains the screen.

    " + }, + "appId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the app that contains the screen.

    " + }, + "screenId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the screen.

    " + }, + "variables":{ + "shape":"VariableValueMap", + "documentation":"

    Variables are optional and are needed only if the screen requires them to render correctly. Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of results to be returned on a single page. Specify a number between 1 and 100. The maximum value is 100.

    This parameter is optional. If you don't specify this parameter, the default page size is 100.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

    Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

    " + } + } + }, + "GetScreenDataResult":{ + "type":"structure", + "required":[ + "results", + "workbookCursor" + ], + "members":{ + "results":{ + "shape":"ResultSetMap", + "documentation":"

    A map of all the rows on the screen keyed by block name.

    " + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

    Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the query has been loaded.

    " + } + } + }, + "HasHeaderRow":{"type":"boolean"}, + "IgnoreEmptyRows":{"type":"boolean"}, + "ImportColumnMap":{ + "type":"map", + "key":{"shape":"ResourceId"}, + "value":{"shape":"SourceDataColumnProperties"}, + "max":100 + }, + "ImportDataCharacterEncoding":{ + "type":"string", + "enum":[ + "UTF-8", + "US-ASCII", + "ISO-8859-1", + "UTF-16BE", + "UTF-16LE", + "UTF-16" + ] + }, + "ImportDataSource":{ + "type":"structure", + "required":["dataSourceConfig"], + "members":{ + "dataSourceConfig":{ + "shape":"ImportDataSourceConfig", + "documentation":"

    The configuration parameters for the data source of the import.

    " + } + }, + "documentation":"

    An object that has details about the source of the data that was submitted for import.

    " + }, + "ImportDataSourceConfig":{ + "type":"structure", + "members":{ + "dataSourceUrl":{ + "shape":"SecureURL", + "documentation":"

    The URL from which source data will be downloaded for the import request.

    " + } + }, + "documentation":"

    An object that contains the configuration parameters for the data source of an import request.

    " + }, + "ImportJobSubmitter":{ + "type":"structure", + "members":{ + "email":{ + "shape":"Email", + "documentation":"

    The email id of the submitter of the import job, if available.

    " + }, + "userArn":{ + "shape":"AwsUserArn", + "documentation":"

    The AWS user ARN of the submitter of the import job, if available.

    " + } + }, + "documentation":"

    An object that contains the attributes of the submitter of the import job.

    " + }, + "ImportOptions":{ + "type":"structure", + "members":{ + "destinationOptions":{ + "shape":"DestinationOptions", + "documentation":"

    Options relating to the destination of the import request.

    " + }, + "delimitedTextOptions":{ + "shape":"DelimitedTextImportOptions", + "documentation":"

    Options relating to parsing delimited text. Required if dataFormat is DELIMITED_TEXT.

    " + } + }, + "documentation":"

    An object that contains the options specified by the submitter of the import request.

    " + }, + "ImportSourceDataFormat":{ + "type":"string", + "enum":["DELIMITED_TEXT"] + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    There were unexpected errors from the server.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvokeScreenAutomationRequest":{ + "type":"structure", + "required":[ + "workbookId", + "appId", + "screenId", + "screenAutomationId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook that contains the screen automation.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "appId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the app that contains the screen automation.

    ", + "location":"uri", + "locationName":"appId" + }, + "screenId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the screen that contains the screen automation.

    ", + "location":"uri", + "locationName":"screenId" + }, + "screenAutomationId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the automation action to be performed.

    ", + "location":"uri", + "locationName":"automationId" + }, + "variables":{ + "shape":"VariableValueMap", + "documentation":"

    Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen. Any variables defined in a screen are required to be passed in the call.

    " + }, + "rowId":{ + "shape":"RowId", + "documentation":"

    The row ID for the automation if the automation is defined inside a block with source or list.

    " + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    The request token for performing the automation action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will return the response of the previous call rather than performing the action again.

    Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

    " + } + } + }, + "InvokeScreenAutomationResult":{ + "type":"structure", + "required":["workbookCursor"], + "members":{ + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

    The updated workbook cursor after performing the automation action.

    " + } + } + }, + "JobId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$" + }, + "ListTableColumnsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook that contains the table whose columns are being retrieved.

    If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the table whose columns are being retrieved.

    If a table with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"tableId" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

    Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListTableColumnsResult":{ + "type":"structure", + "required":["tableColumns"], + "members":{ + "tableColumns":{ + "shape":"TableColumns", + "documentation":"

    The list of columns in the table.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

    " + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

    Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

    " + } + } + }, + "ListTableRowsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook that contains the table whose rows are being retrieved.

    If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the table whose rows are being retrieved.

    If a table with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"tableId" + }, + "rowIds":{ + "shape":"RowIdList", + "documentation":"

    This parameter is optional. If one or more row ids are specified in this list, then only the specified row ids are returned in the result. If no row ids are specified here, then all the rows in the table are returned.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of rows to return in each page of the results.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

    Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

    " + } + } + }, + "ListTableRowsResult":{ + "type":"structure", + "required":[ + "columnIds", + "rows", + "workbookCursor" + ], + "members":{ + "columnIds":{ + "shape":"ResourceIds", + "documentation":"

    The list of columns in the table whose row data is returned in the result.

    " + }, + "rows":{ + "shape":"TableRows", + "documentation":"

    The list of rows in the table. Note that this result is paginated, so this list contains a maximum of 100 rows.

    " + }, + "rowIdsNotFound":{ + "shape":"RowIdList", + "documentation":"

    The list of row ids included in the request that were not found in the table.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

    " + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

    Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

    " + } + } + }, + "ListTablesRequest":{ + "type":"structure", + "required":["workbookId"], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook whose tables are being retrieved.

    If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of tables to return in each page of the results.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

    Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListTablesResult":{ + "type":"structure", + "required":["tables"], + "members":{ + "tables":{ + "shape":"Tables", + "documentation":"

    The list of tables in the workbook.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

    " + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

    Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "Name":{ + "type":"string", + "sensitive":true + }, + "PaginationToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^(?!\\s*$).+" + }, + "QueryTableRowsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId", + "filterFormula" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook whose table rows are being queried.

    If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the table whose rows are being queried.

    If a table with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"tableId" + }, + "filterFormula":{ + "shape":"Filter", + "documentation":"

    An object that represents a filter formula along with the id of the context row under which the filter function needs to evaluate.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of rows to return in each page of the results.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

    Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

    " + } + } + }, + "QueryTableRowsResult":{ + "type":"structure", + "required":[ + "columnIds", + "rows", + "workbookCursor" + ], + "members":{ + "columnIds":{ + "shape":"ResourceIds", + "documentation":"

    The list of columns in the table whose row data is returned in the result.

    " + }, + "rows":{ + "shape":"TableRows", + "documentation":"

    The list of rows in the table that match the query filter.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

    " + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

    Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

    " + } + } + }, + "RawValue":{ + "type":"string", + "max":32767, + "min":0, + "pattern":"[\\s\\S]*" + }, + "RequestTimeoutException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request timed out.

    ", + "error":{ + "httpStatusCode":504, + "senderFault":true + }, + "exception":true + }, + "ResourceId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + }, + "ResourceIds":{ + "type":"list", + "member":{"shape":"ResourceId"}, + "max":100, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    A Workbook, Table, App, Screen or Screen Automation was not found with the given ID.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResultHeader":{ + "type":"list", + "member":{"shape":"ColumnMetadata"} + }, + "ResultRow":{ + "type":"structure", + "required":["dataItems"], + "members":{ + "rowId":{ + "shape":"RowId", + "documentation":"

    The ID for a particular row.

    " + }, + "dataItems":{ + "shape":"DataItems", + "documentation":"

    List of all the data cells in a row.

    " + } + }, + "documentation":"

    A single row in the ResultSet.

    " + }, + "ResultRows":{ + "type":"list", + "member":{"shape":"ResultRow"} + }, + "ResultSet":{ + "type":"structure", + "required":[ + "headers", + "rows" + ], + "members":{ + "headers":{ + "shape":"ResultHeader", + "documentation":"

    List of headers for all the data cells in the block. The header identifies the name and default format of the data cell. Data cells appear in the same order in all rows as defined in the header. The names and formats are not repeated in the rows. If a particular row does not have a value for a data cell, a blank value is used.

    For example, a task list that displays the task name, due date and assigned person might have headers [ { \"name\": \"Task Name\"}, {\"name\": \"Due Date\", \"format\": \"DATE\"}, {\"name\": \"Assigned\", \"format\": \"CONTACT\"} ]. Every row in the result will have the task name as the first item, due date as the second item and assigned person as the third item. If a particular task does not have a due date, that row will still have a blank value in the second element and the assigned person will still be in the third element.

    " + }, + "rows":{ + "shape":"ResultRows", + "documentation":"

    List of rows returned by the request. Each row has a row Id and a list of data cells in that row. The data cells will be present in the same order as they are defined in the header.

    " + } + }, + "documentation":"

    ResultSet contains the results of the request for a single block or list defined on the screen.

    " + }, + "ResultSetMap":{ + "type":"map", + "key":{"shape":"Name"}, + "value":{"shape":"ResultSet"} + }, + "RowDataInput":{ + "type":"map", + "key":{"shape":"ResourceId"}, + "value":{"shape":"CellInput"}, + "max":100, + "min":1 + }, + "RowId":{ + "type":"string", + "max":77, + "min":77, + "pattern":"row:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\\/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + }, + "RowIdList":{ + "type":"list", + "member":{"shape":"RowId"}, + "max":100, + "min":1 + }, + "SecureURL":{ + "type":"string", + "max":8000, + "min":1, + "pattern":"^https:\\/\\/[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request caused a service quota to be breached.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Remote service is unreachable.

    ", + "error":{"httpStatusCode":503}, + "exception":true + }, + "SourceDataColumnIndex":{ + "type":"integer", + "min":1 + }, + "SourceDataColumnProperties":{ + "type":"structure", + "members":{ + "columnIndex":{ + "shape":"SourceDataColumnIndex", + "documentation":"

    The index of the column in the input file.

    " + } + }, + "documentation":"

    An object that contains the properties for importing data to a specific column in a table.

    " + }, + "StartTableDataImportJobRequest":{ + "type":"structure", + "required":[ + "workbookId", + "dataSource", + "dataFormat", + "destinationTableId", + "importOptions", + "clientRequestToken" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the workbook where the rows are being imported.

    If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"workbookId" + }, + "dataSource":{ + "shape":"ImportDataSource", + "documentation":"

    The source of the data that is being imported. The size of the source must be no larger than 100 MB. The source must have no more than 100,000 cells and no more than 1,000 rows.

    " + }, + "dataFormat":{ + "shape":"ImportSourceDataFormat", + "documentation":"

    The format of the data that is being imported. Currently the only option supported is \"DELIMITED_TEXT\".

    " + }, + "destinationTableId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the table where the rows are being imported.

    If a table with the specified id could not be found, this API throws ResourceNotFoundException.

    ", + "location":"uri", + "locationName":"tableId" + }, + "importOptions":{ + "shape":"ImportOptions", + "documentation":"

    The options for customizing this import request.

    " + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

    Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

    " + } + } + }, + "StartTableDataImportJobResult":{ + "type":"structure", + "required":[ + "jobId", + "jobStatus" + ], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

    The id that is assigned to this import job. Future requests to find out the status of this import job need to send this id in the appropriate parameter in the request.

    " + }, + "jobStatus":{ + "shape":"TableDataImportJobStatus", + "documentation":"

    The status of the import job immediately after submitting the request.

    " + } + } + }, + "Table":{ + "type":"structure", + "members":{ + "tableId":{ + "shape":"ResourceId", + "documentation":"

    The id of the table.

    " + }, + "tableName":{ + "shape":"TableName", + "documentation":"

    The name of the table.

    " + } + }, + "documentation":"

    An object representing the properties of a table in a workbook.

    " + }, + "TableColumn":{ + "type":"structure", + "members":{ + "tableColumnId":{ + "shape":"ResourceId", + "documentation":"

    The id of the column in the table.

    " + }, + "tableColumnName":{ + "shape":"TableColumnName", + "documentation":"

    The name of the column in the table.

    " + }, + "format":{ + "shape":"Format", + "documentation":"

    The column level format that is applied in the table. An empty value in this field means that the column format is the default value 'AUTO'.

    " + } + }, + "documentation":"

    An object that contains attributes about a single column in a table.

    " + }, + "TableColumnName":{"type":"string"}, + "TableColumns":{ + "type":"list", + "member":{"shape":"TableColumn"} + }, + "TableDataImportJobMessage":{"type":"string"}, + "TableDataImportJobMetadata":{ + "type":"structure", + "required":[ + "submitter", + "submitTime", + "importOptions", + "dataSource" + ], + "members":{ + "submitter":{ + "shape":"ImportJobSubmitter", + "documentation":"

    Details about the submitter of the import request.

    " + }, + "submitTime":{ + "shape":"TimestampInMillis", + "documentation":"

    The timestamp when the job was submitted for import.

    " + }, + "importOptions":{ + "shape":"ImportOptions", + "documentation":"

    The options that were specified at the time of submitting the import request.

    " + }, + "dataSource":{ + "shape":"ImportDataSource", + "documentation":"

    The source of the data that was submitted for import.

    " + } + }, + "documentation":"

    The metadata associated with the table data import job that was submitted.

    " + }, + "TableDataImportJobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "COMPLETED", + "FAILED" + ] + }, + "TableName":{"type":"string"}, + "TableRow":{ + "type":"structure", + "required":[ + "rowId", + "cells" + ], + "members":{ + "rowId":{ + "shape":"RowId", + "documentation":"

    The id of the row in the table.

    " + }, + "cells":{ + "shape":"Cells", + "documentation":"

    A list of cells in the table row. The cells appear in the same order as the columns of the table.

    " + } + }, + "documentation":"

    An object that contains attributes about a single row in a table.

    " + }, + "TableRows":{ + "type":"list", + "member":{"shape":"TableRow"} + }, + "Tables":{ + "type":"list", + "member":{"shape":"Table"} + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    TPS (transactions per second) rate limit reached.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "TimestampInMillis":{"type":"timestamp"}, + "UpdateRowData":{ + "type":"structure", + "required":[ + "rowId", + "cellsToUpdate" + ], + "members":{ + "rowId":{ + "shape":"RowId", + "documentation":"

    The id of the row that needs to be updated.

    " + }, + "cellsToUpdate":{ + "shape":"RowDataInput", + "documentation":"

    A map representing the cells to update in the given row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell.

    " + } + }, + "documentation":"

    Data needed to update a single row in a table as part of the BatchUpdateTableRows request.

    " + }, + "UpdateRowDataList":{ + "type":"list", + "member":{"shape":"UpdateRowData"}, + "max":100, + "min":1 + }, + "UpsertAction":{ + "type":"string", + "enum":[ + "UPDATED", + "APPENDED" + ] + }, + "UpsertRowData":{ + "type":"structure", + "required":[ + "batchItemId", + "filter", + "cellsToUpdate" + ], + "members":{ + "batchItemId":{ + "shape":"BatchItemId", + "documentation":"

    An external identifier that represents a single item in the request that is being upserted as part of the BatchUpsertTableRows request. This can be any string that you can use to identify the item in the request. The BatchUpsertTableRows API puts the batch item id in the results to allow you to link data in the request to data in the results.

    " + }, + "filter":{ + "shape":"Filter", + "documentation":"

    The filter formula to use to find existing matching rows to update. The formula needs to return zero or more rows. If the formula returns 0 rows, then a new row will be appended in the target table. If the formula returns one or more rows, then the returned rows will be updated.

    Note that the filter formula needs to return rows from the target table for the upsert operation to succeed. If the filter formula has a syntax error or it doesn't evaluate to zero or more rows in the target table for any one item in the input list, then the entire BatchUpsertTableRows request fails and no updates are made to the table.

    " + }, + "cellsToUpdate":{ + "shape":"RowDataInput", + "documentation":"

    A map representing the cells to update for the matching rows or an appended row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell.

    " + } + }, + "documentation":"

    Data needed to upsert rows in a table as part of a single item in the BatchUpsertTableRows request.

    " + }, + "UpsertRowDataList":{ + "type":"list", + "member":{"shape":"UpsertRowData"} + }, + "UpsertRowsResult":{ + "type":"structure", + "required":[ + "rowIds", + "upsertAction" + ], + "members":{ + "rowIds":{ + "shape":"RowIdList", + "documentation":"

    The list of row ids that were changed as part of an upsert row operation. If the upsert resulted in an update, this list could potentially contain multiple rows that matched the filter and hence got updated. If the upsert resulted in an append, this list would only have the single row that was appended.

    " + }, + "upsertAction":{ + "shape":"UpsertAction", + "documentation":"

    The result of the upsert action.

    " + } + }, + "documentation":"

    An object that represents the result of a single upsert row request.

    " + }, + "UpsertRowsResultMap":{ + "type":"map", + "key":{"shape":"BatchItemId"}, + "value":{"shape":"UpsertRowsResult"} + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Request is invalid. The message in the response contains details on why the request is invalid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "VariableName":{ + "type":"string", + "pattern":"^(?!\\s*$).+", + "sensitive":true + }, + "VariableValue":{ + "type":"structure", + "required":["rawValue"], + "members":{ + "rawValue":{ + "shape":"RawValue", + "documentation":"

    Raw value of the variable.

    " + } + }, + "documentation":"

    The input variables to the app to be used by the InvokeScreenAutomation action request.

    ", + "sensitive":true + }, + "VariableValueMap":{ + "type":"map", + "key":{"shape":"VariableName"}, + "value":{"shape":"VariableValue"}, + "sensitive":true + }, + "WorkbookCursor":{"type":"long"} + }, + "documentation":"

    Amazon Honeycode is a fully managed service that allows you to quickly build mobile and web apps for teams—without programming. Build Honeycode apps for managing almost anything, like projects, customers, operations, approvals, resources, and even your team.

    " +} diff --git a/services/iam/build.properties b/services/iam/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/iam/build.properties +++ b/services/iam/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/iam/pom.xml b/services/iam/pom.xml index d8de081b8a05..824b86349343 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + identitystore + AWS Java SDK :: Services :: Identitystore + The AWS Java SDK for Identitystore module holds the client classes that are used for + communicating with Identitystore. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.identitystore + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/identitystore/src/main/resources/codegen-resources/paginators-1.json b/services/identitystore/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..3277ccaad6e4 --- /dev/null +++ b/services/identitystore/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,14 @@ +{ + "pagination": { + "ListGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListUsers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/identitystore/src/main/resources/codegen-resources/service-2.json b/services/identitystore/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..d5137d37bf90 --- /dev/null +++ b/services/identitystore/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,429 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-06-15", + "endpointPrefix":"identitystore", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"IdentityStore", + "serviceFullName":"AWS SSO Identity Store", + "serviceId":"identitystore", + "signatureVersion":"v4", + "signingName":"identitystore", + "targetPrefix":"AWSIdentityStore", + "uid":"identitystore-2020-06-15" + }, + "operations":{ + "DescribeGroup":{ + "name":"DescribeGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeGroupRequest"}, + "output":{"shape":"DescribeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves the group metadata and attributes from GroupId in an identity store.

    " + }, + "DescribeUser":{ + "name":"DescribeUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeUserRequest"}, + "output":{"shape":"DescribeUserResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves the user metadata and attributes from UserId in an identity store.

    " + }, + "ListGroups":{ + "name":"ListGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGroupsRequest"}, + "output":{"shape":"ListGroupsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the attribute name and value of the group that you specified in the search. Currently, DisplayName is the only supported filter attribute path, and a filter is required. This API returns a minimum set of attributes, including GroupId and the group DisplayName, in the response.

    " + }, + "ListUsers":{ + "name":"ListUsers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUsersRequest"}, + "output":{"shape":"ListUsersResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the attribute name and value of the user that you specified in the search. Currently, UserName is the only supported filter attribute path, and a filter is required. This API returns a minimum set of attributes, including UserId and UserName, in the response.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"}, + "RequestId":{ + "shape":"RequestId", + "documentation":"

    The identifier for each request. This value is a globally unique ID that is generated by the Identity Store service for each sent request, and is then returned inside the exception if the request fails.

    " + } + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "exception":true + }, + "AttributePath":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}  ]+" + }, + "DescribeGroupRequest":{ + "type":"structure", + "required":[ + "IdentityStoreId", + "GroupId" + ], + "members":{ + "IdentityStoreId":{ + "shape":"IdentityStoreId", + "documentation":"

    The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string that contains numbers and lowercase letters. This value is generated at the time that a new identity store is created.

    " + }, + "GroupId":{ + "shape":"ResourceId", + "documentation":"

    The identifier for a group in the identity store.

    " + } + } + }, + "DescribeGroupResponse":{ + "type":"structure", + "required":[ + "GroupId", + "DisplayName" + ], + "members":{ + "GroupId":{ + "shape":"ResourceId", + "documentation":"

    The identifier for a group in the identity store.

    " + }, + "DisplayName":{ + "shape":"GroupDisplayName", + "documentation":"

    Contains the group’s display name value. The length limit is 1024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space, and non-breaking space in this attribute. The characters “<>;:%” are excluded. This value is specified at the time the group is created and stored as an attribute of the group object in the identity store.

    " + } + } + }, + "DescribeUserRequest":{ + "type":"structure", + "required":[ + "IdentityStoreId", + "UserId" + ], + "members":{ + "IdentityStoreId":{ + "shape":"IdentityStoreId", + "documentation":"

    The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string that contains numbers and lowercase letters. This value is generated at the time that a new identity store is created.

    " + }, + "UserId":{ + "shape":"ResourceId", + "documentation":"

    The identifier for a user in the identity store.

    " + } + } + }, + "DescribeUserResponse":{ + "type":"structure", + "required":[ + "UserName", + "UserId" + ], + "members":{ + "UserName":{ + "shape":"UserName", + "documentation":"

    Contains the user’s username value. The length limit is 128 characters. This value can consist of letters, accented characters, symbols, numbers and punctuation. The characters “<>;:%” are excluded. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store.

    " + }, + "UserId":{ + "shape":"ResourceId", + "documentation":"

    The identifier for a user in the identity store.

    " + } + } + }, + "Filter":{ + "type":"structure", + "required":[ + "AttributePath", + "AttributeValue" + ], + "members":{ + "AttributePath":{ + "shape":"AttributePath", + "documentation":"

    The attribute path that is used to specify which attribute name to search. The length limit is 255 characters. For example, UserName is a valid attribute path for the ListUsers API, and DisplayName is a valid attribute path for the ListGroups API.

    " + }, + "AttributeValue":{ + "shape":"SensitiveStringType", + "documentation":"

    Represents the data for an attribute. Each attribute value is described as a name-value pair.

    " + } + }, + "documentation":"

    A query filter used by ListUsers and ListGroups. This filter object provides the attribute name and attribute value to search for users or groups.

    " + }, + "Filters":{ + "type":"list", + "member":{"shape":"Filter"} + }, + "Group":{ + "type":"structure", + "required":[ + "GroupId", + "DisplayName" + ], + "members":{ + "GroupId":{ + "shape":"ResourceId", + "documentation":"

    The identifier for a group in the identity store.

    " + }, + "DisplayName":{ + "shape":"GroupDisplayName", + "documentation":"

    Contains the group’s display name value. The length limit is 1024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tabs, new lines, carriage returns, spaces, and non-breaking spaces. The characters “<>;:%” are excluded. This value is specified at the time the group is created and is stored as an attribute of the group object in the identity store.

    " + } + }, + "documentation":"

    A group object, which contains a specified group’s metadata and attributes.

    " + }, + "GroupDisplayName":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\t\\n\\r  ]+" + }, + "Groups":{ + "type":"list", + "member":{"shape":"Group"} + }, + "IdentityStoreId":{ + "type":"string", + "max":12, + "min":1, + "pattern":"^d-[0-9a-f]{10}$" + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"}, + "RequestId":{ + "shape":"RequestId", + "documentation":"

    The identifier for each request. This value is a globally unique ID that is generated by the Identity Store service for each sent request, and is then returned inside the exception if the request fails.

    " + } + }, + "documentation":"

    The request processing has failed because of an unknown error, exception or failure with an internal server.

    ", + "exception":true, + "fault":true + }, + "ListGroupsRequest":{ + "type":"structure", + "required":["IdentityStoreId"], + "members":{ + "IdentityStoreId":{ + "shape":"IdentityStoreId", + "documentation":"

    The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix and 1234567890 is a randomly generated string that contains numbers and lowercase letters. This value is generated when a new identity store is created.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per request. This parameter is used in the ListUsers and ListGroups requests to specify how many results to return in one page. The maximum allowed value is 50.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token used for the ListUsers and ListGroups API operations. This value is generated by the identity store service. It is returned in the API response if the total results are more than the size of one page, and it can then be passed in the next API request to retrieve the next page of results.

    " + }, + "Filters":{ + "shape":"Filters", + "documentation":"

    A list of Filter objects, which is used in the ListUsers and ListGroups request.

    " + } + } + }, + "ListGroupsResponse":{ + "type":"structure", + "required":["Groups"], + "members":{ + "Groups":{ + "shape":"Groups", + "documentation":"

    A list of Group objects in the identity store.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token used for the ListUsers and ListGroups API operations. This value is generated by the identity store service. It is returned in the API response if the total results are more than the size of one page, and it can then be passed in the next API request to retrieve the next page of results.

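    Because this model defines no paginators, callers page manually by feeding the response token back into the next request. The loop below is a sketch under the same naming assumptions as the earlier ListGroups example; the identity store ID and filter value remain placeholders.

        // Assumes the same generated client and model classes as the earlier ListGroups sketch.
        static void printAllGroups(IdentitystoreClient identitystore, String identityStoreId) {
            String nextToken = null;
            do {
                ListGroupsResponse page = identitystore.listGroups(ListGroupsRequest.builder()
                        .identityStoreId(identityStoreId)
                        .filters(Filter.builder()
                                .attributePath("DisplayName")
                                .attributeValue("Administrators") // hypothetical filter value; a filter is required
                                .build())
                        .maxResults(50)                           // per-page cap defined by the MaxResults shape
                        .nextToken(nextToken)                     // null on the first call
                        .build());
                page.groups().forEach(g -> System.out.println(g.displayName()));
                nextToken = page.nextToken();                     // absent when there are no more pages
            } while (nextToken != null);
        }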
    " + } + } + }, + "ListUsersRequest":{ + "type":"structure", + "required":["IdentityStoreId"], + "members":{ + "IdentityStoreId":{ + "shape":"IdentityStoreId", + "documentation":"

    The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix and 1234567890 is a randomly generated string that contains numbers and lowercase letters. This value is generated when a new identity store is created.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per request. This parameter is used in the ListUsers and ListGroups requests to specify how many results to return in one page. The maximum allowed value is 50.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token used for the ListUsers and ListGroups API operations. This value is generated by the identity store service. It is returned in the API response if the total results are more than the size of one page, and it can then be passed in the next API request to retrieve the next page of results.

    " + }, + "Filters":{ + "shape":"Filters", + "documentation":"

    A list of Filter objects, which is used in the ListUsers and ListGroups request.

    " + } + } + }, + "ListUsersResponse":{ + "type":"structure", + "required":["Users"], + "members":{ + "Users":{ + "shape":"Users", + "documentation":"

    A list of User objects in the identity store.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token used for the ListUsers and ListGroups API operations. This value is generated by the identity store service. It is returned in the API response if the total results are more than the size of one page, and it can then be passed in the next API request to retrieve the next page of results.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "Message":{ + "type":"string", + "max":65535, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":65535, + "min":1, + "pattern":"^[-a-zA-Z0-9+=/:]*" + }, + "RequestId":{ + "type":"string", + "pattern":"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}" + }, + "ResourceId":{ + "type":"string", + "max":47, + "min":1, + "pattern":"^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource in the Identity Store service, which is an enum object. Valid values include USER, GROUP, and IDENTITY_STORE.

    " + }, + "ResourceId":{ + "shape":"ResourceId", + "documentation":"

    The identifier for a resource in the identity store, which can be used as UserId or GroupId. The format for ResourceId is either UUID or 1234567890-UUID, where UUID is a randomly generated value for each resource when it is created and 1234567890 represents the IdentityStoreId string value. In the case that the identity store is migrated from a legacy SSO identity store, the ResourceId for that identity store will be in the format of UUID. Otherwise, it will be in the 1234567890-UUID format.

    " + }, + "Message":{"shape":"Message"}, + "RequestId":{ + "shape":"RequestId", + "documentation":"

    The identifier for each request. This value is a globally unique ID that is generated by the Identity Store service for each sent request, and is then returned inside the exception if the request fails.

    " + } + }, + "documentation":"

    Indicates that a requested resource is not found.

    ", + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "GROUP", + "USER", + "IDENTITY_STORE" + ] + }, + "SensitiveStringType":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\t\\n\\r  ]+", + "sensitive":true + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"}, + "RequestId":{ + "shape":"RequestId", + "documentation":"

    The identifier for each request. This value is a globally unique ID that is generated by the Identity Store service for each sent request, and is then returned inside the exception if the request fails.

    " + } + }, + "documentation":"

    Indicates that the principal has exceeded the throttling limits of the API operations.

    ", + "exception":true + }, + "User":{ + "type":"structure", + "required":[ + "UserName", + "UserId" + ], + "members":{ + "UserName":{ + "shape":"UserName", + "documentation":"

    Contains the user’s username value. The length limit is 128 characters. This value can consist of letters, accented characters, symbols, numbers and punctuation. The characters “<>;:%” are excluded. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store.

    " + }, + "UserId":{ + "shape":"ResourceId", + "documentation":"

    The identifier for a user in the identity store.

    " + } + }, + "documentation":"

    A user object, which contains a specified user’s metadata and attributes.

    " + }, + "UserName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]+", + "sensitive":true + }, + "Users":{ + "type":"list", + "member":{"shape":"User"} + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"}, + "RequestId":{ + "shape":"RequestId", + "documentation":"

    The identifier for each request. This value is a globally unique ID that is generated by the Identity Store service for each sent request, and is then returned inside the exception if the request fails.

    " + } + }, + "documentation":"

    The request failed because it contains a syntax error.

    ", + "exception":true + } + } +} diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml new file mode 100644 index 000000000000..41aeda27b34b --- /dev/null +++ b/services/imagebuilder/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + imagebuilder + AWS Java SDK :: Services :: Imagebuilder + The AWS Java SDK for Imagebuilder module holds the client classes that are used for + communicating with Imagebuilder. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.imagebuilder + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/imagebuilder/src/main/resources/codegen-resources/paginators-1.json b/services/imagebuilder/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..157e6543cc0f --- /dev/null +++ b/services/imagebuilder/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,64 @@ +{ + "pagination": { + "ListComponentBuildVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "componentSummaryList" + }, + "ListComponents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "componentVersionList" + }, + "ListContainerRecipes": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "containerRecipeSummaryList" + }, + "ListDistributionConfigurations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "distributionConfigurationSummaryList" + }, + "ListImageBuildVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "imageSummaryList" + }, + "ListImagePipelineImages": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "imageSummaryList" + }, + "ListImagePipelines": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "imagePipelineList" + }, + "ListImageRecipes": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "imageRecipeSummaryList" + }, + "ListImages": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "imageVersionList" + }, + "ListInfrastructureConfigurations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "infrastructureConfigurationSummaryList" + } + } +} diff --git a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..97d10b7e54c8 --- /dev/null +++ b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,4414 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-12-02", + "endpointPrefix":"imagebuilder", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"imagebuilder", + "serviceFullName":"EC2 Image Builder", + "serviceId":"imagebuilder", + "signatureVersion":"v4", + "signingName":"imagebuilder", + "uid":"imagebuilder-2019-12-02" + }, + "operations":{ + "CancelImageCreation":{ + 
"name":"CancelImageCreation", + "http":{ + "method":"PUT", + "requestUri":"/CancelImageCreation" + }, + "input":{"shape":"CancelImageCreationRequest"}, + "output":{"shape":"CancelImageCreationResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    CancelImageCreation cancels the creation of an image. This operation can only be used on images in a non-terminal state.

    " + }, + "CreateComponent":{ + "name":"CreateComponent", + "http":{ + "method":"PUT", + "requestUri":"/CreateComponent" + }, + "input":{"shape":"CreateComponentRequest"}, + "output":{"shape":"CreateComponentResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"InvalidVersionNumberException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new component that can be used to build, validate, test, and assess your image.

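    A short sketch of calling this operation follows. The request members used here (name, semanticVersion, platform, data, clientToken) and the response member do not appear in this excerpt and are assumed from the publicly documented Image Builder API, as is the inline YAML component document; treat the whole snippet as illustrative only.

        import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
        import software.amazon.awssdk.services.imagebuilder.model.CreateComponentRequest;
        import software.amazon.awssdk.services.imagebuilder.model.Platform;
        import java.util.UUID;

        public class CreateComponentExample {
            public static void main(String[] args) {
                // Minimal component document (single build phase with one shell step); format assumed.
                String componentYaml = String.join("\n",
                        "name: HelloWorld",
                        "description: Example build component",
                        "schemaVersion: 1.0",
                        "phases:",
                        "  - name: build",
                        "    steps:",
                        "      - name: SayHello",
                        "        action: ExecuteBash",
                        "        inputs:",
                        "          commands:",
                        "            - echo 'Hello from Image Builder'");

                try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                    CreateComponentRequest request = CreateComponentRequest.builder()
                            .name("hello-world")                       // hypothetical component name
                            .semanticVersion("1.0.0")
                            .platform(Platform.LINUX)                  // enum constant assumed
                            .data(componentYaml)
                            .clientToken(UUID.randomUUID().toString()) // idempotency token
                            .build();
                    String arn = imagebuilder.createComponent(request).componentBuildVersionArn(); // member assumed
                    System.out.println("Created component build version: " + arn);
                }
            }
        }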
    " + }, + "CreateContainerRecipe":{ + "name":"CreateContainerRecipe", + "http":{ + "method":"PUT", + "requestUri":"/CreateContainerRecipe" + }, + "input":{"shape":"CreateContainerRecipeRequest"}, + "output":{"shape":"CreateContainerRecipeResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"InvalidVersionNumberException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new container recipe. Container recipes define how images are configured, tested, and assessed.

    " + }, + "CreateDistributionConfiguration":{ + "name":"CreateDistributionConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/CreateDistributionConfiguration" + }, + "input":{"shape":"CreateDistributionConfigurationRequest"}, + "output":{"shape":"CreateDistributionConfigurationResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new distribution configuration. Distribution configurations define and configure the outputs of your pipeline.

    " + }, + "CreateImage":{ + "name":"CreateImage", + "http":{ + "method":"PUT", + "requestUri":"/CreateImage" + }, + "input":{"shape":"CreateImageRequest"}, + "output":{"shape":"CreateImageResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new image. This request will create a new image along with all of the configured output resources defined in the distribution configuration.

    " + }, + "CreateImagePipeline":{ + "name":"CreateImagePipeline", + "http":{ + "method":"PUT", + "requestUri":"/CreateImagePipeline" + }, + "input":{"shape":"CreateImagePipelineRequest"}, + "output":{"shape":"CreateImagePipelineResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new image pipeline. Image pipelines enable you to automate the creation and distribution of images.

    " + }, + "CreateImageRecipe":{ + "name":"CreateImageRecipe", + "http":{ + "method":"PUT", + "requestUri":"/CreateImageRecipe" + }, + "input":{"shape":"CreateImageRecipeRequest"}, + "output":{"shape":"CreateImageRecipeResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"InvalidVersionNumberException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new image recipe. Image recipes define how images are configured, tested, and assessed.

    " + }, + "CreateInfrastructureConfiguration":{ + "name":"CreateInfrastructureConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/CreateInfrastructureConfiguration" + }, + "input":{"shape":"CreateInfrastructureConfigurationRequest"}, + "output":{"shape":"CreateInfrastructureConfigurationResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new infrastructure configuration. An infrastructure configuration defines the environment in which your image will be built and tested.

    " + }, + "DeleteComponent":{ + "name":"DeleteComponent", + "http":{ + "method":"DELETE", + "requestUri":"/DeleteComponent" + }, + "input":{"shape":"DeleteComponentRequest"}, + "output":{"shape":"DeleteComponentResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceDependencyException"} + ], + "documentation":"

    Deletes a component build version.

    " + }, + "DeleteContainerRecipe":{ + "name":"DeleteContainerRecipe", + "http":{ + "method":"DELETE", + "requestUri":"/DeleteContainerRecipe" + }, + "input":{"shape":"DeleteContainerRecipeRequest"}, + "output":{"shape":"DeleteContainerRecipeResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceDependencyException"} + ], + "documentation":"

    Deletes a container recipe.

    " + }, + "DeleteDistributionConfiguration":{ + "name":"DeleteDistributionConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/DeleteDistributionConfiguration" + }, + "input":{"shape":"DeleteDistributionConfigurationRequest"}, + "output":{"shape":"DeleteDistributionConfigurationResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceDependencyException"} + ], + "documentation":"

    Deletes a distribution configuration.

    " + }, + "DeleteImage":{ + "name":"DeleteImage", + "http":{ + "method":"DELETE", + "requestUri":"/DeleteImage" + }, + "input":{"shape":"DeleteImageRequest"}, + "output":{"shape":"DeleteImageResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceDependencyException"} + ], + "documentation":"

    Deletes an image.

    " + }, + "DeleteImagePipeline":{ + "name":"DeleteImagePipeline", + "http":{ + "method":"DELETE", + "requestUri":"/DeleteImagePipeline" + }, + "input":{"shape":"DeleteImagePipelineRequest"}, + "output":{"shape":"DeleteImagePipelineResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceDependencyException"} + ], + "documentation":"

    Deletes an image pipeline.

    " + }, + "DeleteImageRecipe":{ + "name":"DeleteImageRecipe", + "http":{ + "method":"DELETE", + "requestUri":"/DeleteImageRecipe" + }, + "input":{"shape":"DeleteImageRecipeRequest"}, + "output":{"shape":"DeleteImageRecipeResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceDependencyException"} + ], + "documentation":"

    Deletes an image recipe.

    " + }, + "DeleteInfrastructureConfiguration":{ + "name":"DeleteInfrastructureConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/DeleteInfrastructureConfiguration" + }, + "input":{"shape":"DeleteInfrastructureConfigurationRequest"}, + "output":{"shape":"DeleteInfrastructureConfigurationResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceDependencyException"} + ], + "documentation":"

    Deletes an infrastructure configuration.

    " + }, + "GetComponent":{ + "name":"GetComponent", + "http":{ + "method":"GET", + "requestUri":"/GetComponent" + }, + "input":{"shape":"GetComponentRequest"}, + "output":{"shape":"GetComponentResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Gets a component object.

    " + }, + "GetComponentPolicy":{ + "name":"GetComponentPolicy", + "http":{ + "method":"GET", + "requestUri":"/GetComponentPolicy" + }, + "input":{"shape":"GetComponentPolicyRequest"}, + "output":{"shape":"GetComponentPolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Gets a component policy.

    " + }, + "GetContainerRecipe":{ + "name":"GetContainerRecipe", + "http":{ + "method":"GET", + "requestUri":"/GetContainerRecipe" + }, + "input":{"shape":"GetContainerRecipeRequest"}, + "output":{"shape":"GetContainerRecipeResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Retrieves a container recipe.

    " + }, + "GetContainerRecipePolicy":{ + "name":"GetContainerRecipePolicy", + "http":{ + "method":"GET", + "requestUri":"/GetContainerRecipePolicy" + }, + "input":{"shape":"GetContainerRecipePolicyRequest"}, + "output":{"shape":"GetContainerRecipePolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Retrieves the policy for a container recipe.

    " + }, + "GetDistributionConfiguration":{ + "name":"GetDistributionConfiguration", + "http":{ + "method":"GET", + "requestUri":"/GetDistributionConfiguration" + }, + "input":{"shape":"GetDistributionConfigurationRequest"}, + "output":{"shape":"GetDistributionConfigurationResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Gets a distribution configuration.

    " + }, + "GetImage":{ + "name":"GetImage", + "http":{ + "method":"GET", + "requestUri":"/GetImage" + }, + "input":{"shape":"GetImageRequest"}, + "output":{"shape":"GetImageResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Gets an image.

    " + }, + "GetImagePipeline":{ + "name":"GetImagePipeline", + "http":{ + "method":"GET", + "requestUri":"/GetImagePipeline" + }, + "input":{"shape":"GetImagePipelineRequest"}, + "output":{"shape":"GetImagePipelineResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Gets an image pipeline.

    " + }, + "GetImagePolicy":{ + "name":"GetImagePolicy", + "http":{ + "method":"GET", + "requestUri":"/GetImagePolicy" + }, + "input":{"shape":"GetImagePolicyRequest"}, + "output":{"shape":"GetImagePolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Gets an image policy.

    " + }, + "GetImageRecipe":{ + "name":"GetImageRecipe", + "http":{ + "method":"GET", + "requestUri":"/GetImageRecipe" + }, + "input":{"shape":"GetImageRecipeRequest"}, + "output":{"shape":"GetImageRecipeResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Gets an image recipe.

    " + }, + "GetImageRecipePolicy":{ + "name":"GetImageRecipePolicy", + "http":{ + "method":"GET", + "requestUri":"/GetImageRecipePolicy" + }, + "input":{"shape":"GetImageRecipePolicyRequest"}, + "output":{"shape":"GetImageRecipePolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Gets an image recipe policy.

    " + }, + "GetInfrastructureConfiguration":{ + "name":"GetInfrastructureConfiguration", + "http":{ + "method":"GET", + "requestUri":"/GetInfrastructureConfiguration" + }, + "input":{"shape":"GetInfrastructureConfigurationRequest"}, + "output":{"shape":"GetInfrastructureConfigurationResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Gets an infrastructure configuration.

    " + }, + "ImportComponent":{ + "name":"ImportComponent", + "http":{ + "method":"PUT", + "requestUri":"/ImportComponent" + }, + "input":{"shape":"ImportComponentRequest"}, + "output":{"shape":"ImportComponentResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"InvalidVersionNumberException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"

    Imports a component and transforms its data into a component document.

    " + }, + "ListComponentBuildVersions":{ + "name":"ListComponentBuildVersions", + "http":{ + "method":"POST", + "requestUri":"/ListComponentBuildVersions" + }, + "input":{"shape":"ListComponentBuildVersionsRequest"}, + "output":{"shape":"ListComponentBuildVersionsResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Returns the list of component build versions for the specified semantic version.

    " + }, + "ListComponents":{ + "name":"ListComponents", + "http":{ + "method":"POST", + "requestUri":"/ListComponents" + }, + "input":{"shape":"ListComponentsRequest"}, + "output":{"shape":"ListComponentsResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Returns the list of component versions that match the specified criteria.

    " + }, + "ListContainerRecipes":{ + "name":"ListContainerRecipes", + "http":{ + "method":"POST", + "requestUri":"/ListContainerRecipes" + }, + "input":{"shape":"ListContainerRecipesRequest"}, + "output":{"shape":"ListContainerRecipesResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Returns a list of container recipes.

    " + }, + "ListDistributionConfigurations":{ + "name":"ListDistributionConfigurations", + "http":{ + "method":"POST", + "requestUri":"/ListDistributionConfigurations" + }, + "input":{"shape":"ListDistributionConfigurationsRequest"}, + "output":{"shape":"ListDistributionConfigurationsResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Returns a list of distribution configurations.

    " + }, + "ListImageBuildVersions":{ + "name":"ListImageBuildVersions", + "http":{ + "method":"POST", + "requestUri":"/ListImageBuildVersions" + }, + "input":{"shape":"ListImageBuildVersionsRequest"}, + "output":{"shape":"ListImageBuildVersionsResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Returns a list of image build versions.

    " + }, + "ListImagePipelineImages":{ + "name":"ListImagePipelineImages", + "http":{ + "method":"POST", + "requestUri":"/ListImagePipelineImages" + }, + "input":{"shape":"ListImagePipelineImagesRequest"}, + "output":{"shape":"ListImagePipelineImagesResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Returns a list of images created by the specified pipeline.

    " + }, + "ListImagePipelines":{ + "name":"ListImagePipelines", + "http":{ + "method":"POST", + "requestUri":"/ListImagePipelines" + }, + "input":{"shape":"ListImagePipelinesRequest"}, + "output":{"shape":"ListImagePipelinesResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Returns a list of image pipelines.

    " + }, + "ListImageRecipes":{ + "name":"ListImageRecipes", + "http":{ + "method":"POST", + "requestUri":"/ListImageRecipes" + }, + "input":{"shape":"ListImageRecipesRequest"}, + "output":{"shape":"ListImageRecipesResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Returns a list of image recipes.

    " + }, + "ListImages":{ + "name":"ListImages", + "http":{ + "method":"POST", + "requestUri":"/ListImages" + }, + "input":{"shape":"ListImagesRequest"}, + "output":{"shape":"ListImagesResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Returns the list of images that you have access to.

    " + }, + "ListInfrastructureConfigurations":{ + "name":"ListInfrastructureConfigurations", + "http":{ + "method":"POST", + "requestUri":"/ListInfrastructureConfigurations" + }, + "input":{"shape":"ListInfrastructureConfigurationsRequest"}, + "output":{"shape":"ListInfrastructureConfigurationsResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Returns a list of infrastructure configurations.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns the list of tags for the specified resource.

    " + }, + "PutComponentPolicy":{ + "name":"PutComponentPolicy", + "http":{ + "method":"PUT", + "requestUri":"/PutComponentPolicy" + }, + "input":{"shape":"PutComponentPolicyRequest"}, + "output":{"shape":"PutComponentPolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Applies a policy to a component. We recommend that you call the RAM API CreateResourceShare to share resources. If you call the Image Builder API PutComponentPolicy, you must also call the RAM API PromoteResourceShareCreatedFromPolicy in order for the resource to be visible to all principals with whom the resource is shared.

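    The two-step sharing flow described above (attach the policy through Image Builder, then promote the resource share through AWS RAM) might look roughly like the following with the SDK for Java v2. The request member names, ARNs, and policy body are placeholders or assumptions not confirmed by this excerpt. The same pattern applies to PutImagePolicy, PutImageRecipePolicy, and PutContainerRecipePolicy below.

        import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
        import software.amazon.awssdk.services.imagebuilder.model.PutComponentPolicyRequest;
        import software.amazon.awssdk.services.ram.RamClient;
        import software.amazon.awssdk.services.ram.model.PromoteResourceShareCreatedFromPolicyRequest;

        public class ShareComponentExample {
            public static void main(String[] args) {
                String componentArn =
                        "arn:aws:imagebuilder:us-east-1:123456789012:component/hello-world/1.0.0"; // placeholder
                String policyJson = "{ ... }"; // placeholder resource policy granting access to another account

                try (ImagebuilderClient imagebuilder = ImagebuilderClient.create();
                     RamClient ram = RamClient.create()) {
                    // Step 1: attach the resource policy to the component.
                    imagebuilder.putComponentPolicy(PutComponentPolicyRequest.builder()
                            .componentArn(componentArn)   // member name assumed
                            .policy(policyJson)           // member name assumed
                            .build());
                    // Step 2: promote the resource share created from that policy so the
                    // shared principals can see the component, as the note above requires.
                    ram.promoteResourceShareCreatedFromPolicy(
                            PromoteResourceShareCreatedFromPolicyRequest.builder()
                                    .resourceShareArn(
                                            "arn:aws:ram:us-east-1:123456789012:resource-share/EXAMPLE") // placeholder
                                    .build());
                }
            }
        }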
    " + }, + "PutContainerRecipePolicy":{ + "name":"PutContainerRecipePolicy", + "http":{ + "method":"PUT", + "requestUri":"/PutContainerRecipePolicy" + }, + "input":{"shape":"PutContainerRecipePolicyRequest"}, + "output":{"shape":"PutContainerRecipePolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Applies a policy to a container recipe. We recommend that you call the RAM API CreateResourceShare (https://docs.aws.amazon.com/ram/latest/APIReference/API_CreateResourceShare.html) to share resources. If you call the Image Builder API PutContainerRecipePolicy, you must also call the RAM API PromoteResourceShareCreatedFromPolicy (https://docs.aws.amazon.com/ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html) in order for the resource to be visible to all principals with whom the resource is shared.

    " + }, + "PutImagePolicy":{ + "name":"PutImagePolicy", + "http":{ + "method":"PUT", + "requestUri":"/PutImagePolicy" + }, + "input":{"shape":"PutImagePolicyRequest"}, + "output":{"shape":"PutImagePolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Applies a policy to an image. We recommend that you call the RAM API CreateResourceShare to share resources. If you call the Image Builder API PutImagePolicy, you must also call the RAM API PromoteResourceShareCreatedFromPolicy in order for the resource to be visible to all principals with whom the resource is shared.

    " + }, + "PutImageRecipePolicy":{ + "name":"PutImageRecipePolicy", + "http":{ + "method":"PUT", + "requestUri":"/PutImageRecipePolicy" + }, + "input":{"shape":"PutImageRecipePolicyRequest"}, + "output":{"shape":"PutImageRecipePolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

    Applies a policy to an image recipe. We recommend that you call the RAM API CreateResourceShare to share resources. If you call the Image Builder API PutImageRecipePolicy, you must also call the RAM API PromoteResourceShareCreatedFromPolicy in order for the resource to be visible to all principals with whom the resource is shared.

    " + }, + "StartImagePipelineExecution":{ + "name":"StartImagePipelineExecution", + "http":{ + "method":"PUT", + "requestUri":"/StartImagePipelineExecution" + }, + "input":{"shape":"StartImagePipelineExecutionRequest"}, + "output":{"shape":"StartImagePipelineExecutionResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Manually triggers a pipeline to create an image.

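    A minimal sketch of triggering a pipeline run is shown below; the imagePipelineArn and clientToken request members and the response member are assumed from the public Image Builder API (they do not appear in this excerpt), and the ARN is a placeholder.

        import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
        import software.amazon.awssdk.services.imagebuilder.model.StartImagePipelineExecutionRequest;
        import java.util.UUID;

        public class StartPipelineExample {
            public static void main(String[] args) {
                try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                    String buildArn = imagebuilder.startImagePipelineExecution(
                            StartImagePipelineExecutionRequest.builder()
                                    .imagePipelineArn(
                                            "arn:aws:imagebuilder:us-east-1:123456789012:image-pipeline/my-pipeline") // placeholder
                                    .clientToken(UUID.randomUUID().toString()) // idempotency token for safe retries
                                    .build())
                            .imageBuildVersionArn();   // response member assumed
                    System.out.println("Started image build: " + buildArn);
                }
            }
        }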
    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Adds a tag to a resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes a tag from a resource.

    " + }, + "UpdateDistributionConfiguration":{ + "name":"UpdateDistributionConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/UpdateDistributionConfiguration" + }, + "input":{"shape":"UpdateDistributionConfigurationRequest"}, + "output":{"shape":"UpdateDistributionConfigurationResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"

    Updates a distribution configuration. Distribution configurations define and configure the outputs of your pipeline.

    " + }, + "UpdateImagePipeline":{ + "name":"UpdateImagePipeline", + "http":{ + "method":"PUT", + "requestUri":"/UpdateImagePipeline" + }, + "input":{"shape":"UpdateImagePipelineRequest"}, + "output":{"shape":"UpdateImagePipelineResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Updates an image pipeline. Image pipelines enable you to automate the creation and distribution of images.

    " + }, + "UpdateInfrastructureConfiguration":{ + "name":"UpdateInfrastructureConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/UpdateInfrastructureConfiguration" + }, + "input":{"shape":"UpdateInfrastructureConfigurationRequest"}, + "output":{"shape":"UpdateInfrastructureConfigurationResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Updates an infrastructure configuration. An infrastructure configuration defines the environment in which your image will be built and tested.

    " + } + }, + "shapes":{ + "AccountId":{ + "type":"string", + "pattern":"^\\d{12}$" + }, + "AccountList":{ + "type":"list", + "member":{"shape":"AccountId"}, + "max":1536, + "min":1 + }, + "Ami":{ + "type":"structure", + "members":{ + "region":{ + "shape":"NonEmptyString", + "documentation":"

    The AWS Region of the EC2 AMI.

    " + }, + "image":{ + "shape":"NonEmptyString", + "documentation":"

    The AMI ID of the EC2 AMI.

    " + }, + "name":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the EC2 AMI.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the EC2 AMI. Minimum and maximum length are in characters.

    " + }, + "state":{"shape":"ImageState"}, + "accountId":{ + "shape":"NonEmptyString", + "documentation":"

    The account ID of the owner of the AMI.

    " + } + }, + "documentation":"

    Details of an EC2 AMI.

    " + }, + "AmiDistributionConfiguration":{ + "type":"structure", + "members":{ + "name":{ + "shape":"AmiNameString", + "documentation":"

    The name of the distribution configuration.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the distribution configuration. Minimum and maximum length are in characters.

    " + }, + "targetAccountIds":{ + "shape":"AccountList", + "documentation":"

    The ID of an account to which you want to distribute an image.

    " + }, + "amiTags":{ + "shape":"TagMap", + "documentation":"

    The tags to apply to AMIs distributed to this Region.

    " + }, + "kmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

    The KMS key identifier used to encrypt the distributed image.

    " + }, + "launchPermission":{ + "shape":"LaunchPermissionConfiguration", + "documentation":"

    Launch permissions can be used to configure which AWS accounts can use the AMI to launch instances.

    " + } + }, + "documentation":"

    Define and configure the output AMIs of the pipeline.

    " + }, + "AmiList":{ + "type":"list", + "member":{"shape":"Ami"} + }, + "AmiNameString":{ + "type":"string", + "max":127, + "min":1, + "pattern":"^[-_A-Za-z0-9{][-_A-Za-z0-9\\s:{}\\.]+[-_A-Za-z0-9}]$" + }, + "Arn":{"type":"string"}, + "Boolean":{"type":"boolean"}, + "CallRateLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You have exceeded the permitted request rate for the specific operation.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "CancelImageCreationRequest":{ + "type":"structure", + "required":[ + "imageBuildVersionArn", + "clientToken" + ], + "members":{ + "imageBuildVersionArn":{ + "shape":"ImageBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image whose creation you want to cancel.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    ", + "idempotencyToken":true + } + } + }, + "CancelImageCreationResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "imageBuildVersionArn":{ + "shape":"ImageBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image whose creation has been cancelled.

    " + } + } + }, + "ClientException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    These errors are usually caused by a client action, such as using an action or resource on behalf of a user that doesn't have permissions to use the action or resource, or specifying an invalid resource identifier.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ClientToken":{ + "type":"string", + "max":36, + "min":1 + }, + "Component":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the component.

    " + }, + "version":{ + "shape":"VersionNumber", + "documentation":"

    The version of the component.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the component.

    " + }, + "changeDescription":{ + "shape":"NonEmptyString", + "documentation":"

    The change description of the component.

    " + }, + "type":{ + "shape":"ComponentType", + "documentation":"

    The type of the component denotes whether the component is used to build the image or only to test it.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the component.

    " + }, + "supportedOsVersions":{ + "shape":"OsVersionList", + "documentation":"

    The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

    " + }, + "owner":{ + "shape":"NonEmptyString", + "documentation":"

    The owner of the component.

    " + }, + "data":{ + "shape":"ComponentData", + "documentation":"

    The data of the component.

    " + }, + "kmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

    The KMS key identifier used to encrypt the component.

    " + }, + "encrypted":{ + "shape":"NullableBoolean", + "documentation":"

    The encryption status of the component.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date that the component was created.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags associated with the component.

    " + } + }, + "documentation":"

    A detailed view of a component.

    " + }, + "ComponentBuildVersionArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):component/[a-z0-9-_]+/\\d+\\.\\d+\\.\\d+/\\d+$" + }, + "ComponentConfiguration":{ + "type":"structure", + "required":["componentArn"], + "members":{ + "componentArn":{ + "shape":"ComponentVersionArnOrBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component.

    " + } + }, + "documentation":"

    Configuration details of the component.

    " + }, + "ComponentConfigurationList":{ + "type":"list", + "member":{"shape":"ComponentConfiguration"}, + "min":1 + }, + "ComponentData":{"type":"string"}, + "ComponentFormat":{ + "type":"string", + "enum":["SHELL"] + }, + "ComponentSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the component.

    " + }, + "version":{ + "shape":"VersionNumber", + "documentation":"

    The version of the component.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the component.

    " + }, + "supportedOsVersions":{ + "shape":"OsVersionList", + "documentation":"

    The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

    " + }, + "type":{ + "shape":"ComponentType", + "documentation":"

    The type of the component denotes whether the component is used to build the image or only to test it.

    " + }, + "owner":{ + "shape":"NonEmptyString", + "documentation":"

    The owner of the component.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the component.

    " + }, + "changeDescription":{ + "shape":"NonEmptyString", + "documentation":"

    The change description of the component.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date that the component was created.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags associated with the component.

    " + } + }, + "documentation":"

    A high-level summary of a component.

    " + }, + "ComponentSummaryList":{ + "type":"list", + "member":{"shape":"ComponentSummary"} + }, + "ComponentType":{ + "type":"string", + "enum":[ + "BUILD", + "TEST" + ] + }, + "ComponentVersion":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the component.

    " + }, + "version":{ + "shape":"VersionNumber", + "documentation":"

    The semantic version of the component.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the component.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the component.

    " + }, + "supportedOsVersions":{ + "shape":"OsVersionList", + "documentation":"

    The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

    " + }, + "type":{ + "shape":"ComponentType", + "documentation":"

    The type of the component denotes whether the component is used to build the image or only to test it.

    " + }, + "owner":{ + "shape":"NonEmptyString", + "documentation":"

    The owner of the component.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date that the component was created.

    " + } + }, + "documentation":"

    A high-level overview of a component semantic version.

    " + }, + "ComponentVersionArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):component/[a-z0-9-_]+/\\d+\\.\\d+\\.\\d+$" + }, + "ComponentVersionArnOrBuildVersionArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):component/[a-z0-9-_]+/(?:(?:(\\d+|x)\\.(\\d+|x)\\.(\\d+|x))|(?:\\d+\\.\\d+\\.\\d+/\\d+))$" + }, + "ComponentVersionList":{ + "type":"list", + "member":{"shape":"ComponentVersion"} + }, + "Container":{ + "type":"structure", + "members":{ + "region":{ + "shape":"NonEmptyString", + "documentation":"

    Containers and container images are Region-specific. This is the Region context for the container.

    " + }, + "imageUris":{ + "shape":"StringList", + "documentation":"

    A list of URIs for containers created in the context Region.

    " + } + }, + "documentation":"

    A container encapsulates the runtime environment for an application.

    " + }, + "ContainerDistributionConfiguration":{ + "type":"structure", + "required":["targetRepository"], + "members":{ + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the container distribution configuration.

    " + }, + "containerTags":{ + "shape":"StringList", + "documentation":"

    Tags that are attached to the container distribution configuration.

    " + }, + "targetRepository":{ + "shape":"TargetContainerRepository", + "documentation":"

    The destination repository for the container distribution configuration.

    " + } + }, + "documentation":"

    Container distribution settings for encryption, licensing, and sharing in a specific Region.

    " + }, + "ContainerList":{ + "type":"list", + "member":{"shape":"Container"} + }, + "ContainerRecipe":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe.

    " + }, + "containerType":{ + "shape":"ContainerType", + "documentation":"

    Specifies the type of container, such as Docker.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the container recipe.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the container recipe.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The system platform for the container, such as Windows or Linux.

    " + }, + "owner":{ + "shape":"NonEmptyString", + "documentation":"

    The owner of the container recipe.

    " + }, + "version":{ + "shape":"VersionNumber", + "documentation":"

    The semantic version of the container recipe (<major>.<minor>.<patch>).

    " + }, + "components":{ + "shape":"ComponentConfigurationList", + "documentation":"

    Components for build and test that are included in the container recipe.

    " + }, + "dockerfileTemplateData":{ + "shape":"DockerFileTemplate", + "documentation":"

    Dockerfiles are text documents that are used to build Docker containers, and ensure that they contain all of the elements required by the application running inside. The template data consists of contextual variables where Image Builder places build information or scripts, based on your container image recipe.

    " + }, + "kmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

    Identifies which KMS key is used to encrypt the container image for distribution to the target Region.

    " + }, + "encrypted":{ + "shape":"NullableBoolean", + "documentation":"

    A flag that indicates if the target container is encrypted.

    " + }, + "parentImage":{ + "shape":"NonEmptyString", + "documentation":"

    The source image for the container recipe.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date when this container recipe was created.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    Tags that are attached to the container recipe.

    " + }, + "workingDirectory":{ + "shape":"NonEmptyString", + "documentation":"

    The working directory for use during build and test workflows.

    " + }, + "targetRepository":{ + "shape":"TargetContainerRepository", + "documentation":"

    The destination repository for the container image.

    " + } + }, + "documentation":"

    A container recipe.

    " + }, + "ContainerRecipeArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):container-recipe/[a-z0-9-_]+/\\d+\\.\\d+\\.\\d+$" + }, + "ContainerRecipeSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe.

    " + }, + "containerType":{ + "shape":"ContainerType", + "documentation":"

    Specifies the type of container, such as \"Docker\".

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the container recipe.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The system platform for the container, such as Windows or Linux.

    " + }, + "owner":{ + "shape":"NonEmptyString", + "documentation":"

    The owner of the container recipe.

    " + }, + "parentImage":{ + "shape":"NonEmptyString", + "documentation":"

    The source image for the container recipe.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date when this container recipe was created.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    Tags that are attached to the container recipe.

    " + } + }, + "documentation":"

    A summary of a container recipe.

    " + }, + "ContainerRecipeSummaryList":{ + "type":"list", + "member":{"shape":"ContainerRecipeSummary"} + }, + "ContainerRepositoryService":{ + "type":"string", + "enum":["ECR"] + }, + "ContainerType":{ + "type":"string", + "enum":["DOCKER"] + }, + "CreateComponentRequest":{ + "type":"structure", + "required":[ + "name", + "semanticVersion", + "platform", + "clientToken" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the component.

    " + }, + "semanticVersion":{ + "shape":"VersionNumber", + "documentation":"

    The semantic version of the component. This version follows the semantic version syntax. For example, major.minor.patch. This could be versioned like software (2.0.1) or like a date (2019.12.01).

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the component. Describes the contents of the component.

    " + }, + "changeDescription":{ + "shape":"NonEmptyString", + "documentation":"

    The change description of the component. Describes what change has been made in this version, or what makes this version different from other versions of this component.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the component.

    " + }, + "supportedOsVersions":{ + "shape":"OsVersionList", + "documentation":"

    The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

    " + }, + "data":{ + "shape":"InlineComponentData", + "documentation":"

    The data of the component. Used to specify the data inline. Either data or uri can be used to specify the data within the component.

    " + }, + "uri":{ + "shape":"Uri", + "documentation":"

    The URI of the component. Must be an S3 URL, and the requester must have permission to access the S3 bucket. If you use S3, you can specify component content up to your service quota. Either data or uri can be used to specify the data within the component.

    " + }, + "kmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

    The ID of the KMS key that should be used to encrypt this component.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the component.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token of the component.

    ", + "idempotencyToken":true + } + } + }, + "CreateComponentResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "componentBuildVersionArn":{ + "shape":"ComponentBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component that was created by this request.
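
    Taken together, the CreateComponentRequest and CreateComponentResponse shapes above describe a single call that registers a component document supplied either inline (data) or from S3 (uri). The following is only a rough sketch of how the generated AWS SDK for Java v2 client might be invoked; the ImagebuilderClient class and fluent setter names are assumed to follow the SDK's usual code-generation conventions, and the S3 URI is a hypothetical placeholder.

    import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
    import software.amazon.awssdk.services.imagebuilder.model.CreateComponentResponse;

    public class CreateComponentExample {
        public static void main(String[] args) {
            try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                // Either data (inline document) or uri (S3 URL) supplies the component content.
                CreateComponentResponse response = imagebuilder.createComponent(r -> r
                        .name("my-build-component")
                        .semanticVersion("1.0.0")                        // major.minor.patch, as required by VersionNumber
                        .platform("Linux")                               // Platform is an enum; the String overload is used here
                        .description("Installs build tooling")
                        .uri("s3://my-bucket/components/my-component.yaml")   // hypothetical S3 location
                        .clientToken(java.util.UUID.randomUUID().toString())); // idempotency token
                System.out.println("Created " + response.componentBuildVersionArn());
            }
        }
    }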

    " + } + } + }, + "CreateContainerRecipeRequest":{ + "type":"structure", + "required":[ + "containerType", + "name", + "semanticVersion", + "components", + "dockerfileTemplateData", + "parentImage", + "targetRepository", + "clientToken" + ], + "members":{ + "containerType":{ + "shape":"ContainerType", + "documentation":"

    The type of container to create.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the container recipe.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the container recipe.

    " + }, + "semanticVersion":{ + "shape":"VersionNumber", + "documentation":"

    The semantic version of the container recipe (<major>.<minor>.<patch>).

    " + }, + "components":{ + "shape":"ComponentConfigurationList", + "documentation":"

    Components for build and test that are included in the container recipe.

    " + }, + "dockerfileTemplateData":{ + "shape":"InlineDockerFileTemplate", + "documentation":"

    The Dockerfile template used to build your image as an inline data blob.

    " + }, + "dockerfileTemplateUri":{ + "shape":"Uri", + "documentation":"

    The S3 URI for the Dockerfile that will be used to build your container image.

    " + }, + "platformOverride":{ + "shape":"Platform", + "documentation":"

    Specifies the operating system platform when you use a custom source image.

    " + }, + "imageOsVersionOverride":{ + "shape":"NonEmptyString", + "documentation":"

    Specifies the operating system version for the source image.

    " + }, + "parentImage":{ + "shape":"NonEmptyString", + "documentation":"

    The source image for the container recipe.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    Tags that are attached to the container recipe.

    " + }, + "workingDirectory":{ + "shape":"NonEmptyString", + "documentation":"

    The working directory for use during build and test workflows.

    " + }, + "targetRepository":{ + "shape":"TargetContainerRepository", + "documentation":"

    The destination repository for the container image.

    " + }, + "kmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

    Identifies which KMS key is used to encrypt the container image.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The client token used to make this request idempotent.

    ", + "idempotencyToken":true + } + } + }, + "CreateContainerRecipeResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The client token used to make this request idempotent.

    " + }, + "containerRecipeArn":{ + "shape":"ContainerRecipeArn", + "documentation":"

    Returns the Amazon Resource Name (ARN) of the container recipe that the request created.
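
    A container recipe ties a Dockerfile template, build/test components, and a target ECR repository together. The hedged sketch below assumes the generated ImagebuilderClient; the nested member names on ComponentConfiguration (componentArn) and TargetContainerRepository (service, repositoryName) are defined elsewhere in this model and are assumed here, and the Dockerfile template is purely illustrative.

    import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
    import software.amazon.awssdk.services.imagebuilder.model.CreateContainerRecipeResponse;

    public class CreateContainerRecipeExample {
        public static void main(String[] args) {
            try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                CreateContainerRecipeResponse response = imagebuilder.createContainerRecipe(r -> r
                        .containerType("DOCKER")                    // DOCKER is the only value in the ContainerType enum
                        .name("my-container-recipe")
                        .semanticVersion("1.0.0")
                        .parentImage("amazonlinux:latest")          // hypothetical source image
                        // componentArn is an assumed member of ComponentConfiguration (defined elsewhere in this model).
                        .components(c -> c.componentArn("arn:aws:imagebuilder:us-west-2:aws:component/update-linux/x.x.x"))
                        // Illustrative template only; Image Builder substitutes its contextual variables at build time.
                        .dockerfileTemplateData("FROM {{{ imagebuilder:parentImage }}}\n{{{ imagebuilder:components }}}")
                        // service and repositoryName are assumed members of TargetContainerRepository.
                        .targetRepository(t -> t.service("ECR").repositoryName("my-repository"))
                        .clientToken(java.util.UUID.randomUUID().toString()));
                System.out.println("Created " + response.containerRecipeArn());
            }
        }
    }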

    " + } + } + }, + "CreateDistributionConfigurationRequest":{ + "type":"structure", + "required":[ + "name", + "distributions", + "clientToken" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the distribution configuration.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the distribution configuration.

    " + }, + "distributions":{ + "shape":"DistributionList", + "documentation":"

    The distributions of the distribution configuration.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the distribution configuration.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token of the distribution configuration.

    ", + "idempotencyToken":true + } + } + }, + "CreateDistributionConfigurationResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "distributionConfigurationArn":{ + "shape":"DistributionConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration that was created by this request.
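
    Because a Distribution entry (defined further down in this model) only requires a target region, a minimal CreateDistributionConfiguration call can simply enumerate Regions. A sketch under the usual SDK v2 code-generation assumptions, with placeholder values:

    import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
    import software.amazon.awssdk.services.imagebuilder.model.CreateDistributionConfigurationResponse;

    public class CreateDistributionConfigurationExample {
        public static void main(String[] args) {
            try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                CreateDistributionConfigurationResponse response = imagebuilder.createDistributionConfiguration(r -> r
                        .name("my-distribution-configuration")
                        .description("Copies the output image to two Regions")
                        // Each Distribution requires only a region; AMI/container settings are optional.
                        .distributions(d -> d.region("us-west-2"),
                                       d -> d.region("us-east-1"))
                        .clientToken(java.util.UUID.randomUUID().toString()));
                System.out.println("Created " + response.distributionConfigurationArn());
            }
        }
    }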

    " + } + } + }, + "CreateImagePipelineRequest":{ + "type":"structure", + "required":[ + "name", + "infrastructureConfigurationArn", + "clientToken" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the image pipeline.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the image pipeline.

    " + }, + "imageRecipeArn":{ + "shape":"ImageRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe that will be used to configure images created by this image pipeline.

    " + }, + "containerRecipeArn":{ + "shape":"ContainerRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe that is used to configure images created by this container pipeline.

    " + }, + "infrastructureConfigurationArn":{ + "shape":"InfrastructureConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration that will be used to build images created by this image pipeline.

    " + }, + "distributionConfigurationArn":{ + "shape":"DistributionConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration that will be used to configure and distribute images created by this image pipeline.

    " + }, + "imageTestsConfiguration":{ + "shape":"ImageTestsConfiguration", + "documentation":"

    The image test configuration of the image pipeline.

    " + }, + "enhancedImageMetadataEnabled":{ + "shape":"NullableBoolean", + "documentation":"

    Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.

    " + }, + "schedule":{ + "shape":"Schedule", + "documentation":"

    The schedule of the image pipeline.

    " + }, + "status":{ + "shape":"PipelineStatus", + "documentation":"

    The status of the image pipeline.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the image pipeline.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    ", + "idempotencyToken":true + } + } + }, + "CreateImagePipelineResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "imagePipelineArn":{ + "shape":"ImagePipelineArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image pipeline that was created by this request.
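
    A pipeline needs at least a name, an infrastructure configuration, and a client token; the recipe, test configuration, and schedule are optional. The sketch below assumes the generated ImagebuilderClient; the Schedule shape's scheduleExpression member is defined elsewhere in this model and is assumed here, and all ARNs are placeholders.

    import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
    import software.amazon.awssdk.services.imagebuilder.model.CreateImagePipelineResponse;

    public class CreateImagePipelineExample {
        public static void main(String[] args) {
            try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                CreateImagePipelineResponse response = imagebuilder.createImagePipeline(r -> r
                        .name("my-nightly-pipeline")
                        .imageRecipeArn("arn:aws:imagebuilder:us-west-2:123456789012:image-recipe/my-image-recipe/1.0.0")
                        .infrastructureConfigurationArn("arn:aws:imagebuilder:us-west-2:123456789012:infrastructure-configuration/my-infra-config")
                        .imageTestsConfiguration(t -> t.imageTestsEnabled(true).timeoutMinutes(90))
                        // scheduleExpression is an assumed member of the Schedule shape (defined elsewhere in this model).
                        .schedule(s -> s.scheduleExpression("cron(0 9 * * ? *)"))
                        .clientToken(java.util.UUID.randomUUID().toString()));
                System.out.println("Created " + response.imagePipelineArn());
            }
        }
    }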

    " + } + } + }, + "CreateImageRecipeRequest":{ + "type":"structure", + "required":[ + "name", + "semanticVersion", + "components", + "parentImage", + "clientToken" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the image recipe.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the image recipe.

    " + }, + "semanticVersion":{ + "shape":"VersionNumber", + "documentation":"

    The semantic version of the image recipe.

    " + }, + "components":{ + "shape":"ComponentConfigurationList", + "documentation":"

    The components of the image recipe.

    " + }, + "parentImage":{ + "shape":"NonEmptyString", + "documentation":"

    The parent image of the image recipe. The value of the string can be the ARN of the parent image or an AMI ID. The format for the ARN follows this example: arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/xxxx.x.x. You can provide the specific version that you want to use, or you can use a wildcard in all of the fields. If you enter an AMI ID for the string value, you must have access to the AMI, and the AMI must be in the same Region in which you are using Image Builder.

    " + }, + "blockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappings", + "documentation":"

    The block device mappings of the image recipe.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the image recipe.

    " + }, + "workingDirectory":{ + "shape":"NonEmptyString", + "documentation":"

    The working directory to be used during build and test workflows.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    ", + "idempotencyToken":true + } + } + }, + "CreateImageRecipeResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "imageRecipeArn":{ + "shape":"ImageRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe that was created by this request.
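
    The parentImage documentation above allows either an AMI ID or an Image Builder image ARN with wildcard version fields. A hedged sketch of a createImageRecipe call follows, again assuming the standard generated client; componentArn on ComponentConfiguration is an assumed member name (the shape is defined elsewhere in this model).

    import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
    import software.amazon.awssdk.services.imagebuilder.model.CreateImageRecipeResponse;

    public class CreateImageRecipeExample {
        public static void main(String[] args) {
            try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                CreateImageRecipeResponse response = imagebuilder.createImageRecipe(r -> r
                        .name("my-image-recipe")
                        .semanticVersion("1.0.0")
                        // An Image Builder ARN with x.x.x as a version wildcard, or an AMI ID in the same Region.
                        .parentImage("arn:aws:imagebuilder:us-west-2:aws:image/amazon-linux-2-x86/x.x.x")
                        .components(c -> c.componentArn("arn:aws:imagebuilder:us-west-2:aws:component/update-linux/x.x.x"))
                        .workingDirectory("/tmp")
                        .clientToken(java.util.UUID.randomUUID().toString()));
                System.out.println("Created " + response.imageRecipeArn());
            }
        }
    }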

    " + } + } + }, + "CreateImageRequest":{ + "type":"structure", + "required":[ + "infrastructureConfigurationArn", + "clientToken" + ], + "members":{ + "imageRecipeArn":{ + "shape":"ImageRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe that defines how images are configured, tested, and assessed.

    " + }, + "containerRecipeArn":{ + "shape":"ContainerRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe that defines how images are configured and tested.

    " + }, + "distributionConfigurationArn":{ + "shape":"DistributionConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration that defines and configures the outputs of your pipeline.

    " + }, + "infrastructureConfigurationArn":{ + "shape":"InfrastructureConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration that defines the environment in which your image will be built and tested.

    " + }, + "imageTestsConfiguration":{ + "shape":"ImageTestsConfiguration", + "documentation":"

    The image tests configuration of the image.

    " + }, + "enhancedImageMetadataEnabled":{ + "shape":"NullableBoolean", + "documentation":"

    Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the image.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    ", + "idempotencyToken":true + } + } + }, + "CreateImageResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "imageBuildVersionArn":{ + "shape":"ImageBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image that was created by this request.
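
    CreateImage starts a one-off build from a recipe and an infrastructure configuration, optionally with tests enabled. A minimal sketch under the same SDK v2 code-generation assumptions, with placeholder ARNs:

    import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
    import software.amazon.awssdk.services.imagebuilder.model.CreateImageResponse;

    public class CreateImageExample {
        public static void main(String[] args) {
            try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                CreateImageResponse response = imagebuilder.createImage(r -> r
                        .imageRecipeArn("arn:aws:imagebuilder:us-west-2:123456789012:image-recipe/my-image-recipe/1.0.0")
                        .infrastructureConfigurationArn("arn:aws:imagebuilder:us-west-2:123456789012:infrastructure-configuration/my-infra-config")
                        .imageTestsConfiguration(t -> t.imageTestsEnabled(true).timeoutMinutes(120))
                        .enhancedImageMetadataEnabled(true)
                        .clientToken(java.util.UUID.randomUUID().toString()));
                System.out.println("Started image build " + response.imageBuildVersionArn());
            }
        }
    }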

    " + } + } + }, + "CreateInfrastructureConfigurationRequest":{ + "type":"structure", + "required":[ + "name", + "instanceProfileName", + "clientToken" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the infrastructure configuration.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the infrastructure configuration.

    " + }, + "instanceTypes":{ + "shape":"InstanceTypeList", + "documentation":"

    The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.

    " + }, + "instanceProfileName":{ + "shape":"NonEmptyString", + "documentation":"

    The instance profile to associate with the instance used to customize your EC2 AMI.

    " + }, + "securityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

    The security group IDs to associate with the instance used to customize your EC2 AMI.

    " + }, + "subnetId":{ + "shape":"NonEmptyString", + "documentation":"

    The subnet ID in which to place the instance used to customize your EC2 AMI.

    " + }, + "logging":{ + "shape":"Logging", + "documentation":"

    The logging configuration of the infrastructure configuration.

    " + }, + "keyPair":{ + "shape":"NonEmptyString", + "documentation":"

    The key pair of the infrastructure configuration. This can be used to log on to and debug the instance used to create your image.

    " + }, + "terminateInstanceOnFailure":{ + "shape":"NullableBoolean", + "documentation":"

    The terminate instance on failure setting of the infrastructure configuration. Set to false if you want Image Builder to retain the instance used to configure your AMI if the build or test phase of your workflow fails.

    " + }, + "snsTopicArn":{ + "shape":"SnsTopicArn", + "documentation":"

    The SNS topic on which to send image build events.

    " + }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

    The tags attached to the resource created by Image Builder.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the infrastructure configuration.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    ", + "idempotencyToken":true + } + } + }, + "CreateInfrastructureConfigurationResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "infrastructureConfigurationArn":{ + "shape":"InfrastructureConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration that was created by this request.
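
    The infrastructure configuration describes the EC2 environment Image Builder uses for build and test instances. The sketch below only uses members shown in the request above, but the client and builder names are still assumed from the SDK's code-generation conventions, and all identifiers are placeholders.

    import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
    import software.amazon.awssdk.services.imagebuilder.model.CreateInfrastructureConfigurationResponse;

    public class CreateInfrastructureConfigurationExample {
        public static void main(String[] args) {
            try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                CreateInfrastructureConfigurationResponse response = imagebuilder.createInfrastructureConfiguration(r -> r
                        .name("my-infra-config")
                        .instanceProfileName("ImageBuilderInstanceProfile")  // hypothetical instance profile
                        .instanceTypes("t3.medium", "t3.large")              // the service picks one based on availability
                        .securityGroupIds("sg-0123456789abcdef0")
                        .subnetId("subnet-0123456789abcdef0")
                        .terminateInstanceOnFailure(false)                   // keep the instance around for debugging failed builds
                        .snsTopicArn("arn:aws:sns:us-west-2:123456789012:image-builder-events")
                        .clientToken(java.util.UUID.randomUUID().toString()));
                System.out.println("Created " + response.infrastructureConfigurationArn());
            }
        }
    }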

    " + } + } + }, + "DateTime":{"type":"string"}, + "DeleteComponentRequest":{ + "type":"structure", + "required":["componentBuildVersionArn"], + "members":{ + "componentBuildVersionArn":{ + "shape":"ComponentBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component build version to delete.

    ", + "location":"querystring", + "locationName":"componentBuildVersionArn" + } + } + }, + "DeleteComponentResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "componentBuildVersionArn":{ + "shape":"ComponentBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component build version that was deleted.

    " + } + } + }, + "DeleteContainerRecipeRequest":{ + "type":"structure", + "required":["containerRecipeArn"], + "members":{ + "containerRecipeArn":{ + "shape":"ContainerRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe to delete.

    ", + "location":"querystring", + "locationName":"containerRecipeArn" + } + } + }, + "DeleteContainerRecipeResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "containerRecipeArn":{ + "shape":"ContainerRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe that was deleted.

    " + } + } + }, + "DeleteDistributionConfigurationRequest":{ + "type":"structure", + "required":["distributionConfigurationArn"], + "members":{ + "distributionConfigurationArn":{ + "shape":"DistributionConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration to delete.

    ", + "location":"querystring", + "locationName":"distributionConfigurationArn" + } + } + }, + "DeleteDistributionConfigurationResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "distributionConfigurationArn":{ + "shape":"DistributionConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration that was deleted.

    " + } + } + }, + "DeleteImagePipelineRequest":{ + "type":"structure", + "required":["imagePipelineArn"], + "members":{ + "imagePipelineArn":{ + "shape":"ImagePipelineArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image pipeline to delete.

    ", + "location":"querystring", + "locationName":"imagePipelineArn" + } + } + }, + "DeleteImagePipelineResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imagePipelineArn":{ + "shape":"ImagePipelineArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image pipeline that was deleted.

    " + } + } + }, + "DeleteImageRecipeRequest":{ + "type":"structure", + "required":["imageRecipeArn"], + "members":{ + "imageRecipeArn":{ + "shape":"ImageRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe to delete.

    ", + "location":"querystring", + "locationName":"imageRecipeArn" + } + } + }, + "DeleteImageRecipeResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imageRecipeArn":{ + "shape":"ImageRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe that was deleted.

    " + } + } + }, + "DeleteImageRequest":{ + "type":"structure", + "required":["imageBuildVersionArn"], + "members":{ + "imageBuildVersionArn":{ + "shape":"ImageBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image to delete.

    ", + "location":"querystring", + "locationName":"imageBuildVersionArn" + } + } + }, + "DeleteImageResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imageBuildVersionArn":{ + "shape":"ImageBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image that was deleted.

    " + } + } + }, + "DeleteInfrastructureConfigurationRequest":{ + "type":"structure", + "required":["infrastructureConfigurationArn"], + "members":{ + "infrastructureConfigurationArn":{ + "shape":"InfrastructureConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration to delete.

    ", + "location":"querystring", + "locationName":"infrastructureConfigurationArn" + } + } + }, + "DeleteInfrastructureConfigurationResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "infrastructureConfigurationArn":{ + "shape":"InfrastructureConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration that was deleted.
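
    All of the delete operations above follow the same pattern: the target ARN is bound to a querystring parameter and echoed back in the response. One hedged example, assuming the generated ImagebuilderClient and a placeholder ARN:

    import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;

    public class DeleteImagePipelineExample {
        public static void main(String[] args) {
            try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                // imagePipelineArn maps to the querystring parameter declared in DeleteImagePipelineRequest.
                imagebuilder.deleteImagePipeline(r -> r
                        .imagePipelineArn("arn:aws:imagebuilder:us-west-2:123456789012:image-pipeline/my-nightly-pipeline"));
            }
        }
    }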

    " + } + } + }, + "Distribution":{ + "type":"structure", + "required":["region"], + "members":{ + "region":{ + "shape":"NonEmptyString", + "documentation":"

    The target Region.

    " + }, + "amiDistributionConfiguration":{ + "shape":"AmiDistributionConfiguration", + "documentation":"

    The specific AMI settings (for example, launch permissions, AMI tags).

    " + }, + "containerDistributionConfiguration":{ + "shape":"ContainerDistributionConfiguration", + "documentation":"

    Container distribution settings for encryption, licensing, and sharing in a specific Region.

    " + }, + "licenseConfigurationArns":{ + "shape":"LicenseConfigurationArnList", + "documentation":"

    The License Manager Configuration to associate with the AMI in the specified Region.

    " + } + }, + "documentation":"

    Defines the settings for a specific Region.

    " + }, + "DistributionConfiguration":{ + "type":"structure", + "required":["timeoutMinutes"], + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the distribution configuration.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the distribution configuration.

    " + }, + "distributions":{ + "shape":"DistributionList", + "documentation":"

    The distributions of the distribution configuration.

    " + }, + "timeoutMinutes":{ + "shape":"DistributionTimeoutMinutes", + "documentation":"

    The maximum duration in minutes for this distribution configuration.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date on which this distribution configuration was created.

    " + }, + "dateUpdated":{ + "shape":"DateTime", + "documentation":"

    The date on which this distribution configuration was last updated.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the distribution configuration.

    " + } + }, + "documentation":"

    A distribution configuration.

    " + }, + "DistributionConfigurationArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):distribution-configuration/[a-z0-9-_]+$" + }, + "DistributionConfigurationSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the distribution configuration.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the distribution configuration.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date on which the distribution configuration was created.

    " + }, + "dateUpdated":{ + "shape":"DateTime", + "documentation":"

    The date on which the distribution configuration was updated.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags associated with the distribution configuration.

    " + }, + "regions":{ + "shape":"RegionList", + "documentation":"

    A list of Regions to which the container image is distributed.

    " + } + }, + "documentation":"

    A high-level overview of a distribution configuration.

    " + }, + "DistributionConfigurationSummaryList":{ + "type":"list", + "member":{"shape":"DistributionConfigurationSummary"} + }, + "DistributionList":{ + "type":"list", + "member":{"shape":"Distribution"} + }, + "DistributionTimeoutMinutes":{ + "type":"integer", + "max":720, + "min":30 + }, + "DockerFileTemplate":{"type":"string"}, + "EbsInstanceBlockDeviceSpecification":{ + "type":"structure", + "members":{ + "encrypted":{ + "shape":"NullableBoolean", + "documentation":"

    Use to configure device encryption.

    " + }, + "deleteOnTermination":{ + "shape":"NullableBoolean", + "documentation":"

    Use to configure delete on termination of the associated device.

    " + }, + "iops":{ + "shape":"EbsIopsInteger", + "documentation":"

    Use to configure device IOPS.

    " + }, + "kmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

    Use to configure the KMS key to use when encrypting the device.

    " + }, + "snapshotId":{ + "shape":"NonEmptyString", + "documentation":"

    The snapshot that defines the device contents.

    " + }, + "volumeSize":{ + "shape":"EbsVolumeSizeInteger", + "documentation":"

    Use to override the device's volume size.

    " + }, + "volumeType":{ + "shape":"EbsVolumeType", + "documentation":"

    Use to override the device's volume type.

    " + } + }, + "documentation":"

    Amazon EBS-specific block device mapping specifications.
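
    This EBS specification is what an image recipe's blockDeviceMappings ultimately carries for each device. A small sketch of building one follows; only the members shown above are used on the EBS side, but InstanceBlockDeviceMapping is defined elsewhere in this model, so its deviceName and ebs member names are assumptions.

    import software.amazon.awssdk.services.imagebuilder.model.EbsInstanceBlockDeviceSpecification;
    import software.amazon.awssdk.services.imagebuilder.model.InstanceBlockDeviceMapping;

    public class BlockDeviceMappingExample {
        public static void main(String[] args) {
            EbsInstanceBlockDeviceSpecification ebs = EbsInstanceBlockDeviceSpecification.builder()
                    .volumeSize(30)            // GiB, within the 1-16000 bound above
                    .volumeType("gp2")         // one of the EbsVolumeType enum values
                    .encrypted(true)
                    .deleteOnTermination(true)
                    .build();
            // deviceName and ebs are assumed member names on InstanceBlockDeviceMapping.
            InstanceBlockDeviceMapping mapping = InstanceBlockDeviceMapping.builder()
                    .deviceName("/dev/xvda")
                    .ebs(ebs)
                    .build();
            System.out.println(mapping);
        }
    }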

    " + }, + "EbsIopsInteger":{ + "type":"integer", + "max":10000, + "min":100 + }, + "EbsVolumeSizeInteger":{ + "type":"integer", + "max":16000, + "min":1 + }, + "EbsVolumeType":{ + "type":"string", + "enum":[ + "standard", + "io1", + "io2", + "gp2", + "sc1", + "st1" + ] + }, + "EmptyString":{ + "type":"string", + "max":0, + "min":0 + }, + "ErrorMessage":{"type":"string"}, + "Filter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"FilterName", + "documentation":"

    The name of the filter. Filter names are case-sensitive.

    " + }, + "values":{ + "shape":"FilterValues", + "documentation":"

    The filter values. Filter values are case-sensitive.

    " + } + }, + "documentation":"

    A filter name and value pair that is used to return a more specific list of results from a list operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.
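
    Filters are consumed by the service's list operations, which are defined elsewhere in this model. As a hedged illustration, a name filter applied to ListImagePipelines might look like the sketch below; the listImagePipelines operation and its imagePipelineList response member are assumed, and the pipeline name is a placeholder.

    import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
    import software.amazon.awssdk.services.imagebuilder.model.ListImagePipelinesResponse;

    public class ListWithFilterExample {
        public static void main(String[] args) {
            try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                // A single Filter with a case-sensitive name and one value.
                ListImagePipelinesResponse response = imagebuilder.listImagePipelines(r -> r
                        .filters(f -> f.name("name").values("my-nightly-pipeline")));
                response.imagePipelineList().forEach(p -> System.out.println(p.arn()));
            }
        }
    }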

    " + }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"}, + "max":10, + "min":1 + }, + "FilterName":{ + "type":"string", + "pattern":"^[a-zA-Z]{1,1024}$" + }, + "FilterValue":{ + "type":"string", + "pattern":"^[0-9a-zA-Z./_ :-]{1,1024}$" + }, + "FilterValues":{ + "type":"list", + "member":{"shape":"FilterValue"}, + "max":10, + "min":1 + }, + "ForbiddenException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You are not authorized to perform the requested operation.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "GetComponentPolicyRequest":{ + "type":"structure", + "required":["componentArn"], + "members":{ + "componentArn":{ + "shape":"ComponentBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component whose policy you want to retrieve.

    ", + "location":"querystring", + "locationName":"componentArn" + } + } + }, + "GetComponentPolicyResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "policy":{ + "shape":"ResourcePolicyDocument", + "documentation":"

    The component policy.

    " + } + } + }, + "GetComponentRequest":{ + "type":"structure", + "required":["componentBuildVersionArn"], + "members":{ + "componentBuildVersionArn":{ + "shape":"ComponentVersionArnOrBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component that you want to retrieve. Regex requires \"/\\d+$\" suffix.

    ", + "location":"querystring", + "locationName":"componentBuildVersionArn" + } + } + }, + "GetComponentResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "component":{ + "shape":"Component", + "documentation":"

    The component object associated with the specified ARN.

    " + } + } + }, + "GetContainerRecipePolicyRequest":{ + "type":"structure", + "required":["containerRecipeArn"], + "members":{ + "containerRecipeArn":{ + "shape":"ContainerRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe for the policy being requested.

    ", + "location":"querystring", + "locationName":"containerRecipeArn" + } + } + }, + "GetContainerRecipePolicyResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "policy":{ + "shape":"ResourcePolicyDocument", + "documentation":"

    The container recipe policy object that is returned.

    " + } + } + }, + "GetContainerRecipeRequest":{ + "type":"structure", + "required":["containerRecipeArn"], + "members":{ + "containerRecipeArn":{ + "shape":"ContainerRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe to retrieve.

    ", + "location":"querystring", + "locationName":"containerRecipeArn" + } + } + }, + "GetContainerRecipeResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "containerRecipe":{ + "shape":"ContainerRecipe", + "documentation":"

    The container recipe object that is returned.

    " + } + } + }, + "GetDistributionConfigurationRequest":{ + "type":"structure", + "required":["distributionConfigurationArn"], + "members":{ + "distributionConfigurationArn":{ + "shape":"DistributionConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration that you want to retrieve.

    ", + "location":"querystring", + "locationName":"distributionConfigurationArn" + } + } + }, + "GetDistributionConfigurationResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "distributionConfiguration":{ + "shape":"DistributionConfiguration", + "documentation":"

    The distribution configuration object.

    " + } + } + }, + "GetImagePipelineRequest":{ + "type":"structure", + "required":["imagePipelineArn"], + "members":{ + "imagePipelineArn":{ + "shape":"ImagePipelineArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image pipeline that you want to retrieve.

    ", + "location":"querystring", + "locationName":"imagePipelineArn" + } + } + }, + "GetImagePipelineResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imagePipeline":{ + "shape":"ImagePipeline", + "documentation":"

    The image pipeline object.

    " + } + } + }, + "GetImagePolicyRequest":{ + "type":"structure", + "required":["imageArn"], + "members":{ + "imageArn":{ + "shape":"ImageBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image whose policy you want to retrieve.

    ", + "location":"querystring", + "locationName":"imageArn" + } + } + }, + "GetImagePolicyResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "policy":{ + "shape":"ResourcePolicyDocument", + "documentation":"

    The image policy object.

    " + } + } + }, + "GetImageRecipePolicyRequest":{ + "type":"structure", + "required":["imageRecipeArn"], + "members":{ + "imageRecipeArn":{ + "shape":"ImageRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe whose policy you want to retrieve.

    ", + "location":"querystring", + "locationName":"imageRecipeArn" + } + } + }, + "GetImageRecipePolicyResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "policy":{ + "shape":"ResourcePolicyDocument", + "documentation":"

    The image recipe policy object.

    " + } + } + }, + "GetImageRecipeRequest":{ + "type":"structure", + "required":["imageRecipeArn"], + "members":{ + "imageRecipeArn":{ + "shape":"ImageRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe that you want to retrieve.

    ", + "location":"querystring", + "locationName":"imageRecipeArn" + } + } + }, + "GetImageRecipeResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imageRecipe":{ + "shape":"ImageRecipe", + "documentation":"

    The image recipe object.

    " + } + } + }, + "GetImageRequest":{ + "type":"structure", + "required":["imageBuildVersionArn"], + "members":{ + "imageBuildVersionArn":{ + "shape":"ImageVersionArnOrBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image that you want to retrieve.

    ", + "location":"querystring", + "locationName":"imageBuildVersionArn" + } + } + }, + "GetImageResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "image":{ + "shape":"Image", + "documentation":"

    The image object.

    " + } + } + }, + "GetInfrastructureConfigurationRequest":{ + "type":"structure", + "required":["infrastructureConfigurationArn"], + "members":{ + "infrastructureConfigurationArn":{ + "shape":"InfrastructureConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration that you want to retrieve.

    ", + "location":"querystring", + "locationName":"infrastructureConfigurationArn" + } + }, + "documentation":"

    GetInfrastructureConfiguration request object.

    " + }, + "GetInfrastructureConfigurationResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "infrastructureConfiguration":{ + "shape":"InfrastructureConfiguration", + "documentation":"

    The infrastructure configuration object.

    " + } + }, + "documentation":"

    GetInfrastructureConfiguration response object.

    " + }, + "IdempotentParameterMismatchException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You have specified a client token for an operation using parameter values that differ from a previous request that used the same client token.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Image":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image.

    " + }, + "type":{ + "shape":"ImageType", + "documentation":"

    Specifies whether this is an AMI or container image.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the image.

    " + }, + "version":{ + "shape":"VersionNumber", + "documentation":"

    The semantic version of the image.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the image.

    " + }, + "enhancedImageMetadataEnabled":{ + "shape":"NullableBoolean", + "documentation":"

    Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.

    " + }, + "osVersion":{ + "shape":"OsVersion", + "documentation":"

    The operating system version of the instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.

    " + }, + "state":{ + "shape":"ImageState", + "documentation":"

    The state of the image.

    " + }, + "imageRecipe":{ + "shape":"ImageRecipe", + "documentation":"

    The image recipe used when creating the image.

    " + }, + "containerRecipe":{ + "shape":"ContainerRecipe", + "documentation":"

    The container recipe used to create the container image type.

    " + }, + "sourcePipelineName":{ + "shape":"ResourceName", + "documentation":"

    The name of the image pipeline that created this image.

    " + }, + "sourcePipelineArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the image pipeline that created this image.

    " + }, + "infrastructureConfiguration":{ + "shape":"InfrastructureConfiguration", + "documentation":"

    The infrastructure used when creating this image.

    " + }, + "distributionConfiguration":{ + "shape":"DistributionConfiguration", + "documentation":"

    The distribution configuration used when creating this image.

    " + }, + "imageTestsConfiguration":{ + "shape":"ImageTestsConfiguration", + "documentation":"

    The image tests configuration used when creating this image.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date on which this image was created.

    " + }, + "outputResources":{ + "shape":"OutputResources", + "documentation":"

    The output resources produced when creating this image.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the image.

    " + } + }, + "documentation":"

    An image build version.

    " + }, + "ImageBuildVersionArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):image/[a-z0-9-_]+/\\d+\\.\\d+\\.\\d+/\\d+$" + }, + "ImageBuilderArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline)/[a-z0-9-_]+(?:/(?:(?:x|\\d+)\\.(?:x|\\d+)\\.(?:x|\\d+))(?:/\\d+)?)?$" + }, + "ImagePipeline":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image pipeline.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the image pipeline.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the image pipeline.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the image pipeline.

    " + }, + "enhancedImageMetadataEnabled":{ + "shape":"NullableBoolean", + "documentation":"

    Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.

    " + }, + "imageRecipeArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe associated with this image pipeline.

    " + }, + "containerRecipeArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe that is used for this pipeline.

    " + }, + "infrastructureConfigurationArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration associated with this image pipeline.

    " + }, + "distributionConfigurationArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration associated with this image pipeline.

    " + }, + "imageTestsConfiguration":{ + "shape":"ImageTestsConfiguration", + "documentation":"

    The image tests configuration of the image pipeline.

    " + }, + "schedule":{ + "shape":"Schedule", + "documentation":"

    The schedule of the image pipeline.

    " + }, + "status":{ + "shape":"PipelineStatus", + "documentation":"

    The status of the image pipeline.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date on which this image pipeline was created.

    " + }, + "dateUpdated":{ + "shape":"DateTime", + "documentation":"

    The date on which this image pipeline was last updated.

    " + }, + "dateLastRun":{ + "shape":"DateTime", + "documentation":"

    The date on which this image pipeline was last run.

    " + }, + "dateNextRun":{ + "shape":"DateTime", + "documentation":"

    The date on which this image pipeline will next be run.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of this image pipeline.

    " + } + }, + "documentation":"

    Details of an image pipeline.

    " + }, + "ImagePipelineArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):image-pipeline/[a-z0-9-_]+$" + }, + "ImagePipelineList":{ + "type":"list", + "member":{"shape":"ImagePipeline"} + }, + "ImageRecipe":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe.

    " + }, + "type":{ + "shape":"ImageType", + "documentation":"

    Specifies which type of image is created by the recipe - an AMI or a container image.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the image recipe.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the image recipe.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the image recipe.

    " + }, + "owner":{ + "shape":"NonEmptyString", + "documentation":"

    The owner of the image recipe.

    " + }, + "version":{ + "shape":"VersionNumber", + "documentation":"

    The version of the image recipe.

    " + }, + "components":{ + "shape":"ComponentConfigurationList", + "documentation":"

    The components of the image recipe.

    " + }, + "parentImage":{ + "shape":"NonEmptyString", + "documentation":"

    The parent image of the image recipe.

    " + }, + "blockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappings", + "documentation":"

    The block device mappings to apply when creating images from this recipe.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date on which this image recipe was created.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the image recipe.

    " + }, + "workingDirectory":{ + "shape":"NonEmptyString", + "documentation":"

    The working directory to be used during build and test workflows.

    " + } + }, + "documentation":"

    An image recipe.

    " + }, + "ImageRecipeArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):image-recipe/[a-z0-9-_]+/\\d+\\.\\d+\\.\\d+$" + }, + "ImageRecipeSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the image recipe.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the image recipe.

    " + }, + "owner":{ + "shape":"NonEmptyString", + "documentation":"

    The owner of the image recipe.

    " + }, + "parentImage":{ + "shape":"NonEmptyString", + "documentation":"

    The parent image of the image recipe.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date on which this image recipe was created.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the image recipe.

    " + } + }, + "documentation":"

    A summary of an image recipe.

    " + }, + "ImageRecipeSummaryList":{ + "type":"list", + "member":{"shape":"ImageRecipeSummary"} + }, + "ImageState":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ImageStatus", + "documentation":"

    The status of the image.

    " + }, + "reason":{ + "shape":"NonEmptyString", + "documentation":"

    The reason for the image's status.

    " + } + }, + "documentation":"

    Image state shows the image status and the reason for that status.
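
    Because an image moves through the ImageStatus values listed just below (PENDING through AVAILABLE, FAILED, and so on), a common pattern is to poll GetImage until a terminal status is reached. A minimal sketch, assuming the generated ImagebuilderClient and a placeholder build-version ARN:

    import software.amazon.awssdk.services.imagebuilder.ImagebuilderClient;
    import software.amazon.awssdk.services.imagebuilder.model.Image;
    import software.amazon.awssdk.services.imagebuilder.model.ImageStatus;

    public class WaitForImageExample {
        public static void main(String[] args) throws InterruptedException {
            String imageBuildVersionArn =
                    "arn:aws:imagebuilder:us-west-2:123456789012:image/my-image/1.0.0/1";
            try (ImagebuilderClient imagebuilder = ImagebuilderClient.create()) {
                while (true) {
                    Image image = imagebuilder.getImage(r -> r.imageBuildVersionArn(imageBuildVersionArn)).image();
                    ImageStatus status = image.state().status();
                    if (status == ImageStatus.AVAILABLE || status == ImageStatus.FAILED
                            || status == ImageStatus.CANCELLED) {
                        System.out.println(status + ": " + image.state().reason());
                        break;
                    }
                    Thread.sleep(30_000); // build, test, and distribution phases can take a while
                }
            }
        }
    }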

    " + }, + "ImageStatus":{ + "type":"string", + "enum":[ + "PENDING", + "CREATING", + "BUILDING", + "TESTING", + "DISTRIBUTING", + "INTEGRATING", + "AVAILABLE", + "CANCELLED", + "FAILED", + "DEPRECATED", + "DELETED" + ] + }, + "ImageSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the image.

    " + }, + "type":{ + "shape":"ImageType", + "documentation":"

    Specifies whether this is an AMI or container image.

    " + }, + "version":{ + "shape":"VersionNumber", + "documentation":"

    The version of the image.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the image.

    " + }, + "osVersion":{ + "shape":"OsVersion", + "documentation":"

    The operating system version of the instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.

    " + }, + "state":{ + "shape":"ImageState", + "documentation":"

    The state of the image.

    " + }, + "owner":{ + "shape":"NonEmptyString", + "documentation":"

    The owner of the image.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date on which this image was created.

    " + }, + "outputResources":{ + "shape":"OutputResources", + "documentation":"

    The output resources produced when creating this image.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the image.

    " + } + }, + "documentation":"

    An image summary.

    " + }, + "ImageSummaryList":{ + "type":"list", + "member":{"shape":"ImageSummary"} + }, + "ImageTestsConfiguration":{ + "type":"structure", + "members":{ + "imageTestsEnabled":{ + "shape":"NullableBoolean", + "documentation":"

    Defines whether tests should be executed when building this image.

    " + }, + "timeoutMinutes":{ + "shape":"ImageTestsTimeoutMinutes", + "documentation":"

    The maximum time in minutes that tests are permitted to run.

    " + } + }, + "documentation":"

    Image tests configuration.

    " + }, + "ImageTestsTimeoutMinutes":{ + "type":"integer", + "max":1440, + "min":60 + }, + "ImageType":{ + "type":"string", + "enum":[ + "AMI", + "DOCKER" + ] + }, + "ImageVersion":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image semantic version.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the image semantic version.

    " + }, + "type":{ + "shape":"ImageType", + "documentation":"

    Specifies whether this is an AMI or container image.

    " + }, + "version":{ + "shape":"VersionNumber", + "documentation":"

    The semantic version of the image.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the image semantic version.

    " + }, + "osVersion":{ + "shape":"OsVersion", + "documentation":"

    The operating system version of the instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.

    " + }, + "owner":{ + "shape":"NonEmptyString", + "documentation":"

    The owner of the image semantic version.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date on which this image semantic version was created.

    " + } + }, + "documentation":"

    An image semantic version.

    " + }, + "ImageVersionArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):image/[a-z0-9-_]+/\\d+\\.\\d+\\.\\d+$" + }, + "ImageVersionArnOrBuildVersionArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):image/[a-z0-9-_]+/(?:(?:(\\d+|x)\\.(\\d+|x)\\.(\\d+|x))|(?:\\d+\\.\\d+\\.\\d+/\\d+))$" + }, + "ImageVersionList":{ + "type":"list", + "member":{"shape":"ImageVersion"} + }, + "ImportComponentRequest":{ + "type":"structure", + "required":[ + "name", + "semanticVersion", + "type", + "format", + "platform", + "clientToken" + ], + "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the component.

    " + }, + "semanticVersion":{ + "shape":"VersionNumber", + "documentation":"

    The semantic version of the component. This version follows the semantic version syntax. For example, major.minor.patch. This could be versioned like software (2.0.1) or like a date (2019.12.01).

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the component. Describes the contents of the component.

    " + }, + "changeDescription":{ + "shape":"NonEmptyString", + "documentation":"

    The change description of the component. Describes what change has been made in this version, or what makes this version different from other versions of this component.

    " + }, + "type":{ + "shape":"ComponentType", + "documentation":"

    The type of the component denotes whether the component is used to build the image or only to test it.

    " + }, + "format":{ + "shape":"ComponentFormat", + "documentation":"

    The format of the resource that you want to import as a component.

    " + }, + "platform":{ + "shape":"Platform", + "documentation":"

    The platform of the component.

    " + }, + "data":{ + "shape":"NonEmptyString", + "documentation":"

    The data of the component. Used to specify the data inline. Either data or uri can be used to specify the data within the component.

    " + }, + "uri":{ + "shape":"Uri", + "documentation":"

    The uri of the component. Must be an S3 URL and the requester must have permission to access the S3 bucket. If you use S3, you can specify component content up to your service quota. Either data or uri can be used to specify the data within the component.

    " + }, + "kmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

    The ID of the KMS key that should be used to encrypt this component.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the component.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token of the component.

    ", + "idempotencyToken":true + } + } + }, + "ImportComponentResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "componentBuildVersionArn":{ + "shape":"ComponentBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the imported component.

    " + } + } + }, + "InfrastructureConfiguration":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the infrastructure configuration.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the infrastructure configuration.

    " + }, + "instanceTypes":{ + "shape":"InstanceTypeList", + "documentation":"

    The instance types of the infrastructure configuration.

    " + }, + "instanceProfileName":{ + "shape":"NonEmptyString", + "documentation":"

    The instance profile of the infrastructure configuration.

    " + }, + "securityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

    The security group IDs of the infrastructure configuration.

    " + }, + "subnetId":{ + "shape":"NonEmptyString", + "documentation":"

    The subnet ID of the infrastructure configuration.

    " + }, + "logging":{ + "shape":"Logging", + "documentation":"

    The logging configuration of the infrastructure configuration.

    " + }, + "keyPair":{ + "shape":"NonEmptyString", + "documentation":"

    The EC2 key pair of the infrastructure configuration.

    " + }, + "terminateInstanceOnFailure":{ + "shape":"NullableBoolean", + "documentation":"

    The terminate instance on failure configuration of the infrastructure configuration.

    " + }, + "snsTopicArn":{ + "shape":"NonEmptyString", + "documentation":"

    The SNS topic Amazon Resource Name (ARN) of the infrastructure configuration.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date on which the infrastructure configuration was created.

    " + }, + "dateUpdated":{ + "shape":"DateTime", + "documentation":"

    The date on which the infrastructure configuration was last updated.

    " + }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

    The tags attached to the resource created by Image Builder.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the infrastructure configuration.

    " + } + }, + "documentation":"

    Details of the infrastructure configuration.

    " + }, + "InfrastructureConfigurationArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):infrastructure-configuration/[a-z0-9-_]+$" + }, + "InfrastructureConfigurationSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the infrastructure configuration.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the infrastructure configuration.

    " + }, + "dateCreated":{ + "shape":"DateTime", + "documentation":"

    The date on which the infrastructure configuration was created.

    " + }, + "dateUpdated":{ + "shape":"DateTime", + "documentation":"

    The date on which the infrastructure configuration was last updated.

    " + }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

    The tags attached to the resource created by Image Builder.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the infrastructure configuration.

    " + } + }, + "documentation":"

    The infrastructure used when building EC2 AMIs.

    " + }, + "InfrastructureConfigurationSummaryList":{ + "type":"list", + "member":{"shape":"InfrastructureConfigurationSummary"} + }, + "InlineComponentData":{ + "type":"string", + "max":16000, + "min":1, + "pattern":"[^\\x00]+" + }, + "InlineDockerFileTemplate":{ + "type":"string", + "max":16000, + "min":1, + "pattern":"[^\\x00]+" + }, + "InstanceBlockDeviceMapping":{ + "type":"structure", + "members":{ + "deviceName":{ + "shape":"NonEmptyString", + "documentation":"

    The device to which these mappings apply.

    " + }, + "ebs":{ + "shape":"EbsInstanceBlockDeviceSpecification", + "documentation":"

    Use to manage Amazon EBS-specific configuration for this mapping.

    " + }, + "virtualName":{ + "shape":"NonEmptyString", + "documentation":"

    Use to manage instance ephemeral devices.

    " + }, + "noDevice":{ + "shape":"EmptyString", + "documentation":"

    Use to remove a mapping from the parent image.

    " + } + }, + "documentation":"

    Defines block device mappings for the instance used to configure your image.

    " + }, + "InstanceBlockDeviceMappings":{ + "type":"list", + "member":{"shape":"InstanceBlockDeviceMapping"} + }, + "InstanceType":{"type":"string"}, + "InstanceTypeList":{ + "type":"list", + "member":{"shape":"InstanceType"} + }, + "InvalidPaginationTokenException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You have provided an invalid pagination token in your request.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidParameterCombinationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You have specified two or more mutually exclusive parameters. Review the error message for details.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The specified parameter is invalid. Review the available parameters for the API request.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The value that you provided for the specified parameter is invalid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You have made a request for an action that is not supported by the service.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidVersionNumberException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Your version number is out of bounds or does not follow the required syntax.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LaunchPermissionConfiguration":{ + "type":"structure", + "members":{ + "userIds":{ + "shape":"AccountList", + "documentation":"

    The AWS account ID.

    " + }, + "userGroups":{ + "shape":"StringList", + "documentation":"

    The name of the group.

    " + } + }, + "documentation":"

    Describes the configuration for a launch permission. The launch permission modification request is sent to the EC2 ModifyImageAttribute API on behalf of the user for each Region they have selected to distribute the AMI. To make an AMI public, set the launch permission authorized accounts to all. See the examples for making an AMI public at EC2 ModifyImageAttribute.

    " + }, + "LicenseConfigurationArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:license-manager:[^:]+:\\d{12}:license-configuration:lic-[a-z0-9-_]{32}$" + }, + "LicenseConfigurationArnList":{ + "type":"list", + "member":{"shape":"LicenseConfigurationArn"}, + "max":50, + "min":1 + }, + "ListComponentBuildVersionsRequest":{ + "type":"structure", + "required":["componentVersionArn"], + "members":{ + "componentVersionArn":{ + "shape":"ComponentVersionArn", + "documentation":"

    The component version Amazon Resource Name (ARN) whose versions you want to list.

    " + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

    The maximum items to return in a request.

    ", + "box":true + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + } + } + }, + "ListComponentBuildVersionsResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "componentSummaryList":{ + "shape":"ComponentSummaryList", + "documentation":"

    The list of component summaries for the specified semantic version.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

    " + } + } + }, + "ListComponentsRequest":{ + "type":"structure", + "members":{ + "owner":{ + "shape":"Ownership", + "documentation":"

    The owner defines which components you want to list. By default, this request will only show components owned by your account. You can use this field to specify if you want to view components owned by yourself, by Amazon, or those components that have been shared with you by other customers.

    " + }, + "filters":{ + "shape":"FilterList", + "documentation":"

    The filters.

    " + }, + "byName":{ + "shape":"Boolean", + "documentation":"

    Returns the list of component build versions for the specified semantic version.

    " + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

    The maximum items to return in a request.

    ", + "box":true + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + } + } + }, + "ListComponentsResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "componentVersionList":{ + "shape":"ComponentVersionList", + "documentation":"

    The list of component semantic versions.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

    " + } + } + }, + "ListContainerRecipesRequest":{ + "type":"structure", + "members":{ + "owner":{ + "shape":"Ownership", + "documentation":"

    Returns container recipes belonging to the specified owner that have been shared with you. You can omit this field to return container recipes belonging to your account.

    " + }, + "filters":{ + "shape":"FilterList", + "documentation":"

    Request filters that are used to narrow the list of container images that are returned.

    " + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

    The maximum number of results to return in the list.

    ", + "box":true + }, + "nextToken":{ + "shape":"NonEmptyString", + "documentation":"

    Provides a token for pagination, which determines where to begin the next set of results when the current set reaches the maximum for one request.

    " + } + } + }, + "ListContainerRecipesResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "containerRecipeSummaryList":{ + "shape":"ContainerRecipeSummaryList", + "documentation":"

    The list of container recipes returned for the request.

    " + }, + "nextToken":{ + "shape":"NonEmptyString", + "documentation":"

    The next token field is used for paginated responses. When this is not empty, there are additional container recipes that the service has not included in this response. Use this token with the next request to retrieve additional list items.

    " + } + } + }, + "ListDistributionConfigurationsRequest":{ + "type":"structure", + "members":{ + "filters":{ + "shape":"FilterList", + "documentation":"

    The filters.

    • name - The name of this distribution configuration.

    " + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

    The maximum items to return in a request.

    ", + "box":true + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + } + } + }, + "ListDistributionConfigurationsResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "distributionConfigurationSummaryList":{ + "shape":"DistributionConfigurationSummaryList", + "documentation":"

    The list of distributions.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

    " + } + } + }, + "ListImageBuildVersionsRequest":{ + "type":"structure", + "required":["imageVersionArn"], + "members":{ + "imageVersionArn":{ + "shape":"ImageVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image whose build versions you want to retrieve.

    " + }, + "filters":{ + "shape":"FilterList", + "documentation":"

    The filters.

    " + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

    The maximum items to return in a request.

    ", + "box":true + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + } + } + }, + "ListImageBuildVersionsResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imageSummaryList":{ + "shape":"ImageSummaryList", + "documentation":"

    The list of image build versions.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

    " + } + } + }, + "ListImagePipelineImagesRequest":{ + "type":"structure", + "required":["imagePipelineArn"], + "members":{ + "imagePipelineArn":{ + "shape":"ImagePipelineArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image pipeline whose images you want to view.

    " + }, + "filters":{ + "shape":"FilterList", + "documentation":"

    The filters.

    " + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

    The maximum items to return in a request.

    ", + "box":true + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + } + } + }, + "ListImagePipelineImagesResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imageSummaryList":{ + "shape":"ImageSummaryList", + "documentation":"

    The list of images built by this pipeline.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

    " + } + } + }, + "ListImagePipelinesRequest":{ + "type":"structure", + "members":{ + "filters":{ + "shape":"FilterList", + "documentation":"

    The filters.

    " + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

    The maximum items to return in a request.

    ", + "box":true + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + } + } + }, + "ListImagePipelinesResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imagePipelineList":{ + "shape":"ImagePipelineList", + "documentation":"

    The list of image pipelines.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

    " + } + } + }, + "ListImageRecipesRequest":{ + "type":"structure", + "members":{ + "owner":{ + "shape":"Ownership", + "documentation":"

    The owner defines which image recipes you want to list. By default, this request will only show image recipes owned by your account. You can use this field to specify if you want to view image recipes owned by yourself, by Amazon, or those image recipes that have been shared with you by other customers.

    " + }, + "filters":{ + "shape":"FilterList", + "documentation":"

    The filters.

    " + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

    The maximum items to return in a request.

    ", + "box":true + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + } + } + }, + "ListImageRecipesResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imageRecipeSummaryList":{ + "shape":"ImageRecipeSummaryList", + "documentation":"

    The list of image recipes.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

    " + } + } + }, + "ListImagesRequest":{ + "type":"structure", + "members":{ + "owner":{ + "shape":"Ownership", + "documentation":"

    The owner defines which images you want to list. By default, this request will only show images owned by your account. You can use this field to specify if you want to view images owned by yourself, by Amazon, or those images that have been shared with you by other customers.

    " + }, + "filters":{ + "shape":"FilterList", + "documentation":"

    The filters.

    " + }, + "byName":{ + "shape":"Boolean", + "documentation":"

    Requests a list of images with a specific recipe name.

    " + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

    The maximum items to return in a request.

    ", + "box":true + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + }, + "includeDeprecated":{ + "shape":"NullableBoolean", + "documentation":"

    Includes deprecated images in the response list.

    " + } + } + }, + "ListImagesResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imageVersionList":{ + "shape":"ImageVersionList", + "documentation":"

    The list of image semantic versions.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

    " + } + } + }, + "ListInfrastructureConfigurationsRequest":{ + "type":"structure", + "members":{ + "filters":{ + "shape":"FilterList", + "documentation":"

    The filters.

    " + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

    The maximum items to return in a request.

    ", + "box":true + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A token to specify where to start paginating. This is the NextToken from a previously truncated response.

    " + } + } + }, + "ListInfrastructureConfigurationsResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "infrastructureConfigurationSummaryList":{ + "shape":"InfrastructureConfigurationSummaryList", + "documentation":"

    The list of infrastructure configurations.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags for the specified resource.

    " + } + } + }, + "Logging":{ + "type":"structure", + "members":{ + "s3Logs":{ + "shape":"S3Logs", + "documentation":"

    The Amazon S3 logging configuration.

    " + } + }, + "documentation":"

    Logging configuration defines where Image Builder uploads your logs.

    " + }, + "NonEmptyString":{ + "type":"string", + "max":1024, + "min":1 + }, + "NullableBoolean":{"type":"boolean"}, + "OsVersion":{ + "type":"string", + "min":1 + }, + "OsVersionList":{ + "type":"list", + "member":{"shape":"OsVersion"}, + "max":25, + "min":1 + }, + "OutputResources":{ + "type":"structure", + "members":{ + "amis":{ + "shape":"AmiList", + "documentation":"

    The EC2 AMIs created by this image.

    " + }, + "containers":{ + "shape":"ContainerList", + "documentation":"

    Container images that the pipeline has generated and stored in the output repository.

    " + } + }, + "documentation":"

    The resources produced by this image.

    " + }, + "Ownership":{ + "type":"string", + "enum":[ + "Self", + "Shared", + "Amazon" + ] + }, + "PaginationToken":{ + "type":"string", + "max":65535, + "min":1 + }, + "PipelineExecutionStartCondition":{ + "type":"string", + "enum":[ + "EXPRESSION_MATCH_ONLY", + "EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE" + ] + }, + "PipelineStatus":{ + "type":"string", + "enum":[ + "DISABLED", + "ENABLED" + ] + }, + "Platform":{ + "type":"string", + "enum":[ + "Windows", + "Linux" + ] + }, + "PutComponentPolicyRequest":{ + "type":"structure", + "required":[ + "componentArn", + "policy" + ], + "members":{ + "componentArn":{ + "shape":"ComponentBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component that this policy should be applied to.

    " + }, + "policy":{ + "shape":"ResourcePolicyDocument", + "documentation":"

    The policy to apply.

    " + } + } + }, + "PutComponentPolicyResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "componentArn":{ + "shape":"ComponentBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the component that this policy was applied to.

    " + } + } + }, + "PutContainerRecipePolicyRequest":{ + "type":"structure", + "required":[ + "containerRecipeArn", + "policy" + ], + "members":{ + "containerRecipeArn":{ + "shape":"ContainerRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe that this policy should be applied to.

    " + }, + "policy":{ + "shape":"ResourcePolicyDocument", + "documentation":"

    The policy to apply to the container recipe.

    " + } + } + }, + "PutContainerRecipePolicyResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "containerRecipeArn":{ + "shape":"ContainerRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe that this policy was applied to.

    " + } + } + }, + "PutImagePolicyRequest":{ + "type":"structure", + "required":[ + "imageArn", + "policy" + ], + "members":{ + "imageArn":{ + "shape":"ImageBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image that this policy should be applied to.

    " + }, + "policy":{ + "shape":"ResourcePolicyDocument", + "documentation":"

    The policy to apply.

    " + } + } + }, + "PutImagePolicyResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imageArn":{ + "shape":"ImageBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image that this policy was applied to.

    " + } + } + }, + "PutImageRecipePolicyRequest":{ + "type":"structure", + "required":[ + "imageRecipeArn", + "policy" + ], + "members":{ + "imageRecipeArn":{ + "shape":"ImageRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe that this policy should be applied to.

    " + }, + "policy":{ + "shape":"ResourcePolicyDocument", + "documentation":"

    The policy to apply.

    " + } + } + }, + "PutImageRecipePolicyResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "imageRecipeArn":{ + "shape":"ImageRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe that this policy was applied to.

    " + } + } + }, + "RegionList":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The resource that you are trying to create already exists.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ResourceDependencyException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You have attempted to mutate or delete a resource with a dependency that prohibits this action. See the error message for more details.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The resource that you are trying to operate on is currently in use. Review the message details and retry later.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ResourceName":{ + "type":"string", + "pattern":"^[-_A-Za-z-0-9][-_A-Za-z0-9 ]{1,126}[-_A-Za-z-0-9]$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    At least one of the resources referenced by your request does not exist.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourcePolicyDocument":{ + "type":"string", + "max":30000, + "min":1 + }, + "ResourceTagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":30, + "min":1 + }, + "RestrictedInteger":{ + "type":"integer", + "max":25, + "min":1 + }, + "S3Logs":{ + "type":"structure", + "members":{ + "s3BucketName":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon S3 bucket in which to store the logs.

    " + }, + "s3KeyPrefix":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon S3 path in which to store the logs.

    " + } + }, + "documentation":"

    Amazon S3 logging configuration.

    " + }, + "Schedule":{ + "type":"structure", + "members":{ + "scheduleExpression":{ + "shape":"NonEmptyString", + "documentation":"

    The cron expression determines how often EC2 Image Builder evaluates your pipelineExecutionStartCondition.

    For information on how to format a cron expression in Image Builder, see Use cron expressions in EC2 Image Builder.

    " + }, + "pipelineExecutionStartCondition":{ + "shape":"PipelineExecutionStartCondition", + "documentation":"

    The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, and you use semantic version filters on the source image or components in your image recipe, EC2 Image Builder will build a new image only when there are new versions of the image or components in your recipe that match the semantic version filter. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON expression matches the current time. For semantic version syntax, see CreateComponent in the EC2 Image Builder API Reference.

    " + } + }, + "documentation":"

    A schedule configures how often and when a pipeline will automatically create a new image.

    " + }, + "SecurityGroupIds":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, + "ServiceException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    This exception is thrown when the service encounters an unrecoverable exception.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You have exceeded the number of permitted resources or operations for this service. For service quotas, see EC2 Image Builder endpoints and quotas.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The service is unable to process your request at this time.

    ", + "error":{"httpStatusCode":503}, + "exception":true + }, + "SnsTopicArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:sns:[^:]+:\\d{12}:[a-zA-Z0-9-_]{1,256}$" + }, + "StartImagePipelineExecutionRequest":{ + "type":"structure", + "required":[ + "imagePipelineArn", + "clientToken" + ], + "members":{ + "imagePipelineArn":{ + "shape":"ImagePipelineArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image pipeline that you want to manually invoke.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    ", + "idempotencyToken":true + } + } + }, + "StartImagePipelineExecutionResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "imageBuildVersionArn":{ + "shape":"ImageBuildVersionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image that was created by this request.

    " + } + } + }, + "StringList":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to tag.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags to apply to the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "TargetContainerRepository":{ + "type":"structure", + "required":[ + "service", + "repositoryName" + ], + "members":{ + "service":{ + "shape":"ContainerRepositoryService", + "documentation":"

    Specifies the service in which this image was registered.

    " + }, + "repositoryName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the container repository where the output container image is stored. This name is prefixed by the repository location.

    " + } + }, + "documentation":"

    The container repository where the output container image is stored.

    " + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ImageBuilderArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to untag.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag keys to remove from the resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDistributionConfigurationRequest":{ + "type":"structure", + "required":[ + "distributionConfigurationArn", + "distributions", + "clientToken" + ], + "members":{ + "distributionConfigurationArn":{ + "shape":"DistributionConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration that you want to update.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the distribution configuration.

    " + }, + "distributions":{ + "shape":"DistributionList", + "documentation":"

    The distributions of the distribution configuration.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token of the distribution configuration.

    ", + "idempotencyToken":true + } + } + }, + "UpdateDistributionConfigurationResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "distributionConfigurationArn":{ + "shape":"DistributionConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration that was updated by this request.

    " + } + } + }, + "UpdateImagePipelineRequest":{ + "type":"structure", + "required":[ + "imagePipelineArn", + "infrastructureConfigurationArn", + "clientToken" + ], + "members":{ + "imagePipelineArn":{ + "shape":"ImagePipelineArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image pipeline that you want to update.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the image pipeline.

    " + }, + "imageRecipeArn":{ + "shape":"ImageRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image recipe that will be used to configure images updated by this image pipeline.

    " + }, + "containerRecipeArn":{ + "shape":"ContainerRecipeArn", + "documentation":"

    The Amazon Resource Name (ARN) of the container recipe that will be used to configure images updated by this image pipeline.

    " + }, + "infrastructureConfigurationArn":{ + "shape":"InfrastructureConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration that will be used to build images updated by this image pipeline.

    " + }, + "distributionConfigurationArn":{ + "shape":"DistributionConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the distribution configuration that will be used to configure and distribute images updated by this image pipeline.

    " + }, + "imageTestsConfiguration":{ + "shape":"ImageTestsConfiguration", + "documentation":"

    The image test configuration of the image pipeline.

    " + }, + "enhancedImageMetadataEnabled":{ + "shape":"NullableBoolean", + "documentation":"

    Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.

    " + }, + "schedule":{ + "shape":"Schedule", + "documentation":"

    The schedule of the image pipeline.

    " + }, + "status":{ + "shape":"PipelineStatus", + "documentation":"

    The status of the image pipeline.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    ", + "idempotencyToken":true + } + } + }, + "UpdateImagePipelineResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "imagePipelineArn":{ + "shape":"ImagePipelineArn", + "documentation":"

    The Amazon Resource Name (ARN) of the image pipeline that was updated by this request.

    " + } + } + }, + "UpdateInfrastructureConfigurationRequest":{ + "type":"structure", + "required":[ + "infrastructureConfigurationArn", + "instanceProfileName", + "clientToken" + ], + "members":{ + "infrastructureConfigurationArn":{ + "shape":"InfrastructureConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration that you want to update.

    " + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the infrastructure configuration.

    " + }, + "instanceTypes":{ + "shape":"InstanceTypeList", + "documentation":"

    The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.

    " + }, + "instanceProfileName":{ + "shape":"NonEmptyString", + "documentation":"

    The instance profile to associate with the instance used to customize your EC2 AMI.

    " + }, + "securityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

    The security group IDs to associate with the instance used to customize your EC2 AMI.

    " + }, + "subnetId":{ + "shape":"NonEmptyString", + "documentation":"

    The subnet ID to place the instance used to customize your EC2 AMI in.

    " + }, + "logging":{ + "shape":"Logging", + "documentation":"

    The logging configuration of the infrastructure configuration.

    " + }, + "keyPair":{ + "shape":"NonEmptyString", + "documentation":"

    The key pair of the infrastructure configuration. This can be used to log on to and debug the instance used to create your image.

    " + }, + "terminateInstanceOnFailure":{ + "shape":"NullableBoolean", + "documentation":"

    The terminate instance on failure setting of the infrastructure configuration. Set to false if you want Image Builder to retain the instance used to configure your AMI if the build or test phase of your workflow fails.

    " + }, + "snsTopicArn":{ + "shape":"SnsTopicArn", + "documentation":"

    The SNS topic on which to send image build events.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    ", + "idempotencyToken":true + }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

    The tags attached to the resource created by Image Builder.

    " + } + } + }, + "UpdateInfrastructureConfigurationResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

    The request ID that uniquely identifies this request.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token used to make this request idempotent.

    " + }, + "infrastructureConfigurationArn":{ + "shape":"InfrastructureConfigurationArn", + "documentation":"

    The Amazon Resource Name (ARN) of the infrastructure configuration that was updated by this request.

    " + } + } + }, + "Uri":{"type":"string"}, + "VersionNumber":{ + "type":"string", + "pattern":"^[0-9]+\\.[0-9]+\\.[0-9]+$" + } + }, + "documentation":"

    EC2 Image Builder is a fully managed AWS service that makes it easier to automate the creation, management, and deployment of customized, secure, and up-to-date \"golden\" server images that are pre-installed and pre-configured with software and settings to meet specific IT standards.

    " +} diff --git a/services/inspector/build.properties b/services/inspector/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/inspector/build.properties +++ b/services/inspector/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 877f2353da12..5e4b548c6890 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + iotdeviceadvisor + AWS Java SDK :: Services :: Iot Device Advisor + The AWS Java SDK for Iot Device Advisor module holds the client classes that are used for + communicating with Iot Device Advisor. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.iotdeviceadvisor + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/iotdeviceadvisor/src/main/resources/codegen-resources/paginators-1.json b/services/iotdeviceadvisor/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..cdc8ace5f78b --- /dev/null +++ b/services/iotdeviceadvisor/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,19 @@ +{ + "pagination": { + "ListSuiteDefinitions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListSuiteRuns": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListTestCases": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + } + } +} diff --git a/services/iotdeviceadvisor/src/main/resources/codegen-resources/service-2.json b/services/iotdeviceadvisor/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..629489c602ce --- /dev/null +++ b/services/iotdeviceadvisor/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1129 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-09-18", + "endpointPrefix":"api.iotdeviceadvisor", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"AWSIoTDeviceAdvisor", + "serviceFullName":"AWS IoT Core Device Advisor", + "serviceId":"IotDeviceAdvisor", + "signatureVersion":"v4", + "signingName":"iotdeviceadvisor", + "uid":"iotdeviceadvisor-2020-09-18" + }, + "operations":{ + "CreateSuiteDefinition":{ + "name":"CreateSuiteDefinition", + "http":{ + "method":"POST", + "requestUri":"/suiteDefinitions" + }, + "input":{"shape":"CreateSuiteDefinitionRequest"}, + "output":{"shape":"CreateSuiteDefinitionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a Device Advisor test suite.

    " + }, + "DeleteSuiteDefinition":{ + "name":"DeleteSuiteDefinition", + "http":{ + "method":"DELETE", + "requestUri":"/suiteDefinitions/{suiteDefinitionId}" + }, + "input":{"shape":"DeleteSuiteDefinitionRequest"}, + "output":{"shape":"DeleteSuiteDefinitionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes a Device Advisor test suite.

    " + }, + "GetSuiteDefinition":{ + "name":"GetSuiteDefinition", + "http":{ + "method":"GET", + "requestUri":"/suiteDefinitions/{suiteDefinitionId}" + }, + "input":{"shape":"GetSuiteDefinitionRequest"}, + "output":{"shape":"GetSuiteDefinitionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets information about a Device Advisor test suite.

    " + }, + "GetSuiteRun":{ + "name":"GetSuiteRun", + "http":{ + "method":"GET", + "requestUri":"/suiteDefinitions/{suiteDefinitionId}/suiteRuns/{suiteRunId}" + }, + "input":{"shape":"GetSuiteRunRequest"}, + "output":{"shape":"GetSuiteRunResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets information about a Device Advisor test suite run.

    " + }, + "GetSuiteRunReport":{ + "name":"GetSuiteRunReport", + "http":{ + "method":"GET", + "requestUri":"/suiteDefinitions/{suiteDefinitionId}/suiteRuns/{suiteRunId}/report" + }, + "input":{"shape":"GetSuiteRunReportRequest"}, + "output":{"shape":"GetSuiteRunReportResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets a report download link for a successful Device Advisor qualifying test suite run.

    " + }, + "ListSuiteDefinitions":{ + "name":"ListSuiteDefinitions", + "http":{ + "method":"GET", + "requestUri":"/suiteDefinitions" + }, + "input":{"shape":"ListSuiteDefinitionsRequest"}, + "output":{"shape":"ListSuiteDefinitionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the Device Advisor test suites you have created.

    " + }, + "ListSuiteRuns":{ + "name":"ListSuiteRuns", + "http":{ + "method":"GET", + "requestUri":"/suiteRuns" + }, + "input":{"shape":"ListSuiteRunsRequest"}, + "output":{"shape":"ListSuiteRunsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the runs of the specified Device Advisor test suite. You can list all runs of the test suite, or the runs of a specific version of the test suite.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists the tags attached to an IoT Device Advisor resource.

    " + }, + "ListTestCases":{ + "name":"ListTestCases", + "http":{ + "method":"GET", + "requestUri":"/testCases" + }, + "input":{"shape":"ListTestCasesRequest"}, + "output":{"shape":"ListTestCasesResponse"}, + "errors":[ + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all the test cases in the test suite.

    " + }, + "StartSuiteRun":{ + "name":"StartSuiteRun", + "http":{ + "method":"POST", + "requestUri":"/suiteDefinitions/{suiteDefinitionId}/suiteRuns" + }, + "input":{"shape":"StartSuiteRunRequest"}, + "output":{"shape":"StartSuiteRunResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Starts a Device Advisor test suite run.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Adds to and modifies existing tags of an IoT Device Advisor resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes tags from an IoT Device Advisor resource.

    " + }, + "UpdateSuiteDefinition":{ + "name":"UpdateSuiteDefinition", + "http":{ + "method":"PATCH", + "requestUri":"/suiteDefinitions/{suiteDefinitionId}" + }, + "input":{"shape":"UpdateSuiteDefinitionRequest"}, + "output":{"shape":"UpdateSuiteDefinitionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates a Device Advisor test suite.

    " + } + }, + "shapes":{ + "AmazonResourceName":{ + "type":"string", + "max":2048, + "min":20 + }, + "CategoryName":{"type":"string"}, + "ConfigString":{"type":"string"}, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"Message", + "documentation":"

    Sends Conflict Exception message.

    " + } + }, + "documentation":"

    Sends Conflict Exception.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateSuiteDefinitionRequest":{ + "type":"structure", + "members":{ + "suiteDefinitionConfiguration":{ + "shape":"SuiteDefinitionConfiguration", + "documentation":"

    The configuration of the Device Advisor test suite to create.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags to be attached to the suite definition.

    " + } + } + }, + "CreateSuiteDefinitionResponse":{ + "type":"structure", + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    The UUID of the newly created Device Advisor test suite.

    " + }, + "suiteDefinitionArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the newly created Device Advisor test suite.

    " + }, + "suiteDefinitionName":{ + "shape":"SuiteDefinitionName", + "documentation":"

    The name of the newly created Device Advisor test suite.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of when the Device Advisor test suite was created.

    " + } + } + }, + "DeleteSuiteDefinitionRequest":{ + "type":"structure", + "required":["suiteDefinitionId"], + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    The suite definition ID of the Device Advisor test suite to delete.

    ", + "location":"uri", + "locationName":"suiteDefinitionId" + } + } + }, + "DeleteSuiteDefinitionResponse":{ + "type":"structure", + "members":{ + } + }, + "DeviceUnderTest":{ + "type":"structure", + "members":{ + "thingArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The thing ARN of the device under test.

    " + }, + "certificateArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The certificate ARN of the device under test.

    " + } + }, + "documentation":"

    Information about a device under test.

    " + }, + "DeviceUnderTestList":{ + "type":"list", + "member":{"shape":"DeviceUnderTest"}, + "max":2, + "min":0 + }, + "ErrorReason":{"type":"string"}, + "Failure":{"type":"string"}, + "GetSuiteDefinitionRequest":{ + "type":"structure", + "required":["suiteDefinitionId"], + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Requests suite definition Id with GetSuiteDefinition API call.

    ", + "location":"uri", + "locationName":"suiteDefinitionId" + }, + "suiteDefinitionVersion":{ + "shape":"SuiteDefinitionVersion", + "documentation":"

    Requests the suite definition version of a test suite.

    ", + "location":"querystring", + "locationName":"suiteDefinitionVersion" + } + } + }, + "GetSuiteDefinitionResponse":{ + "type":"structure", + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Gets suite definition Id with GetSuiteDefinition API call.

    " + }, + "suiteDefinitionArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The ARN of the suite definition.

    " + }, + "suiteDefinitionVersion":{ + "shape":"SuiteDefinitionVersion", + "documentation":"

    Gets suite definition version with GetSuiteDefinition API call.

    " + }, + "latestVersion":{ + "shape":"SuiteDefinitionVersion", + "documentation":"

    Gets latest suite definition version with GetSuiteDefinition API call.

    " + }, + "suiteDefinitionConfiguration":{ + "shape":"SuiteDefinitionConfiguration", + "documentation":"

    Gets the suite configuration with GetSuiteDefinition API call.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    Gets the timestamp of the time suite was created with GetSuiteDefinition API call.

    " + }, + "lastModifiedAt":{ + "shape":"Timestamp", + "documentation":"

    Gets the timestamp of the time suite was modified with GetSuiteDefinition API call.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    Tags attached to the suite definition.

    " + } + } + }, + "GetSuiteRunReportRequest":{ + "type":"structure", + "required":[ + "suiteDefinitionId", + "suiteRunId" + ], + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Device Advisor suite definition Id.

    ", + "location":"uri", + "locationName":"suiteDefinitionId" + }, + "suiteRunId":{ + "shape":"UUID", + "documentation":"

    Device Advisor suite run Id.

    ", + "location":"uri", + "locationName":"suiteRunId" + } + } + }, + "GetSuiteRunReportResponse":{ + "type":"structure", + "members":{ + "qualificationReportDownloadUrl":{ + "shape":"QualificationReportDownloadUrl", + "documentation":"

    Gets the download URL of the qualification report.

    " + } + } + }, + "GetSuiteRunRequest":{ + "type":"structure", + "required":[ + "suiteDefinitionId", + "suiteRunId" + ], + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Requests the information about Device Advisor test suite run based on suite definition Id.

    ", + "location":"uri", + "locationName":"suiteDefinitionId" + }, + "suiteRunId":{ + "shape":"UUID", + "documentation":"

    Requests the information about Device Advisor test suite run based on suite run Id.

    ", + "location":"uri", + "locationName":"suiteRunId" + } + } + }, + "GetSuiteRunResponse":{ + "type":"structure", + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Gets the information about Device Advisor test suite run based on suite definition Id.

    " + }, + "suiteDefinitionVersion":{ + "shape":"SuiteDefinitionVersion", + "documentation":"

    Gets the information about Device Advisor test suite run based on suite definition version.

    " + }, + "suiteRunId":{ + "shape":"UUID", + "documentation":"

    Gets the information about Device Advisor test suite run based on suite run Id.

    " + }, + "suiteRunArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The ARN of the suite run.

    " + }, + "suiteRunConfiguration":{ + "shape":"SuiteRunConfiguration", + "documentation":"

    Gets the information about Device Advisor test suite run based on suite configuration.

    " + }, + "testResult":{ + "shape":"TestResult", + "documentation":"

    Gets the information about Device Advisor test suite run based on test case runs.

    " + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    Gets the information about Device Advisor test suite run based on start time.

    " + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

    Gets the information about Device Advisor test suite run based on end time.

    " + }, + "status":{ + "shape":"SuiteRunStatus", + "documentation":"

    Gets the information about Device Advisor test suite run based on its status.

    " + }, + "errorReason":{ + "shape":"ErrorReason", + "documentation":"

    Gets the information about Device Advisor test suite run based on error.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags attached to the suite run.

    " + } + } + }, + "GroupName":{"type":"string"}, + "GroupResult":{ + "type":"structure", + "members":{ + "groupId":{ + "shape":"UUID", + "documentation":"

    Show Group Result Id.

    " + }, + "groupName":{ + "shape":"GroupName", + "documentation":"

    Show Group Result Name.

    " + }, + "tests":{ + "shape":"TestCaseRuns", + "documentation":"

    Show Group Result.

    " + } + }, + "documentation":"

    Show Group Result.

    " + }, + "GroupResultList":{ + "type":"list", + "member":{"shape":"GroupResult"}, + "documentation":"

    Show Group Result list.

    " + }, + "IntendedForQualificationBoolean":{"type":"boolean"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"Message", + "documentation":"

    Sends Internal Failure Exception message.

    " + } + }, + "documentation":"

    Sends Internal Failure Exception.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListSuiteDefinitionsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return at once when listing Device Advisor test suites.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    Requests the Device Advisor test suites next token.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSuiteDefinitionsResponse":{ + "type":"structure", + "members":{ + "suiteDefinitionInformationList":{ + "shape":"SuiteDefinitionInformationList", + "documentation":"

    Lists test suite information using List suite definition.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    A token used to get the next set of Device Advisor test suite results.

    " + } + } + }, + "ListSuiteRunsRequest":{ + "type":"structure", + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Lists the runs of the specified Device Advisor test suite based on suite definition Id.

    ", + "location":"querystring", + "locationName":"suiteDefinitionId" + }, + "suiteDefinitionVersion":{ + "shape":"SuiteDefinitionVersion", + "documentation":"

    Lists the runs of the specified Device Advisor test suite based on suite definition version.

    ", + "location":"querystring", + "locationName":"suiteDefinitionVersion" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    MaxResults for list suite run API request.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    Next pagination token for list suite run request.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSuiteRunsResponse":{ + "type":"structure", + "members":{ + "suiteRunsList":{ + "shape":"SuiteRunsList", + "documentation":"

    Lists the runs of the specified Device Advisor test suite.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    Next pagination token for list suite run response.
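For reference, a minimal pagination sketch over ListSuiteRuns with the SDK v2, assuming the generated client and model naming (IotDeviceAdvisorClient, ListSuiteRunsRequest); the suite definition ID is a placeholder. Since maxResults is capped at 50, the loop follows nextToken until it is null:

```java
import software.amazon.awssdk.services.iotdeviceadvisor.IotDeviceAdvisorClient;
import software.amazon.awssdk.services.iotdeviceadvisor.model.ListSuiteRunsRequest;
import software.amazon.awssdk.services.iotdeviceadvisor.model.ListSuiteRunsResponse;

public class ListSuiteRunsExample {
    public static void main(String[] args) {
        try (IotDeviceAdvisorClient deviceAdvisor = IotDeviceAdvisorClient.create()) {
            String nextToken = null;
            do {
                // Page through the runs of one suite definition, 50 at a time.
                ListSuiteRunsResponse page = deviceAdvisor.listSuiteRuns(
                        ListSuiteRunsRequest.builder()
                                .suiteDefinitionId("11111111-2222-3333-4444-555555555555") // placeholder
                                .maxResults(50)
                                .nextToken(nextToken)
                                .build());
                page.suiteRunsList().forEach(run ->
                        System.out.println(run.suiteRunId() + " " + run.statusAsString()));
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }
}
```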

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The ARN of the IoT Device Advisor resource.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags attached to the IoT Device Advisor resource.

    " + } + } + }, + "ListTestCasesRequest":{ + "type":"structure", + "members":{ + "intendedForQualification":{ + "shape":"IntendedForQualificationBoolean", + "documentation":"

    Lists all the qualification test cases in the test suite.

    ", + "location":"querystring", + "locationName":"intendedForQualification" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Requests the test cases max results.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    Requests the test cases next token.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListTestCasesResponse":{ + "type":"structure", + "members":{ + "categories":{ + "shape":"TestCategory", + "documentation":"

    Gets the category of test case.

    " + }, + "rootGroupConfiguration":{ + "shape":"TestConfiguration", + "documentation":"

    Gets the configuration of root test group.

    " + }, + "groupConfiguration":{ + "shape":"TestConfiguration", + "documentation":"

    Gets the configuration of test group.

    " + }, + "nextToken":{ + "shape":"Token", + "documentation":"

    Test cases next token response.

    " + } + } + }, + "LogUrl":{"type":"string"}, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "Message":{ + "type":"string", + "max":2048, + "min":1 + }, + "QualificationReportDownloadUrl":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"Message", + "documentation":"

    Sends Resource Not Found Exception message.

    " + } + }, + "documentation":"

    Sends Resource Not Found Exception.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "RootGroup":{ + "type":"string", + "max":2048, + "min":1 + }, + "SelectedTestList":{ + "type":"list", + "member":{"shape":"UUID"}, + "max":100, + "min":0 + }, + "StartSuiteRunRequest":{ + "type":"structure", + "required":["suiteDefinitionId"], + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Request to start suite run based on suite definition Id.

    ", + "location":"uri", + "locationName":"suiteDefinitionId" + }, + "suiteDefinitionVersion":{ + "shape":"SuiteDefinitionVersion", + "documentation":"

    Request to start suite run based on suite definition version.

    " + }, + "suiteRunConfiguration":{ + "shape":"SuiteRunConfiguration", + "documentation":"

    Request to start suite run based on suite configuration.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags to be attached to the suite run.

    " + } + } + }, + "StartSuiteRunResponse":{ + "type":"structure", + "members":{ + "suiteRunId":{ + "shape":"UUID", + "documentation":"

    Starts a Device Advisor test suite run based on suite Run Id.

    " + }, + "suiteRunArn":{ + "shape":"AmazonResourceName", + "documentation":"

    Starts a Device Advisor test suite run based on suite run arn.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    Starts a Device Advisor test suite run based on suite create time.
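A minimal sketch of starting a suite run with the SDK v2, assuming the usual codegen naming (IotDeviceAdvisorClient, StartSuiteRunRequest, SuiteRunConfiguration, DeviceUnderTest); the IDs and ARNs are placeholders:

```java
import software.amazon.awssdk.services.iotdeviceadvisor.IotDeviceAdvisorClient;
import software.amazon.awssdk.services.iotdeviceadvisor.model.DeviceUnderTest;
import software.amazon.awssdk.services.iotdeviceadvisor.model.StartSuiteRunRequest;
import software.amazon.awssdk.services.iotdeviceadvisor.model.StartSuiteRunResponse;
import software.amazon.awssdk.services.iotdeviceadvisor.model.SuiteRunConfiguration;

public class StartSuiteRunExample {
    public static void main(String[] args) {
        try (IotDeviceAdvisorClient deviceAdvisor = IotDeviceAdvisorClient.create()) {
            StartSuiteRunResponse run = deviceAdvisor.startSuiteRun(StartSuiteRunRequest.builder()
                    .suiteDefinitionId("11111111-2222-3333-4444-555555555555") // placeholder suite ID
                    .suiteRunConfiguration(SuiteRunConfiguration.builder()
                            // The primary device under test is identified by its thing and certificate ARNs.
                            .primaryDevice(DeviceUnderTest.builder()
                                    .thingArn("arn:aws:iot:us-east-1:123456789012:thing/MyTestThing")
                                    .certificateArn("arn:aws:iot:us-east-1:123456789012:cert/example")
                                    .build())
                            .build())
                    .build());
            System.out.println("Started run " + run.suiteRunId() + " (" + run.suiteRunArn() + ")");
        }
    }
}
```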

    " + } + } + }, + "Status":{ + "type":"string", + "enum":[ + "PASS", + "FAIL", + "CANCELED", + "PENDING", + "RUNNING", + "PASS_WITH_WARNINGS", + "ERROR" + ] + }, + "String128":{ + "type":"string", + "max":128, + "min":1 + }, + "String256":{ + "type":"string", + "max":256, + "min":1 + }, + "SuiteDefinitionConfiguration":{ + "type":"structure", + "members":{ + "suiteDefinitionName":{ + "shape":"SuiteDefinitionName", + "documentation":"

    Gets Suite Definition Configuration name.

    " + }, + "devices":{ + "shape":"DeviceUnderTestList", + "documentation":"

    Gets the devices configured.

    " + }, + "intendedForQualification":{ + "shape":"IntendedForQualificationBoolean", + "documentation":"

    Gets the tests intended for qualification in a suite.

    " + }, + "rootGroup":{ + "shape":"RootGroup", + "documentation":"

    Gets test suite root group.

    " + }, + "devicePermissionRoleArn":{ + "shape":"AmazonResourceName", + "documentation":"

    Gets device permission arn.

    " + } + }, + "documentation":"

    Gets Suite Definition Configuration.

    " + }, + "SuiteDefinitionInformation":{ + "type":"structure", + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Get suite definition Id.

    " + }, + "suiteDefinitionName":{ + "shape":"SuiteDefinitionName", + "documentation":"

    Get test suite name.

    " + }, + "defaultDevices":{ + "shape":"DeviceUnderTestList", + "documentation":"

    Specifies the devices under test.

    " + }, + "intendedForQualification":{ + "shape":"IntendedForQualificationBoolean", + "documentation":"

    Gets the test suites which will be used for qualification.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    Gets the information of when the test suite was created.

    " + } + }, + "documentation":"

    Get suite definition information.

    " + }, + "SuiteDefinitionInformationList":{ + "type":"list", + "member":{"shape":"SuiteDefinitionInformation"} + }, + "SuiteDefinitionName":{ + "type":"string", + "max":256, + "min":1 + }, + "SuiteDefinitionVersion":{ + "type":"string", + "max":255, + "min":2 + }, + "SuiteRunConfiguration":{ + "type":"structure", + "members":{ + "primaryDevice":{ + "shape":"DeviceUnderTest", + "documentation":"

    Gets the primary device for suite run.

    " + }, + "secondaryDevice":{ + "shape":"DeviceUnderTest", + "documentation":"

    Gets the secondary device for suite run.

    " + }, + "selectedTestList":{ + "shape":"SelectedTestList", + "documentation":"

    Gets test case list.

    " + } + }, + "documentation":"

    Gets suite run configuration.

    " + }, + "SuiteRunInformation":{ + "type":"structure", + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Get suite run information based on suite definition Id.

    " + }, + "suiteDefinitionVersion":{ + "shape":"SuiteDefinitionVersion", + "documentation":"

    Get suite run information based on suite definition version.

    " + }, + "suiteDefinitionName":{ + "shape":"SuiteDefinitionName", + "documentation":"

    Get suite run information based on suite definition name.

    " + }, + "suiteRunId":{ + "shape":"UUID", + "documentation":"

    Get suite run information based on suite run Id.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    Get suite run information based on time suite was created.

    " + }, + "startedAt":{ + "shape":"Timestamp", + "documentation":"

    Get suite run information based on start time of the run.

    " + }, + "endAt":{ + "shape":"Timestamp", + "documentation":"

    Get suite run information based on end time of the run.

    " + }, + "status":{ + "shape":"SuiteRunStatus", + "documentation":"

    Get suite run information based on test run status.

    " + }, + "passed":{ + "shape":"SuiteRunResultCount", + "documentation":"

    Get suite run information based on result of the test suite run.

    " + }, + "failed":{ + "shape":"SuiteRunResultCount", + "documentation":"

    Get suite run information based on result of the test suite run.

    " + } + }, + "documentation":"

    Get suite run information.

    " + }, + "SuiteRunResultCount":{ + "type":"integer", + "max":500, + "min":0 + }, + "SuiteRunStatus":{ + "type":"string", + "enum":[ + "PASS", + "FAIL", + "CANCELED", + "PENDING", + "RUNNING", + "PASS_WITH_WARNINGS", + "ERROR" + ] + }, + "SuiteRunsList":{ + "type":"list", + "member":{"shape":"SuiteRunInformation"} + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"String128"}, + "max":50, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"String128"}, + "value":{"shape":"String256"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The resource ARN of an IoT Device Advisor resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The tags to be attached to the IoT Device Advisor resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TestCase":{ + "type":"structure", + "members":{ + "name":{ + "shape":"TestCaseName", + "documentation":"

    Shows test case name.

    " + }, + "configuration":{ + "shape":"TestConfiguration", + "documentation":"

    Shows test case configuration.

    " + }, + "test":{ + "shape":"TestCaseDefinition", + "documentation":"

    Specifies a test.

    " + } + }, + "documentation":"

    Shows tests in a test group.

    " + }, + "TestCaseCategory":{ + "type":"structure", + "members":{ + "name":{ + "shape":"CategoryName", + "documentation":"

    Lists all the tests name in the specified category.

    " + }, + "tests":{ + "shape":"TestCaseList", + "documentation":"

    Lists all the tests in the specified category.

    " + } + }, + "documentation":"

    Gets the test case category.

    " + }, + "TestCaseDefinition":{ + "type":"structure", + "members":{ + "id":{ + "shape":"TestCaseName", + "documentation":"

    Provides test case definition Id.

    " + }, + "testCaseVersion":{ + "shape":"TestCaseVersion", + "documentation":"

    Provides test case definition version.

    " + } + }, + "documentation":"

    Provides test case definition.

    " + }, + "TestCaseDefinitionName":{"type":"string"}, + "TestCaseList":{ + "type":"list", + "member":{"shape":"TestCase"} + }, + "TestCaseName":{"type":"string"}, + "TestCaseRun":{ + "type":"structure", + "members":{ + "testCaseRunId":{ + "shape":"UUID", + "documentation":"

    Provides test case run Id.

    " + }, + "testCaseDefinitionId":{ + "shape":"UUID", + "documentation":"

    Provides test case run definition Id.

    " + }, + "testCaseDefinitionName":{ + "shape":"TestCaseDefinitionName", + "documentation":"

    Provides test case run definition Name.

    " + }, + "status":{ + "shape":"Status", + "documentation":"

    Provides test case run status.

    " + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    Provides test case run start time.

    " + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

    Provides test case run end time.

    " + }, + "logUrl":{ + "shape":"LogUrl", + "documentation":"

    Provides test case run log Url.

    " + }, + "warnings":{ + "shape":"Warnings", + "documentation":"

    Provides test case run warnings.

    " + }, + "failure":{ + "shape":"Failure", + "documentation":"

    Provides test case run failure result.

    " + } + }, + "documentation":"

    Provides test case run.

    " + }, + "TestCaseRuns":{ + "type":"list", + "member":{"shape":"TestCaseRun"}, + "documentation":"

    Show each group result.

    " + }, + "TestCaseVersion":{"type":"string"}, + "TestCategory":{ + "type":"list", + "member":{"shape":"TestCaseCategory"} + }, + "TestConfiguration":{ + "type":"map", + "key":{"shape":"ConfigString"}, + "value":{"shape":"ConfigString"} + }, + "TestResult":{ + "type":"structure", + "members":{ + "groups":{ + "shape":"GroupResultList", + "documentation":"

    Show each group of test results.

    " + } + }, + "documentation":"

    Show each group result.

    " + }, + "Timestamp":{"type":"timestamp"}, + "Token":{ + "type":"string", + "max":2000 + }, + "UUID":{ + "type":"string", + "max":36, + "min":36 + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The resource ARN of an IoT Device Advisor resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    List of tag keys to remove from the IoT Device Advisor resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateSuiteDefinitionRequest":{ + "type":"structure", + "required":["suiteDefinitionId"], + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Updates a Device Advisor test suite with suite definition id.

    ", + "location":"uri", + "locationName":"suiteDefinitionId" + }, + "suiteDefinitionConfiguration":{ + "shape":"SuiteDefinitionConfiguration", + "documentation":"

    Updates a Device Advisor test suite with suite definition configuration.
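A minimal sketch of updating a suite definition with the SDK v2, again assuming the usual codegen naming; the suite ID, role ARN, and rootGroup value are placeholders:

```java
import software.amazon.awssdk.services.iotdeviceadvisor.IotDeviceAdvisorClient;
import software.amazon.awssdk.services.iotdeviceadvisor.model.SuiteDefinitionConfiguration;
import software.amazon.awssdk.services.iotdeviceadvisor.model.UpdateSuiteDefinitionRequest;
import software.amazon.awssdk.services.iotdeviceadvisor.model.UpdateSuiteDefinitionResponse;

public class UpdateSuiteDefinitionExample {
    public static void main(String[] args) {
        try (IotDeviceAdvisorClient deviceAdvisor = IotDeviceAdvisorClient.create()) {
            UpdateSuiteDefinitionResponse updated = deviceAdvisor.updateSuiteDefinition(
                    UpdateSuiteDefinitionRequest.builder()
                            .suiteDefinitionId("11111111-2222-3333-4444-555555555555") // placeholder
                            .suiteDefinitionConfiguration(SuiteDefinitionConfiguration.builder()
                                    .suiteDefinitionName("MyQualificationSuite")
                                    .intendedForQualification(true)
                                    .rootGroup("{}") // placeholder root test group string (max 2048 chars)
                                    .devicePermissionRoleArn(
                                            "arn:aws:iam::123456789012:role/DeviceAdvisorRole") // placeholder
                                    .build())
                            .build());
            System.out.println("Suite is now at version " + updated.suiteDefinitionVersion());
        }
    }
}
```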

    " + } + } + }, + "UpdateSuiteDefinitionResponse":{ + "type":"structure", + "members":{ + "suiteDefinitionId":{ + "shape":"UUID", + "documentation":"

    Updates a Device Advisor test suite with suite UUID.

    " + }, + "suiteDefinitionArn":{ + "shape":"AmazonResourceName", + "documentation":"

    Updates a Device Advisor test suite with Amazon Resource name.

    " + }, + "suiteDefinitionName":{ + "shape":"SuiteDefinitionName", + "documentation":"

    Updates a Device Advisor test suite with suite definition name.

    " + }, + "suiteDefinitionVersion":{ + "shape":"SuiteDefinitionVersion", + "documentation":"

    Updates a Device Advisor test suite with suite definition version.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    Updates a Device Advisor test suite with TimeStamp of when it was created.

    " + }, + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    Updates a Device Advisor test suite with TimeStamp of when it was updated.

    " + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"Message", + "documentation":"

    Sends invalid request exception message.

    " + } + }, + "documentation":"

    Sends invalid request exception.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Warnings":{"type":"string"} + }, + "documentation":"

    AWS IoT Core Device Advisor is a cloud-based, fully managed test capability for validating IoT devices during device software development. Device Advisor provides pre-built tests that you can use to validate IoT devices for reliable and secure connectivity with AWS IoT Core before deploying devices to production. By using Device Advisor, you can confirm that your devices can connect to AWS IoT Core, follow security best practices and, if applicable, receive software updates from IoT Device Management. You can also download signed qualification reports to submit to the AWS Partner Network to get your device qualified for the AWS Partner Device Catalog without the need to send your device in and wait for it to be tested.

    " +} diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index 71e2cf2df016..1bbf715afe12 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + iotfleethub + AWS Java SDK :: Services :: Io T Fleet Hub + The AWS Java SDK for Io T Fleet Hub module holds the client classes that are used for + communicating with Io T Fleet Hub. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.iotfleethub + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/iotfleethub/src/main/resources/codegen-resources/paginators-1.json b/services/iotfleethub/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..74c96f517796 --- /dev/null +++ b/services/iotfleethub/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,9 @@ +{ + "pagination": { + "ListApplications": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "applicationSummaries" + } + } +} diff --git a/services/iotfleethub/src/main/resources/codegen-resources/service-2.json b/services/iotfleethub/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..48da59f76fc9 --- /dev/null +++ b/services/iotfleethub/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,597 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-11-03", + "endpointPrefix":"api.fleethub.iot", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS IoT Fleet Hub", + "serviceId":"IoTFleetHub", + "signatureVersion":"v4", + "signingName":"iotfleethub", + "uid":"iotfleethub-2020-11-03" + }, + "operations":{ + "CreateApplication":{ + "name":"CreateApplication", + "http":{ + "method":"POST", + "requestUri":"/applications", + "responseCode":201 + }, + "input":{"shape":"CreateApplicationRequest"}, + "output":{"shape":"CreateApplicationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a Fleet Hub for AWS IoT Device Management web application.

    Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.

    " + }, + "DeleteApplication":{ + "name":"DeleteApplication", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{applicationId}", + "responseCode":204 + }, + "input":{"shape":"DeleteApplicationRequest"}, + "output":{"shape":"DeleteApplicationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a Fleet Hub for AWS IoT Device Management web application.

    Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.

    " + }, + "DescribeApplication":{ + "name":"DescribeApplication", + "http":{ + "method":"GET", + "requestUri":"/applications/{applicationId}", + "responseCode":200 + }, + "input":{"shape":"DescribeApplicationRequest"}, + "output":{"shape":"DescribeApplicationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets information about a Fleet Hub for AWS IoT Device Management web application.

    Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.

    " + }, + "ListApplications":{ + "name":"ListApplications", + "http":{ + "method":"GET", + "requestUri":"/applications", + "responseCode":200 + }, + "input":{"shape":"ListApplicationsRequest"}, + "output":{"shape":"ListApplicationsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets a list of Fleet Hub for AWS IoT Device Management web applications for the current account.

    Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists the tags for the specified resource.

    Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Adds to or modifies the tags of the specified resource. Tags are metadata which can be used to manage a resource.

    Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes the specified tags (metadata) from the resource.

    Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.

    " + }, + "UpdateApplication":{ + "name":"UpdateApplication", + "http":{ + "method":"PATCH", + "requestUri":"/applications/{applicationId}", + "responseCode":202 + }, + "input":{"shape":"UpdateApplicationRequest"}, + "output":{"shape":"UpdateApplicationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Updates information about a Fleet Hub for AWS IoT Device Management web application.

    Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.

    " + } + }, + "shapes":{ + "ApplicationState":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "ACTIVE", + "CREATE_FAILED", + "DELETE_FAILED" + ] + }, + "ApplicationSummaries":{ + "type":"list", + "member":{"shape":"ApplicationSummary"} + }, + "ApplicationSummary":{ + "type":"structure", + "required":[ + "applicationId", + "applicationName", + "applicationUrl" + ], + "members":{ + "applicationId":{ + "shape":"Id", + "documentation":"

    The unique Id of the web application.

    " + }, + "applicationName":{ + "shape":"Name", + "documentation":"

    The name of the web application.

    " + }, + "applicationDescription":{ + "shape":"Description", + "documentation":"

    An optional description of the web application.

    " + }, + "applicationUrl":{ + "shape":"Url", + "documentation":"

    The URL of the web application.

    " + }, + "applicationCreationDate":{ + "shape":"Timestamp", + "documentation":"

    The date (in Unix epoch time) when the web application was created.

    " + }, + "applicationLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date (in Unix epoch time) when the web application was last updated.

    " + }, + "applicationState":{ + "shape":"ApplicationState", + "documentation":"

    The current state of the web application.

    " + } + }, + "documentation":"

    A summary of information about an AWS IoT Device Management web application.

    Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.

    " + }, + "Arn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:[!-~]+$" + }, + "ClientRequestToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-_]+$" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

    The request conflicts with the current state of the resource.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateApplicationRequest":{ + "type":"structure", + "required":[ + "applicationName", + "roleArn" + ], + "members":{ + "applicationName":{ + "shape":"Name", + "documentation":"

    The name of the web application.

    " + }, + "applicationDescription":{ + "shape":"Description", + "documentation":"

    An optional description of the web application.

    " + }, + "clientToken":{ + "shape":"ClientRequestToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + }, + "roleArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the role that the web application assumes when it interacts with AWS IoT Core.

    The name of the role must be in the form AWSIotFleetHub_random_string .

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A set of key/value pairs that you can use to manage the web application resource.
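A minimal sketch of creating a Fleet Hub web application with the SDK v2, assuming the generated client name IoTFleetHubClient; the role ARN is a placeholder that follows the AWSIotFleetHub_random_string naming rule described above:

```java
import software.amazon.awssdk.services.iotfleethub.IoTFleetHubClient;
import software.amazon.awssdk.services.iotfleethub.model.CreateApplicationRequest;
import software.amazon.awssdk.services.iotfleethub.model.CreateApplicationResponse;

public class CreateFleetHubApplicationExample {
    public static void main(String[] args) {
        try (IoTFleetHubClient fleetHub = IoTFleetHubClient.create()) {
            CreateApplicationResponse app = fleetHub.createApplication(CreateApplicationRequest.builder()
                    .applicationName("fleet-monitoring")
                    .applicationDescription("Monitors the production device fleet")
                    // The role name must take the form AWSIotFleetHub_<random_string>; placeholder ARN.
                    .roleArn("arn:aws:iam::123456789012:role/AWSIotFleetHub_example")
                    // clientToken is omitted; members marked idempotencyToken are filled in by the SDK.
                    .build());
            System.out.println(app.applicationId() + " -> " + app.applicationArn());
        }
    }
}
```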

    " + } + } + }, + "CreateApplicationResponse":{ + "type":"structure", + "required":[ + "applicationId", + "applicationArn" + ], + "members":{ + "applicationId":{ + "shape":"Id", + "documentation":"

    The unique Id of the web application.

    " + }, + "applicationArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the web application.

    " + } + } + }, + "DeleteApplicationRequest":{ + "type":"structure", + "required":["applicationId"], + "members":{ + "applicationId":{ + "shape":"Id", + "documentation":"

    The unique Id of the web application.

    ", + "location":"uri", + "locationName":"applicationId" + }, + "clientToken":{ + "shape":"ClientRequestToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteApplicationResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeApplicationRequest":{ + "type":"structure", + "required":["applicationId"], + "members":{ + "applicationId":{ + "shape":"Id", + "documentation":"

    The unique Id of the web application.

    ", + "location":"uri", + "locationName":"applicationId" + } + } + }, + "DescribeApplicationResponse":{ + "type":"structure", + "required":[ + "applicationId", + "applicationArn", + "applicationName", + "applicationUrl", + "applicationState", + "applicationCreationDate", + "applicationLastUpdateDate", + "roleArn" + ], + "members":{ + "applicationId":{ + "shape":"Id", + "documentation":"

    The unique Id of the web application.

    " + }, + "applicationArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the web application.

    " + }, + "applicationName":{ + "shape":"Name", + "documentation":"

    The name of the web application.

    " + }, + "applicationDescription":{ + "shape":"Description", + "documentation":"

    An optional description of the web application.

    " + }, + "applicationUrl":{ + "shape":"Url", + "documentation":"

    The URL of the web application.

    " + }, + "applicationState":{ + "shape":"ApplicationState", + "documentation":"

    The current state of the web application.

    " + }, + "applicationCreationDate":{ + "shape":"Timestamp", + "documentation":"

    The date (in Unix epoch time) when the application was created.

    " + }, + "applicationLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date (in Unix epoch time) when the application was last updated.

    " + }, + "roleArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the role that the web application assumes when it interacts with AWS IoT Core.

    " + }, + "ssoClientId":{ + "shape":"SsoClientId", + "documentation":"

    The Id of the single sign-on client that you use to authenticate and authorize users on the web application.

    " + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    A message indicating why the DescribeApplication API failed.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A set of key/value pairs that you can use to manage the web application resource.

    " + } + } + }, + "Description":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[ -~]*$" + }, + "ErrorMessage":{"type":"string"}, + "Id":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + }, + "InternalFailureException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

    An unexpected error has occurred.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

    The request is not valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

    A limit has been exceeded.

    ", + "error":{"httpStatusCode":410}, + "exception":true + }, + "ListApplicationsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token used to get the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListApplicationsResponse":{ + "type":"structure", + "members":{ + "applicationSummaries":{ + "shape":"ApplicationSummaries", + "documentation":"

    An array of objects that provide summaries of information about the web applications in the list.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token used to get the next set of results.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    The list of tags assigned to the resource.

    " + } + } + }, + "Name":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[ -~]*$" + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[A-Za-z0-9+/=]+$" + }, + "ResourceArn":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

    The specified resource does not exist.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SsoClientId":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The new or modified tags for the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

    The rate exceeds the limit.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Timestamp":{"type":"long"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    A list of the keys of the tags to be removed from the resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateApplicationRequest":{ + "type":"structure", + "required":["applicationId"], + "members":{ + "applicationId":{ + "shape":"Id", + "documentation":"

    The unique Id of the web application.

    ", + "location":"uri", + "locationName":"applicationId" + }, + "applicationName":{ + "shape":"Name", + "documentation":"

    The name of the web application.

    " + }, + "applicationDescription":{ + "shape":"Description", + "documentation":"

    An optional description of the web application.

    " + }, + "clientToken":{ + "shape":"ClientRequestToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "UpdateApplicationResponse":{ + "type":"structure", + "members":{ + } + }, + "Url":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^https\\://\\S+$" + }, + "errorMessage":{"type":"string"} + }, + "documentation":"

    With Fleet Hub for AWS IoT Device Management you can build stand-alone web applications for monitoring the health of your device fleets.

    Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.

    " +} diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index a05e593393a1..d07cc306629b 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + iotsecuretunneling + AWS Java SDK :: Services :: IoTSecureTunneling + The AWS Java SDK for IoTSecureTunneling module holds the client classes that are used for + communicating with IoTSecureTunneling. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.iotsecuretunneling + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/iotsecuretunneling/src/main/resources/codegen-resources/paginators-1.json b/services/iotsecuretunneling/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..cd36b9c69a85 --- /dev/null +++ b/services/iotsecuretunneling/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,9 @@ +{ + "pagination": { + "ListTunnels": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + } + } +} diff --git a/services/iotsecuretunneling/src/main/resources/codegen-resources/service-2.json b/services/iotsecuretunneling/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..622d36f63a77 --- /dev/null +++ b/services/iotsecuretunneling/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,535 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-10-05", + "endpointPrefix":"api.tunneling.iot", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS IoT Secure Tunneling", + "serviceId":"IoTSecureTunneling", + "signatureVersion":"v4", + "signingName":"IoTSecuredTunneling", + "targetPrefix":"IoTSecuredTunneling", + "uid":"iotsecuretunneling-2018-10-05" + }, + "operations":{ + "CloseTunnel":{ + "name":"CloseTunnel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CloseTunnelRequest"}, + "output":{"shape":"CloseTunnelResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Closes a tunnel identified by the unique tunnel id. When a CloseTunnel request is received, we close the WebSocket connections between the client and proxy server so no data can be transmitted.

    " + }, + "DescribeTunnel":{ + "name":"DescribeTunnel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTunnelRequest"}, + "output":{"shape":"DescribeTunnelResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets information about a tunnel identified by the unique tunnel id.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists the tags for the specified resource.

    " + }, + "ListTunnels":{ + "name":"ListTunnels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTunnelsRequest"}, + "output":{"shape":"ListTunnelsResponse"}, + "documentation":"

    Lists all tunnels for an AWS account. Tunnels are listed by creation time in descending order; newer tunnels are listed before older tunnels.
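Because the paginators-1.json in this change declares ListTunnels as pageable, the generated sync client should also expose a paginator; a minimal sketch, assuming the usual naming (IoTSecureTunnelingClient, listTunnelsPaginator) and a placeholder thing name:

```java
import software.amazon.awssdk.services.iotsecuretunneling.IoTSecureTunnelingClient;
import software.amazon.awssdk.services.iotsecuretunneling.model.ListTunnelsRequest;

public class ListTunnelsExample {
    public static void main(String[] args) {
        try (IoTSecureTunnelingClient tunneling = IoTSecureTunnelingClient.create()) {
            // The paginator follows nextToken automatically; results come back newest first.
            tunneling.listTunnelsPaginator(ListTunnelsRequest.builder()
                            .thingName("MyRemoteDevice") // optional filter; placeholder thing name
                            .maxResults(100)
                            .build())
                    .stream()
                    .flatMap(page -> page.tunnelSummaries().stream())
                    .forEach(t -> System.out.println(t.tunnelId() + " " + t.statusAsString()));
        }
    }
}
```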

    " + }, + "OpenTunnel":{ + "name":"OpenTunnel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"OpenTunnelRequest"}, + "output":{"shape":"OpenTunnelResponse"}, + "errors":[ + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a new tunnel, and returns two client access tokens for clients to use to connect to the AWS IoT Secure Tunneling proxy server.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Adds to or modifies the tags of the specified resource. Tags are metadata that can be used to manage a resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes a tag from a resource.

    " + } + }, + "shapes":{ + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, + "ClientAccessToken":{ + "type":"string", + "sensitive":true + }, + "CloseTunnelRequest":{ + "type":"structure", + "required":["tunnelId"], + "members":{ + "tunnelId":{ + "shape":"TunnelId", + "documentation":"

    The ID of the tunnel to close.

    " + }, + "delete":{ + "shape":"DeleteFlag", + "documentation":"

    When set to true, AWS IoT Secure Tunneling deletes the tunnel data immediately.
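A minimal sketch of closing a tunnel and deleting its data in the same call, assuming the generated client name IoTSecureTunnelingClient; the tunnel ID is a placeholder returned earlier by OpenTunnel:

```java
import software.amazon.awssdk.services.iotsecuretunneling.IoTSecureTunnelingClient;
import software.amazon.awssdk.services.iotsecuretunneling.model.CloseTunnelRequest;

public class CloseTunnelExample {
    public static void main(String[] args) {
        try (IoTSecureTunnelingClient tunneling = IoTSecureTunnelingClient.create()) {
            tunneling.closeTunnel(CloseTunnelRequest.builder()
                    .tunnelId("example-tunnel-id") // placeholder; returned by OpenTunnel
                    .delete(true)                  // also delete the tunnel data immediately
                    .build());
        }
    }
}
```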

    ", + "box":true + } + } + }, + "CloseTunnelResponse":{ + "type":"structure", + "members":{ + } + }, + "ConnectionState":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ConnectionStatus", + "documentation":"

    The connection status of the tunnel. Valid values are CONNECTED and DISCONNECTED.

    " + }, + "lastUpdatedAt":{ + "shape":"DateType", + "documentation":"

    The last time the connection status was updated.

    " + } + }, + "documentation":"

    The state of a connection.

    " + }, + "ConnectionStatus":{ + "type":"string", + "enum":[ + "CONNECTED", + "DISCONNECTED" + ] + }, + "DateType":{"type":"timestamp"}, + "DeleteFlag":{"type":"boolean"}, + "DescribeTunnelRequest":{ + "type":"structure", + "required":["tunnelId"], + "members":{ + "tunnelId":{ + "shape":"TunnelId", + "documentation":"

    The tunnel to describe.

    " + } + } + }, + "DescribeTunnelResponse":{ + "type":"structure", + "members":{ + "tunnel":{ + "shape":"Tunnel", + "documentation":"

    The tunnel being described.

    " + } + } + }, + "Description":{ + "type":"string", + "pattern":"[^\\p{C}]{1,2048}" + }, + "DestinationConfig":{ + "type":"structure", + "required":["services"], + "members":{ + "thingName":{ + "shape":"ThingName", + "documentation":"

    The name of the IoT thing to which you want to connect.

    " + }, + "services":{ + "shape":"ServiceList", + "documentation":"

    A list of service names that identify the target application. The AWS IoT client running on the destination device reads this value and uses it to look up a port or an IP address and a port. The AWS IoT client instantiates the local proxy which uses this information to connect to the destination application.

    " + } + }, + "documentation":"

    The destination configuration.

    " + }, + "ErrorMessage":{"type":"string"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Thrown when a tunnel limit is exceeded.

    ", + "exception":true + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The resource ARN.

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagList", + "documentation":"

    The tags for the specified resource.

    " + } + } + }, + "ListTunnelsRequest":{ + "type":"structure", + "members":{ + "thingName":{ + "shape":"ThingName", + "documentation":"

    The name of the IoT thing associated with the destination device.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return at once.

    ", + "box":true + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token to retrieve the next set of results.

    " + } + } + }, + "ListTunnelsResponse":{ + "type":"structure", + "members":{ + "tunnelSummaries":{ + "shape":"TunnelSummaryList", + "documentation":"

    A short description of the tunnels in an AWS account.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token used to retrieve the next set of results.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "NextToken":{ + "type":"string", + "pattern":"[a-zA-Z0-9_=-]{1,4096}" + }, + "OpenTunnelRequest":{ + "type":"structure", + "members":{ + "description":{ + "shape":"Description", + "documentation":"

    A short text description of the tunnel.

    " + }, + "tags":{ + "shape":"TagList", + "documentation":"

    A collection of tag metadata.

    " + }, + "destinationConfig":{ + "shape":"DestinationConfig", + "documentation":"

    The destination configuration for the OpenTunnel request.

    " + }, + "timeoutConfig":{ + "shape":"TimeoutConfig", + "documentation":"

    Timeout configuration for a tunnel.

    " + } + } + }, + "OpenTunnelResponse":{ + "type":"structure", + "members":{ + "tunnelId":{ + "shape":"TunnelId", + "documentation":"

    A unique alpha-numeric tunnel ID.

    " + }, + "tunnelArn":{ + "shape":"TunnelArn", + "documentation":"

    The Amazon Resource Name for the tunnel. The tunnel ARN format is arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id>

    " + }, + "sourceAccessToken":{ + "shape":"ClientAccessToken", + "documentation":"

    The access token the source local proxy uses to connect to AWS IoT Secure Tunneling.

    " + }, + "destinationAccessToken":{ + "shape":"ClientAccessToken", + "documentation":"

    The access token the destination local proxy uses to connect to AWS IoT Secure Tunneling.
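A minimal sketch of opening a tunnel with the SDK v2, assuming the usual codegen naming (IoTSecureTunnelingClient, OpenTunnelRequest); the thing name and service name are placeholders. The two returned tokens are sensitive and are handed to the source and destination local proxies:

```java
import software.amazon.awssdk.services.iotsecuretunneling.IoTSecureTunnelingClient;
import software.amazon.awssdk.services.iotsecuretunneling.model.DestinationConfig;
import software.amazon.awssdk.services.iotsecuretunneling.model.OpenTunnelRequest;
import software.amazon.awssdk.services.iotsecuretunneling.model.OpenTunnelResponse;
import software.amazon.awssdk.services.iotsecuretunneling.model.TimeoutConfig;

public class OpenTunnelExample {
    public static void main(String[] args) {
        try (IoTSecureTunnelingClient tunneling = IoTSecureTunnelingClient.create()) {
            OpenTunnelResponse tunnel = tunneling.openTunnel(OpenTunnelRequest.builder()
                    .description("SSH to a device behind a firewall")
                    .destinationConfig(DestinationConfig.builder()
                            .thingName("MyRemoteDevice") // placeholder IoT thing name
                            .services("SSH")             // service name the destination local proxy maps to a port
                            .build())
                    .timeoutConfig(TimeoutConfig.builder()
                            .maxLifetimeTimeoutMinutes(60) // 1..720 minutes; defaults to 720 if omitted
                            .build())
                    .build());
            System.out.println("Opened tunnel " + tunnel.tunnelId());
            // Pass these tokens to the source and destination local proxies; do not log them.
            String sourceToken = tunnel.sourceAccessToken();
            String destinationToken = tunnel.destinationAccessToken();
        }
    }
}
```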

    " + } + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Thrown when an operation is attempted on a resource that does not exist.

    ", + "exception":true + }, + "Service":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9:_-]+" + }, + "ServiceList":{ + "type":"list", + "member":{"shape":"Service"}, + "min":1 + }, + "Tag":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"TagKey", + "documentation":"

    The key of the tag.

    " + }, + "value":{ + "shape":"TagValue", + "documentation":"

    The value of the tag.

    " + } + }, + "documentation":"

    An arbitrary key/value pair used to add searchable metadata to secure tunnel resources.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The ARN of the resource.

    " + }, + "tags":{ + "shape":"TagList", + "documentation":"

    The tags for the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "ThingName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9:_-]+" + }, + "TimeoutConfig":{ + "type":"structure", + "members":{ + "maxLifetimeTimeoutMinutes":{ + "shape":"TimeoutInMin", + "documentation":"

    The maximum amount of time (in minutes) a tunnel can remain open. If not specified, maxLifetimeTimeoutMinutes defaults to 720 minutes. Valid values are from 1 minute to 12 hours (720 minutes)

    ", + "box":true + } + }, + "documentation":"

    Tunnel timeout configuration.

    " + }, + "TimeoutInMin":{ + "type":"integer", + "max":720, + "min":1 + }, + "Tunnel":{ + "type":"structure", + "members":{ + "tunnelId":{ + "shape":"TunnelId", + "documentation":"

    A unique alpha-numeric ID that identifies a tunnel.

    " + }, + "tunnelArn":{ + "shape":"TunnelArn", + "documentation":"

    The Amazon Resource Name (ARN) of a tunnel. The tunnel ARN format is arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id>

    " + }, + "status":{ + "shape":"TunnelStatus", + "documentation":"

    The status of a tunnel. Valid values are: Open and Closed.

    " + }, + "sourceConnectionState":{ + "shape":"ConnectionState", + "documentation":"

    The connection state of the source application.

    " + }, + "destinationConnectionState":{ + "shape":"ConnectionState", + "documentation":"

    The connection state of the destination application.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    A description of the tunnel.

    " + }, + "destinationConfig":{ + "shape":"DestinationConfig", + "documentation":"

    The destination configuration that specifies the thing name of the destination device and a service name that the local proxy uses to connect to the destination application.

    " + }, + "timeoutConfig":{ + "shape":"TimeoutConfig", + "documentation":"

    Timeout configuration for the tunnel.

    " + }, + "tags":{ + "shape":"TagList", + "documentation":"

    A list of tag metadata associated with the secure tunnel.

    " + }, + "createdAt":{ + "shape":"DateType", + "documentation":"

    The time when the tunnel was created.

    " + }, + "lastUpdatedAt":{ + "shape":"DateType", + "documentation":"

    The last time the tunnel was updated.

    " + } + }, + "documentation":"

    A connection between a source computer and a destination device.

    " + }, + "TunnelArn":{ + "type":"string", + "max":1600, + "min":1 + }, + "TunnelId":{ + "type":"string", + "pattern":"[a-zA-Z0-9_\\-+=:]{1,128}" + }, + "TunnelStatus":{ + "type":"string", + "enum":[ + "OPEN", + "CLOSED" + ] + }, + "TunnelSummary":{ + "type":"structure", + "members":{ + "tunnelId":{ + "shape":"TunnelId", + "documentation":"

    The unique alpha-numeric identifier for the tunnel.

    " + }, + "tunnelArn":{ + "shape":"TunnelArn", + "documentation":"

    The Amazon Resource Name of the tunnel. The tunnel ARN format is arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id>

    " + }, + "status":{ + "shape":"TunnelStatus", + "documentation":"

    The status of a tunnel. Valid values are: Open and Closed.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    A description of the tunnel.

    " + }, + "createdAt":{ + "shape":"DateType", + "documentation":"

    The time the tunnel was created.

    " + }, + "lastUpdatedAt":{ + "shape":"DateType", + "documentation":"

    The time the tunnel was last updated.

    " + } + }, + "documentation":"

    Information about the tunnel.

    " + }, + "TunnelSummaryList":{ + "type":"list", + "member":{"shape":"TunnelSummary"} + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The resource ARN.

    " + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The keys of the tags to remove.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + } + }, + "documentation":"AWS IoT Secure Tunneling

    AWS IoT Secure Tunneling enables you to create remote connections to devices deployed in the field.

    For more information about how AWS IoT Secure Tunneling works, see AWS IoT Secure Tunneling.

    " +} diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml new file mode 100644 index 000000000000..4d9792c55cd4 --- /dev/null +++ b/services/iotsitewise/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + iotsitewise + AWS Java SDK :: Services :: Io T Site Wise + The AWS Java SDK for Io T Site Wise module holds the client classes that are used for + communicating with Io T Site Wise. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.iotsitewise + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/iotsitewise/src/main/resources/codegen-resources/paginators-1.json b/services/iotsitewise/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..6477a679a043 --- /dev/null +++ b/services/iotsitewise/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,76 @@ +{ + "pagination": { + "GetAssetPropertyAggregates": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "aggregatedValues" + }, + "GetAssetPropertyValueHistory": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetPropertyValueHistory" + }, + "ListAccessPolicies": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "accessPolicySummaries" + }, + "ListAssetModels": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetModelSummaries" + }, + "ListAssetRelationships": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetRelationshipSummaries" + }, + "ListAssets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetSummaries" + }, + "ListAssociatedAssets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetSummaries" + }, + "ListDashboards": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "dashboardSummaries" + }, + "ListGateways": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "gatewaySummaries" + }, + "ListPortals": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "portalSummaries" + }, + "ListProjectAssets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetIds" + }, + "ListProjects": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "projectSummaries" + } + } +} diff --git a/services/iotsitewise/src/main/resources/codegen-resources/service-2.json b/services/iotsitewise/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..7c67f5fd59af --- /dev/null +++ b/services/iotsitewise/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,5317 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-12-02", + "endpointPrefix":"iotsitewise", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS IoT SiteWise", + "serviceId":"IoTSiteWise", + 
"signatureVersion":"v4", + "signingName":"iotsitewise", + "uid":"iotsitewise-2019-12-02" + }, + "operations":{ + "AssociateAssets":{ + "name":"AssociateAssets", + "http":{ + "method":"POST", + "requestUri":"/assets/{assetId}/associate" + }, + "input":{"shape":"AssociateAssetsRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Associates a child asset with the given parent asset through a hierarchy defined in the parent asset's model. For more information, see Associating assets in the AWS IoT SiteWise User Guide.
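
    A minimal sketch of this call with the AWS SDK for Java v2, assuming the generated sync client class is IoTSiteWiseClient (per the serviceId above) and using placeholder IDs:

    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;

    public class AssociateAssetsExample {
        public static void main(String[] args) {
            // Sketch only: IoTSiteWiseClient is the assumed generated client class; all IDs are placeholders.
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                // hierarchyId must identify a hierarchy defined in the parent asset's model.
                siteWise.associateAssets(r -> r
                        .assetId("parent-asset-id")
                        .hierarchyId("hierarchy-id-from-parent-model")
                        .childAssetId("child-asset-id"));
            }
        }
    }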

    ", + "endpoint":{"hostPrefix":"model."} + }, + "BatchAssociateProjectAssets":{ + "name":"BatchAssociateProjectAssets", + "http":{ + "method":"POST", + "requestUri":"/projects/{projectId}/assets/associate", + "responseCode":200 + }, + "input":{"shape":"BatchAssociateProjectAssetsRequest"}, + "output":{"shape":"BatchAssociateProjectAssetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Associates a group (batch) of assets with an AWS IoT SiteWise Monitor project.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "BatchDisassociateProjectAssets":{ + "name":"BatchDisassociateProjectAssets", + "http":{ + "method":"POST", + "requestUri":"/projects/{projectId}/assets/disassociate", + "responseCode":200 + }, + "input":{"shape":"BatchDisassociateProjectAssetsRequest"}, + "output":{"shape":"BatchDisassociateProjectAssetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Disassociates a group (batch) of assets from an AWS IoT SiteWise Monitor project.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "BatchPutAssetPropertyValue":{ + "name":"BatchPutAssetPropertyValue", + "http":{ + "method":"POST", + "requestUri":"/properties" + }, + "input":{"shape":"BatchPutAssetPropertyValueRequest"}, + "output":{"shape":"BatchPutAssetPropertyValueResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Sends a list of asset property values to AWS IoT SiteWise. Each value is a timestamp-quality-value (TQV) data point. For more information, see Ingesting data using the API in the AWS IoT SiteWise User Guide.

    To identify an asset property, you must specify one of the following:

    • The assetId and propertyId of an asset property.

    • A propertyAlias, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see UpdateAssetProperty.

    With respect to Unix epoch time, AWS IoT SiteWise accepts only TQVs that have a timestamp of no more than 7 days in the past and no more than 5 minutes in the future. AWS IoT SiteWise rejects timestamps outside of the inclusive range of [-7 days, +5 minutes] and returns a TimestampOutOfRangeException error.

    For each asset property, AWS IoT SiteWise overwrites TQVs with duplicate timestamps unless the newer TQV has a different quality. For example, if you store a TQV {T1, GOOD, V1}, then storing {T1, GOOD, V2} replaces the existing TQV.

    AWS IoT SiteWise authorizes access to each BatchPutAssetPropertyValue entry individually. For more information, see BatchPutAssetPropertyValue authorization in the AWS IoT SiteWise User Guide.
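
    As a rough illustration with the v2 SDK (assuming the generated client class IoTSiteWiseClient; the entry ID, alias, and value below are placeholders), a single TQV could be ingested like this:

    import java.time.Instant;
    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;
    import software.amazon.awssdk.services.iotsitewise.model.AssetPropertyValue;
    import software.amazon.awssdk.services.iotsitewise.model.BatchPutAssetPropertyValueResponse;
    import software.amazon.awssdk.services.iotsitewise.model.PutAssetPropertyValueEntry;
    import software.amazon.awssdk.services.iotsitewise.model.Quality;
    import software.amazon.awssdk.services.iotsitewise.model.TimeInNanos;
    import software.amazon.awssdk.services.iotsitewise.model.Variant;

    public class BatchPutExample {
        public static void main(String[] args) {
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                // One timestamp-quality-value (TQV); the timestamp must fall within [-7 days, +5 minutes] of now.
                AssetPropertyValue tqv = AssetPropertyValue.builder()
                        .value(Variant.builder().doubleValue(38.2).build())
                        .timestamp(TimeInNanos.builder()
                                .timeInSeconds(Instant.now().getEpochSecond())
                                .offsetInNanos(0)
                                .build())
                        .quality(Quality.GOOD)
                        .build();

                // Identify the target property by alias (assetId + propertyId works as well).
                PutAssetPropertyValueEntry entry = PutAssetPropertyValueEntry.builder()
                        .entryId("turbine-7-temperature-1")   // placeholder, unique within the batch
                        .propertyAlias("/company/windfarm/3/turbine/7/temperature")
                        .propertyValues(tqv)
                        .build();

                BatchPutAssetPropertyValueResponse response =
                        siteWise.batchPutAssetPropertyValue(r -> r.entries(entry));

                // Each entry is authorized and validated individually; failures come back per entry.
                response.errorEntries().forEach(System.out::println);
            }
        }
    }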

    ", + "endpoint":{"hostPrefix":"data."} + }, + "CreateAccessPolicy":{ + "name":"CreateAccessPolicy", + "http":{ + "method":"POST", + "requestUri":"/access-policies", + "responseCode":201 + }, + "input":{"shape":"CreateAccessPolicyRequest"}, + "output":{"shape":"CreateAccessPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates an access policy that grants the specified identity (AWS SSO user, AWS SSO group, or IAM user) access to the specified AWS IoT SiteWise Monitor portal or project resource.
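
    A sketch of granting an AWS SSO user read-only access to a portal (the IoTSiteWiseClient class name, user ID, and portal ID are assumptions/placeholders):

    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;
    import software.amazon.awssdk.services.iotsitewise.model.CreateAccessPolicyResponse;
    import software.amazon.awssdk.services.iotsitewise.model.Permission;

    public class CreateAccessPolicyExample {
        public static void main(String[] args) {
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                CreateAccessPolicyResponse response = siteWise.createAccessPolicy(r -> r
                        .accessPolicyIdentity(identity -> identity
                                .user(user -> user.id("sso-user-id")))        // placeholder AWS SSO user ID
                        .accessPolicyResource(resource -> resource
                                .portal(portal -> portal.id("portal-id")))    // placeholder portal ID
                        .accessPolicyPermission(Permission.VIEWER));          // VIEWER = read-only

                System.out.println("Created access policy " + response.accessPolicyId());
            }
        }
    }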

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "CreateAsset":{ + "name":"CreateAsset", + "http":{ + "method":"POST", + "requestUri":"/assets", + "responseCode":202 + }, + "input":{"shape":"CreateAssetRequest"}, + "output":{"shape":"CreateAssetResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Creates an asset from an existing asset model. For more information, see Creating assets in the AWS IoT SiteWise User Guide.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "CreateAssetModel":{ + "name":"CreateAssetModel", + "http":{ + "method":"POST", + "requestUri":"/asset-models", + "responseCode":202 + }, + "input":{"shape":"CreateAssetModelRequest"}, + "output":{"shape":"CreateAssetModelResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Creates an asset model from specified property and hierarchy definitions. You create assets from asset models. With asset models, you can easily create assets of the same type that have standardized definitions. Each asset created from a model inherits the asset model's property and hierarchy definitions. For more information, see Defining asset models in the AWS IoT SiteWise User Guide.
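
    For illustration, a minimal model with one measurement property might look like the following sketch (IoTSiteWiseClient is the assumed client class; the PropertyType/Measurement shapes belong to this service model but fall outside this excerpt; names and units are placeholders):

    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;
    import software.amazon.awssdk.services.iotsitewise.model.AssetModelPropertyDefinition;
    import software.amazon.awssdk.services.iotsitewise.model.CreateAssetModelResponse;
    import software.amazon.awssdk.services.iotsitewise.model.Measurement;
    import software.amazon.awssdk.services.iotsitewise.model.PropertyDataType;
    import software.amazon.awssdk.services.iotsitewise.model.PropertyType;

    public class CreateAssetModelExample {
        public static void main(String[] args) {
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                // A measurement property represents a raw data stream from industrial equipment.
                AssetModelPropertyDefinition temperature = AssetModelPropertyDefinition.builder()
                        .name("Temperature")
                        .dataType(PropertyDataType.DOUBLE)
                        .unit("Celsius")
                        .type(PropertyType.builder().measurement(Measurement.builder().build()).build())
                        .build();

                CreateAssetModelResponse response = siteWise.createAssetModel(r -> r
                        .assetModelName("Wind Turbine")                       // placeholder
                        .assetModelDescription("Model for a single wind turbine")
                        .assetModelProperties(temperature));

                // Assets can be created from the model once its status reaches ACTIVE.
                System.out.println(response.assetModelId() + " " + response.assetModelStatus().stateAsString());
            }
        }
    }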

    ", + "endpoint":{"hostPrefix":"model."} + }, + "CreateDashboard":{ + "name":"CreateDashboard", + "http":{ + "method":"POST", + "requestUri":"/dashboards", + "responseCode":201 + }, + "input":{"shape":"CreateDashboardRequest"}, + "output":{"shape":"CreateDashboardResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a dashboard in an AWS IoT SiteWise Monitor project.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "CreateGateway":{ + "name":"CreateGateway", + "http":{ + "method":"POST", + "requestUri":"/20200301/gateways", + "responseCode":201 + }, + "input":{"shape":"CreateGatewayRequest"}, + "output":{"shape":"CreateGatewayResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a gateway, which is a virtual or edge device that delivers industrial data streams from local servers to AWS IoT SiteWise. For more information, see Ingesting data using a gateway in the AWS IoT SiteWise User Guide.

    ", + "endpoint":{"hostPrefix":"edge."} + }, + "CreatePortal":{ + "name":"CreatePortal", + "http":{ + "method":"POST", + "requestUri":"/portals", + "responseCode":202 + }, + "input":{"shape":"CreatePortalRequest"}, + "output":{"shape":"CreatePortalResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a portal, which can contain projects and dashboards. AWS IoT SiteWise Monitor uses AWS SSO or IAM to authenticate portal users and manage user permissions.

    Before you can sign in to a new portal, you must add at least one identity to that portal. For more information, see Adding or removing portal administrators in the AWS IoT SiteWise User Guide.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "CreateProject":{ + "name":"CreateProject", + "http":{ + "method":"POST", + "requestUri":"/projects", + "responseCode":201 + }, + "input":{"shape":"CreateProjectRequest"}, + "output":{"shape":"CreateProjectResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a project in the specified portal.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "DeleteAccessPolicy":{ + "name":"DeleteAccessPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/access-policies/{accessPolicyId}", + "responseCode":204 + }, + "input":{"shape":"DeleteAccessPolicyRequest"}, + "output":{"shape":"DeleteAccessPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes an access policy that grants the specified identity access to the specified AWS IoT SiteWise Monitor resource. You can use this operation to revoke access to an AWS IoT SiteWise Monitor resource.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "DeleteAsset":{ + "name":"DeleteAsset", + "http":{ + "method":"DELETE", + "requestUri":"/assets/{assetId}", + "responseCode":202 + }, + "input":{"shape":"DeleteAssetRequest"}, + "output":{"shape":"DeleteAssetResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Deletes an asset. This action can't be undone. For more information, see Deleting assets and models in the AWS IoT SiteWise User Guide.

    You can't delete an asset that's associated with another asset. For more information, see DisassociateAssets.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "DeleteAssetModel":{ + "name":"DeleteAssetModel", + "http":{ + "method":"DELETE", + "requestUri":"/asset-models/{assetModelId}", + "responseCode":202 + }, + "input":{"shape":"DeleteAssetModelRequest"}, + "output":{"shape":"DeleteAssetModelResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Deletes an asset model. This action can't be undone. You must delete all assets created from an asset model before you can delete the model. Also, you can't delete an asset model if a parent asset model exists that contains a property formula expression that depends on the asset model that you want to delete. For more information, see Deleting assets and models in the AWS IoT SiteWise User Guide.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "DeleteDashboard":{ + "name":"DeleteDashboard", + "http":{ + "method":"DELETE", + "requestUri":"/dashboards/{dashboardId}", + "responseCode":204 + }, + "input":{"shape":"DeleteDashboardRequest"}, + "output":{"shape":"DeleteDashboardResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a dashboard from AWS IoT SiteWise Monitor.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "DeleteGateway":{ + "name":"DeleteGateway", + "http":{ + "method":"DELETE", + "requestUri":"/20200301/gateways/{gatewayId}" + }, + "input":{"shape":"DeleteGatewayRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a gateway from AWS IoT SiteWise. When you delete a gateway, some of the gateway's files remain in your gateway's file system.

    ", + "endpoint":{"hostPrefix":"edge."} + }, + "DeletePortal":{ + "name":"DeletePortal", + "http":{ + "method":"DELETE", + "requestUri":"/portals/{portalId}", + "responseCode":202 + }, + "input":{"shape":"DeletePortalRequest"}, + "output":{"shape":"DeletePortalResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Deletes a portal from AWS IoT SiteWise Monitor.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "DeleteProject":{ + "name":"DeleteProject", + "http":{ + "method":"DELETE", + "requestUri":"/projects/{projectId}", + "responseCode":204 + }, + "input":{"shape":"DeleteProjectRequest"}, + "output":{"shape":"DeleteProjectResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a project from AWS IoT SiteWise Monitor.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "DescribeAccessPolicy":{ + "name":"DescribeAccessPolicy", + "http":{ + "method":"GET", + "requestUri":"/access-policies/{accessPolicyId}", + "responseCode":200 + }, + "input":{"shape":"DescribeAccessPolicyRequest"}, + "output":{"shape":"DescribeAccessPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Describes an access policy, which specifies an identity's access to an AWS IoT SiteWise Monitor portal or project.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "DescribeAsset":{ + "name":"DescribeAsset", + "http":{ + "method":"GET", + "requestUri":"/assets/{assetId}" + }, + "input":{"shape":"DescribeAssetRequest"}, + "output":{"shape":"DescribeAssetResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves information about an asset.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "DescribeAssetModel":{ + "name":"DescribeAssetModel", + "http":{ + "method":"GET", + "requestUri":"/asset-models/{assetModelId}" + }, + "input":{"shape":"DescribeAssetModelRequest"}, + "output":{"shape":"DescribeAssetModelResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves information about an asset model.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "DescribeAssetProperty":{ + "name":"DescribeAssetProperty", + "http":{ + "method":"GET", + "requestUri":"/assets/{assetId}/properties/{propertyId}" + }, + "input":{"shape":"DescribeAssetPropertyRequest"}, + "output":{"shape":"DescribeAssetPropertyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves information about an asset property.

    When you call this operation for an attribute property, this response includes the default attribute value that you define in the asset model. If you update the default value in the model, this operation's response includes the new default value.

    This operation doesn't return the value of the asset property. To get the value of an asset property, use GetAssetPropertyValue.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "DescribeDashboard":{ + "name":"DescribeDashboard", + "http":{ + "method":"GET", + "requestUri":"/dashboards/{dashboardId}", + "responseCode":200 + }, + "input":{"shape":"DescribeDashboardRequest"}, + "output":{"shape":"DescribeDashboardResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves information about a dashboard.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "DescribeDefaultEncryptionConfiguration":{ + "name":"DescribeDefaultEncryptionConfiguration", + "http":{ + "method":"GET", + "requestUri":"/configuration/account/encryption" + }, + "input":{"shape":"DescribeDefaultEncryptionConfigurationRequest"}, + "output":{"shape":"DescribeDefaultEncryptionConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves information about the default encryption configuration for the AWS account in the default or specified region. For more information, see Key management in the AWS IoT SiteWise User Guide.

    " + }, + "DescribeGateway":{ + "name":"DescribeGateway", + "http":{ + "method":"GET", + "requestUri":"/20200301/gateways/{gatewayId}" + }, + "input":{"shape":"DescribeGatewayRequest"}, + "output":{"shape":"DescribeGatewayResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves information about a gateway.

    ", + "endpoint":{"hostPrefix":"edge."} + }, + "DescribeGatewayCapabilityConfiguration":{ + "name":"DescribeGatewayCapabilityConfiguration", + "http":{ + "method":"GET", + "requestUri":"/20200301/gateways/{gatewayId}/capability/{capabilityNamespace}" + }, + "input":{"shape":"DescribeGatewayCapabilityConfigurationRequest"}, + "output":{"shape":"DescribeGatewayCapabilityConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves information about a gateway capability configuration. Each gateway capability defines data sources for a gateway. A capability configuration can contain multiple data source configurations. If you define OPC-UA sources for a gateway in the AWS IoT SiteWise console, all of your OPC-UA sources are stored in one capability configuration. To list all capability configurations for a gateway, use DescribeGateway.

    ", + "endpoint":{"hostPrefix":"edge."} + }, + "DescribeLoggingOptions":{ + "name":"DescribeLoggingOptions", + "http":{ + "method":"GET", + "requestUri":"/logging" + }, + "input":{"shape":"DescribeLoggingOptionsRequest"}, + "output":{"shape":"DescribeLoggingOptionsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieves the current AWS IoT SiteWise logging options.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "DescribePortal":{ + "name":"DescribePortal", + "http":{ + "method":"GET", + "requestUri":"/portals/{portalId}", + "responseCode":200 + }, + "input":{"shape":"DescribePortalRequest"}, + "output":{"shape":"DescribePortalResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves information about a portal.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "DescribeProject":{ + "name":"DescribeProject", + "http":{ + "method":"GET", + "requestUri":"/projects/{projectId}", + "responseCode":200 + }, + "input":{"shape":"DescribeProjectRequest"}, + "output":{"shape":"DescribeProjectResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves information about a project.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "DisassociateAssets":{ + "name":"DisassociateAssets", + "http":{ + "method":"POST", + "requestUri":"/assets/{assetId}/disassociate" + }, + "input":{"shape":"DisassociateAssetsRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Disassociates a child asset from the given parent asset through a hierarchy defined in the parent asset's model.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "GetAssetPropertyAggregates":{ + "name":"GetAssetPropertyAggregates", + "http":{ + "method":"GET", + "requestUri":"/properties/aggregates" + }, + "input":{"shape":"GetAssetPropertyAggregatesRequest"}, + "output":{"shape":"GetAssetPropertyAggregatesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

    Gets aggregated values for an asset property. For more information, see Querying aggregates in the AWS IoT SiteWise User Guide.

    To identify an asset property, you must specify one of the following:

    • The assetId and propertyId of an asset property.

    • A propertyAlias, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see UpdateAssetProperty.
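
    For example, hourly AVERAGE and MAXIMUM aggregates over the last day could be fetched like this (a sketch assuming the IoTSiteWiseClient class name; the alias is a placeholder):

    import java.time.Duration;
    import java.time.Instant;
    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;
    import software.amazon.awssdk.services.iotsitewise.model.AggregateType;
    import software.amazon.awssdk.services.iotsitewise.model.GetAssetPropertyAggregatesResponse;

    public class GetAggregatesExample {
        public static void main(String[] args) {
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                Instant end = Instant.now();
                Instant start = end.minus(Duration.ofDays(1));

                GetAssetPropertyAggregatesResponse response = siteWise.getAssetPropertyAggregates(r -> r
                        .propertyAlias("/company/windfarm/3/turbine/7/temperature")  // placeholder alias
                        .aggregateTypes(AggregateType.AVERAGE, AggregateType.MAXIMUM)
                        .resolution("1h")
                        .startDate(start)
                        .endDate(end));

                response.aggregatedValues().forEach(v -> System.out.println(
                        v.timestamp() + " avg=" + v.value().average() + " max=" + v.value().maximum()));
            }
        }
    }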

    ", + "endpoint":{"hostPrefix":"data."} + }, + "GetAssetPropertyValue":{ + "name":"GetAssetPropertyValue", + "http":{ + "method":"GET", + "requestUri":"/properties/latest" + }, + "input":{"shape":"GetAssetPropertyValueRequest"}, + "output":{"shape":"GetAssetPropertyValueResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

    Gets an asset property's current value. For more information, see Querying current values in the AWS IoT SiteWise User Guide.

    To identify an asset property, you must specify one of the following:

    • The assetId and propertyId of an asset property.

    • A propertyAlias, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see UpdateAssetProperty.
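
    A minimal sketch of reading the latest value (placeholder IDs; assumes a DOUBLE property and the IoTSiteWiseClient class name):

    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;
    import software.amazon.awssdk.services.iotsitewise.model.AssetPropertyValue;

    public class GetCurrentValueExample {
        public static void main(String[] args) {
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                // Identify the property by assetId + propertyId (a propertyAlias works as well).
                AssetPropertyValue latest = siteWise.getAssetPropertyValue(r -> r
                                .assetId("asset-id")           // placeholder
                                .propertyId("property-id"))    // placeholder
                        .propertyValue();

                // doubleValue() is populated because this sketch assumes a DOUBLE property.
                System.out.println(latest.value().doubleValue()
                        + " (" + latest.qualityAsString() + ") at " + latest.timestamp().timeInSeconds());
            }
        }
    }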

    ", + "endpoint":{"hostPrefix":"data."} + }, + "GetAssetPropertyValueHistory":{ + "name":"GetAssetPropertyValueHistory", + "http":{ + "method":"GET", + "requestUri":"/properties/history" + }, + "input":{"shape":"GetAssetPropertyValueHistoryRequest"}, + "output":{"shape":"GetAssetPropertyValueHistoryResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

    Gets the history of an asset property's values. For more information, see Querying historical values in the AWS IoT SiteWise User Guide.

    To identify an asset property, you must specify one of the following:

    • The assetId and propertyId of an asset property.

    • A propertyAlias, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see UpdateAssetProperty.
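
    Because paginators-1.json above declares pagination for this operation, the generated client also exposes a paginator that follows nextToken automatically; a sketch (placeholder alias, assumed IoTSiteWiseClient class name):

    import java.time.Duration;
    import java.time.Instant;
    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;

    public class GetValueHistoryExample {
        public static void main(String[] args) {
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                Instant end = Instant.now();
                Instant start = end.minus(Duration.ofHours(6));

                // The paginator requests additional pages as the stream is consumed.
                siteWise.getAssetPropertyValueHistoryPaginator(r -> r
                                .propertyAlias("/company/windfarm/3/turbine/7/temperature")  // placeholder
                                .startDate(start)
                                .endDate(end))
                        .stream()
                        .flatMap(page -> page.assetPropertyValueHistory().stream())
                        .forEach(tqv -> System.out.println(
                                tqv.timestamp().timeInSeconds() + " -> " + tqv.value().doubleValue()));
            }
        }
    }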

    ", + "endpoint":{"hostPrefix":"data."} + }, + "ListAccessPolicies":{ + "name":"ListAccessPolicies", + "http":{ + "method":"GET", + "requestUri":"/access-policies", + "responseCode":200 + }, + "input":{"shape":"ListAccessPoliciesRequest"}, + "output":{"shape":"ListAccessPoliciesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of access policies for an identity (an AWS SSO user, an AWS SSO group, or an IAM user) or an AWS IoT SiteWise Monitor resource (a portal or project).

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "ListAssetModels":{ + "name":"ListAssetModels", + "http":{ + "method":"GET", + "requestUri":"/asset-models" + }, + "input":{"shape":"ListAssetModelsRequest"}, + "output":{"shape":"ListAssetModelsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of summaries of all asset models.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "ListAssetRelationships":{ + "name":"ListAssetRelationships", + "http":{ + "method":"GET", + "requestUri":"/assets/{assetId}/assetRelationships" + }, + "input":{"shape":"ListAssetRelationshipsRequest"}, + "output":{"shape":"ListAssetRelationshipsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of asset relationships for an asset. You can use this operation to identify an asset's root asset and all associated assets between that asset and its root.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "ListAssets":{ + "name":"ListAssets", + "http":{ + "method":"GET", + "requestUri":"/assets" + }, + "input":{"shape":"ListAssetsRequest"}, + "output":{"shape":"ListAssetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of asset summaries.

    You can use this operation to do the following:

    • List assets based on a specific asset model.

    • List top-level assets.

    You can't use this operation to list all assets. To retrieve summaries for all of your assets, use ListAssetModels to get all of your asset model IDs. Then, use ListAssets to get all assets for each asset model.
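
    A sketch of that two-step pattern using the generated paginators (assuming the IoTSiteWiseClient class name):

    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;
    import software.amazon.awssdk.services.iotsitewise.model.ListAssetModelsRequest;

    public class ListAllAssetsExample {
        public static void main(String[] args) {
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                // Step 1: list every asset model. Step 2: list the assets created from each model.
                siteWise.listAssetModelsPaginator(ListAssetModelsRequest.builder().build())
                        .stream()
                        .flatMap(page -> page.assetModelSummaries().stream())
                        .forEach(model ->
                                siteWise.listAssetsPaginator(r -> r.assetModelId(model.id()))
                                        .stream()
                                        .flatMap(page -> page.assetSummaries().stream())
                                        .forEach(asset ->
                                                System.out.println(model.name() + " / " + asset.name())));
            }
        }
    }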

    ", + "endpoint":{"hostPrefix":"model."} + }, + "ListAssociatedAssets":{ + "name":"ListAssociatedAssets", + "http":{ + "method":"GET", + "requestUri":"/assets/{assetId}/hierarchies" + }, + "input":{"shape":"ListAssociatedAssetsRequest"}, + "output":{"shape":"ListAssociatedAssetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of associated assets.

    You can use this operation to do the following:

    • List child assets associated with a parent asset through a hierarchy that you specify.

    • List an asset's parent asset.
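
    A sketch of listing the child assets under one hierarchy (placeholder IDs; the hierarchyId would normally come from DescribeAsset or DescribeAssetModel; IoTSiteWiseClient is the assumed client class):

    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;

    public class ListChildAssetsExample {
        public static void main(String[] args) {
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                siteWise.listAssociatedAssetsPaginator(r -> r
                                .assetId("parent-asset-id")   // placeholder
                                .hierarchyId("hierarchy-id")) // placeholder, defined on the parent's model
                        .stream()
                        .flatMap(page -> page.assetSummaries().stream())
                        .forEach(child -> System.out.println(child.id() + " " + child.name()));
            }
        }
    }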

    ", + "endpoint":{"hostPrefix":"model."} + }, + "ListDashboards":{ + "name":"ListDashboards", + "http":{ + "method":"GET", + "requestUri":"/dashboards", + "responseCode":200 + }, + "input":{"shape":"ListDashboardsRequest"}, + "output":{"shape":"ListDashboardsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of dashboards for an AWS IoT SiteWise Monitor project.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "ListGateways":{ + "name":"ListGateways", + "http":{ + "method":"GET", + "requestUri":"/20200301/gateways" + }, + "input":{"shape":"ListGatewaysRequest"}, + "output":{"shape":"ListGatewaysResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of gateways.

    ", + "endpoint":{"hostPrefix":"edge."} + }, + "ListPortals":{ + "name":"ListPortals", + "http":{ + "method":"GET", + "requestUri":"/portals", + "responseCode":200 + }, + "input":{"shape":"ListPortalsRequest"}, + "output":{"shape":"ListPortalsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of AWS IoT SiteWise Monitor portals.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "ListProjectAssets":{ + "name":"ListProjectAssets", + "http":{ + "method":"GET", + "requestUri":"/projects/{projectId}/assets", + "responseCode":200 + }, + "input":{"shape":"ListProjectAssetsRequest"}, + "output":{"shape":"ListProjectAssetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of assets associated with an AWS IoT SiteWise Monitor project.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "ListProjects":{ + "name":"ListProjects", + "http":{ + "method":"GET", + "requestUri":"/projects", + "responseCode":200 + }, + "input":{"shape":"ListProjectsRequest"}, + "output":{"shape":"ListProjectsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a paginated list of projects for an AWS IoT SiteWise Monitor portal.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictingOperationException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

    Retrieves the list of tags for an AWS IoT SiteWise resource.

    " + }, + "PutDefaultEncryptionConfiguration":{ + "name":"PutDefaultEncryptionConfiguration", + "http":{ + "method":"POST", + "requestUri":"/configuration/account/encryption" + }, + "input":{"shape":"PutDefaultEncryptionConfigurationRequest"}, + "output":{"shape":"PutDefaultEncryptionConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Sets the default encryption configuration for the AWS account. For more information, see Key management in the AWS IoT SiteWise User Guide.

    " + }, + "PutLoggingOptions":{ + "name":"PutLoggingOptions", + "http":{ + "method":"PUT", + "requestUri":"/logging" + }, + "input":{"shape":"PutLoggingOptionsRequest"}, + "output":{"shape":"PutLoggingOptionsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictingOperationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Sets logging options for AWS IoT SiteWise.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictingOperationException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyTagsException"} + ], + "documentation":"

    Adds tags to an AWS IoT SiteWise resource. If a tag already exists for the resource, this operation updates the tag's value.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictingOperationException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

    Removes a tag from an AWS IoT SiteWise resource.

    " + }, + "UpdateAccessPolicy":{ + "name":"UpdateAccessPolicy", + "http":{ + "method":"PUT", + "requestUri":"/access-policies/{accessPolicyId}", + "responseCode":200 + }, + "input":{"shape":"UpdateAccessPolicyRequest"}, + "output":{"shape":"UpdateAccessPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Updates an existing access policy that specifies an identity's access to an AWS IoT SiteWise Monitor portal or project resource.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "UpdateAsset":{ + "name":"UpdateAsset", + "http":{ + "method":"PUT", + "requestUri":"/assets/{assetId}", + "responseCode":202 + }, + "input":{"shape":"UpdateAssetRequest"}, + "output":{"shape":"UpdateAssetResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Updates an asset's name. For more information, see Updating assets and models in the AWS IoT SiteWise User Guide.

    ", + "endpoint":{"hostPrefix":"model."} + }, + "UpdateAssetModel":{ + "name":"UpdateAssetModel", + "http":{ + "method":"PUT", + "requestUri":"/asset-models/{assetModelId}", + "responseCode":202 + }, + "input":{"shape":"UpdateAssetModelRequest"}, + "output":{"shape":"UpdateAssetModelResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the AWS IoT SiteWise User Guide.

    This operation overwrites the existing model with the provided model. To avoid deleting your asset model's properties or hierarchies, you must include their IDs and definitions in the updated asset model payload. For more information, see DescribeAssetModel.

    If you remove a property from an asset model, AWS IoT SiteWise deletes all previous data for that property. If you remove a hierarchy definition from an asset model, AWS IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the type or data type of an existing property.
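
    One way to respect that overwrite behavior is a describe-then-update round trip that sends the existing definitions back unchanged; a sketch (placeholder model ID, assumed IoTSiteWiseClient class name):

    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;
    import software.amazon.awssdk.services.iotsitewise.model.DescribeAssetModelResponse;

    public class RenameAssetModelExample {
        public static void main(String[] args) {
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                String assetModelId = "asset-model-id"; // placeholder

                // Read the current definition, then resend it with only the name changed so that
                // existing properties, hierarchies, and composite models (and their data) are kept.
                DescribeAssetModelResponse current =
                        siteWise.describeAssetModel(r -> r.assetModelId(assetModelId));

                siteWise.updateAssetModel(r -> r
                        .assetModelId(assetModelId)
                        .assetModelName("Wind Turbine v2") // placeholder new name
                        .assetModelDescription(current.assetModelDescription())
                        .assetModelProperties(current.assetModelProperties())
                        .assetModelHierarchies(current.assetModelHierarchies())
                        .assetModelCompositeModels(current.assetModelCompositeModels()));
            }
        }
    }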

    ", + "endpoint":{"hostPrefix":"model."} + }, + "UpdateAssetProperty":{ + "name":"UpdateAssetProperty", + "http":{ + "method":"PUT", + "requestUri":"/assets/{assetId}/properties/{propertyId}" + }, + "input":{"shape":"UpdateAssetPropertyRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Updates an asset property's alias and notification state.

    This operation overwrites the property's existing alias and notification state. To keep your existing property's alias or notification state, you must include the existing values in the UpdateAssetProperty request. For more information, see DescribeAssetProperty.
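
    A sketch of setting an alias while explicitly re-stating the notification state so it is not silently reset (placeholder IDs, assumed IoTSiteWiseClient class name):

    import software.amazon.awssdk.services.iotsitewise.IoTSiteWiseClient;
    import software.amazon.awssdk.services.iotsitewise.model.PropertyNotificationState;

    public class SetPropertyAliasExample {
        public static void main(String[] args) {
            try (IoTSiteWiseClient siteWise = IoTSiteWiseClient.create()) {
                // Both fields are overwritten by this call, so the notification state is set explicitly.
                siteWise.updateAssetProperty(r -> r
                        .assetId("asset-id")        // placeholder
                        .propertyId("property-id")  // placeholder
                        .propertyAlias("/company/windfarm/3/turbine/7/temperature")
                        .propertyNotificationState(PropertyNotificationState.ENABLED));
            }
        }
    }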

    ", + "endpoint":{"hostPrefix":"model."} + }, + "UpdateDashboard":{ + "name":"UpdateDashboard", + "http":{ + "method":"PUT", + "requestUri":"/dashboards/{dashboardId}", + "responseCode":200 + }, + "input":{"shape":"UpdateDashboardRequest"}, + "output":{"shape":"UpdateDashboardResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Updates an AWS IoT SiteWise Monitor dashboard.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "UpdateGateway":{ + "name":"UpdateGateway", + "http":{ + "method":"PUT", + "requestUri":"/20200301/gateways/{gatewayId}" + }, + "input":{"shape":"UpdateGatewayRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictingOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Updates a gateway's name.

    ", + "endpoint":{"hostPrefix":"edge."} + }, + "UpdateGatewayCapabilityConfiguration":{ + "name":"UpdateGatewayCapabilityConfiguration", + "http":{ + "method":"POST", + "requestUri":"/20200301/gateways/{gatewayId}/capability", + "responseCode":201 + }, + "input":{"shape":"UpdateGatewayCapabilityConfigurationRequest"}, + "output":{"shape":"UpdateGatewayCapabilityConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictingOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Updates a gateway capability configuration or defines a new capability configuration. Each gateway capability defines data sources for a gateway. A capability configuration can contain multiple data source configurations. If you define OPC-UA sources for a gateway in the AWS IoT SiteWise console, all of your OPC-UA sources are stored in one capability configuration. To list all capability configurations for a gateway, use DescribeGateway.

    ", + "endpoint":{"hostPrefix":"edge."} + }, + "UpdatePortal":{ + "name":"UpdatePortal", + "http":{ + "method":"PUT", + "requestUri":"/portals/{portalId}", + "responseCode":202 + }, + "input":{"shape":"UpdatePortalRequest"}, + "output":{"shape":"UpdatePortalResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

    Updates an AWS IoT SiteWise Monitor portal.

    ", + "endpoint":{"hostPrefix":"monitor."} + }, + "UpdateProject":{ + "name":"UpdateProject", + "http":{ + "method":"PUT", + "requestUri":"/projects/{projectId}", + "responseCode":200 + }, + "input":{"shape":"UpdateProjectRequest"}, + "output":{"shape":"UpdateProjectResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Updates an AWS IoT SiteWise Monitor project.

    ", + "endpoint":{"hostPrefix":"monitor."} + } + }, + "shapes":{ + "ARN":{ + "type":"string", + "max":1600, + "min":1, + "pattern":".*" + }, + "AccessPolicySummaries":{ + "type":"list", + "member":{"shape":"AccessPolicySummary"} + }, + "AccessPolicySummary":{ + "type":"structure", + "required":[ + "id", + "identity", + "resource", + "permission" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the access policy.

    " + }, + "identity":{ + "shape":"Identity", + "documentation":"

    The identity (an AWS SSO user, an AWS SSO group, or an IAM user).

    " + }, + "resource":{ + "shape":"Resource", + "documentation":"

    The AWS IoT SiteWise Monitor resource (a portal or project).

    " + }, + "permission":{ + "shape":"Permission", + "documentation":"

    The permissions for the access policy. Note that a project ADMINISTRATOR is also known as a project owner.

    " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the access policy was created, in Unix epoch time.

    " + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the access policy was last updated, in Unix epoch time.

    " + } + }, + "documentation":"

    Contains an access policy that defines an identity's access to an AWS IoT SiteWise Monitor resource.

    " + }, + "AggregateType":{ + "type":"string", + "enum":[ + "AVERAGE", + "COUNT", + "MAXIMUM", + "MINIMUM", + "SUM", + "STANDARD_DEVIATION" + ] + }, + "AggregateTypes":{ + "type":"list", + "member":{"shape":"AggregateType"}, + "min":1 + }, + "AggregatedDoubleValue":{"type":"double"}, + "AggregatedValue":{ + "type":"structure", + "required":[ + "timestamp", + "value" + ], + "members":{ + "timestamp":{ + "shape":"Timestamp", + "documentation":"

    The date the aggregating computations occurred, in Unix epoch time.

    " + }, + "quality":{ + "shape":"Quality", + "documentation":"

    The quality of the aggregated data.

    " + }, + "value":{ + "shape":"Aggregates", + "documentation":"

    The value of the aggregates.

    " + } + }, + "documentation":"

    Contains aggregated asset property values (for example, average, minimum, and maximum).

    " + }, + "AggregatedValues":{ + "type":"list", + "member":{"shape":"AggregatedValue"} + }, + "Aggregates":{ + "type":"structure", + "members":{ + "average":{ + "shape":"AggregatedDoubleValue", + "documentation":"

    The average (mean) value of the time series over a time interval window.

    " + }, + "count":{ + "shape":"AggregatedDoubleValue", + "documentation":"

    The count of data points in the time series over a time interval window.

    " + }, + "maximum":{ + "shape":"AggregatedDoubleValue", + "documentation":"

    The maximum value of the time series over a time interval window.

    " + }, + "minimum":{ + "shape":"AggregatedDoubleValue", + "documentation":"

    The minimum value of the time series over a time interval window.

    " + }, + "sum":{ + "shape":"AggregatedDoubleValue", + "documentation":"

    The sum of the time series over a time interval window.

    " + }, + "standardDeviation":{ + "shape":"AggregatedDoubleValue", + "documentation":"

    The standard deviation of the time series over a time interval window.

    " + } + }, + "documentation":"

    Contains the (pre-calculated) aggregate values for an asset property.

    " + }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, + "AssetCompositeModel":{ + "type":"structure", + "required":[ + "name", + "type", + "properties" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

    The name of the composite model.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the composite model.

    " + }, + "type":{ + "shape":"Name", + "documentation":"

    The type of the composite model. For alarm composite models, this type is AWS/ALARM.

    " + }, + "properties":{ + "shape":"AssetProperties", + "documentation":"

    The asset properties that this composite model defines.

    " + } + }, + "documentation":"

    Contains information about a composite model in an asset. This object contains the asset's properties that you define in the composite model.

    " + }, + "AssetCompositeModels":{ + "type":"list", + "member":{"shape":"AssetCompositeModel"} + }, + "AssetErrorCode":{ + "type":"string", + "enum":["INTERNAL_FAILURE"] + }, + "AssetErrorDetails":{ + "type":"structure", + "required":[ + "assetId", + "code", + "message" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    " + }, + "code":{ + "shape":"AssetErrorCode", + "documentation":"

    The error code.

    " + }, + "message":{ + "shape":"AssetErrorMessage", + "documentation":"

    The error message.

    " + } + }, + "documentation":"

    Contains error details for the requested associate project asset action.

    " + }, + "AssetErrorMessage":{"type":"string"}, + "AssetHierarchies":{ + "type":"list", + "member":{"shape":"AssetHierarchy"} + }, + "AssetHierarchy":{ + "type":"structure", + "required":["name"], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the hierarchy. This ID is a hierarchyId.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The hierarchy name provided in the CreateAssetModel or UpdateAssetModel API operation.

    " + } + }, + "documentation":"

    Describes an asset hierarchy that contains a hierarchy's name and ID.

    " + }, + "AssetHierarchyInfo":{ + "type":"structure", + "members":{ + "parentAssetId":{ + "shape":"ID", + "documentation":"

    The ID of the parent asset in this asset relationship.

    " + }, + "childAssetId":{ + "shape":"ID", + "documentation":"

    The ID of the child asset in this asset relationship.

    " + } + }, + "documentation":"

    Contains information about a parent asset and a child asset that are related through an asset hierarchy.

    " + }, + "AssetIDs":{ + "type":"list", + "member":{"shape":"ID"} + }, + "AssetModelCompositeModel":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

    The name of the composite model.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the composite model.

    " + }, + "type":{ + "shape":"Name", + "documentation":"

    The type of the composite model. For alarm composite models, this type is AWS/ALARM.

    " + }, + "properties":{ + "shape":"AssetModelProperties", + "documentation":"

    The asset property definitions for this composite model.

    " + } + }, + "documentation":"

    Contains information about a composite model in an asset model. This object contains the asset property definitions that you define in the composite model.

    " + }, + "AssetModelCompositeModelDefinition":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

    The name of the composite model.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the composite model.

    " + }, + "type":{ + "shape":"Name", + "documentation":"

    The type of the composite model. For alarm composite models, this type is AWS/ALARM.

    " + }, + "properties":{ + "shape":"AssetModelPropertyDefinitions", + "documentation":"

    The asset property definitions for this composite model.

    " + } + }, + "documentation":"

    Contains a composite model definition in an asset model. This composite model definition is applied to all assets created from the asset model.

    " + }, + "AssetModelCompositeModelDefinitions":{ + "type":"list", + "member":{"shape":"AssetModelCompositeModelDefinition"} + }, + "AssetModelCompositeModels":{ + "type":"list", + "member":{"shape":"AssetModelCompositeModel"} + }, + "AssetModelHierarchies":{ + "type":"list", + "member":{"shape":"AssetModelHierarchy"} + }, + "AssetModelHierarchy":{ + "type":"structure", + "required":[ + "name", + "childAssetModelId" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the asset model hierarchy. This ID is a hierarchyId.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the asset model hierarchy that you specify by using the CreateAssetModel or UpdateAssetModel API operation.

    " + }, + "childAssetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model. All assets in this hierarchy must be instances of the childAssetModelId asset model.

    " + } + }, + "documentation":"

    Describes an asset hierarchy that contains a hierarchy's name, ID, and child asset model ID that specifies the type of asset that can be in this hierarchy.

    " + }, + "AssetModelHierarchyDefinition":{ + "type":"structure", + "required":[ + "name", + "childAssetModelId" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

    The name of the asset model hierarchy definition (as specified in the CreateAssetModel or UpdateAssetModel API operation).

    " + }, + "childAssetModelId":{ + "shape":"ID", + "documentation":"

    The ID of an asset model for this hierarchy.

    " + } + }, + "documentation":"

    Contains an asset model hierarchy used in asset model creation. An asset model hierarchy determines the kind (or type) of asset that can belong to a hierarchy.

    " + }, + "AssetModelHierarchyDefinitions":{ + "type":"list", + "member":{"shape":"AssetModelHierarchyDefinition"} + }, + "AssetModelProperties":{ + "type":"list", + "member":{"shape":"AssetModelProperty"} + }, + "AssetModelProperty":{ + "type":"structure", + "required":[ + "name", + "dataType", + "type" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the asset model property.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the asset model property.

    " + }, + "dataType":{ + "shape":"PropertyDataType", + "documentation":"

    The data type of the asset model property.

    " + }, + "dataTypeSpec":{ + "shape":"Name", + "documentation":"

    The data type of the structure for this property. This parameter exists on properties that have the STRUCT data type.

    " + }, + "unit":{ + "shape":"PropertyUnit", + "documentation":"

    The unit of the asset model property, such as Newtons or RPM.

    " + }, + "type":{ + "shape":"PropertyType", + "documentation":"

    The property type (see PropertyType).

    " + } + }, + "documentation":"

    Contains information about an asset model property.

    " + }, + "AssetModelPropertyDefinition":{ + "type":"structure", + "required":[ + "name", + "dataType", + "type" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

    The name of the property definition.

    " + }, + "dataType":{ + "shape":"PropertyDataType", + "documentation":"

    The data type of the property definition.

    If you specify STRUCT, you must also specify dataTypeSpec to identify the type of the structure for this property.

    " + }, + "dataTypeSpec":{ + "shape":"Name", + "documentation":"

    The data type of the structure for this property. This parameter is required on properties that have the STRUCT data type.

    The options for this parameter depend on the type of the composite model in which you define this property. Use AWS/ALARM_STATE for alarm state in alarm composite models.

    " + }, + "unit":{ + "shape":"PropertyUnit", + "documentation":"

    The unit of the property definition, such as Newtons or RPM.

    " + }, + "type":{ + "shape":"PropertyType", + "documentation":"

    The property definition type (see PropertyType). You can only specify one type in a property definition.

    " + } + }, + "documentation":"

    Contains an asset model property definition. This property definition is applied to all assets created from the asset model.

    " + }, + "AssetModelPropertyDefinitions":{ + "type":"list", + "member":{"shape":"AssetModelPropertyDefinition"} + }, + "AssetModelState":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "PROPAGATING", + "DELETING", + "FAILED" + ] + }, + "AssetModelStatus":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{ + "shape":"AssetModelState", + "documentation":"

    The current state of the asset model.

    " + }, + "error":{ + "shape":"ErrorDetails", + "documentation":"

    Contains associated error information, if any.

    " + } + }, + "documentation":"

    Contains current status information for an asset model. For more information, see Asset and model states in the AWS IoT SiteWise User Guide.

    " + }, + "AssetModelSummaries":{ + "type":"list", + "member":{"shape":"AssetModelSummary"} + }, + "AssetModelSummary":{ + "type":"structure", + "required":[ + "id", + "arn", + "name", + "description", + "creationDate", + "lastUpdateDate", + "status" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the asset model (used with AWS IoT SiteWise APIs).

    " + }, + "arn":{ + "shape":"ARN", + "documentation":"

    The ARN of the asset model, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the asset model.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The asset model description.

    " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the asset model was created, in Unix epoch time.

    " + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the asset model was last updated, in Unix epoch time.

    " + }, + "status":{ + "shape":"AssetModelStatus", + "documentation":"

    The current status of the asset model.

    " + } + }, + "documentation":"

    Contains a summary of an asset model.

    " + }, + "AssetProperties":{ + "type":"list", + "member":{"shape":"AssetProperty"} + }, + "AssetProperty":{ + "type":"structure", + "required":[ + "id", + "name", + "dataType" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the asset property.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the property.

    " + }, + "alias":{ + "shape":"PropertyAlias", + "documentation":"

    The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the AWS IoT SiteWise User Guide.

    " + }, + "notification":{ + "shape":"PropertyNotification", + "documentation":"

    The asset property's notification topic and state. For more information, see UpdateAssetProperty.

    " + }, + "dataType":{ + "shape":"PropertyDataType", + "documentation":"

    The data type of the asset property.

    " + }, + "dataTypeSpec":{ + "shape":"Name", + "documentation":"

    The data type of the structure for this property. This parameter exists on properties that have the STRUCT data type.

    " + }, + "unit":{ + "shape":"PropertyUnit", + "documentation":"

    The unit (such as Newtons or RPM) of the asset property.

    " + } + }, + "documentation":"

    Contains asset property information.

    " + }, + "AssetPropertyAlias":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "AssetPropertyValue":{ + "type":"structure", + "required":[ + "value", + "timestamp" + ], + "members":{ + "value":{ + "shape":"Variant", + "documentation":"

    The value of the asset property (see Variant).

    " + }, + "timestamp":{ + "shape":"TimeInNanos", + "documentation":"

    The timestamp of the asset property value.

    " + }, + "quality":{ + "shape":"Quality", + "documentation":"

    The quality of the asset property value.

    " + } + }, + "documentation":"

    Contains asset property value information.

    " + }, + "AssetPropertyValueHistory":{ + "type":"list", + "member":{"shape":"AssetPropertyValue"} + }, + "AssetPropertyValues":{ + "type":"list", + "member":{"shape":"AssetPropertyValue"} + }, + "AssetRelationshipSummaries":{ + "type":"list", + "member":{"shape":"AssetRelationshipSummary"} + }, + "AssetRelationshipSummary":{ + "type":"structure", + "required":["relationshipType"], + "members":{ + "hierarchyInfo":{ + "shape":"AssetHierarchyInfo", + "documentation":"

    The assets that are related through an asset hierarchy.

    This object is present if the relationshipType is HIERARCHY.

    " + }, + "relationshipType":{ + "shape":"AssetRelationshipType", + "documentation":"

    The relationship type of the assets in this relationship. This value is one of the following:

    • HIERARCHY – The assets are related through an asset hierarchy. If you specify this relationship type, this asset relationship includes the hierarchyInfo object.

    " + } + }, + "documentation":"

    Contains information about assets that are related to one another.

    " + }, + "AssetRelationshipType":{ + "type":"string", + "enum":["HIERARCHY"] + }, + "AssetState":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETING", + "FAILED" + ] + }, + "AssetStatus":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{ + "shape":"AssetState", + "documentation":"

    The current status of the asset.

    " + }, + "error":{ + "shape":"ErrorDetails", + "documentation":"

    Contains associated error information, if any.

    " + } + }, + "documentation":"

    Contains information about the current status of an asset. For more information, see Asset and model states in the AWS IoT SiteWise User Guide.

    " + }, + "AssetSummaries":{ + "type":"list", + "member":{"shape":"AssetSummary"} + }, + "AssetSummary":{ + "type":"structure", + "required":[ + "id", + "arn", + "name", + "assetModelId", + "creationDate", + "lastUpdateDate", + "status", + "hierarchies" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    " + }, + "arn":{ + "shape":"ARN", + "documentation":"

    The ARN of the asset, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the asset.

    " + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model used to create this asset.

    " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the asset was created, in Unix epoch time.

    " + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the asset was last updated, in Unix epoch time.

    " + }, + "status":{ + "shape":"AssetStatus", + "documentation":"

    The current status of the asset.

    " + }, + "hierarchies":{ + "shape":"AssetHierarchies", + "documentation":"

    A list of asset hierarchies that each contain a hierarchyId. A hierarchy specifies allowed parent/child asset relationships.

    " + } + }, + "documentation":"

    Contains a summary of an asset.

    " + }, + "AssociateAssetsRequest":{ + "type":"structure", + "required":[ + "assetId", + "hierarchyId", + "childAssetId" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the parent asset.

    ", + "location":"uri", + "locationName":"assetId" + }, + "hierarchyId":{ + "shape":"ID", + "documentation":"

    The ID of a hierarchy in the parent asset's model. Hierarchies allow different groupings of assets to be formed that all come from the same asset model. For more information, see Asset hierarchies in the AWS IoT SiteWise User Guide.

    " + }, + "childAssetId":{ + "shape":"ID", + "documentation":"

    The ID of the child asset to be associated.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "AssociatedAssetsSummaries":{ + "type":"list", + "member":{"shape":"AssociatedAssetsSummary"} + }, + "AssociatedAssetsSummary":{ + "type":"structure", + "required":[ + "id", + "arn", + "name", + "assetModelId", + "creationDate", + "lastUpdateDate", + "status", + "hierarchies" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    " + }, + "arn":{ + "shape":"ARN", + "documentation":"

    The ARN of the asset, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the asset.

    " + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model used to create the asset.

    " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the asset was created, in Unix epoch time.

    " + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the asset was last updated, in Unix epoch time.

    " + }, + "status":{ + "shape":"AssetStatus", + "documentation":"

    The current status of the asset.

    " + }, + "hierarchies":{ + "shape":"AssetHierarchies", + "documentation":"

    A list of asset hierarchies that each contain a hierarchyId. A hierarchy specifies allowed parent/child asset relationships.

    " + } + }, + "documentation":"

    Contains a summary of an associated asset.

    " + }, + "Attribute":{ + "type":"structure", + "members":{ + "defaultValue":{ + "shape":"DefaultValue", + "documentation":"

    The default value of the asset model property attribute. All assets that you create from the asset model contain this attribute value. You can update an attribute's value after you create an asset. For more information, see Updating attribute values in the AWS IoT SiteWise User Guide.

    " + } + }, + "documentation":"

    Contains an asset attribute property. For more information, see Attributes in the AWS IoT SiteWise User Guide.

    " + }, + "AuthMode":{ + "type":"string", + "enum":[ + "IAM", + "SSO" + ] + }, + "BatchAssociateProjectAssetsErrors":{ + "type":"list", + "member":{"shape":"AssetErrorDetails"} + }, + "BatchAssociateProjectAssetsRequest":{ + "type":"structure", + "required":[ + "projectId", + "assetIds" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project to which to associate the assets.

    ", + "location":"uri", + "locationName":"projectId" + }, + "assetIds":{ + "shape":"IDs", + "documentation":"

    The IDs of the assets to be associated with the project.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "BatchAssociateProjectAssetsResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"BatchAssociateProjectAssetsErrors", + "documentation":"

    A list of associated error information, if any.

    " + } + } + }, + "BatchDisassociateProjectAssetsErrors":{ + "type":"list", + "member":{"shape":"AssetErrorDetails"} + }, + "BatchDisassociateProjectAssetsRequest":{ + "type":"structure", + "required":[ + "projectId", + "assetIds" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project from which to disassociate the assets.

    ", + "location":"uri", + "locationName":"projectId" + }, + "assetIds":{ + "shape":"IDs", + "documentation":"

    The IDs of the assets to be disassociated from the project.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "BatchDisassociateProjectAssetsResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"BatchDisassociateProjectAssetsErrors", + "documentation":"

    A list of associated error information, if any.

    " + } + } + }, + "BatchPutAssetPropertyError":{ + "type":"structure", + "required":[ + "errorCode", + "errorMessage", + "timestamps" + ], + "members":{ + "errorCode":{ + "shape":"BatchPutAssetPropertyValueErrorCode", + "documentation":"

    The error code.

    " + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    The associated error message.

    " + }, + "timestamps":{ + "shape":"Timestamps", + "documentation":"

    A list of timestamps for each error, if any.

    " + } + }, + "documentation":"

    Contains error information from updating a batch of asset property values.

    " + }, + "BatchPutAssetPropertyErrorEntries":{ + "type":"list", + "member":{"shape":"BatchPutAssetPropertyErrorEntry"} + }, + "BatchPutAssetPropertyErrorEntry":{ + "type":"structure", + "required":[ + "entryId", + "errors" + ], + "members":{ + "entryId":{ + "shape":"EntryId", + "documentation":"

    The ID of the failed entry.

    " + }, + "errors":{ + "shape":"BatchPutAssetPropertyErrors", + "documentation":"

    The list of update property value errors.

    " + } + }, + "documentation":"

    Contains error information for asset property value entries that are associated with the BatchPutAssetPropertyValue API.

    " + }, + "BatchPutAssetPropertyErrors":{ + "type":"list", + "member":{"shape":"BatchPutAssetPropertyError"} + }, + "BatchPutAssetPropertyValueErrorCode":{ + "type":"string", + "enum":[ + "ResourceNotFoundException", + "InvalidRequestException", + "InternalFailureException", + "ServiceUnavailableException", + "ThrottlingException", + "LimitExceededException", + "ConflictingOperationException", + "TimestampOutOfRangeException", + "AccessDeniedException" + ] + }, + "BatchPutAssetPropertyValueRequest":{ + "type":"structure", + "required":["entries"], + "members":{ + "entries":{ + "shape":"PutAssetPropertyValueEntries", + "documentation":"

    The list of asset property value entries for the batch put request. You can specify up to 10 entries per request.

    " + } + } + }, + "BatchPutAssetPropertyValueResponse":{ + "type":"structure", + "required":["errorEntries"], + "members":{ + "errorEntries":{ + "shape":"BatchPutAssetPropertyErrorEntries", + "documentation":"

    A list of the errors (if any) associated with the batch put request. Each error entry contains the entryId of the entry that failed.

    " + } + } + }, + "CapabilityConfiguration":{ + "type":"string", + "max":104857600, + "min":1 + }, + "CapabilityNamespace":{ + "type":"string", + "max":512, + "min":1, + "pattern":"^[a-zA-Z]+:[a-zA-Z]+:[0-9]+$" + }, + "CapabilitySyncStatus":{ + "type":"string", + "enum":[ + "IN_SYNC", + "OUT_OF_SYNC", + "SYNC_FAILED" + ] + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":36, + "pattern":"\\S{36,64}" + }, + "CompositeModelProperty":{ + "type":"structure", + "required":[ + "name", + "type", + "assetProperty" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

    The name of the property.

    " + }, + "type":{ + "shape":"Name", + "documentation":"

    The type of the composite model that defines this property.

    " + }, + "assetProperty":{"shape":"Property"} + }, + "documentation":"

    Contains information about a composite model property on an asset.

    " + }, + "ConfigurationErrorDetails":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{ + "shape":"ErrorCode", + "documentation":"

    The error code.

    " + }, + "message":{ + "shape":"ErrorMessage", + "documentation":"

    The error message.

    " + } + }, + "documentation":"

    Contains the details of an AWS IoT SiteWise configuration error.

    " + }, + "ConfigurationState":{ + "type":"string", + "enum":[ + "ACTIVE", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" + ] + }, + "ConfigurationStatus":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{ + "shape":"ConfigurationState", + "documentation":"

    The current state of the configuration.

    " + }, + "error":{ + "shape":"ConfigurationErrorDetails", + "documentation":"

    Contains associated error information, if any.

    " + } + }, + "documentation":"

    Contains current status information for the configuration.

    " + }, + "ConflictingOperationException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceArn" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "resourceId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the resource that conflicts with this operation.

    " + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource that conflicts with this operation.

    " + } + }, + "documentation":"

    Your request has conflicting operations. This can occur if you're trying to perform more than one operation on the same resource at the same time.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateAccessPolicyRequest":{ + "type":"structure", + "required":[ + "accessPolicyIdentity", + "accessPolicyResource", + "accessPolicyPermission" + ], + "members":{ + "accessPolicyIdentity":{ + "shape":"Identity", + "documentation":"

    The identity for this access policy. Choose an AWS SSO user, an AWS SSO group, or an IAM user.

    " + }, + "accessPolicyResource":{ + "shape":"Resource", + "documentation":"

    The AWS IoT SiteWise Monitor resource for this access policy. Choose either a portal or a project.

    " + }, + "accessPolicyPermission":{ + "shape":"Permission", + "documentation":"

    The permission level for this access policy. Note that a project ADMINISTRATOR is also known as a project owner.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the access policy. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

    " + } + } + }, + "CreateAccessPolicyResponse":{ + "type":"structure", + "required":[ + "accessPolicyId", + "accessPolicyArn" + ], + "members":{ + "accessPolicyId":{ + "shape":"ID", + "documentation":"

    The ID of the access policy.

    " + }, + "accessPolicyArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the access policy, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:access-policy/${AccessPolicyId}

    " + } + } + }, + "CreateAssetModelRequest":{ + "type":"structure", + "required":["assetModelName"], + "members":{ + "assetModelName":{ + "shape":"Name", + "documentation":"

    A unique, friendly name for the asset model.

    " + }, + "assetModelDescription":{ + "shape":"Description", + "documentation":"

    A description for the asset model.

    " + }, + "assetModelProperties":{ + "shape":"AssetModelPropertyDefinitions", + "documentation":"

    The property definitions of the asset model. For more information, see Asset properties in the AWS IoT SiteWise User Guide.

    You can specify up to 200 properties per asset model. For more information, see Quotas in the AWS IoT SiteWise User Guide.

    " + }, + "assetModelHierarchies":{ + "shape":"AssetModelHierarchyDefinitions", + "documentation":"

    The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see Asset hierarchies in the AWS IoT SiteWise User Guide.

    You can specify up to 10 hierarchies per asset model. For more information, see Quotas in the AWS IoT SiteWise User Guide.

    " + }, + "assetModelCompositeModels":{ + "shape":"AssetModelCompositeModelDefinitions", + "documentation":"

    The composite asset models that are part of this asset model. Composite asset models are asset models that contain specific properties. Each composite model has a type that defines the properties that the composite model supports. Use composite asset models to define alarms on this asset model.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the asset model. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

    " + } + } + }, + "CreateAssetModelResponse":{ + "type":"structure", + "required":[ + "assetModelId", + "assetModelArn", + "assetModelStatus" + ], + "members":{ + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model. You can use this ID when you call other AWS IoT SiteWise APIs.

    " + }, + "assetModelArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the asset model, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}

    " + }, + "assetModelStatus":{ + "shape":"AssetModelStatus", + "documentation":"

    The status of the asset model, which contains a state (CREATING after successfully calling this operation) and any error message.

    " + } + } + }, + "CreateAssetRequest":{ + "type":"structure", + "required":[ + "assetName", + "assetModelId" + ], + "members":{ + "assetName":{ + "shape":"Name", + "documentation":"

    A unique, friendly name for the asset.

    " + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model from which to create the asset.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the asset. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

    " + } + } + }, + "CreateAssetResponse":{ + "type":"structure", + "required":[ + "assetId", + "assetArn", + "assetStatus" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset. This ID uniquely identifies the asset within AWS IoT SiteWise and can be used with other AWS IoT SiteWise APIs.

    " + }, + "assetArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the asset, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

    " + }, + "assetStatus":{ + "shape":"AssetStatus", + "documentation":"

    The status of the asset, which contains a state (CREATING after successfully calling this operation) and any error message.

    " + } + } + }, + "CreateDashboardRequest":{ + "type":"structure", + "required":[ + "projectId", + "dashboardName", + "dashboardDefinition" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project in which to create the dashboard.

    " + }, + "dashboardName":{ + "shape":"Name", + "documentation":"

    A friendly name for the dashboard.

    " + }, + "dashboardDescription":{ + "shape":"Description", + "documentation":"

    A description for the dashboard.

    " + }, + "dashboardDefinition":{ + "shape":"DashboardDefinition", + "documentation":"

    The dashboard definition specified in a JSON literal. For detailed information, see Creating dashboards (CLI) in the AWS IoT SiteWise User Guide.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the dashboard. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

    " + } + } + }, + "CreateDashboardResponse":{ + "type":"structure", + "required":[ + "dashboardId", + "dashboardArn" + ], + "members":{ + "dashboardId":{ + "shape":"ID", + "documentation":"

    The ID of the dashboard.

    " + }, + "dashboardArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the dashboard, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:dashboard/${DashboardId}

    " + } + } + }, + "CreateGatewayRequest":{ + "type":"structure", + "required":[ + "gatewayName", + "gatewayPlatform" + ], + "members":{ + "gatewayName":{ + "shape":"Name", + "documentation":"

    A unique, friendly name for the gateway.

    " + }, + "gatewayPlatform":{ + "shape":"GatewayPlatform", + "documentation":"

    The gateway's platform. You can only specify one platform in a gateway.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the gateway. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

    " + } + } + }, + "CreateGatewayResponse":{ + "type":"structure", + "required":[ + "gatewayId", + "gatewayArn" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

    The ID of the gateway device. You can use this ID when you call other AWS IoT SiteWise APIs.

    " + }, + "gatewayArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the gateway, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:gateway/${GatewayId}

    " + } + } + }, + "CreatePortalRequest":{ + "type":"structure", + "required":[ + "portalName", + "portalContactEmail", + "roleArn" + ], + "members":{ + "portalName":{ + "shape":"Name", + "documentation":"

    A friendly name for the portal.

    " + }, + "portalDescription":{ + "shape":"Description", + "documentation":"

    A description for the portal.

    " + }, + "portalContactEmail":{ + "shape":"Email", + "documentation":"

    The AWS administrator's contact email address.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + }, + "portalLogoImageFile":{ + "shape":"ImageFile", + "documentation":"

    A logo image to display in the portal. Upload a square, high-resolution image. The image is displayed on a dark background.

    " + }, + "roleArn":{ + "shape":"ARN", + "documentation":"

    The ARN of a service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the portal. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

    " + }, + "portalAuthMode":{ + "shape":"AuthMode", + "documentation":"

    The service to use to authenticate users to the portal. Choose from the following options:

    • SSO – The portal uses AWS Single Sign-On to authenticate users and manage user permissions. Before you can create a portal that uses AWS SSO, you must enable AWS SSO. For more information, see Enabling AWS SSO in the AWS IoT SiteWise User Guide. This option is only available in AWS Regions other than the China Regions.

    • IAM – The portal uses AWS Identity and Access Management (IAM) to authenticate users and manage user permissions. This option is only available in the China Regions.

    You can't change this value after you create a portal.

    Default: SSO

    " + } + } + }, + "CreatePortalResponse":{ + "type":"structure", + "required":[ + "portalId", + "portalArn", + "portalStartUrl", + "portalStatus", + "ssoApplicationId" + ], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

    The ID of the created portal.

    " + }, + "portalArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the portal, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:portal/${PortalId}

    " + }, + "portalStartUrl":{ + "shape":"Url", + "documentation":"

    The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the AWS IoT SiteWise console to get a URL that you can use to access the portal.

    " + }, + "portalStatus":{ + "shape":"PortalStatus", + "documentation":"

    The status of the portal, which contains a state (CREATING after successfully calling this operation) and any error message.

    " + }, + "ssoApplicationId":{ + "shape":"SSOApplicationId", + "documentation":"

    The associated AWS SSO application ID, if the portal uses AWS SSO.

    " + } + } + }, + "CreateProjectRequest":{ + "type":"structure", + "required":[ + "portalId", + "projectName" + ], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

    The ID of the portal in which to create the project.

    " + }, + "projectName":{ + "shape":"Name", + "documentation":"

    A friendly name for the project.

    " + }, + "projectDescription":{ + "shape":"Description", + "documentation":"

    A description for the project.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the project. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

    " + } + } + }, + "CreateProjectResponse":{ + "type":"structure", + "required":[ + "projectId", + "projectArn" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project.

    " + }, + "projectArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the project, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:project/${ProjectId}

    " + } + } + }, + "DashboardDefinition":{ + "type":"string", + "max":204800, + "min":0, + "pattern":".+" + }, + "DashboardSummaries":{ + "type":"list", + "member":{"shape":"DashboardSummary"} + }, + "DashboardSummary":{ + "type":"structure", + "required":[ + "id", + "name" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the dashboard.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the dashboard.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The dashboard's description.

    " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the dashboard was created, in Unix epoch time.

    " + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the dashboard was last updated, in Unix epoch time.

    " + } + }, + "documentation":"

    Contains a dashboard summary.

    " + }, + "DefaultValue":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "DeleteAccessPolicyRequest":{ + "type":"structure", + "required":["accessPolicyId"], + "members":{ + "accessPolicyId":{ + "shape":"ID", + "documentation":"

    The ID of the access policy to be deleted.

    ", + "location":"uri", + "locationName":"accessPolicyId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteAccessPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteAssetModelRequest":{ + "type":"structure", + "required":["assetModelId"], + "members":{ + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model to delete.

    ", + "location":"uri", + "locationName":"assetModelId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteAssetModelResponse":{ + "type":"structure", + "required":["assetModelStatus"], + "members":{ + "assetModelStatus":{ + "shape":"AssetModelStatus", + "documentation":"

    The status of the asset model, which contains a state (DELETING after successfully calling this operation) and any error message.

    " + } + } + }, + "DeleteAssetRequest":{ + "type":"structure", + "required":["assetId"], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset to delete.

    ", + "location":"uri", + "locationName":"assetId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteAssetResponse":{ + "type":"structure", + "required":["assetStatus"], + "members":{ + "assetStatus":{ + "shape":"AssetStatus", + "documentation":"

    The status of the asset, which contains a state (DELETING after successfully calling this operation) and any error message.

    " + } + } + }, + "DeleteDashboardRequest":{ + "type":"structure", + "required":["dashboardId"], + "members":{ + "dashboardId":{ + "shape":"ID", + "documentation":"

    The ID of the dashboard to delete.

    ", + "location":"uri", + "locationName":"dashboardId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteDashboardResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteGatewayRequest":{ + "type":"structure", + "required":["gatewayId"], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

    The ID of the gateway to delete.

    ", + "location":"uri", + "locationName":"gatewayId" + } + } + }, + "DeletePortalRequest":{ + "type":"structure", + "required":["portalId"], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

    The ID of the portal to delete.

    ", + "location":"uri", + "locationName":"portalId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeletePortalResponse":{ + "type":"structure", + "required":["portalStatus"], + "members":{ + "portalStatus":{ + "shape":"PortalStatus", + "documentation":"

    The status of the portal, which contains a state (DELETING after successfully calling this operation) and any error message.

    " + } + } + }, + "DeleteProjectRequest":{ + "type":"structure", + "required":["projectId"], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project.

    ", + "location":"uri", + "locationName":"projectId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteProjectResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeAccessPolicyRequest":{ + "type":"structure", + "required":["accessPolicyId"], + "members":{ + "accessPolicyId":{ + "shape":"ID", + "documentation":"

    The ID of the access policy.

    ", + "location":"uri", + "locationName":"accessPolicyId" + } + } + }, + "DescribeAccessPolicyResponse":{ + "type":"structure", + "required":[ + "accessPolicyId", + "accessPolicyArn", + "accessPolicyIdentity", + "accessPolicyResource", + "accessPolicyPermission", + "accessPolicyCreationDate", + "accessPolicyLastUpdateDate" + ], + "members":{ + "accessPolicyId":{ + "shape":"ID", + "documentation":"

    The ID of the access policy.

    " + }, + "accessPolicyArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the access policy, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:access-policy/${AccessPolicyId}

    " + }, + "accessPolicyIdentity":{ + "shape":"Identity", + "documentation":"

    The identity (AWS SSO user, AWS SSO group, or IAM user) to which this access policy applies.

    " + }, + "accessPolicyResource":{ + "shape":"Resource", + "documentation":"

    The AWS IoT SiteWise Monitor resource (portal or project) to which this access policy provides access.

    " + }, + "accessPolicyPermission":{ + "shape":"Permission", + "documentation":"

    The access policy permission. Note that a project ADMINISTRATOR is also known as a project owner.

    " + }, + "accessPolicyCreationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the access policy was created, in Unix epoch time.

    " + }, + "accessPolicyLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the access policy was last updated, in Unix epoch time.

    " + } + } + }, + "DescribeAssetModelRequest":{ + "type":"structure", + "required":["assetModelId"], + "members":{ + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model.

    ", + "location":"uri", + "locationName":"assetModelId" + } + } + }, + "DescribeAssetModelResponse":{ + "type":"structure", + "required":[ + "assetModelId", + "assetModelArn", + "assetModelName", + "assetModelDescription", + "assetModelProperties", + "assetModelHierarchies", + "assetModelCreationDate", + "assetModelLastUpdateDate", + "assetModelStatus" + ], + "members":{ + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model.

    " + }, + "assetModelArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the asset model, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}

    " + }, + "assetModelName":{ + "shape":"Name", + "documentation":"

    The name of the asset model.

    " + }, + "assetModelDescription":{ + "shape":"Description", + "documentation":"

    The asset model's description.

    " + }, + "assetModelProperties":{ + "shape":"AssetModelProperties", + "documentation":"

    The list of asset properties for the asset model.

    This object doesn't include properties that you define in composite models. You can find composite model properties in the assetModelCompositeModels object.

    " + }, + "assetModelHierarchies":{ + "shape":"AssetModelHierarchies", + "documentation":"

    A list of asset model hierarchies that each contain a childAssetModelId and a hierarchyId (named id). A hierarchy specifies allowed parent/child asset relationships for an asset model.

    " + }, + "assetModelCompositeModels":{ + "shape":"AssetModelCompositeModels", + "documentation":"

    The list of composite asset models for the asset model.

    " + }, + "assetModelCreationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the asset model was created, in Unix epoch time.

    " + }, + "assetModelLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the asset model was last updated, in Unix epoch time.

    " + }, + "assetModelStatus":{ + "shape":"AssetModelStatus", + "documentation":"

    The current status of the asset model, which contains a state and any error message.

    " + } + } + }, + "DescribeAssetPropertyRequest":{ + "type":"structure", + "required":[ + "assetId", + "propertyId" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    ", + "location":"uri", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

    The ID of the asset property.

    ", + "location":"uri", + "locationName":"propertyId" + } + } + }, + "DescribeAssetPropertyResponse":{ + "type":"structure", + "required":[ + "assetId", + "assetName", + "assetModelId" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    " + }, + "assetName":{ + "shape":"Name", + "documentation":"

    The name of the asset.

    " + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model.

    " + }, + "assetProperty":{ + "shape":"Property", + "documentation":"

    The asset property's definition, alias, and notification state.

    This response includes this object for normal asset properties. If you describe an asset property in a composite model, this response includes the asset property information in compositeModel.

    " + }, + "compositeModel":{ + "shape":"CompositeModelProperty", + "documentation":"

    The composite asset model that declares this asset property, if this asset property exists in a composite model.

    " + } + } + }, + "DescribeAssetRequest":{ + "type":"structure", + "required":["assetId"], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    ", + "location":"uri", + "locationName":"assetId" + } + } + }, + "DescribeAssetResponse":{ + "type":"structure", + "required":[ + "assetId", + "assetArn", + "assetName", + "assetModelId", + "assetProperties", + "assetHierarchies", + "assetCreationDate", + "assetLastUpdateDate", + "assetStatus" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    " + }, + "assetArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the asset, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

    " + }, + "assetName":{ + "shape":"Name", + "documentation":"

    The name of the asset.

    " + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model that was used to create the asset.

    " + }, + "assetProperties":{ + "shape":"AssetProperties", + "documentation":"

    The list of asset properties for the asset.

    This object doesn't include properties that you define in composite models. You can find composite model properties in the assetCompositeModels object.

    " + }, + "assetHierarchies":{ + "shape":"AssetHierarchies", + "documentation":"

    A list of asset hierarchies that each contain a hierarchyId. A hierarchy specifies allowed parent/child asset relationships.

    " + }, + "assetCompositeModels":{ + "shape":"AssetCompositeModels", + "documentation":"

    The composite models for the asset.

    " + }, + "assetCreationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the asset was created, in Unix epoch time.

    " + }, + "assetLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the asset was last updated, in Unix epoch time.

    " + }, + "assetStatus":{ + "shape":"AssetStatus", + "documentation":"

    The current status of the asset, which contains a state and any error message.

    " + } + } + }, + "DescribeDashboardRequest":{ + "type":"structure", + "required":["dashboardId"], + "members":{ + "dashboardId":{ + "shape":"ID", + "documentation":"

    The ID of the dashboard.

    ", + "location":"uri", + "locationName":"dashboardId" + } + } + }, + "DescribeDashboardResponse":{ + "type":"structure", + "required":[ + "dashboardId", + "dashboardArn", + "dashboardName", + "projectId", + "dashboardDefinition", + "dashboardCreationDate", + "dashboardLastUpdateDate" + ], + "members":{ + "dashboardId":{ + "shape":"ID", + "documentation":"

    The ID of the dashboard.

    " + }, + "dashboardArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the dashboard, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:dashboard/${DashboardId}

    " + }, + "dashboardName":{ + "shape":"Name", + "documentation":"

    The name of the dashboard.

    " + }, + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project that the dashboard is in.

    " + }, + "dashboardDescription":{ + "shape":"Description", + "documentation":"

    The dashboard's description.

    " + }, + "dashboardDefinition":{ + "shape":"DashboardDefinition", + "documentation":"

    The dashboard's definition JSON literal. For detailed information, see Creating dashboards (CLI) in the AWS IoT SiteWise User Guide.

    " + }, + "dashboardCreationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the dashboard was created, in Unix epoch time.

    " + }, + "dashboardLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the dashboard was last updated, in Unix epoch time.

    " + } + } + }, + "DescribeDefaultEncryptionConfigurationRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeDefaultEncryptionConfigurationResponse":{ + "type":"structure", + "required":[ + "encryptionType", + "configurationStatus" + ], + "members":{ + "encryptionType":{ + "shape":"EncryptionType", + "documentation":"

    The type of encryption used for the encryption configuration.

    " + }, + "kmsKeyArn":{ + "shape":"ARN", + "documentation":"

    The key ARN of the customer managed customer master key (CMK) used for AWS KMS encryption if you use KMS_BASED_ENCRYPTION.

    " + }, + "configurationStatus":{ + "shape":"ConfigurationStatus", + "documentation":"

    The status of the account configuration. This contains the ConfigurationState. If there's an error, it also contains the ErrorDetails.

    " + } + } + }, + "DescribeGatewayCapabilityConfigurationRequest":{ + "type":"structure", + "required":[ + "gatewayId", + "capabilityNamespace" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

    The ID of the gateway that defines the capability configuration.

    ", + "location":"uri", + "locationName":"gatewayId" + }, + "capabilityNamespace":{ + "shape":"CapabilityNamespace", + "documentation":"

    The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace iotsitewise:opcuacollector:version, where version is a number such as 1.

    ", + "location":"uri", + "locationName":"capabilityNamespace" + } + } + }, + "DescribeGatewayCapabilityConfigurationResponse":{ + "type":"structure", + "required":[ + "gatewayId", + "capabilityNamespace", + "capabilityConfiguration", + "capabilitySyncStatus" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

    The ID of the gateway that defines the capability configuration.

    " + }, + "capabilityNamespace":{ + "shape":"CapabilityNamespace", + "documentation":"

    The namespace of the gateway capability.

    " + }, + "capabilityConfiguration":{ + "shape":"CapabilityConfiguration", + "documentation":"

    The JSON document that defines the gateway capability's configuration. For more information, see Configuring data sources (CLI) in the AWS IoT SiteWise User Guide.

    " + }, + "capabilitySyncStatus":{ + "shape":"CapabilitySyncStatus", + "documentation":"

    The synchronization status of the capability configuration. The sync status can be one of the following:

    • IN_SYNC – The gateway is running the capability configuration.

    • OUT_OF_SYNC – The gateway hasn't received the capability configuration.

    • SYNC_FAILED – The gateway rejected the capability configuration.

    " + } + } + }, + "DescribeGatewayRequest":{ + "type":"structure", + "required":["gatewayId"], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

    The ID of the gateway device.

    ", + "location":"uri", + "locationName":"gatewayId" + } + } + }, + "DescribeGatewayResponse":{ + "type":"structure", + "required":[ + "gatewayId", + "gatewayName", + "gatewayArn", + "gatewayCapabilitySummaries", + "creationDate", + "lastUpdateDate" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

    The ID of the gateway device.

    " + }, + "gatewayName":{ + "shape":"Name", + "documentation":"

    The name of the gateway.

    " + }, + "gatewayArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the gateway, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:gateway/${GatewayId}

    " + }, + "gatewayPlatform":{ + "shape":"GatewayPlatform", + "documentation":"

    The gateway's platform.

    " + }, + "gatewayCapabilitySummaries":{ + "shape":"GatewayCapabilitySummaries", + "documentation":"

    A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. To retrieve a capability configuration's definition, use DescribeGatewayCapabilityConfiguration.

    " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the gateway was created, in Unix epoch time.

    " + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the gateway was last updated, in Unix epoch time.

    " + } + } + }, + "DescribeLoggingOptionsRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeLoggingOptionsResponse":{ + "type":"structure", + "required":["loggingOptions"], + "members":{ + "loggingOptions":{ + "shape":"LoggingOptions", + "documentation":"

    The current logging options.

    " + } + } + }, + "DescribePortalRequest":{ + "type":"structure", + "required":["portalId"], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

    The ID of the portal.

    ", + "location":"uri", + "locationName":"portalId" + } + } + }, + "DescribePortalResponse":{ + "type":"structure", + "required":[ + "portalId", + "portalArn", + "portalName", + "portalClientId", + "portalStartUrl", + "portalContactEmail", + "portalStatus", + "portalCreationDate", + "portalLastUpdateDate" + ], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

    The ID of the portal.

    " + }, + "portalArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the portal, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:portal/${PortalId}

    " + }, + "portalName":{ + "shape":"Name", + "documentation":"

    The name of the portal.

    " + }, + "portalDescription":{ + "shape":"Description", + "documentation":"

    The portal's description.

    " + }, + "portalClientId":{ + "shape":"PortalClientId", + "documentation":"

    The AWS SSO application generated client ID (used with AWS SSO APIs). AWS IoT SiteWise includes portalClientId only for portals that use AWS SSO to authenticate users.

    " + }, + "portalStartUrl":{ + "shape":"Url", + "documentation":"

    The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the AWS IoT SiteWise console to get a URL that you can use to access the portal.

    " + }, + "portalContactEmail":{ + "shape":"Email", + "documentation":"

    The AWS administrator's contact email address.

    " + }, + "portalStatus":{ + "shape":"PortalStatus", + "documentation":"

    The current status of the portal, which contains a state and any error message.

    " + }, + "portalCreationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the portal was created, in Unix epoch time.

    " + }, + "portalLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the portal was last updated, in Unix epoch time.

    " + }, + "portalLogoImageLocation":{ + "shape":"ImageLocation", + "documentation":"

    The portal's logo image, which is available at a URL.

    " + }, + "roleArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

    " + }, + "portalAuthMode":{ + "shape":"AuthMode", + "documentation":"

    The service to use to authenticate users to the portal.

    " + } + } + }, + "DescribeProjectRequest":{ + "type":"structure", + "required":["projectId"], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project.

    ", + "location":"uri", + "locationName":"projectId" + } + } + }, + "DescribeProjectResponse":{ + "type":"structure", + "required":[ + "projectId", + "projectArn", + "projectName", + "portalId", + "projectCreationDate", + "projectLastUpdateDate" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project.

    " + }, + "projectArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the project, which has the following format.

    arn:${Partition}:iotsitewise:${Region}:${Account}:project/${ProjectId}

    " + }, + "projectName":{ + "shape":"Name", + "documentation":"

    The name of the project.

    " + }, + "portalId":{ + "shape":"ID", + "documentation":"

    The ID of the portal that the project is in.

    " + }, + "projectDescription":{ + "shape":"Description", + "documentation":"

    The project's description.

    " + }, + "projectCreationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the project was created, in Unix epoch time.

    " + }, + "projectLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the project was last updated, in Unix epoch time.

    " + } + } + }, + "Description":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "DisassociateAssetsRequest":{ + "type":"structure", + "required":[ + "assetId", + "hierarchyId", + "childAssetId" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the parent asset from which to disassociate the child asset.

    ", + "location":"uri", + "locationName":"assetId" + }, + "hierarchyId":{ + "shape":"ID", + "documentation":"

    The ID of a hierarchy in the parent asset's model. Hierarchies allow different groupings of assets to be formed that all come from the same asset model. You can use the hierarchy ID to identify the correct asset to disassociate. For more information, see Asset hierarchies in the AWS IoT SiteWise User Guide.

    " + }, + "childAssetId":{ + "shape":"ID", + "documentation":"

    The ID of the child asset to disassociate.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "Email":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^@]+@[^@]+" + }, + "EncryptionType":{ + "type":"string", + "enum":[ + "SITEWISE_DEFAULT_ENCRYPTION", + "KMS_BASED_ENCRYPTION" + ] + }, + "EntryId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9_-]+$" + }, + "ErrorCode":{ + "type":"string", + "enum":[ + "VALIDATION_ERROR", + "INTERNAL_FAILURE" + ] + }, + "ErrorDetails":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{ + "shape":"ErrorCode", + "documentation":"

    The error code.

    " + }, + "message":{ + "shape":"ErrorMessage", + "documentation":"

    The error message.

    " + } + }, + "documentation":"

    Contains the details of an AWS IoT SiteWise error.

    " + }, + "ErrorMessage":{"type":"string"}, + "ExceptionMessage":{"type":"string"}, + "Expression":{ + "type":"string", + "max":1024, + "min":1 + }, + "ExpressionVariable":{ + "type":"structure", + "required":[ + "name", + "value" + ], + "members":{ + "name":{ + "shape":"VariableName", + "documentation":"

    The friendly name of the variable to be used in the expression.

    " + }, + "value":{ + "shape":"VariableValue", + "documentation":"

    The variable that identifies an asset property from which to use values.

    " + } + }, + "documentation":"

    Contains expression variable information.

    " + }, + "ExpressionVariables":{ + "type":"list", + "member":{"shape":"ExpressionVariable"} + }, + "GatewayCapabilitySummaries":{ + "type":"list", + "member":{"shape":"GatewayCapabilitySummary"} + }, + "GatewayCapabilitySummary":{ + "type":"structure", + "required":[ + "capabilityNamespace", + "capabilitySyncStatus" + ], + "members":{ + "capabilityNamespace":{ + "shape":"CapabilityNamespace", + "documentation":"

    The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace iotsitewise:opcuacollector:version, where version is a number such as 1.

    " + }, + "capabilitySyncStatus":{ + "shape":"CapabilitySyncStatus", + "documentation":"

    The synchronization status of the capability configuration. The sync status can be one of the following:

    • IN_SYNC – The gateway is running the capability configuration.

    • OUT_OF_SYNC – The gateway hasn't received the capability configuration.

    • SYNC_FAILED – The gateway rejected the capability configuration.

    " + } + }, + "documentation":"

    Contains a summary of a gateway capability configuration.

    " + }, + "GatewayPlatform":{ + "type":"structure", + "required":["greengrass"], + "members":{ + "greengrass":{ + "shape":"Greengrass", + "documentation":"

    A gateway that runs on AWS IoT Greengrass.

    " + } + }, + "documentation":"

    Contains a gateway's platform information.

    " + }, + "GatewaySummaries":{ + "type":"list", + "member":{"shape":"GatewaySummary"} + }, + "GatewaySummary":{ + "type":"structure", + "required":[ + "gatewayId", + "gatewayName", + "creationDate", + "lastUpdateDate" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

    The ID of the gateway device.

    " + }, + "gatewayName":{ + "shape":"Name", + "documentation":"

    The name of the gateway.

    " + }, + "gatewayCapabilitySummaries":{ + "shape":"GatewayCapabilitySummaries", + "documentation":"

    A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. To retrieve a capability configuration's definition, use DescribeGatewayCapabilityConfiguration.

    " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the gateway was created, in Unix epoch time.

    " + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the gateway was last updated, in Unix epoch time.

    " + } + }, + "documentation":"

    Contains a summary of a gateway.

    " + }, + "GetAssetPropertyAggregatesRequest":{ + "type":"structure", + "required":[ + "aggregateTypes", + "resolution", + "startDate", + "endDate" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    ", + "location":"querystring", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

    The ID of the asset property.

    ", + "location":"querystring", + "locationName":"propertyId" + }, + "propertyAlias":{ + "shape":"AssetPropertyAlias", + "documentation":"

    The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the AWS IoT SiteWise User Guide.

    ", + "location":"querystring", + "locationName":"propertyAlias" + }, + "aggregateTypes":{ + "shape":"AggregateTypes", + "documentation":"

    The data aggregating function.

    ", + "location":"querystring", + "locationName":"aggregateTypes" + }, + "resolution":{ + "shape":"Resolution", + "documentation":"

    The time interval over which to aggregate data.

    ", + "location":"querystring", + "locationName":"resolution" + }, + "qualities":{ + "shape":"Qualities", + "documentation":"

    The quality by which to filter asset data.

    ", + "location":"querystring", + "locationName":"qualities" + }, + "startDate":{ + "shape":"Timestamp", + "documentation":"

    The exclusive start of the range from which to query historical data, expressed in seconds in Unix epoch time.

    ", + "location":"querystring", + "locationName":"startDate" + }, + "endDate":{ + "shape":"Timestamp", + "documentation":"

    The inclusive end of the range from which to query historical data, expressed in seconds in Unix epoch time.

    ", + "location":"querystring", + "locationName":"endDate" + }, + "timeOrdering":{ + "shape":"TimeOrdering", + "documentation":"

    The chronological sorting order of the requested information.

    Default: ASCENDING

    ", + "location":"querystring", + "locationName":"timeOrdering" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 100

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "GetAssetPropertyAggregatesResponse":{ + "type":"structure", + "required":["aggregatedValues"], + "members":{ + "aggregatedValues":{ + "shape":"AggregatedValues", + "documentation":"

    The requested aggregated values.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "GetAssetPropertyValueHistoryRequest":{ + "type":"structure", + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    ", + "location":"querystring", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

    The ID of the asset property.

    ", + "location":"querystring", + "locationName":"propertyId" + }, + "propertyAlias":{ + "shape":"AssetPropertyAlias", + "documentation":"

    The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the AWS IoT SiteWise User Guide.

    ", + "location":"querystring", + "locationName":"propertyAlias" + }, + "startDate":{ + "shape":"Timestamp", + "documentation":"

    The exclusive start of the range from which to query historical data, expressed in seconds in Unix epoch time.

    ", + "location":"querystring", + "locationName":"startDate" + }, + "endDate":{ + "shape":"Timestamp", + "documentation":"

    The inclusive end of the range from which to query historical data, expressed in seconds in Unix epoch time.

    ", + "location":"querystring", + "locationName":"endDate" + }, + "qualities":{ + "shape":"Qualities", + "documentation":"

    The quality by which to filter asset data.

    ", + "location":"querystring", + "locationName":"qualities" + }, + "timeOrdering":{ + "shape":"TimeOrdering", + "documentation":"

    The chronological sorting order of the requested information.

    Default: ASCENDING

    ", + "location":"querystring", + "locationName":"timeOrdering" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 100

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "GetAssetPropertyValueHistoryResponse":{ + "type":"structure", + "required":["assetPropertyValueHistory"], + "members":{ + "assetPropertyValueHistory":{ + "shape":"AssetPropertyValueHistory", + "documentation":"

    The asset property's value history.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "GetAssetPropertyValueRequest":{ + "type":"structure", + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    ", + "location":"querystring", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

    The ID of the asset property.

    ", + "location":"querystring", + "locationName":"propertyId" + }, + "propertyAlias":{ + "shape":"AssetPropertyAlias", + "documentation":"

    The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the AWS IoT SiteWise User Guide.

    ", + "location":"querystring", + "locationName":"propertyAlias" + } + } + }, + "GetAssetPropertyValueResponse":{ + "type":"structure", + "members":{ + "propertyValue":{ + "shape":"AssetPropertyValue", + "documentation":"

    The current asset property value.

    " + } + } + }, + "Greengrass":{ + "type":"structure", + "required":["groupArn"], + "members":{ + "groupArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the Greengrass group. For more information about how to find a group's ARN, see ListGroups and GetGroup in the AWS IoT Greengrass API Reference.

    " + } + }, + "documentation":"

    Contains details for a gateway that runs on AWS IoT Greengrass. To create a gateway that runs on AWS IoT Greengrass, you must add the IoT SiteWise connector to a Greengrass group and deploy it. Your Greengrass group must also have permissions to upload data to AWS IoT SiteWise. For more information, see Ingesting data using a gateway in the AWS IoT SiteWise User Guide.

    " + }, + "GroupIdentity":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"IdentityId", + "documentation":"

    The AWS SSO ID of the group.

    " + } + }, + "documentation":"

    Contains information for a group identity in an access policy.

    " + }, + "IAMUserIdentity":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ARN", + "documentation":"

    The ARN of the IAM user. For more information, see IAM ARNs in the IAM User Guide.

    If you delete the IAM user, access policies that contain this identity include an empty arn. You can delete the access policy for the IAM user that no longer exists.

    " + } + }, + "documentation":"

    Contains information about an AWS Identity and Access Management (IAM) user.

    " + }, + "ID":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + }, + "IDs":{ + "type":"list", + "member":{"shape":"ID"}, + "max":100, + "min":1 + }, + "Identity":{ + "type":"structure", + "members":{ + "user":{ + "shape":"UserIdentity", + "documentation":"

    An AWS SSO user identity.

    " + }, + "group":{ + "shape":"GroupIdentity", + "documentation":"

    An AWS SSO group identity.

    " + }, + "iamUser":{ + "shape":"IAMUserIdentity", + "documentation":"

    An IAM user identity.

    " + } + }, + "documentation":"

    Contains an identity that can access an AWS IoT SiteWise Monitor resource.

    Currently, you can't use AWS APIs to retrieve AWS SSO identity IDs. You can find the AWS SSO identity IDs in the URL of user and group pages in the AWS SSO console.

    " + }, + "IdentityId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\S+" + }, + "IdentityType":{ + "type":"string", + "enum":[ + "USER", + "GROUP", + "IAM" + ] + }, + "Image":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of an existing image. Specify this parameter to keep an existing image.

    " + }, + "file":{"shape":"ImageFile"} + }, + "documentation":"

    Contains an image that is one of the following:

    • An image file. Choose this option to upload a new image.

    • The ID of an existing image. Choose this option to keep an existing image.

    " + }, + "ImageFile":{ + "type":"structure", + "required":[ + "data", + "type" + ], + "members":{ + "data":{ + "shape":"ImageFileData", + "documentation":"

    The image file contents, represented as a base64-encoded string. The file size must be less than 1 MB.

    " + }, + "type":{ + "shape":"ImageFileType", + "documentation":"

    The file type of the image.

    " + } + }, + "documentation":"

    Contains an image file.

    " + }, + "ImageFileData":{ + "type":"blob", + "max":1500000, + "min":1 + }, + "ImageFileType":{ + "type":"string", + "enum":["PNG"] + }, + "ImageLocation":{ + "type":"structure", + "required":[ + "id", + "url" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the image.

    " + }, + "url":{ + "shape":"Url", + "documentation":"

    The URL where the image is available. The URL is valid for 15 minutes so that you can view and download the image.

    " + } + }, + "documentation":"

    Contains an image that is uploaded to AWS IoT SiteWise and available at a URL.

    " + }, + "InternalFailureException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS IoT SiteWise can't process your request right now. Try again later.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "Interval":{ + "type":"string", + "max":3, + "min":2, + "pattern":"1w|1d|1h|15m|5m|1m" + }, + "InvalidRequestException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request isn't valid. This can occur if your request contains malformed JSON or unsupported characters. Check your request and try again.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "KmsKeyId":{ + "type":"string", + "max":2048, + "min":1 + }, + "LimitExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You've reached the limit for a resource. For example, this can occur if you're trying to associate more than the allowed number of child assets or attempting to create more than the allowed number of properties for an asset model.

    For more information, see Quotas in the AWS IoT SiteWise User Guide.

    ", + "error":{"httpStatusCode":410}, + "exception":true + }, + "ListAccessPoliciesRequest":{ + "type":"structure", + "members":{ + "identityType":{ + "shape":"IdentityType", + "documentation":"

    The type of identity (AWS SSO user, AWS SSO group, or IAM user). This parameter is required if you specify identityId.

    ", + "location":"querystring", + "locationName":"identityType" + }, + "identityId":{ + "shape":"IdentityId", + "documentation":"

    The ID of the identity. This parameter is required if you specify USER or GROUP for identityType.

    ", + "location":"querystring", + "locationName":"identityId" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource (portal or project). This parameter is required if you specify resourceId.

    ", + "location":"querystring", + "locationName":"resourceType" + }, + "resourceId":{ + "shape":"ID", + "documentation":"

    The ID of the resource. This parameter is required if you specify resourceType.

    ", + "location":"querystring", + "locationName":"resourceId" + }, + "iamArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the IAM user. For more information, see IAM ARNs in the IAM User Guide. This parameter is required if you specify IAM for identityType.

    ", + "location":"querystring", + "locationName":"iamArn" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 50

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAccessPoliciesResponse":{ + "type":"structure", + "required":["accessPolicySummaries"], + "members":{ + "accessPolicySummaries":{ + "shape":"AccessPolicySummaries", + "documentation":"

    A list that summarizes each access policy.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListAssetModelsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 50

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssetModelsResponse":{ + "type":"structure", + "required":["assetModelSummaries"], + "members":{ + "assetModelSummaries":{ + "shape":"AssetModelSummaries", + "documentation":"

    A list that summarizes each asset model.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListAssetRelationshipsRequest":{ + "type":"structure", + "required":[ + "assetId", + "traversalType" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset.

    ", + "location":"uri", + "locationName":"assetId" + }, + "traversalType":{ + "shape":"TraversalType", + "documentation":"

    The type of traversal to use to identify asset relationships. Choose the following option:

    • PATH_TO_ROOT – Identify the asset's parent assets up to the root asset. The asset that you specify in assetId is the first result in the list of assetRelationshipSummaries, and the root asset is the last result.

    ", + "location":"querystring", + "locationName":"traversalType" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssetRelationshipsResponse":{ + "type":"structure", + "required":["assetRelationshipSummaries"], + "members":{ + "assetRelationshipSummaries":{ + "shape":"AssetRelationshipSummaries", + "documentation":"

    A list that summarizes each asset relationship.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListAssetsFilter":{ + "type":"string", + "enum":[ + "ALL", + "TOP_LEVEL" + ] + }, + "ListAssetsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 50

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model by which to filter the list of assets. This parameter is required if you choose ALL for filter.

    ", + "location":"querystring", + "locationName":"assetModelId" + }, + "filter":{ + "shape":"ListAssetsFilter", + "documentation":"

    The filter for the requested list of assets. Choose one of the following options:

    • ALL – The list includes all assets for a given asset model ID. The assetModelId parameter is required if you filter by ALL.

    • TOP_LEVEL – The list includes only top-level assets in the asset hierarchy tree.

    Default: ALL

    ", + "location":"querystring", + "locationName":"filter" + } + } + }, + "ListAssetsResponse":{ + "type":"structure", + "required":["assetSummaries"], + "members":{ + "assetSummaries":{ + "shape":"AssetSummaries", + "documentation":"

    A list that summarizes each asset.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListAssociatedAssetsRequest":{ + "type":"structure", + "required":["assetId"], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset to query.

    ", + "location":"uri", + "locationName":"assetId" + }, + "hierarchyId":{ + "shape":"ID", + "documentation":"

    The ID of the hierarchy by which child assets are associated to the asset. To find a hierarchy ID, use the DescribeAsset or DescribeAssetModel operations. This parameter is required if you choose CHILD for traversalDirection.

    For more information, see Asset hierarchies in the AWS IoT SiteWise User Guide.

    ", + "location":"querystring", + "locationName":"hierarchyId" + }, + "traversalDirection":{ + "shape":"TraversalDirection", + "documentation":"

    The direction to list associated assets. Choose one of the following options:

    • CHILD – The list includes all child assets associated to the asset. The hierarchyId parameter is required if you choose CHILD.

    • PARENT – The list includes the asset's parent asset.

    Default: CHILD

    ", + "location":"querystring", + "locationName":"traversalDirection" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 50

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssociatedAssetsResponse":{ + "type":"structure", + "required":["assetSummaries"], + "members":{ + "assetSummaries":{ + "shape":"AssociatedAssetsSummaries", + "documentation":"

    A list that summarizes the associated assets.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListDashboardsRequest":{ + "type":"structure", + "required":["projectId"], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project.

    ", + "location":"querystring", + "locationName":"projectId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 50

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListDashboardsResponse":{ + "type":"structure", + "required":["dashboardSummaries"], + "members":{ + "dashboardSummaries":{ + "shape":"DashboardSummaries", + "documentation":"

    A list that summarizes each dashboard in the project.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListGatewaysRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 50

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListGatewaysResponse":{ + "type":"structure", + "required":["gatewaySummaries"], + "members":{ + "gatewaySummaries":{ + "shape":"GatewaySummaries", + "documentation":"

    A list that summarizes each gateway.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListPortalsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 50

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListPortalsResponse":{ + "type":"structure", + "members":{ + "portalSummaries":{ + "shape":"PortalSummaries", + "documentation":"

    A list that summarizes each portal.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListProjectAssetsRequest":{ + "type":"structure", + "required":["projectId"], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project.

    ", + "location":"uri", + "locationName":"projectId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 50

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProjectAssetsResponse":{ + "type":"structure", + "required":["assetIds"], + "members":{ + "assetIds":{ + "shape":"AssetIDs", + "documentation":"

    A list that contains the IDs of each asset associated with the project.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListProjectsRequest":{ + "type":"structure", + "required":["portalId"], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

    The ID of the portal.

    ", + "location":"querystring", + "locationName":"portalId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to be used for the next set of paginated results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per paginated request.

    Default: 50

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProjectsResponse":{ + "type":"structure", + "required":["projectSummaries"], + "members":{ + "projectSummaries":{ + "shape":"ProjectSummaries", + "documentation":"

    A list that summarizes each project in the portal.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token for the next set of results, or null if there are no additional results.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The ARN of the resource.

    ", + "location":"querystring", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    The list of key-value pairs that contain metadata for the resource. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

    " + } + } + }, + "LoggingLevel":{ + "type":"string", + "enum":[ + "ERROR", + "INFO", + "OFF" + ] + }, + "LoggingOptions":{ + "type":"structure", + "required":["level"], + "members":{ + "level":{ + "shape":"LoggingLevel", + "documentation":"

    The AWS IoT SiteWise logging verbosity level.

    " + } + }, + "documentation":"

    Contains logging options.

    " + }, + "Macro":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "MaxResults":{ + "type":"integer", + "max":250, + "min":1 + }, + "Measurement":{ + "type":"structure", + "members":{ + }, + "documentation":"

    Contains an asset measurement property. This structure is empty. For more information, see Measurements in the AWS IoT SiteWise User Guide.

    " + }, + "Metric":{ + "type":"structure", + "required":[ + "expression", + "variables", + "window" + ], + "members":{ + "expression":{ + "shape":"Expression", + "documentation":"

    The mathematical expression that defines the metric aggregation function. You can specify up to 10 variables and up to 10 functions per expression.

    For more information, see Quotas in the AWS IoT SiteWise User Guide.

    " + }, + "variables":{ + "shape":"ExpressionVariables", + "documentation":"

    The list of variables used in the expression.

    " + }, + "window":{ + "shape":"MetricWindow", + "documentation":"

    The window (time interval) over which AWS IoT SiteWise computes the metric's aggregation expression. AWS IoT SiteWise computes one data point per window.

    " + } + }, + "documentation":"

    Contains an asset metric property. With metrics, you can calculate aggregate functions, such as an average, maximum, or minimum, as specified through an expression. A metric maps several values to a single value (such as a sum).

    The maximum number of dependent/cascading variables used in any one metric calculation is 10. Therefore, a root metric can have up to 10 cascading metrics in its computational dependency tree. Additionally, a metric can only have a data type of DOUBLE and consume properties with data types of INTEGER or DOUBLE.

    For more information, see Metrics in the AWS IoT SiteWise User Guide.

    " + }, + "MetricWindow":{ + "type":"structure", + "members":{ + "tumbling":{ + "shape":"TumblingWindow", + "documentation":"

    The tumbling time interval window.

    " + } + }, + "documentation":"

    Contains a time interval window used for data aggregate computations (for example, average, sum, count, and so on).

    " + }, + "MonitorErrorCode":{ + "type":"string", + "enum":[ + "INTERNAL_FAILURE", + "VALIDATION_ERROR", + "LIMIT_EXCEEDED" + ] + }, + "MonitorErrorDetails":{ + "type":"structure", + "members":{ + "code":{ + "shape":"MonitorErrorCode", + "documentation":"

    The error code.

    " + }, + "message":{ + "shape":"MonitorErrorMessage", + "documentation":"

    The error message.

    " + } + }, + "documentation":"

    Contains AWS IoT SiteWise Monitor error details.

    " + }, + "MonitorErrorMessage":{"type":"string"}, + "Name":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[A-Za-z0-9+/=]+" + }, + "OffsetInNanos":{ + "type":"integer", + "max":999999999, + "min":0 + }, + "Permission":{ + "type":"string", + "enum":[ + "ADMINISTRATOR", + "VIEWER" + ] + }, + "PortalClientId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[!-~]*" + }, + "PortalResource":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the portal.

    " + } + }, + "documentation":"

    Identifies an AWS IoT SiteWise Monitor portal.

    " + }, + "PortalState":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE", + "FAILED" + ] + }, + "PortalStatus":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{ + "shape":"PortalState", + "documentation":"

    The current state of the portal.

    " + }, + "error":{ + "shape":"MonitorErrorDetails", + "documentation":"

    Contains associated error information, if any.

    " + } + }, + "documentation":"

    Contains information about the current status of a portal.

    " + }, + "PortalSummaries":{ + "type":"list", + "member":{"shape":"PortalSummary"} + }, + "PortalSummary":{ + "type":"structure", + "required":[ + "id", + "name", + "startUrl", + "status" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the portal.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the portal.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The portal's description.

    " + }, + "startUrl":{ + "shape":"Url", + "documentation":"

    The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the AWS IoT SiteWise console to get a URL that you can use to access the portal.

    " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the portal was created, in Unix epoch time.

    " + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the portal was last updated, in Unix epoch time.

    " + }, + "roleArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

    " + }, + "status":{"shape":"PortalStatus"} + }, + "documentation":"

    Contains a portal summary.

    " + }, + "ProjectResource":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the project.

    " + } + }, + "documentation":"

    Identifies a specific AWS IoT SiteWise Monitor project.

    " + }, + "ProjectSummaries":{ + "type":"list", + "member":{"shape":"ProjectSummary"} + }, + "ProjectSummary":{ + "type":"structure", + "required":[ + "id", + "name" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the project.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the project.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The project's description.

    " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

    The date the project was created, in Unix epoch time.

    " + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

    The date the project was last updated, in Unix epoch time.

    " + } + }, + "documentation":"

    Contains project summary information.

    " + }, + "Property":{ + "type":"structure", + "required":[ + "id", + "name", + "dataType" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

    The ID of the asset property.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the property.

    " + }, + "alias":{ + "shape":"PropertyAlias", + "documentation":"

    The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the AWS IoT SiteWise User Guide.

    " + }, + "notification":{ + "shape":"PropertyNotification", + "documentation":"

    The asset property's notification topic and state. For more information, see UpdateAssetProperty.

    " + }, + "dataType":{ + "shape":"PropertyDataType", + "documentation":"

    The property data type.

    " + }, + "unit":{ + "shape":"PropertyUnit", + "documentation":"

    The unit (such as Newtons or RPM) of the asset property.

    " + }, + "type":{ + "shape":"PropertyType", + "documentation":"

    The property type (see PropertyType). A property contains one type.

    " + } + }, + "documentation":"

    Contains asset property information.

    " + }, + "PropertyAlias":{ + "type":"string", + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "PropertyDataType":{ + "type":"string", + "enum":[ + "STRING", + "INTEGER", + "DOUBLE", + "BOOLEAN", + "STRUCT" + ] + }, + "PropertyNotification":{ + "type":"structure", + "required":[ + "topic", + "state" + ], + "members":{ + "topic":{ + "shape":"PropertyNotificationTopic", + "documentation":"

    The MQTT topic to which AWS IoT SiteWise publishes property value update notifications.

    " + }, + "state":{ + "shape":"PropertyNotificationState", + "documentation":"

    The current notification state.

    " + } + }, + "documentation":"

    Contains asset property value notification information. When the notification state is enabled, AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see Interacting with other services in the AWS IoT SiteWise User Guide.

    " + }, + "PropertyNotificationState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "PropertyNotificationTopic":{"type":"string"}, + "PropertyType":{ + "type":"structure", + "members":{ + "attribute":{ + "shape":"Attribute", + "documentation":"

    Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an IIoT wind turbine.

    " + }, + "measurement":{ + "shape":"Measurement", + "documentation":"

    Specifies an asset measurement property. A measurement represents a device's raw sensor data stream, such as timestamped temperature values or timestamped power values.

    " + }, + "transform":{ + "shape":"Transform", + "documentation":"

    Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.

    " + }, + "metric":{ + "shape":"Metric", + "documentation":"

    Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.

    " + } + }, + "documentation":"

    Contains a property type, which can be one of attribute, measurement, metric, or transform.

    " + }, + "PropertyUnit":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "PropertyValueBooleanValue":{"type":"boolean"}, + "PropertyValueDoubleValue":{"type":"double"}, + "PropertyValueIntegerValue":{"type":"integer"}, + "PropertyValueStringValue":{ + "type":"string", + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "PutAssetPropertyValueEntries":{ + "type":"list", + "member":{"shape":"PutAssetPropertyValueEntry"} + }, + "PutAssetPropertyValueEntry":{ + "type":"structure", + "required":[ + "entryId", + "propertyValues" + ], + "members":{ + "entryId":{ + "shape":"EntryId", + "documentation":"

    The user-specified ID for the entry. You can use this ID to identify which entries failed.

    " + }, + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset to update.

    " + }, + "propertyId":{ + "shape":"ID", + "documentation":"

    The ID of the asset property for this entry.

    " + }, + "propertyAlias":{ + "shape":"AssetPropertyAlias", + "documentation":"

    The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the AWS IoT SiteWise User Guide.

    " + }, + "propertyValues":{ + "shape":"AssetPropertyValues", + "documentation":"

    The list of property values to upload. You can specify up to 10 propertyValues array elements.

    " + } + }, + "documentation":"

    Contains a list of value updates for an asset property in the list of asset entries consumed by the BatchPutAssetPropertyValue API operation.

    " + }, + "PutDefaultEncryptionConfigurationRequest":{ + "type":"structure", + "required":["encryptionType"], + "members":{ + "encryptionType":{ + "shape":"EncryptionType", + "documentation":"

    The type of encryption used for the encryption configuration.

    " + }, + "kmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

    The Key ID of the customer managed customer master key (CMK) used for AWS KMS encryption. This is required if you use KMS_BASED_ENCRYPTION.

    " + } + } + }, + "PutDefaultEncryptionConfigurationResponse":{ + "type":"structure", + "required":[ + "encryptionType", + "configurationStatus" + ], + "members":{ + "encryptionType":{ + "shape":"EncryptionType", + "documentation":"

    The type of encryption used for the encryption configuration.

    " + }, + "kmsKeyArn":{ + "shape":"ARN", + "documentation":"

    The Key ARN of the AWS KMS CMK used for AWS KMS encryption if you use KMS_BASED_ENCRYPTION.

    " + }, + "configurationStatus":{ + "shape":"ConfigurationStatus", + "documentation":"

    The status of the account configuration. This contains the ConfigurationState. If there is an error, it also contains the ErrorDetails.

    " + } + } + }, + "PutLoggingOptionsRequest":{ + "type":"structure", + "required":["loggingOptions"], + "members":{ + "loggingOptions":{ + "shape":"LoggingOptions", + "documentation":"

    The logging options to set.

    " + } + } + }, + "PutLoggingOptionsResponse":{ + "type":"structure", + "members":{ + } + }, + "Qualities":{ + "type":"list", + "member":{"shape":"Quality"}, + "max":1, + "min":1 + }, + "Quality":{ + "type":"string", + "enum":[ + "GOOD", + "BAD", + "UNCERTAIN" + ] + }, + "Resolution":{ + "type":"string", + "max":2, + "min":2, + "pattern":"1m|1h|1d" + }, + "Resource":{ + "type":"structure", + "members":{ + "portal":{ + "shape":"PortalResource", + "documentation":"

    A portal resource.

    " + }, + "project":{ + "shape":"ProjectResource", + "documentation":"

    A project resource.

    " + } + }, + "documentation":"

    Contains an AWS IoT SiteWise Monitor resource ID for a portal or project.

    " + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceArn" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "resourceId":{ + "shape":"ResourceId", + "documentation":"

    The ID of the resource that already exists.

    " + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource that already exists.

    " + } + }, + "documentation":"

    The resource already exists.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceArn":{"type":"string"}, + "ResourceId":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The requested resource can't be found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "PORTAL", + "PROJECT" + ] + }, + "SSOApplicationId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[!-~]*" + }, + "ServiceUnavailableException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The requested service is unavailable.

    ", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The ARN of the resource to tag.

    ", + "location":"querystring", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs that contain metadata for the resource. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Your request exceeded a rate limit. For example, you might have exceeded the number of AWS IoT SiteWise assets that can be created per second, the allowed number of messages per second, and so on.

    For more information, see Quotas in the AWS IoT SiteWise User Guide.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "TimeInNanos":{ + "type":"structure", + "required":["timeInSeconds"], + "members":{ + "timeInSeconds":{ + "shape":"TimeInSeconds", + "documentation":"

    The timestamp date, in seconds, in the Unix epoch format. Fractional nanosecond data is provided by offsetInNanos.

    " + }, + "offsetInNanos":{ + "shape":"OffsetInNanos", + "documentation":"

    The nanosecond offset from timeInSeconds.

    " + } + }, + "documentation":"

    Contains a timestamp with optional nanosecond granularity.

    " + }, + "TimeInSeconds":{ + "type":"long", + "max":31556889864403199, + "min":1 + }, + "TimeOrdering":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "Timestamp":{"type":"timestamp"}, + "Timestamps":{ + "type":"list", + "member":{"shape":"TimeInNanos"} + }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"}, + "resourceName":{ + "shape":"AmazonResourceName", + "documentation":"

    The name of the resource with too many tags.

    " + } + }, + "documentation":"

    You've reached the limit for the number of tags allowed for a resource. For more information, see Tag naming limits and requirements in the AWS General Reference.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Transform":{ + "type":"structure", + "required":[ + "expression", + "variables" + ], + "members":{ + "expression":{ + "shape":"Expression", + "documentation":"

    The mathematical expression that defines the transformation function. You can specify up to 10 variables and up to 10 functions per expression.

    For more information, see Quotas in the AWS IoT SiteWise User Guide.

    " + }, + "variables":{ + "shape":"ExpressionVariables", + "documentation":"

    The list of variables used in the expression.

    " + } + }, + "documentation":"

    Contains an asset transform property. A transform is a one-to-one mapping of a property's data points from one form to another. For example, you can use a transform to convert a Celsius data stream to Fahrenheit by applying the transformation expression to each data point of the Celsius stream. A transform can only have a data type of DOUBLE and consume properties with data types of INTEGER or DOUBLE.

    For more information, see Transforms in the AWS IoT SiteWise User Guide.

    " + }, + "TraversalDirection":{ + "type":"string", + "enum":[ + "PARENT", + "CHILD" + ] + }, + "TraversalType":{ + "type":"string", + "enum":["PATH_TO_ROOT"] + }, + "TumblingWindow":{ + "type":"structure", + "required":["interval"], + "members":{ + "interval":{ + "shape":"Interval", + "documentation":"

    The time interval for the tumbling window. Note that w represents weeks, d represents days, h represents hours, and m represents minutes. AWS IoT SiteWise computes the 1w interval at the end of Sunday at midnight each week (UTC), the 1d interval at the end of each day at midnight (UTC), the 1h interval at the end of each hour, and so on.

    When AWS IoT SiteWise aggregates data points for metric computations, the start of each interval is exclusive and the end of each interval is inclusive. AWS IoT SiteWise places the computed data point at the end of the interval.

    " + } + }, + "documentation":"

    Contains a tumbling window, which is a repeating, fixed-size, non-overlapping, and contiguous time interval. This window is used in metric and aggregation computations.

    " + }, + "UnauthorizedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You are not authorized.

    ", + "error":{"httpStatusCode":401}, + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The ARN of the resource to untag.

    ", + "location":"querystring", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    A list of keys for tags to remove from the resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAccessPolicyRequest":{ + "type":"structure", + "required":[ + "accessPolicyId", + "accessPolicyIdentity", + "accessPolicyResource", + "accessPolicyPermission" + ], + "members":{ + "accessPolicyId":{ + "shape":"ID", + "documentation":"

    The ID of the access policy.

    ", + "location":"uri", + "locationName":"accessPolicyId" + }, + "accessPolicyIdentity":{ + "shape":"Identity", + "documentation":"

    The identity for this access policy. Choose an AWS SSO user, an AWS SSO group, or an IAM user.

    " + }, + "accessPolicyResource":{ + "shape":"Resource", + "documentation":"

    The AWS IoT SiteWise Monitor resource for this access policy. Choose either a portal or a project.

    " + }, + "accessPolicyPermission":{ + "shape":"Permission", + "documentation":"

    The permission level for this access policy. Note that a project ADMINISTRATOR is also known as a project owner.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "UpdateAccessPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAssetModelRequest":{ + "type":"structure", + "required":[ + "assetModelId", + "assetModelName" + ], + "members":{ + "assetModelId":{ + "shape":"ID", + "documentation":"

    The ID of the asset model to update.

    ", + "location":"uri", + "locationName":"assetModelId" + }, + "assetModelName":{ + "shape":"Name", + "documentation":"

    A unique, friendly name for the asset model.

    " + }, + "assetModelDescription":{ + "shape":"Description", + "documentation":"

    A description for the asset model.

    " + }, + "assetModelProperties":{ + "shape":"AssetModelProperties", + "documentation":"

    The updated property definitions of the asset model. For more information, see Asset properties in the AWS IoT SiteWise User Guide.

    You can specify up to 200 properties per asset model. For more information, see Quotas in the AWS IoT SiteWise User Guide.

    " + }, + "assetModelHierarchies":{ + "shape":"AssetModelHierarchies", + "documentation":"

    The updated hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see Asset hierarchies in the AWS IoT SiteWise User Guide.

    You can specify up to 10 hierarchies per asset model. For more information, see Quotas in the AWS IoT SiteWise User Guide.

    " + }, + "assetModelCompositeModels":{ + "shape":"AssetModelCompositeModels", + "documentation":"

    The composite asset models that are part of this asset model. Composite asset models are asset models that contain specific properties. Each composite model has a type that defines the properties that the composite model supports. Use composite asset models to define alarms on this asset model.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "UpdateAssetModelResponse":{ + "type":"structure", + "required":["assetModelStatus"], + "members":{ + "assetModelStatus":{ + "shape":"AssetModelStatus", + "documentation":"

    The status of the asset model, which contains a state (UPDATING after successfully calling this operation) and any error message.

    " + } + } + }, + "UpdateAssetPropertyRequest":{ + "type":"structure", + "required":[ + "assetId", + "propertyId" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset to be updated.

    ", + "location":"uri", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

    The ID of the asset property to be updated.

    ", + "location":"uri", + "locationName":"propertyId" + }, + "propertyAlias":{ + "shape":"PropertyAlias", + "documentation":"

    The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping industrial data streams to asset properties in the AWS IoT SiteWise User Guide.

    If you omit this parameter, the alias is removed from the property.

    " + }, + "propertyNotificationState":{ + "shape":"PropertyNotificationState", + "documentation":"

    The MQTT notification state (enabled or disabled) for this asset property. When the notification state is enabled, AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see Interacting with other services in the AWS IoT SiteWise User Guide.

    If you omit this parameter, the notification state is set to DISABLED.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "UpdateAssetRequest":{ + "type":"structure", + "required":[ + "assetId", + "assetName" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

    The ID of the asset to update.

    ", + "location":"uri", + "locationName":"assetId" + }, + "assetName":{ + "shape":"Name", + "documentation":"

    A unique, friendly name for the asset.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "UpdateAssetResponse":{ + "type":"structure", + "required":["assetStatus"], + "members":{ + "assetStatus":{ + "shape":"AssetStatus", + "documentation":"

    The status of the asset, which contains a state (UPDATING after successfully calling this operation) and any error message.

    " + } + } + }, + "UpdateDashboardRequest":{ + "type":"structure", + "required":[ + "dashboardId", + "dashboardName", + "dashboardDefinition" + ], + "members":{ + "dashboardId":{ + "shape":"ID", + "documentation":"

    The ID of the dashboard to update.

    ", + "location":"uri", + "locationName":"dashboardId" + }, + "dashboardName":{ + "shape":"Name", + "documentation":"

    A new friendly name for the dashboard.

    " + }, + "dashboardDescription":{ + "shape":"Description", + "documentation":"

    A new description for the dashboard.

    " + }, + "dashboardDefinition":{ + "shape":"DashboardDefinition", + "documentation":"

    The new dashboard definition, as specified in a JSON literal. For detailed information, see Creating dashboards (CLI) in the AWS IoT SiteWise User Guide.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "UpdateDashboardResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateGatewayCapabilityConfigurationRequest":{ + "type":"structure", + "required":[ + "gatewayId", + "capabilityNamespace", + "capabilityConfiguration" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

    The ID of the gateway to be updated.

    ", + "location":"uri", + "locationName":"gatewayId" + }, + "capabilityNamespace":{ + "shape":"CapabilityNamespace", + "documentation":"

    The namespace of the gateway capability configuration to be updated. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace iotsitewise:opcuacollector:version, where version is a number such as 1.

    " + }, + "capabilityConfiguration":{ + "shape":"CapabilityConfiguration", + "documentation":"

    The JSON document that defines the configuration for the gateway capability. For more information, see Configuring data sources (CLI) in the AWS IoT SiteWise User Guide.

    " + } + } + }, + "UpdateGatewayCapabilityConfigurationResponse":{ + "type":"structure", + "required":[ + "capabilityNamespace", + "capabilitySyncStatus" + ], + "members":{ + "capabilityNamespace":{ + "shape":"CapabilityNamespace", + "documentation":"

    The namespace of the gateway capability.

    " + }, + "capabilitySyncStatus":{ + "shape":"CapabilitySyncStatus", + "documentation":"

    The synchronization status of the capability configuration. The sync status can be one of the following:

    • IN_SYNC – The gateway is running the capability configuration.

    • OUT_OF_SYNC – The gateway hasn't received the capability configuration.

    • SYNC_FAILED – The gateway rejected the capability configuration.

    After you update a capability configuration, its sync status is OUT_OF_SYNC until the gateway receives and applies or rejects the updated configuration.

    " + } + } + }, + "UpdateGatewayRequest":{ + "type":"structure", + "required":[ + "gatewayId", + "gatewayName" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

    The ID of the gateway to update.

    ", + "location":"uri", + "locationName":"gatewayId" + }, + "gatewayName":{ + "shape":"Name", + "documentation":"

    A unique, friendly name for the gateway.

    " + } + } + }, + "UpdatePortalRequest":{ + "type":"structure", + "required":[ + "portalId", + "portalName", + "portalContactEmail", + "roleArn" + ], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

    The ID of the portal to update.

    ", + "location":"uri", + "locationName":"portalId" + }, + "portalName":{ + "shape":"Name", + "documentation":"

    A new friendly name for the portal.

    " + }, + "portalDescription":{ + "shape":"Description", + "documentation":"

    A new description for the portal.

    " + }, + "portalContactEmail":{ + "shape":"Email", + "documentation":"

    The AWS administrator's contact email address.

    " + }, + "portalLogoImage":{"shape":"Image"}, + "roleArn":{ + "shape":"ARN", + "documentation":"

    The ARN of a service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "UpdatePortalResponse":{ + "type":"structure", + "required":["portalStatus"], + "members":{ + "portalStatus":{ + "shape":"PortalStatus", + "documentation":"

    The status of the portal, which contains a state (UPDATING after successfully calling this operation) and any error message.

    " + } + } + }, + "UpdateProjectRequest":{ + "type":"structure", + "required":[ + "projectId", + "projectName" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

    The ID of the project to update.

    ", + "location":"uri", + "locationName":"projectId" + }, + "projectName":{ + "shape":"Name", + "documentation":"

    A new friendly name for the project.

    " + }, + "projectDescription":{ + "shape":"Description", + "documentation":"

    A new description for the project.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

    ", + "idempotencyToken":true + } + } + }, + "UpdateProjectResponse":{ + "type":"structure", + "members":{ + } + }, + "Url":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^(http|https)\\://\\S+" + }, + "UserIdentity":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"IdentityId", + "documentation":"

    The AWS SSO ID of the user.

    " + } + }, + "documentation":"

    Contains information for a user identity in an access policy.

    " + }, + "VariableName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-z][a-z0-9_]*$" + }, + "VariableValue":{ + "type":"structure", + "required":["propertyId"], + "members":{ + "propertyId":{ + "shape":"Macro", + "documentation":"

    The ID of the property to use as the variable. You can use the property name if it's from the same asset model.

    " + }, + "hierarchyId":{ + "shape":"Macro", + "documentation":"

    The ID of the hierarchy to query for the property ID. You can use the hierarchy's name instead of the hierarchy's ID.

    You use a hierarchy ID instead of a model ID because you can have several hierarchies using the same model and therefore the same propertyId. For example, you might have separately grouped assets that come from the same asset model. For more information, see Asset hierarchies in the AWS IoT SiteWise User Guide.

    " + } + }, + "documentation":"

    Identifies a property value used in an expression.

    " + }, + "Variant":{ + "type":"structure", + "members":{ + "stringValue":{ + "shape":"PropertyValueStringValue", + "documentation":"

    Asset property data of type string (sequence of characters).

    " + }, + "integerValue":{ + "shape":"PropertyValueIntegerValue", + "documentation":"

    Asset property data of type integer (whole number).

    " + }, + "doubleValue":{ + "shape":"PropertyValueDoubleValue", + "documentation":"

    Asset property data of type double (floating point number).

    " + }, + "booleanValue":{ + "shape":"PropertyValueBooleanValue", + "documentation":"

    Asset property data of type Boolean (true or false).

    " + } + }, + "documentation":"

    Contains an asset property value (of a single type only).

    " + } + }, + "documentation":"

    Welcome to the AWS IoT SiteWise API Reference. AWS IoT SiteWise is an AWS service that connects Industrial Internet of Things (IIoT) devices to the power of the AWS Cloud. For more information, see the AWS IoT SiteWise User Guide. For information about AWS IoT SiteWise quotas, see Quotas in the AWS IoT SiteWise User Guide.

    " +} diff --git a/services/iotsitewise/src/main/resources/codegen-resources/waiters-2.json b/services/iotsitewise/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..e51df5feaaa9 --- /dev/null +++ b/services/iotsitewise/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,92 @@ +{ + "version": 2, + "waiters": { + "AssetModelNotExists": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribeAssetModel", + "acceptors": [ + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "AssetModelActive": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribeAssetModel", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "assetModelStatus.state", + "expected": "ACTIVE" + }, + { + "state": "failure", + "matcher": "path", + "argument": "assetModelStatus.state", + "expected": "FAILED" + } + ] + }, + "AssetNotExists": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribeAsset", + "acceptors": [ + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "AssetActive": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribeAsset", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "assetStatus.state", + "expected": "ACTIVE" + }, + { + "state": "failure", + "matcher": "path", + "argument": "assetStatus.state", + "expected": "FAILED" + } + ] + }, + "PortalNotExists": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribePortal", + "acceptors": [ + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "PortalActive": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribePortal", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "portalStatus.state", + "expected": "ACTIVE" + } + ] + } + } + } diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index c285f70da821..e389faf44fcd 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + iotwireless + AWS Java SDK :: Services :: IoT Wireless + The AWS Java SDK for IoT Wireless module holds the client classes that are used for + communicating with IoT Wireless. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.iotwireless + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/iotwireless/src/main/resources/codegen-resources/customization.config b/services/iotwireless/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..21b15d9542cb --- /dev/null +++ b/services/iotwireless/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "underscoresInNameBehavior": "ALLOW" +} diff --git a/services/iotwireless/src/main/resources/codegen-resources/paginators-1.json b/services/iotwireless/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..6ebd70afa3c1 --- /dev/null +++ b/services/iotwireless/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,29 @@ +{ + "pagination": { + "ListDestinations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDeviceProfiles": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListServiceProfiles": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListWirelessDevices": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListWirelessGateways": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/iotwireless/src/main/resources/codegen-resources/service-2.json b/services/iotwireless/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..ba38521f4ade --- /dev/null +++ b/services/iotwireless/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3710 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-11-22", + "endpointPrefix":"api.iotwireless", + "protocol":"rest-json", + "serviceFullName":"AWS IoT Wireless", + "serviceId":"IoT Wireless", + "signatureVersion":"v4", + "signingName":"iotwireless", + "uid":"iotwireless-2020-11-22" + }, + "operations":{ + "AssociateAwsAccountWithPartnerAccount":{ + "name":"AssociateAwsAccountWithPartnerAccount", + "http":{ + "method":"POST", + "requestUri":"/partner-accounts" + }, + "input":{"shape":"AssociateAwsAccountWithPartnerAccountRequest"}, + "output":{"shape":"AssociateAwsAccountWithPartnerAccountResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Associates a partner account with your AWS account.

    " + }, + "AssociateWirelessDeviceWithThing":{ + "name":"AssociateWirelessDeviceWithThing", + "http":{ + "method":"PUT", + "requestUri":"/wireless-devices/{Id}/thing", + "responseCode":204 + }, + "input":{"shape":"AssociateWirelessDeviceWithThingRequest"}, + "output":{"shape":"AssociateWirelessDeviceWithThingResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Associates a wireless device with a thing.

    " + }, + "AssociateWirelessGatewayWithCertificate":{ + "name":"AssociateWirelessGatewayWithCertificate", + "http":{ + "method":"PUT", + "requestUri":"/wireless-gateways/{Id}/certificate" + }, + "input":{"shape":"AssociateWirelessGatewayWithCertificateRequest"}, + "output":{"shape":"AssociateWirelessGatewayWithCertificateResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Associates a wireless gateway with a certificate.

    " + }, + "AssociateWirelessGatewayWithThing":{ + "name":"AssociateWirelessGatewayWithThing", + "http":{ + "method":"PUT", + "requestUri":"/wireless-gateways/{Id}/thing", + "responseCode":204 + }, + "input":{"shape":"AssociateWirelessGatewayWithThingRequest"}, + "output":{"shape":"AssociateWirelessGatewayWithThingResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Associates a wireless gateway with a thing.

    " + }, + "CreateDestination":{ + "name":"CreateDestination", + "http":{ + "method":"POST", + "requestUri":"/destinations", + "responseCode":201 + }, + "input":{"shape":"CreateDestinationRequest"}, + "output":{"shape":"CreateDestinationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a new destination that maps a device message to an AWS IoT rule.

    " + }, + "CreateDeviceProfile":{ + "name":"CreateDeviceProfile", + "http":{ + "method":"POST", + "requestUri":"/device-profiles", + "responseCode":201 + }, + "input":{"shape":"CreateDeviceProfileRequest"}, + "output":{"shape":"CreateDeviceProfileResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a new device profile.

    " + }, + "CreateServiceProfile":{ + "name":"CreateServiceProfile", + "http":{ + "method":"POST", + "requestUri":"/service-profiles", + "responseCode":201 + }, + "input":{"shape":"CreateServiceProfileRequest"}, + "output":{"shape":"CreateServiceProfileResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a new service profile.

    " + }, + "CreateWirelessDevice":{ + "name":"CreateWirelessDevice", + "http":{ + "method":"POST", + "requestUri":"/wireless-devices", + "responseCode":201 + }, + "input":{"shape":"CreateWirelessDeviceRequest"}, + "output":{"shape":"CreateWirelessDeviceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Provisions a wireless device.

    " + }, + "CreateWirelessGateway":{ + "name":"CreateWirelessGateway", + "http":{ + "method":"POST", + "requestUri":"/wireless-gateways", + "responseCode":201 + }, + "input":{"shape":"CreateWirelessGatewayRequest"}, + "output":{"shape":"CreateWirelessGatewayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Provisions a wireless gateway.

    " + }, + "CreateWirelessGatewayTask":{ + "name":"CreateWirelessGatewayTask", + "http":{ + "method":"POST", + "requestUri":"/wireless-gateways/{Id}/tasks", + "responseCode":201 + }, + "input":{"shape":"CreateWirelessGatewayTaskRequest"}, + "output":{"shape":"CreateWirelessGatewayTaskResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a task for a wireless gateway.

    " + }, + "CreateWirelessGatewayTaskDefinition":{ + "name":"CreateWirelessGatewayTaskDefinition", + "http":{ + "method":"POST", + "requestUri":"/wireless-gateway-task-definitions", + "responseCode":201 + }, + "input":{"shape":"CreateWirelessGatewayTaskDefinitionRequest"}, + "output":{"shape":"CreateWirelessGatewayTaskDefinitionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a gateway task definition.

    " + }, + "DeleteDestination":{ + "name":"DeleteDestination", + "http":{ + "method":"DELETE", + "requestUri":"/destinations/{Name}", + "responseCode":204 + }, + "input":{"shape":"DeleteDestinationRequest"}, + "output":{"shape":"DeleteDestinationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a destination.

    " + }, + "DeleteDeviceProfile":{ + "name":"DeleteDeviceProfile", + "http":{ + "method":"DELETE", + "requestUri":"/device-profiles/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDeviceProfileRequest"}, + "output":{"shape":"DeleteDeviceProfileResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a device profile.

    " + }, + "DeleteServiceProfile":{ + "name":"DeleteServiceProfile", + "http":{ + "method":"DELETE", + "requestUri":"/service-profiles/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteServiceProfileRequest"}, + "output":{"shape":"DeleteServiceProfileResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a service profile.

    " + }, + "DeleteWirelessDevice":{ + "name":"DeleteWirelessDevice", + "http":{ + "method":"DELETE", + "requestUri":"/wireless-devices/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteWirelessDeviceRequest"}, + "output":{"shape":"DeleteWirelessDeviceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a wireless device.

    " + }, + "DeleteWirelessGateway":{ + "name":"DeleteWirelessGateway", + "http":{ + "method":"DELETE", + "requestUri":"/wireless-gateways/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteWirelessGatewayRequest"}, + "output":{"shape":"DeleteWirelessGatewayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a wireless gateway.

    " + }, + "DeleteWirelessGatewayTask":{ + "name":"DeleteWirelessGatewayTask", + "http":{ + "method":"DELETE", + "requestUri":"/wireless-gateways/{Id}/tasks", + "responseCode":204 + }, + "input":{"shape":"DeleteWirelessGatewayTaskRequest"}, + "output":{"shape":"DeleteWirelessGatewayTaskResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a wireless gateway task.

    " + }, + "DeleteWirelessGatewayTaskDefinition":{ + "name":"DeleteWirelessGatewayTaskDefinition", + "http":{ + "method":"DELETE", + "requestUri":"/wireless-gateway-task-definitions/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteWirelessGatewayTaskDefinitionRequest"}, + "output":{"shape":"DeleteWirelessGatewayTaskDefinitionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a wireless gateway task definition. Deleting this task definition does not affect tasks that are currently in progress.

    " + }, + "DisassociateAwsAccountFromPartnerAccount":{ + "name":"DisassociateAwsAccountFromPartnerAccount", + "http":{ + "method":"DELETE", + "requestUri":"/partner-accounts/{PartnerAccountId}", + "responseCode":204 + }, + "input":{"shape":"DisassociateAwsAccountFromPartnerAccountRequest"}, + "output":{"shape":"DisassociateAwsAccountFromPartnerAccountResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Disassociates your AWS account from a partner account. If PartnerAccountId and PartnerType are null, disassociates your AWS account from all partner accounts.

    " + }, + "DisassociateWirelessDeviceFromThing":{ + "name":"DisassociateWirelessDeviceFromThing", + "http":{ + "method":"DELETE", + "requestUri":"/wireless-devices/{Id}/thing", + "responseCode":204 + }, + "input":{"shape":"DisassociateWirelessDeviceFromThingRequest"}, + "output":{"shape":"DisassociateWirelessDeviceFromThingResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Disassociates a wireless device from its currently associated thing.

    " + }, + "DisassociateWirelessGatewayFromCertificate":{ + "name":"DisassociateWirelessGatewayFromCertificate", + "http":{ + "method":"DELETE", + "requestUri":"/wireless-gateways/{Id}/certificate", + "responseCode":204 + }, + "input":{"shape":"DisassociateWirelessGatewayFromCertificateRequest"}, + "output":{"shape":"DisassociateWirelessGatewayFromCertificateResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Disassociates a wireless gateway from its currently associated certificate.

    " + }, + "DisassociateWirelessGatewayFromThing":{ + "name":"DisassociateWirelessGatewayFromThing", + "http":{ + "method":"DELETE", + "requestUri":"/wireless-gateways/{Id}/thing", + "responseCode":204 + }, + "input":{"shape":"DisassociateWirelessGatewayFromThingRequest"}, + "output":{"shape":"DisassociateWirelessGatewayFromThingResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Disassociates a wireless gateway from its currently associated thing.

    " + }, + "GetDestination":{ + "name":"GetDestination", + "http":{ + "method":"GET", + "requestUri":"/destinations/{Name}" + }, + "input":{"shape":"GetDestinationRequest"}, + "output":{"shape":"GetDestinationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets information about a destination.

    " + }, + "GetDeviceProfile":{ + "name":"GetDeviceProfile", + "http":{ + "method":"GET", + "requestUri":"/device-profiles/{Id}" + }, + "input":{"shape":"GetDeviceProfileRequest"}, + "output":{"shape":"GetDeviceProfileResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets information about a device profile.

    " + }, + "GetPartnerAccount":{ + "name":"GetPartnerAccount", + "http":{ + "method":"GET", + "requestUri":"/partner-accounts/{PartnerAccountId}" + }, + "input":{"shape":"GetPartnerAccountRequest"}, + "output":{"shape":"GetPartnerAccountResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets information about a partner account. If PartnerAccountId and PartnerType are null, returns all partner accounts.

    " + }, + "GetServiceEndpoint":{ + "name":"GetServiceEndpoint", + "http":{ + "method":"GET", + "requestUri":"/service-endpoint" + }, + "input":{"shape":"GetServiceEndpointRequest"}, + "output":{"shape":"GetServiceEndpointResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets the account-specific endpoint for Configuration and Update Server (CUPS) protocol or LoRaWAN Network Server (LNS) connections.

    " + }, + "GetServiceProfile":{ + "name":"GetServiceProfile", + "http":{ + "method":"GET", + "requestUri":"/service-profiles/{Id}" + }, + "input":{"shape":"GetServiceProfileRequest"}, + "output":{"shape":"GetServiceProfileResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets information about a service profile.

    " + }, + "GetWirelessDevice":{ + "name":"GetWirelessDevice", + "http":{ + "method":"GET", + "requestUri":"/wireless-devices/{Identifier}" + }, + "input":{"shape":"GetWirelessDeviceRequest"}, + "output":{"shape":"GetWirelessDeviceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets information about a wireless device.

    " + }, + "GetWirelessDeviceStatistics":{ + "name":"GetWirelessDeviceStatistics", + "http":{ + "method":"GET", + "requestUri":"/wireless-devices/{Id}/statistics", + "responseCode":200 + }, + "input":{"shape":"GetWirelessDeviceStatisticsRequest"}, + "output":{"shape":"GetWirelessDeviceStatisticsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets operating information about a wireless device.

    " + }, + "GetWirelessGateway":{ + "name":"GetWirelessGateway", + "http":{ + "method":"GET", + "requestUri":"/wireless-gateways/{Identifier}" + }, + "input":{"shape":"GetWirelessGatewayRequest"}, + "output":{"shape":"GetWirelessGatewayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets information about a wireless gateway.

    " + }, + "GetWirelessGatewayCertificate":{ + "name":"GetWirelessGatewayCertificate", + "http":{ + "method":"GET", + "requestUri":"/wireless-gateways/{Id}/certificate" + }, + "input":{"shape":"GetWirelessGatewayCertificateRequest"}, + "output":{"shape":"GetWirelessGatewayCertificateResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets the ID of the certificate that is currently associated with a wireless gateway.

    " + }, + "GetWirelessGatewayFirmwareInformation":{ + "name":"GetWirelessGatewayFirmwareInformation", + "http":{ + "method":"GET", + "requestUri":"/wireless-gateways/{Id}/firmware-information" + }, + "input":{"shape":"GetWirelessGatewayFirmwareInformationRequest"}, + "output":{"shape":"GetWirelessGatewayFirmwareInformationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets the firmware version and other information about a wireless gateway.

    " + }, + "GetWirelessGatewayStatistics":{ + "name":"GetWirelessGatewayStatistics", + "http":{ + "method":"GET", + "requestUri":"/wireless-gateways/{Id}/statistics", + "responseCode":200 + }, + "input":{"shape":"GetWirelessGatewayStatisticsRequest"}, + "output":{"shape":"GetWirelessGatewayStatisticsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets operating information about a wireless gateway.

    " + }, + "GetWirelessGatewayTask":{ + "name":"GetWirelessGatewayTask", + "http":{ + "method":"GET", + "requestUri":"/wireless-gateways/{Id}/tasks" + }, + "input":{"shape":"GetWirelessGatewayTaskRequest"}, + "output":{"shape":"GetWirelessGatewayTaskResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets information about a wireless gateway task.

    " + }, + "GetWirelessGatewayTaskDefinition":{ + "name":"GetWirelessGatewayTaskDefinition", + "http":{ + "method":"GET", + "requestUri":"/wireless-gateway-task-definitions/{Id}" + }, + "input":{"shape":"GetWirelessGatewayTaskDefinitionRequest"}, + "output":{"shape":"GetWirelessGatewayTaskDefinitionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Gets information about a wireless gateway task definition.

    " + }, + "ListDestinations":{ + "name":"ListDestinations", + "http":{ + "method":"GET", + "requestUri":"/destinations" + }, + "input":{"shape":"ListDestinationsRequest"}, + "output":{"shape":"ListDestinationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists the destinations registered to your AWS account.

    " + }, + "ListDeviceProfiles":{ + "name":"ListDeviceProfiles", + "http":{ + "method":"GET", + "requestUri":"/device-profiles" + }, + "input":{"shape":"ListDeviceProfilesRequest"}, + "output":{"shape":"ListDeviceProfilesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists the device profiles registered to your AWS account.

    " + }, + "ListPartnerAccounts":{ + "name":"ListPartnerAccounts", + "http":{ + "method":"GET", + "requestUri":"/partner-accounts" + }, + "input":{"shape":"ListPartnerAccountsRequest"}, + "output":{"shape":"ListPartnerAccountsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists the partner accounts associated with your AWS account.

    " + }, + "ListServiceProfiles":{ + "name":"ListServiceProfiles", + "http":{ + "method":"GET", + "requestUri":"/service-profiles" + }, + "input":{"shape":"ListServiceProfilesRequest"}, + "output":{"shape":"ListServiceProfilesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists the service profiles registered to your AWS account.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists the tags (metadata) you have assigned to the resource.

    " + }, + "ListWirelessDevices":{ + "name":"ListWirelessDevices", + "http":{ + "method":"GET", + "requestUri":"/wireless-devices" + }, + "input":{"shape":"ListWirelessDevicesRequest"}, + "output":{"shape":"ListWirelessDevicesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the wireless devices registered to your AWS account.

    " + }, + "ListWirelessGatewayTaskDefinitions":{ + "name":"ListWirelessGatewayTaskDefinitions", + "http":{ + "method":"GET", + "requestUri":"/wireless-gateway-task-definitions" + }, + "input":{"shape":"ListWirelessGatewayTaskDefinitionsRequest"}, + "output":{"shape":"ListWirelessGatewayTaskDefinitionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists the wireless gateway task definitions registered to your AWS account.

    " + }, + "ListWirelessGateways":{ + "name":"ListWirelessGateways", + "http":{ + "method":"GET", + "requestUri":"/wireless-gateways" + }, + "input":{"shape":"ListWirelessGatewaysRequest"}, + "output":{"shape":"ListWirelessGatewaysResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the wireless gateways registered to your AWS account.

    " + }, + "SendDataToWirelessDevice":{ + "name":"SendDataToWirelessDevice", + "http":{ + "method":"POST", + "requestUri":"/wireless-devices/{Id}/data", + "responseCode":202 + }, + "input":{"shape":"SendDataToWirelessDeviceRequest"}, + "output":{"shape":"SendDataToWirelessDeviceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Sends a decrypted application data frame to a device.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"TooManyTagsException"} + ], + "documentation":"

    Adds a tag to a resource.

    " + }, + "TestWirelessDevice":{ + "name":"TestWirelessDevice", + "http":{ + "method":"POST", + "requestUri":"/wireless-devices/{Id}/test", + "responseCode":200 + }, + "input":{"shape":"TestWirelessDeviceRequest"}, + "output":{"shape":"TestWirelessDeviceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Simulates a provisioned device by sending an uplink data payload of Hello.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Removes one or more tags from a resource.

    " + }, + "UpdateDestination":{ + "name":"UpdateDestination", + "http":{ + "method":"PATCH", + "requestUri":"/destinations/{Name}", + "responseCode":204 + }, + "input":{"shape":"UpdateDestinationRequest"}, + "output":{"shape":"UpdateDestinationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Updates properties of a destination.

    " + }, + "UpdatePartnerAccount":{ + "name":"UpdatePartnerAccount", + "http":{ + "method":"PATCH", + "requestUri":"/partner-accounts/{PartnerAccountId}", + "responseCode":204 + }, + "input":{"shape":"UpdatePartnerAccountRequest"}, + "output":{"shape":"UpdatePartnerAccountResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Updates properties of a partner account.

    " + }, + "UpdateWirelessDevice":{ + "name":"UpdateWirelessDevice", + "http":{ + "method":"PATCH", + "requestUri":"/wireless-devices/{Id}", + "responseCode":204 + }, + "input":{"shape":"UpdateWirelessDeviceRequest"}, + "output":{"shape":"UpdateWirelessDeviceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Updates properties of a wireless device.

    " + }, + "UpdateWirelessGateway":{ + "name":"UpdateWirelessGateway", + "http":{ + "method":"PATCH", + "requestUri":"/wireless-gateways/{Id}", + "responseCode":204 + }, + "input":{"shape":"UpdateWirelessGatewayRequest"}, + "output":{"shape":"UpdateWirelessGatewayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Updates properties of a wireless gateway.

    " + } + }, + "shapes":{ + "AbpV1_0_x":{ + "type":"structure", + "members":{ + "DevAddr":{ + "shape":"DevAddr", + "documentation":"

    The DevAddr value.

    " + }, + "SessionKeys":{ + "shape":"SessionKeysAbpV1_0_x", + "documentation":"

    Session keys for ABP v1.0.x.

    " + } + }, + "documentation":"

    ABP device object for LoRaWAN specification v1.0.x.

    " + }, + "AbpV1_1":{ + "type":"structure", + "members":{ + "DevAddr":{ + "shape":"DevAddr", + "documentation":"

    The DevAddr value.

    " + }, + "SessionKeys":{ + "shape":"SessionKeysAbpV1_1", + "documentation":"

    Session keys for ABP v1.1.

    " + } + }, + "documentation":"

    ABP device object for LoRaWAN specification v1.1.

    " + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    User does not have permission to perform this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccountLinked":{"type":"boolean"}, + "AddGwMetadata":{"type":"boolean"}, + "AmazonId":{ + "type":"string", + "max":2048 + }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, + "AppEui":{ + "type":"string", + "pattern":"[a-fA-F0-9]{16}" + }, + "AppKey":{ + "type":"string", + "pattern":"[a-fA-F0-9]{32}" + }, + "AppSKey":{ + "type":"string", + "pattern":"[a-fA-F0-9]{32}" + }, + "AppServerPrivateKey":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"[a-fA-F0-9]{64}", + "sensitive":true + }, + "AssociateAwsAccountWithPartnerAccountRequest":{ + "type":"structure", + "required":["Sidewalk"], + "members":{ + "Sidewalk":{ + "shape":"SidewalkAccountInfo", + "documentation":"

    The Sidewalk account credentials.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Each resource must have a unique client request token. If you try to create a new resource with the same token as a resource that already exists, an exception occurs. If you omit this value, AWS SDKs will automatically generate a unique client request token.

    ", + "idempotencyToken":true + } + } + }, + "AssociateAwsAccountWithPartnerAccountResponse":{ + "type":"structure", + "members":{ + "Sidewalk":{ + "shape":"SidewalkAccountInfo", + "documentation":"

    The Sidewalk account credentials.

    " + } + } + }, + "AssociateWirelessDeviceWithThingRequest":{ + "type":"structure", + "required":[ + "Id", + "ThingArn" + ], + "members":{ + "Id":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the resource to update.

    ", + "location":"uri", + "locationName":"Id" + }, + "ThingArn":{ + "shape":"ThingArn", + "documentation":"

    The ARN of the thing to associate with the wireless device.

    " + } + } + }, + "AssociateWirelessDeviceWithThingResponse":{ + "type":"structure", + "members":{ + } + }, + "AssociateWirelessGatewayWithCertificateRequest":{ + "type":"structure", + "required":[ + "Id", + "IotCertificateId" + ], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to update.

    ", + "location":"uri", + "locationName":"Id" + }, + "IotCertificateId":{ + "shape":"IotCertificateId", + "documentation":"

    The ID of the certificate to associate with the wireless gateway.

    " + } + } + }, + "AssociateWirelessGatewayWithCertificateResponse":{ + "type":"structure", + "members":{ + "IotCertificateId":{ + "shape":"IotCertificateId", + "documentation":"

    The ID of the certificate associated with the wireless gateway.

    " + } + } + }, + "AssociateWirelessGatewayWithThingRequest":{ + "type":"structure", + "required":[ + "Id", + "ThingArn" + ], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to update.

    ", + "location":"uri", + "locationName":"Id" + }, + "ThingArn":{ + "shape":"ThingArn", + "documentation":"

    The ARN of the thing to associate with the wireless gateway.

    " + } + } + }, + "AssociateWirelessGatewayWithThingResponse":{ + "type":"structure", + "members":{ + } + }, + "AutoCreateTasks":{"type":"boolean"}, + "CertificatePEM":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"[^-A-Za-z0-9+/=]|=[^=]|={3,}${1,4096}" + }, + "ChannelMask":{ + "type":"string", + "max":2048 + }, + "ClassBTimeout":{ + "type":"integer", + "max":1000, + "min":0 + }, + "ClassCTimeout":{ + "type":"integer", + "max":1000, + "min":0 + }, + "ClientRequestToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-_]+$" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"}, + "ResourceId":{"shape":"ResourceId"}, + "ResourceType":{"shape":"ResourceType"} + }, + "documentation":"

    Adding, updating, or deleting the resource can cause an inconsistent state.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "Crc":{ + "type":"long", + "max":4294967295, + "min":1 + }, + "CreateDestinationRequest":{ + "type":"structure", + "required":[ + "Name", + "ExpressionType", + "Expression", + "RoleArn" + ], + "members":{ + "Name":{ + "shape":"DestinationName", + "documentation":"

    The name of the new resource.

    " + }, + "ExpressionType":{ + "shape":"ExpressionType", + "documentation":"

    The type of value in Expression.

    " + }, + "Expression":{ + "shape":"Expression", + "documentation":"

    The rule name or topic rule to send messages to.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the new resource.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of the IAM Role that authorizes the destination.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to attach to the new destination. Tags are metadata that can be used to manage a resource.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Each resource must have a unique client request token. If you try to create a new resource with the same token as a resource that already exists, an exception occurs. If you omit this value, AWS SDKs will automatically generate a unique client request token.

    ", + "idempotencyToken":true + } + } + }, + "CreateDestinationResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"DestinationArn", + "documentation":"

    The Amazon Resource Name of the new resource.

    " + }, + "Name":{ + "shape":"DestinationName", + "documentation":"

    The name of the new resource.

    " + } + } + }, + "CreateDeviceProfileRequest":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"DeviceProfileName", + "documentation":"

    The name of the new resource.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANDeviceProfile", + "documentation":"

    The device profile information to use to create the device profile.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to attach to the new device profile. Tags are metadata that can be used to manage a resource.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Each resource must have a unique client request token. If you try to create a new resource with the same token as a resource that already exists, an exception occurs. If you omit this value, AWS SDKs will automatically generate a unique client request token.

    ", + "idempotencyToken":true + } + } + }, + "CreateDeviceProfileResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"DeviceProfileArn", + "documentation":"

    The Amazon Resource Name of the new resource.

    " + }, + "Id":{ + "shape":"DeviceProfileId", + "documentation":"

    The ID of the new device profile.

    " + } + } + }, + "CreateServiceProfileRequest":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ServiceProfileName", + "documentation":"

    The name of the new resource.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANServiceProfile", + "documentation":"

    The service profile information to use to create the service profile.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to attach to the new service profile. Tags are metadata that can be used to manage a resource.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Each resource must have a unique client request token. If you try to create a new resource with the same token as a resource that already exists, an exception occurs. If you omit this value, AWS SDKs will automatically generate a unique client request token.

    ", + "idempotencyToken":true + } + } + }, + "CreateServiceProfileResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ServiceProfileArn", + "documentation":"

    The Amazon Resource Name of the new resource.

    " + }, + "Id":{ + "shape":"ServiceProfileId", + "documentation":"

    The ID of the new service profile.

    " + } + } + }, + "CreateWirelessDeviceRequest":{ + "type":"structure", + "required":[ + "Type", + "DestinationName" + ], + "members":{ + "Type":{ + "shape":"WirelessDeviceType", + "documentation":"

    The wireless device type.

    " + }, + "Name":{ + "shape":"WirelessDeviceName", + "documentation":"

    The name of the new resource.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the new resource.

    " + }, + "DestinationName":{ + "shape":"DestinationName", + "documentation":"

    The name of the destination to assign to the new wireless device.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Each resource must have a unique client request token. If you try to create a new resource with the same token as a resource that already exists, an exception occurs. If you omit this value, AWS SDKs will automatically generate a unique client request token.

    ", + "idempotencyToken":true + }, + "LoRaWAN":{ + "shape":"LoRaWANDevice", + "documentation":"

    The device configuration information to use to create the wireless device.

    " + } + } + }, + "CreateWirelessDeviceResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"WirelessDeviceArn", + "documentation":"

    The Amazon Resource Name of the new resource.

    " + }, + "Id":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the new wireless device.

    " + } + } + }, + "CreateWirelessGatewayRequest":{ + "type":"structure", + "required":["LoRaWAN"], + "members":{ + "Name":{ + "shape":"WirelessGatewayName", + "documentation":"

    The name of the new resource.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the new resource.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANGateway", + "documentation":"

    The gateway configuration information to use to create the wireless gateway.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to attach to the new wireless gateway. Tags are metadata that can be used to manage a resource.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Each resource must have a unique client request token. If you try to create a new resource with the same token as a resource that already exists, an exception occurs. If you omit this value, AWS SDKs will automatically generate a unique client request token.

    ", + "idempotencyToken":true + } + } + }, + "CreateWirelessGatewayResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"WirelessGatewayArn", + "documentation":"

    The Amazon Resource Name of the new resource.

    " + }, + "Id":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the new wireless gateway.

    " + } + } + }, + "CreateWirelessGatewayTaskDefinitionRequest":{ + "type":"structure", + "required":["AutoCreateTasks"], + "members":{ + "AutoCreateTasks":{ + "shape":"AutoCreateTasks", + "documentation":"

    Whether to automatically create tasks using this task definition for all gateways with the specified current version. If false, the task must be created by calling CreateWirelessGatewayTask.

    " + }, + "Name":{ + "shape":"WirelessGatewayTaskName", + "documentation":"

    The name of the new resource.

    " + }, + "Update":{ + "shape":"UpdateWirelessGatewayTaskCreate", + "documentation":"

    Information about the gateways to update.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Each resource must have a unique client request token. If you try to create a new resource with the same token as a resource that already exists, an exception occurs. If you omit this value, AWS SDKs will automatically generate a unique client request token.

    ", + "idempotencyToken":true + } + } + }, + "CreateWirelessGatewayTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"WirelessGatewayTaskDefinitionId", + "documentation":"

    The ID of the new wireless gateway task definition.

    " + } + } + }, + "CreateWirelessGatewayTaskRequest":{ + "type":"structure", + "required":[ + "Id", + "WirelessGatewayTaskDefinitionId" + ], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to update.

    ", + "location":"uri", + "locationName":"Id" + }, + "WirelessGatewayTaskDefinitionId":{ + "shape":"WirelessGatewayTaskDefinitionId", + "documentation":"

    The ID of the WirelessGatewayTaskDefinition.

    " + } + } + }, + "CreateWirelessGatewayTaskResponse":{ + "type":"structure", + "members":{ + "WirelessGatewayTaskDefinitionId":{ + "shape":"WirelessGatewayTaskDefinitionId", + "documentation":"

    The ID of the WirelessGatewayTaskDefinition.

    " + }, + "Status":{ + "shape":"WirelessGatewayTaskStatus", + "documentation":"

    The status of the request.

    " + } + } + }, + "DeleteDestinationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DestinationName", + "documentation":"

    The name of the resource to delete.

    ", + "location":"uri", + "locationName":"Name" + } + } + }, + "DeleteDestinationResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteDeviceProfileRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"DeviceProfileId", + "documentation":"

    The ID of the resource to delete.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteDeviceProfileResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteServiceProfileRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ServiceProfileId", + "documentation":"

    The ID of the resource to delete.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteServiceProfileResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteWirelessDeviceRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the resource to delete.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteWirelessDeviceResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteWirelessGatewayRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to delete.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteWirelessGatewayResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteWirelessGatewayTaskDefinitionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessGatewayTaskDefinitionId", + "documentation":"

    The ID of the resource to delete.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteWirelessGatewayTaskDefinitionResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteWirelessGatewayTaskRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to delete.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteWirelessGatewayTaskResponse":{ + "type":"structure", + "members":{ + } + }, + "Description":{ + "type":"string", + "max":2048 + }, + "DestinationArn":{"type":"string"}, + "DestinationList":{ + "type":"list", + "member":{"shape":"Destinations"} + }, + "DestinationName":{ + "type":"string", + "max":128, + "pattern":"[a-zA-Z0-9-_]+" + }, + "Destinations":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"DestinationArn", + "documentation":"

    The Amazon Resource Name of the resource.

    " + }, + "Name":{ + "shape":"DestinationName", + "documentation":"

    The name of the resource.

    " + }, + "ExpressionType":{ + "shape":"ExpressionType", + "documentation":"

    The type of value in Expression.

    " + }, + "Expression":{ + "shape":"Expression", + "documentation":"

    The rule name or topic rule to send messages to.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the resource.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of the IAM Role that authorizes the destination.

    " + } + }, + "documentation":"

    Describes a destination.

    " + }, + "DevAddr":{ + "type":"string", + "pattern":"[a-fA-F0-9]{8}" + }, + "DevEui":{ + "type":"string", + "pattern":"[a-fA-F0-9]{16}" + }, + "DevStatusReqFreq":{ + "type":"integer", + "max":2147483647, + "min":0 + }, + "DeviceProfile":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"DeviceProfileArn", + "documentation":"

    The Amazon Resource Name of the resource.

    " + }, + "Name":{ + "shape":"DeviceProfileName", + "documentation":"

    The name of the resource.

    " + }, + "Id":{ + "shape":"DeviceProfileId", + "documentation":"

    The ID of the device profile.

    " + } + }, + "documentation":"

    Describes a device profile.

    " + }, + "DeviceProfileArn":{"type":"string"}, + "DeviceProfileId":{ + "type":"string", + "max":256 + }, + "DeviceProfileList":{ + "type":"list", + "member":{"shape":"DeviceProfile"} + }, + "DeviceProfileName":{ + "type":"string", + "max":256 + }, + "DisassociateAwsAccountFromPartnerAccountRequest":{ + "type":"structure", + "required":[ + "PartnerAccountId", + "PartnerType" + ], + "members":{ + "PartnerAccountId":{ + "shape":"PartnerAccountId", + "documentation":"

    The partner account ID to disassociate from the AWS account.

    ", + "location":"uri", + "locationName":"PartnerAccountId" + }, + "PartnerType":{ + "shape":"PartnerType", + "documentation":"

    The partner type.

    ", + "location":"querystring", + "locationName":"partnerType" + } + } + }, + "DisassociateAwsAccountFromPartnerAccountResponse":{ + "type":"structure", + "members":{ + } + }, + "DisassociateWirelessDeviceFromThingRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the resource to update.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DisassociateWirelessDeviceFromThingResponse":{ + "type":"structure", + "members":{ + } + }, + "DisassociateWirelessGatewayFromCertificateRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to update.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DisassociateWirelessGatewayFromCertificateResponse":{ + "type":"structure", + "members":{ + } + }, + "DisassociateWirelessGatewayFromThingRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to update.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "DisassociateWirelessGatewayFromThingResponse":{ + "type":"structure", + "members":{ + } + }, + "DlBucketSize":{ + "type":"integer", + "max":2147483647, + "min":0 + }, + "DlRate":{ + "type":"integer", + "max":2147483647, + "min":0 + }, + "DlRatePolicy":{ + "type":"string", + "max":256 + }, + "Double":{"type":"double"}, + "DrMax":{ + "type":"integer", + "max":15, + "min":0 + }, + "DrMin":{ + "type":"integer", + "max":15, + "min":0 + }, + "EndPoint":{ + "type":"string", + "max":256, + "min":1 + }, + "Expression":{ + "type":"string", + "max":2048 + }, + "ExpressionType":{ + "type":"string", + "enum":["RuleName"] + }, + "FNwkSIntKey":{ + "type":"string", + "pattern":"[a-fA-F0-9]{32}" + }, + "FPort":{ + "type":"integer", + "max":223, + "min":1 + }, + "FactoryPresetFreqsList":{ + "type":"list", + "member":{"shape":"PresetFreq"}, + "max":20, + "min":0 + }, + "Fingerprint":{ + "type":"string", + "max":64, + "min":64, + "pattern":"[a-fA-F0-9]{64}", + "sensitive":true + }, + "GatewayEui":{ + "type":"string", + "pattern":"^(([0-9A-Fa-f]{2}-){7}|([0-9A-Fa-f]{2}:){7}|([0-9A-Fa-f]{2}\\s){7}|([0-9A-Fa-f]{2}){7})([0-9A-Fa-f]{2})$" + }, + "GetDestinationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DestinationName", + "documentation":"

    The name of the resource to get.

    ", + "location":"uri", + "locationName":"Name" + } + } + }, + "GetDestinationResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"DestinationArn", + "documentation":"

    The Amazon Resource Name of the resource.

    " + }, + "Name":{ + "shape":"DestinationName", + "documentation":"

    The name of the resource.

    " + }, + "Expression":{ + "shape":"Expression", + "documentation":"

    The rule name or topic rule to send messages to.

    " + }, + "ExpressionType":{ + "shape":"ExpressionType", + "documentation":"

    The type of value in Expression.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the resource.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of the IAM Role that authorizes the destination.
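    For orientation, here is a minimal sketch of reading these fields back with the AWS SDK for Java v2. It assumes the client generated from this model is IotWirelessClient in software.amazon.awssdk.services.iotwireless and follows the SDK's usual builder and fluent-getter naming; the destination name is a hypothetical placeholder.

import software.amazon.awssdk.services.iotwireless.IotWirelessClient;
import software.amazon.awssdk.services.iotwireless.model.GetDestinationRequest;
import software.amazon.awssdk.services.iotwireless.model.GetDestinationResponse;

public class DescribeDestination {
    public static void main(String[] args) {
        try (IotWirelessClient client = IotWirelessClient.create()) {
            GetDestinationResponse dest = client.getDestination(
                    GetDestinationRequest.builder().name("uplink-destination").build());
            // ExpressionType is "RuleName" in this model, so Expression names the IoT rule
            // that uplinks routed to this destination are sent to.
            System.out.printf("%s -> %s (%s), role %s%n",
                    dest.name(), dest.expression(), dest.expressionTypeAsString(), dest.roleArn());
        }
    }
}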

    " + } + } + }, + "GetDeviceProfileRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"DeviceProfileId", + "documentation":"

    The ID of the resource to get.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDeviceProfileResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"DeviceProfileArn", + "documentation":"

    The Amazon Resource Name of the resource.

    " + }, + "Name":{ + "shape":"DeviceProfileName", + "documentation":"

    The name of the resource.

    " + }, + "Id":{ + "shape":"DeviceProfileId", + "documentation":"

    The ID of the device profile.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANDeviceProfile", + "documentation":"

    Information about the device profile.

    " + } + } + }, + "GetPartnerAccountRequest":{ + "type":"structure", + "required":[ + "PartnerAccountId", + "PartnerType" + ], + "members":{ + "PartnerAccountId":{ + "shape":"PartnerAccountId", + "documentation":"

    The partner account ID.

    ", + "location":"uri", + "locationName":"PartnerAccountId" + }, + "PartnerType":{ + "shape":"PartnerType", + "documentation":"

    The partner type.

    ", + "location":"querystring", + "locationName":"partnerType" + } + } + }, + "GetPartnerAccountResponse":{ + "type":"structure", + "members":{ + "Sidewalk":{ + "shape":"SidewalkAccountInfoWithFingerprint", + "documentation":"

    The Sidewalk account credentials.

    " + }, + "AccountLinked":{ + "shape":"AccountLinked", + "documentation":"

    Whether the partner account is linked to the AWS account.

    " + } + } + }, + "GetServiceEndpointRequest":{ + "type":"structure", + "members":{ + "ServiceType":{ + "shape":"WirelessGatewayServiceType", + "documentation":"

    The service type for which to get endpoint information. Can be CUPS for the Configuration and Update Server endpoint or LNS for the LoRaWAN Network Server endpoint.
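    As a hedged illustration, the sketch below requests the LNS endpoint with the generated v2 client; the IotWirelessClient class and method names assume the SDK's standard code-generation conventions for this model.

import software.amazon.awssdk.services.iotwireless.IotWirelessClient;
import software.amazon.awssdk.services.iotwireless.model.GetServiceEndpointRequest;
import software.amazon.awssdk.services.iotwireless.model.GetServiceEndpointResponse;

public class GetLnsEndpoint {
    public static void main(String[] args) {
        try (IotWirelessClient client = IotWirelessClient.create()) {
            // "LNS" requests the LoRaWAN Network Server endpoint; use "CUPS" for the
            // Configuration and Update Server endpoint.
            GetServiceEndpointResponse resp = client.getServiceEndpoint(
                    GetServiceEndpointRequest.builder().serviceType("LNS").build());
            System.out.println(resp.serviceType() + " endpoint: " + resp.serviceEndpoint());
            System.out.println("Server trust (root CA): " + resp.serverTrust());
        }
    }
}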

    ", + "location":"querystring", + "locationName":"serviceType" + } + } + }, + "GetServiceEndpointResponse":{ + "type":"structure", + "members":{ + "ServiceType":{ + "shape":"WirelessGatewayServiceType", + "documentation":"

    The endpoint's service type.

    " + }, + "ServiceEndpoint":{ + "shape":"EndPoint", + "documentation":"

    The service endpoint value.

    " + }, + "ServerTrust":{ + "shape":"CertificatePEM", + "documentation":"

    The Root CA of the server trust certificate.

    " + } + } + }, + "GetServiceProfileRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ServiceProfileId", + "documentation":"

    The ID of the resource to get.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetServiceProfileResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ServiceProfileArn", + "documentation":"

    The Amazon Resource Name of the resource.

    " + }, + "Name":{ + "shape":"ServiceProfileName", + "documentation":"

    The name of the resource.

    " + }, + "Id":{ + "shape":"ServiceProfileId", + "documentation":"

    The ID of the service profile.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANGetServiceProfileInfo", + "documentation":"

    Information about the service profile.

    " + } + } + }, + "GetWirelessDeviceRequest":{ + "type":"structure", + "required":[ + "Identifier", + "IdentifierType" + ], + "members":{ + "Identifier":{ + "shape":"Identifier", + "documentation":"

    The identifier of the wireless device to get.

    ", + "location":"uri", + "locationName":"Identifier" + }, + "IdentifierType":{ + "shape":"WirelessDeviceIdType", + "documentation":"

    The type of identifier used in the identifier field.

    ", + "location":"querystring", + "locationName":"identifierType" + } + } + }, + "GetWirelessDeviceResponse":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"WirelessDeviceType", + "documentation":"

    The wireless device type.

    " + }, + "Name":{ + "shape":"WirelessDeviceName", + "documentation":"

    The name of the resource.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the resource.

    " + }, + "DestinationName":{ + "shape":"DestinationName", + "documentation":"

    The name of the destination to which the device is assigned.

    " + }, + "Id":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the wireless device.

    " + }, + "Arn":{ + "shape":"WirelessDeviceArn", + "documentation":"

    The Amazon Resource Name of the resource.

    " + }, + "ThingName":{ + "shape":"ThingName", + "documentation":"

    The name of the thing associated with the wireless device. The value is empty if a thing isn't associated with the device.

    " + }, + "ThingArn":{ + "shape":"ThingArn", + "documentation":"

    The ARN of the thing associated with the wireless device.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANDevice", + "documentation":"

    Information about the wireless device.

    " + } + } + }, + "GetWirelessDeviceStatisticsRequest":{ + "type":"structure", + "required":["WirelessDeviceId"], + "members":{ + "WirelessDeviceId":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the wireless device for which to get the data.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetWirelessDeviceStatisticsResponse":{ + "type":"structure", + "members":{ + "WirelessDeviceId":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the wireless device.

    " + }, + "LastUplinkReceivedAt":{ + "shape":"ISODateTimeString", + "documentation":"

    The date and time when the most recent uplink was received.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANDeviceMetadata", + "documentation":"

    Information about the wireless device's operations.
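    As an illustration, the following sketch prints the most recent uplink metadata. It assumes standard codegen naming (getters such as fPort() and gateways() derived from the member names above), and the device ID is a placeholder.

import software.amazon.awssdk.services.iotwireless.IotWirelessClient;
import software.amazon.awssdk.services.iotwireless.model.GetWirelessDeviceStatisticsRequest;
import software.amazon.awssdk.services.iotwireless.model.GetWirelessDeviceStatisticsResponse;
import software.amazon.awssdk.services.iotwireless.model.LoRaWANGatewayMetadata;

public class PrintDeviceStats {
    public static void main(String[] args) {
        try (IotWirelessClient client = IotWirelessClient.create()) {
            GetWirelessDeviceStatisticsResponse stats = client.getWirelessDeviceStatistics(
                    GetWirelessDeviceStatisticsRequest.builder()
                            .wirelessDeviceId("device-id")   // hypothetical device ID
                            .build());
            System.out.println("Last uplink: " + stats.lastUplinkReceivedAt());
            if (stats.loRaWAN() != null) {
                System.out.println("FPort=" + stats.loRaWAN().fPort()
                        + " DR=" + stats.loRaWAN().dataRate()
                        + " freq=" + stats.loRaWAN().frequency() + " Hz");
                for (LoRaWANGatewayMetadata gw : stats.loRaWAN().gateways()) {
                    System.out.println("  gateway " + gw.gatewayEui()
                            + " RSSI=" + gw.rssi() + " SNR=" + gw.snr());
                }
            }
        }
    }
}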

    " + } + } + }, + "GetWirelessGatewayCertificateRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to get.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetWirelessGatewayCertificateResponse":{ + "type":"structure", + "members":{ + "IotCertificateId":{ + "shape":"IotCertificateId", + "documentation":"

    The ID of the certificate associated with the wireless gateway.

    " + } + } + }, + "GetWirelessGatewayFirmwareInformationRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to get.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetWirelessGatewayFirmwareInformationResponse":{ + "type":"structure", + "members":{ + "LoRaWAN":{ + "shape":"LoRaWANGatewayCurrentVersion", + "documentation":"

    Information about the wireless gateway's firmware.

    " + } + } + }, + "GetWirelessGatewayRequest":{ + "type":"structure", + "required":[ + "Identifier", + "IdentifierType" + ], + "members":{ + "Identifier":{ + "shape":"Identifier", + "documentation":"

    The identifier of the wireless gateway to get.

    ", + "location":"uri", + "locationName":"Identifier" + }, + "IdentifierType":{ + "shape":"WirelessGatewayIdType", + "documentation":"

    The type of identifier used in the identifier field.
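    A hedged example of looking a gateway up by EUI follows; identifierType accepts the raw enum strings from WirelessGatewayIdType, and the EUI shown is a made-up value that merely matches the GatewayEui pattern.

import software.amazon.awssdk.services.iotwireless.IotWirelessClient;
import software.amazon.awssdk.services.iotwireless.model.GetWirelessGatewayRequest;
import software.amazon.awssdk.services.iotwireless.model.GetWirelessGatewayResponse;

public class LookupGatewayByEui {
    public static void main(String[] args) {
        try (IotWirelessClient client = IotWirelessClient.create()) {
            // IdentifierType selects how Identifier is interpreted: "GatewayEui",
            // "WirelessGatewayId", or "ThingName" per the WirelessGatewayIdType enum.
            GetWirelessGatewayResponse gw = client.getWirelessGateway(
                    GetWirelessGatewayRequest.builder()
                            .identifier("80-00-00-00-00-00-00-01")   // hypothetical EUI
                            .identifierType("GatewayEui")
                            .build());
            System.out.println(gw.id() + " / " + gw.name()
                    + " rfRegion=" + (gw.loRaWAN() != null ? gw.loRaWAN().rfRegion() : "n/a"));
        }
    }
}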

    ", + "location":"querystring", + "locationName":"identifierType" + } + } + }, + "GetWirelessGatewayResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"WirelessGatewayName", + "documentation":"

    The name of the resource.

    " + }, + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the wireless gateway.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the resource.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANGateway", + "documentation":"

    Information about the wireless gateway.

    " + }, + "Arn":{ + "shape":"WirelessGatewayArn", + "documentation":"

    The Amazon Resource Name of the resource.

    " + }, + "ThingName":{ + "shape":"ThingName", + "documentation":"

    The name of the thing associated with the wireless gateway. The value is empty if a thing isn't associated with the gateway.

    " + }, + "ThingArn":{ + "shape":"ThingArn", + "documentation":"

    The ARN of the thing associated with the wireless gateway.

    " + } + } + }, + "GetWirelessGatewayStatisticsRequest":{ + "type":"structure", + "required":["WirelessGatewayId"], + "members":{ + "WirelessGatewayId":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the wireless gateway for which to get the data.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetWirelessGatewayStatisticsResponse":{ + "type":"structure", + "members":{ + "WirelessGatewayId":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the wireless gateway.

    " + }, + "LastUplinkReceivedAt":{ + "shape":"ISODateTimeString", + "documentation":"

    The date and time when the most recent uplink was received.

    " + } + } + }, + "GetWirelessGatewayTaskDefinitionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessGatewayTaskDefinitionId", + "documentation":"

    The ID of the resource to get.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetWirelessGatewayTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "AutoCreateTasks":{ + "shape":"AutoCreateTasks", + "documentation":"

    Whether to automatically create tasks using this task definition for all gateways with the specified current version. If false, the task must be created by calling CreateWirelessGatewayTask.

    " + }, + "Name":{ + "shape":"WirelessGatewayTaskName", + "documentation":"

    The name of the resource.

    " + }, + "Update":{ + "shape":"UpdateWirelessGatewayTaskCreate", + "documentation":"

    Information about the gateways to update.

    " + } + } + }, + "GetWirelessGatewayTaskRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to get.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetWirelessGatewayTaskResponse":{ + "type":"structure", + "members":{ + "WirelessGatewayId":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the wireless gateway.

    " + }, + "WirelessGatewayTaskDefinitionId":{ + "shape":"WirelessGatewayTaskDefinitionId", + "documentation":"

    The ID of the WirelessGatewayTask.

    " + }, + "LastUplinkReceivedAt":{ + "shape":"ISODateTimeString", + "documentation":"

    The date and time when the most recent uplink was received.

    " + }, + "TaskCreatedAt":{ + "shape":"ISODateTimeString", + "documentation":"

    The date and time when the task was created.

    " + }, + "Status":{ + "shape":"WirelessGatewayTaskStatus", + "documentation":"

    The status of the request.

    " + } + } + }, + "HrAllowed":{"type":"boolean"}, + "ISODateTimeString":{ + "type":"string", + "pattern":"^([\\+-]?\\d{4}(?!\\d{2}\\b))((-?)((0[1-9]|1[0-2])(\\3([12]\\d|0[1-9]|3[01]))?|W([0-4]\\d|5[0-2])(-?[1-7])?|(00[1-9]|0[1-9]\\d|[12]\\d{2}|3([0-5]\\d|6[1-6])))([T\\s]((([01]\\d|2[0-3])((:?)[0-5]\\d)?|24\\:?00)([\\.,]\\d+(?!:))?)?(\\17[0-5]\\d([\\.,]\\d+)?)?([zZ]|([\\+-])([01]\\d|2[0-3]):?([0-5]\\d)?)?)?)?$" + }, + "Identifier":{ + "type":"string", + "max":256 + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    An unexpected error occurred while processing a request.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "IotCertificateId":{ + "type":"string", + "max":4096, + "min":1 + }, + "JoinEui":{ + "type":"string", + "pattern":"[a-fA-F0-9]{16}" + }, + "ListDestinationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in this operation.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListDestinationsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next set of results, or null if there are no additional results.

    " + }, + "DestinationList":{ + "shape":"DestinationList", + "documentation":"

    The list of destinations.

    " + } + } + }, + "ListDeviceProfilesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in this operation.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListDeviceProfilesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next set of results, or null if there are no additional results.

    " + }, + "DeviceProfileList":{ + "shape":"DeviceProfileList", + "documentation":"

    The list of device profiles.

    " + } + } + }, + "ListPartnerAccountsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in this operation.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListPartnerAccountsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next set of results, or null if there are no additional results.

    " + }, + "Sidewalk":{ + "shape":"SidewalkAccountList", + "documentation":"

    The Sidewalk account credentials.

    " + } + } + }, + "ListServiceProfilesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in this operation.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListServiceProfilesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next set of results, or null if there are no additional results.

    " + }, + "ServiceProfileList":{ + "shape":"ServiceProfileList", + "documentation":"

    The list of service profiles.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The ARN of the resource for which to list tags.

    ", + "location":"querystring", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags attached to the specified resource. Tags are metadata that can be used to manage a resource.

    " + } + } + }, + "ListWirelessDevicesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in this operation.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "DestinationName":{ + "shape":"DestinationName", + "documentation":"

    A filter to list only the wireless devices that use this destination.

    ", + "location":"querystring", + "locationName":"destinationName" + }, + "DeviceProfileId":{ + "shape":"DeviceProfileId", + "documentation":"

    A filter to list only the wireless devices that use this device profile.

    ", + "location":"querystring", + "locationName":"deviceProfileId" + }, + "ServiceProfileId":{ + "shape":"ServiceProfileId", + "documentation":"

    A filter to list only the wireless devices that use this service profile.

    ", + "location":"querystring", + "locationName":"serviceProfileId" + }, + "WirelessDeviceType":{ + "shape":"WirelessDeviceType", + "documentation":"

    A filter to list only the wireless devices that use this wireless device type.

    ", + "location":"querystring", + "locationName":"wirelessDeviceType" + } + } + }, + "ListWirelessDevicesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next set of results, or null if there are no additional results.

    " + }, + "WirelessDeviceList":{ + "shape":"WirelessDeviceStatisticsList", + "documentation":"

    The list of wireless devices.
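    Because results are paged with nextToken and maxResults, a caller typically loops until nextToken comes back null. The sketch below assumes the standard generated client and model classes for this service.

import software.amazon.awssdk.services.iotwireless.IotWirelessClient;
import software.amazon.awssdk.services.iotwireless.model.ListWirelessDevicesRequest;
import software.amazon.awssdk.services.iotwireless.model.ListWirelessDevicesResponse;
import software.amazon.awssdk.services.iotwireless.model.WirelessDeviceStatistics;

public class ListAllDevices {
    public static void main(String[] args) {
        try (IotWirelessClient client = IotWirelessClient.create()) {
            String nextToken = null;
            do {
                // MaxResults caps each page at 250 per the model; NextToken continues the listing.
                ListWirelessDevicesResponse page = client.listWirelessDevices(
                        ListWirelessDevicesRequest.builder()
                                .maxResults(100)
                                .nextToken(nextToken)
                                .build());
                for (WirelessDeviceStatistics d : page.wirelessDeviceList()) {
                    System.out.println(d.id() + " " + d.typeAsString() + " " + d.name()
                            + " lastUplink=" + d.lastUplinkReceivedAt());
                }
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }
}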

    " + } + } + }, + "ListWirelessGatewayTaskDefinitionsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in this operation.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "TaskDefinitionType":{ + "shape":"WirelessGatewayTaskDefinitionType", + "documentation":"

    A filter to list only the wireless gateway task definitions that use this task definition type.

    ", + "location":"querystring", + "locationName":"taskDefinitionType" + } + } + }, + "ListWirelessGatewayTaskDefinitionsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next set of results, or null if there are no additional results.

    " + }, + "TaskDefinitions":{ + "shape":"WirelessGatewayTaskDefinitionList", + "documentation":"

    The list of task definitions.

    " + } + } + }, + "ListWirelessGatewaysRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in this operation.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListWirelessGatewaysResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next set of results, or null if there are no additional results.

    " + }, + "WirelessGatewayList":{ + "shape":"WirelessGatewayStatisticsList", + "documentation":"

    The list of wireless gateways.

    " + } + } + }, + "LoRaWANDevice":{ + "type":"structure", + "members":{ + "DevEui":{ + "shape":"DevEui", + "documentation":"

    The DevEUI value.

    " + }, + "DeviceProfileId":{ + "shape":"DeviceProfileId", + "documentation":"

    The ID of the device profile for the new wireless device.

    " + }, + "ServiceProfileId":{ + "shape":"ServiceProfileId", + "documentation":"

    The ID of the service profile.

    " + }, + "OtaaV1_1":{ + "shape":"OtaaV1_1", + "documentation":"

    OTAA device object for v1.1, used by create APIs.

    " + }, + "OtaaV1_0_x":{ + "shape":"OtaaV1_0_x", + "documentation":"

    OTAA device object for v1.0.x, used by create APIs.

    " + }, + "AbpV1_1":{ + "shape":"AbpV1_1", + "documentation":"

    ABP device object for v1.1, used by create APIs.

    " + }, + "AbpV1_0_x":{ + "shape":"AbpV1_0_x", + "documentation":"

    ABP device object for v1.0.x, used by create APIs.

    " + } + }, + "documentation":"

    LoRaWAN object for create functions.

    " + }, + "LoRaWANDeviceMetadata":{ + "type":"structure", + "members":{ + "DevEui":{ + "shape":"DevEui", + "documentation":"

    The DevEUI value.

    " + }, + "FPort":{ + "shape":"Integer", + "documentation":"

    The FPort value.

    " + }, + "DataRate":{ + "shape":"Integer", + "documentation":"

    The DataRate value.

    " + }, + "Frequency":{ + "shape":"Integer", + "documentation":"

    The device's channel frequency in Hz.

    " + }, + "Timestamp":{ + "shape":"ISODateTimeString", + "documentation":"

    The date and time of the metadata.

    " + }, + "Gateways":{ + "shape":"LoRaWANGatewayMetadataList", + "documentation":"

    Information about the gateways accessed by the device.

    " + } + }, + "documentation":"

    LoRaWAN device metadata.

    " + }, + "LoRaWANDeviceProfile":{ + "type":"structure", + "members":{ + "SupportsClassB":{ + "shape":"SupportsClassB", + "documentation":"

    The SupportsClassB value.

    " + }, + "ClassBTimeout":{ + "shape":"ClassBTimeout", + "documentation":"

    The ClassBTimeout value.

    " + }, + "PingSlotPeriod":{ + "shape":"PingSlotPeriod", + "documentation":"

    The PingSlotPeriod value.

    " + }, + "PingSlotDr":{ + "shape":"PingSlotDr", + "documentation":"

    The PingSlotDR value.

    " + }, + "PingSlotFreq":{ + "shape":"PingSlotFreq", + "documentation":"

    The PingSlotFreq value.

    " + }, + "SupportsClassC":{ + "shape":"SupportsClassC", + "documentation":"

    The SupportsClassC value.

    " + }, + "ClassCTimeout":{ + "shape":"ClassCTimeout", + "documentation":"

    The ClassCTimeout value.

    " + }, + "MacVersion":{ + "shape":"MacVersion", + "documentation":"

    The MAC version (such as OTAA 1.1 or OTAA 1.0.3) to use with this device profile.

    " + }, + "RegParamsRevision":{ + "shape":"RegParamsRevision", + "documentation":"

    The version of regional parameters.

    " + }, + "RxDelay1":{ + "shape":"RxDelay1", + "documentation":"

    The RXDelay1 value.

    " + }, + "RxDrOffset1":{ + "shape":"RxDrOffset1", + "documentation":"

    The RXDROffset1 value.

    " + }, + "RxDataRate2":{ + "shape":"RxDataRate2", + "documentation":"

    The RXDataRate2 value.

    " + }, + "RxFreq2":{ + "shape":"RxFreq2", + "documentation":"

    The RXFreq2 value.

    " + }, + "FactoryPresetFreqsList":{ + "shape":"FactoryPresetFreqsList", + "documentation":"

    The list of values that make up the FactoryPresetFreqs value.

    " + }, + "MaxEirp":{ + "shape":"MaxEirp", + "documentation":"

    The MaxEIRP value.

    " + }, + "MaxDutyCycle":{ + "shape":"MaxDutyCycle", + "documentation":"

    The MaxDutyCycle value.

    " + }, + "RfRegion":{ + "shape":"RfRegion", + "documentation":"

    The frequency band (RFRegion) value.

    " + }, + "SupportsJoin":{ + "shape":"SupportsJoin", + "documentation":"

    The SupportsJoin value.

    " + }, + "Supports32BitFCnt":{ + "shape":"Supports32BitFCnt", + "documentation":"

    The Supports32BitFCnt value.

    " + } + }, + "documentation":"

    LoRaWANDeviceProfile object.

    " + }, + "LoRaWANGateway":{ + "type":"structure", + "members":{ + "GatewayEui":{ + "shape":"GatewayEui", + "documentation":"

    The gateway's EUI value.

    " + }, + "RfRegion":{ + "shape":"RfRegion", + "documentation":"

    The frequency band (RFRegion) value.

    " + } + }, + "documentation":"

    LoRaWANGateway object.

    " + }, + "LoRaWANGatewayCurrentVersion":{ + "type":"structure", + "members":{ + "CurrentVersion":{ + "shape":"LoRaWANGatewayVersion", + "documentation":"

    The version of the gateways that should receive the update.

    " + } + }, + "documentation":"

    LoRaWANGatewayCurrentVersion object.

    " + }, + "LoRaWANGatewayMetadata":{ + "type":"structure", + "members":{ + "GatewayEui":{ + "shape":"GatewayEui", + "documentation":"

    The gateway's EUI value.

    " + }, + "Snr":{ + "shape":"Double", + "documentation":"

    The SNR value.

    " + }, + "Rssi":{ + "shape":"Double", + "documentation":"

    The RSSI value.

    " + } + }, + "documentation":"

    LoRaWAN gateway metadata.

    " + }, + "LoRaWANGatewayMetadataList":{ + "type":"list", + "member":{"shape":"LoRaWANGatewayMetadata"} + }, + "LoRaWANGatewayVersion":{ + "type":"structure", + "members":{ + "PackageVersion":{ + "shape":"PackageVersion", + "documentation":"

    The version of the wireless gateway firmware.

    " + }, + "Model":{ + "shape":"Model", + "documentation":"

    The model number of the wireless gateway.

    " + }, + "Station":{ + "shape":"Station", + "documentation":"

    The basic station version of the wireless gateway.

    " + } + }, + "documentation":"

    LoRaWANGatewayVersion object.

    " + }, + "LoRaWANGetServiceProfileInfo":{ + "type":"structure", + "members":{ + "UlRate":{ + "shape":"UlRate", + "documentation":"

    The ULRate value.

    " + }, + "UlBucketSize":{ + "shape":"UlBucketSize", + "documentation":"

    The ULBucketSize value.

    " + }, + "UlRatePolicy":{ + "shape":"UlRatePolicy", + "documentation":"

    The ULRatePolicy value.

    " + }, + "DlRate":{ + "shape":"DlRate", + "documentation":"

    The DLRate value.

    " + }, + "DlBucketSize":{ + "shape":"DlBucketSize", + "documentation":"

    The DLBucketSize value.

    " + }, + "DlRatePolicy":{ + "shape":"DlRatePolicy", + "documentation":"

    The DLRatePolicy value.

    " + }, + "AddGwMetadata":{ + "shape":"AddGwMetadata", + "documentation":"

    The AddGWMetaData value.

    " + }, + "DevStatusReqFreq":{ + "shape":"DevStatusReqFreq", + "documentation":"

    The DevStatusReqFreq value.

    " + }, + "ReportDevStatusBattery":{ + "shape":"ReportDevStatusBattery", + "documentation":"

    The ReportDevStatusBattery value.

    " + }, + "ReportDevStatusMargin":{ + "shape":"ReportDevStatusMargin", + "documentation":"

    The ReportDevStatusMargin value.

    " + }, + "DrMin":{ + "shape":"DrMin", + "documentation":"

    The DRMin value.

    " + }, + "DrMax":{ + "shape":"DrMax", + "documentation":"

    The DRMax value.

    " + }, + "ChannelMask":{ + "shape":"ChannelMask", + "documentation":"

    The ChannelMask value.

    " + }, + "PrAllowed":{ + "shape":"PrAllowed", + "documentation":"

    The PRAllowed value that describes whether passive roaming is allowed.

    " + }, + "HrAllowed":{ + "shape":"HrAllowed", + "documentation":"

    The HRAllowed value that describes whether handover roaming is allowed.

    " + }, + "RaAllowed":{ + "shape":"RaAllowed", + "documentation":"

    The RAAllowed value that describes whether roaming activation is allowed.

    " + }, + "NwkGeoLoc":{ + "shape":"NwkGeoLoc", + "documentation":"

    The NwkGeoLoc value.

    " + }, + "TargetPer":{ + "shape":"TargetPer", + "documentation":"

    The TargetPER value.

    " + }, + "MinGwDiversity":{ + "shape":"MinGwDiversity", + "documentation":"

    The MinGwDiversity value.

    " + } + }, + "documentation":"

    LoRaWANGetServiceProfileInfo object.

    " + }, + "LoRaWANListDevice":{ + "type":"structure", + "members":{ + "DevEui":{ + "shape":"DevEui", + "documentation":"

    The DevEUI value.

    " + } + }, + "documentation":"

    LoRaWAN object for list functions.

    " + }, + "LoRaWANSendDataToDevice":{ + "type":"structure", + "members":{ + "FPort":{ + "shape":"FPort", + "documentation":"

    The FPort value.

    " + } + }, + "documentation":"

    LoRaWAN router info.

    " + }, + "LoRaWANServiceProfile":{ + "type":"structure", + "members":{ + "AddGwMetadata":{ + "shape":"AddGwMetadata", + "documentation":"

    The AddGWMetaData value.

    " + } + }, + "documentation":"

    LoRaWANServiceProfile object.

    " + }, + "LoRaWANUpdateDevice":{ + "type":"structure", + "members":{ + "DeviceProfileId":{ + "shape":"DeviceProfileId", + "documentation":"

    The ID of the device profile for the wireless device.

    " + }, + "ServiceProfileId":{ + "shape":"ServiceProfileId", + "documentation":"

    The ID of the service profile.

    " + } + }, + "documentation":"

    LoRaWAN object for update functions.

    " + }, + "LoRaWANUpdateGatewayTaskCreate":{ + "type":"structure", + "members":{ + "UpdateSignature":{ + "shape":"UpdateSignature", + "documentation":"

    The signature used to verify the update firmware.

    " + }, + "SigKeyCrc":{ + "shape":"Crc", + "documentation":"

    The CRC of the signature private key to check.

    " + }, + "CurrentVersion":{ + "shape":"LoRaWANGatewayVersion", + "documentation":"

    The version of the gateways that should receive the update.

    " + }, + "UpdateVersion":{ + "shape":"LoRaWANGatewayVersion", + "documentation":"

    The firmware version to update the gateway to.

    " + } + }, + "documentation":"

    LoRaWANUpdateGatewayTaskCreate object.

    " + }, + "LoRaWANUpdateGatewayTaskEntry":{ + "type":"structure", + "members":{ + "CurrentVersion":{ + "shape":"LoRaWANGatewayVersion", + "documentation":"

    The version of the gateways that should receive the update.

    " + }, + "UpdateVersion":{ + "shape":"LoRaWANGatewayVersion", + "documentation":"

    The firmware version to update the gateway to.

    " + } + }, + "documentation":"

    LoRaWANUpdateGatewayTaskEntry object.

    " + }, + "MacVersion":{ + "type":"string", + "max":64 + }, + "MaxDutyCycle":{ + "type":"integer", + "max":100, + "min":0 + }, + "MaxEirp":{ + "type":"integer", + "max":15, + "min":0 + }, + "MaxResults":{ + "type":"integer", + "max":250, + "min":0 + }, + "Message":{ + "type":"string", + "max":2048 + }, + "MessageId":{"type":"string"}, + "MinGwDiversity":{ + "type":"integer", + "max":100, + "min":1 + }, + "Model":{ + "type":"string", + "max":4096, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":4096 + }, + "NwkGeoLoc":{"type":"boolean"}, + "NwkKey":{ + "type":"string", + "pattern":"[a-fA-F0-9]{32}" + }, + "NwkSEncKey":{ + "type":"string", + "pattern":"[a-fA-F0-9]{32}" + }, + "NwkSKey":{ + "type":"string", + "pattern":"[a-fA-F0-9]{32}" + }, + "OtaaV1_0_x":{ + "type":"structure", + "members":{ + "AppKey":{ + "shape":"AppKey", + "documentation":"

    The AppKey value.

    " + }, + "AppEui":{ + "shape":"AppEui", + "documentation":"

    The AppEUI value.

    " + } + }, + "documentation":"

    OTAA device object for v1.0.x.

    " + }, + "OtaaV1_1":{ + "type":"structure", + "members":{ + "AppKey":{ + "shape":"AppKey", + "documentation":"

    The AppKey value.

    " + }, + "NwkKey":{ + "shape":"NwkKey", + "documentation":"

    The NwkKey value.

    " + }, + "JoinEui":{ + "shape":"JoinEui", + "documentation":"

    The JoinEUI value.

    " + } + }, + "documentation":"

    OTAA device object for v1.1.

    " + }, + "PackageVersion":{ + "type":"string", + "max":32, + "min":1 + }, + "PartnerAccountId":{ + "type":"string", + "max":256 + }, + "PartnerType":{ + "type":"string", + "enum":["Sidewalk"] + }, + "PayloadData":{ + "type":"string", + "max":2048, + "pattern":"^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" + }, + "PingSlotDr":{ + "type":"integer", + "max":15, + "min":0 + }, + "PingSlotFreq":{ + "type":"integer", + "max":16700000, + "min":1000000 + }, + "PingSlotPeriod":{ + "type":"integer", + "max":4096, + "min":128 + }, + "PrAllowed":{"type":"boolean"}, + "PresetFreq":{ + "type":"integer", + "max":16700000, + "min":1000000 + }, + "RaAllowed":{"type":"boolean"}, + "RegParamsRevision":{ + "type":"string", + "max":64 + }, + "ReportDevStatusBattery":{"type":"boolean"}, + "ReportDevStatusMargin":{"type":"boolean"}, + "ResourceId":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"}, + "ResourceId":{"shape":"ResourceId"}, + "ResourceType":{"shape":"ResourceType"} + }, + "documentation":"

    Resource does not exist.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceType":{"type":"string"}, + "Result":{ + "type":"string", + "max":2048 + }, + "RfRegion":{ + "type":"string", + "max":64 + }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":20 + }, + "RxDataRate2":{ + "type":"integer", + "max":15, + "min":0 + }, + "RxDelay1":{ + "type":"integer", + "max":15, + "min":0 + }, + "RxDrOffset1":{ + "type":"integer", + "max":7, + "min":0 + }, + "RxFreq2":{ + "type":"integer", + "max":16700000, + "min":1000000 + }, + "SNwkSIntKey":{ + "type":"string", + "pattern":"[a-fA-F0-9]{32}" + }, + "SendDataToWirelessDeviceRequest":{ + "type":"structure", + "required":[ + "Id", + "TransmitMode", + "PayloadData" + ], + "members":{ + "Id":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the wireless device to receive the data.

    ", + "location":"uri", + "locationName":"Id" + }, + "TransmitMode":{ + "shape":"TransmitMode", + "documentation":"

    The transmit mode to use when sending data to the wireless device. Can be: 0 for UM (unacknowledged mode), 1 for AM (acknowledged mode), or 2 for TM (transparent mode).

    " + }, + "PayloadData":{ + "shape":"PayloadData", + "documentation":"

    The message payload to send.

    " + }, + "WirelessMetadata":{ + "shape":"WirelessMetadata", + "documentation":"

    Metadata about the message request.
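    Putting these request members together, a downlink send might look like the following sketch; the generated class and setter names are assumed from the SDK's codegen conventions, the device ID is a placeholder, and the payload must be base64-encoded to satisfy the PayloadData pattern.

import java.util.Base64;
import software.amazon.awssdk.services.iotwireless.IotWirelessClient;
import software.amazon.awssdk.services.iotwireless.model.LoRaWANSendDataToDevice;
import software.amazon.awssdk.services.iotwireless.model.SendDataToWirelessDeviceRequest;
import software.amazon.awssdk.services.iotwireless.model.SendDataToWirelessDeviceResponse;
import software.amazon.awssdk.services.iotwireless.model.WirelessMetadata;

public class SendDownlink {
    public static void main(String[] args) {
        try (IotWirelessClient client = IotWirelessClient.create()) {
            // PayloadData must be base64-encoded; TransmitMode 0 = unacknowledged, 1 = acknowledged.
            String payload = Base64.getEncoder().encodeToString(new byte[] {0x01, 0x02});
            SendDataToWirelessDeviceResponse resp = client.sendDataToWirelessDevice(
                    SendDataToWirelessDeviceRequest.builder()
                            .id("wireless-device-id")          // hypothetical device ID
                            .transmitMode(1)
                            .payloadData(payload)
                            .wirelessMetadata(WirelessMetadata.builder()
                                    .loRaWAN(LoRaWANSendDataToDevice.builder().fPort(2).build())
                                    .build())
                            .build());
            System.out.println("MessageId: " + resp.messageId());
        }
    }
}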

    " + } + } + }, + "SendDataToWirelessDeviceResponse":{ + "type":"structure", + "members":{ + "MessageId":{ + "shape":"MessageId", + "documentation":"

    The ID of the message sent to the wireless device.

    " + } + } + }, + "Seq":{ + "type":"integer", + "min":0 + }, + "ServiceProfile":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ServiceProfileArn", + "documentation":"

    The Amazon Resource Name of the resource.

    " + }, + "Name":{ + "shape":"ServiceProfileName", + "documentation":"

    The name of the resource.

    " + }, + "Id":{ + "shape":"ServiceProfileId", + "documentation":"

    The ID of the service profile.

    " + } + }, + "documentation":"

    Information about a service profile.

    " + }, + "ServiceProfileArn":{"type":"string"}, + "ServiceProfileId":{ + "type":"string", + "max":256 + }, + "ServiceProfileList":{ + "type":"list", + "member":{"shape":"ServiceProfile"} + }, + "ServiceProfileName":{ + "type":"string", + "max":256 + }, + "SessionKeysAbpV1_0_x":{ + "type":"structure", + "members":{ + "NwkSKey":{ + "shape":"NwkSKey", + "documentation":"

    The NwkSKey value.

    " + }, + "AppSKey":{ + "shape":"AppSKey", + "documentation":"

    The AppSKey value.

    " + } + }, + "documentation":"

    Session keys for ABP v1.0.x.

    " + }, + "SessionKeysAbpV1_1":{ + "type":"structure", + "members":{ + "FNwkSIntKey":{ + "shape":"FNwkSIntKey", + "documentation":"

    The FNwkSIntKey value.

    " + }, + "SNwkSIntKey":{ + "shape":"SNwkSIntKey", + "documentation":"

    The SNwkSIntKey value.

    " + }, + "NwkSEncKey":{ + "shape":"NwkSEncKey", + "documentation":"

    The NwkSEncKey value.

    " + }, + "AppSKey":{ + "shape":"AppSKey", + "documentation":"

    The AppSKey value.

    " + } + }, + "documentation":"

    Session keys for ABP v1.1.

    " + }, + "SidewalkAccountInfo":{ + "type":"structure", + "members":{ + "AmazonId":{ + "shape":"AmazonId", + "documentation":"

    The Sidewalk Amazon ID.

    " + }, + "AppServerPrivateKey":{ + "shape":"AppServerPrivateKey", + "documentation":"

    The Sidewalk application server private key.

    " + } + }, + "documentation":"

    Information about a Sidewalk account.

    " + }, + "SidewalkAccountInfoWithFingerprint":{ + "type":"structure", + "members":{ + "AmazonId":{ + "shape":"AmazonId", + "documentation":"

    The Sidewalk Amazon ID.

    " + }, + "Fingerprint":{ + "shape":"Fingerprint", + "documentation":"

    The fingerprint of the Sidewalk application server private key.

    " + } + }, + "documentation":"

    Information about a Sidewalk account.

    " + }, + "SidewalkAccountList":{ + "type":"list", + "member":{"shape":"SidewalkAccountInfoWithFingerprint"} + }, + "SidewalkListDevice":{ + "type":"structure", + "members":{ + "AmazonId":{ + "shape":"AmazonId", + "documentation":"

    The Sidewalk Amazon ID.

    " + } + }, + "documentation":"

    Sidewalk object used by list functions.

    " + }, + "SidewalkSendDataToDevice":{ + "type":"structure", + "members":{ + "Seq":{ + "shape":"Seq", + "documentation":"

    The sequence number.

    " + } + }, + "documentation":"

    Information about a Sidewalk router.

    " + }, + "SidewalkUpdateAccount":{ + "type":"structure", + "members":{ + "AppServerPrivateKey":{ + "shape":"AppServerPrivateKey", + "documentation":"

    The new Sidewalk application server private key.

    " + } + }, + "documentation":"

    Sidewalk update.

    " + }, + "Station":{ + "type":"string", + "max":4096, + "min":1 + }, + "Supports32BitFCnt":{"type":"boolean"}, + "SupportsClassB":{"type":"boolean"}, + "SupportsClassC":{"type":"boolean"}, + "SupportsJoin":{"type":"boolean"}, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    The tag's key value.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    The tag's value.

    " + } + }, + "documentation":"

    A simple label consisting of a customer-defined key-value pair.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The ARN of the resource to add tags to.

    ", + "location":"querystring", + "locationName":"resourceArn" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    Adds to or modifies the tags of the given resource. Tags are metadata that can be used to manage a resource.
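    A short sketch of attaching a tag follows, assuming the generated IotWirelessClient and its standard varargs setter for list members; the resource ARN is a placeholder.

import software.amazon.awssdk.services.iotwireless.IotWirelessClient;
import software.amazon.awssdk.services.iotwireless.model.Tag;
import software.amazon.awssdk.services.iotwireless.model.TagResourceRequest;

public class TagWirelessResource {
    public static void main(String[] args) {
        try (IotWirelessClient client = IotWirelessClient.create()) {
            // Up to 200 key-value pairs per resource, per the TagList constraints in this model.
            client.tagResource(TagResourceRequest.builder()
                    .resourceArn("arn:aws:iotwireless:region:account:WirelessDevice/device-id")  // hypothetical ARN
                    .tags(Tag.builder().key("environment").value("test").build())
                    .build());
        }
    }
}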

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TargetPer":{ + "type":"integer", + "max":100, + "min":0 + }, + "TestWirelessDeviceRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the wireless device to test.

    ", + "location":"uri", + "locationName":"Id" + } + } + }, + "TestWirelessDeviceResponse":{ + "type":"structure", + "members":{ + "Result":{ + "shape":"Result", + "documentation":"

    The result returned by the test.

    " + } + } + }, + "ThingArn":{"type":"string"}, + "ThingName":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The request was denied because it exceeded the allowed API request rate.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"}, + "ResourceName":{"shape":"AmazonResourceName"} + }, + "documentation":"

    The request was denied because the resource can't have any more tags.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TransmitMode":{ + "type":"integer", + "max":1, + "min":0 + }, + "UlBucketSize":{ + "type":"integer", + "max":2147483647, + "min":0 + }, + "UlRate":{ + "type":"integer", + "max":2147483647, + "min":0 + }, + "UlRatePolicy":{ + "type":"string", + "max":256 + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The ARN of the resource to remove tags from.

    ", + "location":"querystring", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    A list of the keys of the tags to remove from the resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDataSource":{ + "type":"string", + "max":4096, + "min":1 + }, + "UpdateDestinationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DestinationName", + "documentation":"

    The new name of the resource.

    ", + "location":"uri", + "locationName":"Name" + }, + "ExpressionType":{ + "shape":"ExpressionType", + "documentation":"

    The type of value in Expression.

    " + }, + "Expression":{ + "shape":"Expression", + "documentation":"

    The new rule name or topic rule to send messages to.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A new description of the resource.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of the IAM Role that authorizes the destination.

    " + } + } + }, + "UpdateDestinationResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdatePartnerAccountRequest":{ + "type":"structure", + "required":[ + "Sidewalk", + "PartnerAccountId", + "PartnerType" + ], + "members":{ + "Sidewalk":{ + "shape":"SidewalkUpdateAccount", + "documentation":"

    The Sidewalk account credentials.

    " + }, + "PartnerAccountId":{ + "shape":"PartnerAccountId", + "documentation":"

    The ID of the partner account to update.

    ", + "location":"uri", + "locationName":"PartnerAccountId" + }, + "PartnerType":{ + "shape":"PartnerType", + "documentation":"

    The partner type.

    ", + "location":"querystring", + "locationName":"partnerType" + } + } + }, + "UpdatePartnerAccountResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateSignature":{ + "type":"string", + "max":4096, + "min":1 + }, + "UpdateWirelessDeviceRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the resource to update.

    ", + "location":"uri", + "locationName":"Id" + }, + "DestinationName":{ + "shape":"DestinationName", + "documentation":"

    The name of the new destination for the device.

    " + }, + "Name":{ + "shape":"WirelessDeviceName", + "documentation":"

    The new name of the resource.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A new description of the resource.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANUpdateDevice", + "documentation":"

    The updated wireless device's configuration.

    " + } + } + }, + "UpdateWirelessDeviceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateWirelessGatewayRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the resource to update.

    ", + "location":"uri", + "locationName":"Id" + }, + "Name":{ + "shape":"WirelessGatewayName", + "documentation":"

    The new name of the resource.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A new description of the resource.

    " + } + } + }, + "UpdateWirelessGatewayResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateWirelessGatewayTaskCreate":{ + "type":"structure", + "members":{ + "UpdateDataSource":{ + "shape":"UpdateDataSource", + "documentation":"

    The link to the S3 bucket.

    " + }, + "UpdateDataRole":{ + "shape":"UpdateDataSource", + "documentation":"

    The IAM role used to read data from the S3 bucket.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANUpdateGatewayTaskCreate", + "documentation":"

    The properties that relate to the LoRaWAN wireless gateway.

    " + } + }, + "documentation":"

    UpdateWirelessGatewayTaskCreate object.

    " + }, + "UpdateWirelessGatewayTaskEntry":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"WirelessGatewayTaskDefinitionId", + "documentation":"

    The ID of the new wireless gateway task entry.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANUpdateGatewayTaskEntry", + "documentation":"

    The properties that relate to the LoRaWAN wireless gateway.

    " + } + }, + "documentation":"

    UpdateWirelessGatewayTaskEntry object.

    " + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The input did not meet the specified constraints.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "WirelessDeviceArn":{"type":"string"}, + "WirelessDeviceId":{ + "type":"string", + "max":256 + }, + "WirelessDeviceIdType":{ + "type":"string", + "enum":[ + "WirelessDeviceId", + "DevEui", + "ThingName" + ] + }, + "WirelessDeviceName":{ + "type":"string", + "max":256 + }, + "WirelessDeviceStatistics":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"WirelessDeviceArn", + "documentation":"

    The Amazon Resource Name of the resource.

    " + }, + "Id":{ + "shape":"WirelessDeviceId", + "documentation":"

    The ID of the wireless device reporting the data.

    " + }, + "Type":{ + "shape":"WirelessDeviceType", + "documentation":"

    The wireless device type.

    " + }, + "Name":{ + "shape":"WirelessDeviceName", + "documentation":"

    The name of the resource.

    " + }, + "DestinationName":{ + "shape":"DestinationName", + "documentation":"

    The name of the destination to which the device is assigned.

    " + }, + "LastUplinkReceivedAt":{ + "shape":"ISODateTimeString", + "documentation":"

    The date and time when the most recent uplink was received.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANListDevice", + "documentation":"

    LoRaWAN device info.

    " + }, + "Sidewalk":{ + "shape":"SidewalkListDevice", + "documentation":"

    The Sidewalk device information.

    " + } + }, + "documentation":"

    Information about a wireless device's operation.

    " + }, + "WirelessDeviceStatisticsList":{ + "type":"list", + "member":{"shape":"WirelessDeviceStatistics"} + }, + "WirelessDeviceType":{ + "type":"string", + "enum":[ + "Sidewalk", + "LoRaWAN" + ] + }, + "WirelessGatewayArn":{"type":"string"}, + "WirelessGatewayId":{ + "type":"string", + "max":256 + }, + "WirelessGatewayIdType":{ + "type":"string", + "enum":[ + "GatewayEui", + "WirelessGatewayId", + "ThingName" + ] + }, + "WirelessGatewayName":{ + "type":"string", + "max":256 + }, + "WirelessGatewayServiceType":{ + "type":"string", + "enum":[ + "CUPS", + "LNS" + ] + }, + "WirelessGatewayStatistics":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"WirelessGatewayArn", + "documentation":"

    The Amazon Resource Name of the resource.

    " + }, + "Id":{ + "shape":"WirelessGatewayId", + "documentation":"

    The ID of the wireless gateway reporting the data.

    " + }, + "Name":{ + "shape":"WirelessGatewayName", + "documentation":"

    The name of the resource.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the resource.

    " + }, + "LoRaWAN":{ + "shape":"LoRaWANGateway", + "documentation":"

    LoRaWAN gateway info.

    " + }, + "LastUplinkReceivedAt":{ + "shape":"ISODateTimeString", + "documentation":"

    The date and time when the most recent uplink was received.

    " + } + }, + "documentation":"

    Information about a wireless gateway's operation.

    " + }, + "WirelessGatewayStatisticsList":{ + "type":"list", + "member":{"shape":"WirelessGatewayStatistics"} + }, + "WirelessGatewayTaskDefinitionId":{ + "type":"string", + "max":36, + "pattern":"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" + }, + "WirelessGatewayTaskDefinitionList":{ + "type":"list", + "member":{"shape":"UpdateWirelessGatewayTaskEntry"} + }, + "WirelessGatewayTaskDefinitionType":{ + "type":"string", + "enum":["UPDATE"] + }, + "WirelessGatewayTaskName":{ + "type":"string", + "max":2048, + "min":1 + }, + "WirelessGatewayTaskStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "FIRST_RETRY", + "SECOND_RETRY", + "COMPLETED", + "FAILED" + ] + }, + "WirelessMetadata":{ + "type":"structure", + "members":{ + "LoRaWAN":{ + "shape":"LoRaWANSendDataToDevice", + "documentation":"

    LoRaWAN device info.

    " + }, + "Sidewalk":{ + "shape":"SidewalkSendDataToDevice", + "documentation":"

    Sidewalk-specific information about the message.

    " + } + }, + "documentation":"

    WirelessMetadata object.

    " + } + }, + "documentation":"

    AWS IoT Wireless API documentation

    " +} diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml new file mode 100644 index 000000000000..8d4f75f7c53d --- /dev/null +++ b/services/ivs/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + ivs + AWS Java SDK :: Services :: Ivs + The AWS Java SDK for Ivs module holds the client classes that are used for + communicating with Ivs. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.ivs + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/ivs/src/main/resources/codegen-resources/paginators-1.json b/services/ivs/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..4aef6098607c --- /dev/null +++ b/services/ivs/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,33 @@ +{ + "pagination": { + "ListChannels": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "channels" + }, + "ListPlaybackKeyPairs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "keyPairs" + }, + "ListStreamKeys": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "streamKeys" + }, + "ListStreams": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "streams" + }, + "ListTagsForResource": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + } + } +} diff --git a/services/ivs/src/main/resources/codegen-resources/service-2.json b/services/ivs/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..e4d609050b9a --- /dev/null +++ b/services/ivs/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1323 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-07-14", + "endpointPrefix":"ivs", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon IVS", + "serviceFullName":"Amazon Interactive Video Service", + "serviceId":"ivs", + "signatureVersion":"v4", + "signingName":"ivs", + "uid":"ivs-2020-07-14" + }, + "operations":{ + "BatchGetChannel":{ + "name":"BatchGetChannel", + "http":{ + "method":"POST", + "requestUri":"/BatchGetChannel" + }, + "input":{"shape":"BatchGetChannelRequest"}, + "output":{"shape":"BatchGetChannelResponse"}, + "documentation":"

    Performs GetChannel on multiple ARNs simultaneously.

    " + }, + "BatchGetStreamKey":{ + "name":"BatchGetStreamKey", + "http":{ + "method":"POST", + "requestUri":"/BatchGetStreamKey" + }, + "input":{"shape":"BatchGetStreamKeyRequest"}, + "output":{"shape":"BatchGetStreamKeyResponse"}, + "documentation":"

    Performs GetStreamKey on multiple ARNs simultaneously.

    " + }, + "CreateChannel":{ + "name":"CreateChannel", + "http":{ + "method":"POST", + "requestUri":"/CreateChannel" + }, + "input":{"shape":"CreateChannelRequest"}, + "output":{"shape":"CreateChannelResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

    Creates a new channel and an associated stream key to start streaming.

    " + }, + "CreateStreamKey":{ + "name":"CreateStreamKey", + "http":{ + "method":"POST", + "requestUri":"/CreateStreamKey" + }, + "input":{"shape":"CreateStreamKeyRequest"}, + "output":{"shape":"CreateStreamKeyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

    Creates a stream key, used to initiate a stream, for the specified channel ARN.

    Note that CreateChannel creates a stream key. If you subsequently use CreateStreamKey on the same channel, it will fail because a stream key already exists and there is a limit of 1 stream key per channel. To reset the stream key on a channel, use DeleteStreamKey and then CreateStreamKey.
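    A sketch of the delete-then-create reset flow described above, using the software.amazon.awssdk.services.ivs module added in this change; the arn and channelArn request members and the streamKey() response accessor are assumed from the published IVS API and the SDK's codegen conventions, and both ARNs are placeholders.

import software.amazon.awssdk.services.ivs.IvsClient;
import software.amazon.awssdk.services.ivs.model.CreateStreamKeyRequest;
import software.amazon.awssdk.services.ivs.model.CreateStreamKeyResponse;
import software.amazon.awssdk.services.ivs.model.DeleteStreamKeyRequest;

public class ResetStreamKey {
    public static void main(String[] args) {
        String channelArn = "arn:aws:ivs:region:account:channel/abc";          // hypothetical channel ARN
        String oldStreamKeyArn = "arn:aws:ivs:region:account:stream-key/def";  // hypothetical stream key ARN
        try (IvsClient ivs = IvsClient.create()) {
            // A channel allows only one stream key, so delete the existing key first...
            ivs.deleteStreamKey(DeleteStreamKeyRequest.builder().arn(oldStreamKeyArn).build());
            // ...then create a replacement bound to the same channel.
            CreateStreamKeyResponse created = ivs.createStreamKey(
                    CreateStreamKeyRequest.builder().channelArn(channelArn).build());
            System.out.println("New stream key ARN: " + created.streamKey().arn());
        }
    }
}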

    " + }, + "DeleteChannel":{ + "name":"DeleteChannel", + "http":{ + "method":"POST", + "requestUri":"/DeleteChannel" + }, + "input":{"shape":"DeleteChannelRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

    Deletes the specified channel and its associated stream keys.

    " + }, + "DeletePlaybackKeyPair":{ + "name":"DeletePlaybackKeyPair", + "http":{ + "method":"POST", + "requestUri":"/DeletePlaybackKeyPair" + }, + "input":{"shape":"DeletePlaybackKeyPairRequest"}, + "output":{"shape":"DeletePlaybackKeyPairResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

    Deletes a specified authorization key pair. This invalidates future viewer tokens generated using the key pair’s privateKey.

    " + }, + "DeleteStreamKey":{ + "name":"DeleteStreamKey", + "http":{ + "method":"POST", + "requestUri":"/DeleteStreamKey" + }, + "input":{"shape":"DeleteStreamKeyRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

    Deletes the stream key for the specified ARN, so it can no longer be used to stream.

    " + }, + "GetChannel":{ + "name":"GetChannel", + "http":{ + "method":"POST", + "requestUri":"/GetChannel" + }, + "input":{"shape":"GetChannelRequest"}, + "output":{"shape":"GetChannelResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets the channel configuration for the specified channel ARN. See also BatchGetChannel.

    " + }, + "GetPlaybackKeyPair":{ + "name":"GetPlaybackKeyPair", + "http":{ + "method":"POST", + "requestUri":"/GetPlaybackKeyPair" + }, + "input":{"shape":"GetPlaybackKeyPairRequest"}, + "output":{"shape":"GetPlaybackKeyPairResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets a specified playback authorization key pair and returns the arn and fingerprint. The privateKey held by the caller can be used to generate viewer authorization tokens, to grant viewers access to authorized channels.

    " + }, + "GetStream":{ + "name":"GetStream", + "http":{ + "method":"POST", + "requestUri":"/GetStream" + }, + "input":{"shape":"GetStreamRequest"}, + "output":{"shape":"GetStreamResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ChannelNotBroadcasting"} + ], + "documentation":"

    Gets information about the active (live) stream on a specified channel.

    " + }, + "GetStreamKey":{ + "name":"GetStreamKey", + "http":{ + "method":"POST", + "requestUri":"/GetStreamKey" + }, + "input":{"shape":"GetStreamKeyRequest"}, + "output":{"shape":"GetStreamKeyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets stream-key information for a specified ARN.

    " + }, + "ImportPlaybackKeyPair":{ + "name":"ImportPlaybackKeyPair", + "http":{ + "method":"POST", + "requestUri":"/ImportPlaybackKeyPair" + }, + "input":{"shape":"ImportPlaybackKeyPairRequest"}, + "output":{"shape":"ImportPlaybackKeyPairResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

    Imports the public portion of a new key pair and returns its arn and fingerprint. The privateKey can then be used to generate viewer authorization tokens, to grant viewers access to authorized channels.
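
    As a rough illustration (not part of the model), importing a key pair with the AWS SDK for Java v2 might look like the sketch below, assuming the usual generated IvsClient and builder names; the PEM string is a placeholder for a public key you generated yourself.

        import software.amazon.awssdk.services.ivs.IvsClient;
        import software.amazon.awssdk.services.ivs.model.ImportPlaybackKeyPairRequest;
        import software.amazon.awssdk.services.ivs.model.PlaybackKeyPair;

        public class ImportKeyPair {
            public static void main(String[] args) {
                String publicKeyPem = "-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----"; // placeholder
                try (IvsClient ivs = IvsClient.create()) {
                    PlaybackKeyPair keyPair = ivs.importPlaybackKeyPair(
                            ImportPlaybackKeyPairRequest.builder()
                                    .publicKeyMaterial(publicKeyPem)
                                    .name("my-playback-key") // optional nickname
                                    .build()).keyPair();
                    // The returned arn and fingerprint identify the imported key pair.
                    System.out.println(keyPair.arn() + " / " + keyPair.fingerprint());
                }
            }
        }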

    " + }, + "ListChannels":{ + "name":"ListChannels", + "http":{ + "method":"POST", + "requestUri":"/ListChannels" + }, + "input":{"shape":"ListChannelsRequest"}, + "output":{"shape":"ListChannelsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets summary information about all channels in your account, in the AWS region where the API request is processed. This list can be filtered to match a specified string.

    " + }, + "ListPlaybackKeyPairs":{ + "name":"ListPlaybackKeyPairs", + "http":{ + "method":"POST", + "requestUri":"/ListPlaybackKeyPairs" + }, + "input":{"shape":"ListPlaybackKeyPairsRequest"}, + "output":{"shape":"ListPlaybackKeyPairsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets summary information about playback key pairs.

    " + }, + "ListStreamKeys":{ + "name":"ListStreamKeys", + "http":{ + "method":"POST", + "requestUri":"/ListStreamKeys" + }, + "input":{"shape":"ListStreamKeysRequest"}, + "output":{"shape":"ListStreamKeysResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets summary information about stream keys for the specified channel.

    " + }, + "ListStreams":{ + "name":"ListStreams", + "http":{ + "method":"POST", + "requestUri":"/ListStreams" + }, + "input":{"shape":"ListStreamsRequest"}, + "output":{"shape":"ListStreamsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets summary information about live streams in your account, in the AWS region where the API request is processed.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Gets information about AWS tags for the specified ARN.

    " + }, + "PutMetadata":{ + "name":"PutMetadata", + "http":{ + "method":"POST", + "requestUri":"/PutMetadata" + }, + "input":{"shape":"PutMetadataRequest"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ChannelNotBroadcasting"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Inserts metadata into an RTMPS stream for the specified channel. A maximum of 5 requests per second per channel is allowed, each with a maximum 1 KB payload.
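
    A minimal sketch of a timed-metadata insert with the AWS SDK for Java v2, assuming the generated IvsClient and a placeholder channel ARN; the payload is an arbitrary small JSON fragment kept well under the 1 KB limit.

        import software.amazon.awssdk.services.ivs.IvsClient;
        import software.amazon.awssdk.services.ivs.model.PutMetadataRequest;

        public class InsertMetadata {
            public static void main(String[] args) {
                String channelArn = "arn:aws:ivs:us-west-2:123456789012:channel/abcdEFGH1234"; // placeholder
                try (IvsClient ivs = IvsClient.create()) {
                    ivs.putMetadata(PutMetadataRequest.builder()
                            .channelArn(channelArn)
                            .metadata("{\"question\":\"What is the capital of France?\"}")
                            .build());
                }
            }
        }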

    " + }, + "StopStream":{ + "name":"StopStream", + "http":{ + "method":"POST", + "requestUri":"/StopStream" + }, + "input":{"shape":"StopStreamRequest"}, + "output":{"shape":"StopStreamResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ChannelNotBroadcasting"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"StreamUnavailable"} + ], + "documentation":"

    Disconnects the incoming RTMPS stream for the specified channel. Can be used in conjunction with DeleteStreamKey to prevent further streaming to a channel.

    Many streaming client-software libraries automatically reconnect a dropped RTMPS session, so to stop the stream permanently, you may want to first revoke the streamKey attached to the channel.
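
    A hedged sketch of that sequence with the AWS SDK for Java v2, assuming the generated IvsClient and placeholder ARNs: revoke the stream key first, then disconnect the live stream.

        import software.amazon.awssdk.services.ivs.IvsClient;
        import software.amazon.awssdk.services.ivs.model.DeleteStreamKeyRequest;
        import software.amazon.awssdk.services.ivs.model.StopStreamRequest;

        public class StopStreamPermanently {
            public static void main(String[] args) {
                String channelArn = "arn:aws:ivs:us-west-2:123456789012:channel/abcdEFGH1234";      // placeholder
                String streamKeyArn = "arn:aws:ivs:us-west-2:123456789012:stream-key/abcdEFGH1234"; // placeholder
                try (IvsClient ivs = IvsClient.create()) {
                    // Revoke the key so streaming software cannot simply reconnect...
                    ivs.deleteStreamKey(DeleteStreamKeyRequest.builder().arn(streamKeyArn).build());
                    // ...then disconnect the stream that is currently live.
                    ivs.stopStream(StopStreamRequest.builder().channelArn(channelArn).build());
                }
            }
        }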

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Adds or updates tags for the AWS resource with the specified ARN.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes tags from the resource with the specified ARN.

    " + }, + "UpdateChannel":{ + "name":"UpdateChannel", + "http":{ + "method":"POST", + "requestUri":"/UpdateChannel" + }, + "input":{"shape":"UpdateChannelRequest"}, + "output":{"shape":"UpdateChannelResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

    Updates a channel's configuration. This does not affect an ongoing stream of this channel. You must stop and restart the stream for the changes to take effect.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

    User does not have sufficient access to perform this action.

    " + } + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "BatchError":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ResourceArn", + "documentation":"

    Channel ARN.

    " + }, + "code":{ + "shape":"errorCode", + "documentation":"

    Error code.

    " + }, + "message":{ + "shape":"errorMessage", + "documentation":"

    Error message, determined by the application.

    " + } + }, + "documentation":"

    Error related to a specific channel, specified by its ARN.

    " + }, + "BatchErrors":{ + "type":"list", + "member":{"shape":"BatchError"} + }, + "BatchGetChannelRequest":{ + "type":"structure", + "required":["arns"], + "members":{ + "arns":{ + "shape":"ChannelArnList", + "documentation":"

    Array of ARNs, one per channel.

    " + } + } + }, + "BatchGetChannelResponse":{ + "type":"structure", + "members":{ + "channels":{"shape":"Channels"}, + "errors":{ + "shape":"BatchErrors", + "documentation":"

    Each error object is related to a specific ARN in the request.

    " + } + } + }, + "BatchGetStreamKeyRequest":{ + "type":"structure", + "required":["arns"], + "members":{ + "arns":{ + "shape":"StreamKeyArnList", + "documentation":"

    Array of ARNs, one per channel.

    " + } + } + }, + "BatchGetStreamKeyResponse":{ + "type":"structure", + "members":{ + "streamKeys":{"shape":"StreamKeys"}, + "errors":{"shape":"BatchErrors"} + } + }, + "Boolean":{"type":"boolean"}, + "Channel":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"

    Channel ARN.

    " + }, + "name":{ + "shape":"ChannelName", + "documentation":"

    Channel name.

    " + }, + "latencyMode":{ + "shape":"ChannelLatencyMode", + "documentation":"

    Channel latency mode. Default: LOW.

    " + }, + "type":{ + "shape":"ChannelType", + "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable resolution or bitrate, the stream probably will disconnect immediately. Valid values:

    • STANDARD: Multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Vertical resolution can be up to 1080 and bitrate can be up to 8.5 Mbps.

    • BASIC: Amazon IVS delivers the original input to viewers. The viewer’s video-quality choice is limited to the original input. Vertical resolution can be up to 480 and bitrate can be up to 1.5 Mbps.

    Default: STANDARD.

    " + }, + "ingestEndpoint":{ + "shape":"IngestEndpoint", + "documentation":"

    Channel ingest endpoint, part of the definition of an ingest server, used when you set up streaming software.

    " + }, + "playbackUrl":{ + "shape":"PlaybackURL", + "documentation":"

    Channel playback URL.

    " + }, + "authorized":{ + "shape":"IsAuthorized", + "documentation":"

    Whether the channel is authorized.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Array of 1-50 maps, each of the form string:string (key:value).

    " + } + }, + "documentation":"

    Object specifying a channel.

    " + }, + "ChannelArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:aws:[is]vs:[a-z0-9-]+:[0-9]+:channel/[a-zA-Z0-9-]+$" + }, + "ChannelArnList":{ + "type":"list", + "member":{"shape":"ChannelArn"}, + "max":50, + "min":1 + }, + "ChannelLatencyMode":{ + "type":"string", + "enum":[ + "NORMAL", + "LOW" + ] + }, + "ChannelList":{ + "type":"list", + "member":{"shape":"ChannelSummary"} + }, + "ChannelName":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^[a-zA-Z0-9-_]*$" + }, + "ChannelNotBroadcasting":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

    The stream is offline for the given channel ARN.

    " + } + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "ChannelSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"

    Channel ARN.

    " + }, + "name":{ + "shape":"ChannelName", + "documentation":"

    Channel name.

    " + }, + "latencyMode":{ + "shape":"ChannelLatencyMode", + "documentation":"

    Channel latency mode. Default: LOW.

    " + }, + "authorized":{ + "shape":"IsAuthorized", + "documentation":"

    Whether the channel is authorized.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Array of 1-50 maps, each of the form string:string (key:value).

    " + } + }, + "documentation":"

    Summary information about a channel.

    " + }, + "ChannelType":{ + "type":"string", + "enum":[ + "BASIC", + "STANDARD" + ] + }, + "Channels":{ + "type":"list", + "member":{"shape":"Channel"} + }, + "ConflictException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

    Updating or deleting a resource can cause an inconsistent state.

    " + } + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateChannelRequest":{ + "type":"structure", + "members":{ + "name":{ + "shape":"ChannelName", + "documentation":"

    Channel name.

    " + }, + "latencyMode":{ + "shape":"ChannelLatencyMode", + "documentation":"

    Channel latency mode. Default: LOW.

    " + }, + "type":{ + "shape":"ChannelType", + "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable resolution or bitrate, the stream probably will disconnect immediately. Valid values:

    • STANDARD: Multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Vertical resolution can be up to 1080 and bitrate can be up to 8.5 Mbps.

    • BASIC: Amazon IVS delivers the original input to viewers. The viewer’s video-quality choice is limited to the original input. Vertical resolution can be up to 480 and bitrate can be up to 1.5 Mbps.

    Default: STANDARD.

    " + }, + "authorized":{ + "shape":"Boolean", + "documentation":"

    Whether the channel is authorized. Default: false.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    See Channel$tags.

    " + } + } + }, + "CreateChannelResponse":{ + "type":"structure", + "members":{ + "channel":{"shape":"Channel"}, + "streamKey":{"shape":"StreamKey"} + } + }, + "CreateStreamKeyRequest":{ + "type":"structure", + "required":["channelArn"], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

    ARN of the channel for which to create the stream key.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    See Channel$tags.

    " + } + } + }, + "CreateStreamKeyResponse":{ + "type":"structure", + "members":{ + "streamKey":{ + "shape":"StreamKey", + "documentation":"

    Stream key used to authenticate an RTMPS stream for ingestion.

    " + } + } + }, + "DeleteChannelRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"

    ARN of the channel to be deleted.

    " + } + } + }, + "DeletePlaybackKeyPairRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"PlaybackKeyPairArn", + "documentation":"

    ARN of the key pair to be deleted.

    " + } + } + }, + "DeletePlaybackKeyPairResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteStreamKeyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"StreamKeyArn", + "documentation":"

    ARN of the stream key to be deleted.

    " + } + } + }, + "GetChannelRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"

    ARN of the channel for which the configuration is to be retrieved.

    " + } + } + }, + "GetChannelResponse":{ + "type":"structure", + "members":{ + "channel":{"shape":"Channel"} + } + }, + "GetPlaybackKeyPairRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"PlaybackKeyPairArn", + "documentation":"

    ARN of the key pair to be returned.

    " + } + } + }, + "GetPlaybackKeyPairResponse":{ + "type":"structure", + "members":{ + "keyPair":{"shape":"PlaybackKeyPair"} + } + }, + "GetStreamKeyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"StreamKeyArn", + "documentation":"

    ARN for the stream key to be retrieved.

    " + } + } + }, + "GetStreamKeyResponse":{ + "type":"structure", + "members":{ + "streamKey":{"shape":"StreamKey"} + } + }, + "GetStreamRequest":{ + "type":"structure", + "required":["channelArn"], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

    Channel ARN for stream to be accessed.

    " + } + } + }, + "GetStreamResponse":{ + "type":"structure", + "members":{ + "stream":{"shape":"Stream"} + } + }, + "ImportPlaybackKeyPairRequest":{ + "type":"structure", + "required":["publicKeyMaterial"], + "members":{ + "publicKeyMaterial":{ + "shape":"PlaybackPublicKeyMaterial", + "documentation":"

    The public portion of a customer-generated key pair.

    " + }, + "name":{ + "shape":"PlaybackKeyPairName", + "documentation":"

    An arbitrary string (a nickname) assigned to a playback key pair that helps the customer identify that resource. The value does not need to be unique.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Any tags provided with the request are added to the playback key pair tags.

    " + } + } + }, + "ImportPlaybackKeyPairResponse":{ + "type":"structure", + "members":{ + "keyPair":{"shape":"PlaybackKeyPair"} + } + }, + "IngestEndpoint":{"type":"string"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

    Unexpected error during processing of request.

    " + } + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "IsAuthorized":{"type":"boolean"}, + "ListChannelsRequest":{ + "type":"structure", + "members":{ + "filterByName":{ + "shape":"ChannelName", + "documentation":"

    Filters the channel list to match the specified name.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The first channel to retrieve. This is used for pagination; see the nextToken response field.

    " + }, + "maxResults":{ + "shape":"MaxChannelResults", + "documentation":"

    Maximum number of channels to return.

    " + } + } + }, + "ListChannelsResponse":{ + "type":"structure", + "required":["channels"], + "members":{ + "channels":{ + "shape":"ChannelList", + "documentation":"

    List of the matching channels.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are more channels than maxResults, use nextToken in the request to get the next set.
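
    The nextToken/maxResults pair follows the usual pagination pattern; a sketch of draining all pages with the AWS SDK for Java v2 (generated client and builder names assumed) could look like this:

        import software.amazon.awssdk.services.ivs.IvsClient;
        import software.amazon.awssdk.services.ivs.model.ChannelSummary;
        import software.amazon.awssdk.services.ivs.model.ListChannelsRequest;
        import software.amazon.awssdk.services.ivs.model.ListChannelsResponse;

        public class ListAllChannels {
            public static void main(String[] args) {
                try (IvsClient ivs = IvsClient.create()) {
                    String nextToken = null;
                    do {
                        // Pass the token from the previous page (null on the first call).
                        ListChannelsResponse page = ivs.listChannels(ListChannelsRequest.builder()
                                .maxResults(50)
                                .nextToken(nextToken)
                                .build());
                        for (ChannelSummary summary : page.channels()) {
                            System.out.println(summary.arn() + " " + summary.name());
                        }
                        nextToken = page.nextToken();
                    } while (nextToken != null && !nextToken.isEmpty());
                }
            }
        }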

    " + } + } + }, + "ListPlaybackKeyPairsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The first key pair to retrieve. This is used for pagination; see the nextToken response field.

    " + }, + "maxResults":{ + "shape":"MaxPlaybackKeyPairResults", + "documentation":"

    Maximum number of key pairs to return.

    " + } + } + }, + "ListPlaybackKeyPairsResponse":{ + "type":"structure", + "required":["keyPairs"], + "members":{ + "keyPairs":{ + "shape":"PlaybackKeyPairList", + "documentation":"

    List of key pairs.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are more key pairs than maxResults, use nextToken in the request to get the next set.

    " + } + } + }, + "ListStreamKeysRequest":{ + "type":"structure", + "required":["channelArn"], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

    Channel ARN used to filter the list.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The first stream key to retrieve. This is used for pagination; see the nextToken response field.

    " + }, + "maxResults":{ + "shape":"MaxStreamKeyResults", + "documentation":"

    Maximum number of streamKeys to return.

    " + } + } + }, + "ListStreamKeysResponse":{ + "type":"structure", + "required":["streamKeys"], + "members":{ + "streamKeys":{ + "shape":"StreamKeyList", + "documentation":"

    List of stream keys.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are more stream keys than maxResults, use nextToken in the request to get the next set.

    " + } + } + }, + "ListStreamsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The first stream to retrieve. This is used for pagination; see the nextToken response field.

    " + }, + "maxResults":{ + "shape":"MaxStreamResults", + "documentation":"

    Maximum number of streams to return.

    " + } + } + }, + "ListStreamsResponse":{ + "type":"structure", + "required":["streams"], + "members":{ + "streams":{ + "shape":"StreamList", + "documentation":"

    List of streams.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    If there are more streams than maxResults, use nextToken in the request to get the next set.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN of the resource to be retrieved.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "nextToken":{ + "shape":"String", + "documentation":"

    The first tag to retrieve. This is used for pagination; see the nextToken response field.

    " + }, + "maxResults":{ + "shape":"MaxTagResults", + "documentation":"

    Maximum number of tags to return.

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{"shape":"Tags"}, + "nextToken":{ + "shape":"String", + "documentation":"

    If there are more tags than maxResults, use nextToken in the request to get the next set.

    " + } + } + }, + "MaxChannelResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MaxPlaybackKeyPairResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MaxStreamKeyResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MaxStreamResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MaxTagResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":500, + "min":0 + }, + "PendingVerification":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

    Your account is pending verification.

    " + } + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "PlaybackKeyPair":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"PlaybackKeyPairArn", + "documentation":"

    Key-pair ARN.

    " + }, + "name":{ + "shape":"PlaybackKeyPairName", + "documentation":"

    Key-pair name.

    " + }, + "fingerprint":{ + "shape":"PlaybackKeyPairFingerprint", + "documentation":"

    Key-pair identifier.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Array of 1-50 maps, each of the form string:string (key:value).

    " + } + }, + "documentation":"

    A key pair used to sign and validate a playback authorization token.

    " + }, + "PlaybackKeyPairArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:aws:[is]vs:[a-z0-9-]+:[0-9]+:playback-key/[a-zA-Z0-9-]+$" + }, + "PlaybackKeyPairFingerprint":{"type":"string"}, + "PlaybackKeyPairList":{ + "type":"list", + "member":{"shape":"PlaybackKeyPairSummary"} + }, + "PlaybackKeyPairName":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^[a-zA-Z0-9-_]*$" + }, + "PlaybackKeyPairSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"PlaybackKeyPairArn", + "documentation":"

    Key-pair ARN.

    " + }, + "name":{ + "shape":"PlaybackKeyPairName", + "documentation":"

    Key-pair name.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Array of 1-50 maps, each of the form string:string (key:value).

    " + } + }, + "documentation":"

    Summary information about a playback key pair.

    " + }, + "PlaybackPublicKeyMaterial":{"type":"string"}, + "PlaybackURL":{"type":"string"}, + "PutMetadataRequest":{ + "type":"structure", + "required":[ + "channelArn", + "metadata" + ], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

    ARN of the channel into which metadata is inserted. This channel must have an active stream.

    " + }, + "metadata":{ + "shape":"StreamMetadata", + "documentation":"

    Metadata to insert into the stream. Maximum: 1 KB per request.

    " + } + } + }, + "ResourceArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:aws:[is]vs:[a-z0-9-]+:[0-9]+:[a-z-]/[a-zA-Z0-9-]+$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

    Request references a resource which does not exist.

    " + } + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

    Request would cause a service quota to be exceeded.

    " + } + }, + "error":{"httpStatusCode":402}, + "exception":true + }, + "StopStreamRequest":{ + "type":"structure", + "required":["channelArn"], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

    ARN of the channel for which the stream is to be stopped.

    " + } + } + }, + "StopStreamResponse":{ + "type":"structure", + "members":{ + } + }, + "Stream":{ + "type":"structure", + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

    Channel ARN for the stream.

    " + }, + "playbackUrl":{ + "shape":"PlaybackURL", + "documentation":"

    URL of the video master manifest, required by the video player to play the HLS stream.

    " + }, + "startTime":{ + "shape":"StreamStartTime", + "documentation":"

    ISO-8601 formatted timestamp of the stream’s start.

    " + }, + "state":{ + "shape":"StreamState", + "documentation":"

    The stream’s state.

    " + }, + "health":{ + "shape":"StreamHealth", + "documentation":"

    The stream’s health.

    " + }, + "viewerCount":{ + "shape":"StreamViewerCount", + "documentation":"

    Number of current viewers of the stream.

    " + } + }, + "documentation":"

    Specifies a live video stream that has been ingested and distributed.

    " + }, + "StreamHealth":{ + "type":"string", + "enum":[ + "HEALTHY", + "STARVING", + "UNKNOWN" + ] + }, + "StreamKey":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"StreamKeyArn", + "documentation":"

    Stream-key ARN.

    " + }, + "value":{ + "shape":"StreamKeyValue", + "documentation":"

    Stream-key value.

    " + }, + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

    Channel ARN for the stream.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Array of 1-50 maps, each of the form string:string (key:value).

    " + } + }, + "documentation":"

    Object specifying a stream key.

    " + }, + "StreamKeyArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:aws:[is]vs:[a-z0-9-]+:[0-9]+:stream-key/[a-zA-Z0-9-]+$" + }, + "StreamKeyArnList":{ + "type":"list", + "member":{"shape":"StreamKeyArn"}, + "max":50, + "min":1 + }, + "StreamKeyList":{ + "type":"list", + "member":{"shape":"StreamKeySummary"} + }, + "StreamKeySummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"StreamKeyArn", + "documentation":"

    Stream-key ARN.

    " + }, + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

    Channel ARN for the stream.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Array of 1-50 maps, each of the form string:string (key:value).

    " + } + }, + "documentation":"

    Summary information about a stream key.

    " + }, + "StreamKeyValue":{"type":"string"}, + "StreamKeys":{ + "type":"list", + "member":{"shape":"StreamKey"} + }, + "StreamList":{ + "type":"list", + "member":{"shape":"StreamSummary"} + }, + "StreamMetadata":{"type":"string"}, + "StreamStartTime":{"type":"timestamp"}, + "StreamState":{ + "type":"string", + "enum":[ + "LIVE", + "OFFLINE" + ] + }, + "StreamSummary":{ + "type":"structure", + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

    Channel ARN for the stream.

    " + }, + "state":{ + "shape":"StreamState", + "documentation":"

    The stream’s state.

    " + }, + "health":{ + "shape":"StreamHealth", + "documentation":"

    The stream’s health.

    " + }, + "viewerCount":{ + "shape":"StreamViewerCount", + "documentation":"

    Number of current viewers of the stream.

    " + }, + "startTime":{ + "shape":"StreamStartTime", + "documentation":"

    ISO-8601 formatted timestamp of the stream’s start.

    " + } + }, + "documentation":"

    Summary information about a stream.

    " + }, + "StreamUnavailable":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

    The stream is temporarily unavailable.

    " + } + }, + "error":{"httpStatusCode":503}, + "exception":true + }, + "StreamViewerCount":{"type":"long"}, + "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    ARN of the resource for which tags are to be added or updated.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Array of tags to be added or updated.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

    Request was denied due to request throttling.

    " + } + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    ARN of the resource for which tags are to be removed.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    Array of tags to be removed.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateChannelRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"

    ARN of the channel to be updated.

    " + }, + "name":{ + "shape":"ChannelName", + "documentation":"

    Channel name.

    " + }, + "latencyMode":{ + "shape":"ChannelLatencyMode", + "documentation":"

    Channel latency mode. Default: LOW.

    " + }, + "type":{ + "shape":"ChannelType", + "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable resolution or bitrate, the stream probably will disconnect immediately. Valid values:

    • STANDARD: Multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Vertical resolution can be up to 1080 and bitrate can be up to 8.5 Mbps.

    • BASIC: Amazon IVS delivers the original input to viewers. The viewer’s video-quality choice is limited to the original input. Vertical resolution can be up to 480 and bitrate can be up to 1.5 Mbps.

    Default: STANDARD.

    " + }, + "authorized":{ + "shape":"Boolean", + "documentation":"

    Whether the channel is authorized. Default: false.

    " + } + } + }, + "UpdateChannelResponse":{ + "type":"structure", + "members":{ + "channel":{"shape":"Channel"} + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

    The input fails to satisfy the constraints specified by an AWS service.

    " + } + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "errorCode":{"type":"string"}, + "errorMessage":{"type":"string"} + }, + "documentation":"

    Introduction

    The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an AWS SNS event stream for responses. JSON is used for both requests and responses, including errors.

    The API is an AWS regional service, currently in these regions: us-west-2, us-east-1, and eu-west-1.

    All API request parameters and URLs are case sensitive.

    For a summary of notable documentation changes in each release, see Document History.

    Service Endpoints

    The following are the Amazon IVS service endpoints (all HTTPS):

    Region name: US West (Oregon)

    • Region: us-west-2

    • Endpoint: ivs.us-west-2.amazonaws.com

    Region name: US East (Virginia)

    • Region: us-east-1

    • Endpoint: ivs.us-east-1.amazonaws.com

    Region name: EU West (Dublin)

    • Region: eu-west-1

    • Endpoint: ivs.eu-west-1.amazonaws.com

    Allowed Header Values

    • Accept: application/json

    • Accept-Encoding: gzip, deflate

    • Content-Type: application/json

    Resources

    The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS):

    • Channel — Stores configuration data related to your live stream. You first create a channel and then use the channel’s stream key to start your live stream (see the sketch after this list). See the Channel endpoints for more information.

    • Stream key — An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. See the StreamKey endpoints for more information. Treat the stream key like a secret, since it allows anyone to stream to the channel.

    • Playback key pair — Video playback may be restricted using playback-authorization tokens, which use public-key encryption. A playback key pair is the public-private pair of keys used to sign and validate the playback-authorization token. See the PlaybackKeyPair endpoints for more information.
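
    A minimal sketch of that first step with the AWS SDK for Java v2, assuming the generated IvsClient; CreateChannel returns both the channel and its stream key, so no separate CreateStreamKey call is needed here.

        import software.amazon.awssdk.services.ivs.IvsClient;
        import software.amazon.awssdk.services.ivs.model.Channel;
        import software.amazon.awssdk.services.ivs.model.CreateChannelRequest;
        import software.amazon.awssdk.services.ivs.model.CreateChannelResponse;

        public class CreateChannelExample {
            public static void main(String[] args) {
                try (IvsClient ivs = IvsClient.create()) {
                    CreateChannelResponse response = ivs.createChannel(
                            CreateChannelRequest.builder().name("my-first-channel").build());
                    Channel channel = response.channel();
                    // The ingest endpoint and stream-key value are what you configure in streaming software.
                    System.out.println("Ingest endpoint: " + channel.ingestEndpoint());
                    System.out.println("Stream key:      " + response.streamKey().value());
                    System.out.println("Playback URL:    " + channel.playbackUrl());
                }
            }
        }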

    Tagging

    A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags.

    Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

    The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, and Playback Key Pairs.
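
    A short sketch of tagging a channel and reading its tags back with the AWS SDK for Java v2 (generated IvsClient assumed, placeholder ARN), using the topic:nature tag from the example above.

        import java.util.Collections;
        import java.util.Map;
        import software.amazon.awssdk.services.ivs.IvsClient;
        import software.amazon.awssdk.services.ivs.model.ListTagsForResourceRequest;
        import software.amazon.awssdk.services.ivs.model.TagResourceRequest;

        public class TagChannel {
            public static void main(String[] args) {
                String channelArn = "arn:aws:ivs:us-west-2:123456789012:channel/abcdEFGH1234"; // placeholder
                try (IvsClient ivs = IvsClient.create()) {
                    // Add (or overwrite) a single tag on the channel.
                    ivs.tagResource(TagResourceRequest.builder()
                            .resourceArn(channelArn)
                            .tags(Collections.singletonMap("topic", "nature"))
                            .build());
                    // Read the tags back.
                    Map<String, String> tags = ivs.listTagsForResource(
                            ListTagsForResourceRequest.builder().resourceArn(channelArn).build()).tags();
                    System.out.println(tags);
                }
            }
        }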

    Channel Endpoints

    • CreateChannel — Creates a new channel and an associated stream key to start streaming.

    • GetChannel — Gets the channel configuration for the specified channel ARN (Amazon Resource Name).

    • BatchGetChannel — Performs GetChannel on multiple ARNs simultaneously.

    • ListChannels — Gets summary information about all channels in your account, in the AWS region where the API request is processed. This list can be filtered to match a specified string.

    • UpdateChannel — Updates a channel's configuration. This does not affect an ongoing stream of this channel. You must stop and restart the stream for the changes to take effect.

    • DeleteChannel — Deletes the specified channel.

    StreamKey Endpoints

    • CreateStreamKey — Creates a stream key, used to initiate a stream, for the specified channel ARN.

    • GetStreamKey — Gets stream key information for the specified ARN.

    • BatchGetStreamKey — Performs GetStreamKey on multiple ARNs simultaneously.

    • ListStreamKeys — Gets summary information about stream keys for the specified channel.

    • DeleteStreamKey — Deletes the stream key for the specified ARN, so it can no longer be used to stream.

    Stream Endpoints

    • GetStream — Gets information about the active (live) stream on a specified channel.

    • ListStreams — Gets summary information about live streams in your account, in the AWS region where the API request is processed.

    • StopStream — Disconnects the incoming RTMPS stream for the specified channel. Can be used in conjunction with DeleteStreamKey to prevent further streaming to a channel.

    • PutMetadata — Inserts metadata into an RTMPS stream for the specified channel. A maximum of 5 requests per second per channel is allowed, each with a maximum 1 KB payload.

    PlaybackKeyPair Endpoints

    • ImportPlaybackKeyPair — Imports the public portion of a new key pair and returns its arn and fingerprint. The privateKey can then be used to generate viewer authorization tokens, to grant viewers access to authorized channels.

    • GetPlaybackKeyPair — Gets a specified playback authorization key pair and returns the arn and fingerprint. The privateKey held by the caller can be used to generate viewer authorization tokens, to grant viewers access to authorized channels.

    • ListPlaybackKeyPairs — Gets summary information about playback key pairs.

    • DeletePlaybackKeyPair — Deletes a specified authorization key pair. This invalidates future viewer tokens generated using the key pair’s privateKey.

    AWS Tags Endpoints

    • TagResource — Adds or updates tags for the AWS resource with the specified ARN.

    • UntagResource — Removes tags from the resource with the specified ARN.

    • ListTagsForResource — Gets information about AWS tags for the specified ARN.

    " +} diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 450aa38dcf67..3eebd8cdc355 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + kendra + AWS Java SDK :: Services :: Kendra + The AWS Java SDK for Kendra module holds the client classes that are used for + communicating with Kendra. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.kendra + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/kendra/src/main/resources/codegen-resources/paginators-1.json b/services/kendra/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..2170bfc57ce1 --- /dev/null +++ b/services/kendra/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,19 @@ +{ + "pagination": { + "ListDataSourceSyncJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDataSources": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListIndices": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/kendra/src/main/resources/codegen-resources/service-2.json b/services/kendra/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..43fa7f9e9e55 --- /dev/null +++ b/services/kendra/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,4357 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-02-03", + "endpointPrefix":"kendra", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"kendra", + "serviceFullName":"AWSKendraFrontendService", + "serviceId":"kendra", + "signatureVersion":"v4", + "signingName":"kendra", + "targetPrefix":"AWSKendraFrontendService", + "uid":"kendra-2019-02-03" + }, + "operations":{ + "BatchDeleteDocument":{ + "name":"BatchDeleteDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteDocumentRequest"}, + "output":{"shape":"BatchDeleteDocumentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes one or more documents from an index. The documents must have been added with the BatchPutDocument operation.

    The documents are deleted asynchronously. You can see the progress of the deletion by using AWS CloudWatch. Any error messages related to the processing of the batch are sent to your CloudWatch log.

    " + }, + "BatchPutDocument":{ + "name":"BatchPutDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchPutDocumentRequest"}, + "output":{"shape":"BatchPutDocumentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Adds one or more documents to an index.

    The BatchPutDocument operation enables you to ingest inline documents or a set of documents stored in an Amazon S3 bucket. Use this operation to ingest your text and unstructured text into an index, add custom attributes to the documents, and attach an access control list to the documents added to the index.

    The documents are indexed asynchronously. You can see the progress of the batch using AWS CloudWatch. Any error messages related to processing the batch are sent to your AWS CloudWatch log.
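
    As an illustration only, and assuming the Document shape and ContentType values defined elsewhere in this model plus the classes the AWS SDK for Java v2 would generate from them, ingesting one inline plain-text document might look roughly like this (the index ID is a placeholder):

        import software.amazon.awssdk.core.SdkBytes;
        import software.amazon.awssdk.services.kendra.KendraClient;
        import software.amazon.awssdk.services.kendra.model.BatchPutDocumentRequest;
        import software.amazon.awssdk.services.kendra.model.ContentType;
        import software.amazon.awssdk.services.kendra.model.Document;

        public class IndexInlineDocument {
            public static void main(String[] args) {
                String indexId = "11111111-2222-3333-4444-555555555555"; // placeholder index ID
                try (KendraClient kendra = KendraClient.create()) {
                    Document doc = Document.builder()
                            .id("doc-1")
                            .title("Getting started")
                            .blob(SdkBytes.fromUtf8String("Amazon Kendra is an enterprise search service."))
                            .contentType(ContentType.PLAIN_TEXT)
                            .build();
                    // Errors for individual documents are reported asynchronously via CloudWatch.
                    kendra.batchPutDocument(BatchPutDocumentRequest.builder()
                            .indexId(indexId)
                            .documents(doc)
                            .build());
                }
            }
        }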

    " + }, + "CreateDataSource":{ + "name":"CreateDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataSourceRequest"}, + "output":{"shape":"CreateDataSourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a data source that you use with an Amazon Kendra index.

    You specify a name, data source connector type and description for your data source. You also specify configuration information such as document metadata (author, source URI, and so on) and user context information.

    CreateDataSource is a synchronous operation. The operation returns 200 if the data source was successfully created. Otherwise, an exception is raised.

    " + }, + "CreateFaq":{ + "name":"CreateFaq", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFaqRequest"}, + "output":{"shape":"CreateFaqResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new set of frequently asked questions (FAQs) and answers.

    " + }, + "CreateIndex":{ + "name":"CreateIndex", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateIndexRequest"}, + "output":{"shape":"CreateIndexResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceAlreadyExistException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new Amazon Kendra index. Index creation is an asynchronous operation. To determine if index creation has completed, check the Status field returned from a call to the DescribeIndex operation. The Status field is set to ACTIVE when the index is ready to use.

    Once the index is active, you can index your documents using the BatchPutDocument operation or using one of the supported data sources.
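
    A sketch of that create-then-poll flow with the AWS SDK for Java v2, assuming the generated KendraClient, the IndexStatus enum, and a placeholder IAM role ARN:

        import software.amazon.awssdk.services.kendra.KendraClient;
        import software.amazon.awssdk.services.kendra.model.CreateIndexRequest;
        import software.amazon.awssdk.services.kendra.model.DescribeIndexRequest;
        import software.amazon.awssdk.services.kendra.model.IndexStatus;

        public class WaitForIndex {
            public static void main(String[] args) throws InterruptedException {
                String roleArn = "arn:aws:iam::123456789012:role/KendraIndexRole"; // placeholder
                try (KendraClient kendra = KendraClient.create()) {
                    String indexId = kendra.createIndex(CreateIndexRequest.builder()
                            .name("my-index")
                            .roleArn(roleArn)
                            .build()).id();
                    IndexStatus status;
                    do {
                        Thread.sleep(60_000); // index creation can take a while
                        status = kendra.describeIndex(
                                DescribeIndexRequest.builder().id(indexId).build()).status();
                    } while (status == IndexStatus.CREATING);
                    System.out.println("Index " + indexId + " finished with status " + status);
                }
            }
        }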

    " + }, + "CreateThesaurus":{ + "name":"CreateThesaurus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateThesaurusRequest"}, + "output":{"shape":"CreateThesaurusResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a thesaurus for an index. The thesaurus contains a list of synonyms in Solr format.

    " + }, + "DeleteDataSource":{ + "name":"DeleteDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDataSourceRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an Amazon Kendra data source. An exception is not thrown if the data source is already being deleted. While the data source is being deleted, the Status field returned by a call to the DescribeDataSource operation is set to DELETING. For more information, see Deleting Data Sources.

    " + }, + "DeleteFaq":{ + "name":"DeleteFaq", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFaqRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes an FAQ from an index.

    " + }, + "DeleteIndex":{ + "name":"DeleteIndex", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIndexRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an existing Amazon Kendra index. An exception is not thrown if the index is already being deleted. While the index is being deleted, the Status field returned by a call to the DescribeIndex operation is set to DELETING.

    " + }, + "DeleteThesaurus":{ + "name":"DeleteThesaurus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteThesaurusRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an existing Amazon Kendra thesaurus.

    " + }, + "DescribeDataSource":{ + "name":"DescribeDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataSourceRequest"}, + "output":{"shape":"DescribeDataSourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets information about an Amazon Kendra data source.

    " + }, + "DescribeFaq":{ + "name":"DescribeFaq", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFaqRequest"}, + "output":{"shape":"DescribeFaqResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets information about an FAQ list.

    " + }, + "DescribeIndex":{ + "name":"DescribeIndex", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIndexRequest"}, + "output":{"shape":"DescribeIndexResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes an existing Amazon Kendra index.

    " + }, + "DescribeThesaurus":{ + "name":"DescribeThesaurus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeThesaurusRequest"}, + "output":{"shape":"DescribeThesaurusResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes an existing Amazon Kendra thesaurus.

    " + }, + "ListDataSourceSyncJobs":{ + "name":"ListDataSourceSyncJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDataSourceSyncJobsRequest"}, + "output":{"shape":"ListDataSourceSyncJobsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets statistics about synchronizing Amazon Kendra with a data source.

    " + }, + "ListDataSources":{ + "name":"ListDataSources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDataSourcesRequest"}, + "output":{"shape":"ListDataSourcesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the data sources that you have created.

    " + }, + "ListFaqs":{ + "name":"ListFaqs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFaqsRequest"}, + "output":{"shape":"ListFaqsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets a list of FAQ lists associated with an index.

    " + }, + "ListIndices":{ + "name":"ListIndices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIndicesRequest"}, + "output":{"shape":"ListIndicesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the Amazon Kendra indexes that you have created.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets a list of tags associated with a specified resource. Indexes, FAQs, and data sources can have tags associated with them.

    " + }, + "ListThesauri":{ + "name":"ListThesauri", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListThesauriRequest"}, + "output":{"shape":"ListThesauriResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the Amazon Kendra thesauri associated with an index.

    " + }, + "Query":{ + "name":"Query", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"QueryRequest"}, + "output":{"shape":"QueryResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Searches an active index. Use this API to search your documents using a query. The Query operation enables you to do faceted search and to filter results based on document attributes.

    It also enables you to provide user context that Amazon Kendra uses to enforce document access control in the search results.

    Amazon Kendra searches your index for text content and question and answer (FAQ) content. By default the response contains three types of results.

    • Relevant passages

    • Matching FAQs

    • Relevant documents

    You can specify that the query return only one type of result using the QueryResultTypeConfig parameter.

    Each query returns the 100 most relevant results.
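
    A minimal query sketch with the AWS SDK for Java v2, assuming the generated KendraClient and the QueryResultItem shape defined later in this model; the index ID and query text are placeholders.

        import software.amazon.awssdk.services.kendra.KendraClient;
        import software.amazon.awssdk.services.kendra.model.QueryRequest;
        import software.amazon.awssdk.services.kendra.model.QueryResponse;
        import software.amazon.awssdk.services.kendra.model.QueryResultItem;

        public class SearchIndex {
            public static void main(String[] args) {
                String indexId = "11111111-2222-3333-4444-555555555555"; // placeholder index ID
                try (KendraClient kendra = KendraClient.create()) {
                    QueryResponse response = kendra.query(QueryRequest.builder()
                            .indexId(indexId)
                            .queryText("how do I create a data source")
                            .build());
                    for (QueryResultItem item : response.resultItems()) {
                        // Each item is an ANSWER, QUESTION_ANSWER, or DOCUMENT result.
                        System.out.println(item.type() + ": " + item.documentTitle().text());
                    }
                }
            }
        }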

    " + }, + "StartDataSourceSyncJob":{ + "name":"StartDataSourceSyncJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartDataSourceSyncJobRequest"}, + "output":{"shape":"StartDataSourceSyncJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Starts a synchronization job for a data source. If a synchronization job is already in progress, Amazon Kendra returns a ResourceInUseException exception.

    " + }, + "StopDataSourceSyncJob":{ + "name":"StopDataSourceSyncJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopDataSourceSyncJobRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Stops a running synchronization job. You can't stop a scheduled synchronization job.

    " + }, + "SubmitFeedback":{ + "name":"SubmitFeedback", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubmitFeedbackRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Enables you to provide feedback to Amazon Kendra to improve the performance of the service.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Adds the specified tag to the specified index, FAQ, or data source resource. If the tag already exists, the existing value is replaced with the new value.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes a tag from an index, FAQ, or a data source.

    " + }, + "UpdateDataSource":{ + "name":"UpdateDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDataSourceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates an existing Amazon Kendra data source.

    " + }, + "UpdateIndex":{ + "name":"UpdateIndex", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateIndexRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates an existing Amazon Kendra index.

    " + }, + "UpdateThesaurus":{ + "name":"UpdateThesaurus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateThesaurusRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates a thesaurus file associated with an index.

    " + } + }, + "shapes":{ + "AccessControlListConfiguration":{ + "type":"structure", + "members":{ + "KeyPath":{ + "shape":"S3ObjectKey", + "documentation":"

    Path to the AWS S3 bucket that contains the ACL files.

    " + } + }, + "documentation":"

    Access Control List files for the documents in a data source. For the format of the file, see Access control for S3 data sources.

    " + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "AclConfiguration":{ + "type":"structure", + "required":["AllowedGroupsColumnName"], + "members":{ + "AllowedGroupsColumnName":{ + "shape":"ColumnName", + "documentation":"

    A list of groups, separated by semi-colons, that filters a query response based on user context. The document is only returned to users that are in one of the groups specified in the UserContext field of the Query operation.

    " + } + }, + "documentation":"

    Provides information about the column that should be used for filtering the query response by groups.

    " + }, + "AdditionalResultAttribute":{ + "type":"structure", + "required":[ + "Key", + "ValueType", + "Value" + ], + "members":{ + "Key":{ + "shape":"String", + "documentation":"

    The key that identifies the attribute.

    " + }, + "ValueType":{ + "shape":"AdditionalResultAttributeValueType", + "documentation":"

    The data type of the Value property.

    " + }, + "Value":{ + "shape":"AdditionalResultAttributeValue", + "documentation":"

    An object that contains the attribute value.

    " + } + }, + "documentation":"

    An attribute returned from an index query.

    " + }, + "AdditionalResultAttributeList":{ + "type":"list", + "member":{"shape":"AdditionalResultAttribute"} + }, + "AdditionalResultAttributeValue":{ + "type":"structure", + "members":{ + "TextWithHighlightsValue":{ + "shape":"TextWithHighlights", + "documentation":"

    The text associated with the attribute and information about the highlight to apply to the text.

    " + } + }, + "documentation":"

    An attribute returned with a document from a search.

    " + }, + "AdditionalResultAttributeValueType":{ + "type":"string", + "enum":["TEXT_WITH_HIGHLIGHTS_VALUE"] + }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, + "AttributeFilter":{ + "type":"structure", + "members":{ + "AndAllFilters":{ + "shape":"AttributeFilterList", + "documentation":"

    Performs a logical AND operation on all supplied filters.

    " + }, + "OrAllFilters":{ + "shape":"AttributeFilterList", + "documentation":"

    Performs a logical OR operation on all supplied filters.

    " + }, + "NotFilter":{ + "shape":"AttributeFilter", + "documentation":"

    Performs a logical NOT operation on all supplied filters.

    " + }, + "EqualsTo":{ + "shape":"DocumentAttribute", + "documentation":"

    Performs an equals operation on two document attributes.

    " + }, + "ContainsAll":{ + "shape":"DocumentAttribute", + "documentation":"

    Returns true when a document contains all of the specified document attributes. This filter is only applicable to StringListValue metadata.

    " + }, + "ContainsAny":{ + "shape":"DocumentAttribute", + "documentation":"

    Returns true when a document contains any of the specified document attributes. This filter is only applicable to StringListValue metadata.

    " + }, + "GreaterThan":{ + "shape":"DocumentAttribute", + "documentation":"

    Performs a greater than operation on two document attributes. Use with a document attribute of type Integer or Long.

    " + }, + "GreaterThanOrEquals":{ + "shape":"DocumentAttribute", + "documentation":"

    Performs a greater than or equals operation on two document attributes. Use with a document attribute of type Integer or Long.

    " + }, + "LessThan":{ + "shape":"DocumentAttribute", + "documentation":"

    Performs a less than operation on two document attributes. Use with a document attribute of type Integer or Long.

    " + }, + "LessThanOrEquals":{ + "shape":"DocumentAttribute", + "documentation":"

    Performs a less than or equals operation on two document attributes. Use with a document attribute of type Integer or Long.

    " + } + }, + "documentation":"

    Provides filtering of the query results based on document attributes.

    When you use the AndAllFilters or OrAllFilters filters, you can use a maximum of 2 layers under the first attribute filter. For example, you can use:

    <AndAllFilters>

    1. <OrAllFilters>

    2. <EqualsTo>

    If you use more than 2 layers, you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\"
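
    As a minimal sketch of the two-layer nesting described above, assuming the generated AWS SDK for Java v2 Kendra model classes follow the usual builder conventions (the attribute keys "department" and "city" and their values are hypothetical), the filter might be built like this:

        import software.amazon.awssdk.services.kendra.model.AttributeFilter;
        import software.amazon.awssdk.services.kendra.model.DocumentAttribute;
        import software.amazon.awssdk.services.kendra.model.DocumentAttributeValue;

        public class AttributeFilterExample {
            // Layer 1: AndAllFilters; layer 2: OrAllFilters and EqualsTo -- the maximum depth allowed.
            static AttributeFilter departmentFilter() {
                return AttributeFilter.builder()
                        .andAllFilters(
                                AttributeFilter.builder()
                                        .orAllFilters(
                                                equalsTo("department", "HR"),
                                                equalsTo("department", "Legal"))
                                        .build(),
                                equalsTo("city", "Seattle"))
                        .build();
            }

            // Helper that wraps a single EqualsTo comparison on a string attribute.
            static AttributeFilter equalsTo(String key, String value) {
                return AttributeFilter.builder()
                        .equalsTo(DocumentAttribute.builder()
                                .key(key)
                                .value(DocumentAttributeValue.builder().stringValue(value).build())
                                .build())
                        .build();
            }
        }

    Adding a third level of AndAllFilters or OrAllFilters below the inner filters would produce the ValidationException described above.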

    " + }, + "AttributeFilterList":{ + "type":"list", + "member":{"shape":"AttributeFilter"} + }, + "BatchDeleteDocumentRequest":{ + "type":"structure", + "required":[ + "IndexId", + "DocumentIdList" + ], + "members":{ + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the documents to delete.

    " + }, + "DocumentIdList":{ + "shape":"DocumentIdList", + "documentation":"

    One or more identifiers for documents to delete from the index.

    " + }, + "DataSourceSyncJobMetricTarget":{"shape":"DataSourceSyncJobMetricTarget"} + } + }, + "BatchDeleteDocumentResponse":{ + "type":"structure", + "members":{ + "FailedDocuments":{ + "shape":"BatchDeleteDocumentResponseFailedDocuments", + "documentation":"

    A list of documents that could not be removed from the index. Each entry contains an error message that indicates why the document couldn't be removed from the index.

    " + } + } + }, + "BatchDeleteDocumentResponseFailedDocument":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"DocumentId", + "documentation":"

    The identifier of the document that couldn't be removed from the index.

    " + }, + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

    The error code for why the document couldn't be removed from the index.

    " + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    An explanation for why the document couldn't be removed from the index.

    " + } + }, + "documentation":"

    Provides information about documents that could not be removed from an index by the BatchDeleteDocument operation.

    " + }, + "BatchDeleteDocumentResponseFailedDocuments":{ + "type":"list", + "member":{"shape":"BatchDeleteDocumentResponseFailedDocument"} + }, + "BatchPutDocumentRequest":{ + "type":"structure", + "required":[ + "IndexId", + "Documents" + ], + "members":{ + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index to add the documents to. You need to create the index first using the CreateIndex operation.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of a role that is allowed to run the BatchPutDocument operation. For more information, see IAM Roles for Amazon Kendra.

    " + }, + "Documents":{ + "shape":"DocumentList", + "documentation":"

    One or more documents to add to the index.

    Documents have the following file size limits.

    • 5 MB total size for inline documents

    • 50 MB total size for files from an S3 bucket

    • 5 MB extracted text for any file

    For more information about file size and transaction per second quotas, see Quotas.
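
    As a sketch of how these limits apply with the v2 client (assuming the standard generated KendraClient; the index ID, role ARN, and document content below are placeholders), a batch of up to 10 inline documents can be submitted like this:

        import software.amazon.awssdk.core.SdkBytes;
        import software.amazon.awssdk.services.kendra.KendraClient;
        import software.amazon.awssdk.services.kendra.model.BatchPutDocumentRequest;
        import software.amazon.awssdk.services.kendra.model.BatchPutDocumentResponse;
        import software.amazon.awssdk.services.kendra.model.ContentType;
        import software.amazon.awssdk.services.kendra.model.Document;

        public class BatchPutDocumentExample {
            public static void main(String[] args) {
                try (KendraClient kendra = KendraClient.create()) {
                    Document doc = Document.builder()
                            .id("doc-001")                               // placeholder document ID
                            .title("Leave policy")                       // placeholder title
                            .blob(SdkBytes.fromUtf8String("Vacation days can be carried over..."))
                            .contentType(ContentType.PLAIN_TEXT)
                            .build();

                    BatchPutDocumentRequest request = BatchPutDocumentRequest.builder()
                            .indexId("index-id")                         // placeholder index ID
                            .roleArn("arn:aws:iam::123456789012:role/KendraBatchPutRole") // placeholder ARN
                            .documents(doc)                              // up to 10 documents per call
                            .build();

                    BatchPutDocumentResponse response = kendra.batchPutDocument(request);
                    // Documents that fail validation are reported in the response rather than thrown.
                    response.failedDocuments().forEach(f ->
                            System.out.println(f.id() + ": " + f.errorMessage()));
                }
            }
        }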

    " + } + } + }, + "BatchPutDocumentResponse":{ + "type":"structure", + "members":{ + "FailedDocuments":{ + "shape":"BatchPutDocumentResponseFailedDocuments", + "documentation":"

    A list of documents that were not added to the index because the document failed a validation check. Each document contains an error message that indicates why the document couldn't be added to the index.

    If there was an error adding a document to an index, the error is reported in your AWS CloudWatch log. For more information, see Monitoring Amazon Kendra with Amazon CloudWatch Logs.

    " + } + } + }, + "BatchPutDocumentResponseFailedDocument":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"DocumentId", + "documentation":"

    The unique identifier of the document.

    " + }, + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

    The type of error that caused the document to fail to be indexed.

    " + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    A description of the reason why the document could not be indexed.

    " + } + }, + "documentation":"

    Provides information about a document that could not be indexed.

    " + }, + "BatchPutDocumentResponseFailedDocuments":{ + "type":"list", + "member":{"shape":"BatchPutDocumentResponseFailedDocument"} + }, + "Blob":{"type":"blob"}, + "Boolean":{"type":"boolean"}, + "CapacityUnitsConfiguration":{ + "type":"structure", + "required":[ + "StorageCapacityUnits", + "QueryCapacityUnits" + ], + "members":{ + "StorageCapacityUnits":{ + "shape":"StorageCapacityUnit", + "documentation":"

    The amount of extra storage capacity for an index. Each capacity unit provides 150 GB of storage space or 500,000 documents, whichever is reached first.

    " + }, + "QueryCapacityUnits":{ + "shape":"QueryCapacityUnit", + "documentation":"

    The amount of extra query capacity for an index. Each capacity unit provides 0.5 queries per second and 40,000 queries per day.

    " + } + }, + "documentation":"

    Specifies capacity units configured for your index. You can add and remove capacity units to tune an index to your requirements.
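
    For illustration only: assuming the UpdateIndex request exposes this structure under a CapacityUnits member (that request shape is not part of this excerpt, so the member name is an assumption), extra capacity might be added roughly like this:

        import software.amazon.awssdk.services.kendra.KendraClient;
        import software.amazon.awssdk.services.kendra.model.CapacityUnitsConfiguration;
        import software.amazon.awssdk.services.kendra.model.UpdateIndexRequest;

        public class AddCapacityExample {
            public static void main(String[] args) {
                try (KendraClient kendra = KendraClient.create()) {
                    // One extra storage unit (150 GB / 500,000 documents) and one extra
                    // query unit (0.5 queries per second, 40,000 queries per day).
                    CapacityUnitsConfiguration capacity = CapacityUnitsConfiguration.builder()
                            .storageCapacityUnits(1)
                            .queryCapacityUnits(1)
                            .build();

                    kendra.updateIndex(UpdateIndexRequest.builder()
                            .id("index-id")              // placeholder index ID
                            .capacityUnits(capacity)     // assumed member name; see note above
                            .build());
                }
            }
        }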

    " + }, + "ChangeDetectingColumns":{ + "type":"list", + "member":{"shape":"ColumnName"}, + "max":5, + "min":1 + }, + "ClaimRegex":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^\\P{C}*$" + }, + "ClickFeedback":{ + "type":"structure", + "required":[ + "ResultId", + "ClickTime" + ], + "members":{ + "ResultId":{ + "shape":"ResultId", + "documentation":"

    The unique identifier of the search result that was clicked.

    " + }, + "ClickTime":{ + "shape":"Timestamp", + "documentation":"

    The Unix timestamp of the date and time that the result was clicked.

    " + } + }, + "documentation":"

    Gathers information about when a particular result was clicked by a user. Your application uses the SubmitFeedback operation to provide click information.
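
    A sketch of reporting a click through SubmitFeedback. The SubmitFeedbackRequest shape is not part of this excerpt, so the queryId and clickFeedbackItems member names are assumptions, and all identifiers are placeholders:

        import java.time.Instant;
        import software.amazon.awssdk.services.kendra.KendraClient;
        import software.amazon.awssdk.services.kendra.model.ClickFeedback;
        import software.amazon.awssdk.services.kendra.model.SubmitFeedbackRequest;

        public class ClickFeedbackExample {
            public static void main(String[] args) {
                try (KendraClient kendra = KendraClient.create()) {
                    ClickFeedback click = ClickFeedback.builder()
                            .resultId("result-id")        // placeholder: the result the user clicked
                            .clickTime(Instant.now())
                            .build();

                    kendra.submitFeedback(SubmitFeedbackRequest.builder()
                            .indexId("index-id")          // placeholder index ID
                            .queryId("query-id")          // placeholder; assumed member name
                            .clickFeedbackItems(click)    // assumed member name
                            .build());
                }
            }
        }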

    " + }, + "ClickFeedbackList":{ + "type":"list", + "member":{"shape":"ClickFeedback"} + }, + "ClientTokenName":{ + "type":"string", + "max":100, + "min":1 + }, + "ColumnConfiguration":{ + "type":"structure", + "required":[ + "DocumentIdColumnName", + "DocumentDataColumnName", + "ChangeDetectingColumns" + ], + "members":{ + "DocumentIdColumnName":{ + "shape":"ColumnName", + "documentation":"

    The column that provides the document's unique identifier.

    " + }, + "DocumentDataColumnName":{ + "shape":"ColumnName", + "documentation":"

    The column that contains the contents of the document.

    " + }, + "DocumentTitleColumnName":{ + "shape":"ColumnName", + "documentation":"

    The column that contains the title of the document.

    " + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    An array of objects that map database column names to the corresponding fields in an index. You must first create the fields in the index using the UpdateIndex operation.

    " + }, + "ChangeDetectingColumns":{ + "shape":"ChangeDetectingColumns", + "documentation":"

    One to five columns that indicate when a document in the database has changed.

    " + } + }, + "documentation":"

    Provides information about how Amazon Kendra should use the columns of a database in an index.

    " + }, + "ColumnName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z][a-zA-Z0-9_]*$" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "ConfluenceAttachmentConfiguration":{ + "type":"structure", + "members":{ + "CrawlAttachments":{ + "shape":"Boolean", + "documentation":"

    Indicates whether Amazon Kendra indexes attachments to the pages and blogs in the Confluence data source.

    " + }, + "AttachmentFieldMappings":{ + "shape":"ConfluenceAttachmentFieldMappingsList", + "documentation":"

    Defines how attachment metadata fields should be mapped to index fields. Before you can map a field, you must first create an index field with a matching type using the console or the UpdateIndex operation.

    If you specify the AttachmentFieldMappings parameter, you must specify at least one field mapping.

    " + } + }, + "documentation":"

    Specifies the attachment settings for the Confluence data source. Attachment settings are optional; if you don't specify attachment settings, Amazon Kendra won't index attachments.

    " + }, + "ConfluenceAttachmentFieldMappingsList":{ + "type":"list", + "member":{"shape":"ConfluenceAttachmentToIndexFieldMapping"}, + "max":11, + "min":1 + }, + "ConfluenceAttachmentFieldName":{ + "type":"string", + "enum":[ + "AUTHOR", + "CONTENT_TYPE", + "CREATED_DATE", + "DISPLAY_URL", + "FILE_SIZE", + "ITEM_TYPE", + "PARENT_ID", + "SPACE_KEY", + "SPACE_NAME", + "URL", + "VERSION" + ] + }, + "ConfluenceAttachmentToIndexFieldMapping":{ + "type":"structure", + "members":{ + "DataSourceFieldName":{ + "shape":"ConfluenceAttachmentFieldName", + "documentation":"

    The name of the field in the data source.

    You must first create the index field using the operation.

    " + }, + "DateFieldFormat":{ + "shape":"DataSourceDateFieldFormat", + "documentation":"

    The format for date fields in the data source. If the field specified in DataSourceFieldName is a date field you must specify the date format. If the field is not a date field, an exception is thrown.

    " + }, + "IndexFieldName":{ + "shape":"IndexFieldName", + "documentation":"

    The name of the index field to map to the Confluence data source field. The index field type must match the Confluence field type.

    " + } + }, + "documentation":"

    Defines the mapping between a field in the Confluence data source and an Amazon Kendra index field.

    You must first create the index field using the operation.

    " + }, + "ConfluenceBlogConfiguration":{ + "type":"structure", + "members":{ + "BlogFieldMappings":{ + "shape":"ConfluenceBlogFieldMappingsList", + "documentation":"

    Defines how blog metadata fields should be mapped to index fields. Before you can map a field, you must first create an index field with a matching type using the console or the UpdateIndex operation.

    If you specify the BlogFieldMappings parameter, you must specify at least one field mapping.

    " + } + }, + "documentation":"

    Specifies the blog settings for the Confluence data source. Blogs are always indexed unless filtered from the index by the ExclusionPatterns or InclusionPatterns fields in the data type.

    " + }, + "ConfluenceBlogFieldMappingsList":{ + "type":"list", + "member":{"shape":"ConfluenceBlogToIndexFieldMapping"}, + "max":9, + "min":1 + }, + "ConfluenceBlogFieldName":{ + "type":"string", + "enum":[ + "AUTHOR", + "DISPLAY_URL", + "ITEM_TYPE", + "LABELS", + "PUBLISH_DATE", + "SPACE_KEY", + "SPACE_NAME", + "URL", + "VERSION" + ] + }, + "ConfluenceBlogToIndexFieldMapping":{ + "type":"structure", + "members":{ + "DataSourceFieldName":{ + "shape":"ConfluenceBlogFieldName", + "documentation":"

    The name of the field in the data source.

    " + }, + "DateFieldFormat":{ + "shape":"DataSourceDateFieldFormat", + "documentation":"

    The format for date fields in the data source. If the field specified in DataSourceFieldName is a date field you must specify the date format. If the field is not a date field, an exception is thrown.

    " + }, + "IndexFieldName":{ + "shape":"IndexFieldName", + "documentation":"

    The name of the index field to map to the Confluence data source field. The index field type must match the Confluence field type.

    " + } + }, + "documentation":"

    Defines the mapping between a blog field in the Confluence data source and an Amazon Kendra index field.

    You must first create the index field using the operation.

    " + }, + "ConfluenceConfiguration":{ + "type":"structure", + "required":[ + "ServerUrl", + "SecretArn", + "Version" + ], + "members":{ + "ServerUrl":{ + "shape":"Url", + "documentation":"

    The URL of your Confluence instance. Use the full URL of the server. For example, https://server.example.com:port/. You can also use an IP address, for example, https://192.168.1.113/.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The Amazon Resource Name (ARN) of an AWS Secrets Manager secret that contains the key/value pairs required to connect to your Confluence server. The secret must contain a JSON structure with the following keys:

    • username - The user name or email address of a user with administrative privileges for the Confluence server.

    • password - The password associated with the user logging in to the Confluence server.

    " + }, + "Version":{ + "shape":"ConfluenceVersion", + "documentation":"

    Specifies the version of the Confluence installation that you are connecting to.

    " + }, + "SpaceConfiguration":{ + "shape":"ConfluenceSpaceConfiguration", + "documentation":"

    Specifies configuration information for indexing Confluence spaces.

    " + }, + "PageConfiguration":{ + "shape":"ConfluencePageConfiguration", + "documentation":"

    Specifies configuration information for indexing Confluence pages.

    " + }, + "BlogConfiguration":{ + "shape":"ConfluenceBlogConfiguration", + "documentation":"

    Specifies configuration information for indexing Confluence blogs.

    " + }, + "AttachmentConfiguration":{ + "shape":"ConfluenceAttachmentConfiguration", + "documentation":"

    Specifies configuration information for indexing attachments to Confluence blogs and pages.

    " + }, + "VpcConfiguration":{ + "shape":"DataSourceVpcConfiguration", + "documentation":"

    Specifies the information for connecting to an Amazon VPC.

    " + }, + "InclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of regular expression patterns that apply to a URL on the Confluence server. An inclusion pattern can apply to a blog post, a page, a space, or an attachment. Items that match the patterns are included in the index. Items that don't match the pattern are excluded from the index. If an item matches both an inclusion pattern and an exclusion pattern, the item isn't included in the index.

    " + }, + "ExclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of regular expression patterns that apply to a URL on the Confluence server. An exclusion pattern can apply to a blog post, a page, a space, or an attachment. Items that match the pattern are excluded from the index. Items that don't match the pattern are included in the index. If an item matches both an exclusion pattern and an inclusion pattern, the item isn't included in the index.

    " + } + }, + "documentation":"

    Provides configuration information for data sources that connect to Confluence.

    " + }, + "ConfluencePageConfiguration":{ + "type":"structure", + "members":{ + "PageFieldMappings":{ + "shape":"ConfluencePageFieldMappingsList", + "documentation":"

    Defines how page metadata fields should be mapped to index fields. Before you can map a field, you must first create an index field with a matching type using the console or the UpdateIndex operation.

    If you specify the PageFieldMappings parameter, you must specify at least one field mapping.

    " + } + }, + "documentation":"

    Specifies the page settings for the Confluence data source.

    " + }, + "ConfluencePageFieldMappingsList":{ + "type":"list", + "member":{"shape":"ConfluencePageToIndexFieldMapping"}, + "max":12, + "min":1 + }, + "ConfluencePageFieldName":{ + "type":"string", + "enum":[ + "AUTHOR", + "CONTENT_STATUS", + "CREATED_DATE", + "DISPLAY_URL", + "ITEM_TYPE", + "LABELS", + "MODIFIED_DATE", + "PARENT_ID", + "SPACE_KEY", + "SPACE_NAME", + "URL", + "VERSION" + ] + }, + "ConfluencePageToIndexFieldMapping":{ + "type":"structure", + "members":{ + "DataSourceFieldName":{ + "shape":"ConfluencePageFieldName", + "documentation":"

    The name of the field in the data source.

    " + }, + "DateFieldFormat":{ + "shape":"DataSourceDateFieldFormat", + "documentation":"

    The format for date fields in the data source. If the field specified in DataSourceFieldName is a date field you must specify the date format. If the field is not a date field, an exception is thrown.

    " + }, + "IndexFieldName":{ + "shape":"IndexFieldName", + "documentation":"

    The name of the index field to map to the Confluence data source field. The index field type must match the Confluence field type.

    " + } + }, + "documentation":"

    Defines the mapping between a field in the Confluence data source and an Amazon Kendra index field.

    You must first create the index field using the operation.

    " + }, + "ConfluenceSpaceConfiguration":{ + "type":"structure", + "members":{ + "CrawlPersonalSpaces":{ + "shape":"Boolean", + "documentation":"

    Specifies whether Amazon Kendra should index personal spaces. Users can add restrictions to items in personal spaces. If personal spaces are indexed, queries without user context information may return restricted items from a personal space in their results. For more information, see Filtering on user context.

    " + }, + "CrawlArchivedSpaces":{ + "shape":"Boolean", + "documentation":"

    Specifies whether Amazon Kendra should index archived spaces.

    " + }, + "IncludeSpaces":{ + "shape":"ConfluenceSpaceList", + "documentation":"

    A list of space keys for Confluence spaces. If you include a key, the blogs, documents, and attachments in the space are indexed. Spaces that aren't in the list aren't indexed. A space in the list must exist. Otherwise, Amazon Kendra logs an error when the data source is synchronized. If a space is in both the IncludeSpaces and the ExcludeSpaces list, the space is excluded.

    " + }, + "ExcludeSpaces":{ + "shape":"ConfluenceSpaceList", + "documentation":"

    A list of space keys of Confluence spaces. If you include a key, the blogs, documents, and attachments in the space are not indexed. If a space is in both the ExcludeSpaces and the IncludeSpaces list, the space is excluded.

    " + }, + "SpaceFieldMappings":{ + "shape":"ConfluenceSpaceFieldMappingsList", + "documentation":"

    Defines how space metadata fields should be mapped to index fields. Before you can map a field, you must first create an index field with a matching type using the console or the UpdateIndex operation.

    If you specify the SpaceFieldMappings parameter, you must specify at least one field mapping.

    " + } + }, + "documentation":"

    Specifies the configuration for indexing Confluence spaces.

    " + }, + "ConfluenceSpaceFieldMappingsList":{ + "type":"list", + "member":{"shape":"ConfluenceSpaceToIndexFieldMapping"}, + "max":4, + "min":1 + }, + "ConfluenceSpaceFieldName":{ + "type":"string", + "enum":[ + "DISPLAY_URL", + "ITEM_TYPE", + "SPACE_KEY", + "URL" + ] + }, + "ConfluenceSpaceIdentifier":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^\\P{C}*$" + }, + "ConfluenceSpaceList":{ + "type":"list", + "member":{"shape":"ConfluenceSpaceIdentifier"}, + "min":1 + }, + "ConfluenceSpaceToIndexFieldMapping":{ + "type":"structure", + "members":{ + "DataSourceFieldName":{ + "shape":"ConfluenceSpaceFieldName", + "documentation":"

    The name of the field in the data source.

    " + }, + "DateFieldFormat":{ + "shape":"DataSourceDateFieldFormat", + "documentation":"

    The format for date fields in the data source. If the field specified in DataSourceFieldName is a date field you must specify the date format. If the field is not a date field, an exception is thrown.

    " + }, + "IndexFieldName":{ + "shape":"IndexFieldName", + "documentation":"

    The name of the index field to map to the Confluence data source field. The index field type must match the Confluence field type.

    " + } + }, + "documentation":"

    Defines the mapping between a field in the Confluence data source and an Amazon Kendra index field.

    You must first create the index field using the operation.

    " + }, + "ConfluenceVersion":{ + "type":"string", + "enum":[ + "CLOUD", + "SERVER" + ] + }, + "ConnectionConfiguration":{ + "type":"structure", + "required":[ + "DatabaseHost", + "DatabasePort", + "DatabaseName", + "TableName", + "SecretArn" + ], + "members":{ + "DatabaseHost":{ + "shape":"DatabaseHost", + "documentation":"

    The name of the host for the database. Can be either a string (host.subdomain.domain.tld) or an IPv4 or IPv6 address.

    " + }, + "DatabasePort":{ + "shape":"DatabasePort", + "documentation":"

    The port that the database uses for connections.

    " + }, + "DatabaseName":{ + "shape":"DatabaseName", + "documentation":"

    The name of the database containing the document data.

    " + }, + "TableName":{ + "shape":"TableName", + "documentation":"

    The name of the table that contains the document data.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager. The credentials should be a user/password pair. For more information, see Using a Database Data Source. For more information about AWS Secrets Manager, see What Is AWS Secrets Manager in the AWS Secrets Manager user guide.

    " + } + }, + "documentation":"

    Provides the information necessary to connect to a database.

    " + }, + "ContentType":{ + "type":"string", + "enum":[ + "PDF", + "HTML", + "MS_WORD", + "PLAIN_TEXT", + "PPT" + ] + }, + "CreateDataSourceRequest":{ + "type":"structure", + "required":[ + "Name", + "IndexId", + "Type" + ], + "members":{ + "Name":{ + "shape":"DataSourceName", + "documentation":"

    A unique name for the data source. A data source name can't be changed without deleting and recreating the data source.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that should be associated with this data source.

    " + }, + "Type":{ + "shape":"DataSourceType", + "documentation":"

    The type of repository that contains the data source.

    " + }, + "Configuration":{ + "shape":"DataSourceConfiguration", + "documentation":"

    The connector configuration information that is required to access the repository.

    You can't specify the Configuration parameter when the Type parameter is set to CUSTOM. If you do, you receive a ValidationException exception.

    The Configuration parameter is required for all other data sources.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description for the data source.

    " + }, + "Schedule":{ + "shape":"ScanSchedule", + "documentation":"

    Sets the frequency that Amazon Kendra will check the documents in your repository and update the index. If you don't set a schedule Amazon Kendra will not periodically update the index. You can call the StartDataSourceSyncJob operation to update the index.

    You can't specify the Schedule parameter when the Type parameter is set to CUSTOM. If you do, you receive a ValidationException exception.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of a role with permission to access the data source. For more information, see IAM Roles for Amazon Kendra.

    You can't specify the RoleArn parameter when the Type parameter is set to CUSTOM. If you do, you receive a ValidationException exception.

    The RoleArn parameter is required for all other data sources.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    A list of key-value pairs that identify the data source. You can use the tags to identify and organize your resources and to control access to resources.

    " + }, + "ClientToken":{ + "shape":"ClientTokenName", + "documentation":"

    A token that you provide to identify the request to create a data source. Multiple calls to the CreateDataSource operation with the same client token will create only one data source.
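
    For example, a CUSTOM data source is created with only an index, a name, and a type; supplying Configuration, Schedule, or RoleArn together with the CUSTOM type triggers the ValidationException described above. A minimal sketch, assuming the standard generated v2 client (all identifiers are placeholders):

        import software.amazon.awssdk.services.kendra.KendraClient;
        import software.amazon.awssdk.services.kendra.model.CreateDataSourceRequest;
        import software.amazon.awssdk.services.kendra.model.CreateDataSourceResponse;
        import software.amazon.awssdk.services.kendra.model.DataSourceType;

        public class CreateCustomDataSourceExample {
            public static void main(String[] args) {
                try (KendraClient kendra = KendraClient.create()) {
                    CreateDataSourceResponse response = kendra.createDataSource(
                            CreateDataSourceRequest.builder()
                                    .indexId("index-id")                  // placeholder index ID
                                    .name("my-custom-data-source")        // placeholder name
                                    .type(DataSourceType.CUSTOM)
                                    // Configuration, Schedule, and RoleArn are omitted on purpose:
                                    // they are rejected when Type is CUSTOM.
                                    .clientToken("create-custom-ds-001")  // idempotency token
                                    .build());
                    System.out.println("Data source ID: " + response.id());
                }
            }
        }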

    ", + "idempotencyToken":true + } + } + }, + "CreateDataSourceResponse":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"DataSourceId", + "documentation":"

    A unique identifier for the data source.

    " + } + } + }, + "CreateFaqRequest":{ + "type":"structure", + "required":[ + "IndexId", + "Name", + "S3Path", + "RoleArn" + ], + "members":{ + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the FAQ.

    " + }, + "Name":{ + "shape":"FaqName", + "documentation":"

    The name that should be associated with the FAQ.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the FAQ.

    " + }, + "S3Path":{ + "shape":"S3Path", + "documentation":"

    The S3 location of the FAQ input data.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of a role with permission to access the S3 bucket that contains the FAQs. For more information, see IAM Roles for Amazon Kendra.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    A list of key-value pairs that identify the FAQ. You can use the tags to identify and organize your resources and to control access to resources.

    " + }, + "FileFormat":{ + "shape":"FaqFileFormat", + "documentation":"

    The format of the input file. You can choose between a basic CSV format, a CSV format that includes custom attributes in a header, and a JSON format that includes custom attributes.

    The format must match the format of the file stored in the S3 bucket identified in the S3Path parameter.

    For more information, see Adding questions and answers.

    " + }, + "ClientToken":{ + "shape":"ClientTokenName", + "documentation":"

    A token that you provide to identify the request to create a FAQ. Multiple calls to the CreateFaq operation with the same client token will create only one FAQ.

    ", + "idempotencyToken":true + } + } + }, + "CreateFaqResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"FaqId", + "documentation":"

    The unique identifier of the FAQ.

    " + } + } + }, + "CreateIndexRequest":{ + "type":"structure", + "required":[ + "Name", + "RoleArn" + ], + "members":{ + "Name":{ + "shape":"IndexName", + "documentation":"

    The name for the new index.

    " + }, + "Edition":{ + "shape":"IndexEdition", + "documentation":"

    The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for your production indexes. Once you set the edition for an index, it can't be changed.

    The Edition parameter is optional. If you don't supply a value, the default is ENTERPRISE_EDITION.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    An AWS Identity and Access Management (IAM) role that gives Amazon Kendra permissions to access your Amazon CloudWatch logs and metrics. This is also the role used when you use the BatchPutDocument operation to index documents from an Amazon S3 bucket.

    " + }, + "ServerSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

    The identifier of the AWS KMS customer managed key (CMK) to use to encrypt data indexed by Amazon Kendra. Amazon Kendra doesn't support asymmetric CMKs.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description for the index.

    " + }, + "ClientToken":{ + "shape":"ClientTokenName", + "documentation":"

    A token that you provide to identify the request to create an index. Multiple calls to the CreateIndex operation with the same client token will create only one index.

    ", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    A list of key-value pairs that identify the index. You can use the tags to identify and organize your resources and to control access to resources.

    " + }, + "UserTokenConfigurations":{ + "shape":"UserTokenConfigurationList", + "documentation":"

    The user token configuration.

    " + }, + "UserContextPolicy":{ + "shape":"UserContextPolicy", + "documentation":"

    The user context policy.

    ATTRIBUTE_FILTER

    All indexed content is searchable and displayable for all users. If there is an access control list, it is ignored. You can filter on user and group attributes.

    USER_TOKEN

    Enables SSO and token-based user access control. All documents with no access control and all documents accessible to the user will be searchable and displayable.
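
    A sketch of creating a developer-edition index with the ATTRIBUTE_FILTER policy, assuming the standard generated v2 client and model enums (the name, role ARN, and client token are placeholders):

        import software.amazon.awssdk.services.kendra.KendraClient;
        import software.amazon.awssdk.services.kendra.model.CreateIndexRequest;
        import software.amazon.awssdk.services.kendra.model.CreateIndexResponse;
        import software.amazon.awssdk.services.kendra.model.IndexEdition;
        import software.amazon.awssdk.services.kendra.model.UserContextPolicy;

        public class CreateIndexExample {
            public static void main(String[] args) {
                try (KendraClient kendra = KendraClient.create()) {
                    CreateIndexResponse response = kendra.createIndex(CreateIndexRequest.builder()
                            .name("docs-index")                                        // placeholder name
                            .edition(IndexEdition.DEVELOPER_EDITION)                   // can't be changed later
                            .roleArn("arn:aws:iam::123456789012:role/KendraIndexRole") // placeholder ARN
                            .userContextPolicy(UserContextPolicy.ATTRIBUTE_FILTER)
                            .clientToken("create-index-001")                           // idempotency token
                            .build());
                    // Use this identifier when you query the index, set up a data source, or index a document.
                    System.out.println("Index ID: " + response.id());
                }
            }
        }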

    " + } + } + }, + "CreateIndexResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"IndexId", + "documentation":"

    The unique identifier of the index. Use this identifier when you query an index, set up a data source, or index a document.

    " + } + } + }, + "CreateThesaurusRequest":{ + "type":"structure", + "required":[ + "IndexId", + "Name", + "RoleArn", + "SourceS3Path" + ], + "members":{ + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The unique identifier of the index for the new thesaurus.

    " + }, + "Name":{ + "shape":"ThesaurusName", + "documentation":"

    The name for the new thesaurus.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description for the new thesaurus.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    An AWS Identity and Access Management (IAM) role that gives Amazon Kendra permissions to access the thesaurus file specified in SourceS3Path.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    A list of key-value pairs that identify the thesaurus. You can use the tags to identify and organize your resources and to control access to resources.

    " + }, + "SourceS3Path":{ + "shape":"S3Path", + "documentation":"

    The thesaurus file Amazon S3 source path.

    " + }, + "ClientToken":{ + "shape":"ClientTokenName", + "documentation":"

    A token that you provide to identify the request to create a thesaurus. Multiple calls to the CreateThesaurus operation with the same client token will create only one thesaurus.

    ", + "idempotencyToken":true + } + } + }, + "CreateThesaurusResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ThesaurusId", + "documentation":"

    The unique identifier of the thesaurus.

    " + } + } + }, + "DataSourceConfiguration":{ + "type":"structure", + "members":{ + "S3Configuration":{ + "shape":"S3DataSourceConfiguration", + "documentation":"

    Provides information to create a data source connector for a document repository in an Amazon S3 bucket.

    " + }, + "SharePointConfiguration":{ + "shape":"SharePointConfiguration", + "documentation":"

    Provides information necessary to create a data source connector for a Microsoft SharePoint site.

    " + }, + "DatabaseConfiguration":{ + "shape":"DatabaseConfiguration", + "documentation":"

    Provides information necessary to create a data source connector for a database.

    " + }, + "SalesforceConfiguration":{ + "shape":"SalesforceConfiguration", + "documentation":"

    Provides configuration information for data sources that connect to a Salesforce site.

    " + }, + "OneDriveConfiguration":{ + "shape":"OneDriveConfiguration", + "documentation":"

    Provides configuration for data sources that connect to Microsoft OneDrive.

    " + }, + "ServiceNowConfiguration":{ + "shape":"ServiceNowConfiguration", + "documentation":"

    Provides configuration for data sources that connect to ServiceNow instances.

    " + }, + "ConfluenceConfiguration":{ + "shape":"ConfluenceConfiguration", + "documentation":"

    Provides configuration information for connecting to a Confluence data source.

    " + }, + "GoogleDriveConfiguration":{ + "shape":"GoogleDriveConfiguration", + "documentation":"

    Provides configuration for data sources that connect to Google Drive.

    " + } + }, + "documentation":"

    Configuration information for an Amazon Kendra data source.

    " + }, + "DataSourceDateFieldFormat":{ + "type":"string", + "max":40, + "min":4, + "pattern":"^(?!\\s).*(?The name of the data source.

    " + }, + "Id":{ + "shape":"DataSourceId", + "documentation":"

    The unique identifier for the data source.

    " + }, + "Type":{ + "shape":"DataSourceType", + "documentation":"

    The type of the data source.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The UNIX datetime that the data source was created.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The UNIX datetime that the data source was last updated.

    " + }, + "Status":{ + "shape":"DataSourceStatus", + "documentation":"

    The status of the data source. When the status is ACTIVE the data source is ready to use.

    " + } + }, + "documentation":"

    Summary information for an Amazon Kendra data source. Returned in a call to .

    " + }, + "DataSourceSummaryList":{ + "type":"list", + "member":{"shape":"DataSourceSummary"} + }, + "DataSourceSyncJob":{ + "type":"structure", + "members":{ + "ExecutionId":{ + "shape":"String", + "documentation":"

    A unique identifier for the synchronization job.

    " + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

    The UNIX datetime that the synchronization job was started.

    " + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

    The UNIX datetime that the synchronization job was completed.

    " + }, + "Status":{ + "shape":"DataSourceSyncJobStatus", + "documentation":"

    The execution status of the synchronization job. When the Status field is set to SUCCEEDED, the synchronization job is done. If the status code is set to FAILED, the ErrorCode and ErrorMessage fields give you the reason for the failure.

    " + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    If the Status field is set to FAILED, the ErrorMessage field contains a description of the error that caused the synchronization to fail.

    " + }, + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

    If the Status field is set to FAILED, the ErrorCode field contains the reason that the synchronization failed.

    " + }, + "DataSourceErrorCode":{ + "shape":"String", + "documentation":"

    If the reason that the synchronization failed is due to an error with the underlying data source, this field contains a code that identifies the error.

    " + }, + "Metrics":{ + "shape":"DataSourceSyncJobMetrics", + "documentation":"

    Maps a batch delete document request to a specific data source sync job. This is optional and should only be supplied when documents are deleted by a data source connector.

    " + } + }, + "documentation":"

    Provides information about a synchronization job.

    " + }, + "DataSourceSyncJobHistoryList":{ + "type":"list", + "member":{"shape":"DataSourceSyncJob"} + }, + "DataSourceSyncJobId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "DataSourceSyncJobMetricTarget":{ + "type":"structure", + "required":[ + "DataSourceId", + "DataSourceSyncJobId" + ], + "members":{ + "DataSourceId":{ + "shape":"DataSourceId", + "documentation":"

    The ID of the data source that is running the sync job.

    " + }, + "DataSourceSyncJobId":{ + "shape":"DataSourceSyncJobId", + "documentation":"

    The ID of the sync job that is running on the data source.

    " + } + }, + "documentation":"

    Maps a particular data source sync job to a particular data source.

    " + }, + "DataSourceSyncJobMetrics":{ + "type":"structure", + "members":{ + "DocumentsAdded":{ + "shape":"MetricValue", + "documentation":"

    The number of documents added from the data source up to now in the data source sync.

    " + }, + "DocumentsModified":{ + "shape":"MetricValue", + "documentation":"

    The number of documents modified in the data source up to now in the data source sync run.

    " + }, + "DocumentsDeleted":{ + "shape":"MetricValue", + "documentation":"

    The number of documents deleted from the data source up to now in the data source sync run.

    " + }, + "DocumentsFailed":{ + "shape":"MetricValue", + "documentation":"

    The number of documents that failed to sync from the data source up to now in the data source sync run.

    " + }, + "DocumentsScanned":{ + "shape":"MetricValue", + "documentation":"

    The current number of documents crawled by the current sync job in the data source.

    " + } + }, + "documentation":"

    Maps a batch delete document request to a specific data source sync job. This is optional and should only be supplied when documents are deleted by a data source connector.

    " + }, + "DataSourceSyncJobStatus":{ + "type":"string", + "enum":[ + "FAILED", + "SUCCEEDED", + "SYNCING", + "INCOMPLETE", + "STOPPING", + "ABORTED", + "SYNCING_INDEXING" + ] + }, + "DataSourceToIndexFieldMapping":{ + "type":"structure", + "required":[ + "DataSourceFieldName", + "IndexFieldName" + ], + "members":{ + "DataSourceFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the column or attribute in the data source.

    " + }, + "DateFieldFormat":{ + "shape":"DataSourceDateFieldFormat", + "documentation":"

    The type of data stored in the column or attribute.

    " + }, + "IndexFieldName":{ + "shape":"IndexFieldName", + "documentation":"

    The name of the field in the index.

    " + } + }, + "documentation":"

    Maps a column or attribute in the data source to an index field. You must first create the fields in the index using the UpdateIndex operation.

    " + }, + "DataSourceToIndexFieldMappingList":{ + "type":"list", + "member":{"shape":"DataSourceToIndexFieldMapping"}, + "max":100, + "min":1 + }, + "DataSourceType":{ + "type":"string", + "enum":[ + "S3", + "SHAREPOINT", + "DATABASE", + "SALESFORCE", + "ONEDRIVE", + "SERVICENOW", + "CUSTOM", + "CONFLUENCE", + "GOOGLEDRIVE" + ] + }, + "DataSourceVpcConfiguration":{ + "type":"structure", + "required":[ + "SubnetIds", + "SecurityGroupIds" + ], + "members":{ + "SubnetIds":{ + "shape":"SubnetIdList", + "documentation":"

    A list of identifiers for subnets within your Amazon VPC. The subnets should be able to connect to each other in the VPC, and they should have outgoing access to the Internet through a NAT device.

    " + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

    A list of identifiers of security groups within your Amazon VPC. The security groups should enable Amazon Kendra to connect to the data source.

    " + } + }, + "documentation":"

    Provides information for connecting to an Amazon VPC.

    " + }, + "DatabaseConfiguration":{ + "type":"structure", + "required":[ + "DatabaseEngineType", + "ConnectionConfiguration", + "ColumnConfiguration" + ], + "members":{ + "DatabaseEngineType":{ + "shape":"DatabaseEngineType", + "documentation":"

    The type of database engine that runs the database.

    " + }, + "ConnectionConfiguration":{ + "shape":"ConnectionConfiguration", + "documentation":"

    The information necessary to connect to a database.

    " + }, + "VpcConfiguration":{"shape":"DataSourceVpcConfiguration"}, + "ColumnConfiguration":{ + "shape":"ColumnConfiguration", + "documentation":"

    Information about where the index should get the document information from the database.

    " + }, + "AclConfiguration":{ + "shape":"AclConfiguration", + "documentation":"

    Information about the database column that provides information for user context filtering.

    " + }, + "SqlConfiguration":{ + "shape":"SqlConfiguration", + "documentation":"

    Provides information about how Amazon Kendra uses quote marks around SQL identifiers when querying a database data source.

    " + } + }, + "documentation":"

    Provides the information necessary to connect a database to an index.
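
    Putting the pieces above together, a database configuration might be assembled as follows. This is a sketch using the shapes shown in this model; the host, database, table, column names, and secret ARN are placeholders:

        import software.amazon.awssdk.services.kendra.model.ColumnConfiguration;
        import software.amazon.awssdk.services.kendra.model.ConnectionConfiguration;
        import software.amazon.awssdk.services.kendra.model.DatabaseConfiguration;
        import software.amazon.awssdk.services.kendra.model.DatabaseEngineType;

        public class DatabaseConfigurationExample {
            static DatabaseConfiguration mysqlArticles() {
                return DatabaseConfiguration.builder()
                        .databaseEngineType(DatabaseEngineType.RDS_MYSQL)
                        .connectionConfiguration(ConnectionConfiguration.builder()
                                .databaseHost("db.example.com")      // placeholder host
                                .databasePort(3306)
                                .databaseName("kb")                  // placeholder database
                                .tableName("articles")               // placeholder table
                                .secretArn("arn:aws:secretsmanager:us-east-1:123456789012:secret:kb-creds") // placeholder
                                .build())
                        .columnConfiguration(ColumnConfiguration.builder()
                                .documentIdColumnName("article_id")      // placeholder column names
                                .documentDataColumnName("body")
                                .documentTitleColumnName("title")
                                .changeDetectingColumns("updated_at")    // one to five change-detection columns
                                .build())
                        .build();
            }
        }

    The resulting object would be supplied as the DatabaseConfiguration member of a DataSourceConfiguration when creating or updating a DATABASE-type data source.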

    " + }, + "DatabaseEngineType":{ + "type":"string", + "enum":[ + "RDS_AURORA_MYSQL", + "RDS_AURORA_POSTGRESQL", + "RDS_MYSQL", + "RDS_POSTGRESQL" + ] + }, + "DatabaseHost":{ + "type":"string", + "max":253, + "min":1 + }, + "DatabaseName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z][a-zA-Z0-9_]*$" + }, + "DatabasePort":{ + "type":"integer", + "max":65535, + "min":1 + }, + "DeleteDataSourceRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"DataSourceId", + "documentation":"

    The unique identifier of the data source to delete.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The unique identifier of the index associated with the data source.

    " + } + } + }, + "DeleteFaqRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"FaqId", + "documentation":"

    The identifier of the FAQ to remove.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The index to remove the FAQ from.

    " + } + } + }, + "DeleteIndexRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index to delete.

    " + } + } + }, + "DeleteThesaurusRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"ThesaurusId", + "documentation":"

    The identifier of the thesaurus to delete.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index associated with the thesaurus to delete.

    " + } + } + }, + "DescribeDataSourceRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"DataSourceId", + "documentation":"

    The unique identifier of the data source to describe.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the data source.

    " + } + } + }, + "DescribeDataSourceResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"DataSourceId", + "documentation":"

    The identifier of the data source.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the data source.

    " + }, + "Name":{ + "shape":"DataSourceName", + "documentation":"

    The name that you gave the data source when it was created.

    " + }, + "Type":{ + "shape":"DataSourceType", + "documentation":"

    The type of the data source.

    " + }, + "Configuration":{ + "shape":"DataSourceConfiguration", + "documentation":"

    Information that describes where the data source is located and how the data source is configured. The specific information in the description depends on the data source provider.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The Unix timestamp of when the data source was created.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The Unix timestamp of when the data source was last updated.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the data source.

    " + }, + "Status":{ + "shape":"DataSourceStatus", + "documentation":"

    The current status of the data source. When the status is ACTIVE the data source is ready to use. When the status is FAILED, the ErrorMessage field contains the reason that the data source failed.

    " + }, + "Schedule":{ + "shape":"ScanSchedule", + "documentation":"

    The schedule on which Amazon Kendra will update the data source.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the role that enables the data source to access its resources.

    " + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    When the Status field value is FAILED, the ErrorMessage field contains a description of the error that caused the data source to fail.

    " + } + } + }, + "DescribeFaqRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"FaqId", + "documentation":"

    The unique identifier of the FAQ.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the FAQ.

    " + } + } + }, + "DescribeFaqResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"FaqId", + "documentation":"

    The identifier of the FAQ.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the FAQ.

    " + }, + "Name":{ + "shape":"FaqName", + "documentation":"

    The name that you gave the FAQ when it was created.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the FAQ that you provided when it was created.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the FAQ was created.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the FAQ was last updated.

    " + }, + "S3Path":{"shape":"S3Path"}, + "Status":{ + "shape":"FaqStatus", + "documentation":"

    The status of the FAQ. It is ready to use when the status is ACTIVE.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the role that provides access to the S3 bucket containing the input files for the FAQ.

    " + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    If the Status field is FAILED, the ErrorMessage field contains the reason why the FAQ failed.

    " + }, + "FileFormat":{ + "shape":"FaqFileFormat", + "documentation":"

    The file format used by the input files for the FAQ.

    " + } + } + }, + "DescribeIndexRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index to describe.

    " + } + } + }, + "DescribeIndexResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"IndexName", + "documentation":"

    The name of the index.

    " + }, + "Id":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index.

    " + }, + "Edition":{ + "shape":"IndexEdition", + "documentation":"

    The Amazon Kendra edition used for the index. You decide the edition when you create the index.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role that gives Amazon Kendra permission to write to your Amazon Cloudwatch logs.

    " + }, + "ServerSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

    The identifier of the AWS KMS customer master key (CMK) used to encrypt your data. Amazon Kendra doesn't support asymmetric CMKs.

    " + }, + "Status":{ + "shape":"IndexStatus", + "documentation":"

    The current status of the index. When the value is ACTIVE, the index is ready for use. If the Status field value is FAILED, the ErrorMessage field contains a message that explains why.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The description of the index.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The Unix datetime that the index was created.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The Unix datetime that the index was last updated.

    " + }, + "DocumentMetadataConfigurations":{ + "shape":"DocumentMetadataConfigurationList", + "documentation":"

    Configuration settings for any metadata applied to the documents in the index.

    " + }, + "IndexStatistics":{ + "shape":"IndexStatistics", + "documentation":"

    Provides information about the number of FAQ questions and answers and the number of text documents indexed.

    " + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    When the Status field value is FAILED, the ErrorMessage field contains a message that explains why.

    " + }, + "CapacityUnits":{ + "shape":"CapacityUnitsConfiguration", + "documentation":"

    For Enterprise Edition indexes, you can choose to use additional capacity to meet the needs of your application. This contains the capacity units used for the index. A 0 for the query capacity or the storage capacity indicates that the index is using the default capacity for the index.

    " + }, + "UserTokenConfigurations":{ + "shape":"UserTokenConfigurationList", + "documentation":"

    The user token configuration for the Amazon Kendra index.

    " + }, + "UserContextPolicy":{ + "shape":"UserContextPolicy", + "documentation":"

    The user context policy for the Amazon Kendra index.

    " + } + } + }, + "DescribeThesaurusRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"ThesaurusId", + "documentation":"

    The identifier of the thesaurus to describe.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index associated with the thesaurus to describe.

    " + } + } + }, + "DescribeThesaurusResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ThesaurusId", + "documentation":"

    The identifier of the thesaurus.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index associated with the thesaurus to describe.

    " + }, + "Name":{ + "shape":"ThesaurusName", + "documentation":"

    The thesaurus name.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The thesaurus description.

    " + }, + "Status":{ + "shape":"ThesaurusStatus", + "documentation":"

    The current status of the thesaurus. When the value is ACTIVE, queries are able to use the thesaurus. If the Status field value is FAILED, the ErrorMessage field provides more information.

    If the status is ACTIVE_BUT_UPDATE_FAILED, it means that Amazon Kendra could not ingest the new thesaurus file. The old thesaurus file is still active.

    " + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    When the Status field value is FAILED, the ErrorMessage field provides more information.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The Unix datetime that the thesaurus was created.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The Unix datetime that the thesaurus was last updated.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    An AWS Identity and Access Management (IAM) role that gives Amazon Kendra permissions to access the thesaurus file specified in SourceS3Path.

    " + }, + "SourceS3Path":{"shape":"S3Path"}, + "FileSizeBytes":{ + "shape":"Long", + "documentation":"

    The size of the thesaurus file in bytes.

    " + }, + "TermCount":{ + "shape":"Long", + "documentation":"

    The number of unique terms in the thesaurus file. For example, for the synonyms a,b,c and a=>d, the term count would be 4.

    " + }, + "SynonymRuleCount":{ + "shape":"Long", + "documentation":"

    The number of synonym rules in the thesaurus file.

    " + } + } + }, + "Description":{ + "type":"string", + "max":1000, + "min":0, + "pattern":"^\\P{C}*$" + }, + "Document":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"DocumentId", + "documentation":"

    A unique identifier of the document in the index.

    " + }, + "Title":{ + "shape":"Title", + "documentation":"

    The title of the document.

    " + }, + "Blob":{ + "shape":"Blob", + "documentation":"

    The contents of the document.

    Documents passed to the Blob parameter must be base64 encoded. Your code might not need to encode the document file bytes if you're using an AWS SDK to call Amazon Kendra operations. If you are calling the Amazon Kendra endpoint directly using REST, you must base64 encode the contents before sending.
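
    With the AWS SDK for Java v2, a blob-typed member is passed as an SdkBytes value and the SDK performs the base64 encoding when it marshalls the request, so raw file bytes can typically be passed directly. A sketch (the file path, document ID, and title are placeholders):

        import java.io.IOException;
        import java.nio.file.Files;
        import java.nio.file.Paths;
        import software.amazon.awssdk.core.SdkBytes;
        import software.amazon.awssdk.services.kendra.model.ContentType;
        import software.amazon.awssdk.services.kendra.model.Document;

        public class InlineDocumentExample {
            static Document fromLocalPdf() throws IOException {
                byte[] bytes = Files.readAllBytes(Paths.get("policy.pdf")); // placeholder path
                return Document.builder()
                        .id("policy-2020")                     // placeholder document ID
                        .title("Leave policy")                 // placeholder title
                        .blob(SdkBytes.fromByteArray(bytes))   // no manual base64 step needed with the SDK
                        .contentType(ContentType.PDF)
                        .build();
            }
        }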

    " + }, + "S3Path":{"shape":"S3Path"}, + "Attributes":{ + "shape":"DocumentAttributeList", + "documentation":"

    Custom attributes to apply to the document. Use the custom attributes to provide additional information for searching, to provide facets for refining searches, and to provide additional information in the query response.

    " + }, + "AccessControlList":{ + "shape":"PrincipalList", + "documentation":"

    Information to use for user context filtering.

    " + }, + "ContentType":{ + "shape":"ContentType", + "documentation":"

    The file type of the document in the Blob field.

    " + } + }, + "documentation":"

    A document in an index.

    " + }, + "DocumentAttribute":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"DocumentAttributeKey", + "documentation":"

    The identifier for the attribute.

    " + }, + "Value":{ + "shape":"DocumentAttributeValue", + "documentation":"

    The value of the attribute.

    " + } + }, + "documentation":"

    A custom attribute value assigned to a document.

    " + }, + "DocumentAttributeKey":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9_][a-zA-Z0-9_-]*" + }, + "DocumentAttributeKeyList":{ + "type":"list", + "member":{"shape":"DocumentAttributeKey"}, + "max":100, + "min":1 + }, + "DocumentAttributeList":{ + "type":"list", + "member":{"shape":"DocumentAttribute"} + }, + "DocumentAttributeStringListValue":{ + "type":"list", + "member":{"shape":"String"} + }, + "DocumentAttributeStringValue":{ + "type":"string", + "max":2048, + "min":1 + }, + "DocumentAttributeValue":{ + "type":"structure", + "members":{ + "StringValue":{ + "shape":"DocumentAttributeStringValue", + "documentation":"

    A string, such as \"department\".

    " + }, + "StringListValue":{ + "shape":"DocumentAttributeStringListValue", + "documentation":"

    A list of strings.

    " + }, + "LongValue":{ + "shape":"Long", + "documentation":"

    A long integer value.

    " + }, + "DateValue":{ + "shape":"Timestamp", + "documentation":"

    A date expressed as an ISO 8601 string.

    " + } + }, + "documentation":"

    The value of a custom document attribute. You can only provide one value for a custom attribute.
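
    A small sketch of the one-value rule above, assuming the kendra model classes (software.amazon.awssdk.services.kendra.model.*) are imported and the usual generated builder names:

    // Each DocumentAttributeValue sets exactly one of StringValue, StringListValue,
    // LongValue, or DateValue.
    DocumentAttribute department = DocumentAttribute.builder()
            .key("department")
            .value(DocumentAttributeValue.builder()
                    .stringValue("HR")      // a string attribute
                    .build())
            .build();

    DocumentAttribute createdAt = DocumentAttribute.builder()
            .key("created_at")
            .value(DocumentAttributeValue.builder()
                    .dateValue(java.time.Instant.parse("2020-11-05T00:00:00Z"))  // a date attribute
                    .build())
            .build();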

    " + }, + "DocumentAttributeValueCountPair":{ + "type":"structure", + "members":{ + "DocumentAttributeValue":{ + "shape":"DocumentAttributeValue", + "documentation":"

    The value of the attribute. For example, \"HR.\"

    " + }, + "Count":{ + "shape":"Integer", + "documentation":"

    The number of documents in the response that have the attribute value for the key.

    " + } + }, + "documentation":"

    Provides the count of documents that match a particular attribute when doing a faceted search.

    " + }, + "DocumentAttributeValueCountPairList":{ + "type":"list", + "member":{"shape":"DocumentAttributeValueCountPair"} + }, + "DocumentAttributeValueType":{ + "type":"string", + "enum":[ + "STRING_VALUE", + "STRING_LIST_VALUE", + "LONG_VALUE", + "DATE_VALUE" + ] + }, + "DocumentId":{ + "type":"string", + "max":2048, + "min":1 + }, + "DocumentIdList":{ + "type":"list", + "member":{"shape":"DocumentId"}, + "max":10, + "min":1 + }, + "DocumentList":{ + "type":"list", + "member":{"shape":"Document"}, + "max":10, + "min":1 + }, + "DocumentMetadataBoolean":{"type":"boolean"}, + "DocumentMetadataConfiguration":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"DocumentMetadataConfigurationName", + "documentation":"

    The name of the index field.

    " + }, + "Type":{ + "shape":"DocumentAttributeValueType", + "documentation":"

    The data type of the index field.

    " + }, + "Relevance":{ + "shape":"Relevance", + "documentation":"

    Provides manual tuning parameters to determine how the field affects the search results.

    " + }, + "Search":{ + "shape":"Search", + "documentation":"

    Provides information about how the field is used during a search.

    " + } + }, + "documentation":"

    Specifies the properties of a custom index field.

    " + }, + "DocumentMetadataConfigurationList":{ + "type":"list", + "member":{"shape":"DocumentMetadataConfiguration"}, + "max":500, + "min":0 + }, + "DocumentMetadataConfigurationName":{ + "type":"string", + "max":30, + "min":1 + }, + "DocumentsMetadataConfiguration":{ + "type":"structure", + "members":{ + "S3Prefix":{ + "shape":"S3ObjectKey", + "documentation":"

    A prefix used to filter metadata configuration files in the AWS S3 bucket. The S3 bucket might contain multiple metadata files. Use S3Prefix to include only the desired metadata files.

    " + } + }, + "documentation":"

    Document metadata files that contain information such as the document access control information, source URI, document author, and custom attributes. Each metadata file contains metadata about a single document.

    " + }, + "Duration":{ + "type":"string", + "max":10, + "min":1, + "pattern":"[0-9]+[s]" + }, + "ErrorCode":{ + "type":"string", + "enum":[ + "InternalError", + "InvalidRequest" + ] + }, + "ErrorMessage":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^\\P{C}*$" + }, + "ExcludeMimeTypesList":{ + "type":"list", + "member":{"shape":"MimeType"}, + "max":30, + "min":0 + }, + "ExcludeSharedDrivesList":{ + "type":"list", + "member":{"shape":"SharedDriveId"}, + "max":100, + "min":0 + }, + "ExcludeUserAccountsList":{ + "type":"list", + "member":{"shape":"UserAccount"}, + "max":100, + "min":0 + }, + "Facet":{ + "type":"structure", + "members":{ + "DocumentAttributeKey":{ + "shape":"DocumentAttributeKey", + "documentation":"

    The unique key for the document attribute.

    " + } + }, + "documentation":"

    Information about a document attribute.

    " + }, + "FacetList":{ + "type":"list", + "member":{"shape":"Facet"} + }, + "FacetResult":{ + "type":"structure", + "members":{ + "DocumentAttributeKey":{ + "shape":"DocumentAttributeKey", + "documentation":"

    The key for the facet values. This is the same as the DocumentAttributeKey provided in the query.

    " + }, + "DocumentAttributeValueType":{ + "shape":"DocumentAttributeValueType", + "documentation":"

    The data type of the facet value. This is the same as the type defined for the index field when it was created.

    " + }, + "DocumentAttributeValueCountPairs":{ + "shape":"DocumentAttributeValueCountPairList", + "documentation":"

    An array of key/value pairs, where the key is the value of the attribute and the count is the number of documents that share the key value.

    " + } + }, + "documentation":"

    The facet values for the documents in the response.

    " + }, + "FacetResultList":{ + "type":"list", + "member":{"shape":"FacetResult"} + }, + "FaqFileFormat":{ + "type":"string", + "enum":[ + "CSV", + "CSV_WITH_HEADER", + "JSON" + ] + }, + "FaqId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "FaqName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "FaqStatistics":{ + "type":"structure", + "required":["IndexedQuestionAnswersCount"], + "members":{ + "IndexedQuestionAnswersCount":{ + "shape":"IndexedQuestionAnswersCount", + "documentation":"

    The total number of FAQ questions and answers contained in the index.

    " + } + }, + "documentation":"

    Provides statistical information about the FAQ questions and answers contained in an index.

    " + }, + "FaqStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "ACTIVE", + "DELETING", + "FAILED" + ] + }, + "FaqSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"FaqId", + "documentation":"

    The unique identifier of the FAQ.

    " + }, + "Name":{ + "shape":"FaqName", + "documentation":"

    The name that you assigned the FAQ when you created or updated the FAQ.

    " + }, + "Status":{ + "shape":"FaqStatus", + "documentation":"

    The current status of the FAQ. When the status is ACTIVE the FAQ is ready for use.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The UNIX datetime that the FAQ was added to the index.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The UNIX datetime that the FAQ was last updated.

    " + }, + "FileFormat":{ + "shape":"FaqFileFormat", + "documentation":"

    The file type used to create the FAQ.

    " + } + }, + "documentation":"

    Provides summary information about a set of frequently asked questions and answers contained in an index.

    " + }, + "FaqSummaryItems":{ + "type":"list", + "member":{"shape":"FaqSummary"} + }, + "FeedbackToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^\\P{C}*.\\P{C}*$" + }, + "GoogleDriveConfiguration":{ + "type":"structure", + "required":["SecretArn"], + "members":{ + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The Amazon Resource Name (ARN) of an AWS Secrets Manager secret that contains the credentials required to connect to Google Drive. For more information, see Using a Google Workspace Drive data source.

    " + }, + "InclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of regular expression patterns that apply to the path on Google Drive. Items that match the pattern are included in the index from both shared drives and users' My Drives. Items that don't match the pattern are excluded from the index. If an item matches both an inclusion pattern and an exclusion pattern, it is excluded from the index.

    " + }, + "ExclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of regular expression patterns that apply to the path on Google Drive. Items that match the pattern are excluded from the index from both shared drives and users' My Drives. Items that don't match the pattern are included in the index. If an item matches both an exclusion pattern and an inclusion pattern, it is excluded from the index.

    " + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    Defines the mapping between a Google Drive field and an Amazon Kendra index field.

    If you are using the console, you can define index fields when creating the mapping. If you are using the API, you must first create the field using the UpdateIndex operation.

    " + }, + "ExcludeMimeTypes":{ + "shape":"ExcludeMimeTypesList", + "documentation":"

    A list of MIME types to exclude from the index. All documents matching the specified MIME type are excluded.

    For a list of MIME types, see Using a Google Workspace Drive data source.

    " + }, + "ExcludeUserAccounts":{ + "shape":"ExcludeUserAccountsList", + "documentation":"

    A list of email addresses of the users. Documents owned by these users are excluded from the index. Documents shared with excluded users are indexed unless they are excluded in another way.

    " + }, + "ExcludeSharedDrives":{ + "shape":"ExcludeSharedDrivesList", + "documentation":"

    A list of identifiers of shared drives to exclude from the index. All files and folders stored on the shared drive are excluded.

    " + } + }, + "documentation":"

    Provides configuration information for data sources that connect to Google Drive.

    " + }, + "GroupAttributeField":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^\\P{C}*$" + }, + "Highlight":{ + "type":"structure", + "required":[ + "BeginOffset", + "EndOffset" + ], + "members":{ + "BeginOffset":{ + "shape":"Integer", + "documentation":"

    The zero-based location in the response string where the highlight starts.

    " + }, + "EndOffset":{ + "shape":"Integer", + "documentation":"

    The zero-based location in the response string where the highlight ends.

    " + }, + "TopAnswer":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the response is the best response. True if this is the best response; otherwise, false.

    " + }, + "Type":{ + "shape":"HighlightType", + "documentation":"

    The highlight type.

    " + } + }, + "documentation":"

    Provides information that you can use to highlight a search result so that your users can quickly identify terms in the response.

    " + }, + "HighlightList":{ + "type":"list", + "member":{"shape":"Highlight"} + }, + "HighlightType":{ + "type":"string", + "enum":[ + "STANDARD", + "THESAURUS_SYNONYM" + ] + }, + "Importance":{ + "type":"integer", + "max":10, + "min":1 + }, + "IndexConfigurationSummary":{ + "type":"structure", + "required":[ + "CreatedAt", + "UpdatedAt", + "Status" + ], + "members":{ + "Name":{ + "shape":"IndexName", + "documentation":"

    The name of the index.

    " + }, + "Id":{ + "shape":"IndexId", + "documentation":"

    A unique identifier for the index. Use this to identify the index when you are using operations such as Query, DescribeIndex, UpdateIndex, and DeleteIndex.

    " + }, + "Edition":{ + "shape":"IndexEdition", + "documentation":"

    Indicates whether the index is an enterprise edition index or a developer edition index.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The Unix timestamp when the index was created.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The Unix timestamp when the index was last updated by the UpdateIndex operation.

    " + }, + "Status":{ + "shape":"IndexStatus", + "documentation":"

    The current status of the index. When the status is ACTIVE, the index is ready to search.

    " + } + }, + "documentation":"

    A summary of information about an index.

    " + }, + "IndexConfigurationSummaryList":{ + "type":"list", + "member":{"shape":"IndexConfigurationSummary"} + }, + "IndexEdition":{ + "type":"string", + "enum":[ + "DEVELOPER_EDITION", + "ENTERPRISE_EDITION" + ] + }, + "IndexFieldName":{ + "type":"string", + "max":30, + "min":1, + "pattern":"^\\P{C}*$" + }, + "IndexId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9-]*" + }, + "IndexName":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "IndexStatistics":{ + "type":"structure", + "required":[ + "FaqStatistics", + "TextDocumentStatistics" + ], + "members":{ + "FaqStatistics":{ + "shape":"FaqStatistics", + "documentation":"

    The number of question and answer topics in the index.

    " + }, + "TextDocumentStatistics":{ + "shape":"TextDocumentStatistics", + "documentation":"

    The number of text documents indexed.

    " + } + }, + "documentation":"

    Provides information about the number of documents and the number of questions and answers in an index.

    " + }, + "IndexStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED", + "UPDATING", + "SYSTEM_UPDATING" + ] + }, + "IndexedQuestionAnswersCount":{ + "type":"integer", + "min":0 + }, + "IndexedTextBytes":{ + "type":"long", + "min":0 + }, + "IndexedTextDocumentsCount":{ + "type":"integer", + "min":0 + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true, + "fault":true + }, + "Issuer":{ + "type":"string", + "max":65, + "min":1, + "pattern":"^\\P{C}*$" + }, + "JsonTokenTypeConfiguration":{ + "type":"structure", + "required":[ + "UserNameAttributeField", + "GroupAttributeField" + ], + "members":{ + "UserNameAttributeField":{ + "shape":"String", + "documentation":"

    The user name attribute field.

    " + }, + "GroupAttributeField":{ + "shape":"String", + "documentation":"

    The group attribute field.

    " + } + }, + "documentation":"

    Configuration information for the JSON token type.

    " + }, + "JwtTokenTypeConfiguration":{ + "type":"structure", + "required":["KeyLocation"], + "members":{ + "KeyLocation":{ + "shape":"KeyLocation", + "documentation":"

    The location of the key.

    " + }, + "URL":{ + "shape":"Url", + "documentation":"

    The signing key URL.

    " + }, + "SecretManagerArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the secret.

    " + }, + "UserNameAttributeField":{ + "shape":"UserNameAttributeField", + "documentation":"

    The user name attribute field.

    " + }, + "GroupAttributeField":{ + "shape":"GroupAttributeField", + "documentation":"

    The group attribute field.

    " + }, + "Issuer":{ + "shape":"Issuer", + "documentation":"

    The issuer of the token.

    " + }, + "ClaimRegex":{ + "shape":"ClaimRegex", + "documentation":"

    The regular expression that identifies the claim.

    " + } + }, + "documentation":"

    Configuration information for the JWT token type.

    " + }, + "KeyLocation":{ + "type":"string", + "enum":[ + "URL", + "SECRET_MANAGER" + ] + }, + "KmsKeyId":{ + "type":"string", + "max":2048, + "min":1, + "sensitive":true + }, + "ListDataSourceSyncJobsRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"DataSourceId", + "documentation":"

    The identifier of the data source.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the data source.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the result of the previous request to GetDataSourceSyncJobHistory was truncated, include the NextToken to fetch the next set of jobs.

    " + }, + "MaxResults":{ + "shape":"MaxResultsIntegerForListDataSourceSyncJobsRequest", + "documentation":"

    The maximum number of synchronization jobs to return in the response. If there are fewer results in the list, this response contains only the actual results.

    " + }, + "StartTimeFilter":{ + "shape":"TimeRange", + "documentation":"

    When specified, the synchronization jobs returned in the list are limited to jobs between the specified dates.

    " + }, + "StatusFilter":{ + "shape":"DataSourceSyncJobStatus", + "documentation":"

    When specified, only returns synchronization jobs with the Status field equal to the specified status.

    " + } + } + }, + "ListDataSourceSyncJobsResponse":{ + "type":"structure", + "members":{ + "History":{ + "shape":"DataSourceSyncJobHistoryList", + "documentation":"

    A history of synchronization jobs for the data source.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The GetDataSourceSyncJobHistory operation returns a page of synchronization jobs at a time. The maximum size of the page is set by the MaxResults parameter. If there are more jobs in the list than the page size, Amazon Kendra returns the NextToken value. Include the token in the next request to the GetDataSourceSyncJobHistory operation to return the next page of jobs.
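
    A hedged sketch of the paging loop this describes, assuming a KendraClient named kendra and the usual generated v2 names; the identifiers are placeholders:

    String nextToken = null;
    do {
        ListDataSourceSyncJobsResponse page = kendra.listDataSourceSyncJobs(
                ListDataSourceSyncJobsRequest.builder()
                        .id("<data-source-id>")
                        .indexId("<index-id>")
                        .maxResults(10)            // page size; the model allows 1-10
                        .nextToken(nextToken)      // null on the first request
                        .build());
        page.history().forEach(job -> System.out.println(job));
        nextToken = page.nextToken();
    } while (nextToken != null);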

    " + } + } + }, + "ListDataSourcesRequest":{ + "type":"structure", + "required":["IndexId"], + "members":{ + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the data source.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of data sources (DataSourceSummaryItems).

    " + }, + "MaxResults":{ + "shape":"MaxResultsIntegerForListDataSourcesRequest", + "documentation":"

    The maximum number of data sources to return.

    " + } + } + }, + "ListDataSourcesResponse":{ + "type":"structure", + "members":{ + "SummaryItems":{ + "shape":"DataSourceSummaryList", + "documentation":"

    An array of summary information for one or more data sources.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of data sources.

    " + } + } + }, + "ListFaqsRequest":{ + "type":"structure", + "required":["IndexId"], + "members":{ + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The index that contains the FAQ lists.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the result of the previous request to ListFaqs was truncated, include the NextToken to fetch the next set of FAQs.

    " + }, + "MaxResults":{ + "shape":"MaxResultsIntegerForListFaqsRequest", + "documentation":"

    The maximum number of FAQs to return in the response. If there are fewer results in the list, this response contains only the actual results.

    " + } + } + }, + "ListFaqsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The ListFaqs operation returns a page of FAQs at a time. The maximum size of the page is set by the MaxResults parameter. If there are more FAQs in the list than the page size, Amazon Kendra returns the NextToken value. Include the token in the next request to the ListFaqs operation to return the next page of FAQs.

    " + }, + "FaqSummaryItems":{ + "shape":"FaqSummaryItems", + "documentation":"

    Information about the FAQs associated with the specified index.

    " + } + } + }, + "ListIndicesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of indexes (IndexConfigurationSummaryItems).

    " + }, + "MaxResults":{ + "shape":"MaxResultsIntegerForListIndicesRequest", + "documentation":"

    The maximum number of indexes to return.

    " + } + } + }, + "ListIndicesResponse":{ + "type":"structure", + "members":{ + "IndexConfigurationSummaryItems":{ + "shape":"IndexConfigurationSummaryList", + "documentation":"

    An array of summary information for one or more indexes.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of indexes.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the index, FAQ, or data source to get a list of tags for.

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

    A list of tags associated with the index, FAQ, or data source.

    " + } + } + }, + "ListThesauriRequest":{ + "type":"structure", + "required":["IndexId"], + "members":{ + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index associated with the thesaurus to list.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of thesauri (ThesaurusSummaryItems).

    " + }, + "MaxResults":{ + "shape":"MaxResultsIntegerForListThesauriRequest", + "documentation":"

    The maximum number of thesauri to return.

    " + } + } + }, + "ListThesauriResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of thesauri.

    " + }, + "ThesaurusSummaryItems":{ + "shape":"ThesaurusSummaryItems", + "documentation":"

    An array of summary information for one or more thesauruses.

    " + } + } + }, + "Long":{"type":"long"}, + "MaxResultsIntegerForListDataSourceSyncJobsRequest":{ + "type":"integer", + "max":10, + "min":1 + }, + "MaxResultsIntegerForListDataSourcesRequest":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxResultsIntegerForListFaqsRequest":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxResultsIntegerForListIndicesRequest":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxResultsIntegerForListThesauriRequest":{ + "type":"integer", + "max":100, + "min":1 + }, + "MetricValue":{ + "type":"string", + "pattern":"(([1-9][0-9]*)|0)" + }, + "MimeType":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^\\P{C}*$" + }, + "NextToken":{ + "type":"string", + "max":800, + "min":1 + }, + "OneDriveConfiguration":{ + "type":"structure", + "required":[ + "TenantDomain", + "SecretArn", + "OneDriveUsers" + ], + "members":{ + "TenantDomain":{ + "shape":"TenantDomain", + "documentation":"

    The Azure Active Directory domain of the organization.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The Amazon Resource Name (ARN) of an AWS Secrets Manager secret that contains the user name and password to connect to OneDrive. The user name should be the application ID for the OneDrive application, and the password is the application key for the OneDrive application.

    " + }, + "OneDriveUsers":{ + "shape":"OneDriveUsers", + "documentation":"

    A list of user accounts whose documents should be indexed.

    " + }, + "InclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of regular expression patterns. Documents that match the pattern are included in the index. Documents that don't match the pattern are excluded from the index. If a document matches both an inclusion pattern and an exclusion pattern, the document is not included in the index.

    The exclusion pattern is applied to the file name.

    " + }, + "ExclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    List of regular expressions applied to documents. Items that match the exclusion pattern are not indexed. If you provide both an inclusion pattern and an exclusion pattern, any item that matches the exclusion pattern isn't indexed.

    The exclusion pattern is applied to the file name.

    " + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    A list of DataSourceToIndexFieldMapping objects that map Microsoft OneDrive fields to custom fields in the Amazon Kendra index. You must first create the index fields before you map OneDrive fields.

    " + }, + "DisableLocalGroups":{ + "shape":"Boolean", + "documentation":"

    A Boolean value that specifies whether local groups are disabled (True) or enabled (False).

    " + } + }, + "documentation":"

    Provides configuration information for data sources that connect to OneDrive.

    " + }, + "OneDriveUser":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^(?!\\s).+@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$" + }, + "OneDriveUserList":{ + "type":"list", + "member":{"shape":"OneDriveUser"}, + "max":100, + "min":1 + }, + "OneDriveUsers":{ + "type":"structure", + "members":{ + "OneDriveUserList":{ + "shape":"OneDriveUserList", + "documentation":"

    A list of users whose documents should be indexed. Specify the user names in email format, for example, username@tenantdomain. If you need to index the documents of more than 100 users, use the OneDriveUserS3Path field to specify the location of a file containing a list of users.

    " + }, + "OneDriveUserS3Path":{ + "shape":"S3Path", + "documentation":"

    The S3 bucket location of a file containing a list of users whose documents should be indexed.

    " + } + }, + "documentation":"

    User accounts whose documents should be indexed.

    " + }, + "Order":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "Principal":{ + "type":"structure", + "required":[ + "Name", + "Type", + "Access" + ], + "members":{ + "Name":{ + "shape":"PrincipalName", + "documentation":"

    The name of the user or group.

    " + }, + "Type":{ + "shape":"PrincipalType", + "documentation":"

    The type of principal.

    " + }, + "Access":{ + "shape":"ReadAccessType", + "documentation":"

    Whether to allow or deny access to the principal.

    " + } + }, + "documentation":"

    Provides user and group information for document access filtering.

    " + }, + "PrincipalList":{ + "type":"list", + "member":{"shape":"Principal"} + }, + "PrincipalName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^\\P{C}*$" + }, + "PrincipalType":{ + "type":"string", + "enum":[ + "USER", + "GROUP" + ] + }, + "QueryCapacityUnit":{ + "type":"integer", + "min":0 + }, + "QueryId":{ + "type":"string", + "max":36, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9-]*" + }, + "QueryIdentifiersEnclosingOption":{ + "type":"string", + "enum":[ + "DOUBLE_QUOTES", + "NONE" + ] + }, + "QueryRequest":{ + "type":"structure", + "required":[ + "IndexId", + "QueryText" + ], + "members":{ + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The unique identifier of the index to search. The identifier is returned in the response from the operation.

    " + }, + "QueryText":{ + "shape":"QueryText", + "documentation":"

    The text to search for.

    " + }, + "AttributeFilter":{ + "shape":"AttributeFilter", + "documentation":"

    Enables filtered searches based on document attributes. You can only provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters parameters contain a list of other filters.

    The AttributeFilter parameter enables you to create a set of filtering rules that a document must satisfy to be included in the query results.

    " + }, + "Facets":{ + "shape":"FacetList", + "documentation":"

    An array of document attributes. Amazon Kendra returns a count for each attribute key specified. You can use this information to help narrow the search for your user.

    " + }, + "RequestedDocumentAttributes":{ + "shape":"DocumentAttributeKeyList", + "documentation":"

    An array of document attributes to include in the response. No other document attributes are included in the response. By default all document attributes are included in the response.

    " + }, + "QueryResultTypeFilter":{ + "shape":"QueryResultType", + "documentation":"

    Sets the type of query. Only results for the specified query type are returned.

    " + }, + "PageNumber":{ + "shape":"Integer", + "documentation":"

    Query results are returned in pages the size of the PageSize parameter. By default, Amazon Kendra returns the first page of results. Use this parameter to get result pages after the first one.

    " + }, + "PageSize":{ + "shape":"Integer", + "documentation":"

    Sets the number of results that are returned in each page of results. The default page size is 10. The maximum number of results returned is 100. If you ask for more than 100 results, only 100 are returned.

    " + }, + "SortingConfiguration":{ + "shape":"SortingConfiguration", + "documentation":"

    Provides information that determines how the results of the query are sorted. You can set the field that Amazon Kendra should sort the results on, and specify whether the results should be sorted in ascending or descending order. In the case of ties in sorting the results, the results are sorted by relevance.

    If you don't provide sorting configuration, the results are sorted by the relevance that Amazon Kendra determines for the result.

    " + }, + "UserContext":{ + "shape":"UserContext", + "documentation":"

    The user context token.

    " + }, + "VisitorId":{ + "shape":"VisitorId", + "documentation":"

    Provides an identifier for a specific user. The VisitorId should be a unique identifier, such as a GUID. Don't use personally identifiable information, such as the user's email address, as the VisitorId.
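
    A minimal query sketch tying the members above together, assuming a KendraClient named kendra and the usual generated builder names; the index id is a placeholder:

    kendra.query(QueryRequest.builder()
            .indexId("<index-id>")
            .queryText("How do I reset my password?")
            .facets(Facet.builder().documentAttributeKey("department").build())
            .pageSize(10)        // default page size; at most 100 results are retrievable
            .pageNumber(1)
            .build())
        .resultItems()
        .forEach(item -> System.out.println(item.documentId()));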

    " + } + } + }, + "QueryResult":{ + "type":"structure", + "members":{ + "QueryId":{ + "shape":"QueryId", + "documentation":"

    The unique identifier for the search. You use QueryId to identify the search when using the feedback API.

    " + }, + "ResultItems":{ + "shape":"QueryResultItemList", + "documentation":"

    The results of the search.

    " + }, + "FacetResults":{ + "shape":"FacetResultList", + "documentation":"

    Contains the facet results. A FacetResult contains the counts for each attribute key that was specified in the Facets input parameter.

    " + }, + "TotalNumberOfResults":{ + "shape":"Integer", + "documentation":"

    The total number of items found by the search; however, you can only retrieve up to 100 items. For example, if the search found 192 items, you can only retrieve the first 100 of the items.

    " + } + } + }, + "QueryResultItem":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ResultId", + "documentation":"

    The unique identifier for the query result.

    " + }, + "Type":{ + "shape":"QueryResultType", + "documentation":"

    The type of document.

    " + }, + "AdditionalAttributes":{ + "shape":"AdditionalResultAttributeList", + "documentation":"

    One or more additional attributes associated with the query result.

    " + }, + "DocumentId":{ + "shape":"DocumentId", + "documentation":"

    The unique identifier for the document.

    " + }, + "DocumentTitle":{ + "shape":"TextWithHighlights", + "documentation":"

    The title of the document. Contains the text of the title and information for highlighting the relevant terms in the title.

    " + }, + "DocumentExcerpt":{ + "shape":"TextWithHighlights", + "documentation":"

    An extract of the text in the document. Contains information about highlighting the relevant terms in the excerpt.

    " + }, + "DocumentURI":{ + "shape":"Url", + "documentation":"

    The URI of the original location of the document.

    " + }, + "DocumentAttributes":{ + "shape":"DocumentAttributeList", + "documentation":"

    An array of document attributes for the document that the query result maps to. For example, the document author (Author) or the source URI (SourceUri) of the document.

    " + }, + "ScoreAttributes":{ + "shape":"ScoreAttributes", + "documentation":"

    Indicates the confidence that Amazon Kendra has that a result matches the query that you provided. Each result is placed into a bin that indicates the confidence, VERY_HIGH, HIGH, MEDIUM and LOW. You can use the score to determine if a response meets the confidence needed for your application.

    The field is only set to LOW when the Type field is set to DOCUMENT and Amazon Kendra is not confident that the result matches the query.

    " + }, + "FeedbackToken":{ + "shape":"FeedbackToken", + "documentation":"

    A token that identifies a particular result from a particular query. Use this token to provide click-through feedback for the result. For more information, see Submitting feedback.

    " + } + }, + "documentation":"

    A single query result.

    A query result contains information about a document returned by the query. This includes the original location of the document, a list of attributes assigned to the document, and relevant text from the document that satisfies the query.

    " + }, + "QueryResultItemList":{ + "type":"list", + "member":{"shape":"QueryResultItem"} + }, + "QueryResultType":{ + "type":"string", + "enum":[ + "DOCUMENT", + "QUESTION_ANSWER", + "ANSWER" + ] + }, + "QueryText":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^\\P{C}*$" + }, + "ReadAccessType":{ + "type":"string", + "enum":[ + "ALLOW", + "DENY" + ] + }, + "Relevance":{ + "type":"structure", + "members":{ + "Freshness":{ + "shape":"DocumentMetadataBoolean", + "documentation":"

    Indicates that this field determines how \"fresh\" a document is. For example, if document 1 was created on November 5, and document 2 was created on October 31, document 1 is \"fresher\" than document 2. You can only set the Freshness field on one DATE type field. Only applies to DATE fields.

    " + }, + "Importance":{ + "shape":"Importance", + "documentation":"

    The relative importance of the field in the search. Larger numbers provide more of a boost than smaller numbers.

    " + }, + "Duration":{ + "shape":"Duration", + "documentation":"

    Specifies the time period that the boost applies to. For example, to make the boost apply to documents with the field value within the last month, you would use \"2628000s\". Once the field value is beyond the specified range, the effect of the boost drops off. The higher the importance, the faster the effect drops off. If you don't specify a value, the default is 3 months. The value of the field is a numeric string followed by the character \"s\", for example \"86400s\" for one day, or \"604800s\" for one week.

    Only applies to DATE fields.

    " + }, + "RankOrder":{ + "shape":"Order", + "documentation":"

    Determines how values should be interpreted.

    When the RankOrder field is ASCENDING, higher numbers are better. For example, a document with a rating score of 10 is higher ranking than a document with a rating score of 1.

    When the RankOrder field is DESCENDING, lower numbers are better. For example, in a task tracking application, a priority 1 task is more important than a priority 5 task.

    Only applies to LONG and DOUBLE fields.

    " + }, + "ValueImportanceMap":{ + "shape":"ValueImportanceMap", + "documentation":"

    A list of values that should be given a different boost when they appear in the result list. For example, if you are boosting a field called \"department,\" query terms that match the department field are boosted in the result. However, you can add entries from the department field to boost documents with those values higher.

    For example, you can add entries to the map with names of departments. If you add \"HR\",5 and \"Legal\",3 those departments are given special attention when they appear in the metadata of a document. When those terms appear they are given the specified importance instead of the regular importance for the boost.

    " + } + }, + "documentation":"

    Provides information for manually tuning the relevance of a field in a search. When a query includes terms that match the field, the results are given a boost in the response based on these tuning parameters.
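
    A hedged sketch of the tuning parameters described above (kendra model classes assumed imported; builder names follow the usual generated conventions):

    // Boost a DATE field: documents updated within roughly the last month get a
    // strong boost that decays afterwards. Only one DATE field may set Freshness.
    Relevance lastUpdatedBoost = Relevance.builder()
            .importance(7)
            .duration("2628000s")
            .freshness(true)
            .build();

    // Boost a STRING field: documents whose "department" value is HR or Legal get
    // extra importance, as in the example above.
    Relevance departmentBoost = Relevance.builder()
            .importance(3)
            .valueImportanceMap(java.util.Map.of("HR", 5, "Legal", 3))
            .build();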

    " + }, + "RelevanceFeedback":{ + "type":"structure", + "required":[ + "ResultId", + "RelevanceValue" + ], + "members":{ + "ResultId":{ + "shape":"ResultId", + "documentation":"

    The unique identifier of the search result that the user provided relevance feedback for.

    " + }, + "RelevanceValue":{ + "shape":"RelevanceType", + "documentation":"

    Whether the document was relevant or not relevant to the search.

    " + } + }, + "documentation":"

    Provides feedback on how relevant a document is to a search. Your application uses the SubmitFeedback operation to provide relevance information.

    " + }, + "RelevanceFeedbackList":{ + "type":"list", + "member":{"shape":"RelevanceFeedback"} + }, + "RelevanceType":{ + "type":"string", + "enum":[ + "RELEVANT", + "NOT_RELEVANT" + ] + }, + "ResourceAlreadyExistException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "ResourceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "ResultId":{ + "type":"string", + "max":73, + "min":1 + }, + "RoleArn":{ + "type":"string", + "max":1284, + "min":1, + "pattern":"arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" + }, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]" + }, + "S3DataSourceConfiguration":{ + "type":"structure", + "required":["BucketName"], + "members":{ + "BucketName":{ + "shape":"S3BucketName", + "documentation":"

    The name of the bucket that contains the documents.

    " + }, + "InclusionPrefixes":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of S3 prefixes for the documents that should be included in the index.

    " + }, + "InclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of glob patterns for documents that should be indexed. If a document that matches an inclusion pattern also matches an exclusion pattern, the document is not indexed.

    For more information about glob patterns, see glob (programming) in Wikipedia.

    " + }, + "ExclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of glob patterns for documents that should not be indexed. If a document that matches an inclusion prefix or inclusion pattern also matches an exclusion pattern, the document is not indexed.

    For more information about glob patterns, see glob (programming) in Wikipedia.

    " + }, + "DocumentsMetadataConfiguration":{"shape":"DocumentsMetadataConfiguration"}, + "AccessControlListConfiguration":{ + "shape":"AccessControlListConfiguration", + "documentation":"

    Provides the path to the S3 bucket that contains the user context filtering files for the data source. For the format of the file, see Access control for S3 data sources.

    " + } + }, + "documentation":"

    Provides configuration information for a data source to index documents in an Amazon S3 bucket.
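
    A sketch of such a configuration using the members above (bucket name and prefixes are placeholders; builder names assumed from the usual generated conventions):

    S3DataSourceConfiguration s3Config = S3DataSourceConfiguration.builder()
            .bucketName("my-docs-bucket")
            .inclusionPrefixes("manuals/", "faqs/")          // index only these prefixes
            .exclusionPatterns("**/*.tmp")                   // glob pattern, not a regex
            .documentsMetadataConfiguration(DocumentsMetadataConfiguration.builder()
                    .s3Prefix("metadata/")                   // filter metadata files
                    .build())
            .build();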

    " + }, + "S3ObjectKey":{ + "type":"string", + "max":1024, + "min":1 + }, + "S3Path":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"S3BucketName", + "documentation":"

    The name of the S3 bucket that contains the file.

    " + }, + "Key":{ + "shape":"S3ObjectKey", + "documentation":"

    The name of the file.

    " + } + }, + "documentation":"

    Information required to find a specific file in an Amazon S3 bucket.

    " + }, + "SalesforceChatterFeedConfiguration":{ + "type":"structure", + "required":["DocumentDataFieldName"], + "members":{ + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the column in the Salesforce FeedItem table that contains the content to index. Typically this is the Body column.

    " + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the column in the Salesforce FeedItem table that contains the title of the document. This is typically the Title column.

    " + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    Maps fields from a Salesforce chatter feed into Amazon Kendra index fields.

    " + }, + "IncludeFilterTypes":{ + "shape":"SalesforceChatterFeedIncludeFilterTypes", + "documentation":"

    Filters the documents in the feed based on the status of the user. When you specify ACTIVE_USERS, only documents from users who have an active account are indexed. When you specify STANDARD_USER, only documents for Salesforce standard users are indexed. You can specify both.

    " + } + }, + "documentation":"

    Defines configuration for syncing a Salesforce chatter feed. The contents of the object come from the Salesforce FeedItem table.

    " + }, + "SalesforceChatterFeedIncludeFilterType":{ + "type":"string", + "enum":[ + "ACTIVE_USER", + "STANDARD_USER" + ] + }, + "SalesforceChatterFeedIncludeFilterTypes":{ + "type":"list", + "member":{"shape":"SalesforceChatterFeedIncludeFilterType"}, + "max":2, + "min":1 + }, + "SalesforceConfiguration":{ + "type":"structure", + "required":[ + "ServerUrl", + "SecretArn" + ], + "members":{ + "ServerUrl":{ + "shape":"Url", + "documentation":"

    The instance URL for the Salesforce site that you want to index.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The Amazon Resource Name (ARN) of an AWS Secrets Manager secret that contains the key/value pairs required to connect to your Salesforce instance. The secret must contain a JSON structure with the following keys:

    • authenticationUrl - The OAUTH endpoint that Amazon Kendra connects to get an OAUTH token.

    • consumerKey - The application public key generated when you created your Salesforce application.

    • consumerSecret - The application private key generated when you created your Salesforce application.

    • password - The password associated with the user logging in to the Salesforce instance.

    • securityToken - The token associated with the user account logging in to the Salesforce instance.

    • username - The user name of the user logging in to the Salesforce instance.

    " + }, + "StandardObjectConfigurations":{ + "shape":"SalesforceStandardObjectConfigurationList", + "documentation":"

    Specifies the Salesforce standard objects that Amazon Kendra indexes.

    " + }, + "KnowledgeArticleConfiguration":{ + "shape":"SalesforceKnowledgeArticleConfiguration", + "documentation":"

    Specifies configuration information for the knowledge article types that Amazon Kendra indexes. Amazon Kendra indexes standard knowledge articles and the standard fields of knowledge articles, or the custom fields of custom knowledge articles, but not both.

    " + }, + "ChatterFeedConfiguration":{ + "shape":"SalesforceChatterFeedConfiguration", + "documentation":"

    Specifies configuration information for Salesforce chatter feeds.

    " + }, + "CrawlAttachments":{ + "shape":"Boolean", + "documentation":"

    Indicates whether Amazon Kendra should index attachments to Salesforce objects.

    " + }, + "StandardObjectAttachmentConfiguration":{ + "shape":"SalesforceStandardObjectAttachmentConfiguration", + "documentation":"

    Provides configuration information for processing attachments to Salesforce standard objects.

    " + }, + "IncludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of regular expression patterns. Documents that match the patterns are included in the index. Documents that don't match the patterns are excluded from the index. If a document matches both an inclusion pattern and an exclusion pattern, the document is not included in the index.

    The regex is applied to the name of the attached file.

    " + }, + "ExcludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of regular expression patterns. Documents that match the patterns are excluded from the index. Documents that don't match the patterns are included in the index. If a document matches both an exclusion pattern and an inclusion pattern, the document is not included in the index.

    The regex is applied to the name of the attached file.

    " + } + }, + "documentation":"

    Provides configuration information for connecting to a Salesforce data source.
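
    A minimal sketch of a Salesforce configuration built from the members above. The SecretArn must point to a Secrets Manager secret whose JSON contains the keys listed earlier (authenticationUrl, consumerKey, consumerSecret, password, securityToken, username); the URL and ARN here are placeholders, and builder names follow the usual generated conventions:

    SalesforceConfiguration sfConfig = SalesforceConfiguration.builder()
            .serverUrl("https://example.my.salesforce.com")
            .secretArn("arn:aws:secretsmanager:us-east-1:111122223333:secret:sf-credentials")
            .crawlAttachments(true)
            .chatterFeedConfiguration(SalesforceChatterFeedConfiguration.builder()
                    .documentDataFieldName("Body")       // content column, typically Body
                    .documentTitleFieldName("Title")     // title column, typically Title
                    .build())
            .build();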

    " + }, + "SalesforceCustomKnowledgeArticleTypeConfiguration":{ + "type":"structure", + "required":[ + "Name", + "DocumentDataFieldName" + ], + "members":{ + "Name":{ + "shape":"SalesforceCustomKnowledgeArticleTypeName", + "documentation":"

    The name of the configuration.

    " + }, + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the field in the custom knowledge article that contains the document data to index.

    " + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the field in the custom knowledge article that contains the document title.

    " + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    One or more objects that map fields in the custom knowledge article to fields in the Amazon Kendra index.

    " + } + }, + "documentation":"

    Provides configuration information for indexing Salesforce custom articles.

    " + }, + "SalesforceCustomKnowledgeArticleTypeConfigurationList":{ + "type":"list", + "member":{"shape":"SalesforceCustomKnowledgeArticleTypeConfiguration"}, + "max":10, + "min":1 + }, + "SalesforceCustomKnowledgeArticleTypeName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z][a-zA-Z0-9_]*$" + }, + "SalesforceKnowledgeArticleConfiguration":{ + "type":"structure", + "required":["IncludedStates"], + "members":{ + "IncludedStates":{ + "shape":"SalesforceKnowledgeArticleStateList", + "documentation":"

    Specifies the document states that should be included when Amazon Kendra indexes knowledge articles. You must specify at least one state.

    " + }, + "StandardKnowledgeArticleTypeConfiguration":{ + "shape":"SalesforceStandardKnowledgeArticleTypeConfiguration", + "documentation":"

    Provides configuration information for standard Salesforce knowledge articles.

    " + }, + "CustomKnowledgeArticleTypeConfigurations":{ + "shape":"SalesforceCustomKnowledgeArticleTypeConfigurationList", + "documentation":"

    Provides configuration information for custom Salesforce knowledge articles.

    " + } + }, + "documentation":"

    Specifies configuration information for the knowledge article types that Amazon Kendra indexes. Amazon Kendra indexes standard knowledge articles and the standard fields of knowledge articles, or the custom fields of custom knowledge articles, but not both.

    " + }, + "SalesforceKnowledgeArticleState":{ + "type":"string", + "enum":[ + "DRAFT", + "PUBLISHED", + "ARCHIVED" + ] + }, + "SalesforceKnowledgeArticleStateList":{ + "type":"list", + "member":{"shape":"SalesforceKnowledgeArticleState"}, + "max":3, + "min":1 + }, + "SalesforceStandardKnowledgeArticleTypeConfiguration":{ + "type":"structure", + "required":["DocumentDataFieldName"], + "members":{ + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the field that contains the document data to index.

    " + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the field that contains the document title.

    " + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    One or more objects that map fields in the knowledge article to Amazon Kendra index fields. The index field must exist before you can map a Salesforce field to it.

    " + } + }, + "documentation":"

    Provides configuration information for standard Salesforce knowledge articles.

    " + }, + "SalesforceStandardObjectAttachmentConfiguration":{ + "type":"structure", + "members":{ + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the field used for the document title.

    " + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    One or more objects that map fields in attachments to Amazon Kendra index fields.

    " + } + }, + "documentation":"

    Provides configuration information for processing attachments to Salesforce standard objects.

    " + }, + "SalesforceStandardObjectConfiguration":{ + "type":"structure", + "required":[ + "Name", + "DocumentDataFieldName" + ], + "members":{ + "Name":{ + "shape":"SalesforceStandardObjectName", + "documentation":"

    The name of the standard object.

    " + }, + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the field in the standard object table that contains the document contents.

    " + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the field in the standard object table that contains the document title.

    " + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    One or more objects that map fields in the standard object to Amazon Kendra index fields. The index field must exist before you can map a Salesforce field to it.

    " + } + }, + "documentation":"

    Specifies configuration information for indexing a single standard object.

    " + }, + "SalesforceStandardObjectConfigurationList":{ + "type":"list", + "member":{"shape":"SalesforceStandardObjectConfiguration"}, + "max":17, + "min":1 + }, + "SalesforceStandardObjectName":{ + "type":"string", + "enum":[ + "ACCOUNT", + "CAMPAIGN", + "CASE", + "CONTACT", + "CONTRACT", + "DOCUMENT", + "GROUP", + "IDEA", + "LEAD", + "OPPORTUNITY", + "PARTNER", + "PRICEBOOK", + "PRODUCT", + "PROFILE", + "SOLUTION", + "TASK", + "USER" + ] + }, + "ScanSchedule":{"type":"string"}, + "ScoreAttributes":{ + "type":"structure", + "members":{ + "ScoreConfidence":{ + "shape":"ScoreConfidence", + "documentation":"

    A relative ranking for how well the response matches the query.

    " + } + }, + "documentation":"

    Provides a relative ranking that indicates how confident Amazon Kendra is that the response matches the query.

    " + }, + "ScoreConfidence":{ + "type":"string", + "documentation":"Enumeration for query score confidence.", + "enum":[ + "VERY_HIGH", + "HIGH", + "MEDIUM", + "LOW" + ] + }, + "Search":{ + "type":"structure", + "members":{ + "Facetable":{ + "shape":"Boolean", + "documentation":"

    Indicates that the field can be used to create search facets, a count of results for each value in the field. The default is false.

    " + }, + "Searchable":{ + "shape":"Boolean", + "documentation":"

    Determines whether the field is used in the search. If the Searchable field is true, you can use relevance tuning to manually tune how Amazon Kendra weights the field in the search. The default is true for string fields and false for number and date fields.

    " + }, + "Displayable":{ + "shape":"Boolean", + "documentation":"

    Determines whether the field is returned in the query response. The default is true.

    " + }, + "Sortable":{ + "shape":"Boolean", + "documentation":"

    Determines whether the field can be used to sort the results of a query. If you specify sorting on a field that does not have Sortable set to true, Amazon Kendra returns an exception. The default is false.

    " + } + }, + "documentation":"

    Provides information about how a custom index field is used during a search.
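
    A sketch combining the Search flags with a DocumentMetadataConfiguration, as described above (builder names assumed from the usual generated conventions). The resulting field definition would be supplied to the UpdateIndex operation mentioned earlier:

    DocumentMetadataConfiguration departmentField = DocumentMetadataConfiguration.builder()
            .name("department")
            .type(DocumentAttributeValueType.STRING_VALUE)
            .search(Search.builder()
                    .facetable(true)      // allow facet counts on this field
                    .searchable(true)     // include the field in relevance matching
                    .displayable(true)    // return the field in query responses
                    .sortable(true)       // allow SortingConfiguration on this field
                    .build())
            .build();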

    " + }, + "SecretArn":{ + "type":"string", + "max":1284, + "min":1, + "pattern":"arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" + }, + "SecurityGroupIdList":{ + "type":"list", + "member":{"shape":"VpcSecurityGroupId"}, + "max":10, + "min":1 + }, + "ServerSideEncryptionConfiguration":{ + "type":"structure", + "members":{ + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

    The identifier of the AWS KMS customer master key (CMK). Amazon Kendra doesn't support asymmetric CMKs.

    " + } + }, + "documentation":"

    Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt data indexed by Amazon Kendra. Amazon Kendra doesn't support asymmetric CMKs.

    " + }, + "ServiceNowBuildVersionType":{ + "type":"string", + "enum":[ + "LONDON", + "OTHERS" + ] + }, + "ServiceNowConfiguration":{ + "type":"structure", + "required":[ + "HostUrl", + "SecretArn", + "ServiceNowBuildVersion" + ], + "members":{ + "HostUrl":{ + "shape":"ServiceNowHostUrl", + "documentation":"

    The ServiceNow instance that the data source connects to. The host endpoint should look like the following: {instance}.service-now.com.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the user name and password required to connect to the ServiceNow instance.

    " + }, + "ServiceNowBuildVersion":{ + "shape":"ServiceNowBuildVersionType", + "documentation":"

    The identifier of the release that the ServiceNow host is running. If the host is not running the LONDON release, use OTHERS.

    " + }, + "KnowledgeArticleConfiguration":{ + "shape":"ServiceNowKnowledgeArticleConfiguration", + "documentation":"

    Provides configuration information for crawling knowledge articles in the ServiceNow site.

    " + }, + "ServiceCatalogConfiguration":{ + "shape":"ServiceNowServiceCatalogConfiguration", + "documentation":"

    Provides configuration information for crawling service catalogs in the ServiceNow site.

    " + } + }, + "documentation":"

    Provides configuration information required to connect to a ServiceNow data source.

    " + }, + "ServiceNowHostUrl":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(?!(^(https?|ftp|file):\\/\\/))[a-z0-9-]+(\\.service-now\\.com)$" + }, + "ServiceNowKnowledgeArticleConfiguration":{ + "type":"structure", + "required":["DocumentDataFieldName"], + "members":{ + "CrawlAttachments":{ + "shape":"Boolean", + "documentation":"

    Indicates whether Amazon Kendra should index attachments to knowledge articles.

    " + }, + "IncludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    List of regular expressions applied to knowledge articles. Items that don't match the inclusion pattern are not indexed. The regex is applied to the field specified in the PatternTargetField.

    " + }, + "ExcludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    List of regular expressions applied to knowledge articles. Items that match the exclusion pattern are not indexed. The regex is applied to the field specified in the PatternTargetField.

    " + }, + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the ServiceNow field that is mapped to the index document contents field in the Amazon Kendra index.

    " + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the ServiceNow field that is mapped to the index document title field.

    " + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    Mapping between ServiceNow fields and Amazon Kendra index fields. You must create the index field before you map the field.

    " + } + }, + "documentation":"

    Provides configuration information for crawling knowledge articles in the ServiceNow site.

    " + }, + "ServiceNowServiceCatalogConfiguration":{ + "type":"structure", + "required":["DocumentDataFieldName"], + "members":{ + "CrawlAttachments":{ + "shape":"Boolean", + "documentation":"

    Indicates whether Amazon Kendra should crawl attachments to the service catalog items.

    " + }, + "IncludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    Determines the types of file attachments that are included in the index.

    " + }, + "ExcludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    Determines the types of file attachments that are excluded from the index.

    " + }, + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the ServiceNow field that is mapped to the index document contents field in the Amazon Kendra index.

    " + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The name of the ServiceNow field that is mapped to the index document title field.

    " + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    Mapping between ServiceNow fields and Amazon Kendra index fields. You must create the index field before you map the field.

    " + } + }, + "documentation":"

    Provides configuration information for crawling service catalog items in the ServiceNow site.

    " + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "SharePointConfiguration":{ + "type":"structure", + "required":[ + "SharePointVersion", + "Urls", + "SecretArn" + ], + "members":{ + "SharePointVersion":{ + "shape":"SharePointVersion", + "documentation":"

    The version of Microsoft SharePoint that you are using as a data source.

    " + }, + "Urls":{ + "shape":"SharePointUrlList", + "documentation":"

    The URLs of the Microsoft SharePoint site that contains the documents that should be indexed.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager. The credentials should be a user/password pair. For more information, see Using a Microsoft SharePoint Data Source. For more information about AWS Secrets Manager, see What Is AWS Secrets Manager in the AWS Secrets Manager user guide.

    " + }, + "CrawlAttachments":{ + "shape":"Boolean", + "documentation":"

    TRUE to include attachments to documents stored in your Microsoft SharePoint site in the index; otherwise, FALSE.

    " + }, + "UseChangeLog":{ + "shape":"Boolean", + "documentation":"

    Set to TRUE to use the Microsoft SharePoint change log to determine the documents that need to be updated in the index. Depending on the size of the SharePoint change log, it may take longer for Amazon Kendra to use the change log than to determine the changed documents using the Amazon Kendra document crawler.

    " + }, + "InclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of regular expression patterns. Documents that match the patterns are included in the index. Documents that don't match the patterns are excluded from the index. If a document matches both an inclusion pattern and an exclusion pattern, the document is not included in the index.

    The regex is applied to the display URL of the SharePoint document.

    " + }, + "ExclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

    A list of regular expression patterns. Documents that match the patterns are excluded from the index. Documents that don't match the patterns are included in the index. If a document matches both an exclusion pattern and an inclusion pattern, the document is not included in the index.

    The regex is applied to the display URL of the SharePoint document.

    " + }, + "VpcConfiguration":{"shape":"DataSourceVpcConfiguration"}, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

    A list of DataSourceToIndexFieldMapping objects that map Microsoft SharePoint attributes to custom fields in the Amazon Kendra index. You must first create the index fields before you map SharePoint attributes. For more information, see Mapping Data Source Fields.

    " + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

    The Microsoft SharePoint attribute field that contains the title of the document.

    " + }, + "DisableLocalGroups":{ + "shape":"Boolean", + "documentation":"

    A Boolean value that specifies whether local groups are disabled (True) or enabled (False).

    " + } + }, + "documentation":"

    Provides configuration information for connecting to a Microsoft SharePoint data source.
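
    For orientation only (not part of this diff): a minimal sketch of the same structure built with the generated v2 builders, assuming lowerCamelCase methods for the members above; the site URL and secret ARN are hypothetical.

    import software.amazon.awssdk.services.kendra.model.SharePointConfiguration;
    import software.amazon.awssdk.services.kendra.model.SharePointVersion;

    public final class SharePointConfigSketch {
        public static SharePointConfiguration build() {
            return SharePointConfiguration.builder()
                    .sharePointVersion(SharePointVersion.SHAREPOINT_ONLINE)
                    .urls("https://example.sharepoint.com/sites/docs")
                    .secretArn("arn:aws:secretsmanager:us-east-1:111122223333:secret:sp-creds")
                    .crawlAttachments(true)
                    .useChangeLog(true)            // incremental sync via the SharePoint change log
                    .inclusionPatterns(".*\\.pdf") // regex applied to the display URL of the document
                    .build();
        }
    }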

    " + }, + "SharePointUrlList":{ + "type":"list", + "member":{"shape":"Url"}, + "max":100, + "min":1 + }, + "SharePointVersion":{ + "type":"string", + "enum":["SHAREPOINT_ONLINE"] + }, + "SharedDriveId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^\\P{C}*$" + }, + "SortOrder":{ + "type":"string", + "enum":[ + "DESC", + "ASC" + ] + }, + "SortingConfiguration":{ + "type":"structure", + "required":[ + "DocumentAttributeKey", + "SortOrder" + ], + "members":{ + "DocumentAttributeKey":{ + "shape":"DocumentAttributeKey", + "documentation":"

    The name of the document attribute used to sort the response. You can use any field that has the Sortable flag set to true.

    You can also sort by any of the following built-in attributes:

    • _category

    • _created_at

    • _last_updated_at

    • _version

    • _view_count

    " + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

    The order that the results should be returned in. In case of ties, the relevance assigned to the result by Amazon Kendra is used as the tie-breaker.

    " + } + }, + "documentation":"

    Specifies the document attribute to use to sort the response to an Amazon Kendra query. You can specify a single attribute for sorting. The attribute must have the Sortable flag set to true; otherwise, Amazon Kendra returns an exception.

    You can sort attributes of the following types.

    • Date value

    • Long value

    • String value

    You can't sort attributes of the following type.

    • String list value
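
    A minimal sketch (not from this diff) of how such a sorting configuration might be attached to a query, assuming the generated v2 QueryRequest builder with the usual lowerCamelCase methods; the index ID and query text are hypothetical.

    import software.amazon.awssdk.services.kendra.model.QueryRequest;
    import software.amazon.awssdk.services.kendra.model.SortOrder;
    import software.amazon.awssdk.services.kendra.model.SortingConfiguration;

    public final class SortedQuerySketch {
        public static QueryRequest build(String indexId) {
            return QueryRequest.builder()
                    .indexId(indexId)
                    .queryText("vacation policy")
                    .sortingConfiguration(SortingConfiguration.builder()
                            .documentAttributeKey("_last_updated_at") // one of the built-in attributes listed above
                            .sortOrder(SortOrder.DESC)
                            .build())
                    .build();
        }
    }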

    " + }, + "SqlConfiguration":{ + "type":"structure", + "members":{ + "QueryIdentifiersEnclosingOption":{ + "shape":"QueryIdentifiersEnclosingOption", + "documentation":"

    Determines whether Amazon Kendra encloses SQL identifiers for tables and column names in double quotes (\") when making a database query.

    By default, Amazon Kendra passes SQL identifiers the way that they are entered into the data source configuration. It does not change the case of identifiers or enclose them in quotes.

    PostgreSQL internally converts uppercase characters to lower case characters in identifiers unless they are quoted. Choosing this option encloses identifiers in quotes so that PostgreSQL does not convert their case.

    For MySQL databases, you must enable the ansi_quotes option when you set this field to DOUBLE_QUOTES.

    " + } + }, + "documentation":"

    Provides information that configures Amazon Kendra to use a SQL database.

    " + }, + "StartDataSourceSyncJobRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"DataSourceId", + "documentation":"

    The identifier of the data source to synchronize.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the data source.

    " + } + } + }, + "StartDataSourceSyncJobResponse":{ + "type":"structure", + "members":{ + "ExecutionId":{ + "shape":"String", + "documentation":"

    Identifies a particular synchronization job.

    " + } + } + }, + "StopDataSourceSyncJobRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"DataSourceId", + "documentation":"

    The identifier of the data source for which to stop the synchronization jobs.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the data source.

    " + } + } + }, + "StorageCapacityUnit":{ + "type":"integer", + "min":0 + }, + "String":{ + "type":"string", + "max":2048, + "min":1 + }, + "SubmitFeedbackRequest":{ + "type":"structure", + "required":[ + "IndexId", + "QueryId" + ], + "members":{ + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that was queried.

    " + }, + "QueryId":{ + "shape":"QueryId", + "documentation":"

    The identifier of the specific query for which you are submitting feedback. The query ID is returned in the response to the Query operation.

    " + }, + "ClickFeedbackItems":{ + "shape":"ClickFeedbackList", + "documentation":"

    Tells Amazon Kendra that a particular search result link was chosen by the user.

    " + }, + "RelevanceFeedbackItems":{ + "shape":"RelevanceFeedbackList", + "documentation":"

    Provides Amazon Kendra with relevant or not relevant feedback for whether a particular item was relevant to the search.
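
    To make the feedback flow concrete, here is a hedged sketch (not part of this diff) that submits one click item and one relevance item for a query result, assuming the generated v2 KendraClient and the ClickFeedback/RelevanceFeedback shapes with their usual lowerCamelCase builder methods.

    import java.time.Instant;
    import software.amazon.awssdk.services.kendra.KendraClient;
    import software.amazon.awssdk.services.kendra.model.ClickFeedback;
    import software.amazon.awssdk.services.kendra.model.RelevanceFeedback;
    import software.amazon.awssdk.services.kendra.model.RelevanceType;
    import software.amazon.awssdk.services.kendra.model.SubmitFeedbackRequest;

    public final class FeedbackSketch {
        public static void send(KendraClient kendra, String indexId, String queryId, String resultId) {
            kendra.submitFeedback(SubmitFeedbackRequest.builder()
                    .indexId(indexId)
                    .queryId(queryId) // the query ID returned by the Query operation
                    .clickFeedbackItems(ClickFeedback.builder()
                            .resultId(resultId)
                            .clickTime(Instant.now())
                            .build())
                    .relevanceFeedbackItems(RelevanceFeedback.builder()
                            .resultId(resultId)
                            .relevanceValue(RelevanceType.RELEVANT)
                            .build())
                    .build());
        }
    }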

    " + } + } + }, + "SubnetId":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[\\-0-9a-zA-Z]+" + }, + "SubnetIdList":{ + "type":"list", + "member":{"shape":"SubnetId"}, + "max":6, + "min":1 + }, + "TableName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z][a-zA-Z0-9_]*$" + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    The key for the tag. Keys are not case sensitive and must be unique for the index, FAQ, or data source.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    The value associated with the tag. The value may be an empty string but it can't be null.

    " + } + }, + "documentation":"

    A list of key/value pairs that identify an index, FAQ, or data source. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the index, FAQ, or data source to tag.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    A list of tag keys to add to the index, FAQ, or data source. If a tag already exists, the existing value is replaced with the new value.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TenantDomain":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([a-zA-Z0-9]+(-[a-zA-Z0-9]+)*\\.)+[a-z]{2,}$" + }, + "TextDocumentStatistics":{ + "type":"structure", + "required":[ + "IndexedTextDocumentsCount", + "IndexedTextBytes" + ], + "members":{ + "IndexedTextDocumentsCount":{ + "shape":"IndexedTextDocumentsCount", + "documentation":"

    The number of text documents indexed.

    " + }, + "IndexedTextBytes":{ + "shape":"IndexedTextBytes", + "documentation":"

    The total size, in bytes, of the indexed documents.

    " + } + }, + "documentation":"

    Provides information about text documents indexed in an index.

    " + }, + "TextWithHighlights":{ + "type":"structure", + "members":{ + "Text":{ + "shape":"String", + "documentation":"

    The text to display to the user.

    " + }, + "Highlights":{ + "shape":"HighlightList", + "documentation":"

    The beginning and end of the text that should be highlighted.

    " + } + }, + "documentation":"

    Provides text and information about where to highlight the text.

    " + }, + "ThesaurusId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "ThesaurusName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "ThesaurusStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "UPDATING", + "ACTIVE_BUT_UPDATE_FAILED", + "FAILED" + ] + }, + "ThesaurusSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ThesaurusId", + "documentation":"

    The identifier of the thesaurus.

    " + }, + "Name":{ + "shape":"ThesaurusName", + "documentation":"

    The name of the thesaurus.

    " + }, + "Status":{ + "shape":"ThesaurusStatus", + "documentation":"

    The status of the thesaurus.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The Unix datetime that the thesaurus was created.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The Unix datetime that the thesaurus was last updated.

    " + } + }, + "documentation":"

    An array of summary information for one or more thesauruses.

    " + }, + "ThesaurusSummaryItems":{ + "type":"list", + "member":{"shape":"ThesaurusSummary"} + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "TimeRange":{ + "type":"structure", + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

    The UNIX datetime of the beginning of the time range.

    " + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

    The UNIX datetime of the end of the time range.

    " + } + }, + "documentation":"

    Provides a range of time.

    " + }, + "Timestamp":{"type":"timestamp"}, + "Title":{"type":"string"}, + "Token":{ + "type":"string", + "max":100000, + "min":1, + "pattern":"^\\P{C}*$" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the index, FAQ, or data source to remove the tag from.

    " + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    A list of tag keys to remove from the index, FAQ, or data source. If a tag key does not exist on the resource, it is ignored.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDataSourceRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"DataSourceId", + "documentation":"

    The unique identifier of the data source to update.

    " + }, + "Name":{ + "shape":"DataSourceName", + "documentation":"

    The name of the data source to update. The name of the data source can't be updated. To rename a data source you must delete the data source and re-create it.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index that contains the data source to update.

    " + }, + "Configuration":{"shape":"DataSourceConfiguration"}, + "Description":{ + "shape":"Description", + "documentation":"

    The new description for the data source.

    " + }, + "Schedule":{ + "shape":"ScanSchedule", + "documentation":"

    The new update schedule for the data source.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the new role to use when the data source is accessing resources on your behalf.

    " + } + } + }, + "UpdateIndexRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index to update.

    " + }, + "Name":{ + "shape":"IndexName", + "documentation":"

    The name of the index to update.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    A new IAM role that gives Amazon Kendra permission to access your Amazon CloudWatch logs.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A new description for the index.

    " + }, + "DocumentMetadataConfigurationUpdates":{ + "shape":"DocumentMetadataConfigurationList", + "documentation":"

    The document metadata to update.

    " + }, + "CapacityUnits":{ + "shape":"CapacityUnitsConfiguration", + "documentation":"

    Sets the number of additional storage and query capacity units that should be used by the index. You can change the capacity of the index up to 5 times per day.

    If you are using extra storage units, you can't reduce the storage capacity below that required to meet the storage needs for your index.
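
    As an illustration only (not part of this diff): adding one extra storage unit and one extra query unit might look like the following, assuming the generated v2 KendraClient and a CapacityUnitsConfiguration shape with storageCapacityUnits/queryCapacityUnits members.

    import software.amazon.awssdk.services.kendra.KendraClient;
    import software.amazon.awssdk.services.kendra.model.CapacityUnitsConfiguration;
    import software.amazon.awssdk.services.kendra.model.UpdateIndexRequest;

    public final class CapacitySketch {
        public static void addCapacity(KendraClient kendra, String indexId) {
            kendra.updateIndex(UpdateIndexRequest.builder()
                    .id(indexId)
                    .capacityUnits(CapacityUnitsConfiguration.builder()
                            .storageCapacityUnits(1) // extra units on top of the base capacity
                            .queryCapacityUnits(1)
                            .build())
                    .build());
        }
    }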

    " + }, + "UserTokenConfigurations":{ + "shape":"UserTokenConfigurationList", + "documentation":"

    The user token configuration.

    " + }, + "UserContextPolicy":{ + "shape":"UserContextPolicy", + "documentation":"

    The user context policy.

    " + } + } + }, + "UpdateThesaurusRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"ThesaurusId", + "documentation":"

    The identifier of the thesaurus to update.

    " + }, + "Name":{ + "shape":"ThesaurusName", + "documentation":"

    The updated name of the thesaurus.

    " + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

    The identifier of the index associated with the thesaurus to update.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The updated description of the thesaurus.

    " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

    The updated role ARN of the thesaurus.

    " + }, + "SourceS3Path":{"shape":"S3Path"} + } + }, + "Url":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(https?|ftp|file):\\/\\/([^\\s]*)" + }, + "UserAccount":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^\\P{C}*$" + }, + "UserContext":{ + "type":"structure", + "members":{ + "Token":{ + "shape":"Token", + "documentation":"

    The user context token. It must be a JWT or a JSON token.

    " + } + }, + "documentation":"

    Provides information about the user context for an Amazon Kendra index.

    " + }, + "UserContextPolicy":{ + "type":"string", + "enum":[ + "ATTRIBUTE_FILTER", + "USER_TOKEN" + ] + }, + "UserNameAttributeField":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^\\P{C}*$" + }, + "UserTokenConfiguration":{ + "type":"structure", + "members":{ + "JwtTokenTypeConfiguration":{ + "shape":"JwtTokenTypeConfiguration", + "documentation":"

    Information about the JWT token type configuration.

    " + }, + "JsonTokenTypeConfiguration":{ + "shape":"JsonTokenTypeConfiguration", + "documentation":"

    Information about the JSON token type configuration.

    " + } + }, + "documentation":"

    Provides configuration information for a token configuration.

    " + }, + "UserTokenConfigurationList":{ + "type":"list", + "member":{"shape":"UserTokenConfiguration"}, + "max":1 + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "ValueImportanceMap":{ + "type":"map", + "key":{"shape":"ValueImportanceMapKey"}, + "value":{"shape":"Importance"} + }, + "ValueImportanceMapKey":{ + "type":"string", + "max":50, + "min":1 + }, + "VisitorId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "VpcSecurityGroupId":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[-0-9a-zA-Z]+" + } + }, + "documentation":"

    Amazon Kendra is a service for indexing large document sets.

    " +} diff --git a/services/kinesis/build.properties b/services/kinesis/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/kinesis/build.properties +++ b/services/kinesis/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 5afe52bf11f7..545de4bba400 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + kinesisvideosignaling + AWS Java SDK :: Services :: Kinesis Video Signaling + The AWS Java SDK for Kinesis Video Signaling module holds the client classes that are used for + communicating with Kinesis Video Signaling. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.kinesisvideosignaling + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/kinesisvideosignaling/src/main/resources/codegen-resources/paginators-1.json b/services/kinesisvideosignaling/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/kinesisvideosignaling/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/kinesisvideosignaling/src/main/resources/codegen-resources/service-2.json b/services/kinesisvideosignaling/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..9f6f60e216f1 --- /dev/null +++ b/services/kinesisvideosignaling/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,249 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-12-04", + "endpointPrefix":"kinesisvideo", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon Kinesis Video Signaling Channels", + "serviceFullName":"Amazon Kinesis Video Signaling Channels", + "serviceId":"Kinesis Video Signaling", + "signatureVersion":"v4", + "uid":"kinesis-video-signaling-2019-12-04" + }, + "operations":{ + "GetIceServerConfig":{ + "name":"GetIceServerConfig", + "http":{ + "method":"POST", + "requestUri":"/v1/get-ice-server-config" + }, + "input":{"shape":"GetIceServerConfigRequest"}, + "output":{"shape":"GetIceServerConfigResponse"}, + "errors":[ + {"shape":"InvalidClientException"}, + {"shape":"SessionExpiredException"}, + {"shape":"ClientLimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Gets the Interactive Connectivity Establishment (ICE) server configuration information, including URIs, username, and password, which can be used to configure the WebRTC connection. The ICE component uses this configuration information to set up the WebRTC connection, including authenticating with the Traversal Using Relays around NAT (TURN) relay server.

    TURN is a protocol that is used to improve the connectivity of peer-to-peer applications. By providing a cloud-based relay service, TURN ensures that a connection can be established even when one or more peers are incapable of a direct peer-to-peer connection. For more information, see A REST API For Access To TURN Services.

    You can invoke this API to establish a fallback mechanism in case either of the peers is unable to establish a direct peer-to-peer connection over a signaling channel. You must specify either a signaling channel ARN or the client ID in order to invoke this API.
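
    A minimal sketch (not part of this diff) of fetching the TURN configuration with the generated v2 client, assuming the KinesisVideoSignalingClient class and lowerCamelCase builder methods for the members defined below; the channel ARN and client ID are hypothetical.

    import software.amazon.awssdk.services.kinesisvideosignaling.KinesisVideoSignalingClient;
    import software.amazon.awssdk.services.kinesisvideosignaling.model.GetIceServerConfigRequest;
    import software.amazon.awssdk.services.kinesisvideosignaling.model.GetIceServerConfigResponse;
    import software.amazon.awssdk.services.kinesisvideosignaling.model.IceServer;
    import software.amazon.awssdk.services.kinesisvideosignaling.model.Service;

    public final class IceConfigSketch {
        public static void printTurnServers(KinesisVideoSignalingClient signaling, String channelArn) {
            GetIceServerConfigResponse response = signaling.getIceServerConfig(GetIceServerConfigRequest.builder()
                    .channelARN(channelArn)
                    .clientId("viewer-1")  // must be unique within the signaling channel
                    .service(Service.TURN) // TURN is currently the only valid value
                    .build());
            for (IceServer server : response.iceServerList()) {
                System.out.printf("uris=%s ttlSeconds=%d%n", server.uris(), server.ttl());
            }
        }
    }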

    " + }, + "SendAlexaOfferToMaster":{ + "name":"SendAlexaOfferToMaster", + "http":{ + "method":"POST", + "requestUri":"/v1/send-alexa-offer-to-master" + }, + "input":{"shape":"SendAlexaOfferToMasterRequest"}, + "output":{"shape":"SendAlexaOfferToMasterResponse"}, + "errors":[ + {"shape":"ClientLimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    This API allows you to connect WebRTC-enabled devices with Alexa display devices. When invoked, it sends the Alexa Session Description Protocol (SDP) offer to the master peer. The offer is delivered as soon as the master is connected to the specified signaling channel. This API returns the SDP answer from the connected master. If the master is not connected to the signaling channel, redelivery requests are made until the message expires.

    " + } + }, + "shapes":{ + "Answer":{ + "type":"string", + "max":10000, + "min":1 + }, + "ClientId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "ClientLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Your request was throttled because you have exceeded the limit of allowed client calls. Try making the call later.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ErrorMessage":{"type":"string"}, + "GetIceServerConfigRequest":{ + "type":"structure", + "required":["ChannelARN"], + "members":{ + "ChannelARN":{ + "shape":"ResourceARN", + "documentation":"

    The ARN of the signaling channel to be used for the peer-to-peer connection between configured peers.

    " + }, + "ClientId":{ + "shape":"ClientId", + "documentation":"

    Unique identifier for the viewer. Must be unique within the signaling channel.

    " + }, + "Service":{ + "shape":"Service", + "documentation":"

    Specifies the desired service. Currently, TURN is the only valid value.

    " + }, + "Username":{ + "shape":"Username", + "documentation":"

    An optional user ID to be associated with the credentials.

    " + } + } + }, + "GetIceServerConfigResponse":{ + "type":"structure", + "members":{ + "IceServerList":{ + "shape":"IceServerList", + "documentation":"

    The list of ICE server information objects.

    " + } + } + }, + "IceServer":{ + "type":"structure", + "members":{ + "Uris":{ + "shape":"Uris", + "documentation":"

    An array of URIs, in the form specified in the I-D.petithuguenin-behave-turn-uris spec. These URIs provide the different addresses and/or protocols that can be used to reach the TURN server.

    " + }, + "Username":{ + "shape":"Username", + "documentation":"

    A username to log in to the ICE server.

    " + }, + "Password":{ + "shape":"Password", + "documentation":"

    A password to log in to the ICE server.

    " + }, + "Ttl":{ + "shape":"Ttl", + "documentation":"

    The period of time, in seconds, during which the username and password are valid.

    " + } + }, + "documentation":"

    A structure for the ICE server connection data.

    " + }, + "IceServerList":{ + "type":"list", + "member":{"shape":"IceServer"} + }, + "InvalidArgumentException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The value for this input parameter is invalid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidClientException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

    The specified client is invalid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "MessagePayload":{ + "type":"string", + "max":10000, + "min":1, + "pattern":"[a-zA-Z0-9+/=]+" + }, + "NotAuthorizedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The caller is not authorized to perform this operation.

    ", + "error":{"httpStatusCode":401}, + "exception":true + }, + "Password":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "ResourceARN":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:aws:kinesisvideo:[a-z0-9-]+:[0-9]+:[a-z]+/[a-zA-Z0-9_.-]+/[0-9]+" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The specified resource is not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SendAlexaOfferToMasterRequest":{ + "type":"structure", + "required":[ + "ChannelARN", + "SenderClientId", + "MessagePayload" + ], + "members":{ + "ChannelARN":{ + "shape":"ResourceARN", + "documentation":"

    The ARN of the signaling channel by which Alexa and the master peer communicate.

    " + }, + "SenderClientId":{ + "shape":"ClientId", + "documentation":"

    The unique identifier for the sender client.

    " + }, + "MessagePayload":{ + "shape":"MessagePayload", + "documentation":"

    The base64-encoded SDP offer content.

    " + } + } + }, + "SendAlexaOfferToMasterResponse":{ + "type":"structure", + "members":{ + "Answer":{ + "shape":"Answer", + "documentation":"

    The base64-encoded SDP answer content.

    " + } + } + }, + "Service":{ + "type":"string", + "enum":["TURN"] + }, + "SessionExpiredException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

    The client session has expired. Once a client is connected, the session is valid for 45 minutes. The client should reconnect to the channel to continue sending/receiving messages.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Ttl":{ + "type":"integer", + "max":86400, + "min":30 + }, + "Uri":{ + "type":"string", + "max":256, + "min":1 + }, + "Uris":{ + "type":"list", + "member":{"shape":"Uri"} + }, + "Username":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "errorMessage":{"type":"string"} + }, + "documentation":"

    Kinesis Video Streams Signaling Service is an intermediate service that establishes a communication channel for discovering peers and for transmitting offers and answers in order to establish a peer-to-peer connection using WebRTC technology.

    " +} diff --git a/services/kms/build.properties b/services/kms/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/kms/build.properties +++ b/services/kms/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/kms/pom.xml b/services/kms/pom.xml index 1df57ee53b20..07b80d2b6a9f 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + lakeformation + AWS Java SDK :: Services :: LakeFormation + The AWS Java SDK for LakeFormation module holds the client classes that are used for + communicating with LakeFormation. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.lakeformation + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/lakeformation/src/main/resources/codegen-resources/paginators-1.json b/services/lakeformation/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..85abe8f150e6 --- /dev/null +++ b/services/lakeformation/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,19 @@ +{ + "pagination": { + "GetEffectivePermissionsForPath": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListPermissions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListResources": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/lakeformation/src/main/resources/codegen-resources/service-2.json b/services/lakeformation/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..bf2df2abf9a2 --- /dev/null +++ b/services/lakeformation/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1075 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-03-31", + "endpointPrefix":"lakeformation", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Lake Formation", + "serviceId":"LakeFormation", + "signatureVersion":"v4", + "signingName":"lakeformation", + "targetPrefix":"AWSLakeFormation", + "uid":"lakeformation-2017-03-31" + }, + "operations":{ + "BatchGrantPermissions":{ + "name":"BatchGrantPermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGrantPermissionsRequest"}, + "output":{"shape":"BatchGrantPermissionsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

    Batch operation to grant permissions to the principal.

    " + }, + "BatchRevokePermissions":{ + "name":"BatchRevokePermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchRevokePermissionsRequest"}, + "output":{"shape":"BatchRevokePermissionsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

    Batch operation to revoke permissions from the principal.

    " + }, + "DeregisterResource":{ + "name":"DeregisterResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterResourceRequest"}, + "output":{"shape":"DeregisterResourceResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

    Deregisters the resource as managed by the Data Catalog.

    When you deregister a path, Lake Formation removes the path from the inline policy attached to your service-linked role.

    " + }, + "DescribeResource":{ + "name":"DescribeResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResourceRequest"}, + "output":{"shape":"DescribeResourceResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

    Retrieves the current data access role for the given resource registered in AWS Lake Formation.

    " + }, + "GetDataLakeSettings":{ + "name":"GetDataLakeSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDataLakeSettingsRequest"}, + "output":{"shape":"GetDataLakeSettingsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

    Retrieves the list of the data lake administrators of a Lake Formation-managed data lake.

    " + }, + "GetEffectivePermissionsForPath":{ + "name":"GetEffectivePermissionsForPath", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEffectivePermissionsForPathRequest"}, + "output":{"shape":"GetEffectivePermissionsForPathResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

    Returns the Lake Formation permissions for a specified table or database resource located at a path in Amazon S3. GetEffectivePermissionsForPath will not return databases and tables if the catalog is encrypted.
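
    A hedged sketch (not part of this diff) of paging through the results by hand using the NextToken/MaxResults members defined below, assuming the generated v2 LakeFormationClient; the S3 path ARN is hypothetical.

    import software.amazon.awssdk.services.lakeformation.LakeFormationClient;
    import software.amazon.awssdk.services.lakeformation.model.GetEffectivePermissionsForPathRequest;
    import software.amazon.awssdk.services.lakeformation.model.GetEffectivePermissionsForPathResponse;

    public final class EffectivePermissionsSketch {
        public static void listAll(LakeFormationClient lakeFormation, String s3PathArn) {
            String nextToken = null;
            do {
                GetEffectivePermissionsForPathResponse page = lakeFormation.getEffectivePermissionsForPath(
                        GetEffectivePermissionsForPathRequest.builder()
                                .resourceArn(s3PathArn)
                                .maxResults(50)
                                .nextToken(nextToken) // null on the first call
                                .build());
                page.permissions().forEach(p -> System.out.println(p.principal() + " -> " + p.permissions()));
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }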

    " + }, + "GrantPermissions":{ + "name":"GrantPermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GrantPermissionsRequest"}, + "output":{"shape":"GrantPermissionsResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Grants permissions to the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3.

    For information about permissions, see Security and Access Control to Metadata and Data.
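
    For illustration only (not part of this diff): granting CREATE_TABLE on a database to an IAM role might look like the sketch below, assuming the generated v2 LakeFormationClient and a Resource shape with a database member; the role ARN is hypothetical.

    import software.amazon.awssdk.services.lakeformation.LakeFormationClient;
    import software.amazon.awssdk.services.lakeformation.model.DataLakePrincipal;
    import software.amazon.awssdk.services.lakeformation.model.DatabaseResource;
    import software.amazon.awssdk.services.lakeformation.model.GrantPermissionsRequest;
    import software.amazon.awssdk.services.lakeformation.model.Permission;
    import software.amazon.awssdk.services.lakeformation.model.Resource;

    public final class GrantSketch {
        public static void grantCreateTable(LakeFormationClient lakeFormation, String roleArn, String databaseName) {
            lakeFormation.grantPermissions(GrantPermissionsRequest.builder()
                    .principal(DataLakePrincipal.builder()
                            .dataLakePrincipalIdentifier(roleArn) // an IAM user or role ARN
                            .build())
                    .resource(Resource.builder()
                            .database(DatabaseResource.builder().name(databaseName).build())
                            .build())
                    .permissions(Permission.CREATE_TABLE)
                    .build());
        }
    }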

    " + }, + "ListPermissions":{ + "name":"ListPermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPermissionsRequest"}, + "output":{"shape":"ListPermissionsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

    Returns a list of the principal permissions on the resource, filtered by the permissions of the caller. For example, if you are granted an ALTER permission, you are able to see only the principal permissions for ALTER.

    This operation returns only those permissions that have been explicitly granted.

    For information about permissions, see Security and Access Control to Metadata and Data.

    " + }, + "ListResources":{ + "name":"ListResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListResourcesRequest"}, + "output":{"shape":"ListResourcesResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

    Lists the resources registered to be managed by the Data Catalog.

    " + }, + "PutDataLakeSettings":{ + "name":"PutDataLakeSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDataLakeSettingsRequest"}, + "output":{"shape":"PutDataLakeSettingsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Sets the list of data lake administrators who have admin privileges on all resources managed by Lake Formation. For more information on admin privileges, see Granting Lake Formation Permissions.

    This API replaces the current list of data lake admins with the new list being passed. To add an admin, fetch the current list, add the new admin to that list, and pass the full list in this API.
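
    A hedged read-modify-write sketch (not part of this diff) of that fetch-and-replace flow, assuming the generated v2 LakeFormationClient and that PutDataLakeSettingsRequest accepts a dataLakeSettings member; the admin ARN is hypothetical.

    import java.util.ArrayList;
    import java.util.List;
    import software.amazon.awssdk.services.lakeformation.LakeFormationClient;
    import software.amazon.awssdk.services.lakeformation.model.DataLakePrincipal;
    import software.amazon.awssdk.services.lakeformation.model.DataLakeSettings;
    import software.amazon.awssdk.services.lakeformation.model.GetDataLakeSettingsRequest;
    import software.amazon.awssdk.services.lakeformation.model.PutDataLakeSettingsRequest;

    public final class AddAdminSketch {
        public static void addAdmin(LakeFormationClient lakeFormation, String newAdminArn) {
            // Fetch the current settings, append the new admin, and write the full list back.
            DataLakeSettings current = lakeFormation
                    .getDataLakeSettings(GetDataLakeSettingsRequest.builder().build())
                    .dataLakeSettings();
            List<DataLakePrincipal> admins = new ArrayList<>(current.dataLakeAdmins());
            admins.add(DataLakePrincipal.builder().dataLakePrincipalIdentifier(newAdminArn).build());
            lakeFormation.putDataLakeSettings(PutDataLakeSettingsRequest.builder()
                    .dataLakeSettings(current.toBuilder().dataLakeAdmins(admins).build())
                    .build());
        }
    }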

    " + }, + "RegisterResource":{ + "name":"RegisterResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterResourceRequest"}, + "output":{"shape":"RegisterResourceResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"AlreadyExistsException"} + ], + "documentation":"

    Registers the resource as managed by the Data Catalog.

    To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.

    The following request registers a new location and gives AWS Lake Formation permission to use the service-linked role to access that location.

    ResourceArn = arn:aws:s3:::my-bucket UseServiceLinkedRole = true

    If UseServiceLinkedRole is not set to true, you must provide or set the RoleArn:

    arn:aws:iam::12345:role/my-data-access-role
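
    Expressed with the generated v2 client, the request above might look like the following sketch (not part of this diff), assuming a RegisterResourceRequest with resourceArn, useServiceLinkedRole, and roleArn members.

    import software.amazon.awssdk.services.lakeformation.LakeFormationClient;
    import software.amazon.awssdk.services.lakeformation.model.RegisterResourceRequest;

    public final class RegisterResourceSketch {
        public static void register(LakeFormationClient lakeFormation) {
            lakeFormation.registerResource(RegisterResourceRequest.builder()
                    .resourceArn("arn:aws:s3:::my-bucket")
                    .useServiceLinkedRole(true)
                    // .roleArn("arn:aws:iam::12345:role/my-data-access-role") // required instead when not using the service-linked role
                    .build());
        }
    }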

    " + }, + "RevokePermissions":{ + "name":"RevokePermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokePermissionsRequest"}, + "output":{"shape":"RevokePermissionsResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Revokes permissions from the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3.

    " + }, + "UpdateResource":{ + "name":"UpdateResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateResourceRequest"}, + "output":{"shape":"UpdateResourceResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

    Updates the data access role used for vending access to the given (registered) resource in AWS Lake Formation.

    " + } + }, + "shapes":{ + "AlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

    A message describing the problem.

    " + } + }, + "documentation":"

    A resource to be created or added already exists.

    ", + "exception":true + }, + "BatchGrantPermissionsRequest":{ + "type":"structure", + "required":["Entries"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

    " + }, + "Entries":{ + "shape":"BatchPermissionsRequestEntryList", + "documentation":"

    A list of up to 20 entries for resource permissions to be granted by batch operation to the principal.

    " + } + } + }, + "BatchGrantPermissionsResponse":{ + "type":"structure", + "members":{ + "Failures":{ + "shape":"BatchPermissionsFailureList", + "documentation":"

    A list of failures to grant permissions to the resources.

    " + } + } + }, + "BatchPermissionsFailureEntry":{ + "type":"structure", + "members":{ + "RequestEntry":{ + "shape":"BatchPermissionsRequestEntry", + "documentation":"

    An identifier for an entry of the batch request.

    " + }, + "Error":{ + "shape":"ErrorDetail", + "documentation":"

    An error message that applies to the failure of the entry.

    " + } + }, + "documentation":"

    A list of failures when performing a batch grant or batch revoke operation.

    " + }, + "BatchPermissionsFailureList":{ + "type":"list", + "member":{"shape":"BatchPermissionsFailureEntry"} + }, + "BatchPermissionsRequestEntry":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Identifier", + "documentation":"

    A unique identifier for the batch permissions request entry.

    " + }, + "Principal":{ + "shape":"DataLakePrincipal", + "documentation":"

    The principal to be granted a permission.

    " + }, + "Resource":{ + "shape":"Resource", + "documentation":"

    The resource to which the principal is to be granted a permission.

    " + }, + "Permissions":{ + "shape":"PermissionList", + "documentation":"

    The permissions to be granted.

    " + }, + "PermissionsWithGrantOption":{ + "shape":"PermissionList", + "documentation":"

    Indicates if the option to pass permissions is granted.

    " + } + }, + "documentation":"

    A permission to a resource granted by batch operation to the principal.

    " + }, + "BatchPermissionsRequestEntryList":{ + "type":"list", + "member":{"shape":"BatchPermissionsRequestEntry"} + }, + "BatchRevokePermissionsRequest":{ + "type":"structure", + "required":["Entries"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

    " + }, + "Entries":{ + "shape":"BatchPermissionsRequestEntryList", + "documentation":"

    A list of up to 20 entries for resource permissions to be revoked by batch operation to the principal.

    " + } + } + }, + "BatchRevokePermissionsResponse":{ + "type":"structure", + "members":{ + "Failures":{ + "shape":"BatchPermissionsFailureList", + "documentation":"

    A list of failures to revoke permissions to the resources.

    " + } + } + }, + "CatalogIdString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "CatalogResource":{ + "type":"structure", + "members":{ + }, + "documentation":"

    A structure for the catalog object.

    " + }, + "ColumnNames":{ + "type":"list", + "member":{"shape":"NameString"} + }, + "ColumnWildcard":{ + "type":"structure", + "members":{ + "ExcludedColumnNames":{ + "shape":"ColumnNames", + "documentation":"

    A list of column names to exclude. Any column whose name appears in this list is excluded.

    " + } + }, + "documentation":"

    A wildcard object, consisting of an optional list of excluded column names or indexes.

    " + }, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "EQ", + "NE", + "LE", + "LT", + "GE", + "GT", + "CONTAINS", + "NOT_CONTAINS", + "BEGINS_WITH", + "IN", + "BETWEEN" + ] + }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

    A message describing the problem.

    " + } + }, + "documentation":"

    Two processes are trying to modify a resource simultaneously.

    ", + "exception":true + }, + "DataLakePrincipal":{ + "type":"structure", + "members":{ + "DataLakePrincipalIdentifier":{ + "shape":"DataLakePrincipalString", + "documentation":"

    An identifier for the AWS Lake Formation principal.

    " + } + }, + "documentation":"

    The AWS Lake Formation principal. Supported principals are IAM users or IAM roles.

    " + }, + "DataLakePrincipalList":{ + "type":"list", + "member":{"shape":"DataLakePrincipal"}, + "max":10, + "min":0 + }, + "DataLakePrincipalString":{ + "type":"string", + "max":255, + "min":1 + }, + "DataLakeResourceType":{ + "type":"string", + "enum":[ + "CATALOG", + "DATABASE", + "TABLE", + "DATA_LOCATION" + ] + }, + "DataLakeSettings":{ + "type":"structure", + "members":{ + "DataLakeAdmins":{ + "shape":"DataLakePrincipalList", + "documentation":"

    A list of AWS Lake Formation principals. Supported principals are IAM users or IAM roles.

    " + }, + "CreateDatabaseDefaultPermissions":{ + "shape":"PrincipalPermissionsList", + "documentation":"

    A structure representing a list of up to three principal permissions entries for default create database permissions.

    " + }, + "CreateTableDefaultPermissions":{ + "shape":"PrincipalPermissionsList", + "documentation":"

    A structure representing a list of up to three principal permissions entries for default create table permissions.

    " + }, + "TrustedResourceOwners":{ + "shape":"TrustedResourceOwners", + "documentation":"

    A list of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). The user ARNs can be logged in the resource owner's AWS CloudTrail log.

    You may want to specify this property when you are in a high-trust boundary, such as the same team or company.

    " + } + }, + "documentation":"

    A structure representing a list of AWS Lake Formation principals designated as data lake administrators and lists of principal permission entries for default create database and default create table permissions.

    " + }, + "DataLocationResource":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog where the location is registered with AWS Lake Formation. By default, it is the account ID of the caller.

    " + }, + "ResourceArn":{ + "shape":"ResourceArnString", + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the data location resource.

    " + } + }, + "documentation":"

    A structure for a data location object where permissions are granted or revoked.

    " + }, + "DatabaseResource":{ + "type":"structure", + "required":["Name"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, it is the account ID of the caller.

    " + }, + "Name":{ + "shape":"NameString", + "documentation":"

    The name of the database resource. Unique to the Data Catalog.

    " + } + }, + "documentation":"

    A structure for the database object.

    " + }, + "DeregisterResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArnString", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to deregister.

    " + } + } + }, + "DeregisterResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArnString", + "documentation":"

    The resource ARN.

    " + } + } + }, + "DescribeResourceResponse":{ + "type":"structure", + "members":{ + "ResourceInfo":{ + "shape":"ResourceInfo", + "documentation":"

    A structure containing information about an AWS Lake Formation resource.

    " + } + } + }, + "DescriptionString":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "DetailsMap":{ + "type":"structure", + "members":{ + "ResourceShare":{ + "shape":"ResourceShareList", + "documentation":"

    A share resource ARN for a catalog resource shared through AWS Resource Access Manager (AWS RAM).

    " + } + }, + "documentation":"

    A structure containing the additional details to be returned in the AdditionalDetails attribute of PrincipalResourcePermissions.

    If a catalog resource is shared through AWS Resource Access Manager (AWS RAM), then there will exist a corresponding RAM share resource ARN.

    " + }, + "EntityNotFoundException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

    A message describing the problem.

    " + } + }, + "documentation":"

    A specified entity does not exist.

    ", + "exception":true + }, + "ErrorDetail":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"NameString", + "documentation":"

    The code associated with this error.

    " + }, + "ErrorMessage":{ + "shape":"DescriptionString", + "documentation":"

    A message describing the error.

    " + } + }, + "documentation":"

    Contains details about an error.

    " + }, + "FieldNameString":{ + "type":"string", + "enum":[ + "RESOURCE_ARN", + "ROLE_ARN", + "LAST_MODIFIED" + ] + }, + "FilterCondition":{ + "type":"structure", + "members":{ + "Field":{ + "shape":"FieldNameString", + "documentation":"

    The field to filter in the filter condition.

    " + }, + "ComparisonOperator":{ + "shape":"ComparisonOperator", + "documentation":"

    The comparison operator used in the filter condition.

    " + }, + "StringValueList":{ + "shape":"StringValueList", + "documentation":"

    A string with values used in evaluating the filter condition.

    " + } + }, + "documentation":"

    This structure describes the filtering of columns in a table based on a filter condition.

    " + }, + "FilterConditionList":{ + "type":"list", + "member":{"shape":"FilterCondition"}, + "max":20, + "min":1 + }, + "GetDataLakeSettingsRequest":{ + "type":"structure", + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

    " + } + } + }, + "GetDataLakeSettingsResponse":{ + "type":"structure", + "members":{ + "DataLakeSettings":{ + "shape":"DataLakeSettings", + "documentation":"

    A structure representing a list of AWS Lake Formation principals designated as data lake administrators.

    " + } + } + }, + "GetEffectivePermissionsForPathRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

    " + }, + "ResourceArn":{ + "shape":"ResourceArnString", + "documentation":"

    The Amazon Resource Name (ARN) of the resource for which you want to get permissions.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A continuation token, if this is not the first call to retrieve this list.

    " + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of results to return.

    " + } + } + }, + "GetEffectivePermissionsForPathResponse":{ + "type":"structure", + "members":{ + "Permissions":{ + "shape":"PrincipalResourcePermissionsList", + "documentation":"

    A list of the permissions for the specified table or database resource located at the path in Amazon S3.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A continuation token, if this is not the first call to retrieve this list.

    " + } + } + }, + "GrantPermissionsRequest":{ + "type":"structure", + "required":[ + "Principal", + "Resource", + "Permissions" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

    " + }, + "Principal":{ + "shape":"DataLakePrincipal", + "documentation":"

    The principal to be granted the permissions on the resource. Supported principals are IAM users or IAM roles, and they are defined by their principal type and their ARN.

    Note that if you define a resource with a particular ARN, then later delete and recreate a resource with that same ARN, the resource maintains the permissions already granted.

    " + }, + "Resource":{ + "shape":"Resource", + "documentation":"

    The resource to which permissions are to be granted. Resources in AWS Lake Formation are the Data Catalog, databases, and tables.

    " + }, + "Permissions":{ + "shape":"PermissionList", + "documentation":"

    The permissions granted to the principal on the resource. AWS Lake Formation defines privileges to grant and revoke access to metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3. AWS Lake Formation requires that each principal be authorized to perform a specific task on AWS Lake Formation resources.

    " + }, + "PermissionsWithGrantOption":{ + "shape":"PermissionList", + "documentation":"

    Indicates a list of the granted permissions that the principal may pass to other users. These permissions may only be a subset of the permissions granted in the Privileges.

    " + } + } + }, + "GrantPermissionsResponse":{ + "type":"structure", + "members":{ + } + }, + "IAMRoleArn":{ + "type":"string", + "pattern":"arn:aws:iam::[0-9]*:role/.*" + }, + "Identifier":{ + "type":"string", + "max":255, + "min":1 + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

    A message describing the problem.

    " + } + }, + "documentation":"

    An internal service error occurred.

    ", + "exception":true, + "fault":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

    A message describing the problem.

    " + } + }, + "documentation":"

    The input provided was not valid.

    ", + "exception":true + }, + "LastModifiedTimestamp":{"type":"timestamp"}, + "ListPermissionsRequest":{ + "type":"structure", + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

    " + }, + "Principal":{ + "shape":"DataLakePrincipal", + "documentation":"

    Specifies a principal to filter the permissions returned.

    " + }, + "ResourceType":{ + "shape":"DataLakeResourceType", + "documentation":"

    Specifies a resource type to filter the permissions returned.

    " + }, + "Resource":{ + "shape":"Resource", + "documentation":"

    A resource where you will get a list of the principal permissions.

    This operation does not support getting privileges on a table with columns. Instead, call this operation on the table, and the operation returns the table and the table with columns.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A continuation token, if this is not the first call to retrieve this list.

    " + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of results to return.

    " + } + } + }, + "ListPermissionsResponse":{ + "type":"structure", + "members":{ + "PrincipalResourcePermissions":{ + "shape":"PrincipalResourcePermissionsList", + "documentation":"

    A list of principals and their permissions on the resource for the specified principal and resource types.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A continuation token, if this is not the first call to retrieve this list.

    " + } + } + }, + "ListResourcesRequest":{ + "type":"structure", + "members":{ + "FilterConditionList":{ + "shape":"FilterConditionList", + "documentation":"

    Any applicable row-level and/or column-level filtering conditions for the resources.

    " + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of resource results.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A continuation token, if this is not the first call to retrieve these resources.

    " + } + } + }, + "ListResourcesResponse":{ + "type":"structure", + "members":{ + "ResourceInfoList":{ + "shape":"ResourceInfoList", + "documentation":"

    A summary of the data lake resources.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A continuation token, if this is not the first call to retrieve these resources.

    " + } + } + }, + "MessageString":{"type":"string"}, + "NameString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "NullableBoolean":{ + "type":"boolean", + "box":true + }, + "OperationTimeoutException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

    A message describing the problem.

    " + } + }, + "documentation":"

    The operation timed out.

    ", + "exception":true + }, + "PageSize":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "Permission":{ + "type":"string", + "enum":[ + "ALL", + "SELECT", + "ALTER", + "DROP", + "DELETE", + "INSERT", + "DESCRIBE", + "CREATE_DATABASE", + "CREATE_TABLE", + "DATA_LOCATION_ACCESS" + ] + }, + "PermissionList":{ + "type":"list", + "member":{"shape":"Permission"} + }, + "PrincipalPermissions":{ + "type":"structure", + "members":{ + "Principal":{ + "shape":"DataLakePrincipal", + "documentation":"

    The principal who is granted permissions.

    " + }, + "Permissions":{ + "shape":"PermissionList", + "documentation":"

    The permissions that are granted to the principal.

    " + } + }, + "documentation":"

    Permissions granted to a principal.

    " + }, + "PrincipalPermissionsList":{ + "type":"list", + "member":{"shape":"PrincipalPermissions"} + }, + "PrincipalResourcePermissions":{ + "type":"structure", + "members":{ + "Principal":{ + "shape":"DataLakePrincipal", + "documentation":"

    The Data Lake principal to be granted or revoked permissions.

    " + }, + "Resource":{ + "shape":"Resource", + "documentation":"

    The resource where permissions are to be granted or revoked.

    " + }, + "Permissions":{ + "shape":"PermissionList", + "documentation":"

    The permissions to be granted or revoked on the resource.

    " + }, + "PermissionsWithGrantOption":{ + "shape":"PermissionList", + "documentation":"

    Indicates whether to grant the ability to grant permissions (as a subset of permissions granted).

    " + }, + "AdditionalDetails":{ + "shape":"DetailsMap", + "documentation":"

    This attribute can be used to return any additional details of PrincipalResourcePermissions. Currently returns only a RAM resource share ARN.

    " + } + }, + "documentation":"

    The permissions granted or revoked on a resource.

    " + }, + "PrincipalResourcePermissionsList":{ + "type":"list", + "member":{"shape":"PrincipalResourcePermissions"} + }, + "PutDataLakeSettingsRequest":{ + "type":"structure", + "required":["DataLakeSettings"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

    " + }, + "DataLakeSettings":{ + "shape":"DataLakeSettings", + "documentation":"

    A structure representing a list of AWS Lake Formation principals designated as data lake administrators.

    " + } + } + }, + "PutDataLakeSettingsResponse":{ + "type":"structure", + "members":{ + } + }, + "RAMResourceShareArn":{"type":"string"}, + "RegisterResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArnString", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to register.

    " + }, + "UseServiceLinkedRole":{ + "shape":"NullableBoolean", + "documentation":"

    Designates an AWS Identity and Access Management (IAM) service-linked role by registering this role with the Data Catalog. A service-linked role is a unique type of IAM role that is linked directly to Lake Formation.

    For more information, see Using Service-Linked Roles for Lake Formation.

    " + }, + "RoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

    The identifier for the role that registers the resource.

    " + } + } + }, + "RegisterResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "Resource":{ + "type":"structure", + "members":{ + "Catalog":{ + "shape":"CatalogResource", + "documentation":"

    The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

    " + }, + "Database":{ + "shape":"DatabaseResource", + "documentation":"

    The database for the resource. Unique to the Data Catalog. A database is a set of associated table definitions organized into a logical group. You can Grant and Revoke database permissions to a principal.

    " + }, + "Table":{ + "shape":"TableResource", + "documentation":"

    The table for the resource. A table is a metadata definition that represents your data. You can Grant and Revoke table privileges to a principal.

    " + }, + "TableWithColumns":{ + "shape":"TableWithColumnsResource", + "documentation":"

    The table with columns for the resource. A principal with permissions to this resource can select metadata from the columns of a table in the Data Catalog and the underlying data in Amazon S3.

    " + }, + "DataLocation":{ + "shape":"DataLocationResource", + "documentation":"

    The location of an Amazon S3 path where permissions are granted or revoked.

    " + } + }, + "documentation":"

    A structure for the resource.

    " + }, + "ResourceArnString":{"type":"string"}, + "ResourceInfo":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"ResourceArnString", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + }, + "RoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

    The IAM role that registered a resource.

    " + }, + "LastModified":{ + "shape":"LastModifiedTimestamp", + "documentation":"

    The date and time the resource was last modified.

    " + } + }, + "documentation":"

    A structure containing information about an AWS Lake Formation resource.

    " + }, + "ResourceInfoList":{ + "type":"list", + "member":{"shape":"ResourceInfo"} + }, + "ResourceShareList":{ + "type":"list", + "member":{"shape":"RAMResourceShareArn"} + }, + "RevokePermissionsRequest":{ + "type":"structure", + "required":[ + "Principal", + "Resource", + "Permissions" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

    " + }, + "Principal":{ + "shape":"DataLakePrincipal", + "documentation":"

    The principal whose permissions on the resource are to be revoked.

    " + }, + "Resource":{ + "shape":"Resource", + "documentation":"

    The resource from which permissions are to be revoked.

    " + }, + "Permissions":{ + "shape":"PermissionList", + "documentation":"

    The permissions to be revoked from the principal on the resource. For information about permissions, see Security and Access Control to Metadata and Data.

    " + }, + "PermissionsWithGrantOption":{ + "shape":"PermissionList", + "documentation":"

    Indicates a list of permissions for which to revoke the grant option allowing the principal to pass permissions to other principals.

    " + } + } + }, + "RevokePermissionsResponse":{ + "type":"structure", + "members":{ + } + }, + "StringValue":{"type":"string"}, + "StringValueList":{ + "type":"list", + "member":{"shape":"StringValue"} + }, + "TableResource":{ + "type":"structure", + "required":["DatabaseName"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, it is the account ID of the caller.

    " + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

    The name of the database for the table. Unique to a Data Catalog. A database is a set of associated table definitions organized into a logical group. You can Grant and Revoke database privileges to a principal.

    " + }, + "Name":{ + "shape":"NameString", + "documentation":"

    The name of the table.

    " + }, + "TableWildcard":{ + "shape":"TableWildcard", + "documentation":"

    A wildcard object representing every table under a database.

    At least one of TableResource$Name or TableResource$TableWildcard is required.

    " + } + }, + "documentation":"

    A structure for the table object. A table is a metadata definition that represents your data. You can Grant and Revoke table privileges to a principal.

    " + }, + "TableWildcard":{ + "type":"structure", + "members":{ + }, + "documentation":"

    A wildcard object representing every table under a database.

    " + }, + "TableWithColumnsResource":{ + "type":"structure", + "required":[ + "DatabaseName", + "Name" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

    The identifier for the Data Catalog. By default, it is the account ID of the caller.

    " + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

    The name of the database for the table with columns resource. Unique to the Data Catalog. A database is a set of associated table definitions organized into a logical group. You can Grant and Revoke database privileges to a principal.

    " + }, + "Name":{ + "shape":"NameString", + "documentation":"

    The name of the table resource. A table is a metadata definition that represents your data. You can Grant and Revoke table privileges to a principal.

    " + }, + "ColumnNames":{ + "shape":"ColumnNames", + "documentation":"

    The list of column names for the table. At least one of ColumnNames or ColumnWildcard is required.

    " + }, + "ColumnWildcard":{ + "shape":"ColumnWildcard", + "documentation":"

    A wildcard specified by a ColumnWildcard object. At least one of ColumnNames or ColumnWildcard is required.

    " + } + }, + "documentation":"

    A structure for a table with columns object. This object is only used when granting a SELECT permission.

    This object must take a value for at least one of ColumnNames or ColumnWildcard.

    " + }, + "Token":{"type":"string"}, + "TrustedResourceOwners":{ + "type":"list", + "member":{"shape":"CatalogIdString"} + }, + "UpdateResourceRequest":{ + "type":"structure", + "required":[ + "RoleArn", + "ResourceArn" + ], + "members":{ + "RoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

    The new role to use for the given resource registered in AWS Lake Formation.

    " + }, + "ResourceArn":{ + "shape":"ResourceArnString", + "documentation":"

    The resource ARN.

    " + } + } + }, + "UpdateResourceResponse":{ + "type":"structure", + "members":{ + } + } + }, + "documentation":"AWS Lake Formation

    Defines the public endpoint for the AWS Lake Formation service.

    " +} diff --git a/services/lambda/build.properties b/services/lambda/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/lambda/build.properties +++ b/services/lambda/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/lambda/pom.xml b/services/lambda/pom.xml index 0a0967b37b7b..104de2135ec1 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + location + AWS Java SDK :: Services :: Location + The AWS Java SDK for Location module holds the client classes that are used for + communicating with Location. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.location + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/location/src/main/resources/codegen-resources/paginators-1.json b/services/location/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..72701ae990ee --- /dev/null +++ b/services/location/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,44 @@ +{ + "pagination": { + "GetDevicePositionHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "DevicePositions" + }, + "ListGeofenceCollections": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Entries" + }, + "ListGeofences": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Entries" + }, + "ListMaps": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Entries" + }, + "ListPlaceIndexes": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Entries" + }, + "ListTrackerConsumers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ConsumerArns" + }, + "ListTrackers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Entries" + } + } +} diff --git a/services/location/src/main/resources/codegen-resources/service-2.json b/services/location/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..4ee1f6902c40 --- /dev/null +++ b/services/location/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2903 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-11-19", + "endpointPrefix":"geo", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Location Service", + "serviceId":"Location", + "signatureVersion":"v4", + "signingName":"geo", + "uid":"location-2020-11-19" + }, + "operations":{ + "AssociateTrackerConsumer":{ + "name":"AssociateTrackerConsumer", + "http":{ + "method":"POST", + "requestUri":"/tracking/v0/trackers/{TrackerName}/consumers", + "responseCode":200 + }, + "input":{"shape":"AssociateTrackerConsumerRequest"}, + "output":{"shape":"AssociateTrackerConsumerResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + 
{"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates an association between a geofence collection and a tracker resource. This allows the tracker resource to communicate location data to the linked geofence collection.

    ", + "endpoint":{"hostPrefix":"tracking."} + }, + "BatchDeleteGeofence":{ + "name":"BatchDeleteGeofence", + "http":{ + "method":"POST", + "requestUri":"/geofencing/v0/collections/{CollectionName}/delete-geofences", + "responseCode":200 + }, + "input":{"shape":"BatchDeleteGeofenceRequest"}, + "output":{"shape":"BatchDeleteGeofenceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a batch of geofences from a geofence collection.

    This action deletes the resource permanently. You can't undo this action.

    ", + "endpoint":{"hostPrefix":"geofencing."} + }, + "BatchEvaluateGeofences":{ + "name":"BatchEvaluateGeofences", + "http":{ + "method":"POST", + "requestUri":"/geofencing/v0/collections/{CollectionName}/positions", + "responseCode":200 + }, + "input":{"shape":"BatchEvaluateGeofencesRequest"}, + "output":{"shape":"BatchEvaluateGeofencesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Used in geofence monitoring. Evaluates device positions against the geofences in a given geofence collection.

    ", + "endpoint":{"hostPrefix":"geofencing."} + }, + "BatchGetDevicePosition":{ + "name":"BatchGetDevicePosition", + "http":{ + "method":"POST", + "requestUri":"/tracking/v0/trackers/{TrackerName}/get-positions", + "responseCode":200 + }, + "input":{"shape":"BatchGetDevicePositionRequest"}, + "output":{"shape":"BatchGetDevicePositionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    A batch request to retrieve device positions.

    The response will return the device positions from the last 24 hours.

    ", + "endpoint":{"hostPrefix":"tracking."} + }, + "BatchPutGeofence":{ + "name":"BatchPutGeofence", + "http":{ + "method":"POST", + "requestUri":"/geofencing/v0/collections/{CollectionName}/put-geofences", + "responseCode":200 + }, + "input":{"shape":"BatchPutGeofenceRequest"}, + "output":{"shape":"BatchPutGeofenceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    A batch request for storing geofences into a given geofence collection.

    ", + "endpoint":{"hostPrefix":"geofencing."} + }, + "BatchUpdateDevicePosition":{ + "name":"BatchUpdateDevicePosition", + "http":{ + "method":"POST", + "requestUri":"/tracking/v0/trackers/{TrackerName}/positions", + "responseCode":200 + }, + "input":{"shape":"BatchUpdateDevicePositionRequest"}, + "output":{"shape":"BatchUpdateDevicePositionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Uploads a position update for one or more devices to a tracker resource. The data is used for API queries requesting the device position and position history.

    Limitation — Location data is sampled at a fixed rate of one position per 30-second interval and retained for one year before it is deleted.
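
    As a rough sketch only, not part of the service model: one position update sent through the AWS SDK for Java v2 generated client might look like the following. The tracker and device names are hypothetical, and the coordinate pair is assumed to be ordered [longitude, latitude].

    import java.time.Instant;
    import software.amazon.awssdk.services.location.LocationClient;
    import software.amazon.awssdk.services.location.model.*;

    public class UpdatePositionSketch {
        public static void main(String[] args) {
            try (LocationClient geo = LocationClient.create()) {
                // Report a single sampled position for one device.
                geo.batchUpdateDevicePosition(BatchUpdateDevicePositionRequest.builder()
                    .trackerName(\"ExampleTracker\")
                    .updates(DevicePositionUpdate.builder()
                        .deviceId(\"ExampleDevice-1\")
                        .position(-123.1187, 49.2819)
                        .sampleTime(Instant.now())
                        .build())
                    .build());
            }
        }
    }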

    ", + "endpoint":{"hostPrefix":"tracking."} + }, + "CreateGeofenceCollection":{ + "name":"CreateGeofenceCollection", + "http":{ + "method":"POST", + "requestUri":"/geofencing/v0/collections", + "responseCode":200 + }, + "input":{"shape":"CreateGeofenceCollectionRequest"}, + "output":{"shape":"CreateGeofenceCollectionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a geofence collection, which manages and stores geofences.

    ", + "endpoint":{"hostPrefix":"geofencing."}, + "idempotent":true + }, + "CreateMap":{ + "name":"CreateMap", + "http":{ + "method":"POST", + "requestUri":"/maps/v0/maps", + "responseCode":200 + }, + "input":{"shape":"CreateMapRequest"}, + "output":{"shape":"CreateMapResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a map resource in your AWS account, which provides map tiles of different styles sourced from global location data providers.

    By using Maps, you agree that AWS may transmit your API queries to your selected third party provider for processing, which may be outside the AWS region you are currently using. For more information, see the AWS Service Terms for Amazon Location Service.

    ", + "endpoint":{"hostPrefix":"maps."}, + "idempotent":true + }, + "CreatePlaceIndex":{ + "name":"CreatePlaceIndex", + "http":{ + "method":"POST", + "requestUri":"/places/v0/indexes", + "responseCode":200 + }, + "input":{"shape":"CreatePlaceIndexRequest"}, + "output":{"shape":"CreatePlaceIndexResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a Place index resource in your AWS account, which supports Places functions with geospatial data sourced from your chosen data provider.

    By using Places, you agree that AWS may transmit your API queries to your selected third party provider for processing, which may be outside the AWS region you are currently using.

    Because of licensing limitations, you may not use HERE to store results for locations in Japan. For more information, see the AWS Service Terms for Amazon Location Service.

    ", + "endpoint":{"hostPrefix":"places."}, + "idempotent":true + }, + "CreateTracker":{ + "name":"CreateTracker", + "http":{ + "method":"POST", + "requestUri":"/tracking/v0/trackers", + "responseCode":200 + }, + "input":{"shape":"CreateTrackerRequest"}, + "output":{"shape":"CreateTrackerResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a tracker resource in your AWS account, which lets you retrieve current and historical location of devices.

    ", + "endpoint":{"hostPrefix":"tracking."}, + "idempotent":true + }, + "DeleteGeofenceCollection":{ + "name":"DeleteGeofenceCollection", + "http":{ + "method":"DELETE", + "requestUri":"/geofencing/v0/collections/{CollectionName}", + "responseCode":200 + }, + "input":{"shape":"DeleteGeofenceCollectionRequest"}, + "output":{"shape":"DeleteGeofenceCollectionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a geofence collection from your AWS account.

    This action deletes the resource permanently. You can't undo this action. If the geofence collection is the target of a tracker resource, the devices will no longer be monitored.

    ", + "endpoint":{"hostPrefix":"geofencing."}, + "idempotent":true + }, + "DeleteMap":{ + "name":"DeleteMap", + "http":{ + "method":"DELETE", + "requestUri":"/maps/v0/maps/{MapName}", + "responseCode":200 + }, + "input":{"shape":"DeleteMapRequest"}, + "output":{"shape":"DeleteMapResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a map resource from your AWS account.

    This action deletes the resource permanently. You cannot undo this action. If the map is being used in an application, the map may not render.

    ", + "endpoint":{"hostPrefix":"maps."}, + "idempotent":true + }, + "DeletePlaceIndex":{ + "name":"DeletePlaceIndex", + "http":{ + "method":"DELETE", + "requestUri":"/places/v0/indexes/{IndexName}", + "responseCode":200 + }, + "input":{"shape":"DeletePlaceIndexRequest"}, + "output":{"shape":"DeletePlaceIndexResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a Place index resource from your AWS account.

    This action deletes the resource permanently. You cannot undo this action.

    ", + "endpoint":{"hostPrefix":"places."}, + "idempotent":true + }, + "DeleteTracker":{ + "name":"DeleteTracker", + "http":{ + "method":"DELETE", + "requestUri":"/tracking/v0/trackers/{TrackerName}", + "responseCode":200 + }, + "input":{"shape":"DeleteTrackerRequest"}, + "output":{"shape":"DeleteTrackerResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a tracker resource from your AWS account.

    This action deletes the resource permanently. You can't undo this action. If the tracker resource is in use, you may encounter an error. Make sure that the target resource is not a dependency for your applications.

    ", + "endpoint":{"hostPrefix":"tracking."}, + "idempotent":true + }, + "DescribeGeofenceCollection":{ + "name":"DescribeGeofenceCollection", + "http":{ + "method":"GET", + "requestUri":"/geofencing/v0/collections/{CollectionName}", + "responseCode":200 + }, + "input":{"shape":"DescribeGeofenceCollectionRequest"}, + "output":{"shape":"DescribeGeofenceCollectionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves the geofence collection details.

    ", + "endpoint":{"hostPrefix":"geofencing."} + }, + "DescribeMap":{ + "name":"DescribeMap", + "http":{ + "method":"GET", + "requestUri":"/maps/v0/maps/{MapName}", + "responseCode":200 + }, + "input":{"shape":"DescribeMapRequest"}, + "output":{"shape":"DescribeMapResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves the map resource details.

    ", + "endpoint":{"hostPrefix":"maps."} + }, + "DescribePlaceIndex":{ + "name":"DescribePlaceIndex", + "http":{ + "method":"GET", + "requestUri":"/places/v0/indexes/{IndexName}", + "responseCode":200 + }, + "input":{"shape":"DescribePlaceIndexRequest"}, + "output":{"shape":"DescribePlaceIndexResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves the Place index resource details.

    ", + "endpoint":{"hostPrefix":"places."} + }, + "DescribeTracker":{ + "name":"DescribeTracker", + "http":{ + "method":"GET", + "requestUri":"/tracking/v0/trackers/{TrackerName}", + "responseCode":200 + }, + "input":{"shape":"DescribeTrackerRequest"}, + "output":{"shape":"DescribeTrackerResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves the tracker resource details.

    ", + "endpoint":{"hostPrefix":"tracking."} + }, + "DisassociateTrackerConsumer":{ + "name":"DisassociateTrackerConsumer", + "http":{ + "method":"DELETE", + "requestUri":"/tracking/v0/trackers/{TrackerName}/consumers/{ConsumerArn}", + "responseCode":200 + }, + "input":{"shape":"DisassociateTrackerConsumerRequest"}, + "output":{"shape":"DisassociateTrackerConsumerResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Removes the association between a tracker resource and a geofence collection.

    Once you unlink a tracker resource from a geofence collection, the tracker positions will no longer be automatically evaluated against geofences.

    ", + "endpoint":{"hostPrefix":"tracking."} + }, + "GetDevicePosition":{ + "name":"GetDevicePosition", + "http":{ + "method":"GET", + "requestUri":"/tracking/v0/trackers/{TrackerName}/devices/{DeviceId}/positions/latest", + "responseCode":200 + }, + "input":{"shape":"GetDevicePositionRequest"}, + "output":{"shape":"GetDevicePositionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves the latest device position.

    Limitation — Device positions are deleted after one year.

    ", + "endpoint":{"hostPrefix":"tracking."} + }, + "GetDevicePositionHistory":{ + "name":"GetDevicePositionHistory", + "http":{ + "method":"POST", + "requestUri":"/tracking/v0/trackers/{TrackerName}/devices/{DeviceId}/list-positions", + "responseCode":200 + }, + "input":{"shape":"GetDevicePositionHistoryRequest"}, + "output":{"shape":"GetDevicePositionHistoryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves the device position history from a tracker resource within a specified range of time.

    Limitation — Device positions are deleted after one year.
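
    A sketch only, not part of the service model: the GetDevicePositionHistory entry in paginators-1.json implies the generated client exposes a paginator, so iterating the full history with the AWS SDK for Java v2 might look like the following. The tracker and device names are hypothetical.

    import software.amazon.awssdk.services.location.LocationClient;
    import software.amazon.awssdk.services.location.model.GetDevicePositionHistoryRequest;

    public class PositionHistorySketch {
        public static void main(String[] args) {
            try (LocationClient geo = LocationClient.create()) {
                // Stream every recorded position across result pages.
                geo.getDevicePositionHistoryPaginator(GetDevicePositionHistoryRequest.builder()
                        .trackerName(\"ExampleTracker\")
                        .deviceId(\"ExampleDevice-1\")
                        .build())
                    .stream()
                    .flatMap(page -> page.devicePositions().stream())
                    .forEach(position -> System.out.println(position.position()));
            }
        }
    }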

    ", + "endpoint":{"hostPrefix":"tracking."} + }, + "GetGeofence":{ + "name":"GetGeofence", + "http":{ + "method":"GET", + "requestUri":"/geofencing/v0/collections/{CollectionName}/geofences/{GeofenceId}", + "responseCode":200 + }, + "input":{"shape":"GetGeofenceRequest"}, + "output":{"shape":"GetGeofenceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves the geofence details from a geofence collection.

    ", + "endpoint":{"hostPrefix":"geofencing."} + }, + "GetMapGlyphs":{ + "name":"GetMapGlyphs", + "http":{ + "method":"GET", + "requestUri":"/maps/v0/maps/{MapName}/glyphs/{FontStack}/{FontUnicodeRange}", + "responseCode":200 + }, + "input":{"shape":"GetMapGlyphsRequest"}, + "output":{"shape":"GetMapGlyphsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves glyphs used to display labels on a map.

    ", + "endpoint":{"hostPrefix":"maps."} + }, + "GetMapSprites":{ + "name":"GetMapSprites", + "http":{ + "method":"GET", + "requestUri":"/maps/v0/maps/{MapName}/sprites/{FileName}", + "responseCode":200 + }, + "input":{"shape":"GetMapSpritesRequest"}, + "output":{"shape":"GetMapSpritesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves the sprite sheet corresponding to a map resource. The sprite sheet is a PNG image paired with a JSON document describing the offsets of individual icons that will be displayed on a rendered map.

    ", + "endpoint":{"hostPrefix":"maps."} + }, + "GetMapStyleDescriptor":{ + "name":"GetMapStyleDescriptor", + "http":{ + "method":"GET", + "requestUri":"/maps/v0/maps/{MapName}/style-descriptor", + "responseCode":200 + }, + "input":{"shape":"GetMapStyleDescriptorRequest"}, + "output":{"shape":"GetMapStyleDescriptorResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves the map style descriptor from a map resource.

    The style descriptor contains specifications on how features render on a map. For example, what data to display, what order to display the data in, and the style for the data. Style descriptors follow the Mapbox Style Specification.

    ", + "endpoint":{"hostPrefix":"maps."} + }, + "GetMapTile":{ + "name":"GetMapTile", + "http":{ + "method":"GET", + "requestUri":"/maps/v0/maps/{MapName}/tiles/{Z}/{X}/{Y}", + "responseCode":200 + }, + "input":{"shape":"GetMapTileRequest"}, + "output":{"shape":"GetMapTileResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a vector data tile from the map resource. Map tiles are used by clients to render a map. They are addressed using a grid arrangement with an X coordinate, Y coordinate, and Z (zoom) level.

    The origin (0, 0) is the top left of the map. Increasing the zoom level by 1 doubles both the X and Y dimensions, so a tile containing data for the entire world at (0/0/0) will be split into 4 tiles at zoom 1 (1/0/0, 1/0/1, 1/1/0, 1/1/1).

    ", + "endpoint":{"hostPrefix":"maps."} + }, + "ListGeofenceCollections":{ + "name":"ListGeofenceCollections", + "http":{ + "method":"POST", + "requestUri":"/geofencing/v0/list-collections", + "responseCode":200 + }, + "input":{"shape":"ListGeofenceCollectionsRequest"}, + "output":{"shape":"ListGeofenceCollectionsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists geofence collections in your AWS account.

    ", + "endpoint":{"hostPrefix":"geofencing."} + }, + "ListGeofences":{ + "name":"ListGeofences", + "http":{ + "method":"POST", + "requestUri":"/geofencing/v0/collections/{CollectionName}/list-geofences", + "responseCode":200 + }, + "input":{"shape":"ListGeofencesRequest"}, + "output":{"shape":"ListGeofencesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists geofences stored in a given geofence collection.

    ", + "endpoint":{"hostPrefix":"geofencing."} + }, + "ListMaps":{ + "name":"ListMaps", + "http":{ + "method":"POST", + "requestUri":"/maps/v0/list-maps", + "responseCode":200 + }, + "input":{"shape":"ListMapsRequest"}, + "output":{"shape":"ListMapsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists map resources in your AWS account.

    ", + "endpoint":{"hostPrefix":"maps."} + }, + "ListPlaceIndexes":{ + "name":"ListPlaceIndexes", + "http":{ + "method":"POST", + "requestUri":"/places/v0/list-indexes", + "responseCode":200 + }, + "input":{"shape":"ListPlaceIndexesRequest"}, + "output":{"shape":"ListPlaceIndexesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists Place index resources in your AWS account.

    ", + "endpoint":{"hostPrefix":"places."} + }, + "ListTrackerConsumers":{ + "name":"ListTrackerConsumers", + "http":{ + "method":"POST", + "requestUri":"/tracking/v0/trackers/{TrackerName}/list-consumers", + "responseCode":200 + }, + "input":{"shape":"ListTrackerConsumersRequest"}, + "output":{"shape":"ListTrackerConsumersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists geofence collections currently associated with the given tracker resource.

    ", + "endpoint":{"hostPrefix":"tracking."} + }, + "ListTrackers":{ + "name":"ListTrackers", + "http":{ + "method":"POST", + "requestUri":"/tracking/v0/list-trackers", + "responseCode":200 + }, + "input":{"shape":"ListTrackersRequest"}, + "output":{"shape":"ListTrackersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists tracker resources in your AWS account.

    ", + "endpoint":{"hostPrefix":"tracking."} + }, + "PutGeofence":{ + "name":"PutGeofence", + "http":{ + "method":"PUT", + "requestUri":"/geofencing/v0/collections/{CollectionName}/geofences/{GeofenceId}", + "responseCode":200 + }, + "input":{"shape":"PutGeofenceRequest"}, + "output":{"shape":"PutGeofenceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Stores a geofence to a given geofence collection, or updates the geometry of an existing geofence if a geofence ID is included in the request.

    ", + "endpoint":{"hostPrefix":"geofencing."} + }, + "SearchPlaceIndexForPosition":{ + "name":"SearchPlaceIndexForPosition", + "http":{ + "method":"POST", + "requestUri":"/places/v0/indexes/{IndexName}/search/position", + "responseCode":200 + }, + "input":{"shape":"SearchPlaceIndexForPositionRequest"}, + "output":{"shape":"SearchPlaceIndexForPositionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Reverse geocodes a given coordinate and returns a legible address. Allows you to search for Places or points of interest near a given position.

    By using Places, you agree that AWS may transmit your API queries to your selected third party provider for processing, which may be outside the AWS region you are currently using.

    Because of licensing limitations, you may not use HERE to store results for locations in Japan. For more information, see the AWS Service Terms for Amazon Location Service.

    ", + "endpoint":{"hostPrefix":"places."} + }, + "SearchPlaceIndexForText":{ + "name":"SearchPlaceIndexForText", + "http":{ + "method":"POST", + "requestUri":"/places/v0/indexes/{IndexName}/search/text", + "responseCode":200 + }, + "input":{"shape":"SearchPlaceIndexForTextRequest"}, + "output":{"shape":"SearchPlaceIndexForTextResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Geocodes free-form text, such as an address, name, city, or region to allow you to search for Places or points of interest.

    Includes the option to apply additional parameters to narrow your list of results.

    You can search for places near a given position using BiasPosition, or filter results within a bounding box using FilterBBox. Providing both parameters simultaneously returns an error.

    By using Places, you agree that AWS may transmit your API queries to your selected third party provider for processing, which may be outside the AWS region you are currently using.

    Also, when using HERE as your data provider, you may not (a) use HERE Places for Asset Management, or (b) select the Storage option for the IntendedUse parameter when requesting Places in Japan. For more information, see the AWS Service Terms for Amazon Location Service.
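
    For illustration only, not part of the service model: a text search biased toward a position, issued through the AWS SDK for Java v2 generated client, might look like the sketch below. The index name, query text, and coordinates are hypothetical.

    import software.amazon.awssdk.services.location.LocationClient;
    import software.amazon.awssdk.services.location.model.SearchPlaceIndexForTextRequest;
    import software.amazon.awssdk.services.location.model.SearchPlaceIndexForTextResponse;

    public class TextSearchSketch {
        public static void main(String[] args) {
            try (LocationClient geo = LocationClient.create()) {
                // Bias the search toward a position; do not also set FilterBBox in the same request.
                SearchPlaceIndexForTextResponse response = geo.searchPlaceIndexForText(
                    SearchPlaceIndexForTextRequest.builder()
                        .indexName(\"ExamplePlaceIndex\")
                        .text(\"Granville St, Vancouver\")
                        .biasPosition(-123.1187, 49.2819)
                        .maxResults(5)
                        .build());
                System.out.println(response.results());
            }
        }
    }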

    ", + "endpoint":{"hostPrefix":"places."} + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "locationName":"message" + } + }, + "documentation":"

    The request was denied due to insufficient access or permission. Check with an administrator to verify your permissions.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Arn":{ + "type":"string", + "max":1600, + "min":0, + "pattern":"^arn(:[a-z0-9]+([.-][a-z0-9]+)*){2}(:([a-z0-9]+([.-][a-z0-9]+)*)?){2}:([^/].*)?$" + }, + "ArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "AssociateTrackerConsumerRequest":{ + "type":"structure", + "required":[ + "ConsumerArn", + "TrackerName" + ], + "members":{ + "ConsumerArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the geofence collection to be associated to tracker resource. Used when you need to specify a resource across all AWS.

    • Format example: arn:partition:service:region:account-id:resource-type:resource-id

    " + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The name of the tracker resource to be associated with a geofence collection.

    ", + "location":"uri", + "locationName":"TrackerName" + } + } + }, + "AssociateTrackerConsumerResponse":{ + "type":"structure", + "members":{ + } + }, + "BatchDeleteGeofenceError":{ + "type":"structure", + "required":[ + "Error", + "GeofenceId" + ], + "members":{ + "Error":{ + "shape":"BatchItemError", + "documentation":"

    Contains details associated with the batch error.

    " + }, + "GeofenceId":{ + "shape":"Id", + "documentation":"

    The geofence associated with the error message.

    " + } + }, + "documentation":"

    Contains error details for each geofence that failed to delete from the geofence collection.

    " + }, + "BatchDeleteGeofenceErrorList":{ + "type":"list", + "member":{"shape":"BatchDeleteGeofenceError"} + }, + "BatchDeleteGeofenceRequest":{ + "type":"structure", + "required":[ + "CollectionName", + "GeofenceIds" + ], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The geofence collection storing the geofences to be deleted.

    ", + "location":"uri", + "locationName":"CollectionName" + }, + "GeofenceIds":{ + "shape":"BatchDeleteGeofenceRequestGeofenceIdsList", + "documentation":"

    The batch of geofences to be deleted.

    " + } + } + }, + "BatchDeleteGeofenceRequestGeofenceIdsList":{ + "type":"list", + "member":{"shape":"Id"}, + "max":10, + "min":1 + }, + "BatchDeleteGeofenceResponse":{ + "type":"structure", + "required":["Errors"], + "members":{ + "Errors":{ + "shape":"BatchDeleteGeofenceErrorList", + "documentation":"

    Contains error details for each geofence that failed to delete.

    " + } + } + }, + "BatchEvaluateGeofencesError":{ + "type":"structure", + "required":[ + "DeviceId", + "Error", + "SampleTime" + ], + "members":{ + "DeviceId":{ + "shape":"Id", + "documentation":"

    The device associated with the position evaluation error.

    " + }, + "Error":{ + "shape":"BatchItemError", + "documentation":"

    Contains details associated with the batch error.

    " + }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

    Specifies a timestamp for when the error occurred in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + } + }, + "documentation":"

    Contains error details for each device that failed to evaluate its position against the geofences in a given geofence collection.

    " + }, + "BatchEvaluateGeofencesErrorList":{ + "type":"list", + "member":{"shape":"BatchEvaluateGeofencesError"} + }, + "BatchEvaluateGeofencesRequest":{ + "type":"structure", + "required":[ + "CollectionName", + "DevicePositionUpdates" + ], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The geofence collection used in evaluating the position of devices against its geofences.

    ", + "location":"uri", + "locationName":"CollectionName" + }, + "DevicePositionUpdates":{ + "shape":"BatchEvaluateGeofencesRequestDevicePositionUpdatesList", + "documentation":"

    Contains device details for each device to be evaluated against the given geofence collection.

    " + } + } + }, + "BatchEvaluateGeofencesRequestDevicePositionUpdatesList":{ + "type":"list", + "member":{"shape":"DevicePositionUpdate"}, + "max":10, + "min":1 + }, + "BatchEvaluateGeofencesResponse":{ + "type":"structure", + "required":["Errors"], + "members":{ + "Errors":{ + "shape":"BatchEvaluateGeofencesErrorList", + "documentation":"

    Contains error details for each device that failed to evaluate its position against the given geofence collection.

    " + } + } + }, + "BatchGetDevicePositionError":{ + "type":"structure", + "required":[ + "DeviceId", + "Error" + ], + "members":{ + "DeviceId":{ + "shape":"Id", + "documentation":"

    The ID of the device that didn't return a position.

    " + }, + "Error":{ + "shape":"BatchItemError", + "documentation":"

    Contains details related to the error code.

    " + } + }, + "documentation":"

    Contains error details for each device that didn't return a position.

    " + }, + "BatchGetDevicePositionErrorList":{ + "type":"list", + "member":{"shape":"BatchGetDevicePositionError"} + }, + "BatchGetDevicePositionRequest":{ + "type":"structure", + "required":[ + "DeviceIds", + "TrackerName" + ], + "members":{ + "DeviceIds":{ + "shape":"BatchGetDevicePositionRequestDeviceIdsList", + "documentation":"

    Devices whose position you want to retrieve.

    • For example, for two devices: device-ids=DeviceId1&device-ids=DeviceId2

    " + }, + "TrackerName":{ + "shape":"BatchGetDevicePositionRequestTrackerNameString", + "documentation":"

    The tracker resource retrieving the device position.

    ", + "location":"uri", + "locationName":"TrackerName" + } + } + }, + "BatchGetDevicePositionRequestDeviceIdsList":{ + "type":"list", + "member":{"shape":"Id"}, + "max":10, + "min":1 + }, + "BatchGetDevicePositionRequestTrackerNameString":{ + "type":"string", + "min":1, + "pattern":"^[-._\\w]+$" + }, + "BatchGetDevicePositionResponse":{ + "type":"structure", + "required":[ + "DevicePositions", + "Errors" + ], + "members":{ + "DevicePositions":{ + "shape":"DevicePositionList", + "documentation":"

    Contains device position details such as the device ID, position, and timestamps for when the position was received and sampled.

    " + }, + "Errors":{ + "shape":"BatchGetDevicePositionErrorList", + "documentation":"

    Contains error details for each device whose position could not be retrieved from the tracker resource.

    " + } + } + }, + "BatchItemError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"BatchItemErrorCode", + "documentation":"

    The error code associated with the batch request error.

    " + }, + "Message":{ + "shape":"String", + "documentation":"

    A message with the reason for the batch request error.

    " + } + }, + "documentation":"

    Contains the batch request error details associated with the request.

    " + }, + "BatchItemErrorCode":{ + "type":"string", + "enum":[ + "AccessDeniedError", + "ConflictError", + "InternalServerError", + "ResourceNotFoundError", + "ThrottlingError", + "ValidationError" + ] + }, + "BatchPutGeofenceError":{ + "type":"structure", + "required":[ + "Error", + "GeofenceId" + ], + "members":{ + "Error":{ + "shape":"BatchItemError", + "documentation":"

    Contains details associated with the batch error.

    " + }, + "GeofenceId":{ + "shape":"Id", + "documentation":"

    The geofence associated with the error message.

    " + } + }, + "documentation":"

    Contains error details for each geofence that failed to be stored in a given geofence collection.

    " + }, + "BatchPutGeofenceErrorList":{ + "type":"list", + "member":{"shape":"BatchPutGeofenceError"} + }, + "BatchPutGeofenceRequest":{ + "type":"structure", + "required":[ + "CollectionName", + "Entries" + ], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The geofence collection storing the geofences.

    ", + "location":"uri", + "locationName":"CollectionName" + }, + "Entries":{ + "shape":"BatchPutGeofenceRequestEntriesList", + "documentation":"

    The batch of geofences to be stored in a geofence collection.

    " + } + } + }, + "BatchPutGeofenceRequestEntriesList":{ + "type":"list", + "member":{"shape":"BatchPutGeofenceRequestEntry"}, + "max":10, + "min":1 + }, + "BatchPutGeofenceRequestEntry":{ + "type":"structure", + "required":[ + "GeofenceId", + "Geometry" + ], + "members":{ + "GeofenceId":{ + "shape":"Id", + "documentation":"

    The identifier for the geofence to be stored in a given geofence collection.

    " + }, + "Geometry":{ + "shape":"GeofenceGeometry", + "documentation":"

    The geometry details for the geofence.

    " + } + }, + "documentation":"

    Contains geofence details.

    " + }, + "BatchPutGeofenceResponse":{ + "type":"structure", + "required":[ + "Errors", + "Successes" + ], + "members":{ + "Errors":{ + "shape":"BatchPutGeofenceErrorList", + "documentation":"

    Contains additional error details for each geofence that failed to be stored in a geofence collection.

    " + }, + "Successes":{ + "shape":"BatchPutGeofenceSuccessList", + "documentation":"

    Contains each geofence that was successfully stored in a geofence collection.

    " + } + } + }, + "BatchPutGeofenceSuccess":{ + "type":"structure", + "required":[ + "CreateTime", + "GeofenceId", + "UpdateTime" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence was stored in a geofence collection in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + }, + "GeofenceId":{ + "shape":"Id", + "documentation":"

    The geofence successfully stored in a geofence collection.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + } + }, + "documentation":"

    Contains a summary of each geofence that was successfully stored in a given geofence collection.

    " + }, + "BatchPutGeofenceSuccessList":{ + "type":"list", + "member":{"shape":"BatchPutGeofenceSuccess"} + }, + "BatchUpdateDevicePositionError":{ + "type":"structure", + "required":[ + "DeviceId", + "Error", + "SampleTime" + ], + "members":{ + "DeviceId":{ + "shape":"Id", + "documentation":"

    The device associated with the failed location update.

    " + }, + "Error":{ + "shape":"BatchItemError", + "documentation":"

    Contains details related to the error, such as the error code and error message.

    " + }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when a position sample was attempted in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + } + }, + "documentation":"

    Contains error details for each device that failed to update its position.

    " + }, + "BatchUpdateDevicePositionErrorList":{ + "type":"list", + "member":{"shape":"BatchUpdateDevicePositionError"} + }, + "BatchUpdateDevicePositionRequest":{ + "type":"structure", + "required":[ + "TrackerName", + "Updates" + ], + "members":{ + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The name of the tracker resource to update.

    ", + "location":"uri", + "locationName":"TrackerName" + }, + "Updates":{ + "shape":"BatchUpdateDevicePositionRequestUpdatesList", + "documentation":"

    Contains the position update details for each device.

    " + } + } + }, + "BatchUpdateDevicePositionRequestUpdatesList":{ + "type":"list", + "member":{"shape":"DevicePositionUpdate"}, + "max":10, + "min":1 + }, + "BatchUpdateDevicePositionResponse":{ + "type":"structure", + "required":["Errors"], + "members":{ + "Errors":{ + "shape":"BatchUpdateDevicePositionErrorList", + "documentation":"

    Contains error details for each device that failed to update its position.

    " + } + } + }, + "Blob":{"type":"blob"}, + "BoundingBox":{ + "type":"list", + "member":{"shape":"Double"}, + "max":6, + "min":4, + "sensitive":true + }, + "ConflictException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "locationName":"message" + } + }, + "documentation":"

    The request was unsuccessful due to a conflict.

    ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CountryCode":{ + "type":"string", + "pattern":"^[A-Z]{3}$" + }, + "CountryCodeList":{ + "type":"list", + "member":{"shape":"CountryCode"}, + "max":100, + "min":1 + }, + "CreateGeofenceCollectionRequest":{ + "type":"structure", + "required":[ + "CollectionName", + "PricingPlan" + ], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    A custom name for the geofence collection.

    Requirements:

    • Contain only alphanumeric characters (A–Z, a–z, 0-9), hyphens (-), and underscores (_).

    • Must be a unique geofence collection name.

    • No spaces allowed. For example, ExampleGeofenceCollection.

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    An optional description for the geofence collection.

    " + }, + "PricingPlan":{ + "shape":"PricingPlan", + "documentation":"

    Specifies the pricing plan for your geofence collection. There are three pricing plan options:

    • RequestBasedUsage — Selects the \"Request-Based Usage\" pricing plan.

    • MobileAssetTracking — Selects the \"Mobile Asset Tracking\" pricing plan.

    • MobileAssetManagement — Selects the \"Mobile Asset Management\" pricing plan.

    For additional details and restrictions on each pricing plan option, see the Amazon Location Service pricing page.
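
    A sketch only, not part of the service model: creating a collection on the request-based plan through the AWS SDK for Java v2 generated client might look like the following. The collection name is hypothetical, and the REQUEST_BASED_USAGE constant is assumed from the SDK's usual enum naming for the RequestBasedUsage value.

    import software.amazon.awssdk.services.location.LocationClient;
    import software.amazon.awssdk.services.location.model.CreateGeofenceCollectionRequest;
    import software.amazon.awssdk.services.location.model.PricingPlan;

    public class CreateCollectionSketch {
        public static void main(String[] args) {
            try (LocationClient geo = LocationClient.create()) {
                // Create a geofence collection billed under the Request-Based Usage plan.
                geo.createGeofenceCollection(CreateGeofenceCollectionRequest.builder()
                    .collectionName(\"ExampleGeofenceCollection\")
                    .pricingPlan(PricingPlan.REQUEST_BASED_USAGE)
                    .build());
            }
        }
    }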

    " + } + } + }, + "CreateGeofenceCollectionResponse":{ + "type":"structure", + "required":[ + "CollectionArn", + "CollectionName", + "CreateTime" + ], + "members":{ + "CollectionArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the geofence collection resource. Used when you need to specify a resource across all AWS.

    " + }, + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The name for the geofence collection.

    " + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence collection was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + } + } + }, + "CreateMapRequest":{ + "type":"structure", + "required":[ + "Configuration", + "MapName", + "PricingPlan" + ], + "members":{ + "Configuration":{ + "shape":"MapConfiguration", + "documentation":"

    Specifies the map style selected from an available data provider.

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    An optional description for the map resource.

    " + }, + "MapName":{ + "shape":"ResourceName", + "documentation":"

    The name for the map resource.

    Requirements:

    • Must contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-), and underscores (_).

    • Must be a unique map resource name.

    • No spaces allowed. For example, ExampleMap.

    " + }, + "PricingPlan":{ + "shape":"PricingPlan", + "documentation":"

    Specifies the pricing plan for your map resource. There are three pricing plan options:

    • RequestBasedUsage — Selects the \"Request-Based Usage\" pricing plan.

    • MobileAssetTracking — Selects the \"Mobile Asset Tracking\" pricing plan.

    • MobileAssetManagement — Selects the \"Mobile Asset Management\" pricing plan.

    For additional details and restrictions on each pricing plan option, see the Amazon Location Service pricing page.

    " + } + } + }, + "CreateMapResponse":{ + "type":"structure", + "required":[ + "CreateTime", + "MapArn", + "MapName" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the map resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "MapArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the map resource. Used when you need to specify a resource across all AWS.

    • Format example: arn:partition:service:region:account-id:resource-type:resource-id

    " + }, + "MapName":{ + "shape":"ResourceName", + "documentation":"

    The name of the map resource.

    " + } + } + }, + "CreatePlaceIndexRequest":{ + "type":"structure", + "required":[ + "DataSource", + "IndexName", + "PricingPlan" + ], + "members":{ + "DataSource":{ + "shape":"String", + "documentation":"

    Specifies the data provider of geospatial data.

    " + }, + "DataSourceConfiguration":{ + "shape":"DataSourceConfiguration", + "documentation":"

    Specifies the data storage option for requesting Places.

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    The optional description for the Place index resource.

    " + }, + "IndexName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Place index resource.

    Requirements:

    • Must contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-), and underscores (_).

    • Must be a unique Place index resource name.

    • No spaces allowed. For example, ExamplePlaceIndex.

    " + }, + "PricingPlan":{ + "shape":"PricingPlan", + "documentation":"

    Specifies the pricing plan for your Place index resource. There are three pricing plan options:

    • RequestBasedUsage — Selects the \"Request-Based Usage\" pricing plan.

    • MobileAssetTracking — Selects the \"Mobile Asset Tracking\" pricing plan.

    • MobileAssetManagement — Selects the \"Mobile Asset Management\" pricing plan.

    For additional details and restrictions on each pricing plan option, see the Amazon Location Service pricing page.

    " + } + } + }, + "CreatePlaceIndexResponse":{ + "type":"structure", + "required":[ + "CreateTime", + "IndexArn", + "IndexName" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the Place index resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "IndexArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the Place index resource. Used when you need to specify a resource across all AWS.

    " + }, + "IndexName":{ + "shape":"ResourceName", + "documentation":"

    The name for the Place index resource.

    " + } + } + }, + "CreateTrackerRequest":{ + "type":"structure", + "required":[ + "PricingPlan", + "TrackerName" + ], + "members":{ + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    An optional description for the tracker resource.

    " + }, + "PricingPlan":{ + "shape":"PricingPlan", + "documentation":"

    Specifies the pricing plan for your tracker resource. There are three pricing plan options:

    • RequestBasedUsage — Selects the \"Request-Based Usage\" pricing plan.

    • MobileAssetTracking — Selects the \"Mobile Asset Tracking\" pricing plan.

    • MobileAssetManagement — Selects the \"Mobile Asset Management\" pricing plan.

    For additional details and restrictions on each pricing plan option, see the Amazon Location Service pricing page.

    " + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The name for the tracker resource.

    Requirements:

    • Must contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-), and underscores (_).

    • Must be a unique tracker resource name.

    • No spaces allowed. For example, ExampleTracker.

    " + } + } + }, + "CreateTrackerResponse":{ + "type":"structure", + "required":[ + "CreateTime", + "TrackerArn", + "TrackerName" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the tracker resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "TrackerArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the tracker resource. Used when you need to specify a resource across all AWS.

    " + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The name of the tracker resource.

    " + } + } + }, + "DataSourceConfiguration":{ + "type":"structure", + "members":{ + "IntendedUse":{ + "shape":"IntendedUse", + "documentation":"

    Specifies how the results of an operation will be stored by the caller.

    Valid values include:

    • SingleUse specifies that the results won't be stored.

    • Storage specifies that the result can be cached or stored in a database.

    Default value: SingleUse

    " + } + }, + "documentation":"

    Specifies the data storage option chosen for requesting Places.

    By using Places, you agree that AWS may transmit your API queries to your selected third party provider for processing, which may be outside the AWS region you are currently using.

    Also, when using HERE as your data provider, you may not (a) use HERE Places for Asset Management, or (b) select the Storage option for the IntendedUse parameter when requesting Places in Japan. For more information, see the AWS Service Terms for Amazon Location Service.

    " + }, + "DeleteGeofenceCollectionRequest":{ + "type":"structure", + "required":["CollectionName"], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The name of the geofence collection to be deleted.

    ", + "location":"uri", + "locationName":"CollectionName" + } + } + }, + "DeleteGeofenceCollectionResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteMapRequest":{ + "type":"structure", + "required":["MapName"], + "members":{ + "MapName":{ + "shape":"ResourceName", + "documentation":"

    The name of the map resource to be deleted.

    ", + "location":"uri", + "locationName":"MapName" + } + } + }, + "DeleteMapResponse":{ + "type":"structure", + "members":{ + } + }, + "DeletePlaceIndexRequest":{ + "type":"structure", + "required":["IndexName"], + "members":{ + "IndexName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Place index resource to be deleted.

    ", + "location":"uri", + "locationName":"IndexName" + } + } + }, + "DeletePlaceIndexResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTrackerRequest":{ + "type":"structure", + "required":["TrackerName"], + "members":{ + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The name of the tracker resource to be deleted.

    ", + "location":"uri", + "locationName":"TrackerName" + } + } + }, + "DeleteTrackerResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeGeofenceCollectionRequest":{ + "type":"structure", + "required":["CollectionName"], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The name of the geofence collection.

    ", + "location":"uri", + "locationName":"CollectionName" + } + } + }, + "DescribeGeofenceCollectionResponse":{ + "type":"structure", + "required":[ + "CollectionArn", + "CollectionName", + "CreateTime", + "Description", + "UpdateTime" + ], + "members":{ + "CollectionArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the geofence collection resource. Used when you need to specify a resource across all AWS.

    " + }, + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The name of the geofence collection.

    " + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    The optional description for the geofence collection.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence collection was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + } + } + }, + "DescribeMapRequest":{ + "type":"structure", + "required":["MapName"], + "members":{ + "MapName":{ + "shape":"ResourceName", + "documentation":"

    The name of the map resource.

    ", + "location":"uri", + "locationName":"MapName" + } + } + }, + "DescribeMapResponse":{ + "type":"structure", + "required":[ + "Configuration", + "CreateTime", + "DataSource", + "Description", + "MapArn", + "MapName", + "UpdateTime" + ], + "members":{ + "Configuration":{ + "shape":"MapConfiguration", + "documentation":"

    Specifies the map tile style selected from a partner data provider.

    " + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the map resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "DataSource":{ + "shape":"String", + "documentation":"

    Specifies the data provider for the associated map tiles.

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    The optional description for the map resource.

    " + }, + "MapArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the map resource. Used when you need to specify a resource across all AWS.

    " + }, + "MapName":{ + "shape":"ResourceName", + "documentation":"

    The name of the map resource being described.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the map resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + } + } + }, + "DescribePlaceIndexRequest":{ + "type":"structure", + "required":["IndexName"], + "members":{ + "IndexName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Place index resource.

    ", + "location":"uri", + "locationName":"IndexName" + } + } + }, + "DescribePlaceIndexResponse":{ + "type":"structure", + "required":[ + "CreateTime", + "DataSource", + "DataSourceConfiguration", + "Description", + "IndexArn", + "IndexName", + "UpdateTime" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the Place index resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "DataSource":{ + "shape":"String", + "documentation":"

    The data provider of geospatial data.

    " + }, + "DataSourceConfiguration":{ + "shape":"DataSourceConfiguration", + "documentation":"

    The specified data storage option for requesting Places.

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    The optional description for the Place index resource.

    " + }, + "IndexArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the Place index resource. Used when you need to specify a resource across all AWS.

    " + }, + "IndexName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Place index resource being described.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the Place index resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + } + } + }, + "DescribeTrackerRequest":{ + "type":"structure", + "required":["TrackerName"], + "members":{ + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The name of the tracker resource.

    ", + "location":"uri", + "locationName":"TrackerName" + } + } + }, + "DescribeTrackerResponse":{ + "type":"structure", + "required":[ + "CreateTime", + "Description", + "TrackerArn", + "TrackerName", + "UpdateTime" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the tracker resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    The optional description for the tracker resource.

    " + }, + "TrackerArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the tracker resource. Used when you need to specify a resource across all AWS.

    " + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The name of the tracker resource.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the tracker resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + } + } + }, + "DevicePosition":{ + "type":"structure", + "required":[ + "Position", + "ReceivedTime", + "SampleTime" + ], + "members":{ + "DeviceId":{ + "shape":"Id", + "documentation":"

    The device whose position you retrieved.

    " + }, + "Position":{ + "shape":"Position", + "documentation":"

    The last known device position.

    " + }, + "ReceivedTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the tracker resource received the position in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the position was detected and sampled in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + } + }, + "documentation":"

    Contains the device position details.

    " + }, + "DevicePositionList":{ + "type":"list", + "member":{"shape":"DevicePosition"} + }, + "DevicePositionUpdate":{ + "type":"structure", + "required":[ + "DeviceId", + "Position", + "SampleTime" + ], + "members":{ + "DeviceId":{ + "shape":"Id", + "documentation":"

    The device associated with the position update.

    " + }, + "Position":{ + "shape":"Position", + "documentation":"

    The latest device position defined in WGS 84 format: [Xlongitude, Ylatitude].

    " + }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the position update was received in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + } + }, + "documentation":"

    Contains the position update details for a device.

    " + }, + "DisassociateTrackerConsumerRequest":{ + "type":"structure", + "required":[ + "ConsumerArn", + "TrackerName" + ], + "members":{ + "ConsumerArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the geofence collection to be disassociated from the tracker resource. Used when you need to specify a resource across all AWS.

    • Format example: arn:partition:service:region:account-id:resource-type:resource-id

    ", + "location":"uri", + "locationName":"ConsumerArn" + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The name of the tracker resource to be disassociated from the consumer.

    ", + "location":"uri", + "locationName":"TrackerName" + } + } + }, + "DisassociateTrackerConsumerResponse":{ + "type":"structure", + "members":{ + } + }, + "Double":{ + "type":"double", + "box":true + }, + "GeofenceGeometry":{ + "type":"structure", + "members":{ + "Polygon":{ + "shape":"LinearRings", + "documentation":"

    An array of 1 or more linear rings. A linear ring is an array of 4 or more vertices, where the first and last vertex are the same to form a closed boundary. Each vertex is a 2-dimensional point of the form: [longitude, latitude].

    The first linear ring is an outer ring, describing the polygon's boundary. Subsequent linear rings may be inner or outer rings to describe holes and islands. Outer rings must list their vertices in counter-clockwise order around the ring's center, where the left side is the polygon's exterior. Inner rings must list their vertices in clockwise order, where the left side is the polygon's interior.

    " + } + }, + "documentation":"

    Contains the geofence geometry details.

    Limitation — Amazon Location does not currently support polygons with holes, multipolygons, polygons that are wound clockwise, or that cross the antimeridian.

    " + }, + "GetDevicePositionHistoryRequest":{ + "type":"structure", + "required":[ + "DeviceId", + "TrackerName" + ], + "members":{ + "DeviceId":{ + "shape":"Id", + "documentation":"

    The device whose position history you want to retrieve.

    ", + "location":"uri", + "locationName":"DeviceId" + }, + "EndTimeExclusive":{ + "shape":"Timestamp", + "documentation":"

    Specify the end time for the position history in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    • The given time for EndTimeExclusive must be after the time for StartTimeInclusive.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

    Default value: null

    " + }, + "StartTimeInclusive":{ + "shape":"Timestamp", + "documentation":"

    Specify the start time for the position history in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    • The given time for EndTimeExclusive must be after the time for StartTimeInclusive.

    " + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The tracker resource receiving the request for the device position history.

    ", + "location":"uri", + "locationName":"TrackerName" + } + } + }, + "GetDevicePositionHistoryResponse":{ + "type":"structure", + "required":["DevicePositions"], + "members":{ + "DevicePositions":{ + "shape":"DevicePositionList", + "documentation":"

    Contains the position history details for the requested device.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A pagination token indicating there are additional pages available. You can use the token in a following request to fetch the next set of results.

    " + } + } + }, + "GetDevicePositionRequest":{ + "type":"structure", + "required":[ + "DeviceId", + "TrackerName" + ], + "members":{ + "DeviceId":{ + "shape":"Id", + "documentation":"

    The device whose position you want to retrieve.

    ", + "location":"uri", + "locationName":"DeviceId" + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The tracker resource receiving the request for the device position.

    ", + "location":"uri", + "locationName":"TrackerName" + } + } + }, + "GetDevicePositionResponse":{ + "type":"structure", + "required":[ + "Position", + "ReceivedTime", + "SampleTime" + ], + "members":{ + "DeviceId":{ + "shape":"Id", + "documentation":"

    The device whose position you retrieved.

    " + }, + "Position":{ + "shape":"Position", + "documentation":"

    The last known device position.

    " + }, + "ReceivedTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the tracker resource received the position in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "SampleTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the position was detected and sampled in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + } + } + }, + "GetGeofenceRequest":{ + "type":"structure", + "required":[ + "CollectionName", + "GeofenceId" + ], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The geofence collection storing the target geofence.

    ", + "location":"uri", + "locationName":"CollectionName" + }, + "GeofenceId":{ + "shape":"Id", + "documentation":"

    The geofence you're retrieving details for.

    ", + "location":"uri", + "locationName":"GeofenceId" + } + } + }, + "GetGeofenceResponse":{ + "type":"structure", + "required":[ + "CreateTime", + "GeofenceId", + "Geometry", + "Status", + "UpdateTime" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence collection was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + }, + "GeofenceId":{ + "shape":"Id", + "documentation":"

    The geofence identifier.

    " + }, + "Geometry":{ + "shape":"GeofenceGeometry", + "documentation":"

    Contains the geofence geometry details describing a polygon.

    " + }, + "Status":{ + "shape":"String", + "documentation":"

    Identifies the state of the geofence. A geofence will hold one of the following states:

    • ACTIVE — The geofence has been indexed by the system.

    • PENDING — The geofence is being processed by the system.

    • FAILED — The geofence failed to be indexed by the system.

    • DELETED — The geofence has been deleted from the system index.

    • DELETING — The geofence is being deleted from the system index.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence collection was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + } + } + }, + "GetMapGlyphsRequest":{ + "type":"structure", + "required":[ + "FontStack", + "FontUnicodeRange", + "MapName" + ], + "members":{ + "FontStack":{ + "shape":"String", + "documentation":"

    A comma-separated list of fonts to load glyphs from in order of preference. For example, Noto Sans, Arial Unicode.

    ", + "location":"uri", + "locationName":"FontStack" + }, + "FontUnicodeRange":{ + "shape":"GetMapGlyphsRequestFontUnicodeRangeString", + "documentation":"

    A Unicode range of characters to download glyphs for. Each response will contain 256 characters. For example, 0-255 includes all characters from range U+0000 to U+00FF. Must be aligned to multiples of 256.

    ", + "location":"uri", + "locationName":"FontUnicodeRange" + }, + "MapName":{ + "shape":"ResourceName", + "documentation":"

    The map resource associated with the glyph file.

    ", + "location":"uri", + "locationName":"MapName" + } + } + }, + "GetMapGlyphsRequestFontUnicodeRangeString":{ + "type":"string", + "pattern":"^[0-9]+-[0-9]+\\.pbf$" + }, + "GetMapGlyphsResponse":{ + "type":"structure", + "members":{ + "Blob":{ + "shape":"Blob", + "documentation":"

    Contains the body of the glyph file.

    " + }, + "ContentType":{ + "shape":"String", + "documentation":"

    The map glyph content type. For example, application/octet-stream.

    ", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"Blob" + }, + "GetMapSpritesRequest":{ + "type":"structure", + "required":[ + "FileName", + "MapName" + ], + "members":{ + "FileName":{ + "shape":"GetMapSpritesRequestFileNameString", + "documentation":"

    The name of the sprite file. Use the following file names for the sprite sheet:

    • sprites.png

    • sprites@2x.png for high pixel density displays

    For the JSON document containing image offsets, use the following file names:

    • sprites.json

    • sprites@2x.json for high pixel density displays

    ", + "location":"uri", + "locationName":"FileName" + }, + "MapName":{ + "shape":"ResourceName", + "documentation":"

    The map resource associated with the sprite file.

    ", + "location":"uri", + "locationName":"MapName" + } + } + }, + "GetMapSpritesRequestFileNameString":{ + "type":"string", + "pattern":"^sprites(@2x)?\\.(png|json)$" + }, + "GetMapSpritesResponse":{ + "type":"structure", + "members":{ + "Blob":{ + "shape":"Blob", + "documentation":"

    Contains the body of the sprite sheet or JSON offset file.

    " + }, + "ContentType":{ + "shape":"String", + "documentation":"

    The content type of the sprite sheet and offsets. For example, the sprite sheet content type is image/png, and the sprite offset JSON document is application/json.

    ", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"Blob" + }, + "GetMapStyleDescriptorRequest":{ + "type":"structure", + "required":["MapName"], + "members":{ + "MapName":{ + "shape":"ResourceName", + "documentation":"

    The map resource to retrieve the style descriptor from.

    ", + "location":"uri", + "locationName":"MapName" + } + } + }, + "GetMapStyleDescriptorResponse":{ + "type":"structure", + "members":{ + "Blob":{ + "shape":"Blob", + "documentation":"

    Contains the body of the style descriptor.

    " + }, + "ContentType":{ + "shape":"String", + "documentation":"

    The style descriptor's content type. For example, application/json.

    ", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"Blob" + }, + "GetMapTileRequest":{ + "type":"structure", + "required":[ + "MapName", + "X", + "Y", + "Z" + ], + "members":{ + "MapName":{ + "shape":"ResourceName", + "documentation":"

    The map resource to retrieve the map tiles from.

    ", + "location":"uri", + "locationName":"MapName" + }, + "X":{ + "shape":"GetMapTileRequestXString", + "documentation":"

    The X axis value for the map tile.

    ", + "location":"uri", + "locationName":"X" + }, + "Y":{ + "shape":"GetMapTileRequestYString", + "documentation":"

    The Y axis value for the map tile.

    ", + "location":"uri", + "locationName":"Y" + }, + "Z":{ + "shape":"GetMapTileRequestZString", + "documentation":"

    The zoom value for the map tile.

    ", + "location":"uri", + "locationName":"Z" + } + } + }, + "GetMapTileRequestXString":{ + "type":"string", + "pattern":"\\d+" + }, + "GetMapTileRequestYString":{ + "type":"string", + "pattern":"\\d+" + }, + "GetMapTileRequestZString":{ + "type":"string", + "pattern":"\\d+" + }, + "GetMapTileResponse":{ + "type":"structure", + "members":{ + "Blob":{ + "shape":"Blob", + "documentation":"

    Contains Mapbox Vector Tile (MVT) data.

    " + }, + "ContentType":{ + "shape":"String", + "documentation":"

    The map tile's content type. For example, application/vnd.mapbox-vector-tile.

    ", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"Blob" + }, + "Id":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[-._\\p{L}\\p{N}]+$" + }, + "IntendedUse":{ + "type":"string", + "enum":[ + "SingleUse", + "Storage" + ] + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "locationName":"message" + } + }, + "documentation":"

    The request has failed to process because of an unknown server error, exception, or failure.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "LinearRing":{ + "type":"list", + "member":{"shape":"Position"}, + "min":4 + }, + "LinearRings":{ + "type":"list", + "member":{"shape":"LinearRing"}, + "min":1 + }, + "ListGeofenceCollectionsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListGeofenceCollectionsRequestMaxResultsInteger", + "documentation":"

    An optional limit for the number of resources returned in a single call.

    Default value: 100

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

    Default value: null

    " + } + } + }, + "ListGeofenceCollectionsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListGeofenceCollectionsResponse":{ + "type":"structure", + "required":["Entries"], + "members":{ + "Entries":{ + "shape":"ListGeofenceCollectionsResponseEntryList", + "documentation":"

    Lists the geofence collections that exist in your AWS account.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A pagination token indicating there are additional pages available. You can use the token in a following request to fetch the next set of results.

    " + } + } + }, + "ListGeofenceCollectionsResponseEntry":{ + "type":"structure", + "required":[ + "CollectionName", + "CreateTime", + "Description", + "UpdateTime" + ], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The name of the geofence collection.

    " + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence collection was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    The description for the geofence collection.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    Specifies a timestamp for when the resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + } + }, + "documentation":"

    Contains the geofence collection details.

    " + }, + "ListGeofenceCollectionsResponseEntryList":{ + "type":"list", + "member":{"shape":"ListGeofenceCollectionsResponseEntry"} + }, + "ListGeofenceResponseEntry":{ + "type":"structure", + "required":[ + "CreateTime", + "GeofenceId", + "Geometry", + "Status", + "UpdateTime" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence was stored in a geofence collection in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + }, + "GeofenceId":{ + "shape":"Id", + "documentation":"

    The geofence identifier.

    " + }, + "Geometry":{ + "shape":"GeofenceGeometry", + "documentation":"

    Contains the geofence geometry details describing a polygon.

    " + }, + "Status":{ + "shape":"String", + "documentation":"

    Identifies the state of the geofence. A geofence will hold one of the following states:

    • ACTIVE — The geofence has been indexed by the system.

    • PENDING — The geofence is being processed by the system.

    • FAILED — The geofence failed to be indexed by the system.

    • DELETED — The geofence has been deleted from the system index.

    • DELETING — The geofence is being deleted from the system index.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + } + }, + "documentation":"

    Contains a list of geofences stored in a given geofence collection.

    " + }, + "ListGeofenceResponseEntryList":{ + "type":"list", + "member":{"shape":"ListGeofenceResponseEntry"} + }, + "ListGeofencesRequest":{ + "type":"structure", + "required":["CollectionName"], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The name of the geofence collection storing the list of geofences.

    ", + "location":"uri", + "locationName":"CollectionName" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

    Default value: null

    " + } + } + }, + "ListGeofencesResponse":{ + "type":"structure", + "required":["Entries"], + "members":{ + "Entries":{ + "shape":"ListGeofenceResponseEntryList", + "documentation":"

    Contains a list of geofences stored in the geofence collection.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A pagination token indicating there are additional pages available. You can use the token in a following request to fetch the next set of results.

    " + } + } + }, + "ListMapsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListMapsRequestMaxResultsInteger", + "documentation":"

    An optional limit for the number of resources returned in a single call.

    Default value: 100

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

    Default value: null

    " + } + } + }, + "ListMapsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListMapsResponse":{ + "type":"structure", + "required":["Entries"], + "members":{ + "Entries":{ + "shape":"ListMapsResponseEntryList", + "documentation":"

    Contains a list of maps in your AWS account.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A pagination token indicating there are additional pages available. You can use the token in a following request to fetch the next set of results.

    " + } + } + }, + "ListMapsResponseEntry":{ + "type":"structure", + "required":[ + "CreateTime", + "DataSource", + "Description", + "MapName", + "UpdateTime" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the map resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "DataSource":{ + "shape":"String", + "documentation":"

    Specifies the data provider for the associated map tiles.

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    The description for the map resource.

    " + }, + "MapName":{ + "shape":"ResourceName", + "documentation":"

    The name of the associated map resource.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the map resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + } + }, + "documentation":"

    Contains details of an existing map resource in your AWS account.

    " + }, + "ListMapsResponseEntryList":{ + "type":"list", + "member":{"shape":"ListMapsResponseEntry"} + }, + "ListPlaceIndexesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListPlaceIndexesRequestMaxResultsInteger", + "documentation":"

    An optional limit for the maximum number of results returned in a single call.

    Default value: 100

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

    Default value: null

    " + } + } + }, + "ListPlaceIndexesRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListPlaceIndexesResponse":{ + "type":"structure", + "required":["Entries"], + "members":{ + "Entries":{ + "shape":"ListPlaceIndexesResponseEntryList", + "documentation":"

    Lists the Place index resources that exist in your AWS account.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A pagination token indicating there are additional pages available. You can use the token in a following request to fetch the next set of results.

    " + } + } + }, + "ListPlaceIndexesResponseEntry":{ + "type":"structure", + "required":[ + "CreateTime", + "DataSource", + "Description", + "IndexName", + "UpdateTime" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the Place index resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "DataSource":{ + "shape":"String", + "documentation":"

    The data provider of geospatial data.

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    The optional description for the Place index resource.

    " + }, + "IndexName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Place index resource.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the Place index resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + } + }, + "documentation":"

    A Place index resource listed in your AWS account.

    " + }, + "ListPlaceIndexesResponseEntryList":{ + "type":"list", + "member":{"shape":"ListPlaceIndexesResponseEntry"} + }, + "ListTrackerConsumersRequest":{ + "type":"structure", + "required":["TrackerName"], + "members":{ + "MaxResults":{ + "shape":"ListTrackerConsumersRequestMaxResultsInteger", + "documentation":"

    An optional limit for the number of resources returned in a single call.

    Default value: 100

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

    Default value: null

    " + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The tracker resource whose associated geofence collections you want to list.

    ", + "location":"uri", + "locationName":"TrackerName" + } + } + }, + "ListTrackerConsumersRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListTrackerConsumersResponse":{ + "type":"structure", + "required":["ConsumerArns"], + "members":{ + "ConsumerArns":{ + "shape":"ArnList", + "documentation":"

    Contains the list of geofence collection ARNs associated to the tracker resource.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A pagination token indicating there are additional pages available. You can use the token in a following request to fetch the next set of results.

    " + } + } + }, + "ListTrackersRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListTrackersRequestMaxResultsInteger", + "documentation":"

    An optional limit for the number of resources returned in a single call.

    Default value: 100

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page.

    Default value: null

    " + } + } + }, + "ListTrackersRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListTrackersResponse":{ + "type":"structure", + "required":["Entries"], + "members":{ + "Entries":{ + "shape":"ListTrackersResponseEntryList", + "documentation":"

    Contains tracker resources in your AWS account. Details include tracker name, description and timestamps for when the tracker was created and last updated.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A pagination token indicating there are additional pages available. You can use the token in a following request to fetch the next set of results.

    " + } + } + }, + "ListTrackersResponseEntry":{ + "type":"structure", + "required":[ + "CreateTime", + "Description", + "TrackerName", + "UpdateTime" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the tracker resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

    The description for the tracker resource.

    " + }, + "TrackerName":{ + "shape":"ResourceName", + "documentation":"

    The name of the tracker resource.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the tracker resource was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

    " + } + }, + "documentation":"

    Contains the tracker resource details.

    " + }, + "ListTrackersResponseEntryList":{ + "type":"list", + "member":{"shape":"ListTrackersResponseEntry"} + }, + "MapConfiguration":{ + "type":"structure", + "required":["Style"], + "members":{ + "Style":{ + "shape":"MapStyle", + "documentation":"

    Specifies the map style selected from an available data provider.

    Valid styles: VectorEsriLightGrayCanvas, VectorEsriLight, VectorEsriStreets, VectorEsriNavigation, VectorEsriDarkGrayCanvas, VectorHereBerlin

    When using HERE as your data provider, and selecting the Style VectorHereBerlin, you may not use HERE Maps for Asset Management. See the AWS Service Terms for Amazon Location Service.

    " + } + }, + "documentation":"

    Specifies the map tile style selected from an available provider.

    " + }, + "MapStyle":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[-._\\p{L}\\p{N}]+$" + }, + "Place":{ + "type":"structure", + "required":["Geometry"], + "members":{ + "AddressNumber":{ + "shape":"String", + "documentation":"

    The numerical portion of an address, such as a building number.

    " + }, + "Country":{ + "shape":"String", + "documentation":"

    A country/region specified using an ISO 3166 3-letter country/region code. For example, CAN.

    " + }, + "Geometry":{"shape":"PlaceGeometry"}, + "Label":{ + "shape":"String", + "documentation":"

    The full name and address of the point of interest such as a city, region, or country. For example, 123 Any Street, Any Town, USA.

    " + }, + "Municipality":{ + "shape":"String", + "documentation":"

    A name for a local area, such as a city or town name. For example, Toronto.

    " + }, + "Neighborhood":{ + "shape":"String", + "documentation":"

    The name of a community district. For example, Downtown.

    " + }, + "PostalCode":{ + "shape":"String", + "documentation":"

    A group of numbers and letters in a country-specific format, which accompanies the address for the purpose of identifying a location.

    " + }, + "Region":{ + "shape":"String", + "documentation":"

    A name for an area or geographical division, such as a province or state name. For example, British Columbia.

    " + }, + "Street":{ + "shape":"String", + "documentation":"

    The name for a street or a road to identify a location. For example, Main Street.

    " + }, + "SubRegion":{ + "shape":"String", + "documentation":"

    A county, or an area that's part of a larger region. For example, Metro Vancouver.

    " + } + }, + "documentation":"

    Contains details about addresses or points of interest that match the search criteria.

    " + }, + "PlaceGeometry":{ + "type":"structure", + "members":{ + "Point":{ + "shape":"Position", + "documentation":"

    A single point geometry specifies a location for a Place using WGS 84 coordinates:

    • x — Specifies the x coordinate or longitude.

    • y — Specifies the y coordinate or latitude.

    " + } + }, + "documentation":"

    Places uses a point geometry to specify a location or a Place.

    " + }, + "PlaceIndexSearchResultLimit":{ + "type":"integer", + "max":50, + "min":1 + }, + "Position":{ + "type":"list", + "member":{"shape":"Double"}, + "max":2, + "min":2, + "sensitive":true + }, + "PricingPlan":{ + "type":"string", + "enum":[ + "RequestBasedUsage", + "MobileAssetTracking", + "MobileAssetManagement" + ] + }, + "PutGeofenceRequest":{ + "type":"structure", + "required":[ + "CollectionName", + "GeofenceId", + "Geometry" + ], + "members":{ + "CollectionName":{ + "shape":"ResourceName", + "documentation":"

    The geofence collection to store the geofence in.

    ", + "location":"uri", + "locationName":"CollectionName" + }, + "GeofenceId":{ + "shape":"Id", + "documentation":"

    An identifier for the geofence. For example, ExampleGeofence-1.

    ", + "location":"uri", + "locationName":"GeofenceId" + }, + "Geometry":{ + "shape":"GeofenceGeometry", + "documentation":"

    Contains the polygon details to specify the position of the geofence.

    " + } + } + }, + "PutGeofenceResponse":{ + "type":"structure", + "required":[ + "CreateTime", + "GeofenceId", + "UpdateTime" + ], + "members":{ + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + }, + "GeofenceId":{ + "shape":"Id", + "documentation":"

    The geofence identifier entered in the request.

    " + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp for when the geofence was last updated in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ

    " + } + } + }, + "ResourceDescription":{ + "type":"string", + "max":1000, + "min":0 + }, + "ResourceName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[-._\\w]+$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "locationName":"message" + } + }, + "documentation":"

    The resource that you've entered was not found in your AWS account.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SearchForPositionResult":{ + "type":"structure", + "required":["Place"], + "members":{ + "Place":{ + "shape":"Place", + "documentation":"

    Contains details about the relevant point of interest.

    " + } + }, + "documentation":"

    Specifies a single point of interest, or Place, as a result of a search query obtained from a dataset configured in the Place index resource.

    " + }, + "SearchForPositionResultList":{ + "type":"list", + "member":{"shape":"SearchForPositionResult"} + }, + "SearchForTextResult":{ + "type":"structure", + "required":["Place"], + "members":{ + "Place":{ + "shape":"Place", + "documentation":"

    Contains details about the relevant point of interest.

    " + } + }, + "documentation":"

    Contains relevant Places returned by calling SearchPlaceIndexForText.

    " + }, + "SearchForTextResultList":{ + "type":"list", + "member":{"shape":"SearchForTextResult"} + }, + "SearchPlaceIndexForPositionRequest":{ + "type":"structure", + "required":[ + "IndexName", + "Position" + ], + "members":{ + "IndexName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Place index resource you want to use for the search.

    ", + "location":"uri", + "locationName":"IndexName" + }, + "MaxResults":{ + "shape":"PlaceIndexSearchResultLimit", + "documentation":"

    An optional parameter. The maximum number of results returned per request.

    Default value: 50

    " + }, + "Position":{ + "shape":"Position", + "documentation":"

    Specifies a coordinate for the query defined by a longitude and latitude.

    • The first position is the X coordinate, or longitude.

    • The second position is the Y coordinate, or latitude.

    For example, position=xLongitude&position=yLatitude.

    " + } + } + }, + "SearchPlaceIndexForPositionResponse":{ + "type":"structure", + "required":[ + "Results", + "Summary" + ], + "members":{ + "Results":{ + "shape":"SearchForPositionResultList", + "documentation":"

    Returns a list of Places closest to the specified position. Each result contains additional information about the Places returned.

    " + }, + "Summary":{ + "shape":"SearchPlaceIndexForPositionSummary", + "documentation":"

    Contains a summary of the request.

    " + } + } + }, + "SearchPlaceIndexForPositionSummary":{ + "type":"structure", + "required":[ + "DataSource", + "Position" + ], + "members":{ + "DataSource":{ + "shape":"String", + "documentation":"

    The data provider of geospatial data for the Place index resource.

    " + }, + "MaxResults":{ + "shape":"PlaceIndexSearchResultLimit", + "documentation":"

    An optional parameter. The maximum number of results returned per request.

    Default value: 50

    " + }, + "Position":{ + "shape":"Position", + "documentation":"

    The position given in the reverse geocoding request.

    " + } + }, + "documentation":"

    A summary of the reverse geocoding request sent using SearchPlaceIndexForPosition.

    " + }, + "SearchPlaceIndexForTextRequest":{ + "type":"structure", + "required":[ + "IndexName", + "Text" + ], + "members":{ + "BiasPosition":{ + "shape":"Position", + "documentation":"

    Searches for results closest to the given position. An optional parameter defined by longitude and latitude.

    • The first bias position is the X coordinate, or longitude.

    • The second bias position is the Y coordinate, or latitude.

    For example, bias=xLongitude&bias=yLatitude.

    " + }, + "FilterBBox":{ + "shape":"BoundingBox", + "documentation":"

    Filters the results by returning only Places within the provided bounding box. An optional parameter.

    The first 2 bbox parameters describe the lower southwest corner:

    • The first bbox position is the X coordinate or longitude of the lower southwest corner.

    • The second bbox position is the Y coordinate or latitude of the lower southwest corner.

    For example, bbox=xLongitudeSW&bbox=yLatitudeSW.

    The next bbox parameters describe the upper northeast corner:

    • The third bbox position is the X coordinate, or longitude of the upper northeast corner.

    • The fourth bbox position is the Y coordinate, or latitude of the upper northeast corner.

    For example, bbox=xLongitudeNE&bbox=yLatitudeNE.

    " + }, + "FilterCountries":{ + "shape":"CountryCodeList", + "documentation":"

    Limits the search to the given list of countries/regions. An optional parameter.

    • Use the ISO 3166 3-letter country code. For example, Australia uses three upper-case characters: AUS.

    " + }, + "IndexName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Place index resource you want to use for the search.

    ", + "location":"uri", + "locationName":"IndexName" + }, + "MaxResults":{ + "shape":"PlaceIndexSearchResultLimit", + "documentation":"

    An optional parameter. The maximum number of results returned per request.

    Default value: 50

    " + }, + "Text":{ + "shape":"SyntheticSearchPlaceIndexForTextRequestString", + "documentation":"

    The address, name, city, or region to be used in the search, in free-form text format. For example, 123 Any Street.

    " + } + } + }, + "SearchPlaceIndexForTextResponse":{ + "type":"structure", + "required":[ + "Results", + "Summary" + ], + "members":{ + "Results":{ + "shape":"SearchForTextResultList", + "documentation":"

    A list of Places closest to the specified position. Each result contains additional information about the specific point of interest.

    " + }, + "Summary":{ + "shape":"SearchPlaceIndexForTextSummary", + "documentation":"

    Contains a summary of the request. Contains the BiasPosition, DataSource, FilterBBox, FilterCountries, MaxResults, ResultBBox, and Text.

    " + } + } + }, + "SearchPlaceIndexForTextSummary":{ + "type":"structure", + "required":[ + "DataSource", + "Text" + ], + "members":{ + "BiasPosition":{ + "shape":"Position", + "documentation":"

    Contains the coordinates for the bias position entered in the geocoding request.

    " + }, + "DataSource":{ + "shape":"String", + "documentation":"

    The data provider of geospatial data for the Place index resource.

    " + }, + "FilterBBox":{ + "shape":"BoundingBox", + "documentation":"

    Contains the coordinates for the optional bounding box entered in the geocoding request.

    " + }, + "FilterCountries":{ + "shape":"CountryCodeList", + "documentation":"

    Contains the country filter entered in the geocoding request.

    " + }, + "MaxResults":{ + "shape":"PlaceIndexSearchResultLimit", + "documentation":"

    Contains the maximum number of results indicated for the request.

    " + }, + "ResultBBox":{ + "shape":"BoundingBox", + "documentation":"

    The bounding box that contains the search results within the area indicated by FilterBBox. A subset of the bounding box specified using FilterBBox.

    " + }, + "Text":{ + "shape":"SyntheticSearchPlaceIndexForTextSummaryString", + "documentation":"

    The address, name, city, or region used in the geocoding request, in free-form text format. For example, Vancouver.

    " + } + }, + "documentation":"

    A summary of the geocoding request sent using SearchPlaceIndexForText.

    " + }, + "String":{"type":"string"}, + "SyntheticSearchPlaceIndexForTextRequestString":{ + "type":"string", + "max":200, + "min":1, + "sensitive":true + }, + "SyntheticSearchPlaceIndexForTextSummaryString":{ + "type":"string", + "sensitive":true + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "locationName":"message" + } + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "Timestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "Token":{ + "type":"string", + "max":2000, + "min":1 + }, + "ValidationException":{ + "type":"structure", + "required":[ + "FieldList", + "Message", + "Reason" + ], + "members":{ + "FieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    The field where the invalid entry was detected.

    ", + "locationName":"fieldList" + }, + "Message":{ + "shape":"String", + "locationName":"message" + }, + "Reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    A message with the reason for the validation exception error.

    ", + "locationName":"reason" + } + }, + "documentation":"

    The input failed to meet the constraints specified by the AWS service.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "Message", + "Name" + ], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

    A message with the reason for the validation exception error.

    ", + "locationName":"message" + }, + "Name":{ + "shape":"String", + "documentation":"

    The field name where the invalid entry was detected.

    ", + "locationName":"name" + } + }, + "documentation":"

    The input failed to meet the constraints specified by the AWS service in a specified field.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "UnknownOperation", + "Missing", + "CannotParse", + "FieldValidationFailed", + "Other" + ] + } + }, + "documentation":"

    Suite of geospatial services including Maps, Places, Tracking, and Geofencing

    " +} diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml new file mode 100644 index 000000000000..ceede45292d8 --- /dev/null +++ b/services/lookoutvision/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + lookoutvision + AWS Java SDK :: Services :: Lookout Vision + The AWS Java SDK for Lookout Vision module holds the client classes that are used for + communicating with Lookout Vision. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.lookoutvision + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/lookoutvision/src/main/resources/codegen-resources/paginators-1.json b/services/lookoutvision/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..7f89a609b333 --- /dev/null +++ b/services/lookoutvision/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListDatasetEntries": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DatasetEntries" + }, + "ListModels": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Models" + }, + "ListProjects": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Projects" + } + } +} diff --git a/services/lookoutvision/src/main/resources/codegen-resources/service-2.json b/services/lookoutvision/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..eda65e756d73 --- /dev/null +++ b/services/lookoutvision/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1570 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-11-20", + "endpointPrefix":"lookoutvision", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Lookout for Vision", + "serviceId":"LookoutVision", + "signatureVersion":"v4", + "signingName":"lookoutvision", + "uid":"lookoutvision-2020-11-20" + }, + "operations":{ + "CreateDataset":{ + "name":"CreateDataset", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects/{projectName}/datasets", + "responseCode":202 + }, + "input":{"shape":"CreateDatasetRequest"}, + "output":{"shape":"CreateDatasetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new dataset in an Amazon Lookout for Vision project. CreateDataset can create a training or a test dataset from a valid dataset source (DatasetSource).

    If you want a single dataset project, specify train for the value of DatasetType.

    To have a project with separate training and test datasets, call CreateDataset twice. On the first call, specify train for the value of DatasetType. On the second call, specify test for the value of DatasetType.
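    As a rough illustration of how this operation surfaces in the AWS SDK for Java v2 client generated from this model, the sketch below creates a training dataset from a manifest file in S3. The package, class, and builder-method names follow the SDK's standard codegen conventions (software.amazon.awssdk.services.lookoutvision); the bucket, key, and project name are placeholders.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.*;

    public class CreateTrainingDataset {
        public static void main(String[] args) {
            LookoutVisionClient client = LookoutVisionClient.builder()
                    .region(Region.US_EAST_1)
                    .build();

            // Point the dataset at a Ground Truth manifest stored in S3 (hypothetical location).
            DatasetSource source = DatasetSource.builder()
                    .groundTruthManifest(DatasetGroundTruthManifest.builder()
                            .s3Object(InputS3Object.builder()
                                    .bucket("my-manifest-bucket")
                                    .key("manifests/train.manifest")
                                    .build())
                            .build())
                    .build();

            CreateDatasetResponse response = client.createDataset(CreateDatasetRequest.builder()
                    .projectName("my-project")
                    .datasetType("train")          // "train" or "test"
                    .datasetSource(source)
                    .build());

            System.out.println("Dataset status: " + response.datasetMetadata().statusAsString());
        }
    }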

    " + }, + "CreateModel":{ + "name":"CreateModel", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects/{projectName}/models", + "responseCode":202 + }, + "input":{"shape":"CreateModelRequest"}, + "output":{"shape":"CreateModelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a new version of a model within an Amazon Lookout for Vision project. CreateModel is an asynchronous operation in which Amazon Lookout for Vision trains, tests, and evaluates a new version of a model.

    To get the current status, check the Status field returned in the response from DescribeModel.

    If the project has a single dataset, Amazon Lookout for Vision internally splits the dataset to create a training and a test dataset. If the project has a training and a test dataset, Lookout for Vision uses the respective datasets to train and test the model.

    After training completes, the evaluation metrics are stored at the location specified in OutputConfig.
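    A minimal sketch of starting training through the generated Java v2 client, assuming the SDK's standard codegen naming; the output bucket and project name are placeholders. Training is asynchronous, so the returned metadata only reflects the initial status.

    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.*;

    public class TrainModel {
        public static void main(String[] args) {
            LookoutVisionClient client = LookoutVisionClient.create();

            // Kick off training; evaluation results are written to the OutputConfig S3 location.
            CreateModelResponse created = client.createModel(CreateModelRequest.builder()
                    .projectName("my-project")
                    .description("first-version")
                    .outputConfig(OutputConfig.builder()
                            .s3Location(S3Location.builder()
                                    .bucket("my-training-output")
                                    .prefix("lookoutvision/output/")
                                    .build())
                            .build())
                    .build());

            // Training runs asynchronously; poll DescribeModel for the Status field later.
            System.out.println("New model version: " + created.modelMetadata().modelVersion()
                    + ", status: " + created.modelMetadata().statusAsString());
        }
    }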

    " + }, + "CreateProject":{ + "name":"CreateProject", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects" + }, + "input":{"shape":"CreateProjectRequest"}, + "output":{"shape":"CreateProjectResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates an empty Amazon Lookout for Vision project. After you create the project, add a dataset by calling CreateDataset.
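    For completeness, a sketch of the corresponding call through the generated Java v2 client, under the same codegen-naming assumption; the project name is a placeholder.

    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.CreateProjectRequest;
    import software.amazon.awssdk.services.lookoutvision.model.CreateProjectResponse;

    public class CreateProjectExample {
        public static void main(String[] args) {
            try (LookoutVisionClient client = LookoutVisionClient.create()) {
                CreateProjectResponse response = client.createProject(
                        CreateProjectRequest.builder()
                                .projectName("my-project")
                                .build());
                System.out.println("Project ARN: " + response.projectMetadata().projectArn());
            }
        }
    }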

    " + }, + "DeleteDataset":{ + "name":"DeleteDataset", + "http":{ + "method":"DELETE", + "requestUri":"/2020-11-20/projects/{projectName}/datasets/{datasetType}", + "responseCode":202 + }, + "input":{"shape":"DeleteDatasetRequest"}, + "output":{"shape":"DeleteDatasetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes an existing Amazon Lookout for Vision dataset.

    If your project has a single dataset, you must create a new dataset before you can create a model.

    If your project has a training dataset and a test dataset, consider the following.

    • If you delete the test dataset, your project reverts to a single dataset project. If you then train the model, Amazon Lookout for Vision internally splits the remaining dataset into a training and test dataset.

    • If you delete the training dataset, you must create a training dataset before you can create a model.

    It might take a while to delete the dataset. To check the current status, check the Status field in the response from a call to DescribeDataset.

    " + }, + "DeleteModel":{ + "name":"DeleteModel", + "http":{ + "method":"DELETE", + "requestUri":"/2020-11-20/projects/{projectName}/models/{modelVersion}", + "responseCode":202 + }, + "input":{"shape":"DeleteModelRequest"}, + "output":{"shape":"DeleteModelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes an Amazon Lookout for Vision model. You can't delete a running model. To stop a running model, use the StopModel operation.

    " + }, + "DeleteProject":{ + "name":"DeleteProject", + "http":{ + "method":"DELETE", + "requestUri":"/2020-11-20/projects/{projectName}" + }, + "input":{"shape":"DeleteProjectRequest"}, + "output":{"shape":"DeleteProjectResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes an Amazon Lookout for Vision project.

    To delete a project, you must first delete each version of the model associated with the project. To delete a model use the DeleteModel operation.

    The training and test datasets are deleted automatically for you. The images referenced by the training and test datasets aren't deleted.
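    One way to honor that ordering from the generated Java v2 client is sketched below (standard codegen naming assumed; the project name is a placeholder): page through ListModels, delete each version, then delete the project.

    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.*;

    public class DeleteProjectExample {
        public static void main(String[] args) {
            LookoutVisionClient client = LookoutVisionClient.create();
            String project = "my-project";

            // Every model version must be deleted first (stop any running version beforehand).
            // Model deletion is asynchronous, so a production caller would also wait for each
            // delete to finish before deleting the project.
            client.listModelsPaginator(ListModelsRequest.builder().projectName(project).build())
                  .models()
                  .forEach(model -> client.deleteModel(DeleteModelRequest.builder()
                          .projectName(project)
                          .modelVersion(model.modelVersion())
                          .build()));

            DeleteProjectResponse deleted = client.deleteProject(
                    DeleteProjectRequest.builder().projectName(project).build());
            System.out.println("Deleted " + deleted.projectArn());
        }
    }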

    " + }, + "DescribeDataset":{ + "name":"DescribeDataset", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects/{projectName}/datasets/{datasetType}" + }, + "input":{"shape":"DescribeDatasetRequest"}, + "output":{"shape":"DescribeDatasetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Describes an Amazon Lookout for Vision dataset.

    " + }, + "DescribeModel":{ + "name":"DescribeModel", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects/{projectName}/models/{modelVersion}" + }, + "input":{"shape":"DescribeModelRequest"}, + "output":{"shape":"DescribeModelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Describes a version of an Amazon Lookout for Vision model.

    " + }, + "DescribeProject":{ + "name":"DescribeProject", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects/{projectName}" + }, + "input":{"shape":"DescribeProjectRequest"}, + "output":{"shape":"DescribeProjectResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Describes an Amazon Lookout for Vision project.

    " + }, + "DetectAnomalies":{ + "name":"DetectAnomalies", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects/{projectName}/models/{modelVersion}/detect" + }, + "input":{"shape":"DetectAnomaliesRequest"}, + "output":{"shape":"DetectAnomaliesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Detects anomalies in an image that you supply.

    The response from DetectAnomalies includes a boolean prediction that the image contains one or more anomalies and a confidence value for the prediction.

    Before calling DetectAnomalies, you must first start your model with the StartModel operation. You are charged for the amount of time, in minutes, that a model runs and for the number of anomaly detection units that your model uses. If you are not using a model, use the StopModel operation to stop your model.
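    Because Body is a streaming payload, the generated Java v2 sync client typically takes the request plus a RequestBody. A sketch under the usual codegen-naming assumption, with a placeholder image path:

    import java.nio.file.Paths;
    import software.amazon.awssdk.core.sync.RequestBody;
    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.DetectAnomaliesRequest;
    import software.amazon.awssdk.services.lookoutvision.model.DetectAnomalyResult;

    public class DetectAnomaliesExample {
        public static void main(String[] args) {
            LookoutVisionClient client = LookoutVisionClient.create();

            // The image bytes travel as the streaming Body; ContentType is sent as a header.
            DetectAnomalyResult result = client.detectAnomalies(
                    DetectAnomaliesRequest.builder()
                            .projectName("my-project")
                            .modelVersion("1")
                            .contentType("image/jpeg")
                            .build(),
                    RequestBody.fromFile(Paths.get("widget.jpg")))
                    .detectAnomalyResult();

            System.out.printf("anomalous=%s confidence=%.3f%n",
                    result.isAnomalous(), result.confidence());
        }
    }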

    " + }, + "ListDatasetEntries":{ + "name":"ListDatasetEntries", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects/{projectName}/datasets/{datasetType}/entries" + }, + "input":{"shape":"ListDatasetEntriesRequest"}, + "output":{"shape":"ListDatasetEntriesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists the JSON Lines within a dataset. An Amazon Lookout for Vision JSON Line contains the anomaly information for a single image, including the image location and the assigned label.
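    Paired with the paginator definition added in paginators-1.json, the generated Java v2 client can iterate all matching JSON Lines without manual NextToken handling. A sketch (codegen naming assumed; project name and filters are placeholders):

    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.ListDatasetEntriesRequest;

    public class ListAnomalousEntries {
        public static void main(String[] args) {
            LookoutVisionClient client = LookoutVisionClient.create();

            // Filter to labeled entries whose anomaly class is "anomaly".
            client.listDatasetEntriesPaginator(ListDatasetEntriesRequest.builder()
                            .projectName("my-project")
                            .datasetType("train")
                            .labeled(true)
                            .anomalyClass("anomaly")
                            .build())
                  .datasetEntries()
                  .forEach(System.out::println);   // each element is one JSON Line string
        }
    }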

    " + }, + "ListModels":{ + "name":"ListModels", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects/{projectName}/models" + }, + "input":{"shape":"ListModelsRequest"}, + "output":{"shape":"ListModelsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists the versions of a model in an Amazon Lookout for Vision project.

    " + }, + "ListProjects":{ + "name":"ListProjects", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects" + }, + "input":{"shape":"ListProjectsRequest"}, + "output":{"shape":"ListProjectsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Lists the Amazon Lookout for Vision projects in your AWS account.
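    A short sketch of the paginated listing through the generated Java v2 client (same codegen-naming assumption):

    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.ListProjectsRequest;

    public class ListAllProjects {
        public static void main(String[] args) {
            LookoutVisionClient client = LookoutVisionClient.create();
            // The paginator follows NextToken across pages automatically.
            client.listProjectsPaginator(ListProjectsRequest.builder().maxResults(50).build())
                  .projects()
                  .forEach(p -> System.out.println(p.projectName() + "  " + p.creationTimestamp()));
        }
    }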

    " + }, + "StartModel":{ + "name":"StartModel", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects/{projectName}/models/{modelVersion}/start", + "responseCode":202 + }, + "input":{"shape":"StartModelRequest"}, + "output":{"shape":"StartModelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Starts running a version of an Amazon Lookout for Vision model. Starting a model takes a while to complete. To check the current state of the model, use DescribeModel.

    Once the model is running, you can detect anomalies in new images by calling DetectAnomalies.

    You are charged for the amount of time that the model is running. To stop a running model, call StopModel.
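    Starting, waiting for hosting, and stopping might look like the sketch below with the generated Java v2 client (codegen naming assumed; project name, version, and polling interval are placeholders).

    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.*;

    public class StartAndStopModel {
        public static void main(String[] args) throws InterruptedException {
            LookoutVisionClient client = LookoutVisionClient.create();

            // Billing runs while the model is hosted, so only start it when you need it.
            client.startModel(StartModelRequest.builder()
                    .projectName("my-project")
                    .modelVersion("1")
                    .minInferenceUnits(1)
                    .build());

            // Starting takes a while; poll DescribeModel until hosting has finished starting.
            ModelStatus status;
            do {
                Thread.sleep(30_000);
                status = client.describeModel(DescribeModelRequest.builder()
                        .projectName("my-project")
                        .modelVersion("1")
                        .build()).modelDescription().status();
            } while (status == ModelStatus.STARTING_HOSTING);
            System.out.println("Model status: " + status);   // HOSTED means ready for DetectAnomalies

            // ... call DetectAnomalies while the model is hosted ...

            // Stop the model when finished so it no longer accrues charges.
            client.stopModel(StopModelRequest.builder()
                    .projectName("my-project").modelVersion("1").build());
        }
    }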

    " + }, + "StopModel":{ + "name":"StopModel", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects/{projectName}/models/{modelVersion}/stop", + "responseCode":202 + }, + "input":{"shape":"StopModelRequest"}, + "output":{"shape":"StopModelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Stops a running model. The operation might take a while to complete. To check the current status, call DescribeModel.

    " + }, + "UpdateDatasetEntries":{ + "name":"UpdateDatasetEntries", + "http":{ + "method":"PATCH", + "requestUri":"/2020-11-20/projects/{projectName}/datasets/{datasetType}/entries", + "responseCode":202 + }, + "input":{"shape":"UpdateDatasetEntriesRequest"}, + "output":{"shape":"UpdateDatasetEntriesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Adds one or more JSON Line entries to a dataset. A JSON Line includes information about an image used for training or testing an Amazon Lookout for Vision model.

    Updating a dataset might take a while to complete. To check the current status, call DescribeDataset and check the Status field in the response.
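    Because Changes is a blob of newline-separated JSON Lines, the generated Java v2 builder takes SdkBytes. A sketch with a deliberately simplified, hypothetical entry (a real manifest line also carries label and metadata attributes documented in the developer guide):

    import software.amazon.awssdk.core.SdkBytes;
    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.UpdateDatasetEntriesRequest;
    import software.amazon.awssdk.services.lookoutvision.model.UpdateDatasetEntriesResponse;

    public class AddDatasetEntries {
        public static void main(String[] args) {
            LookoutVisionClient client = LookoutVisionClient.create();

            // One simplified JSON Line per image; a real entry also carries its labels.
            String jsonLines = "{\"source-ref\":\"s3://my-bucket/images/img-001.jpg\"}\n";

            UpdateDatasetEntriesResponse response = client.updateDatasetEntries(
                    UpdateDatasetEntriesRequest.builder()
                            .projectName("my-project")
                            .datasetType("train")
                            .changes(SdkBytes.fromUtf8String(jsonLines))
                            .build());

            // The update is asynchronous; DescribeDataset reports when it completes.
            System.out.println("Update status: " + response.statusAsString());
        }
    }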

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionString"} + }, + "documentation":"

    You are not authorized to perform the action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AnomalyClassFilter":{ + "type":"string", + "max":10, + "min":1, + "pattern":"(normal|anomaly)" + }, + "Boolean":{"type":"boolean"}, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"ExceptionString"}, + "ResourceId":{ + "shape":"ExceptionString", + "documentation":"

    The ID of the resource.

    " + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of the resource.

    " + } + }, + "documentation":"

    The update or deletion of a resource caused an inconsistent state.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ContentType":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, + "CreateDatasetRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "DatasetType" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project in which you want to create a dataset.

    ", + "location":"uri", + "locationName":"projectName" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

    The type of the dataset. Specify train for a training dataset. Specify test for a test dataset.

    " + }, + "DatasetSource":{ + "shape":"DatasetSource", + "documentation":"

    The location of the manifest file that Amazon Lookout for Vision uses to create the dataset.

    If you don't specify DatasetSource, an empty dataset is created and the operation synchronously returns. Later, you can add JSON Lines by calling UpdateDatasetEntries.

    If you specify a value for DatasetSource, the manifest at the S3 location is validated and used to create the dataset. The call to CreateDataset is asynchronous and might take a while to complete. To find out the current status, check the value of Status returned in a call to DescribeDataset.

    " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    ClientToken is an idempotency token that ensures a call to CreateDataset completes only once. You choose the value to pass. For example, an issue, such as a network outage, might prevent you from getting a response from CreateDataset. In this case, safely retry your call to CreateDataset by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to CreateDataset. An idempotency token is active for 8 hours.
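    A sketch of what that retry pattern looks like from the generated Java v2 client (codegen naming assumed): generate a token once and reuse it if the first attempt fails with an unknown outcome.

    import java.util.UUID;
    import software.amazon.awssdk.core.exception.SdkClientException;
    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.CreateDatasetRequest;

    public class IdempotentCreateDataset {
        public static void main(String[] args) {
            LookoutVisionClient client = LookoutVisionClient.create();

            // Reusing the same ClientToken turns a retry into a no-op instead of a second create.
            String token = UUID.randomUUID().toString();
            CreateDatasetRequest request = CreateDatasetRequest.builder()
                    .projectName("my-project")
                    .datasetType("train")
                    .clientToken(token)
                    .build();
            try {
                client.createDataset(request);
            } catch (SdkClientException e) {
                // For example, a network timeout with an unknown outcome: retry with the SAME token.
                client.createDataset(request);
            }
        }
    }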

    ", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "CreateDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetMetadata":{ + "shape":"DatasetMetadata", + "documentation":"

    Information about the dataset.

    " + } + } + }, + "CreateModelRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "OutputConfig" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project in which you want to create a model version.

    ", + "location":"uri", + "locationName":"projectName" + }, + "Description":{ + "shape":"ModelDescription", + "documentation":"

    A description for the version of the model.

    " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    ClientToken is an idempotency token that ensures a call to CreateModel completes only once. You choose the value to pass. For example, an issue, such as a network outage, might prevent you from getting a response from CreateModel. In this case, safely retry your call to CreateModel by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to CreateModel. An idempotency token is active for 8 hours.

    ", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + }, + "OutputConfig":{ + "shape":"OutputConfig", + "documentation":"

    The location where Amazon Lookout for Vision saves the training results.

    " + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

    The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for encrypting the model. If this parameter is not specified, the model is encrypted by a key that AWS owns and manages.

    " + } + } + }, + "CreateModelResponse":{ + "type":"structure", + "members":{ + "ModelMetadata":{ + "shape":"ModelMetadata", + "documentation":"

    The response from a call to CreateModel.

    " + } + } + }, + "CreateProjectRequest":{ + "type":"structure", + "required":["ProjectName"], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    A name for the project.

    " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    ClientToken is an idempotency token that ensures a call to CreateProject completes only once. You choose the value to pass. For example, an issue, such as a network outage, might prevent you from getting a response from CreateProject. In this case, safely retry your call to CreateProject by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to CreateProject. An idempotency token is active for 8 hours.

    ", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "CreateProjectResponse":{ + "type":"structure", + "members":{ + "ProjectMetadata":{ + "shape":"ProjectMetadata", + "documentation":"

    Information about the project.

    " + } + } + }, + "DatasetChanges":{ + "type":"blob", + "max":10485760, + "min":1 + }, + "DatasetDescription":{ + "type":"structure", + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that contains the dataset.

    " + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

    The type of the dataset. The value train represents a training dataset or single dataset project. The value test represents a test dataset.

    " + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

    The Unix timestamp for the time and date that the dataset was created.

    " + }, + "LastUpdatedTimestamp":{ + "shape":"DateTime", + "documentation":"

    The Unix timestamp for the date and time that the dataset was last updated.

    " + }, + "Status":{ + "shape":"DatasetStatus", + "documentation":"

    The status of the dataset.

    " + }, + "StatusMessage":{ + "shape":"DatasetStatusMessage", + "documentation":"

    The status message for the dataset.

    " + }, + "ImageStats":{ + "shape":"DatasetImageStats", + "documentation":"

    " + } + }, + "documentation":"

    The description for a dataset. For more information, see DescribeDataset.

    " + }, + "DatasetEntry":{ + "type":"string", + "max":8192, + "min":2, + "pattern":"^\\{.*\\}$" + }, + "DatasetEntryList":{ + "type":"list", + "member":{"shape":"DatasetEntry"} + }, + "DatasetGroundTruthManifest":{ + "type":"structure", + "members":{ + "S3Object":{ + "shape":"InputS3Object", + "documentation":"

    The S3 bucket location for the manifest file.

    " + } + }, + "documentation":"

    Location information about a manifest file. You can use a manifest file to create a dataset.

    " + }, + "DatasetImageStats":{ + "type":"structure", + "members":{ + "Total":{ + "shape":"Integer", + "documentation":"

    The total number of images in the dataset.

    " + }, + "Labeled":{ + "shape":"Integer", + "documentation":"

    The total number of labeled images.

    " + }, + "Normal":{ + "shape":"Integer", + "documentation":"

    The total number of images labeled as normal.

    " + }, + "Anomaly":{ + "shape":"Integer", + "documentation":"

    The total number of images labeled as an anomaly.

    " + } + }, + "documentation":"

    Statistics about the images in a dataset.

    " + }, + "DatasetMetadata":{ + "type":"structure", + "members":{ + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

    The type of the dataset.

    " + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

    The Unix timestamp for the date and time that the dataset was created.

    " + }, + "Status":{ + "shape":"DatasetStatus", + "documentation":"

    The status for the dataset.

    " + }, + "StatusMessage":{ + "shape":"DatasetStatusMessage", + "documentation":"

    The status message for the dataset.

    " + } + }, + "documentation":"

    Summary information for an Amazon Lookout for Vision dataset.

    " + }, + "DatasetMetadataList":{ + "type":"list", + "member":{"shape":"DatasetMetadata"} + }, + "DatasetSource":{ + "type":"structure", + "members":{ + "GroundTruthManifest":{ + "shape":"DatasetGroundTruthManifest", + "documentation":"

    Location information for the manifest file.

    " + } + }, + "documentation":"

    Information about the location of a manifest file that Amazon Lookout for Vision uses to create a dataset.

    " + }, + "DatasetStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_COMPLETE", + "CREATE_FAILED", + "UPDATE_IN_PROGRESS", + "UPDATE_COMPLETE", + "UPDATE_FAILED_ROLLBACK_IN_PROGRESS", + "UPDATE_FAILED_ROLLBACK_COMPLETE", + "DELETE_IN_PROGRESS", + "DELETE_COMPLETE", + "DELETE_FAILED" + ] + }, + "DatasetStatusMessage":{"type":"string"}, + "DatasetType":{ + "type":"string", + "max":10, + "min":1, + "pattern":"train|test" + }, + "DateTime":{"type":"timestamp"}, + "DeleteDatasetRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "DatasetType" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that contains the dataset that you want to delete.

    ", + "location":"uri", + "locationName":"projectName" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

    The type of the dataset to delete. Specify train to delete the training dataset. Specify test to delete the test dataset. To delete the dataset in a single dataset project, specify train.

    ", + "location":"uri", + "locationName":"datasetType" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    ClientToken is an idempotency token that ensures a call to DeleteDataset completes only once. You choose the value to pass. For example, an issue, such as a network outage, might prevent you from getting a response from DeleteDataset. In this case, safely retry your call to DeleteDataset by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to DeleteDataset. An idempotency token is active for 8 hours.

    ", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "DeleteDatasetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteModelRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "ModelVersion" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that contains the model that you want to delete.

    ", + "location":"uri", + "locationName":"projectName" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

    The version of the model that you want to delete.

    ", + "location":"uri", + "locationName":"modelVersion" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    ClientToken is an idempotency token that ensures a call to DeleteModel completes only once. You choose the value to pass. For example, an issue, such as a network outage, might prevent you from getting a response from DeleteModel. In this case, safely retry your call to DeleteModel by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to DeleteModel. An idempotency token is active for 8 hours.

    ", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "DeleteModelResponse":{ + "type":"structure", + "members":{ + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

    The Amazon Resource Name (ARN) of the model that was deleted.

    " + } + } + }, + "DeleteProjectRequest":{ + "type":"structure", + "required":["ProjectName"], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project to delete.

    ", + "location":"uri", + "locationName":"projectName" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    ClientToken is an idempotency token that ensures a call to DeleteProject completes only once. You choose the value to pass. For example, an issue, such as a network outage, might prevent you from getting a response from DeleteProject. In this case, safely retry your call to DeleteProject by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to DeleteProject. An idempotency token is active for 8 hours.

    ", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "DeleteProjectResponse":{ + "type":"structure", + "members":{ + "ProjectArn":{ + "shape":"ProjectArn", + "documentation":"

    The Amazon Resource Name (ARN) of the project that was deleted.

    " + } + } + }, + "DescribeDatasetRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "DatasetType" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that contains the dataset that you want to describe.

    ", + "location":"uri", + "locationName":"projectName" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

    The type of the dataset to describe. Specify train to describe the training dataset. Specify test to describe the test dataset. If you have a single dataset project, specify train.

    ", + "location":"uri", + "locationName":"datasetType" + } + } + }, + "DescribeDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetDescription":{ + "shape":"DatasetDescription", + "documentation":"

    The description of the requested dataset.

    " + } + } + }, + "DescribeModelRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "ModelVersion" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The project that contains the version of a model that you want to describe.

    ", + "location":"uri", + "locationName":"projectName" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

    The version of the model that you want to describe.

    ", + "location":"uri", + "locationName":"modelVersion" + } + } + }, + "DescribeModelResponse":{ + "type":"structure", + "members":{ + "ModelDescription":{ + "shape":"ModelDescription", + "documentation":"

    Contains the description of the model.

    " + } + } + }, + "DescribeProjectRequest":{ + "type":"structure", + "required":["ProjectName"], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that you want to describe.

    ", + "location":"uri", + "locationName":"projectName" + } + } + }, + "DescribeProjectResponse":{ + "type":"structure", + "members":{ + "ProjectDescription":{ + "shape":"ProjectDescription", + "documentation":"

    The description of the project.

    " + } + } + }, + "DetectAnomaliesRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "ModelVersion", + "Body", + "ContentType" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that contains the model version that you want to use.

    ", + "location":"uri", + "locationName":"projectName" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

    The version of the model that you want to use.

    ", + "location":"uri", + "locationName":"modelVersion" + }, + "Body":{ + "shape":"Stream", + "documentation":"

    The unencrypted image bytes that you want to analyze.

    " + }, + "ContentType":{ + "shape":"ContentType", + "documentation":"

    The type of the image passed in Body. Valid values are image/png (PNG format images) and image/jpeg (JPG format images).

    ", + "location":"header", + "locationName":"content-type" + } + }, + "payload":"Body" + }, + "DetectAnomaliesResponse":{ + "type":"structure", + "members":{ + "DetectAnomalyResult":{ + "shape":"DetectAnomalyResult", + "documentation":"

    The results of the DetectAnomalies operation.

    " + } + } + }, + "DetectAnomalyResult":{ + "type":"structure", + "members":{ + "Source":{ + "shape":"ImageSource", + "documentation":"

    The source of the image that was analyzed. direct means that the image was supplied from the local computer. No other values are supported.

    " + }, + "IsAnomalous":{ + "shape":"Boolean", + "documentation":"

    True if the image contains an anomaly, otherwise false.

    " + }, + "Confidence":{ + "shape":"Float", + "documentation":"

    The confidence that Amazon Lookout for Vision has in the accuracy of the prediction.

    " + } + }, + "documentation":"

    The prediction results from a call to DetectAnomalies.

    " + }, + "ExceptionString":{"type":"string"}, + "Float":{"type":"float"}, + "ImageSource":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"ImageSourceType", + "documentation":"

    The type of the image.

    " + } + }, + "documentation":"

    The source for an image.

    " + }, + "ImageSourceType":{ + "type":"string", + "pattern":"direct" + }, + "InferenceUnits":{ + "type":"integer", + "min":1 + }, + "InputS3Object":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"S3BucketName", + "documentation":"

    The Amazon S3 bucket that contains the manifest.

    " + }, + "Key":{ + "shape":"S3ObjectKey", + "documentation":"

    The name and location of the manifest file within the bucket.

    " + }, + "VersionId":{ + "shape":"S3ObjectVersion", + "documentation":"

    The version ID of the manifest file object.

    " + } + }, + "documentation":"

    Amazon S3 Location information for an input manifest file.

    " + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionString"}, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

    The period of time, in seconds, before the operation can be retried.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    Amazon Lookout for Vision experienced a service issue. Try your call again.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "IsLabeled":{"type":"boolean"}, + "KmsKeyId":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$" + }, + "ListDatasetEntriesRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "DatasetType" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that contains the dataset that you want to list.

    ", + "location":"uri", + "locationName":"projectName" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

    The type of the dataset that you want to list. Specify train to list the training dataset. Specify test to list the test dataset. If you have a single dataset project, specify train.

    ", + "location":"uri", + "locationName":"datasetType" + }, + "Labeled":{ + "shape":"IsLabeled", + "documentation":"

    Specify true to include labeled entries, otherwise specify false. If you don't specify a value, Lookout for Vision returns all entries.

    ", + "location":"querystring", + "locationName":"labeled" + }, + "AnomalyClass":{ + "shape":"AnomalyClassFilter", + "documentation":"

    Specify normal to include only normal images. Specify anomaly to only include anomalous entries. If you don't specify a value, Amazon Lookout for Vision returns normal and anomalous images.

    ", + "location":"querystring", + "locationName":"anomalyClass" + }, + "BeforeCreationDate":{ + "shape":"DateTime", + "documentation":"

    Only includes entries before the specified date in the response. For example, 2020-06-23T00:00:00.

    ", + "location":"querystring", + "locationName":"createdBefore" + }, + "AfterCreationDate":{ + "shape":"DateTime", + "documentation":"

    Only includes entries after the specified date in the response. For example, 2020-06-23T00:00:00.

    ", + "location":"querystring", + "locationName":"createdAfter" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    If the previous response was incomplete (because there is more data to retrieve), Amazon Lookout for Vision returns a pagination token in the response. You can use this pagination token to retrieve the next set of dataset entries.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of results to return per paginated call. The largest value you can specify is 100. If you specify a value greater than 100, a ValidationException error occurs. The default value is 100.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "SourceRefContains":{ + "shape":"QueryString", + "documentation":"

    Perform a \"contains\" search on the values of the source-ref key within the dataset. For example a value of \"IMG_17\" returns all JSON Lines where the source-ref key value matches *IMG_17*.

    ", + "location":"querystring", + "locationName":"sourceRefContains" + } + } + }, + "ListDatasetEntriesResponse":{ + "type":"structure", + "members":{ + "DatasetEntries":{ + "shape":"DatasetEntryList", + "documentation":"

    A list of the entries (JSON Lines) within the dataset.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    If the response is truncated, Amazon Lookout for Vision returns this token that you can use in the subsequent request to retrieve the next set of dataset entries.

    " + } + } + }, + "ListModelsRequest":{ + "type":"structure", + "required":["ProjectName"], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that contains the model versions that you want to list.

    ", + "location":"uri", + "locationName":"projectName" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    If the previous response was incomplete (because there is more data to retrieve), Amazon Lookout for Vision returns a pagination token in the response. You can use this pagination token to retrieve the next set of models.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of results to return per paginated call. The largest value you can specify is 100. If you specify a value greater than 100, a ValidationException error occurs. The default value is 100.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListModelsResponse":{ + "type":"structure", + "members":{ + "Models":{ + "shape":"ModelMetadataList", + "documentation":"

    A list of model versions in the specified project.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    If the response is truncated, Amazon Lookout for Vision returns this token that you can use in the subsequent request to retrieve the next set of models.

    " + } + } + }, + "ListProjectsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    If the previous response was incomplete (because there is more data to retrieve), Amazon Lookout for Vision returns a pagination token in the response. You can use this pagination token to retrieve the next set of projects.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of results to return per paginated call. The largest value you can specify is 100. If you specify a value greater than 100, a ValidationException error occurs. The default value is 100.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProjectsResponse":{ + "type":"structure", + "members":{ + "Projects":{ + "shape":"ProjectMetadataList", + "documentation":"

    A list of projects in your AWS account.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    If the response is truncated, Amazon Lookout for Vision returns this token that you can use in the subsequent request to retrieve the next set of projects.

    " + } + } + }, + "ModelArn":{"type":"string"}, + "ModelDescription":{ + "type":"structure", + "members":{ + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

    The version of the model.

    " + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

    The Amazon Resource Name (ARN) of the model.

    " + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

    The Unix timestamp for the date and time that the model was created.

    " + }, + "Description":{ + "shape":"ModelDescriptionMessage", + "documentation":"

    The description for the model.

    " + }, + "Status":{ + "shape":"ModelStatus", + "documentation":"

    The status of the model.

    " + }, + "StatusMessage":{ + "shape":"ModelStatusMessage", + "documentation":"

    The status message for the model.

    " + }, + "Performance":{ + "shape":"ModelPerformance", + "documentation":"

    Performance metrics for the model. Created during training.

    " + }, + "OutputConfig":{ + "shape":"OutputConfig", + "documentation":"

    The S3 location where Amazon Lookout for Vision saves model training files.

    " + }, + "EvaluationManifest":{ + "shape":"OutputS3Object", + "documentation":"

    The S3 location where Amazon Lookout for Vision saves the manifest file that was used to test the trained model and generate the performance scores.

    " + }, + "EvaluationResult":{ + "shape":"OutputS3Object", + "documentation":"

    The S3 location where Amazon Lookout for Vision saves the performance metrics.

    " + }, + "EvaluationEndTimestamp":{ + "shape":"DateTime", + "documentation":"

    The Unix timestamp for the date and time that the evaluation ended.

    " + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

    The identifier for the AWS Key Management Service (AWS KMS) key that was used to encrypt the model during training.

    " + } + }, + "documentation":"

    Describes an Amazon Lookout for Vision model.

    " + }, + "ModelDescriptionMessage":{ + "type":"string", + "max":500, + "min":1, + "pattern":"[0-9A-Za-z\\.\\-_]*" + }, + "ModelHostingStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "STARTING", + "STOPPED", + "FAILED" + ] + }, + "ModelMetadata":{ + "type":"structure", + "members":{ + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

    The Unix timestamp for the date and time that the model was created.

    " + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

    The version of the model.

    " + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

    The Amazon Resource Name (ARN) of the model.

    " + }, + "Description":{ + "shape":"ModelDescriptionMessage", + "documentation":"

    The description for the model.

    " + }, + "Status":{ + "shape":"ModelStatus", + "documentation":"

    The status of the model.

    " + }, + "StatusMessage":{ + "shape":"ModelStatusMessage", + "documentation":"

    The status message for the model.

    " + }, + "Performance":{ + "shape":"ModelPerformance", + "documentation":"

    Performance metrics for the model. Created during training.

    " + } + }, + "documentation":"

    Describes an Amazon Lookout for Vision model.

    " + }, + "ModelMetadataList":{ + "type":"list", + "member":{"shape":"ModelMetadata"} + }, + "ModelPerformance":{ + "type":"structure", + "members":{ + "F1Score":{ + "shape":"Float", + "documentation":"

    The overall F1 score metric for the trained model.

    " + }, + "Recall":{ + "shape":"Float", + "documentation":"

    The overall recall metric value for the trained model.

    " + }, + "Precision":{ + "shape":"Float", + "documentation":"

    The overall precision metric value for the trained model.

    " + } + }, + "documentation":"

    Information about the evaluation performance of a trained model.

    " + }, + "ModelStatus":{ + "type":"string", + "enum":[ + "TRAINING", + "TRAINED", + "TRAINING_FAILED", + "STARTING_HOSTING", + "HOSTED", + "HOSTING_FAILED", + "STOPPING_HOSTING", + "SYSTEM_UPDATING", + "DELETING" + ] + }, + "ModelStatusMessage":{"type":"string"}, + "ModelVersion":{ + "type":"string", + "max":10, + "min":1, + "pattern":"([1-9][0-9]*|latest)" + }, + "OutputConfig":{ + "type":"structure", + "required":["S3Location"], + "members":{ + "S3Location":{ + "shape":"S3Location", + "documentation":"

    The S3 location for the output.

    " + } + }, + "documentation":"

    The S3 location where Amazon Lookout for Vision saves model training files.

    " + }, + "OutputS3Object":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"S3BucketName", + "documentation":"

    The bucket that contains the training output.

    " + }, + "Key":{ + "shape":"S3ObjectKey", + "documentation":"

    The location of the training output in the bucket.

    " + } + }, + "documentation":"

    The S3 location where Amazon Lookout for Vision saves training output.

    " + }, + "PageSize":{ + "type":"integer", + "max":100, + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":2048, + "pattern":"^[a-zA-Z0-9\\/\\+\\=]{0,2048}$" + }, + "ProjectArn":{"type":"string"}, + "ProjectDescription":{ + "type":"structure", + "members":{ + "ProjectArn":{ + "shape":"ProjectArn", + "documentation":"

    The Amazon Resource Name (ARN) of the project.

    " + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project.

    " + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

    The Unix timestamp for the date and time that the project was created.

    " + }, + "Datasets":{ + "shape":"DatasetMetadataList", + "documentation":"

    A list of datasets in the project.

    " + } + }, + "documentation":"

    Describes an Amazon Lookout for Vision project. For more information, see DescribeProject.

    " + }, + "ProjectMetadata":{ + "type":"structure", + "members":{ + "ProjectArn":{ + "shape":"ProjectArn", + "documentation":"

    The Amazon Resource Name (ARN) of the project.

    " + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project.

    " + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

    The Unix timestamp for the date and time that the project was created.

    " + } + }, + "documentation":"

    Metadata about an Amazon Lookout for Vision project.

    " + }, + "ProjectMetadataList":{ + "type":"list", + "member":{"shape":"ProjectMetadata"} + }, + "ProjectName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_\\-]*" + }, + "QueryString":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".*\\S.*" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"ExceptionString"}, + "ResourceId":{ + "shape":"ExceptionString", + "documentation":"

    The ID of the resource.

    " + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of the resource.

    " + } + }, + "documentation":"

    The resource could not be found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "PROJECT", + "DATASET", + "MODEL", + "TRIAL" + ] + }, + "RetryAfterSeconds":{"type":"integer"}, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"[0-9A-Za-z\\.\\-_]*" + }, + "S3KeyPrefix":{ + "type":"string", + "max":1024, + "pattern":"^([a-zA-Z0-9!_.*'()-][/a-zA-Z0-9!_.*'()-]*)?$" + }, + "S3Location":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"S3BucketName", + "documentation":"

    The S3 bucket that contains the manifest file.

    " + }, + "Prefix":{ + "shape":"S3KeyPrefix", + "documentation":"

    The path and name of the manifest file within the S3 bucket.

    " + } + }, + "documentation":"

    Information about the location of a manifest file.

    " + }, + "S3ObjectKey":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^([a-zA-Z0-9!_.*'()-][/a-zA-Z0-9!_.*'()-]*)?$" + }, + "S3ObjectVersion":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "Message", + "QuotaCode", + "ServiceCode" + ], + "members":{ + "Message":{"shape":"ExceptionString"}, + "ResourceId":{ + "shape":"ExceptionString", + "documentation":"

    The ID of the resource.

    " + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of the resource.

    " + }, + "QuotaCode":{ + "shape":"ExceptionString", + "documentation":"

    The quota code.

    " + }, + "ServiceCode":{ + "shape":"ExceptionString", + "documentation":"

    The service code.

    " + } + }, + "documentation":"

    A service quota was exceeded. For more information, see Limits in Amazon Lookout for Vision in the Amazon Lookout for Vision Developer Guide.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "StartModelRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "ModelVersion", + "MinInferenceUnits" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that contains the model that you want to start.

    ", + "location":"uri", + "locationName":"projectName" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

    The version of the model that you want to start.

    ", + "location":"uri", + "locationName":"modelVersion" + }, + "MinInferenceUnits":{ + "shape":"InferenceUnits", + "documentation":"

    The minimum number of inference units to use. A single inference unit represents 1 hour of processing and can support up to 5 transactions per second (TPS). Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use.

    " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    ClientToken is an idempotency token that ensures a call to StartModel completes only once. You choose the value to pass. For example, an issue, such as a network outage, might prevent you from getting a response from StartModel. In this case, safely retry your call to StartModel by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to StartModel. An idempotency token is active for 8 hours.

    ", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "StartModelResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"ModelHostingStatus", + "documentation":"

    The current running status of the model.

    " + } + } + }, + "StopModelRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "ModelVersion" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that contains the model that you want to stop.

    ", + "location":"uri", + "locationName":"projectName" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

    The version of the model that you want to stop.

    ", + "location":"uri", + "locationName":"modelVersion" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    ClientToken is an idempotency token that ensures a call to StopModel completes only once. You choose the value to pass. For example, an issue, such as a network outage, might prevent you from getting a response from StopModel. In this case, safely retry your call to StopModel by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to StopModel. An idempotency token is active for 8 hours.

    ", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "StopModelResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"ModelHostingStatus", + "documentation":"

    The status of the model.

    " + } + } + }, + "Stream":{ + "type":"blob", + "requiresLength":true, + "streaming":true + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionString"}, + "QuotaCode":{ + "shape":"ExceptionString", + "documentation":"

    The quota code.

    " + }, + "ServiceCode":{ + "shape":"ExceptionString", + "documentation":"

    The service code.

    " + }, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

    The period of time, in seconds, before the operation can be retried.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    Amazon Lookout for Vision is temporarily unable to process the request. Try your call again.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "UpdateDatasetEntriesRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "DatasetType", + "Changes" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

    The name of the project that contains the dataset that you want to update.

    ", + "location":"uri", + "locationName":"projectName" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

    The type of the dataset that you want to update. Specify train to update the training dataset. Specify test to update the test dataset. If you have a single dataset project, specify train.

    ", + "location":"uri", + "locationName":"datasetType" + }, + "Changes":{ + "shape":"DatasetChanges", + "documentation":"

    The entries to add to the dataset.

    " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    ClientToken is an idempotency token that ensures a call to UpdateDatasetEntries completes only once. You choose the value to pass. For example, an issue, such as a network outage, might prevent you from getting a response from UpdateDatasetEntries. In this case, safely retry your call to UpdateDatasetEntries by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to UpdateDatasetEntries. An idempotency token is active for 8 hours.

    ", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "UpdateDatasetEntriesResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"DatasetStatus", + "documentation":"

    The status of the dataset update.

    " + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionString"} + }, + "documentation":"

    An input validation error occurred. For example, there might be invalid characters in a project name, or a pagination token might be invalid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

    This is the Amazon Lookout for Vision API Reference. It provides descriptions of actions, data types, common parameters, and common errors.

    Amazon Lookout for Vision enables you to find visual defects in industrial products, accurately and at scale. It uses computer vision to identify missing components in an industrial product, damage to vehicles or structures, irregularities in production lines, and even minuscule defects in silicon wafers, or any other physical item where quality is important, such as a missing capacitor on printed circuit boards.

    " +} diff --git a/services/machinelearning/build.properties b/services/machinelearning/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/machinelearning/build.properties +++ b/services/machinelearning/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index 72ea653eb6cb..6f1946e2fbf0 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + macie2 + AWS Java SDK :: Services :: Macie2 + The AWS Java SDK for Macie2 module holds the client classes that are used for + communicating with Macie2. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.macie2 + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/macie2/src/main/resources/codegen-resources/paginators-1.json b/services/macie2/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..0ab359873da1 --- /dev/null +++ b/services/macie2/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,58 @@ +{ + "pagination": { + "DescribeBuckets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "buckets" + }, + "GetUsageStatistics": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "records" + }, + "ListClassificationJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, + "ListCustomDataIdentifiers": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, + "ListFindings": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "findingIds" + }, + "ListFindingsFilters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "findingsFilterListItems" + }, + "ListInvitations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "invitations" + }, + "ListMembers": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "members" + }, + "ListOrganizationAdminAccounts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "adminAccounts" + } + } +} diff --git a/services/macie2/src/main/resources/codegen-resources/service-2.json b/services/macie2/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..0d2b1dfc7109 --- /dev/null +++ b/services/macie2/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,6911 @@ +{ + "metadata": { + "apiVersion": "2020-01-01", + "endpointPrefix": "macie2", + "signingName": "macie2", + "serviceFullName": "Amazon Macie 2", + "serviceId": "Macie2", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "macie2-2020-01-01", + 
"signatureVersion": "v4" + }, + "operations": { + "AcceptInvitation": { + "name": "AcceptInvitation", + "http": { + "method": "POST", + "requestUri": "/invitations/accept", + "responseCode": 200 + }, + "input": { + "shape": "AcceptInvitationRequest" + }, + "output": { + "shape": "AcceptInvitationResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Accepts an Amazon Macie membership invitation that was received from a specific account.
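    For illustration only: assuming the standard v2 codegen conventions for this model (a generated Macie2Client in software.amazon.awssdk.services.macie2 and builder-based request classes), a member account might accept an invitation roughly as follows. The invitationId and masterAccount member names are assumptions; the AcceptInvitationRequest shape members are not shown in this excerpt.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.macie2.Macie2Client;
    import software.amazon.awssdk.services.macie2.model.AcceptInvitationRequest;

    public class AcceptMacieInvitation {
        public static void main(String[] args) {
            // Client for the member account that received the invitation.
            try (Macie2Client macie2 = Macie2Client.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                // invitationId and masterAccount are assumed member names and the
                // values below are placeholders.
                AcceptInvitationRequest request = AcceptInvitationRequest.builder()
                        .invitationId("invitation-id-placeholder")
                        .masterAccount("111122223333")
                        .build();
                macie2.acceptInvitation(request);
                System.out.println("Accepted the Macie membership invitation.");
            }
        }
    }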

    " + }, + "BatchGetCustomDataIdentifiers": { + "name": "BatchGetCustomDataIdentifiers", + "http": { + "method": "POST", + "requestUri": "/custom-data-identifiers/get", + "responseCode": 200 + }, + "input": { + "shape": "BatchGetCustomDataIdentifiersRequest" + }, + "output": { + "shape": "BatchGetCustomDataIdentifiersResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves information about one or more custom data identifiers.

    " + }, + "CreateClassificationJob": { + "name": "CreateClassificationJob", + "http": { + "method": "POST", + "requestUri": "/jobs", + "responseCode": 200 + }, + "input": { + "shape": "CreateClassificationJobRequest" + }, + "output": { + "shape": "CreateClassificationJobResponse", + "documentation": "

    The request succeeded. The specified job was created.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Creates and defines the settings for a classification job.

    " + }, + "CreateCustomDataIdentifier": { + "name": "CreateCustomDataIdentifier", + "http": { + "method": "POST", + "requestUri": "/custom-data-identifiers", + "responseCode": 200 + }, + "input": { + "shape": "CreateCustomDataIdentifierRequest" + }, + "output": { + "shape": "CreateCustomDataIdentifierResponse", + "documentation": "

    The request succeeded. The specified custom data identifier was created.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Creates and defines the criteria and other settings for a custom data identifier.
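    A minimal, hedged sketch of defining a regex-based custom data identifier with the generated client; name, description, regex, keywords, maximumMatchDistance, and customDataIdentifierId are assumed member names that should be checked against the generated model classes.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.macie2.Macie2Client;
    import software.amazon.awssdk.services.macie2.model.CreateCustomDataIdentifierRequest;
    import software.amazon.awssdk.services.macie2.model.CreateCustomDataIdentifierResponse;

    public class CreateCustomDataIdentifierExample {
        public static void main(String[] args) {
            try (Macie2Client macie2 = Macie2Client.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                // Member names below are assumptions; the request shape members
                // are not shown in this excerpt.
                CreateCustomDataIdentifierRequest request = CreateCustomDataIdentifierRequest.builder()
                        .name("EmployeeIdIdentifier")
                        .description("Matches internal employee IDs")
                        .regex("EMP-[0-9]{6}")
                        .keywords("employee", "badge")
                        .maximumMatchDistance(30)
                        .build();
                CreateCustomDataIdentifierResponse response = macie2.createCustomDataIdentifier(request);
                // customDataIdentifierId is the assumed name of the returned identifier.
                System.out.println("Created custom data identifier: " + response.customDataIdentifierId());
            }
        }
    }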

    " + }, + "CreateFindingsFilter": { + "name": "CreateFindingsFilter", + "http": { + "method": "POST", + "requestUri": "/findingsfilters", + "responseCode": 200 + }, + "input": { + "shape": "CreateFindingsFilterRequest" + }, + "output": { + "shape": "CreateFindingsFilterResponse" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Creates and defines the criteria and other settings for a findings filter.

    " + }, + "CreateInvitations": { + "name": "CreateInvitations", + "http": { + "method": "POST", + "requestUri": "/invitations", + "responseCode": 200 + }, + "input": { + "shape": "CreateInvitationsRequest" + }, + "output": { + "shape": "CreateInvitationsResponse", + "documentation": "

    The request succeeded. Processing might not be complete.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Sends an Amazon Macie membership invitation to one or more accounts.
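    A hedged sketch of inviting another account from the administering account; accountIds, message, and unprocessedAccounts are assumed member names, and the account ID is a placeholder.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.macie2.Macie2Client;
    import software.amazon.awssdk.services.macie2.model.CreateInvitationsRequest;
    import software.amazon.awssdk.services.macie2.model.CreateInvitationsResponse;

    public class CreateInvitationsExample {
        public static void main(String[] args) {
            try (Macie2Client macie2 = Macie2Client.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                // accountIds and message are assumed member names.
                CreateInvitationsResponse response = macie2.createInvitations(
                        CreateInvitationsRequest.builder()
                                .accountIds("444455556666")
                                .message("Please join our Macie organization.")
                                .build());
                // unprocessedAccounts() is the assumed name of the list describing
                // invitations that could not be sent.
                System.out.println("Accounts not processed: " + response.unprocessedAccounts().size());
            }
        }
    }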

    " + }, + "CreateMember": { + "name": "CreateMember", + "http": { + "method": "POST", + "requestUri": "/members", + "responseCode": 200 + }, + "input": { + "shape": "CreateMemberRequest" + }, + "output": { + "shape": "CreateMemberResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Associates an account with an Amazon Macie master account.

    " + }, + "CreateSampleFindings": { + "name": "CreateSampleFindings", + "http": { + "method": "POST", + "requestUri": "/findings/sample", + "responseCode": 200 + }, + "input": { + "shape": "CreateSampleFindingsRequest" + }, + "output": { + "shape": "CreateSampleFindingsResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Creates sample findings.
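    A minimal sketch, assuming the generated Macie2Client and an empty CreateSampleFindingsRequest (the optional list of finding types is omitted, so Macie would choose the sample types itself):

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.macie2.Macie2Client;
    import software.amazon.awssdk.services.macie2.model.CreateSampleFindingsRequest;

    public class CreateSampleFindingsExample {
        public static void main(String[] args) {
            try (Macie2Client macie2 = Macie2Client.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                // An empty request is assumed to be valid here; any optional
                // findingTypes member is simply left unset.
                macie2.createSampleFindings(CreateSampleFindingsRequest.builder().build());
                System.out.println("Requested sample findings.");
            }
        }
    }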

    " + }, + "DeclineInvitations": { + "name": "DeclineInvitations", + "http": { + "method": "POST", + "requestUri": "/invitations/decline", + "responseCode": 200 + }, + "input": { + "shape": "DeclineInvitationsRequest" + }, + "output": { + "shape": "DeclineInvitationsResponse", + "documentation": "

    The request succeeded. Processing might not be complete.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Declines Amazon Macie membership invitations that were received from specific accounts.

    " + }, + "DeleteCustomDataIdentifier": { + "name": "DeleteCustomDataIdentifier", + "http": { + "method": "DELETE", + "requestUri": "/custom-data-identifiers/{id}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteCustomDataIdentifierRequest" + }, + "output": { + "shape": "DeleteCustomDataIdentifierResponse", + "documentation": "

    The request succeeded. The specified custom data identifier was deleted and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Soft deletes a custom data identifier.

    " + }, + "DeleteFindingsFilter": { + "name": "DeleteFindingsFilter", + "http": { + "method": "DELETE", + "requestUri": "/findingsfilters/{id}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteFindingsFilterRequest" + }, + "output": { + "shape": "DeleteFindingsFilterResponse", + "documentation": "

    The request succeeded. The specified findings filter was deleted and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Deletes a findings filter.

    " + }, + "DeleteInvitations": { + "name": "DeleteInvitations", + "http": { + "method": "POST", + "requestUri": "/invitations/delete", + "responseCode": 200 + }, + "input": { + "shape": "DeleteInvitationsRequest" + }, + "output": { + "shape": "DeleteInvitationsResponse", + "documentation": "

    The request succeeded. Processing might not be complete.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Deletes Amazon Macie membership invitations that were received from specific accounts.

    " + }, + "DeleteMember": { + "name": "DeleteMember", + "http": { + "method": "DELETE", + "requestUri": "/members/{id}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteMemberRequest" + }, + "output": { + "shape": "DeleteMemberResponse", + "documentation": "

    The request succeeded. The association was deleted and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Deletes the association between an Amazon Macie master account and an account.

    " + }, + "DescribeBuckets": { + "name": "DescribeBuckets", + "http": { + "method": "POST", + "requestUri": "/datasources/s3", + "responseCode": 200 + }, + "input": { + "shape": "DescribeBucketsRequest" + }, + "output": { + "shape": "DescribeBucketsResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves (queries) statistical data and other information about one or more S3 buckets that Amazon Macie monitors and analyzes.
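    Because DescribeBuckets is declared in paginators-1.json (nextToken/maxResults, result key buckets), the codegen is expected to produce a describeBucketsPaginator method on the sync client. A hedged sketch; bucketName() is an assumed member of the per-bucket metadata shape.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.macie2.Macie2Client;
    import software.amazon.awssdk.services.macie2.model.DescribeBucketsRequest;

    public class DescribeBucketsExample {
        public static void main(String[] args) {
            try (Macie2Client macie2 = Macie2Client.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                // The paginator handles nextToken continuation automatically and
                // exposes each page's buckets() list (the declared result key).
                macie2.describeBucketsPaginator(DescribeBucketsRequest.builder().maxResults(50).build())
                      .forEach(page -> page.buckets()
                              .forEach(bucket -> System.out.println(bucket.bucketName())));
            }
        }
    }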

    " + }, + "DescribeClassificationJob": { + "name": "DescribeClassificationJob", + "http": { + "method": "GET", + "requestUri": "/jobs/{jobId}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeClassificationJobRequest" + }, + "output": { + "shape": "DescribeClassificationJobResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves the status and settings for a classification job.

    " + }, + "DescribeOrganizationConfiguration": { + "name": "DescribeOrganizationConfiguration", + "http": { + "method": "GET", + "requestUri": "/admin/configuration", + "responseCode": 200 + }, + "input": { + "shape": "DescribeOrganizationConfigurationRequest" + }, + "output": { + "shape": "DescribeOrganizationConfigurationResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves the Amazon Macie configuration settings for an AWS organization.

    " + }, + "DisableMacie": { + "name": "DisableMacie", + "http": { + "method": "DELETE", + "requestUri": "/macie", + "responseCode": 200 + }, + "input": { + "shape": "DisableMacieRequest" + }, + "output": { + "shape": "DisableMacieResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Disables an Amazon Macie account and deletes Macie resources for the account.

    " + }, + "DisableOrganizationAdminAccount": { + "name": "DisableOrganizationAdminAccount", + "http": { + "method": "DELETE", + "requestUri": "/admin", + "responseCode": 200 + }, + "input": { + "shape": "DisableOrganizationAdminAccountRequest" + }, + "output": { + "shape": "DisableOrganizationAdminAccountResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Disables an account as the delegated Amazon Macie administrator account for an AWS organization.

    " + }, + "DisassociateFromMasterAccount": { + "name": "DisassociateFromMasterAccount", + "http": { + "method": "POST", + "requestUri": "/master/disassociate", + "responseCode": 200 + }, + "input": { + "shape": "DisassociateFromMasterAccountRequest" + }, + "output": { + "shape": "DisassociateFromMasterAccountResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Disassociates a member account from its Amazon Macie master account.

    " + }, + "DisassociateMember": { + "name": "DisassociateMember", + "http": { + "method": "POST", + "requestUri": "/members/disassociate/{id}", + "responseCode": 200 + }, + "input": { + "shape": "DisassociateMemberRequest" + }, + "output": { + "shape": "DisassociateMemberResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Disassociates an Amazon Macie master account from a member account.

    " + }, + "EnableMacie": { + "name": "EnableMacie", + "http": { + "method": "POST", + "requestUri": "/macie", + "responseCode": 200 + }, + "input": { + "shape": "EnableMacieRequest" + }, + "output": { + "shape": "EnableMacieResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Enables Amazon Macie and specifies the configuration settings for a Macie account.
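    A hedged sketch of enabling Macie for an account; MacieStatus and FindingPublishingFrequency are assumed enum names for the status and findingPublishingFrequency members, neither of which appears in this excerpt.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.macie2.Macie2Client;
    import software.amazon.awssdk.services.macie2.model.EnableMacieRequest;
    import software.amazon.awssdk.services.macie2.model.FindingPublishingFrequency;
    import software.amazon.awssdk.services.macie2.model.MacieStatus;

    public class EnableMacieExample {
        public static void main(String[] args) {
            try (Macie2Client macie2 = Macie2Client.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                // Enum and member names here are assumptions based on the shape
                // names used elsewhere in this model.
                macie2.enableMacie(EnableMacieRequest.builder()
                        .status(MacieStatus.ENABLED)
                        .findingPublishingFrequency(FindingPublishingFrequency.FIFTEEN_MINUTES)
                        .build());
                System.out.println("Macie enabled for this account and Region.");
            }
        }
    }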

    " + }, + "EnableOrganizationAdminAccount": { + "name": "EnableOrganizationAdminAccount", + "http": { + "method": "POST", + "requestUri": "/admin", + "responseCode": 200 + }, + "input": { + "shape": "EnableOrganizationAdminAccountRequest" + }, + "output": { + "shape": "EnableOrganizationAdminAccountResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Designates an account as the delegated Amazon Macie administrator account for an AWS organization.

    " + }, + "GetBucketStatistics": { + "name": "GetBucketStatistics", + "http": { + "method": "POST", + "requestUri": "/datasources/s3/statistics", + "responseCode": 200 + }, + "input": { + "shape": "GetBucketStatisticsRequest" + }, + "output": { + "shape": "GetBucketStatisticsResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves (queries) aggregated statistical data for all the S3 buckets that Amazon Macie monitors and analyzes.

    " + }, + "GetClassificationExportConfiguration": { + "name": "GetClassificationExportConfiguration", + "http": { + "method": "GET", + "requestUri": "/classification-export-configuration", + "responseCode": 200 + }, + "input": { + "shape": "GetClassificationExportConfigurationRequest" + }, + "output": { + "shape": "GetClassificationExportConfigurationResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves the configuration settings for storing data classification results.

    " + }, + "GetCustomDataIdentifier": { + "name": "GetCustomDataIdentifier", + "http": { + "method": "GET", + "requestUri": "/custom-data-identifiers/{id}", + "responseCode": 200 + }, + "input": { + "shape": "GetCustomDataIdentifierRequest" + }, + "output": { + "shape": "GetCustomDataIdentifierResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves the criteria and other settings for a custom data identifier.

    " + }, + "GetFindingStatistics": { + "name": "GetFindingStatistics", + "http": { + "method": "POST", + "requestUri": "/findings/statistics", + "responseCode": 200 + }, + "input": { + "shape": "GetFindingStatisticsRequest" + }, + "output": { + "shape": "GetFindingStatisticsResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves (queries) aggregated statistical data about findings.

    " + }, + "GetFindings": { + "name": "GetFindings", + "http": { + "method": "POST", + "requestUri": "/findings/describe", + "responseCode": 200 + }, + "input": { + "shape": "GetFindingsRequest" + }, + "output": { + "shape": "GetFindingsResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves the details of one or more findings.

    " + }, + "GetFindingsFilter": { + "name": "GetFindingsFilter", + "http": { + "method": "GET", + "requestUri": "/findingsfilters/{id}", + "responseCode": 200 + }, + "input": { + "shape": "GetFindingsFilterRequest" + }, + "output": { + "shape": "GetFindingsFilterResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves the criteria and other settings for a findings filter.

    " + }, + "GetInvitationsCount": { + "name": "GetInvitationsCount", + "http": { + "method": "GET", + "requestUri": "/invitations/count", + "responseCode": 200 + }, + "input": { + "shape": "GetInvitationsCountRequest" + }, + "output": { + "shape": "GetInvitationsCountResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves the count of Amazon Macie membership invitations that were received by an account.

    " + }, + "GetMacieSession": { + "name": "GetMacieSession", + "http": { + "method": "GET", + "requestUri": "/macie", + "responseCode": 200 + }, + "input": { + "shape": "GetMacieSessionRequest" + }, + "output": { + "shape": "GetMacieSessionResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves the current status and configuration settings for an Amazon Macie account.
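    A minimal sketch of checking the account's Macie status with the generated client; status and findingPublishingFrequency are assumed response members.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.macie2.Macie2Client;
    import software.amazon.awssdk.services.macie2.model.GetMacieSessionRequest;
    import software.amazon.awssdk.services.macie2.model.GetMacieSessionResponse;

    public class GetMacieSessionExample {
        public static void main(String[] args) {
            try (Macie2Client macie2 = Macie2Client.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                GetMacieSessionResponse session =
                        macie2.getMacieSession(GetMacieSessionRequest.builder().build());
                // statusAsString() and findingPublishingFrequencyAsString() are
                // assumed getters for enum-typed response members.
                System.out.println("Macie status: " + session.statusAsString());
                System.out.println("Publishing frequency: " + session.findingPublishingFrequencyAsString());
            }
        }
    }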

    " + }, + "GetMasterAccount": { + "name": "GetMasterAccount", + "http": { + "method": "GET", + "requestUri": "/master", + "responseCode": 200 + }, + "input": { + "shape": "GetMasterAccountRequest" + }, + "output": { + "shape": "GetMasterAccountResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves information about the Amazon Macie master account for an account.

    " + }, + "GetMember": { + "name": "GetMember", + "http": { + "method": "GET", + "requestUri": "/members/{id}", + "responseCode": 200 + }, + "input": { + "shape": "GetMemberRequest" + }, + "output": { + "shape": "GetMemberResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves information about a member account that's associated with an Amazon Macie master account.

    " + }, + "GetUsageStatistics": { + "name": "GetUsageStatistics", + "http": { + "method": "POST", + "requestUri": "/usage/statistics", + "responseCode": 200 + }, + "input": { + "shape": "GetUsageStatisticsRequest" + }, + "output": { + "shape": "GetUsageStatisticsResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves (queries) quotas and aggregated usage data for one or more accounts.
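    GetUsageStatistics is paginated (result key records in paginators-1.json), so a getUsageStatisticsPaginator method is expected on the generated client. A hedged sketch:

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.macie2.Macie2Client;
    import software.amazon.awssdk.services.macie2.model.GetUsageStatisticsRequest;

    public class GetUsageStatisticsExample {
        public static void main(String[] args) {
            try (Macie2Client macie2 = Macie2Client.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                // Each page exposes the records() list declared as the result key
                // for this operation; the paginator follows nextToken for us.
                macie2.getUsageStatisticsPaginator(GetUsageStatisticsRequest.builder().build())
                      .forEach(page -> System.out.println(
                              "Usage records in this page: " + page.records().size()));
            }
        }
    }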

    " + }, + "GetUsageTotals": { + "name": "GetUsageTotals", + "http": { + "method": "GET", + "requestUri": "/usage", + "responseCode": 200 + }, + "input": { + "shape": "GetUsageTotalsRequest" + }, + "output": { + "shape": "GetUsageTotalsResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves (queries) aggregated usage data for an account.

    " + }, + "ListClassificationJobs": { + "name": "ListClassificationJobs", + "http": { + "method": "POST", + "requestUri": "/jobs/list", + "responseCode": 200 + }, + "input": { + "shape": "ListClassificationJobsRequest" + }, + "output": { + "shape": "ListClassificationJobsResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves a subset of information about one or more classification jobs.
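    A hedged sketch of paging through classification jobs via the listClassificationJobsPaginator method expected from paginators-1.json (result key items); jobId() and name() are assumed members of the per-job summary shape.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.macie2.Macie2Client;
    import software.amazon.awssdk.services.macie2.model.ListClassificationJobsRequest;

    public class ListClassificationJobsExample {
        public static void main(String[] args) {
            try (Macie2Client macie2 = Macie2Client.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                // items() is the declared result key; the per-item getters below
                // are assumptions to be verified against the generated model.
                macie2.listClassificationJobsPaginator(ListClassificationJobsRequest.builder().build())
                      .forEach(page -> page.items()
                              .forEach(job -> System.out.println(job.jobId() + " : " + job.name())));
            }
        }
    }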

    " + }, + "ListCustomDataIdentifiers": { + "name": "ListCustomDataIdentifiers", + "http": { + "method": "POST", + "requestUri": "/custom-data-identifiers/list", + "responseCode": 200 + }, + "input": { + "shape": "ListCustomDataIdentifiersRequest" + }, + "output": { + "shape": "ListCustomDataIdentifiersResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves a subset of information about all the custom data identifiers for an account.

    " + }, + "ListFindings": { + "name": "ListFindings", + "http": { + "method": "POST", + "requestUri": "/findings", + "responseCode": 200 + }, + "input": { + "shape": "ListFindingsRequest" + }, + "output": { + "shape": "ListFindingsResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves a subset of information about one or more findings.

    " + }, + "ListFindingsFilters": { + "name": "ListFindingsFilters", + "http": { + "method": "GET", + "requestUri": "/findingsfilters", + "responseCode": 200 + }, + "input": { + "shape": "ListFindingsFiltersRequest" + }, + "output": { + "shape": "ListFindingsFiltersResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves a subset of information about all the findings filters for an account.

    " + }, + "ListInvitations": { + "name": "ListInvitations", + "http": { + "method": "GET", + "requestUri": "/invitations", + "responseCode": 200 + }, + "input": { + "shape": "ListInvitationsRequest" + }, + "output": { + "shape": "ListInvitationsResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves information about all the Amazon Macie membership invitations that were received by an account.

    " + }, + "ListMembers": { + "name": "ListMembers", + "http": { + "method": "GET", + "requestUri": "/members", + "responseCode": 200 + }, + "input": { + "shape": "ListMembersRequest" + }, + "output": { + "shape": "ListMembersResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves information about the accounts that are associated with an Amazon Macie master account.

    " + }, + "ListOrganizationAdminAccounts": { + "name": "ListOrganizationAdminAccounts", + "http": { + "method": "GET", + "requestUri": "/admin", + "responseCode": 200 + }, + "input": { + "shape": "ListOrganizationAdminAccountsRequest" + }, + "output": { + "shape": "ListOrganizationAdminAccountsResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Retrieves information about the delegated Amazon Macie administrator account for an AWS organization.

    " + }, + "ListTagsForResource": { + "name": "ListTagsForResource", + "http": { + "method": "GET", + "requestUri": "/tags/{resourceArn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceRequest" + }, + "output": { + "shape": "ListTagsForResourceResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [], + "documentation": "

    Retrieves the tags (keys and values) that are associated with a classification job, custom data identifier, findings filter, or member account.

    " + }, + "PutClassificationExportConfiguration": { + "name": "PutClassificationExportConfiguration", + "http": { + "method": "PUT", + "requestUri": "/classification-export-configuration", + "responseCode": 200 + }, + "input": { + "shape": "PutClassificationExportConfigurationRequest" + }, + "output": { + "shape": "PutClassificationExportConfigurationResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Creates or updates the configuration settings for storing data classification results.

    " + }, + "TagResource": { + "name": "TagResource", + "http": { + "method": "POST", + "requestUri": "/tags/{resourceArn}", + "responseCode": 204 + }, + "input": { + "shape": "TagResourceRequest" + }, + "output": { + "shape": "TagResourceResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [], + "documentation": "

    Adds or updates one or more tags (keys and values) that are associated with a classification job, custom data identifier, findings filter, or member account.

    " + }, + "TestCustomDataIdentifier": { + "name": "TestCustomDataIdentifier", + "http": { + "method": "POST", + "requestUri": "/custom-data-identifiers/test", + "responseCode": 200 + }, + "input": { + "shape": "TestCustomDataIdentifierRequest" + }, + "output": { + "shape": "TestCustomDataIdentifierResponse", + "documentation": "

    The request succeeded.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Tests a custom data identifier.

    " + }, + "UntagResource": { + "name": "UntagResource", + "http": { + "method": "DELETE", + "requestUri": "/tags/{resourceArn}", + "responseCode": 204 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "output": { + "shape": "UntagResourceResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [], + "documentation": "

    Removes one or more tags (keys and values) from a classification job, custom data identifier, findings filter, or member account.

    " + }, + "UpdateClassificationJob": { + "name": "UpdateClassificationJob", + "http": { + "method": "PATCH", + "requestUri": "/jobs/{jobId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateClassificationJobRequest" + }, + "output": { + "shape": "UpdateClassificationJobResponse", + "documentation": "

    The request succeeded. The job's status was changed and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Changes the status of a classification job.

    " + }, + "UpdateFindingsFilter": { + "name": "UpdateFindingsFilter", + "http": { + "method": "PATCH", + "requestUri": "/findingsfilters/{id}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateFindingsFilterRequest" + }, + "output": { + "shape": "UpdateFindingsFilterResponse", + "documentation": "

    The request succeeded. The specified findings filter was updated.

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Updates the criteria and other settings for a findings filter.

    " + }, + "UpdateMacieSession": { + "name": "UpdateMacieSession", + "http": { + "method": "PATCH", + "requestUri": "/macie", + "responseCode": 200 + }, + "input": { + "shape": "UpdateMacieSessionRequest" + }, + "output": { + "shape": "UpdateMacieSessionResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Suspends or re-enables an Amazon Macie account, or updates the configuration settings for a Macie account.

    " + }, + "UpdateMemberSession": { + "name": "UpdateMemberSession", + "http": { + "method": "PATCH", + "requestUri": "/macie/members/{id}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateMemberSessionRequest" + }, + "output": { + "shape": "UpdateMemberSessionResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Enables an Amazon Macie master account to suspend or re-enable a member account.

    " + }, + "UpdateOrganizationConfiguration": { + "name": "UpdateOrganizationConfiguration", + "http": { + "method": "PATCH", + "requestUri": "/admin/configuration", + "responseCode": 200 + }, + "input": { + "shape": "UpdateOrganizationConfigurationRequest" + }, + "output": { + "shape": "UpdateOrganizationConfigurationResponse", + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

    The request failed because it contains a syntax error.

    " + }, + { + "shape": "InternalServerException", + "documentation": "

    The request failed due to an unknown internal server error, exception, or failure.

    " + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

    The request failed because fulfilling the request would exceed one or more service quotas for your account.

    " + }, + { + "shape": "AccessDeniedException", + "documentation": "

    The request was denied because you don't have sufficient access to the specified resource.

    " + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

    The request failed because the specified resource wasn't found.

    " + }, + { + "shape": "ThrottlingException", + "documentation": "

    The request failed because you sent too many requests during a certain amount of time.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The request failed because it conflicts with the current state of the specified resource.

    " + } + ], + "documentation": "

    Updates the Amazon Macie configuration settings for an AWS organization.

    " + } + }, + "shapes": { + "AcceptInvitationRequest": { + "type": "structure", + "members": { + "invitationId": { + "shape": "__string", + "locationName": "invitationId", + "documentation": "

    The unique identifier for the invitation to accept.

    " + }, + "masterAccount": { + "shape": "__string", + "locationName": "masterAccount", + "documentation": "

    The AWS account ID for the account that sent the invitation.

    " + } + }, + "required": [ + "masterAccount", + "invitationId" + ] + }, + "AcceptInvitationResponse": { + "type": "structure", + "members": {} + }, + "AccessControlList": { + "type": "structure", + "members": { + "allowsPublicReadAccess": { + "shape": "__boolean", + "locationName": "allowsPublicReadAccess", + "documentation": "

    Specifies whether the ACL grants the general public read access permissions for the bucket.

    " + }, + "allowsPublicWriteAccess": { + "shape": "__boolean", + "locationName": "allowsPublicWriteAccess", + "documentation": "

    Specifies whether the ACL grants the general public write access permissions for the bucket.

    " + } + }, + "documentation": "

    Provides information about the permissions settings of the bucket-level access control list (ACL) for an S3 bucket.

    " + }, + "AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    The explanation of the error that occurred.

    " + } + }, + "documentation": "

    Provides information about an error that occurred due to insufficient access to a specified resource.

    ", + "exception": true, + "error": { + "httpStatusCode": 403 + } + }, + "AccountDetail": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The AWS account ID for the account.

    " + }, + "email": { + "shape": "__string", + "locationName": "email", + "documentation": "

    The email address for the account.

    " + } + }, + "documentation": "

    Specifies details for an account to associate with an Amazon Macie master account.

    ", + "required": [ + "email", + "accountId" + ] + }, + "AccountLevelPermissions": { + "type": "structure", + "members": { + "blockPublicAccess": { + "shape": "BlockPublicAccess", + "locationName": "blockPublicAccess", + "documentation": "

    The block public access settings for the bucket.

    " + } + }, + "documentation": "

    Provides information about account-level permissions settings that apply to an S3 bucket.

    " + }, + "AdminAccount": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The AWS account ID for the account.

    " + }, + "status": { + "shape": "AdminStatus", + "locationName": "status", + "documentation": "

    The current status of the account as a delegated administrator of Amazon Macie for the organization.

    " + } + }, + "documentation": "

    Provides information about the delegated Amazon Macie administrator account for an AWS organization.

    " + }, + "AdminStatus": { + "type": "string", + "documentation": "

    The current status of an account as the delegated Amazon Macie administrator account for an AWS organization.

    ", + "enum": [ + "ENABLED", + "DISABLING_IN_PROGRESS" + ] + }, + "ApiCallDetails": { + "type": "structure", + "members": { + "api": { + "shape": "__string", + "locationName": "api", + "documentation": "

    The name of the operation that was invoked most recently and produced the finding.

    " + }, + "apiServiceName": { + "shape": "__string", + "locationName": "apiServiceName", + "documentation": "

    The URL of the AWS service that provides the operation, for example: s3.amazonaws.com.

    " + }, + "firstSeen": { + "shape": "__timestampIso8601", + "locationName": "firstSeen", + "documentation": "

    The first date and time, in UTC and extended ISO 8601 format, when any operation was invoked and produced the finding.

    " + }, + "lastSeen": { + "shape": "__timestampIso8601", + "locationName": "lastSeen", + "documentation": "

    The most recent date and time, in UTC and extended ISO 8601 format, when the specified operation (api) was invoked and produced the finding.

    " + } + }, + "documentation": "

    Provides information about an API operation that an entity invoked for an affected resource.

    " + }, + "AssumedRole": { + "type": "structure", + "members": { + "accessKeyId": { + "shape": "__string", + "locationName": "accessKeyId", + "documentation": "

    The AWS access key ID that identifies the credentials.

    " + }, + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account that owns the entity that was used to get the credentials.

    " + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the entity that was used to get the credentials.

    " + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

    The unique identifier for the entity that was used to get the credentials.

    " + }, + "sessionContext": { + "shape": "SessionContext", + "locationName": "sessionContext", + "documentation": "

    The details of the session that was created for the credentials, including the entity that issued the session.

    " + } + }, + "documentation": "

    Provides information about an identity that performed an action on an affected resource by using temporary security credentials. The credentials were obtained using the AssumeRole operation of the AWS Security Token Service (AWS STS) API.

    " + }, + "AwsAccount": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account.

    " + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

    The unique identifier for the entity that performed the action.

    " + } + }, + "documentation": "

    Provides information about an AWS account and entity that performed an action on an affected resource. The action was performed using the credentials for an AWS account other than your own account.

    " + }, + "AwsService": { + "type": "structure", + "members": { + "invokedBy": { + "shape": "__string", + "locationName": "invokedBy", + "documentation": "

    The name of the AWS service that performed the action.

    " + } + }, + "documentation": "

    Provides information about an AWS service that performed an action on an affected resource.

    " + }, + "BatchGetCustomDataIdentifierSummary": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the custom data identifier.

    " + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the custom data identifier was created.

    " + }, + "deleted": { + "shape": "__boolean", + "locationName": "deleted", + "documentation": "

    Specifies whether the custom data identifier was deleted. If you delete a custom data identifier, Amazon Macie doesn't delete it permanently. Instead, it soft deletes the identifier.

    " + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    The custom description of the custom data identifier.

    " + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

    The unique identifier for the custom data identifier.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The custom name of the custom data identifier.

    " + } + }, + "documentation": "

    Provides information about a custom data identifier.

    " + }, + "BatchGetCustomDataIdentifiersRequest": { + "type": "structure", + "members": { + "ids": { + "shape": "__listOf__string", + "locationName": "ids", + "documentation": "

    An array of strings that lists the unique identifiers for the custom data identifiers to retrieve information about.

    " + } + } + }, + "BatchGetCustomDataIdentifiersResponse": { + "type": "structure", + "members": { + "customDataIdentifiers": { + "shape": "__listOfBatchGetCustomDataIdentifierSummary", + "locationName": "customDataIdentifiers", + "documentation": "

    An array of objects, one for each custom data identifier that meets the criteria specified in the request.

    " + }, + "notFoundIdentifierIds": { + "shape": "__listOf__string", + "locationName": "notFoundIdentifierIds", + "documentation": "

    An array of identifiers, one for each identifier that was specified in the request but doesn't correlate to an existing custom data identifier.

    " + } + } + }, + "BlockPublicAccess": { + "type": "structure", + "members": { + "blockPublicAcls": { + "shape": "__boolean", + "locationName": "blockPublicAcls", + "documentation": "

    Specifies whether Amazon S3 blocks public access control lists (ACLs) for the bucket and objects in the bucket.

    " + }, + "blockPublicPolicy": { + "shape": "__boolean", + "locationName": "blockPublicPolicy", + "documentation": "

    Specifies whether Amazon S3 blocks public bucket policies for the bucket.

    " + }, + "ignorePublicAcls": { + "shape": "__boolean", + "locationName": "ignorePublicAcls", + "documentation": "

    Specifies whether Amazon S3 ignores public ACLs for the bucket and objects in the bucket.

    " + }, + "restrictPublicBuckets": { + "shape": "__boolean", + "locationName": "restrictPublicBuckets", + "documentation": "

    Specifies whether Amazon S3 restricts public bucket policies for the bucket.

    " + } + }, + "documentation": "

    Provides information about the block public access settings for an S3 bucket. These settings can apply to a bucket at the account level or bucket level. For detailed information about each setting, see Using Amazon S3 block public access in the Amazon Simple Storage Service Developer Guide.

    " + }, + "BucketCountByEffectivePermission": { + "type": "structure", + "members": { + "publiclyAccessible": { + "shape": "__long", + "locationName": "publiclyAccessible", + "documentation": "

    The total number of buckets that allow the general public to have read or write access to the bucket.

    " + }, + "publiclyReadable": { + "shape": "__long", + "locationName": "publiclyReadable", + "documentation": "

    The total number of buckets that allow the general public to have read access to the bucket.

    " + }, + "publiclyWritable": { + "shape": "__long", + "locationName": "publiclyWritable", + "documentation": "

    The total number of buckets that allow the general public to have write access to the bucket.

    " + }, + "unknown": { + "shape": "__long", + "locationName": "unknown", + "documentation": "

    The total number of buckets that Amazon Macie wasn't able to evaluate permissions settings for. Macie can't determine whether these buckets are publicly accessible.

    " + } + }, + "documentation": "

    Provides information about the number of S3 buckets that are publicly accessible based on a combination of permissions settings for each bucket.

    " + }, + "BucketCountByEncryptionType": { + "type": "structure", + "members": { + "kmsManaged": { + "shape": "__long", + "locationName": "kmsManaged", + "documentation": "

    The total number of buckets that use an AWS Key Management Service (AWS KMS) customer master key (CMK) by default to encrypt objects. These buckets use AWS managed AWS KMS (AWS-KMS) encryption or customer managed AWS KMS (SSE-KMS) encryption.

    " + }, + "s3Managed": { + "shape": "__long", + "locationName": "s3Managed", + "documentation": "

    The total number of buckets that use an Amazon S3 managed key by default to encrypt objects. These buckets use Amazon S3 managed (SSE-S3) encryption.

    " + }, + "unencrypted": { + "shape": "__long", + "locationName": "unencrypted", + "documentation": "

    The total number of buckets that don't encrypt objects by default. Default encryption is disabled for these buckets.

    " + } + }, + "documentation": "

    Provides information about the number of S3 buckets that use certain types of server-side encryption or don't encrypt objects by default.

    " + }, + "BucketCountBySharedAccessType": { + "type": "structure", + "members": { + "external": { + "shape": "__long", + "locationName": "external", + "documentation": "

    The total number of buckets that are shared with an AWS account that isn't part of the same Amazon Macie organization.

    " + }, + "internal": { + "shape": "__long", + "locationName": "internal", + "documentation": "

    The total number of buckets that are shared with an AWS account that's part of the same Amazon Macie organization.

    " + }, + "notShared": { + "shape": "__long", + "locationName": "notShared", + "documentation": "

    The total number of buckets that aren't shared with other AWS accounts.

    " + }, + "unknown": { + "shape": "__long", + "locationName": "unknown", + "documentation": "

    The total number of buckets that Amazon Macie wasn't able to evaluate shared access settings for. Macie can't determine whether these buckets are shared with other AWS accounts.

    " + } + }, + "documentation": "

    Provides information about the number of S3 buckets that are shared with other AWS accounts.

    " + }, + "BucketCriteria": { + "type": "map", + "documentation": "

    Specifies, as a map, one or more property-based conditions that filter the results of a query for information about S3 buckets.

    ", + "key": { + "shape": "__string" + }, + "value": { + "shape": "BucketCriteriaAdditionalProperties" + } + }, + "BucketCriteriaAdditionalProperties": { + "type": "structure", + "members": { + "eq": { + "shape": "__listOf__string", + "locationName": "eq", + "documentation": "

    The value for the property matches (equals) the specified value. If you specify multiple values, Macie uses OR logic to join the values.

    " + }, + "gt": { + "shape": "__long", + "locationName": "gt", + "documentation": "

    The value for the property is greater than the specified value.

    " + }, + "gte": { + "shape": "__long", + "locationName": "gte", + "documentation": "

    The value for the property is greater than or equal to the specified value.

    " + }, + "lt": { + "shape": "__long", + "locationName": "lt", + "documentation": "

    The value for the property is less than the specified value.

    " + }, + "lte": { + "shape": "__long", + "locationName": "lte", + "documentation": "

    The value for the property is less than or equal to the specified value.

    " + }, + "neq": { + "shape": "__listOf__string", + "locationName": "neq", + "documentation": "

    The value for the property doesn't match (doesn't equal) the specified value. If you specify multiple values, Amazon Macie uses OR logic to join the values.

    " + }, + "prefix": { + "shape": "__string", + "locationName": "prefix", + "documentation": "

    The name of the bucket begins with the specified value.

    " + } + }, + "documentation": "

    Specifies the operator to use in a property-based condition that filters the results of a query for information about S3 buckets.

    " + }, + "BucketLevelPermissions": { + "type": "structure", + "members": { + "accessControlList": { + "shape": "AccessControlList", + "locationName": "accessControlList", + "documentation": "

    The permissions settings of the access control list (ACL) for the bucket. This value is null if an ACL hasn't been defined for the bucket.

    " + }, + "blockPublicAccess": { + "shape": "BlockPublicAccess", + "locationName": "blockPublicAccess", + "documentation": "

    The block public access settings for the bucket.

    " + }, + "bucketPolicy": { + "shape": "BucketPolicy", + "locationName": "bucketPolicy", + "documentation": "

    The permissions settings of the bucket policy for the bucket. This value is null if a bucket policy hasn't been defined for the bucket.

    " + } + }, + "documentation": "

    Provides information about the bucket-level permissions settings for an S3 bucket.

    " + }, + "BucketMetadata": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account that owns the bucket.

    " + }, + "bucketArn": { + "shape": "__string", + "locationName": "bucketArn", + "documentation": "

    The Amazon Resource Name (ARN) of the bucket.

    " + }, + "bucketCreatedAt": { + "shape": "__timestampIso8601", + "locationName": "bucketCreatedAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the bucket was created.

    " + }, + "bucketName": { + "shape": "__string", + "locationName": "bucketName", + "documentation": "

    The name of the bucket.

    " + }, + "classifiableObjectCount": { + "shape": "__long", + "locationName": "classifiableObjectCount", + "documentation": "

    The total number of objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format.

    " + }, + "classifiableSizeInBytes": { + "shape": "__long", + "locationName": "classifiableSizeInBytes", + "documentation": "

    The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format.

    " + }, + "jobDetails": { + "shape": "JobDetails", + "locationName": "jobDetails", + "documentation": "

    Specifies whether any one-time or recurring classification jobs are configured to analyze data in the bucket, and, if so, the details of the job that ran most recently.

    " + }, + "lastUpdated": { + "shape": "__timestampIso8601", + "locationName": "lastUpdated", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently retrieved data about the bucket from Amazon S3.

    " + }, + "objectCount": { + "shape": "__long", + "locationName": "objectCount", + "documentation": "

    The total number of objects in the bucket.

    " + }, + "objectCountByEncryptionType": { + "shape": "ObjectCountByEncryptionType", + "locationName": "objectCountByEncryptionType", + "documentation": "

    The total number of objects that are in the bucket, grouped by server-side encryption type. This includes a grouping that reports the total number of objects that aren't encrypted or use client-side encryption.

    " + }, + "publicAccess": { + "shape": "BucketPublicAccess", + "locationName": "publicAccess", + "documentation": "

    Specifies whether the bucket is publicly accessible. If this value is true, an access control list (ACL), bucket policy, or block public access settings allow the bucket to be accessed by the general public.

    " + }, + "region": { + "shape": "__string", + "locationName": "region", + "documentation": "

    The AWS Region that hosts the bucket.

    " + }, + "replicationDetails": { + "shape": "ReplicationDetails", + "locationName": "replicationDetails", + "documentation": "

    Specifies whether the bucket is configured to replicate one or more objects to buckets for other AWS accounts and, if so, which accounts.

    " + }, + "sharedAccess": { + "shape": "SharedAccess", + "locationName": "sharedAccess", + "documentation": "

    Specifies whether the bucket is shared with another AWS account. Possible values are:

    • EXTERNAL - The bucket is shared with an AWS account that isn't part of the same Amazon Macie organization.

    • INTERNAL - The bucket is shared with an AWS account that's part of the same Amazon Macie organization.

    • NOT_SHARED - The bucket isn't shared with other AWS accounts.

    • UNKNOWN - Amazon Macie wasn't able to evaluate the shared access settings for the bucket.

    " + }, + "sizeInBytes": { + "shape": "__long", + "locationName": "sizeInBytes", + "documentation": "

    The total storage size, in bytes, of the bucket.

    " + }, + "sizeInBytesCompressed": { + "shape": "__long", + "locationName": "sizeInBytesCompressed", + "documentation": "

    The total compressed storage size, in bytes, of the bucket.

    " + }, + "tags": { + "shape": "__listOfKeyValuePair", + "locationName": "tags", + "documentation": "

    An array that specifies the tags (keys and values) that are associated with the bucket.

    " + }, + "unclassifiableObjectCount": { + "shape": "ObjectLevelStatistics", + "locationName": "unclassifiableObjectCount", + "documentation": "

    The total number of objects that Amazon Macie can't analyze in the bucket. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

    " + }, + "unclassifiableObjectSizeInBytes": { + "shape": "ObjectLevelStatistics", + "locationName": "unclassifiableObjectSizeInBytes", + "documentation": "

    The total storage size, in bytes, of the objects that Amazon Macie can't analyze in the bucket. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

    " + }, + "versioning": { + "shape": "__boolean", + "locationName": "versioning", + "documentation": "

    Specifies whether versioning is enabled for the bucket.

    " + } + }, + "documentation": "

    Provides information about an S3 bucket that Amazon Macie monitors and analyzes.

    " + }, + "BucketPermissionConfiguration": { + "type": "structure", + "members": { + "accountLevelPermissions": { + "shape": "AccountLevelPermissions", + "locationName": "accountLevelPermissions", + "documentation": "

    The account-level permissions settings that apply to the bucket.

    " + }, + "bucketLevelPermissions": { + "shape": "BucketLevelPermissions", + "locationName": "bucketLevelPermissions", + "documentation": "

    The bucket-level permissions settings for the bucket.

    " + } + }, + "documentation": "

    Provides information about the account-level and bucket-level permissions settings for an S3 bucket.

    " + }, + "BucketPolicy": { + "type": "structure", + "members": { + "allowsPublicReadAccess": { + "shape": "__boolean", + "locationName": "allowsPublicReadAccess", + "documentation": "

    Specifies whether the bucket policy allows the general public to have read access to the bucket.

    " + }, + "allowsPublicWriteAccess": { + "shape": "__boolean", + "locationName": "allowsPublicWriteAccess", + "documentation": "

    Specifies whether the bucket policy allows the general public to have write access to the bucket.

    " + } + }, + "documentation": "

    Provides information about the permissions settings of a bucket policy for an S3 bucket.

    " + }, + "BucketPublicAccess": { + "type": "structure", + "members": { + "effectivePermission": { + "shape": "EffectivePermission", + "locationName": "effectivePermission", + "documentation": "

    Specifies whether the bucket is publicly accessible due to the combination of permissions settings that apply to the bucket. Possible values are:

    • NOT_PUBLIC - The bucket isn't publicly accessible.

    • PUBLIC - The bucket is publicly accessible.

    • UNKNOWN - Amazon Macie can't determine whether the bucket is publicly accessible.

    " + }, + "permissionConfiguration": { + "shape": "BucketPermissionConfiguration", + "locationName": "permissionConfiguration", + "documentation": "

    The account-level and bucket-level permissions for the bucket.

    " + } + }, + "documentation": "

    Provides information about the permissions settings that determine whether an S3 bucket is publicly accessible.

    " + }, + "BucketSortCriteria": { + "type": "structure", + "members": { + "attributeName": { + "shape": "__string", + "locationName": "attributeName", + "documentation": "

    The name of the property to sort the results by. This value can be the name of any property that Amazon Macie defines as bucket metadata, such as bucketName or accountId.

    " + }, + "orderBy": { + "shape": "OrderBy", + "locationName": "orderBy", + "documentation": "

    The sort order to apply to the results, based on the value for the property specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and DESC, sort the results in descending order.

    " + } + }, + "documentation": "

    Specifies criteria for sorting the results of a query for information about S3 buckets.

    " + }, + "Cell": { + "type": "structure", + "members": { + "cellReference": { + "shape": "__string", + "locationName": "cellReference", + "documentation": "

    The location, as an absolute cell reference, of the cell that contains the data. For example, Sheet2!C5 for cell C5 on Sheet2 in a Microsoft Excel workbook. This value is null for CSV and TSV files.

    " + }, + "column": { + "shape": "__long", + "locationName": "column", + "documentation": "

    The column number of the column that contains the data. For a Microsoft Excel workbook, this value correlates to the alphabetical character(s) for a column identifier. For example, 1 for column A, 2 for column B, and so on.

    " + }, + "columnName": { + "shape": "__string", + "locationName": "columnName", + "documentation": "

    The name of the column that contains the data, if available.

    " + }, + "row": { + "shape": "__long", + "locationName": "row", + "documentation": "

    The row number of the row that contains the data.

    " + } + }, + "documentation": "

    Specifies the location of an occurrence of sensitive data in a Microsoft Excel workbook, CSV file, or TSV file.

    " + }, + "Cells": { + "type": "list", + "documentation": "

    Specifies the location of occurrences of sensitive data in a Microsoft Excel workbook, CSV file, or TSV file.

    ", + "member": { + "shape": "Cell" + } + }, + "ClassificationDetails": { + "type": "structure", + "members": { + "detailedResultsLocation": { + "shape": "__string", + "locationName": "detailedResultsLocation", + "documentation": "

    The path to the folder or file (in Amazon S3) that contains the corresponding sensitive data discovery result for the finding. If a finding applies to a large archive or compressed file, this value is the path to a folder. Otherwise, this value is the path to a file.

    " + }, + "jobArn": { + "shape": "__string", + "locationName": "jobArn", + "documentation": "

    The Amazon Resource Name (ARN) of the classification job that produced the finding.

    " + }, + "jobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The unique identifier for the classification job that produced the finding.

    " + }, + "result": { + "shape": "ClassificationResult", + "locationName": "result", + "documentation": "

    The status and other details for the finding.

    " + } + }, + "documentation": "

    Provides information about a sensitive data finding, including the classification job that produced the finding.

    " + }, + "ClassificationExportConfiguration": { + "type": "structure", + "members": { + "s3Destination": { + "shape": "S3Destination", + "locationName": "s3Destination", + "documentation": "

    The S3 bucket to store data classification results in, and the encryption settings to use when storing results in that bucket.

    " + } + }, + "documentation": "

    Specifies where to store data classification results, and the encryption settings to use when storing results in that location. Currently, you can store classification results only in an S3 bucket.

    " + }, + "ClassificationResult": { + "type": "structure", + "members": { + "additionalOccurrences": { + "shape": "__boolean", + "locationName": "additionalOccurrences", + "documentation": "

    Specifies whether Amazon Macie detected additional occurrences of sensitive data in the S3 object. A finding includes location data for a maximum of 15 occurrences of sensitive data.

    This value can help you determine whether to investigate additional occurrences of sensitive data in an object. You can do this by referring to the corresponding sensitive data discovery result for the finding (ClassificationDetails.detailedResultsLocation).

    " + }, + "customDataIdentifiers": { + "shape": "CustomDataIdentifiers", + "locationName": "customDataIdentifiers", + "documentation": "

    The custom data identifiers that detected the sensitive data and the number of occurrences of the data that they detected.

    " + }, + "mimeType": { + "shape": "__string", + "locationName": "mimeType", + "documentation": "

    The type of content, as a MIME type, that the finding applies to. For example, application/gzip, for a GNU Gzip compressed archive file, or application/pdf, for an Adobe Portable Document Format file.

    " + }, + "sensitiveData": { + "shape": "SensitiveData", + "locationName": "sensitiveData", + "documentation": "

    The category, types, and number of occurrences of the sensitive data that produced the finding.

    " + }, + "sizeClassified": { + "shape": "__long", + "locationName": "sizeClassified", + "documentation": "

    The total size, in bytes, of the data that the finding applies to.

    " + }, + "status": { + "shape": "ClassificationResultStatus", + "locationName": "status", + "documentation": "

    The status of the finding.

    " + } + }, + "documentation": "

    Provides the details of a sensitive data finding, including the types, number of occurrences, and locations of the sensitive data that was detected.

    " + }, + "ClassificationResultStatus": { + "type": "structure", + "members": { + "code": { + "shape": "__string", + "locationName": "code", + "documentation": "

    The status of the finding. Possible values are:

    • COMPLETE - Amazon Macie successfully completed its analysis of the object that the finding applies to.

    • PARTIAL - Macie analyzed only a subset of the data in the object that the finding applies to. For example, the object is an archive file that contains files in an unsupported format.

    • SKIPPED - Macie wasn't able to analyze the object that the finding applies to. For example, the object is a malformed file or a file that uses an unsupported format.

    " + }, + "reason": { + "shape": "__string", + "locationName": "reason", + "documentation": "

    A brief description of the status of the finding. Amazon Macie uses this value to notify you of any errors, warnings, or considerations that might impact your analysis of the finding.

    " + } + }, + "documentation": "

    Provides information about the status of a sensitive data finding.

    " + }, + "ConflictException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    The explanation of the error that occurred.

    " + } + }, + "documentation": "

    Provides information about an error that occurred due to a versioning conflict for a specified resource.

    ", + "exception": true, + "error": { + "httpStatusCode": 409 + } + }, + "CreateClassificationJobRequest": { + "type": "structure", + "members": { + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

    A unique, case-sensitive token that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken": true + }, + "customDataIdentifierIds": { + "shape": "__listOf__string", + "locationName": "customDataIdentifierIds", + "documentation": "

    The custom data identifiers to use for data analysis and classification.

    " + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    A custom description of the job. The description can contain as many as 200 characters.

    " + }, + "initialRun": { + "shape": "__boolean", + "locationName": "initialRun", + "documentation": "

    Specifies whether to analyze all existing, eligible objects immediately after the job is created.

    " + }, + "jobType": { + "shape": "JobType", + "locationName": "jobType", + "documentation": "

    The schedule for running the job. Valid values are:

    • ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the scheduleFrequency property.

    • SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the scheduleFrequency property to define the recurrence pattern for the job.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    A custom name for the job. The name can contain as many as 500 characters.

    " + }, + "s3JobDefinition": { + "shape": "S3JobDefinition", + "locationName": "s3JobDefinition", + "documentation": "

    The S3 buckets that contain the objects to analyze, and the scope of that analysis.

    " + }, + "samplingPercentage": { + "shape": "__integer", + "locationName": "samplingPercentage", + "documentation": "

    The sampling depth, as a percentage, to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects.

    " + }, + "scheduleFrequency": { + "shape": "JobScheduleFrequency", + "locationName": "scheduleFrequency", + "documentation": "

    The recurrence pattern for running the job. To run the job only once, don't specify a value for this property and set the value for the jobType property to ONE_TIME.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that specifies the tags to associate with the job.

    A job can have a maximum of 50 tags. Each tag consists of a tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

    " + } + }, + "required": [ + "s3JobDefinition", + "jobType", + "clientToken", + "name" + ] + }, + "CreateClassificationJobResponse": { + "type": "structure", + "members": { + "jobArn": { + "shape": "__string", + "locationName": "jobArn", + "documentation": "

    The Amazon Resource Name (ARN) of the job.

    " + }, + "jobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The unique identifier for the job.

    " + } + } + }, + "CreateCustomDataIdentifierRequest": { + "type": "structure", + "members": { + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

    A unique, case-sensitive token that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken": true + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    A custom description of the custom data identifier. The description can contain as many as 512 characters.

    We strongly recommend that you avoid including any sensitive data in the description of a custom data identifier. Other users of your account might be able to see the identifier's description, depending on the actions that they're allowed to perform in Amazon Macie.

    " + }, + "ignoreWords": { + "shape": "__listOf__string", + "locationName": "ignoreWords", + "documentation": "

    An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4 - 90 characters. Ignore words are case sensitive.

    " + }, + "keywords": { + "shape": "__listOf__string", + "locationName": "keywords", + "documentation": "

    An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 4 - 90 characters. Keywords aren't case sensitive.

    " + }, + "maximumMatchDistance": { + "shape": "__integer", + "locationName": "maximumMatchDistance", + "documentation": "

    The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1 - 300 characters. The default value is 50.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    A custom name for the custom data identifier. The name can contain as many as 128 characters.

    We strongly recommend that you avoid including any sensitive data in the name of a custom data identifier. Other users of your account might be able to see the identifier's name, depending on the actions that they're allowed to perform in Amazon Macie.

    " + }, + "regex": { + "shape": "__string", + "locationName": "regex", + "documentation": "

    The regular expression (regex) that defines the pattern to match. The expression can contain as many as 512 characters.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that specifies the tags to associate with the custom data identifier.

    A custom data identifier can have a maximum of 50 tags. Each tag consists of a tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

    " + } + } + }, + "CreateCustomDataIdentifierResponse": { + "type": "structure", + "members": { + "customDataIdentifierId": { + "shape": "__string", + "locationName": "customDataIdentifierId", + "documentation": "

    The unique identifier for the custom data identifier that was created.

    " + } + } + }, + "CreateFindingsFilterRequest": { + "type": "structure", + "members": { + "action": { + "shape": "FindingsFilterAction", + "locationName": "action", + "documentation": "

    The action to perform on findings that meet the filter criteria (findingCriteria). Valid values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings.

    " + }, + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

    A unique, case-sensitive token that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken": true + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    A custom description of the filter. The description can contain as many as 512 characters.

    We strongly recommend that you avoid including any sensitive data in the description of a filter. Other users of your account might be able to see the filter's description, depending on the actions that they're allowed to perform in Amazon Macie.

    " + }, + "findingCriteria": { + "shape": "FindingCriteria", + "locationName": "findingCriteria", + "documentation": "

    The criteria to use to filter findings.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    A custom name for the filter. The name must contain at least 3 characters and can contain as many as 64 characters.

    We strongly recommend that you avoid including any sensitive data in the name of a filter. Other users of your account might be able to see the filter's name, depending on the actions that they're allowed to perform in Amazon Macie.

    " + }, + "position": { + "shape": "__integer", + "locationName": "position", + "documentation": "

    The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that specifies the tags to associate with the filter.

    A findings filter can have a maximum of 50 tags. Each tag consists of a tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

    " + } + }, + "required": [ + "action", + "findingCriteria", + "name" + ] + }, + "CreateFindingsFilterResponse": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the filter that was created.

    " + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

    The unique identifier for the filter that was created.

    " + } + } + }, + "CreateInvitationsRequest": { + "type": "structure", + "members": { + "accountIds": { + "shape": "__listOf__string", + "locationName": "accountIds", + "documentation": "

    An array that lists AWS account IDs, one for each account to send the invitation to.

    " + }, + "disableEmailNotification": { + "shape": "__boolean", + "locationName": "disableEmailNotification", + "documentation": "

    Specifies whether to send an email notification to the root user of each account that the invitation will be sent to. This notification is in addition to an alert that the root user receives in AWS Personal Health Dashboard. To prevent Amazon Macie from sending the email notification, set this value to true.

    " + }, + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation.

    " + } + }, + "required": [ + "accountIds" + ] + }, + "CreateInvitationsResponse": { + "type": "structure", + "members": { + "unprocessedAccounts": { + "shape": "__listOfUnprocessedAccount", + "locationName": "unprocessedAccounts", + "documentation": "

    An array of objects, one for each account whose invitation hasn't been processed. Each object identifies the account and explains why the invitation hasn't been processed for the account.

    " + } + } + }, + "CreateMemberRequest": { + "type": "structure", + "members": { + "account": { + "shape": "AccountDetail", + "locationName": "account", + "documentation": "

    The details for the account to associate with the master account.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie.

    An account can have a maximum of 50 tags. Each tag consists of a tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

    " + } + }, + "required": [ + "account" + ] + }, + "CreateMemberResponse": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the account that was associated with the master account.

    " + } + } + }, + "CreateSampleFindingsRequest": { + "type": "structure", + "members": { + "findingTypes": { + "shape": "__listOfFindingType", + "locationName": "findingTypes", + "documentation": "

    An array that lists one or more types of findings to include in the set of sample findings. Currently, the only supported value is Policy:IAMUser/S3BucketEncryptionDisabled.

    " + } + } + }, + "CreateSampleFindingsResponse": { + "type": "structure", + "members": {} + }, + "Criterion": { + "type": "map", + "documentation": "

    Specifies a condition that defines a property, operator, and one or more values to filter the results of a query for findings. The number of values depends on the property and operator specified by the condition. For information about defining filter conditions, see Fundamentals of filtering findings in the Amazon Macie User Guide.

    ", + "key": { + "shape": "__string" + }, + "value": { + "shape": "CriterionAdditionalProperties" + } + }, + "CriterionAdditionalProperties": { + "type": "structure", + "members": { + "eq": { + "shape": "__listOf__string", + "locationName": "eq", + "documentation": "

    The value for the property matches (equals) the specified value. If you specify multiple values, Macie uses OR logic to join the values.

    " + }, + "eqExactMatch": { + "shape": "__listOf__string", + "locationName": "eqExactMatch", + "documentation": "

    The value for the property exclusively matches (equals an exact match for) all the specified values. If you specify multiple values, Amazon Macie uses AND logic to join the values.

    You can use this operator with the following properties: customDataIdentifiers.detections.arn, customDataIdentifiers.detections.name, resourcesAffected.s3Bucket.tags.key, resourcesAffected.s3Bucket.tags.value, resourcesAffected.s3Object.tags.key, resourcesAffected.s3Object.tags.value, sensitiveData.category, and sensitiveData.detections.type.

    " + }, + "gt": { + "shape": "__long", + "locationName": "gt", + "documentation": "

    The value for the property is greater than the specified value.

    " + }, + "gte": { + "shape": "__long", + "locationName": "gte", + "documentation": "

    The value for the property is greater than or equal to the specified value.

    " + }, + "lt": { + "shape": "__long", + "locationName": "lt", + "documentation": "

    The value for the property is less than the specified value.

    " + }, + "lte": { + "shape": "__long", + "locationName": "lte", + "documentation": "

    The value for the property is less than or equal to the specified value.

    " + }, + "neq": { + "shape": "__listOf__string", + "locationName": "neq", + "documentation": "

    The value for the property doesn't match (doesn't equal) the specified value. If you specify multiple values, Macie uses OR logic to join the values.

    " + } + }, + "documentation": "

    Specifies the operator to use in a property-based condition that filters the results of a query for findings. For detailed information and examples of each operator, see Fundamentals of filtering findings in the Amazon Macie User Guide.

    " + }, + "Currency": { + "type": "string", + "documentation": "

    The type of currency that data for a usage metric is reported in. Possible values are:

    ", + "enum": [ + "USD" + ] + }, + "CustomDataIdentifierSummary": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the custom data identifier.

    " + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the custom data identifier was created.

    " + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    The custom description of the custom data identifier.

    " + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

    The unique identifier for the custom data identifier.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The custom name of the custom data identifier.

    " + } + }, + "documentation": "

    Provides information about a custom data identifier.

    " + }, + "CustomDataIdentifiers": { + "type": "structure", + "members": { + "detections": { + "shape": "CustomDetections", + "locationName": "detections", + "documentation": "

    The custom data identifiers that detected the data, and the number of occurrences of the data that each identifier detected.

    " + }, + "totalCount": { + "shape": "__long", + "locationName": "totalCount", + "documentation": "

    The total number of occurrences of the data that was detected by the custom data identifiers and produced the finding.

    " + } + }, + "documentation": "

    Provides information about custom data identifiers that produced a sensitive data finding, and the number of occurrences of the data that they detected for the finding.

    " + }, + "CustomDetection": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the custom data identifier.

    " + }, + "count": { + "shape": "__long", + "locationName": "count", + "documentation": "

    The total number of occurrences of the sensitive data that the custom data identifier detected.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The name of the custom data identifier.

    " + }, + "occurrences": { + "shape": "Occurrences", + "locationName": "occurrences", + "documentation": "

    The location of 1-15 occurrences of the sensitive data that the custom data identifier detected. A finding includes location data for a maximum of 15 occurrences of sensitive data.

    " + } + }, + "documentation": "

    Provides information about a custom data identifier that produced a sensitive data finding, and the sensitive data that it detected for the finding.

    " + }, + "CustomDetections": { + "type": "list", + "documentation": "

    Provides information about custom data identifiers that produced a sensitive data finding, and the number of occurrences of the data that each identifier detected.

    ", + "member": { + "shape": "CustomDetection" + } + }, + "DailySchedule": { + "type": "structure", + "members": {}, + "documentation": "

    Specifies that a classification job runs once a day, every day. This is an empty object.

    " + }, + "DayOfWeek": { + "type": "string", + "enum": [ + "SUNDAY", + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY" + ] + }, + "DeclineInvitationsRequest": { + "type": "structure", + "members": { + "accountIds": { + "shape": "__listOf__string", + "locationName": "accountIds", + "documentation": "

    An array that lists AWS account IDs, one for each account that sent an invitation to decline.

    " + } + }, + "required": [ + "accountIds" + ] + }, + "DeclineInvitationsResponse": { + "type": "structure", + "members": { + "unprocessedAccounts": { + "shape": "__listOfUnprocessedAccount", + "locationName": "unprocessedAccounts", + "documentation": "

    An array of objects, one for each account whose invitation hasn't been declined. Each object identifies the account and explains why the request hasn't been processed for that account.

    " + } + } + }, + "DefaultDetection": { + "type": "structure", + "members": { + "count": { + "shape": "__long", + "locationName": "count", + "documentation": "

    The total number of occurrences of the type of sensitive data that was detected.

    " + }, + "occurrences": { + "shape": "Occurrences", + "locationName": "occurrences", + "documentation": "

    The location of 1-15 occurrences of the sensitive data that was detected. A finding includes location data for a maximum of 15 occurrences of sensitive data.

    " + }, + "type": { + "shape": "__string", + "locationName": "type", + "documentation": "

    The type of sensitive data that was detected. For example, AWS_CREDENTIALS, PHONE_NUMBER, or ADDRESS.

    " + } + }, + "documentation": "

    Provides information about a type of sensitive data that was detected by managed data identifiers and produced a sensitive data finding.

    " + }, + "DefaultDetections": { + "type": "list", + "documentation": "

    Provides information about sensitive data that was detected by managed data identifiers and produced a sensitive data finding, and the number of occurrences of each type of sensitive data that was detected.

    ", + "member": { + "shape": "DefaultDetection" + } + }, + "DeleteCustomDataIdentifierRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

    The unique identifier for the Amazon Macie resource or account that the request applies to.

    " + } + }, + "required": [ + "id" + ] + }, + "DeleteCustomDataIdentifierResponse": { + "type": "structure", + "members": {} + }, + "DeleteFindingsFilterRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

    The unique identifier for the Amazon Macie resource or account that the request applies to.

    " + } + }, + "required": [ + "id" + ] + }, + "DeleteFindingsFilterResponse": { + "type": "structure", + "members": {} + }, + "DeleteInvitationsRequest": { + "type": "structure", + "members": { + "accountIds": { + "shape": "__listOf__string", + "locationName": "accountIds", + "documentation": "

    An array that lists AWS account IDs, one for each account that sent an invitation to delete.

    " + } + }, + "required": [ + "accountIds" + ] + }, + "DeleteInvitationsResponse": { + "type": "structure", + "members": { + "unprocessedAccounts": { + "shape": "__listOfUnprocessedAccount", + "locationName": "unprocessedAccounts", + "documentation": "

    An array of objects, one for each account whose invitation hasn't been deleted. Each object identifies the account and explains why the request hasn't been processed for that account.

    " + } + } + }, + "DeleteMemberRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

    The unique identifier for the Amazon Macie resource or account that the request applies to.

    " + } + }, + "required": [ + "id" + ] + }, + "DeleteMemberResponse": { + "type": "structure", + "members": {} + }, + "DescribeBucketsRequest": { + "type": "structure", + "members": { + "criteria": { + "shape": "BucketCriteria", + "locationName": "criteria", + "documentation": "

    The criteria to use to filter the query results.

    " + }, + "maxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

    The maximum number of items to include in each page of the response. The default value is 50.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The nextToken string that specifies which page of results to return in a paginated response.

    " + }, + "sortCriteria": { + "shape": "BucketSortCriteria", + "locationName": "sortCriteria", + "documentation": "

    The criteria to use to sort the query results.

    " + } + } + }, + "DescribeBucketsResponse": { + "type": "structure", + "members": { + "buckets": { + "shape": "__listOfBucketMetadata", + "locationName": "buckets", + "documentation": "

    An array of objects, one for each bucket that meets the filter criteria specified in the request.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

    " + } + } + }, + "DescribeClassificationJobRequest": { + "type": "structure", + "members": { + "jobId": { + "shape": "__string", + "location": "uri", + "locationName": "jobId", + "documentation": "

    The unique identifier for the classification job.

    " + } + }, + "required": [ + "jobId" + ] + }, + "DescribeClassificationJobResponse": { + "type": "structure", + "members": { + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

    The token that was provided to ensure the idempotency of the request to create the job.

    ", + "idempotencyToken": true + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the job was created.

    " + }, + "customDataIdentifierIds": { + "shape": "__listOf__string", + "locationName": "customDataIdentifierIds", + "documentation": "

    The custom data identifiers that the job uses to analyze data.

    " + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    The custom description of the job.

    " + }, + "initialRun": { + "shape": "__boolean", + "locationName": "initialRun", + "documentation": "

    Specifies whether the job is configured to analyze all existing, eligible objects immediately after it's created.

    " + }, + "jobArn": { + "shape": "__string", + "locationName": "jobArn", + "documentation": "

    The Amazon Resource Name (ARN) of the job.

    " + }, + "jobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The unique identifier for the job.

    " + }, + "jobStatus": { + "shape": "JobStatus", + "locationName": "jobStatus", + "documentation": "

    The current status of the job. Possible values are:

    • CANCELLED - You cancelled the job or, if it's a one-time job, you paused the job and didn't resume it within 30 days.

    • COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.

    • IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.

    • PAUSED - Amazon Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.

    • RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.

    • USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume it within 30 days of pausing it, the job or job run will expire and be cancelled, depending on the job's type. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.

    " + }, + "jobType": { + "shape": "JobType", + "locationName": "jobType", + "documentation": "

    The schedule for running the job. Possible values are:

    • ONE_TIME - The job runs only once.

    • SCHEDULED - The job runs on a daily, weekly, or monthly basis. The scheduleFrequency property indicates the recurrence pattern for the job.

    " + }, + "lastRunErrorStatus": { + "shape": "LastRunErrorStatus", + "locationName": "lastRunErrorStatus", + "documentation": "

    Specifies whether any account- or bucket-level access errors occurred when the job ran. For a recurring job, this value indicates the error status of the job's most recent run.

    " + }, + "lastRunTime": { + "shape": "__timestampIso8601", + "locationName": "lastRunTime", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the job started. If the job is a recurring job, this value indicates when the most recent run started.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The custom name of the job.

    " + }, + "s3JobDefinition": { + "shape": "S3JobDefinition", + "locationName": "s3JobDefinition", + "documentation": "

    The S3 buckets that the job is configured to analyze, and the scope of that analysis.

    " + }, + "samplingPercentage": { + "shape": "__integer", + "locationName": "samplingPercentage", + "documentation": "

    The sampling depth, as a percentage, that determines the percentage of eligible objects that the job analyzes.

    " + }, + "scheduleFrequency": { + "shape": "JobScheduleFrequency", + "locationName": "scheduleFrequency", + "documentation": "

    The recurrence pattern for running the job. If the job is configured to run only once, this value is null.

    " + }, + "statistics": { + "shape": "Statistics", + "locationName": "statistics", + "documentation": "

    The number of times that the job has run and processing statistics for the job's current run.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that specifies which tags (keys and values) are associated with the classification job.

    " + }, + "userPausedDetails": { + "shape": "UserPausedDetails", + "locationName": "userPausedDetails", + "documentation": "

    If the current status of the job is USER_PAUSED, specifies when the job was paused and when the job or job run will expire and be cancelled if it isn't resumed. This value is present only if the value for jobStatus is USER_PAUSED.

    " + } + } + }, + "DescribeOrganizationConfigurationRequest": { + "type": "structure", + "members": {} + }, + "DescribeOrganizationConfigurationResponse": { + "type": "structure", + "members": { + "autoEnable": { + "shape": "__boolean", + "locationName": "autoEnable", + "documentation": "

    Specifies whether Amazon Macie is enabled automatically for accounts that are added to the AWS organization.

    " + }, + "maxAccountLimitReached": { + "shape": "__boolean", + "locationName": "maxAccountLimitReached", + "documentation": "

    Specifies whether the maximum number of Amazon Macie member accounts is already part of the AWS organization.

    " + } + } + }, + "DisableMacieRequest": { + "type": "structure", + "members": {} + }, + "DisableMacieResponse": { + "type": "structure", + "members": {} + }, + "DisableOrganizationAdminAccountRequest": { + "type": "structure", + "members": { + "adminAccountId": { + "shape": "__string", + "location": "querystring", + "locationName": "adminAccountId", + "documentation": "

    The AWS account ID of the delegated administrator account.

    " + } + }, + "required": [ + "adminAccountId" + ] + }, + "DisableOrganizationAdminAccountResponse": { + "type": "structure", + "members": {} + }, + "DisassociateFromMasterAccountRequest": { + "type": "structure", + "members": {} + }, + "DisassociateFromMasterAccountResponse": { + "type": "structure", + "members": {} + }, + "DisassociateMemberRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

    The unique identifier for the Amazon Macie resource or account that the request applies to.

    " + } + }, + "required": [ + "id" + ] + }, + "DisassociateMemberResponse": { + "type": "structure", + "members": {} + }, + "DomainDetails": { + "type": "structure", + "members": { + "domainName": { + "shape": "__string", + "locationName": "domainName", + "documentation": "

    The name of the domain.

    " + } + }, + "documentation": "

    Provides information about the domain name of the device that an entity used to perform an action on an affected resource.

    " + }, + "EffectivePermission": { + "type": "string", + "enum": [ + "PUBLIC", + "NOT_PUBLIC", + "UNKNOWN" + ] + }, + "Empty": { + "type": "structure", + "members": {}, + "documentation": "

    The request succeeded and there isn't any content to include in the body of the response (No Content).

    " + }, + "EnableMacieRequest": { + "type": "structure", + "members": { + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

    A unique, case-sensitive token that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken": true + }, + "findingPublishingFrequency": { + "shape": "FindingPublishingFrequency", + "locationName": "findingPublishingFrequency", + "documentation": "Specifies how often to publish updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events)." + }, + "status": { + "shape": "MacieStatus", + "locationName": "status", + "documentation": "

    Specifies the status for the account. To enable Amazon Macie and start all Amazon Macie activities for the account, set this value to ENABLED.

    " + } + } + }, + "EnableMacieResponse": { + "type": "structure", + "members": {} + }, + "EnableOrganizationAdminAccountRequest": { + "type": "structure", + "members": { + "adminAccountId": { + "shape": "__string", + "locationName": "adminAccountId", + "documentation": "

    The AWS account ID for the account to designate as the delegated Amazon Macie administrator account for the organization.

    " + }, + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

    A unique, case-sensitive token that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken": true + } + }, + "required": [ + "adminAccountId" + ] + }, + "EnableOrganizationAdminAccountResponse": { + "type": "structure", + "members": {} + }, + "EncryptionType": { + "type": "string", + "documentation": "

    The type of server-side encryption that's used to encrypt an S3 object or objects in an S3 bucket. Valid values are:

    ", + "enum": [ + "NONE", + "AES256", + "aws:kms", + "UNKNOWN" + ] + }, + "ErrorCode": { + "type": "string", + "documentation": "

    The source of an error, issue, or delay. Possible values are:

    ", + "enum": [ + "ClientError", + "InternalError" + ] + }, + "FederatedUser": { + "type": "structure", + "members": { + "accessKeyId": { + "shape": "__string", + "locationName": "accessKeyId", + "documentation": "

    The AWS access key ID that identifies the credentials.

    " + }, + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account that owns the entity that was used to get the credentials.

    " + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the entity that was used to get the credentials.

    " + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

    The unique identifier for the entity that was used to get the credentials.

    " + }, + "sessionContext": { + "shape": "SessionContext", + "locationName": "sessionContext", + "documentation": "

    The details of the session that was created for the credentials, including the entity that issued the session.

    " + } + }, + "documentation": "

    Provides information about an identity that performed an action on an affected resource by using temporary security credentials. The credentials were obtained using the GetFederationToken operation of the AWS Security Token Service (AWS STS) API.

    " + }, + "Finding": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account that the finding applies to. This is typically the account that owns the affected resource.

    " + }, + "archived": { + "shape": "__boolean", + "locationName": "archived", + "documentation": "

    Specifies whether the finding is archived.

    " + }, + "category": { + "shape": "FindingCategory", + "locationName": "category", + "documentation": "

    The category of the finding. Possible values are: CLASSIFICATION, for a sensitive data finding; and, POLICY, for a policy finding.

    " + }, + "classificationDetails": { + "shape": "ClassificationDetails", + "locationName": "classificationDetails", + "documentation": "

    The details of a sensitive data finding. This value is null for a policy finding.

    " + }, + "count": { + "shape": "__long", + "locationName": "count", + "documentation": "

    The total number of occurrences of the finding. For sensitive data findings, this value is always 1. All sensitive data findings are considered new (unique) because they derive from individual classification jobs.

    " + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the finding was created.

    " + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    The description of the finding.

    " + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

    The unique identifier for the finding. This is a random string that Amazon Macie generates and assigns to a finding when it creates the finding.

    " + }, + "partition": { + "shape": "__string", + "locationName": "partition", + "documentation": "

    The AWS partition that Amazon Macie created the finding in.

    " + }, + "policyDetails": { + "shape": "PolicyDetails", + "locationName": "policyDetails", + "documentation": "

    The details of a policy finding. This value is null for a sensitive data finding.

    " + }, + "region": { + "shape": "__string", + "locationName": "region", + "documentation": "

    The AWS Region that Amazon Macie created the finding in.

    " + }, + "resourcesAffected": { + "shape": "ResourcesAffected", + "locationName": "resourcesAffected", + "documentation": "

    The resources that the finding applies to.

    " + }, + "sample": { + "shape": "__boolean", + "locationName": "sample", + "documentation": "

    Specifies whether the finding is a sample finding. A sample finding is a finding that uses example data to demonstrate what a finding might contain.

    " + }, + "schemaVersion": { + "shape": "__string", + "locationName": "schemaVersion", + "documentation": "

    The version of the schema that was used to define the data structures in the finding.

    " + }, + "severity": { + "shape": "Severity", + "locationName": "severity", + "documentation": "

    The severity level and score for the finding.

    " + }, + "title": { + "shape": "__string", + "locationName": "title", + "documentation": "

    The brief description of the finding.

    " + }, + "type": { + "shape": "FindingType", + "locationName": "type", + "documentation": "

    The type of the finding.

    " + }, + "updatedAt": { + "shape": "__timestampIso8601", + "locationName": "updatedAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the finding was last updated. For sensitive data findings, this value is the same as the value for the createdAt property. All sensitive data findings are considered new (unique) because they derive from individual classification jobs.

    " + } + }, + "documentation": "

    Provides the details of a finding.

    " + }, + "FindingAction": { + "type": "structure", + "members": { + "actionType": { + "shape": "FindingActionType", + "locationName": "actionType", + "documentation": "

    The type of action that occurred for the affected resource. This value is typically AWS_API_CALL, which indicates that an entity invoked an API operation for the resource.

    " + }, + "apiCallDetails": { + "shape": "ApiCallDetails", + "locationName": "apiCallDetails", + "documentation": "

    The invocation details of the API operation that an entity invoked for the affected resource, if the value for the actionType property is AWS_API_CALL.

    " + } + }, + "documentation": "

    Provides information about an action that occurred for a resource and produced a policy finding.

    " + }, + "FindingActionType": { + "type": "string", + "documentation": "

    The type of action that occurred for the resource and produced the policy finding:

    ", + "enum": [ + "AWS_API_CALL" + ] + }, + "FindingActor": { + "type": "structure", + "members": { + "domainDetails": { + "shape": "DomainDetails", + "locationName": "domainDetails", + "documentation": "

    The domain name of the device that the entity used to perform the action on the affected resource.

    " + }, + "ipAddressDetails": { + "shape": "IpAddressDetails", + "locationName": "ipAddressDetails", + "documentation": "

    The IP address of the device that the entity used to perform the action on the affected resource. This object also provides information such as the owner and geographic location for the IP address.

    " + }, + "userIdentity": { + "shape": "UserIdentity", + "locationName": "userIdentity", + "documentation": "

    The type and other characteristics of the entity that performed the action on the affected resource.

    " + } + }, + "documentation": "

    Provides information about an entity that performed an action that produced a policy finding for a resource.

    " + }, + "FindingCategory": { + "type": "string", + "documentation": "

    The category of the finding. Valid values are:

    ", + "enum": [ + "CLASSIFICATION", + "POLICY" + ] + }, + "FindingCriteria": { + "type": "structure", + "members": { + "criterion": { + "shape": "Criterion", + "locationName": "criterion", + "documentation": "

    A condition that specifies the property, operator, and one or more values to use to filter the results.

    " + } + }, + "documentation": "

    Specifies, as a map, one or more property-based conditions that filter the results of a query for findings.

    " + }, + "FindingPublishingFrequency": { + "type": "string", + "documentation": "

    The frequency with which Amazon Macie publishes updates to policy findings for an account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events). Valid values are:

    ", + "enum": [ + "FIFTEEN_MINUTES", + "ONE_HOUR", + "SIX_HOURS" + ] + }, + "FindingStatisticsSortAttributeName": { + "type": "string", + "documentation": "

    The grouping to sort the results by. Valid values are:

    ", + "enum": [ + "groupKey", + "count" + ] + }, + "FindingStatisticsSortCriteria": { + "type": "structure", + "members": { + "attributeName": { + "shape": "FindingStatisticsSortAttributeName", + "locationName": "attributeName", + "documentation": "

    The grouping to sort the results by. Valid values are: count, sort the results by the number of findings in each group of results; and, groupKey, sort the results by the name of each group of results.

    " + }, + "orderBy": { + "shape": "OrderBy", + "locationName": "orderBy", + "documentation": "

    The sort order to apply to the results, based on the value for the property specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

    " + } + }, + "documentation": "

    Specifies criteria for sorting the results of a query that retrieves aggregated statistical data about findings.

    " + }, + "FindingType": { + "type": "string", + "documentation": "

    The type of finding. For details about each type, see Types of Amazon Macie findings in the Amazon Macie User Guide. Valid values are:

    ", + "enum": [ + "SensitiveData:S3Object/Multiple", + "SensitiveData:S3Object/Financial", + "SensitiveData:S3Object/Personal", + "SensitiveData:S3Object/Credentials", + "SensitiveData:S3Object/CustomIdentifier", + "Policy:IAMUser/S3BucketPublic", + "Policy:IAMUser/S3BucketSharedExternally", + "Policy:IAMUser/S3BucketReplicatedExternally", + "Policy:IAMUser/S3BucketEncryptionDisabled", + "Policy:IAMUser/S3BlockPublicAccessDisabled" + ] + }, + "FindingsFilterAction": { + "type": "string", + "documentation": "

    The action to perform on findings that meet the filter criteria. To suppress (automatically archive) findings that meet the criteria, set this value to ARCHIVE. Valid values are:

    ", + "enum": [ + "ARCHIVE", + "NOOP" + ] + }, + "FindingsFilterListItem": { + "type": "structure", + "members": { + "action": { + "shape": "FindingsFilterAction", + "locationName": "action", + "documentation": "

    The action that's performed on findings that meet the filter criteria. Possible values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings.

    " + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the filter.

    " + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

    The unique identifier for the filter.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The custom name of the filter.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that identifies the tags (keys and values) that are associated with the filter.

    " + } + }, + "documentation": "

    Provides information about a findings filter.

    " + }, + "GetBucketStatisticsRequest": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account.

    " + } + } + }, + "GetBucketStatisticsResponse": { + "type": "structure", + "members": { + "bucketCount": { + "shape": "__long", + "locationName": "bucketCount", + "documentation": "

    The total number of buckets.

    " + }, + "bucketCountByEffectivePermission": { + "shape": "BucketCountByEffectivePermission", + "locationName": "bucketCountByEffectivePermission", + "documentation": "

    The total number of buckets that are publicly accessible based on a combination of permissions settings for each bucket.

    " + }, + "bucketCountByEncryptionType": { + "shape": "BucketCountByEncryptionType", + "locationName": "bucketCountByEncryptionType", + "documentation": "

    The total number of buckets, grouped by server-side encryption type. This object also reports the total number of buckets that don't encrypt objects by default.

    " + }, + "bucketCountBySharedAccessType": { + "shape": "BucketCountBySharedAccessType", + "locationName": "bucketCountBySharedAccessType", + "documentation": "

    The total number of buckets that are shared with another AWS account.

    " + }, + "classifiableObjectCount": { + "shape": "__long", + "locationName": "classifiableObjectCount", + "documentation": "

    The total number of objects that Amazon Macie can analyze in the buckets. These objects use a supported storage class and have a file name extension for a supported file or storage format.

    " + }, + "classifiableSizeInBytes": { + "shape": "__long", + "locationName": "classifiableSizeInBytes", + "documentation": "

    The total storage size, in bytes, of all the objects that Amazon Macie can analyze in the buckets. These objects use a supported storage class and have a file name extension for a supported file or storage format.

    " + }, + "lastUpdated": { + "shape": "__timestampIso8601", + "locationName": "lastUpdated", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently retrieved data about the buckets from Amazon S3.

    " + }, + "objectCount": { + "shape": "__long", + "locationName": "objectCount", + "documentation": "

    The total number of objects in the buckets.

    " + }, + "sizeInBytes": { + "shape": "__long", + "locationName": "sizeInBytes", + "documentation": "

    The total storage size, in bytes, of the buckets.

    " + }, + "sizeInBytesCompressed": { + "shape": "__long", + "locationName": "sizeInBytesCompressed", + "documentation": "

    The total compressed storage size, in bytes, of the buckets.

    " + }, + "unclassifiableObjectCount": { + "shape": "ObjectLevelStatistics", + "locationName": "unclassifiableObjectCount", + "documentation": "

    The total number of objects that Amazon Macie can't analyze in the buckets. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

    " + }, + "unclassifiableObjectSizeInBytes": { + "shape": "ObjectLevelStatistics", + "locationName": "unclassifiableObjectSizeInBytes", + "documentation": "

    The total storage size, in bytes, of all the objects that Amazon Macie can't analyze in the buckets. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

    " + } + } + }, + "GetClassificationExportConfigurationRequest": { + "type": "structure", + "members": {} + }, + "GetClassificationExportConfigurationResponse": { + "type": "structure", + "members": { + "configuration": { + "shape": "ClassificationExportConfiguration", + "locationName": "configuration", + "documentation": "

    The location where data classification results are stored, and the encryption settings that are used when storing results in that location.

    " + } + } + }, + "GetCustomDataIdentifierRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

    The unique identifier for the Amazon Macie resource or account that the request applies to.

    " + } + }, + "required": [ + "id" + ] + }, + "GetCustomDataIdentifierResponse": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the custom data identifier.

    " + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the custom data identifier was created.

    " + }, + "deleted": { + "shape": "__boolean", + "locationName": "deleted", + "documentation": "

    Specifies whether the custom data identifier was deleted. If you delete a custom data identifier, Amazon Macie doesn't delete it permanently. Instead, it soft deletes the identifier.

    " + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    The custom description of the custom data identifier.

    " + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

    The unique identifier for the custom data identifier.

    " + }, + "ignoreWords": { + "shape": "__listOf__string", + "locationName": "ignoreWords", + "documentation": "

    An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. Ignore words are case sensitive.

    " + }, + "keywords": { + "shape": "__listOf__string", + "locationName": "keywords", + "documentation": "

    An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. Keywords aren't case sensitive.

    " + }, + "maximumMatchDistance": { + "shape": "__integer", + "locationName": "maximumMatchDistance", + "documentation": "

    The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The custom name of the custom data identifier.

    " + }, + "regex": { + "shape": "__string", + "locationName": "regex", + "documentation": "

    The regular expression (regex) that defines the pattern to match.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that identifies the tags (keys and values) that are associated with the custom data identifier.

    " + } + } + }, + "GetFindingStatisticsRequest": { + "type": "structure", + "members": { + "findingCriteria": { + "shape": "FindingCriteria", + "locationName": "findingCriteria", + "documentation": "

    The criteria to use to filter the query results.

    " + }, + "groupBy": { + "shape": "GroupBy", + "locationName": "groupBy", + "documentation": "

    The finding property to use to group the query results. Valid values are:

    • classificationDetails.jobId - The unique identifier for the classification job that produced the finding.

    • resourcesAffected.s3Bucket.name - The name of the S3 bucket that the finding applies to.

    • severity.description - The severity level of the finding, such as High or Medium.

    • type - The type of finding, such as Policy:IAMUser/S3BucketPublic and SensitiveData:S3Object/Personal.

    " + }, + "size": { + "shape": "__integer", + "locationName": "size", + "documentation": "

    The maximum number of items to include in each page of the response.

    " + }, + "sortCriteria": { + "shape": "FindingStatisticsSortCriteria", + "locationName": "sortCriteria", + "documentation": "

    The criteria to use to sort the query results.

    " + } + }, + "required": [ + "groupBy" + ] + }, + "GetFindingStatisticsResponse": { + "type": "structure", + "members": { + "countsByGroup": { + "shape": "__listOfGroupCount", + "locationName": "countsByGroup", + "documentation": "

    An array of objects, one for each group of findings that meet the filter criteria specified in the request.

    " + } + } + }, + "GetFindingsFilterRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

    The unique identifier for the Amazon Macie resource or account that the request applies to.

    " + } + }, + "required": [ + "id" + ] + }, + "GetFindingsFilterResponse": { + "type": "structure", + "members": { + "action": { + "shape": "FindingsFilterAction", + "locationName": "action", + "documentation": "

    The action that's performed on findings that meet the filter criteria (findingCriteria). Possible values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings.

    " + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the filter.

    " + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    The custom description of the filter.

    " + }, + "findingCriteria": { + "shape": "FindingCriteria", + "locationName": "findingCriteria", + "documentation": "

    The criteria that's used to filter findings.

    " + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

    The unique identifier for the filter.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The custom name of the filter.

    " + }, + "position": { + "shape": "__integer", + "locationName": "position", + "documentation": "

    The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that identifies the tags (keys and values) that are associated with the filter.

    " + } + } + }, + "GetFindingsRequest": { + "type": "structure", + "members": { + "findingIds": { + "shape": "__listOf__string", + "locationName": "findingIds", + "documentation": "

    An array of strings that lists the unique identifiers for the findings to retrieve.

    " + }, + "sortCriteria": { + "shape": "SortCriteria", + "locationName": "sortCriteria", + "documentation": "

    The criteria for sorting the results of the request.

    " + } + }, + "required": [ + "findingIds" + ] + }, + "GetFindingsResponse": { + "type": "structure", + "members": { + "findings": { + "shape": "__listOfFinding", + "locationName": "findings", + "documentation": "

    An array of objects, one for each finding that meets the criteria specified in the request.

    " + } + } + }, + "GetInvitationsCountRequest": { + "type": "structure", + "members": {} + }, + "GetInvitationsCountResponse": { + "type": "structure", + "members": { + "invitationsCount": { + "shape": "__long", + "locationName": "invitationsCount", + "documentation": "

    The total number of invitations that were received by the account, not including the currently accepted invitation.

    " + } + } + }, + "GetMacieSessionRequest": { + "type": "structure", + "members": {} + }, + "GetMacieSessionResponse": { + "type": "structure", + "members": { + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the Amazon Macie account was created.

    " + }, + "findingPublishingFrequency": { + "shape": "FindingPublishingFrequency", + "locationName": "findingPublishingFrequency", + "documentation": "

    The frequency with which Amazon Macie publishes updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events).

    " + }, + "serviceRole": { + "shape": "__string", + "locationName": "serviceRole", + "documentation": "

    The Amazon Resource Name (ARN) of the service-linked role that allows Amazon Macie to monitor and analyze data in AWS resources for the account.

    " + }, + "status": { + "shape": "MacieStatus", + "locationName": "status", + "documentation": "

    The current status of the Amazon Macie account. Possible values are: PAUSED, the account is enabled but all Amazon Macie activities are suspended (paused) for the account; and, ENABLED, the account is enabled and all Amazon Macie activities are enabled for the account.

    " + }, + "updatedAt": { + "shape": "__timestampIso8601", + "locationName": "updatedAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the Amazon Macie account.

    " + } + } + }, + "GetMasterAccountRequest": { + "type": "structure", + "members": {} + }, + "GetMasterAccountResponse": { + "type": "structure", + "members": { + "master": { + "shape": "Invitation", + "locationName": "master", + "documentation": "

    The AWS account ID for the master account. If the accounts are associated by a Macie membership invitation, this object also provides details about the invitation that was sent and accepted to establish the relationship between the accounts.

    " + } + } + }, + "GetMemberRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

    The unique identifier for the Amazon Macie resource or account that the request applies to.

    " + } + }, + "required": [ + "id" + ] + }, + "GetMemberResponse": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The AWS account ID for the account.

    " + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the account.

    " + }, + "email": { + "shape": "__string", + "locationName": "email", + "documentation": "

    The email address for the account.

    " + }, + "invitedAt": { + "shape": "__timestampIso8601", + "locationName": "invitedAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when an Amazon Macie membership invitation was last sent to the account. This value is null if a Macie invitation hasn't been sent to the account.

    " + }, + "masterAccountId": { + "shape": "__string", + "locationName": "masterAccountId", + "documentation": "

    The AWS account ID for the master account.

    " + }, + "relationshipStatus": { + "shape": "RelationshipStatus", + "locationName": "relationshipStatus", + "documentation": "

    The current status of the relationship between the account and the master account.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that identifies the tags (keys and values) that are associated with the member account in Amazon Macie.

    " + }, + "updatedAt": { + "shape": "__timestampIso8601", + "locationName": "updatedAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the relationship between the account and the master account.

    " + } + } + }, + "GetUsageStatisticsRequest": { + "type": "structure", + "members": { + "filterBy": { + "shape": "__listOfUsageStatisticsFilter", + "locationName": "filterBy", + "documentation": "

    An array of objects, one for each condition to use to filter the query results. If the array contains more than one object, Amazon Macie uses an AND operator to join the conditions specified by the objects.

    " + }, + "maxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

    The maximum number of items to include in each page of the response.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The nextToken string that specifies which page of results to return in a paginated response.

    " + }, + "sortBy": { + "shape": "UsageStatisticsSortBy", + "locationName": "sortBy", + "documentation": "

    The criteria to use to sort the query results.

    " + } + } + }, + "GetUsageStatisticsResponse": { + "type": "structure", + "members": { + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

    " + }, + "records": { + "shape": "__listOfUsageRecord", + "locationName": "records", + "documentation": "

    An array of objects that contains the results of the query. Each object contains the data for an account that meets the filter criteria specified in the request.
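    [Editor's note] The maxResults/nextToken members above describe the manual pagination contract: pass the previous response's nextToken back until it comes back null. A sketch of that loop; the page size and the printed field are illustrative choices, not requirements of the model.

```java
import software.amazon.awssdk.services.macie2.Macie2Client;
import software.amazon.awssdk.services.macie2.model.GetUsageStatisticsRequest;
import software.amazon.awssdk.services.macie2.model.GetUsageStatisticsResponse;
import software.amazon.awssdk.services.macie2.model.UsageRecord;

public class UsageStatisticsPager {
    public static void main(String[] args) {
        try (Macie2Client macie = Macie2Client.create()) {
            String nextToken = null;
            do {
                // maxResults bounds the page size; nextToken selects the page.
                GetUsageStatisticsRequest request = GetUsageStatisticsRequest.builder()
                        .maxResults(10)
                        .nextToken(nextToken)
                        .build();

                GetUsageStatisticsResponse page = macie.getUsageStatistics(request);
                for (UsageRecord record : page.records()) {
                    System.out.println(record.accountId());
                }

                // nextToken is null when there are no more pages.
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }
}
```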

    " + } + } + }, + "GetUsageTotalsRequest": { + "type": "structure", + "members": {} + }, + "GetUsageTotalsResponse": { + "type": "structure", + "members": { + "usageTotals": { + "shape": "__listOfUsageTotal", + "locationName": "usageTotals", + "documentation": "

    An array of objects that contains the results of the query. Each object contains the data for a specific usage metric.

    " + } + } + }, + "GroupBy": { + "type": "string", + "enum": [ + "resourcesAffected.s3Bucket.name", + "type", + "classificationDetails.jobId", + "severity.description" + ] + }, + "GroupCount": { + "type": "structure", + "members": { + "count": { + "shape": "__long", + "locationName": "count", + "documentation": "

    The total number of findings in the group of query results.

    " + }, + "groupKey": { + "shape": "__string", + "locationName": "groupKey", + "documentation": "

    The name of the property that defines the group in the query results, as specified by the groupBy property in the query request.

    " + } + }, + "documentation": "

    Provides a group of results for a query that retrieved aggregated statistical data about findings.

    " + }, + "IamUser": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account that's associated with the IAM user who performed the action.

    " + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the principal that performed the action. The last section of the ARN contains the name of the user who performed the action.

    " + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

    The unique identifier for the IAM user who performed the action.

    " + }, + "userName": { + "shape": "__string", + "locationName": "userName", + "documentation": "

    The user name of the IAM user who performed the action.

    " + } + }, + "documentation": "

    Provides information about an AWS Identity and Access Management (IAM) user who performed an action on an affected resource.

    " + }, + "InternalServerException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    The explanation of the error that occurred.

    " + } + }, + "documentation": "

    Provides information about an error that occurred due to an unknown internal server error, exception, or failure.

    ", + "exception": true, + "error": { + "httpStatusCode": 500 + } + }, + "Invitation": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The AWS account ID for the account that sent the invitation.

    " + }, + "invitationId": { + "shape": "__string", + "locationName": "invitationId", + "documentation": "

    The unique identifier for the invitation. Amazon Macie uses this identifier to validate the inviter account with the invitee account.

    " + }, + "invitedAt": { + "shape": "__timestampIso8601", + "locationName": "invitedAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the invitation was sent.

    " + }, + "relationshipStatus": { + "shape": "RelationshipStatus", + "locationName": "relationshipStatus", + "documentation": "

    The status of the relationship between the account that sent the invitation (inviter account) and the account that received the invitation (invitee account).

    " + } + }, + "documentation": "

    Provides information about an Amazon Macie membership invitation that was received by an account.

    " + }, + "IpAddressDetails": { + "type": "structure", + "members": { + "ipAddressV4": { + "shape": "__string", + "locationName": "ipAddressV4", + "documentation": "

    The Internet Protocol version 4 (IPv4) address of the device.

    " + }, + "ipCity": { + "shape": "IpCity", + "locationName": "ipCity", + "documentation": "

    The city that the IP address originated from.

    " + }, + "ipCountry": { + "shape": "IpCountry", + "locationName": "ipCountry", + "documentation": "

    The country that the IP address originated from.

    " + }, + "ipGeoLocation": { + "shape": "IpGeoLocation", + "locationName": "ipGeoLocation", + "documentation": "

    The geographic coordinates of the location that the IP address originated from.

    " + }, + "ipOwner": { + "shape": "IpOwner", + "locationName": "ipOwner", + "documentation": "

    The registered owner of the IP address.

    " + } + }, + "documentation": "

    Provides information about the IP address of the device that an entity used to perform an action on an affected resource.

    " + }, + "IpCity": { + "type": "structure", + "members": { + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The name of the city.

    " + } + }, + "documentation": "

    Provides information about the city that an IP address originated from.

    " + }, + "IpCountry": { + "type": "structure", + "members": { + "code": { + "shape": "__string", + "locationName": "code", + "documentation": "

    The two-character code, in ISO 3166-1 alpha-2 format, for the country that the IP address originated from. For example, US for the United States.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The name of the country that the IP address originated from.

    " + } + }, + "documentation": "

    Provides information about the country that an IP address originated from.

    " + }, + "IpGeoLocation": { + "type": "structure", + "members": { + "lat": { + "shape": "__double", + "locationName": "lat", + "documentation": "

    The latitude coordinate of the location, rounded to four decimal places.

    " + }, + "lon": { + "shape": "__double", + "locationName": "lon", + "documentation": "

    The longitude coordinate of the location, rounded to four decimal places.

    " + } + }, + "documentation": "

    Provides geographic coordinates that indicate where a specified IP address originated from.

    " + }, + "IpOwner": { + "type": "structure", + "members": { + "asn": { + "shape": "__string", + "locationName": "asn", + "documentation": "

    The autonomous system number (ASN) for the autonomous system that included the IP address.

    " + }, + "asnOrg": { + "shape": "__string", + "locationName": "asnOrg", + "documentation": "

    The organization identifier that's associated with the autonomous system number (ASN) for the autonomous system that included the IP address.

    " + }, + "isp": { + "shape": "__string", + "locationName": "isp", + "documentation": "

    The name of the internet service provider (ISP) that owned the IP address.

    " + }, + "org": { + "shape": "__string", + "locationName": "org", + "documentation": "

    The name of the organization that owned the IP address.

    " + } + }, + "documentation": "

    Provides information about the registered owner of an IP address.

    " + }, + "IsDefinedInJob": { + "type": "string", + "enum": [ + "TRUE", + "FALSE", + "UNKNOWN" + ] + }, + "IsMonitoredByJob": { + "type": "string", + "enum": [ + "TRUE", + "FALSE", + "UNKNOWN" + ] + }, + "JobComparator": { + "type": "string", + "documentation": "

    The operator to use in a condition. Valid values are:

    ", + "enum": [ + "EQ", + "GT", + "GTE", + "LT", + "LTE", + "NE", + "CONTAINS" + ] + }, + "JobDetails": { + "type": "structure", + "members": { + "isDefinedInJob": { + "shape": "IsDefinedInJob", + "locationName": "isDefinedInJob", + "documentation": "

    Specifies whether any one-time or recurring jobs are configured to analyze data in the bucket. Possible values are:

    • TRUE - One or more jobs are configured to analyze data in the bucket, and at least one of those jobs has a status other than CANCELLED.
    • TRUE - One or more jobs are configured to analyze data in the bucket, and at least one of those jobs has a status other than CANCELLED.

    • FALSE - No jobs are configured to analyze data in the bucket, or all the jobs that are configured to analyze data in the bucket have a status of CANCELLED.

    • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

    " + }, + "isMonitoredByJob": { + "shape": "IsMonitoredByJob", + "locationName": "isMonitoredByJob", + "documentation": "

    Specifies whether any recurring jobs are configured to analyze data in the bucket. Possible values are:

    • TRUE - One or more recurring jobs are configured to analyze data in the bucket, and at least one of those jobs has a status other than CANCELLED.

    • FALSE - No recurring jobs are configured to analyze data in the bucket, or all the recurring jobs that are configured to analyze data in the bucket have a status of CANCELLED.

    • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

    " + }, + "lastJobId": { + "shape": "__string", + "locationName": "lastJobId", + "documentation": "

    The unique identifier for the job that ran most recently (either the latest run of a recurring job or the only run of a one-time job) and is configured to analyze data in the bucket.

    This value is null if the value for the isDefinedInJob property is FALSE or UNKNOWN.

    " + }, + "lastJobRunTime": { + "shape": "__timestampIso8601", + "locationName": "lastJobRunTime", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the job (lastJobId) started. If the job is a recurring job, this value indicates when the most recent run started.

    This value is null if the value for the isDefinedInJob property is FALSE or UNKNOWN.

    " + } + }, + "documentation": "

    Specifies whether any one-time or recurring classification jobs are configured to analyze data in an S3 bucket, and, if so, the details of the job that ran most recently.

    " + }, + "JobScheduleFrequency": { + "type": "structure", + "members": { + "dailySchedule": { + "shape": "DailySchedule", + "locationName": "dailySchedule", + "documentation": "

    Specifies a daily recurrence pattern for running the job.

    " + }, + "monthlySchedule": { + "shape": "MonthlySchedule", + "locationName": "monthlySchedule", + "documentation": "

    Specifies a monthly recurrence pattern for running the job.

    " + }, + "weeklySchedule": { + "shape": "WeeklySchedule", + "locationName": "weeklySchedule", + "documentation": "

    Specifies a weekly recurrence pattern for running the job.

    " + } + }, + "documentation": "

    Specifies the recurrence pattern for running a classification job.
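    [Editor's note] As the JobScheduleFrequency shape describes, a recurrence pattern sets exactly one of the daily, weekly, or monthly members. A construction sketch using MonthlySchedule; the CreateClassificationJobRequest member this object is ultimately passed to is not part of this hunk, so that wiring is intentionally left out.

```java
import software.amazon.awssdk.services.macie2.model.JobScheduleFrequency;
import software.amazon.awssdk.services.macie2.model.MonthlySchedule;

public class ScheduleExample {
    public static void main(String[] args) {
        // Set exactly one of dailySchedule, weeklySchedule, or monthlySchedule.
        // dayOfMonth 31 runs on the last day of shorter months (see MonthlySchedule below).
        JobScheduleFrequency schedule = JobScheduleFrequency.builder()
                .monthlySchedule(MonthlySchedule.builder().dayOfMonth(31).build())
                .build();

        System.out.println(schedule);
    }
}
```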

    " + }, + "JobScopeTerm": { + "type": "structure", + "members": { + "simpleScopeTerm": { + "shape": "SimpleScopeTerm", + "locationName": "simpleScopeTerm", + "documentation": "

    A property-based condition that defines a property, operator, and one or more values for including or excluding an object from the job.

    " + }, + "tagScopeTerm": { + "shape": "TagScopeTerm", + "locationName": "tagScopeTerm", + "documentation": "

    A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an object from the job.

    " + } + }, + "documentation": "

    Specifies a property- or tag-based condition that defines criteria for including or excluding objects from a classification job.

    " + }, + "JobScopingBlock": { + "type": "structure", + "members": { + "and": { + "shape": "__listOfJobScopeTerm", + "locationName": "and", + "documentation": "

    An array of conditions, one for each condition that determines which objects to include or exclude from the job.

    " + } + }, + "documentation": "

    Specifies one or more property- and tag-based conditions that define criteria for including or excluding objects from a classification job. If you specify more than one condition, Amazon Macie uses an AND operator to join the conditions.
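    [Editor's note] A sketch of how the scoping shapes compose: a SimpleScopeTerm (its members are not shown in this hunk, so those builder calls are assumptions) wrapped in a JobScopeTerm, joined with AND inside a JobScopingBlock, and attached to a Scoping's includes.

```java
import software.amazon.awssdk.services.macie2.model.JobComparator;
import software.amazon.awssdk.services.macie2.model.JobScopeTerm;
import software.amazon.awssdk.services.macie2.model.JobScopingBlock;
import software.amazon.awssdk.services.macie2.model.ScopeFilterKey;
import software.amazon.awssdk.services.macie2.model.Scoping;
import software.amazon.awssdk.services.macie2.model.SimpleScopeTerm;

public class ScopingExample {
    public static void main(String[] args) {
        // Include only objects whose file name extension is csv or tsv.
        // Multiple JobScopeTerm entries in "and" are joined with a logical AND.
        JobScopeTerm extensionTerm = JobScopeTerm.builder()
                .simpleScopeTerm(SimpleScopeTerm.builder()
                        .key(ScopeFilterKey.OBJECT_EXTENSION)
                        .comparator(JobComparator.EQ)
                        .values("csv", "tsv")
                        .build())
                .build();

        Scoping scoping = Scoping.builder()
                .includes(JobScopingBlock.builder().and(extensionTerm).build())
                .build();

        System.out.println(scoping);
    }
}
```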

    " + }, + "JobStatus": { + "type": "string", + "documentation": "

    The status of a classification job. Possible values are:

    ", + "enum": [ + "RUNNING", + "PAUSED", + "CANCELLED", + "COMPLETE", + "IDLE", + "USER_PAUSED" + ] + }, + "JobSummary": { + "type": "structure", + "members": { + "bucketDefinitions": { + "shape": "__listOfS3BucketDefinitionForJob", + "locationName": "bucketDefinitions", + "documentation": "

    The S3 buckets that the job is configured to analyze.

    " + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the job was created.

    " + }, + "jobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

    The unique identifier for the job.

    " + }, + "jobStatus": { + "shape": "JobStatus", + "locationName": "jobStatus", + "documentation": "

    The current status of the job. Possible values are:

    • CANCELLED - You cancelled the job or, if it's a one-time job, you paused the job and didn't resume it within 30 days.

    • COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.

    • IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.

    • PAUSED - Amazon Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.

    • RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.

    • USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume it within 30 days of pausing it, the job or job run will expire and be cancelled, depending on the job's type. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.

    " + }, + "jobType": { + "shape": "JobType", + "locationName": "jobType", + "documentation": "

    The schedule for running the job. Possible values are:

    • ONE_TIME - The job runs only once.

    • SCHEDULED - The job runs on a daily, weekly, or monthly basis.

    " + }, + "lastRunErrorStatus": { + "shape": "LastRunErrorStatus", + "locationName": "lastRunErrorStatus", + "documentation": "

    Specifies whether any account- or bucket-level access errors occurred when the job ran. For a recurring job, this value indicates the error status of the job's most recent run.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The custom name of the job.

    " + }, + "userPausedDetails": { + "shape": "UserPausedDetails", + "locationName": "userPausedDetails", + "documentation": "

    If the current status of the job is USER_PAUSED, specifies when the job was paused and when the job or job run will expire and be cancelled if it isn't resumed. This value is present only if the value for jobStatus is USER_PAUSED.

    " + } + }, + "documentation": "

    Provides information about a classification job, including the current status of the job.

    " + }, + "JobType": { + "type": "string", + "documentation": "

    The schedule for running a classification job. Valid values are:

    ", + "enum": [ + "ONE_TIME", + "SCHEDULED" + ] + }, + "KeyValuePair": { + "type": "structure", + "members": { + "key": { + "shape": "__string", + "locationName": "key", + "documentation": "

    One part of a key-value pair that comprises a tag. A tag key is a general label that acts as a category for more specific tag values.

    " + }, + "value": { + "shape": "__string", + "locationName": "value", + "documentation": "

    One part of a key-value pair that comprises a tag. A tag value acts as a descriptor for a tag key. A tag value can be an empty string.

    " + } + }, + "documentation": "

    Provides information about the tags that are associated with an S3 bucket or object. Each tag consists of a required tag key and an associated tag value.

    " + }, + "KeyValuePairList": { + "type": "list", + "documentation": "

    Provides information about the tags that are associated with an S3 bucket or object. Each tag consists of a required tag key and an associated tag value.

    ", + "member": { + "shape": "KeyValuePair" + } + }, + "LastRunErrorStatus": { + "type": "structure", + "members": { + "code": { + "shape": "LastRunErrorStatusCode", + "locationName": "code", + "documentation": "

    Specifies whether any account- or bucket-level access errors occurred when the job ran. For a recurring job, this value indicates the error status of the job's most recent run. Possible values are:

    • ERROR - One or more errors occurred. Amazon Macie didn't process all the data specified for the job.

    • NONE - No errors occurred. Macie processed all the data specified for the job.

    " + } + }, + "documentation": "

    Specifies whether any account- or bucket-level access errors occurred when a classification job ran. For example, the job is configured to analyze data for a member account that was suspended, or the job is configured to analyze an S3 bucket that Amazon Macie isn't allowed to access.

    " + }, + "LastRunErrorStatusCode": { + "type": "string", + "documentation": "

    Specifies whether any account- or bucket-level access errors occurred during the run of a one-time classification job or the most recent run of a recurring classification job. Possible values are:

    ", + "enum": [ + "NONE", + "ERROR" + ] + }, + "ListClassificationJobsRequest": { + "type": "structure", + "members": { + "filterCriteria": { + "shape": "ListJobsFilterCriteria", + "locationName": "filterCriteria", + "documentation": "

    The criteria to use to filter the results.

    " + }, + "maxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

    The maximum number of items to include in each page of the response.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The nextToken string that specifies which page of results to return in a paginated response.

    " + }, + "sortCriteria": { + "shape": "ListJobsSortCriteria", + "locationName": "sortCriteria", + "documentation": "

    The criteria to use to sort the results.

    " + } + } + }, + "ListClassificationJobsResponse": { + "type": "structure", + "members": { + "items": { + "shape": "__listOfJobSummary", + "locationName": "items", + "documentation": "

    An array of objects, one for each job that meets the filter criteria specified in the request.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
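    [Editor's note] Putting the filter and sort shapes above together, a request for running jobs sorted newest-first might look like the sketch below. The upper-case enum constants (JOB_STATUS, CREATED_AT) assume the SDK's usual camelCase-to-constant mapping for generated enums.

```java
import software.amazon.awssdk.services.macie2.Macie2Client;
import software.amazon.awssdk.services.macie2.model.JobComparator;
import software.amazon.awssdk.services.macie2.model.JobSummary;
import software.amazon.awssdk.services.macie2.model.ListClassificationJobsRequest;
import software.amazon.awssdk.services.macie2.model.ListClassificationJobsResponse;
import software.amazon.awssdk.services.macie2.model.ListJobsFilterCriteria;
import software.amazon.awssdk.services.macie2.model.ListJobsFilterKey;
import software.amazon.awssdk.services.macie2.model.ListJobsFilterTerm;
import software.amazon.awssdk.services.macie2.model.ListJobsSortAttributeName;
import software.amazon.awssdk.services.macie2.model.ListJobsSortCriteria;
import software.amazon.awssdk.services.macie2.model.OrderBy;

public class ListRunningJobs {
    public static void main(String[] args) {
        try (Macie2Client macie = Macie2Client.create()) {
            // Include only jobs whose jobStatus is RUNNING, newest first.
            ListClassificationJobsRequest request = ListClassificationJobsRequest.builder()
                    .filterCriteria(ListJobsFilterCriteria.builder()
                            .includes(ListJobsFilterTerm.builder()
                                    .key(ListJobsFilterKey.JOB_STATUS)
                                    .comparator(JobComparator.EQ)
                                    .values("RUNNING")
                                    .build())
                            .build())
                    .sortCriteria(ListJobsSortCriteria.builder()
                            .attributeName(ListJobsSortAttributeName.CREATED_AT)
                            .orderBy(OrderBy.DESC)
                            .build())
                    .build();

            ListClassificationJobsResponse response = macie.listClassificationJobs(request);
            for (JobSummary job : response.items()) {
                System.out.println(job.jobId() + " " + job.name());
            }
        }
    }
}
```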

    " + } + } + }, + "ListCustomDataIdentifiersRequest": { + "type": "structure", + "members": { + "maxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

    The maximum number of items to include in each page of the response.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The nextToken string that specifies which page of results to return in a paginated response.

    " + } + } + }, + "ListCustomDataIdentifiersResponse": { + "type": "structure", + "members": { + "items": { + "shape": "__listOfCustomDataIdentifierSummary", + "locationName": "items", + "documentation": "

    An array of objects, one for each custom data identifier.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

    " + } + } + }, + "ListFindingsFiltersRequest": { + "type": "structure", + "members": { + "maxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

    The maximum number of items to include in each page of a paginated response.

    " + }, + "nextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The nextToken string that specifies which page of results to return in a paginated response.

    " + } + } + }, + "ListFindingsFiltersResponse": { + "type": "structure", + "members": { + "findingsFilterListItems": { + "shape": "__listOfFindingsFilterListItem", + "locationName": "findingsFilterListItems", + "documentation": "

    An array of objects, one for each filter that's associated with the account.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

    " + } + } + }, + "ListFindingsRequest": { + "type": "structure", + "members": { + "findingCriteria": { + "shape": "FindingCriteria", + "locationName": "findingCriteria", + "documentation": "

    The criteria to use to filter the results.

    " + }, + "maxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

    The maximum number of items to include in each page of the response.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The nextToken string that specifies which page of results to return in a paginated response.

    " + }, + "sortCriteria": { + "shape": "SortCriteria", + "locationName": "sortCriteria", + "documentation": "

    The criteria to use to sort the results.

    " + } + } + }, + "ListFindingsResponse": { + "type": "structure", + "members": { + "findingIds": { + "shape": "__listOf__string", + "locationName": "findingIds", + "documentation": "

    An array of strings, where each string is the unique identifier for a finding that meets the filter criteria specified in the request.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
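    [Editor's note] ListFindings uses the same maxResults/nextToken contract, so the generated paginator (assumed here to be listFindingsPaginator; otherwise the manual nextToken loop shown earlier applies) can walk all pages. The severity criterion and the CriterionAdditionalProperties shape are not part of this hunk, so treat those details as illustrative.

```java
import java.util.Collections;

import software.amazon.awssdk.services.macie2.Macie2Client;
import software.amazon.awssdk.services.macie2.model.CriterionAdditionalProperties;
import software.amazon.awssdk.services.macie2.model.FindingCriteria;
import software.amazon.awssdk.services.macie2.model.ListFindingsRequest;

public class ListHighSeverityFindings {
    public static void main(String[] args) {
        try (Macie2Client macie = Macie2Client.create()) {
            // Filter on the severity.description field; "High" is a placeholder value.
            FindingCriteria criteria = FindingCriteria.builder()
                    .criterion(Collections.singletonMap("severity.description",
                            CriterionAdditionalProperties.builder().eq("High").build()))
                    .build();

            ListFindingsRequest request = ListFindingsRequest.builder()
                    .findingCriteria(criteria)
                    .maxResults(50)
                    .build();

            // The generated paginator follows nextToken automatically.
            macie.listFindingsPaginator(request).stream()
                    .flatMap(page -> page.findingIds().stream())
                    .forEach(System.out::println);
        }
    }
}
```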

    " + } + } + }, + "ListInvitationsRequest": { + "type": "structure", + "members": { + "maxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

    The maximum number of items to include in each page of a paginated response.

    " + }, + "nextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The nextToken string that specifies which page of results to return in a paginated response.

    " + } + } + }, + "ListInvitationsResponse": { + "type": "structure", + "members": { + "invitations": { + "shape": "__listOfInvitation", + "locationName": "invitations", + "documentation": "

    An array of objects, one for each invitation that was received by the account.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

    " + } + } + }, + "ListJobsFilterCriteria": { + "type": "structure", + "members": { + "excludes": { + "shape": "__listOfListJobsFilterTerm", + "locationName": "excludes", + "documentation": "

    An array of objects, one for each condition that determines which jobs to exclude from the results.

    " + }, + "includes": { + "shape": "__listOfListJobsFilterTerm", + "locationName": "includes", + "documentation": "

    An array of objects, one for each condition that determines which jobs to include in the results.

    " + } + }, + "documentation": "

    Specifies criteria for filtering the results of a request for information about classification jobs.

    " + }, + "ListJobsFilterKey": { + "type": "string", + "documentation": "

    The property to use to filter the results. Valid values are:

    ", + "enum": [ + "jobType", + "jobStatus", + "createdAt", + "name" + ] + }, + "ListJobsFilterTerm": { + "type": "structure", + "members": { + "comparator": { + "shape": "JobComparator", + "locationName": "comparator", + "documentation": "

    The operator to use to filter the results.

    " + }, + "key": { + "shape": "ListJobsFilterKey", + "locationName": "key", + "documentation": "

    The property to use to filter the results.

    " + }, + "values": { + "shape": "__listOf__string", + "locationName": "values", + "documentation": "

    An array that lists one or more values to use to filter the results.

    " + } + }, + "documentation": "

    Specifies a condition that filters the results of a request for information about classification jobs. Each condition consists of a property, an operator, and one or more values.

    " + }, + "ListJobsSortAttributeName": { + "type": "string", + "documentation": "

    The property to sort the results by. Valid values are:

    ", + "enum": [ + "createdAt", + "jobStatus", + "name", + "jobType" + ] + }, + "ListJobsSortCriteria": { + "type": "structure", + "members": { + "attributeName": { + "shape": "ListJobsSortAttributeName", + "locationName": "attributeName", + "documentation": "

    The property to sort the results by.

    " + }, + "orderBy": { + "shape": "OrderBy", + "locationName": "orderBy", + "documentation": "

    The sort order to apply to the results, based on the value for the property specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

    " + } + }, + "documentation": "

    Specifies criteria for sorting the results of a request for information about classification jobs.

    " + }, + "ListMembersRequest": { + "type": "structure", + "members": { + "maxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

    The maximum number of items to include in each page of a paginated response.

    " + }, + "nextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The nextToken string that specifies which page of results to return in a paginated response.

    " + }, + "onlyAssociated": { + "shape": "__string", + "location": "querystring", + "locationName": "onlyAssociated", + "documentation": "

    Specifies which accounts to include in the response, based on the status of an account's relationship with the master account. By default, the response includes only current member accounts. To include all accounts, set the value for this parameter to false.
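    [Editor's note] Note that onlyAssociated is modeled as a string rather than a boolean, so widening the listing to all accounts means passing the literal "false". A small sketch:

```java
import software.amazon.awssdk.services.macie2.Macie2Client;
import software.amazon.awssdk.services.macie2.model.ListMembersRequest;
import software.amazon.awssdk.services.macie2.model.ListMembersResponse;
import software.amazon.awssdk.services.macie2.model.Member;

public class ListAllMemberAccounts {
    public static void main(String[] args) {
        try (Macie2Client macie = Macie2Client.create()) {
            // onlyAssociated defaults to returning only current member accounts;
            // "false" includes all accounts associated with the master account.
            ListMembersRequest request = ListMembersRequest.builder()
                    .onlyAssociated("false")
                    .build();

            ListMembersResponse response = macie.listMembers(request);
            for (Member member : response.members()) {
                System.out.println(member.accountId() + " " + member.relationshipStatusAsString());
            }
        }
    }
}
```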

    " + } + } + }, + "ListMembersResponse": { + "type": "structure", + "members": { + "members": { + "shape": "__listOfMember", + "locationName": "members", + "documentation": "

    An array of objects, one for each account that's associated with the master account and meets the criteria specified by the onlyAssociated request parameter.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

    " + } + } + }, + "ListOrganizationAdminAccountsRequest": { + "type": "structure", + "members": { + "maxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

    The maximum number of items to include in each page of a paginated response.

    " + }, + "nextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The nextToken string that specifies which page of results to return in a paginated response.

    " + } + } + }, + "ListOrganizationAdminAccountsResponse": { + "type": "structure", + "members": { + "adminAccounts": { + "shape": "__listOfAdminAccount", + "locationName": "adminAccounts", + "documentation": "

    An array of objects, one for each delegated Amazon Macie administrator account for the organization. Only one of these accounts can have a status of ENABLED.

    " + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

    " + } + } + }, + "ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resourceArn", + "documentation": "

    The Amazon Resource Name (ARN) of the classification job, custom data identifier, findings filter, or member account.

    " + } + }, + "required": [ + "resourceArn" + ] + }, + "ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that identifies the tags (keys and values) that are associated with the resource.

    " + } + } + }, + "MacieStatus": { + "type": "string", + "documentation": "

    The status of an Amazon Macie account. Valid values are:

    ", + "enum": [ + "PAUSED", + "ENABLED" + ] + }, + "MaxResults": { + "type": "integer", + "min": 1, + "max": 25 + }, + "Member": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The AWS account ID for the account.

    " + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the account.

    " + }, + "email": { + "shape": "__string", + "locationName": "email", + "documentation": "

    The email address for the account.

    " + }, + "invitedAt": { + "shape": "__timestampIso8601", + "locationName": "invitedAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when an Amazon Macie membership invitation was last sent to the account. This value is null if a Macie invitation hasn't been sent to the account.

    " + }, + "masterAccountId": { + "shape": "__string", + "locationName": "masterAccountId", + "documentation": "

    The AWS account ID for the master account.

    " + }, + "relationshipStatus": { + "shape": "RelationshipStatus", + "locationName": "relationshipStatus", + "documentation": "

    The current status of the relationship between the account and the master account.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that identifies the tags (keys and values) that are associated with the account in Amazon Macie.

    " + }, + "updatedAt": { + "shape": "__timestampIso8601", + "locationName": "updatedAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the relationship between the account and the master account.

    " + } + }, + "documentation": "

    Provides information about an account that's associated with an Amazon Macie master account.

    " + }, + "MonthlySchedule": { + "type": "structure", + "members": { + "dayOfMonth": { + "shape": "__integer", + "locationName": "dayOfMonth", + "documentation": "

    The numeric day of the month when Amazon Macie runs the job. This value can be an integer from 1 through 31.

    If this value exceeds the number of days in a certain month, Macie runs the job on the last day of that month. For example, if this value is 31 and a month has only 30 days, Macie runs the job on day 30 of that month.

    " + } + }, + "documentation": "

    Specifies a monthly recurrence pattern for running a classification job.

    " + }, + "ObjectCountByEncryptionType": { + "type": "structure", + "members": { + "customerManaged": { + "shape": "__long", + "locationName": "customerManaged", + "documentation": "

    The total number of objects that are encrypted using a customer-managed key. The objects use server-side encryption with customer-provided keys (SSE-C).

    " + }, + "kmsManaged": { + "shape": "__long", + "locationName": "kmsManaged", + "documentation": "

    The total number of objects that are encrypted using an AWS Key Management Service (AWS KMS) customer master key (CMK). The objects use AWS managed AWS KMS (AWS-KMS) encryption or customer managed AWS KMS (SSE-KMS) encryption.

    " + }, + "s3Managed": { + "shape": "__long", + "locationName": "s3Managed", + "documentation": "

    The total number of objects that are encrypted using an Amazon S3 managed key. The objects use Amazon S3 managed (SSE-S3) encryption.

    " + }, + "unencrypted": { + "shape": "__long", + "locationName": "unencrypted", + "documentation": "

    The total number of objects that aren't encrypted or use client-side encryption.

    " + } + }, + "documentation": "

    Provides information about the number of objects that are in an S3 bucket and use certain types of server-side encryption, use client-side encryption, or aren't encrypted.

    " + }, + "ObjectLevelStatistics": { + "type": "structure", + "members": { + "fileType": { + "shape": "__long", + "locationName": "fileType", + "documentation": "

    The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects don't have a file name extension for a supported file or storage format.

    " + }, + "storageClass": { + "shape": "__long", + "locationName": "storageClass", + "documentation": "

    The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects use an unsupported storage class.

    " + }, + "total": { + "shape": "__long", + "locationName": "total", + "documentation": "

    The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects use an unsupported storage class or don't have a file name extension for a supported file or storage format.

    " + } + }, + "documentation": "

    Provides information about the total storage size (in bytes) or number of objects that Amazon Macie can't analyze in one or more S3 buckets. In a BucketMetadata object, this data is for a specific bucket. In a GetBucketStatisticsResponse object, this data is aggregated for all the buckets in the query results.

    " + }, + "Occurrences": { + "type": "structure", + "members": { + "cells": { + "shape": "Cells", + "locationName": "cells", + "documentation": "

    An array of objects, one for each occurrence of sensitive data in a Microsoft Excel workbook, CSV file, or TSV file. Each object specifies the cell or field that contains the data. This value is null for all other types of files.

    " + }, + "lineRanges": { + "shape": "Ranges", + "locationName": "lineRanges", + "documentation": "

    An array of objects, one for each occurrence of sensitive data in a Microsoft Word document or non-binary text file, such as an HTML, JSON, TXT, or XML file. Each object specifies the line that contains the data, and the position of the data on that line.

    This value is often null for file types that are supported by Cell, Page, or Record objects. Exceptions are the locations of: data in unstructured sections of an otherwise structured file, such as a comment in a file; and, data in a malformed file that Amazon Macie analyzes as plain text.

    " + }, + "offsetRanges": { + "shape": "Ranges", + "locationName": "offsetRanges", + "documentation": "

    An array of objects, one for each occurrence of sensitive data in a binary text file. Each object specifies the position of the data relative to the beginning of the file.

    This value is typically null. For binary text files, Amazon Macie adds location data to a lineRanges.Range or Page object, depending on the file type.

    " + }, + "pages": { + "shape": "Pages", + "locationName": "pages", + "documentation": "

    An array of objects, one for each occurrence of sensitive data in an Adobe Portable Document Format file. Each object specifies the page that contains the data, and the position of the data on that page. This value is null for all other types of files.

    " + }, + "records": { + "shape": "Records", + "locationName": "records", + "documentation": "

    An array of objects, one for each occurrence of sensitive data in an Apache Avro object container or Apache Parquet file. Each object specifies the record index and the path to the field in the record that contains the data. This value is null for all other types of files.

    " + } + }, + "documentation": "

    Provides the location of 1-15 occurrences of sensitive data that was detected by managed data identifiers or a custom data identifier and produced a sensitive data finding.

    " + }, + "OrderBy": { + "type": "string", + "enum": [ + "ASC", + "DESC" + ] + }, + "Page": { + "type": "structure", + "members": { + "lineRange": { + "shape": "Range", + "locationName": "lineRange", + "documentation": "

    The line that contains the data, and the position of the data on that line.

    " + }, + "offsetRange": { + "shape": "Range", + "locationName": "offsetRange", + "documentation": "

    The position of the data on the page, relative to the beginning of the page.

    " + }, + "pageNumber": { + "shape": "__long", + "locationName": "pageNumber", + "documentation": "

    The page number of the page that contains the data.

    " + } + }, + "documentation": "

    Specifies the location of an occurrence of sensitive data in an Adobe Portable Document Format file.

    " + }, + "Pages": { + "type": "list", + "documentation": "

    Specifies the location of occurrences of sensitive data in an Adobe Portable Document Format file.

    ", + "member": { + "shape": "Page" + } + }, + "PolicyDetails": { + "type": "structure", + "members": { + "action": { + "shape": "FindingAction", + "locationName": "action", + "documentation": "

    The action that produced the finding.

    " + }, + "actor": { + "shape": "FindingActor", + "locationName": "actor", + "documentation": "

    The entity that performed the action that produced the finding.

    " + } + }, + "documentation": "

    Provides the details of a policy finding.

    " + }, + "PutClassificationExportConfigurationRequest": { + "type": "structure", + "members": { + "configuration": { + "shape": "ClassificationExportConfiguration", + "locationName": "configuration", + "documentation": "

    The location to store data classification results in, and the encryption settings to use when storing results in that location.

    " + } + }, + "required": [ + "configuration" + ] + }, + "PutClassificationExportConfigurationResponse": { + "type": "structure", + "members": { + "configuration": { + "shape": "ClassificationExportConfiguration", + "locationName": "configuration", + "documentation": "

    The location where the data classification results are stored, and the encryption settings that are used when storing results in that location.
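    [Editor's note] A sketch of configuring the export location described above. The s3Destination member of ClassificationExportConfiguration and the placeholder bucket, prefix, and key ARN are assumptions for illustration; bucketName and kmsKeyArn are the members this hunk marks as required on S3Destination.

```java
import software.amazon.awssdk.services.macie2.Macie2Client;
import software.amazon.awssdk.services.macie2.model.ClassificationExportConfiguration;
import software.amazon.awssdk.services.macie2.model.PutClassificationExportConfigurationRequest;
import software.amazon.awssdk.services.macie2.model.S3Destination;

public class ConfigureExportLocation {
    public static void main(String[] args) {
        // Bucket name, key prefix, and KMS key ARN are placeholders.
        S3Destination destination = S3Destination.builder()
                .bucketName("example-macie-results-bucket")
                .keyPrefix("macie/results/")
                .kmsKeyArn("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID")
                .build();

        try (Macie2Client macie = Macie2Client.create()) {
            macie.putClassificationExportConfiguration(
                    PutClassificationExportConfigurationRequest.builder()
                            .configuration(ClassificationExportConfiguration.builder()
                                    .s3Destination(destination)
                                    .build())
                            .build());
        }
    }
}
```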

    " + } + } + }, + "Range": { + "type": "structure", + "members": { + "end": { + "shape": "__long", + "locationName": "end", + "documentation": "

    Possible values are:

    • In an Occurrences.lineRanges array, the number of lines from the beginning of the file to the end of the sensitive data.

    • In an Occurrences.offsetRanges array, the number of characters from the beginning of the file to the end of the sensitive data.

    • In a Page object, the number of lines (lineRange) or characters (offsetRange) from the beginning of the page to the end of the sensitive data.

    " + }, + "start": { + "shape": "__long", + "locationName": "start", + "documentation": "

    Possible values are:

    • In an Occurrences.lineRanges array, the number of lines from the beginning of the file to the beginning of the sensitive data.

    • In an Occurrences.offsetRanges array, the number of characters from the beginning of the file to the beginning of the sensitive data.

    • In a Page object, the number of lines (lineRange) or characters (offsetRange) from the beginning of the page to the beginning of the sensitive data.

    " + }, + "startColumn": { + "shape": "__long", + "locationName": "startColumn", + "documentation": "

    The column number for the column that contains the data, if the file contains structured data.

    " + } + }, + "documentation": "

    Provides details about the location of an occurrence of sensitive data in an Adobe Portable Document Format file, Microsoft Word document, or non-binary text file.
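    [Editor's note] A small helper showing how the start/end/startColumn semantics above might be read back from an Occurrences object; how the Occurrences instance is obtained from a finding is assumed rather than shown in this hunk.

```java
import software.amazon.awssdk.services.macie2.model.Occurrences;
import software.amazon.awssdk.services.macie2.model.Range;

public class PrintLineRanges {
    // "occurrences" is assumed to come from a sensitive data finding
    // (for example, from a detection object in classification details).
    static void printLineRanges(Occurrences occurrences) {
        if (occurrences == null || occurrences.lineRanges() == null) {
            return; // location data is reported through cells, pages, or records instead
        }
        for (Range range : occurrences.lineRanges()) {
            // For lineRanges, start and end count lines from the beginning of the
            // file; startColumn is only meaningful for structured (columnar) files.
            System.out.printf("lines %d-%d (start column %s)%n",
                    range.start(), range.end(), range.startColumn());
        }
    }
}
```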

    " + }, + "Ranges": { + "type": "list", + "documentation": "

    Provides details about the location of occurrences of sensitive data in an Adobe Portable Document Format file, Microsoft Word document, or non-binary text file.

    ", + "member": { + "shape": "Range" + } + }, + "Record": { + "type": "structure", + "members": { + "jsonPath": { + "shape": "__string", + "locationName": "jsonPath", + "documentation": "

    The path, as a JSONPath expression, to the field in the record that contains the data.

    If the name of an element exceeds 20 characters, Amazon Macie truncates the name by removing characters from the beginning of the name. If the resulting full path exceeds 250 characters, Macie also truncates the path, starting with the first element in the path, until the path contains 250 or fewer characters.

    " + }, + "recordIndex": { + "shape": "__long", + "locationName": "recordIndex", + "documentation": "

    The record index, starting from 0, for the record that contains the data.

    " + } + }, + "documentation": "

    Specifies the location of an occurrence of sensitive data in an Apache Avro object container or Apache Parquet file.

    " + }, + "Records": { + "type": "list", + "documentation": "

    Specifies the location of occurrences of sensitive data in an Apache Avro object container or Apache Parquet file.

    ", + "member": { + "shape": "Record" + } + }, + "RelationshipStatus": { + "type": "string", + "documentation": "

    The current status of the relationship between an account and an associated Amazon Macie master account (inviter account). Possible values are:

    ", + "enum": [ + "Enabled", + "Paused", + "Invited", + "Created", + "Removed", + "Resigned", + "EmailVerificationInProgress", + "EmailVerificationFailed", + "RegionDisabled", + "AccountSuspended" + ] + }, + "ReplicationDetails": { + "type": "structure", + "members": { + "replicated": { + "shape": "__boolean", + "locationName": "replicated", + "documentation": "

    Specifies whether the bucket is configured to replicate one or more objects to any destination.

    " + }, + "replicatedExternally": { + "shape": "__boolean", + "locationName": "replicatedExternally", + "documentation": "

    Specifies whether the bucket is configured to replicate one or more objects to an AWS account that isn't part of the same Amazon Macie organization.

    " + }, + "replicationAccounts": { + "shape": "__listOf__string", + "locationName": "replicationAccounts", + "documentation": "

    An array of AWS account IDs, one for each AWS account that the bucket is configured to replicate one or more objects to.

    " + } + }, + "documentation": "

    Provides information about settings that define whether one or more objects in an S3 bucket are replicated to S3 buckets for other AWS accounts and, if so, which accounts.

    " + }, + "ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    The explanation of the error that occurred.

    " + } + }, + "documentation": "

    Provides information about an error that occurred because a specified resource wasn't found.

    ", + "exception": true, + "error": { + "httpStatusCode": 404 + } + }, + "ResourcesAffected": { + "type": "structure", + "members": { + "s3Bucket": { + "shape": "S3Bucket", + "locationName": "s3Bucket", + "documentation": "

    An array of objects, one for each S3 bucket that the finding applies to. Each object provides a set of metadata about an affected S3 bucket.

    " + }, + "s3Object": { + "shape": "S3Object", + "locationName": "s3Object", + "documentation": "

    An array of objects, one for each S3 object that the finding applies to. Each object provides a set of metadata about an affected S3 object.

    " + } + }, + "documentation": "

    Provides information about the resources that a finding applies to.

    " + }, + "S3Bucket": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the bucket.

    " + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the bucket was created.

    " + }, + "defaultServerSideEncryption": { + "shape": "ServerSideEncryption", + "locationName": "defaultServerSideEncryption", + "documentation": "

    The type of server-side encryption that's used by default to encrypt objects in the bucket.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    The name of the bucket.

    " + }, + "owner": { + "shape": "S3BucketOwner", + "locationName": "owner", + "documentation": "

    The display name and account identifier for the user who owns the bucket.

    " + }, + "publicAccess": { + "shape": "BucketPublicAccess", + "locationName": "publicAccess", + "documentation": "

    The permissions settings that determine whether the bucket is publicly accessible.

    " + }, + "tags": { + "shape": "KeyValuePairList", + "locationName": "tags", + "documentation": "

    The tags that are associated with the bucket.

    " + } + }, + "documentation": "

    Provides information about an S3 bucket that a finding applies to.

    " + }, + "S3BucketDefinitionForJob": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account that owns the buckets. If you specify this value and don't specify a value for the buckets array, the job analyzes objects in all the buckets that are owned by the account and meet other conditions specified for the job.

    " + }, + "buckets": { + "shape": "__listOf__string", + "locationName": "buckets", + "documentation": "

    An array that lists the names of the buckets.

    " + } + }, + "documentation": "

    Specifies which AWS account owns the S3 buckets that a classification job analyzes, and the buckets to analyze for the account.

    " + }, + "S3BucketOwner": { + "type": "structure", + "members": { + "displayName": { + "shape": "__string", + "locationName": "displayName", + "documentation": "

    The display name of the user who owns the bucket.

    " + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

    The AWS account ID for the user who owns the bucket.

    " + } + }, + "documentation": "

    Provides information about the user who owns an S3 bucket.

    " + }, + "S3Destination": { + "type": "structure", + "members": { + "bucketName": { + "shape": "__string", + "locationName": "bucketName", + "documentation": "

    The name of the bucket.

    " + }, + "keyPrefix": { + "shape": "__string", + "locationName": "keyPrefix", + "documentation": "

    The path prefix to use in the path to the location in the bucket. This prefix specifies where to store classification results in the bucket.

    " + }, + "kmsKeyArn": { + "shape": "__string", + "locationName": "kmsKeyArn", + "documentation": "

    The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for encryption of the results. This must be the ARN of an existing CMK that's in the same AWS Region as the bucket.

    " + } + }, + "documentation": "

    Specifies an S3 bucket to store data classification results in, and the encryption settings to use when storing results in that bucket.

    ", + "required": [ + "bucketName", + "kmsKeyArn" + ] + }, + "S3JobDefinition": { + "type": "structure", + "members": { + "bucketDefinitions": { + "shape": "__listOfS3BucketDefinitionForJob", + "locationName": "bucketDefinitions", + "documentation": "

    An array of objects, one for each AWS account that owns buckets to analyze. Each object specifies the account ID for an account and one or more buckets to analyze for the account.

    " + }, + "scoping": { + "shape": "Scoping", + "locationName": "scoping", + "documentation": "

    The property- and tag-based conditions that determine which objects to include or exclude from the analysis.

    " + } + }, + "documentation": "

    Specifies which S3 buckets contain the objects that a classification job analyzes, and the scope of that analysis.

    " + }, + "S3Object": { + "type": "structure", + "members": { + "bucketArn": { + "shape": "__string", + "locationName": "bucketArn", + "documentation": "

    The Amazon Resource Name (ARN) of the bucket that contains the object.

    " + }, + "eTag": { + "shape": "__string", + "locationName": "eTag", + "documentation": "

    The entity tag (ETag) that identifies the affected version of the object. If the object was overwritten or changed after Amazon Macie produced the finding, this value might be different from the current ETag for the object.

    " + }, + "extension": { + "shape": "__string", + "locationName": "extension", + "documentation": "

    The file name extension of the object. If the object doesn't have a file name extension, this value is \"\".

    " + }, + "key": { + "shape": "__string", + "locationName": "key", + "documentation": "

    The full key (name) that's assigned to the object.

    " + }, + "lastModified": { + "shape": "__timestampIso8601", + "locationName": "lastModified", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the object was last modified.

    " + }, + "path": { + "shape": "__string", + "locationName": "path", + "documentation": "

    The path to the object, including the full key (name).

    " + }, + "publicAccess": { + "shape": "__boolean", + "locationName": "publicAccess", + "documentation": "

    Specifies whether the object is publicly accessible due to the combination of permissions settings that apply to the object.

    " + }, + "serverSideEncryption": { + "shape": "ServerSideEncryption", + "locationName": "serverSideEncryption", + "documentation": "

    The type of server-side encryption that's used for the object.

    " + }, + "size": { + "shape": "__long", + "locationName": "size", + "documentation": "

    The total storage size, in bytes, of the object.

    " + }, + "storageClass": { + "shape": "StorageClass", + "locationName": "storageClass", + "documentation": "

    The storage class of the object.

    " + }, + "tags": { + "shape": "KeyValuePairList", + "locationName": "tags", + "documentation": "

    The tags that are associated with the object.

    " + }, + "versionId": { + "shape": "__string", + "locationName": "versionId", + "documentation": "

    The identifier for the affected version of the object.

    " + } + }, + "documentation": "

    Provides information about an S3 object that a finding applies to.

    " + }, + "ScopeFilterKey": { + "type": "string", + "documentation": "

    The property to use in a condition that determines which objects are analyzed by a classification job. Valid values are:

    ", + "enum": [ + "BUCKET_CREATION_DATE", + "OBJECT_EXTENSION", + "OBJECT_LAST_MODIFIED_DATE", + "OBJECT_SIZE", + "TAG" + ] + }, + "Scoping": { + "type": "structure", + "members": { + "excludes": { + "shape": "JobScopingBlock", + "locationName": "excludes", + "documentation": "

    The property- or tag-based conditions that determine which objects to exclude from the analysis.

    " + }, + "includes": { + "shape": "JobScopingBlock", + "locationName": "includes", + "documentation": "

    The property- or tag-based conditions that determine which objects to include in the analysis.

    " + } + }, + "documentation": "

    Specifies one or more property- and tag-based conditions that refine the scope of a classification job. These conditions define criteria that determine which objects a job analyzes. Exclude conditions take precedence over include conditions.

    " + }, + "SensitiveData": { + "type": "list", + "documentation": "

    Provides information about the category and number of occurrences of sensitive data that produced a finding.

    ", + "member": { + "shape": "SensitiveDataItem" + } + }, + "SensitiveDataItem": { + "type": "structure", + "members": { + "category": { + "shape": "SensitiveDataItemCategory", + "locationName": "category", + "documentation": "

    The category of sensitive data that was detected. For example: CREDENTIALS, for credentials data such as private keys or AWS secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as driver's license identification numbers.

    " + }, + "detections": { + "shape": "DefaultDetections", + "locationName": "detections", + "documentation": "

    An array of objects, one for each type of sensitive data that was detected. Each object reports the number of occurrences of a specific type of sensitive data that was detected, and the location of up to 15 of those occurrences.

    " + }, + "totalCount": { + "shape": "__long", + "locationName": "totalCount", + "documentation": "

    The total number of occurrences of the sensitive data that was detected.

    " + } + }, + "documentation": "

    Provides information about the category, types, and occurrences of sensitive data that produced a sensitive data finding.

    " + }, + "SensitiveDataItemCategory": { + "type": "string", + "documentation": "

    The category of sensitive data that was detected and produced the finding. Possible values are:

    ", + "enum": [ + "FINANCIAL_INFORMATION", + "PERSONAL_INFORMATION", + "CREDENTIALS", + "CUSTOM_IDENTIFIER" + ] + }, + "ServerSideEncryption": { + "type": "structure", + "members": { + "encryptionType": { + "shape": "EncryptionType", + "locationName": "encryptionType", + "documentation": "

    The server-side encryption algorithm that's used when storing data in the bucket or object. If encryption is disabled for the bucket or object, this value is NONE.

    " + }, + "kmsMasterKeyId": { + "shape": "__string", + "locationName": "kmsMasterKeyId", + "documentation": "

    The unique identifier for the AWS Key Management Service (AWS KMS) master key that's used to encrypt the bucket or object. This value is null if AWS KMS isn't used to encrypt the bucket or object.

    " + } + }, + "documentation": "

    Provides information about the server-side encryption settings for an S3 bucket or S3 object.

    " + }, + "ServiceLimit": { + "type": "structure", + "members": { + "isServiceLimited": { + "shape": "__boolean", + "locationName": "isServiceLimited", + "documentation": "

    Specifies whether the account has met the quota that corresponds to the metric specified by the UsageByAccount.type field in the response.

    " + }, + "unit": { + "shape": "Unit", + "locationName": "unit", + "documentation": "

    The unit of measurement for the value specified by the value field.

    " + }, + "value": { + "shape": "__long", + "locationName": "value", + "documentation": "

    The value for the metric specified by the UsageByAccount.type field in the response.

    " + } + }, + "documentation": "

    Specifies a current quota for an account.

    " + }, + "ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    The explanation of the error that occurred.

    " + } + }, + "documentation": "

    Provides information about an error that occurred due to one or more service quotas for an account.

    ", + "exception": true, + "error": { + "httpStatusCode": 402 + } + }, + "SessionContext": { + "type": "structure", + "members": { + "attributes": { + "shape": "SessionContextAttributes", + "locationName": "attributes", + "documentation": "

    The date and time when the credentials were issued, and whether the credentials were authenticated with a multi-factor authentication (MFA) device.

    " + }, + "sessionIssuer": { + "shape": "SessionIssuer", + "locationName": "sessionIssuer", + "documentation": "

    The source and type of credentials that were issued to the entity.

    " + } + }, + "documentation": "

    Provides information about a session that was created for an entity that performed an action by using temporary security credentials.

    " + }, + "SessionContextAttributes": { + "type": "structure", + "members": { + "creationDate": { + "shape": "__timestampIso8601", + "locationName": "creationDate", + "documentation": "

    The date and time, in UTC and ISO 8601 format, when the credentials were issued.

    " + }, + "mfaAuthenticated": { + "shape": "__boolean", + "locationName": "mfaAuthenticated", + "documentation": "

    Specifies whether the credentials were authenticated with a multi-factor authentication (MFA) device.

    " + } + }, + "documentation": "

    Provides information about the context in which temporary security credentials were issued to an entity.

    " + }, + "SessionIssuer": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account that owns the entity that was used to get the credentials.

    " + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the source account, IAM user, or role that was used to get the credentials.

    " + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

    The unique identifier for the entity that was used to get the credentials.

    " + }, + "type": { + "shape": "__string", + "locationName": "type", + "documentation": "

    The source of the temporary security credentials, such as Root, IAMUser, or Role.

    " + }, + "userName": { + "shape": "__string", + "locationName": "userName", + "documentation": "

    The name or alias of the user or role that issued the session. This value is null if the credentials were obtained from a root account that doesn't have an alias.

    " + } + }, + "documentation": "

    Provides information about the source and type of temporary security credentials that were issued to an entity.

    " + }, + "Severity": { + "type": "structure", + "members": { + "description": { + "shape": "SeverityDescription", + "locationName": "description", + "documentation": "

    The qualitative representation of the finding's severity, ranging from Low (least severe) to High (most severe).

    " + }, + "score": { + "shape": "__long", + "locationName": "score", + "documentation": "

    The numerical representation of the finding's severity, ranging from 1 (least severe) to 3 (most severe).

    " + } + }, + "documentation": "

    Provides the numerical and qualitative representations of a finding's severity.

    " + }, + "SeverityDescription": { + "type": "string", + "documentation": "

    The qualitative representation of the finding's severity. Possible values are:

    ", + "enum": [ + "Low", + "Medium", + "High" + ] + }, + "SharedAccess": { + "type": "string", + "enum": [ + "EXTERNAL", + "INTERNAL", + "NOT_SHARED", + "UNKNOWN" + ] + }, + "SimpleScopeTerm": { + "type": "structure", + "members": { + "comparator": { + "shape": "JobComparator", + "locationName": "comparator", + "documentation": "

    The operator to use in the condition. Valid operators for each supported property (key) are:

    • OBJECT_EXTENSION - EQ (equals) or NE (not equals)

    • OBJECT_LAST_MODIFIED_DATE - Any operator except CONTAINS

    • OBJECT_SIZE - Any operator except CONTAINS

    • TAG - EQ (equals) or NE (not equals)

    " + }, + "key": { + "shape": "ScopeFilterKey", + "locationName": "key", + "documentation": "

    The object property to use in the condition.

    " + }, + "values": { + "shape": "__listOf__string", + "locationName": "values", + "documentation": "

    An array that lists the values to use in the condition. If the value for the key property is OBJECT_EXTENSION, this array can specify multiple values and Amazon Macie uses an OR operator to join the values. Otherwise, this array can specify only one value. Valid values for each supported property (key) are:

    • OBJECT_EXTENSION - A string that represents the file name extension of an object. For example: doc, docx, pdf

    • OBJECT_LAST_MODIFIED_DATE - The date and time (in UTC and extended ISO 8601 format) when an object was created or last changed, whichever is later. For example: 2020-09-28T14:31:13Z

    • OBJECT_SIZE - An integer that represents the storage size (in bytes) of an object.

    • TAG - A string that represents a tag key for an object. For advanced options, use a TagScopeTerm object, instead of a SimpleScopeTerm object, to define a tag-based condition for the job.

    " + } + }, + "documentation": "

    Specifies a property-based condition that determines whether an object is included or excluded from a classification job.
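
    As an illustration, here is a hedged sketch of building such a condition with the generated AWS SDK for Java v2 builders. It assumes the standard macie2 module and codegen naming, and that JobScopingBlock exposes its terms through an and member and JobScopeTerm wraps the condition via a simpleScopeTerm member (neither shape's members are shown in this excerpt); the resulting Scoping would be attached to a job through CreateClassificationJob, which is not shown here.

        import software.amazon.awssdk.services.macie2.model.*;

        class ScopingExample {
            // Build a property-based include condition: only analyze .pdf and .docx objects.
            static Scoping pdfAndDocxOnly() {
                return Scoping.builder()
                        .includes(JobScopingBlock.builder()
                                .and(JobScopeTerm.builder()
                                        .simpleScopeTerm(SimpleScopeTerm.builder()
                                                .key(ScopeFilterKey.OBJECT_EXTENSION)
                                                .comparator(JobComparator.EQ)
                                                .values("pdf", "docx") // OR-joined for OBJECT_EXTENSION
                                                .build())
                                        .build())
                                .build())
                        .build();
            }
        }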

    " + }, + "SortCriteria": { + "type": "structure", + "members": { + "attributeName": { + "shape": "__string", + "locationName": "attributeName", + "documentation": "

    The name of the property to sort the results by. This value can be the name of any property that Amazon Macie defines for a finding.

    " + }, + "orderBy": { + "shape": "OrderBy", + "locationName": "orderBy", + "documentation": "

    The sort order to apply to the results, based on the value for the property specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

    " + } + }, + "documentation": "

    Specifies criteria for sorting the results of a request for findings.

    " + }, + "Statistics": { + "type": "structure", + "members": { + "approximateNumberOfObjectsToProcess": { + "shape": "__double", + "locationName": "approximateNumberOfObjectsToProcess", + "documentation": "

    The approximate number of objects that the job has yet to process during its current run.

    " + }, + "numberOfRuns": { + "shape": "__double", + "locationName": "numberOfRuns", + "documentation": "

    The number of times that the job has run.

    " + } + }, + "documentation": "

    Provides processing statistics for a classification job.

    " + }, + "StorageClass": { + "type": "string", + "documentation": "

    The storage class of the S3 object. Possible values are:

    ", + "enum": [ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "INTELLIGENT_TIERING", + "DEEP_ARCHIVE", + "ONEZONE_IA", + "GLACIER" + ] + }, + "TagMap": { + "type": "map", + "documentation": "

    A string-to-string map of key-value pairs that specifies the tags (keys and values) for a classification job, custom data identifier, findings filter, or member account.

    ", + "key": { + "shape": "__string" + }, + "value": { + "shape": "__string" + } + }, + "TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resourceArn", + "documentation": "

    The Amazon Resource Name (ARN) of the classification job, custom data identifier, findings filter, or member account.

    " + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

    A map of key-value pairs that specifies the tags to associate with the resource.

    A resource can have a maximum of 50 tags. Each tag consists of a tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.
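
    A hedged sketch of tagging a resource with the generated Java v2 client, assuming the standard macie2 module naming; the ARN and tag values below are placeholders.

        import java.util.HashMap;
        import java.util.Map;
        import software.amazon.awssdk.services.macie2.Macie2Client;
        import software.amazon.awssdk.services.macie2.model.TagResourceRequest;

        class TagResourceExample {
            public static void main(String[] args) {
                Map<String, String> tags = new HashMap<>();
                tags.put("team", "security");
                tags.put("costCenter", "1234");
                try (Macie2Client macie = Macie2Client.create()) {
                    macie.tagResource(TagResourceRequest.builder()
                            // Placeholder ARN for a classification job.
                            .resourceArn("arn:aws:macie2:us-east-1:123456789012:classification-job/exampleJobId")
                            .tags(tags)
                            .build());
                }
            }
        }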

    " + } + }, + "required": [ + "resourceArn", + "tags" + ] + }, + "TagResourceResponse": { + "type": "structure", + "members": {} + }, + "TagScopeTerm": { + "type": "structure", + "members": { + "comparator": { + "shape": "JobComparator", + "locationName": "comparator", + "documentation": "

    The operator to use in the condition. Valid operators are EQ (equals) or NE (not equals).

    " + }, + "key": { + "shape": "__string", + "locationName": "key", + "documentation": "

    The tag key to use in the condition.

    " + }, + "tagValues": { + "shape": "__listOfTagValuePair", + "locationName": "tagValues", + "documentation": "

    The tag keys or tag key and value pairs to use in the condition.

    " + }, + "target": { + "shape": "TagTarget", + "locationName": "target", + "documentation": "

    The type of object to apply the condition to.

    " + } + }, + "documentation": "

    Specifies a tag-based condition that determines whether an object is included or excluded from a classification job.

    " + }, + "TagTarget": { + "type": "string", + "documentation": "

    The type of object to apply a tag-based condition to. Valid values are:

    ", + "enum": [ + "S3_OBJECT" + ] + }, + "TagValuePair": { + "type": "structure", + "members": { + "key": { + "shape": "__string", + "locationName": "key", + "documentation": "

    The value for the tag key to use in the condition.

    " + }, + "value": { + "shape": "__string", + "locationName": "value", + "documentation": "

    The tag value, associated with the specified tag key (key), to use in the condition. To specify only a tag key for a condition, specify the tag key for the key property and set this value to an empty string.

    " + } + }, + "documentation": "

    Specifies a tag key or tag key and value pair to use in a tag-based condition for a classification job.

    " + }, + "TestCustomDataIdentifierRequest": { + "type": "structure", + "members": { + "ignoreWords": { + "shape": "__listOf__string", + "locationName": "ignoreWords", + "documentation": "

    An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4 - 90 characters. Ignore words are case sensitive.

    " + }, + "keywords": { + "shape": "__listOf__string", + "locationName": "keywords", + "documentation": "

    An array that lists specific character sequences (keywords), one of which must occur within the specified proximity (maximumMatchDistance) of text that matches the regex pattern. The array can contain as many as 50 keywords. Each keyword can contain 4 - 90 characters. Keywords aren't case sensitive.

    " + }, + "maximumMatchDistance": { + "shape": "__integer", + "locationName": "maximumMatchDistance", + "documentation": "

    The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1 - 300 characters. The default value is 50.

    " + }, + "regex": { + "shape": "__string", + "locationName": "regex", + "documentation": "

    The regular expression (regex) that defines the pattern to match. The expression can contain as many as 512 characters.

    " + }, + "sampleText": { + "shape": "__string", + "locationName": "sampleText", + "documentation": "

    The sample text to inspect by using the custom data identifier. The text can contain as many as 1,000 characters.

    " + } + }, + "required": [ + "regex", + "sampleText" + ] + }, + "TestCustomDataIdentifierResponse": { + "type": "structure", + "members": { + "matchCount": { + "shape": "__integer", + "locationName": "matchCount", + "documentation": "

    The number of instances of sample text that matched the detection criteria specified in the custom data identifier.
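
    A hedged sketch of exercising this operation with the generated Java v2 client, assuming the standard macie2 module naming; the regex, keywords, and sample text below are placeholders.

        import software.amazon.awssdk.services.macie2.Macie2Client;
        import software.amazon.awssdk.services.macie2.model.TestCustomDataIdentifierRequest;
        import software.amazon.awssdk.services.macie2.model.TestCustomDataIdentifierResponse;

        class TestIdentifierExample {
            public static void main(String[] args) {
                try (Macie2Client macie = Macie2Client.create()) {
                    TestCustomDataIdentifierResponse response = macie.testCustomDataIdentifier(
                            TestCustomDataIdentifierRequest.builder()
                                    .regex("EMP-\\d{6}")                         // pattern to match
                                    .keywords("employee id", "employee number")  // must appear near a match
                                    .maximumMatchDistance(30)
                                    .sampleText("Employee id: EMP-123456 joined in 2020.")
                                    .build());
                    System.out.println("Matches: " + response.matchCount());
                }
            }
        }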

    " + } + } + }, + "ThrottlingException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    The explanation of the error that occurred.

    " + } + }, + "documentation": "

    Provides information about an error that occurred because too many requests were sent during a certain amount of time.

    ", + "exception": true, + "error": { + "httpStatusCode": 429 + } + }, + "Unit": { + "type": "string", + "enum": [ + "TERABYTES" + ] + }, + "UnprocessedAccount": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The AWS account ID for the account that the request applies to.

    " + }, + "errorCode": { + "shape": "ErrorCode", + "locationName": "errorCode", + "documentation": "

    The source of the issue or delay in processing the request.

    " + }, + "errorMessage": { + "shape": "__string", + "locationName": "errorMessage", + "documentation": "

    The reason why the request hasn't been processed.

    " + } + }, + "documentation": "

    Provides information about an account-related request that hasn't been processed.

    " + }, + "UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resourceArn", + "documentation": "

    The Amazon Resource Name (ARN) of the classification job, custom data identifier, findings filter, or member account.

    " + }, + "tagKeys": { + "shape": "__listOf__string", + "location": "querystring", + "locationName": "tagKeys", + "documentation": "

    The key of the tag to remove from the resource. To remove multiple tags, append the tagKeys parameter and argument for each additional tag to remove, separated by an ampersand (&).
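
    When you call this operation through the generated Java v2 client, the SDK assembles that querystring for you from the list of keys. A hedged sketch, assuming the standard macie2 module naming; the ARN and keys are placeholders.

        import software.amazon.awssdk.services.macie2.Macie2Client;
        import software.amazon.awssdk.services.macie2.model.UntagResourceRequest;

        class UntagResourceExample {
            public static void main(String[] args) {
                try (Macie2Client macie = Macie2Client.create()) {
                    // Each key becomes its own tagKeys querystring parameter on the wire,
                    // e.g. ?tagKeys=team&tagKeys=costCenter.
                    macie.untagResource(UntagResourceRequest.builder()
                            .resourceArn("arn:aws:macie2:us-east-1:123456789012:classification-job/exampleJobId")
                            .tagKeys("team", "costCenter")
                            .build());
                }
            }
        }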

    " + } + }, + "required": [ + "tagKeys", + "resourceArn" + ] + }, + "UntagResourceResponse": { + "type": "structure", + "members": {} + }, + "UpdateClassificationJobRequest": { + "type": "structure", + "members": { + "jobId": { + "shape": "__string", + "location": "uri", + "locationName": "jobId", + "documentation": "

    The unique identifier for the classification job.

    " + }, + "jobStatus": { + "shape": "JobStatus", + "locationName": "jobStatus", + "documentation": "

    The new status for the job. Valid values are:

    • CANCELLED - Stops the job permanently and cancels it. This value is valid only if the job's current status is IDLE, PAUSED, RUNNING, or USER_PAUSED.

      If you specify this value and the job's current status is RUNNING, Amazon Macie immediately begins to stop all processing tasks for the job. You can't resume or restart a job after you cancel it.

    • RUNNING - Resumes the job. This value is valid only if the job's current status is USER_PAUSED.

      If you paused the job while it was actively running and you specify this value less than 30 days after you paused the job, Macie immediately resumes processing from the point where you paused the job. Otherwise, Macie resumes the job according to the schedule and other settings for the job.

    • USER_PAUSED - Pauses the job temporarily. This value is valid only if the job's current status is IDLE or RUNNING. If you specify this value and the job's current status is RUNNING, Macie immediately begins to pause all processing tasks for the job.

      If you pause a one-time job and you don't resume it within 30 days, the job expires and Macie cancels the job. If you pause a recurring job when its status is RUNNING and you don't resume it within 30 days, the job run expires and Macie cancels the run. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.

    " + } + }, + "required": [ + "jobId", + "jobStatus" + ] + }, + "UpdateClassificationJobResponse": { + "type": "structure", + "members": {} + }, + "UpdateFindingsFilterRequest": { + "type": "structure", + "members": { + "action": { + "shape": "FindingsFilterAction", + "locationName": "action", + "documentation": "

    The action to perform on findings that meet the filter criteria (findingCriteria). Valid values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings.

    " + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

    A custom description of the filter. The description can contain as many as 512 characters.

    We strongly recommend that you avoid including any sensitive data in the description of a filter. Other users might be able to see the filter's description, depending on the actions that they're allowed to perform in Amazon Macie.

    " + }, + "findingCriteria": { + "shape": "FindingCriteria", + "locationName": "findingCriteria", + "documentation": "

    The criteria to use to filter findings.

    " + }, + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

    The unique identifier for the Amazon Macie resource or account that the request applies to.

    " + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

    A custom name for the filter. The name must contain at least 3 characters and can contain as many as 64 characters.

    We strongly recommend that you avoid including any sensitive data in the name of a filter. Other users might be able to see the filter's name, depending on the actions that they're allowed to perform in Amazon Macie.

    " + }, + "position": { + "shape": "__integer", + "locationName": "position", + "documentation": "

    The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings.

    " + } + }, + "required": [ + "id" + ] + }, + "UpdateFindingsFilterResponse": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the filter that was updated.

    " + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

    The unique identifier for the filter that was updated.

    " + } + } + }, + "UpdateMacieSessionRequest": { + "type": "structure", + "members": { + "findingPublishingFrequency": { + "shape": "FindingPublishingFrequency", + "locationName": "findingPublishingFrequency", + "documentation": "Specifies how often to publish updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events)." + }, + "status": { + "shape": "MacieStatus", + "locationName": "status", + "documentation": "

    Specifies whether to change the status of the account. Valid values are: ENABLED, resume all Amazon Macie activities for the account; and, PAUSED, suspend all Macie activities for the account.

    " + } + } + }, + "UpdateMacieSessionResponse": { + "type": "structure", + "members": {} + }, + "UpdateMemberSessionRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

    The unique identifier for the Amazon Macie resource or account that the request applies to.

    " + }, + "status": { + "shape": "MacieStatus", + "locationName": "status", + "documentation": "

    Specifies the new status for the account. Valid values are: ENABLED, resume all Amazon Macie activities for the account; and, PAUSED, suspend all Macie activities for the account.

    " + } + }, + "required": [ + "id", + "status" + ] + }, + "UpdateMemberSessionResponse": { + "type": "structure", + "members": {} + }, + "UpdateOrganizationConfigurationRequest": { + "type": "structure", + "members": { + "autoEnable": { + "shape": "__boolean", + "locationName": "autoEnable", + "documentation": "

    Specifies whether Amazon Macie is enabled automatically for each account, when the account is added to the AWS organization.

    " + } + }, + "required": [ + "autoEnable" + ] + }, + "UpdateOrganizationConfigurationResponse": { + "type": "structure", + "members": {} + }, + "UsageByAccount": { + "type": "structure", + "members": { + "currency": { + "shape": "Currency", + "locationName": "currency", + "documentation": "

    The type of currency that the value for the metric (estimatedCost) is reported in.

    " + }, + "estimatedCost": { + "shape": "__string", + "locationName": "estimatedCost", + "documentation": "

    The estimated value for the metric.

    " + }, + "serviceLimit": { + "shape": "ServiceLimit", + "locationName": "serviceLimit", + "documentation": "

    The current value for the quota that corresponds to the metric specified by the type field.

    " + }, + "type": { + "shape": "UsageType", + "locationName": "type", + "documentation": "

    The name of the metric. Possible values are: DATA_INVENTORY_EVALUATION, for monitoring S3 buckets; and, SENSITIVE_DATA_DISCOVERY, for analyzing sensitive data.

    " + } + }, + "documentation": "

    Provides data for a specific usage metric and the corresponding quota for an account. The value for the metric is an aggregated value that reports usage during the past 30 days.

    " + }, + "UsageRecord": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account that the data applies to.

    " + }, + "freeTrialStartDate": { + "shape": "__timestampIso8601", + "locationName": "freeTrialStartDate", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the free trial started for the account.

    " + }, + "usage": { + "shape": "__listOfUsageByAccount", + "locationName": "usage", + "documentation": "

    An array of objects that contains usage data and quotas for the account. Each object contains the data for a specific usage metric and the corresponding quota.

    " + } + }, + "documentation": "

    Provides quota and aggregated usage data for an account.

    " + }, + "UsageStatisticsFilter": { + "type": "structure", + "members": { + "comparator": { + "shape": "UsageStatisticsFilterComparator", + "locationName": "comparator", + "documentation": "

    The operator to use in the condition. If the value for the key property is accountId, this value must be CONTAINS. If the value for the key property is any other supported field, this value can be EQ, GT, GTE, LT, LTE, or NE.

    " + }, + "key": { + "shape": "UsageStatisticsFilterKey", + "locationName": "key", + "documentation": "

    The field to use in the condition.

    " + }, + "values": { + "shape": "__listOf__string", + "locationName": "values", + "documentation": "

    An array that lists values to use in the condition, based on the value for the field specified by the key property. If the value for the key property is accountId, this array can specify multiple values. Otherwise, this array can specify only one value.

    Valid values for each supported field are:

    • accountId - The unique identifier for an AWS account.

    • freeTrialStartDate - The date and time, in UTC and extended ISO 8601 format, when the free trial started for an account.

    • serviceLimit - A Boolean (true or false) value that indicates whether an account has reached its monthly quota.

    • total - A string that represents the current, estimated month-to-date cost for an account.

    " + } + }, + "documentation": "

    Specifies a condition for filtering the results of a query for account quotas and usage data.
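
    These filter and sort shapes are consumed by the GetUsageStatistics operation. A hedged sketch follows, assuming the request and response expose them through the usual filterBy, sortBy, and records members (those shapes are not shown in this excerpt) and standard macie2 codegen naming.

        import software.amazon.awssdk.services.macie2.Macie2Client;
        import software.amazon.awssdk.services.macie2.model.*;

        class UsageStatisticsExample {
            public static void main(String[] args) {
                try (Macie2Client macie = Macie2Client.create()) {
                    GetUsageStatisticsResponse response = macie.getUsageStatistics(
                            GetUsageStatisticsRequest.builder()
                                    .filterBy(UsageStatisticsFilter.builder()
                                            .key(UsageStatisticsFilterKey.ACCOUNT_ID)
                                            .comparator(UsageStatisticsFilterComparator.CONTAINS) // accountId requires CONTAINS
                                            .values("123456789012")                               // placeholder account ID
                                            .build())
                                    .sortBy(UsageStatisticsSortBy.builder()
                                            .key(UsageStatisticsSortKey.TOTAL)
                                            .orderBy(OrderBy.DESC)
                                            .build())
                                    .build());
                    response.records().forEach(r -> System.out.println(r.accountId()));
                }
            }
        }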

    " + }, + "UsageStatisticsFilterComparator": { + "type": "string", + "documentation": "

    The operator to use in a condition that filters the results of a query for account quotas and usage data. Valid values are:

    ", + "enum": [ + "GT", + "GTE", + "LT", + "LTE", + "EQ", + "NE", + "CONTAINS" + ] + }, + "UsageStatisticsFilterKey": { + "type": "string", + "documentation": "

    The field to use in a condition that filters the results of a query for account quotas and usage data. Valid values are:

    ", + "enum": [ + "accountId", + "serviceLimit", + "freeTrialStartDate", + "total" + ] + }, + "UsageStatisticsSortBy": { + "type": "structure", + "members": { + "key": { + "shape": "UsageStatisticsSortKey", + "locationName": "key", + "documentation": "

    The field to sort the results by.

    " + }, + "orderBy": { + "shape": "OrderBy", + "locationName": "orderBy", + "documentation": "

    The sort order to apply to the results, based on the value for the field specified by the key property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

    " + } + }, + "documentation": "

    Specifies criteria for sorting the results of a query for account quotas and usage data.

    " + }, + "UsageStatisticsSortKey": { + "type": "string", + "documentation": "

    The field to use to sort the results of a query for account quotas and usage data. Valid values are:

    ", + "enum": [ + "accountId", + "total", + "serviceLimitValue", + "freeTrialStartDate" + ] + }, + "UsageTotal": { + "type": "structure", + "members": { + "currency": { + "shape": "Currency", + "locationName": "currency", + "documentation": "

    The type of currency that the value for the metric (estimatedCost) is reported in.

    " + }, + "estimatedCost": { + "shape": "__string", + "locationName": "estimatedCost", + "documentation": "

    The estimated value for the metric.

    " + }, + "type": { + "shape": "UsageType", + "locationName": "type", + "documentation": "

    The name of the metric. Possible values are: DATA_INVENTORY_EVALUATION, for monitoring S3 buckets; and, SENSITIVE_DATA_DISCOVERY, for analyzing sensitive data.

    " + } + }, + "documentation": "

    Provides aggregated data for a usage metric. The value for the metric reports usage data for an account during the past 30 days.

    " + }, + "UsageType": { + "type": "string", + "documentation": "

    The name of a usage metric for an account. Possible values are:

    ", + "enum": [ + "DATA_INVENTORY_EVALUATION", + "SENSITIVE_DATA_DISCOVERY" + ] + }, + "UserIdentity": { + "type": "structure", + "members": { + "assumedRole": { + "shape": "AssumedRole", + "locationName": "assumedRole", + "documentation": "

    If the action was performed with temporary security credentials that were obtained using the AssumeRole operation of the AWS Security Token Service (AWS STS) API, the identifiers, session context, and other details about the identity.

    " + }, + "awsAccount": { + "shape": "AwsAccount", + "locationName": "awsAccount", + "documentation": "

    If the action was performed using the credentials for another AWS account, the details of that account.

    " + }, + "awsService": { + "shape": "AwsService", + "locationName": "awsService", + "documentation": "

    If the action was performed by an AWS account that belongs to an AWS service, the name of the service.

    " + }, + "federatedUser": { + "shape": "FederatedUser", + "locationName": "federatedUser", + "documentation": "

    If the action was performed with temporary security credentials that were obtained using the GetFederationToken operation of the AWS Security Token Service (AWS STS) API, the identifiers, session context, and other details about the identity.

    " + }, + "iamUser": { + "shape": "IamUser", + "locationName": "iamUser", + "documentation": "

    If the action was performed using the credentials for an AWS Identity and Access Management (IAM) user, the name and other details about the user.

    " + }, + "root": { + "shape": "UserIdentityRoot", + "locationName": "root", + "documentation": "

    If the action was performed using the credentials for your AWS account, the details of your account.

    " + }, + "type": { + "shape": "UserIdentityType", + "locationName": "type", + "documentation": "

    The type of entity that performed the action.

    " + } + }, + "documentation": "

    Provides information about the type and other characteristics of an entity that performed an action on an affected resource.

    " + }, + "UserIdentityRoot": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

    The unique identifier for the AWS account.

    " + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

    The Amazon Resource Name (ARN) of the principal that performed the action. The last section of the ARN contains the name of the user or role that performed the action.

    " + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

    The unique identifier for the entity that performed the action.

    " + } + }, + "documentation": "

    Provides information about an AWS account and entity that performed an action on an affected resource. The action was performed using the credentials for your AWS account.

    " + }, + "UserIdentityType": { + "type": "string", + "documentation": "

    The type of entity that performed the action on the affected resource. Possible values are:

    ", + "enum": [ + "AssumedRole", + "IAMUser", + "FederatedUser", + "Root", + "AWSAccount", + "AWSService" + ] + }, + "UserPausedDetails": { + "type": "structure", + "members": { + "jobExpiresAt": { + "shape": "__timestampIso8601", + "locationName": "jobExpiresAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when the job or job run will expire and be cancelled if you don't resume it first.

    " + }, + "jobImminentExpirationHealthEventArn": { + "shape": "__string", + "locationName": "jobImminentExpirationHealthEventArn", + "documentation": "

    The Amazon Resource Name (ARN) of the AWS Health event that Amazon Macie sent to notify you of the job or job run's pending expiration and cancellation. This value is null if a job has been paused for less than 23 days.

    " + }, + "jobPausedAt": { + "shape": "__timestampIso8601", + "locationName": "jobPausedAt", + "documentation": "

    The date and time, in UTC and extended ISO 8601 format, when you paused the job.

    " + } + }, + "documentation": "

    Provides information about when a classification job was paused. For a one-time job, this object also specifies when the job will expire and be cancelled if it isn't resumed. For a recurring job, this object also specifies when the paused job run will expire and be cancelled if it isn't resumed. This object is present only if a job's current status (jobStatus) is USER_PAUSED. The information in this object applies only to a job that was paused while it had a status of RUNNING.
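
    A hedged sketch of checking for a paused job and resuming it before it expires, assuming the DescribeClassificationJob and UpdateClassificationJob operations defined by this model and standard macie2 codegen naming; the job ID is a placeholder.

        import software.amazon.awssdk.services.macie2.Macie2Client;
        import software.amazon.awssdk.services.macie2.model.*;

        class ResumePausedJobExample {
            public static void main(String[] args) {
                try (Macie2Client macie = Macie2Client.create()) {
                    DescribeClassificationJobResponse job = macie.describeClassificationJob(
                            DescribeClassificationJobRequest.builder().jobId("exampleJobId").build());
                    if (job.jobStatus() == JobStatus.USER_PAUSED && job.userPausedDetails() != null) {
                        System.out.println("Paused job expires at: " + job.userPausedDetails().jobExpiresAt());
                        // Resume before the expiration date to avoid cancellation.
                        macie.updateClassificationJob(UpdateClassificationJobRequest.builder()
                                .jobId("exampleJobId")
                                .jobStatus(JobStatus.RUNNING)
                                .build());
                    }
                }
            }
        }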

    " + }, + "ValidationException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

    The explanation of the error that occurred.

    " + } + }, + "documentation": "

    Provides information about an error that occurred due to a syntax error in a request.

    ", + "exception": true, + "error": { + "httpStatusCode": 400 + } + }, + "WeeklySchedule": { + "type": "structure", + "members": { + "dayOfWeek": { + "shape": "DayOfWeek", + "locationName": "dayOfWeek", + "documentation": "

    The day of the week when Amazon Macie runs the job.

    " + } + }, + "documentation": "

    Specifies a weekly recurrence pattern for running a classification job.

    " + }, + "__boolean": { + "type": "boolean" + }, + "__double": { + "type": "double" + }, + "__integer": { + "type": "integer" + }, + "__listOfAdminAccount": { + "type": "list", + "member": { + "shape": "AdminAccount" + } + }, + "__listOfBatchGetCustomDataIdentifierSummary": { + "type": "list", + "member": { + "shape": "BatchGetCustomDataIdentifierSummary" + } + }, + "__listOfBucketMetadata": { + "type": "list", + "member": { + "shape": "BucketMetadata" + } + }, + "__listOfCustomDataIdentifierSummary": { + "type": "list", + "member": { + "shape": "CustomDataIdentifierSummary" + } + }, + "__listOfFinding": { + "type": "list", + "member": { + "shape": "Finding" + } + }, + "__listOfFindingType": { + "type": "list", + "member": { + "shape": "FindingType" + } + }, + "__listOfFindingsFilterListItem": { + "type": "list", + "member": { + "shape": "FindingsFilterListItem" + } + }, + "__listOfGroupCount": { + "type": "list", + "member": { + "shape": "GroupCount" + } + }, + "__listOfInvitation": { + "type": "list", + "member": { + "shape": "Invitation" + } + }, + "__listOfJobScopeTerm": { + "type": "list", + "member": { + "shape": "JobScopeTerm" + } + }, + "__listOfJobSummary": { + "type": "list", + "member": { + "shape": "JobSummary" + } + }, + "__listOfKeyValuePair": { + "type": "list", + "member": { + "shape": "KeyValuePair" + } + }, + "__listOfListJobsFilterTerm": { + "type": "list", + "member": { + "shape": "ListJobsFilterTerm" + } + }, + "__listOfMember": { + "type": "list", + "member": { + "shape": "Member" + } + }, + "__listOfS3BucketDefinitionForJob": { + "type": "list", + "member": { + "shape": "S3BucketDefinitionForJob" + } + }, + "__listOfTagValuePair": { + "type": "list", + "member": { + "shape": "TagValuePair" + } + }, + "__listOfUnprocessedAccount": { + "type": "list", + "member": { + "shape": "UnprocessedAccount" + } + }, + "__listOfUsageByAccount": { + "type": "list", + "member": { + "shape": "UsageByAccount" + } + }, + "__listOfUsageRecord": { + "type": "list", + "member": { + "shape": "UsageRecord" + } + }, + "__listOfUsageStatisticsFilter": { + "type": "list", + "member": { + "shape": "UsageStatisticsFilter" + } + }, + "__listOfUsageTotal": { + "type": "list", + "member": { + "shape": "UsageTotal" + } + }, + "__listOf__string": { + "type": "list", + "member": { + "shape": "__string" + } + }, + "__long": { + "type": "long" + }, + "__string": { + "type": "string" + }, + "__timestampIso8601": { + "type": "timestamp", + "timestampFormat": "iso8601" + }, + "__timestampUnix": { + "type": "timestamp", + "timestampFormat": "unixTimestamp" + } + }, + "documentation": "

    Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS. Macie automates the discovery of sensitive data, such as PII and intellectual property, to provide you with insight into the data that your organization stores in AWS. Macie also provides an inventory of your Amazon S3 buckets, which it continually monitors for you. If Macie detects sensitive data or potential data access issues, it generates detailed findings for you to review and act upon as necessary.

    " +} \ No newline at end of file diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index c3604a60cc47..b416f1d2bdaf 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + marketplacecatalog + AWS Java SDK :: Services :: Marketplace Catalog + The AWS Java SDK for Marketplace Catalog module holds the client classes that are used for + communicating with Marketplace Catalog. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.marketplacecatalog + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/marketplacecatalog/src/main/resources/codegen-resources/paginators-1.json b/services/marketplacecatalog/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..dbcad8968038 --- /dev/null +++ b/services/marketplacecatalog/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,14 @@ +{ + "pagination": { + "ListChangeSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListEntities": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json b/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..f627fd44d668 --- /dev/null +++ b/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,787 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-09-17", + "endpointPrefix":"catalog.marketplace", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"AWS Marketplace Catalog", + "serviceFullName":"AWS Marketplace Catalog Service", + "serviceId":"Marketplace Catalog", + "signatureVersion":"v4", + "signingName":"aws-marketplace", + "uid":"marketplace-catalog-2018-09-17" + }, + "operations":{ + "CancelChangeSet":{ + "name":"CancelChangeSet", + "http":{ + "method":"PATCH", + "requestUri":"/CancelChangeSet" + }, + "input":{"shape":"CancelChangeSetRequest"}, + "output":{"shape":"CancelChangeSetResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Used to cancel an open change request. Must be sent before the status of the request changes to APPLYING, the final stage of completing your change request. You can describe a change during the 60-day request history retention period for API calls.

    " + }, + "DescribeChangeSet":{ + "name":"DescribeChangeSet", + "http":{ + "method":"GET", + "requestUri":"/DescribeChangeSet" + }, + "input":{"shape":"DescribeChangeSetRequest"}, + "output":{"shape":"DescribeChangeSetResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Provides information about a given change set.

    " + }, + "DescribeEntity":{ + "name":"DescribeEntity", + "http":{ + "method":"GET", + "requestUri":"/DescribeEntity" + }, + "input":{"shape":"DescribeEntityRequest"}, + "output":{"shape":"DescribeEntityResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotSupportedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns the metadata and content of the entity.

    " + }, + "ListChangeSets":{ + "name":"ListChangeSets", + "http":{ + "method":"POST", + "requestUri":"/ListChangeSets" + }, + "input":{"shape":"ListChangeSetsRequest"}, + "output":{"shape":"ListChangeSetsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns the list of change sets owned by the account being used to make the call. You can filter this list by providing any combination of entityId, ChangeSetName, and status. If you provide more than one filter, the API operation applies a logical AND between the filters.

    You can describe a change during the 60-day request history retention period for API calls.
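
    A hedged sketch of filtering change sets by status with the generated Java v2 client, assuming the standard marketplacecatalog module naming; the filter values are placeholders.

        import software.amazon.awssdk.services.marketplacecatalog.MarketplaceCatalogClient;
        import software.amazon.awssdk.services.marketplacecatalog.model.*;

        class ListChangeSetsExample {
            public static void main(String[] args) {
                try (MarketplaceCatalogClient catalog = MarketplaceCatalogClient.create()) {
                    ListChangeSetsResponse response = catalog.listChangeSets(ListChangeSetsRequest.builder()
                            .catalog("AWSMarketplace")
                            .filterList(Filter.builder()
                                    .name("Status")
                                    .valueList("PREPARING", "APPLYING")
                                    .build())
                            .maxResults(20)
                            .build());
                    response.changeSetSummaryList().forEach(cs ->
                            System.out.println(cs.changeSetId() + " " + cs.statusAsString()));
                }
            }
        }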

    " + }, + "ListEntities":{ + "name":"ListEntities", + "http":{ + "method":"POST", + "requestUri":"/ListEntities" + }, + "input":{"shape":"ListEntitiesRequest"}, + "output":{"shape":"ListEntitiesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Provides the list of entities of a given type.

    " + }, + "StartChangeSet":{ + "name":"StartChangeSet", + "http":{ + "method":"POST", + "requestUri":"/StartChangeSet" + }, + "input":{"shape":"StartChangeSetRequest"}, + "output":{"shape":"StartChangeSetResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    This operation allows you to request changes for your entities. Within a single ChangeSet, you cannot start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the ChangeSet has completed (either succeeded, cancelled, or failed). If you try to start a ChangeSet containing a change against an entity that is already locked, you will receive a ResourceInUseException.

    For example, you cannot start the ChangeSet described in the example below because it contains two changes to execute the same change type (AddRevisions) against the same entity (entity-id@1).
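
    The referenced example is not reproduced in this model file; as a stand-in, here is a hedged Java v2 sketch of the invalid request it describes, assuming the standard marketplacecatalog module naming and that the request exposes its change list as changeSet (not shown in this excerpt). The entity type and details are placeholders.

        import software.amazon.awssdk.services.marketplacecatalog.MarketplaceCatalogClient;
        import software.amazon.awssdk.services.marketplacecatalog.model.*;

        class InvalidStartChangeSetExample {
            public static void main(String[] args) {
                try (MarketplaceCatalogClient catalog = MarketplaceCatalogClient.create()) {
                    Change addRevisions = Change.builder()
                            .changeType("AddRevisions")
                            .entity(Entity.builder()
                                    .type("TestEntityType")   // hypothetical entity type
                                    .identifier("entity-id@1")
                                    .build())
                            .details("{}")                    // stringified JSON details, contents elided
                            .build();
                    // Two changes with the same change type against the same entity:
                    // the service rejects this request.
                    catalog.startChangeSet(StartChangeSetRequest.builder()
                            .catalog("AWSMarketplace")
                            .changeSet(addRevisions, addRevisions)
                            .build());
                }
            }
        }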

    " + } + }, + "shapes":{ + "ARN":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[a-zA-Z0-9:*/-]+$" + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"StringValue"} + }, + "documentation":"

    Access is denied.

    ", + "error":{"httpStatusCode":403}, + "exception":true, + "synthetic":true + }, + "CancelChangeSetRequest":{ + "type":"structure", + "required":[ + "Catalog", + "ChangeSetId" + ], + "members":{ + "Catalog":{ + "shape":"Catalog", + "documentation":"

    Required. The catalog related to the request. Fixed value: AWSMarketplace.

    ", + "location":"querystring", + "locationName":"catalog" + }, + "ChangeSetId":{ + "shape":"ResourceId", + "documentation":"

    Required. The unique identifier of the StartChangeSet request that you want to cancel.

    ", + "location":"querystring", + "locationName":"changeSetId" + } + } + }, + "CancelChangeSetResponse":{ + "type":"structure", + "members":{ + "ChangeSetId":{ + "shape":"ResourceId", + "documentation":"

    The unique identifier for the change set referenced in this request.

    " + }, + "ChangeSetArn":{ + "shape":"ARN", + "documentation":"

    The ARN associated with the change set referenced in this request.

    " + } + } + }, + "Catalog":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z]+$" + }, + "Change":{ + "type":"structure", + "required":[ + "ChangeType", + "Entity", + "Details" + ], + "members":{ + "ChangeType":{ + "shape":"ChangeType", + "documentation":"

    Change types are single string values that describe your intention for the change. Each change type is unique for each EntityType provided in the change's scope.

    " + }, + "Entity":{ + "shape":"Entity", + "documentation":"

    The entity to be changed.

    " + }, + "Details":{ + "shape":"Json", + "documentation":"

    This object contains details specific to the change type of the requested change.

    " + } + }, + "documentation":"

    An object that contains the ChangeType, Details, and Entity.

    " + }, + "ChangeSetDescription":{ + "type":"list", + "member":{"shape":"ChangeSummary"} + }, + "ChangeSetName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[\\w\\s+=.:@-]+$" + }, + "ChangeSetSummaryList":{ + "type":"list", + "member":{"shape":"ChangeSetSummaryListItem"} + }, + "ChangeSetSummaryListItem":{ + "type":"structure", + "members":{ + "ChangeSetId":{ + "shape":"ResourceId", + "documentation":"

    The unique identifier for a change set.

    " + }, + "ChangeSetArn":{ + "shape":"ARN", + "documentation":"

    The ARN associated with the unique identifier for the change set referenced in this request.

    " + }, + "ChangeSetName":{ + "shape":"ChangeSetName", + "documentation":"

    The non-unique name for the change set.

    " + }, + "StartTime":{ + "shape":"DateTimeISO8601", + "documentation":"

    The time, in ISO 8601 format (2018-02-27T13:45:22Z), when the change set was started.

    " + }, + "EndTime":{ + "shape":"DateTimeISO8601", + "documentation":"

    The time, in ISO 8601 format (2018-02-27T13:45:22Z), when the change set was finished.

    " + }, + "Status":{ + "shape":"ChangeStatus", + "documentation":"

    The current status of the change set.

    " + }, + "EntityIdList":{ + "shape":"ResourceIdList", + "documentation":"

    This object is a list of entity IDs (strings) that are part of a change set. The list can contain a maximum of 20 entities and must contain at least one entity.

    " + }, + "FailureCode":{ + "shape":"FailureCode", + "documentation":"

    Returned if the change set is in FAILED status. Can be either CLIENT_ERROR, which means that there are issues with the request (see the ErrorDetailList of DescribeChangeSet), or SERVER_FAULT, which means that there is a problem in the system, and you should retry your request.

    " + } + }, + "documentation":"

    A summary of a change set returned in a list of change sets when the ListChangeSets action is called.

    " + }, + "ChangeStatus":{ + "type":"string", + "enum":[ + "PREPARING", + "APPLYING", + "SUCCEEDED", + "CANCELLED", + "FAILED" + ] + }, + "ChangeSummary":{ + "type":"structure", + "members":{ + "ChangeType":{ + "shape":"ChangeType", + "documentation":"

    The type of the change.

    " + }, + "Entity":{ + "shape":"Entity", + "documentation":"

    The entity to be changed.

    " + }, + "Details":{ + "shape":"Json", + "documentation":"

    This object contains details specific to the change type of the requested change.

    " + }, + "ErrorDetailList":{ + "shape":"ErrorDetailList", + "documentation":"

    An array of ErrorDetail objects associated with the change.

    " + } + }, + "documentation":"

    This object is a container for common summary information about the change. The summary doesn't contain the whole change structure.

    " + }, + "ChangeType":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[A-Z][\\w]*$" + }, + "ClientRequestToken":{ + "type":"string", + "max":36, + "min":1, + "pattern":"^[\\w\\-]+$" + }, + "DateTimeISO8601":{ + "type":"string", + "max":20, + "min":20, + "pattern":"^([\\d]{4})\\-(1[0-2]|0[1-9])\\-(3[01]|0[1-9]|[12][\\d])T(2[0-3]|[01][\\d]):([0-5][\\d]):([0-5][\\d])Z$" + }, + "DescribeChangeSetRequest":{ + "type":"structure", + "required":[ + "Catalog", + "ChangeSetId" + ], + "members":{ + "Catalog":{ + "shape":"Catalog", + "documentation":"

    Required. The catalog related to the request. Fixed value: AWSMarketplace

    ", + "location":"querystring", + "locationName":"catalog" + }, + "ChangeSetId":{ + "shape":"ResourceId", + "documentation":"

    Required. The unique identifier for the StartChangeSet request that you want to describe the details for.

    ", + "location":"querystring", + "locationName":"changeSetId" + } + } + }, + "DescribeChangeSetResponse":{ + "type":"structure", + "members":{ + "ChangeSetId":{ + "shape":"ResourceId", + "documentation":"

    Required. The unique identifier for the change set referenced in this request.

    " + }, + "ChangeSetArn":{ + "shape":"ARN", + "documentation":"

    The ARN associated with the unique identifier for the change set referenced in this request.

    " + }, + "ChangeSetName":{ + "shape":"ChangeSetName", + "documentation":"

    The optional name provided in the StartChangeSet request. If you do not provide a name, one is set by default.

    " + }, + "StartTime":{ + "shape":"DateTimeISO8601", + "documentation":"

    The date and time, in ISO 8601 format (2018-02-27T13:45:22Z), the request started.

    " + }, + "EndTime":{ + "shape":"DateTimeISO8601", + "documentation":"

    The date and time, in ISO 8601 format (2018-02-27T13:45:22Z), the request transitioned to a terminal state. The change cannot transition to a different state. Null if the request is not in a terminal state.

    " + }, + "Status":{ + "shape":"ChangeStatus", + "documentation":"

    The status of the change request.

    " + }, + "FailureCode":{ + "shape":"FailureCode", + "documentation":"

    Returned if the change set is in FAILED status. Can be either CLIENT_ERROR, which means that there are issues with the request (see the ErrorDetailList), or SERVER_FAULT, which means that there is a problem in the system, and you should retry your request.

    " + }, + "FailureDescription":{ + "shape":"StringValue", + "documentation":"

    Returned if there is a failure on the change set, but that failure is not related to any of the changes in the request.

    " + }, + "ChangeSet":{ + "shape":"ChangeSetDescription", + "documentation":"

    An array of ChangeSummary objects.

    " + } + } + }, + "DescribeEntityRequest":{ + "type":"structure", + "required":[ + "Catalog", + "EntityId" + ], + "members":{ + "Catalog":{ + "shape":"Catalog", + "documentation":"

    Required. The catalog related to the request. Fixed value: AWSMarketplace

    ", + "location":"querystring", + "locationName":"catalog" + }, + "EntityId":{ + "shape":"ResourceId", + "documentation":"

    Required. The unique ID of the entity to describe.

    ", + "location":"querystring", + "locationName":"entityId" + } + } + }, + "DescribeEntityResponse":{ + "type":"structure", + "members":{ + "EntityType":{ + "shape":"EntityType", + "documentation":"

    The named type of the entity, in the format of EntityType@Version.

    " + }, + "EntityIdentifier":{ + "shape":"Identifier", + "documentation":"

    The identifier of the entity, in the format of EntityId@RevisionId.

    " + }, + "EntityArn":{ + "shape":"ARN", + "documentation":"

    The ARN associated with the unique identifier for the entity referenced in this request.

    " + }, + "LastModifiedDate":{ + "shape":"StringValue", + "documentation":"

    The last modified date of the entity, in ISO 8601 format (2018-02-27T13:45:22Z).

    " + }, + "Details":{ + "shape":"Json", + "documentation":"

    This stringified JSON object includes the details of the entity.

    " + } + } + }, + "Entity":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"EntityType", + "documentation":"

    The type of entity.

    " + }, + "Identifier":{ + "shape":"Identifier", + "documentation":"

    The identifier for the entity.

    " + } + }, + "documentation":"

    A product entity contains data that describes your product, its supported features, and how it can be used or launched by your customer.

    " + }, + "EntitySummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"StringValue", + "documentation":"

    The name for the entity. This value is not unique. It is defined by the seller.

    " + }, + "EntityType":{ + "shape":"EntityType", + "documentation":"

    The type of the entity.

    " + }, + "EntityId":{ + "shape":"ResourceId", + "documentation":"

    The unique identifier for the entity.

    " + }, + "EntityArn":{ + "shape":"ARN", + "documentation":"

    The ARN associated with the unique identifier for the entity.

    " + }, + "LastModifiedDate":{ + "shape":"StringValue", + "documentation":"

    The last time the entity was published, using ISO 8601 format (2018-02-27T13:45:22Z).

    " + }, + "Visibility":{ + "shape":"StringValue", + "documentation":"

    The visibility status of the entity to buyers. This value can be Public (everyone can view the entity), Limited (the entity is visible to limited accounts only), or Restricted (the entity was published and then unpublished and only existing buyers can view it).

    " + } + }, + "documentation":"

    This object is a container for common summary information about the entity. The summary doesn't contain the whole entity structure, but it does contain information common across all entities.

    " + }, + "EntitySummaryList":{ + "type":"list", + "member":{"shape":"EntitySummary"} + }, + "EntityType":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z]+$" + }, + "ErrorDetail":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"StringValue", + "documentation":"

    The error code that identifies the type of error.

    " + }, + "ErrorMessage":{ + "shape":"StringValue", + "documentation":"

    The message for the error.

    " + } + }, + "documentation":"

    Details about the error.

    " + }, + "ErrorDetailList":{ + "type":"list", + "member":{"shape":"ErrorDetail"} + }, + "FailureCode":{ + "type":"string", + "enum":[ + "CLIENT_ERROR", + "SERVER_FAULT" + ] + }, + "Filter":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"FilterName", + "documentation":"

    For ListEntities, the supported value for this is an EntityId.

    For ListChangeSets, the supported values are as follows:

    " + }, + "ValueList":{ + "shape":"ValueList", + "documentation":"

    ListEntities - This is a list of unique EntityIds.

    ListChangeSets - The supported filter names and associated ValueLists are as follows:

    • ChangeSetName - The supported ValueList is a list of non-unique ChangeSetNames. These are defined when you call the StartChangeSet action.

    • Status - The supported ValueList is a list of statuses for all change set requests.

    • EntityId - The supported ValueList is a list of unique EntityIds.

    • BeforeStartTime - The supported ValueList is a list of all change sets that started before the filter value.

    • AfterStartTime - The supported ValueList is a list of all change sets that started after the filter value.

    • BeforeEndTime - The supported ValueList is a list of all change sets that ended before the filter value.

    • AfterEndTime - The supported ValueList is a list of all change sets that ended after the filter value.

    " + } + }, + "documentation":"

    A filter object, used to optionally filter results from calls to the ListEntities and ListChangeSets actions.
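    For illustration, a minimal sketch of building one of these filters with the generated AWS SDK for Java v2 model classes (the change set name below is a hypothetical placeholder):

        import software.amazon.awssdk.services.marketplacecatalog.model.Filter;

        // Filter ListChangeSets results to change sets created with this name.
        Filter byName = Filter.builder()
                .name("ChangeSetName")
                .valueList("MyChangeSetName")   // hypothetical value; up to 10 values per filter
                .build();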

    " + }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"}, + "max":8, + "min":1 + }, + "FilterName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z]+$" + }, + "Identifier":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[\\w\\-@]+$" + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"StringValue"} + }, + "documentation":"

    There was an internal service exception.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "synthetic":true + }, + "Json":{ + "type":"string", + "max":16384, + "min":2, + "pattern":"^[\\s]*\\{[\\s\\S]*\\}[\\s]*$" + }, + "ListChangeSetsRequest":{ + "type":"structure", + "required":["Catalog"], + "members":{ + "Catalog":{ + "shape":"Catalog", + "documentation":"

    The catalog related to the request. Fixed value: AWSMarketplace

    " + }, + "FilterList":{ + "shape":"FilterList", + "documentation":"

    An array of filter objects.

    " + }, + "Sort":{ + "shape":"Sort", + "documentation":"

    An object that contains two attributes, SortBy and SortOrder.

    " + }, + "MaxResults":{ + "shape":"MaxResultInteger", + "documentation":"

    The maximum number of results returned by a single call. To retrieve the remaining results, make another call using the returned NextToken value. By default, this value is 20.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token value retrieved from a previous call to access the next page of results.

    " + } + } + }, + "ListChangeSetsResponse":{ + "type":"structure", + "members":{ + "ChangeSetSummaryList":{ + "shape":"ChangeSetSummaryList", + "documentation":"

    Array of ChangeSetSummaryListItem objects.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The value of the next token, if it exists. Null if there are no more results.

    " + } + } + }, + "ListEntitiesRequest":{ + "type":"structure", + "required":[ + "Catalog", + "EntityType" + ], + "members":{ + "Catalog":{ + "shape":"Catalog", + "documentation":"

    The catalog related to the request. Fixed value: AWSMarketplace

    " + }, + "EntityType":{ + "shape":"EntityType", + "documentation":"

    The type of entities to retrieve.

    " + }, + "FilterList":{ + "shape":"FilterList", + "documentation":"

    An array of filter objects. Each filter object contains two attributes, filterName and filterValues.

    " + }, + "Sort":{ + "shape":"Sort", + "documentation":"

    An object that contains two attributes, SortBy and SortOrder.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The value of the next token, if it exists. Null if there are no more results.

    " + }, + "MaxResults":{ + "shape":"MaxResultInteger", + "documentation":"

    Specifies the upper limit of the elements on a single page. If a value isn't provided, the default value is 20.

    " + } + } + }, + "ListEntitiesResponse":{ + "type":"structure", + "members":{ + "EntitySummaryList":{ + "shape":"EntitySummaryList", + "documentation":"

    Array of EntitySummary objects.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The value of the next token, if it exists. Null if there are no more results.
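    A hedged sketch of how this token is typically consumed, assuming the standard generated v2 client (MarketplaceCatalogClient) and a hypothetical entity type:

        import software.amazon.awssdk.services.marketplacecatalog.MarketplaceCatalogClient;
        import software.amazon.awssdk.services.marketplacecatalog.model.ListEntitiesRequest;
        import software.amazon.awssdk.services.marketplacecatalog.model.ListEntitiesResponse;

        MarketplaceCatalogClient catalog = MarketplaceCatalogClient.create();
        String nextToken = null;
        do {
            ListEntitiesResponse page = catalog.listEntities(ListEntitiesRequest.builder()
                    .catalog("AWSMarketplace")        // fixed value for this API
                    .entityType("ContainerProduct")   // hypothetical entity type
                    .maxResults(20)
                    .nextToken(nextToken)             // null on the first call
                    .build());
            page.entitySummaryList().forEach(summary -> System.out.println(summary.entityId()));
            nextToken = page.nextToken();
        } while (nextToken != null);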

    " + } + } + }, + "MaxResultInteger":{ + "type":"integer", + "box":true, + "max":20, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[\\w+=.:@\\-\\/]$" + }, + "RequestedChangeList":{ + "type":"list", + "member":{"shape":"Change"}, + "max":20, + "min":1 + }, + "ResourceId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[\\w\\-]+$" + }, + "ResourceIdList":{ + "type":"list", + "member":{"shape":"ResourceId"} + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"StringValue"} + }, + "documentation":"

    The resource is currently in use.

    ", + "error":{"httpStatusCode":423}, + "exception":true, + "synthetic":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"StringValue"} + }, + "documentation":"

    The specified resource wasn't found.

    ", + "error":{"httpStatusCode":404}, + "exception":true, + "synthetic":true + }, + "ResourceNotSupportedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"StringValue"} + }, + "documentation":"

    Currently, the specified resource is not supported.

    ", + "error":{"httpStatusCode":415}, + "exception":true, + "synthetic":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"StringValue"} + }, + "documentation":"

    The maximum number of open requests per account has been exceeded.

    ", + "error":{"httpStatusCode":402}, + "exception":true, + "synthetic":true + }, + "Sort":{ + "type":"structure", + "members":{ + "SortBy":{ + "shape":"SortBy", + "documentation":"

    For ListEntities, supported attributes include LastModifiedDate (default), Visibility, EntityId, and Name.

    For ListChangeSets, supported attributes include StartTime and EndTime.

    " + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

    The sorting order. Can be ASCENDING or DESCENDING. The default value is DESCENDING.

    " + } + }, + "documentation":"

    An object that contains two attributes, SortBy and SortOrder.

    " + }, + "SortBy":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z]+$" + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "StartChangeSetRequest":{ + "type":"structure", + "required":[ + "Catalog", + "ChangeSet" + ], + "members":{ + "Catalog":{ + "shape":"Catalog", + "documentation":"

    The catalog related to the request. Fixed value: AWSMarketplace

    " + }, + "ChangeSet":{ + "shape":"RequestedChangeList", + "documentation":"

    Array of Change objects.

    " + }, + "ChangeSetName":{ + "shape":"ChangeSetName", + "documentation":"

    Optional case-sensitive string of up to 100 ASCII characters. The change set name can be used to filter the list of change sets.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    A unique token to identify the request to ensure idempotency.
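    A minimal sketch of supplying this token with the generated v2 builders; a random UUID is one common choice, and myChanges stands in for a previously built list of Change objects (it is a hypothetical placeholder here):

        import java.util.UUID;
        import software.amazon.awssdk.services.marketplacecatalog.model.StartChangeSetRequest;

        // "myChanges" is a previously built List<Change> with 1-20 entries.
        StartChangeSetRequest request = StartChangeSetRequest.builder()
                .catalog("AWSMarketplace")
                .changeSet(myChanges)
                .changeSetName("my-change-set")                    // optional, up to 100 ASCII characters
                .clientRequestToken(UUID.randomUUID().toString())  // retries with the same token are idempotent
                .build();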

    " + } + } + }, + "StartChangeSetResponse":{ + "type":"structure", + "members":{ + "ChangeSetId":{ + "shape":"ResourceId", + "documentation":"

    Unique identifier generated for the request.

    " + }, + "ChangeSetArn":{ + "shape":"ARN", + "documentation":"

    The ARN associated with the unique identifier generated for the request.

    " + } + } + }, + "StringValue":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"StringValue"} + }, + "documentation":"

    Too many requests.

    ", + "error":{"httpStatusCode":429}, + "exception":true, + "synthetic":true + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"StringValue"} + }, + "documentation":"

    An error occurred during validation.

    ", + "error":{"httpStatusCode":422}, + "exception":true, + "synthetic":true + }, + "ValueList":{ + "type":"list", + "member":{"shape":"StringValue"}, + "max":10, + "min":1 + } + }, + "documentation":"

    Catalog API actions allow you to manage your entities through list, describe, and update capabilities. An entity can be a product or an offer on AWS Marketplace.

    You can automate your entity update process by integrating the AWS Marketplace Catalog API with your AWS Marketplace product build or deployment pipelines. You can also create your own applications on top of the Catalog API to manage your products on AWS Marketplace.

    " +} diff --git a/services/marketplacecommerceanalytics/build.properties b/services/marketplacecommerceanalytics/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/marketplacecommerceanalytics/build.properties +++ b/services/marketplacecommerceanalytics/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index d7e4015780a7..79068f1dfee5 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + migrationhubconfig + AWS Java SDK :: Services :: MigrationHub Config + The AWS Java SDK for MigrationHub Config module holds the client classes that are used for + communicating with MigrationHub Config. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.migrationhubconfig + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/migrationhubconfig/src/main/resources/codegen-resources/paginators-1.json b/services/migrationhubconfig/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..cd89e068e4d1 --- /dev/null +++ b/services/migrationhubconfig/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,9 @@ +{ + "pagination": { + "DescribeHomeRegionControls": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/migrationhubconfig/src/main/resources/codegen-resources/service-2.json b/services/migrationhubconfig/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..10e600c096c3 --- /dev/null +++ b/services/migrationhubconfig/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,289 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-06-30", + "endpointPrefix":"migrationhub-config", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Migration Hub Config", + "serviceId":"MigrationHub Config", + "signatureVersion":"v4", + "signingName":"mgh", + "targetPrefix":"AWSMigrationHubMultiAccountService", + "uid":"migrationhub-config-2019-06-30" + }, + "operations":{ + "CreateHomeRegionControl":{ + "name":"CreateHomeRegionControl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHomeRegionControlRequest"}, + "output":{"shape":"CreateHomeRegionControlResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"DryRunOperation"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    This API sets up the home region for the calling account only.

    " + }, + "DescribeHomeRegionControls":{ + "name":"DescribeHomeRegionControls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHomeRegionControlsRequest"}, + "output":{"shape":"DescribeHomeRegionControlsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    This API permits filtering on the ControlId and HomeRegion fields.

    " + }, + "GetHomeRegion":{ + "name":"GetHomeRegion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetHomeRegionRequest"}, + "output":{"shape":"GetHomeRegionResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

    Returns the calling account’s home region, if configured. This API is used by other AWS services to determine the regional endpoint for calling AWS Application Discovery Service and Migration Hub. You must call GetHomeRegion at least once before you call any other AWS Application Discovery Service and AWS Migration Hub APIs, to obtain the account's Migration Hub home region.
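    A minimal sketch of that initial call, assuming the standard generated v2 client for this module (MigrationHubConfigClient):

        import software.amazon.awssdk.services.migrationhubconfig.MigrationHubConfigClient;
        import software.amazon.awssdk.services.migrationhubconfig.model.GetHomeRegionRequest;

        MigrationHubConfigClient mhConfig = MigrationHubConfigClient.create();
        String homeRegion = mhConfig.getHomeRegion(GetHomeRegionRequest.builder().build()).homeRegion();
        // Use homeRegion to pick the regional endpoint for Application Discovery Service and Migration Hub calls.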

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "exception":true + }, + "ControlId":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^hrc-[a-z0-9]{12}$" + }, + "CreateHomeRegionControlRequest":{ + "type":"structure", + "required":[ + "HomeRegion", + "Target" + ], + "members":{ + "HomeRegion":{ + "shape":"HomeRegion", + "documentation":"

    The name of the home region of the calling account.

    " + }, + "Target":{ + "shape":"Target", + "documentation":"

    The account for which this command sets up a home region control. The Target is always of type ACCOUNT.

    " + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

    An optional Boolean flag. When set to true, the call does not take effect; it only tests whether the caller has permission to make the call.
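    A minimal sketch of a dry-run permission check with the generated v2 model classes (the account ID and region below are hypothetical placeholders):

        import software.amazon.awssdk.services.migrationhubconfig.model.CreateHomeRegionControlRequest;
        import software.amazon.awssdk.services.migrationhubconfig.model.Target;
        import software.amazon.awssdk.services.migrationhubconfig.model.TargetType;

        CreateHomeRegionControlRequest request = CreateHomeRegionControlRequest.builder()
                .homeRegion("us-west-2")
                .target(Target.builder()
                        .type(TargetType.ACCOUNT)
                        .id("123456789012")        // the current 12-digit account ID
                        .build())
                .dryRun(true)                      // permission check only; no control is created
                .build();

    When DryRun is true, a successful permission check surfaces as the DryRunOperation exception defined later in this model.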

    " + } + } + }, + "CreateHomeRegionControlResult":{ + "type":"structure", + "members":{ + "HomeRegionControl":{ + "shape":"HomeRegionControl", + "documentation":"

    This object is the HomeRegionControl object that's returned by a successful call to CreateHomeRegionControl.

    " + } + } + }, + "DescribeHomeRegionControlsMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "DescribeHomeRegionControlsRequest":{ + "type":"structure", + "members":{ + "ControlId":{ + "shape":"ControlId", + "documentation":"

    The ControlID is a unique identifier string of your HomeRegionControl object.

    " + }, + "HomeRegion":{ + "shape":"HomeRegion", + "documentation":"

    The name of the home region you'd like to view.

    " + }, + "Target":{ + "shape":"Target", + "documentation":"

    The target parameter specifies the identifier to which the home region is applied, which is always of type ACCOUNT. It applies the home region to the current ACCOUNT.

    " + }, + "MaxResults":{ + "shape":"DescribeHomeRegionControlsMaxResults", + "documentation":"

    The maximum number of filtering results to display per page.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    If a NextToken was returned by a previous call, more results are available. To retrieve the next page of results, make the call again using the returned token in NextToken.

    " + } + } + }, + "DescribeHomeRegionControlsResult":{ + "type":"structure", + "members":{ + "HomeRegionControls":{ + "shape":"HomeRegionControls", + "documentation":"

    An array that contains your HomeRegionControl objects.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    If a NextToken was returned by a previous call, more results are available. To retrieve the next page of results, make the call again using the returned token in NextToken.

    " + } + } + }, + "DryRun":{"type":"boolean"}, + "DryRunOperation":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Exception raised to indicate that authorization of an action was successful, when the DryRun flag is set to true.

    ", + "exception":true + }, + "ErrorMessage":{"type":"string"}, + "GetHomeRegionRequest":{ + "type":"structure", + "members":{ + } + }, + "GetHomeRegionResult":{ + "type":"structure", + "members":{ + "HomeRegion":{ + "shape":"HomeRegion", + "documentation":"

    The name of the home region of the calling account.

    " + } + } + }, + "HomeRegion":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^([a-z]+)-([a-z]+)-([0-9]+)$" + }, + "HomeRegionControl":{ + "type":"structure", + "members":{ + "ControlId":{ + "shape":"ControlId", + "documentation":"

    A unique identifier that's generated for each home region control. It's always a string that begins with \"hrc-\" followed by 12 lowercase letters and numbers.

    " + }, + "HomeRegion":{ + "shape":"HomeRegion", + "documentation":"

    The AWS Region that's been set as home region. For example, \"us-west-2\" or \"eu-central-1\" are valid home regions.

    " + }, + "Target":{ + "shape":"Target", + "documentation":"

    The target parameter specifies the identifier to which the home region is applied, which is always an ACCOUNT. It applies the home region to the current ACCOUNT.

    " + }, + "RequestedTime":{ + "shape":"RequestedTime", + "documentation":"

    A timestamp representing the time when the customer called CreateHomeRegionControl and set the home region for the account.

    " + } + }, + "documentation":"

    A home region control is an object that specifies the home region for an account, with some additional information. It contains a target (always of type ACCOUNT), an ID, and a time at which the home region was set.

    " + }, + "HomeRegionControls":{ + "type":"list", + "member":{"shape":"HomeRegionControl"}, + "max":100 + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Exception raised when an internal, configuration, or dependency error is encountered.

    ", + "exception":true, + "fault":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Exception raised when the provided input violates a policy constraint or is entered in the wrong format or data type.

    ", + "exception":true + }, + "RequestedTime":{"type":"timestamp"}, + "RetryAfterSeconds":{"type":"integer"}, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Exception raised when a request fails due to temporary unavailability of the service.

    ", + "exception":true, + "fault":true + }, + "Target":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"TargetType", + "documentation":"

    The target type is always an ACCOUNT.

    " + }, + "Id":{ + "shape":"TargetId", + "documentation":"

    The TargetID is a 12-character identifier of the ACCOUNT for which the control was created. (This must be the current account.)

    " + } + }, + "documentation":"

    The target parameter specifies the identifier to which the home region is applied, which is always an ACCOUNT. It applies the home region to the current ACCOUNT.

    " + }, + "TargetId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^\\d{12}$" + }, + "TargetType":{ + "type":"string", + "enum":["ACCOUNT"] + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ErrorMessage"}, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

    The number of seconds the caller should wait before retrying.

    " + } + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "exception":true + }, + "Token":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^[a-zA-Z0-9\\/\\+\\=]{0,2048}$" + } + }, + "documentation":"

    The AWS Migration Hub home region APIs are available specifically for working with your Migration Hub home region. You can use these APIs to determine a home region, as well as to create and work with controls that describe the home region.

    • You must make API calls for write actions (create, notify, associate, disassociate, import, or put) while in your home region, or a HomeRegionNotSetException error is returned.

    • API calls for read actions (list, describe, stop, and delete) are permitted outside of your home region.

    • If you call a write API outside the home region, an InvalidInputException is returned.

    • You can call GetHomeRegion action to obtain the account's Migration Hub home region.

    For specific API usage, see the sections that follow in this AWS Migration Hub Home Region API reference.

    " +} diff --git a/services/mobile/pom.xml b/services/mobile/pom.xml index cf9c5540f9b2..5896baad423f 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + mwaa + AWS Java SDK :: Services :: MWAA + The AWS Java SDK for MWAA module holds the client classes that are used for + communicating with MWAA. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.mwaa + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/mwaa/src/main/resources/codegen-resources/paginators-1.json b/services/mwaa/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5e218e4616bb --- /dev/null +++ b/services/mwaa/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListEnvironments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Environments" + } + } +} diff --git a/services/mwaa/src/main/resources/codegen-resources/service-2.json b/services/mwaa/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..92e6ba9bf7ad --- /dev/null +++ b/services/mwaa/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1207 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-07-01", + "endpointPrefix":"airflow", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AmazonMWAA", + "serviceId":"MWAA", + "signatureVersion":"v4", + "signingName":"airflow", + "uid":"mwaa-2020-07-01" + }, + "operations":{ + "CreateCliToken":{ + "name":"CreateCliToken", + "http":{ + "method":"POST", + "requestUri":"/clitoken/{Name}", + "responseCode":200 + }, + "input":{"shape":"CreateCliTokenRequest"}, + "output":{"shape":"CreateCliTokenResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Create a CLI token to use the Airflow CLI.
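    A minimal sketch with the generated v2 client, assuming the class name MwaaClient and a hypothetical environment name:

        import software.amazon.awssdk.services.mwaa.MwaaClient;
        import software.amazon.awssdk.services.mwaa.model.CreateCliTokenResponse;

        MwaaClient mwaa = MwaaClient.create();
        CreateCliTokenResponse token = mwaa.createCliToken(b -> b.name("MyAirflowEnvironment"));
        System.out.println(token.webServerHostname());  // the web server host associated with the token
        // token.cliToken() holds the short-lived CLI token; it is marked sensitive in the model.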

    ", + "endpoint":{"hostPrefix":"env."} + }, + "CreateEnvironment":{ + "name":"CreateEnvironment", + "http":{ + "method":"PUT", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"CreateEnvironmentInput"}, + "output":{"shape":"CreateEnvironmentOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates an Amazon MWAA environment. The request is a JSON blob that describes the environment to create.

    ", + "endpoint":{"hostPrefix":"api."}, + "idempotent":true + }, + "CreateWebLoginToken":{ + "name":"CreateWebLoginToken", + "http":{ + "method":"POST", + "requestUri":"/webtoken/{Name}", + "responseCode":200 + }, + "input":{"shape":"CreateWebLoginTokenRequest"}, + "output":{"shape":"CreateWebLoginTokenResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Create a JWT token to be used to log in to the Airflow Web UI with claims-based authentication.

    ", + "endpoint":{"hostPrefix":"env."}, + "idempotent":true + }, + "DeleteEnvironment":{ + "name":"DeleteEnvironment", + "http":{ + "method":"DELETE", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"DeleteEnvironmentInput"}, + "output":{"shape":"DeleteEnvironmentOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Delete an existing environment.

    ", + "endpoint":{"hostPrefix":"api."}, + "idempotent":true + }, + "GetEnvironment":{ + "name":"GetEnvironment", + "http":{ + "method":"GET", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"GetEnvironmentInput"}, + "output":{"shape":"GetEnvironmentOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Get details of an existing environment.

    ", + "endpoint":{"hostPrefix":"api."} + }, + "ListEnvironments":{ + "name":"ListEnvironments", + "http":{ + "method":"GET", + "requestUri":"/environments", + "responseCode":200 + }, + "input":{"shape":"ListEnvironmentsInput"}, + "output":{"shape":"ListEnvironmentsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    List Amazon MWAA Environments.

    ", + "endpoint":{"hostPrefix":"api."} + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    List the tags for MWAA environments.

    ", + "endpoint":{"hostPrefix":"api."} + }, + "PublishMetrics":{ + "name":"PublishMetrics", + "http":{ + "method":"POST", + "requestUri":"/metrics/environments/{EnvironmentName}", + "responseCode":200 + }, + "input":{"shape":"PublishMetricsInput"}, + "output":{"shape":"PublishMetricsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    An operation for publishing metrics from the customers to the Ops plane.

    ", + "endpoint":{"hostPrefix":"ops."} + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Add a tag to the MWAA environments.

    ", + "endpoint":{"hostPrefix":"api."} + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Remove a tag from the MWAA environments.

    ", + "endpoint":{"hostPrefix":"api."}, + "idempotent":true + }, + "UpdateEnvironment":{ + "name":"UpdateEnvironment", + "http":{ + "method":"PATCH", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"UpdateEnvironmentInput"}, + "output":{"shape":"UpdateEnvironmentOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Update an MWAA environment.

    ", + "endpoint":{"hostPrefix":"api."} + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    Access to the Airflow Web UI or CLI has been denied. Please follow the MWAA user guide to set up permissions to access the Web UI and CLI functionality.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AirflowConfigurationOptions":{ + "type":"map", + "key":{"shape":"ConfigKey"}, + "value":{"shape":"ConfigValue"} + }, + "AirflowVersion":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[0-9a-z.]+$" + }, + "CloudWatchLogGroupArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:logs:[a-z0-9\\-]+:\\d{12}:log-group:\\w+" + }, + "ConfigKey":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-z]+([a-z._]*[a-z]+)?$" + }, + "ConfigValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*" + }, + "CreateCliTokenRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "documentation":"

    Create a CLI token request for a MWAA environment.

    ", + "location":"uri", + "locationName":"Name" + } + } + }, + "CreateCliTokenResponse":{ + "type":"structure", + "members":{ + "CliToken":{ + "shape":"SyntheticCreateCliTokenResponseToken", + "documentation":"

    Create an Airflow CLI login token response for the provided JWT token.

    " + }, + "WebServerHostname":{ + "shape":"Hostname", + "documentation":"

    Create an Airflow CLI login token response for the provided webserver hostname.

    " + } + } + }, + "CreateEnvironmentInput":{ + "type":"structure", + "required":[ + "DagS3Path", + "ExecutionRoleArn", + "Name", + "NetworkConfiguration", + "SourceBucketArn" + ], + "members":{ + "AirflowConfigurationOptions":{ + "shape":"SyntheticCreateEnvironmentInputAirflowConfigurationOptions", + "documentation":"

    The Apache Airflow configuration setting you want to override in your environment. For more information, see Environment configuration.

    " + }, + "AirflowVersion":{ + "shape":"AirflowVersion", + "documentation":"

    The Apache Airflow version you want to use for your environment.

    " + }, + "DagS3Path":{ + "shape":"RelativePath", + "documentation":"

    The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see Importing DAGs on Amazon MWAA.

    " + }, + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

    The environment class you want to use for your environment. The environment class determines the size of the containers and database used for your Apache Airflow services.

    " + }, + "ExecutionRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the execution role for your environment. An execution role is an AWS Identity and Access Management (IAM) role that grants MWAA permission to access AWS services and resources used by your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Managing access to Amazon Managed Workflows for Apache Airflow.

    " + }, + "KmsKey":{ + "shape":"KmsKey", + "documentation":"

    The AWS Key Management Service (KMS) key to encrypt and decrypt the data in your environment. You can use an AWS KMS key managed by MWAA, or a custom KMS key (advanced). For more information, see Customer master keys (CMKs) in the AWS KMS developer guide.

    " + }, + "LoggingConfiguration":{ + "shape":"LoggingConfigurationInput", + "documentation":"

    The Apache Airflow logs you want to send to Amazon CloudWatch Logs.

    " + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

    The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers and the Fargate containers that run your tasks up to the number you specify in this field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra containers leaving the one worker that is included with your environment.

    " + }, + "Name":{ + "shape":"EnvironmentName", + "documentation":"

    The name of your MWAA environment.

    ", + "location":"uri", + "locationName":"Name" + }, + "NetworkConfiguration":{ + "shape":"NetworkConfiguration", + "documentation":"

    The VPC networking components you want to use for your environment. At least two private subnet identifiers and one VPC security group identifier are required to create an environment. For more information, see Creating the VPC network for a MWAA environment.

    " + }, + "PluginsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

    The plugins.zip file version you want to use.

    " + }, + "PluginsS3Path":{ + "shape":"RelativePath", + "documentation":"

    The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then PluginsS3ObjectVersion is required. For more information, see Importing DAGs on Amazon MWAA.

    " + }, + "RequirementsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

    The requirements.txt file version you want to use.

    " + }, + "RequirementsS3Path":{ + "shape":"RelativePath", + "documentation":"

    The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then RequirementsS3ObjectVersion is required. For more information, see Importing DAGs on Amazon MWAA.

    " + }, + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

    The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The metadata tags you want to attach to your environment. For more information, see Tagging AWS resources.

    " + }, + "WebserverAccessMode":{ + "shape":"WebserverAccessMode", + "documentation":"

    The networking access of your Apache Airflow web server. A public network allows your Airflow UI to be accessed over the Internet by users granted access in your IAM policy. A private network limits access of your Airflow UI to users within your VPC. For more information, see Creating the VPC network for a MWAA environment.

    " + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

    The day and time you want MWAA to start weekly maintenance updates on your environment.

    " + } + }, + "documentation":"

    This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation to create an environment. For more information, see Get started with Amazon Managed Workflows for Apache Airflow.
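    A hedged sketch of a minimal CreateEnvironment request covering only the required members (all names, ARNs, and IDs below are hypothetical placeholders):

        import software.amazon.awssdk.services.mwaa.MwaaClient;
        import software.amazon.awssdk.services.mwaa.model.CreateEnvironmentRequest;
        import software.amazon.awssdk.services.mwaa.model.NetworkConfiguration;

        CreateEnvironmentRequest request = CreateEnvironmentRequest.builder()
                .name("MyAirflowEnvironment")
                .dagS3Path("dags")
                .sourceBucketArn("arn:aws:s3:::airflow-mybucketname")
                .executionRoleArn("arn:aws:iam::123456789012:role/my-execution-role")
                .networkConfiguration(NetworkConfiguration.builder()
                        .subnetIds("subnet-aaaa1111", "subnet-bbbb2222")   // two private subnets in different Availability Zones
                        .securityGroupIds("sg-0123456789abcdef0")
                        .build())
                .build();
        String environmentArn = MwaaClient.create().createEnvironment(request).arn();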

    " + }, + "CreateEnvironmentOutput":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"EnvironmentArn", + "documentation":"

    The resulting Amazon MWAA environment ARN.

    " + } + } + }, + "CreateWebLoginTokenRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "documentation":"

    Create an Airflow Web UI login token request for a MWAA environment.

    ", + "location":"uri", + "locationName":"Name" + } + } + }, + "CreateWebLoginTokenResponse":{ + "type":"structure", + "members":{ + "WebServerHostname":{ + "shape":"Hostname", + "documentation":"

    Create an Airflow Web UI login token response for the provided webserver hostname.

    " + }, + "WebToken":{ + "shape":"SyntheticCreateWebLoginTokenResponseToken", + "documentation":"

    Create an Airflow Web UI login token response for the provided JWT token.

    " + } + } + }, + "CreatedAt":{"type":"timestamp"}, + "DeleteEnvironmentInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "documentation":"

    The name of the environment to delete.

    ", + "location":"uri", + "locationName":"Name" + } + } + }, + "DeleteEnvironmentOutput":{ + "type":"structure", + "members":{ + } + }, + "Dimension":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

    Internal only API.

    " + }, + "Value":{ + "shape":"String", + "documentation":"

    Internal only API.

    " + } + }, + "documentation":"

    Internal only API.

    " + }, + "Dimensions":{ + "type":"list", + "member":{"shape":"Dimension"} + }, + "Double":{ + "type":"double", + "box":true + }, + "Environment":{ + "type":"structure", + "members":{ + "AirflowConfigurationOptions":{ + "shape":"AirflowConfigurationOptions", + "documentation":"

    The Airflow Configuration Options of the Amazon MWAA Environment.

    " + }, + "AirflowVersion":{ + "shape":"AirflowVersion", + "documentation":"

    The Airflow Version of the Amazon MWAA Environment.

    " + }, + "Arn":{ + "shape":"EnvironmentArn", + "documentation":"

    The ARN of the Amazon MWAA Environment.

    " + }, + "CreatedAt":{ + "shape":"CreatedAt", + "documentation":"

    The Created At date of the Amazon MWAA Environment.

    " + }, + "DagS3Path":{ + "shape":"RelativePath", + "documentation":"

    The Dags S3 Path of the Amazon MWAA Environment.

    " + }, + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

    The Environment Class (size) of the Amazon MWAA Environment.

    " + }, + "ExecutionRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

    The Execution Role ARN of the Amazon MWAA Environment.

    " + }, + "KmsKey":{ + "shape":"KmsKey", + "documentation":"

    The Kms Key of the Amazon MWAA Environment.

    " + }, + "LastUpdate":{"shape":"LastUpdate"}, + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

    The Logging Configuration of the Amazon MWAA Environment.

    " + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

    The Maximum Workers of the Amazon MWAA Environment.

    " + }, + "Name":{ + "shape":"EnvironmentName", + "documentation":"

    The name of the Amazon MWAA Environment.

    " + }, + "NetworkConfiguration":{"shape":"NetworkConfiguration"}, + "PluginsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

    The Plugins.zip S3 Object Version of the Amazon MWAA Environment.

    " + }, + "PluginsS3Path":{ + "shape":"RelativePath", + "documentation":"

    The Plugins.zip S3 Path of the Amazon MWAA Environment.

    " + }, + "RequirementsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

    The Requirements.txt file S3 Object Version of the Amazon MWAA Environment.

    " + }, + "RequirementsS3Path":{ + "shape":"RelativePath", + "documentation":"

    The Requirements.txt S3 Path of the Amazon MWAA Environment.

    " + }, + "ServiceRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

    The Service Role ARN of the Amazon MWAA Environment.

    " + }, + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

    The Source S3 Bucket ARN of the Amazon MWAA Environment.

    " + }, + "Status":{ + "shape":"EnvironmentStatus", + "documentation":"

    The status of the Amazon MWAA Environment.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The Tags of the Amazon MWAA Environment.

    " + }, + "WebserverAccessMode":{ + "shape":"WebserverAccessMode", + "documentation":"

    The Webserver Access Mode of the Amazon MWAA Environment (public or private only).

    " + }, + "WebserverUrl":{ + "shape":"WebserverUrl", + "documentation":"

    The Webserver URL of the Amazon MWAA Environment.

    " + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

    The Weekly Maintenance Window Start of the Amazon MWAA Environment.

    " + } + }, + "documentation":"

    An Amazon MWAA environment.

    " + }, + "EnvironmentArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:airflow:[a-z0-9\\-]+:\\d{12}:environment/\\w+" + }, + "EnvironmentClass":{ + "type":"string", + "max":1024, + "min":1 + }, + "EnvironmentList":{ + "type":"list", + "member":{"shape":"EnvironmentName"} + }, + "EnvironmentName":{ + "type":"string", + "max":80, + "min":1, + "pattern":"^[a-zA-Z][0-9a-zA-Z-_]*$" + }, + "EnvironmentStatus":{ + "type":"string", + "enum":[ + "CREATING", + "CREATE_FAILED", + "AVAILABLE", + "UPDATING", + "DELETING", + "DELETED" + ] + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^.+$" + }, + "GetEnvironmentInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "documentation":"

    The name of the environment to retrieve.

    ", + "location":"uri", + "locationName":"Name" + } + } + }, + "GetEnvironmentOutput":{ + "type":"structure", + "members":{ + "Environment":{ + "shape":"Environment", + "documentation":"

    A JSON blob with environment details.

    " + } + } + }, + "Hostname":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])$" + }, + "IamRoleArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    InternalServerException: An internal error has occurred.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "KmsKey":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^(((arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?key\\/)?[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}|(arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?alias/.+)$" + }, + "LastUpdate":{ + "type":"structure", + "members":{ + "CreatedAt":{ + "shape":"UpdateCreatedAt", + "documentation":"

    Time that last update occurred.

    " + }, + "Error":{ + "shape":"UpdateError", + "documentation":"

    Error string of last update, if applicable.

    " + }, + "Status":{ + "shape":"UpdateStatus", + "documentation":"

    Status of the last update: SUCCESS, PENDING, or FAILED.

    " + } + }, + "documentation":"

    Last update information for the environment.

    " + }, + "ListEnvironmentsInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListEnvironmentsInputMaxResultsInteger", + "documentation":"

    The maximum results when listing MWAA environments.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The Next Token when listing MWAA environments.

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListEnvironmentsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "ListEnvironmentsOutput":{ + "type":"structure", + "required":["Environments"], + "members":{ + "Environments":{ + "shape":"EnvironmentList", + "documentation":"

    The list of Amazon MWAA Environments.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The Next Token when listing MWAA environments.

    " + } + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"EnvironmentArn", + "documentation":"

    The ARN of the MWAA environment.

    ", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags of the MWAA environments.

    " + } + } + }, + "LoggingConfiguration":{ + "type":"structure", + "members":{ + "DagProcessingLogs":{"shape":"ModuleLoggingConfiguration"}, + "SchedulerLogs":{"shape":"ModuleLoggingConfiguration"}, + "TaskLogs":{"shape":"ModuleLoggingConfiguration"}, + "WebserverLogs":{"shape":"ModuleLoggingConfiguration"}, + "WorkerLogs":{"shape":"ModuleLoggingConfiguration"} + }, + "documentation":"

    The Logging Configuration of your Amazon MWAA environment.

    " + }, + "LoggingConfigurationInput":{ + "type":"structure", + "members":{ + "DagProcessingLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "SchedulerLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "TaskLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "WebserverLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "WorkerLogs":{"shape":"ModuleLoggingConfigurationInput"} + }, + "documentation":"

    The Logging Configuration of your Amazon MWAA environment.

    " + }, + "LoggingEnabled":{ + "type":"boolean", + "box":true + }, + "LoggingLevel":{ + "type":"string", + "enum":[ + "CRITICAL", + "ERROR", + "WARNING", + "INFO", + "DEBUG" + ] + }, + "MaxWorkers":{ + "type":"integer", + "box":true, + "min":1 + }, + "MetricData":{ + "type":"list", + "member":{"shape":"MetricDatum"} + }, + "MetricDatum":{ + "type":"structure", + "required":[ + "MetricName", + "Timestamp" + ], + "members":{ + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

    Internal only API.

    " + }, + "MetricName":{ + "shape":"String", + "documentation":"

    Internal only API.

    " + }, + "StatisticValues":{ + "shape":"StatisticSet", + "documentation":"

    Internal only API.

    " + }, + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

    Internal only API.

    " + }, + "Unit":{"shape":"Unit"}, + "Value":{ + "shape":"Double", + "documentation":"

    Internal only API.

    " + } + }, + "documentation":"

    Internal only API.

    " + }, + "ModuleLoggingConfiguration":{ + "type":"structure", + "members":{ + "CloudWatchLogGroupArn":{ + "shape":"CloudWatchLogGroupArn", + "documentation":"

    Provides the ARN for the CloudWatch group where the logs will be published.

    " + }, + "Enabled":{ + "shape":"LoggingEnabled", + "documentation":"

    Defines that the logging module is enabled.

    " + }, + "LogLevel":{ + "shape":"LoggingLevel", + "documentation":"

    Defines the log level, which can be CRITICAL, ERROR, WARNING, INFO, or DEBUG.

    " + } + }, + "documentation":"

    A JSON blob that provides configuration to use for logging with respect to the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, and WorkerLogs.

    " + }, + "ModuleLoggingConfigurationInput":{ + "type":"structure", + "required":[ + "Enabled", + "LogLevel" + ], + "members":{ + "Enabled":{ + "shape":"LoggingEnabled", + "documentation":"

    Defines that the logging module is enabled.

    " + }, + "LogLevel":{ + "shape":"LoggingLevel", + "documentation":"

    Defines the log level, which can be CRITICAL, ERROR, WARNING, INFO, or DEBUG.

    " + } + }, + "documentation":"

    A JSON blob that provides configuration to use for logging with respect to the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, and WorkerLogs.
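    A minimal sketch of enabling two of these log modules with the generated v2 builders:

        import software.amazon.awssdk.services.mwaa.model.LoggingConfigurationInput;
        import software.amazon.awssdk.services.mwaa.model.LoggingLevel;
        import software.amazon.awssdk.services.mwaa.model.ModuleLoggingConfigurationInput;

        LoggingConfigurationInput logging = LoggingConfigurationInput.builder()
                .taskLogs(ModuleLoggingConfigurationInput.builder()
                        .enabled(true)
                        .logLevel(LoggingLevel.INFO)
                        .build())
                .workerLogs(ModuleLoggingConfigurationInput.builder()
                        .enabled(true)
                        .logLevel(LoggingLevel.WARNING)
                        .build())
                .build();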

    " + }, + "NetworkConfiguration":{ + "type":"structure", + "members":{ + "SecurityGroupIds":{ + "shape":"SecurityGroupList", + "documentation":"

    A JSON list of 1 or more security group IDs by name, in the same VPC as the subnets.

    " + }, + "SubnetIds":{ + "shape":"SubnetList", + "documentation":"

    Provide a JSON list of 2 subnet IDs by name. These must be private subnets, in the same VPC, in two different Availability Zones.

    " + } + }, + "documentation":"

    Provide the security group and subnet IDs for the workers and scheduler.

    " + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":0 + }, + "PublishMetricsInput":{ + "type":"structure", + "required":[ + "EnvironmentName", + "MetricData" + ], + "members":{ + "EnvironmentName":{ + "shape":"EnvironmentName", + "documentation":"

    Publishes environment metric data to Amazon CloudWatch.

    ", + "location":"uri", + "locationName":"EnvironmentName" + }, + "MetricData":{ + "shape":"MetricData", + "documentation":"

    Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric.

    " + } + } + }, + "PublishMetricsOutput":{ + "type":"structure", + "members":{ + } + }, + "RelativePath":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    ResourceNotFoundException: The resource is not available.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "S3BucketArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:s3:::airflow-[a-z0-9.\\-]+$" + }, + "S3ObjectVersion":{ + "type":"string", + "max":1024, + "min":1 + }, + "SecurityGroupId":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^sg-[a-zA-Z0-9\\-._]+$" + }, + "SecurityGroupList":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":5, + "min":1 + }, + "StatisticSet":{ + "type":"structure", + "members":{ + "Maximum":{ + "shape":"Double", + "documentation":"

    Internal only API.

    " + }, + "Minimum":{ + "shape":"Double", + "documentation":"

    Internal only API.

    " + }, + "SampleCount":{ + "shape":"Integer", + "documentation":"

    Internal only API.

    " + }, + "Sum":{ + "shape":"Double", + "documentation":"

    Internal only API.

    " + } + }, + "documentation":"

    Internal only API.

    " + }, + "String":{"type":"string"}, + "SubnetId":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^subnet-[a-zA-Z0-9\\-._]+$" + }, + "SubnetList":{ + "type":"list", + "member":{"shape":"SubnetId"}, + "max":2, + "min":2 + }, + "SyntheticCreateCliTokenResponseToken":{ + "type":"string", + "sensitive":true + }, + "SyntheticCreateEnvironmentInputAirflowConfigurationOptions":{ + "type":"map", + "key":{"shape":"ConfigKey"}, + "value":{"shape":"ConfigValue"}, + "sensitive":true + }, + "SyntheticCreateWebLoginTokenResponseToken":{ + "type":"string", + "sensitive":true + }, + "SyntheticUpdateEnvironmentInputAirflowConfigurationOptions":{ + "type":"map", + "key":{"shape":"ConfigKey"}, + "value":{"shape":"ConfigValue"}, + "sensitive":true + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"EnvironmentArn", + "documentation":"

    The tag resource ARN of the MWAA environments.

    ", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tag resource tag of the MWAA environments.

    " + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "Timestamp":{"type":"timestamp"}, + "Unit":{ + "type":"string", + "documentation":"

    Unit

    ", + "enum":[ + "Seconds", + "Microseconds", + "Milliseconds", + "Bytes", + "Kilobytes", + "Megabytes", + "Gigabytes", + "Terabytes", + "Bits", + "Kilobits", + "Megabits", + "Gigabits", + "Terabits", + "Percent", + "Count", + "Bytes/Second", + "Kilobytes/Second", + "Megabytes/Second", + "Gigabytes/Second", + "Terabytes/Second", + "Bits/Second", + "Kilobits/Second", + "Megabits/Second", + "Gigabits/Second", + "Terabits/Second", + "Count/Second", + "None" + ] + }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "tagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"EnvironmentArn", + "documentation":"

    The tag resource ARN of the MWAA environments.

    ", + "location":"uri", + "locationName":"ResourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag resource key of the MWAA environments.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateCreatedAt":{"type":"timestamp"}, + "UpdateEnvironmentInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "AirflowConfigurationOptions":{ + "shape":"SyntheticUpdateEnvironmentInputAirflowConfigurationOptions", + "documentation":"

    The Airflow Configuration Options to update of your Amazon MWAA environment.

    " + }, + "AirflowVersion":{ + "shape":"AirflowVersion", + "documentation":"

    The Airflow Version to update of your Amazon MWAA environment.

    " + }, + "DagS3Path":{ + "shape":"RelativePath", + "documentation":"

    The Dags folder S3 Path to update of your Amazon MWAA environment.

    " + }, + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

    The Environment Class to update of your Amazon MWAA environment.

    " + }, + "ExecutionRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

    The Execution Role ARN to update of your Amazon MWAA environment.

    " + }, + "LoggingConfiguration":{ + "shape":"LoggingConfigurationInput", + "documentation":"

    The Logging Configuration to update of your Amazon MWAA environment.

    " + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

    The Maximum Workers to update of your Amazon MWAA environment.

    " + }, + "Name":{ + "shape":"EnvironmentName", + "documentation":"

    The name of your Amazon MWAA environment that you wish to update.

    ", + "location":"uri", + "locationName":"Name" + }, + "NetworkConfiguration":{ + "shape":"UpdateNetworkConfigurationInput", + "documentation":"

    The Network Configuration to update of your Amazon MWAA environment.

    " + }, + "PluginsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

    The Plugins.zip S3 Object Version to update of your Amazon MWAA environment.

    " + }, + "PluginsS3Path":{ + "shape":"RelativePath", + "documentation":"

    The Plugins.zip S3 Path to update of your Amazon MWAA environment.

    " + }, + "RequirementsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

    The Requirements.txt S3 Object Version to update of your Amazon MWAA environment.

    " + }, + "RequirementsS3Path":{ + "shape":"RelativePath", + "documentation":"

    The Requirements.txt S3 Path to update of your Amazon MWAA environment.

    " + }, + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

    The S3 Source Bucket ARN to update of your Amazon MWAA environment.

    " + }, + "WebserverAccessMode":{ + "shape":"WebserverAccessMode", + "documentation":"

    The Webserver Access Mode to update of your Amazon MWAA environment.

    " + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

    The Weekly Maintenance Window Start to update of your Amazon MWAA environment.

    " + } + } + }, + "UpdateEnvironmentOutput":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"EnvironmentArn", + "documentation":"

    The ARN to update of your Amazon MWAA environment.

    " + } + } + }, + "UpdateError":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

    Error code of update.

    " + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    Error message of update.

    " + } + }, + "documentation":"

    Error information of update, if applicable.

    " + }, + "UpdateNetworkConfigurationInput":{ + "type":"structure", + "required":["SecurityGroupIds"], + "members":{ + "SecurityGroupIds":{ + "shape":"SecurityGroupList", + "documentation":"

    Provide a JSON list of 1 or more security group IDs by name, in the same VPC as the subnets.

    " + } + }, + "documentation":"

    Provide the security group and subnet IDs for the workers and scheduler.

    " + }, + "UpdateStatus":{ + "type":"string", + "enum":[ + "SUCCESS", + "PENDING", + "FAILED" + ] + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    ValidationException: The provided input is not valid.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "WebserverAccessMode":{ + "type":"string", + "enum":[ + "PRIVATE_ONLY", + "PUBLIC_ONLY" + ] + }, + "WebserverUrl":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^https://.+$" + }, + "WeeklyMaintenanceWindowStart":{ + "type":"string", + "max":9, + "min":1, + "pattern":"(MON|TUE|WED|THU|FRI|SAT|SUN):([01]\\d|2[0-3]):(00|30)" + } + }, + "documentation":"

    Amazon Managed Workflows for Apache Airflow

    This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What Is Amazon MWAA?.
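    For a hedged end-to-end example, listing environments with the ListEnvironments paginator declared in paginators-1.json (the paginator method name is the one the v2 codegen is assumed to produce):

        import software.amazon.awssdk.services.mwaa.MwaaClient;

        MwaaClient mwaa = MwaaClient.create();
        mwaa.listEnvironmentsPaginator(b -> b.maxResults(25))   // 25 is the model's maximum page size
            .stream()
            .flatMap(page -> page.environments().stream())
            .forEach(System.out::println);                      // prints each environment name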

    " +} diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 4b7c699d253b..11aa10dc938c 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + networkfirewall + AWS Java SDK :: Services :: Network Firewall + The AWS Java SDK for Network Firewall module holds the client classes that are used for + communicating with Network Firewall. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.networkfirewall + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json b/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..9c4b18bbd77b --- /dev/null +++ b/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListFirewallPolicies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FirewallPolicies" + }, + "ListFirewalls": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Firewalls" + }, + "ListRuleGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "RuleGroups" + }, + "ListTagsForResource": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Tags" + } + } +} diff --git a/services/networkfirewall/src/main/resources/codegen-resources/service-2.json b/services/networkfirewall/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..a3c890097acf --- /dev/null +++ b/services/networkfirewall/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2752 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-11-12", + "endpointPrefix":"network-firewall", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"Network Firewall", + "serviceFullName":"AWS Network Firewall", + "serviceId":"Network Firewall", + "signatureVersion":"v4", + "signingName":"network-firewall", + "targetPrefix":"NetworkFirewall_20201112", + "uid":"network-firewall-2020-11-12" + }, + "operations":{ + "AssociateFirewallPolicy":{ + "name":"AssociateFirewallPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateFirewallPolicyRequest"}, + "output":{"shape":"AssociateFirewallPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

    Associates a FirewallPolicy with a Firewall.

    A firewall policy defines how to monitor and manage your VPC network traffic, using a collection of inspection rule groups and other settings. Each firewall requires one firewall policy association, and you can use the same firewall policy for multiple firewalls.
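
    As a rough sketch with the AWS SDK for Java v2 client generated from this model (the firewall name and policy ARN are placeholders, and the builder method names assume the SDK's standard codegen conventions):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.AssociateFirewallPolicyResponse;

    public class AssociateFirewallPolicyExample {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                // Placeholder identifiers for illustration only.
                AssociateFirewallPolicyResponse response = client.associateFirewallPolicy(r -> r
                        .firewallName("example-firewall")
                        .firewallPolicyArn("arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/example-policy"));
                System.out.println("Associated policy: " + response.firewallPolicyArn());
            }
        }
    }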

    " + }, + "AssociateSubnets":{ + "name":"AssociateSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateSubnetsRequest"}, + "output":{"shape":"AssociateSubnetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

    Associates the specified subnets in the Amazon VPC to the firewall. You can specify one subnet for each of the Availability Zones that the VPC spans.

    This request creates an AWS Network Firewall firewall endpoint in each of the subnets. To enable the firewall's protections, you must also modify the VPC's route tables for each subnet's Availability Zone, to redirect the traffic that's coming into and going out of the zone through the firewall endpoint.
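
    A minimal sketch of the association call, assuming the SubnetMapping shape defined later in this model exposes a subnetId setter through the generated builder (all IDs and names are placeholders):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.SubnetMapping;

    public class AssociateSubnetsExample {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                // One subnet per Availability Zone that the VPC spans; IDs are placeholders.
                client.associateSubnets(r -> r
                        .firewallName("example-firewall")
                        .subnetMappings(
                                SubnetMapping.builder().subnetId("subnet-0123456789abcdef0").build(),
                                SubnetMapping.builder().subnetId("subnet-0fedcba9876543210").build()));
            }
        }
    }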

    " + }, + "CreateFirewall":{ + "name":"CreateFirewall", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFirewallRequest"}, + "output":{"shape":"CreateFirewallResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"}, + {"shape":"InsufficientCapacityException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

    Creates an AWS Network Firewall Firewall and accompanying FirewallStatus for a VPC.

    The firewall defines the configuration settings for an AWS Network Firewall firewall. The settings that you can define at creation include the firewall policy, the subnets in your VPC to use for the firewall endpoints, and any tags that are attached to the firewall AWS resource.

    After you create a firewall, you can provide additional settings, like the logging configuration.

    To update the settings for a firewall, you use the operations that apply to the settings themselves, for example UpdateLoggingConfiguration, AssociateSubnets, and UpdateFirewallDeleteProtection.

    To manage a firewall's tags, use the standard AWS resource tagging operations, ListTagsForResource, TagResource, and UntagResource.

    To retrieve information about firewalls, use ListFirewalls and DescribeFirewall.
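
    A rough end-to-end sketch of firewall creation with the generated client (the VPC, subnet, policy, and tag values are placeholders; method names assume the standard generated builders):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.CreateFirewallResponse;
    import software.amazon.awssdk.services.networkfirewall.model.SubnetMapping;
    import software.amazon.awssdk.services.networkfirewall.model.Tag;

    public class CreateFirewallExample {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                CreateFirewallResponse response = client.createFirewall(r -> r
                        .firewallName("example-firewall")
                        .firewallPolicyArn("arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/example-policy")
                        .vpcId("vpc-0123456789abcdef0")
                        .subnetMappings(SubnetMapping.builder().subnetId("subnet-0123456789abcdef0").build())
                        .tags(Tag.builder().key("customer").value("ExampleCorp").build()));
                System.out.println("Created firewall: " + response.firewall().firewallArn());
            }
        }
    }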

    " + }, + "CreateFirewallPolicy":{ + "name":"CreateFirewallPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFirewallPolicyRequest"}, + "output":{"shape":"CreateFirewallPolicyResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"InsufficientCapacityException"} + ], + "documentation":"

    Creates the firewall policy for the firewall according to the specifications.

    An AWS Network Firewall firewall policy defines the behavior of a firewall, in a collection of stateless and stateful rule groups and other settings. You can use one firewall policy for multiple firewalls.
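
    A minimal sketch of a policy that forwards all stateless traffic to the stateful engine; the statelessDefaultActions and statelessFragmentDefaultActions setters and the aws:forward_to_sfe action string are assumptions based on the FirewallPolicy shape defined later in this model:

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.CreateFirewallPolicyResponse;
    import software.amazon.awssdk.services.networkfirewall.model.FirewallPolicy;

    public class CreateFirewallPolicyExample {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                // Forward every packet and packet fragment to the stateful rule engine.
                FirewallPolicy policy = FirewallPolicy.builder()
                        .statelessDefaultActions("aws:forward_to_sfe")
                        .statelessFragmentDefaultActions("aws:forward_to_sfe")
                        .build();
                CreateFirewallPolicyResponse response = client.createFirewallPolicy(r -> r
                        .firewallPolicyName("example-policy")
                        .firewallPolicy(policy)
                        .description("Minimal example policy"));
                System.out.println("Policy ARN: " + response.firewallPolicyResponse().firewallPolicyArn());
            }
        }
    }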

    " + }, + "CreateRuleGroup":{ + "name":"CreateRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRuleGroupRequest"}, + "output":{"shape":"CreateRuleGroupResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"InsufficientCapacityException"} + ], + "documentation":"

    Creates the specified stateless or stateful rule group, which includes the rules for network traffic inspection, a capacity setting, and tags.

    You provide your rule group specification in your request using either RuleGroup or Rules.
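
    A sketch that supplies the rules through the Rules string rather than a RuleGroup object (the Suricata rule, group name, and capacity value are illustrative placeholders):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.CreateRuleGroupResponse;
    import software.amazon.awssdk.services.networkfirewall.model.RuleGroupType;

    public class CreateRuleGroupExample {
        public static void main(String[] args) {
            // One illustrative Suricata-format rule per line.
            String suricataRules =
                    "drop tcp any any -> any 23 (msg:\"block telnet\"; sid:1000001; rev:1;)";
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                CreateRuleGroupResponse response = client.createRuleGroup(r -> r
                        .ruleGroupName("example-stateful-rules")
                        .type(RuleGroupType.STATEFUL)
                        .capacity(100)
                        .rules(suricataRules));
                System.out.println("Rule group ARN: " + response.ruleGroupResponse().ruleGroupArn());
            }
        }
    }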

    " + }, + "DeleteFirewall":{ + "name":"DeleteFirewall", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFirewallRequest"}, + "output":{"shape":"DeleteFirewallResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

    Deletes the specified Firewall and its FirewallStatus. This operation requires the firewall's DeleteProtection flag to be FALSE. You can't revert this operation.

    You can check whether a firewall is in use by reviewing the route tables for the Availability Zones where you have firewall subnet mappings. Retrieve the subnet mappings by calling DescribeFirewall. You define and update the route tables through Amazon VPC. As needed, update the route tables for the zones to remove the firewall endpoints. When the route tables no longer use the firewall endpoints, you can remove the firewall safely.

    To delete a firewall, first remove its delete protection, if needed, by calling UpdateFirewallDeleteProtection, and then delete the firewall by calling DeleteFirewall.
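
    A rough sketch of that two-step teardown (the firewall name is a placeholder; the VPC route-table cleanup described above is not shown):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;

    public class DeleteFirewallExample {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                // Turn off delete protection first, then delete the firewall.
                client.updateFirewallDeleteProtection(r -> r
                        .firewallName("example-firewall")
                        .deleteProtection(false));
                client.deleteFirewall(r -> r.firewallName("example-firewall"));
            }
        }
    }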

    " + }, + "DeleteFirewallPolicy":{ + "name":"DeleteFirewallPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFirewallPolicyRequest"}, + "output":{"shape":"DeleteFirewallPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

    Deletes the specified FirewallPolicy.

    " + }, + "DeleteResourcePolicy":{ + "name":"DeleteResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteResourcePolicyRequest"}, + "output":{"shape":"DeleteResourcePolicyResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes a resource policy that you created in a PutResourcePolicy request.

    " + }, + "DeleteRuleGroup":{ + "name":"DeleteRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleGroupRequest"}, + "output":{"shape":"DeleteRuleGroupResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

    Deletes the specified RuleGroup.

    " + }, + "DescribeFirewall":{ + "name":"DescribeFirewall", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFirewallRequest"}, + "output":{"shape":"DescribeFirewallResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns the data objects for the specified firewall.

    " + }, + "DescribeFirewallPolicy":{ + "name":"DescribeFirewallPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFirewallPolicyRequest"}, + "output":{"shape":"DescribeFirewallPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

    Returns the data objects for the specified firewall policy.

    " + }, + "DescribeLoggingConfiguration":{ + "name":"DescribeLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoggingConfigurationRequest"}, + "output":{"shape":"DescribeLoggingConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns the logging configuration for the specified firewall.

    " + }, + "DescribeResourcePolicy":{ + "name":"DescribeResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResourcePolicyRequest"}, + "output":{"shape":"DescribeResourcePolicyResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a resource policy that you created in a PutResourcePolicy request.

    " + }, + "DescribeRuleGroup":{ + "name":"DescribeRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRuleGroupRequest"}, + "output":{"shape":"DescribeRuleGroupResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

    Returns the data objects for the specified rule group.

    " + }, + "DisassociateSubnets":{ + "name":"DisassociateSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateSubnetsRequest"}, + "output":{"shape":"DisassociateSubnetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

    Removes the specified subnet associations from the firewall. This removes the firewall endpoints from the subnets and removes any network filtering protections that the endpoints were providing.

    " + }, + "ListFirewallPolicies":{ + "name":"ListFirewallPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFirewallPoliciesRequest"}, + "output":{"shape":"ListFirewallPoliciesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

    Retrieves the metadata for the firewall policies that you have defined. Depending on your setting for max results and the number of firewall policies, a single call might not return the full list.

    " + }, + "ListFirewalls":{ + "name":"ListFirewalls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFirewallsRequest"}, + "output":{"shape":"ListFirewallsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves the metadata for the firewalls that you have defined. If you provide VPC identifiers in your request, this returns only the firewalls for those VPCs.

    Depending on your setting for max results and the number of firewalls, a single call might not return the full list.
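
    Because ListFirewalls is also registered in this change's paginators-1.json, the generated client exposes a paginator that follows NextToken across partial pages. A sketch, assuming the listed elements expose a firewallArn() accessor:

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;

    public class ListFirewallsExample {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                // The paginator keeps calling ListFirewalls until NextToken is exhausted.
                client.listFirewallsPaginator(r -> r.maxResults(25))
                      .stream()
                      .flatMap(page -> page.firewalls().stream())
                      .forEach(firewall -> System.out.println(firewall.firewallArn()));
            }
        }
    }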

    " + }, + "ListRuleGroups":{ + "name":"ListRuleGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRuleGroupsRequest"}, + "output":{"shape":"ListRuleGroupsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

    Retrieves the metadata for the rule groups that you have defined. Depending on your setting for max results and the number of rule groups, a single call might not return the full list.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieves the tags associated with the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

    You can tag the AWS resources that you manage through AWS Network Firewall: firewalls, firewall policies, and rule groups.

    " + }, + "PutResourcePolicy":{ + "name":"PutResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutResourcePolicyRequest"}, + "output":{"shape":"PutResourcePolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidResourcePolicyException"} + ], + "documentation":"

    Creates or updates an AWS Identity and Access Management policy for your rule group or firewall policy. Use this to share rule groups and firewall policies between accounts. This operation works in conjunction with the AWS Resource Access Manager (RAM) service to manage resource sharing for Network Firewall.

    Use this operation to create or update a resource policy for your rule group or firewall policy. In the policy, you specify the accounts that you want to share the resource with and the operations that you want the accounts to be able to perform.

    After you add an account in the resource policy, you run Resource Access Manager (RAM) operations to access and accept the shared rule group or firewall policy.

    For additional information about resource sharing using RAM, see AWS Resource Access Manager User Guide.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

    Adds the specified tags to the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

    You can tag the AWS resources that you manage through AWS Network Firewall: firewalls, firewall policies, and rule groups.
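
    A short sketch of tagging a firewall with the "customer" example key (the resource ARN is a placeholder):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.Tag;

    public class TagResourceExample {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                // Up to 50 tags can be attached to a resource.
                client.tagResource(r -> r
                        .resourceArn("arn:aws:network-firewall:us-east-1:123456789012:firewall/example-firewall")
                        .tags(Tag.builder().key("customer").value("ExampleCorp").build()));
            }
        }
    }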

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

    Removes the tags with the specified keys from the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

    You can manage tags for the AWS resources that you manage through AWS Network Firewall: firewalls, firewall policies, and rule groups.

    " + }, + "UpdateFirewallDeleteProtection":{ + "name":"UpdateFirewallDeleteProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFirewallDeleteProtectionRequest"}, + "output":{"shape":"UpdateFirewallDeleteProtectionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"ResourceOwnerCheckException"} + ], + "documentation":"

    Modifies the flag, DeleteProtection, which indicates whether it is possible to delete the firewall. If the flag is set to TRUE, the firewall is protected against deletion. This setting helps protect against accidentally deleting a firewall that's in use.

    " + }, + "UpdateFirewallDescription":{ + "name":"UpdateFirewallDescription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFirewallDescriptionRequest"}, + "output":{"shape":"UpdateFirewallDescriptionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"

    Modifies the description for the specified firewall. Use the description to help you identify the firewall when you're working with it.

    " + }, + "UpdateFirewallPolicy":{ + "name":"UpdateFirewallPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFirewallPolicyRequest"}, + "output":{"shape":"UpdateFirewallPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"

    Updates the properties of the specified firewall policy.

    " + }, + "UpdateFirewallPolicyChangeProtection":{ + "name":"UpdateFirewallPolicyChangeProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFirewallPolicyChangeProtectionRequest"}, + "output":{"shape":"UpdateFirewallPolicyChangeProtectionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"ResourceOwnerCheckException"} + ], + "documentation":"

    " + }, + "UpdateLoggingConfiguration":{ + "name":"UpdateLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLoggingConfigurationRequest"}, + "output":{"shape":"UpdateLoggingConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"LogDestinationPermissionException"} + ], + "documentation":"

    Sets the logging configuration for the specified firewall.

    To change the logging configuration, retrieve the LoggingConfiguration by calling DescribeLoggingConfiguration, then change it and provide the modified object to this update call. You must change the logging configuration one LogDestinationConfig at a time inside the retrieved LoggingConfiguration object.

    You can perform only one of the following actions in any call to UpdateLoggingConfiguration:

    • Create a new log destination object by adding a single LogDestinationConfig array element to LogDestinationConfigs.

    • Delete a log destination object by removing a single LogDestinationConfig array element from LogDestinationConfigs.

    • Change the LogDestination setting in a single LogDestinationConfig array element.

    You can't change the LogDestinationType or LogType in a LogDestinationConfig. To change these settings, delete the existing LogDestinationConfig object and create a new one, using two separate calls to this update operation.
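
    A sketch of the retrieve-modify-update flow that adds a single S3 alert destination; the LogType and LogDestinationType enum constants and the bucketName/prefix destination keys are assumptions about the logging shapes defined later in this model:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.DescribeLoggingConfigurationResponse;
    import software.amazon.awssdk.services.networkfirewall.model.LogDestinationConfig;
    import software.amazon.awssdk.services.networkfirewall.model.LogDestinationType;
    import software.amazon.awssdk.services.networkfirewall.model.LogType;
    import software.amazon.awssdk.services.networkfirewall.model.LoggingConfiguration;

    public class UpdateLoggingConfigurationExample {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                DescribeLoggingConfigurationResponse current =
                        client.describeLoggingConfiguration(r -> r.firewallName("example-firewall"));
                List<LogDestinationConfig> configs = new ArrayList<>();
                if (current.loggingConfiguration() != null) {
                    configs.addAll(current.loggingConfiguration().logDestinationConfigs());
                }
                // Add exactly one new destination per update call; bucket name and map keys are illustrative.
                configs.add(LogDestinationConfig.builder()
                        .logType(LogType.ALERT)
                        .logDestinationType(LogDestinationType.S3)
                        .logDestination(Map.of("bucketName", "example-log-bucket", "prefix", "alerts"))
                        .build());
                client.updateLoggingConfiguration(r -> r
                        .firewallName("example-firewall")
                        .loggingConfiguration(LoggingConfiguration.builder()
                                .logDestinationConfigs(configs)
                                .build()));
            }
        }
    }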

    " + }, + "UpdateRuleGroup":{ + "name":"UpdateRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRuleGroupRequest"}, + "output":{"shape":"UpdateRuleGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"

    Updates the rule settings for the specified rule group. You use a rule group by reference in one or more firewall policies. When you modify a rule group, you modify all firewall policies that use the rule group.

    To update a rule group, first call DescribeRuleGroup to retrieve the current RuleGroup object, update the object as needed, and then provide the updated object to this call.
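
    A sketch of that describe-then-update cycle, passing the returned update token back in (names are placeholders):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.DescribeRuleGroupResponse;
    import software.amazon.awssdk.services.networkfirewall.model.RuleGroup;
    import software.amazon.awssdk.services.networkfirewall.model.RuleGroupType;

    public class UpdateRuleGroupExample {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                DescribeRuleGroupResponse current = client.describeRuleGroup(r -> r
                        .ruleGroupName("example-stateful-rules")
                        .type(RuleGroupType.STATEFUL));
                // In a real update you would modify the retrieved RuleGroup here before sending it back.
                RuleGroup updated = current.ruleGroup();
                client.updateRuleGroup(r -> r
                        .ruleGroupName("example-stateful-rules")
                        .type(RuleGroupType.STATEFUL)
                        .ruleGroup(updated)
                        .updateToken(current.updateToken()));
            }
        }
    }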

    " + }, + "UpdateSubnetChangeProtection":{ + "name":"UpdateSubnetChangeProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSubnetChangeProtectionRequest"}, + "output":{"shape":"UpdateSubnetChangeProtectionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"ResourceOwnerCheckException"} + ], + "documentation":"

    " + } + }, + "shapes":{ + "ActionDefinition":{ + "type":"structure", + "members":{ + "PublishMetricAction":{ + "shape":"PublishMetricAction", + "documentation":"

    Stateless inspection criteria that publishes the specified metrics to Amazon CloudWatch for the matching packet. This setting defines a CloudWatch dimension value to be published.

    You can pair this custom action with any of the standard stateless rule actions. For example, you could pair this in a rule action with the standard action that forwards the packet for stateful inspection. Then, when a packet matches the rule, Network Firewall publishes metrics for the packet and forwards it.
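
    A sketch of a custom action wrapping a PublishMetricAction (the action name and dimension value are placeholders; you would reference the action name alongside a standard action such as aws:pass in your stateless rule actions):

    import software.amazon.awssdk.services.networkfirewall.model.ActionDefinition;
    import software.amazon.awssdk.services.networkfirewall.model.CustomAction;
    import software.amazon.awssdk.services.networkfirewall.model.Dimension;
    import software.amazon.awssdk.services.networkfirewall.model.PublishMetricAction;

    public class PublishMetricsActionExample {
        // Publishes a CloudWatch metric with the dimension value "ExampleTraffic" for matching packets.
        static final CustomAction PUBLISH_METRICS = CustomAction.builder()
                .actionName("ExamplePublishMetrics")
                .actionDefinition(ActionDefinition.builder()
                        .publishMetricAction(PublishMetricAction.builder()
                                .dimensions(Dimension.builder().value("ExampleTraffic").build())
                                .build())
                        .build())
                .build();
    }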

    " + } + }, + "documentation":"

    A custom action to use in stateless rule actions settings. This is used in CustomAction.

    " + }, + "ActionName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9]+$" + }, + "Address":{ + "type":"structure", + "required":["AddressDefinition"], + "members":{ + "AddressDefinition":{ + "shape":"AddressDefinition", + "documentation":"

    Specify an IP address or a block of IP addresses in Classless Inter-Domain Routing (CIDR) notation. Network Firewall supports all address ranges for IPv4.

    Examples:

    • To configure Network Firewall to inspect for the IP address 192.0.2.44, specify 192.0.2.44/32.

    • To configure Network Firewall to inspect for IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

    For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
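
    The two examples above, expressed as Address values (a small sketch using the same illustrative ranges):

    import software.amazon.awssdk.services.networkfirewall.model.Address;

    public class AddressExamples {
        // A /32 matches only 192.0.2.44; a /24 matches 192.0.2.0 through 192.0.2.255.
        static final Address SINGLE_HOST = Address.builder().addressDefinition("192.0.2.44/32").build();
        static final Address SUBNET_BLOCK = Address.builder().addressDefinition("192.0.2.0/24").build();
    }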

    " + } + }, + "documentation":"

    A single IP address specification. This is used in the MatchAttributes source and destination specifications.

    " + }, + "AddressDefinition":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^([a-fA-F\\d:\\.]+/\\d{1,3})$" + }, + "Addresses":{ + "type":"list", + "member":{"shape":"Address"} + }, + "AssociateFirewallPolicyRequest":{ + "type":"structure", + "required":["FirewallPolicyArn"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.
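
    A sketch of a conditional update that supplies the token and handles the conflict case (identifiers are placeholders):

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.DescribeFirewallResponse;
    import software.amazon.awssdk.services.networkfirewall.model.InvalidTokenException;

    public class ConditionalUpdateExample {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                DescribeFirewallResponse current = client.describeFirewall(r -> r.firewallName("example-firewall"));
                try {
                    // Including the token makes the change conditional on the firewall being unchanged.
                    client.associateFirewallPolicy(r -> r
                            .firewallName("example-firewall")
                            .firewallPolicyArn("arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/example-policy")
                            .updateToken(current.updateToken()));
                } catch (InvalidTokenException e) {
                    // The firewall changed since it was retrieved; describe it again and retry with the new token.
                    System.err.println("Stale token; retrieve the firewall again and retry: " + e.getMessage());
                }
            }
        }
    }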

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall policy.

    " + } + } + }, + "AssociateFirewallPolicyResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall policy.

    " + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + } + } + }, + "AssociateSubnetsRequest":{ + "type":"structure", + "required":["SubnetMappings"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "SubnetMappings":{ + "shape":"SubnetMappings", + "documentation":"

    The IDs of the subnets that you want to associate with the firewall.

    " + } + } + }, + "AssociateSubnetsResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "SubnetMappings":{ + "shape":"SubnetMappings", + "documentation":"

    The IDs of the subnets that are associated with the firewall.

    " + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + } + } + }, + "Attachment":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"AzSubnet", + "documentation":"

    The unique identifier of the subnet that you've specified to be used for a firewall endpoint.

    " + }, + "EndpointId":{ + "shape":"EndpointId", + "documentation":"

    The identifier of the firewall endpoint that Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint.

    " + }, + "Status":{ + "shape":"AttachmentStatus", + "documentation":"

    The current status of the firewall endpoint in the subnet. This value reflects both the instantiation of the endpoint in the VPC subnet and the sync states that are reported in the Config settings. When this value is READY, the endpoint is available and configured properly to handle network traffic. When the endpoint isn't available for traffic, this value will reflect its state, for example CREATING, DELETING, or FAILED.

    " + } + }, + "documentation":"

    The configuration and status for a single subnet that you've specified for use by the AWS Network Firewall firewall. This is part of the FirewallStatus.

    " + }, + "AttachmentStatus":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "SCALING", + "READY" + ] + }, + "AvailabilityZone":{"type":"string"}, + "AzSubnet":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^subnet-[0-9a-f]+$" + }, + "AzSubnets":{ + "type":"list", + "member":{"shape":"AzSubnet"} + }, + "Boolean":{"type":"boolean"}, + "CollectionMember_String":{"type":"string"}, + "ConfigurationSyncState":{ + "type":"string", + "enum":[ + "PENDING", + "IN_SYNC" + ] + }, + "CreateFirewallPolicyRequest":{ + "type":"structure", + "required":[ + "FirewallPolicyName", + "FirewallPolicy" + ], + "members":{ + "FirewallPolicyName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

    " + }, + "FirewallPolicy":{ + "shape":"FirewallPolicy", + "documentation":"

    The rule groups and policy actions to use in the firewall policy.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the firewall policy.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The key:value pairs to associate with the resource.

    " + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Indicates whether you want Network Firewall to just check the validity of the request, rather than run the request.

    If set to TRUE, Network Firewall checks whether the request can run successfully, but doesn't actually make the requested changes. The call returns the value that the request would return if you ran it with dry run set to FALSE, but doesn't make additions or changes to your resources. This option allows you to make sure that you have the required permissions to run the request and that your request parameters are valid.

    If set to FALSE, Network Firewall makes the requested changes to your resources.

    " + } + } + }, + "CreateFirewallPolicyResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "FirewallPolicyResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    A token used for optimistic locking. Network Firewall returns a token to your requests that access the firewall policy. The token marks the state of the policy resource at the time of the request.

    To make changes to the policy, you provide the token in your request. Network Firewall uses the token to ensure that the policy hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall policy again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallPolicyResponse":{ + "shape":"FirewallPolicyResponse", + "documentation":"

    The high-level properties of a firewall policy. This, along with the FirewallPolicy, define the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

    " + } + } + }, + "CreateFirewallRequest":{ + "type":"structure", + "required":[ + "FirewallName", + "FirewallPolicyArn", + "VpcId", + "SubnetMappings" + ], + "members":{ + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the FirewallPolicy that you want to use for the firewall.

    " + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

    The unique identifier of the VPC where Network Firewall should create the firewall.

    You can't change this setting after you create the firewall.

    " + }, + "SubnetMappings":{ + "shape":"SubnetMappings", + "documentation":"

    The public subnets to use for your Network Firewall firewalls. Each subnet must belong to a different Availability Zone in the VPC. Network Firewall creates a firewall endpoint in each subnet.

    " + }, + "DeleteProtection":{ + "shape":"Boolean", + "documentation":"

    A flag indicating whether it is possible to delete the firewall. A setting of TRUE indicates that the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. When you create a firewall, the operation initializes this flag to TRUE.

    " + }, + "SubnetChangeProtection":{ + "shape":"Boolean", + "documentation":"

    A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

    " + }, + "FirewallPolicyChangeProtection":{ + "shape":"Boolean", + "documentation":"

    A setting indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the firewall.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The key:value pairs to associate with the resource.

    " + } + } + }, + "CreateFirewallResponse":{ + "type":"structure", + "members":{ + "Firewall":{ + "shape":"Firewall", + "documentation":"

    The configuration settings for the firewall. These settings include the firewall policy and the subnets in your VPC to use for the firewall endpoints.

    " + }, + "FirewallStatus":{ + "shape":"FirewallStatus", + "documentation":"

    Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

    " + } + } + }, + "CreateRuleGroupRequest":{ + "type":"structure", + "required":[ + "RuleGroupName", + "Type", + "Capacity" + ], + "members":{ + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the rule group. You can't change the name of a rule group after you create it.

    " + }, + "RuleGroup":{ + "shape":"RuleGroup", + "documentation":"

    An object that defines the rule group rules.

    You must provide either this rule group setting or a Rules setting, but not both.

    " + }, + "Rules":{ + "shape":"RulesString", + "documentation":"

    The name of a file containing stateful rule group rules specifications in Suricata flat format, with one rule per line. Use this to import your existing Suricata compatible rule groups.

    You must provide either this rules setting or a populated RuleGroup setting, but not both.

    You can provide your rule group specification in a file through this setting when you create or update your rule group. The call response returns a RuleGroup object that Network Firewall has populated from your file. Network Firewall uses the file contents to populate the rule group rules, but does not maintain a reference to the file or use the file in any way after performing the create or update. If you call DescribeRuleGroup to retrieve the rule group, Network Firewall returns rules settings inside a RuleGroup object.

    " + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

    Indicates whether the rule group is stateless or stateful. If the rule group is stateless, it contains stateless rules. If it is stateful, it contains stateful rules.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the rule group.

    " + }, + "Capacity":{ + "shape":"RuleCapacity", + "documentation":"

    The maximum operating resources that this rule group can use. Rule group capacity is fixed at creation. When you update a rule group, you are limited to this capacity. When you reference a rule group from a firewall policy, Network Firewall reserves this capacity for the rule group.

    You can retrieve the capacity that would be required for a rule group before you create the rule group by calling CreateRuleGroup with DryRun set to TRUE.

    You can't change or exceed this capacity when you update the rule group, so leave room for your rule group to grow.

    Capacity for a stateless rule group

    For a stateless rule group, the capacity required is the sum of the capacity requirements of the individual rules that you expect to have in the rule group.

    To calculate the capacity requirement of a single rule, multiply the capacity requirement values of each of the rule's match settings:

    • A match setting with no criteria specified has a value of 1.

    • A match setting with Any specified has a value of 1.

    • All other match settings have a value equal to the number of elements provided in the setting. For example, a protocol setting [\"UDP\"] and a source setting [\"10.0.0.0/24\"] each have a value of 1. A protocol setting [\"UDP\",\"TCP\"] has a value of 2. A source setting [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"] has a value of 3.

    A rule with no criteria specified in any of its match settings has a capacity requirement of 1. A rule with protocol setting [\"UDP\",\"TCP\"], source setting [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"], and a single specification or no specification for each of the other match settings has a capacity requirement of 6.

    Capacity for a stateful rule group

    For a stateful rule group, the minimum capacity required is the number of individual rules that you expect to have in the rule group.
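
    Restating the stateless worked example above as plain arithmetic (the values come from the example rule, not from an API call):

    public class RuleCapacityExample {
        public static void main(String[] args) {
            // Protocols ["UDP","TCP"] contribute 2, three source CIDRs contribute 3,
            // and every other match setting is unspecified or single-valued and contributes 1.
            int statelessRuleCapacity = 2 * 3 * 1;
            System.out.println("Capacity required for the example stateless rule: " + statelessRuleCapacity); // 6
        }
    }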

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The key:value pairs to associate with the resource.

    " + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Indicates whether you want Network Firewall to just check the validity of the request, rather than run the request.

    If set to TRUE, Network Firewall checks whether the request can run successfully, but doesn't actually make the requested changes. The call returns the value that the request would return if you ran it with dry run set to FALSE, but doesn't make additions or changes to your resources. This option allows you to make sure that you have the required permissions to run the request and that your request parameters are valid.

    If set to FALSE, Network Firewall makes the requested changes to your resources.

    " + } + } + }, + "CreateRuleGroupResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "RuleGroupResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    A token used for optimistic locking. Network Firewall returns a token to your requests that access the rule group. The token marks the state of the rule group resource at the time of the request.

    To make changes to the rule group, you provide the token in your request. Network Firewall uses the token to ensure that the rule group hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the rule group again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "RuleGroupResponse":{ + "shape":"RuleGroupResponse", + "documentation":"

    The high-level properties of a rule group. This, along with the RuleGroup, define the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

    " + } + } + }, + "CustomAction":{ + "type":"structure", + "required":[ + "ActionName", + "ActionDefinition" + ], + "members":{ + "ActionName":{ + "shape":"ActionName", + "documentation":"

    The descriptive name of the custom action. You can't change the name of a custom action after you create it.

    " + }, + "ActionDefinition":{ + "shape":"ActionDefinition", + "documentation":"

    The custom action associated with the action name.

    " + } + }, + "documentation":"

    An optional, non-standard action to use for stateless packet handling. You can define this in addition to the standard action that you must specify.

    You define and name the custom actions that you want to be able to use, and then you reference them by name in your actions settings.

    You can use custom actions in the following places:

    • In a rule group's StatelessRulesAndCustomActions specification. The custom actions are available for use by name inside the StatelessRulesAndCustomActions where you define them. You can use them for your stateless rule actions to specify what to do with a packet that matches the rule's match attributes.

    • In a FirewallPolicy specification, in StatelessCustomActions. The custom actions are available for use inside the policy where you define them. You can use them for the policy's default stateless actions settings to specify what to do with packets that don't match any of the policy's stateless rules.

    " + }, + "CustomActions":{ + "type":"list", + "member":{"shape":"CustomAction"} + }, + "DeleteFirewallPolicyRequest":{ + "type":"structure", + "members":{ + "FirewallPolicyName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall policy.

    You must specify the ARN or the name, and you can specify both.

    " + } + } + }, + "DeleteFirewallPolicyResponse":{ + "type":"structure", + "required":["FirewallPolicyResponse"], + "members":{ + "FirewallPolicyResponse":{ + "shape":"FirewallPolicyResponse", + "documentation":"

    The object containing the definition of the FirewallPolicyResponse that you asked to delete.

    " + } + } + }, + "DeleteFirewallRequest":{ + "type":"structure", + "members":{ + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + } + } + }, + "DeleteFirewallResponse":{ + "type":"structure", + "members":{ + "Firewall":{"shape":"Firewall"}, + "FirewallStatus":{"shape":"FirewallStatus"} + } + }, + "DeleteResourcePolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the rule group or firewall policy whose resource policy you want to delete.

    " + } + } + }, + "DeleteResourcePolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteRuleGroupRequest":{ + "type":"structure", + "members":{ + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the rule group. You can't change the name of a rule group after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "RuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the rule group.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

    Indicates whether the rule group is stateless or stateful. If the rule group is stateless, it contains stateless rules. If it is stateful, it contains stateful rules.

    This setting is required for requests that do not include the RuleGroupARN.

    " + } + } + }, + "DeleteRuleGroupResponse":{ + "type":"structure", + "required":["RuleGroupResponse"], + "members":{ + "RuleGroupResponse":{ + "shape":"RuleGroupResponse", + "documentation":"

    The high-level properties of a rule group. This, along with the RuleGroup, define the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

    " + } + } + }, + "DescribeFirewallPolicyRequest":{ + "type":"structure", + "members":{ + "FirewallPolicyName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall policy.

    You must specify the ARN or the name, and you can specify both.

    " + } + } + }, + "DescribeFirewallPolicyResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "FirewallPolicyResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    A token used for optimistic locking. Network Firewall returns a token to your requests that access the firewall policy. The token marks the state of the policy resource at the time of the request.

    To make changes to the policy, you provide the token in your request. Network Firewall uses the token to ensure that the policy hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall policy again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallPolicyResponse":{ + "shape":"FirewallPolicyResponse", + "documentation":"

    The high-level properties of a firewall policy. This, along with the FirewallPolicy, define the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

    " + }, + "FirewallPolicy":{ + "shape":"FirewallPolicy", + "documentation":"

    The policy for the specified firewall policy.

    " + } + } + }, + "DescribeFirewallRequest":{ + "type":"structure", + "members":{ + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + } + } + }, + "DescribeFirewallResponse":{ + "type":"structure", + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "Firewall":{ + "shape":"Firewall", + "documentation":"

    The configuration settings for the firewall. These settings include the firewall policy and the subnets in your VPC to use for the firewall endpoints.

    " + }, + "FirewallStatus":{ + "shape":"FirewallStatus", + "documentation":"

    Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

    " + } + } + }, + "DescribeLoggingConfigurationRequest":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + } + } + }, + "DescribeLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + }, + "LoggingConfiguration":{"shape":"LoggingConfiguration"} + } + }, + "DescribeResourcePolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the rule group or firewall policy whose resource policy you want to retrieve.

    " + } + } + }, + "DescribeResourcePolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"PolicyString", + "documentation":"

    The AWS Identity and Access Management policy for the resource.

    " + } + } + }, + "DescribeRuleGroupRequest":{ + "type":"structure", + "members":{ + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the rule group. You can't change the name of a rule group after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "RuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the rule group.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

    Indicates whether the rule group is stateless or stateful. If the rule group is stateless, it contains stateless rules. If it is stateful, it contains stateful rules.

    This setting is required for requests that do not include the RuleGroupARN.

    " + } + } + }, + "DescribeRuleGroupResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "RuleGroupResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    A token used for optimistic locking. Network Firewall returns a token to your requests that access the rule group. The token marks the state of the rule group resource at the time of the request.

    To make changes to the rule group, you provide the token in your request. Network Firewall uses the token to ensure that the rule group hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the rule group again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "RuleGroup":{ + "shape":"RuleGroup", + "documentation":"

    The object that defines the rules in a rule group. This, along with RuleGroupResponse, define the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

    AWS Network Firewall uses a rule group to inspect and control network traffic. You define stateless rule groups to inspect individual packets and you define stateful rule groups to inspect packets in the context of their traffic flow.

    To use a rule group, you include it by reference in a Network Firewall firewall policy, then you use the policy in a firewall. You can reference a rule group from more than one firewall policy, and you can use a firewall policy in more than one firewall.

    " + }, + "RuleGroupResponse":{ + "shape":"RuleGroupResponse", + "documentation":"

    The high-level properties of a rule group. This, along with the RuleGroup, define the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

    " + } + } + }, + "Description":{ + "type":"string", + "max":512, + "pattern":"^.*$" + }, + "Destination":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^.*$" + }, + "Dimension":{ + "type":"structure", + "required":["Value"], + "members":{ + "Value":{ + "shape":"DimensionValue", + "documentation":"

    The value to use in the custom metric dimension.

    " + } + }, + "documentation":"

    The value to use in an Amazon CloudWatch custom metric dimension. This is used in the PublishMetrics CustomAction. A CloudWatch custom metric dimension is a name/value pair that's part of the identity of a metric.

    AWS Network Firewall sets the dimension name to CustomAction and you provide the dimension value.

    For more information about CloudWatch custom metric dimensions, see Publishing Custom Metrics in the Amazon CloudWatch User Guide.

    " + }, + "DimensionValue":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9-_ ]+$" + }, + "Dimensions":{ + "type":"list", + "member":{"shape":"Dimension"}, + "max":1, + "min":1 + }, + "DisassociateSubnetsRequest":{ + "type":"structure", + "required":["SubnetIds"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "SubnetIds":{ + "shape":"AzSubnets", + "documentation":"

    The unique identifiers for the subnets that you want to disassociate.

    " + } + } + }, + "DisassociateSubnetsResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "SubnetMappings":{ + "shape":"SubnetMappings", + "documentation":"

    The IDs of the subnets that are associated with the firewall.

    " + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + } + } + }, + "EndpointId":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "Firewall":{ + "type":"structure", + "required":[ + "FirewallPolicyArn", + "VpcId", + "SubnetMappings", + "FirewallId" + ], + "members":{ + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall policy.

    The relationship of firewall to firewall policy is many to one. Each firewall requires one firewall policy association, and you can use the same firewall policy for multiple firewalls.

    " + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

    The unique identifier of the VPC where the firewall is in use.

    " + }, + "SubnetMappings":{ + "shape":"SubnetMappings", + "documentation":"

    The public subnets that Network Firewall is using for the firewall. Each subnet must belong to a different Availability Zone.

    " + }, + "DeleteProtection":{ + "shape":"Boolean", + "documentation":"

    A flag indicating whether it is possible to delete the firewall. A setting of TRUE indicates that the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. When you create a firewall, the operation initializes this flag to TRUE.

    " + }, + "SubnetChangeProtection":{ + "shape":"Boolean", + "documentation":"

    A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

    " + }, + "FirewallPolicyChangeProtection":{ + "shape":"Boolean", + "documentation":"

    A setting indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the firewall.

    " + }, + "FirewallId":{ + "shape":"ResourceId", + "documentation":"

    The unique identifier for the firewall.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    " + } + }, + "documentation":"

    The firewall defines the configuration settings for an AWS Network Firewall firewall. These settings include the firewall policy, the subnets in your VPC to use for the firewall endpoints, and any tags that are attached to the firewall AWS resource.

    The status of the firewall, for example whether it's ready to filter network traffic, is provided in the corresponding FirewallStatus. You can retrieve both objects by calling DescribeFirewall.

    " + }, + "FirewallMetadata":{ + "type":"structure", + "members":{ + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + } + }, + "documentation":"

    High-level information about a firewall, returned by operations like create and describe. You can use the information provided in the metadata to retrieve and manage a firewall.

    " + }, + "FirewallPolicies":{ + "type":"list", + "member":{"shape":"FirewallPolicyMetadata"} + }, + "FirewallPolicy":{ + "type":"structure", + "required":[ + "StatelessDefaultActions", + "StatelessFragmentDefaultActions" + ], + "members":{ + "StatelessRuleGroupReferences":{ + "shape":"StatelessRuleGroupReferences", + "documentation":"

    References to the stateless rule groups that are used in the policy. These define the matching criteria in stateless rules.

    " + }, + "StatelessDefaultActions":{ + "shape":"StatelessActions", + "documentation":"

    The actions to take on a packet if it doesn't match any of the stateless rules in the policy. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe.

    You must specify one of the standard actions: aws:pass, aws:drop, or aws:forward_to_sfe. In addition, you can specify custom actions that are compatible with your standard action choice.

    For example, you could specify [\"aws:pass\"] or you could specify [\"aws:pass\", \"customActionName\"]. For information about compatibility, see the custom action descriptions under CustomAction.

    " + }, + "StatelessFragmentDefaultActions":{ + "shape":"StatelessActions", + "documentation":"

    The actions to take on a fragmented packet if it doesn't match any of the stateless rules in the policy. If you want non-matching fragmented packets to be forwarded for stateful inspection, specify aws:forward_to_sfe.

    You must specify one of the standard actions: aws:pass, aws:drop, or aws:forward_to_sfe. In addition, you can specify custom actions that are compatible with your standard action choice.

    For example, you could specify [\"aws:pass\"] or you could specify [\"aws:pass\", \"customActionName\"]. For information about compatibility, see the custom action descriptions under CustomAction.

    " + }, + "StatelessCustomActions":{ + "shape":"CustomActions", + "documentation":"

    The custom action definitions that are available for use in the firewall policy's StatelessDefaultActions setting. You name each custom action that you define, and then you can use it by name in your default actions specifications.

    " + }, + "StatefulRuleGroupReferences":{ + "shape":"StatefulRuleGroupReferences", + "documentation":"

    References to the stateful rule groups that are used in the policy. These define the inspection criteria in stateful rules.

    " + } + }, + "documentation":"

    The firewall policy defines the behavior of a firewall using a collection of stateless and stateful rule groups and other settings. You can use one firewall policy for multiple firewalls.

    This, along with FirewallPolicyResponse, defines the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.
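
    A minimal sketch of a firewall policy built with the generated v2 Java model classes; the default actions, priority, and rule group ARNs are illustrative only.

      import software.amazon.awssdk.services.networkfirewall.model.FirewallPolicy;
      import software.amazon.awssdk.services.networkfirewall.model.StatefulRuleGroupReference;
      import software.amazon.awssdk.services.networkfirewall.model.StatelessRuleGroupReference;

      public class FirewallPolicyExample {
          public static void main(String[] args) {
              FirewallPolicy policy = FirewallPolicy.builder()
                      // Packets that match no stateless rule go on to stateful inspection.
                      .statelessDefaultActions("aws:forward_to_sfe")
                      // Fragments that match no stateless rule are dropped.
                      .statelessFragmentDefaultActions("aws:drop")
                      .statelessRuleGroupReferences(StatelessRuleGroupReference.builder()
                              .resourceArn("arn:aws:network-firewall:us-east-1:111122223333:stateless-rulegroup/example") // placeholder
                              .priority(10)
                              .build())
                      .statefulRuleGroupReferences(StatefulRuleGroupReference.builder()
                              .resourceArn("arn:aws:network-firewall:us-east-1:111122223333:stateful-rulegroup/example") // placeholder
                              .build())
                      .build();
              System.out.println(policy);
          }
      }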

    " + }, + "FirewallPolicyMetadata":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

    " + }, + "Arn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall policy.

    " + } + }, + "documentation":"

    High-level information about a firewall policy, returned by operations like create and describe. You can use the information provided in the metadata to retrieve and manage a firewall policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

    " + }, + "FirewallPolicyResponse":{ + "type":"structure", + "required":[ + "FirewallPolicyName", + "FirewallPolicyArn", + "FirewallPolicyId" + ], + "members":{ + "FirewallPolicyName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

    " + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall policy.

    If this response is for a create request that had DryRun set to TRUE, then this ARN is a placeholder that isn't attached to a valid resource.

    " + }, + "FirewallPolicyId":{ + "shape":"ResourceId", + "documentation":"

    The unique identifier for the firewall policy.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the firewall policy.

    " + }, + "FirewallPolicyStatus":{ + "shape":"ResourceStatus", + "documentation":"

    The current status of the firewall policy. You can retrieve this for a firewall policy by calling DescribeFirewallPolicy and providing the firewall policy's name or ARN.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The key:value pairs to associate with the resource.

    " + } + }, + "documentation":"

    The high-level properties of a firewall policy. This, along with the FirewallPolicy, defines the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

    " + }, + "FirewallStatus":{ + "type":"structure", + "required":[ + "Status", + "ConfigurationSyncStateSummary" + ], + "members":{ + "Status":{ + "shape":"FirewallStatusValue", + "documentation":"

    The readiness of the configured firewall to handle network traffic across all of the Availability Zones where you've configured it. This setting is READY only when the ConfigurationSyncStateSummary value is IN_SYNC and the Attachment Status values for all of the configured subnets are READY.

    " + }, + "ConfigurationSyncStateSummary":{ + "shape":"ConfigurationSyncState", + "documentation":"

    The configuration sync state for the firewall. This summarizes the sync states reported in the Config settings for all of the Availability Zones where you have configured the firewall.

    When you create a firewall or update its configuration, for example by adding a rule group to its firewall policy, Network Firewall distributes the configuration changes to all zones where the firewall is in use. This summary indicates whether the configuration changes have been applied everywhere.

    This status must be IN_SYNC for the firewall to be ready for use, but it doesn't indicate that the firewall is ready. The Status setting indicates firewall readiness.

    " + }, + "SyncStates":{ + "shape":"SyncStates", + "documentation":"

    The subnets that you've configured for use by the Network Firewall firewall. This contains one array element per Availability Zone where you've configured a subnet. These objects provide details of the information that is summarized in the ConfigurationSyncStateSummary and Status, broken down by zone and configuration object.

    " + } + }, + "documentation":"

    Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

    " + }, + "FirewallStatusValue":{ + "type":"string", + "enum":[ + "PROVISIONING", + "DELETING", + "READY" + ] + }, + "Firewalls":{ + "type":"list", + "member":{"shape":"FirewallMetadata"} + }, + "Flags":{ + "type":"list", + "member":{"shape":"TCPFlag"} + }, + "GeneratedRulesType":{ + "type":"string", + "enum":[ + "ALLOWLIST", + "DENYLIST" + ] + }, + "HashMapKey":{ + "type":"string", + "max":50, + "min":3, + "pattern":"^[0-9A-Za-z.\\-_@\\/]+$" + }, + "HashMapValue":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\s\\S]*$" + }, + "Header":{ + "type":"structure", + "required":[ + "Protocol", + "Source", + "SourcePort", + "Direction", + "Destination", + "DestinationPort" + ], + "members":{ + "Protocol":{ + "shape":"StatefulRuleProtocol", + "documentation":"

    The protocol to inspect for. To match with any protocol, specify ANY.

    " + }, + "Source":{ + "shape":"Source", + "documentation":"

    The source IP address or address range to inspect for, in CIDR notation. To match with any address, specify ANY.

    Specify an IP address or a block of IP addresses in Classless Inter-Domain Routing (CIDR) notation. Network Firewall supports all address ranges for IPv4.

    Examples:

    • To configure Network Firewall to inspect for the IP address 192.0.2.44, specify 192.0.2.44/32.

    • To configure Network Firewall to inspect for IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

    For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

    " + }, + "SourcePort":{ + "shape":"Port", + "documentation":"

    The source port to inspect for. You can specify an individual port, for example 1994, and you can specify a port range, for example 1990-1994. To match with any port, specify ANY.

    " + }, + "Direction":{ + "shape":"StatefulRuleDirection", + "documentation":"

    The direction of traffic flow to inspect. If set to ANY, the inspection matches bidirectional traffic, both from the source to the destination and from the destination to the source. If set to FORWARD, the inspection only matches traffic going from the source to the destination.

    " + }, + "Destination":{ + "shape":"Destination", + "documentation":"

    The destination IP address or address range to inspect for, in CIDR notation. To match with any address, specify ANY.

    Specify an IP address or a block of IP addresses in Classless Inter-Domain Routing (CIDR) notation. Network Firewall supports all address ranges for IPv4.

    Examples:

    • To configure Network Firewall to inspect for the IP address 192.0.2.44, specify 192.0.2.44/32.

    • To configure Network Firewall to inspect for IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

    For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

    " + }, + "DestinationPort":{ + "shape":"Port", + "documentation":"

    The destination port to inspect for. You can specify an individual port, for example 1994, and you can specify a port range, for example 1990-1994. To match with any port, specify ANY.

    " + } + }, + "documentation":"

    The 5-tuple criteria for AWS Network Firewall to use to inspect packet headers in stateful traffic flow inspection. Traffic flows that match the criteria are a match for the corresponding StatefulRule.

    " + }, + "IPSet":{ + "type":"structure", + "required":["Definition"], + "members":{ + "Definition":{ + "shape":"VariableDefinitionList", + "documentation":"

    The list of IP addresses and address ranges, in CIDR notation.

    " + } + }, + "documentation":"

    A list of IP addresses and address ranges, in CIDR notation. This is part of a RuleVariables.

    " + }, + "IPSets":{ + "type":"map", + "key":{"shape":"RuleVariableName"}, + "value":{"shape":"IPSet"} + }, + "InsufficientCapacityException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS doesn't currently have enough available capacity to fulfill your request. Try your request later.

    ", + "exception":true, + "fault":true + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Your request is valid, but Network Firewall couldn’t perform the operation because of a system problem. Retry your request.

    ", + "exception":true, + "fault":true + }, + "InvalidOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The operation failed because it's not valid. For example, you might have tried to delete a rule group or firewall policy that's in use.

    ", + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The operation failed because of a problem with your request. Examples include:

    • You specified an unsupported parameter name or value.

    • You tried to update a property with a value that isn't among the available types.

    • Your request references an ARN that is malformed, or corresponds to a resource that isn't valid in the context of the request.

    ", + "exception":true + }, + "InvalidResourcePolicyException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "InvalidTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The token you provided is stale or isn't valid for the operation.

    ", + "exception":true + }, + "Keyword":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*" + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Unable to perform the operation because doing so would violate a limit setting.

    ", + "exception":true + }, + "ListFirewallPoliciesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

    " + }, + "MaxResults":{ + "shape":"PaginationMaxResults", + "documentation":"

    The maximum number of objects that you want Network Firewall to return for this request. If more objects are available, in the response, Network Firewall provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

    " + } + } + }, + "ListFirewallPoliciesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

    " + }, + "FirewallPolicies":{ + "shape":"FirewallPolicies", + "documentation":"

    The metadata for the firewall policies. Depending on your setting for max results and the number of firewall policies that you have, this might not be the full list.

    " + } + } + }, + "ListFirewallsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

    " + }, + "VpcIds":{ + "shape":"VpcIds", + "documentation":"

    The unique identifiers of the VPCs that you want Network Firewall to retrieve the firewalls for. Leave this blank to retrieve all firewalls that you have defined.

    " + }, + "MaxResults":{ + "shape":"PaginationMaxResults", + "documentation":"

    The maximum number of objects that you want Network Firewall to return for this request. If more objects are available, in the response, Network Firewall provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

    " + } + } + }, + "ListFirewallsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

    " + }, + "Firewalls":{ + "shape":"Firewalls", + "documentation":"

    The firewall metadata objects for the VPCs that you specified. Depending on your setting for max results and the number of firewalls you have, a single call might not return the full list.
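
    The NextToken/MaxResults pattern described for these list operations is the usual paginated-call loop. A minimal sketch with ListFirewalls and the v2 Java client:

      import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
      import software.amazon.awssdk.services.networkfirewall.model.FirewallMetadata;
      import software.amazon.awssdk.services.networkfirewall.model.ListFirewallsRequest;
      import software.amazon.awssdk.services.networkfirewall.model.ListFirewallsResponse;

      public class ListFirewallsExample {
          public static void main(String[] args) {
              NetworkFirewallClient firewall = NetworkFirewallClient.create();
              String nextToken = null;
              do {
                  ListFirewallsResponse page = firewall.listFirewalls(ListFirewallsRequest.builder()
                          .maxResults(50)       // ask for up to 50 objects per call
                          .nextToken(nextToken) // null (unset) on the first call
                          .build());
                  for (FirewallMetadata metadata : page.firewalls()) {
                      System.out.println(metadata.firewallName() + " -> " + metadata.firewallArn());
                  }
                  nextToken = page.nextToken(); // non-null while more results remain
              } while (nextToken != null);
          }
      }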

    " + } + } + }, + "ListRuleGroupsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

    " + }, + "MaxResults":{ + "shape":"PaginationMaxResults", + "documentation":"

    The maximum number of objects that you want Network Firewall to return for this request. If more objects are available, in the response, Network Firewall provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

    " + } + } + }, + "ListRuleGroupsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

    " + }, + "RuleGroups":{ + "shape":"RuleGroups", + "documentation":"

    The rule group metadata objects that you've defined. Depending on your setting for max results and the number of rule groups, this might not be the full list.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

    " + }, + "MaxResults":{ + "shape":"TagsPaginationMaxResults", + "documentation":"

    The maximum number of objects that you want Network Firewall to return for this request. If more objects are available, in the response, Network Firewall provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

    " + }, + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags that are associated with the resource.

    " + } + } + }, + "LogDestinationConfig":{ + "type":"structure", + "required":[ + "LogType", + "LogDestinationType", + "LogDestination" + ], + "members":{ + "LogType":{ + "shape":"LogType", + "documentation":"

    The type of log to send. Alert logs report traffic that matches a StatefulRule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs.

    " + }, + "LogDestinationType":{ + "shape":"LogDestinationType", + "documentation":"

    The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream.

    " + }, + "LogDestination":{ + "shape":"LogDestinationMap", + "documentation":"

    The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.

    • For an Amazon S3 bucket, provide the name of the bucket, with key bucketName, and optionally provide a prefix, with key prefix. The following example specifies an Amazon S3 bucket named DOC-EXAMPLE-BUCKET and the prefix alerts:

      \"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }

    • For a CloudWatch log group, provide the name of the CloudWatch log group, with key logGroup. The following example specifies a log group named alert-log-group:

      \"LogDestination\": { \"logGroup\": \"alert-log-group\" }

    • For a Kinesis Data Firehose delivery stream, provide the name of the delivery stream, with key deliveryStream. The following example specifies a delivery stream named alert-delivery-stream:

      \"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }

    " + } + }, + "documentation":"

    Defines where AWS Network Firewall sends logs for the firewall for one log type. This is used in LoggingConfiguration. You can send each type of log to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream.

    Network Firewall generates logs for stateful rule groups. You can save alert and flow log types. The stateful rules engine records flow logs for all network traffic that it receives. It records alert logs for traffic that matches stateful rules that have the rule action set to DROP or ALERT.
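
    A minimal sketch that maps the S3 key:value example above onto the generated v2 Java model, assuming the UpdateLoggingConfiguration operation defined elsewhere in this model; the bucket and firewall names are placeholders.

      import java.util.HashMap;
      import java.util.Map;
      import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
      import software.amazon.awssdk.services.networkfirewall.model.LogDestinationConfig;
      import software.amazon.awssdk.services.networkfirewall.model.LogDestinationType;
      import software.amazon.awssdk.services.networkfirewall.model.LogType;
      import software.amazon.awssdk.services.networkfirewall.model.LoggingConfiguration;

      public class LoggingConfigurationExample {
          public static void main(String[] args) {
              // Key names match the documented key:value mapping for the S3 destination type.
              Map<String, String> destination = new HashMap<>();
              destination.put("bucketName", "DOC-EXAMPLE-BUCKET"); // placeholder
              destination.put("prefix", "alerts");

              LogDestinationConfig alertsToS3 = LogDestinationConfig.builder()
                      .logType(LogType.ALERT)
                      .logDestinationType(LogDestinationType.S3)
                      .logDestination(destination)
                      .build();

              NetworkFirewallClient firewall = NetworkFirewallClient.create();
              firewall.updateLoggingConfiguration(r -> r
                      .firewallName("example-firewall") // placeholder
                      .loggingConfiguration(LoggingConfiguration.builder()
                              .logDestinationConfigs(alertsToS3)
                              .build()));
          }
      }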

    " + }, + "LogDestinationConfigs":{ + "type":"list", + "member":{"shape":"LogDestinationConfig"} + }, + "LogDestinationMap":{ + "type":"map", + "key":{"shape":"HashMapKey"}, + "value":{"shape":"HashMapValue"} + }, + "LogDestinationPermissionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Unable to send logs to a configured logging destination.

    ", + "exception":true + }, + "LogDestinationType":{ + "type":"string", + "enum":[ + "S3", + "CloudWatchLogs", + "KinesisDataFirehose" + ], + "max":30, + "min":2, + "pattern":"[0-9A-Za-z]+" + }, + "LogType":{ + "type":"string", + "enum":[ + "ALERT", + "FLOW" + ] + }, + "LoggingConfiguration":{ + "type":"structure", + "required":["LogDestinationConfigs"], + "members":{ + "LogDestinationConfigs":{ + "shape":"LogDestinationConfigs", + "documentation":"

    Defines the logging destinations for the logs for a firewall. Network Firewall generates logs for stateful rule groups.

    " + } + }, + "documentation":"

    Defines how AWS Network Firewall performs logging for a Firewall.

    " + }, + "MatchAttributes":{ + "type":"structure", + "members":{ + "Sources":{ + "shape":"Addresses", + "documentation":"

    The source IP addresses and address ranges to inspect for, in CIDR notation. If not specified, this matches with any source address.

    " + }, + "Destinations":{ + "shape":"Addresses", + "documentation":"

    The destination IP addresses and address ranges to inspect for, in CIDR notation. If not specified, this matches with any destination address.

    " + }, + "SourcePorts":{ + "shape":"PortRanges", + "documentation":"

    The source ports to inspect for. If not specified, this matches with any source port. This setting is only used for protocols 6 (TCP) and 17 (UDP).

    You can specify individual ports, for example 1994, and you can specify port ranges, for example 1990-1994.

    " + }, + "DestinationPorts":{ + "shape":"PortRanges", + "documentation":"

    The destination ports to inspect for. If not specified, this matches with any destination port. This setting is only used for protocols 6 (TCP) and 17 (UDP).

    You can specify individual ports, for example 1994, and you can specify port ranges, for example 1990-1994.

    " + }, + "Protocols":{ + "shape":"ProtocolNumbers", + "documentation":"

    The protocols to inspect for, specified using each protocol's assigned internet protocol number (IANA). If not specified, this matches with any protocol.

    " + }, + "TCPFlags":{ + "shape":"TCPFlags", + "documentation":"

    The TCP flags and masks to inspect for. If not specified, this matches with any settings. This setting is only used for protocol 6 (TCP).

    " + } + }, + "documentation":"

    Criteria for Network Firewall to use to inspect an individual packet in stateless rule inspection. Each match attributes set can include one or more items such as IP address, CIDR range, port number, protocol, and TCP flags.

    " + }, + "PaginationMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[0-9A-Za-z:\\/+=]+$" + }, + "PerObjectStatus":{ + "type":"structure", + "members":{ + "SyncStatus":{ + "shape":"PerObjectSyncStatus", + "documentation":"

    " + } + }, + "documentation":"

    " + }, + "PerObjectSyncStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_SYNC" + ] + }, + "PolicyString":{ + "type":"string", + "max":395000, + "min":1, + "pattern":".*\\S.*" + }, + "Port":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^.*$" + }, + "PortRange":{ + "type":"structure", + "required":[ + "FromPort", + "ToPort" + ], + "members":{ + "FromPort":{ + "shape":"PortRangeBound", + "documentation":"

    The lower limit of the port range. This must be less than or equal to the ToPort specification.

    " + }, + "ToPort":{ + "shape":"PortRangeBound", + "documentation":"

    The upper limit of the port range. This must be greater than or equal to the FromPort specification.

    " + } + }, + "documentation":"

    A single port range specification. This is used for source and destination port ranges in the stateless rule MatchAttributes, SourcePorts, and DestinationPorts settings.

    " + }, + "PortRangeBound":{ + "type":"integer", + "max":65535, + "min":0 + }, + "PortRanges":{ + "type":"list", + "member":{"shape":"PortRange"} + }, + "PortSet":{ + "type":"structure", + "members":{ + "Definition":{ + "shape":"VariableDefinitionList", + "documentation":"

    The set of port ranges.

    " + } + }, + "documentation":"

    A set of port ranges for use in the rules in a rule group.

    " + }, + "PortSets":{ + "type":"map", + "key":{"shape":"RuleVariableName"}, + "value":{"shape":"PortSet"} + }, + "Priority":{ + "type":"integer", + "max":65535, + "min":1 + }, + "ProtocolNumber":{ + "type":"integer", + "max":255, + "min":0 + }, + "ProtocolNumbers":{ + "type":"list", + "member":{"shape":"ProtocolNumber"} + }, + "PublishMetricAction":{ + "type":"structure", + "required":["Dimensions"], + "members":{ + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

    " + } + }, + "documentation":"

    Stateless inspection criteria that publishes the specified metrics to Amazon CloudWatch for the matching packet. This setting defines a CloudWatch dimension value to be published.

    " + }, + "PutResourcePolicyRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Policy" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the account that you want to share rule groups and firewall policies with.

    " + }, + "Policy":{ + "shape":"PolicyString", + "documentation":"

    The AWS Identity and Access Management policy statement that lists the accounts that you want to share your rule group or firewall policy with and the operations that you want the accounts to be able to perform.

    For a rule group resource, you can specify the following operations in the Actions section of the statement:

    • network-firewall:CreateFirewallPolicy

    • network-firewall:UpdateFirewallPolicy

    • network-firewall:ListRuleGroups

    For a firewall policy resource, you can specify the following operations in the Actions section of the statement:

    • network-firewall:CreateFirewall

    • network-firewall:UpdateFirewall

    • network-firewall:AssociateFirewallPolicy

    • network-firewall:ListFirewallPolicies

    In the Resource section of the statement, you specify the ARNs for the rule groups and firewall policies that you want to share with the account that you specified in Arn.
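
    A minimal sketch of supplying such a policy statement as a JSON string with the v2 Java client; the account ID, ARNs, and statement content are illustrative only and should be checked against the operation's documentation.

      import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;

      public class PutResourcePolicyExample {
          public static void main(String[] args) {
              // Share a rule group with account 123456789012 (placeholder values throughout).
              String ruleGroupArn =
                      "arn:aws:network-firewall:us-east-1:111122223333:stateful-rulegroup/example";
              String policy = "{"
                      + "\"Version\":\"2012-10-17\","
                      + "\"Statement\":[{"
                      + "\"Effect\":\"Allow\","
                      + "\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"},"
                      + "\"Action\":[\"network-firewall:CreateFirewallPolicy\","
                      + "\"network-firewall:UpdateFirewallPolicy\","
                      + "\"network-firewall:ListRuleGroups\"],"
                      + "\"Resource\":\"" + ruleGroupArn + "\""
                      + "}]}";

              NetworkFirewallClient firewall = NetworkFirewallClient.create();
              firewall.putResourcePolicy(r -> r.resourceArn(ruleGroupArn).policy(policy));
          }
      }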

    " + } + } + }, + "PutResourcePolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "ResourceArn":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^arn:aws.*" + }, + "ResourceId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^([0-9a-f]{8})-([0-9a-f]{4}-){3}([0-9a-f]{12})$" + }, + "ResourceName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Unable to locate a resource using the parameters that you provided.

    ", + "exception":true + }, + "ResourceOwnerCheckException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "ResourceStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, + "RuleCapacity":{"type":"integer"}, + "RuleDefinition":{ + "type":"structure", + "required":[ + "MatchAttributes", + "Actions" + ], + "members":{ + "MatchAttributes":{ + "shape":"MatchAttributes", + "documentation":"

    Criteria for Network Firewall to use to inspect an individual packet in stateless rule inspection. Each match attributes set can include one or more items such as IP address, CIDR range, port number, protocol, and TCP flags.

    " + }, + "Actions":{ + "shape":"StatelessActions", + "documentation":"

    The actions to take on a packet that matches one of the stateless rule definition's match attributes. You must specify a standard action and you can add custom actions.

    Network Firewall only forwards a packet for stateful rule inspection if you specify aws:forward_to_sfe for a rule that the packet matches, or if the packet doesn't match any stateless rule and you specify aws:forward_to_sfe for the StatelessDefaultActions setting for the FirewallPolicy.

    For every rule, you must specify exactly one of the following standard actions.

    • aws:pass - Discontinues all inspection of the packet and permits it to go to its intended destination.

    • aws:drop - Discontinues all inspection of the packet and blocks it from going to its intended destination.

    • aws:forward_to_sfe - Discontinues stateless inspection of the packet and forwards it to the stateful rule engine for inspection.

    Additionally, you can specify a custom action. To do this, you define a custom action by name and type, then provide the name you've assigned to the action in this Actions setting. For information about the options, see CustomAction.

    To provide more than one action in this setting, separate the settings with a comma. For example, if you have a custom PublishMetrics action that you've named MyMetricsAction, then you could specify the standard action aws:pass and the custom action with [\"aws:pass\", \"MyMetricsAction\"].

    " + } + }, + "documentation":"

    The inspection criteria and action for a single stateless rule. AWS Network Firewall inspects each packet for the specified matching criteria. When a packet matches the criteria, Network Firewall performs the rule's actions on the packet.
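
    A minimal sketch of a stateless rule built with the generated v2 Java model classes (the Address shape is assumed from elsewhere in this model); the CIDR, port, and priority values are illustrative only.

      import software.amazon.awssdk.services.networkfirewall.model.Address;
      import software.amazon.awssdk.services.networkfirewall.model.MatchAttributes;
      import software.amazon.awssdk.services.networkfirewall.model.PortRange;
      import software.amazon.awssdk.services.networkfirewall.model.RuleDefinition;
      import software.amazon.awssdk.services.networkfirewall.model.StatelessRule;

      public class StatelessRuleExample {
          public static void main(String[] args) {
              RuleDefinition definition = RuleDefinition.builder()
                      .matchAttributes(MatchAttributes.builder()
                              .sources(Address.builder().addressDefinition("192.0.2.0/24").build())
                              .protocols(6) // TCP, by IANA protocol number
                              .destinationPorts(PortRange.builder().fromPort(443).toPort(443).build())
                              .build())
                      // Standard action plus, optionally, named custom actions.
                      .actions("aws:pass")
                      .build();

              StatelessRule rule = StatelessRule.builder()
                      .ruleDefinition(definition)
                      .priority(100) // leave gaps (100, 200, ...) to make later inserts easier
                      .build();
              System.out.println(rule);
          }
      }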

    " + }, + "RuleGroup":{ + "type":"structure", + "required":["RulesSource"], + "members":{ + "RuleVariables":{ + "shape":"RuleVariables", + "documentation":"

    Settings that are available for use in the rules in the rule group. You can only use these for stateful rule groups.

    " + }, + "RulesSource":{ + "shape":"RulesSource", + "documentation":"

    The stateful rules or stateless rules for the rule group.

    " + } + }, + "documentation":"

    The object that defines the rules in a rule group. This, along with RuleGroupResponse, defines the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

    AWS Network Firewall uses a rule group to inspect and control network traffic. You define stateless rule groups to inspect individual packets and you define stateful rule groups to inspect packets in the context of their traffic flow.

    To use a rule group, you include it by reference in a Network Firewall firewall policy, then you use the policy in a firewall. You can reference a rule group from more than one firewall policy, and you can use a firewall policy in more than one firewall.

    " + }, + "RuleGroupMetadata":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the rule group. You can't change the name of a rule group after you create it.

    " + }, + "Arn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the rule group.

    " + } + }, + "documentation":"

    High-level information about a rule group, returned by ListRuleGroups. You can use the information provided in the metadata to retrieve and manage a rule group.

    " + }, + "RuleGroupResponse":{ + "type":"structure", + "required":[ + "RuleGroupArn", + "RuleGroupName", + "RuleGroupId" + ], + "members":{ + "RuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the rule group.

    If this response is for a create request that had DryRun set to TRUE, then this ARN is a placeholder that isn't attached to a valid resource.

    " + }, + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the rule group. You can't change the name of a rule group after you create it.

    " + }, + "RuleGroupId":{ + "shape":"ResourceId", + "documentation":"

    The unique identifier for the rule group.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the rule group.

    " + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

    Indicates whether the rule group is stateless or stateful. If the rule group is stateless, it contains stateless rules. If it is stateful, it contains stateful rules.

    " + }, + "Capacity":{ + "shape":"RuleCapacity", + "documentation":"

    The maximum operating resources that this rule group can use. Rule group capacity is fixed at creation. When you update a rule group, you are limited to this capacity. When you reference a rule group from a firewall policy, Network Firewall reserves this capacity for the rule group.

    You can retrieve the capacity that would be required for a rule group before you create the rule group by calling CreateRuleGroup with DryRun set to TRUE.

    " + }, + "RuleGroupStatus":{ + "shape":"ResourceStatus", + "documentation":"

    Detailed information about the current status of a rule group.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The key:value pairs to associate with the resource.

    " + } + }, + "documentation":"

    The high-level properties of a rule group. This, along with the RuleGroup, defines the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

    " + }, + "RuleGroupType":{ + "type":"string", + "enum":[ + "STATELESS", + "STATEFUL" + ] + }, + "RuleGroups":{ + "type":"list", + "member":{"shape":"RuleGroupMetadata"} + }, + "RuleOption":{ + "type":"structure", + "required":["Keyword"], + "members":{ + "Keyword":{ + "shape":"Keyword", + "documentation":"

    " + }, + "Settings":{ + "shape":"Settings", + "documentation":"

    " + } + }, + "documentation":"

    Additional settings for a stateful rule. This is part of the StatefulRule configuration.

    " + }, + "RuleOptions":{ + "type":"list", + "member":{"shape":"RuleOption"} + }, + "RuleTargets":{ + "type":"list", + "member":{"shape":"CollectionMember_String"} + }, + "RuleVariableName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[A-Za-z][A-Za-z0-9_]*$" + }, + "RuleVariables":{ + "type":"structure", + "members":{ + "IPSets":{ + "shape":"IPSets", + "documentation":"

    A list of IP addresses and address ranges, in CIDR notation.

    " + }, + "PortSets":{ + "shape":"PortSets", + "documentation":"

    A list of port ranges.

    " + } + }, + "documentation":"

    Settings that are available for use in the rules in the RuleGroup where this is defined.
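
    A minimal sketch of rule variables built with the generated v2 Java model classes; the HOME_NET and HTTP_PORTS names and their values are illustrative only.

      import java.util.Collections;
      import software.amazon.awssdk.services.networkfirewall.model.IPSet;
      import software.amazon.awssdk.services.networkfirewall.model.PortSet;
      import software.amazon.awssdk.services.networkfirewall.model.RuleVariables;

      public class RuleVariablesExample {
          public static void main(String[] args) {
              RuleVariables variables = RuleVariables.builder()
                      .ipSets(Collections.singletonMap("HOME_NET",
                              IPSet.builder().definition("10.0.0.0/16", "10.1.0.0/16").build()))
                      .portSets(Collections.singletonMap("HTTP_PORTS",
                              PortSet.builder().definition("80", "8080").build()))
                      .build();
              System.out.println(variables);
          }
      }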

    " + }, + "RulesSource":{ + "type":"structure", + "members":{ + "RulesString":{ + "shape":"RulesString", + "documentation":"

    Stateful inspection criteria, provided in Suricata compatible intrusion prevention system (IPS) rules. Suricata is an open-source network IPS that includes a standard rule-based language for network traffic inspection.

    These rules contain the inspection criteria and the action to take for traffic that matches the criteria, so this type of rule group doesn't have a separate action setting.

    You can provide the rules from a file that you've stored in an Amazon S3 bucket, or by providing the rules in a Suricata rules string. To import from Amazon S3, provide the fully qualified name of the file that contains the rules definitions. To provide a Suricata rule string, provide the complete, Suricata compatible rule.
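
    A minimal sketch of a rule group that supplies its rules as a Suricata rules string, using the generated v2 Java model classes; the single rule shown is illustrative only and assumes a HOME_NET variable such as the one in RuleVariables.

      import software.amazon.awssdk.services.networkfirewall.model.RuleGroup;
      import software.amazon.awssdk.services.networkfirewall.model.RulesSource;

      public class SuricataRulesStringExample {
          public static void main(String[] args) {
              // One Suricata-compatible rule; real rule groups usually contain many.
              String rules = "drop tcp $HOME_NET any -> any any "
                      + "(msg:\"example drop rule\"; sid:1000001; rev:1;)";

              RuleGroup ruleGroup = RuleGroup.builder()
                      .rulesSource(RulesSource.builder().rulesString(rules).build())
                      .build();
              System.out.println(ruleGroup);
          }
      }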

    " + }, + "RulesSourceList":{ + "shape":"RulesSourceList", + "documentation":"

    Stateful inspection criteria for a domain list rule group.

    " + }, + "StatefulRules":{ + "shape":"StatefulRules", + "documentation":"

    The 5-tuple stateful inspection criteria. This contains an array of individual 5-tuple stateful rules to be used together in a stateful rule group.

    " + }, + "StatelessRulesAndCustomActions":{ + "shape":"StatelessRulesAndCustomActions", + "documentation":"

    Stateless inspection criteria to be used in a stateless rule group.

    " + } + }, + "documentation":"

    The stateless or stateful rules definitions for use in a single rule group. Each rule group requires a single RulesSource. You can use an instance of this for either stateless rules or stateful rules.

    " + }, + "RulesSourceList":{ + "type":"structure", + "required":[ + "Targets", + "TargetTypes", + "GeneratedRulesType" + ], + "members":{ + "Targets":{ + "shape":"RuleTargets", + "documentation":"

    The domains that you want to inspect for in your traffic flows. To provide multiple domains, separate them with commas.

    " + }, + "TargetTypes":{ + "shape":"TargetTypes", + "documentation":"

    " + }, + "GeneratedRulesType":{ + "shape":"GeneratedRulesType", + "documentation":"

    Whether you want to allow or deny access to the domains in your target list.

    " + } + }, + "documentation":"

    Stateful inspection criteria for a domain list rule group.
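
    A minimal sketch of a domain list rule group source built with the generated v2 Java model classes; the domains are illustrative only.

      import software.amazon.awssdk.services.networkfirewall.model.GeneratedRulesType;
      import software.amazon.awssdk.services.networkfirewall.model.RulesSource;
      import software.amazon.awssdk.services.networkfirewall.model.RulesSourceList;
      import software.amazon.awssdk.services.networkfirewall.model.TargetType;

      public class DomainListExample {
          public static void main(String[] args) {
              RulesSourceList domainList = RulesSourceList.builder()
                      .targets(".example.com", "badsite.example.org") // a leading dot also matches subdomains
                      .targetTypes(TargetType.TLS_SNI, TargetType.HTTP_HOST)
                      .generatedRulesType(GeneratedRulesType.DENYLIST)
                      .build();

              RulesSource source = RulesSource.builder().rulesSourceList(domainList).build();
              System.out.println(source);
          }
      }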

    " + }, + "RulesString":{ + "type":"string", + "max":1000000, + "min":0 + }, + "Setting":{ + "type":"string", + "max":8192, + "min":1, + "pattern":".*" + }, + "Settings":{ + "type":"list", + "member":{"shape":"Setting"} + }, + "Source":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^.*$" + }, + "StatefulAction":{ + "type":"string", + "enum":[ + "PASS", + "DROP", + "ALERT" + ] + }, + "StatefulRule":{ + "type":"structure", + "required":[ + "Action", + "Header", + "RuleOptions" + ], + "members":{ + "Action":{ + "shape":"StatefulAction", + "documentation":"

    Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow.

    The actions for a stateful rule are defined as follows:

    • PASS - Permits the packets to go to the intended destination.

    • DROP - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

    • ALERT - Permits the packets to go to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

      You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with the ALERT action, verify in the logs that the rule is filtering as you want, then change the action to DROP.

    " + }, + "Header":{ + "shape":"Header", + "documentation":"

    The stateful 5-tuple inspection criteria for this rule, used to inspect traffic flows.

    " + }, + "RuleOptions":{ + "shape":"RuleOptions", + "documentation":"

    " + } + }, + "documentation":"

    A single 5-tuple stateful rule, for use in a stateful rule group.
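
    A minimal sketch of a 5-tuple stateful rule built with the generated v2 Java model classes; the header values and the sid rule option are illustrative only.

      import software.amazon.awssdk.services.networkfirewall.model.Header;
      import software.amazon.awssdk.services.networkfirewall.model.RuleOption;
      import software.amazon.awssdk.services.networkfirewall.model.StatefulAction;
      import software.amazon.awssdk.services.networkfirewall.model.StatefulRule;
      import software.amazon.awssdk.services.networkfirewall.model.StatefulRuleDirection;
      import software.amazon.awssdk.services.networkfirewall.model.StatefulRuleProtocol;

      public class StatefulRuleExample {
          public static void main(String[] args) {
              StatefulRule rule = StatefulRule.builder()
                      .action(StatefulAction.ALERT) // test with ALERT, switch to DROP once verified
                      .header(Header.builder()
                              .protocol(StatefulRuleProtocol.TCP)
                              .source("ANY").sourcePort("ANY")
                              .direction(StatefulRuleDirection.FORWARD)
                              .destination("192.0.2.0/24")
                              .destinationPort("443")
                              .build())
                      .ruleOptions(RuleOption.builder().keyword("sid:1").build())
                      .build();
              System.out.println(rule);
          }
      }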

    " + }, + "StatefulRuleDirection":{ + "type":"string", + "enum":[ + "FORWARD", + "ANY" + ] + }, + "StatefulRuleGroupReference":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the stateful rule group.

    " + } + }, + "documentation":"

    Identifier for a single stateful rule group, used in a firewall policy to refer to a rule group.

    " + }, + "StatefulRuleGroupReferences":{ + "type":"list", + "member":{"shape":"StatefulRuleGroupReference"} + }, + "StatefulRuleProtocol":{ + "type":"string", + "enum":[ + "IP", + "TCP", + "UDP", + "ICMP", + "HTTP", + "FTP", + "TLS", + "SMB", + "DNS", + "DCERPC", + "SSH", + "SMTP", + "IMAP", + "MSN", + "KRB5", + "IKEV2", + "TFTP", + "NTP", + "DHCP" + ] + }, + "StatefulRules":{ + "type":"list", + "member":{"shape":"StatefulRule"} + }, + "StatelessActions":{ + "type":"list", + "member":{"shape":"CollectionMember_String"} + }, + "StatelessRule":{ + "type":"structure", + "required":[ + "RuleDefinition", + "Priority" + ], + "members":{ + "RuleDefinition":{ + "shape":"RuleDefinition", + "documentation":"

    Defines the stateless 5-tuple packet inspection criteria and the action to take on a packet that matches the criteria.

    " + }, + "Priority":{ + "shape":"Priority", + "documentation":"

    A setting that indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. Network Firewall evaluates the rules in a rule group starting with the lowest priority setting. You must ensure that the priority settings are unique for the rule group.

    Each stateless rule group uses exactly one StatelessRulesAndCustomActions object, and each StatelessRulesAndCustomActions contains exactly one StatelessRules object. To ensure unique priority settings for your rule groups, set unique priorities for the stateless rules that you define inside any single StatelessRules object.

    You can change the priority settings of your rules at any time. To make it easier to insert rules later, number them so there's a wide range in between, for example use 100, 200, and so on.

    " + } + }, + "documentation":"

    A single stateless rule. This is used in StatelessRulesAndCustomActions.

    " + }, + "StatelessRuleGroupReference":{ + "type":"structure", + "required":[ + "ResourceArn", + "Priority" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the stateless rule group.

    " + }, + "Priority":{ + "shape":"Priority", + "documentation":"

    An integer setting that indicates the order in which to run the stateless rule groups in a single FirewallPolicy. Network Firewall applies each stateless rule group to a packet starting with the group that has the lowest priority setting. You must ensure that the priority settings are unique within each policy.

    " + } + }, + "documentation":"

    Identifier for a single stateless rule group, used in a firewall policy to refer to the rule group.

    " + }, + "StatelessRuleGroupReferences":{ + "type":"list", + "member":{"shape":"StatelessRuleGroupReference"} + }, + "StatelessRules":{ + "type":"list", + "member":{"shape":"StatelessRule"} + }, + "StatelessRulesAndCustomActions":{ + "type":"structure", + "required":["StatelessRules"], + "members":{ + "StatelessRules":{ + "shape":"StatelessRules", + "documentation":"

    Defines the set of stateless rules for use in a stateless rule group.

    " + }, + "CustomActions":{ + "shape":"CustomActions", + "documentation":"

    Defines an array of individual custom action definitions that are available for use by the stateless rules in this StatelessRulesAndCustomActions specification. You name each custom action that you define, and then you can use it by name in your StatelessRule RuleDefinition Actions specification.

    " + } + }, + "documentation":"

    Stateless inspection criteria. Each stateless rule group uses exactly one of these data types to define its stateless rules.

    " + }, + "SubnetMapping":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"CollectionMember_String", + "documentation":"

    The unique identifier for the subnet.

    " + } + }, + "documentation":"

    The ID for a subnet that you want to associate with the firewall. This is used with CreateFirewall and AssociateSubnets. AWS Network Firewall creates an instance of the associated firewall in each subnet that you specify, to filter traffic in the subnet's Availability Zone.

    " + }, + "SubnetMappings":{ + "type":"list", + "member":{"shape":"SubnetMapping"} + }, + "SyncState":{ + "type":"structure", + "members":{ + "Attachment":{ + "shape":"Attachment", + "documentation":"

    The attachment status of the firewall's association with a single VPC subnet. For each configured subnet, Network Firewall creates the attachment by instantiating the firewall endpoint in the subnet so that it's ready to take traffic. This is part of the FirewallStatus.

    " + }, + "Config":{ + "shape":"SyncStateConfig", + "documentation":"

    The configuration status of the firewall endpoint in a single VPC subnet. Network Firewall provides each endpoint with the rules that are configured in the firewall policy. Each time you add a subnet or modify the associated firewall policy, Network Firewall synchronizes the rules in the endpoint, so it can properly filter network traffic. This is part of the FirewallStatus.

    " + } + }, + "documentation":"

    The status of the firewall endpoint and firewall policy configuration for a single VPC subnet.

    For each VPC subnet that you associate with a firewall, AWS Network Firewall does the following:

    • Instantiates a firewall endpoint in the subnet, ready to take traffic.

    • Configures the endpoint with the current firewall policy settings, to provide the filtering behavior for the endpoint.

    When you update a firewall, for example to add a subnet association or change a rule group in the firewall policy, the affected sync states reflect out-of-sync or not ready status until the changes are complete.

    " + }, + "SyncStateConfig":{ + "type":"map", + "key":{"shape":"ResourceName"}, + "value":{"shape":"PerObjectStatus"} + }, + "SyncStates":{ + "type":"map", + "key":{"shape":"AvailabilityZone"}, + "value":{"shape":"SyncState"} + }, + "TCPFlag":{ + "type":"string", + "enum":[ + "FIN", + "SYN", + "RST", + "PSH", + "ACK", + "URG", + "ECE", + "CWR" + ] + }, + "TCPFlagField":{ + "type":"structure", + "required":["Flags"], + "members":{ + "Flags":{ + "shape":"Flags", + "documentation":"

    Used in conjunction with the Masks setting to define the flags that must be set and flags that must not be set in order for the packet to match. This setting can only specify values that are also specified in the Masks setting.

    For the flags that are specified in the masks setting, the following must be true for the packet to match:

    • The ones that are set in this flags setting must be set in the packet.

    • The ones that are not set in this flags setting must also not be set in the packet.

    " + }, + "Masks":{ + "shape":"Flags", + "documentation":"

    The set of flags to consider in the inspection. To inspect all flags in the valid values list, leave this with no setting.

    " + } + }, + "documentation":"

    TCP flags and masks to inspect packets for, used in stateless rules MatchAttributes settings.
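
    In other words, Masks selects the flags to consider and Flags lists which of those must be set (the rest must be clear). A minimal sketch with the generated v2 Java model classes that matches the initial SYN of a TCP handshake; the flag choice is illustrative only.

      import software.amazon.awssdk.services.networkfirewall.model.TCPFlag;
      import software.amazon.awssdk.services.networkfirewall.model.TCPFlagField;

      public class TcpFlagExample {
          public static void main(String[] args) {
              TCPFlagField synOnly = TCPFlagField.builder()
                      .masks(TCPFlag.SYN, TCPFlag.ACK) // inspect only the SYN and ACK flags
                      .flags(TCPFlag.SYN)              // SYN must be set; ACK must not be
                      .build();
              System.out.println(synOnly);
          }
      }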

    " + }, + "TCPFlags":{ + "type":"list", + "member":{"shape":"TCPFlagField"} + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    The part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as \"customer.\" Tag keys are case-sensitive.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    The part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive.

    " + } + }, + "documentation":"

    A key:value pair associated with an AWS resource. The key:value pair can be anything you define. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each AWS resource.
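
    A minimal sketch of tagging a resource with the v2 Java client; the resource ARN and the tag key and value are placeholders.

      import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
      import software.amazon.awssdk.services.networkfirewall.model.Tag;

      public class TagResourceExample {
          public static void main(String[] args) {
              NetworkFirewallClient firewall = NetworkFirewallClient.create();
              firewall.tagResource(r -> r
                      .resourceArn("arn:aws:network-firewall:us-east-1:111122223333:firewall/example") // placeholder
                      .tags(Tag.builder().key("environment").value("test").build()));
          }
      }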

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^.*$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^.*$" + }, + "TagsPaginationMaxResults":{ + "type":"integer", + "max":100, + "min":0 + }, + "TargetType":{ + "type":"string", + "enum":[ + "TLS_SNI", + "HTTP_HOST" + ] + }, + "TargetTypes":{ + "type":"list", + "member":{"shape":"TargetType"} + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Unable to process the request due to throttling limitations.

    ", + "exception":true + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The operation you requested isn't supported by Network Firewall.

    ", + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateFirewallDeleteProtectionRequest":{ + "type":"structure", + "required":["DeleteProtection"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.
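A minimal sketch of this conditional-update pattern, assuming the generated Java SDK v2 client and assuming DescribeFirewall (not shown in this excerpt) returns the current update token; the firewall name is a placeholder.

```java
// Hypothetical sketch of the optimistic-locking flow described above: fetch a
// token, attempt a conditional update, and retry with a fresh token if the
// firewall changed in the meantime.
import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.DescribeFirewallResponse;
import software.amazon.awssdk.services.networkfirewall.model.InvalidTokenException;

public class ConditionalUpdateExample {
    public static void main(String[] args) {
        try (NetworkFirewallClient firewall = NetworkFirewallClient.create()) {
            while (true) {
                // Retrieve the firewall to obtain the current UpdateToken.
                DescribeFirewallResponse current =
                        firewall.describeFirewall(r -> r.firewallName("example-firewall"));
                try {
                    // Conditional change: fails with InvalidTokenException if the
                    // firewall changed since the token was issued.
                    firewall.updateFirewallDeleteProtection(r -> r
                            .updateToken(current.updateToken())
                            .firewallName("example-firewall")
                            .deleteProtection(true));
                    break;
                } catch (InvalidTokenException e) {
                    // The firewall changed; loop to fetch a fresh token and retry.
                }
            }
        }
    }
}
```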

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "DeleteProtection":{ + "shape":"Boolean", + "documentation":"

    A flag indicating whether it is possible to delete the firewall. A setting of TRUE indicates that the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. When you create a firewall, the operation initializes this flag to TRUE.

    " + } + } + }, + "UpdateFirewallDeleteProtectionResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "DeleteProtection":{ + "shape":"Boolean", + "documentation":"

    " + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + } + } + }, + "UpdateFirewallDescriptionRequest":{ + "type":"structure", + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    The new description for the firewall. If you omit this setting, Network Firewall removes the description for the firewall.

    " + } + } + }, + "UpdateFirewallDescriptionResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the firewall.

    " + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + } + } + }, + "UpdateFirewallPolicyChangeProtectionRequest":{ + "type":"structure", + "required":["FirewallPolicyChangeProtection"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallPolicyChangeProtection":{ + "shape":"Boolean", + "documentation":"

    A setting indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

    " + } + } + }, + "UpdateFirewallPolicyChangeProtectionResponse":{ + "type":"structure", + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "FirewallPolicyChangeProtection":{ + "shape":"Boolean", + "documentation":"

    A setting indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

    " + } + } + }, + "UpdateFirewallPolicyRequest":{ + "type":"structure", + "required":[ + "UpdateToken", + "FirewallPolicy" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    A token used for optimistic locking. Network Firewall returns a token to your requests that access the firewall policy. The token marks the state of the policy resource at the time of the request.

    To make changes to the policy, you provide the token in your request. Network Firewall uses the token to ensure that the policy hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall policy again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall policy.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallPolicyName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallPolicy":{ + "shape":"FirewallPolicy", + "documentation":"

    The updated firewall policy to use for the firewall.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the firewall policy.

    " + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Indicates whether you want Network Firewall to just check the validity of the request, rather than run the request.

    If set to TRUE, Network Firewall checks whether the request can run successfully, but doesn't actually make the requested changes. The call returns the value that the request would return if you ran it with dry run set to FALSE, but doesn't make additions or changes to your resources. This option allows you to make sure that you have the required permissions to run the request and that your request parameters are valid.

    If set to FALSE, Network Firewall makes the requested changes to your resources.
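A hedged sketch of the dry-run flow just described: validate the request first, then repeat it with DryRun set to FALSE. It assumes DescribeFirewallPolicy (referenced later in this model) exposes the current token and policy on its response; names are placeholders.

```java
// Hypothetical sketch: dry-run an UpdateFirewallPolicy request before applying it.
import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.DescribeFirewallPolicyResponse;

public class DryRunUpdateExample {
    public static void main(String[] args) {
        try (NetworkFirewallClient firewall = NetworkFirewallClient.create()) {
            DescribeFirewallPolicyResponse current =
                    firewall.describeFirewallPolicy(r -> r.firewallPolicyName("example-policy"));

            // First pass: permissions and parameters are checked, nothing is changed.
            firewall.updateFirewallPolicy(r -> r
                    .updateToken(current.updateToken())
                    .firewallPolicyName("example-policy")
                    .firewallPolicy(current.firewallPolicy())
                    .dryRun(true));

            // Second pass: the same request with DryRun=FALSE applies the change.
            firewall.updateFirewallPolicy(r -> r
                    .updateToken(current.updateToken())
                    .firewallPolicyName("example-policy")
                    .firewallPolicy(current.firewallPolicy())
                    .dryRun(false));
        }
    }
}
```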

    " + } + } + }, + "UpdateFirewallPolicyResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "FirewallPolicyResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    A token used for optimistic locking. Network Firewall returns a token to your requests that access the firewall policy. The token marks the state of the policy resource at the time of the request.

    To make changes to the policy, you provide the token in your request. Network Firewall uses the token to ensure that the policy hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall policy again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallPolicyResponse":{ + "shape":"FirewallPolicyResponse", + "documentation":"

    The high-level properties of a firewall policy. This, along with the FirewallPolicy, defines the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

    " + } + } + }, + "UpdateLoggingConfigurationRequest":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

    Defines how Network Firewall performs logging for a firewall. If you omit this setting, Network Firewall disables logging for the firewall.

    " + } + } + }, + "UpdateLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "LoggingConfiguration":{"shape":"LoggingConfiguration"} + } + }, + "UpdateRuleGroupRequest":{ + "type":"structure", + "required":["UpdateToken"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    A token used for optimistic locking. Network Firewall returns a token to your requests that access the rule group. The token marks the state of the rule group resource at the time of the request.

    To make changes to the rule group, you provide the token in your request. Network Firewall uses the token to ensure that the rule group hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the rule group again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "RuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the rule group.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the rule group. You can't change the name of a rule group after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "RuleGroup":{ + "shape":"RuleGroup", + "documentation":"

    An object that defines the rule group rules.

    You must provide either this rule group setting or a Rules setting, but not both.

    " + }, + "Rules":{ + "shape":"RulesString", + "documentation":"

    The name of a file containing stateful rule group rules specifications in Suricata flat format, with one rule per line. Use this to import your existing Suricata compatible rule groups.

    You must provide either this rules setting or a populated RuleGroup setting, but not both.

    You can provide your rule group specification in a file through this setting when you create or update your rule group. The call response returns a RuleGroup object that Network Firewall has populated from your file. Network Firewall uses the file contents to populate the rule group rules, but does not maintain a reference to the file or use the file in any way after performing the create or update. If you call DescribeRuleGroup to retrieve the rule group, Network Firewall returns rules settings inside a RuleGroup object.
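A minimal sketch, assuming the generated Java SDK v2 client, of importing Suricata-compatible rules through the Rules string setting described above. The file path, rule group name, and token are placeholders; in practice the token comes from DescribeRuleGroup.

```java
// Hypothetical sketch: import existing Suricata rules into a stateful rule group.
import java.nio.file.Files;
import java.nio.file.Paths;

import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;

public class ImportSuricataRulesExample {
    public static void main(String[] args) throws Exception {
        // Suricata flat format, one rule per line.
        String suricataRules = Files.readString(Paths.get("suricata.rules"));

        try (NetworkFirewallClient firewall = NetworkFirewallClient.create()) {
            firewall.updateRuleGroup(r -> r
                    .updateToken("00000000-0000-0000-0000-000000000000") // placeholder; from DescribeRuleGroup
                    .ruleGroupName("example-stateful-group")
                    .type("STATEFUL")        // required when the RuleGroupArn is not provided
                    .rules(suricataRules));  // Rules string; mutually exclusive with RuleGroup
        }
    }
}
```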

    " + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

    Indicates whether the rule group is stateless or stateful. If the rule group is stateless, it contains stateless rules. If it is stateful, it contains stateful rules.

    This setting is required for requests that do not include the RuleGroupARN.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of the rule group.

    " + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Indicates whether you want Network Firewall to just check the validity of the request, rather than run the request.

    If set to TRUE, Network Firewall checks whether the request can run successfully, but doesn't actually make the requested changes. The call returns the value that the request would return if you ran it with dry run set to FALSE, but doesn't make additions or changes to your resources. This option allows you to make sure that you have the required permissions to run the request and that your request parameters are valid.

    If set to FALSE, Network Firewall makes the requested changes to your resources.

    " + } + } + }, + "UpdateRuleGroupResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "RuleGroupResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    A token used for optimistic locking. Network Firewall returns a token to your requests that access the rule group. The token marks the state of the rule group resource at the time of the request.

    To make changes to the rule group, you provide the token in your request. Network Firewall uses the token to ensure that the rule group hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the rule group again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "RuleGroupResponse":{ + "shape":"RuleGroupResponse", + "documentation":"

    The high-level properties of a rule group. This, along with the RuleGroup, defines the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

    " + } + } + }, + "UpdateSubnetChangeProtectionRequest":{ + "type":"structure", + "required":["SubnetChangeProtection"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    You must specify the ARN or the name, and you can specify both.

    " + }, + "SubnetChangeProtection":{ + "shape":"Boolean", + "documentation":"

    A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

    " + } + } + }, + "UpdateSubnetChangeProtectionResponse":{ + "type":"structure", + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

    An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

    To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

    To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

    " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the firewall.

    " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

    The descriptive name of the firewall. You can't change the name of a firewall after you create it.

    " + }, + "SubnetChangeProtection":{ + "shape":"Boolean", + "documentation":"

    A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

    " + } + } + }, + "UpdateToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^([0-9a-f]{8})-([0-9a-f]{4}-){3}([0-9a-f]{12})$" + }, + "VariableDefinition":{ + "type":"string", + "min":1, + "pattern":"^.*$" + }, + "VariableDefinitionList":{ + "type":"list", + "member":{"shape":"VariableDefinition"} + }, + "VpcId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^vpc-[0-9a-f]+$" + }, + "VpcIds":{ + "type":"list", + "member":{"shape":"VpcId"} + } + }, + "documentation":"

    This is the API Reference for AWS Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

    • The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and handling errors. For general information about using the AWS REST APIs, see AWS APIs.

      To access Network Firewall using the REST API endpoint: https://network-firewall.<region>.amazonaws.com

    • Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

    • For descriptions of Network Firewall features, including step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide.

    Network Firewall is a stateful, managed network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or AWS Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source intrusion detection system (IDS) engine. For information about Suricata, see the Suricata website.

    You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

    • Allow domains or IP addresses for known AWS service endpoints, such as Amazon S3, and block all other forms of traffic.

    • Use custom lists of known bad domains to limit the types of domain names that your applications can access.

    • Perform deep packet inspection on traffic entering or leaving your VPC.

    • Rate limit traffic going from AWS to on-premises IP destinations.

    • Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used.

    To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and Network Firewall. For information about using Amazon VPC, see the Amazon VPC User Guide.

    To start using Network Firewall, do the following:

    1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.

    2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.

    3. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have.

    4. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.

    5. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.

    6. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.
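The following is a hedged sketch of step 5 in the list above, creating a firewall from an existing firewall policy and VPC subnets with the generated Java SDK v2 client. The CreateFirewall request members used here are taken from the Network Firewall API reference rather than this excerpt, and all names and IDs are placeholders.

```java
// Hypothetical sketch: create a firewall that uses an existing firewall policy.
import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.SubnetMapping;

public class CreateFirewallExample {
    public static void main(String[] args) {
        try (NetworkFirewallClient firewall = NetworkFirewallClient.create()) {
            firewall.createFirewall(r -> r
                    .firewallName("example-firewall")
                    .firewallPolicyArn("arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/example")
                    .vpcId("vpc-0123456789abcdef0")
                    // One firewall endpoint is created in each subnet you specify (step 2 above).
                    .subnetMappings(SubnetMapping.builder().subnetId("subnet-0123456789abcdef0").build()));
        }
    }
}
```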

    " +} diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml new file mode 100644 index 000000000000..f8d321a90217 --- /dev/null +++ b/services/networkmanager/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + networkmanager + AWS Java SDK :: Services :: NetworkManager + The AWS Java SDK for NetworkManager module holds the client classes that are used for + communicating with NetworkManager. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.networkmanager + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/networkmanager/src/main/resources/codegen-resources/paginators-1.json b/services/networkmanager/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..8987a383639f --- /dev/null +++ b/services/networkmanager/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,58 @@ +{ + "pagination": { + "DescribeGlobalNetworks": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "GlobalNetworks" + }, + "GetConnections": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Connections" + }, + "GetCustomerGatewayAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "CustomerGatewayAssociations" + }, + "GetDevices": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Devices" + }, + "GetLinkAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "LinkAssociations" + }, + "GetLinks": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Links" + }, + "GetSites": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Sites" + }, + "GetTransitGatewayConnectPeerAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "TransitGatewayConnectPeerAssociations" + }, + "GetTransitGatewayRegistrations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "TransitGatewayRegistrations" + } + } +} diff --git a/services/networkmanager/src/main/resources/codegen-resources/service-2.json b/services/networkmanager/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..f1d35af81b9e --- /dev/null +++ b/services/networkmanager/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2668 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-07-05", + "endpointPrefix":"networkmanager", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"NetworkManager", + "serviceFullName":"AWS Network Manager", + "serviceId":"NetworkManager", + "signatureVersion":"v4", + "signingName":"networkmanager", + "uid":"networkmanager-2019-07-05" + }, + "operations":{ + "AssociateCustomerGateway":{ + "name":"AssociateCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/global-networks/{globalNetworkId}/customer-gateway-associations" + }, + "input":{"shape":"AssociateCustomerGatewayRequest"}, + 
"output":{"shape":"AssociateCustomerGatewayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Associates a customer gateway with a device and optionally, with a link. If you specify a link, it must be associated with the specified device.

    You can only associate customer gateways that are connected to a VPN attachment on a transit gateway. The transit gateway must be registered in your global network. When you register a transit gateway, customer gateways that are connected to the transit gateway are automatically included in the global network. To list customer gateways that are connected to a transit gateway, use the DescribeVpnConnections EC2 API and filter by transit-gateway-id.

    You cannot associate a customer gateway with more than one device and link.
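A hypothetical sketch of the flow described above: list the customer gateways connected to a registered transit gateway with the EC2 DescribeVpnConnections API (filtered by transit-gateway-id, as the documentation suggests), then associate one of them with a device. The clients are the generated Java SDK v2 clients; IDs and ARNs are placeholders.

```java
// Hypothetical sketch: find customer gateways behind a transit gateway, then associate one.
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.Filter;
import software.amazon.awssdk.services.networkmanager.NetworkManagerClient;

public class AssociateCustomerGatewayExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create();
             NetworkManagerClient nm = NetworkManagerClient.create()) {

            // Customer gateways connected to the registered transit gateway.
            ec2.describeVpnConnections(r -> r.filters(
                            Filter.builder().name("transit-gateway-id").values("tgw-0123456789abcdef0").build()))
                    .vpnConnections()
                    .forEach(c -> System.out.println(c.customerGatewayId()));

            // Associate one of them (by ARN) with a device, and optionally a link.
            nm.associateCustomerGateway(r -> r
                    .globalNetworkId("global-network-01231231231231231")
                    .customerGatewayArn("arn:aws:ec2:us-east-1:123456789012:customer-gateway/cgw-0123456789abcdef0")
                    .deviceId("device-07f6fd08867abc123")
                    .linkId("link-11112222aaaabbbb1"));
        }
    }
}
```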

    " + }, + "AssociateLink":{ + "name":"AssociateLink", + "http":{ + "method":"POST", + "requestUri":"/global-networks/{globalNetworkId}/link-associations" + }, + "input":{"shape":"AssociateLinkRequest"}, + "output":{"shape":"AssociateLinkResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Associates a link to a device. A device can be associated to multiple links and a link can be associated to multiple devices. The device and link must be in the same global network and the same site.

    " + }, + "AssociateTransitGatewayConnectPeer":{ + "name":"AssociateTransitGatewayConnectPeer", + "http":{ + "method":"POST", + "requestUri":"/global-networks/{globalNetworkId}/transit-gateway-connect-peer-associations" + }, + "input":{"shape":"AssociateTransitGatewayConnectPeerRequest"}, + "output":{"shape":"AssociateTransitGatewayConnectPeerResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Associates a transit gateway Connect peer with a device, and optionally, with a link. If you specify a link, it must be associated with the specified device.

    You can only associate transit gateway Connect peers that have been created on a transit gateway that's registered in your global network.

    You cannot associate a transit gateway Connect peer with more than one device and link.

    " + }, + "CreateConnection":{ + "name":"CreateConnection", + "http":{ + "method":"POST", + "requestUri":"/global-networks/{globalNetworkId}/connections" + }, + "input":{"shape":"CreateConnectionRequest"}, + "output":{"shape":"CreateConnectionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a connection between two devices. The devices can be a physical or virtual appliance that connects to a third-party appliance in a VPC, or a physical appliance that connects to another physical appliance in an on-premises network.
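A minimal sketch of this operation with the generated Java SDK v2 client, using the CreateConnection request members defined later in this model; all IDs are placeholders.

```java
// Hypothetical sketch: connect two devices over their respective links.
import software.amazon.awssdk.services.networkmanager.NetworkManagerClient;

public class CreateConnectionExample {
    public static void main(String[] args) {
        try (NetworkManagerClient nm = NetworkManagerClient.create()) {
            nm.createConnection(r -> r
                    .globalNetworkId("global-network-01231231231231231")
                    .deviceId("device-07f6fd08867abc123")           // first device
                    .connectedDeviceId("device-0aabbcc112233dd44")  // second device
                    .linkId("link-11112222aaaabbbb1")
                    .connectedLinkId("link-33334444ccccdddd3")
                    .description("example connection"));
        }
    }
}
```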

    " + }, + "CreateDevice":{ + "name":"CreateDevice", + "http":{ + "method":"POST", + "requestUri":"/global-networks/{globalNetworkId}/devices" + }, + "input":{"shape":"CreateDeviceRequest"}, + "output":{"shape":"CreateDeviceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new device in a global network. If you specify both a site ID and a location, the location of the site is used for visualization in the Network Manager console.

    " + }, + "CreateGlobalNetwork":{ + "name":"CreateGlobalNetwork", + "http":{ + "method":"POST", + "requestUri":"/global-networks" + }, + "input":{"shape":"CreateGlobalNetworkRequest"}, + "output":{"shape":"CreateGlobalNetworkResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new, empty global network.

    " + }, + "CreateLink":{ + "name":"CreateLink", + "http":{ + "method":"POST", + "requestUri":"/global-networks/{globalNetworkId}/links" + }, + "input":{"shape":"CreateLinkRequest"}, + "output":{"shape":"CreateLinkResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new link for a specified site.

    " + }, + "CreateSite":{ + "name":"CreateSite", + "http":{ + "method":"POST", + "requestUri":"/global-networks/{globalNetworkId}/sites" + }, + "input":{"shape":"CreateSiteRequest"}, + "output":{"shape":"CreateSiteResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new site in a global network.

    " + }, + "DeleteConnection":{ + "name":"DeleteConnection", + "http":{ + "method":"DELETE", + "requestUri":"/global-networks/{globalNetworkId}/connections/{connectionId}" + }, + "input":{"shape":"DeleteConnectionRequest"}, + "output":{"shape":"DeleteConnectionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the specified connection in your global network.

    " + }, + "DeleteDevice":{ + "name":"DeleteDevice", + "http":{ + "method":"DELETE", + "requestUri":"/global-networks/{globalNetworkId}/devices/{deviceId}" + }, + "input":{"shape":"DeleteDeviceRequest"}, + "output":{"shape":"DeleteDeviceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an existing device. You must first disassociate the device from any links and customer gateways.

    " + }, + "DeleteGlobalNetwork":{ + "name":"DeleteGlobalNetwork", + "http":{ + "method":"DELETE", + "requestUri":"/global-networks/{globalNetworkId}" + }, + "input":{"shape":"DeleteGlobalNetworkRequest"}, + "output":{"shape":"DeleteGlobalNetworkResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an existing global network. You must first delete all global network objects (devices, links, and sites) and deregister all transit gateways.

    " + }, + "DeleteLink":{ + "name":"DeleteLink", + "http":{ + "method":"DELETE", + "requestUri":"/global-networks/{globalNetworkId}/links/{linkId}" + }, + "input":{"shape":"DeleteLinkRequest"}, + "output":{"shape":"DeleteLinkResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an existing link. You must first disassociate the link from any devices and customer gateways.

    " + }, + "DeleteSite":{ + "name":"DeleteSite", + "http":{ + "method":"DELETE", + "requestUri":"/global-networks/{globalNetworkId}/sites/{siteId}" + }, + "input":{"shape":"DeleteSiteRequest"}, + "output":{"shape":"DeleteSiteResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an existing site. The site cannot be associated with any device or link.

    " + }, + "DeregisterTransitGateway":{ + "name":"DeregisterTransitGateway", + "http":{ + "method":"DELETE", + "requestUri":"/global-networks/{globalNetworkId}/transit-gateway-registrations/{transitGatewayArn}" + }, + "input":{"shape":"DeregisterTransitGatewayRequest"}, + "output":{"shape":"DeregisterTransitGatewayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deregisters a transit gateway from your global network. This action does not delete your transit gateway, or modify any of its attachments. This action removes any customer gateway associations.

    " + }, + "DescribeGlobalNetworks":{ + "name":"DescribeGlobalNetworks", + "http":{ + "method":"GET", + "requestUri":"/global-networks" + }, + "input":{"shape":"DescribeGlobalNetworksRequest"}, + "output":{"shape":"DescribeGlobalNetworksResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes one or more global networks. By default, all global networks are described. To describe the objects in your global network, you must use the appropriate Get* action. For example, to list the transit gateways in your global network, use GetTransitGatewayRegistrations.

    " + }, + "DisassociateCustomerGateway":{ + "name":"DisassociateCustomerGateway", + "http":{ + "method":"DELETE", + "requestUri":"/global-networks/{globalNetworkId}/customer-gateway-associations/{customerGatewayArn}" + }, + "input":{"shape":"DisassociateCustomerGatewayRequest"}, + "output":{"shape":"DisassociateCustomerGatewayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Disassociates a customer gateway from a device and a link.

    " + }, + "DisassociateLink":{ + "name":"DisassociateLink", + "http":{ + "method":"DELETE", + "requestUri":"/global-networks/{globalNetworkId}/link-associations" + }, + "input":{"shape":"DisassociateLinkRequest"}, + "output":{"shape":"DisassociateLinkResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Disassociates an existing device from a link. You must first disassociate any customer gateways that are associated with the link.

    " + }, + "DisassociateTransitGatewayConnectPeer":{ + "name":"DisassociateTransitGatewayConnectPeer", + "http":{ + "method":"DELETE", + "requestUri":"/global-networks/{globalNetworkId}/transit-gateway-connect-peer-associations/{transitGatewayConnectPeerArn}" + }, + "input":{"shape":"DisassociateTransitGatewayConnectPeerRequest"}, + "output":{"shape":"DisassociateTransitGatewayConnectPeerResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Disassociates a transit gateway Connect peer from a device and link.

    " + }, + "GetConnections":{ + "name":"GetConnections", + "http":{ + "method":"GET", + "requestUri":"/global-networks/{globalNetworkId}/connections" + }, + "input":{"shape":"GetConnectionsRequest"}, + "output":{"shape":"GetConnectionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets information about one or more of your connections in a global network.

    " + }, + "GetCustomerGatewayAssociations":{ + "name":"GetCustomerGatewayAssociations", + "http":{ + "method":"GET", + "requestUri":"/global-networks/{globalNetworkId}/customer-gateway-associations" + }, + "input":{"shape":"GetCustomerGatewayAssociationsRequest"}, + "output":{"shape":"GetCustomerGatewayAssociationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets the association information for customer gateways that are associated with devices and links in your global network.

    " + }, + "GetDevices":{ + "name":"GetDevices", + "http":{ + "method":"GET", + "requestUri":"/global-networks/{globalNetworkId}/devices" + }, + "input":{"shape":"GetDevicesRequest"}, + "output":{"shape":"GetDevicesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets information about one or more of your devices in a global network.
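Since this change also declares GetDevices as a paginated operation in paginators-1.json, the generated client is expected to expose a paginator for it. A hedged sketch of iterating over every device in a global network; the ID is a placeholder.

```java
// Hypothetical sketch: page through all devices in a global network.
import software.amazon.awssdk.services.networkmanager.NetworkManagerClient;

public class ListDevicesExample {
    public static void main(String[] args) {
        try (NetworkManagerClient nm = NetworkManagerClient.create()) {
            nm.getDevicesPaginator(r -> r.globalNetworkId("global-network-01231231231231231"))
                    .stream()
                    .flatMap(page -> page.devices().stream())
                    .forEach(d -> System.out.println(d.deviceId() + " " + d.deviceArn()));
        }
    }
}
```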

    " + }, + "GetLinkAssociations":{ + "name":"GetLinkAssociations", + "http":{ + "method":"GET", + "requestUri":"/global-networks/{globalNetworkId}/link-associations" + }, + "input":{"shape":"GetLinkAssociationsRequest"}, + "output":{"shape":"GetLinkAssociationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets the link associations for a device or a link. Either the device ID or the link ID must be specified.

    " + }, + "GetLinks":{ + "name":"GetLinks", + "http":{ + "method":"GET", + "requestUri":"/global-networks/{globalNetworkId}/links" + }, + "input":{"shape":"GetLinksRequest"}, + "output":{"shape":"GetLinksResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets information about one or more links in a specified global network.

    If you specify the site ID, you cannot specify the type or provider in the same request. Otherwise, you can specify the type and provider in the same request.

    " + }, + "GetSites":{ + "name":"GetSites", + "http":{ + "method":"GET", + "requestUri":"/global-networks/{globalNetworkId}/sites" + }, + "input":{"shape":"GetSitesRequest"}, + "output":{"shape":"GetSitesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets information about one or more of your sites in a global network.

    " + }, + "GetTransitGatewayConnectPeerAssociations":{ + "name":"GetTransitGatewayConnectPeerAssociations", + "http":{ + "method":"GET", + "requestUri":"/global-networks/{globalNetworkId}/transit-gateway-connect-peer-associations" + }, + "input":{"shape":"GetTransitGatewayConnectPeerAssociationsRequest"}, + "output":{"shape":"GetTransitGatewayConnectPeerAssociationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets information about one or more of your transit gateway Connect peer associations in a global network.

    " + }, + "GetTransitGatewayRegistrations":{ + "name":"GetTransitGatewayRegistrations", + "http":{ + "method":"GET", + "requestUri":"/global-networks/{globalNetworkId}/transit-gateway-registrations" + }, + "input":{"shape":"GetTransitGatewayRegistrationsRequest"}, + "output":{"shape":"GetTransitGatewayRegistrationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets information about the transit gateway registrations in a specified global network.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the tags for a specified resource.

    " + }, + "RegisterTransitGateway":{ + "name":"RegisterTransitGateway", + "http":{ + "method":"POST", + "requestUri":"/global-networks/{globalNetworkId}/transit-gateway-registrations" + }, + "input":{"shape":"RegisterTransitGatewayRequest"}, + "output":{"shape":"RegisterTransitGatewayResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Registers a transit gateway in your global network. The transit gateway can be in any AWS Region, but it must be owned by the same AWS account that owns the global network. You cannot register a transit gateway in more than one global network.
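A hypothetical sketch that combines CreateGlobalNetwork (described earlier) with this operation: create an empty global network, then register a transit gateway in it with the generated Java SDK v2 client. The transit gateway ARN is a placeholder.

```java
// Hypothetical sketch: create a global network and register a transit gateway in it.
import software.amazon.awssdk.services.networkmanager.NetworkManagerClient;

public class RegisterTransitGatewayExample {
    public static void main(String[] args) {
        try (NetworkManagerClient nm = NetworkManagerClient.create()) {
            String globalNetworkId = nm.createGlobalNetwork(r -> r.description("example network"))
                    .globalNetwork()
                    .globalNetworkId();

            nm.registerTransitGateway(r -> r
                    .globalNetworkId(globalNetworkId)
                    .transitGatewayArn("arn:aws:ec2:us-east-1:123456789012:transit-gateway/tgw-0123456789abcdef0"));
        }
    }
}
```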

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Tags a specified resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes tags from a specified resource.

    " + }, + "UpdateConnection":{ + "name":"UpdateConnection", + "http":{ + "method":"PATCH", + "requestUri":"/global-networks/{globalNetworkId}/connections/{connectionId}" + }, + "input":{"shape":"UpdateConnectionRequest"}, + "output":{"shape":"UpdateConnectionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the information for an existing connection. To remove information for any of the parameters, specify an empty string.

    " + }, + "UpdateDevice":{ + "name":"UpdateDevice", + "http":{ + "method":"PATCH", + "requestUri":"/global-networks/{globalNetworkId}/devices/{deviceId}" + }, + "input":{"shape":"UpdateDeviceRequest"}, + "output":{"shape":"UpdateDeviceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the details for an existing device. To remove information for any of the parameters, specify an empty string.

    " + }, + "UpdateGlobalNetwork":{ + "name":"UpdateGlobalNetwork", + "http":{ + "method":"PATCH", + "requestUri":"/global-networks/{globalNetworkId}" + }, + "input":{"shape":"UpdateGlobalNetworkRequest"}, + "output":{"shape":"UpdateGlobalNetworkResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates an existing global network. To remove information for any of the parameters, specify an empty string.

    " + }, + "UpdateLink":{ + "name":"UpdateLink", + "http":{ + "method":"PATCH", + "requestUri":"/global-networks/{globalNetworkId}/links/{linkId}" + }, + "input":{"shape":"UpdateLinkRequest"}, + "output":{"shape":"UpdateLinkResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the details for an existing link. To remove information for any of the parameters, specify an empty string.

    " + }, + "UpdateSite":{ + "name":"UpdateSite", + "http":{ + "method":"PATCH", + "requestUri":"/global-networks/{globalNetworkId}/sites/{siteId}" + }, + "input":{"shape":"UpdateSiteRequest"}, + "output":{"shape":"UpdateSiteResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the information for an existing site. To remove information for any of the parameters, specify an empty string.

    " + } + }, + "shapes":{ + "AWSLocation":{ + "type":"structure", + "members":{ + "Zone":{ + "shape":"String", + "documentation":"

    The Zone the device is located in. This can be the ID of an Availability Zone, Local Zone, Wavelength Zone, or an Outpost.

    " + }, + "SubnetArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the subnet the device is located in.

    " + } + }, + "documentation":"

    Specifies a location in AWS.
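A minimal sketch, assuming the generated Java SDK v2 client, of building the AWSLocation structure described above and attaching it to a new device; the zone, subnet ARN, and global network ID are placeholders.

```java
// Hypothetical sketch: create a device with an AWS location (zone plus subnet ARN).
import software.amazon.awssdk.services.networkmanager.NetworkManagerClient;
import software.amazon.awssdk.services.networkmanager.model.AWSLocation;

public class CreateDeviceExample {
    public static void main(String[] args) {
        try (NetworkManagerClient nm = NetworkManagerClient.create()) {
            AWSLocation location = AWSLocation.builder()
                    .zone("us-east-1a") // an Availability Zone, Local Zone, Wavelength Zone, or Outpost
                    .subnetArn("arn:aws:ec2:us-east-1:123456789012:subnet/subnet-0123456789abcdef0")
                    .build();

            nm.createDevice(r -> r
                    .globalNetworkId("global-network-01231231231231231")
                    .awsLocation(location)
                    .description("example device"));
        }
    }
}
```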

    " + }, + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AssociateCustomerGatewayRequest":{ + "type":"structure", + "required":[ + "CustomerGatewayArn", + "GlobalNetworkId", + "DeviceId" + ], + "members":{ + "CustomerGatewayArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the customer gateway. For more information, see Resources Defined by Amazon EC2.

    " + }, + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    " + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    " + } + } + }, + "AssociateCustomerGatewayResponse":{ + "type":"structure", + "members":{ + "CustomerGatewayAssociation":{ + "shape":"CustomerGatewayAssociation", + "documentation":"

    The customer gateway association.

    " + } + } + }, + "AssociateLinkRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "DeviceId", + "LinkId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    " + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    " + } + } + }, + "AssociateLinkResponse":{ + "type":"structure", + "members":{ + "LinkAssociation":{ + "shape":"LinkAssociation", + "documentation":"

    The link association.

    " + } + } + }, + "AssociateTransitGatewayConnectPeerRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "TransitGatewayConnectPeerArn", + "DeviceId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "TransitGatewayConnectPeerArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the Connect peer.

    " + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    " + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    " + } + } + }, + "AssociateTransitGatewayConnectPeerResponse":{ + "type":"structure", + "members":{ + "TransitGatewayConnectPeerAssociation":{ + "shape":"TransitGatewayConnectPeerAssociation", + "documentation":"

    The transit gateway Connect peer association.

    " + } + } + }, + "Bandwidth":{ + "type":"structure", + "members":{ + "UploadSpeed":{ + "shape":"Integer", + "documentation":"

    Upload speed in Mbps.

    " + }, + "DownloadSpeed":{ + "shape":"Integer", + "documentation":"

    Download speed in Mbps.

    " + } + }, + "documentation":"

    Describes bandwidth information.

    " + }, + "ConflictException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource.

    " + }, + "ResourceType":{ + "shape":"String", + "documentation":"

    The resource type.

    " + } + }, + "documentation":"

    There was a conflict processing the request. Updating or deleting the resource can cause an inconsistent state.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "Connection":{ + "type":"structure", + "members":{ + "ConnectionId":{ + "shape":"String", + "documentation":"

    The ID of the connection.

    " + }, + "ConnectionArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the connection.

    " + }, + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    " + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the first device in the connection.

    " + }, + "ConnectedDeviceId":{ + "shape":"String", + "documentation":"

    The ID of the second device in the connection.

    " + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link for the first device in the connection.

    " + }, + "ConnectedLinkId":{ + "shape":"String", + "documentation":"

    The ID of the link for the second device in the connection.

    " + }, + "Description":{ + "shape":"String", + "documentation":"

    The description of the connection.

    " + }, + "CreatedAt":{ + "shape":"DateTime", + "documentation":"

    The date and time that the connection was created.

    " + }, + "State":{ + "shape":"ConnectionState", + "documentation":"

    The state of the connection.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags for the connection.

    " + } + }, + "documentation":"

    Describes a connection.

    " + }, + "ConnectionList":{ + "type":"list", + "member":{"shape":"Connection"} + }, + "ConnectionState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "DELETING", + "UPDATING" + ] + }, + "CreateConnectionRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "DeviceId", + "ConnectedDeviceId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the first device in the connection.

    " + }, + "ConnectedDeviceId":{ + "shape":"String", + "documentation":"

    The ID of the second device in the connection.

    " + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link for the first device.

    " + }, + "ConnectedLinkId":{ + "shape":"String", + "documentation":"

    The ID of the link for the second device.

    " + }, + "Description":{ + "shape":"String", + "documentation":"

    A description of the connection.

    Length Constraints: Maximum length of 256 characters.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to apply to the resource during creation.

    " + } + } + }, + "CreateConnectionResponse":{ + "type":"structure", + "members":{ + "Connection":{ + "shape":"Connection", + "documentation":"

    Information about the connection.

    " + } + } + }, + "CreateDeviceRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "AWSLocation":{ + "shape":"AWSLocation", + "documentation":"

    The AWS location of the device.

    " + }, + "Description":{ + "shape":"String", + "documentation":"

    A description of the device.

    Length Constraints: Maximum length of 256 characters.

    " + }, + "Type":{ + "shape":"String", + "documentation":"

    The type of the device.

    " + }, + "Vendor":{ + "shape":"String", + "documentation":"

    The vendor of the device.

    Length Constraints: Maximum length of 128 characters.

    " + }, + "Model":{ + "shape":"String", + "documentation":"

    The model of the device.

    Length Constraints: Maximum length of 128 characters.

    " + }, + "SerialNumber":{ + "shape":"String", + "documentation":"

    The serial number of the device.

    Length Constraints: Maximum length of 128 characters.

    " + }, + "Location":{ + "shape":"Location", + "documentation":"

    The location of the device.

    " + }, + "SiteId":{ + "shape":"String", + "documentation":"

    The ID of the site.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to apply to the resource during creation.

    " + } + } + }, + "CreateDeviceResponse":{ + "type":"structure", + "members":{ + "Device":{ + "shape":"Device", + "documentation":"

    Information about the device.

    " + } + } + }, + "CreateGlobalNetworkRequest":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"String", + "documentation":"

    A description of the global network.

    Length Constraints: Maximum length of 256 characters.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to apply to the resource during creation.

    " + } + } + }, + "CreateGlobalNetworkResponse":{ + "type":"structure", + "members":{ + "GlobalNetwork":{ + "shape":"GlobalNetwork", + "documentation":"

    Information about the global network.

    " + } + } + }, + "CreateLinkRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "Bandwidth", + "SiteId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "Description":{ + "shape":"String", + "documentation":"

    A description of the link.

    Length Constraints: Maximum length of 256 characters.

    " + }, + "Type":{ + "shape":"String", + "documentation":"

    The type of the link.

    Constraints: Cannot include the following characters: | \\ ^

    Length Constraints: Maximum length of 128 characters.

    " + }, + "Bandwidth":{ + "shape":"Bandwidth", + "documentation":"

    The upload speed and download speed in Mbps.

    " + }, + "Provider":{ + "shape":"String", + "documentation":"

    The provider of the link.

    Constraints: Cannot include the following characters: | \\ ^

    Length Constraints: Maximum length of 128 characters.

    " + }, + "SiteId":{ + "shape":"String", + "documentation":"

    The ID of the site.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to apply to the resource during creation.
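
    As an illustration of the CreateLink shapes above (a sketch only, assuming the SDK's standard generated networkmanager client and model classes rather than anything defined in this file), a link with explicit upload and download speeds could be created like this:

        import software.amazon.awssdk.services.networkmanager.NetworkManagerClient;
        import software.amazon.awssdk.services.networkmanager.model.Bandwidth;
        import software.amazon.awssdk.services.networkmanager.model.CreateLinkRequest;
        import software.amazon.awssdk.services.networkmanager.model.Link;

        final class CreateLinkExample {
            static Link createBroadbandLink(NetworkManagerClient client, String globalNetworkId, String siteId) {
                CreateLinkRequest request = CreateLinkRequest.builder()
                        .globalNetworkId(globalNetworkId)
                        .siteId(siteId)
                        .type("broadband")          // must not contain | \ ^
                        .provider("ExampleISP")     // must not contain | \ ^
                        // Upload and download speeds are expressed in Mbps.
                        .bandwidth(Bandwidth.builder()
                                .uploadSpeed(50)
                                .downloadSpeed(100)
                                .build())
                        .build();
                return client.createLink(request).link();
            }
        }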

    " + } + } + }, + "CreateLinkResponse":{ + "type":"structure", + "members":{ + "Link":{ + "shape":"Link", + "documentation":"

    Information about the link.

    " + } + } + }, + "CreateSiteRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "Description":{ + "shape":"String", + "documentation":"

    A description of your site.

    Length Constraints: Maximum length of 256 characters.

    " + }, + "Location":{ + "shape":"Location", + "documentation":"

    The site location. This information is used for visualization in the Network Manager console. If you specify the address, the latitude and longitude are automatically calculated.

    • Address: The physical address of the site.

    • Latitude: The latitude of the site.

    • Longitude: The longitude of the site.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to apply to the resource during creation.

    " + } + } + }, + "CreateSiteResponse":{ + "type":"structure", + "members":{ + "Site":{ + "shape":"Site", + "documentation":"

    Information about the site.

    " + } + } + }, + "CustomerGatewayAssociation":{ + "type":"structure", + "members":{ + "CustomerGatewayArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the customer gateway.

    " + }, + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    " + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    " + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    " + }, + "State":{ + "shape":"CustomerGatewayAssociationState", + "documentation":"

    The association state.

    " + } + }, + "documentation":"

    Describes the association between a customer gateway, a device, and a link.

    " + }, + "CustomerGatewayAssociationList":{ + "type":"list", + "member":{"shape":"CustomerGatewayAssociation"} + }, + "CustomerGatewayAssociationState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "DELETING", + "DELETED" + ] + }, + "DateTime":{"type":"timestamp"}, + "DeleteConnectionRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "ConnectionId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "ConnectionId":{ + "shape":"String", + "documentation":"

    The ID of the connection.

    ", + "location":"uri", + "locationName":"connectionId" + } + } + }, + "DeleteConnectionResponse":{ + "type":"structure", + "members":{ + "Connection":{ + "shape":"Connection", + "documentation":"

    Information about the connection.

    " + } + } + }, + "DeleteDeviceRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "DeviceId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    ", + "location":"uri", + "locationName":"deviceId" + } + } + }, + "DeleteDeviceResponse":{ + "type":"structure", + "members":{ + "Device":{ + "shape":"Device", + "documentation":"

    Information about the device.

    " + } + } + }, + "DeleteGlobalNetworkRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + } + } + }, + "DeleteGlobalNetworkResponse":{ + "type":"structure", + "members":{ + "GlobalNetwork":{ + "shape":"GlobalNetwork", + "documentation":"

    Information about the global network.

    " + } + } + }, + "DeleteLinkRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "LinkId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    ", + "location":"uri", + "locationName":"linkId" + } + } + }, + "DeleteLinkResponse":{ + "type":"structure", + "members":{ + "Link":{ + "shape":"Link", + "documentation":"

    Information about the link.

    " + } + } + }, + "DeleteSiteRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "SiteId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "SiteId":{ + "shape":"String", + "documentation":"

    The ID of the site.

    ", + "location":"uri", + "locationName":"siteId" + } + } + }, + "DeleteSiteResponse":{ + "type":"structure", + "members":{ + "Site":{ + "shape":"Site", + "documentation":"

    Information about the site.

    " + } + } + }, + "DeregisterTransitGatewayRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "TransitGatewayArn" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "TransitGatewayArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the transit gateway.

    ", + "location":"uri", + "locationName":"transitGatewayArn" + } + } + }, + "DeregisterTransitGatewayResponse":{ + "type":"structure", + "members":{ + "TransitGatewayRegistration":{ + "shape":"TransitGatewayRegistration", + "documentation":"

    The transit gateway registration information.

    " + } + } + }, + "DescribeGlobalNetworksRequest":{ + "type":"structure", + "members":{ + "GlobalNetworkIds":{ + "shape":"StringList", + "documentation":"

    The IDs of one or more global networks. The maximum is 10.

    ", + "location":"querystring", + "locationName":"globalNetworkIds" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "DescribeGlobalNetworksResponse":{ + "type":"structure", + "members":{ + "GlobalNetworks":{ + "shape":"GlobalNetworkList", + "documentation":"

    Information about the global networks.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    " + } + } + }, + "Device":{ + "type":"structure", + "members":{ + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    " + }, + "DeviceArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the device.

    " + }, + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    " + }, + "AWSLocation":{ + "shape":"AWSLocation", + "documentation":"

    The AWS location of the device.

    " + }, + "Description":{ + "shape":"String", + "documentation":"

    The description of the device.

    " + }, + "Type":{ + "shape":"String", + "documentation":"

    The device type.

    " + }, + "Vendor":{ + "shape":"String", + "documentation":"

    The device vendor.

    " + }, + "Model":{ + "shape":"String", + "documentation":"

    The device model.

    " + }, + "SerialNumber":{ + "shape":"String", + "documentation":"

    The device serial number.

    " + }, + "Location":{ + "shape":"Location", + "documentation":"

    The location of the device.

    " + }, + "SiteId":{ + "shape":"String", + "documentation":"

    The site ID.

    " + }, + "CreatedAt":{ + "shape":"DateTime", + "documentation":"

    The date and time that the device was created.

    " + }, + "State":{ + "shape":"DeviceState", + "documentation":"

    The device state.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags for the device.

    " + } + }, + "documentation":"

    Describes a device.

    " + }, + "DeviceList":{ + "type":"list", + "member":{"shape":"Device"} + }, + "DeviceState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "DELETING", + "UPDATING" + ] + }, + "DisassociateCustomerGatewayRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "CustomerGatewayArn" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "CustomerGatewayArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the customer gateway. For more information, see Resources Defined by Amazon EC2.

    ", + "location":"uri", + "locationName":"customerGatewayArn" + } + } + }, + "DisassociateCustomerGatewayResponse":{ + "type":"structure", + "members":{ + "CustomerGatewayAssociation":{ + "shape":"CustomerGatewayAssociation", + "documentation":"

    Information about the customer gateway association.

    " + } + } + }, + "DisassociateLinkRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "DeviceId", + "LinkId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    ", + "location":"querystring", + "locationName":"deviceId" + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    ", + "location":"querystring", + "locationName":"linkId" + } + } + }, + "DisassociateLinkResponse":{ + "type":"structure", + "members":{ + "LinkAssociation":{ + "shape":"LinkAssociation", + "documentation":"

    Information about the link association.

    " + } + } + }, + "DisassociateTransitGatewayConnectPeerRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "TransitGatewayConnectPeerArn" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "TransitGatewayConnectPeerArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the transit gateway Connect peer.

    ", + "location":"uri", + "locationName":"transitGatewayConnectPeerArn" + } + } + }, + "DisassociateTransitGatewayConnectPeerResponse":{ + "type":"structure", + "members":{ + "TransitGatewayConnectPeerAssociation":{ + "shape":"TransitGatewayConnectPeerAssociation", + "documentation":"

    The transit gateway Connect peer association.

    " + } + } + }, + "GetConnectionsRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "ConnectionIds":{ + "shape":"StringList", + "documentation":"

    One or more connection IDs.

    ", + "location":"querystring", + "locationName":"connectionIds" + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    ", + "location":"querystring", + "locationName":"deviceId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetConnectionsResponse":{ + "type":"structure", + "members":{ + "Connections":{ + "shape":"ConnectionList", + "documentation":"

    Information about the connections.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token to use for the next page of results.

    " + } + } + }, + "GetCustomerGatewayAssociationsRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "CustomerGatewayArns":{ + "shape":"StringList", + "documentation":"

    One or more customer gateway Amazon Resource Names (ARNs). For more information, see Resources Defined by Amazon EC2. The maximum is 10.

    ", + "location":"querystring", + "locationName":"customerGatewayArns" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetCustomerGatewayAssociationsResponse":{ + "type":"structure", + "members":{ + "CustomerGatewayAssociations":{ + "shape":"CustomerGatewayAssociationList", + "documentation":"

    The customer gateway associations.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    " + } + } + }, + "GetDevicesRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "DeviceIds":{ + "shape":"StringList", + "documentation":"

    One or more device IDs. The maximum is 10.

    ", + "location":"querystring", + "locationName":"deviceIds" + }, + "SiteId":{ + "shape":"String", + "documentation":"

    The ID of the site.

    ", + "location":"querystring", + "locationName":"siteId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetDevicesResponse":{ + "type":"structure", + "members":{ + "Devices":{ + "shape":"DeviceList", + "documentation":"

    The devices.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    " + } + } + }, + "GetLinkAssociationsRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    ", + "location":"querystring", + "locationName":"deviceId" + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    ", + "location":"querystring", + "locationName":"linkId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetLinkAssociationsResponse":{ + "type":"structure", + "members":{ + "LinkAssociations":{ + "shape":"LinkAssociationList", + "documentation":"

    The link associations.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    " + } + } + }, + "GetLinksRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "LinkIds":{ + "shape":"StringList", + "documentation":"

    One or more link IDs. The maximum is 10.

    ", + "location":"querystring", + "locationName":"linkIds" + }, + "SiteId":{ + "shape":"String", + "documentation":"

    The ID of the site.

    ", + "location":"querystring", + "locationName":"siteId" + }, + "Type":{ + "shape":"String", + "documentation":"

    The link type.

    ", + "location":"querystring", + "locationName":"type" + }, + "Provider":{ + "shape":"String", + "documentation":"

    The link provider.

    ", + "location":"querystring", + "locationName":"provider" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetLinksResponse":{ + "type":"structure", + "members":{ + "Links":{ + "shape":"LinkList", + "documentation":"

    The links.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    " + } + } + }, + "GetSitesRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "SiteIds":{ + "shape":"StringList", + "documentation":"

    One or more site IDs. The maximum is 10.

    ", + "location":"querystring", + "locationName":"siteIds" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetSitesResponse":{ + "type":"structure", + "members":{ + "Sites":{ + "shape":"SiteList", + "documentation":"

    The sites.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    " + } + } + }, + "GetTransitGatewayConnectPeerAssociationsRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "TransitGatewayConnectPeerArns":{ + "shape":"StringList", + "documentation":"

    One or more transit gateway Connect peer Amazon Resource Names (ARNs).

    ", + "location":"querystring", + "locationName":"transitGatewayConnectPeerArns" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetTransitGatewayConnectPeerAssociationsResponse":{ + "type":"structure", + "members":{ + "TransitGatewayConnectPeerAssociations":{ + "shape":"TransitGatewayConnectPeerAssociationList", + "documentation":"

    Information about the transit gateway Connect peer associations.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token to use for the next page of results.

    " + } + } + }, + "GetTransitGatewayRegistrationsRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "TransitGatewayArns":{ + "shape":"StringList", + "documentation":"

    The Amazon Resource Names (ARNs) of one or more transit gateways. The maximum is 10.

    ", + "location":"querystring", + "locationName":"transitGatewayArns" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetTransitGatewayRegistrationsResponse":{ + "type":"structure", + "members":{ + "TransitGatewayRegistrations":{ + "shape":"TransitGatewayRegistrationList", + "documentation":"

    The transit gateway registrations.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token for the next page of results.

    " + } + } + }, + "GlobalNetwork":{ + "type":"structure", + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    " + }, + "GlobalNetworkArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the global network.

    " + }, + "Description":{ + "shape":"String", + "documentation":"

    The description of the global network.

    " + }, + "CreatedAt":{ + "shape":"DateTime", + "documentation":"

    The date and time that the global network was created.

    " + }, + "State":{ + "shape":"GlobalNetworkState", + "documentation":"

    The state of the global network.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags for the global network.

    " + } + }, + "documentation":"

    Describes a global network.

    " + }, + "GlobalNetworkList":{ + "type":"list", + "member":{"shape":"GlobalNetwork"} + }, + "GlobalNetworkState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "DELETING", + "UPDATING" + ] + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"}, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

    Indicates when to retry the request.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    The request has failed due to an internal error.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "Link":{ + "type":"structure", + "members":{ + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    " + }, + "LinkArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the link.

    " + }, + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    " + }, + "SiteId":{ + "shape":"String", + "documentation":"

    The ID of the site.

    " + }, + "Description":{ + "shape":"String", + "documentation":"

    The description of the link.

    " + }, + "Type":{ + "shape":"String", + "documentation":"

    The type of the link.

    " + }, + "Bandwidth":{ + "shape":"Bandwidth", + "documentation":"

    The bandwidth for the link.

    " + }, + "Provider":{ + "shape":"String", + "documentation":"

    The provider of the link.

    " + }, + "CreatedAt":{ + "shape":"DateTime", + "documentation":"

    The date and time that the link was created.

    " + }, + "State":{ + "shape":"LinkState", + "documentation":"

    The state of the link.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags for the link.

    " + } + }, + "documentation":"

    Describes a link.

    " + }, + "LinkAssociation":{ + "type":"structure", + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    " + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The device ID for the link association.

    " + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    " + }, + "LinkAssociationState":{ + "shape":"LinkAssociationState", + "documentation":"

    The state of the association.

    " + } + }, + "documentation":"

    Describes the association between a device and a link.

    " + }, + "LinkAssociationList":{ + "type":"list", + "member":{"shape":"LinkAssociation"} + }, + "LinkAssociationState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "DELETING", + "DELETED" + ] + }, + "LinkList":{ + "type":"list", + "member":{"shape":"Link"} + }, + "LinkState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "DELETING", + "UPDATING" + ] + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceARN", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "TagList":{ + "shape":"TagList", + "documentation":"

    The list of tags.

    " + } + } + }, + "Location":{ + "type":"structure", + "members":{ + "Address":{ + "shape":"String", + "documentation":"

    The physical address.

    " + }, + "Latitude":{ + "shape":"String", + "documentation":"

    The latitude.

    " + }, + "Longitude":{ + "shape":"String", + "documentation":"

    The longitude.

    " + } + }, + "documentation":"

    Describes a location.

    ", + "sensitive":true + }, + "MaxResults":{ + "type":"integer", + "max":500, + "min":1 + }, + "RegisterTransitGatewayRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "TransitGatewayArn" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "TransitGatewayArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the transit gateway. For more information, see Resources Defined by Amazon EC2.

    " + } + } + }, + "RegisterTransitGatewayResponse":{ + "type":"structure", + "members":{ + "TransitGatewayRegistration":{ + "shape":"TransitGatewayRegistration", + "documentation":"

    Information about the transit gateway registration.

    " + } + } + }, + "ResourceARN":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource.

    " + }, + "ResourceType":{ + "shape":"String", + "documentation":"

    The resource type.

    " + } + }, + "documentation":"

    The specified resource could not be found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "RetryAfterSeconds":{"type":"integer"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "Message", + "LimitCode", + "ServiceCode" + ], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

    The error message.

    " + }, + "ResourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource.

    " + }, + "ResourceType":{ + "shape":"String", + "documentation":"

    The resource type.

    " + }, + "LimitCode":{ + "shape":"String", + "documentation":"

    The limit code.

    " + }, + "ServiceCode":{ + "shape":"String", + "documentation":"

    The service code.

    " + } + }, + "documentation":"

    A service limit was exceeded.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "Site":{ + "type":"structure", + "members":{ + "SiteId":{ + "shape":"String", + "documentation":"

    The ID of the site.

    " + }, + "SiteArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the site.

    " + }, + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    " + }, + "Description":{ + "shape":"String", + "documentation":"

    The description of the site.

    " + }, + "Location":{ + "shape":"Location", + "documentation":"

    The location of the site.

    " + }, + "CreatedAt":{ + "shape":"DateTime", + "documentation":"

    The date and time that the site was created.

    " + }, + "State":{ + "shape":"SiteState", + "documentation":"

    The state of the site.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags for the site.

    " + } + }, + "documentation":"

    Describes a site.

    " + }, + "SiteList":{ + "type":"list", + "member":{"shape":"Site"} + }, + "SiteState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "DELETING", + "UPDATING" + ] + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    The tag key.

    Length Constraints: Maximum length of 128 characters.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    The tag value.

    Length Constraints: Maximum length of 256 characters.

    " + } + }, + "documentation":"

    Describes a tag.

    " + }, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceARN", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to apply to the specified resource.
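
    A minimal sketch of tagging a Network Manager resource with this request shape (assuming the standard generated client and model classes; the ARN and tag values are placeholders):

        import software.amazon.awssdk.services.networkmanager.NetworkManagerClient;
        import software.amazon.awssdk.services.networkmanager.model.Tag;
        import software.amazon.awssdk.services.networkmanager.model.TagResourceRequest;

        final class TagResourceExample {
            static void tagResource(NetworkManagerClient client, String resourceArn) {
                // Tag keys may be up to 128 characters and values up to 256 characters.
                client.tagResource(TagResourceRequest.builder()
                        .resourceArn(resourceArn)
                        .tags(Tag.builder().key("Environment").value("test").build())
                        .build());
            }
        }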

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"}, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

    Indicates when to retry the request.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    The request was denied due to request throttling.
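
    The SDK's default retry policy already retries throttled requests, but as a sketch of how the RetryAfterSeconds member could be honored manually (assuming the generated ThrottlingException exposes it as retryAfterSeconds()):

        import java.time.Duration;
        import software.amazon.awssdk.services.networkmanager.NetworkManagerClient;
        import software.amazon.awssdk.services.networkmanager.model.GetSitesRequest;
        import software.amazon.awssdk.services.networkmanager.model.GetSitesResponse;
        import software.amazon.awssdk.services.networkmanager.model.ThrottlingException;

        final class ThrottlingBackoffExample {
            static GetSitesResponse getSitesWithBackoff(NetworkManagerClient client, String globalNetworkId)
                    throws InterruptedException {
                GetSitesRequest request = GetSitesRequest.builder().globalNetworkId(globalNetworkId).build();
                try {
                    return client.getSites(request);
                } catch (ThrottlingException e) {
                    // RetryAfterSeconds is surfaced from the Retry-After header; fall back to one second if absent.
                    long waitSeconds = e.retryAfterSeconds() != null ? e.retryAfterSeconds() : 1L;
                    Thread.sleep(Duration.ofSeconds(waitSeconds).toMillis());
                    return client.getSites(request);
                }
            }
        }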

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "TransitGatewayConnectPeerAssociation":{ + "type":"structure", + "members":{ + "TransitGatewayConnectPeerArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the transit gateway Connect peer.

    " + }, + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    " + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    " + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    " + }, + "State":{ + "shape":"TransitGatewayConnectPeerAssociationState", + "documentation":"

    The state of the association.

    " + } + }, + "documentation":"

    Describes a transit gateway Connect peer association.

    " + }, + "TransitGatewayConnectPeerAssociationList":{ + "type":"list", + "member":{"shape":"TransitGatewayConnectPeerAssociation"} + }, + "TransitGatewayConnectPeerAssociationState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "DELETING", + "DELETED" + ] + }, + "TransitGatewayRegistration":{ + "type":"structure", + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    " + }, + "TransitGatewayArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the transit gateway.

    " + }, + "State":{ + "shape":"TransitGatewayRegistrationStateReason", + "documentation":"

    The state of the transit gateway registration.

    " + } + }, + "documentation":"

    Describes the registration of a transit gateway to a global network.

    " + }, + "TransitGatewayRegistrationList":{ + "type":"list", + "member":{"shape":"TransitGatewayRegistration"} + }, + "TransitGatewayRegistrationState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "DELETING", + "DELETED", + "FAILED" + ] + }, + "TransitGatewayRegistrationStateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"TransitGatewayRegistrationState", + "documentation":"

    The code for the state reason.

    " + }, + "Message":{ + "shape":"String", + "documentation":"

    The message for the state reason.

    " + } + }, + "documentation":"

    Describes the status of a transit gateway registration.

    " + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceARN", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag keys to remove from the specified resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateConnectionRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "ConnectionId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "ConnectionId":{ + "shape":"String", + "documentation":"

    The ID of the connection.

    ", + "location":"uri", + "locationName":"connectionId" + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link for the first device in the connection.

    " + }, + "ConnectedLinkId":{ + "shape":"String", + "documentation":"

    The ID of the link for the second device in the connection.

    " + }, + "Description":{ + "shape":"String", + "documentation":"

    A description of the connection.

    Length Constraints: Maximum length of 256 characters.

    " + } + } + }, + "UpdateConnectionResponse":{ + "type":"structure", + "members":{ + "Connection":{ + "shape":"Connection", + "documentation":"

    Information about the connection.

    " + } + } + }, + "UpdateDeviceRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "DeviceId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "DeviceId":{ + "shape":"String", + "documentation":"

    The ID of the device.

    ", + "location":"uri", + "locationName":"deviceId" + }, + "AWSLocation":{ + "shape":"AWSLocation", + "documentation":"

    The AWS location of the device.

    " + }, + "Description":{ + "shape":"String", + "documentation":"

    A description of the device.

    Length Constraints: Maximum length of 256 characters.

    " + }, + "Type":{ + "shape":"String", + "documentation":"

    The type of the device.

    " + }, + "Vendor":{ + "shape":"String", + "documentation":"

    The vendor of the device.

    Length Constraints: Maximum length of 128 characters.

    " + }, + "Model":{ + "shape":"String", + "documentation":"

    The model of the device.

    Length Constraints: Maximum length of 128 characters.

    " + }, + "SerialNumber":{ + "shape":"String", + "documentation":"

    The serial number of the device.

    Length Constraints: Maximum length of 128 characters.

    " + }, + "Location":{"shape":"Location"}, + "SiteId":{ + "shape":"String", + "documentation":"

    The ID of the site.

    " + } + } + }, + "UpdateDeviceResponse":{ + "type":"structure", + "members":{ + "Device":{ + "shape":"Device", + "documentation":"

    Information about the device.

    " + } + } + }, + "UpdateGlobalNetworkRequest":{ + "type":"structure", + "required":["GlobalNetworkId"], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of your global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "Description":{ + "shape":"String", + "documentation":"

    A description of the global network.

    Length Constraints: Maximum length of 256 characters.

    " + } + } + }, + "UpdateGlobalNetworkResponse":{ + "type":"structure", + "members":{ + "GlobalNetwork":{ + "shape":"GlobalNetwork", + "documentation":"

    Information about the global network.

    " + } + } + }, + "UpdateLinkRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "LinkId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "LinkId":{ + "shape":"String", + "documentation":"

    The ID of the link.

    ", + "location":"uri", + "locationName":"linkId" + }, + "Description":{ + "shape":"String", + "documentation":"

    A description of the link.

    Length Constraints: Maximum length of 256 characters.

    " + }, + "Type":{ + "shape":"String", + "documentation":"

    The type of the link.

    Length Constraints: Maximum length of 128 characters.

    " + }, + "Bandwidth":{ + "shape":"Bandwidth", + "documentation":"

    The upload and download speed in Mbps.

    " + }, + "Provider":{ + "shape":"String", + "documentation":"

    The provider of the link.

    Length Constraints: Maximum length of 128 characters.

    " + } + } + }, + "UpdateLinkResponse":{ + "type":"structure", + "members":{ + "Link":{ + "shape":"Link", + "documentation":"

    Information about the link.

    " + } + } + }, + "UpdateSiteRequest":{ + "type":"structure", + "required":[ + "GlobalNetworkId", + "SiteId" + ], + "members":{ + "GlobalNetworkId":{ + "shape":"String", + "documentation":"

    The ID of the global network.

    ", + "location":"uri", + "locationName":"globalNetworkId" + }, + "SiteId":{ + "shape":"String", + "documentation":"

    The ID of your site.

    ", + "location":"uri", + "locationName":"siteId" + }, + "Description":{ + "shape":"String", + "documentation":"

    A description of your site.

    Length Constraints: Maximum length of 256 characters.

    " + }, + "Location":{ + "shape":"Location", + "documentation":"

    The site location:

    • Address: The physical address of the site.

    • Latitude: The latitude of the site.

    • Longitude: The longitude of the site.

    " + } + } + }, + "UpdateSiteResponse":{ + "type":"structure", + "members":{ + "Site":{ + "shape":"Site", + "documentation":"

    Information about the site.

    " + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"}, + "Reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason for the error.

    " + }, + "Fields":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    The fields that caused the error, if applicable.

    " + } + }, + "documentation":"

    The input fails to satisfy the constraints.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "Name", + "Message" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

    The name of the field.

    " + }, + "Message":{ + "shape":"String", + "documentation":"

    The message for the field.

    " + } + }, + "documentation":"

    Describes a validation exception for a field.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "UnknownOperation", + "CannotParse", + "FieldValidationFailed", + "Other" + ] + } + }, + "documentation":"

    Transit Gateway Network Manager (Network Manager) enables you to create a global network, in which you can monitor your AWS and on-premises networks that are built around transit gateways.

    The Network Manager APIs are supported in the US West (Oregon) Region only. You must specify the us-west-2 Region in all requests made to Network Manager.
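
    A minimal sketch (not part of this model) of constructing the generated client pinned to us-west-2 and creating a global network, assuming the SDK's standard code generation for this service:

        import software.amazon.awssdk.regions.Region;
        import software.amazon.awssdk.services.networkmanager.NetworkManagerClient;
        import software.amazon.awssdk.services.networkmanager.model.CreateGlobalNetworkRequest;

        public class NetworkManagerExample {
            public static void main(String[] args) {
                // Network Manager requests must target us-west-2, so pin the Region explicitly.
                try (NetworkManagerClient client = NetworkManagerClient.builder()
                        .region(Region.US_WEST_2)
                        .build()) {
                    String id = client.createGlobalNetwork(CreateGlobalNetworkRequest.builder()
                            .description("Example global network")
                            .build())
                            .globalNetwork()
                            .globalNetworkId();
                    System.out.println("Created global network " + id);
                }
            }
        }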

    " +} diff --git a/services/new-service-template/pom.xml b/services/new-service-template/pom.xml index 78d4d4349f88..2ae4673db1e7 100644 --- a/services/new-service-template/pom.xml +++ b/services/new-service-template/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + outposts + AWS Java SDK :: Services :: Outposts + The AWS Java SDK for Outposts module holds the client classes that are used for + communicating with Outposts. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.outposts + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/outposts/src/main/resources/codegen-resources/paginators-1.json b/services/outposts/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..a22e9d094c75 --- /dev/null +++ b/services/outposts/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,14 @@ +{ + "pagination": { + "ListOutposts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListSites": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/outposts/src/main/resources/codegen-resources/service-2.json b/services/outposts/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..3e8da5e66b8e --- /dev/null +++ b/services/outposts/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,627 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-12-03", + "endpointPrefix":"outposts", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Outposts", + "serviceFullName":"AWS Outposts", + "serviceId":"Outposts", + "signatureVersion":"v4", + "signingName":"outposts", + "uid":"outposts-2019-12-03" + }, + "operations":{ + "CreateOutpost":{ + "name":"CreateOutpost", + "http":{ + "method":"POST", + "requestUri":"/outposts" + }, + "input":{"shape":"CreateOutpostInput"}, + "output":{"shape":"CreateOutpostOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates an Outpost.

    " + }, + "DeleteOutpost":{ + "name":"DeleteOutpost", + "http":{ + "method":"DELETE", + "requestUri":"/outposts/{OutpostId}" + }, + "input":{"shape":"DeleteOutpostInput"}, + "output":{"shape":"DeleteOutpostOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the Outpost.

    " + }, + "DeleteSite":{ + "name":"DeleteSite", + "http":{ + "method":"DELETE", + "requestUri":"/sites/{SiteId}" + }, + "input":{"shape":"DeleteSiteInput"}, + "output":{"shape":"DeleteSiteOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the site.

    " + }, + "GetOutpost":{ + "name":"GetOutpost", + "http":{ + "method":"GET", + "requestUri":"/outposts/{OutpostId}" + }, + "input":{"shape":"GetOutpostInput"}, + "output":{"shape":"GetOutpostOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets information about the specified Outpost.

    " + }, + "GetOutpostInstanceTypes":{ + "name":"GetOutpostInstanceTypes", + "http":{ + "method":"GET", + "requestUri":"/outposts/{OutpostId}/instanceTypes" + }, + "input":{"shape":"GetOutpostInstanceTypesInput"}, + "output":{"shape":"GetOutpostInstanceTypesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the instance types for the specified Outpost.

    " + }, + "ListOutposts":{ + "name":"ListOutposts", + "http":{ + "method":"GET", + "requestUri":"/outposts" + }, + "input":{"shape":"ListOutpostsInput"}, + "output":{"shape":"ListOutpostsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the Outposts for your AWS account.
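
    Because ListOutposts is declared in paginators-1.json above, the generated client should expose a paginator that follows NextToken automatically; a sketch (assuming the standard generated outposts client and model classes):

        import software.amazon.awssdk.services.outposts.OutpostsClient;
        import software.amazon.awssdk.services.outposts.model.ListOutpostsRequest;
        import software.amazon.awssdk.services.outposts.model.Outpost;

        final class ListOutpostsExample {
            static void printOutpostIds(OutpostsClient client) {
                ListOutpostsRequest request = ListOutpostsRequest.builder().maxResults(100).build();
                // The paginator requests additional pages as the stream is consumed.
                client.listOutpostsPaginator(request).stream()
                        .flatMap(response -> response.outposts().stream())
                        .map(Outpost::outpostId)
                        .forEach(System.out::println);
            }
        }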

    " + }, + "ListSites":{ + "name":"ListSites", + "http":{ + "method":"GET", + "requestUri":"/sites" + }, + "input":{"shape":"ListSitesInput"}, + "output":{"shape":"ListSitesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the sites for the specified AWS account.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Lists the tags for the specified resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Adds tags to the specified resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Removes tags from the specified resource.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You do not have permission to perform this operation.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccountId":{ + "type":"string", + "documentation":"

    The ID of the AWS account.

    ", + "max":12, + "min":12 + }, + "Arn":{ + "type":"string", + "max":1011, + "pattern":"^(arn:aws([a-z-]+)?:outposts:[a-z\\d-]+:\\d{12}:([a-z\\d-]+)/)[a-z]{2,8}-[a-f0-9]{17}$" + }, + "AvailabilityZone":{ + "type":"string", + "documentation":"

    The Availability Zone.

    You must specify AvailabilityZone or AvailabilityZoneId.

    ", + "max":1000, + "min":1, + "pattern":"[a-z\\d-]+" + }, + "AvailabilityZoneId":{ + "type":"string", + "documentation":"

    The ID of the Availability Zone.

    You must specify AvailabilityZone or AvailabilityZoneId.

    ", + "max":255, + "min":1, + "pattern":"[a-z]+[0-9]+-az[0-9]+" + }, + "CreateOutpostInput":{ + "type":"structure", + "required":[ + "Name", + "SiteId" + ], + "members":{ + "Name":{"shape":"OutpostName"}, + "Description":{"shape":"OutpostDescription"}, + "SiteId":{"shape":"SiteId"}, + "AvailabilityZone":{"shape":"AvailabilityZone"}, + "AvailabilityZoneId":{"shape":"AvailabilityZoneId"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags to apply to the Outpost.

    " + } + } + }, + "CreateOutpostOutput":{ + "type":"structure", + "members":{ + "Outpost":{"shape":"Outpost"} + } + }, + "DeleteOutpostInput":{ + "type":"structure", + "required":["OutpostId"], + "members":{ + "OutpostId":{ + "shape":"OutpostId", + "location":"uri", + "locationName":"OutpostId" + } + } + }, + "DeleteOutpostOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteSiteInput":{ + "type":"structure", + "required":["SiteId"], + "members":{ + "SiteId":{ + "shape":"SiteId", + "location":"uri", + "locationName":"SiteId" + } + } + }, + "DeleteSiteOutput":{ + "type":"structure", + "members":{ + } + }, + "ErrorMessage":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^[\\S \\n]+$" + }, + "GetOutpostInput":{ + "type":"structure", + "required":["OutpostId"], + "members":{ + "OutpostId":{ + "shape":"OutpostId", + "location":"uri", + "locationName":"OutpostId" + } + } + }, + "GetOutpostInstanceTypesInput":{ + "type":"structure", + "required":["OutpostId"], + "members":{ + "OutpostId":{ + "shape":"OutpostId", + "location":"uri", + "locationName":"OutpostId" + }, + "NextToken":{ + "shape":"Token", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults1000", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "GetOutpostInstanceTypesOutput":{ + "type":"structure", + "members":{ + "InstanceTypes":{"shape":"InstanceTypeListDefinition"}, + "NextToken":{"shape":"Token"}, + "OutpostId":{"shape":"OutpostId"}, + "OutpostArn":{"shape":"OutpostArn"} + } + }, + "GetOutpostOutput":{ + "type":"structure", + "members":{ + "Outpost":{"shape":"Outpost"} + } + }, + "InstanceType":{ + "type":"string", + "documentation":"

    The instance type.

    " + }, + "InstanceTypeItem":{ + "type":"structure", + "members":{ + "InstanceType":{"shape":"InstanceType"} + }, + "documentation":"

    Information about an instance type.

    " + }, + "InstanceTypeListDefinition":{ + "type":"list", + "member":{"shape":"InstanceTypeItem"}, + "documentation":"

    Information about the instance types.

    " + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An internal error has occurred.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "LifeCycleStatus":{ + "type":"string", + "documentation":"

    The life cycle status.

    " + }, + "ListOutpostsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults1000", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListOutpostsOutput":{ + "type":"structure", + "members":{ + "Outposts":{"shape":"outpostListDefinition"}, + "NextToken":{"shape":"Token"} + } + }, + "ListSitesInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults1000", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListSitesOutput":{ + "type":"structure", + "members":{ + "Sites":{"shape":"siteListDefinition"}, + "NextToken":{"shape":"Token"} + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

    The resource tags.

    " + } + } + }, + "MaxResults1000":{ + "type":"integer", + "documentation":"

    The maximum page size.

    ", + "box":true, + "max":1000, + "min":1 + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The specified resource was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "Outpost":{ + "type":"structure", + "members":{ + "OutpostId":{"shape":"OutpostId"}, + "OwnerId":{"shape":"OwnerId"}, + "OutpostArn":{"shape":"OutpostArn"}, + "SiteId":{"shape":"SiteId"}, + "Name":{"shape":"OutpostName"}, + "Description":{"shape":"OutpostDescription"}, + "LifeCycleStatus":{"shape":"LifeCycleStatus"}, + "AvailabilityZone":{"shape":"AvailabilityZone"}, + "AvailabilityZoneId":{"shape":"AvailabilityZoneId"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The Outpost tags.

    " + } + }, + "documentation":"

    Information about an Outpost.

    " + }, + "OutpostArn":{ + "type":"string", + "documentation":"

    The Amazon Resource Name (ARN) of the Outpost.

    ", + "max":255, + "min":1, + "pattern":"^arn:aws([a-z-]+)?:outposts:[a-z\\d-]+:\\d{12}:outpost/op-[a-f0-9]{17}$" + }, + "OutpostDescription":{ + "type":"string", + "documentation":"

    The description of the Outpost.

    ", + "max":1000, + "min":1, + "pattern":"^[\\S ]+$" + }, + "OutpostId":{ + "type":"string", + "documentation":"

    The ID of the Outpost.

    ", + "max":180, + "min":1, + "pattern":"^(arn:aws([a-z-]+)?:outposts:[a-z\\d-]+:\\d{12}:outpost/)?op-[a-f0-9]{17}$" + }, + "OutpostName":{ + "type":"string", + "documentation":"

    The name of the Outpost.

    ", + "max":255, + "min":1, + "pattern":"^[\\S ]+$" + }, + "OwnerId":{ + "type":"string", + "documentation":"

    The AWS account ID of the Outpost owner.

    ", + "max":12, + "min":12, + "pattern":"\\d{12}" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You have exceeded a service quota.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "Site":{ + "type":"structure", + "members":{ + "SiteId":{"shape":"SiteId"}, + "AccountId":{"shape":"AccountId"}, + "Name":{"shape":"SiteName"}, + "Description":{"shape":"SiteDescription"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The site tags.

    " + } + }, + "documentation":"

    Information about a site.

    " + }, + "SiteDescription":{ + "type":"string", + "documentation":"

    The description of the site.

    ", + "max":1000, + "min":1, + "pattern":"^[\\S ]+$" + }, + "SiteId":{ + "type":"string", + "documentation":"

    The ID of the site.

    ", + "max":255, + "min":1, + "pattern":"os-[a-f0-9]{17}" + }, + "SiteName":{ + "type":"string", + "documentation":"

    The name of the site.

    ", + "max":1000, + "min":1, + "pattern":"^[\\S ]+$" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags to add to the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "pattern":"^[\\S \\n]+$" + }, + "Token":{ + "type":"string", + "documentation":"

    The pagination token.

    ", + "max":1005, + "min":1, + "pattern":".*\\S.*" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag keys.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    A parameter is not valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "outpostListDefinition":{ + "type":"list", + "member":{"shape":"Outpost"}, + "documentation":"

    Information about the Outposts.

    " + }, + "siteListDefinition":{ + "type":"list", + "member":{"shape":"Site"}, + "documentation":"

    Information about the sites.

    " + } + }, + "documentation":"

    AWS Outposts is a fully managed service that extends AWS infrastructure, APIs, and tools to customer premises. By providing local access to AWS managed infrastructure, AWS Outposts enables customers to build and run applications on premises using the same programming interfaces as in AWS Regions, while using local compute and storage resources for lower latency and local data processing needs.
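    For illustration only: a minimal AWS SDK for Java v2 sketch of how a client generated from this model might be used, assuming the SDK's usual codegen conventions (OutpostsClient, builder-style requests). The Region, names, and IDs below are placeholders, not values from this model.

    // Hypothetical usage sketch; assumes the standard generated OutpostsClient
    // and builder-style request classes produced from this model.
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.outposts.OutpostsClient;
    import software.amazon.awssdk.services.outposts.model.CreateOutpostRequest;
    import software.amazon.awssdk.services.outposts.model.ListOutpostsRequest;

    public class OutpostsExample {
        public static void main(String[] args) {
            OutpostsClient outposts = OutpostsClient.builder()
                    .region(Region.US_WEST_2)            // placeholder Region
                    .build();

            // CreateOutpost requires Name and SiteId; AvailabilityZone or
            // AvailabilityZoneId must also be specified (see the shapes above).
            outposts.createOutpost(CreateOutpostRequest.builder()
                    .name("example-outpost")             // placeholder name
                    .siteId("os-0123456789abcdef0")      // placeholder site ID
                    .availabilityZone("us-west-2a")      // placeholder Availability Zone
                    .build());

            // ListOutposts is paginated via NextToken/MaxResults.
            outposts.listOutposts(ListOutpostsRequest.builder().maxResults(100).build())
                    .outposts()
                    .forEach(o -> System.out.println(o.outpostId() + " " + o.lifeCycleStatus()));
        }
    }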

    " +} diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index 8f47c0f08968..01c39ebfdc8e 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -1,6 +1,6 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT services AWS Java SDK :: Services pom - - ../.. - acm apigateway @@ -93,6 +103,7 @@ route53 route53domains s3 + s3control sms servicecatalog ses @@ -194,6 +205,77 @@ applicationinsights ec2instanceconnect eventbridge + lakeformation + forecast + forecastquery + qldb + qldbsession + workmailmessageflow + codestarnotifications + savingsplans + sso + ssooidc + marketplacecatalog + sesv2 + dataexchange + migrationhubconfig + connectparticipant + wafv2 + appconfig + iotsecuretunneling + elasticinference + imagebuilder + schemas + accessanalyzer + computeoptimizer + networkmanager + kendra + frauddetector + codegurureviewer + codeguruprofiler + outposts + sagemakera2iruntime + ebs + kinesisvideosignaling + detective + codestarconnections + synthetics + iotsitewise + macie2 + codeartifact + honeycode + ivs + braket + identitystore + appflow + redshiftdata + ssoadmin + timestreamwrite + timestreamquery + s3outposts + databrew + servicecatalogappregistry + networkfirewall + mwaa + devopsguru + sagemakerfeaturestoreruntime + appintegrations + ecrpublic + amplifybackend + connectcontactlens + lookoutvision + customerprofiles + emrcontainers + sagemakeredge + healthlake + auditmanager + amp + greengrassv2 + iotwireless + iotfleethub + iotdeviceadvisor + location + wellarchitected The AWS Java SDK services https://aws.amazon.com/sdkforjava @@ -244,6 +326,11 @@ software.amazon.awssdk ${awsjavasdk.version} + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + apache-client software.amazon.awssdk diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index 829b0f0403a0..f5d83c0be0c4 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + qldb + AWS Java SDK :: Services :: QLDB + The AWS Java SDK for QLDB module holds the client classes that are used for + communicating with QLDB. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.qldb + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/qldb/src/main/resources/codegen-resources/paginators-1.json b/services/qldb/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..dcebff0861bd --- /dev/null +++ b/services/qldb/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,24 @@ +{ + "pagination": { + "ListJournalKinesisStreamsForLedger": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListJournalS3Exports": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListJournalS3ExportsForLedger": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListLedgers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/qldb/src/main/resources/codegen-resources/service-2.json b/services/qldb/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..e60515dbb911 --- /dev/null +++ b/services/qldb/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1345 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-01-02", + "endpointPrefix":"qldb", + "jsonVersion":"1.0", + "protocol":"rest-json", + "serviceAbbreviation":"QLDB", + "serviceFullName":"Amazon QLDB", + "serviceId":"QLDB", + "signatureVersion":"v4", + "signingName":"qldb", + "uid":"qldb-2019-01-02" + }, + "operations":{ + "CancelJournalKinesisStream":{ + "name":"CancelJournalKinesisStream", + "http":{ + "method":"DELETE", + "requestUri":"/ledgers/{name}/journal-kinesis-streams/{streamId}" + }, + "input":{"shape":"CancelJournalKinesisStreamRequest"}, + "output":{"shape":"CancelJournalKinesisStreamResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

    Ends a given Amazon QLDB journal stream. Before a stream can be canceled, its current status must be ACTIVE.

    You can't restart a stream after you cancel it. Canceled QLDB stream resources are subject to a 7-day retention period, so they are automatically deleted after this limit expires.

    " + }, + "CreateLedger":{ + "name":"CreateLedger", + "http":{ + "method":"POST", + "requestUri":"/ledgers" + }, + "input":{"shape":"CreateLedgerRequest"}, + "output":{"shape":"CreateLedgerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Creates a new ledger in your AWS account.
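    For illustration, a minimal sketch of calling this operation with the AWS SDK for Java v2, assuming the client and model classes generated from this file; the Region, ledger name, and tag below are placeholders.

    // Hypothetical sketch only; assumes the QLDB client generated from this model.
    import java.util.Collections;
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.qldb.QldbClient;
    import software.amazon.awssdk.services.qldb.model.CreateLedgerRequest;
    import software.amazon.awssdk.services.qldb.model.CreateLedgerResponse;
    import software.amazon.awssdk.services.qldb.model.PermissionsMode;

    public class CreateLedgerExample {
        public static void main(String[] args) {
            QldbClient qldb = QldbClient.builder().region(Region.US_EAST_1).build(); // placeholder Region

            CreateLedgerResponse created = qldb.createLedger(CreateLedgerRequest.builder()
                    .name("vehicle-registration")                          // placeholder ledger name
                    .permissionsMode(PermissionsMode.ALLOW_ALL)            // only mode defined in this model version
                    .deletionProtection(true)                              // the default when omitted
                    .tags(Collections.singletonMap("department", "dmv"))   // optional placeholder tag
                    .build());

            System.out.println(created.arn() + " -> " + created.state()); // typically CREATING at first
        }
    }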

    " + }, + "DeleteLedger":{ + "name":"DeleteLedger", + "http":{ + "method":"DELETE", + "requestUri":"/ledgers/{name}" + }, + "input":{"shape":"DeleteLedgerRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

    Deletes a ledger and all of its contents. This action is irreversible.

    If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.
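    For illustration, a minimal sketch of the two-step flow described above (disable deletion protection, then delete), assuming the generated v2 client; the ledger name and Region are placeholders.

    // Hypothetical sketch; assumes the generated QLDB v2 client. Names are placeholders.
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.qldb.QldbClient;
    import software.amazon.awssdk.services.qldb.model.DeleteLedgerRequest;
    import software.amazon.awssdk.services.qldb.model.UpdateLedgerRequest;

    public class DeleteLedgerExample {
        public static void main(String[] args) {
            QldbClient qldb = QldbClient.builder().region(Region.US_EAST_1).build();

            // Step 1: disable deletion protection (a no-op if it is already false).
            qldb.updateLedger(UpdateLedgerRequest.builder()
                    .name("vehicle-registration")
                    .deletionProtection(false)
                    .build());

            // Step 2: delete the ledger and all of its contents (irreversible).
            qldb.deleteLedger(DeleteLedgerRequest.builder()
                    .name("vehicle-registration")
                    .build());
        }
    }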

    " + }, + "DescribeJournalKinesisStream":{ + "name":"DescribeJournalKinesisStream", + "http":{ + "method":"GET", + "requestUri":"/ledgers/{name}/journal-kinesis-streams/{streamId}" + }, + "input":{"shape":"DescribeJournalKinesisStreamRequest"}, + "output":{"shape":"DescribeJournalKinesisStreamResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

    Returns detailed information about a given Amazon QLDB journal stream. The output includes the Amazon Resource Name (ARN), stream name, current status, creation time, and the parameters of your original stream creation request.

    " + }, + "DescribeJournalS3Export":{ + "name":"DescribeJournalS3Export", + "http":{ + "method":"GET", + "requestUri":"/ledgers/{name}/journal-s3-exports/{exportId}" + }, + "input":{"shape":"DescribeJournalS3ExportRequest"}, + "output":{"shape":"DescribeJournalS3ExportResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns information about a journal export job, including the ledger name, export ID, when it was created, current status, and its start and end time export parameters.

    This action does not return any expired export jobs. For more information, see Export Job Expiration in the Amazon QLDB Developer Guide.

    If the export job with the given ExportId doesn't exist, this operation throws ResourceNotFoundException.

    If the ledger with the given Name doesn't exist, this operation throws ResourceNotFoundException.

    " + }, + "DescribeLedger":{ + "name":"DescribeLedger", + "http":{ + "method":"GET", + "requestUri":"/ledgers/{name}" + }, + "input":{"shape":"DescribeLedgerRequest"}, + "output":{"shape":"DescribeLedgerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns information about a ledger, including its state and when it was created.

    " + }, + "ExportJournalToS3":{ + "name":"ExportJournalToS3", + "http":{ + "method":"POST", + "requestUri":"/ledgers/{name}/journal-s3-exports" + }, + "input":{"shape":"ExportJournalToS3Request"}, + "output":{"shape":"ExportJournalToS3Response"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

    Exports journal contents within a date and time range from a ledger into a specified Amazon Simple Storage Service (Amazon S3) bucket. The data is written as files in Amazon Ion format.

    If the ledger with the given Name doesn't exist, this operation throws ResourceNotFoundException.

    If the ledger with the given Name is in CREATING status, this operation throws ResourcePreconditionNotMetException.

    You can initiate up to two concurrent journal export requests for each ledger. Beyond this limit, journal export requests throw LimitExceededException.
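    For illustration, a minimal sketch of starting an export with the generated v2 client, under the assumption that the usual codegen conventions apply; the ledger name, time range, role ARN, and bucket are placeholders.

    // Hypothetical sketch; assumes the generated QLDB v2 client. The ledger name,
    // bucket, prefix, and role ARN are placeholders, not values from this model.
    import java.time.Instant;
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.qldb.QldbClient;
    import software.amazon.awssdk.services.qldb.model.ExportJournalToS3Request;
    import software.amazon.awssdk.services.qldb.model.ExportJournalToS3Response;
    import software.amazon.awssdk.services.qldb.model.S3EncryptionConfiguration;
    import software.amazon.awssdk.services.qldb.model.S3ExportConfiguration;
    import software.amazon.awssdk.services.qldb.model.S3ObjectEncryptionType;

    public class ExportJournalExample {
        public static void main(String[] args) {
            QldbClient qldb = QldbClient.builder().region(Region.US_EAST_1).build();

            ExportJournalToS3Response export = qldb.exportJournalToS3(ExportJournalToS3Request.builder()
                    .name("vehicle-registration")
                    .inclusiveStartTime(Instant.parse("2019-06-13T00:00:00Z")) // must be before ExclusiveEndTime
                    .exclusiveEndTime(Instant.parse("2019-06-14T00:00:00Z"))   // must not be in the future
                    .roleArn("arn:aws:iam::123456789012:role/qldb-journal-export") // placeholder role
                    .s3ExportConfiguration(S3ExportConfiguration.builder()
                            .bucket("example-journal-exports")                 // placeholder bucket
                            .prefix("JournalExports-ForMyLedger/Testing/")
                            .encryptionConfiguration(S3EncryptionConfiguration.builder()
                                    .objectEncryptionType(S3ObjectEncryptionType.SSE_S3)
                                    .build())
                            .build())
                    .build());

            // Use the returned ExportId with DescribeJournalS3Export to poll the job status.
            System.out.println("ExportId: " + export.exportId());
        }
    }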

    " + }, + "GetBlock":{ + "name":"GetBlock", + "http":{ + "method":"POST", + "requestUri":"/ledgers/{name}/block" + }, + "input":{"shape":"GetBlockRequest"}, + "output":{"shape":"GetBlockResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

    Returns a block object at a specified address in a journal. Also returns a proof of the specified block for verification if DigestTipAddress is provided.

    For information about the data contents in a block, see Journal contents in the Amazon QLDB Developer Guide.

    If the specified ledger doesn't exist or is in DELETING status, this operation throws ResourceNotFoundException.

    If the specified ledger is in CREATING status, this operation throws ResourcePreconditionNotMetException.

    If no block exists with the specified address, this operation throws InvalidParameterException.
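    For illustration, a minimal sketch of requesting a block (and a proof) with the generated v2 client; the ledger name is a placeholder, and the block addresses reuse the example Ion values shown above.

    // Hypothetical sketch; assumes the generated QLDB v2 client.
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.qldb.QldbClient;
    import software.amazon.awssdk.services.qldb.model.GetBlockRequest;
    import software.amazon.awssdk.services.qldb.model.GetBlockResponse;
    import software.amazon.awssdk.services.qldb.model.ValueHolder;

    public class GetBlockExample {
        public static void main(String[] args) {
            QldbClient qldb = QldbClient.builder().region(Region.US_EAST_1).build();

            GetBlockResponse block = qldb.getBlock(GetBlockRequest.builder()
                    .name("vehicle-registration")
                    .blockAddress(ValueHolder.builder()
                            .ionText("{strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:14}")
                            .build())
                    // Supplying DigestTipAddress additionally returns a Proof for verification.
                    .digestTipAddress(ValueHolder.builder()
                            .ionText("{strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:49}")
                            .build())
                    .build());

            System.out.println(block.block().ionText()); // the block data, in Amazon Ion text
        }
    }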

    " + }, + "GetDigest":{ + "name":"GetDigest", + "http":{ + "method":"POST", + "requestUri":"/ledgers/{name}/digest" + }, + "input":{"shape":"GetDigestRequest"}, + "output":{"shape":"GetDigestResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

    Returns the digest of a ledger at the latest committed block in the journal. The response includes a 256-bit hash value and a block address.
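    For illustration, a minimal sketch of fetching a digest with the generated v2 client; the Region and ledger name are placeholders.

    // Hypothetical sketch; assumes the generated QLDB v2 client and a placeholder ledger name.
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.qldb.QldbClient;
    import software.amazon.awssdk.services.qldb.model.GetDigestRequest;
    import software.amazon.awssdk.services.qldb.model.GetDigestResponse;

    public class GetDigestExample {
        public static void main(String[] args) {
            QldbClient qldb = QldbClient.builder().region(Region.US_EAST_1).build();

            GetDigestResponse digest = qldb.getDigest(GetDigestRequest.builder()
                    .name("vehicle-registration")
                    .build());

            // Digest is a 32-byte (256-bit) hash; DigestTipAddress is the covered block location.
            System.out.println(digest.digest().asByteArray().length + " bytes");
            System.out.println(digest.digestTipAddress().ionText());
        }
    }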

    " + }, + "GetRevision":{ + "name":"GetRevision", + "http":{ + "method":"POST", + "requestUri":"/ledgers/{name}/revision" + }, + "input":{"shape":"GetRevisionRequest"}, + "output":{"shape":"GetRevisionResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

    Returns a revision data object for a specified document ID and block address. Also returns a proof of the specified revision for verification if DigestTipAddress is provided.

    " + }, + "ListJournalKinesisStreamsForLedger":{ + "name":"ListJournalKinesisStreamsForLedger", + "http":{ + "method":"GET", + "requestUri":"/ledgers/{name}/journal-kinesis-streams" + }, + "input":{"shape":"ListJournalKinesisStreamsForLedgerRequest"}, + "output":{"shape":"ListJournalKinesisStreamsForLedgerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

    Returns an array of all Amazon QLDB journal stream descriptors for a given ledger. The output of each stream descriptor includes the same details that are returned by DescribeJournalKinesisStream.

    This action returns a maximum of MaxResults items. It is paginated so that you can retrieve all the items by calling ListJournalKinesisStreamsForLedger multiple times.

    " + }, + "ListJournalS3Exports":{ + "name":"ListJournalS3Exports", + "http":{ + "method":"GET", + "requestUri":"/journal-s3-exports" + }, + "input":{"shape":"ListJournalS3ExportsRequest"}, + "output":{"shape":"ListJournalS3ExportsResponse"}, + "documentation":"

    Returns an array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

    This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3Exports multiple times.

    This action does not return any expired export jobs. For more information, see Export Job Expiration in the Amazon QLDB Developer Guide.

    " + }, + "ListJournalS3ExportsForLedger":{ + "name":"ListJournalS3ExportsForLedger", + "http":{ + "method":"GET", + "requestUri":"/ledgers/{name}/journal-s3-exports" + }, + "input":{"shape":"ListJournalS3ExportsForLedgerRequest"}, + "output":{"shape":"ListJournalS3ExportsForLedgerResponse"}, + "documentation":"

    Returns an array of journal export job descriptions for a specified ledger.

    This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3ExportsForLedger multiple times.

    This action does not return any expired export jobs. For more information, see Export Job Expiration in the Amazon QLDB Developer Guide.

    " + }, + "ListLedgers":{ + "name":"ListLedgers", + "http":{ + "method":"GET", + "requestUri":"/ledgers" + }, + "input":{"shape":"ListLedgersRequest"}, + "output":{"shape":"ListLedgersResponse"}, + "documentation":"

    Returns an array of ledger summaries that are associated with the current AWS account and Region.

    This action returns a maximum of 100 items and is paginated so that you can retrieve all the items by calling ListLedgers multiple times.
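    For illustration, a minimal sketch of the NextToken pagination loop described above, assuming the generated v2 client (the codegen also typically emits a paginator helper for the operations listed in paginators-1.json); the Region is a placeholder.

    // Hypothetical sketch; assumes the generated QLDB v2 client.
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.qldb.QldbClient;
    import software.amazon.awssdk.services.qldb.model.ListLedgersRequest;
    import software.amazon.awssdk.services.qldb.model.ListLedgersResponse;

    public class ListLedgersExample {
        public static void main(String[] args) {
            QldbClient qldb = QldbClient.builder().region(Region.US_EAST_1).build();

            String nextToken = null;
            do {
                ListLedgersResponse page = qldb.listLedgers(ListLedgersRequest.builder()
                        .maxResults(100)         // the per-call maximum for this operation
                        .nextToken(nextToken)    // null on the first call
                        .build());
                page.ledgers().forEach(l ->
                        System.out.println(l.name() + " " + l.state() + " " + l.creationDateTime()));
                nextToken = page.nextToken();    // empty when the last page has been read
            } while (nextToken != null && !nextToken.isEmpty());
        }
    }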

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns all tags for a specified Amazon QLDB resource.

    " + }, + "StreamJournalToKinesis":{ + "name":"StreamJournalToKinesis", + "http":{ + "method":"POST", + "requestUri":"/ledgers/{name}/journal-kinesis-streams" + }, + "input":{"shape":"StreamJournalToKinesisRequest"}, + "output":{"shape":"StreamJournalToKinesisResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

    Creates a journal stream for a given Amazon QLDB ledger. The stream captures every document revision that is committed to the ledger's journal and delivers the data to a specified Amazon Kinesis Data Streams resource.
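    For illustration, a minimal sketch of starting a journal stream with the generated v2 client; the ledger name, stream name, role ARN, and Kinesis stream ARN are placeholders.

    // Hypothetical sketch; assumes the generated QLDB v2 client. All names and ARNs are placeholders.
    import java.time.Instant;
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.qldb.QldbClient;
    import software.amazon.awssdk.services.qldb.model.KinesisConfiguration;
    import software.amazon.awssdk.services.qldb.model.StreamJournalToKinesisRequest;
    import software.amazon.awssdk.services.qldb.model.StreamJournalToKinesisResponse;

    public class StreamJournalExample {
        public static void main(String[] args) {
            QldbClient qldb = QldbClient.builder().region(Region.US_EAST_1).build();

            StreamJournalToKinesisResponse stream = qldb.streamJournalToKinesis(
                    StreamJournalToKinesisRequest.builder()
                            .ledgerName("vehicle-registration")
                            .streamName("registration-stream")              // same naming rules as ledgers
                            .roleArn("arn:aws:iam::123456789012:role/qldb-kinesis")  // placeholder role
                            .inclusiveStartTime(Instant.parse("2019-06-13T21:36:34Z"))
                            // Omitting ExclusiveEndTime keeps the stream running until it is canceled.
                            .kinesisConfiguration(KinesisConfiguration.builder()
                                    .streamArn("arn:aws:kinesis:us-east-1:123456789012:stream/qldb-journal") // placeholder
                                    .aggregationEnabled(true)
                                    .build())
                            .build());

            System.out.println("StreamId: " + stream.streamId());
        }
    }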

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Adds one or more tags to a specified Amazon QLDB resource.

    A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, your request fails and returns an error.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Removes one or more tags from a specified Amazon QLDB resource. You can specify up to 50 tag keys to remove.

    " + }, + "UpdateLedger":{ + "name":"UpdateLedger", + "http":{ + "method":"PATCH", + "requestUri":"/ledgers/{name}" + }, + "input":{"shape":"UpdateLedgerRequest"}, + "output":{"shape":"UpdateLedgerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Updates properties on a ledger.

    " + } + }, + "shapes":{ + "Arn":{ + "type":"string", + "max":1600, + "min":20 + }, + "Boolean":{"type":"boolean"}, + "CancelJournalKinesisStreamRequest":{ + "type":"structure", + "required":[ + "LedgerName", + "StreamId" + ], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + }, + "StreamId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID that QLDB assigns to each QLDB journal stream.

    ", + "location":"uri", + "locationName":"streamId" + } + } + }, + "CancelJournalKinesisStreamResponse":{ + "type":"structure", + "members":{ + "StreamId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID that QLDB assigns to each QLDB journal stream.

    " + } + } + }, + "CreateLedgerRequest":{ + "type":"structure", + "required":[ + "Name", + "PermissionsMode" + ], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger that you want to create. The name must be unique among all of your ledgers in the current AWS Region.

    Naming constraints for ledger names are defined in Quotas in Amazon QLDB in the Amazon QLDB Developer Guide.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The key-value pairs to add as tags to the ledger that you want to create. Tag keys are case sensitive. Tag values are case sensitive and can be null.

    " + }, + "PermissionsMode":{ + "shape":"PermissionsMode", + "documentation":"

    The permissions mode to assign to the ledger that you want to create.

    " + }, + "DeletionProtection":{ + "shape":"DeletionProtection", + "documentation":"

    The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

    If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

    " + } + } + }, + "CreateLedgerResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    " + }, + "Arn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the ledger.

    " + }, + "State":{ + "shape":"LedgerState", + "documentation":"

    The current status of the ledger.

    " + }, + "CreationDateTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

    " + }, + "DeletionProtection":{ + "shape":"DeletionProtection", + "documentation":"

    The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

    If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

    " + } + } + }, + "DeleteLedgerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger that you want to delete.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeletionProtection":{"type":"boolean"}, + "DescribeJournalKinesisStreamRequest":{ + "type":"structure", + "required":[ + "LedgerName", + "StreamId" + ], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + }, + "StreamId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID that QLDB assigns to each QLDB journal stream.

    ", + "location":"uri", + "locationName":"streamId" + } + } + }, + "DescribeJournalKinesisStreamResponse":{ + "type":"structure", + "members":{ + "Stream":{ + "shape":"JournalKinesisStreamDescription", + "documentation":"

    Information about the QLDB journal stream returned by a DescribeJournalKinesisStream request.

    " + } + } + }, + "DescribeJournalS3ExportRequest":{ + "type":"structure", + "required":[ + "Name", + "ExportId" + ], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + }, + "ExportId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID of the journal export job that you want to describe.

    ", + "location":"uri", + "locationName":"exportId" + } + } + }, + "DescribeJournalS3ExportResponse":{ + "type":"structure", + "required":["ExportDescription"], + "members":{ + "ExportDescription":{ + "shape":"JournalS3ExportDescription", + "documentation":"

    Information about the journal export job returned by a DescribeJournalS3Export request.

    " + } + } + }, + "DescribeLedgerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger that you want to describe.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeLedgerResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    " + }, + "Arn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the ledger.

    " + }, + "State":{ + "shape":"LedgerState", + "documentation":"

    The current status of the ledger.

    " + }, + "CreationDateTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

    " + }, + "DeletionProtection":{ + "shape":"DeletionProtection", + "documentation":"

    The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

    If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

    " + } + } + }, + "Digest":{ + "type":"blob", + "max":32, + "min":32 + }, + "ErrorCause":{ + "type":"string", + "enum":[ + "KINESIS_STREAM_NOT_FOUND", + "IAM_PERMISSION_REVOKED" + ] + }, + "ErrorMessage":{"type":"string"}, + "ExportJournalToS3Request":{ + "type":"structure", + "required":[ + "Name", + "InclusiveStartTime", + "ExclusiveEndTime", + "S3ExportConfiguration", + "RoleArn" + ], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + }, + "InclusiveStartTime":{ + "shape":"Timestamp", + "documentation":"

    The inclusive start date and time for the range of journal contents that you want to export.

    The InclusiveStartTime must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z

    The InclusiveStartTime must be before ExclusiveEndTime.

    If you provide an InclusiveStartTime that is before the ledger's CreationDateTime, Amazon QLDB defaults it to the ledger's CreationDateTime.

    " + }, + "ExclusiveEndTime":{ + "shape":"Timestamp", + "documentation":"

    The exclusive end date and time for the range of journal contents that you want to export.

    The ExclusiveEndTime must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z

    The ExclusiveEndTime must be less than or equal to the current UTC date and time.

    " + }, + "S3ExportConfiguration":{ + "shape":"S3ExportConfiguration", + "documentation":"

    The configuration settings of the Amazon S3 bucket destination for your export request.

    " + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

    • Write objects into your Amazon Simple Storage Service (Amazon S3) bucket.

    • (Optional) Use your customer master key (CMK) in AWS Key Management Service (AWS KMS) for server-side encryption of your exported data.

    " + } + } + }, + "ExportJournalToS3Response":{ + "type":"structure", + "required":["ExportId"], + "members":{ + "ExportId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID that QLDB assigns to each journal export job.

    To describe your export request and check the status of the job, you can use ExportId to call DescribeJournalS3Export.

    " + } + } + }, + "ExportStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "COMPLETED", + "CANCELLED" + ] + }, + "GetBlockRequest":{ + "type":"structure", + "required":[ + "Name", + "BlockAddress" + ], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + }, + "BlockAddress":{ + "shape":"ValueHolder", + "documentation":"

    The location of the block that you want to request. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

    For example: {strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:14}

    " + }, + "DigestTipAddress":{ + "shape":"ValueHolder", + "documentation":"

    The latest block location covered by the digest for which to request a proof. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

    For example: {strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:49}

    " + } + } + }, + "GetBlockResponse":{ + "type":"structure", + "required":["Block"], + "members":{ + "Block":{ + "shape":"ValueHolder", + "documentation":"

    The block data object in Amazon Ion format.

    " + }, + "Proof":{ + "shape":"ValueHolder", + "documentation":"

    The proof object in Amazon Ion format returned by a GetBlock request. A proof contains the list of hash values required to recalculate the specified digest using a Merkle tree, starting with the specified block.

    " + } + } + }, + "GetDigestRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "GetDigestResponse":{ + "type":"structure", + "required":[ + "Digest", + "DigestTipAddress" + ], + "members":{ + "Digest":{ + "shape":"Digest", + "documentation":"

    The 256-bit hash value representing the digest returned by a GetDigest request.

    " + }, + "DigestTipAddress":{ + "shape":"ValueHolder", + "documentation":"

    The latest block location covered by the digest that you requested. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

    " + } + } + }, + "GetRevisionRequest":{ + "type":"structure", + "required":[ + "Name", + "BlockAddress", + "DocumentId" + ], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + }, + "BlockAddress":{ + "shape":"ValueHolder", + "documentation":"

    The block location of the document revision to be verified. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

    For example: {strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:14}

    " + }, + "DocumentId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID of the document to be verified.

    " + }, + "DigestTipAddress":{ + "shape":"ValueHolder", + "documentation":"

    The latest block location covered by the digest for which to request a proof. An address is an Amazon Ion structure that has two fields: strandId and sequenceNo.

    For example: {strandId:\"BlFTjlSXze9BIh1KOszcE3\",sequenceNo:49}

    " + } + } + }, + "GetRevisionResponse":{ + "type":"structure", + "required":["Revision"], + "members":{ + "Proof":{ + "shape":"ValueHolder", + "documentation":"

    The proof object in Amazon Ion format returned by a GetRevision request. A proof contains the list of hash values that are required to recalculate the specified digest using a Merkle tree, starting with the specified document revision.

    " + }, + "Revision":{ + "shape":"ValueHolder", + "documentation":"

    The document revision data object in Amazon Ion format.

    " + } + } + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ParameterName":{ + "shape":"ParameterName", + "documentation":"

    The name of the invalid parameter.

    " + } + }, + "documentation":"

    One or more parameters in the request aren't valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "IonText":{ + "type":"string", + "max":1048576, + "min":1, + "sensitive":true + }, + "JournalKinesisStreamDescription":{ + "type":"structure", + "required":[ + "LedgerName", + "RoleArn", + "StreamId", + "Status", + "KinesisConfiguration", + "StreamName" + ], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time, in epoch time format, when the QLDB journal stream was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

    " + }, + "InclusiveStartTime":{ + "shape":"Timestamp", + "documentation":"

    The inclusive start date and time from which to start streaming journal data.

    " + }, + "ExclusiveEndTime":{ + "shape":"Timestamp", + "documentation":"

    The exclusive date and time that specifies when the stream ends. If this parameter is blank, the stream runs indefinitely until you cancel it.

    " + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal stream to write data records to a Kinesis Data Streams resource.

    " + }, + "StreamId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID that QLDB assigns to each QLDB journal stream.

    " + }, + "Arn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the QLDB journal stream.

    " + }, + "Status":{ + "shape":"StreamStatus", + "documentation":"

    The current state of the QLDB journal stream.

    " + }, + "KinesisConfiguration":{ + "shape":"KinesisConfiguration", + "documentation":"

    The configuration settings of the Amazon Kinesis Data Streams destination for your QLDB journal stream.

    " + }, + "ErrorCause":{ + "shape":"ErrorCause", + "documentation":"

    The error message that describes the reason that a stream has a status of IMPAIRED or FAILED. This is not applicable to streams that have other status values.

    " + }, + "StreamName":{ + "shape":"StreamName", + "documentation":"

    The user-defined name of the QLDB journal stream.

    " + } + }, + "documentation":"

    The information about an Amazon QLDB journal stream, including the Amazon Resource Name (ARN), stream name, creation time, current status, and the parameters of your original stream creation request.

    " + }, + "JournalKinesisStreamDescriptionList":{ + "type":"list", + "member":{"shape":"JournalKinesisStreamDescription"} + }, + "JournalS3ExportDescription":{ + "type":"structure", + "required":[ + "LedgerName", + "ExportId", + "ExportCreationTime", + "Status", + "InclusiveStartTime", + "ExclusiveEndTime", + "S3ExportConfiguration", + "RoleArn" + ], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    " + }, + "ExportId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID of the journal export job.

    " + }, + "ExportCreationTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time, in epoch time format, when the export job was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

    " + }, + "Status":{ + "shape":"ExportStatus", + "documentation":"

    The current state of the journal export job.

    " + }, + "InclusiveStartTime":{ + "shape":"Timestamp", + "documentation":"

    The inclusive start date and time for the range of journal contents that are specified in the original export request.

    " + }, + "ExclusiveEndTime":{ + "shape":"Timestamp", + "documentation":"

    The exclusive end date and time for the range of journal contents that are specified in the original export request.

    " + }, + "S3ExportConfiguration":{"shape":"S3ExportConfiguration"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

    • Write objects into your Amazon Simple Storage Service (Amazon S3) bucket.

    • (Optional) Use your customer master key (CMK) in AWS Key Management Service (AWS KMS) for server-side encryption of your exported data.

    " + } + }, + "documentation":"

    The information about a journal export job, including the ledger name, export ID, when it was created, current status, and its start and end time export parameters.

    " + }, + "JournalS3ExportList":{ + "type":"list", + "member":{"shape":"JournalS3ExportDescription"} + }, + "KinesisConfiguration":{ + "type":"structure", + "required":["StreamArn"], + "members":{ + "StreamArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the Kinesis data stream resource.

    " + }, + "AggregationEnabled":{ + "shape":"Boolean", + "documentation":"

    Enables QLDB to publish multiple data records in a single Kinesis Data Streams record. To learn more, see KPL Key Concepts in the Amazon Kinesis Data Streams Developer Guide.

    " + } + }, + "documentation":"

    The configuration settings of the Amazon Kinesis Data Streams destination for your Amazon QLDB journal stream.

    " + }, + "LedgerList":{ + "type":"list", + "member":{"shape":"LedgerSummary"} + }, + "LedgerName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"(?!^.*--)(?!^[0-9]+$)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" + }, + "LedgerState":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "DELETED" + ] + }, + "LedgerSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    " + }, + "State":{ + "shape":"LedgerState", + "documentation":"

    The current status of the ledger.

    " + }, + "CreationDateTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

    " + } + }, + "documentation":"

    Information about a ledger, including its name, state, and when it was created.

    " + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource.

    " + } + }, + "documentation":"

    You have reached the limit on the maximum number of resources allowed.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListJournalKinesisStreamsForLedgerRequest":{ + "type":"structure", + "required":["LedgerName"], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in a single ListJournalKinesisStreamsForLedger request. (The actual number of results returned might be fewer.)

    ", + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListJournalKinesisStreamsForLedger call, you should use that value as input here.

    ", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListJournalKinesisStreamsForLedgerResponse":{ + "type":"structure", + "members":{ + "Streams":{ + "shape":"JournalKinesisStreamDescriptionList", + "documentation":"

    The array of QLDB journal stream descriptors that are associated with the given ledger.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"
    A pagination token, indicating whether there are more results available:

    • If NextToken is empty, the last page of results has been processed and there are no more results to be retrieved.

    • If NextToken is not empty, more results are available. To retrieve the next page of results, use the value of NextToken in a subsequent ListJournalKinesisStreamsForLedger call.

    " + } + } + }, + "ListJournalS3ExportsForLedgerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in a single ListJournalS3ExportsForLedger request. (The actual number of results returned might be fewer.)

    ", + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListJournalS3ExportsForLedger call, then you should use that value as input here.

    ", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListJournalS3ExportsForLedgerResponse":{ + "type":"structure", + "members":{ + "JournalS3Exports":{ + "shape":"JournalS3ExportList", + "documentation":"

    The array of journal export job descriptions that are associated with the specified ledger.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"
    A pagination token, indicating whether there are more results available:

    • If NextToken is empty, then the last page of results has been processed and there are no more results to be retrieved.

    • If NextToken is not empty, then there are more results available. To retrieve the next page of results, use the value of NextToken in a subsequent ListJournalS3ExportsForLedger call.

    " + } + } + }, + "ListJournalS3ExportsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in a single ListJournalS3Exports request. (The actual number of results returned might be fewer.)

    ", + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListJournalS3Exports call, then you should use that value as input here.

    ", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListJournalS3ExportsResponse":{ + "type":"structure", + "members":{ + "JournalS3Exports":{ + "shape":"JournalS3ExportList", + "documentation":"

    The array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"
    A pagination token, indicating whether there are more results available:

    • If NextToken is empty, then the last page of results has been processed and there are no more results to be retrieved.

    • If NextToken is not empty, then there are more results available. To retrieve the next page of results, use the value of NextToken in a subsequent ListJournalS3Exports call.

    " + } + } + }, + "ListLedgersRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in a single ListLedgers request. (The actual number of results returned might be fewer.)

    ", + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListLedgers call, then you should use that value as input here.

    ", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListLedgersResponse":{ + "type":"structure", + "members":{ + "Ledgers":{ + "shape":"LedgerList", + "documentation":"

    The array of ledger summaries that are associated with the current AWS account and Region.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token, indicating whether there are more results available:

    • If NextToken is empty, then the last page of results has been processed and there are no more results to be retrieved.

    • If NextToken is not empty, then there are more results available. To retrieve the next page of results, use the value of NextToken in a subsequent ListLedgers call.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for which you want to list the tags. For example:

    arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

    The tags that are currently associated with the specified Amazon QLDB resource.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":4, + "pattern":"^[A-Za-z-0-9+/=]+$" + }, + "ParameterName":{"type":"string"}, + "PermissionsMode":{ + "type":"string", + "enum":["ALLOW_ALL"] + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource.

    " + }, + "ResourceName":{ + "shape":"ResourceName", + "documentation":"

    The name of the resource.

    " + } + }, + "documentation":"

    The specified resource already exists.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource.

    " + }, + "ResourceName":{ + "shape":"ResourceName", + "documentation":"

    The name of the resource.

    " + } + }, + "documentation":"

    The specified resource can't be modified at this time.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceName":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource.

    " + }, + "ResourceName":{ + "shape":"ResourceName", + "documentation":"

    The name of the resource.

    " + } + }, + "documentation":"

    The specified resource doesn't exist.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourcePreconditionNotMetException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource.

    " + }, + "ResourceName":{ + "shape":"ResourceName", + "documentation":"

    The name of the resource.

    " + } + }, + "documentation":"

    The operation failed because a condition wasn't satisfied in advance.

    ", + "error":{"httpStatusCode":412}, + "exception":true + }, + "ResourceType":{"type":"string"}, + "S3Bucket":{ + "type":"string", + "max":255, + "min":3, + "pattern":"^[A-Za-z-0-9-_.]+$" + }, + "S3EncryptionConfiguration":{ + "type":"structure", + "required":["ObjectEncryptionType"], + "members":{ + "ObjectEncryptionType":{ + "shape":"S3ObjectEncryptionType", + "documentation":"

    The Amazon S3 object encryption type.

    To learn more about server-side encryption options in Amazon S3, see Protecting Data Using Server-Side Encryption in the Amazon S3 Developer Guide.

    " + }, + "KmsKeyArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for a symmetric customer master key (CMK) in AWS Key Management Service (AWS KMS). Amazon QLDB does not support asymmetric CMKs.

    You must provide a KmsKeyArn if you specify SSE_KMS as the ObjectEncryptionType.

    KmsKeyArn is not required if you specify SSE_S3 as the ObjectEncryptionType.
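    For illustration, a minimal sketch of the two encryption configurations described above, assuming the generated v2 model classes; the KMS key ARN is a placeholder.

    // Hypothetical sketch; assumes the generated QLDB v2 model classes. The KMS key ARN is a placeholder.
    import software.amazon.awssdk.services.qldb.model.S3EncryptionConfiguration;
    import software.amazon.awssdk.services.qldb.model.S3ObjectEncryptionType;

    public class EncryptionConfigExamples {
        // SSE_KMS requires KmsKeyArn, which must reference a symmetric CMK.
        static final S3EncryptionConfiguration KMS_MANAGED = S3EncryptionConfiguration.builder()
                .objectEncryptionType(S3ObjectEncryptionType.SSE_KMS)
                .kmsKeyArn("arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555")
                .build();

        // KmsKeyArn is not required when SSE_S3 is used.
        static final S3EncryptionConfiguration S3_MANAGED = S3EncryptionConfiguration.builder()
                .objectEncryptionType(S3ObjectEncryptionType.SSE_S3)
                .build();
    }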

    " + } + }, + "documentation":"

    The encryption settings that are used by a journal export job to write data in an Amazon Simple Storage Service (Amazon S3) bucket.

    " + }, + "S3ExportConfiguration":{ + "type":"structure", + "required":[ + "Bucket", + "Prefix", + "EncryptionConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"S3Bucket", + "documentation":"

    The Amazon S3 bucket name in which a journal export job writes the journal contents.

    The bucket name must comply with the Amazon S3 bucket naming conventions. For more information, see Bucket Restrictions and Limitations in the Amazon S3 Developer Guide.

    " + }, + "Prefix":{ + "shape":"S3Prefix", + "documentation":"

    The prefix for the Amazon S3 bucket in which a journal export job writes the journal contents.

    The prefix must comply with Amazon S3 key naming rules and restrictions. For more information, see Object Key and Metadata in the Amazon S3 Developer Guide.

    The following are examples of valid Prefix values:

    • JournalExports-ForMyLedger/Testing/

    • JournalExports

    • My:Tests/

    " + }, + "EncryptionConfiguration":{ + "shape":"S3EncryptionConfiguration", + "documentation":"

    The encryption settings that are used by a journal export job to write data in an Amazon S3 bucket.

    " + } + }, + "documentation":"

    The Amazon Simple Storage Service (Amazon S3) bucket location in which a journal export job writes the journal contents.

    " + }, + "S3ObjectEncryptionType":{ + "type":"string", + "enum":[ + "SSE_KMS", + "SSE_S3", + "NO_ENCRYPTION" + ] + }, + "S3Prefix":{ + "type":"string", + "max":128, + "min":0 + }, + "StreamJournalToKinesisRequest":{ + "type":"structure", + "required":[ + "LedgerName", + "RoleArn", + "InclusiveStartTime", + "KinesisConfiguration", + "StreamName" + ], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal stream to write data records to a Kinesis Data Streams resource.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The key-value pairs to add as tags to the stream that you want to create. Tag keys are case sensitive. Tag values are case sensitive and can be null.

    " + }, + "InclusiveStartTime":{ + "shape":"Timestamp", + "documentation":"

    The inclusive start date and time from which to start streaming journal data. This parameter must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z

    The InclusiveStartTime cannot be in the future and must be before ExclusiveEndTime.

    If you provide an InclusiveStartTime that is before the ledger's CreationDateTime, QLDB effectively defaults it to the ledger's CreationDateTime.

    " + }, + "ExclusiveEndTime":{ + "shape":"Timestamp", + "documentation":"

    The exclusive date and time that specifies when the stream ends. If you don't define this parameter, the stream runs indefinitely until you cancel it.

    The ExclusiveEndTime must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z

    " + }, + "KinesisConfiguration":{ + "shape":"KinesisConfiguration", + "documentation":"

    The configuration settings of the Kinesis Data Streams destination for your stream request.

    " + }, + "StreamName":{ + "shape":"StreamName", + "documentation":"

    The name that you want to assign to the QLDB journal stream. User-defined names can help identify and indicate the purpose of a stream.

    Your stream name must be unique among other active streams for a given ledger. Stream names have the same naming constraints as ledger names, as defined in Quotas in Amazon QLDB in the Amazon QLDB Developer Guide.

    " + } + } + }, + "StreamJournalToKinesisResponse":{ + "type":"structure", + "members":{ + "StreamId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID that QLDB assigns to each QLDB journal stream.

    " + } + } + }, + "StreamName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"(?!^.*--)(?!^[0-9]+$)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" + }, + "StreamStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "COMPLETED", + "CANCELED", + "FAILED", + "IMPAIRED" + ] + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) to which you want to add the tags. For example:

    arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    The key-value pairs to add as tags to the specified QLDB resource. Tag keys are case sensitive. If you specify a key that already exists for the resource, your request fails and returns an error. Tag values are case sensitive and can be null.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "Timestamp":{"type":"timestamp"}, + "UniqueId":{ + "type":"string", + "max":22, + "min":22, + "pattern":"^[A-Za-z-0-9]+$" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) from which you want to remove the tags. For example:

    arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The list of tag keys that you want to remove.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLedgerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    ", + "location":"uri", + "locationName":"name" + }, + "DeletionProtection":{ + "shape":"DeletionProtection", + "documentation":"

    The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

    If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

    " + } + } + }, + "UpdateLedgerResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger.

    " + }, + "Arn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the ledger.

    " + }, + "State":{ + "shape":"LedgerState", + "documentation":"

    The current status of the ledger.

    " + }, + "CreationDateTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time, in epoch time format, when the ledger was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

    " + }, + "DeletionProtection":{ + "shape":"DeletionProtection", + "documentation":"

    The flag that prevents a ledger from being deleted by any user. If not provided on ledger creation, this feature is enabled (true) by default.

    If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

    " + } + } + }, + "ValueHolder":{ + "type":"structure", + "members":{ + "IonText":{ + "shape":"IonText", + "documentation":"

    An Amazon Ion plaintext value contained in a ValueHolder structure.

    " + } + }, + "documentation":"

    A structure that can contain a value in multiple encoding formats.

    ", + "sensitive":true + } + }, + "documentation":"

    The control plane for Amazon QLDB

    " +} diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml new file mode 100644 index 000000000000..641eb0532475 --- /dev/null +++ b/services/qldbsession/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + qldbsession + AWS Java SDK :: Services :: QLDB Session + The AWS Java SDK for QLDB Session module holds the client classes that are used for + communicating with QLDB Session. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.qldbsession + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/qldbsession/src/main/resources/codegen-resources/paginators-1.json b/services/qldbsession/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/qldbsession/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/qldbsession/src/main/resources/codegen-resources/service-2.json b/services/qldbsession/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..20e4ce45fb09 --- /dev/null +++ b/services/qldbsession/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,448 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-07-11", + "endpointPrefix":"session.qldb", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"QLDB Session", + "serviceFullName":"Amazon QLDB Session", + "serviceId":"QLDB Session", + "signatureVersion":"v4", + "signingName":"qldb", + "targetPrefix":"QLDBSession", + "uid":"qldb-session-2019-07-11" + }, + "operations":{ + "SendCommand":{ + "name":"SendCommand", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendCommandRequest"}, + "output":{"shape":"SendCommandResult"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidSessionException"}, + {"shape":"OccConflictException"}, + {"shape":"RateExceededException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Sends a command to an Amazon QLDB ledger.

    Instead of interacting directly with this API, we recommend using the QLDB driver or the QLDB shell to execute data transactions on a ledger.

    • If you are working with an AWS SDK, use the QLDB driver. The driver provides a high-level abstraction layer above this QLDB Session data plane and manages SendCommand API calls for you. For information and a list of supported programming languages, see Getting started with the driver in the Amazon QLDB Developer Guide.

    • If you are working with the AWS Command Line Interface (AWS CLI), use the QLDB shell. The shell is a command line interface that uses the QLDB driver to interact with a ledger. For information, see Accessing Amazon QLDB using the QLDB shell.
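    For illustration only, a minimal sketch of the raw SendCommand session lifecycle that the QLDB driver normally manages for you. It assumes the generated client and model classes follow the SDK's usual codegen naming (QldbSessionClient, SendCommandRequest, StartSessionRequest, EndSessionRequest), and the ledger name is a placeholder.

    import software.amazon.awssdk.services.qldbsession.QldbSessionClient;
    import software.amazon.awssdk.services.qldbsession.model.EndSessionRequest;
    import software.amazon.awssdk.services.qldbsession.model.SendCommandRequest;
    import software.amazon.awssdk.services.qldbsession.model.SendCommandResponse;
    import software.amazon.awssdk.services.qldbsession.model.StartSessionRequest;

    public class SendCommandSketch {
        public static void main(String[] args) {
            try (QldbSessionClient session = QldbSessionClient.create()) {
                // Each session begins with a StartSession command; the returned token
                // must be passed on every subsequent SendCommand call for that session.
                SendCommandResponse started = session.sendCommand(SendCommandRequest.builder()
                    .startSession(StartSessionRequest.builder()
                        .ledgerName("exampleLedger")   // placeholder ledger name
                        .build())
                    .build());
                String token = started.startSession().sessionToken();

                // End the session when done; the QLDB driver handles this lifecycle automatically.
                session.sendCommand(SendCommandRequest.builder()
                    .sessionToken(token)
                    .endSession(EndSessionRequest.builder().build())
                    .build());
            }
        }
    }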

    " + } + }, + "shapes":{ + "AbortTransactionRequest":{ + "type":"structure", + "members":{ + }, + "documentation":"

    Contains the details of the transaction to abort.

    " + }, + "AbortTransactionResult":{ + "type":"structure", + "members":{ + "TimingInformation":{ + "shape":"TimingInformation", + "documentation":"

    Contains server-side performance information for the command.

    " + } + }, + "documentation":"

    Contains the details of the aborted transaction.

    " + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Code":{"shape":"ErrorCode"} + }, + "documentation":"

    Returned if the request is malformed or contains an error such as an invalid parameter value or a missing required parameter.

    ", + "exception":true + }, + "CommitDigest":{"type":"blob"}, + "CommitTransactionRequest":{ + "type":"structure", + "required":[ + "TransactionId", + "CommitDigest" + ], + "members":{ + "TransactionId":{ + "shape":"TransactionId", + "documentation":"

    Specifies the transaction ID of the transaction to commit.

    " + }, + "CommitDigest":{ + "shape":"CommitDigest", + "documentation":"

    Specifies the commit digest for the transaction to commit. For every active transaction, the commit digest must be passed. QLDB validates CommitDigest and rejects the commit with an error if the digest computed on the client does not match the digest computed by QLDB.

    The purpose of the CommitDigest parameter is to ensure that QLDB commits a transaction if and only if the server has processed the exact set of statements sent by the client, in the same order that the client sent them, and with no duplicates.

    " + } + }, + "documentation":"

    Contains the details of the transaction to commit.

    " + }, + "CommitTransactionResult":{ + "type":"structure", + "members":{ + "TransactionId":{ + "shape":"TransactionId", + "documentation":"

    The transaction ID of the committed transaction.

    " + }, + "CommitDigest":{ + "shape":"CommitDigest", + "documentation":"

    The commit digest of the committed transaction.

    " + }, + "TimingInformation":{ + "shape":"TimingInformation", + "documentation":"

    Contains server-side performance information for the command.

    " + }, + "ConsumedIOs":{ + "shape":"IOUsage", + "documentation":"

    Contains metrics about the number of I/O requests that were consumed.

    " + } + }, + "documentation":"

    Contains the details of the committed transaction.

    " + }, + "EndSessionRequest":{ + "type":"structure", + "members":{ + }, + "documentation":"

    Specifies a request to end the session.

    " + }, + "EndSessionResult":{ + "type":"structure", + "members":{ + "TimingInformation":{ + "shape":"TimingInformation", + "documentation":"

    Contains server-side performance information for the command.

    " + } + }, + "documentation":"

    Contains the details of the ended session.

    " + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "ExecuteStatementRequest":{ + "type":"structure", + "required":[ + "TransactionId", + "Statement" + ], + "members":{ + "TransactionId":{ + "shape":"TransactionId", + "documentation":"

    Specifies the transaction ID of the request.

    " + }, + "Statement":{ + "shape":"Statement", + "documentation":"

    Specifies the statement of the request.

    " + }, + "Parameters":{ + "shape":"StatementParameters", + "documentation":"

    Specifies the parameters for the parameterized statement in the request.

    " + } + }, + "documentation":"

    Specifies a request to execute a statement.

    " + }, + "ExecuteStatementResult":{ + "type":"structure", + "members":{ + "FirstPage":{ + "shape":"Page", + "documentation":"

    Contains the details of the first fetched page.

    " + }, + "TimingInformation":{ + "shape":"TimingInformation", + "documentation":"

    Contains server-side performance information for the command.

    " + }, + "ConsumedIOs":{ + "shape":"IOUsage", + "documentation":"

    Contains metrics about the number of I/O requests that were consumed.

    " + } + }, + "documentation":"

    Contains the details of the executed statement.

    " + }, + "FetchPageRequest":{ + "type":"structure", + "required":[ + "TransactionId", + "NextPageToken" + ], + "members":{ + "TransactionId":{ + "shape":"TransactionId", + "documentation":"

    Specifies the transaction ID of the page to be fetched.

    " + }, + "NextPageToken":{ + "shape":"PageToken", + "documentation":"

    Specifies the next page token of the page to be fetched.

    " + } + }, + "documentation":"

    Specifies the details of the page to be fetched.

    " + }, + "FetchPageResult":{ + "type":"structure", + "members":{ + "Page":{ + "shape":"Page", + "documentation":"

    Contains details of the fetched page.

    " + }, + "TimingInformation":{ + "shape":"TimingInformation", + "documentation":"

    Contains server-side performance information for the command.

    " + }, + "ConsumedIOs":{ + "shape":"IOUsage", + "documentation":"

    Contains metrics about the number of I/O requests that were consumed.

    " + } + }, + "documentation":"

    Contains the page that was fetched.

    " + }, + "IOUsage":{ + "type":"structure", + "members":{ + "ReadIOs":{ + "shape":"ReadIOs", + "documentation":"

    The number of read I/O requests that the command performed.

    " + }, + "WriteIOs":{ + "shape":"WriteIOs", + "documentation":"

    The number of write I/O requests that the command performed.

    " + } + }, + "documentation":"

    Contains I/O usage metrics for a command that was invoked.

    " + }, + "InvalidSessionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Code":{"shape":"ErrorCode"} + }, + "documentation":"

    Returned if the session doesn't exist anymore because it timed out or expired.

    ", + "exception":true + }, + "IonBinary":{ + "type":"blob", + "max":131072, + "min":1 + }, + "IonText":{ + "type":"string", + "max":1048576, + "min":1 + }, + "LedgerName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"(?!^.*--)(?!^[0-9]+$)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Returned if a resource limit, such as the number of active sessions, is exceeded.

    ", + "exception":true + }, + "OccConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Returned when a transaction cannot be written to the journal due to a failure in the verification phase of optimistic concurrency control (OCC).

    ", + "exception":true + }, + "Page":{ + "type":"structure", + "members":{ + "Values":{ + "shape":"ValueHolders", + "documentation":"

    A structure that contains values in multiple encoding formats.

    " + }, + "NextPageToken":{ + "shape":"PageToken", + "documentation":"

    The token of the next page.

    " + } + }, + "documentation":"

    Contains details of the fetched page.

    " + }, + "PageToken":{ + "type":"string", + "max":1024, + "min":4, + "pattern":"^[A-Za-z-0-9+/=]+$" + }, + "ProcessingTimeMilliseconds":{"type":"long"}, + "RateExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Returned when the rate of requests exceeds the allowed throughput.

    ", + "exception":true + }, + "ReadIOs":{"type":"long"}, + "SendCommandRequest":{ + "type":"structure", + "members":{ + "SessionToken":{ + "shape":"SessionToken", + "documentation":"

    Specifies the session token for the current command. A session token is constant throughout the life of the session.

    To obtain a session token, run the StartSession command. This SessionToken is required for every subsequent command that is issued during the current session.

    " + }, + "StartSession":{ + "shape":"StartSessionRequest", + "documentation":"

    Command to start a new session. A session token is obtained as part of the response.

    " + }, + "StartTransaction":{ + "shape":"StartTransactionRequest", + "documentation":"

    Command to start a new transaction.

    " + }, + "EndSession":{ + "shape":"EndSessionRequest", + "documentation":"

    Command to end the current session.

    " + }, + "CommitTransaction":{ + "shape":"CommitTransactionRequest", + "documentation":"

    Command to commit the specified transaction.

    " + }, + "AbortTransaction":{ + "shape":"AbortTransactionRequest", + "documentation":"

    Command to abort the current transaction.

    " + }, + "ExecuteStatement":{ + "shape":"ExecuteStatementRequest", + "documentation":"

    Command to execute a statement in the specified transaction.

    " + }, + "FetchPage":{ + "shape":"FetchPageRequest", + "documentation":"

    Command to fetch a page.

    " + } + } + }, + "SendCommandResult":{ + "type":"structure", + "members":{ + "StartSession":{ + "shape":"StartSessionResult", + "documentation":"

    Contains the details of the started session that includes a session token. This SessionToken is required for every subsequent command that is issued during the current session.

    " + }, + "StartTransaction":{ + "shape":"StartTransactionResult", + "documentation":"

    Contains the details of the started transaction.

    " + }, + "EndSession":{ + "shape":"EndSessionResult", + "documentation":"

    Contains the details of the ended session.

    " + }, + "CommitTransaction":{ + "shape":"CommitTransactionResult", + "documentation":"

    Contains the details of the committed transaction.

    " + }, + "AbortTransaction":{ + "shape":"AbortTransactionResult", + "documentation":"

    Contains the details of the aborted transaction.

    " + }, + "ExecuteStatement":{ + "shape":"ExecuteStatementResult", + "documentation":"

    Contains the details of the executed statement.

    " + }, + "FetchPage":{ + "shape":"FetchPageResult", + "documentation":"

    Contains the details of the fetched page.

    " + } + } + }, + "SessionToken":{ + "type":"string", + "max":1024, + "min":4, + "pattern":"^[A-Za-z-0-9+/=]+$" + }, + "StartSessionRequest":{ + "type":"structure", + "required":["LedgerName"], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

    The name of the ledger to start a new session against.

    " + } + }, + "documentation":"

    Specifies a request to start a new session.

    " + }, + "StartSessionResult":{ + "type":"structure", + "members":{ + "SessionToken":{ + "shape":"SessionToken", + "documentation":"

    Session token of the started session. This SessionToken is required for every subsequent command that is issued during the current session.

    " + }, + "TimingInformation":{ + "shape":"TimingInformation", + "documentation":"

    Contains server-side performance information for the command.

    " + } + }, + "documentation":"

    Contains the details of the started session.

    " + }, + "StartTransactionRequest":{ + "type":"structure", + "members":{ + }, + "documentation":"

    Specifies a request to start a transaction.

    " + }, + "StartTransactionResult":{ + "type":"structure", + "members":{ + "TransactionId":{ + "shape":"TransactionId", + "documentation":"

    The transaction ID of the started transaction.

    " + }, + "TimingInformation":{ + "shape":"TimingInformation", + "documentation":"

    Contains server-side performance information for the command.

    " + } + }, + "documentation":"

    Contains the details of the started transaction.

    " + }, + "Statement":{ + "type":"string", + "max":100000, + "min":1 + }, + "StatementParameters":{ + "type":"list", + "member":{"shape":"ValueHolder"} + }, + "TimingInformation":{ + "type":"structure", + "members":{ + "ProcessingTimeMilliseconds":{ + "shape":"ProcessingTimeMilliseconds", + "documentation":"

    The amount of time that was taken for the command to finish processing, measured in milliseconds.

    " + } + }, + "documentation":"

    Contains server-side performance information for a command. Amazon QLDB captures timing information between the times when it receives the request and when it sends the corresponding response.

    " + }, + "TransactionId":{ + "type":"string", + "max":22, + "min":22, + "pattern":"^[A-Za-z-0-9]+$" + }, + "ValueHolder":{ + "type":"structure", + "members":{ + "IonBinary":{ + "shape":"IonBinary", + "documentation":"

    An Amazon Ion binary value contained in a ValueHolder structure.

    " + }, + "IonText":{ + "shape":"IonText", + "documentation":"

    An Amazon Ion plaintext value contained in a ValueHolder structure.

    " + } + }, + "documentation":"

    A structure that can contain a value in multiple encoding formats.

    " + }, + "ValueHolders":{ + "type":"list", + "member":{"shape":"ValueHolder"} + }, + "WriteIOs":{"type":"long"} + }, + "documentation":"

    The transactional data APIs for Amazon QLDB

    Instead of interacting directly with this API, we recommend using the QLDB driver or the QLDB shell to execute data transactions on a ledger.

    • If you are working with an AWS SDK, use the QLDB driver. The driver provides a high-level abstraction layer above this QLDB Session data plane and manages SendCommand API calls for you. For information and a list of supported programming languages, see Getting started with the driver in the Amazon QLDB Developer Guide.

    • If you are working with the AWS Command Line Interface (AWS CLI), use the QLDB shell. The shell is a command line interface that uses the QLDB driver to interact with a ledger. For information, see Accessing Amazon QLDB using the QLDB shell.

    " +} diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index afb11dca3e8d..8de4606aa321 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + redshiftdata + AWS Java SDK :: Services :: Redshift Data + The AWS Java SDK for Redshift Data module holds the client classes that are used for + communicating with Redshift Data. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.redshiftdata + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/redshiftdata/src/main/resources/codegen-resources/paginators-1.json b/services/redshiftdata/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..b33a4ab753e3 --- /dev/null +++ b/services/redshiftdata/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,39 @@ +{ + "pagination": { + "DescribeTable": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ColumnList" + }, + "GetStatementResult": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Records" + }, + "ListDatabases": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Databases" + }, + "ListSchemas": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Schemas" + }, + "ListStatements": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Statements" + }, + "ListTables": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Tables" + } + } +} diff --git a/services/redshiftdata/src/main/resources/codegen-resources/service-2.json b/services/redshiftdata/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..c782792f1b52 --- /dev/null +++ b/services/redshiftdata/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,867 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-12-20", + "endpointPrefix":"redshift-data", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Redshift Data API Service", + "serviceId":"Redshift Data", + "signatureVersion":"v4", + "signingName":"redshift-data", + "targetPrefix":"RedshiftData", + "uid":"redshift-data-2019-12-20" + }, + "operations":{ + "CancelStatement":{ + "name":"CancelStatement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelStatementRequest"}, + "output":{"shape":"CancelStatementResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Cancels a running query. To be canceled, a query must be running.

    " + }, + "DescribeStatement":{ + "name":"DescribeStatement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStatementRequest"}, + "output":{"shape":"DescribeStatementResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes the details of a specific SQL statement that was run by the Amazon Redshift Data API. The information includes when the query started, when it finished, the query status, the number of rows returned, and the SQL statement.

    " + }, + "DescribeTable":{ + "name":"DescribeTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTableRequest"}, + "output":{"shape":"DescribeTableResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes the detailed information about a table from metadata in the cluster. The information includes its columns. A token is returned to page through the column list. Depending on the authorization method, use one of the following combinations of request parameters:

    • AWS Secrets Manager - specify the Amazon Resource Name (ARN) of the secret and the cluster identifier that matches the cluster in the secret.

    • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

    " + }, + "ExecuteStatement":{ + "name":"ExecuteStatement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExecuteStatementInput"}, + "output":{"shape":"ExecuteStatementOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ExecuteStatementException"} + ], + "documentation":"

    Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the authorization method, use one of the following combinations of request parameters:

    • AWS Secrets Manager - specify the Amazon Resource Name (ARN) of the secret and the cluster identifier that matches the cluster in the secret.

    • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.
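    For illustration, a minimal sketch of ExecuteStatement using the AWS Secrets Manager authorization method. It assumes the generated v2 client and model classes follow the SDK's usual codegen naming (RedshiftDataClient, ExecuteStatementRequest); the cluster identifier, database name, secret ARN, and statement name are placeholders.

    import software.amazon.awssdk.services.redshiftdata.RedshiftDataClient;
    import software.amazon.awssdk.services.redshiftdata.model.ExecuteStatementRequest;
    import software.amazon.awssdk.services.redshiftdata.model.ExecuteStatementResponse;

    public class ExecuteStatementSketch {
        public static void main(String[] args) {
            try (RedshiftDataClient data = RedshiftDataClient.create()) {
                // Secrets Manager authorization: pass the secret ARN plus the matching cluster identifier.
                ExecuteStatementResponse response = data.executeStatement(ExecuteStatementRequest.builder()
                    .clusterIdentifier("example-cluster")                                       // placeholder cluster
                    .database("dev")                                                            // placeholder database
                    .secretArn("arn:aws:secretsmanager:us-east-1:123456789012:secret:example")  // placeholder secret
                    .sql("SELECT 1")
                    .statementName("example-statement")
                    .build());
                // The returned UUID identifies the statement for DescribeStatement and GetStatementResult.
                System.out.println("Statement id: " + response.id());
            }
        }
    }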

    " + }, + "GetStatementResult":{ + "name":"GetStatementResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetStatementResultRequest"}, + "output":{"shape":"GetStatementResultResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Fetches the temporarily cached result of an SQL statement. A token is returned to page through the statement results.

    " + }, + "ListDatabases":{ + "name":"ListDatabases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatabasesRequest"}, + "output":{"shape":"ListDatabasesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the databases in a cluster. A token is returned to page through the database list. Depending on the authorization method, use one of the following combinations of request parameters:

    • AWS Secrets Manager - specify the Amazon Resource Name (ARN) of the secret and the cluster identifier that matches the cluster in the secret.

    • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

    " + }, + "ListSchemas":{ + "name":"ListSchemas", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSchemasRequest"}, + "output":{"shape":"ListSchemasResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the schemas in a database. A token is returned to page through the schema list. Depending on the authorization method, use one of the following combinations of request parameters:

    • AWS Secrets Manager - specify the Amazon Resource Name (ARN) of the secret and the cluster identifier that matches the cluster in the secret.

    • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

    " + }, + "ListStatements":{ + "name":"ListStatements", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStatementsRequest"}, + "output":{"shape":"ListStatementsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the SQL statements. By default, only finished statements are shown. A token is returned to page through the statement list.

    " + }, + "ListTables":{ + "name":"ListTables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTablesRequest"}, + "output":{"shape":"ListTablesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the tables in a database. If neither SchemaPattern nor TablePattern is specified, then all tables in the database are returned. A token is returned to page through the table list. Depending on the authorization method, use one of the following combinations of request parameters:

    • AWS Secrets Manager - specify the Amazon Resource Name (ARN) of the secret and the cluster identifier that matches the cluster in the secret.

    • Temporary credentials - specify the cluster identifier, the database name, and the database user name. Permission to call the redshift:GetClusterCredentials operation is required to use this method.

    " + } + }, + "shapes":{ + "Blob":{"type":"blob"}, + "Boolean":{ + "type":"boolean", + "box":true + }, + "BoxedBoolean":{ + "type":"boolean", + "box":true + }, + "BoxedDouble":{ + "type":"double", + "box":true + }, + "BoxedLong":{ + "type":"long", + "box":true + }, + "CancelStatementRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"UUID", + "documentation":"

    The identifier of the SQL statement to cancel. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. This identifier is returned by ExecuteStatement and ListStatements.

    " + } + } + }, + "CancelStatementResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"Boolean", + "documentation":"

    A value that indicates whether the cancel statement succeeded (true).

    " + } + } + }, + "ColumnList":{ + "type":"list", + "member":{"shape":"ColumnMetadata"} + }, + "ColumnMetadata":{ + "type":"structure", + "members":{ + "columnDefault":{ + "shape":"String", + "documentation":"

    The default value of the column.

    " + }, + "isCaseSensitive":{ + "shape":"bool", + "documentation":"

    A value that indicates whether the column is case-sensitive.

    " + }, + "isCurrency":{ + "shape":"bool", + "documentation":"

    A value that indicates whether the column contains currency values.

    " + }, + "isSigned":{ + "shape":"bool", + "documentation":"

    A value that indicates whether an integer column is signed.

    " + }, + "label":{ + "shape":"String", + "documentation":"

    The label for the column.

    " + }, + "length":{ + "shape":"Integer", + "documentation":"

    The length of the column.

    " + }, + "name":{ + "shape":"String", + "documentation":"

    The name of the column.

    " + }, + "nullable":{ + "shape":"Integer", + "documentation":"

    A value that indicates whether the column is nullable.

    " + }, + "precision":{ + "shape":"Integer", + "documentation":"

    The precision value of a decimal number column.

    " + }, + "scale":{ + "shape":"Integer", + "documentation":"

    The scale value of a decimal number column.

    " + }, + "schemaName":{ + "shape":"String", + "documentation":"

    The name of the schema that contains the table that includes the column.

    " + }, + "tableName":{ + "shape":"String", + "documentation":"

    The name of the table that includes the column.

    " + }, + "typeName":{ + "shape":"String", + "documentation":"

    The database-specific data type of the column.

    " + } + }, + "documentation":"

    The properties (metadata) of a column.

    " + }, + "ColumnMetadataList":{ + "type":"list", + "member":{"shape":"ColumnMetadata"} + }, + "DatabaseList":{ + "type":"list", + "member":{"shape":"String"} + }, + "DescribeStatementRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"UUID", + "documentation":"

    The identifier of the SQL statement to describe. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. This identifier is returned by ExecuteStatement and ListStatements.

    " + } + } + }, + "DescribeStatementResponse":{ + "type":"structure", + "required":["Id"], + "members":{ + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

    The cluster identifier.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time (UTC) when the SQL statement was submitted to run.

    " + }, + "Database":{ + "shape":"String", + "documentation":"

    The name of the database.

    " + }, + "DbUser":{ + "shape":"String", + "documentation":"

    The database user name.

    " + }, + "Duration":{ + "shape":"Long", + "documentation":"

    The amount of time in nanoseconds that the statement ran.

    " + }, + "Error":{ + "shape":"String", + "documentation":"

    The error message from the cluster if the SQL statement encountered an error while running.

    " + }, + "Id":{ + "shape":"UUID", + "documentation":"

    The identifier of the SQL statement described. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API.

    " + }, + "QueryString":{ + "shape":"StatementString", + "documentation":"

    The SQL statement text.

    " + }, + "RedshiftPid":{ + "shape":"Long", + "documentation":"

    The process identifier from Amazon Redshift.

    " + }, + "RedshiftQueryId":{ + "shape":"Long", + "documentation":"

    The identifier of the query generated by Amazon Redshift. These identifiers are also available in the query column of the STL_QUERY system view.

    " + }, + "ResultRows":{ + "shape":"Long", + "documentation":"

    Either the number of rows returned from the SQL statement or the number of rows affected. If result size is greater than zero, the result rows can be the number of rows affected by SQL statements such as INSERT, UPDATE, DELETE, COPY, and others.

    " + }, + "ResultSize":{ + "shape":"Long", + "documentation":"

    The size in bytes of the returned results.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The name or Amazon Resource Name (ARN) of the secret that enables access to the database.

    " + }, + "Status":{ + "shape":"StatusString", + "documentation":"

    The status of the SQL statement being described. Status values are defined as follows:

    • ABORTED - The query run was stopped by the user.

    • ALL - A status value that includes all query statuses. This value can be used to filter results.

    • FAILED - The query run failed.

    • FINISHED - The query has finished running.

    • PICKED - The query has been chosen to be run.

    • STARTED - The query run has started.

    • SUBMITTED - The query was submitted, but not yet processed.
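    The terminal and in-flight status values above lend themselves to a simple polling loop. The following is a sketch only, assuming the generated v2 client and model classes follow the SDK's usual codegen naming (RedshiftDataClient, DescribeStatementRequest, StatusString); the statement identifier is a placeholder.

    import software.amazon.awssdk.services.redshiftdata.RedshiftDataClient;
    import software.amazon.awssdk.services.redshiftdata.model.DescribeStatementRequest;
    import software.amazon.awssdk.services.redshiftdata.model.DescribeStatementResponse;
    import software.amazon.awssdk.services.redshiftdata.model.StatusString;

    public class DescribeStatementSketch {
        public static void main(String[] args) throws InterruptedException {
            String statementId = "example-statement-id";   // placeholder UUID returned by ExecuteStatement
            try (RedshiftDataClient data = RedshiftDataClient.create()) {
                DescribeStatementResponse described;
                do {
                    Thread.sleep(500);   // simple fixed-interval poll; production code would back off
                    described = data.describeStatement(
                        DescribeStatementRequest.builder().id(statementId).build());
                } while (described.status() == StatusString.SUBMITTED
                      || described.status() == StatusString.PICKED
                      || described.status() == StatusString.STARTED);
                System.out.println("Final status: " + described.statusAsString()
                    + ", rows: " + described.resultRows());
            }
        }
    }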

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time (UTC) that the metadata for the SQL statement was last updated. An example is the time the status last changed.

    " + } + } + }, + "DescribeTableRequest":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{ + "shape":"Location", + "documentation":"

    The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

    " + }, + "Database":{ + "shape":"String", + "documentation":"

    The name of the database. This parameter is required when authenticating using temporary credentials.

    " + }, + "DbUser":{ + "shape":"String", + "documentation":"

    The database user name. This parameter is required when authenticating using temporary credentials.

    " + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of columns to return in the response. If more columns exist than fit in one response, then NextToken is returned to page through the results.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + }, + "Schema":{ + "shape":"String", + "documentation":"

    The schema that contains the table. If no schema is specified, then matching tables for all schemas are returned.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using AWS Secrets Manager.

    " + }, + "Table":{ + "shape":"String", + "documentation":"

    The table name. If no table is specified, then all tables for all matching schemas are returned. If no table and no schema is specified, then all tables for all schemas in the database are returned.

    " + } + } + }, + "DescribeTableResponse":{ + "type":"structure", + "members":{ + "ColumnList":{ + "shape":"ColumnList", + "documentation":"

    A list of columns in the table.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + }, + "TableName":{ + "shape":"String", + "documentation":"

    The table name.

    " + } + } + }, + "ExecuteStatementException":{ + "type":"structure", + "required":[ + "Message", + "StatementId" + ], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

    The exception message.

    " + }, + "StatementId":{ + "shape":"String", + "documentation":"

    Statement identifier of the exception.

    " + } + }, + "documentation":"

    The SQL statement encountered an environmental error while running.

    ", + "exception":true, + "fault":true + }, + "ExecuteStatementInput":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "Sql" + ], + "members":{ + "ClusterIdentifier":{ + "shape":"Location", + "documentation":"

    The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

    " + }, + "Database":{ + "shape":"String", + "documentation":"

    The name of the database. This parameter is required when authenticating using temporary credentials.

    " + }, + "DbUser":{ + "shape":"String", + "documentation":"

    The database user name. This parameter is required when authenticating using temporary credentials.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using AWS Secrets Manager.

    " + }, + "Sql":{ + "shape":"StatementString", + "documentation":"

    The SQL statement text to run.

    " + }, + "StatementName":{ + "shape":"StatementNameString", + "documentation":"

    The name of the SQL statement. You can name the SQL statement when you create it to identify the query.

    " + }, + "WithEvent":{ + "shape":"Boolean", + "documentation":"

    A value that indicates whether to send an event to the Amazon EventBridge event bus after the SQL statement runs.

    " + } + } + }, + "ExecuteStatementOutput":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{ + "shape":"Location", + "documentation":"

    The cluster identifier.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time (UTC) the statement was created.

    " + }, + "Database":{ + "shape":"String", + "documentation":"

    The name of the database.

    " + }, + "DbUser":{ + "shape":"String", + "documentation":"

    The database user name.

    " + }, + "Id":{ + "shape":"UUID", + "documentation":"

    The identifier of the statement to be run. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The name or ARN of the secret that enables access to the database.

    " + } + } + }, + "Field":{ + "type":"structure", + "members":{ + "blobValue":{ + "shape":"Blob", + "documentation":"

    A value of the BLOB data type.

    " + }, + "booleanValue":{ + "shape":"BoxedBoolean", + "documentation":"

    A value of the Boolean data type.

    " + }, + "doubleValue":{ + "shape":"BoxedDouble", + "documentation":"

    A value of the double data type.

    " + }, + "isNull":{ + "shape":"BoxedBoolean", + "documentation":"

    A value that indicates whether the data is NULL.

    " + }, + "longValue":{ + "shape":"BoxedLong", + "documentation":"

    A value of the long data type.

    " + }, + "stringValue":{ + "shape":"String", + "documentation":"

    A value of the string data type.

    " + } + }, + "documentation":"

    A data value in a column.

    " + }, + "FieldList":{ + "type":"list", + "member":{"shape":"Field"} + }, + "GetStatementResultRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"UUID", + "documentation":"

    The identifier of the SQL statement whose results are to be fetched. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. This identifier is returned by ExecuteStatement and ListStatements.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + } + } + }, + "GetStatementResultResponse":{ + "type":"structure", + "required":["Records"], + "members":{ + "ColumnMetadata":{ + "shape":"ColumnMetadataList", + "documentation":"

    The properties (metadata) of a column.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + }, + "Records":{ + "shape":"SqlRecords", + "documentation":"

    The results of the SQL statement.

    " + }, + "TotalNumRows":{ + "shape":"Long", + "documentation":"

    The total number of rows in the result set returned from a query. You can use this number to estimate the number of calls to the GetStatementResult operation needed to page through the results.
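    A sketch of paging through a result set with NextToken, for illustration only. It assumes the generated v2 client and model classes follow the SDK's usual codegen naming (RedshiftDataClient, GetStatementResultRequest, Field); the statement identifier is a placeholder.

    import java.util.List;
    import software.amazon.awssdk.services.redshiftdata.RedshiftDataClient;
    import software.amazon.awssdk.services.redshiftdata.model.Field;
    import software.amazon.awssdk.services.redshiftdata.model.GetStatementResultRequest;
    import software.amazon.awssdk.services.redshiftdata.model.GetStatementResultResponse;

    public class GetStatementResultSketch {
        public static void main(String[] args) {
            String statementId = "example-statement-id";   // placeholder UUID returned by ExecuteStatement
            try (RedshiftDataClient data = RedshiftDataClient.create()) {
                String nextToken = null;
                long rows = 0;
                do {
                    GetStatementResultResponse page = data.getStatementResult(
                        GetStatementResultRequest.builder()
                            .id(statementId)
                            .nextToken(nextToken)   // null on the first call
                            .build());
                    for (List<Field> record : page.records()) {
                        rows++;                     // each record is one row of Field values
                    }
                    nextToken = page.nextToken();
                } while (nextToken != null && !nextToken.isEmpty());
                System.out.println("Fetched rows: " + rows);
            }
        }
    }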

    " + } + } + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

    The exception message.

    " + } + }, + "documentation":"

    The Amazon Redshift Data API operation failed due to an internal error.

    ", + "exception":true, + "fault":true + }, + "ListDatabasesRequest":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{ + "shape":"Location", + "documentation":"

    The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

    " + }, + "Database":{ + "shape":"String", + "documentation":"

    The name of the database. This parameter is required when authenticating using temporary credentials.

    " + }, + "DbUser":{ + "shape":"String", + "documentation":"

    The database user name. This parameter is required when authenticating using temporary credentials.

    " + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of databases to return in the response. If more databases exist than fit in one response, then NextToken is returned to page through the results.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using AWS Secrets Manager.

    " + } + } + }, + "ListDatabasesResponse":{ + "type":"structure", + "members":{ + "Databases":{ + "shape":"DatabaseList", + "documentation":"

    The names of databases.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + } + } + }, + "ListSchemasRequest":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "Database" + ], + "members":{ + "ClusterIdentifier":{ + "shape":"Location", + "documentation":"

    The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

    " + }, + "Database":{ + "shape":"String", + "documentation":"

    The name of the database. This parameter is required when authenticating using temporary credentials.

    " + }, + "DbUser":{ + "shape":"String", + "documentation":"

    The database user name. This parameter is required when authenticating using temporary credentials.

    " + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of schemas to return in the response. If more schemas exist than fit in one response, then NextToken is returned to page through the results.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + }, + "SchemaPattern":{ + "shape":"String", + "documentation":"

    A pattern to filter results by schema name. Within a schema pattern, \"%\" means match any substring of 0 or more characters and \"_\" means match any one character. Only schema name entries matching the search pattern are returned.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using AWS Secrets Manager.

    " + } + } + }, + "ListSchemasResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + }, + "Schemas":{ + "shape":"SchemaList", + "documentation":"

    The schemas that match the request pattern.

    " + } + } + }, + "ListStatementsLimit":{ + "type":"integer", + "max":100, + "min":0 + }, + "ListStatementsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListStatementsLimit", + "documentation":"

    The maximum number of SQL statements to return in the response. If more SQL statements exist than fit in one response, then NextToken is returned to page through the results.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + }, + "StatementName":{ + "shape":"StatementNameString", + "documentation":"

    The name of the SQL statement specified as input to ExecuteStatement to identify the query. You can list multiple statements by providing a prefix that matches the beginning of the statement name. For example, to list myStatement1, myStatement2, myStatement3, and so on, provide a value of myStatement. Data API does a case-sensitive match of SQL statement names to the prefix value you provide.

    " + }, + "Status":{ + "shape":"StatusString", + "documentation":"

    The status of the SQL statement to list. Status values are defined as follows:

    • ABORTED - The query run was stopped by the user.

    • ALL - A status value that includes all query statuses. This value can be used to filter results.

    • FAILED - The query run failed.

    • FINISHED - The query has finished running.

    • PICKED - The query has been chosen to be run.

    • STARTED - The query run has started.

    • SUBMITTED - The query was submitted, but not yet processed.

    " + } + } + }, + "ListStatementsResponse":{ + "type":"structure", + "required":["Statements"], + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + }, + "Statements":{ + "shape":"StatementList", + "documentation":"

    The SQL statements.

    " + } + } + }, + "ListTablesRequest":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "Database" + ], + "members":{ + "ClusterIdentifier":{ + "shape":"Location", + "documentation":"

    The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

    " + }, + "Database":{ + "shape":"String", + "documentation":"

    The name of the database. This parameter is required when authenticating using temporary credentials.

    " + }, + "DbUser":{ + "shape":"String", + "documentation":"

    The database user name. This parameter is required when authenticating using temporary credentials.

    " + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of tables to return in the response. If more tables exist than fit in one response, then NextToken is returned to page through the results.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + }, + "SchemaPattern":{ + "shape":"String", + "documentation":"

    A pattern to filter results by schema name. Within a schema pattern, \"%\" means match any substring of 0 or more characters and \"_\" means match any one character. Only schema name entries matching the search pattern are returned. If SchemaPattern is not specified, then all tables that match TablePattern are returned. If neither SchemaPattern nor TablePattern is specified, then all tables are returned.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using AWS Secrets Manager.

    " + }, + "TablePattern":{ + "shape":"String", + "documentation":"

    A pattern to filter results by table name. Within a table pattern, \"%\" means match any substring of 0 or more characters and \"_\" means match any one character. Only table name entries matching the search pattern are returned. If TablePattern is not specified, then all tables that match SchemaPattern are returned. If neither SchemaPattern nor TablePattern is specified, then all tables are returned.
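    A short sketch of how these schema and table patterns might be used together, for illustration only. It assumes the generated v2 client and model classes follow the SDK's usual codegen naming (RedshiftDataClient, ListTablesRequest, TableMember); the cluster, database, user, and patterns are placeholders.

    import software.amazon.awssdk.services.redshiftdata.RedshiftDataClient;
    import software.amazon.awssdk.services.redshiftdata.model.ListTablesRequest;
    import software.amazon.awssdk.services.redshiftdata.model.ListTablesResponse;
    import software.amazon.awssdk.services.redshiftdata.model.TableMember;

    public class ListTablesSketch {
        public static void main(String[] args) {
            try (RedshiftDataClient data = RedshiftDataClient.create()) {
                // "%" matches any substring and "_" matches a single character, as described above.
                ListTablesResponse tables = data.listTables(ListTablesRequest.builder()
                    .clusterIdentifier("example-cluster")   // placeholder cluster
                    .database("dev")                        // placeholder database
                    .dbUser("awsuser")                      // temporary-credentials authorization
                    .schemaPattern("public")
                    .tablePattern("sales_%")
                    .build());
                for (TableMember table : tables.tables()) {
                    System.out.println(table.schema() + "." + table.name() + " (" + table.type() + ")");
                }
            }
        }
    }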

    " + } + } + }, + "ListTablesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

    " + }, + "Tables":{ + "shape":"TableList", + "documentation":"

    The tables that match the request pattern.

    " + } + } + }, + "Location":{"type":"string"}, + "Long":{"type":"long"}, + "PageSize":{"type":"integer"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId" + ], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

    The exception message.

    " + }, + "ResourceId":{ + "shape":"String", + "documentation":"

    Resource identifier associated with the exception.

    " + } + }, + "documentation":"

    The Amazon Redshift Data API operation failed due to a missing resource.

    ", + "exception":true + }, + "SchemaList":{ + "type":"list", + "member":{"shape":"String"} + }, + "SecretArn":{"type":"string"}, + "SqlRecords":{ + "type":"list", + "member":{"shape":"FieldList"} + }, + "StatementData":{ + "type":"structure", + "required":["Id"], + "members":{ + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time (UTC) the statement was created.

    " + }, + "Id":{ + "shape":"UUID", + "documentation":"

    The SQL statement identifier. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API.

    " + }, + "QueryString":{ + "shape":"StatementString", + "documentation":"

    The SQL statement.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The name or Amazon Resource Name (ARN) of the secret that enables access to the database.

    " + }, + "StatementName":{ + "shape":"StatementNameString", + "documentation":"

    The name of the SQL statement.

    " + }, + "Status":{ + "shape":"StatusString", + "documentation":"

    The status of the SQL statement. An example is that the SQL statement finished.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time (UTC) that the statement metadata was last updated.

    " + } + }, + "documentation":"

    The SQL statement to run.

    " + }, + "StatementList":{ + "type":"list", + "member":{"shape":"StatementData"} + }, + "StatementNameString":{ + "type":"string", + "max":500, + "min":0 + }, + "StatementString":{"type":"string"}, + "StatusString":{ + "type":"string", + "enum":[ + "ABORTED", + "ALL", + "FAILED", + "FINISHED", + "PICKED", + "STARTED", + "SUBMITTED" + ] + }, + "String":{"type":"string"}, + "TableList":{ + "type":"list", + "member":{"shape":"TableMember"} + }, + "TableMember":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name of the table.

    " + }, + "schema":{ + "shape":"String", + "documentation":"

    The schema containing the table.

    " + }, + "type":{ + "shape":"String", + "documentation":"

    The type of the table. Possible values include TABLE, VIEW, SYSTEM TABLE, GLOBAL TEMPORARY, LOCAL TEMPORARY, ALIAS, and SYNONYM.

    " + } + }, + "documentation":"

    The properties of a table.

    " + }, + "Timestamp":{"type":"timestamp"}, + "UUID":{"type":"string"}, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "documentation":"

    The exception message.

    " + } + }, + "documentation":"

    The Amazon Redshift Data API operation failed due to invalid input.

    ", + "exception":true + }, + "bool":{"type":"boolean"} + }, + "documentation":"

    You can use the Amazon Redshift Data API to run queries on Amazon Redshift tables. You can run individual SQL statements, which are committed if the statement succeeds.

    " +} diff --git a/services/rekognition/build.properties b/services/rekognition/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/rekognition/build.properties +++ b/services/rekognition/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index d4eae482513b..93b685e2edb3 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -1,6 +1,6 @@ commons-io @@ -80,10 +90,21 @@ ${awsjavasdk.version} test + + kms + software.amazon.awssdk + ${awsjavasdk.version} + test + org.apache.commons commons-lang3 test + + com.github.tomakehurst + wiremock + test +
    diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/AclIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/AclIntegrationTest.java new file mode 100644 index 000000000000..c5bb38836cc8 --- /dev/null +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/AclIntegrationTest.java @@ -0,0 +1,84 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3; + + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertNotNull; +import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; + +import java.util.function.Consumer; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.model.AccessControlPolicy; +import software.amazon.awssdk.services.s3.model.GetBucketAclResponse; +import software.amazon.awssdk.services.s3.model.GetObjectAclResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; + +public class AclIntegrationTest extends S3IntegrationTestBase { + + private static final String BUCKET = temporaryBucketName(AclIntegrationTest.class); + + private static final String KEY = "some-key"; + + @BeforeClass + public static void setupFixture() { + createBucket(BUCKET); + s3.putObject(PutObjectRequest.builder() + .bucket(BUCKET) + .key(KEY) + .build(), RequestBody.fromString("helloworld")); + } + + @AfterClass + public static void deleteAllBuckets() { + deleteBucketAndAllContents(BUCKET); + } + + @Test + public void putGetObjectAcl() { + GetObjectAclResponse objectAcl = s3.getObjectAcl(b -> b.bucket(BUCKET).key(KEY)); + GetObjectAclResponse objectAclAsyncResponse = s3Async.getObjectAcl(b -> b.bucket(BUCKET).key(KEY)).join(); + assertThat(objectAcl.equalsBySdkFields(objectAclAsyncResponse)).isTrue(); + Consumer aclBuilder = a -> a.owner(objectAcl.owner()) + .grants(objectAcl.grants()); + + + assertNotNull(s3.putObjectAcl(b -> b.bucket(BUCKET) + .key(KEY) + .accessControlPolicy(aclBuilder))); + + assertNotNull(s3Async.putObjectAcl(b -> b.bucket(BUCKET) + .key(KEY) + .accessControlPolicy(aclBuilder)).join()); + } + + @Test + public void putGetBucketAcl() { + GetBucketAclResponse bucketAcl = s3.getBucketAcl(b -> b.bucket(BUCKET)); + GetBucketAclResponse bucketAclAsyncResponse = s3Async.getBucketAcl(b -> b.bucket(BUCKET)).join(); + assertThat(bucketAcl.equalsBySdkFields(bucketAclAsyncResponse)).isTrue(); + Consumer aclBuilder = a -> a.owner(bucketAcl.owner()) + .grants(bucketAcl.grants()); + assertNotNull(s3.putBucketAcl(b -> b.bucket(BUCKET) + .accessControlPolicy(aclBuilder))); + assertNotNull(s3Async.putBucketAcl(b -> b.bucket(BUCKET) + .accessControlPolicy(aclBuilder)).join()); + + } +} diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncGetObjectFaultIntegrationTest.java 
b/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncGetObjectFaultIntegrationTest.java index 207dcedd5293..6b3972c2f190 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncGetObjectFaultIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncGetObjectFaultIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncResponseTransformerIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncResponseTransformerIntegrationTest.java index 58fde0a96955..945ac902af8e 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncResponseTransformerIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncResponseTransformerIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncServerSideEncryptionIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncServerSideEncryptionIntegrationTest.java index 43d59c593ad8..70241622f610 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncServerSideEncryptionIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncServerSideEncryptionIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -14,47 +14,21 @@ */ package software.amazon.awssdk.services.s3; -import static org.assertj.core.api.Fail.fail; import static software.amazon.awssdk.services.s3.model.ServerSideEncryption.AES256; -import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; -import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; -import java.io.IOException; -import java.security.SecureRandom; import java.util.Base64; import java.util.UUID; -import javax.crypto.KeyGenerator; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.junit.Test; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.ServerSideEncryption; -import software.amazon.awssdk.testutils.RandomTempFile; import software.amazon.awssdk.testutils.SdkAsserts; import software.amazon.awssdk.utils.Md5Utils; -public class AsyncServerSideEncryptionIntegrationTest extends S3IntegrationTestBase { - - private static final String BUCKET = temporaryBucketName(GetObjectIntegrationTest.class); - - private static File file; - - @BeforeClass - public static void setupFixture() throws IOException { - createBucket(BUCKET); - file = new RandomTempFile(10_000); - } - - @AfterClass - public static void tearDownFixture() { - deleteBucketAndAllContents(BUCKET); - file.delete(); - } - +public class AsyncServerSideEncryptionIntegrationTest extends ServerSideEncryptionIntegrationTestBase { @Test public void sse_AES256_succeeds() throws FileNotFoundException { String key = UUID.randomUUID().toString(); @@ -121,20 +95,27 @@ public void sse_customerManaged_succeeds() throws FileNotFoundException { verifyGetResponse(getObjectRequest); } + @Test + public void sse_onBucket_succeeds() throws FileNotFoundException { + String key = UUID.randomUUID().toString(); + + PutObjectRequest request = PutObjectRequest.builder() + .key(key) + .bucket(BUCKET_WITH_SSE) + .build(); + + s3Async.putObject(request, file.toPath()).join(); + + GetObjectRequest getObjectRequest = GetObjectRequest.builder() + .key(key) + .bucket(BUCKET_WITH_SSE) + .build(); + + verifyGetResponse(getObjectRequest); + } + private void verifyGetResponse(GetObjectRequest getObjectRequest) throws FileNotFoundException { String response = s3Async.getObject(getObjectRequest, AsyncResponseTransformer.toBytes()).join().asUtf8String(); SdkAsserts.assertStringEqualsStream(response, new FileInputStream(file)); } - - private static byte[] generateSecretKey() { - KeyGenerator generator; - try { - generator = KeyGenerator.getInstance("AES"); - generator.init(256, new SecureRandom()); - return generator.generateKey().getEncoded(); - } catch (Exception e) { - fail("Unable to generate symmetric key: " + e.getMessage()); - return null; - } - } } diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncUploadMultiplePartIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncUploadMultiplePartIntegrationTest.java index 36e942dad25e..09445bc16ee9 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncUploadMultiplePartIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/AsyncUploadMultiplePartIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketAccelerateIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketAccelerateIntegrationTest.java index 79cf1b963a72..cee5a393032a 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketAccelerateIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketAccelerateIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketAnalyticsConfigurationIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketAnalyticsConfigurationIntegrationTest.java index 1b155687aa2b..c9062ccadd55 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketAnalyticsConfigurationIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketAnalyticsConfigurationIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketInventoryConfigurationIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketInventoryConfigurationIntegrationTest.java index ab2298268e79..9a866d8b5768 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketInventoryConfigurationIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/BucketInventoryConfigurationIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/CopyObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/CopyObjectIntegrationTest.java index 3f73265db1ba..2e956d06538d 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/CopyObjectIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/CopyObjectIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/CreateBucketIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/CreateBucketIntegrationTest.java index 902f2589b7c5..f83d504bb2ef 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/CreateBucketIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/CreateBucketIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -21,8 +21,8 @@ import org.junit.Test; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.model.CreateBucketRequest; -import software.amazon.awssdk.services.s3.model.GetBucketLocationRequest; import software.amazon.awssdk.services.s3.utils.S3TestUtils; +import software.amazon.awssdk.testutils.Waiter; public class CreateBucketIntegrationTest extends S3IntegrationTestBase { @@ -44,9 +44,8 @@ public static void cleanup() { public void createBucket_InUsEast1_Succeeds() { US_EAST_1_CLIENT.createBucket(CreateBucketRequest.builder().bucket(US_EAST_1_BUCKET_NAME).build()); - String region = US_EAST_1_CLIENT.getBucketLocation(GetBucketLocationRequest.builder() - .bucket(US_EAST_1_BUCKET_NAME) - .build()) + String region = Waiter.run(() -> US_EAST_1_CLIENT.getBucketLocation(r -> r.bucket(US_EAST_1_BUCKET_NAME))) + .orFail() .locationConstraintAsString(); assertThat(region).isEqualToIgnoringCase(""); } @@ -56,7 +55,9 @@ public void createBucket_Succeeds_WithoutSpecifyingBucketLocation() { S3Client client = S3Client.builder().region(Region.US_WEST_2).credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); client.createBucket(CreateBucketRequest.builder().bucket(BUCKET_NAME).build()); - String region = client.getBucketLocation(GetBucketLocationRequest.builder().bucket(BUCKET_NAME).build()).locationConstraintAsString(); + String region = Waiter.run(() -> client.getBucketLocation(r -> r.bucket(BUCKET_NAME))) + .orFail() + .locationConstraintAsString(); assertThat(region).isEqualToIgnoringCase("us-west-2"); } diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/ExceptionUnmarshallingIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/ExceptionUnmarshallingIntegrationTest.java index cd2c158e8d30..f16a21ff5039 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/ExceptionUnmarshallingIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/ExceptionUnmarshallingIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -19,8 +19,11 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; +import java.nio.charset.StandardCharsets; + import org.junit.AfterClass; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Test; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.model.BucketAlreadyExistsException; @@ -163,6 +166,17 @@ public void asyncHeadBucketWrongRegion() { .satisfies(e -> assertThat(((S3Exception) (e.getCause())).statusCode()).isEqualTo(301)); } + @Test + @Ignore("TODO") + public void errorResponseContainsRawBytes() { + assertThatThrownBy(() -> s3.getObjectAcl(b -> b.bucket(BUCKET + KEY).key(KEY))) + .isInstanceOf(NoSuchBucketException.class) + .satisfies(e -> assertThat( + ((NoSuchBucketException) e).awsErrorDetails().rawResponse().asString(StandardCharsets.UTF_8)) + .startsWith("\nNoSuchBucketThe " + + "specified bucket does not exist")); + } + private void assertMetadata(S3Exception e, String expectedErrorCode) { assertThat(e.awsErrorDetails()).satisfies( errorDetails -> { diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectAsyncIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectAsyncIntegrationTest.java index 356ae78fa893..eb8a07a08aae 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectAsyncIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectAsyncIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectFaultIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectFaultIntegrationTest.java index 501b896f57bb..414e8edb4727 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectFaultIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectFaultIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectIntegrationTest.java index 2c92e61746d4..fcb78ef29325 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -74,7 +74,6 @@ public void toInputStream() throws Exception { } } - @Test public void toInputStream_loadFromProperties() throws IOException { s3.putObject(b -> b.bucket(BUCKET).key(PROPERTY_KEY), RequestBody.fromString("test: test")); @@ -117,6 +116,13 @@ public void customResponseHandler_InterceptorRecievesResponsePojo() throws Excep } } + @Test + public void contentRangeIsReturnedForRangeRequests() { + ResponseInputStream stream = s3.getObject(getObjectRequest.copy(r -> r.range("bytes=0-1"))); + stream.abort(); + assertThat(stream.response().contentRange()).isEqualTo("bytes 0-1/10000"); + } + private S3Client createClientWithInterceptor(ExecutionInterceptor interceptor) { return s3ClientBuilder().overrideConfiguration(ClientOverrideConfiguration.builder() .addExecutionInterceptor(interceptor) diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/HeadObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/HeadObjectIntegrationTest.java new file mode 100644 index 000000000000..4218791bc6ce --- /dev/null +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/HeadObjectIntegrationTest.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.zip.GZIPOutputStream; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; + +public class HeadObjectIntegrationTest extends S3IntegrationTestBase { + private static final String BUCKET = temporaryBucketName(HeadObjectIntegrationTest.class); + + private static final String GZIPPED_KEY = "some-key"; + + @BeforeClass + public static void setupFixture() throws IOException { + createBucket(BUCKET); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + GZIPOutputStream gzos = new GZIPOutputStream(baos); + gzos.write("Test".getBytes(StandardCharsets.UTF_8)); + + s3.putObject(PutObjectRequest.builder() + .bucket(BUCKET) + .key(GZIPPED_KEY) + .contentEncoding("gzip") + .build(), + RequestBody.fromBytes(baos.toByteArray())); + } + + @Test + public void asyncClientSupportsGzippedObjects() { + HeadObjectResponse response = s3Async.headObject(r -> r.bucket(BUCKET).key(GZIPPED_KEY)).join(); + assertThat(response.contentEncoding()).isEqualTo("gzip"); + } + + @Test + public void syncClientSupportsGzippedObjects() { + HeadObjectResponse response = s3.headObject(r -> r.bucket(BUCKET).key(GZIPPED_KEY)); + assertThat(response.contentEncoding()).isEqualTo("gzip"); + } +} diff --git 
a/services/s3/src/it/java/software/amazon/awssdk/services/s3/KeysWithLeadingSlashIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/KeysWithLeadingSlashIntegrationTest.java index dcc8f0b25454..aeb10a99dc6f 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/KeysWithLeadingSlashIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/KeysWithLeadingSlashIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -26,7 +26,9 @@ public class KeysWithLeadingSlashIntegrationTest extends S3IntegrationTestBase { private static final String BUCKET = temporaryBucketName(KeysWithLeadingSlashIntegrationTest.class); - private static final String KEY = "/stupidkeywithillegalleadingslashthatsucks"; + private static final String KEY = "/keyWithLeadingSlash"; + private static final String SLASH_KEY = "/"; + private static final String KEY_WITH_SLASH_AND_SPECIAL_CHARS = "/special-chars-@$%"; private static final byte[] CONTENT = "Hello".getBytes(StandardCharsets.UTF_8); @BeforeClass @@ -42,9 +44,26 @@ public static void cleanup() { @Test public void putObject_KeyWithLeadingSlash_Succeeds() { - s3.putObject(r -> r.bucket(BUCKET).key(KEY), RequestBody.fromBytes(CONTENT)); + verify(KEY); + } + + @Test + public void slashKey_shouldSucceed() { + verify(SLASH_KEY); + } + + @Test + public void slashKeyWithSpecialChar_shouldSucceed() { + verify(KEY_WITH_SLASH_AND_SPECIAL_CHARS); + } + + private void verify(String key) { + s3.putObject(r -> r.bucket(BUCKET).key(key), RequestBody.fromBytes(CONTENT)); + + assertThat(s3.getObjectAsBytes(r -> r.bucket(BUCKET).key(key)).asByteArray()).isEqualTo(CONTENT); + String retrievedKey = s3.listObjects(r -> r.bucket(BUCKET)).contents().get(0).key(); - assertThat(retrievedKey).isEqualTo(KEY); + assertThat(retrievedKey).isEqualTo(key); } } diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/ListObjectsIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/ListObjectsIntegrationTest.java new file mode 100644 index 000000000000..d3a039f7478e --- /dev/null +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/ListObjectsIntegrationTest.java @@ -0,0 +1,193 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; + +import java.text.DecimalFormat; +import java.text.NumberFormat; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.model.EncodingType; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Object; + +/** + * Integration tests for the listObjects operation in the Amazon S3 Java + * client. + */ +public class ListObjectsIntegrationTest extends S3IntegrationTestBase { + /** + * One hour in milliseconds for verifying that a last modified date is recent. + */ + private static final long ONE_HOUR_IN_MILLISECONDS = 1000 * 60 * 60; + + private static final String KEY_NAME_WITH_SPECIAL_CHARS = "special-chars-@$%"; + private static final int BUCKET_OBJECTS = 15; + /** + * The name of the bucket created, used, and deleted by these tests. + */ + private static String bucketName = temporaryBucketName("list-objects-integ-test"); + /** + * List of all keys created by these tests. + */ + private static List keys = new ArrayList<>(); + + + /** + * Releases all resources created in this test. + */ + @AfterClass + public static void tearDown() { + deleteBucketAndAllContents(bucketName); + } + + /** + * Creates all the test resources for the tests. 
+ */ + @BeforeClass + public static void createResources() throws Exception { + createBucket(bucketName); + + NumberFormat numberFormatter = new DecimalFormat("##00"); + for (int i = 1; i <= BUCKET_OBJECTS; i++) { + createKey("key-" + numberFormatter.format(i)); + } + createKey("aaaaa"); + createKey("aaaaa/aaaaa/aaaaa"); + createKey("aaaaa/aaaaa+a"); + createKey("aaaaa/aaaaa//aaaaa"); + createKey(KEY_NAME_WITH_SPECIAL_CHARS); + } + + private static void createKey(String key) { + s3.putObject(PutObjectRequest.builder() + .bucket(bucketName) + .key(key) + .build(), + RequestBody.fromString(RandomStringUtils.random(1000))); + keys.add(key); + } + + @Test + public void listObjectsNoParameters() { + ListObjectsResponse result = s3.listObjects(ListObjectsRequest.builder().bucket(bucketName).build()); + List objects = result.contents(); + + assertEquals(keys.size(), objects.size()); + assertEquals(bucketName, result.name()); + assertS3ObjectSummariesAreValid(objects); + assertNotNull(result.maxKeys()); + + // We didn't use a delimiter, so we expect these to be empty/null + assertNull(result.delimiter()); + + // We don't expect any truncated results + assertFalse(result.isTruncated()); + + // We didn't set other request parameters, so we expect them to be empty + assertNull(result.encodingType()); + assertThat(result.prefix()).isEmpty(); + } + + @Test + public void listObjectsWithAllElements() { + String delimiter = "/"; + String marker = "aaa"; + ListObjectsResponse result = s3.listObjects(ListObjectsRequest.builder() + .bucket(bucketName) + .prefix(KEY_NAME_WITH_SPECIAL_CHARS) + .marker(marker) + .encodingType(EncodingType.URL) + .delimiter(delimiter) + .build()); + List objects = result.contents(); + + assertEquals(bucketName, result.name()); + assertS3ObjectSummariesAreValid(objects); + assertEquals(marker, result.marker()); + assertEquals(delimiter, result.delimiter()); + assertEquals(KEY_NAME_WITH_SPECIAL_CHARS, result.prefix()); + + assertFalse(result.isTruncated()); + assertTrue(result.maxKeys() >= 1000); + } + + @Test + public void listObjectsWithMaxKeys() { + int maxKeys = 4; + ListObjectsResponse result = s3.listObjects(ListObjectsRequest.builder() + .bucket(bucketName) + .maxKeys(maxKeys) + .build()); + + List objects = result.contents(); + + assertEquals(maxKeys, objects.size()); + assertEquals(bucketName, result.name()); + assertThat(maxKeys).isEqualTo(result.maxKeys()); + assertS3ObjectSummariesAreValid(objects); + + // We didn't use a delimiter, so we expect this to be empty/null + assertNull(result.delimiter()); + + // We expect truncated results since we set maxKeys + assertTrue(result.isTruncated()); + + // URL encoding is requested by default + + // We didn't set other request parameters, so we expect them to be empty + assertNull(result.encodingType()); + assertThat(result.prefix()).isEmpty(); + assertNull(result.delimiter()); + } + + /** + * Asserts that a list of S3Object objects are valid, by checking + * that expected fields are not null or empty, that ETag values don't + * contain leading or trailing quotes, that the last modified date is + * recent, etc. + * @param objectSummaries The list of objects to validate. 
+ * + */ + private void assertS3ObjectSummariesAreValid(List objectSummaries) { + for (S3Object obj : objectSummaries) { + assertTrue(obj.eTag().length() > 1); + assertTrue(obj.key().length() > 1); + + // Verify that the last modified date is within an hour + assertNotNull(obj.lastModified()); + long offset = obj.lastModified().toEpochMilli() - Instant.now().toEpochMilli(); + assertTrue(offset < ONE_HOUR_IN_MILLISECONDS); + + assertTrue(obj.storageClassAsString().length() > 1); + } + } +} diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/ListObjectsV2PaginatorsIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/ListObjectsV2PaginatorsIntegrationTest.java index 79229e316977..f0333b3b715e 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/ListObjectsV2PaginatorsIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/ListObjectsV2PaginatorsIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/ObjectTaggingIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/ObjectTaggingIntegrationTest.java index 4a5f488c4aea..f27a5534e889 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/ObjectTaggingIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/ObjectTaggingIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -20,11 +20,13 @@ import java.util.ArrayList; import java.util.List; +import org.apache.commons.lang3.RandomStringUtils; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.s3.model.BucketVersioningStatus; +import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; import software.amazon.awssdk.services.s3.model.CopyObjectRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectTaggingRequest; import software.amazon.awssdk.services.s3.model.GetObjectTaggingRequest; @@ -33,6 +35,7 @@ import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.Tag; import software.amazon.awssdk.services.s3.model.Tagging; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; import software.amazon.awssdk.services.s3.model.VersioningConfiguration; /** @@ -95,10 +98,7 @@ public void putObject_WithTagging_Succeeds() { @Test public void getObjectTagging_Succeeds() { - List tagSet = new ArrayList<>(); - tagSet.add(Tag.builder().key("foo").value("1").build()); - tagSet.add(Tag.builder().key("bar").value("2").build()); - tagSet.add(Tag.builder().key("baz").value("3").build()); + List tagSet = tags(); Tagging tags = Tagging.builder().tagSet(tagSet).build(); @@ -147,10 +147,7 @@ public void putObjectTagging_Succeeds_WithUrlEncodedTags() { @Test public void copyObject_Succeeds_WithNewTags() { - List tagSet = new ArrayList<>(); - tagSet.add(Tag.builder().key("foo").value("1").build()); - tagSet.add(Tag.builder().key("bar").value("2").build()); - tagSet.add(Tag.builder().key("baz").value("3").build()); + List tagSet = tags(); Tagging tags = Tagging.builder().tagSet(tagSet).build(); @@ -183,12 +180,43 @@ public void copyObject_Succeeds_WithNewTags() { assertThat(getTaggingResult).containsExactlyInAnyOrder(tagsCopy.tagSet().toArray(new Tag[tagsCopy.tagSet().size()])); } - @Test - public void testDeleteObjectTagging() { + private List tags() { List tagSet = new ArrayList<>(); tagSet.add(Tag.builder().key("foo").value("1").build()); tagSet.add(Tag.builder().key("bar").value("2").build()); tagSet.add(Tag.builder().key("baz").value("3").build()); + return tagSet; + } + + @Test + public void multipartUploadWithNewTags_shouldSucceed() { + List tagSet = tags(); + + Tagging tags = Tagging.builder().tagSet(tagSet).build(); + + String key = makeNewKey(); + String uploadId = + s3.createMultipartUpload(b -> b.tagging(tags).bucket(BUCKET).key(key)).uploadId(); + + UploadPartResponse uploadPartResponse = s3.uploadPart(b -> b.bucket(BUCKET).key(key).partNumber(1).uploadId(uploadId), + RequestBody.fromString(RandomStringUtils.random(1000))); + CompletedMultipartUpload parts = + CompletedMultipartUpload.builder().parts(p -> p.partNumber(1).eTag(uploadPartResponse.eTag()).build()).build(); + + s3.completeMultipartUpload(b -> b.bucket(BUCKET).key(key).multipartUpload(parts).uploadId(uploadId).build()); + + List getTaggingResult = s3.getObjectTagging(GetObjectTaggingRequest.builder() + .bucket(BUCKET) + .key(key) + .build()) + .tagSet(); + + assertThat(getTaggingResult).containsExactlyInAnyOrder(tags.tagSet().toArray(new Tag[0])); + } + + @Test + public void testDeleteObjectTagging() { + List tagSet = tags(); Tagging tags = Tagging.builder().tagSet(tagSet).build(); diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/OperationsWithNonStandardResponsesIntegrationTest.java 
b/services/s3/src/it/java/software/amazon/awssdk/services/s3/OperationsWithNonStandardResponsesIntegrationTest.java index aebc668c6b8f..bb2142bebbf6 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/OperationsWithNonStandardResponsesIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/OperationsWithNonStandardResponsesIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/PutObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/PutObjectIntegrationTest.java index 77aeae12a5e0..4e548feb4bae 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/PutObjectIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/PutObjectIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3IntegrationTestBase.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3IntegrationTestBase.java index cc2700609c53..f38a93e59fdb 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3IntegrationTestBase.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3IntegrationTestBase.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3ListObjectsV2IntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3ListObjectsV2IntegrationTest.java index 761dd0a201e5..0e64f5e4135f 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3ListObjectsV2IntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3ListObjectsV2IntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java new file mode 100644 index 000000000000..735f3455b8af --- /dev/null +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java @@ -0,0 +1,374 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.net.HttpURLConnection; +import java.time.Duration; +import java.util.Optional; +import java.util.UUID; +import java.util.function.Consumer; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse; +import software.amazon.awssdk.services.s3.model.MultipartUpload; +import software.amazon.awssdk.services.s3.model.RequestPayer; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.services.s3.presigner.model.PresignedAbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedCompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; +import software.amazon.awssdk.services.s3.utils.S3TestUtils; +import software.amazon.awssdk.utils.IoUtils; +import software.amazon.awssdk.utils.StringInputStream; + +public class S3PresignerIntegrationTest { + private static S3Client client; + private static String testBucket; + private static String testNonDnsCompatibleBucket; + private static String testGetObjectKey; + private static String testObjectContent; + + private S3Presigner presigner; + + @BeforeClass + public static void setUpClass() { + client = S3Client.create(); + testBucket = S3TestUtils.getTestBucket(client); + testNonDnsCompatibleBucket = S3TestUtils.getNonDnsCompatibleTestBucket(client); + testGetObjectKey = generateRandomObjectKey(); + testObjectContent = "Howdy!"; + + 
S3TestUtils.putObject(S3PresignerIntegrationTest.class, client, testBucket, testGetObjectKey, testObjectContent); + S3TestUtils.putObject(S3PresignerIntegrationTest.class, client, testNonDnsCompatibleBucket, testGetObjectKey, testObjectContent); + } + + @AfterClass + public static void tearDownClass() { + S3TestUtils.runCleanupTasks(S3PresignerIntegrationTest.class); + client.close(); + } + + private static String generateRandomObjectKey() { + return "s3-presigner-it-" + UUID.randomUUID(); + } + + @Before + public void setUpInstance() { + this.presigner = S3Presigner.create(); + } + + @After + public void testDownInstance() { + this.presigner.close(); + } + + + @Test + public void browserCompatiblePresignedUrlWorks() throws IOException { + assertThatPresigningWorks(testBucket, testGetObjectKey); + } + + @Test + public void bucketsWithScaryCharactersWorks() throws IOException { + assertThatPresigningWorks(testNonDnsCompatibleBucket, testGetObjectKey); + } + + @Test + public void keysWithScaryCharactersWorks() throws IOException { + String scaryObjectKey = testGetObjectKey + " !'/()~`"; + S3TestUtils.putObject(S3PresignerIntegrationTest.class, client, testBucket, scaryObjectKey, testObjectContent); + + assertThatPresigningWorks(testBucket, scaryObjectKey); + } + + private void assertThatPresigningWorks(String bucket, String objectKey) throws IOException { + PresignedGetObjectRequest presigned = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(gor -> gor.bucket(bucket).key(objectKey))); + + assertThat(presigned.isBrowserExecutable()).isTrue(); + + try (InputStream response = presigned.url().openConnection().getInputStream()) { + assertThat(IoUtils.toUtf8String(response)).isEqualTo(testObjectContent); + } + } + + @Test + public void browserIncompatiblePresignedUrlDoesNotWorkWithoutAdditionalHeaders() throws IOException { + PresignedGetObjectRequest presigned = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(gor -> gor.bucket(testBucket) + .key(testGetObjectKey) + .requestPayer(RequestPayer.REQUESTER))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + + HttpURLConnection connection = (HttpURLConnection) presigned.url().openConnection(); + connection.connect(); + try { + assertThat(connection.getResponseCode()).isEqualTo(403); + } finally { + connection.disconnect(); + } + } + + @Test + public void browserIncompatiblePresignedUrlWorksWithAdditionalHeaders() throws IOException { + PresignedGetObjectRequest presigned = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(gor -> gor.bucket(testBucket) + .key(testGetObjectKey) + .requestPayer(RequestPayer.REQUESTER))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + + HttpURLConnection connection = (HttpURLConnection) presigned.url().openConnection(); + + presigned.httpRequest().headers().forEach((header, values) -> { + values.forEach(value -> { + connection.addRequestProperty(header, value); + }); + }); + + try (InputStream content = connection.getInputStream()) { + assertThat(IoUtils.toUtf8String(content)).isEqualTo(testObjectContent); + } + } + + @Test + public void getObject_PresignedHttpRequestCanBeInvokedDirectlyBySdk() throws IOException { + PresignedGetObjectRequest presigned = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(gor -> gor.bucket(testBucket) + .key(testGetObjectKey) + .requestPayer(RequestPayer.REQUESTER))); + + 
assertThat(presigned.isBrowserExecutable()).isFalse(); + + SdkHttpClient httpClient = ApacheHttpClient.builder().build(); // or UrlConnectionHttpClient.builder().build() + + ContentStreamProvider requestPayload = presigned.signedPayload() + .map(SdkBytes::asContentStreamProvider) + .orElse(null); + + HttpExecuteRequest request = HttpExecuteRequest.builder() + .request(presigned.httpRequest()) + .contentStreamProvider(requestPayload) + .build(); + + HttpExecuteResponse response = httpClient.prepareRequest(request).call(); + + assertThat(response.responseBody()).isPresent(); + try (InputStream responseStream = response.responseBody().get()) { + assertThat(IoUtils.toUtf8String(responseStream)).isEqualTo(testObjectContent); + } + } + + @Test + public void putObject_PresignedHttpRequestCanBeInvokedDirectlyBySdk() throws IOException { + String objectKey = generateRandomObjectKey(); + S3TestUtils.addCleanupTask(S3PresignerIntegrationTest.class, + () -> client.deleteObject(r -> r.bucket(testBucket).key(objectKey))); + + PresignedPutObjectRequest presigned = + presigner.presignPutObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .putObjectRequest(por -> por.bucket(testBucket).key(objectKey))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + + SdkHttpClient httpClient = ApacheHttpClient.builder().build(); // or UrlConnectionHttpClient.builder().build() + + ContentStreamProvider requestPayload = () -> new StringInputStream(testObjectContent); + + HttpExecuteRequest request = HttpExecuteRequest.builder() + .request(presigned.httpRequest()) + .contentStreamProvider(requestPayload) + .build(); + + HttpExecuteResponse response = httpClient.prepareRequest(request).call(); + + assertThat(response.responseBody()).isPresent(); + assertThat(response.httpResponse().isSuccessful()).isTrue(); + response.responseBody().ifPresent(AbortableInputStream::abort); + String content = client.getObjectAsBytes(r -> r.bucket(testBucket).key(objectKey)).asUtf8String(); + assertThat(content).isEqualTo(testObjectContent); + } + + @Test + public void createMultipartUpload_CanBePresigned() throws IOException { + String objectKey = generateRandomObjectKey(); + + PresignedCreateMultipartUploadRequest presigned = + presigner.presignCreateMultipartUpload(p -> p.signatureDuration(Duration.ofMinutes(10)) + .createMultipartUploadRequest(createMultipartUploadRequest(objectKey))); + + HttpExecuteResponse response = execute(presigned, null); + + assertThat(response.httpResponse().isSuccessful()).isTrue(); + + Optional upload = getMultipartUpload(objectKey); + assertThat(upload).isPresent(); + + client.abortMultipartUpload(abortMultipartUploadRequest(objectKey, upload.get().uploadId())); + } + + @Test + public void uploadPart_CanBePresigned() throws IOException { + String objectKey = generateRandomObjectKey(); + S3TestUtils.addCleanupTask(S3PresignerIntegrationTest.class, + () -> client.deleteObject(r -> r.bucket(testBucket).key(objectKey))); + + CreateMultipartUploadResponse create = client.createMultipartUpload(createMultipartUploadRequest(objectKey)); + S3TestUtils.addCleanupTask(S3PresignerIntegrationTest.class, + () -> client.abortMultipartUpload(abortMultipartUploadRequest(objectKey, create.uploadId()))); + + PresignedUploadPartRequest uploadPart = + presigner.presignUploadPart(up -> up.signatureDuration(Duration.ofDays(1)) + .uploadPartRequest(upr -> upr.bucket(testBucket) + .key(objectKey) + .partNumber(1) + .uploadId(create.uploadId()))); + + + HttpExecuteResponse uploadPartResponse = execute(uploadPart, 
testObjectContent); + assertThat(uploadPartResponse.httpResponse().isSuccessful()).isTrue(); + String etag = uploadPartResponse.httpResponse().firstMatchingHeader("ETag").orElse(null); + + client.completeMultipartUpload(createMultipartUploadRequest(objectKey, create, etag)); + + String content = client.getObjectAsBytes(r -> r.bucket(testBucket).key(objectKey)).asUtf8String(); + assertThat(content).isEqualTo(testObjectContent); + } + + @Test + public void completeMultipartUpload_CanBePresigned() throws IOException { + String objectKey = generateRandomObjectKey(); + S3TestUtils.addCleanupTask(S3PresignerIntegrationTest.class, + () -> client.deleteObject(r -> r.bucket(testBucket).key(objectKey))); + + CreateMultipartUploadResponse create = client.createMultipartUpload(createMultipartUploadRequest(objectKey)); + S3TestUtils.addCleanupTask(S3PresignerIntegrationTest.class, + () -> client.abortMultipartUpload(abortMultipartUploadRequest(objectKey, create.uploadId()))); + + UploadPartResponse uploadPartResponse = client.uploadPart(uploadPartRequest(objectKey, create), + RequestBody.fromString(testObjectContent)); + String etag = uploadPartResponse.eTag(); + + PresignedCompleteMultipartUploadRequest presignedRequest = + presigner.presignCompleteMultipartUpload( + r -> r.signatureDuration(Duration.ofDays(1)) + .completeMultipartUploadRequest(createMultipartUploadRequest(objectKey, create, etag))); + + assertThat(execute(presignedRequest, presignedRequest.signedPayload().get().asUtf8String()) + .httpResponse().isSuccessful()).isTrue(); + + String content = client.getObjectAsBytes(r -> r.bucket(testBucket).key(objectKey)).asUtf8String(); + assertThat(content).isEqualTo(testObjectContent); + } + + @Test + public void abortMultipartUpload_CanBePresigned() throws IOException { + String objectKey = generateRandomObjectKey(); + S3TestUtils.addCleanupTask(S3PresignerIntegrationTest.class, + () -> client.deleteObject(r -> r.bucket(testBucket).key(objectKey))); + + CreateMultipartUploadResponse create = client.createMultipartUpload(createMultipartUploadRequest(objectKey)); + S3TestUtils.addCleanupTask(S3PresignerIntegrationTest.class, + () -> client.abortMultipartUpload(abortMultipartUploadRequest(objectKey, create.uploadId()))); + + PresignedAbortMultipartUploadRequest presignedRequest = presigner.presignAbortMultipartUpload( + r -> r.signatureDuration(Duration.ofDays(1)) + .abortMultipartUploadRequest(abortMultipartUploadRequest(objectKey, create.uploadId()))); + + + assertThat(execute(presignedRequest, null).httpResponse().isSuccessful()).isTrue(); + + assertThat(getMultipartUpload(objectKey)).isNotPresent(); + } + + private Consumer createMultipartUploadRequest(String objectKey) { + return r -> r.bucket(testBucket).key(objectKey); + } + + private Consumer uploadPartRequest(String objectKey, CreateMultipartUploadResponse create) { + return r -> r.bucket(testBucket) + .key(objectKey) + .partNumber(1) + .uploadId(create.uploadId()); + } + + private Consumer createMultipartUploadRequest(String objectKey, CreateMultipartUploadResponse create, String etag) { + return c -> c.bucket(testBucket) + .key(objectKey) + .uploadId(create.uploadId()) + .multipartUpload(m -> m.parts(p -> p.partNumber(1).eTag(etag))); + } + + private Consumer abortMultipartUploadRequest(String objectKey, String uploadId) { + return r -> r.bucket(testBucket) + .key(objectKey) + .uploadId(uploadId); + } + + private Optional getMultipartUpload(String objectKey) { + return client.listMultipartUploadsPaginator(r -> 
r.bucket(testBucket).prefix(objectKey)) + .uploads() + .stream() + .filter(u -> u.key().equals(objectKey)) + .findAny(); + } + + private HttpExecuteResponse execute(PresignedRequest presigned, String payload) throws IOException { + SdkHttpClient httpClient = ApacheHttpClient.builder().build(); + + ContentStreamProvider requestPayload = payload == null ? null : () -> new StringInputStream(payload); + + HttpExecuteRequest request = HttpExecuteRequest.builder() + .request(presigned.httpRequest()) + .contentStreamProvider(requestPayload) + .build(); + + return httpClient.prepareRequest(request).call(); + } +} diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3ResponseMetadataIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3ResponseMetadataIntegrationTest.java index 484f613a25d4..1763788d1627 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3ResponseMetadataIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3ResponseMetadataIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/ServerSideEncryptionIntegrationTestBase.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/ServerSideEncryptionIntegrationTestBase.java new file mode 100644 index 000000000000..32d7ba225af4 --- /dev/null +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/ServerSideEncryptionIntegrationTestBase.java @@ -0,0 +1,79 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package software.amazon.awssdk.services.s3; + +import static org.assertj.core.api.Fail.fail; +import static software.amazon.awssdk.services.s3.S3IntegrationTestBase.createBucket; +import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; + +import java.io.File; +import java.io.IOException; +import java.security.SecureRandom; +import javax.crypto.KeyGenerator; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import software.amazon.awssdk.services.kms.KmsClient; +import software.amazon.awssdk.services.s3.model.ServerSideEncryption; +import software.amazon.awssdk.testutils.RandomTempFile; + +public class ServerSideEncryptionIntegrationTestBase extends S3IntegrationTestBase { + + protected static final String BUCKET = temporaryBucketName(ServerSideEncryptionIntegrationTestBase.class); + protected static final String BUCKET_WITH_SSE = temporaryBucketName(ServerSideEncryptionIntegrationTestBase.class); + + private static final KmsClient KMS = KmsClient.builder() + .region(DEFAULT_REGION) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + + protected static File file; + + private static String keyId; + + @BeforeClass + public static void setupFixture() throws IOException { + createBucket(BUCKET); + createBucket(BUCKET_WITH_SSE); + keyId = KMS.createKey().keyMetadata().keyId(); + + s3.putBucketEncryption(r -> r + .bucket(BUCKET_WITH_SSE) + .serverSideEncryptionConfiguration(ssec -> ssec + .rules(rule -> rule + .applyServerSideEncryptionByDefault(d -> d.kmsMasterKeyID(keyId) + .sseAlgorithm(ServerSideEncryption.AWS_KMS))))); + file = new RandomTempFile(10_000); + } + + @AfterClass + public static void tearDownFixture() { + deleteBucketAndAllContents(BUCKET); + deleteBucketAndAllContents(BUCKET_WITH_SSE); + file.delete(); + KMS.scheduleKeyDeletion(r -> r.keyId(keyId)); + } + + protected static byte[] generateSecretKey() { + KeyGenerator generator; + try { + generator = KeyGenerator.getInstance("AES"); + generator.init(256, new SecureRandom()); + return generator.generateKey().getEncoded(); + } catch (Exception e) { + fail("Unable to generate symmetric key: " + e.getMessage()); + return null; + } + } +} diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/SignedAsyncRequestBodyUploadIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/SignedAsyncRequestBodyUploadIntegrationTest.java new file mode 100644 index 000000000000..c8752045d1a5 --- /dev/null +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/SignedAsyncRequestBodyUploadIntegrationTest.java @@ -0,0 +1,143 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.core.client.config.SdkAdvancedClientOption.SIGNER; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.auth.signer.AsyncAws4Signer; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.signer.AsyncSigner; +import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpHeaders; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.utils.S3TestUtils; + +/** + * This is an integration test to verify that {@link AsyncAws4Signer} is able to correctly sign async requests that + * have a streaming payload. + */ +public class SignedAsyncRequestBodyUploadIntegrationTest extends S3IntegrationTestBase { + private static final String BUCKET = "signed-body-test-" + System.currentTimeMillis(); + private static S3AsyncClient testClient; + + private static TestSigner mockSigner; + private static final CapturingInterceptor capturingInterceptor = new CapturingInterceptor(); + + @BeforeClass + public static void setup() throws Exception { + S3IntegrationTestBase.setUp(); + + // Use a mock so we can introspect easily to verify that the signer was used for the request + mockSigner = mock(TestSigner.class); + + AsyncAws4Signer realSigner = AsyncAws4Signer.create(); + + when(mockSigner.sign(any(SdkHttpFullRequest.class), any(AsyncRequestBody.class), any(ExecutionAttributes.class))) + .thenAnswer(i -> { + SdkHttpFullRequest request = i.getArgumentAt(0, SdkHttpFullRequest.class); + AsyncRequestBody body = i.getArgumentAt(1, AsyncRequestBody.class); + ExecutionAttributes executionAttributes = i.getArgumentAt(2, ExecutionAttributes.class); + return realSigner.sign(request, body, executionAttributes); + }); + + testClient = s3AsyncClientBuilder() + .overrideConfiguration(o -> o + .putAdvancedOption(SIGNER, mockSigner) + .addExecutionInterceptor(capturingInterceptor)) + .build(); + + createBucket(BUCKET); + } + + @AfterClass + public static void teardown() { + S3TestUtils.deleteBucketAndAllContents(s3, BUCKET); + s3.close(); + s3Async.close(); + testClient.close(); + } + + @Before + public void methodSetup() { + capturingInterceptor.reset(); + } + + @Test + public void test_putObject_bodyIsSigned_succeeds() { + PutObjectRequest request = PutObjectRequest.builder() + .bucket(BUCKET).key("test.txt") + // Instructs the signer to include the SHA-256 of the body as a header; bit weird but that's how it's + // done + // See 
https://github.com/aws/aws-sdk-java-v2/blob/aeb4b5853c8f833f266110f1e01d6e10ea6ac1c5/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java#L75-L77 + .overrideConfiguration(o -> o.putHeader("x-amz-content-sha256", "required")) + .build(); + + testClient.putObject(request, AsyncRequestBody.fromString("Hello S3")).join(); + + // Ensure that the client used our signer + verify(mockSigner, atLeastOnce()).sign( + any(SdkHttpFullRequest.class), any(AsyncRequestBody.class), any(ExecutionAttributes.class)); + + List<String> capturedSha256Values = capturingInterceptor.capturedRequests().stream() + .map(SdkHttpHeaders::headers) + .map(m -> m.getOrDefault("x-amz-content-sha256", Collections.emptyList())) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + + assertThat(capturedSha256Values) + // echo -n 'Hello S3' | shasum -a 256 + .containsExactly("c9f7ed78c073c16bcb2f76fa4a5739cb6cf81677d32fdbeda1d69350d107b6f3"); + } + + private interface TestSigner extends AsyncSigner, Signer { + } + + private static class CapturingInterceptor implements ExecutionInterceptor { + private final List<SdkHttpRequest> capturedRequests = new ArrayList<>(); + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + capturedRequests.add(context.httpRequest()); + } + + public void reset() { + capturedRequests.clear(); + } + + public List<SdkHttpRequest> capturedRequests() { + return capturedRequests; + } + } +} diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/SyncServerSideEncryptionIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/SyncServerSideEncryptionIntegrationTest.java index 3d7dc3901c40..23235b89a21b 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/SyncServerSideEncryptionIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/SyncServerSideEncryptionIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License.
@@ -14,47 +14,22 @@ */ package software.amazon.awssdk.services.s3; -import static org.assertj.core.api.Fail.fail; import static software.amazon.awssdk.services.s3.model.ServerSideEncryption.AES256; -import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; -import java.io.File; import java.io.FileInputStream; -import java.io.IOException; +import java.io.FileNotFoundException; import java.io.InputStream; -import java.security.SecureRandom; import java.util.Base64; import java.util.UUID; -import javax.crypto.KeyGenerator; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.junit.Test; import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.ServerSideEncryption; -import software.amazon.awssdk.testutils.RandomTempFile; import software.amazon.awssdk.testutils.SdkAsserts; import software.amazon.awssdk.utils.Md5Utils; -public class SyncServerSideEncryptionIntegrationTest extends S3IntegrationTestBase { - - private static final String BUCKET = temporaryBucketName(GetObjectIntegrationTest.class); - - private static File file; - - @BeforeClass - public static void setupFixture() throws IOException { - createBucket(BUCKET); - file = new RandomTempFile(10_000); - } - - @AfterClass - public static void tearDownFixture() { - deleteBucketAndAllContents(BUCKET); - file.delete(); - } - +public class SyncServerSideEncryptionIntegrationTest extends ServerSideEncryptionIntegrationTestBase { @Test public void sse_AES256_succeeds() throws Exception { String key = UUID.randomUUID().toString(); @@ -124,15 +99,23 @@ public void sse_customerManaged_succeeds() { SdkAsserts.assertFileEqualsStream(file, response); } - private static byte[] generateSecretKey() { - KeyGenerator generator; - try { - generator = KeyGenerator.getInstance("AES"); - generator.init(256, new SecureRandom()); - return generator.generateKey().getEncoded(); - } catch (Exception e) { - fail("Unable to generate symmetric key: " + e.getMessage()); - return null; - } + @Test + public void sse_onBucket_succeeds() throws FileNotFoundException { + String key = UUID.randomUUID().toString(); + + PutObjectRequest request = PutObjectRequest.builder() + .key(key) + .bucket(BUCKET_WITH_SSE) + .build(); + + s3.putObject(request, file.toPath()); + + GetObjectRequest getObjectRequest = GetObjectRequest.builder() + .key(key) + .bucket(BUCKET_WITH_SSE) + .build(); + + String response = s3.getObject(getObjectRequest, ResponseTransformer.toBytes()).asUtf8String(); + SdkAsserts.assertStringEqualsStream(response, new FileInputStream(file)); } } diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadLargeObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadLargeObjectIntegrationTest.java index 5a4c79de7720..c6f9db74bb30 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadLargeObjectIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadLargeObjectIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadMultiplePartIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadMultiplePartIntegrationTest.java index 23f94028d2e0..e3963ddfb317 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadMultiplePartIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadMultiplePartIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadMultiplePartTestBase.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadMultiplePartTestBase.java index 12285fccbac0..9ebce247c716 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadMultiplePartTestBase.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/UploadMultiplePartTestBase.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/UrlEncodingIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/UrlEncodingIntegrationTest.java new file mode 100644 index 000000000000..e988c458c2b0 --- /dev/null +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/UrlEncodingIntegrationTest.java @@ -0,0 +1,134 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; + +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.EncodingType; +import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse; +import software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; + +/** + * Integration tests for the operations that support encoding type + */ +public class UrlEncodingIntegrationTest extends S3IntegrationTestBase { + /** + * The name of the bucket created, used, and deleted by these tests. + */ + private static final String BUCKET_NAME = temporaryBucketName(UrlEncodingIntegrationTest.class); + private static final String KEY_NAME_WITH_SPECIAL_CHARS = "filename_@_=_&_?_+_)_.temp"; + + @BeforeClass + public static void createResources() { + createBucket(BUCKET_NAME); + s3.putObject(PutObjectRequest.builder() + .bucket(BUCKET_NAME) + .key(KEY_NAME_WITH_SPECIAL_CHARS) + .build(), RequestBody.fromString(RandomStringUtils.random(1000))); + } + + /** + * Releases all resources created in this test. + */ + @AfterClass + public static void tearDown() { + deleteBucketAndAllContents(BUCKET_NAME); + } + + @Test + public void listObjectVersionsWithUrlEncodingType_shouldDecode() { + ListObjectVersionsResponse listObjectVersionsResponse = + s3.listObjectVersions(b -> b.bucket(BUCKET_NAME).encodingType(EncodingType.URL)); + listObjectVersionsResponse.versions().forEach(v -> assertKeyIsDecoded(v.key())); + + ListObjectVersionsResponse asyncResponse = + s3Async.listObjectVersions(b -> b.bucket(BUCKET_NAME).encodingType(EncodingType.URL)).join(); + + asyncResponse.versions().forEach(v -> assertKeyIsDecoded(v.key())); + } + + @Test + public void listObjectV2WithUrlEncodingType_shouldDecode() { + ListObjectsV2Response listObjectsV2Response = + s3.listObjectsV2(b -> b.bucket(BUCKET_NAME).encodingType(EncodingType.URL)); + + listObjectsV2Response.contents().forEach(c -> assertKeyIsDecoded(c.key())); + ListObjectVersionsResponse asyncResponse = + s3Async.listObjectVersions(b -> b.bucket(BUCKET_NAME).encodingType(EncodingType.URL)).join(); + + asyncResponse.versions().forEach(v -> assertKeyIsDecoded(v.key())); + } + + @Test + public void listObjectWithUrlEncodingType_shouldDecode() { + ListObjectsResponse listObjectsV2Response = + s3.listObjects(b -> b.bucket(BUCKET_NAME).encodingType(EncodingType.URL)); + + listObjectsV2Response.contents().forEach(c -> assertKeyIsDecoded(c.key())); + ListObjectVersionsResponse asyncResponse = + s3Async.listObjectVersions(b -> b.bucket(BUCKET_NAME).encodingType(EncodingType.URL)).join(); + + asyncResponse.versions().forEach(v -> assertKeyIsDecoded(v.key())); + } + + @Test + public void listMultipartUploadsWithUrlEncodingType_shouldDecode() { + String uploaddId = null; + try { + CreateMultipartUploadResponse multipartUploadResponse = + s3.createMultipartUpload(b -> 
b.bucket(BUCKET_NAME).key(KEY_NAME_WITH_SPECIAL_CHARS)); + uploaddId = multipartUploadResponse.uploadId(); + + String finalUploadId = uploaddId; + UploadPartResponse uploadPartResponse = s3.uploadPart(b -> b.bucket(BUCKET_NAME) + .key(KEY_NAME_WITH_SPECIAL_CHARS) + .partNumber(1) + .uploadId(finalUploadId), + RequestBody.fromString(RandomStringUtils.random(1000))); + + + ListMultipartUploadsResponse listMultipartUploadsResponse = + s3.listMultipartUploads(b -> b.encodingType(EncodingType.URL).bucket(BUCKET_NAME)); + + listMultipartUploadsResponse.uploads().forEach(upload -> assertThat(upload.key()).isEqualTo(KEY_NAME_WITH_SPECIAL_CHARS)); + + ListMultipartUploadsResponse asyncListMultipartUploadsResponse = + s3Async.listMultipartUploads(b -> b.encodingType(EncodingType.URL).bucket(BUCKET_NAME)).join(); + + asyncListMultipartUploadsResponse.uploads().forEach(upload -> assertKeyIsDecoded(upload.key())); + } finally { + if (uploaddId != null) { + String finalUploadId = uploaddId; + s3.abortMultipartUpload(b -> b.bucket(BUCKET_NAME).key(KEY_NAME_WITH_SPECIAL_CHARS).uploadId(finalUploadId)); + } + } + } + + private void assertKeyIsDecoded(String key) { + assertThat(key).isEqualTo(KEY_NAME_WITH_SPECIAL_CHARS); + } +} diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/UserMetadataIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/UserMetadataIntegrationTest.java index 94e82315769f..e24c029da125 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/UserMetadataIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/UserMetadataIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/signer/AwsS3V4SignerIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/signer/AwsS3V4SignerIntegrationTest.java index 8fa568b44389..2cab5a15e4c8 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/signer/AwsS3V4SignerIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/signer/AwsS3V4SignerIntegrationTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/utils/S3TestUtils.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/utils/S3TestUtils.java index f551569fc9ab..952fbccdc701 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/utils/S3TestUtils.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/utils/S3TestUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -15,11 +15,19 @@ package software.amazon.awssdk.services.s3.utils; +import java.rmi.NoSuchObjectException; +import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.Bucket; import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.model.ExpirationStatus; import software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest; import software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse; import software.amazon.awssdk.services.s3.model.ListObjectsRequest; @@ -27,8 +35,84 @@ import software.amazon.awssdk.services.s3.model.NoSuchBucketException; import software.amazon.awssdk.services.s3.model.S3Object; import software.amazon.awssdk.testutils.Waiter; +import software.amazon.awssdk.utils.Logger; public class S3TestUtils { + private static final Logger log = Logger.loggerFor(S3TestUtils.class); + private static final String TEST_BUCKET_PREFIX = "s3-test-bucket-"; + private static final String NON_DNS_COMPATIBLE_TEST_BUCKET_PREFIX = "s3.test.bucket."; + + private static Map<Class<?>, List<Runnable>> cleanupTasks = new ConcurrentHashMap<>(); + + public static String getTestBucket(S3Client s3) { + return getBucketWithPrefix(s3, TEST_BUCKET_PREFIX); + } + + public static String getNonDnsCompatibleTestBucket(S3Client s3) { + return getBucketWithPrefix(s3, NON_DNS_COMPATIBLE_TEST_BUCKET_PREFIX); + } + + private static String getBucketWithPrefix(S3Client s3, String bucketPrefix) { + String testBucket = + s3.listBuckets() + .buckets() + .stream() + .map(Bucket::name) + .filter(name -> name.startsWith(bucketPrefix)) + .findAny() + .orElse(null); + + if (testBucket == null) { + String newTestBucket = bucketPrefix + UUID.randomUUID(); + s3.createBucket(r -> r.bucket(newTestBucket)); + Waiter.run(() -> s3.headBucket(r -> r.bucket(newTestBucket))) + .ignoringException(NoSuchBucketException.class) + .orFail(); + testBucket = newTestBucket; + } + + String finalTestBucket = testBucket; + + s3.putBucketLifecycleConfiguration(blc -> blc + .bucket(finalTestBucket) + .lifecycleConfiguration(lc -> lc + .rules(r -> r.expiration(ex -> ex.days(1)) + .status(ExpirationStatus.ENABLED) + .filter(f -> f.prefix("")) + .id("delete-old")))); + + + return finalTestBucket; + } + + public static void putObject(Class<?> testClass, S3Client s3, String bucketName, String objectKey, String content) { + s3.putObject(r -> r.bucket(bucketName).key(objectKey), RequestBody.fromString(content)); + Waiter.run(() -> s3.getObjectAcl(r -> r.bucket(bucketName).key(objectKey))) + .ignoringException(NoSuchBucketException.class, NoSuchObjectException.class) + .orFail(); + addCleanupTask(testClass, () -> s3.deleteObject(r -> r.bucket(bucketName).key(objectKey))); + } + + public static void addCleanupTask(Class<?> testClass, Runnable cleanupTask) { + cleanupTasks.compute(testClass, (k, tasks) -> { + if (tasks == null) { + tasks = new ArrayList<>(); + } + tasks.add(cleanupTask); + return tasks; + }); + } + + public static void runCleanupTasks(Class<?> testClass) { + List<Runnable> tasksToRun = cleanupTasks.remove(testClass); + tasksToRun.forEach(r -> { + try { + r.run(); + } catch (Exception e) { + log.warn(() -> "Test cleanup task failed.
The failure will be ignored.", e); + } + }); + } public static void deleteBucketAndAllContents(S3Client s3, String bucketName) { try { diff --git a/services/s3/src/it/resources/log4j2.xml b/services/s3/src/it/resources/log4j2.xml deleted file mode 100644 index 784a3e2dbf93..000000000000 --- a/services/s3/src/it/resources/log4j2.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Configuration.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Configuration.java index 5bf00e558fbe..9e48a335baec 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Configuration.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Configuration.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -20,6 +20,10 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; import software.amazon.awssdk.core.ServiceConfiguration; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.services.s3.internal.FieldWithDefault; +import software.amazon.awssdk.services.s3.internal.usearnregion.UseArnRegionProviderChain; import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationRequest; import software.amazon.awssdk.utils.builder.CopyableBuilder; import software.amazon.awssdk.utils.builder.ToCopyableBuilder; @@ -55,21 +59,36 @@ public final class S3Configuration implements ServiceConfiguration, ToCopyableBu */ private static final boolean DEFAULT_CHUNKED_ENCODING_ENABLED = true; - private final boolean pathStyleAccessEnabled; - private final boolean accelerateModeEnabled; - private final boolean dualstackEnabled; - private final boolean checksumValidationEnabled; - private final boolean chunkedEncodingEnabled; + private final FieldWithDefault pathStyleAccessEnabled; + private final FieldWithDefault accelerateModeEnabled; + private final FieldWithDefault dualstackEnabled; + private final FieldWithDefault checksumValidationEnabled; + private final FieldWithDefault chunkedEncodingEnabled; + private final FieldWithDefault useArnRegionEnabled; + private final FieldWithDefault profileFile; + private final FieldWithDefault profileName; private S3Configuration(DefaultS3ServiceConfigurationBuilder builder) { - this.dualstackEnabled = resolveBoolean(builder.dualstackEnabled, DEFAULT_DUALSTACK_ENABLED); - this.accelerateModeEnabled = resolveBoolean(builder.accelerateModeEnabled, DEFAULT_ACCELERATE_MODE_ENABLED); - this.pathStyleAccessEnabled = resolveBoolean(builder.pathStyleAccessEnabled, DEFAULT_PATH_STYLE_ACCESS_ENABLED); - this.checksumValidationEnabled = resolveBoolean(builder.checksumValidationEnabled, DEFAULT_CHECKSUM_VALIDATION_ENABLED); - if (accelerateModeEnabled && pathStyleAccessEnabled) { + this.dualstackEnabled = FieldWithDefault.create(builder.dualstackEnabled, DEFAULT_DUALSTACK_ENABLED); + this.accelerateModeEnabled = FieldWithDefault.create(builder.accelerateModeEnabled, DEFAULT_ACCELERATE_MODE_ENABLED); + this.pathStyleAccessEnabled = FieldWithDefault.create(builder.pathStyleAccessEnabled, DEFAULT_PATH_STYLE_ACCESS_ENABLED); + 
this.checksumValidationEnabled = FieldWithDefault.create(builder.checksumValidationEnabled, + DEFAULT_CHECKSUM_VALIDATION_ENABLED); + this.chunkedEncodingEnabled = FieldWithDefault.create(builder.chunkedEncodingEnabled, DEFAULT_CHUNKED_ENCODING_ENABLED); + this.profileFile = FieldWithDefault.createLazy(builder.profileFile, ProfileFile::defaultProfileFile); + this.profileName = FieldWithDefault.create(builder.profileName, + ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow()); + this.useArnRegionEnabled = FieldWithDefault.createLazy(builder.useArnRegionEnabled, this::resolveUserArnRegionEnabled); + + if (accelerateModeEnabled() && pathStyleAccessEnabled()) { throw new IllegalArgumentException("Accelerate mode cannot be used with path style addressing"); } - this.chunkedEncodingEnabled = resolveBoolean(builder.chunkedEncodingEnabled, DEFAULT_CHUNKED_ENCODING_ENABLED); + } + + private boolean resolveUserArnRegionEnabled() { + return UseArnRegionProviderChain.create(this.profileFile.value(), this.profileName.value()) + .resolveUseArnRegion() + .orElse(false); } /** @@ -99,7 +118,7 @@ public static Builder builder() { * @return True is the client should always use path-style access */ public boolean pathStyleAccessEnabled() { - return pathStyleAccessEnabled; + return pathStyleAccessEnabled.value(); } /** @@ -115,7 +134,7 @@ public boolean pathStyleAccessEnabled() { * @return True if accelerate mode is enabled. */ public boolean accelerateModeEnabled() { - return accelerateModeEnabled; + return accelerateModeEnabled.value(); } /** @@ -132,11 +151,11 @@ public boolean accelerateModeEnabled() { * @return True if the client will use the dualstack endpoints */ public boolean dualstackEnabled() { - return dualstackEnabled; + return dualstackEnabled.value(); } public boolean checksumValidationEnabled() { - return checksumValidationEnabled; + return checksumValidationEnabled.value(); } /** @@ -150,23 +169,36 @@ public boolean checksumValidationEnabled() { * @return True if chunked encoding should be used. */ public boolean chunkedEncodingEnabled() { - return chunkedEncodingEnabled; + return chunkedEncodingEnabled.value(); } - private boolean resolveBoolean(Boolean customerSuppliedValue, boolean defaultValue) { - return customerSuppliedValue == null ? defaultValue : customerSuppliedValue; + /** + * Returns whether the client is allowed to make cross-region calls when an S3 Access Point ARN has a different + * region to the one configured on the client. + *

    + * @return True if a different region in the ARN can be used. + */ + public boolean useArnRegionEnabled() { + return useArnRegionEnabled.value(); } @Override public Builder toBuilder() { return builder() - .dualstackEnabled(dualstackEnabled) - .accelerateModeEnabled(accelerateModeEnabled) - .pathStyleAccessEnabled(pathStyleAccessEnabled); + .dualstackEnabled(dualstackEnabled.valueOrNullIfDefault()) + .accelerateModeEnabled(accelerateModeEnabled.valueOrNullIfDefault()) + .pathStyleAccessEnabled(pathStyleAccessEnabled.valueOrNullIfDefault()) + .checksumValidationEnabled(checksumValidationEnabled.valueOrNullIfDefault()) + .chunkedEncodingEnabled(chunkedEncodingEnabled.valueOrNullIfDefault()) + .useArnRegionEnabled(useArnRegionEnabled.valueOrNullIfDefault()) + .profileFile(profileFile.valueOrNullIfDefault()) + .profileName(profileName.valueOrNullIfDefault()); } @NotThreadSafe - public interface Builder extends CopyableBuilder { // (8) + public interface Builder extends CopyableBuilder { + Boolean dualstackEnabled(); + /** * Option to enable using the dualstack endpoints when accessing S3. Dualstack * should be enabled if you want to use IPv6. @@ -179,6 +211,8 @@ public interface Builder extends CopyableBuilder { // */ Builder dualstackEnabled(Boolean dualstackEnabled); + Boolean accelerateModeEnabled(); + /** * Option to enable using the accelerate enedpoint when accessing S3. Accelerate * endpoints allow faster transfer of objects by using Amazon CloudFront's @@ -192,6 +226,8 @@ public interface Builder extends CopyableBuilder { // */ Builder accelerateModeEnabled(Boolean accelerateModeEnabled); + Boolean pathStyleAccessEnabled(); + /** * Option to enable using path style access for accessing S3 objects * instead of DNS style access. DNS style access is preferred as it @@ -206,6 +242,8 @@ public interface Builder extends CopyableBuilder { // */ Builder pathStyleAccessEnabled(Boolean pathStyleAccessEnabled); + Boolean checksumValidationEnabled(); + /** * Option to disable doing a validation of the checksum of an object stored in S3. * @@ -217,6 +255,8 @@ public interface Builder extends CopyableBuilder { // */ Builder checksumValidationEnabled(Boolean checksumValidationEnabled); + Boolean chunkedEncodingEnabled(); + /** * Option to enable using chunked encoding when signing the request * payload for {@link @@ -226,21 +266,68 @@ public interface Builder extends CopyableBuilder { // * @see S3Configuration#chunkedEncodingEnabled() */ Builder chunkedEncodingEnabled(Boolean chunkedEncodingEnabled); - } - private static final class DefaultS3ServiceConfigurationBuilder implements Builder { + Boolean useArnRegionEnabled(); + + /** + * If an S3 resource ARN is passed in as the target of an S3 operation that has a different region to the one + * the client was configured with, this flag must be set to 'true' to permit the client to make a + * cross-region call to the region specified in the ARN otherwise an exception will be thrown. + * + * @see S3Configuration#useArnRegionEnabled() + */ + Builder useArnRegionEnabled(Boolean useArnRegionEnabled); + ProfileFile profileFile(); + + /** + * The profile file that should be consulted to determine the default value of {@link #useArnRegionEnabled(Boolean)}. + * This is not used, if the {@link #useArnRegionEnabled(Boolean)} is configured. + * + *

    + * By default, the {@link ProfileFile#defaultProfileFile()} is used. + *

    + */ + Builder profileFile(ProfileFile profileFile); + + String profileName(); + + /** + * The profile name that should be consulted to determine the default value of {@link #useArnRegionEnabled(Boolean)}. + * This is not used, if the {@link #useArnRegionEnabled(Boolean)} is configured. + * + *

    + * By default, the {@link ProfileFileSystemSetting#AWS_PROFILE} is used. + *

    + */ + Builder profileName(String profileName); + } + + static final class DefaultS3ServiceConfigurationBuilder implements Builder { private Boolean dualstackEnabled; private Boolean accelerateModeEnabled; private Boolean pathStyleAccessEnabled; private Boolean checksumValidationEnabled; private Boolean chunkedEncodingEnabled; + private Boolean useArnRegionEnabled; + private ProfileFile profileFile; + private String profileName; + + @Override + public Boolean dualstackEnabled() { + return dualstackEnabled; + } public Builder dualstackEnabled(Boolean dualstackEnabled) { this.dualstackEnabled = dualstackEnabled; return this; } + @Override + public Boolean accelerateModeEnabled() { + return accelerateModeEnabled; + } + public void setDualstackEnabled(Boolean dualstackEnabled) { dualstackEnabled(dualstackEnabled); } @@ -250,6 +337,11 @@ public Builder accelerateModeEnabled(Boolean accelerateModeEnabled) { return this; } + @Override + public Boolean pathStyleAccessEnabled() { + return pathStyleAccessEnabled; + } + public void setAccelerateModeEnabled(Boolean accelerateModeEnabled) { accelerateModeEnabled(accelerateModeEnabled); } @@ -259,6 +351,11 @@ public Builder pathStyleAccessEnabled(Boolean pathStyleAccessEnabled) { return this; } + @Override + public Boolean checksumValidationEnabled() { + return checksumValidationEnabled; + } + public void setPathStyleAccessEnabled(Boolean pathStyleAccessEnabled) { pathStyleAccessEnabled(pathStyleAccessEnabled); } @@ -268,6 +365,11 @@ public Builder checksumValidationEnabled(Boolean checksumValidationEnabled) { return this; } + @Override + public Boolean chunkedEncodingEnabled() { + return chunkedEncodingEnabled; + } + public void setChecksumValidationEnabled(Boolean checksumValidationEnabled) { checksumValidationEnabled(checksumValidationEnabled); } @@ -277,10 +379,46 @@ public Builder chunkedEncodingEnabled(Boolean chunkedEncodingEnabled) { return this; } + @Override + public Boolean useArnRegionEnabled() { + return useArnRegionEnabled; + } + public void setChunkedEncodingEnabled(Boolean chunkedEncodingEnabled) { chunkedEncodingEnabled(chunkedEncodingEnabled); } + public Builder useArnRegionEnabled(Boolean useArnRegionEnabled) { + this.useArnRegionEnabled = useArnRegionEnabled; + return this; + } + + @Override + public ProfileFile profileFile() { + return profileFile; + } + + @Override + public Builder profileFile(ProfileFile profileFile) { + this.profileFile = profileFile; + return this; + } + + @Override + public String profileName() { + return profileName; + } + + @Override + public Builder profileName(String profileName) { + this.profileName = profileName; + return this; + } + + public void setUseArnRegionEnabled(Boolean useArnRegionEnabled) { + useArnRegionEnabled(useArnRegionEnabled); + } + public S3Configuration build() { return new S3Configuration(this); } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3SystemSetting.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3SystemSetting.java new file mode 100644 index 000000000000..239df4aec35c --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3SystemSetting.java @@ -0,0 +1,51 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3; + +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.utils.SystemSetting; + +/** + * S3 specific system setting + */ +@SdkProtectedApi +public enum S3SystemSetting implements SystemSetting { + + AWS_S3_USE_ARN_REGION("aws.s3UseArnRegion", null); + + private final String systemProperty; + private final String defaultValue; + + S3SystemSetting(String systemProperty, String defaultValue) { + this.systemProperty = systemProperty; + this.defaultValue = defaultValue; + } + + @Override + public String property() { + return systemProperty; + } + + @Override + public String environmentVariable() { + return name(); + } + + @Override + public String defaultValue() { + return defaultValue; + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Utilities.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Utilities.java index 89596aca60d7..e2b04d9da236 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Utilities.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Utilities.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -23,18 +23,22 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.awscore.client.config.AwsClientOption; -import software.amazon.awssdk.awscore.internal.EndpointUtils; +import software.amazon.awssdk.awscore.endpoint.DefaultServiceEndpointBuilder; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.protocols.core.OperationInfo; import software.amazon.awssdk.protocols.core.PathMarshaller; import software.amazon.awssdk.protocols.core.ProtocolUtils; import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.internal.S3EndpointUtils; +import software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointResolverContext; +import software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointResolverFactory; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetUrlRequest; import software.amazon.awssdk.utils.Validate; @@ -46,8 +50,8 @@ * *
      * S3Utilities utilities = S3Utilities.builder().region(Region.US_WEST_2).build()
    - * GetUrlRequest request = GetUrlRequest.builder().bucket("foo-bucket").key("key-without-spaces").build()
    - * URL url = pathStyleUtilities.getUrl(request);
    + * GetUrlRequest request = GetUrlRequest.builder().bucket("foo-bucket").key("key-without-spaces").build();
    + * URL url = utilities.getUrl(request);
      * 
    *

    * @@ -58,8 +62,8 @@ *
      * S3Client s3client = S3Client.create();
      * S3Utilities utilities = s3client.utilities();
    - * GetUrlRequest request = GetUrlRequest.builder().bucket("foo-bucket").key("key-without-spaces").build()
    - * URL url = pathStyleUtilities.getUrl(request);
    + * GetUrlRequest request = GetUrlRequest.builder().bucket("foo-bucket").key("key-without-spaces").build();
    + * URL url = utilities.getUrl(request);
      * 
    *

    * @@ -68,10 +72,10 @@ @Immutable @SdkPublicApi public final class S3Utilities { - private final Region region; - private final S3Configuration s3Configuration; + private final ProfileFile profileFile; + private final String profileName; /** * SDK currently validates that region is present while constructing {@link S3Utilities} object. @@ -80,6 +84,8 @@ public final class S3Utilities { private S3Utilities(Builder builder) { this.region = Validate.paramNotNull(builder.region, "Region"); this.s3Configuration = builder.s3Configuration; + this.profileFile = builder.profileFile; + this.profileName = builder.profileName; } /** @@ -95,6 +101,8 @@ static S3Utilities create(SdkClientConfiguration clientConfiguration) { return S3Utilities.builder() .region(clientConfiguration.option(AwsClientOption.AWS_REGION)) .s3Configuration((S3Configuration) clientConfiguration.option(SdkClientOption.SERVICE_CONFIGURATION)) + .profileFile(clientConfiguration.option(SdkClientOption.PROFILE_FILE)) + .profileName(clientConfiguration.option(SdkClientOption.PROFILE_NAME)) .build(); } @@ -116,7 +124,7 @@ static S3Utilities create(SdkClientConfiguration clientConfiguration) { * * @param getUrlRequest A {@link Consumer} that will call methods on {@link GetUrlRequest.Builder} to create a request. * @return A URL for an object stored in Amazon S3. - * @throws MalformedURLException Generated Url is malformed + * @throws SdkException Generated Url is malformed */ public URL getUrl(Consumer getUrlRequest) { return getUrl(GetUrlRequest.builder().applyMutation(getUrlRequest).build()); @@ -135,24 +143,37 @@ public URL getUrl(Consumer getUrlRequest) { * * @param getUrlRequest request to construct url * @return A URL for an object stored in Amazon S3. - * @throws MalformedURLException Generated Url is malformed + * @throws SdkException Generated Url is malformed */ public URL getUrl(GetUrlRequest getUrlRequest) { Region resolvedRegion = resolveRegionForGetUrl(getUrlRequest); URI resolvedEndpoint = resolveEndpoint(getUrlRequest.endpoint(), resolvedRegion); + boolean endpointOverridden = getUrlRequest.endpoint() != null; SdkHttpFullRequest marshalledRequest = createMarshalledRequest(getUrlRequest, resolvedEndpoint); - SdkHttpRequest httpRequest = S3EndpointUtils.applyEndpointConfiguration(marshalledRequest, - getUrlRequest, - resolvedRegion, - s3Configuration, - getUrlRequest.bucket()); + GetObjectRequest getObjectRequest = GetObjectRequest.builder() + .bucket(getUrlRequest.bucket()) + .key(getUrlRequest.key()) + .versionId(getUrlRequest.versionId()) + .build(); + + S3EndpointResolverContext resolverContext = S3EndpointResolverContext.builder() + .request(marshalledRequest) + .originalRequest(getObjectRequest) + .region(resolvedRegion) + .endpointOverridden(endpointOverridden) + .serviceConfiguration(s3Configuration) + .build(); + + SdkHttpRequest httpRequest = S3EndpointResolverFactory.getEndpointResolver(getObjectRequest.bucket()) + .applyEndpointConfiguration(resolverContext) + .sdkHttpRequest(); try { return httpRequest.getUri().toURL(); } catch (MalformedURLException exception) { - throw SdkException.create(String.format("Generated URI is malformed: " + httpRequest.getUri()), + throw SdkException.create("Generated URI is malformed: " + httpRequest.getUri(), exception); } } @@ -170,7 +191,10 @@ private Region resolveRegionForGetUrl(GetUrlRequest getUrlRequest) { */ private URI resolveEndpoint(URI endpoint, Region region) { return endpoint != null ? 
endpoint - : EndpointUtils.buildEndpoint("https", "s3", region); + : new DefaultServiceEndpointBuilder("s3", "https").withRegion(region) + .withProfileFile(profileFile) + .withProfileName(profileName) + .getServiceEndpoint(); } /** @@ -192,6 +216,10 @@ private SdkHttpFullRequest createMarshalledRequest(GetUrlRequest getUrlRequest, // encode key builder.encodedPath(PathMarshaller.GREEDY.marshall(builder.encodedPath(), "Key", getUrlRequest.key())); + if (getUrlRequest.versionId() != null) { + builder.appendRawQueryParameter("versionId", getUrlRequest.versionId()); + } + return builder.build(); } @@ -202,6 +230,8 @@ public static final class Builder { private Region region; private S3Configuration s3Configuration; + private ProfileFile profileFile; + private String profileName; private Builder() { } @@ -232,6 +262,26 @@ public Builder s3Configuration(S3Configuration s3Configuration) { return this; } + /** + * The profile file from the {@link ClientOverrideConfiguration#defaultProfileFile()}. This is private and only used + * when the utilities is created via {@link S3Client#utilities()}. This is not currently public because it may be less + * confusing to support the full {@link ClientOverrideConfiguration} object in the future. + */ + private Builder profileFile(ProfileFile profileFile) { + this.profileFile = profileFile; + return this; + } + + /** + * The profile name from the {@link ClientOverrideConfiguration#defaultProfileFile()}. This is private and only used + * when the utilities is created via {@link S3Client#utilities()}. This is not currently public because it may be less + * confusing to support the full {@link ClientOverrideConfiguration} object in the future. + */ + private Builder profileName(String profileName) { + this.profileName = profileName; + return this; + } + /** * Construct a {@link S3Utilities} object. */ diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingAsyncRequestBody.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingAsyncRequestBody.java index c4f262a1c710..48c56d82503c 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingAsyncRequestBody.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingAsyncRequestBody.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -42,6 +42,7 @@ public Optional contentLength() { @Override public void subscribe(Subscriber s) { + sdkChecksum.reset(); wrapped.subscribe(new ChecksumCalculatingSubscriber(s, sdkChecksum)); } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingInputStream.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingInputStream.java index 382f789865cb..f6fae1ff968f 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingInputStream.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingInputStream.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumConstant.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumConstant.java index 03872809faca..fbe49e94eae3 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumConstant.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumConstant.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -55,5 +55,6 @@ public final class ChecksumConstant { */ public static final int S3_MD5_CHECKSUM_LENGTH = 16; - private ChecksumConstant() {} + private ChecksumConstant() { + } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStream.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStream.java index 6b18f6403d7f..ab089377e8cc 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStream.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingInputStream.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -105,12 +105,12 @@ public int read(byte[] buf, int off, int len) throws IOException { int read = -1; if (lengthRead < strippedLength) { - long maxRead = Math.min((long) Integer.MAX_VALUE, strippedLength - lengthRead); - int maxIterRead = (int) Math.min(maxRead, (long) len); + long maxRead = Math.min(Integer.MAX_VALUE, strippedLength - lengthRead); + int maxIterRead = (int) Math.min(maxRead, len); read = inputStream.read(buf, off, maxIterRead); - int toUpdate = (int) Math.min(strippedLength - lengthRead, (long) read); + int toUpdate = (int) Math.min(strippedLength - lengthRead, read); if (toUpdate > 0) { checkSum.update(buf, off, toUpdate); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisher.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisher.java index 2b453f4ec0c8..2c871470d84e 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisher.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisher.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumsEnabledValidator.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumsEnabledValidator.java index e74438c33a85..6c2afec748f0 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumsEnabledValidator.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/checksums/ChecksumsEnabledValidator.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -32,6 +32,8 @@ import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.http.SdkHttpHeaders; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3Configuration; import software.amazon.awssdk.services.s3.internal.handlers.AsyncChecksumValidationInterceptor; @@ -84,14 +86,13 @@ public static boolean getObjectChecksumEnabledPerResponse(SdkRequest request, Sd * * @param expectedClientType - The expected client type for enabling checksums * @param executionAttributes - {@link ExecutionAttributes} to determine the actual client type - * @param sdkHttpHeaders A map of headers for a given request * @return If trailing checksums should be enabled for this request. */ - public static boolean putObjectChecksumEnabled(SdkRequest request, - ClientType expectedClientType, - ExecutionAttributes executionAttributes, - SdkHttpHeaders sdkHttpHeaders) { - if (!(request instanceof PutObjectRequest)) { + public static boolean shouldRecordChecksum(SdkRequest sdkRequest, + ClientType expectedClientType, + ExecutionAttributes executionAttributes, + SdkHttpRequest httpRequest) { + if (!(sdkRequest instanceof PutObjectRequest)) { return false; } @@ -101,21 +102,33 @@ public static boolean putObjectChecksumEnabled(SdkRequest request, return false; } - // S3 doesn't support trailing checksums for customer encryption - if (sdkHttpHeaders.firstMatchingHeader(SERVER_SIDE_CUSTOMER_ENCRYPTION_HEADER).isPresent()) { - return false; - } - // S3 doesn't support trailing checksums for KMS encrypted objects - if (sdkHttpHeaders.firstMatchingHeader(SERVER_SIDE_ENCRYPTION_HEADER) - .filter(h -> h.contains(AWS_KMS.toString())) - .isPresent()) { + if (hasServerSideEncryptionHeader(httpRequest)) { return false; } return checksumEnabledPerConfig(executionAttributes); } + public static boolean responseChecksumIsValid(SdkHttpResponse httpResponse) { + return !hasServerSideEncryptionHeader(httpResponse); + } + + private static boolean hasServerSideEncryptionHeader(SdkHttpHeaders httpRequest) { + // S3 doesn't support trailing checksums for customer encryption + if (httpRequest.firstMatchingHeader(SERVER_SIDE_CUSTOMER_ENCRYPTION_HEADER).isPresent()) { + return true; + } + + // S3 doesn't support trailing checksums for KMS encrypted objects + if (httpRequest.firstMatchingHeader(SERVER_SIDE_ENCRYPTION_HEADER) + .filter(h -> h.contains(AWS_KMS.toString())) + .isPresent()) { + return true; + } + return false; + } + /** * Client side validation for {@link PutObjectRequest} * @@ -131,7 +144,9 @@ public static void 
validatePutObjectChecksum(PutObjectResponse response, Executi byte[] ssHash = Base16Lower.decode(response.eTag().replace("\"", "")); if (!Arrays.equals(digest, ssHash)) { - throw SdkClientException.create("Data read has a different checksum than expected."); + throw SdkClientException.create( + String.format("Data read has a different checksum than expected. Was 0x%s, but expected 0x%s", + BinaryUtils.toHex(digest), BinaryUtils.toHex(ssHash))); } } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/BucketUtils.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/BucketUtils.java index 781d6d202b7e..9e6a31a7531d 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/BucketUtils.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/BucketUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/ConfiguredS3SdkHttpRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/ConfiguredS3SdkHttpRequest.java new file mode 100644 index 000000000000..28566dac51f9 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/ConfiguredS3SdkHttpRequest.java @@ -0,0 +1,119 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +@SdkInternalApi +public class ConfiguredS3SdkHttpRequest + implements ToCopyableBuilder<ConfiguredS3SdkHttpRequest.Builder, ConfiguredS3SdkHttpRequest> { + private final SdkHttpRequest sdkHttpRequest; + private final Region signingRegionModification; + private final String signingServiceModification; + + private ConfiguredS3SdkHttpRequest(Builder builder) { + this.sdkHttpRequest = Validate.notNull(builder.sdkHttpRequest, "sdkHttpRequest"); + this.signingRegionModification = builder.signingRegionModification; + this.signingServiceModification = builder.signingServiceModification; + } + + public static Builder builder() { + return new Builder(); + } + + public SdkHttpRequest sdkHttpRequest() { + return sdkHttpRequest; + } + + public Optional<Region> signingRegionModification() { + return Optional.ofNullable(signingRegionModification); + } + + public Optional<String> signingServiceModification() { + return Optional.ofNullable(signingServiceModification); + } + + @Override + public Builder toBuilder() { + return builder().sdkHttpRequest(sdkHttpRequest).signingRegionModification(signingRegionModification); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ConfiguredS3SdkHttpRequest that = (ConfiguredS3SdkHttpRequest) o; + + if (!sdkHttpRequest.equals(that.sdkHttpRequest)) { + return false; + } + if (signingRegionModification != null ? !signingRegionModification.equals(that.signingRegionModification) : + that.signingRegionModification != null) { + return false; + } + return signingServiceModification != null ? signingServiceModification.equals(that.signingServiceModification) : + that.signingServiceModification == null; + } + + @Override + public int hashCode() { + int result = sdkHttpRequest.hashCode(); + result = 31 * result + (signingRegionModification != null ? signingRegionModification.hashCode() : 0); + result = 31 * result + (signingServiceModification != null ? 
signingServiceModification.hashCode() : 0); + return result; + } + + public static class Builder implements CopyableBuilder { + private String signingServiceModification; + private SdkHttpRequest sdkHttpRequest; + private Region signingRegionModification; + + private Builder() { + } + + public Builder sdkHttpRequest(SdkHttpRequest sdkHttpRequest) { + this.sdkHttpRequest = sdkHttpRequest; + return this; + } + + public Builder signingRegionModification(Region signingRegionModification) { + this.signingRegionModification = signingRegionModification; + return this; + } + + public Builder signingServiceModification(String signingServiceModification) { + this.signingServiceModification = signingServiceModification; + return this; + } + + @Override + public ConfiguredS3SdkHttpRequest build() { + return new ConfiguredS3SdkHttpRequest(this); + } + } + +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/FieldWithDefault.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/FieldWithDefault.java new file mode 100644 index 000000000000..9c1f9ca8ee17 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/FieldWithDefault.java @@ -0,0 +1,123 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal; + +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Lazy; + +/** + * A helper class for setting a field's value to a default if it isn't specified, while still keeping track of whether the value + * was from the default or from the field. + * + * For example, a "profile name" field-with-default might be set to "null" with a default of "foo". {@link #value()} returns + * "foo", while {@link #isDefault()} can be used to keep track of the fact that the value was from the default. + */ +@SdkInternalApi +public abstract class FieldWithDefault { + private FieldWithDefault(){ + } + + /** + * Create a {@link FieldWithDefault} using the provided field and its default value. If the field is null, the default value + * will be returned by {@link #value()} and {@link #isDefault()} will return true. If the field is not null, the field value + * will be returned by {@link #value()} and {@link #isDefault()} will return false. + * + * @see #createLazy(Object, Supplier) + */ + public static FieldWithDefault create(T field, T defaultValue) { + return new Impl<>(field, defaultValue); + } + + /** + * Create a {@link FieldWithDefault} using the provided field and its default value. If the field is null, the default value + * will be returned by {@link #value()} and {@link #isDefault()} will return true. If the field is not null, the field value + * will be returned by {@link #value()} and {@link #isDefault()} will return false. + * + *

    This differs from {@link #create(Object, Object)} in that the default value won't be resolved if the provided field is + * not null. The default value also won't be resolved until the first {@link #value()} call. This is useful for delaying + * expensive calculations until right before they're needed. + */ + public static FieldWithDefault createLazy(T field, Supplier defaultValue) { + return new LazyImpl<>(field, defaultValue); + } + + /** + * Retrieve the value of this field. + */ + public abstract T value(); + + /** + * True, if the value returned by {@link #value()} is the default value (i.e. the field is null). False otherwise. + */ + public abstract boolean isDefault(); + + /** + * Return the field exactly as it was specified when the field-with-default was created. If the field was null, this will + * return null. This will not resolve the default if this is a field from {@link #createLazy(Object, Supplier)}. + */ + public abstract T valueOrNullIfDefault(); + + private static class Impl extends FieldWithDefault { + private final T value; + private final boolean isDefault; + + private Impl(T field, T defaultValue) { + this.value = field != null ? field : defaultValue; + this.isDefault = field == null; + } + + @Override + public T value() { + return value; + } + + @Override + public boolean isDefault() { + return isDefault; + } + + @Override + public T valueOrNullIfDefault() { + return isDefault ? null : value; + } + } + + private static class LazyImpl extends FieldWithDefault { + private final Lazy value; + private final boolean isDefault; + + private LazyImpl(T field, Supplier defaultValue) { + this.value = field != null ? new Lazy<>(() -> field) : new Lazy<>(defaultValue); + this.isDefault = field == null; + } + + @Override + public T value() { + return value.getValue(); + } + + @Override + public boolean isDefault() { + return isDefault; + } + + @Override + public T valueOrNullIfDefault() { + return isDefault ? null : value.getValue(); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/S3EndpointUtils.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/S3EndpointUtils.java deleted file mode 100644 index f20068fc8a2e..000000000000 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/S3EndpointUtils.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.s3.internal; - -import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; - -import java.net.URI; -import java.net.URISyntaxException; -import java.util.Arrays; -import java.util.List; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.regions.RegionMetadata; -import software.amazon.awssdk.services.s3.S3Configuration; -import software.amazon.awssdk.services.s3.model.CreateBucketRequest; -import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; -import software.amazon.awssdk.services.s3.model.ListBucketsRequest; - -/** - * Utilities for working with Amazon S3 bucket names, such as validation and - * checked to see if they are compatible with DNS addressing. - */ -@SdkInternalApi -public final class S3EndpointUtils { - - private static final List> ACCELERATE_DISABLED_OPERATIONS = Arrays.asList( - ListBucketsRequest.class, CreateBucketRequest.class, DeleteBucketRequest.class); - - private S3EndpointUtils() { - } - - /** - * Returns a new instance of the given {@link SdkHttpRequest} by applying any endpoint changes based on - * the given {@link S3Configuration} options. - */ - public static SdkHttpRequest applyEndpointConfiguration(SdkHttpRequest request, - Object originalRequest, - Region region, - S3Configuration serviceConfiguration, - String bucketName) { - - SdkHttpRequest.Builder mutableRequest = request.toBuilder(); - - URI endpoint = resolveEndpoint(request, originalRequest, region, serviceConfiguration); - mutableRequest.uri(endpoint); - - if (serviceConfiguration == null || !serviceConfiguration.pathStyleAccessEnabled()) { - if (bucketName != null) { - if (BucketUtils.isVirtualAddressingCompatibleBucketName(bucketName, false)) { - changeToDnsEndpoint(mutableRequest, bucketName); - } - } - } - - return mutableRequest.build(); - } - - /** - * Determine which endpoint to use based on region and {@link S3Configuration}. Will either be a traditional - * S3 endpoint (i.e. s3.us-east-1.amazonaws.com), the global S3 accelerate endpoint (i.e. s3-accelerate.amazonaws.com) or - * a regional dualstack endpoint for IPV6 (i.e. s3.dualstack.us-east-1.amazonaws.com). - */ - private static URI resolveEndpoint(SdkHttpRequest request, - Object originalRequest, - Region region, - S3Configuration serviceConfiguration) { - RegionMetadata regionMetadata = RegionMetadata.of(region); - String protocol = request.protocol(); - - if (isAccelerateEnabled(serviceConfiguration) && isAccelerateSupported(originalRequest)) { - return accelerateEndpoint(serviceConfiguration, regionMetadata, protocol); - } - - if (serviceConfiguration != null && serviceConfiguration.dualstackEnabled()) { - return dualstackEndpoint(regionMetadata, protocol); - } - - return invokeSafely(() -> new URI(request.protocol(), null, request.host(), request.port(), null, null, null)); - } - - /** - * Changes from path style addressing (which the marshallers produce by default, to DNS style or virtual style addressing - * where the bucket name is prepended to the host. DNS style addressing is preferred due to the better load balancing - * qualities it provides, path style is an option mainly for proxy based situations and alternative S3 implementations. - * - * @param mutableRequest Marshalled HTTP request we are modifying. - * @param bucketName Bucket name for this particular operation. 
- */ - private static void changeToDnsEndpoint(SdkHttpRequest.Builder mutableRequest, String bucketName) { - if (mutableRequest.host().startsWith("s3")) { - String newHost = mutableRequest.host().replaceFirst("s3", bucketName + "." + "s3"); - String newPath = mutableRequest.encodedPath().replaceFirst("/" + bucketName, ""); - - mutableRequest.host(newHost).encodedPath(newPath); - } - } - - /** - * @return dual stack endpoint from given protocol and region metadata - */ - private static URI dualstackEndpoint(RegionMetadata metadata, String protocol) { - String serviceEndpoint = String.format("%s.%s.%s.%s", "s3", "dualstack", metadata.id(), metadata.domain()); - return toUri(protocol, serviceEndpoint); - } - - /** - * @return True if accelerate mode is enabled per {@link S3Configuration}, false if not. - */ - private static boolean isAccelerateEnabled(S3Configuration serviceConfiguration) { - return serviceConfiguration != null && serviceConfiguration.accelerateModeEnabled(); - } - - /** - * @param originalRequest Request object to identify the operation. - * @return True if accelerate is supported for the given operation, false if not. - */ - private static boolean isAccelerateSupported(Object originalRequest) { - return !ACCELERATE_DISABLED_OPERATIONS.contains(originalRequest.getClass()); - } - - /** - * @return The endpoint for an S3 accelerate enabled operation. S3 accelerate has a single global endpoint. - */ - private static URI accelerateEndpoint(S3Configuration serviceConfiguration, RegionMetadata metadata, String protocol) { - if (serviceConfiguration.dualstackEnabled()) { - return toUri(protocol, "s3-accelerate.dualstack." + metadata.domain()); - } - return toUri(protocol, "s3-accelerate." + metadata.domain()); - } - - private static URI toUri(String protocol, String endpoint) { - try { - return new URI(String.format("%s://%s", protocol, endpoint)); - } catch (URISyntaxException e) { - throw new IllegalArgumentException(e); - } - } -} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/TaggingAdapter.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/TaggingAdapter.java index 85a46964fbe6..c9d97e6e4090 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/TaggingAdapter.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/TaggingAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolver.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolver.java new file mode 100644 index 000000000000..fd6edb22ecde --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolver.java @@ -0,0 +1,212 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isAccelerateEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isArnRegionEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isDualstackEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isFipsRegion; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isFipsRegionProvided; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isPathStyleAccessEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.removeFipsIfNeeded; + +import java.net.URI; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.PartitionMetadata; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; +import software.amazon.awssdk.services.s3.internal.resource.S3AccessPointBuilder; +import software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource; +import software.amazon.awssdk.services.s3.internal.resource.S3ArnConverter; +import software.amazon.awssdk.services.s3.internal.resource.S3OutpostAccessPointBuilder; +import software.amazon.awssdk.services.s3.internal.resource.S3OutpostResource; +import software.amazon.awssdk.services.s3.internal.resource.S3Resource; +import software.amazon.awssdk.services.s3.internal.resource.S3ResourceType; +import software.amazon.awssdk.utils.Validate; + +/** + * Returns a new configured HTTP request with a resolved access point endpoint and signing overrides. 
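For orientation, an access point ARN such as arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap is ultimately addressed through a virtual host of the form <name>-<account-id>.s3-accesspoint.<region>.<dns-suffix>. A minimal sketch of that mapping, separate from the resolver added below (the class and method names here are purely illustrative, not part of this change):

import java.net.URI;

final class AccessPointHostSketch {
    // Illustrative only: the standard access point host shape for the public partition.
    static URI accessPointUri(String name, String accountId, String region, String dnsSuffix) {
        String host = String.format("%s-%s.s3-accesspoint.%s.%s", name, accountId, region, dnsSuffix);
        return URI.create("https://" + host);
    }
}

// accessPointUri("my-ap", "123456789012", "us-west-2", "amazonaws.com")
//   -> https://my-ap-123456789012.s3-accesspoint.us-west-2.amazonaws.com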
+ */ +@SdkInternalApi +public final class S3AccessPointEndpointResolver implements S3EndpointResolver { + + private static final String S3_OUTPOSTS_NAME = "s3-outposts"; + + private S3AccessPointEndpointResolver() { + } + + public static S3AccessPointEndpointResolver create() { + return new S3AccessPointEndpointResolver(); + } + + @Override + public ConfiguredS3SdkHttpRequest applyEndpointConfiguration(S3EndpointResolverContext context) { + + S3Resource s3Resource = S3ArnConverter.create().convertArn(Arn.fromString(getBucketName(context))); + if (S3ResourceType.fromValue(s3Resource.type()) != S3ResourceType.ACCESS_POINT) { + throw new IllegalArgumentException("An ARN was passed as a bucket parameter to an S3 operation, " + + "however it does not appear to be a valid S3 access point ARN."); + } + + Region region = context.region(); + PartitionMetadata clientPartitionMetadata = PartitionMetadata.of(region); + + String arnRegion = validateConfiguration(context, s3Resource); + + S3AccessPointResource s3EndpointResource = Validate.isInstanceOf(S3AccessPointResource.class, s3Resource, + "An ARN was passed as a bucket parameter to an S3 operation, however it does not " + + "appear to be a valid S3 access point ARN."); + + URI accessPointUri = getUriForAccessPointResource(context, arnRegion, clientPartitionMetadata, s3EndpointResource); + String key = context.originalRequest().getValueForField("Key", String.class).orElse(null); + SdkHttpRequest httpRequest = context.request().toBuilder() + .protocol(accessPointUri.getScheme()) + .host(accessPointUri.getHost()) + .port(accessPointUri.getPort()) + .encodedPath(key) + .build(); + + String signingServiceModification = s3EndpointResource.parentS3Resource() + .filter(r -> r instanceof S3OutpostResource) + .map(ignore -> S3_OUTPOSTS_NAME) + .orElse(null); + + return ConfiguredS3SdkHttpRequest.builder() + .sdkHttpRequest(httpRequest) + .signingRegionModification(Region.of(arnRegion)) + .signingServiceModification(signingServiceModification) + .build(); + } + + private String validateConfiguration(S3EndpointResolverContext context, S3Resource s3Resource) { + Region region = context.region(); + String arnRegion = s3Resource.region().orElseThrow(() -> new IllegalArgumentException( + "An S3 access point ARN must have a region")); + + + S3Configuration serviceConfiguration = context.serviceConfiguration(); + if (isAccelerateEnabled(serviceConfiguration)) { + throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3 " + + "operation if the S3 client has been configured with accelerate mode" + + " enabled."); + } + + if (isPathStyleAccessEnabled(serviceConfiguration)) { + throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3 " + + "operation if the S3 client has been configured with path style " + + "addressing enabled."); + } + + if (context.endpointOverridden()) { + throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3" + + " operation if the S3 client has been configured with an endpoint " + + "override."); + } + + if (!isArnRegionEnabled(serviceConfiguration) && clientRegionDiffersFromArnRegion(region, arnRegion)) { + throw new IllegalArgumentException( + String.format("The region field of the ARN being passed as a bucket parameter to an S3 operation " + + "does not match the region the client was configured with. 
To enable this " + + "behavior and prevent this exception set 'useArnRegionEnabled' to true in the " + + "configuration when building the S3 client. Provided region: '%s'; client region:" + + " '%s'.", arnRegion, region)); + } + + String clientPartition = PartitionMetadata.of(region).id(); + + if (illegalPartitionConfiguration(s3Resource, clientPartition)) { + throw new IllegalArgumentException( + String.format("The partition field of the ARN being passed as a bucket parameter to an S3 operation " + + "does not match the partition the S3 client has been configured with. Provided " + + "partition: '%s'; client partition: '%s'.", s3Resource.partition().orElse(""), + clientPartition)); + } + return arnRegion; + } + + private boolean clientRegionDiffersFromArnRegion(Region clientRegion, String arnRegion) { + return !removeFipsIfNeeded(clientRegion.id()).equals(removeFipsIfNeeded(arnRegion)); + } + + private boolean illegalPartitionConfiguration(S3Resource s3Resource, String clientPartition) { + return clientPartition == null || clientPartition.isEmpty() || !s3Resource.partition().isPresent() + || !clientPartition.equals(s3Resource.partition().get()); + } + + private String getBucketName(S3EndpointResolverContext context) { + return context.originalRequest().getValueForField("Bucket", String.class).orElseThrow( + () -> new IllegalArgumentException("Bucket name cannot be empty when parsing access points.")); + } + + private URI getUriForAccessPointResource(S3EndpointResolverContext context, String arnRegion, + PartitionMetadata clientPartitionMetadata, + S3AccessPointResource s3EndpointResource) { + + boolean dualstackEnabled = isDualstackEnabled(context.serviceConfiguration()); + boolean fipsRegionProvided = isFipsRegionProvided(context.region().toString(), arnRegion, + isArnRegionEnabled(context.serviceConfiguration())); + + String accountId = s3EndpointResource.accountId().orElseThrow(() -> new IllegalArgumentException( + "An S3 access point ARN must have an account ID")); + String accessPointName = s3EndpointResource.accessPointName(); + + if (isOutpostAccessPoint(s3EndpointResource)) { + return getOutpostAccessPointUri(context, arnRegion, clientPartitionMetadata, s3EndpointResource); + } + + return S3AccessPointBuilder.create() + .accessPointName(accessPointName) + .accountId(accountId) + .fipsEnabled(fipsRegionProvided) + .region(removeFipsIfNeeded(arnRegion)) + .protocol(context.request().protocol()) + .domain(clientPartitionMetadata.dnsSuffix()) + .dualstackEnabled(dualstackEnabled) + .toUri(); + } + + private boolean isOutpostAccessPoint(S3AccessPointResource s3EndpointResource) { + return s3EndpointResource.parentS3Resource().filter(r -> r instanceof S3OutpostResource).isPresent(); + } + + private URI getOutpostAccessPointUri(S3EndpointResolverContext context, String arnRegion, + PartitionMetadata clientPartitionMetadata, S3AccessPointResource s3EndpointResource) { + if (isDualstackEnabled(context.serviceConfiguration())) { + throw new IllegalArgumentException("An Outpost Access Point ARN cannot be passed as a bucket parameter to an S3 " + + "operation if the S3 client has been configured with dualstack"); + } + + if (isFipsRegion(context.region().toString())) { + throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3" + + " operation if the S3 client has been configured with a FIPS" + + " enabled region."); + } + + S3OutpostResource parentResource = (S3OutpostResource) s3EndpointResource.parentS3Resource().get(); + return 
S3OutpostAccessPointBuilder.create() + .accountId(s3EndpointResource.accountId().get()) + .outpostId(parentResource.outpostId()) + .region(arnRegion) + .accessPointName(s3EndpointResource.accessPointName()) + .protocol(context.request().protocol()) + .domain(clientPartitionMetadata.dnsSuffix()) + .toUri(); + } + +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolver.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolver.java new file mode 100644 index 000000000000..a3284baf7782 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolver.java @@ -0,0 +1,106 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.accelerateEndpoint; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.dualstackEndpoint; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isAccelerateEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isAccelerateSupported; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isDualstackEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isPathStyleAccessEnabled; +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.net.URI; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.RegionMetadata; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.internal.BucketUtils; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; + +/** + * Returns a new configured HTTP request with a resolved endpoint with either virtual addressing or path style access. + * Supports accelerate and dual stack. 
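As context for the bucket resolver that follows: virtual-hosted-style addressing moves the bucket name from the request path into the host, so https://s3.us-east-1.amazonaws.com/my-bucket/photo.png becomes https://my-bucket.s3.us-east-1.amazonaws.com/photo.png. A standalone sketch of that rewrite (not the SDK's API; it mirrors the simple replaceFirst approach used in changeToDnsEndpoint and ignores ports for brevity):

import java.net.URI;

final class VirtualHostSketch {
    // Illustrative only: prepend the bucket to the host and drop it from the path.
    static URI toVirtualHostedStyle(URI pathStyle, String bucket) {
        String host = bucket + "." + pathStyle.getHost();
        String path = pathStyle.getPath().replaceFirst("/" + bucket, "");
        return URI.create(pathStyle.getScheme() + "://" + host + (path.isEmpty() ? "/" : path));
    }
}

// toVirtualHostedStyle(URI.create("https://s3.us-east-1.amazonaws.com/my-bucket/photo.png"), "my-bucket")
//   -> https://my-bucket.s3.us-east-1.amazonaws.com/photo.png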
+ */ +@SdkInternalApi +public final class S3BucketEndpointResolver implements S3EndpointResolver { + + private S3BucketEndpointResolver() { + } + + public static S3BucketEndpointResolver create() { + return new S3BucketEndpointResolver(); + } + + @Override + public ConfiguredS3SdkHttpRequest applyEndpointConfiguration(S3EndpointResolverContext context) { + URI endpoint = resolveEndpoint(context); + SdkHttpRequest.Builder mutableRequest = context.request().toBuilder(); + mutableRequest.uri(endpoint); + + String bucketName = context.originalRequest().getValueForField("Bucket", String.class).orElse(null); + if (canUseVirtualAddressing(context.serviceConfiguration(), bucketName)) { + changeToDnsEndpoint(mutableRequest, bucketName); + } + + return ConfiguredS3SdkHttpRequest.builder() + .sdkHttpRequest(mutableRequest.build()) + .build(); + } + + /** + * Determine which endpoint to use based on region and {@link S3Configuration}. Will either be a traditional + * S3 endpoint (i.e. s3.us-east-1.amazonaws.com), the global S3 accelerate endpoint (i.e. s3-accelerate.amazonaws.com) or + * a regional dualstack endpoint for IPV6 (i.e. s3.dualstack.us-east-1.amazonaws.com). + */ + private static URI resolveEndpoint(S3EndpointResolverContext context) { + SdkHttpRequest request = context.request(); + String protocol = request.protocol(); + RegionMetadata regionMetadata = RegionMetadata.of(context.region()); + S3Configuration serviceConfiguration = context.serviceConfiguration(); + + if (isAccelerateEnabled(serviceConfiguration) && isAccelerateSupported(context.originalRequest())) { + return accelerateEndpoint(serviceConfiguration, regionMetadata.domain(), protocol); + } + + if (isDualstackEnabled(serviceConfiguration)) { + return dualstackEndpoint(regionMetadata.id(), regionMetadata.domain(), protocol); + } + + return invokeSafely(() -> new URI(protocol, null, request.host(), request.port(), null, null, null)); + } + + private static boolean canUseVirtualAddressing(S3Configuration serviceConfiguration, String bucketName) { + return !isPathStyleAccessEnabled(serviceConfiguration) && bucketName != null && + BucketUtils.isVirtualAddressingCompatibleBucketName(bucketName, false); + } + + /** + * Changes from path style addressing (which the marshallers produce by default), to DNS style/virtual style addressing, + * where the bucket name is prepended to the host. DNS style addressing is preferred due to the better load balancing + * qualities it provides; path style is an option mainly for proxy based situations and alternative S3 implementations. + * + * @param mutableRequest Marshalled HTTP request we are modifying. + * @param bucketName Bucket name for this particular operation. + */ + private static void changeToDnsEndpoint(SdkHttpRequest.Builder mutableRequest, String bucketName) { + if (mutableRequest.host().startsWith("s3")) { + String newHost = mutableRequest.host().replaceFirst("s3", bucketName + "." + "s3"); + String newPath = mutableRequest.encodedPath().replaceFirst("/" + bucketName, ""); + mutableRequest.host(newHost).encodedPath(newPath); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolver.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolver.java new file mode 100644 index 000000000000..9f8ab6e66910 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolver.java @@ -0,0 +1,31 @@ +/* + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; + +/** + * An S3 endpoint resolver returns a {@link ConfiguredS3SdkHttpRequest} based on the HTTP context and previously + * set execution attributes. + *

    + * @see software.amazon.awssdk.services.s3.internal.handlers.EndpointAddressInterceptor + */ +@SdkInternalApi +public interface S3EndpointResolver { + + ConfiguredS3SdkHttpRequest applyEndpointConfiguration(S3EndpointResolverContext context); +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContext.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContext.java new file mode 100644 index 000000000000..48cc41a9f37c --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContext.java @@ -0,0 +1,143 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Configuration; + +/** + * Contains the information needed to resolve S3 endpoints. + */ +@SdkInternalApi +public final class S3EndpointResolverContext { + private final SdkHttpRequest request; + private final SdkRequest originalRequest; + private final Region region; + private final S3Configuration serviceConfiguration; + private final boolean endpointOverridden; + + private S3EndpointResolverContext(Builder builder) { + this.request = builder.request; + this.originalRequest = builder.originalRequest; + this.region = builder.region; + this.serviceConfiguration = builder.serviceConfiguration; + this.endpointOverridden = builder.endpointOverridden; + } + + public static Builder builder() { + return new Builder(); + } + + public SdkHttpRequest request() { + return request; + } + + public SdkRequest originalRequest() { + return originalRequest; + } + + public Region region() { + return region; + } + + public S3Configuration serviceConfiguration() { + return serviceConfiguration; + } + + public boolean endpointOverridden() { + return endpointOverridden; + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + S3EndpointResolverContext that = (S3EndpointResolverContext) o; + return endpointOverridden == that.endpointOverridden && + Objects.equals(request, that.request) && + Objects.equals(originalRequest, that.originalRequest) && + Objects.equals(region, that.region) && + Objects.equals(serviceConfiguration, that.serviceConfiguration); + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + Objects.hashCode(request()); + hashCode = 31 * hashCode + Objects.hashCode(originalRequest()); + hashCode = 31 * hashCode + Objects.hashCode(region()); + hashCode = 31 * hashCode + Objects.hashCode(serviceConfiguration()); + hashCode = 31 * hashCode + 
Objects.hashCode(endpointOverridden()); + return hashCode; + } + + public Builder toBuilder() { + return builder().endpointOverridden(endpointOverridden) + .request(request) + .originalRequest(originalRequest) + .region(region) + .serviceConfiguration(serviceConfiguration); + } + + public static final class Builder { + private SdkHttpRequest request; + private SdkRequest originalRequest; + private Region region; + private S3Configuration serviceConfiguration; + private boolean endpointOverridden; + + private Builder() { + } + + public Builder request(SdkHttpRequest request) { + this.request = request; + return this; + } + + public Builder originalRequest(SdkRequest originalRequest) { + this.originalRequest = originalRequest; + return this; + } + + public Builder region(Region region) { + this.region = region; + return this; + } + + public Builder serviceConfiguration(S3Configuration serviceConfiguration) { + this.serviceConfiguration = serviceConfiguration; + return this; + } + + public Builder endpointOverridden(boolean endpointOverridden) { + this.endpointOverridden = endpointOverridden; + return this; + } + + public S3EndpointResolverContext build() { + return new S3EndpointResolverContext(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactory.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactory.java new file mode 100644 index 000000000000..5db130d41c3c --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactory.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Get endpoint resolver. + */ +@SdkInternalApi +public final class S3EndpointResolverFactory { + + private static final S3EndpointResolver ACCESS_POINT_ENDPOINT_RESOLVER = S3AccessPointEndpointResolver.create(); + private static final S3EndpointResolver BUCKET_ENDPOINT_RESOLVER = S3BucketEndpointResolver.create(); + + private S3EndpointResolverFactory() { + } + + public static S3EndpointResolver getEndpointResolver(String bucketName) { + if (bucketName != null && S3EndpointUtils.isArn(bucketName)) { + return ACCESS_POINT_ENDPOINT_RESOLVER; + } + return BUCKET_ENDPOINT_RESOLVER; + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtils.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtils.java new file mode 100644 index 000000000000..d3176b734b0e --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtils.java @@ -0,0 +1,146 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.ListBucketsRequest; + +/** + * Utilities for working with Amazon S3 bucket names and endpoints. + */ +@SdkInternalApi +public final class S3EndpointUtils { + + private static final List> ACCELERATE_DISABLED_OPERATIONS = Arrays.asList( + ListBucketsRequest.class, CreateBucketRequest.class, DeleteBucketRequest.class); + + private S3EndpointUtils() { + } + + public static String removeFipsIfNeeded(String region) { + if (region.startsWith("fips-")) { + return region.replace("fips-", ""); + } + + if (region.endsWith("-fips")) { + return region.replace("-fips", ""); + } + return region; + } + + /** + * Returns whether a FIPS pseudo region is provided. + */ + public static boolean isFipsRegionProvided(String clientRegion, String arnRegion, boolean useArnRegion) { + if (useArnRegion) { + return isFipsRegion(arnRegion); + } + return isFipsRegion(clientRegion); + } + + public static boolean isFipsRegion(String region) { + return region.startsWith("fips-") || region.endsWith("-fips"); + } + + /** + * @return True if accelerate mode is enabled per {@link S3Configuration}, false if not. + */ + public static boolean isAccelerateEnabled(S3Configuration serviceConfiguration) { + return serviceConfiguration != null && serviceConfiguration.accelerateModeEnabled(); + } + + /** + * @param originalRequest Request object to identify the operation. + * @return True if accelerate is supported for the given operation, false if not. + */ + public static boolean isAccelerateSupported(SdkRequest originalRequest) { + return !ACCELERATE_DISABLED_OPERATIONS.contains(originalRequest.getClass()); + } + + /** + * @return The endpoint for an S3 accelerate enabled operation. S3 accelerate has a single global endpoint. + */ + public static URI accelerateEndpoint(S3Configuration serviceConfiguration, String domain, String protocol) { + if (serviceConfiguration.dualstackEnabled()) { + return toUri(protocol, "s3-accelerate.dualstack." + domain); + } + return toUri(protocol, "s3-accelerate." + domain); + } + + /** + * @return True if dualstack is enabled per {@link S3Configuration}, false if not. 
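A small usage sketch of the helpers in this class, assuming the static methods as added in this change; the expected values follow the endpoint shapes described in the javadoc:

import java.net.URI;
import software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils;

public class EndpointUtilsExample {
    public static void main(String[] args) {
        // Dual-stack endpoint for us-west-2 in the public partition.
        URI dualstack = S3EndpointUtils.dualstackEndpoint("us-west-2", "amazonaws.com", "https");
        System.out.println(dualstack); // https://s3.dualstack.us-west-2.amazonaws.com

        // FIPS pseudo regions are reduced to the plain region id before endpoints are built.
        System.out.println(S3EndpointUtils.removeFipsIfNeeded("fips-us-gov-west-1")); // us-gov-west-1
        System.out.println(S3EndpointUtils.isFipsRegion("us-east-1-fips"));           // true
    }
}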
+ */ + public static boolean isDualstackEnabled(S3Configuration serviceConfiguration) { + return serviceConfiguration != null && serviceConfiguration.dualstackEnabled(); + } + + /** + * @return dual stack endpoint from given protocol and region metadata + */ + public static URI dualstackEndpoint(String id, String domain, String protocol) { + String serviceEndpoint = String.format("%s.%s.%s.%s", "s3", "dualstack", id, domain); + return toUri(protocol, serviceEndpoint); + } + + /** + * @return True if path style access is enabled per {@link S3Configuration}, false if not. + */ + public static boolean isPathStyleAccessEnabled(S3Configuration serviceConfiguration) { + return serviceConfiguration != null && serviceConfiguration.pathStyleAccessEnabled(); + } + + public static boolean isArnRegionEnabled(S3Configuration serviceConfiguration) { + return serviceConfiguration != null && serviceConfiguration.useArnRegionEnabled(); + } + + /** + * Changes from path style addressing (which the marshallers produce by default, to DNS style or virtual style addressing + * where the bucket name is prepended to the host. DNS style addressing is preferred due to the better load balancing + * qualities it provides, path style is an option mainly for proxy based situations and alternative S3 implementations. + * + * @param mutableRequest Marshalled HTTP request we are modifying. + * @param bucketName Bucket name for this particular operation. + */ + public static void changeToDnsEndpoint(SdkHttpRequest.Builder mutableRequest, String bucketName) { + if (mutableRequest.host().startsWith("s3")) { + String newHost = mutableRequest.host().replaceFirst("s3", bucketName + "." + "s3"); + String newPath = mutableRequest.encodedPath().replaceFirst("/" + bucketName, ""); + + mutableRequest.host(newHost).encodedPath(newPath); + } + } + + public static boolean isArn(String s) { + return s.startsWith("arn:"); + } + + private static URI toUri(String protocol, String endpoint) { + try { + return new URI(String.format("%s://%s", protocol, endpoint)); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(e); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/AddContentMd5HeaderInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/AddContentMd5HeaderInterceptor.java deleted file mode 100644 index c113782c1eab..000000000000 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/AddContentMd5HeaderInterceptor.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.s3.internal.handlers; - -import static software.amazon.awssdk.http.Header.CONTENT_MD5; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.Arrays; -import java.util.List; -import java.util.Optional; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttribute; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; -import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.UploadPartRequest; -import software.amazon.awssdk.utils.IoUtils; -import software.amazon.awssdk.utils.Md5Utils; - -@SdkInternalApi -public class AddContentMd5HeaderInterceptor implements ExecutionInterceptor { - - private static final ExecutionAttribute CONTENT_MD5_ATTRIBUTE = new ExecutionAttribute<>("contentMd5"); - - // List of operations that should be ignored by this interceptor. - // These are costly operations, so adding the md5 header will take a performance hit - private static final List BLACKLIST_METHODS = Arrays.asList(PutObjectRequest.class, UploadPartRequest.class); - - @Override - public Optional modifyHttpContent(Context.ModifyHttpRequest context, - ExecutionAttributes executionAttributes) { - - if (!BLACKLIST_METHODS.contains(context.request().getClass()) && context.requestBody().isPresent() - && !context.httpRequest().firstMatchingHeader(CONTENT_MD5).isPresent()) { - - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - IoUtils.copy(context.requestBody().get().contentStreamProvider().newStream(), baos); - executionAttributes.putAttribute(CONTENT_MD5_ATTRIBUTE, Md5Utils.md5AsBase64(baos.toByteArray())); - return context.requestBody(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - return context.requestBody(); - } - - @Override - public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, - ExecutionAttributes executionAttributes) { - String contentMd5 = executionAttributes.getAttribute(CONTENT_MD5_ATTRIBUTE); - - if (contentMd5 != null) { - return context.httpRequest().toBuilder().putHeader(CONTENT_MD5, contentMd5).build(); - } - - return context.httpRequest(); - } -} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/AsyncChecksumValidationInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/AsyncChecksumValidationInterceptor.java index ec94f4b40912..af691aae4ee5 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/AsyncChecksumValidationInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/AsyncChecksumValidationInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -19,7 +19,8 @@ import static software.amazon.awssdk.services.s3.checksums.ChecksumConstant.CONTENT_LENGTH_HEADER; import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.CHECKSUM; import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.getObjectChecksumEnabledPerResponse; -import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.putObjectChecksumEnabled; +import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.responseChecksumIsValid; +import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.shouldRecordChecksum; import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.validatePutObjectChecksum; import java.nio.ByteBuffer; @@ -30,6 +31,7 @@ import software.amazon.awssdk.core.checksums.Md5Checksum; import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.services.s3.checksums.ChecksumCalculatingAsyncRequestBody; @@ -38,16 +40,16 @@ @SdkInternalApi public final class AsyncChecksumValidationInterceptor implements ExecutionInterceptor { + private static ExecutionAttribute ASYNC_RECORDING_CHECKSUM = new ExecutionAttribute<>("asyncRecordingChecksum"); @Override public Optional modifyAsyncHttpContent(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { + boolean shouldRecordChecksum = shouldRecordChecksum(context.request(), ASYNC, executionAttributes, context.httpRequest()); - boolean putObjectTrailingChecksumsEnabled = - putObjectChecksumEnabled(context.request(), ASYNC, executionAttributes, context.httpRequest()); - - if (putObjectTrailingChecksumsEnabled && context.asyncRequestBody().isPresent()) { + if (shouldRecordChecksum && context.asyncRequestBody().isPresent()) { SdkChecksum checksum = new Md5Checksum(); + executionAttributes.putAttribute(ASYNC_RECORDING_CHECKSUM, true); executionAttributes.putAttribute(CHECKSUM, checksum); return Optional.of(new ChecksumCalculatingAsyncRequestBody(context.asyncRequestBody().get(), checksum)); } @@ -58,7 +60,6 @@ public Optional modifyAsyncHttpContent(Context.ModifyHttpReque @Override public Optional> modifyAsyncHttpResponseContent(Context.ModifyHttpResponse context, ExecutionAttributes executionAttributes) { - if (getObjectChecksumEnabledPerResponse(context.request(), context.httpResponse()) && context.responsePublisher().isPresent()) { long contentLength = context.httpResponse() @@ -78,11 +79,10 @@ public Optional> modifyAsyncHttpResponseContent(Context.Mo @Override public void afterUnmarshalling(Context.AfterUnmarshalling context, ExecutionAttributes executionAttributes) { + boolean recordingChecksum = Boolean.TRUE.equals(executionAttributes.getAttribute(ASYNC_RECORDING_CHECKSUM)); + boolean responseChecksumIsValid = responseChecksumIsValid(context.httpResponse()); - boolean putObjectChecksumsEnabled = - putObjectChecksumEnabled(context.request(), ASYNC, executionAttributes, context.httpRequest()); - - if (putObjectChecksumsEnabled) { + if (recordingChecksum && responseChecksumIsValid) { validatePutObjectChecksum((PutObjectResponse) context.response(), executionAttributes); } } diff --git 
a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CreateBucketInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CreateBucketInterceptor.java index 81e525504a4f..5bb616f8cd0d 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CreateBucketInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CreateBucketInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CreateMultipartUploadRequestInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CreateMultipartUploadRequestInterceptor.java index 5c678d2f2c82..94bf44339df1 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CreateMultipartUploadRequestInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/CreateMultipartUploadRequestInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/DecodeUrlEncodedResponseInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/DecodeUrlEncodedResponseInterceptor.java index 9a2e26b76eb4..3a94b74d9e14 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/DecodeUrlEncodedResponseInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/DecodeUrlEncodedResponseInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -17,6 +17,7 @@ import static software.amazon.awssdk.utils.http.SdkHttpUtils.urlDecode; +import java.util.Collections; import java.util.List; import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -24,9 +25,14 @@ import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.services.s3.model.CommonPrefix; import software.amazon.awssdk.services.s3.model.EncodingType; +import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse; +import software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse; import software.amazon.awssdk.services.s3.model.ListObjectsResponse; import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.MultipartUpload; +import software.amazon.awssdk.services.s3.model.ObjectVersion; import software.amazon.awssdk.services.s3.model.S3Object; /** @@ -50,9 +56,19 @@ public SdkResponse modifyResponse(Context.ModifyResponse context, SdkResponse response = context.response(); if (shouldHandle(response)) { if (response instanceof ListObjectsResponse) { - response = modifyListObjectsResponse((ListObjectsResponse) response); - } else if (response instanceof ListObjectsV2Response) { - response = modifyListObjectsV2Response((ListObjectsV2Response) response); + return modifyListObjectsResponse((ListObjectsResponse) response); + } + + if (response instanceof ListObjectsV2Response) { + return modifyListObjectsV2Response((ListObjectsV2Response) response); + } + + if (response instanceof ListObjectVersionsResponse) { + return modifyListObjectVersionsResponse((ListObjectVersionsResponse) response); + } + + if (response instanceof ListMultipartUploadsResponse) { + return modifyListMultipartUploadsResponse((ListMultipartUploadsResponse) response); } } return response; @@ -67,30 +83,90 @@ private static boolean shouldHandle(SdkResponse sdkResponse) { // Elements to decode: Delimiter, Marker, Prefix, NextMarker, Key private static SdkResponse modifyListObjectsResponse(ListObjectsResponse response) { return response.toBuilder() - .delimiter(urlDecode(response.delimiter())) - .marker(urlDecode(response.delimiter())) - .prefix(urlDecode(response.prefix())) - .nextMarker(urlDecode(response.nextMarker())) - .contents(decodeContents(response.contents())) - .build(); + .delimiter(urlDecode(response.delimiter())) + .marker(urlDecode(response.marker())) + .prefix(urlDecode(response.prefix())) + .nextMarker(urlDecode(response.nextMarker())) + .contents(decodeContents(response.contents())) + .commonPrefixes(decodeCommonPrefixes(response.commonPrefixes())) + .build(); } // Elements to decode: Delimiter, Prefix, Key, and StartAfter private static SdkResponse modifyListObjectsV2Response(ListObjectsV2Response response) { return response.toBuilder() - .delimiter(urlDecode(response.delimiter())) - .prefix(urlDecode(response.prefix())) - .startAfter(urlDecode(response.startAfter())) - .contents(decodeContents(response.contents())) - .build(); + .delimiter(urlDecode(response.delimiter())) + .prefix(urlDecode(response.prefix())) + .startAfter(urlDecode(response.startAfter())) + .contents(decodeContents(response.contents())) + .commonPrefixes(decodeCommonPrefixes(response.commonPrefixes())) + .build(); + } + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html + // Elements to decode: Delimiter, 
KeyMarker, NextKeyMarker, Prefix + private SdkResponse modifyListObjectVersionsResponse(ListObjectVersionsResponse response) { + + return response.toBuilder() + .prefix(urlDecode(response.prefix())) + .keyMarker(urlDecode(response.keyMarker())) + .delimiter(urlDecode(response.delimiter())) + .nextKeyMarker(urlDecode(response.nextKeyMarker())) + .commonPrefixes(decodeCommonPrefixes(response.commonPrefixes())) + .versions(decodeObjectVersions(response.versions())) + .build(); + } + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + // Elements to decode: Delimiter, KeyMarker, NextKeyMarker, Prefix, Key + private SdkResponse modifyListMultipartUploadsResponse(ListMultipartUploadsResponse response) { + return response.toBuilder() + .delimiter(urlDecode(response.delimiter())) + .keyMarker(urlDecode(response.keyMarker())) + .nextKeyMarker(urlDecode(response.nextKeyMarker())) + .prefix(urlDecode(response.prefix())) + .commonPrefixes(decodeCommonPrefixes(response.commonPrefixes())) + .uploads(decodeMultipartUpload(response.uploads())) + .build(); + } private static List decodeContents(List contents) { if (contents == null) { return null; } - return contents.stream() - .map(o -> o.toBuilder().key(urlDecode(o.key())).build()) - .collect(Collectors.toList()); + return Collections.unmodifiableList(contents.stream() + .map(o -> o.toBuilder().key(urlDecode(o.key())).build()) + .collect(Collectors.toList())); + } + + private static List decodeObjectVersions(List objectVersions) { + if (objectVersions == null) { + return null; + } + + return Collections.unmodifiableList(objectVersions.stream() + .map(o -> o.toBuilder().key(urlDecode(o.key())).build()) + .collect(Collectors.toList())); + } + + private static List decodeCommonPrefixes(List commonPrefixes) { + if (commonPrefixes == null) { + return null; + } + + return Collections.unmodifiableList(commonPrefixes.stream() + .map(p -> p.toBuilder().prefix(urlDecode(p.prefix())).build()) + .collect(Collectors.toList())); + } + + private static List decodeMultipartUpload(List multipartUploads) { + if (multipartUploads == null) { + return null; + } + + return Collections.unmodifiableList(multipartUploads.stream() + .map(u -> u.toBuilder().key(urlDecode(u.key())).build()) + .collect(Collectors.toList())); } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/DisableDoubleUrlEncodingInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/DisableDoubleUrlEncodingInterceptor.java index 5547310f112e..211a44719e7b 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/DisableDoubleUrlEncodingInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/DisableDoubleUrlEncodingInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
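For context on the DecodeUrlEncodedResponseInterceptor changes above (which now also URL-decode ListObjectVersions and ListMultipartUploads responses, plus CommonPrefix entries), here is a minimal sketch of the calling pattern they support. The bucket name and printed labels are placeholders, not part of the change.

import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.EncodingType;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;

public class ListWithUrlEncodingSketch {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // EncodingType.URL asks S3 to percent-encode keys and prefixes in the XML response;
            // the interceptor decodes them again, so callers always see the plain values.
            ListObjectsV2Response response = s3.listObjectsV2(r -> r.bucket("example-bucket")
                                                                    .delimiter("/")
                                                                    .encodingType(EncodingType.URL));

            response.contents().forEach(o -> System.out.println("key:    " + o.key()));
            response.commonPrefixes().forEach(p -> System.out.println("prefix: " + p.prefix()));
        }
    }
}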
diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EnableChunkedEncodingInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EnableChunkedEncodingInterceptor.java index ec11b919128c..4788782e9cf2 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EnableChunkedEncodingInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EnableChunkedEncodingInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EnableTrailingChecksumInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EnableTrailingChecksumInterceptor.java index c18455351db6..95efceb68dd7 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EnableTrailingChecksumInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EnableTrailingChecksumInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -30,6 +30,7 @@ import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.utils.Validate; @SdkInternalApi public final class EnableTrailingChecksumInterceptor implements ExecutionInterceptor { @@ -60,7 +61,11 @@ public SdkResponse modifyResponse(Context.ModifyResponse context, ExecutionAttri if (getObjectChecksumEnabledPerResponse(context.request(), httpResponse)) { GetObjectResponse getResponse = (GetObjectResponse) response; - return getResponse.toBuilder().contentLength(getResponse.contentLength() - S3_MD5_CHECKSUM_LENGTH).build(); + Long contentLength = getResponse.contentLength(); + Validate.notNull(contentLength, "Service returned null 'Content-Length'."); + return getResponse.toBuilder() + .contentLength(contentLength - S3_MD5_CHECKSUM_LENGTH) + .build(); } return response; diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptor.java index 6c7bfe52c5b1..3f6c4c9eece4 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
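The EnableTrailingChecksumInterceptor change above validates that the service returned a Content-Length before subtracting the trailing MD5 checksum length from it. A rough sketch of the caller-visible effect follows, assuming the default checksum-validation setting and placeholder bucket and key names; contentLength() reports the payload size without the trailing checksum.

import software.amazon.awssdk.core.ResponseInputStream;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.S3Configuration;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;

public class TrailingChecksumSketch {
    public static void main(String[] args) throws Exception {
        // checksumValidationEnabled(true) is already the default; it is spelled out here only
        // to show which feature the interceptor supports.
        try (S3Client s3 = S3Client.builder()
                                   .serviceConfiguration(S3Configuration.builder()
                                                                        .checksumValidationEnabled(true)
                                                                        .build())
                                   .build()) {
            try (ResponseInputStream<GetObjectResponse> object =
                         s3.getObject(r -> r.bucket("example-bucket").key("example-key"))) {
                // The raw HTTP Content-Length includes the trailing MD5 checksum when present;
                // the interceptor subtracts that trailer, so contentLength() is the payload size.
                System.out.println("payload bytes: " + object.response().contentLength());
            }
        }
    }
}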
@@ -18,13 +18,15 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; import software.amazon.awssdk.awscore.AwsExecutionAttribute; -import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.services.s3.S3Configuration; -import software.amazon.awssdk.services.s3.internal.S3EndpointUtils; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; +import software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointResolverContext; +import software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointResolverFactory; @SdkInternalApi public final class EndpointAddressInterceptor implements ExecutionInterceptor { @@ -33,14 +35,30 @@ public final class EndpointAddressInterceptor implements ExecutionInterceptor { public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - SdkRequest sdkRequest = context.request(); + boolean endpointOverride = + Boolean.TRUE.equals(executionAttributes.getAttribute(SdkExecutionAttribute.ENDPOINT_OVERRIDDEN)); + S3Configuration serviceConfiguration = + (S3Configuration) executionAttributes.getAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG); + S3EndpointResolverContext resolverContext = + S3EndpointResolverContext.builder() + .request(context.httpRequest()) + .originalRequest(context.request()) + .region(executionAttributes.getAttribute(AwsExecutionAttribute.AWS_REGION)) + .endpointOverridden(endpointOverride) + .serviceConfiguration(serviceConfiguration) + .build(); - return S3EndpointUtils.applyEndpointConfiguration(context.httpRequest(), - sdkRequest, - executionAttributes.getAttribute(AwsExecutionAttribute.AWS_REGION), - (S3Configuration) executionAttributes - .getAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG), - sdkRequest.getValueForField("Bucket", String.class) - .orElse(null)); + String bucketName = context.request().getValueForField("Bucket", String.class).orElse(null); + ConfiguredS3SdkHttpRequest configuredRequest = S3EndpointResolverFactory.getEndpointResolver(bucketName) + .applyEndpointConfiguration(resolverContext); + + configuredRequest.signingRegionModification().ifPresent( + region -> executionAttributes.putAttribute(AwsSignerExecutionAttribute.SIGNING_REGION, region)); + + configuredRequest.signingServiceModification().ifPresent( + name -> executionAttributes.putAttribute(AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME, name)); + + return configuredRequest.sdkHttpRequest(); } + } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/ExceptionTranslationInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/ExceptionTranslationInterceptor.java index 933cd8f3130a..b185d36439a0 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/ExceptionTranslationInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/ExceptionTranslationInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/GetBucketPolicyInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/GetBucketPolicyInterceptor.java index d33d868a91f8..dc66f7fca380 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/GetBucketPolicyInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/GetBucketPolicyInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -18,11 +18,15 @@ import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; import java.io.InputStream; +import java.nio.ByteBuffer; import java.util.Optional; +import java.util.function.Predicate; +import org.reactivestreams.Publisher; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.internal.async.SdkPublishers; import software.amazon.awssdk.http.AbortableInputStream; import software.amazon.awssdk.services.s3.model.GetBucketPolicyRequest; import software.amazon.awssdk.utils.IoUtils; @@ -33,24 +37,38 @@ */ @SdkInternalApi public final class GetBucketPolicyInterceptor implements ExecutionInterceptor { + private static final String XML_ENVELOPE_PREFIX = ""; + + private static final Predicate INTERCEPTOR_CONTEXT_PREDICATE = + context -> context.request() instanceof GetBucketPolicyRequest && context.httpResponse().isSuccessful(); @Override public Optional modifyHttpResponseContent(Context.ModifyHttpResponse context, ExecutionAttributes executionAttributes) { - if (context.request() instanceof GetBucketPolicyRequest && context.httpResponse().isSuccessful()) { + if (INTERCEPTOR_CONTEXT_PREDICATE.test(context)) { String policy = context.responseBody() .map(r -> invokeSafely(() -> IoUtils.toUtf8String(r))) .orElse(null); if (policy != null) { - // Wrap in CDATA to deal with any escaping issues - String xml = String.format("" - + "", policy); + String xml = XML_ENVELOPE_PREFIX + policy + XML_ENVELOPE_SUFFIX; return Optional.of(AbortableInputStream.create(new StringInputStream(xml))); } } return context.responseBody(); } + + @Override + public Optional> modifyAsyncHttpResponseContent(Context.ModifyHttpResponse context, + ExecutionAttributes executionAttributes) { + if (INTERCEPTOR_CONTEXT_PREDICATE.test(context)) { + return context.responsePublisher().map( + body -> SdkPublishers.envelopeWrappedPublisher(body, XML_ENVELOPE_PREFIX, XML_ENVELOPE_SUFFIX)); + } + + return context.responsePublisher(); + } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/GetObjectInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/GetObjectInterceptor.java new file mode 100644 index 000000000000..f9973fed3617 --- /dev/null +++ 
b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/GetObjectInterceptor.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.handlers; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkResponse; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; + +/** + * Interceptor for {@link GetObjectRequest} messages. + */ +@SdkInternalApi +public class GetObjectInterceptor implements ExecutionInterceptor { + @Override + public SdkResponse modifyResponse(Context.ModifyResponse context, ExecutionAttributes executionAttributes) { + SdkResponse response = context.response(); + if (!(response instanceof GetObjectResponse)) { + return response; + } + + return fixContentRange(response, context.httpResponse()); + } + + /** + * S3 currently returns content-range in two possible headers: Content-Range or x-amz-content-range based on the x-amz-te + * in the request. This will check the x-amz-content-range if the modeled header (Content-Range) wasn't populated. + */ + private SdkResponse fixContentRange(SdkResponse sdkResponse, SdkHttpResponse httpResponse) { + // Use the modeled content range header, if the service returned it. + GetObjectResponse getObjectResponse = (GetObjectResponse) sdkResponse; + if (getObjectResponse.contentRange() != null) { + return getObjectResponse; + } + + // If the service didn't use the modeled content range header, check the x-amz-content-range header. + Optional xAmzContentRange = httpResponse.firstMatchingHeader("x-amz-content-range"); + if (!xAmzContentRange.isPresent()) { + return getObjectResponse; + } + + return getObjectResponse.copy(r -> r.contentRange(xAmzContentRange.get())); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptor.java index f207d365555e..92859b96dea9 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
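The GetBucketPolicyInterceptor above now applies the same XML-envelope wrapping to async responses via SdkPublishers.envelopeWrappedPublisher, and the new GetObjectInterceptor falls back to the x-amz-content-range header when Content-Range is not populated. A minimal sketch of the async bucket-policy call this enables; the bucket name is a placeholder.

import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.model.GetBucketPolicyResponse;

public class GetBucketPolicySketch {
    public static void main(String[] args) {
        try (S3AsyncClient s3 = S3AsyncClient.create()) {
            // The interceptor wraps the raw JSON policy document in a small XML envelope so the
            // XML unmarshaller can populate GetBucketPolicyResponse#policy(); callers still
            // receive the original JSON text.
            GetBucketPolicyResponse response = s3.getBucketPolicy(r -> r.bucket("example-bucket")).join();
            System.out.println(response.policy());
        }
    }
}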
diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/SyncChecksumValidationInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/SyncChecksumValidationInterceptor.java index 5e53224cb8da..02d58b107b5d 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/SyncChecksumValidationInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/SyncChecksumValidationInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -19,7 +19,8 @@ import static software.amazon.awssdk.services.s3.checksums.ChecksumConstant.CONTENT_LENGTH_HEADER; import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.CHECKSUM; import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.getObjectChecksumEnabledPerResponse; -import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.putObjectChecksumEnabled; +import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.responseChecksumIsValid; +import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.shouldRecordChecksum; import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.validatePutObjectChecksum; import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; @@ -29,6 +30,7 @@ import software.amazon.awssdk.core.checksums.Md5Checksum; import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.sync.RequestBody; @@ -39,15 +41,16 @@ @SdkInternalApi public final class SyncChecksumValidationInterceptor implements ExecutionInterceptor { + private static ExecutionAttribute SYNC_RECORDING_CHECKSUM = new ExecutionAttribute<>("syncRecordingChecksum"); @Override public Optional modifyHttpContent(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - - if (putObjectChecksumEnabled(context.request(), SYNC, executionAttributes, context.httpRequest()) + if (shouldRecordChecksum(context.request(), SYNC, executionAttributes, context.httpRequest()) && context.requestBody().isPresent()) { SdkChecksum checksum = new Md5Checksum(); executionAttributes.putAttribute(CHECKSUM, checksum); + executionAttributes.putAttribute(SYNC_RECORDING_CHECKSUM, true); RequestBody requestBody = context.requestBody().get(); @@ -65,7 +68,6 @@ public Optional modifyHttpContent(Context.ModifyHttpRequest context @Override public Optional modifyHttpResponseContent(Context.ModifyHttpResponse context, ExecutionAttributes executionAttributes) { - if (getObjectChecksumEnabledPerResponse(context.request(), context.httpResponse()) && context.responseBody().isPresent()) { @@ -86,7 +88,10 @@ public Optional modifyHttpResponseContent(Context.ModifyHttpRespons @Override public void afterUnmarshalling(Context.AfterUnmarshalling context, ExecutionAttributes executionAttributes) { - if 
(putObjectChecksumEnabled(context.request(), SYNC, executionAttributes, context.httpResponse())) { + boolean recordingChecksum = Boolean.TRUE.equals(executionAttributes.getAttribute(SYNC_RECORDING_CHECKSUM)); + boolean responseChecksumIsValid = responseChecksumIsValid(context.httpResponse()); + + if (recordingChecksum && responseChecksumIsValid) { validatePutObjectChecksum((PutObjectResponse) context.response(), executionAttributes); } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/presigner/DefaultS3Presigner.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/presigner/DefaultS3Presigner.java new file mode 100644 index 000000000000..70f3725ef706 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/presigner/DefaultS3Presigner.java @@ -0,0 +1,498 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.presigner; + +import static java.util.stream.Collectors.toMap; +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.PRESIGNER_EXPIRATION; +import static software.amazon.awssdk.utils.CollectionUtils.mergeLists; +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.net.URI; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.signer.AwsS3V4Signer; +import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; +import software.amazon.awssdk.awscore.AwsExecutionAttribute; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; +import software.amazon.awssdk.awscore.endpoint.DefaultServiceEndpointBuilder; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.ClientType; +import software.amazon.awssdk.core.RequestOverrideConfiguration; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.client.builder.SdkDefaultClientBuilder; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.http.ExecutionContext; +import software.amazon.awssdk.core.interceptor.ClasspathInterceptorChainFactory; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import 
software.amazon.awssdk.core.interceptor.ExecutionInterceptorChain; +import software.amazon.awssdk.core.interceptor.InterceptorContext; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.signer.Presigner; +import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.protocols.xml.AwsS3ProtocolFactory; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.services.s3.presigner.model.AbortMultipartUploadPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.CompleteMultipartUploadPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.CreateMultipartUploadPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedAbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedCompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; +import software.amazon.awssdk.services.s3.presigner.model.PutObjectPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.UploadPartPresignRequest; +import software.amazon.awssdk.services.s3.transform.AbortMultipartUploadRequestMarshaller; +import software.amazon.awssdk.services.s3.transform.CompleteMultipartUploadRequestMarshaller; +import software.amazon.awssdk.services.s3.transform.CreateMultipartUploadRequestMarshaller; +import software.amazon.awssdk.services.s3.transform.GetObjectRequestMarshaller; +import software.amazon.awssdk.services.s3.transform.PutObjectRequestMarshaller; +import software.amazon.awssdk.services.s3.transform.UploadPartRequestMarshaller; +import software.amazon.awssdk.utils.IoUtils; +import software.amazon.awssdk.utils.Validate; + +/** + * The default implementation of the {@link S3Presigner} interface. 
+ */ +@SdkInternalApi +public final class DefaultS3Presigner extends DefaultSdkPresigner implements S3Presigner { + private static final AwsS3V4Signer DEFAULT_SIGNER = AwsS3V4Signer.create(); + private static final S3Configuration DEFAULT_S3_CONFIGURATION = S3Configuration.builder() + .checksumValidationEnabled(false) + .build(); + private static final String SERVICE_NAME = "s3"; + private static final String SIGNING_NAME = "s3"; + + private final S3Configuration serviceConfiguration; + private final List clientInterceptors; + private final GetObjectRequestMarshaller getObjectRequestMarshaller; + private final PutObjectRequestMarshaller putObjectRequestMarshaller; + private final CreateMultipartUploadRequestMarshaller createMultipartUploadRequestMarshaller; + private final UploadPartRequestMarshaller uploadPartRequestMarshaller; + private final CompleteMultipartUploadRequestMarshaller completeMultipartUploadRequestMarshaller; + private final AbortMultipartUploadRequestMarshaller abortMultipartUploadRequestMarshaller; + + private DefaultS3Presigner(Builder b) { + super(b); + + this.serviceConfiguration = b.serviceConfiguration != null ? b.serviceConfiguration : DEFAULT_S3_CONFIGURATION; + + this.clientInterceptors = initializeInterceptors(); + + // Copied from DefaultS3Client#init + AwsS3ProtocolFactory protocolFactory = AwsS3ProtocolFactory.builder() + .clientConfiguration(createClientConfiguration()) + .build(); + + // Copied from DefaultS3Client#getObject + this.getObjectRequestMarshaller = new GetObjectRequestMarshaller(protocolFactory); + + // Copied from DefaultS3Client#putObject + this.putObjectRequestMarshaller = new PutObjectRequestMarshaller(protocolFactory); + + // Copied from DefaultS3Client#createMultipartUpload + this.createMultipartUploadRequestMarshaller = new CreateMultipartUploadRequestMarshaller(protocolFactory); + + // Copied from DefaultS3Client#uploadPart + this.uploadPartRequestMarshaller = new UploadPartRequestMarshaller(protocolFactory); + + // Copied from DefaultS3Client#completeMultipartUpload + this.completeMultipartUploadRequestMarshaller = new CompleteMultipartUploadRequestMarshaller(protocolFactory); + + // Copied from DefaultS3Client#abortMultipartUpload + this.abortMultipartUploadRequestMarshaller = new AbortMultipartUploadRequestMarshaller(protocolFactory); + } + + public static S3Presigner.Builder builder() { + return new Builder(); + } + + /** + * Copied from {@code DefaultS3BaseClientBuilder} and {@link SdkDefaultClientBuilder}. + */ + private List initializeInterceptors() { + ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); + List s3Interceptors = + interceptorFactory.getInterceptors("software/amazon/awssdk/services/s3/execution.interceptors"); + return mergeLists(interceptorFactory.getGlobalInterceptors(), s3Interceptors); + } + + /** + * Copied from {@link AwsDefaultClientBuilder}. 
+ */ + private SdkClientConfiguration createClientConfiguration() { + if (endpointOverride() != null) { + return SdkClientConfiguration.builder() + .option(SdkClientOption.ENDPOINT, endpointOverride()) + .option(SdkClientOption.ENDPOINT_OVERRIDDEN, true) + .build(); + } else { + URI defaultEndpoint = new DefaultServiceEndpointBuilder(SERVICE_NAME, "https").withRegion(region()) + .getServiceEndpoint(); + return SdkClientConfiguration.builder() + .option(SdkClientOption.ENDPOINT, defaultEndpoint) + .build(); + } + } + + @Override + public PresignedGetObjectRequest presignGetObject(GetObjectPresignRequest request) { + return presign(PresignedGetObjectRequest.builder(), + request, + request.getObjectRequest(), + GetObjectRequest.class, + getObjectRequestMarshaller::marshall, + "GetObject") + .build(); + } + + @Override + public PresignedPutObjectRequest presignPutObject(PutObjectPresignRequest request) { + return presign(PresignedPutObjectRequest.builder(), + request, + request.putObjectRequest(), + PutObjectRequest.class, + putObjectRequestMarshaller::marshall, + "PutObject") + .build(); + } + + @Override + public PresignedCreateMultipartUploadRequest presignCreateMultipartUpload(CreateMultipartUploadPresignRequest request) { + return presign(PresignedCreateMultipartUploadRequest.builder(), + request, + request.createMultipartUploadRequest(), + CreateMultipartUploadRequest.class, + createMultipartUploadRequestMarshaller::marshall, + "CreateMultipartUpload") + .build(); + } + + @Override + public PresignedUploadPartRequest presignUploadPart(UploadPartPresignRequest request) { + return presign(PresignedUploadPartRequest.builder(), + request, + request.uploadPartRequest(), + UploadPartRequest.class, + uploadPartRequestMarshaller::marshall, + "UploadPart") + .build(); + } + + @Override + public PresignedCompleteMultipartUploadRequest presignCompleteMultipartUpload(CompleteMultipartUploadPresignRequest request) { + return presign(PresignedCompleteMultipartUploadRequest.builder(), + request, + request.completeMultipartUploadRequest(), + CompleteMultipartUploadRequest.class, + completeMultipartUploadRequestMarshaller::marshall, + "CompleteMultipartUpload") + .build(); + } + + @Override + public PresignedAbortMultipartUploadRequest presignAbortMultipartUpload(AbortMultipartUploadPresignRequest request) { + return presign(PresignedAbortMultipartUploadRequest.builder(), + request, + request.abortMultipartUploadRequest(), + AbortMultipartUploadRequest.class, + abortMultipartUploadRequestMarshaller::marshall, + "AbortMultipartUpload") + .build(); + } + + protected S3Configuration serviceConfiguration() { + return serviceConfiguration; + } + + /** + * Generate a {@link PresignedRequest} from a {@link PresignedRequest} and {@link SdkRequest}. 
+ */ + private T presign(T presignedRequest, + PresignRequest presignRequest, + SdkRequest requestToPresign, + Class requestToPresignType, + Function requestMarshaller, + String operationName) { + ExecutionContext execCtx = createExecutionContext(presignRequest, requestToPresign, operationName); + + callBeforeExecutionHooks(execCtx); + callModifyRequestHooksAndUpdateContext(execCtx); + callBeforeMarshallingHooks(execCtx); + marshalRequestAndUpdateContext(execCtx, requestToPresignType, requestMarshaller); + callAfterMarshallingHooks(execCtx); + addRequestLevelHeadersAndQueryParameters(execCtx); + callModifyHttpRequestHooksAndUpdateContext(execCtx); + + SdkHttpFullRequest httpRequest = getHttpFullRequest(execCtx); + SdkHttpFullRequest signedHttpRequest = presignRequest(execCtx, httpRequest); + + initializePresignedRequest(presignedRequest, execCtx, signedHttpRequest); + + return presignedRequest; + } + + /** + * Creates an execution context from the provided requests information. + */ + private ExecutionContext createExecutionContext(PresignRequest presignRequest, SdkRequest sdkRequest, String operationName) { + AwsCredentialsProvider clientCredentials = credentialsProvider(); + AwsCredentialsProvider credentialsProvider = sdkRequest.overrideConfiguration() + .filter(c -> c instanceof AwsRequestOverrideConfiguration) + .map(c -> (AwsRequestOverrideConfiguration) c) + .flatMap(AwsRequestOverrideConfiguration::credentialsProvider) + .orElse(clientCredentials); + + Signer signer = sdkRequest.overrideConfiguration().flatMap(RequestOverrideConfiguration::signer).orElse(DEFAULT_SIGNER); + Instant signatureExpiration = Instant.now().plus(presignRequest.signatureDuration()); + + AwsCredentials credentials = credentialsProvider.resolveCredentials(); + Validate.validState(credentials != null, "Credential providers must never return null."); + + ExecutionAttributes executionAttributes = new ExecutionAttributes() + .putAttribute(AwsSignerExecutionAttribute.AWS_CREDENTIALS, credentials) + .putAttribute(AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME, SIGNING_NAME) + .putAttribute(AwsExecutionAttribute.AWS_REGION, region()) + .putAttribute(AwsSignerExecutionAttribute.SIGNING_REGION, region()) + .putAttribute(SdkInternalExecutionAttribute.IS_FULL_DUPLEX, false) + .putAttribute(SdkExecutionAttribute.CLIENT_TYPE, ClientType.SYNC) + .putAttribute(SdkExecutionAttribute.SERVICE_NAME, SERVICE_NAME) + .putAttribute(SdkExecutionAttribute.OPERATION_NAME, operationName) + .putAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG, serviceConfiguration()) + .putAttribute(PRESIGNER_EXPIRATION, signatureExpiration); + + ExecutionInterceptorChain executionInterceptorChain = new ExecutionInterceptorChain(clientInterceptors); + return ExecutionContext.builder() + .interceptorChain(executionInterceptorChain) + .interceptorContext(InterceptorContext.builder() + .request(sdkRequest) + .build()) + .executionAttributes(executionAttributes) + .signer(signer) + .build(); + } + + /** + * Call the before-execution interceptor hooks. + */ + private void callBeforeExecutionHooks(ExecutionContext execCtx) { + execCtx.interceptorChain().beforeExecution(execCtx.interceptorContext(), execCtx.executionAttributes()); + } + + /** + * Call the modify-request interceptor hooks and update the execution context. 
+ */ + private void callModifyRequestHooksAndUpdateContext(ExecutionContext execCtx) { + execCtx.interceptorContext(execCtx.interceptorChain().modifyRequest(execCtx.interceptorContext(), + execCtx.executionAttributes())); + } + + /** + * Call the before-marshalling interceptor hooks. + */ + private void callBeforeMarshallingHooks(ExecutionContext execCtx) { + execCtx.interceptorChain().beforeMarshalling(execCtx.interceptorContext(), execCtx.executionAttributes()); + } + + /** + * Marshal the request and update the execution context with the result. + */ + private void marshalRequestAndUpdateContext(ExecutionContext execCtx, + Class requestType, + Function requestMarshaller) { + T sdkRequest = Validate.isInstanceOf(requestType, execCtx.interceptorContext().request(), + "Interceptor generated unsupported type (%s) when %s was expected.", + execCtx.interceptorContext().request().getClass(), requestType); + + SdkHttpFullRequest marshalledRequest = requestMarshaller.apply(sdkRequest); + + // TODO: The core SDK doesn't put the request body into the interceptor context. That should be fixed. + Optional requestBody = marshalledRequest.contentStreamProvider() + .map(ContentStreamProvider::newStream) + .map(is -> invokeSafely(() -> IoUtils.toByteArray(is))) + .map(RequestBody::fromBytes); + + execCtx.interceptorContext(execCtx.interceptorContext().copy(r -> r.httpRequest(marshalledRequest) + .requestBody(requestBody.orElse(null)))); + } + + /** + * Call the after-marshalling interceptor hooks. + */ + private void callAfterMarshallingHooks(ExecutionContext execCtx) { + execCtx.interceptorChain().afterMarshalling(execCtx.interceptorContext(), execCtx.executionAttributes()); + } + + /** + * Update the provided HTTP request by adding any HTTP headers or query parameters specified as part of the + * {@link SdkRequest}. + */ + private void addRequestLevelHeadersAndQueryParameters(ExecutionContext execCtx) { + SdkHttpRequest httpRequest = execCtx.interceptorContext().httpRequest(); + SdkRequest sdkRequest = execCtx.interceptorContext().request(); + SdkHttpRequest updatedHttpRequest = + httpRequest.toBuilder() + .applyMutation(b -> addRequestLevelHeaders(b, sdkRequest)) + .applyMutation(b -> addRequestLeveQueryParameters(b, sdkRequest)) + .build(); + execCtx.interceptorContext(execCtx.interceptorContext().copy(c -> c.httpRequest(updatedHttpRequest))); + } + + private void addRequestLevelHeaders(SdkHttpRequest.Builder builder, SdkRequest request) { + request.overrideConfiguration().ifPresent(overrideConfig -> { + if (!overrideConfig.headers().isEmpty()) { + overrideConfig.headers().forEach(builder::putHeader); + } + }); + } + + private void addRequestLeveQueryParameters(SdkHttpRequest.Builder builder, SdkRequest request) { + request.overrideConfiguration().ifPresent(overrideConfig -> { + if (!overrideConfig.rawQueryParameters().isEmpty()) { + overrideConfig.rawQueryParameters().forEach(builder::putRawQueryParameter); + } + }); + } + + /** + * Call the after-marshalling interceptor hooks and return the HTTP request that should be pre-signed. + */ + private void callModifyHttpRequestHooksAndUpdateContext(ExecutionContext execCtx) { + execCtx.interceptorContext(execCtx.interceptorChain().modifyHttpRequestAndHttpContent(execCtx.interceptorContext(), + execCtx.executionAttributes())); + } + + /** + * Get the HTTP full request from the execution context. 
+ */ + private SdkHttpFullRequest getHttpFullRequest(ExecutionContext execCtx) { + SdkHttpRequest requestFromInterceptor = execCtx.interceptorContext().httpRequest(); + Optional bodyFromInterceptor = execCtx.interceptorContext().requestBody(); + + return SdkHttpFullRequest.builder() + .method(requestFromInterceptor.method()) + .protocol(requestFromInterceptor.protocol()) + .host(requestFromInterceptor.host()) + .port(requestFromInterceptor.port()) + .encodedPath(requestFromInterceptor.encodedPath()) + .rawQueryParameters(requestFromInterceptor.rawQueryParameters()) + .headers(requestFromInterceptor.headers()) + .contentStreamProvider(bodyFromInterceptor.map(RequestBody::contentStreamProvider) + .orElse(null)) + .build(); + } + + /** + * Presign the provided HTTP request. + */ + private SdkHttpFullRequest presignRequest(ExecutionContext execCtx, SdkHttpFullRequest request) { + Presigner presigner = Validate.isInstanceOf(Presigner.class, execCtx.signer(), + "Configured signer (%s) does not support presigning (must implement %s).", + execCtx.signer().getClass(), Presigner.class); + + return presigner.presign(request, execCtx.executionAttributes()); + } + + /** + * Initialize the provided presigned request. + */ + private void initializePresignedRequest(PresignedRequest.Builder presignedRequest, + ExecutionContext execCtx, + SdkHttpFullRequest signedHttpRequest) { + SdkBytes signedPayload = signedHttpRequest.contentStreamProvider() + .map(p -> SdkBytes.fromInputStream(p.newStream())) + .orElse(null); + + List signedHeadersQueryParam = signedHttpRequest.rawQueryParameters().get("X-Amz-SignedHeaders"); + Validate.validState(signedHeadersQueryParam != null, + "Only SigV4 presigners are supported at this time, but the configured " + + "presigner (%s) did not seem to generate a SigV4 signature.", execCtx.signer()); + + Map> signedHeaders = + signedHeadersQueryParam.stream() + .flatMap(h -> Stream.of(h.split(";"))) + .collect(toMap(h -> h, h -> signedHttpRequest.firstMatchingHeader(h) + .map(Collections::singletonList) + .orElseGet(ArrayList::new))); + + boolean isBrowserExecutable = signedHttpRequest.method() == SdkHttpMethod.GET && + signedPayload == null && + (signedHeaders.isEmpty() || + (signedHeaders.size() == 1 && signedHeaders.containsKey("host"))); + + presignedRequest.expiration(execCtx.executionAttributes().getAttribute(PRESIGNER_EXPIRATION)) + .isBrowserExecutable(isBrowserExecutable) + .httpRequest(signedHttpRequest) + .signedHeaders(signedHeaders) + .signedPayload(signedPayload); + } + + @SdkInternalApi + public static final class Builder extends DefaultSdkPresigner.Builder + implements S3Presigner.Builder { + + private S3Configuration serviceConfiguration; + + private Builder() { + } + + /** + * Allows providing a custom S3 serviceConfiguration by providing a {@link S3Configuration} object; + * + * Note: chunkedEncodingEnabled and checksumValidationEnabled do not apply to presigned requests. 
+ * + * @param serviceConfiguration {@link S3Configuration} + * @return this Builder + */ + public Builder serviceConfiguration(S3Configuration serviceConfiguration) { + this.serviceConfiguration = serviceConfiguration; + return this; + } + + @Override + public S3Presigner build() { + return new DefaultS3Presigner(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/presigner/DefaultSdkPresigner.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/presigner/DefaultSdkPresigner.java new file mode 100644 index 000000000000..dcf94b6375be --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/presigner/DefaultSdkPresigner.java @@ -0,0 +1,106 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.presigner; + +import java.net.URI; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.awscore.presigner.SdkPresigner; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.regions.providers.AwsRegionProvider; +import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; +import software.amazon.awssdk.regions.providers.LazyAwsRegionProvider; +import software.amazon.awssdk.utils.IoUtils; + +/** + * The base class implementing the {@link SdkPresigner} interface. + *
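A minimal usage sketch of the presigner implemented by DefaultS3Presigner above, written against the public S3Presigner interface; the bucket, key, and signature duration are placeholders.

import java.time.Duration;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest;
import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest;

public class PresignGetObjectSketch {
    public static void main(String[] args) {
        try (S3Presigner presigner = S3Presigner.builder().build()) {
            GetObjectRequest getObject = GetObjectRequest.builder()
                                                         .bucket("example-bucket")
                                                         .key("example-key")
                                                         .build();

            GetObjectPresignRequest presignRequest = GetObjectPresignRequest.builder()
                                                                            .signatureDuration(Duration.ofMinutes(15))
                                                                            .getObjectRequest(getObject)
                                                                            .build();

            PresignedGetObjectRequest presigned = presigner.presignGetObject(presignRequest);

            // A GET with no signed payload and (at most) only the 'host' header signed is
            // browser-executable, so the URL can be handed directly to an end user.
            System.out.println(presigned.url());
            System.out.println("browser executable: " + presigned.isBrowserExecutable());
        }
    }
}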

    + * TODO: This should get moved to aws-core (or split and moved to sdk-core and aws-core) when we support presigning from + * multiple services. + * TODO: After moving, this should get marked as an @SdkProtectedApi. + */ +@SdkInternalApi +public abstract class DefaultSdkPresigner implements SdkPresigner { + private static final AwsRegionProvider DEFAULT_REGION_PROVIDER = + new LazyAwsRegionProvider(DefaultAwsRegionProviderChain::new); + private static final AwsCredentialsProvider DEFAULT_CREDENTIALS_PROVIDER = + DefaultCredentialsProvider.create(); + + private final Region region; + private final URI endpointOverride; + private final AwsCredentialsProvider credentialsProvider; + + protected DefaultSdkPresigner(Builder b) { + this.region = b.region != null ? b.region : DEFAULT_REGION_PROVIDER.getRegion(); + this.credentialsProvider = b.credentialsProvider != null ? b.credentialsProvider : DEFAULT_CREDENTIALS_PROVIDER; + this.endpointOverride = b.endpointOverride; + } + + protected Region region() { + return region; + } + + protected AwsCredentialsProvider credentialsProvider() { + return credentialsProvider; + } + + protected URI endpointOverride() { + return endpointOverride; + } + + @Override + public void close() { + IoUtils.closeIfCloseable(credentialsProvider, null); + } + + /** + * The base class implementing the {@link SdkPresigner.Builder} interface. + */ + @SdkInternalApi + public abstract static class Builder> + implements SdkPresigner.Builder { + private Region region; + private AwsCredentialsProvider credentialsProvider; + private URI endpointOverride; + + protected Builder() { + } + + @Override + public B region(Region region) { + this.region = region; + return thisBuilder(); + } + + @Override + public B credentialsProvider(AwsCredentialsProvider credentialsProvider) { + this.credentialsProvider = credentialsProvider; + return thisBuilder(); + } + + @Override + public B endpointOverride(URI endpointOverride) { + this.endpointOverride = endpointOverride; + return thisBuilder(); + } + + @SuppressWarnings("unchecked") + private B thisBuilder() { + return (B) this; + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/ArnConverter.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/ArnConverter.java new file mode 100644 index 000000000000..6a6ecd931b1f --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/ArnConverter.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.arns.Arn; + +/** + * An interface for converting an AWS ARN into a service specific {@link AwsResource}. Services that model + * their own AWS resources will provide a specific implementation of this ARN parser. + *
    + * @param The service specific representation of {@link AwsResource}. + */ +@SdkInternalApi +@FunctionalInterface +public interface ArnConverter { + /** + * Converts an AWS ARN into a service specific {@link AwsResource}. + * + * @param arn The ARN to convert. + * @return A service specific {@link AwsResource}. + */ + T convertArn(Arn arn); +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/AwsResource.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/AwsResource.java new file mode 100644 index 000000000000..8821b383e365 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/AwsResource.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * An abstract representation of an AWS Resource. Provides an interface to properties that are common across all AWS + * resource types. Services may provide concrete implementations that can be found in each service module. + */ +@SdkInternalApi +public interface AwsResource { + /** + * Gets the partition associated with the AWS Resource (e.g.: 'aws') if one has been specified. + * @return the optional value for the partition. + */ + Optional partition(); + + /** + * Gets the region associated with the AWS Resource (e.g.: 'us-east-1') if one has been specified. + * @return the optional value for the region. + */ + Optional region(); + + /** + * Gets the account ID associated with the AWS Resource if one has been specified. + * @return the optional value for the account ID. + */ + Optional accountId(); +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/IntermediateOutpostResource.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/IntermediateOutpostResource.java new file mode 100644 index 000000000000..d1a194434c27 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/IntermediateOutpostResource.java @@ -0,0 +1,89 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
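As a sketch of the input an ArnConverter implementation receives, the snippet below parses a placeholder S3 access point ARN with the arns module's Arn class; the accessor names are assumed from that module rather than from this diff.

import software.amazon.awssdk.arns.Arn;

public class AccessPointArnSketch {
    public static void main(String[] args) {
        // Placeholder access point ARN of the shape an ArnConverter implementation would receive.
        Arn arn = Arn.fromString("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point");

        System.out.println("partition: " + arn.partition());            // aws
        System.out.println("service:   " + arn.service());              // s3
        System.out.println("region:    " + arn.region().orElse(""));    // us-west-2
        System.out.println("accountId: " + arn.accountId().orElse("")); // 123456789012
        System.out.println("resource:  " + arn.resourceAsString());     // accesspoint/my-access-point
    }
}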
+ */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.arns.ArnResource; +import software.amazon.awssdk.utils.StringUtils; +import software.amazon.awssdk.utils.Validate; + +/** + * The intermediate outpost resource + */ +@SdkInternalApi +public final class IntermediateOutpostResource { + private final String outpostId; + private final ArnResource outpostSubresource; + + private IntermediateOutpostResource(Builder builder) { + this.outpostId = Validate.paramNotBlank(builder.outpostId, "outpostId"); + this.outpostSubresource = Validate.notNull(builder.outpostSubresource, "outpostSubresource"); + Validate.isTrue(StringUtils.isNotBlank(builder.outpostSubresource.resource()), "Invalid format for S3 Outpost ARN"); + Validate.isTrue(builder.outpostSubresource.resourceType().isPresent(), "Invalid format for S3 Outpost ARN"); + } + + public static Builder builder() { + return new Builder(); + } + + /** + * @return the ID of the outpost + */ + public String outpostId() { + return outpostId; + } + + /** + * @return the outpost subresource + */ + public ArnResource outpostSubresource() { + return outpostSubresource; + } + + public static final class Builder { + private String outpostId; + private ArnResource outpostSubresource; + + private Builder() { + } + + /** + * Sets the outpostSubResource + * + * @param outpostSubResource The new outpostSubResource value. + * @return This object for method chaining. + */ + public Builder outpostSubresource(ArnResource outpostSubResource) { + this.outpostSubresource = outpostSubResource; + return this; + } + + /** + * Sets the outpostId + * + * @param outpostId The new outpostId value. + * @return This object for method chaining. + */ + public Builder outpostId(String outpostId) { + this.outpostId = outpostId; + return this; + } + + public IntermediateOutpostResource build() { + return new IntermediateOutpostResource(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/OutpostResourceType.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/OutpostResourceType.java new file mode 100644 index 000000000000..ea902cde6ee3 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/OutpostResourceType.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.StringUtils; + +/** + * An enum representing the types of resources supported by S3 outpost. Each resource type below will have a + * concrete implementation of {@link S3OutpostResource}. + */ +@SdkInternalApi +public enum OutpostResourceType { + + /** + * A specific S3 outpost bucket. 
+ */ + OUTPOST_BUCKET("bucket"), + + /** + * An outpost access point + */ + OUTPOST_ACCESS_POINT("accesspoint"); + + private final String value; + + OutpostResourceType(String value) { + this.value = value; + } + + /** + * @return The canonical string value of this resource type. + */ + @Override + public String toString() { + return value; + } + + /** + * Use this in place of valueOf. + * + * @param value real value + * @return S3ResourceType corresponding to the value + * @throws IllegalArgumentException If the specified value does not map to one of the known values in this enum. + */ + public static OutpostResourceType fromValue(String value) { + if (StringUtils.isEmpty(value)) { + throw new IllegalArgumentException("value cannot be null or empty!"); + } + + for (OutpostResourceType enumEntry : OutpostResourceType.values()) { + if (enumEntry.toString().equals(value)) { + return enumEntry; + } + } + + throw new IllegalArgumentException("Cannot create enum from " + value + " value!"); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointBuilder.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointBuilder.java new file mode 100644 index 000000000000..a2dbb2e5687c --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointBuilder.java @@ -0,0 +1,144 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import static software.amazon.awssdk.utils.http.SdkHttpUtils.urlEncode; + +import java.net.URI; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.SdkClientException; + +/** + * This class is used to construct an endpoint host for an S3 access point. + */ +@SdkInternalApi +public class S3AccessPointBuilder { + private static final Pattern HOSTNAME_COMPLIANT_PATTERN = Pattern.compile("[A-Za-z0-9\\-]+"); + private static final int HOSTNAME_MAX_LENGTH = 63; + + private Boolean dualstackEnabled; + private String accessPointName; + private String region; + private String accountId; + private String protocol; + private String domain; + private Boolean fipsEnabled; + + /** + * Create a new instance of this builder class. + */ + public static S3AccessPointBuilder create() { + return new S3AccessPointBuilder(); + } + + /** + * Enable DualStack endpoint. + */ + public S3AccessPointBuilder dualstackEnabled(Boolean dualstackEnabled) { + this.dualstackEnabled = dualstackEnabled; + return this; + } + + /** + * Enable fips in endpoint. + */ + public S3AccessPointBuilder fipsEnabled(Boolean fipsEnabled) { + this.fipsEnabled = fipsEnabled; + return this; + } + + /** + * The S3 Access Point name. 
+ */ + public S3AccessPointBuilder accessPointName(String accessPointName) { + this.accessPointName = accessPointName; + return this; + } + + /** + * The AWS region hosting the Access Point. + */ + public S3AccessPointBuilder region(String region) { + this.region = region; + return this; + } + + /** + * The ID of the AWS Account the Access Point is associated with. + */ + public S3AccessPointBuilder accountId(String accountId) { + this.accountId = accountId; + return this; + } + + /** + * The protocol to be used with the endpoint URI. + */ + public S3AccessPointBuilder protocol(String protocol) { + this.protocol = protocol; + return this; + } + + /** + * The TLD for the access point. + */ + public S3AccessPointBuilder domain(String domain) { + this.domain = domain; + return this; + } + + /** + * Generate an endpoint URI with no path that maps to the Access Point information stored in this builder. + */ + public URI toUri() { + validateHostnameCompliant(accountId, "accountId"); + validateHostnameCompliant(accessPointName, "accessPointName"); + + String fipsSegment = Boolean.TRUE.equals(fipsEnabled) ? "fips-" : ""; + + String dualStackSegment = Boolean.TRUE.equals(dualstackEnabled) ? ".dualstack" : ""; + String uriString = String.format("%s://%s-%s.s3-accesspoint%s.%s%s.%s", protocol, urlEncode(accessPointName), accountId, + dualStackSegment, fipsSegment, region, domain); + URI uri = URI.create(uriString); + if (uri.getHost() == null) { + throw SdkClientException.create("ARN region (" + region + ") resulted in an invalid URI:" + uri); + } + return uri; + } + + private static void validateHostnameCompliant(String hostnameComponent, String paramName) { + if (hostnameComponent.isEmpty()) { + throw new IllegalArgumentException( + String.format("An S3 Access Point ARN has been passed that is not valid: the required '%s' " + + "component is missing.", paramName)); + } + + if (hostnameComponent.length() > HOSTNAME_MAX_LENGTH) { + throw new IllegalArgumentException( + String.format("An S3 Access Point ARN has been passed that is not valid: the '%s' " + + "component exceeds the maximum length of %d characters.", paramName, HOSTNAME_MAX_LENGTH)); + } + + Matcher m = HOSTNAME_COMPLIANT_PATTERN.matcher(hostnameComponent); + if (!m.matches()) { + throw new IllegalArgumentException( + String.format("An S3 Access Point ARN has been passed that is not valid: the '%s' " + + "component must only contain alphanumeric characters and dashes.", paramName)); + } + } +} \ No newline at end of file diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointResource.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointResource.java new file mode 100644 index 000000000000..8c081d6988ae --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointResource.java @@ -0,0 +1,246 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * An {@link S3Resource} that represents an S3 access point. + */ +@SdkInternalApi +public final class S3AccessPointResource + implements S3Resource, ToCopyableBuilder { + + private static final S3ResourceType S3_RESOURCE_TYPE = S3ResourceType.ACCESS_POINT; + + private final String partition; + private final String region; + private final String accountId; + private final String accessPointName; + private final S3Resource parentS3Resource; + + private S3AccessPointResource(Builder b) { + this.accessPointName = Validate.paramNotBlank(b.accessPointName, "accessPointName"); + if (b.parentS3Resource == null) { + this.parentS3Resource = null; + this.partition = Validate.paramNotBlank(b.partition, "partition"); + this.region = Validate.paramNotBlank(b.region, "region"); + this.accountId = Validate.paramNotBlank(b.accountId, "accountId"); + } else { + this.parentS3Resource = validateParentS3Resource(b.parentS3Resource); + Validate.isTrue(b.partition == null, "partition cannot be set on builder if it has parent resource"); + Validate.isTrue(b.region == null, "region cannot be set on builder if it has parent resource"); + Validate.isTrue(b.accountId == null, "accountId cannot be set on builder if it has parent resource"); + this.partition = parentS3Resource.partition().orElse(null); + this.region = parentS3Resource.region().orElse(null); + this.accountId = parentS3Resource.accountId().orElse(null); + } + } + + /** + * Get a new builder for this class. + * @return A newly initialized instance of a builder. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the resource type for this access point. + * @return This will always return "access_point". + */ + @Override + public String type() { + return S3_RESOURCE_TYPE.toString(); + } + + @Override + public Optional parentS3Resource() { + return Optional.ofNullable(parentS3Resource); + } + + /** + * Gets the AWS partition name associated with this access point (e.g.: 'aws'). + * @return the name of the partition. + */ + @Override + public Optional partition() { + return Optional.ofNullable(this.partition); + } + + /** + * Gets the AWS region name associated with this bucket (e.g.: 'us-east-1'). + * @return the name of the region. + */ + @Override + public Optional region() { + return Optional.ofNullable(this.region); + } + + /** + * Gets the AWS account ID associated with this bucket. + * @return the AWS account ID. + */ + @Override + public Optional accountId() { + return Optional.ofNullable(this.accountId); + } + + /** + * Gets the name of the access point. + * @return the name of the access point. + */ + public String accessPointName() { + return this.accessPointName; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + S3AccessPointResource that = (S3AccessPointResource) o; + + if (partition != null ? ! partition.equals(that.partition) : that.partition != null) { + return false; + } + if (region != null ? ! region.equals(that.region) : that.region != null) { + return false; + } + if (accountId != null ? ! 
accountId.equals(that.accountId) : that.accountId != null) { + return false; + } + + if (parentS3Resource != null ? ! parentS3Resource.equals(that.parentS3Resource) : that.parentS3Resource != null) { + return false; + } + return accessPointName.equals(that.accessPointName); + } + + @Override + public int hashCode() { + int result = partition != null ? partition.hashCode() : 0; + result = 31 * result + (region != null ? region.hashCode() : 0); + result = 31 * result + (accountId != null ? accountId.hashCode() : 0); + result = 31 * result + accessPointName.hashCode(); + result = 31 * result + (parentS3Resource != null ? parentS3Resource.hashCode() : 0); + return result; + } + + @Override + public Builder toBuilder() { + return builder() + .partition(partition) + .region(region) + .accountId(accountId) + .accessPointName(accessPointName); + } + + private S3Resource validateParentS3Resource(S3Resource parentS3Resource) { + if (!S3ResourceType.OUTPOST.toString().equals(parentS3Resource.type())) { + throw new IllegalArgumentException("Invalid 'parentS3Resource' type. An S3 access point resource must be " + + "associated with an outpost parent resource."); + } + return parentS3Resource; + } + + /** + * A builder for {@link S3AccessPointResource} objects. + */ + public static final class Builder implements CopyableBuilder { + private String partition; + private String region; + private String accountId; + private String accessPointName; + private S3Resource parentS3Resource; + + private Builder() { + } + + public void setPartition(String partition) { + partition(partition); + } + + /** + * The AWS partition associated with the access point. + */ + public Builder partition(String partition) { + this.partition = partition; + return this; + } + + public void setRegion(String region) { + region(region); + } + + /** + * The AWS region associated with the access point. + */ + public Builder region(String region) { + this.region = region; + return this; + } + + public void setAccountId(String accountId) { + accountId(accountId); + } + + /** + * The AWS account ID associated with the access point. + */ + public Builder accountId(String accountId) { + this.accountId = accountId; + return this; + } + + public void setAccessPointName(String accessPointName) { + accessPointName(accessPointName); + } + + /** + * The name of the S3 access point. + */ + public Builder accessPointName(String accessPointName) { + this.accessPointName = accessPointName; + return this; + } + + /** + * The S3 resource this access point is associated with (contained within). Only {@link S3OutpostResource} + * is a valid parent resource types. + */ + public Builder parentS3Resource(S3Resource parentS3Resource) { + this.parentS3Resource = parentS3Resource; + return this; + } + + /** + * Builds an instance of {@link S3AccessPointResource}. + */ + @Override + public S3AccessPointResource build() { + return new S3AccessPointResource(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnConverter.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnConverter.java new file mode 100644 index 000000000000..a1980fbc6b6f --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnConverter.java @@ -0,0 +1,172 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import static software.amazon.awssdk.services.s3.internal.resource.S3ArnUtils.parseOutpostArn; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.arns.ArnResource; + +/** + * An implementation of {@link ArnConverter} that can be used to convert valid {@link Arn} representations of s3 + * resources into {@link S3Resource} objects. To fetch an instance of this class, use the singleton getter method + * {@link #create()}. + */ +@SdkInternalApi +public final class S3ArnConverter implements ArnConverter { + private static final S3ArnConverter INSTANCE = new S3ArnConverter(); + private static final Pattern OBJECT_AP_PATTERN = Pattern.compile("^([0-9a-zA-Z-]+)/object/(.*)$"); + + private S3ArnConverter() { + } + + /** + * Gets a static singleton instance of an {@link S3ArnConverter}. + * @return A static instance of an {@link S3ArnConverter}. + */ + public static S3ArnConverter create() { + return INSTANCE; + } + + /** + * Converts a valid ARN representation of an S3 resource into a {@link S3Resource} object. + * @param arn The ARN to convert. + * @return An {@link S3Resource} object as specified by the ARN. + * @throws IllegalArgumentException if the ARN is not a valid representation of an S3 resource supported by this + * SDK. 
+ */ + @Override + public S3Resource convertArn(Arn arn) { + if (isV1Arn(arn)) { + return convertV1Arn(arn); + } + S3ResourceType s3ResourceType; + + String resourceType = arn.resource().resourceType().orElseThrow(() -> new IllegalArgumentException("Unknown ARN type")); + + try { + s3ResourceType = + S3ResourceType.fromValue(resourceType); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Unknown ARN type '" + arn.resource().resourceType().get() + "'"); + } + + // OBJECT is a sub-resource under ACCESS_POINT and BUCKET and will not be recognized as a primary ARN resource + // type + switch (s3ResourceType) { + case ACCESS_POINT: + return parseS3AccessPointArn(arn); + case BUCKET: + return parseS3BucketArn(arn); + case OUTPOST: + return parseS3OutpostAccessPointArn(arn); + default: + throw new IllegalArgumentException("Unknown ARN type '" + s3ResourceType + "'"); + } + } + + private S3Resource convertV1Arn(Arn arn) { + String resource = arn.resourceAsString(); + String[] splitResource = resource.split("/", 2); + + if (splitResource.length > 1) { + // Bucket/key + S3BucketResource parentBucket = S3BucketResource.builder() + .partition(arn.partition()) + .bucketName(splitResource[0]) + .build(); + + return S3ObjectResource.builder() + .parentS3Resource(parentBucket) + .key(splitResource[1]) + .build(); + } else { + // Just bucket + return S3BucketResource.builder() + .partition(arn.partition()) + .bucketName(resource) + .build(); + } + } + + private S3BucketResource parseS3BucketArn(Arn arn) { + return S3BucketResource.builder() + .partition(arn.partition()) + .region(arn.region().orElse(null)) + .accountId(arn.accountId().orElse(null)) + .bucketName(arn.resource().resource()) + .build(); + } + + private S3Resource parseS3AccessPointArn(Arn arn) { + Matcher objectMatcher = OBJECT_AP_PATTERN.matcher(arn.resource().resource()); + + if (objectMatcher.matches()) { + // ARN is actually an object addressed through an access-point + String accessPointName = objectMatcher.group(1); + String objectKey = objectMatcher.group(2); + S3AccessPointResource parentResource = + S3AccessPointResource.builder() + .partition(arn.partition()) + .region(arn.region().orElse(null)) + .accountId(arn.accountId().orElse(null)) + .accessPointName(accessPointName) + .build(); + + return S3ObjectResource.builder() + .parentS3Resource(parentResource) + .key(objectKey) + .build(); + } + + return S3AccessPointResource.builder() + .partition(arn.partition()) + .region(arn.region().orElse(null)) + .accountId(arn.accountId().orElse(null)) + .accessPointName(arn.resource().resource()) + .build(); + } + + private S3Resource parseS3OutpostAccessPointArn(Arn arn) { + IntermediateOutpostResource intermediateOutpostResource = parseOutpostArn(arn); + ArnResource outpostSubResource = intermediateOutpostResource.outpostSubresource(); + + String resourceType = outpostSubResource.resourceType() + .orElseThrow(() -> new IllegalArgumentException("Unknown ARN type")); + + if (!OutpostResourceType.OUTPOST_ACCESS_POINT.toString().equals(resourceType)) { + throw new IllegalArgumentException("Unknown outpost ARN type '" + outpostSubResource.resourceType() + "'"); + } + + return S3AccessPointResource.builder() + .accessPointName(outpostSubResource.resource()) + .parentS3Resource(S3OutpostResource.builder() + .partition(arn.partition()) + .region(arn.region().orElse(null)) + .accountId(arn.accountId().orElse(null)) + .outpostId(intermediateOutpostResource.outpostId()) + .build()) + .build(); + } + + + private boolean 
isV1Arn(Arn arn) { + return !arn.accountId().isPresent() && !arn.region().isPresent(); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtils.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtils.java new file mode 100644 index 000000000000..ec5262419c5a --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtils.java @@ -0,0 +1,75 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.arns.ArnResource; +import software.amazon.awssdk.utils.StringUtils; + +@SdkInternalApi +public class S3ArnUtils { + private static final int OUTPOST_ID_START_INDEX = "outpost".length() + 1; + + private S3ArnUtils() { + } + + public static S3AccessPointResource parseS3AccessPointArn(Arn arn) { + return S3AccessPointResource.builder() + .partition(arn.partition()) + .region(arn.region().orElse(null)) + .accountId(arn.accountId().orElse(null)) + .accessPointName(arn.resource().resource()) + .build(); + } + + public static IntermediateOutpostResource parseOutpostArn(Arn arn) { + String resource = arn.resourceAsString(); + + Integer outpostIdEndIndex = null; + + for (int i = OUTPOST_ID_START_INDEX; i < resource.length(); ++i) { + char ch = resource.charAt(i); + + if (ch == ':' || ch == '/') { + outpostIdEndIndex = i; + break; + } + } + + if (outpostIdEndIndex == null) { + throw new IllegalArgumentException("Invalid format for S3 outpost ARN, missing outpostId"); + } + + String outpostId = resource.substring(OUTPOST_ID_START_INDEX, outpostIdEndIndex); + + if (StringUtils.isEmpty(outpostId)) { + throw new IllegalArgumentException("Invalid format for S3 outpost ARN, missing outpostId"); + } + + String subresource = resource.substring(outpostIdEndIndex + 1); + + if (StringUtils.isEmpty(subresource)) { + throw new IllegalArgumentException("Invalid format for S3 outpost ARN"); + } + + return IntermediateOutpostResource.builder() + .outpostId(outpostId) + .outpostSubresource(ArnResource.fromString(subresource)) + .build(); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3BucketResource.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3BucketResource.java new file mode 100644 index 000000000000..92a00129fd45 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3BucketResource.java @@ -0,0 +1,207 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * An {@link S3Resource} that represents an S3 bucket. + */ +@SdkInternalApi +public final class S3BucketResource + implements S3Resource, ToCopyableBuilder { + + private static final S3ResourceType S3_RESOURCE_TYPE = S3ResourceType.BUCKET; + + private final String partition; + private final String region; + private final String accountId; + private final String bucketName; + + private S3BucketResource(Builder b) { + this.bucketName = Validate.paramNotBlank(b.bucketName, "bucketName"); + this.partition = b.partition; + this.region = b.region; + this.accountId = b.accountId; + } + + /** + * Get a new builder for this class. + * @return A newly initialized instance of a builder. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the resource type for this bucket. + * @return This will always return "bucket_name". + */ + @Override + public String type() { + return S3_RESOURCE_TYPE.toString(); + } + + /** + * Gets the AWS partition name associated with this bucket (e.g.: 'aws') if one has been specified. + * @return the optional name of the partition or empty if it has not been specified. + */ + @Override + public Optional partition() { + return Optional.ofNullable(this.partition); + } + + /** + * Gets the AWS region name associated with this bucket (e.g.: 'us-east-1') if one has been specified. + * @return the optional name of the region or empty if the region has not been specified (e.g. the resource is in + * the global namespace). + */ + @Override + public Optional region() { + return Optional.ofNullable(this.region); + } + + /** + * Gets the AWS account ID associated with this bucket if one has been specified. + * @return the optional AWS account ID or empty if the account ID has not been specified. + */ + @Override + public Optional accountId() { + return Optional.ofNullable(this.accountId); + } + + /** + * Gets the name of the bucket. + * @return the name of the bucket. + */ + public String bucketName() { + return this.bucketName; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + S3BucketResource that = (S3BucketResource) o; + + if (partition != null ? ! partition.equals(that.partition) : that.partition != null) { + return false; + } + if (region != null ? ! region.equals(that.region) : that.region != null) { + return false; + } + if (accountId != null ? ! accountId.equals(that.accountId) : that.accountId != null) { + return false; + } + return bucketName.equals(that.bucketName); + } + + @Override + public int hashCode() { + int result = partition != null ? partition.hashCode() : 0; + result = 31 * result + (region != null ? region.hashCode() : 0); + result = 31 * result + (accountId != null ? 
accountId.hashCode() : 0); + result = 31 * result + bucketName.hashCode(); + return result; + } + + @Override + public Builder toBuilder() { + return builder() + .partition(partition) + .region(region) + .accountId(accountId) + .bucketName(bucketName); + } + + /** + * A builder for {@link S3BucketResource} objects. + */ + public static final class Builder implements CopyableBuilder { + private String partition; + private String region; + private String accountId; + private String bucketName; + + private Builder() { + } + + public void setPartition(String partition) { + partition(partition); + } + + /** + * The AWS partition associated with the bucket. + */ + public Builder partition(String partition) { + this.partition = partition; + return this; + } + + public void setRegion(String region) { + region(region); + } + + /** + * The AWS region associated with the bucket. This property is optional. + */ + public Builder region(String region) { + this.region = region; + return this; + } + + public void setAccountId(String accountId) { + accountId(accountId); + } + + /** + * The AWS account ID associated with the bucket. This property is optional. + */ + public Builder accountId(String accountId) { + this.accountId = accountId; + return this; + } + + public void setBucketName(String bucketName) { + bucketName(bucketName); + } + + /** + * The name of the S3 bucket. + */ + public Builder bucketName(String bucketName) { + this.bucketName = bucketName; + return this; + } + + /** + * Builds an instance of {@link S3BucketResource}. + */ + @Override + public S3BucketResource build() { + return new S3BucketResource(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ObjectResource.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ObjectResource.java new file mode 100644 index 000000000000..d64344e06da3 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ObjectResource.java @@ -0,0 +1,165 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + +/** + * An {@link S3Resource} that represents an S3 object. + */ +@SdkInternalApi +public final class S3ObjectResource implements S3Resource { + + private static final S3ResourceType S3_RESOURCE_TYPE = S3ResourceType.OBJECT; + private final S3Resource parentS3Resource; + private final String key; + + private S3ObjectResource(Builder b) { + this.parentS3Resource = validateParentS3Resource(b.parentS3Resource); + this.key = Validate.paramNotBlank(b.key, "key"); + } + + /** + * Get a new builder for this class. + * @return A newly initialized instance of a builder. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the resource type for this S3 object. 
+ * @return This will always return "object". + */ + @Override + public String type() { + return S3_RESOURCE_TYPE.toString(); + } + + /** + * Gets the AWS partition name associated with the S3 object (e.g.: 'aws'). + * @return the name of the partition. + */ + @Override + public Optional partition() { + return parentS3Resource.partition(); + } + + /** + * Gets the AWS region name associated with the S3 object (e.g.: 'us-east-1'). + * @return the name of the region or null if the region has not been specified (e.g. the resource is in the + * global namespace). + */ + @Override + public Optional region() { + return parentS3Resource.region(); + } + + /** + * Gets the AWS account ID associated with the S3 object if it has been specified. + * @return the optional AWS account ID or empty if the account ID has not been specified. + */ + @Override + public Optional accountId() { + return parentS3Resource.accountId(); + } + + /** + * Gets the key of the S3 object. + * @return the key of the S3 object. + */ + public String key() { + return this.key; + } + + @Override + public Optional parentS3Resource() { + return Optional.of(parentS3Resource); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + S3ObjectResource that = (S3ObjectResource) o; + + if (parentS3Resource != null ? !parentS3Resource.equals(that.parentS3Resource) : that.parentS3Resource != null) { + return false; + } + return key != null ? key.equals(that.key) : that.key == null; + } + + @Override + public int hashCode() { + int result = parentS3Resource != null ? parentS3Resource.hashCode() : 0; + result = 31 * result + (key != null ? key.hashCode() : 0); + return result; + } + + private S3Resource validateParentS3Resource(S3Resource parentS3Resource) { + Validate.paramNotNull(parentS3Resource, "parentS3Resource"); + + if (!S3ResourceType.ACCESS_POINT.toString().equals(parentS3Resource.type()) + && !S3ResourceType.BUCKET.toString().equals(parentS3Resource.type())) { + throw new IllegalArgumentException("Invalid 'parentS3Resource' type. An S3 object resource must be " + + "associated with either a bucket or access-point parent resource."); + } + + return parentS3Resource; + } + + /** + * A builder for {@link S3ObjectResource} objects. + */ + public static final class Builder { + private S3Resource parentS3Resource; + private String key; + + private Builder() { + } + + /** + * The key of the S3 object. + */ + public Builder key(String key) { + this.key = key; + return this; + } + + /** + * The S3 resource this object is associated with (contained within). Only {@link S3BucketResource} and + * {@link S3AccessPointResource} are valid parent resource types. + */ + public Builder parentS3Resource(S3Resource parentS3Resource) { + this.parentS3Resource = parentS3Resource; + return this; + } + + /** + * Builds an instance of {@link S3BucketResource}. + */ + public S3ObjectResource build() { + return new S3ObjectResource(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3OutpostAccessPointBuilder.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3OutpostAccessPointBuilder.java new file mode 100644 index 000000000000..3dec92e039f7 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3OutpostAccessPointBuilder.java @@ -0,0 +1,88 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import static software.amazon.awssdk.utils.HostnameValidator.validateHostnameCompliant; + +import java.net.URI; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class is used to construct an endpoint for an S3 outpost access point. + */ +@SdkInternalApi +public final class S3OutpostAccessPointBuilder { + + private String accessPointName; + private String outpostId; + private String region; + private String accountId; + private String protocol; + private String domain; + + private S3OutpostAccessPointBuilder() { + } + + /** + * Create a new instance of this builder class. + */ + public static S3OutpostAccessPointBuilder create() { + return new S3OutpostAccessPointBuilder(); + } + + public S3OutpostAccessPointBuilder accessPointName(String accessPointName) { + this.accessPointName = accessPointName; + return this; + } + + public S3OutpostAccessPointBuilder region(String region) { + this.region = region; + return this; + } + + public S3OutpostAccessPointBuilder accountId(String accountId) { + this.accountId = accountId; + return this; + } + + public S3OutpostAccessPointBuilder outpostId(String outpostId) { + this.outpostId = outpostId; + return this; + } + + public S3OutpostAccessPointBuilder protocol(String protocol) { + this.protocol = protocol; + return this; + } + + public S3OutpostAccessPointBuilder domain(String domain) { + this.domain = domain; + return this; + } + + /** + * Generate an endpoint URI with no path that maps to the Outpost Access Point information stored in this builder. + */ + public URI toUri() { + validateHostnameCompliant(outpostId, "outpostId", "outpost ARN"); + validateHostnameCompliant(accountId, "accountId", "outpost ARN"); + validateHostnameCompliant(accessPointName, "accessPointName", "outpost ARN"); + + String uriString = String.format("%s://%s-%s.%s.s3-outposts.%s.%s", protocol, accessPointName, accountId, outpostId, + region, domain); + return URI.create(uriString); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3OutpostResource.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3OutpostResource.java new file mode 100644 index 000000000000..54fd37268dc1 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3OutpostResource.java @@ -0,0 +1,175 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + +/** + * An {@link S3Resource} that represents an S3 outpost resource + */ +@SdkInternalApi +public final class S3OutpostResource implements S3Resource { + + private final String partition; + private final String region; + private final String accountId; + private final String outpostId; + + private S3OutpostResource(Builder b) { + this.partition = Validate.paramNotBlank(b.partition, "partition"); + this.region = Validate.paramNotBlank(b.region, "region"); + this.accountId = Validate.paramNotBlank(b.accountId, "accountId"); + this.outpostId = Validate.paramNotBlank(b.outpostId, "outpostId"); + } + + /** + * Get a new builder for this class. + * @return A newly initialized instance of a builder. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the resource type for this access point. + * @return This will always return "accesspoint". + */ + @Override + public String type() { + return S3ResourceType.OUTPOST.toString(); + } + + /** + * Gets the AWS partition name associated with this access point (e.g.: 'aws'). + * @return the name of the partition. + */ + @Override + public Optional partition() { + return Optional.ofNullable(this.partition); + } + + /** + * Gets the AWS region name associated with this bucket (e.g.: 'us-east-1'). + * @return the name of the region. + */ + @Override + public Optional region() { + return Optional.ofNullable(this.region); + } + + /** + * Gets the AWS account ID associated with this bucket. + * @return the AWS account ID. + */ + @Override + public Optional accountId() { + return Optional.ofNullable(this.accountId); + } + + /** + * Gets the outpost ID + * @return the outpost ID. + */ + public String outpostId() { + return this.outpostId; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + S3OutpostResource that = (S3OutpostResource) o; + + if (partition != null ? !partition.equals(that.partition) : that.partition != null) { + return false; + } + if (region != null ? !region.equals(that.region) : that.region != null) { + return false; + } + if (accountId != null ? !accountId.equals(that.accountId) : that.accountId != null) { + return false; + } + return outpostId.equals(that.outpostId); + } + + @Override + public int hashCode() { + int result = partition != null ? partition.hashCode() : 0; + result = 31 * result + (region != null ? region.hashCode() : 0); + result = 31 * result + (accountId != null ? accountId.hashCode() : 0); + result = 31 * result + outpostId.hashCode(); + return result; + } + + /** + * A builder for {@link S3OutpostResource} objects. + */ + public static final class Builder { + private String outpostId; + private String partition; + private String region; + private String accountId; + + private Builder() { + } + + /** + * The AWS partition associated with the access point. + */ + public Builder partition(String partition) { + this.partition = partition; + return this; + } + + /** + * The AWS region associated with the access point. + */ + public Builder region(String region) { + this.region = region; + return this; + } + + /** + * The AWS account ID associated with the access point. 
+ */ + public Builder accountId(String accountId) { + this.accountId = accountId; + return this; + } + + /** + * The Id of the outpost + */ + public Builder outpostId(String outpostId) { + this.outpostId = outpostId; + return this; + } + + /** + * Builds an instance of {@link S3OutpostResource}. + */ + public S3OutpostResource build() { + return new S3OutpostResource(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3Resource.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3Resource.java new file mode 100644 index 000000000000..91f8ae93487c --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3Resource.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * A representation of an AWS S3 resource. See {@link S3ResourceType} for a list and description of all valid types. + */ +@SdkInternalApi +public interface S3Resource extends AwsResource { + /** + * Gets the type of S3 resource represented by this object (e.g.: 'bucket_name'). See {@link S3ResourceType} for + * a list and description of all valid types. + * @return the string name of the S3 resource type. + */ + String type(); + + /** + * Gets the optional parent resource. + * @return the optional parent resource. + */ + default Optional parentS3Resource() { + return Optional.empty(); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ResourceType.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ResourceType.java new file mode 100644 index 000000000000..48bed4aae022 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3ResourceType.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * An enum representing the types of resources supported by S3. Each resource type below will have a + * concrete implementation of {@link S3Resource}. + */ +@SdkInternalApi +public enum S3ResourceType { + /** + * A specific S3 bucket. Implemented by {@link S3BucketResource}. 
+ */ + BUCKET("bucket_name"), + /** + * An access point that fronts a bucket. Implemented by {@link S3AccessPointResource}. + */ + ACCESS_POINT("accesspoint"), + /** + * A specific S3 object (bucket and key). Implemented by {@link S3ObjectResource}. + */ + OBJECT("object"), + + /** + * An outpost access point. Implemented by {@link S3OutpostResource}. + */ + OUTPOST("outpost"); + + private final String value; + + S3ResourceType(String value) { + this.value = value; + } + + /** + * @return The canonical string value of this resource type. + */ + @Override + public String toString() { + return value; + } + + /** + * Use this in place of valueOf. + * + * @param value real value + * @return S3ResourceType corresponding to the value + * @throws IllegalArgumentException If the specified value does not map to one of the known values in this enum. + */ + public static S3ResourceType fromValue(String value) { + if (value == null || "".equals(value)) { + throw new IllegalArgumentException("Value cannot be null or empty!"); + } + + for (S3ResourceType enumEntry : S3ResourceType.values()) { + if (enumEntry.toString().equals(value)) { + return enumEntry; + } + } + + throw new IllegalArgumentException("Cannot create enum from " + value + " value!"); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/ProfileUseArnRegionProvider.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/ProfileUseArnRegionProvider.java new file mode 100644 index 000000000000..e3703abd9aa2 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/ProfileUseArnRegionProvider.java @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.usearnregion; + +import java.util.Optional; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.utils.StringUtils; + +/** + * Loads useArnRegion configuration from the {@link ProfileFile#defaultProfileFile()} using the default profile name. + */ +@SdkInternalApi +public final class ProfileUseArnRegionProvider implements UseArnRegionProvider { + /** + * Property name for specifying whether or not use arn region should be enabled. 
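    + * For example, it can be configured in a profile file as follows (profile name and value are illustrative):
    + * <pre>
    + * [default]
    + * s3_use_arn_region = true
    + * </pre>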
+ */ + private static final String AWS_USE_ARN_REGION = "s3_use_arn_region"; + + private final Supplier profileFile; + private final String profileName; + + private ProfileUseArnRegionProvider(Supplier profileFile, String profileName) { + this.profileFile = profileFile; + this.profileName = profileName; + } + + public static ProfileUseArnRegionProvider create() { + return new ProfileUseArnRegionProvider(ProfileFile::defaultProfileFile, + ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow()); + } + + public static ProfileUseArnRegionProvider create(ProfileFile profileFile, String profileName) { + return new ProfileUseArnRegionProvider(() -> profileFile, profileName); + } + + @Override + public Optional resolveUseArnRegion() { + return profileFile.get() + .profile(profileName) + .map(p -> p.properties().get(AWS_USE_ARN_REGION)) + .map(StringUtils::safeStringToBoolean); + } +} + diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/SystemsSettingsUseArnRegionProvider.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/SystemsSettingsUseArnRegionProvider.java new file mode 100644 index 000000000000..efbf213e7158 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/SystemsSettingsUseArnRegionProvider.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.usearnregion; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.s3.S3SystemSetting; + +/** + * {@link UseArnRegionProvider} implementation that loads userArnRegion configuration from system properties + * and environment variables. + */ +@SdkInternalApi +public final class SystemsSettingsUseArnRegionProvider implements UseArnRegionProvider { + + private SystemsSettingsUseArnRegionProvider() { + } + + public static SystemsSettingsUseArnRegionProvider create() { + return new SystemsSettingsUseArnRegionProvider(); + } + + @Override + public Optional resolveUseArnRegion() { + return S3SystemSetting.AWS_S3_USE_ARN_REGION.getBooleanValue(); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionProvider.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionProvider.java new file mode 100644 index 000000000000..b23d32113bb4 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionProvider.java @@ -0,0 +1,32 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.usearnregion; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Interface for loading useArnRegion configuration. + */ +@FunctionalInterface +@SdkInternalApi +public interface UseArnRegionProvider { + + /** + * @return whether use-arn-region is enabled, or empty if it is not configured. + */ + Optional resolveUseArnRegion(); +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionProviderChain.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionProviderChain.java new file mode 100644 index 000000000000..9149152b2971 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionProviderChain.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.usearnregion; + +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.utils.Logger; + +/** + * {@link UseArnRegionProvider} implementation that chains together multiple useArnRegion providers. + */ +@SdkInternalApi +public final class UseArnRegionProviderChain implements UseArnRegionProvider { + private static final Logger log = Logger.loggerFor(UseArnRegionProvider.class); + + private final List providers; + + private UseArnRegionProviderChain(List providers) { + this.providers = providers; + } + + /** + * Creates a default {@link UseArnRegionProviderChain}. + * + *

    + * AWS use arn region provider that looks for the useArnRegion in this order:
    + * <ol>
    + *     <li>Check the 'aws.useArnRegion' system property for the region.</li>
    + *     <li>Check the 'AWS_USE_ARN_REGION' environment variable for the region.</li>
    + * </ol>
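    + *
    + * For example, the chain can be queried directly; the {@code false} fallback below is purely illustrative for the
    + * case where nothing is configured:
    + * <pre>{@code
    + * UseArnRegionProviderChain chain = UseArnRegionProviderChain.create();
    + * boolean useArnRegion = chain.resolveUseArnRegion().orElse(false);
    + * }</pre>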
    + */ + public static UseArnRegionProviderChain create() { + return create(ProfileFile.defaultProfileFile(), + ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow()); + } + + public static UseArnRegionProviderChain create(ProfileFile profileFile, String profileName) { + return new UseArnRegionProviderChain(Arrays.asList(SystemsSettingsUseArnRegionProvider.create(), + ProfileUseArnRegionProvider.create(profileFile, profileName))); + } + + @Override + public Optional resolveUseArnRegion() { + for (UseArnRegionProvider provider : providers) { + try { + Optional useArnRegion = provider.resolveUseArnRegion(); + if (useArnRegion.isPresent()) { + return useArnRegion; + } + } catch (Exception ex) { + log.warn(() -> "Failed to retrieve useArnRegion from " + provider); + } + } + return Optional.empty(); + } +} \ No newline at end of file diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/model/GetUrlRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/model/GetUrlRequest.java index 104ffbc91343..5ca8a3a7f61b 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/model/GetUrlRequest.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/model/GetUrlRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -42,20 +42,29 @@ @SdkPublicApi public final class GetUrlRequest implements SdkPojo, ToCopyableBuilder { private static final SdkField BUCKET_FIELD = SdkField - .builder(MarshallingType.STRING) + .builder(MarshallingType.STRING) .getter(getter(GetUrlRequest::bucket)) .setter(setter(Builder::bucket)) .traits(LocationTrait.builder().location(MarshallLocation.PATH).locationName("Bucket") .unmarshallLocationName("Bucket").build()).build(); private static final SdkField KEY_FIELD = SdkField - .builder(MarshallingType.STRING) + .builder(MarshallingType.STRING) .getter(getter(GetUrlRequest::key)) .setter(setter(Builder::key)) .traits(LocationTrait.builder().location(MarshallLocation.GREEDY_PATH).locationName("Key") .unmarshallLocationName("Key").build()).build(); - private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(BUCKET_FIELD, KEY_FIELD)); + private static final SdkField VERSION_ID_FIELD = SdkField + .builder(MarshallingType.STRING) + .memberName("VersionId") + .getter(getter(GetUrlRequest::versionId)) + .setter(setter(Builder::versionId)) + .traits(LocationTrait.builder().location(MarshallLocation.QUERY_PARAM).locationName("versionId") + .unmarshallLocationName("versionId").build()).build(); + + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(BUCKET_FIELD, KEY_FIELD, + VERSION_ID_FIELD)); private final String bucket; @@ -65,11 +74,14 @@ public final class GetUrlRequest implements SdkPojo, ToCopyableBuilder Optional getValueForField(String fieldName, Class clazz) { return Optional.ofNullable(clazz.cast(bucket())); case "Key": return Optional.ofNullable(clazz.cast(key())); + case "VersionId": + return Optional.ofNullable(clazz.cast(versionId())); default: return Optional.empty(); } @@ -152,6 +175,15 @@ public interface Builder extends SdkPojo, CopyableBuilder + * + * For example: if Alice has access to an S3 object, and she wants to temporarily share access to that object with Bob, she + * 
can generate a pre-signed {@link GetObjectRequest} to securely share with Bob so that he can download the object without + * requiring access to Alice's credentials. + *

    + * + * Signature Duration + *

    + * + * Pre-signed requests are only valid for a finite period of time, referred to as the signature duration. This signature + * duration is configured when the request is generated, and cannot be longer than 7 days. Attempting to generate a signature + * longer than 7 days in the future will fail at generation time. Attempting to use a pre-signed request after the signature + * duration has passed will result in an access denied response from the service. + *

    + * + * Example Usage + *

    + * + *

    + * {@code
    + *     // Create an S3Presigner using the default region and credentials.
    + *     // This is usually done at application startup, because creating a presigner can be expensive.
    + *     S3Presigner presigner = S3Presigner.create();
    + *
    + *     // Create a GetObjectRequest to be pre-signed
    + *     GetObjectRequest getObjectRequest =
    + *             GetObjectRequest.builder()
    + *                             .bucket("my-bucket")
    + *                             .key("my-key")
    + *                             .build();
    + *
    + *     // Create a GetObjectPresignRequest to specify the signature duration
    + *     GetObjectPresignRequest getObjectPresignRequest =
    + *         GetObjectPresignRequest.builder()
    + *                                .signatureDuration(Duration.ofMinutes(10))
    + *                                .getObjectRequest(getObjectRequest)
    + *                                .build();
    + *
    + *     // Generate the presigned request
    + *     PresignedGetObjectRequest presignedGetObjectRequest =
    + *         presigner.presignGetObject(getObjectPresignRequest);
    + *
    + *     // Log the presigned URL, for example.
    + *     System.out.println("Presigned URL: " + presignedGetObjectRequest.url());
    + *
    + *     // It is recommended to close the S3Presigner when it is done being used, because some credential
    + *     // providers (e.g. if your AWS profile is configured to assume an STS role) require system resources
    + *     // that need to be freed. If you are using one S3Presigner per application (as recommended), this
    + *     // usually is not needed.
    + *     presigner.close();
    + * }
    + * 
    + *

    + * + * Browser Compatibility + *

    + * + * Some pre-signed requests can be executed by a web browser. These "browser compatible" pre-signed requests + * do not require the customer to send anything other than a "host" header when performing an HTTP GET against + * the pre-signed URL. + *

    + * + * Whether a pre-signed request is "browser compatible" can be determined by checking the + * {@link PresignedRequest#isBrowserExecutable()} flag. It is recommended to always check this flag when the pre-signed + * request needs to be executed by a browser, because some request fields will result in the pre-signed request not + * being browser-compatible. + *
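    + *
    + * For example (reusing the {@code presignedGetObjectRequest} from the usage example above; the printed message is
    + * illustrative):
    + * <pre>{@code
    + * if (presignedGetObjectRequest.isBrowserExecutable()) {
    + *     System.out.println("This URL can be opened directly in a browser: " + presignedGetObjectRequest.url());
    + * }
    + * }</pre>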

    + * + * Executing a Pre-Signed Request from Java code + *

    + * + * Browser-compatible requests (see above) can be executed using a web browser. All pre-signed requests can be executed + * from Java code. This documentation describes two methods for executing a pre-signed request: (1) using the JDK's + * {@link URLConnection} class, (2) using an SDK synchronous {@link SdkHttpClient} class. + * + *

    + * Using {@code URLConnection}: + * + *

    + *

    + *     // Create a pre-signed request using one of the "presign" methods on S3Presigner
    + *     PresignedRequest presignedRequest = ...;
    + *
    + *     // Create a JDK HttpURLConnection for communicating with S3
    + *     HttpURLConnection connection = (HttpURLConnection) presignedRequest.url().openConnection();
    + *
    + *     // Specify any headers that are needed by the service (not needed when isBrowserExecutable is true)
    + *     presignedRequest.httpRequest().headers().forEach((header, values) -> {
    + *         values.forEach(value -> {
    + *             connection.addRequestProperty(header, value);
    + *         });
    + *     });
    + *
    + *     // Send any request payload that is needed by the service (not needed when isBrowserExecutable is true)
    + *     if (presignedRequest.signedPayload().isPresent()) {
    + *         connection.setDoOutput(true);
    + *         try (InputStream signedPayload = presignedRequest.signedPayload().get().asInputStream();
    + *              OutputStream httpOutputStream = connection.getOutputStream()) {
    + *             IoUtils.copy(signedPayload, httpOutputStream);
    + *         }
    + *     }
    + *
    + *     // Download the result of executing the request
    + *     try (InputStream content = connection.getInputStream()) {
    + *         System.out.println("Service returned response: ");
    + *         IoUtils.copy(content, System.out);
    + *     }
    + * 
    + *

    + * Using {@code SdkHttpClient}: + *

    + * + *

    + *     // Create a pre-signed request using one of the "presign" methods on S3Presigner
    + *     PresignedRequest presignedRequest = ...;
    + *
    + *     // Create an SdkHttpClient using one of the implementations provided by the SDK
    + *     SdkHttpClient httpClient = ApacheHttpClient.builder().build(); // or UrlConnectionHttpClient.create()
    + *
    + *     // Specify any request payload that is needed by the service (not needed when isBrowserExecutable is true)
    + *     ContentStreamProvider requestPayload =
    + *         presignedRequest.signedPayload()
    + *                         .map(SdkBytes::asContentStreamProvider)
    + *                         .orElse(null);
    + *
    + *     // Create the request for sending to the service
    + *     HttpExecuteRequest request =
    + *         HttpExecuteRequest.builder()
    + *                           .request(presignedRequest.httpRequest())
    + *                           .contentStreamProvider(requestPayload)
    + *                           .build();
    + *
    + *     // Call the service
    + *     HttpExecuteResponse response = httpClient.prepareRequest(request).call();
    + *
    + *     // Download the result of executing the request
    + *     if (response.responseBody().isPresent()) {
    + *         try (InputStream responseStream = response.responseBody().get()) {
    + *             System.out.println("Service returned response: ");
    + *             IoUtils.copy(responseStream, System.out);
    + *         }
    + *     }
    + * 
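Both objects above hold system resources; a small follow-up sketch, assuming no further requests will be presigned or executed:

    httpClient.close();
    presigner.close();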
    + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public interface S3Presigner extends SdkPresigner { + /** + * Create an {@link S3Presigner} with default configuration. The region will be loaded from the + * {@link DefaultAwsRegionProviderChain} and credentials will be loaded from the {@link DefaultCredentialsProvider}. + *

    + * This is usually done at application startup, because creating a presigner can be expensive. It is recommended to + * {@link #close()} the {@code S3Presigner} when it is done being used. + */ + static S3Presigner create() { + return builder().build(); + } + + /** + * Create an {@link S3Presigner.Builder} that can be used to configure and create a {@link S3Presigner}. + *

    + * This is usually done at application startup, because creating a presigner can be expensive. It is recommended to + * {@link #close()} the {@code S3Presigner} when it is done being used. + */ + static Builder builder() { + return DefaultS3Presigner.builder(); + } + + /** + * Presign a {@link GetObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * + * Example Usage + *

    + * + *

    +     * {@code
    +     *     S3Presigner presigner = ...;
    +     *
    +     *     // Create a GetObjectRequest to be pre-signed
    +     *     GetObjectRequest getObjectRequest = ...;
    +     *
    +     *     // Create a GetObjectPresignRequest to specify the signature duration
    +     *     GetObjectPresignRequest getObjectPresignRequest =
    +     *         GetObjectPresignRequest.builder()
    +     *                                .signatureDuration(Duration.ofMinutes(10))
    +     *                                .getObjectRequest(getObjectRequest)
    +     *                                .build();
    +     *
    +     *     // Generate the presigned request
    +     *     PresignedGetObjectRequest presignedGetObjectRequest =
    +     *         presigner.presignGetObject(getObjectPresignRequest);
    +     *
    +     *     if (presignedGetObjectRequest.isBrowserExecutable())
    +     *         System.out.println("The pre-signed request can be executed using a web browser by " +
    +     *                            "visiting the following URL: " + presignedGetObjectRequest.url());
    +     *     else
    +     *         System.out.println("The pre-signed request has an HTTP method, headers or a payload " +
    +     *                            "that prohibits it from being executed by a web browser. See the S3Presigner " +
    +     *                            "class-level documentation for an example of how to execute this pre-signed " +
    +     *                            "request from Java code.");
    +     * }
    +     * 
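The interface also provides a Consumer-based shorthand for this call (see the default methods below). A minimal sketch of the equivalent invocation, with the bucket and key as placeholder assumptions:

    PresignedGetObjectRequest presigned =
        presigner.presignGetObject(p -> p.signatureDuration(Duration.ofMinutes(10))
                                         .getObjectRequest(get -> get.bucket("my-bucket")
                                                                     .key("my-key")));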
    + */ + PresignedGetObjectRequest presignGetObject(GetObjectPresignRequest request); + + /** + * Presign a {@link GetObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * This is a shorter method of invoking {@link #presignGetObject(GetObjectPresignRequest)} without needing + * to call {@code GetObjectPresignRequest.builder()} or {@code .build()}. + * + * @see #presignGetObject(GetObjectPresignRequest) + */ + default PresignedGetObjectRequest presignGetObject(Consumer request) { + GetObjectPresignRequest.Builder builder = GetObjectPresignRequest.builder(); + request.accept(builder); + return presignGetObject(builder.build()); + } + + /** + * Presign a {@link PutObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * + * Example Usage + *

    + * + *

    +     * {@code
    +     *     S3Presigner presigner = ...;
    +     *
    +     *     // Create a PutObjectRequest to be pre-signed
    +     *     PutObjectRequest putObjectRequest = ...;
    +     *
    +     *     // Create a PutObjectPresignRequest to specify the signature duration
    +     *     PutObjectPresignRequest putObjectPresignRequest =
    +     *         PutObjectPresignRequest.builder()
    +     *                                .signatureDuration(Duration.ofMinutes(10))
    +     *                                .putObjectRequest(putObjectRequest)
    +     *                                .build();
    +     *
    +     *     // Generate the presigned request
    +     *     PresignedPutObjectRequest presignedPutObjectRequest =
    +     *         presigner.presignPutObject(putObjectPresignRequest);
    +     * }
    +     * 
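Executing the presigned PUT means sending the object bytes as the request body. A rough sketch using the JDK's HttpURLConnection; the objectBytes variable and the omitted exception handling are simplifying assumptions for illustration:

    HttpURLConnection connection = (HttpURLConnection) presignedPutObjectRequest.url().openConnection();
    connection.setRequestMethod("PUT");
    connection.setDoOutput(true);
    // Replay any headers that were signed into the request.
    presignedPutObjectRequest.httpRequest().headers().forEach((name, values) ->
            values.forEach(value -> connection.addRequestProperty(name, value)));
    try (OutputStream body = connection.getOutputStream()) {
        body.write(objectBytes);
    }
    System.out.println("Service responded with HTTP " + connection.getResponseCode());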
    + */ + PresignedPutObjectRequest presignPutObject(PutObjectPresignRequest request); + + /** + * Presign a {@link PutObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * This is a shorter method of invoking {@link #presignPutObject(PutObjectPresignRequest)} without needing + * to call {@code PutObjectPresignRequest.builder()} or {@code .build()}. + * + * @see #presignPutObject(PutObjectPresignRequest) + */ + default PresignedPutObjectRequest presignPutObject(Consumer request) { + PutObjectPresignRequest.Builder builder = PutObjectPresignRequest.builder(); + request.accept(builder); + return presignPutObject(builder.build()); + } + + /** + * Presign a {@link CreateMultipartUploadRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * + * Example Usage + *

    + * + *

    +     * {@code
    +     *     S3Presigner presigner = ...;
    +     *
    +     *     // Create a CreateMultipartUploadRequest to be pre-signed
    +     *     CreateMultipartUploadRequest createMultipartUploadRequest = ...;
    +     *
    +     *     // Create a CreateMultipartUploadPresignRequest to specify the signature duration
    +     *     CreateMultipartUploadPresignRequest createMultipartUploadPresignRequest =
    +     *         CreateMultipartUploadPresignRequest.builder()
    +     *                                            .signatureDuration(Duration.ofMinutes(10))
    +     *                                            .createMultipartUploadRequest(createMultipartUploadRequest)
    +     *                                            .build();
    +     *
    +     *     // Generate the presigned request
    +     *     PresignedCreateMultipartUploadRequest presignedCreateMultipartUploadRequest =
    +     *         presigner.presignCreateMultipartUpload(createMultipartUploadPresignRequest);
    +     * }
    +     * 
    + */ + PresignedCreateMultipartUploadRequest presignCreateMultipartUpload(CreateMultipartUploadPresignRequest request); + + /** + * Presign a {@link CreateMultipartUploadRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * This is a shorter method of invoking {@link #presignCreateMultipartUpload(CreateMultipartUploadPresignRequest)} without + * needing to call {@code CreateMultipartUploadPresignRequest.builder()} or {@code .build()}. + * + * @see #presignCreateMultipartUpload(CreateMultipartUploadPresignRequest) + */ + default PresignedCreateMultipartUploadRequest presignCreateMultipartUpload( + Consumer request) { + CreateMultipartUploadPresignRequest.Builder builder = CreateMultipartUploadPresignRequest.builder(); + request.accept(builder); + return presignCreateMultipartUpload(builder.build()); + } + + /** + * Presign a {@link UploadPartRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * + * Example Usage + *

    + * + *

    +     * {@code
    +     *     S3Presigner presigner = ...;
    +     *
    +     *     // Create an UploadPartRequest to be pre-signed
    +     *     UploadPartRequest uploadPartRequest = ...;
    +     *
    +     *     // Create an UploadPartPresignRequest to specify the signature duration
    +     *     UploadPartPresignRequest uploadPartPresignRequest =
    +     *         UploadPartPresignRequest.builder()
    +     *                                 .signatureDuration(Duration.ofMinutes(10))
    +     *                                 .uploadPartRequest(uploadPartRequest)
    +     *                                 .build();
    +     *
    +     *     // Generate the presigned request
    +     *     PresignedUploadPartRequest presignedUploadPartRequest =
    +     *         presigner.presignUploadPart(uploadPartPresignRequest);
    +     * }
    +     * 
    + */ + PresignedUploadPartRequest presignUploadPart(UploadPartPresignRequest request); + + /** + * Presign a {@link UploadPartRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * This is a shorter method of invoking {@link #presignUploadPart(UploadPartPresignRequest)} without needing + * to call {@code UploadPartPresignRequest.builder()} or {@code .build()}. + * + * @see #presignUploadPart(UploadPartPresignRequest) + */ + default PresignedUploadPartRequest presignUploadPart(Consumer request) { + UploadPartPresignRequest.Builder builder = UploadPartPresignRequest.builder(); + request.accept(builder); + return presignUploadPart(builder.build()); + } + + /** + * Presign a {@link CompleteMultipartUploadRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * + * Example Usage + *

    + * + *

    +     * {@code
    +     *     S3Presigner presigner = ...;
    +     *
    +     *     // Create a CompleteMultipartUploadRequest to be pre-signed
    +     *     CompleteMultipartUploadRequest completeMultipartUploadRequest = ...;
    +     *
    +     *     // Create a CompleteMultipartUploadPresignRequest to specify the signature duration
    +     *     CompleteMultipartUploadPresignRequest completeMultipartUploadPresignRequest =
    +     *         CompleteMultipartUploadPresignRequest.builder()
    +     *                                              .signatureDuration(Duration.ofMinutes(10))
    +     *                                              .completeMultipartUploadRequest(completeMultipartUploadRequest)
    +     *                                              .build();
    +     *
    +     *     // Generate the presigned request
    +     *     PresignedCompleteMultipartUploadRequest presignedCompleteMultipartUploadRequest =
    +     *         presigner.presignCompleteMultipartUpload(completeMultipartUploadPresignRequest);
    +     * }
    +     * 
    + */ + PresignedCompleteMultipartUploadRequest presignCompleteMultipartUpload(CompleteMultipartUploadPresignRequest request); + + /** + * Presign a {@link CompleteMultipartUploadRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * This is a shorter method of invoking {@link #presignCompleteMultipartUpload(CompleteMultipartUploadPresignRequest)} without + * needing to call {@code CompleteMultipartUploadPresignRequest.builder()} or {@code .build()}. + * + * @see #presignCompleteMultipartUpload(CompleteMultipartUploadPresignRequest) + */ + default PresignedCompleteMultipartUploadRequest presignCompleteMultipartUpload( + Consumer request) { + CompleteMultipartUploadPresignRequest.Builder builder = CompleteMultipartUploadPresignRequest.builder(); + request.accept(builder); + return presignCompleteMultipartUpload(builder.build()); + } + + /** + * Presign a {@link AbortMultipartUploadRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * + * Example Usage + *

    + * + *

    +     * {@code
    +     *     S3Presigner presigner = ...;
    +     *
    +     *     // Create an AbortMultipartUploadRequest to be pre-signed
    +     *     AbortMultipartUploadRequest abortMultipartUploadRequest = ...;
    +     *
    +     *     // Create an AbortMultipartUploadPresignRequest to specify the signature duration
    +     *     AbortMultipartUploadPresignRequest abortMultipartUploadPresignRequest =
    +     *         AbortMultipartUploadPresignRequest.builder()
    +     *                                           .signatureDuration(Duration.ofMinutes(10))
    +     *                                           .abortMultipartUploadRequest(abortMultipartUploadRequest)
    +     *                                           .build();
    +     *
    +     *     // Generate the presigned request
    +     *     PresignedAbortMultipartUploadRequest presignedAbortMultipartUploadRequest =
    +     *         presigner.presignAbortMultipartUpload(abortMultipartUploadPresignRequest);
    +     * }
    +     * 
    + */ + PresignedAbortMultipartUploadRequest presignAbortMultipartUpload(AbortMultipartUploadPresignRequest request); + + /** + * Presign a {@link AbortMultipartUploadRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * This is a shorter method of invoking {@link #presignAbortMultipartUpload(AbortMultipartUploadPresignRequest)} without + * needing to call {@code AbortMultipartUploadPresignRequest.builder()} or {@code .build()}. + * + * @see #presignAbortMultipartUpload(AbortMultipartUploadPresignRequest) + */ + default PresignedAbortMultipartUploadRequest presignAbortMultipartUpload( + Consumer request) { + AbortMultipartUploadPresignRequest.Builder builder = AbortMultipartUploadPresignRequest.builder(); + request.accept(builder); + return presignAbortMultipartUpload(builder.build()); + } + + /** + * A builder for creating {@link S3Presigner}s. Created using {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + interface Builder extends SdkPresigner.Builder { + /** + * Allows providing a custom S3 serviceConfiguration by providing a {@link S3Configuration} object; + * + * Note: chunkedEncodingEnabled and checksumValidationEnabled do not apply to presigned requests. + * + * @param serviceConfiguration {@link S3Configuration} + * @return this Builder + */ + Builder serviceConfiguration(S3Configuration serviceConfiguration); + + @Override + Builder region(Region region); + + @Override + Builder credentialsProvider(AwsCredentialsProvider credentialsProvider); + + @Override + Builder endpointOverride(URI endpointOverride); + + @Override + S3Presigner build(); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/AbortMultipartUploadPresignRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/AbortMultipartUploadPresignRequest.java new file mode 100644 index 000000000000..5198c03ca4fc --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/AbortMultipartUploadPresignRequest.java @@ -0,0 +1,152 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Duration; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A request to pre-sign a {@link AbortMultipartUploadRequest} so that it can be executed at a later time without requiring + * additional signing or authentication. 
+ * + * @see S3Presigner#presignAbortMultipartUpload(AbortMultipartUploadPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class AbortMultipartUploadPresignRequest + extends PresignRequest + implements ToCopyableBuilder { + private final AbortMultipartUploadRequest abortMultipartUploadRequest; + + private AbortMultipartUploadPresignRequest(DefaultBuilder builder) { + super(builder); + this.abortMultipartUploadRequest = Validate.notNull(builder.abortMultipartUploadRequest, "abortMultipartUploadRequest"); + } + + /** + * Create a builder that can be used to create a {@link AbortMultipartUploadPresignRequest}. + * + * @see S3Presigner#presignAbortMultipartUpload(AbortMultipartUploadPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Retrieve the {@link AbortMultipartUploadRequest} that should be presigned. + */ + public AbortMultipartUploadRequest abortMultipartUploadRequest() { + return abortMultipartUploadRequest; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + AbortMultipartUploadPresignRequest that = (AbortMultipartUploadPresignRequest) o; + + return abortMultipartUploadRequest.equals(that.abortMultipartUploadRequest); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + abortMultipartUploadRequest.hashCode(); + return result; + } + + /** + * A builder for a {@link AbortMultipartUploadPresignRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder + extends PresignRequest.Builder, + CopyableBuilder { + /** + * Configure the {@link AbortMultipartUploadRequest} that should be presigned. + */ + Builder abortMultipartUploadRequest(AbortMultipartUploadRequest abortMultipartUploadRequest); + + /** + * Configure the {@link AbortMultipartUploadRequest} that should be presigned. + *

    + * This is a convenience method for invoking {@link #abortMultipartUploadRequest(AbortMultipartUploadRequest)} + * without needing to invoke {@code AbortMultipartUploadRequest.builder()} or {@code build()}. + */ + default Builder abortMultipartUploadRequest(Consumer abortMultipartUploadRequest) { + AbortMultipartUploadRequest.Builder builder = AbortMultipartUploadRequest.builder(); + abortMultipartUploadRequest.accept(builder); + return abortMultipartUploadRequest(builder.build()); + } + + @Override + Builder signatureDuration(Duration signatureDuration); + + @Override + AbortMultipartUploadPresignRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignRequest.DefaultBuilder implements Builder { + private AbortMultipartUploadRequest abortMultipartUploadRequest; + + private DefaultBuilder() { + } + + private DefaultBuilder(AbortMultipartUploadPresignRequest request) { + super(request); + this.abortMultipartUploadRequest = request.abortMultipartUploadRequest; + } + + @Override + public Builder abortMultipartUploadRequest(AbortMultipartUploadRequest abortMultipartUploadRequest) { + this.abortMultipartUploadRequest = abortMultipartUploadRequest; + return this; + } + + @Override + public AbortMultipartUploadPresignRequest build() { + return new AbortMultipartUploadPresignRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/CompleteMultipartUploadPresignRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/CompleteMultipartUploadPresignRequest.java new file mode 100644 index 000000000000..012d72222d03 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/CompleteMultipartUploadPresignRequest.java @@ -0,0 +1,154 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Duration; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A request to pre-sign a {@link CompleteMultipartUploadRequest} so that it can be executed at a later time without requiring + * additional signing or authentication. 
+ * + * @see S3Presigner#presignCompleteMultipartUpload(CompleteMultipartUploadPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class CompleteMultipartUploadPresignRequest + extends PresignRequest + implements ToCopyableBuilder { + private final CompleteMultipartUploadRequest completeMultipartUploadRequest; + + private CompleteMultipartUploadPresignRequest(DefaultBuilder builder) { + super(builder); + this.completeMultipartUploadRequest = Validate.notNull(builder.completeMultipartUploadRequest, + "completeMultipartUploadRequest"); + } + + /** + * Create a builder that can be used to create a {@link CompleteMultipartUploadPresignRequest}. + * + * @see S3Presigner#presignCompleteMultipartUpload(CompleteMultipartUploadPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Retrieve the {@link CompleteMultipartUploadRequest} that should be presigned. + */ + public CompleteMultipartUploadRequest completeMultipartUploadRequest() { + return completeMultipartUploadRequest; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + CompleteMultipartUploadPresignRequest that = (CompleteMultipartUploadPresignRequest) o; + + return completeMultipartUploadRequest.equals(that.completeMultipartUploadRequest); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + completeMultipartUploadRequest.hashCode(); + return result; + } + + /** + * A builder for a {@link CompleteMultipartUploadPresignRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder + extends PresignRequest.Builder, + CopyableBuilder { + /** + * Configure the {@link CompleteMultipartUploadRequest} that should be presigned. + */ + Builder completeMultipartUploadRequest(CompleteMultipartUploadRequest completeMultipartUploadRequest); + + /** + * Configure the {@link CompleteMultipartUploadRequest} that should be presigned. + *

    + * This is a convenience method for invoking {@link #completeMultipartUploadRequest(CompleteMultipartUploadRequest)} + * without needing to invoke {@code CompleteMultipartUploadRequest.builder()} or {@code build()}. + */ + default Builder completeMultipartUploadRequest( + Consumer completeMultipartUploadRequest) { + CompleteMultipartUploadRequest.Builder builder = CompleteMultipartUploadRequest.builder(); + completeMultipartUploadRequest.accept(builder); + return completeMultipartUploadRequest(builder.build()); + } + + @Override + Builder signatureDuration(Duration signatureDuration); + + @Override + CompleteMultipartUploadPresignRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignRequest.DefaultBuilder implements Builder { + private CompleteMultipartUploadRequest completeMultipartUploadRequest; + + private DefaultBuilder() { + } + + private DefaultBuilder(CompleteMultipartUploadPresignRequest request) { + super(request); + this.completeMultipartUploadRequest = request.completeMultipartUploadRequest; + } + + @Override + public Builder completeMultipartUploadRequest(CompleteMultipartUploadRequest completeMultipartUploadRequest) { + this.completeMultipartUploadRequest = completeMultipartUploadRequest; + return this; + } + + @Override + public CompleteMultipartUploadPresignRequest build() { + return new CompleteMultipartUploadPresignRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/CreateMultipartUploadPresignRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/CreateMultipartUploadPresignRequest.java new file mode 100644 index 000000000000..06531b295cc1 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/CreateMultipartUploadPresignRequest.java @@ -0,0 +1,154 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Duration; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A request to pre-sign a {@link CreateMultipartUploadRequest} so that it can be executed at a later time without requiring + * additional signing or authentication. 
+ * + * @see S3Presigner#presignCreateMultipartUpload(CreateMultipartUploadPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class CreateMultipartUploadPresignRequest + extends PresignRequest + implements ToCopyableBuilder { + private final CreateMultipartUploadRequest createMultipartUploadRequest; + + private CreateMultipartUploadPresignRequest(DefaultBuilder builder) { + super(builder); + this.createMultipartUploadRequest = Validate.notNull(builder.createMultipartUploadRequest, + "createMultipartUploadRequest"); + } + + /** + * Create a builder that can be used to create a {@link CreateMultipartUploadPresignRequest}. + * + * @see S3Presigner#presignCreateMultipartUpload(CreateMultipartUploadPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Retrieve the {@link CreateMultipartUploadRequest} that should be presigned. + */ + public CreateMultipartUploadRequest createMultipartUploadRequest() { + return createMultipartUploadRequest; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + CreateMultipartUploadPresignRequest that = (CreateMultipartUploadPresignRequest) o; + + return createMultipartUploadRequest.equals(that.createMultipartUploadRequest); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + createMultipartUploadRequest.hashCode(); + return result; + } + + /** + * A builder for a {@link CreateMultipartUploadPresignRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder + extends PresignRequest.Builder, + CopyableBuilder { + /** + * Configure the {@link CreateMultipartUploadRequest} that should be presigned. + */ + Builder createMultipartUploadRequest(CreateMultipartUploadRequest createMultipartUploadRequest); + + /** + * Configure the {@link CreateMultipartUploadRequest} that should be presigned. + *

    + * This is a convenience method for invoking {@link #createMultipartUploadRequest(CreateMultipartUploadRequest)} + * without needing to invoke {@code CreateMultipartUploadRequest.builder()} or {@code build()}. + */ + default Builder createMultipartUploadRequest( + Consumer createMultipartUploadRequest) { + CreateMultipartUploadRequest.Builder builder = CreateMultipartUploadRequest.builder(); + createMultipartUploadRequest.accept(builder); + return createMultipartUploadRequest(builder.build()); + } + + @Override + Builder signatureDuration(Duration signatureDuration); + + @Override + CreateMultipartUploadPresignRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignRequest.DefaultBuilder implements Builder { + private CreateMultipartUploadRequest createMultipartUploadRequest; + + private DefaultBuilder() { + } + + private DefaultBuilder(CreateMultipartUploadPresignRequest request) { + super(request); + this.createMultipartUploadRequest = request.createMultipartUploadRequest; + } + + @Override + public Builder createMultipartUploadRequest(CreateMultipartUploadRequest createMultipartUploadRequest) { + this.createMultipartUploadRequest = createMultipartUploadRequest; + return this; + } + + @Override + public CreateMultipartUploadPresignRequest build() { + return new CreateMultipartUploadPresignRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/GetObjectPresignRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/GetObjectPresignRequest.java new file mode 100644 index 000000000000..ce0c5d5fa2aa --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/GetObjectPresignRequest.java @@ -0,0 +1,151 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Duration; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A request to pre-sign a {@link GetObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. 
+ * + * @see S3Presigner#presignGetObject(GetObjectPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class GetObjectPresignRequest + extends PresignRequest + implements ToCopyableBuilder { + private final GetObjectRequest getObjectRequest; + + private GetObjectPresignRequest(DefaultBuilder builder) { + super(builder); + this.getObjectRequest = Validate.notNull(builder.getObjectRequest, "getObjectRequest"); + } + + /** + * Create a builder that can be used to create a {@link GetObjectPresignRequest}. + * + * @see S3Presigner#presignGetObject(GetObjectPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Retrieve the {@link GetObjectRequest} that should be presigned. + */ + public GetObjectRequest getObjectRequest() { + return getObjectRequest; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + GetObjectPresignRequest that = (GetObjectPresignRequest) o; + + return getObjectRequest.equals(that.getObjectRequest); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + getObjectRequest.hashCode(); + return result; + } + + /** + * A builder for a {@link GetObjectPresignRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignRequest.Builder, + CopyableBuilder { + /** + * Configure the {@link GetObjectRequest} that should be presigned. + */ + Builder getObjectRequest(GetObjectRequest getObjectRequest); + + /** + * Configure the {@link GetObjectRequest} that should be presigned. + *

    + * This is a convenience method for invoking {@link #getObjectRequest(GetObjectRequest)} without needing to invoke + * {@code GetObjectRequest.builder()} or {@code build()}. + */ + default Builder getObjectRequest(Consumer getObjectRequest) { + GetObjectRequest.Builder builder = GetObjectRequest.builder(); + getObjectRequest.accept(builder); + return getObjectRequest(builder.build()); + } + + @Override + Builder signatureDuration(Duration signatureDuration); + + @Override + GetObjectPresignRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignRequest.DefaultBuilder implements Builder { + private GetObjectRequest getObjectRequest; + + private DefaultBuilder() { + } + + private DefaultBuilder(GetObjectPresignRequest request) { + super(request); + this.getObjectRequest = request.getObjectRequest; + } + + @Override + public Builder getObjectRequest(GetObjectRequest getObjectRequest) { + this.getObjectRequest = getObjectRequest; + return this; + } + + @Override + public GetObjectPresignRequest build() { + return new GetObjectPresignRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedAbortMultipartUploadRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedAbortMultipartUploadRequest.java new file mode 100644 index 000000000000..9848e647b6b4 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedAbortMultipartUploadRequest.java @@ -0,0 +1,108 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Instant; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A pre-signed {@link AbortMultipartUploadRequest} that can be executed at a later time without requiring additional signing + * or authentication. 
+ * + * @see S3Presigner#presignAbortMultipartUpload(AbortMultipartUploadPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public class PresignedAbortMultipartUploadRequest + extends PresignedRequest + implements ToCopyableBuilder { + private PresignedAbortMultipartUploadRequest(DefaultBuilder builder) { + super(builder); + } + + /** + * Create a builder that can be used to create a {@link PresignedAbortMultipartUploadRequest}. + * + * @see S3Presigner#presignAbortMultipartUpload(AbortMultipartUploadPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * A builder for a {@link PresignedAbortMultipartUploadRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder + extends PresignedRequest.Builder, + CopyableBuilder { + @Override + Builder expiration(Instant expiration); + + @Override + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + @Override + Builder signedHeaders(Map> signedHeaders); + + @Override + Builder signedPayload(SdkBytes signedPayload); + + @Override + Builder httpRequest(SdkHttpRequest httpRequest); + + @Override + PresignedAbortMultipartUploadRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder + extends PresignedRequest.DefaultBuilder + implements Builder { + private DefaultBuilder() { + } + + private DefaultBuilder(PresignedAbortMultipartUploadRequest request) { + super(request); + } + + @Override + public PresignedAbortMultipartUploadRequest build() { + return new PresignedAbortMultipartUploadRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedCompleteMultipartUploadRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedCompleteMultipartUploadRequest.java new file mode 100644 index 000000000000..c06da5ecab06 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedCompleteMultipartUploadRequest.java @@ -0,0 +1,108 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Instant; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A pre-signed {@link CompleteMultipartUploadRequest} that can be executed at a later time without requiring additional signing + * or authentication. + * + * @see S3Presigner#presignCompleteMultipartUpload(CompleteMultipartUploadPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public class PresignedCompleteMultipartUploadRequest + extends PresignedRequest + implements ToCopyableBuilder { + private PresignedCompleteMultipartUploadRequest(DefaultBuilder builder) { + super(builder); + } + + /** + * Create a builder that can be used to create a {@link PresignedCompleteMultipartUploadRequest}. + * + * @see S3Presigner#presignCompleteMultipartUpload(CompleteMultipartUploadPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * A builder for a {@link PresignedCompleteMultipartUploadRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder + extends PresignedRequest.Builder, + CopyableBuilder { + @Override + Builder expiration(Instant expiration); + + @Override + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + @Override + Builder signedHeaders(Map> signedHeaders); + + @Override + Builder signedPayload(SdkBytes signedPayload); + + @Override + Builder httpRequest(SdkHttpRequest httpRequest); + + @Override + PresignedCompleteMultipartUploadRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder + extends PresignedRequest.DefaultBuilder + implements Builder { + private DefaultBuilder() { + } + + private DefaultBuilder(PresignedCompleteMultipartUploadRequest request) { + super(request); + } + + @Override + public PresignedCompleteMultipartUploadRequest build() { + return new PresignedCompleteMultipartUploadRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedCreateMultipartUploadRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedCreateMultipartUploadRequest.java new file mode 100644 index 000000000000..b16e16de1d1b --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedCreateMultipartUploadRequest.java @@ -0,0 +1,108 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Instant; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A pre-signed {@link CreateMultipartUploadRequest} that can be executed at a later time without requiring additional signing or + * authentication. + * + * @see S3Presigner#presignCreateMultipartUpload(CreateMultipartUploadPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public class PresignedCreateMultipartUploadRequest + extends PresignedRequest + implements ToCopyableBuilder { + private PresignedCreateMultipartUploadRequest(DefaultBuilder builder) { + super(builder); + } + + /** + * Create a builder that can be used to create a {@link PresignedCreateMultipartUploadRequest}. + * + * @see S3Presigner#presignCreateMultipartUpload(CreateMultipartUploadPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * A builder for a {@link PresignedCreateMultipartUploadRequest}, created with {@link #builder()}. 
+ */ + @SdkPublicApi + @NotThreadSafe + public interface Builder + extends PresignedRequest.Builder, + CopyableBuilder { + @Override + Builder expiration(Instant expiration); + + @Override + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + @Override + Builder signedHeaders(Map> signedHeaders); + + @Override + Builder signedPayload(SdkBytes signedPayload); + + @Override + Builder httpRequest(SdkHttpRequest httpRequest); + + @Override + PresignedCreateMultipartUploadRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder + extends PresignedRequest.DefaultBuilder + implements Builder { + private DefaultBuilder() { + } + + private DefaultBuilder(PresignedCreateMultipartUploadRequest request) { + super(request); + } + + @Override + public PresignedCreateMultipartUploadRequest build() { + return new PresignedCreateMultipartUploadRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedGetObjectRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedGetObjectRequest.java new file mode 100644 index 000000000000..f9cacffae249 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedGetObjectRequest.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Instant; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A pre-signed a {@link GetObjectRequest} that can be executed at a later time without requiring additional signing or + * authentication. + * + * @see S3Presigner#presignGetObject(GetObjectPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public class PresignedGetObjectRequest + extends PresignedRequest + implements ToCopyableBuilder { + private PresignedGetObjectRequest(DefaultBuilder builder) { + super(builder); + } + + /** + * Create a builder that can be used to create a {@link PresignedGetObjectRequest}. 
+ * + * @see S3Presigner#presignGetObject(GetObjectPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * A builder for a {@link PresignedGetObjectRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignedRequest.Builder, + CopyableBuilder { + @Override + Builder expiration(Instant expiration); + + @Override + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + @Override + Builder signedHeaders(Map> signedHeaders); + + @Override + Builder signedPayload(SdkBytes signedPayload); + + @Override + Builder httpRequest(SdkHttpRequest httpRequest); + + @Override + PresignedGetObjectRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder + extends PresignedRequest.DefaultBuilder + implements Builder { + private DefaultBuilder() { + } + + private DefaultBuilder(PresignedGetObjectRequest request) { + super(request); + } + + @Override + public PresignedGetObjectRequest build() { + return new PresignedGetObjectRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedPutObjectRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedPutObjectRequest.java new file mode 100644 index 000000000000..003a7ab0343b --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedPutObjectRequest.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Instant; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A pre-signed a {@link PutObjectRequest} that can be executed at a later time without requiring additional signing or + * authentication. 
+ * + * @see S3Presigner#presignPutObject(PutObjectPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public class PresignedPutObjectRequest + extends PresignedRequest + implements ToCopyableBuilder { + private PresignedPutObjectRequest(DefaultBuilder builder) { + super(builder); + } + + /** + * Create a builder that can be used to create a {@link PresignedPutObjectRequest}. + * + * @see S3Presigner#presignPutObject(PutObjectPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * A builder for a {@link PresignedPutObjectRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignedRequest.Builder, + CopyableBuilder { + @Override + Builder expiration(Instant expiration); + + @Override + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + @Override + Builder signedHeaders(Map> signedHeaders); + + @Override + Builder signedPayload(SdkBytes signedPayload); + + @Override + Builder httpRequest(SdkHttpRequest httpRequest); + + @Override + PresignedPutObjectRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder + extends PresignedRequest.DefaultBuilder + implements Builder { + private DefaultBuilder() { + } + + private DefaultBuilder(PresignedPutObjectRequest request) { + super(request); + } + + @Override + public PresignedPutObjectRequest build() { + return new PresignedPutObjectRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedUploadPartRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedUploadPartRequest.java new file mode 100644 index 000000000000..89dcc6b9d68a --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedUploadPartRequest.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Instant; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A pre-signed {@link UploadPartRequest} that can be executed at a later time without requiring additional signing or + * authentication. + * + * @see S3Presigner#presignUploadPart(UploadPartPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public class PresignedUploadPartRequest + extends PresignedRequest + implements ToCopyableBuilder { + private PresignedUploadPartRequest(DefaultBuilder builder) { + super(builder); + } + + /** + * Create a builder that can be used to create a {@link PresignedUploadPartRequest}. + * + * @see S3Presigner#presignUploadPart(UploadPartPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * A builder for a {@link PresignedUploadPartRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignedRequest.Builder, + CopyableBuilder { + @Override + Builder expiration(Instant expiration); + + @Override + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + @Override + Builder signedHeaders(Map> signedHeaders); + + @Override + Builder signedPayload(SdkBytes signedPayload); + + @Override + Builder httpRequest(SdkHttpRequest httpRequest); + + @Override + PresignedUploadPartRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder + extends PresignedRequest.DefaultBuilder + implements Builder { + private DefaultBuilder() { + } + + private DefaultBuilder(PresignedUploadPartRequest request) { + super(request); + } + + @Override + public PresignedUploadPartRequest build() { + return new PresignedUploadPartRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PutObjectPresignRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PutObjectPresignRequest.java new file mode 100644 index 000000000000..2d02b0b079f8 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PutObjectPresignRequest.java @@ -0,0 +1,151 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Duration; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A request to pre-sign a {@link PutObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + * + * @see S3Presigner#presignPutObject(PutObjectPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class PutObjectPresignRequest + extends PresignRequest + implements ToCopyableBuilder { + private final PutObjectRequest putObjectRequest; + + private PutObjectPresignRequest(DefaultBuilder builder) { + super(builder); + this.putObjectRequest = Validate.notNull(builder.putObjectRequest, "putObjectRequest"); + } + + /** + * Create a builder that can be used to create a {@link PutObjectPresignRequest}. + * + * @see S3Presigner#presignPutObject(PutObjectPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Retrieve the {@link PutObjectRequest} that should be presigned. + */ + public PutObjectRequest putObjectRequest() { + return putObjectRequest; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + PutObjectPresignRequest that = (PutObjectPresignRequest) o; + + return putObjectRequest.equals(that.putObjectRequest); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + putObjectRequest.hashCode(); + return result; + } + + /** + * A builder for a {@link PutObjectPresignRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignRequest.Builder, + CopyableBuilder { + /** + * Configure the {@link PutObjectRequest} that should be presigned. + */ + Builder putObjectRequest(PutObjectRequest putObjectRequest); + + /** + * Configure the {@link PutObjectRequest} that should be presigned. + *

    + * This is a convenience method for invoking {@link #putObjectRequest(PutObjectRequest)} without needing to invoke + * {@code PutObjectRequest.builder()} or {@code build()}. + */ + default Builder putObjectRequest(Consumer putObjectRequest) { + PutObjectRequest.Builder builder = PutObjectRequest.builder(); + putObjectRequest.accept(builder); + return putObjectRequest(builder.build()); + } + + @Override + Builder signatureDuration(Duration signatureDuration); + + @Override + PutObjectPresignRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignRequest.DefaultBuilder implements Builder { + private PutObjectRequest putObjectRequest; + + private DefaultBuilder() { + } + + private DefaultBuilder(PutObjectPresignRequest request) { + super(request); + this.putObjectRequest = request.putObjectRequest; + } + + @Override + public Builder putObjectRequest(PutObjectRequest putObjectRequest) { + this.putObjectRequest = putObjectRequest; + return this; + } + + @Override + public PutObjectPresignRequest build() { + return new PutObjectPresignRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/UploadPartPresignRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/UploadPartPresignRequest.java new file mode 100644 index 000000000000..9b8dc595f191 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/UploadPartPresignRequest.java @@ -0,0 +1,151 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Duration; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A request to pre-sign a {@link UploadPartRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. 
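The Consumer-style convenience just shown for putObjectRequest, and the matching uploadPartRequest variant defined further below, keep presign calls compact by skipping the explicit builder() and build() calls. A small fragment, reusing the S3Presigner and imports from the earlier sketches, with a hypothetical bucket, key, upload ID, and part number:

    PresignedUploadPartRequest presignedPart = presigner.presignUploadPart(
            UploadPartPresignRequest.builder()
                    .signatureDuration(Duration.ofMinutes(30))
                    // Consumer<UploadPartRequest.Builder> variant: the builder is created,
                    // configured by the lambda, and built on the caller's behalf.
                    .uploadPartRequest(upr -> upr.bucket("my-bucket")            // hypothetical bucket
                                                 .key("big-file.bin")            // hypothetical key
                                                 .uploadId("EXAMPLE-UPLOAD-ID")  // hypothetical upload ID
                                                 .partNumber(1))
                    .build());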
+ * + * @see S3Presigner#presignUploadPart(UploadPartPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class UploadPartPresignRequest + extends PresignRequest + implements ToCopyableBuilder { + private final UploadPartRequest uploadPartRequest; + + private UploadPartPresignRequest(DefaultBuilder builder) { + super(builder); + this.uploadPartRequest = Validate.notNull(builder.uploadPartRequest, "uploadPartRequest"); + } + + /** + * Create a builder that can be used to create a {@link UploadPartPresignRequest}. + * + * @see S3Presigner#presignUploadPart(UploadPartPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Retrieve the {@link UploadPartRequest} that should be presigned. + */ + public UploadPartRequest uploadPartRequest() { + return uploadPartRequest; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + UploadPartPresignRequest that = (UploadPartPresignRequest) o; + + return uploadPartRequest.equals(that.uploadPartRequest); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + uploadPartRequest.hashCode(); + return result; + } + + /** + * A builder for a {@link UploadPartPresignRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignRequest.Builder, + CopyableBuilder { + /** + * Configure the {@link UploadPartRequest} that should be presigned. + */ + Builder uploadPartRequest(UploadPartRequest uploadPartRequest); + + /** + * Configure the {@link UploadPartRequest} that should be presigned. + *

    + * This is a convenience method for invoking {@link #uploadPartRequest(UploadPartRequest)} without needing to invoke + * {@code UploadPartRequest.builder()} or {@code build()}. + */ + default Builder uploadPartRequest(Consumer uploadPartRequest) { + UploadPartRequest.Builder builder = UploadPartRequest.builder(); + uploadPartRequest.accept(builder); + return uploadPartRequest(builder.build()); + } + + @Override + Builder signatureDuration(Duration signatureDuration); + + @Override + UploadPartPresignRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignRequest.DefaultBuilder implements Builder { + private UploadPartRequest uploadPartRequest; + + private DefaultBuilder() { + } + + private DefaultBuilder(UploadPartPresignRequest request) { + super(request); + this.uploadPartRequest = request.uploadPartRequest; + } + + @Override + public Builder uploadPartRequest(UploadPartRequest uploadPartRequest) { + this.uploadPartRequest = uploadPartRequest; + return this; + } + + @Override + public UploadPartPresignRequest build() { + return new UploadPartPresignRequest(this); + } + } +} diff --git a/services/s3/src/main/resources/codegen-resources/customization.config b/services/s3/src/main/resources/codegen-resources/customization.config index c11b5f7eb400..e284b3aa21cf 100644 --- a/services/s3/src/main/resources/codegen-resources/customization.config +++ b/services/s3/src/main/resources/codegen-resources/customization.config @@ -16,6 +16,20 @@ } ] }, + "CopyObjectRequest": { + "modify": [ + { + "Bucket": { + "emitPropertyName": "DestinationBucket", + "existingNameDeprecated": true + }, + "Key": { + "emitPropertyName": "DestinationKey", + "existingNameDeprecated": true + } + } + ] + }, "ObjectVersion": { "modify": [ { @@ -61,6 +75,12 @@ "memberName": "Tagging", "convenienceType": "software.amazon.awssdk.services.s3.model.Tagging", "typeAdapterFqcn": "software.amazon.awssdk.services.s3.internal.TaggingAdapter" + }, + { + "shapeName": "CreateMultipartUploadRequest", + "memberName": "Tagging", + "convenienceType": "software.amazon.awssdk.services.s3.model.Tagging", + "typeAdapterFqcn": "software.amazon.awssdk.services.s3.internal.TaggingAdapter" } ], "customResponseMetadata": { diff --git a/services/s3/src/main/resources/codegen-resources/service-2.json b/services/s3/src/main/resources/codegen-resources/service-2.json index f13ae3f6bc44..c9db84bac561 100644 --- a/services/s3/src/main/resources/codegen-resources/service-2.json +++ b/services/s3/src/main/resources/codegen-resources/service-2.json @@ -26,7 +26,7 @@ {"shape":"NoSuchUpload"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html", - "documentation":"

    Aborts a multipart upload.

    To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.

    " + "documentation":"

    This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

    To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts operation and ensure that the parts list is empty.

    For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

    The following operations are related to AbortMultipartUpload:

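    With the Java SDK v2 client, the operation described above maps onto abortMultipartUpload. A minimal sketch, assuming a hypothetical bucket, key, and upload ID:

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;

        public class AbortMultipartUploadSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    s3.abortMultipartUpload(AbortMultipartUploadRequest.builder()
                            .bucket("my-bucket")            // hypothetical bucket
                            .key("big-file.bin")            // hypothetical key
                            .uploadId("EXAMPLE-UPLOAD-ID")  // hypothetical ID from CreateMultipartUpload
                            .build());
                    // If part uploads were still in flight, a follow-up ListParts call can be
                    // used to confirm that no billable parts remain, as described above.
                }
            }
        }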
    " }, "CompleteMultipartUpload":{ "name":"CompleteMultipartUpload", @@ -37,7 +37,7 @@ "input":{"shape":"CompleteMultipartUploadRequest"}, "output":{"shape":"CompleteMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html", - "documentation":"

    Completes a multipart upload by assembling previously uploaded parts.

    " + "documentation":"

    Completes a multipart upload by assembling previously uploaded parts.

    You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This operation concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value, returned after that part was uploaded.

    Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.

    Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.

    For more information about multipart uploads, see Uploading Objects Using Multipart Upload.

    For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

    CompleteMultipartUpload has the following special errors:

    • Error code: EntityTooSmall

      • Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.

      • 400 Bad Request

    • Error code: InvalidPart

      • Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.

      • 400 Bad Request

    • Error code: InvalidPartOrder

      • Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.

      • 400 Bad Request

    • Error code: NoSuchUpload

      • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

      • 404 Not Found

    The following operations are related to CompleteMultipartUpload:

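    As a rough illustration of the sequence described above, the following sketch initiates an upload, uploads a single part, and completes the upload with that part's ETag. The bucket, key, and payload are hypothetical; a lone final part may be smaller than the 5 MB minimum that applies to earlier parts.

        import software.amazon.awssdk.core.sync.RequestBody;
        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
        import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
        import software.amazon.awssdk.services.s3.model.CompletedPart;
        import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
        import software.amazon.awssdk.services.s3.model.UploadPartRequest;
        import software.amazon.awssdk.services.s3.model.UploadPartResponse;

        public class CompleteMultipartUploadSketch {
            public static void main(String[] args) {
                String bucket = "my-bucket";   // hypothetical bucket
                String key = "assembled.txt";  // hypothetical key

                try (S3Client s3 = S3Client.create()) {
                    String uploadId = s3.createMultipartUpload(CreateMultipartUploadRequest.builder()
                            .bucket(bucket).key(key).build()).uploadId();

                    UploadPartResponse part1 = s3.uploadPart(
                            UploadPartRequest.builder()
                                    .bucket(bucket).key(key).uploadId(uploadId).partNumber(1).build(),
                            RequestBody.fromString("part one"));

                    // The parts list must be in ascending part-number order and carry each part's ETag.
                    s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                            .bucket(bucket).key(key).uploadId(uploadId)
                            .multipartUpload(CompletedMultipartUpload.builder()
                                    .parts(CompletedPart.builder().partNumber(1).eTag(part1.eTag()).build())
                                    .build())
                            .build());
                }
            }
        }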
    " }, "CopyObject":{ "name":"CopyObject", @@ -51,7 +51,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

    Creates a copy of an object that is already stored in Amazon S3.

    ", + "documentation":"

    Creates a copy of an object that is already stored in Amazon S3.

    You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

    All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

    A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

    If the copy is successful, you receive a response with information about the copied object.

    If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

    The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

    Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

    Metadata

    When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

    To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

    x-amz-copy-source-if Headers

    To copy an object only under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the following request parameters:

    • x-amz-copy-source-if-match

    • x-amz-copy-source-if-none-match

    • x-amz-copy-source-if-unmodified-since

    • x-amz-copy-source-if-modified-since

    If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

    • x-amz-copy-source-if-match condition evaluates to true

    • x-amz-copy-source-if-unmodified-since condition evaluates to false

    If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

    • x-amz-copy-source-if-none-match condition evaluates to false

    • x-amz-copy-source-if-modified-since condition evaluates to true

    All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

    Server-side encryption

    When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.

    If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

    Access Control List (ACL)-Specific Request Headers

    When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

    Storage Class Options

    You can use the CopyObject operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

    Versioning

    By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

    If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

    If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

    If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.

    The following operations are related to CopyObject:

    For more information, see Copying Objects.

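    A minimal sketch of the simple copy described above, using the destinationBucket and destinationKey names introduced by the codegen customization earlier in this change; the copySource member is assumed to take a "bucket/key" string, and all bucket and key names are hypothetical.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
        import software.amazon.awssdk.services.s3.model.MetadataDirective;

        public class CopyObjectSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    s3.copyObject(CopyObjectRequest.builder()
                            .copySource("source-bucket/photos/cat.png")  // hypothetical "bucket/key" source
                            .destinationBucket("destination-bucket")     // hypothetical destination bucket
                            .destinationKey("photos/cat-copy.png")       // hypothetical destination key
                            .metadataDirective(MetadataDirective.COPY)   // keep the source object's metadata (the default)
                            .build());
                }
            }
        }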
    ", "alias":"PutObjectCopy" }, "CreateBucket":{ @@ -67,7 +67,7 @@ {"shape":"BucketAlreadyOwnedByYou"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html", - "documentation":"

    Creates a new bucket.

    ", + "documentation":"

    Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

    Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Working with Amazon S3 buckets.

    If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.

    By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket.

    If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets.

    When creating a bucket using this operation, you can optionally specify the accounts or groups that should be granted specific permissions on the bucket. There are two ways to grant the appropriate permissions using the request headers.

    • Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

    • Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access control list (ACL) overview.

      You specify each grantee as a type=value pair, where the type is one of the following:

      • id – if the value specified is the canonical user ID of an AWS account

      • uri – if you are granting permissions to a predefined group

      • emailAddress – if the value specified is the email address of an AWS account

        Using email addresses to specify a grantee is only supported in the following AWS Regions:

        • US East (N. Virginia)

        • US West (N. California)

        • US West (Oregon)

        • Asia Pacific (Singapore)

        • Asia Pacific (Sydney)

        • Asia Pacific (Tokyo)

        • Europe (Ireland)

        • South America (São Paulo)

        For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

      For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:

      x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

    The following operations are related to CreateBucket:

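    A minimal sketch of creating a bucket outside us-east-1 with a canned ACL, assuming a hypothetical, globally unique bucket name:

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.BucketCannedACL;
        import software.amazon.awssdk.services.s3.model.BucketLocationConstraint;
        import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration;
        import software.amazon.awssdk.services.s3.model.CreateBucketRequest;

        public class CreateBucketSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    s3.createBucket(CreateBucketRequest.builder()
                            .bucket("my-example-bucket-1234")  // hypothetical, globally unique name
                            .acl(BucketCannedACL.PRIVATE)      // sent as the x-amz-acl canned ACL header
                            .createBucketConfiguration(CreateBucketConfiguration.builder()
                                    .locationConstraint(BucketLocationConstraint.EU_WEST_1)  // create outside us-east-1
                                    .build())
                            .build());
                }
            }
        }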
    ", "alias":"PutBucket" }, "CreateMultipartUpload":{ @@ -79,7 +79,7 @@ "input":{"shape":"CreateMultipartUploadRequest"}, "output":{"shape":"CreateMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", - "documentation":"

    Initiates a multipart upload and returns an upload ID.

    Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

    ", + "documentation":"

    This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.

    For more information about multipart uploads, see Multipart Upload Overview.

    If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

    For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

    For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).

    After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

    You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.

    To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.

    If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.

    For more information, see Protecting Data Using Server-Side Encryption.

    Access Permissions

    When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

    • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

    • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

    Server-Side-Encryption-Specific Request Headers

    You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

    • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

      • x-amz-server-side-encryption

      • x-amz-server-side-encryption-aws-kms-key-id

      • x-amz-server-side-encryption-context

      If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

      All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

      For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

    • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

      • x-amz-server-side-encryption-customer-algorithm

      • x-amz-server-side-encryption-customer-key

      • x-amz-server-side-encryption-customer-key-MD5

      For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

    Access-Control-List (ACL)-Specific Request Headers

    You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

    • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

    • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

      • x-amz-grant-read

      • x-amz-grant-write

      • x-amz-grant-read-acp

      • x-amz-grant-write-acp

      • x-amz-grant-full-control

      You specify each grantee as a type=value pair, where the type is one of the following:

      • id – if the value specified is the canonical user ID of an AWS account

      • uri – if you are granting permissions to a predefined group

      • emailAddress – if the value specified is the email address of an AWS account

        Using email addresses to specify a grantee is only supported in the following AWS Regions:

        • US East (N. Virginia)

        • US West (N. California)

        • US West (Oregon)

        • Asia Pacific (Singapore)

        • Asia Pacific (Sydney)

        • Asia Pacific (Tokyo)

        • Europe (Ireland)

        • South America (São Paulo)

        For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

      For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:

      x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"

    The following operations are related to CreateMultipartUpload:

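    A minimal sketch of initiating an upload with SSE-KMS requested at creation time. The bucket, key, and KMS key alias are hypothetical, and the serverSideEncryption and ssekmsKeyId setters are assumed to correspond to the x-amz-server-side-encryption and x-amz-server-side-encryption-aws-kms-key-id headers discussed above.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
        import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
        import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

        public class CreateMultipartUploadSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    CreateMultipartUploadResponse response = s3.createMultipartUpload(
                            CreateMultipartUploadRequest.builder()
                                    .bucket("my-bucket")                                 // hypothetical bucket
                                    .key("videos/training.mp4")                          // hypothetical key
                                    .serverSideEncryption(ServerSideEncryption.AWS_KMS)  // request aws:kms encryption
                                    .ssekmsKeyId("alias/my-key")                         // hypothetical CMK alias
                                    .build());

                    // The returned upload ID ties the subsequent UploadPart and
                    // Complete/AbortMultipartUpload calls to this upload.
                    System.out.println("Upload ID: " + response.uploadId());
                }
            }
        }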
    ", "alias":"InitiateMultipartUpload" }, "DeleteBucket":{ @@ -91,7 +91,7 @@ }, "input":{"shape":"DeleteBucketRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETE.html", - "documentation":"

    Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.

    " + "documentation":"

    Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted.

    Related Resources

    " }, "DeleteBucketAnalyticsConfiguration":{ "name":"DeleteBucketAnalyticsConfiguration", @@ -101,7 +101,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketAnalyticsConfigurationRequest"}, - "documentation":"

    Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).

    To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others.

    " + "documentation":"

    Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).

    To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

    The following operations are related to DeleteBucketAnalyticsConfiguration:

    " }, "DeleteBucketCors":{ "name":"DeleteBucketCors", @@ -112,7 +112,7 @@ }, "input":{"shape":"DeleteBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html", - "documentation":"

    Deletes the CORS configuration information set for the bucket.

    " + "documentation":"

    Deletes the cors configuration information set for the bucket.

    To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

    For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

    Related Resources:

    " }, "DeleteBucketEncryption":{ "name":"DeleteBucketEncryption", @@ -122,7 +122,17 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketEncryptionRequest"}, - "documentation":"

    Deletes the server-side encryption configuration from the bucket.

    " + "documentation":"

    This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

    To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    Related Resources

    " + }, + "DeleteBucketIntelligentTieringConfiguration":{ + "name":"DeleteBucketIntelligentTieringConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?intelligent-tiering", + "responseCode":204 + }, + "input":{"shape":"DeleteBucketIntelligentTieringConfigurationRequest"}, + "documentation":"

    Deletes the S3 Intelligent-Tiering configuration from the specified bucket.

    The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

    The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

    If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

    Operations related to DeleteBucketIntelligentTieringConfiguration include:

    " }, "DeleteBucketInventoryConfiguration":{ "name":"DeleteBucketInventoryConfiguration", @@ -132,7 +142,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketInventoryConfigurationRequest"}, - "documentation":"

    Deletes an inventory configuration (identified by the inventory ID) from the bucket.

    " + "documentation":"

    Deletes an inventory configuration (identified by the inventory ID) from the bucket.

    To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

    Operations related to DeleteBucketInventoryConfiguration include:

    " }, "DeleteBucketLifecycle":{ "name":"DeleteBucketLifecycle", @@ -143,7 +153,7 @@ }, "input":{"shape":"DeleteBucketLifecycleRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETElifecycle.html", - "documentation":"

    Deletes the lifecycle configuration from the bucket.

    " + "documentation":"

    Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.

    To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration action. By default, the bucket owner has this permission and the bucket owner can grant this permission to others.

    There is usually some time lag before lifecycle configuration deletion is fully propagated to all the Amazon S3 systems.

    For more information about the object expiration, see Elements to Describe Lifecycle Actions.

    Related actions include:

    " }, "DeleteBucketMetricsConfiguration":{ "name":"DeleteBucketMetricsConfiguration", @@ -153,7 +163,17 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketMetricsConfigurationRequest"}, - "documentation":"

    Deletes a metrics configuration (specified by the metrics configuration ID) from the bucket.

    " + "documentation":"

    Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.

    To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

    The following operations are related to DeleteBucketMetricsConfiguration:

    " + }, + "DeleteBucketOwnershipControls":{ + "name":"DeleteBucketOwnershipControls", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?ownershipControls", + "responseCode":204 + }, + "input":{"shape":"DeleteBucketOwnershipControlsRequest"}, + "documentation":"

    Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

    For information about Amazon S3 Object Ownership, see Using Object Ownership.

    The following operations are related to DeleteBucketOwnershipControls:

    " }, "DeleteBucketPolicy":{ "name":"DeleteBucketPolicy", @@ -164,7 +184,7 @@ }, "input":{"shape":"DeleteBucketPolicyRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html", - "documentation":"

    Deletes the policy from the bucket.

    " + "documentation":"

    This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's account to use this operation.

    If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

    As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

    For more information about bucket policies, see Using Bucket Policies and UserPolicies.

    The following operations are related to DeleteBucketPolicy:

    " }, "DeleteBucketReplication":{ "name":"DeleteBucketReplication", @@ -174,7 +194,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketReplicationRequest"}, - "documentation":"

    Deletes the replication configuration from the bucket. For information about replication configuration, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

    " + "documentation":"

    Deletes the replication configuration from the bucket.

    To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    It can take a while for the deletion of a replication configuration to fully propagate.

    For information about replication configuration, see Replication in the Amazon S3 Developer Guide.

    The following operations are related to DeleteBucketReplication:

    " }, "DeleteBucketTagging":{ "name":"DeleteBucketTagging", @@ -185,7 +205,7 @@ }, "input":{"shape":"DeleteBucketTaggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEtagging.html", - "documentation":"

    Deletes the tags from the bucket.

    " + "documentation":"

    Deletes the tags from the bucket.

    To use this operation, you must have permission to perform the s3:PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

    The following operations are related to DeleteBucketTagging:

    " }, "DeleteBucketWebsite":{ "name":"DeleteBucketWebsite", @@ -196,7 +216,7 @@ }, "input":{"shape":"DeleteBucketWebsiteRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html", - "documentation":"

    This operation removes the website configuration from the bucket.

    " + "documentation":"

    This operation removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.

    This DELETE operation requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.

    For more information about hosting websites, see Hosting Websites on Amazon S3.

    The following operations are related to DeleteBucketWebsite:

    " }, "DeleteObject":{ "name":"DeleteObject", @@ -208,7 +228,7 @@ "input":{"shape":"DeleteObjectRequest"}, "output":{"shape":"DeleteObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectDELETE.html", - "documentation":"

    Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.

    " + "documentation":"

    Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.

    To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker, to true.

    If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS.

    For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.

    You can delete objects by explicitly calling the DELETE Object API, or you can configure a bucket lifecycle (PutBucketLifecycle) to have Amazon S3 remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.

    The following operation is related to DeleteObject:

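    A minimal sketch of the simple delete path, with a hypothetical bucket and key; on a versioning-enabled bucket the same call inserts a delete marker.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
        import software.amazon.awssdk.services.s3.model.DeleteObjectResponse;

        public class DeleteObjectSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    DeleteObjectResponse response = s3.deleteObject(DeleteObjectRequest.builder()
                            .bucket("my-bucket")    // hypothetical bucket
                            .key("old/report.txt")  // hypothetical key
                            // .versionId("...")    // supply a version ID to permanently delete one version
                            .build());

                    // deleteMarker() mirrors the x-amz-delete-marker response header.
                    System.out.println("Delete marker created: " + response.deleteMarker());
                }
            }
        }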
    " }, "DeleteObjectTagging":{ "name":"DeleteObjectTagging", @@ -219,7 +239,7 @@ }, "input":{"shape":"DeleteObjectTaggingRequest"}, "output":{"shape":"DeleteObjectTaggingOutput"}, - "documentation":"

    Removes the tag-set from an existing object.

    " + "documentation":"

    Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.

    To use this operation, you must have permission to perform the s3:DeleteObjectTagging action.

    To delete tags of a specific object version, add the versionId query parameter in the request. You will need permission for the s3:DeleteObjectVersionTagging action.

    The following operations are related to DeleteObjectTagging:

    " }, "DeleteObjects":{ "name":"DeleteObjects", @@ -230,8 +250,9 @@ "input":{"shape":"DeleteObjectsRequest"}, "output":{"shape":"DeleteObjectsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", - "documentation":"

    This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.

    ", - "alias":"DeleteMultipleObjects" + "documentation":"

    This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

    The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.

    The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion, the operation does not return any information about the delete in the response body.

    When performing this operation on an MFA Delete enabled bucket, if the request attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, the entire Multi-Object Delete request will fail, whether or not there are versioned keys in the request. For information about MFA Delete, see MFA Delete.

    Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.

    The following operations are related to DeleteObjects:

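    A minimal sketch of a quiet-mode multi-object delete, with a hypothetical bucket and keys; the SDK is expected to compute the required Content-MD5 header itself, which is what the httpChecksumRequired flag added above signals.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.Delete;
        import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
        import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
        import software.amazon.awssdk.services.s3.model.ObjectIdentifier;

        public class DeleteObjectsSketch {
            public static void main(String[] args) {
                try (S3Client s3 = S3Client.create()) {
                    DeleteObjectsResponse response = s3.deleteObjects(DeleteObjectsRequest.builder()
                            .bucket("my-bucket")  // hypothetical bucket
                            .delete(Delete.builder()
                                    .objects(ObjectIdentifier.builder().key("logs/2019-10-01.log").build(),
                                             ObjectIdentifier.builder().key("logs/2019-10-02.log").build())
                                    .quiet(true)  // quiet mode: only failed deletions are reported
                                    .build())
                            .build());

                    // With quiet mode enabled, errors() is the interesting part of the response.
                    response.errors().forEach(e -> System.err.println(e.key() + ": " + e.message()));
                }
            }
        }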
    ", + "alias":"DeleteMultipleObjects", + "httpChecksumRequired":true }, "DeletePublicAccessBlock":{ "name":"DeletePublicAccessBlock", @@ -241,7 +262,7 @@ "responseCode":204 }, "input":{"shape":"DeletePublicAccessBlockRequest"}, - "documentation":"

    Removes the PublicAccessBlock configuration from an Amazon S3 bucket.

    " + "documentation":"

    Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    The following operations are related to DeletePublicAccessBlock:

    " }, "GetBucketAccelerateConfiguration":{ "name":"GetBucketAccelerateConfiguration", @@ -251,7 +272,7 @@ }, "input":{"shape":"GetBucketAccelerateConfigurationRequest"}, "output":{"shape":"GetBucketAccelerateConfigurationOutput"}, - "documentation":"

    Returns the accelerate configuration of a bucket.

    " + "documentation":"

    This implementation of the GET operation uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.

    To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.

    A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.

    For more information about transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.

    Related Resources

    " }, "GetBucketAcl":{ "name":"GetBucketAcl", @@ -262,7 +283,7 @@ "input":{"shape":"GetBucketAclRequest"}, "output":{"shape":"GetBucketAclOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html", - "documentation":"

    Gets the access control policy for the bucket.

    " + "documentation":"

    This implementation of the GET operation uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

    Related Resources

    " }, "GetBucketAnalyticsConfiguration":{ "name":"GetBucketAnalyticsConfiguration", @@ -272,7 +293,7 @@ }, "input":{"shape":"GetBucketAnalyticsConfigurationRequest"}, "output":{"shape":"GetBucketAnalyticsConfigurationOutput"}, - "documentation":"

    Gets an analytics configuration for the bucket (specified by the analytics configuration ID).

    " + "documentation":"

    This implementation of the GET operation returns an analytics configuration (identified by the analytics configuration ID) from the bucket.

    To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service Developer Guide.

    Related Resources

    " }, "GetBucketCors":{ "name":"GetBucketCors", @@ -283,7 +304,7 @@ "input":{"shape":"GetBucketCorsRequest"}, "output":{"shape":"GetBucketCorsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETcors.html", - "documentation":"

    Returns the CORS configuration for the bucket.

    " + "documentation":"

    Returns the cors configuration information set for the bucket.

    To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

    For more information about cors, see Enabling Cross-Origin Resource Sharing.

    The following operations are related to GetBucketCors:

    " }, "GetBucketEncryption":{ "name":"GetBucketEncryption", @@ -293,7 +314,17 @@ }, "input":{"shape":"GetBucketEncryptionRequest"}, "output":{"shape":"GetBucketEncryptionOutput"}, - "documentation":"

    Returns the server-side encryption configuration of a bucket.

    " + "documentation":"

    Returns the default encryption configuration for an Amazon S3 bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

    To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    The following operations are related to GetBucketEncryption:

    " + }, + "GetBucketIntelligentTieringConfiguration":{ + "name":"GetBucketIntelligentTieringConfiguration", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?intelligent-tiering" + }, + "input":{"shape":"GetBucketIntelligentTieringConfigurationRequest"}, + "output":{"shape":"GetBucketIntelligentTieringConfigurationOutput"}, + "documentation":"

    Gets the S3 Intelligent-Tiering configuration from the specified bucket.

    The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

    The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

    If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

    Operations related to GetBucketIntelligentTieringConfiguration include:

    " }, "GetBucketInventoryConfiguration":{ "name":"GetBucketInventoryConfiguration", @@ -303,7 +334,7 @@ }, "input":{"shape":"GetBucketInventoryConfigurationRequest"}, "output":{"shape":"GetBucketInventoryConfigurationOutput"}, - "documentation":"

    Returns an inventory configuration (identified by the inventory ID) from the bucket.

    " + "documentation":"

    Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.

    To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

    The following operations are related to GetBucketInventoryConfiguration:

    " }, "GetBucketLifecycle":{ "name":"GetBucketLifecycle", @@ -314,7 +345,7 @@ "input":{"shape":"GetBucketLifecycleRequest"}, "output":{"shape":"GetBucketLifecycleOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlifecycle.html", - "documentation":"

    No longer used, see the GetBucketLifecycleConfiguration operation.

    ", + "documentation":"

    For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter element, you should see the updated version of this topic. This topic is provided for backward compatibility.

    Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

    To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    GetBucketLifecycle has the following special error:

    • Error code: NoSuchLifecycleConfiguration

      • Description: The lifecycle configuration does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

    The following operations are related to GetBucketLifecycle:

    ", "deprecated":true }, "GetBucketLifecycleConfiguration":{ @@ -325,7 +356,7 @@ }, "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, "output":{"shape":"GetBucketLifecycleConfigurationOutput"}, - "documentation":"

    Returns the lifecycle configuration information set on the bucket.

    " + "documentation":"

    Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier API description, see GetBucketLifecycle.

    Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

    To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    GetBucketLifecycleConfiguration has the following special error:

    • Error code: NoSuchLifecycleConfiguration

      • Description: The lifecycle configuration does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

    The following operations are related to GetBucketLifecycleConfiguration:

    " }, "GetBucketLocation":{ "name":"GetBucketLocation", @@ -336,7 +367,7 @@ "input":{"shape":"GetBucketLocationRequest"}, "output":{"shape":"GetBucketLocationOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlocation.html", - "documentation":"

    Returns the region the bucket resides in.

    " + "documentation":"

    Returns the Region the bucket resides in. You set the bucket's Region using the LocationConstraint request parameter in a CreateBucket request. For more information, see CreateBucket.

    To use this implementation of the operation, you must be the bucket owner.

    The following operations are related to GetBucketLocation:

    " }, "GetBucketLogging":{ "name":"GetBucketLogging", @@ -347,7 +378,7 @@ "input":{"shape":"GetBucketLoggingRequest"}, "output":{"shape":"GetBucketLoggingOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlogging.html", - "documentation":"

    Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.

    " + "documentation":"

    Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.

    The following operations are related to GetBucketLogging:

    " }, "GetBucketMetricsConfiguration":{ "name":"GetBucketMetricsConfiguration", @@ -357,7 +388,7 @@ }, "input":{"shape":"GetBucketMetricsConfigurationRequest"}, "output":{"shape":"GetBucketMetricsConfigurationOutput"}, - "documentation":"

    Gets a metrics configuration (specified by the metrics configuration ID) from the bucket.

    " + "documentation":"

    Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.

    To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

    The following operations are related to GetBucketMetricsConfiguration:

    " }, "GetBucketNotification":{ "name":"GetBucketNotification", @@ -368,7 +399,7 @@ "input":{"shape":"GetBucketNotificationConfigurationRequest"}, "output":{"shape":"NotificationConfigurationDeprecated"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETnotification.html", - "documentation":"

    No longer used, see the GetBucketNotificationConfiguration operation.

    ", + "documentation":"

    No longer used, see GetBucketNotificationConfiguration.

    ", "deprecated":true }, "GetBucketNotificationConfiguration":{ @@ -379,7 +410,17 @@ }, "input":{"shape":"GetBucketNotificationConfigurationRequest"}, "output":{"shape":"NotificationConfiguration"}, - "documentation":"

    Returns the notification configuration of a bucket.

    " + "documentation":"

    Returns the notification configuration of a bucket.

    If notifications are not enabled on the bucket, the operation returns an empty NotificationConfiguration element.

    By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification permission.

    For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.

    The following operation is related to GetBucketNotification:

    " + }, + "GetBucketOwnershipControls":{ + "name":"GetBucketOwnershipControls", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?ownershipControls" + }, + "input":{"shape":"GetBucketOwnershipControlsRequest"}, + "output":{"shape":"GetBucketOwnershipControlsOutput"}, + "documentation":"

    Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

    For information about Amazon S3 Object Ownership, see Using Object Ownership.

    The following operations are related to GetBucketOwnershipControls:

    " }, "GetBucketPolicy":{ "name":"GetBucketPolicy", @@ -390,7 +431,7 @@ "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETpolicy.html", - "documentation":"

    Returns the policy of a specified bucket.

    " + "documentation":"

    Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

    If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

    As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

    For more information about bucket policies, see Using Bucket Policies and User Policies.

    The following operation is related to GetBucketPolicy:
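
    As a hedged illustration, a minimal AWS SDK for Java v2 sketch that reads the policy as a JSON string; the bucket name is a placeholder and the caller is assumed to have the GetBucketPolicy permissions described above.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketPolicyRequest;

    public class GetBucketPolicyExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            // The policy document is returned as a JSON string.
            String policy = s3.getBucketPolicy(GetBucketPolicyRequest.builder()
                    .bucket("my-bucket")
                    .build()).policy();
            System.out.println(policy);
        }
    }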

    " }, "GetBucketPolicyStatus":{ "name":"GetBucketPolicyStatus", @@ -400,7 +441,7 @@ }, "input":{"shape":"GetBucketPolicyStatusRequest"}, "output":{"shape":"GetBucketPolicyStatusOutput"}, - "documentation":"

    Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public.

    " + "documentation":"

    Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

    For more information about when Amazon S3 considers a bucket public, see The Meaning of \"Public\".

    The following operations are related to GetBucketPolicyStatus:

    " }, "GetBucketReplication":{ "name":"GetBucketReplication", @@ -410,7 +451,7 @@ }, "input":{"shape":"GetBucketReplicationRequest"}, "output":{"shape":"GetBucketReplicationOutput"}, - "documentation":"

    Returns the replication configuration of a bucket.

    It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.

    " + "documentation":"

    Returns the replication configuration of a bucket.

    It can take a while to propagate the put or delete of a replication configuration to all Amazon S3 systems. Therefore, a get request soon after a put or delete can return a wrong result.

    For information about replication configuration, see Replication in the Amazon Simple Storage Service Developer Guide.

    This operation requires permissions for the s3:GetReplicationConfiguration action. For more information about permissions, see Using Bucket Policies and User Policies.

    If you include the Filter element in a replication configuration, you must also include the DeleteMarkerReplication and Priority elements. The response also returns those elements.

    For information about GetBucketReplication errors, see List of replication-related error codes

    The following operations are related to GetBucketReplication:

    " }, "GetBucketRequestPayment":{ "name":"GetBucketRequestPayment", @@ -421,7 +462,7 @@ "input":{"shape":"GetBucketRequestPaymentRequest"}, "output":{"shape":"GetBucketRequestPaymentOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentGET.html", - "documentation":"

    Returns the request payment configuration of a bucket.

    " + "documentation":"

    Returns the request payment configuration of a bucket. To use this version of the operation, you must be the bucket owner. For more information, see Requester Pays Buckets.

    The following operations are related to GetBucketRequestPayment:

    " }, "GetBucketTagging":{ "name":"GetBucketTagging", @@ -432,7 +473,7 @@ "input":{"shape":"GetBucketTaggingRequest"}, "output":{"shape":"GetBucketTaggingOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETtagging.html", - "documentation":"

    Returns the tag set associated with the bucket.

    " + "documentation":"

    Returns the tag set associated with the bucket.

    To use this operation, you must have permission to perform the s3:GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

    GetBucketTagging has the following special error:

    • Error code: NoSuchTagSetError

      • Description: There is no tag set associated with the bucket.

    The following operations are related to GetBucketTagging:

    " }, "GetBucketVersioning":{ "name":"GetBucketVersioning", @@ -443,7 +484,7 @@ "input":{"shape":"GetBucketVersioningRequest"}, "output":{"shape":"GetBucketVersioningOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETversioningStatus.html", - "documentation":"

    Returns the versioning state of a bucket.

    " + "documentation":"

    Returns the versioning state of a bucket.

    To retrieve the versioning state of a bucket, you must be the bucket owner.

    This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is enabled, the bucket owner must use an authentication device to change the versioning state of the bucket.

    The following operations are related to GetBucketVersioning:

    " }, "GetBucketWebsite":{ "name":"GetBucketWebsite", @@ -454,7 +495,7 @@ "input":{"shape":"GetBucketWebsiteRequest"}, "output":{"shape":"GetBucketWebsiteOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETwebsite.html", - "documentation":"

    Returns the website configuration for a bucket.

    " + "documentation":"

    Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.

    This GET operation requires the s3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the s3:GetBucketWebsite permission.

    The following operations are related to GetBucketWebsite:

    " }, "GetObject":{ "name":"GetObject", @@ -465,10 +506,11 @@ "input":{"shape":"GetObjectRequest"}, "output":{"shape":"GetObjectOutput"}, "errors":[ - {"shape":"NoSuchKey"} + {"shape":"NoSuchKey"}, + {"shape":"InvalidObjectState"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html", - "documentation":"

    Retrieves objects from Amazon S3.

    " + "documentation":"

    Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

    An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

    To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

    To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

    If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or in the S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects.

    Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

    If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the count of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

    Permissions

    You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    • If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.

    • If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.

    Versioning

    By default, the GET operation returns the current version of an object. To return a different version, use the versionId subresource.

    If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

    For more information about versioning, see PutBucketVersioning.

    Overriding Response Header Values

    There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

    You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

    You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

    • response-content-type

    • response-content-language

    • response-expires

    • response-cache-control

    • response-content-disposition

    • response-content-encoding

    Additional Considerations about Request Headers

    If both the If-Match and If-Unmodified-Since headers are present in the request, and the If-Match condition evaluates to true while the If-Unmodified-Since condition evaluates to false, Amazon S3 returns 200 OK and the requested data.

    If both the If-None-Match and If-Modified-Since headers are present in the request, and the If-None-Match condition evaluates to false while the If-Modified-Since condition evaluates to true, Amazon S3 returns a 304 Not Modified response code.

    For more information about conditional requests, see RFC 7232.

    The following operations are related to GetObject:
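
    For illustration only, a minimal AWS SDK for Java v2 sketch of a GET that downloads an object to a file and overrides two response headers; the bucket, key, and file name are placeholders, and error handling is omitted.

    import java.nio.file.Paths;
    import software.amazon.awssdk.core.sync.ResponseTransformer;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetObjectRequest;
    import software.amazon.awssdk.services.s3.model.GetObjectResponse;

    public class GetObjectExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            GetObjectRequest request = GetObjectRequest.builder()
                    .bucket("examplebucket")
                    .key("photos/2006/February/sample.jpg")
                    // Signed requests may override selected response headers, as described above.
                    .responseContentType("image/jpeg")
                    .responseContentDisposition("attachment; filename=sample.jpg")
                    // To read a non-current version, add .versionId(...) with a real version ID.
                    .build();
            GetObjectResponse metadata = s3.getObject(request, ResponseTransformer.toFile(Paths.get("sample.jpg")));
            System.out.println("ETag: " + metadata.eTag() + ", length: " + metadata.contentLength());
        }
    }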

    " }, "GetObjectAcl":{ "name":"GetObjectAcl", @@ -482,7 +524,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETacl.html", - "documentation":"

    Returns the access control list (ACL) of an object.

    " + "documentation":"

    Returns the access control list (ACL) of an object. To use this operation, you must have READ_ACP access to the object.

    This action is not supported by Amazon S3 on Outposts.

    Versioning

    By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.

    The following operations are related to GetObjectAcl:

    " }, "GetObjectLegalHold":{ "name":"GetObjectLegalHold", @@ -492,7 +534,7 @@ }, "input":{"shape":"GetObjectLegalHoldRequest"}, "output":{"shape":"GetObjectLegalHoldOutput"}, - "documentation":"

    Gets an object's current Legal Hold status.

    " + "documentation":"

    Gets an object's current Legal Hold status. For more information, see Locking Objects.

    This action is not supported by Amazon S3 on Outposts.

    " }, "GetObjectLockConfiguration":{ "name":"GetObjectLockConfiguration", @@ -502,7 +544,7 @@ }, "input":{"shape":"GetObjectLockConfigurationRequest"}, "output":{"shape":"GetObjectLockConfigurationOutput"}, - "documentation":"

    Gets the object lock configuration for a bucket. The rule specified in the object lock configuration will be applied by default to every new object placed in the specified bucket.

    " + "documentation":"

    Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.

    " }, "GetObjectRetention":{ "name":"GetObjectRetention", @@ -512,7 +554,7 @@ }, "input":{"shape":"GetObjectRetentionRequest"}, "output":{"shape":"GetObjectRetentionOutput"}, - "documentation":"

    Retrieves an object's retention settings.

    " + "documentation":"

    Retrieves an object's retention settings. For more information, see Locking Objects.

    This action is not supported by Amazon S3 on Outposts.

    " }, "GetObjectTagging":{ "name":"GetObjectTagging", @@ -522,7 +564,7 @@ }, "input":{"shape":"GetObjectTaggingRequest"}, "output":{"shape":"GetObjectTaggingOutput"}, - "documentation":"

    Returns the tag-set of an object.

    " + "documentation":"

    Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.

    To use this operation, you must have permission to perform the s3:GetObjectTagging action. By default, the GET operation returns information about the current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging action.

    By default, the bucket owner has this permission and can grant this permission to others.

    For information about the Amazon S3 object tagging feature, see Object Tagging.

    The following operation is related to GetObjectTagging:
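
    A brief, illustrative AWS SDK for Java v2 sketch; the bucket and key are placeholders, and versionId is omitted so the tags of the current version are returned.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetObjectTaggingRequest;

    public class GetObjectTaggingExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            s3.getObjectTagging(GetObjectTaggingRequest.builder()
                    .bucket("my-bucket")
                    .key("photos/2006/February/sample.jpg")
                    .build())
              .tagSet()
              .forEach(tag -> System.out.println(tag.key() + "=" + tag.value()));
        }
    }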

    " }, "GetObjectTorrent":{ "name":"GetObjectTorrent", @@ -533,7 +575,7 @@ "input":{"shape":"GetObjectTorrentRequest"}, "output":{"shape":"GetObjectTorrentOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETtorrent.html", - "documentation":"

    Return torrent files from a bucket.

    " + "documentation":"

    Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.

    You can get a torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.

    To use GET, you must have READ access to the object.

    This action is not supported by Amazon S3 on Outposts.

    The following operation is related to GetObjectTorrent:

    " }, "GetPublicAccessBlock":{ "name":"GetPublicAccessBlock", @@ -543,7 +585,7 @@ }, "input":{"shape":"GetPublicAccessBlockRequest"}, "output":{"shape":"GetPublicAccessBlockOutput"}, - "documentation":"

    Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket.

    " + "documentation":"

    Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

    When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock settings are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

    For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

    The following operations are related to GetPublicAccessBlock:

    " }, "HeadBucket":{ "name":"HeadBucket", @@ -556,7 +598,7 @@ {"shape":"NoSuchBucket"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketHEAD.html", - "documentation":"

    This operation is useful to determine if a bucket exists and you have permission to access it.

    " + "documentation":"

    This operation is useful to determine if a bucket exists and you have permission to access it. The operation returns a 200 OK if the bucket exists and you have permission to access it. Otherwise, the operation might return responses such as 404 Not Found and 403 Forbidden.

    To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
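
    For illustration, a minimal AWS SDK for Java v2 sketch that distinguishes the 200, 404, and 403 outcomes described above; the bucket name is a placeholder.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
    import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
    import software.amazon.awssdk.services.s3.model.S3Exception;

    public class HeadBucketExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            try {
                s3.headBucket(HeadBucketRequest.builder().bucket("my-bucket").build());
                System.out.println("Bucket exists and is accessible (200 OK).");
            } catch (NoSuchBucketException e) {
                System.out.println("Bucket does not exist (404 Not Found).");
            } catch (S3Exception e) {
                // A 403 typically means the bucket exists but s3:ListBucket permission is missing.
                System.out.println("Request failed with HTTP " + e.statusCode());
            }
        }
    }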

    " }, "HeadObject":{ "name":"HeadObject", @@ -570,7 +612,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html", - "documentation":"

    The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

    " + "documentation":"

    The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

    A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body.

    If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

    Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

    Consider the following when using request headers:

    • Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:

      • If-Match condition evaluates to true, and;

      • If-Unmodified-Since condition evaluates to false;

      Then Amazon S3 returns 200 OK and the data requested.

    • Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request as follows:

      • If-None-Match condition evaluates to false, and;

      • If-Modified-Since condition evaluates to true;

      Then Amazon S3 returns the 304 Not Modified response code.

    For more information about conditional requests, see RFC 7232.

    Permissions

    You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (\"no such key\") error.

    • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 (\"access denied\") error.

    The following operation is related to HeadObject:
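
    For illustration only, a minimal AWS SDK for Java v2 sketch that reads object metadata without downloading the body; the bucket and key are placeholders.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
    import software.amazon.awssdk.services.s3.model.HeadObjectResponse;

    public class HeadObjectExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            // HEAD returns the same headers as GET but no response body.
            HeadObjectResponse head = s3.headObject(HeadObjectRequest.builder()
                    .bucket("my-bucket")
                    .key("photos/2006/February/sample.jpg")
                    .build());
            System.out.println("Content-Type: " + head.contentType());
            System.out.println("Content-Length: " + head.contentLength());
            System.out.println("ETag: " + head.eTag());
        }
    }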

    " }, "ListBucketAnalyticsConfigurations":{ "name":"ListBucketAnalyticsConfigurations", @@ -580,7 +622,17 @@ }, "input":{"shape":"ListBucketAnalyticsConfigurationsRequest"}, "output":{"shape":"ListBucketAnalyticsConfigurationsOutput"}, - "documentation":"

    Lists the analytics configurations for the bucket.

    " + "documentation":"

    Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

    This operation supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

    To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

    The following operations are related to ListBucketAnalyticsConfigurations:
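
    To illustrate the pagination contract described above, a minimal AWS SDK for Java v2 sketch that follows NextContinuationToken until IsTruncated is false; the bucket name is a placeholder.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsRequest;
    import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsResponse;

    public class ListAnalyticsConfigurationsExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            String token = null;
            do {
                ListBucketAnalyticsConfigurationsResponse page = s3.listBucketAnalyticsConfigurations(
                        ListBucketAnalyticsConfigurationsRequest.builder()
                                .bucket("my-bucket")
                                .continuationToken(token)
                                .build());
                page.analyticsConfigurationList().forEach(config -> System.out.println(config.id()));
                // At most 100 configurations are returned per page.
                token = Boolean.TRUE.equals(page.isTruncated()) ? page.nextContinuationToken() : null;
            } while (token != null);
        }
    }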

    " + }, + "ListBucketIntelligentTieringConfigurations":{ + "name":"ListBucketIntelligentTieringConfigurations", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?intelligent-tiering" + }, + "input":{"shape":"ListBucketIntelligentTieringConfigurationsRequest"}, + "output":{"shape":"ListBucketIntelligentTieringConfigurationsOutput"}, + "documentation":"

    Lists the S3 Intelligent-Tiering configuration from the specified bucket.

    The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

    The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

    If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

    Operations related to ListBucketIntelligentTieringConfigurations include:

    " }, "ListBucketInventoryConfigurations":{ "name":"ListBucketInventoryConfigurations", @@ -590,7 +642,7 @@ }, "input":{"shape":"ListBucketInventoryConfigurationsRequest"}, "output":{"shape":"ListBucketInventoryConfigurationsOutput"}, - "documentation":"

    Returns a list of inventory configurations for the bucket.

    " + "documentation":"

    Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.

    This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

    To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about the Amazon S3 inventory feature, see Amazon S3 Inventory

    The following operations are related to ListBucketInventoryConfigurations:

    " }, "ListBucketMetricsConfigurations":{ "name":"ListBucketMetricsConfigurations", @@ -600,7 +652,7 @@ }, "input":{"shape":"ListBucketMetricsConfigurationsRequest"}, "output":{"shape":"ListBucketMetricsConfigurationsOutput"}, - "documentation":"

    Lists the metrics configurations for the bucket.

    " + "documentation":"

    Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.

    This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

    To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.

    The following operations are related to ListBucketMetricsConfigurations:

    " }, "ListBuckets":{ "name":"ListBuckets", @@ -622,7 +674,7 @@ "input":{"shape":"ListMultipartUploadsRequest"}, "output":{"shape":"ListMultipartUploadsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html", - "documentation":"

    This operation lists in-progress multipart uploads.

    " + "documentation":"

    This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.

    This operation returns at most 1,000 multipart uploads in the response; 1,000 is the maximum number of uploads a response can include, and it is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads request parameter. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with the value true. To list the additional multipart uploads, use the key-marker and upload-id-marker request parameters.

    In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.

    For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

    For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

    The following operations are related to ListMultipartUploads:
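
    As a sketch of the key-marker and upload-id-marker pagination described above, using the AWS SDK for Java v2; the bucket name is a placeholder.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;
    import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse;

    public class ListMultipartUploadsExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            ListMultipartUploadsRequest request = ListMultipartUploadsRequest.builder()
                    .bucket("my-bucket")
                    .maxUploads(100)
                    .build();
            ListMultipartUploadsResponse response;
            do {
                response = s3.listMultipartUploads(request);
                response.uploads().forEach(u ->
                        System.out.println(u.key() + " uploadId=" + u.uploadId() + " initiated=" + u.initiated()));
                // When the listing is truncated, resume from the markers returned in the response.
                request = request.toBuilder()
                        .keyMarker(response.nextKeyMarker())
                        .uploadIdMarker(response.nextUploadIdMarker())
                        .build();
            } while (Boolean.TRUE.equals(response.isTruncated()));
        }
    }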

    " }, "ListObjectVersions":{ "name":"ListObjectVersions", @@ -633,7 +685,7 @@ "input":{"shape":"ListObjectVersionsRequest"}, "output":{"shape":"ListObjectVersionsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETVersion.html", - "documentation":"

    Returns metadata about all of the versions of objects in a bucket.

    ", + "documentation":"

    Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.

    A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

    To use this operation, you must have READ access to the bucket.

    This action is not supported by Amazon S3 on Outposts.

    The following operations are related to ListObjectVersions:

    ", "alias":"GetBucketObjectVersions" }, "ListObjects":{ @@ -648,7 +700,7 @@ {"shape":"NoSuchBucket"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html", - "documentation":"

    Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.

    ", + "documentation":"

    Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

    This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

    The following operations are related to ListObjects:

    ", "alias":"GetBucket" }, "ListObjectsV2":{ @@ -662,7 +714,7 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentation":"

    Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development.

    " + "documentation":"

    Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

    To use this operation, you must have READ access to the bucket.

    To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    This section describes the latest revision of the API. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.

    To get a list of your buckets, see ListBuckets.

    The following operations are related to ListObjectsV2:
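
    For illustration only, a minimal AWS SDK for Java v2 sketch; the paginator follows the continuation token across pages of up to 1,000 keys, and the bucket name and prefix are placeholders.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;

    public class ListObjectsV2Example {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            s3.listObjectsV2Paginator(ListObjectsV2Request.builder()
                            .bucket("my-bucket")
                            .prefix("photos/2006/")
                            .build())
              .contents()
              .forEach(object -> System.out.println(object.key() + " (" + object.size() + " bytes)"));
        }
    }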

    " }, "ListParts":{ "name":"ListParts", @@ -673,7 +725,7 @@ "input":{"shape":"ListPartsRequest"}, "output":{"shape":"ListPartsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html", - "documentation":"

    Lists the parts that have been uploaded for a specific multipart upload.

    " + "documentation":"

    Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.

    For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

    For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

    The following operations are related to ListParts:
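
    A hedged sketch of the part-number-marker pagination described above, using the AWS SDK for Java v2; the bucket, key, and upload ID are placeholders taken from an earlier CreateMultipartUpload call.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.ListPartsRequest;
    import software.amazon.awssdk.services.s3.model.ListPartsResponse;

    public class ListPartsExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            ListPartsRequest request = ListPartsRequest.builder()
                    .bucket("my-bucket")
                    .key("large-object.bin")
                    .uploadId("exampleUploadId")
                    .maxParts(100)
                    .build();
            ListPartsResponse response;
            do {
                response = s3.listParts(request);
                response.parts().forEach(part ->
                        System.out.println("Part " + part.partNumber() + " ETag " + part.eTag()));
                // Continue from NextPartNumberMarker while the listing is truncated.
                request = request.toBuilder().partNumberMarker(response.nextPartNumberMarker()).build();
            } while (Boolean.TRUE.equals(response.isTruncated()));
        }
    }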

    " }, "PutBucketAccelerateConfiguration":{ "name":"PutBucketAccelerateConfiguration", @@ -682,7 +734,7 @@ "requestUri":"/{Bucket}?accelerate" }, "input":{"shape":"PutBucketAccelerateConfigurationRequest"}, - "documentation":"

    Sets the accelerate configuration of an existing bucket.

    " + "documentation":"

    Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.

    To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    The Transfer Acceleration state of a bucket can be set to one of the following two values:

    • Enabled – Enables accelerated data transfers to the bucket.

    • Suspended – Disables accelerated data transfers to the bucket.

    The GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket.

    After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.

    The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").

    For more information about transfer acceleration, see Transfer Acceleration.

    The following operations are related to PutBucketAccelerateConfiguration:
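
    For illustration only, a minimal AWS SDK for Java v2 sketch that enables Transfer Acceleration; the bucket name is a placeholder and must be DNS-compliant with no periods.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.AccelerateConfiguration;
    import software.amazon.awssdk.services.s3.model.BucketAccelerateStatus;
    import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationRequest;

    public class EnableTransferAccelerationExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            // Use BucketAccelerateStatus.SUSPENDED to disable accelerated transfers again.
            s3.putBucketAccelerateConfiguration(PutBucketAccelerateConfigurationRequest.builder()
                    .bucket("my-dns-compliant-bucket")
                    .accelerateConfiguration(AccelerateConfiguration.builder()
                            .status(BucketAccelerateStatus.ENABLED)
                            .build())
                    .build());
        }
    }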

    " }, "PutBucketAcl":{ "name":"PutBucketAcl", @@ -692,7 +744,8 @@ }, "input":{"shape":"PutBucketAclRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html", - "documentation":"

    Sets the permissions on a bucket using access control lists (ACL).

    " + "documentation":"

    Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.

    You can use one of the following two ways to set a bucket's permissions:

    • Specify the ACL in the request body

    • Specify permissions using request headers

    You cannot specify access permission using both the body and the request headers.

    Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

    Access Permissions

    You can set access permissions using one of the following methods:

    • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

    • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

      You specify each grantee as a type=value pair, where the type is one of the following:

      • id – if the value specified is the canonical user ID of an AWS account

      • uri – if you are granting permissions to a predefined group

      • emailAddress – if the value specified is the email address of an AWS account

        Using email addresses to specify a grantee is only supported in the following AWS Regions:

        • US East (N. Virginia)

        • US West (N. California)

        • US West (Oregon)

        • Asia Pacific (Singapore)

        • Asia Pacific (Sydney)

        • Asia Pacific (Tokyo)

        • Europe (Ireland)

        • South America (São Paulo)

        For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

      For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to the LogDelivery group predefined by Amazon S3 and to two AWS accounts identified by their IDs.

      x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

    Grantee Values

    You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

    • By the person's ID:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>

      DisplayName is optional and ignored in the request.

    • By URI:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>

    • By Email address:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>

      The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    Related Resources
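
    To make the two approaches above concrete, a hedged AWS SDK for Java v2 sketch; the bucket name and account ID are placeholders, and each call replaces the bucket's existing ACL entirely.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.BucketCannedACL;
    import software.amazon.awssdk.services.s3.model.PutBucketAclRequest;

    public class PutBucketAclExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            // Approach 1: a canned ACL, sent as the x-amz-acl header.
            s3.putBucketAcl(PutBucketAclRequest.builder()
                    .bucket("my-bucket")
                    .acl(BucketCannedACL.PRIVATE)
                    .build());
            // Approach 2: explicit grants via x-amz-grant-* headers (cannot be combined with a canned ACL).
            s3.putBucketAcl(PutBucketAclRequest.builder()
                    .bucket("my-bucket")
                    .grantWrite("uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\"")
                    .grantFullControl("id=\"111122223333\"")
                    .build());
        }
    }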

    ", + "httpChecksumRequired":true }, "PutBucketAnalyticsConfiguration":{ "name":"PutBucketAnalyticsConfiguration", @@ -701,7 +754,7 @@ "requestUri":"/{Bucket}?analytics" }, "input":{"shape":"PutBucketAnalyticsConfigurationRequest"}, - "documentation":"

    Sets an analytics configuration for the bucket (specified by the analytics configuration ID).

    " + "documentation":"

    Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.

    You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat file. See the DataExport request element. Reports are updated daily and are based on the object filters that you configure. When selecting data export, you specify a destination bucket and an optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.

    You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

    To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
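
    As an informal illustration (not part of the service model itself), setting such a configuration with the AWS SDK for Java v2 might look like the sketch below; it assumes an existing S3Client named s3, classes from software.amazon.awssdk.services.s3.model, and placeholder bucket names and IDs.

      AnalyticsConfiguration config = AnalyticsConfiguration.builder()
          .id("report-1")
          .storageClassAnalysis(StorageClassAnalysis.builder()
              .dataExport(StorageClassAnalysisDataExport.builder()
                  .outputSchemaVersion(StorageClassAnalysisSchemaVersion.V_1)
                  .destination(AnalyticsExportDestination.builder()
                      .s3BucketDestination(AnalyticsS3BucketDestination.builder()
                          .bucket("arn:aws:s3:::destination-bucket")   // must be in the same Region as the source bucket
                          .format(AnalyticsS3ExportFileFormat.CSV)
                          .prefix("analytics/")
                          .build())
                      .build())
                  .build())
              .build())
          .build();

      s3.putBucketAnalyticsConfiguration(PutBucketAnalyticsConfigurationRequest.builder()
          .bucket("source-bucket")
          .id("report-1")
          .analyticsConfiguration(config)
          .build());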

    Special Errors

      • HTTP Error: HTTP 400 Bad Request

      • Code: InvalidArgument

      • Cause: Invalid argument.

      • HTTP Error: HTTP 400 Bad Request

      • Code: TooManyConfigurations

      • Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

      • HTTP Error: HTTP 403 Forbidden

      • Code: AccessDenied

      • Cause: You are not the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to set the configuration on the bucket.

    Related Resources

    " }, "PutBucketCors":{ "name":"PutBucketCors", @@ -711,7 +764,8 @@ }, "input":{"shape":"PutBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html", - "documentation":"

    Sets the CORS configuration for a bucket.

    " + "documentation":"

    Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

    To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

    You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

    To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.

    When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

    • The request's Origin header must match AllowedOrigin elements.

    • The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method header in case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.

    • Every header specified in the Access-Control-Request-Headers request header of a pre-flight request must match an AllowedHeader element.

    For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
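
    For illustration only, a minimal cors configuration matching the example origin above could be applied with the AWS SDK for Java v2 roughly as follows (assuming an existing S3Client named s3 and placeholder names):

      CORSRule rule = CORSRule.builder()
          .allowedOrigins("http://www.example.com")   // matched against the request's Origin header
          .allowedMethods("GET", "PUT")               // matched against the request method or Access-Control-Request-Method
          .allowedHeaders("*")                        // matched against Access-Control-Request-Headers
          .maxAgeSeconds(3000)
          .build();

      s3.putBucketCors(PutBucketCorsRequest.builder()
          .bucket("my.example.bucket.com")
          .corsConfiguration(CORSConfiguration.builder().corsRules(rule).build())
          .build());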

    Related Resources

    ", + "httpChecksumRequired":true }, "PutBucketEncryption":{ "name":"PutBucketEncryption", @@ -720,7 +774,17 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "documentation":"

    Creates a new server-side encryption configuration (or replaces an existing one, if present).

    " + "documentation":"

    This operation uses the encryption subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.

    Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon Simple Storage Service Developer Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

    This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

    To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
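
    As a hedged sketch (assuming an existing S3Client named s3 and a placeholder bucket name), configuring SSE-S3 as the default encryption with the AWS SDK for Java v2 might look like this:

      s3.putBucketEncryption(PutBucketEncryptionRequest.builder()
          .bucket("my-bucket")
          .serverSideEncryptionConfiguration(ServerSideEncryptionConfiguration.builder()
              .rules(ServerSideEncryptionRule.builder()
                  .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
                      .sseAlgorithm(ServerSideEncryption.AES256)   // SSE-S3; use AWS_KMS plus a key ID for SSE-KMS
                      .build())
                  .build())
              .build())
          .build());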

    Related Resources

    ", + "httpChecksumRequired":true + }, + "PutBucketIntelligentTieringConfiguration":{ + "name":"PutBucketIntelligentTieringConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?intelligent-tiering" + }, + "input":{"shape":"PutBucketIntelligentTieringConfigurationRequest"}, + "documentation":"

    Puts an S3 Intelligent-Tiering configuration to the specified bucket.

    The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

    The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

    If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
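
    Once this operation is available in an SDK release, a configuration that moves objects into the Archive Access tier after 90 days without access might be put roughly as follows with the AWS SDK for Java v2 (an assumed S3Client named s3, placeholder bucket and configuration IDs):

      IntelligentTieringConfiguration config = IntelligentTieringConfiguration.builder()
          .id("archive-after-90-days")
          .status(IntelligentTieringStatus.ENABLED)
          .tierings(Tiering.builder()
              .accessTier(IntelligentTieringAccessTier.ARCHIVE_ACCESS)
              .days(90)
              .build())
          .build();

      s3.putBucketIntelligentTieringConfiguration(PutBucketIntelligentTieringConfigurationRequest.builder()
          .bucket("my-bucket")
          .id("archive-after-90-days")
          .intelligentTieringConfiguration(config)
          .build());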

    Operations related to PutBucketIntelligentTieringConfiguration include:

    " }, "PutBucketInventoryConfiguration":{ "name":"PutBucketInventoryConfiguration", @@ -729,7 +793,7 @@ "requestUri":"/{Bucket}?inventory" }, "input":{"shape":"PutBucketInventoryConfigurationRequest"}, - "documentation":"

    Adds an inventory configuration (identified by the inventory ID) from the bucket.

    " + "documentation":"

    This implementation of the PUT operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.

    Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.

    When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.

    You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

    To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
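
    A rough, non-authoritative example of adding a weekly CSV inventory with the AWS SDK for Java v2 (assuming an existing S3Client named s3 and placeholder bucket names):

      InventoryConfiguration config = InventoryConfiguration.builder()
          .id("weekly-inventory")
          .isEnabled(true)
          .includedObjectVersions(InventoryIncludedObjectVersions.CURRENT)
          .schedule(InventorySchedule.builder().frequency(InventoryFrequency.WEEKLY).build())
          .destination(InventoryDestination.builder()
              .s3BucketDestination(InventoryS3BucketDestination.builder()
                  .bucket("arn:aws:s3:::destination-bucket")   // must be in the same Region as the source bucket
                  .format(InventoryFormat.CSV)
                  .prefix("inventory/")
                  .build())
              .build())
          .build();

      s3.putBucketInventoryConfiguration(PutBucketInventoryConfigurationRequest.builder()
          .bucket("source-bucket")
          .id("weekly-inventory")
          .inventoryConfiguration(config)
          .build());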

    Special Errors

    • HTTP 400 Bad Request Error

      • Code: InvalidArgument

      • Cause: Invalid argument.

    • HTTP 400 Bad Request Error

      • Code: TooManyConfigurations

      • Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

    • HTTP 403 Forbidden Error

      • Code: AccessDenied

      • Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket.

    Related Resources

    " }, "PutBucketLifecycle":{ "name":"PutBucketLifecycle", @@ -739,8 +803,9 @@ }, "input":{"shape":"PutBucketLifecycleRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html", - "documentation":"

    No longer used, see the PutBucketLifecycleConfiguration operation.

    ", - "deprecated":true + "documentation":"

    For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

    Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

    By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

    You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

    • s3:DeleteObject

    • s3:DeleteObjectVersion

    • s3:PutLifecycleConfiguration

    For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

    Related Resources

    ", + "deprecated":true, + "httpChecksumRequired":true }, "PutBucketLifecycleConfiguration":{ "name":"PutBucketLifecycleConfiguration", @@ -749,7 +814,8 @@ "requestUri":"/{Bucket}?lifecycle" }, "input":{"shape":"PutBucketLifecycleConfigurationRequest"}, - "documentation":"

    Sets lifecycle configuration for your bucket. If a lifecycle configuration exists, it replaces it.

    " + "documentation":"

    Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management.

    Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

    Rules

    You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:

    • Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.

    • Status indicating whether the rule is in effect.

    • One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.

    For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
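
    For example, a single rule that transitions objects under a key prefix to STANDARD_IA after 30 days and expires them after a year could be expressed with the AWS SDK for Java v2 roughly as follows (a sketch; the S3Client named s3, bucket, and prefix are placeholder assumptions):

      LifecycleRule rule = LifecycleRule.builder()
          .id("archive-logs")
          .status(ExpirationStatus.ENABLED)
          .filter(LifecycleRuleFilter.builder().prefix("logs/").build())
          .transitions(Transition.builder()
              .days(30)
              .storageClass(TransitionStorageClass.STANDARD_IA)
              .build())
          .expiration(LifecycleExpiration.builder().days(365).build())
          .build();

      s3.putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
          .bucket("my-bucket")
          .lifecycleConfiguration(BucketLifecycleConfiguration.builder().rules(rule).build())
          .build());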

    Permissions

    By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

    You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

    • s3:DeleteObject

    • s3:DeleteObjectVersion

    • s3:PutLifecycleConfiguration

    For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

    The following are related to PutBucketLifecycleConfiguration:

    ", + "httpChecksumRequired":true }, "PutBucketLogging":{ "name":"PutBucketLogging", @@ -759,7 +825,8 @@ }, "input":{"shape":"PutBucketLoggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html", - "documentation":"

    Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner.

    " + "documentation":"

    Set the logging parameters for a bucket and specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same AWS Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

    The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

    Grantee Values

    You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

    • By the person's ID:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

      DisplayName is optional and ignored in the request.

    • By Email address:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

      The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

    • By URI:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

    To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

    <BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />
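
    As a non-normative sketch with the AWS SDK for Java v2 (assuming an existing S3Client named s3 and placeholder bucket names), enabling server access logging looks roughly like the following; sending an empty BucketLoggingStatus instead disables logging, mirroring the empty XML element above:

      s3.putBucketLogging(PutBucketLoggingRequest.builder()
          .bucket("source-bucket")
          .bucketLoggingStatus(BucketLoggingStatus.builder()
              .loggingEnabled(LoggingEnabled.builder()
                  .targetBucket("log-bucket")              // log bucket in the same Region as the source bucket
                  .targetPrefix("source-bucket-logs/")
                  .build())
              .build())
          .build());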

    For more information about server access logging, see Server Access Logging.

    For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

    The following operations are related to PutBucketLogging:

    ", + "httpChecksumRequired":true }, "PutBucketMetricsConfiguration":{ "name":"PutBucketMetricsConfiguration", @@ -768,7 +835,7 @@ "requestUri":"/{Bucket}?metrics" }, "input":{"shape":"PutBucketMetricsConfigurationRequest"}, - "documentation":"

    Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.

    " + "documentation":"

    Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.

    To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
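
    A minimal sketch of defining a filtered metrics configuration with the AWS SDK for Java v2 (assumed S3Client named s3; bucket, ID, and prefix are placeholders):

      s3.putBucketMetricsConfiguration(PutBucketMetricsConfigurationRequest.builder()
          .bucket("my-bucket")
          .id("documents-metrics")
          .metricsConfiguration(MetricsConfiguration.builder()
              .id("documents-metrics")
              .filter(MetricsFilter.builder().prefix("documents/").build())
              .build())
          .build());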

    The following operations are related to PutBucketMetricsConfiguration:

    PutBucketMetricsConfiguration has the following special error:

    • Error code: TooManyConfigurations

      • Description: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

      • HTTP Status Code: HTTP 400 Bad Request

    " }, "PutBucketNotification":{ "name":"PutBucketNotification", @@ -778,8 +845,9 @@ }, "input":{"shape":"PutBucketNotificationRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTnotification.html", - "documentation":"

    No longer used, see the PutBucketNotificationConfiguration operation.

    ", - "deprecated":true + "documentation":"

    No longer used, see the PutBucketNotificationConfiguration operation.

    ", + "deprecated":true, + "httpChecksumRequired":true }, "PutBucketNotificationConfiguration":{ "name":"PutBucketNotificationConfiguration", @@ -788,7 +856,17 @@ "requestUri":"/{Bucket}?notification" }, "input":{"shape":"PutBucketNotificationConfigurationRequest"}, - "documentation":"

    Enables notifications of specified events for a bucket.

    " + "documentation":"

    Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

    Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

    By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

    <NotificationConfiguration>

    </NotificationConfiguration>

    This operation replaces the existing notification configuration with the configuration you include in the request body.

    After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

    You can disable notifications by adding the empty NotificationConfiguration element.

    By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.

    The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3 will not add the configuration to your bucket.
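
    For illustration, replacing the notification configuration with a single SNS topic destination might look like the following AWS SDK for Java v2 sketch (the S3Client named s3 and the topic ARN are placeholder assumptions):

      s3.putBucketNotificationConfiguration(PutBucketNotificationConfigurationRequest.builder()
          .bucket("my-bucket")
          .notificationConfiguration(NotificationConfiguration.builder()
              .topicConfigurations(TopicConfiguration.builder()
                  .topicArn("arn:aws:sns:us-west-2:111122223333:my-topic")
                  .events(Event.S3_OBJECT_CREATED)   // "s3:ObjectCreated:*"
                  .build())
              .build())
          .build());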

    Responses

    If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

    The following operation is related to PutBucketNotificationConfiguration:

    " + }, + "PutBucketOwnershipControls":{ + "name":"PutBucketOwnershipControls", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?ownershipControls" + }, + "input":{"shape":"PutBucketOwnershipControlsRequest"}, + "documentation":"

    Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

    For information about Amazon S3 Object Ownership, see Using Object Ownership.

    The following operations are related to PutBucketOwnershipControls:

    ", + "httpChecksumRequired":true }, "PutBucketPolicy":{ "name":"PutBucketPolicy", @@ -798,7 +876,8 @@ }, "input":{"shape":"PutBucketPolicyRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html", - "documentation":"

    Applies an Amazon S3 bucket policy to an Amazon S3 bucket.

    " + "documentation":"

    Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

    If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

    As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

    For more information about bucket policies, see Using Bucket Policies and User Policies.
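
    A hedged example of applying a simple policy with the AWS SDK for Java v2 (assuming an existing S3Client named s3; the bucket name and policy statement are placeholders):

      String policyJson = "{"
          + "\"Version\":\"2012-10-17\","
          + "\"Statement\":[{\"Sid\":\"AllowGet\",\"Effect\":\"Allow\",\"Principal\":\"*\","
          + "\"Action\":\"s3:GetObject\",\"Resource\":\"arn:aws:s3:::my-bucket/*\"}]}";

      s3.putBucketPolicy(PutBucketPolicyRequest.builder()
          .bucket("my-bucket")
          .policy(policyJson)
          .build());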

    The following operations are related to PutBucketPolicy:

    ", + "httpChecksumRequired":true }, "PutBucketReplication":{ "name":"PutBucketReplication", @@ -807,7 +886,8 @@ "requestUri":"/{Bucket}?replication" }, "input":{"shape":"PutBucketReplicationRequest"}, - "documentation":"

    Creates a replication configuration or replaces an existing one. For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

    " + "documentation":"

    Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

    To perform this operation, the user or role performing the operation must have the iam:PassRole permission.

    Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

    A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.

    To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.
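
    As an illustrative sketch only (assumed S3Client named s3, placeholder bucket and role ARNs), a filtered replication rule with the AWS SDK for Java v2 might be written as:

      ReplicationRule rule = ReplicationRule.builder()
          .id("replicate-documents")
          .priority(1)
          .status(ReplicationRuleStatus.ENABLED)
          .filter(ReplicationRuleFilter.builder().prefix("documents/").build())
          .deleteMarkerReplication(DeleteMarkerReplication.builder()
              .status(DeleteMarkerReplicationStatus.DISABLED)
              .build())
          .destination(Destination.builder().bucket("arn:aws:s3:::destination-bucket").build())
          .build();

      s3.putBucketReplication(PutBucketReplicationRequest.builder()
          .bucket("source-bucket")
          .replicationConfiguration(ReplicationConfiguration.builder()
              .role("arn:aws:iam::111122223333:role/replication-role")
              .rules(rule)
              .build())
          .build());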

    If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.

    For information about enabling versioning on a bucket, see Using Versioning.

    By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

    Handling Replication of Encrypted Objects

    By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

    For information on PutBucketReplication errors, see List of replication-related error codes

    The following operations are related to PutBucketReplication:

    ", + "httpChecksumRequired":true }, "PutBucketRequestPayment":{ "name":"PutBucketRequestPayment", @@ -817,7 +897,8 @@ }, "input":{"shape":"PutBucketRequestPaymentRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html", - "documentation":"

    Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html

    " + "documentation":"

    Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.

    The following operations are related to PutBucketRequestPayment:

    ", + "httpChecksumRequired":true }, "PutBucketTagging":{ "name":"PutBucketTagging", @@ -827,7 +908,8 @@ }, "input":{"shape":"PutBucketTaggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html", - "documentation":"

    Sets the tags for a bucket.

    " + "documentation":"

    Sets the tags for a bucket.

    Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

    Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

    To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
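
    A short, non-authoritative AWS SDK for Java v2 example (assuming an existing S3Client named s3 and placeholder tag values):

      s3.putBucketTagging(PutBucketTaggingRequest.builder()
          .bucket("my-bucket")
          .tagging(Tagging.builder()
              .tagSet(Tag.builder().key("project").value("phoenix").build(),
                      Tag.builder().key("cost-center").value("1234").build())
              .build())
          .build());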

    PutBucketTagging has the following special errors:

    • Error code: InvalidTagError

      • Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation.

    • Error code: MalformedXMLError

      • Description: The XML provided does not match the schema.

    • Error code: OperationAbortedError

      • Description: A conflicting conditional operation is currently in progress against this resource. Please try again.

    • Error code: InternalError

      • Description: The service was unable to apply the provided tag to the bucket.

    The following operations are related to PutBucketTagging:

    ", + "httpChecksumRequired":true }, "PutBucketVersioning":{ "name":"PutBucketVersioning", @@ -837,7 +919,8 @@ }, "input":{"shape":"PutBucketVersioningRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html", - "documentation":"

    Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.

    " + "documentation":"

    Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.

    You can set the versioning state with one of the following values:

    Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.

    Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.

    If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
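
    For illustration, enabling versioning with the AWS SDK for Java v2 might look like the sketch below (assumed S3Client named s3, placeholder bucket name); use BucketVersioningStatus.SUSPENDED to suspend versioning instead:

      s3.putBucketVersioning(PutBucketVersioningRequest.builder()
          .bucket("my-bucket")
          .versioningConfiguration(VersioningConfiguration.builder()
              .status(BucketVersioningStatus.ENABLED)
              .build())
          .build());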

    If the bucket owner enables MFA Delete in the bucket versioning configuration, the bucket owner must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.

    If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.

    Related Resources

    ", + "httpChecksumRequired":true }, "PutBucketWebsite":{ "name":"PutBucketWebsite", @@ -847,7 +930,8 @@ }, "input":{"shape":"PutBucketWebsiteRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html", - "documentation":"

    Set the website configuration for a bucket.

    " + "documentation":"

    Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.

    This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.

    To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.

    • WebsiteConfiguration

    • RedirectAllRequestsTo

    • HostName

    • Protocol

    If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.

    • WebsiteConfiguration

    • IndexDocument

    • Suffix

    • ErrorDocument

    • Key

    • RoutingRules

    • RoutingRule

    • Condition

    • HttpErrorCodeReturnedEquals

    • KeyPrefixEquals

    • Redirect

    • Protocol

    • HostName

    • ReplaceKeyPrefixWith

    • ReplaceKeyWith

    • HttpRedirectCode

    Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon Simple Storage Service Developer Guide.
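
    As a rough sketch (assumed S3Client named s3 and placeholder document names), a basic website configuration with an index and error document could be set with the AWS SDK for Java v2 as follows:

      s3.putBucketWebsite(PutBucketWebsiteRequest.builder()
          .bucket("my-bucket")
          .websiteConfiguration(WebsiteConfiguration.builder()
              .indexDocument(IndexDocument.builder().suffix("index.html").build())
              .errorDocument(ErrorDocument.builder().key("error.html").build())
              .build())
          .build());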

    ", + "httpChecksumRequired":true }, "PutObject":{ "name":"PutObject", @@ -858,7 +942,7 @@ "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation":"

    Adds an object to a bucket.

    " + "documentation":"

    Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

    Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

    Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

    To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

    The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.

    Server-side Encryption

    You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.

    If you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

    Access Control List (ACL)-Specific Request Headers

    You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

    Storage Class Options

    By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon Simple Storage Service Developer Guide.

    Versioning

    If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

    For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.
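
    A minimal, non-normative upload example with the AWS SDK for Java v2 (assuming an existing S3Client named s3, RequestBody from software.amazon.awssdk.core.sync, and placeholder bucket and key names):

      PutObjectResponse response = s3.putObject(
          PutObjectRequest.builder()
              .bucket("my-bucket")
              .key("notes/hello.txt")
              .serverSideEncryption(ServerSideEncryption.AES256)   // optional SSE-S3
              .build(),
          RequestBody.fromString("Hello, Amazon S3"));

      // In a versioning-enabled bucket, response.versionId() carries the generated version ID.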

    Related Resources

    " }, "PutObjectAcl":{ "name":"PutObjectAcl", @@ -872,7 +956,8 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html", - "documentation":"

    uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket

    " + "documentation":"

    Uses the acl subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon Simple Storage Service Developer Guide.

    This action is not supported by Amazon S3 on Outposts.

    Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 Developer Guide.

    Access Permissions

    You can set access permissions using one of the following methods:

    • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

    • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

      You specify each grantee as a type=value pair, where the type is one of the following:

      • id – if the value specified is the canonical user ID of an AWS account

      • uri – if you are granting permissions to a predefined group

      • emailAddress – if the value specified is the email address of an AWS account

        Using email addresses to specify a grantee is only supported in the following AWS Regions:

        • US East (N. Virginia)

        • US West (N. California)

        • US West (Oregon)

        • Asia Pacific (Singapore)

        • Asia Pacific (Sydney)

        • Asia Pacific (Tokyo)

        • Europe (Ireland)

        • South America (São Paulo)

        For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

      For example, the following x-amz-grant-read header grants list objects permission to the two AWS accounts identified by their email addresses.

      x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
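
    As a hedged illustration (assumed S3Client named s3, placeholder bucket and key), applying a canned ACL with the AWS SDK for Java v2 might look like the following; the explicit x-amz-grant-* headers map to the grant-related request members instead:

      s3.putObjectAcl(PutObjectAclRequest.builder()
          .bucket("my-bucket")
          .key("reports/summary.csv")
          .acl(ObjectCannedACL.PUBLIC_READ)   // canned ACL; do not combine with explicit grants
          .build());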

    Grantee Values

    You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

    • By the person's ID:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

      DisplayName is optional and ignored in the request.

    • By URI:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

    • By Email address:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

      The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    Versioning

    The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

    Related Resources

    ", + "httpChecksumRequired":true }, "PutObjectLegalHold":{ "name":"PutObjectLegalHold", @@ -882,7 +967,8 @@ }, "input":{"shape":"PutObjectLegalHoldRequest"}, "output":{"shape":"PutObjectLegalHoldOutput"}, - "documentation":"

    Applies a Legal Hold configuration to the specified object.

    " + "documentation":"

    Applies a Legal Hold configuration to the specified object.

    This action is not supported by Amazon S3 on Outposts.

    Related Resources

    ", + "httpChecksumRequired":true }, "PutObjectLockConfiguration":{ "name":"PutObjectLockConfiguration", @@ -892,7 +978,8 @@ }, "input":{"shape":"PutObjectLockConfigurationRequest"}, "output":{"shape":"PutObjectLockConfigurationOutput"}, - "documentation":"

    Places an object lock configuration on the specified bucket. The rule specified in the object lock configuration will be applied by default to every new object placed in the specified bucket.

    " + "documentation":"

    Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket.

    DefaultRetention requires either Days or Years. You can't specify both at the same time.
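
    A sketch of setting a default 30-day GOVERNANCE retention with the AWS SDK for Java v2 (assumed S3Client named s3; the bucket name is a placeholder and the bucket must have been created with Object Lock enabled):

      s3.putObjectLockConfiguration(PutObjectLockConfigurationRequest.builder()
          .bucket("my-locked-bucket")
          .objectLockConfiguration(ObjectLockConfiguration.builder()
              .objectLockEnabled(ObjectLockEnabled.ENABLED)
              .rule(ObjectLockRule.builder()
                  .defaultRetention(DefaultRetention.builder()
                      .mode(ObjectLockRetentionMode.GOVERNANCE)
                      .days(30)   // specify Days or Years, not both
                      .build())
                  .build())
              .build())
          .build());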

    Related Resources

    ", + "httpChecksumRequired":true }, "PutObjectRetention":{ "name":"PutObjectRetention", @@ -902,7 +989,8 @@ }, "input":{"shape":"PutObjectRetentionRequest"}, "output":{"shape":"PutObjectRetentionOutput"}, - "documentation":"

    Places an Object Retention configuration on an object.

    " + "documentation":"

    Places an Object Retention configuration on an object.

    This action is not supported by Amazon S3 on Outposts.

    Related Resources

    ", + "httpChecksumRequired":true }, "PutObjectTagging":{ "name":"PutObjectTagging", @@ -912,7 +1000,8 @@ }, "input":{"shape":"PutObjectTaggingRequest"}, "output":{"shape":"PutObjectTaggingOutput"}, - "documentation":"

    Sets the supplied tag-set to an object that already exists in a bucket

    " + "documentation":"

    Sets the supplied tag-set to an object that already exists in a bucket.

    A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

    For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.

    To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

    To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

    For information about the Amazon S3 object tagging feature, see Object Tagging.
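
    An illustrative AWS SDK for Java v2 call (assuming an existing S3Client named s3 and placeholder names); add versionId to tag a specific object version:

      s3.putObjectTagging(PutObjectTaggingRequest.builder()
          .bucket("my-bucket")
          .key("reports/summary.csv")
          .tagging(Tagging.builder()
              .tagSet(Tag.builder().key("classification").value("internal").build())
              .build())
          .build());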

    Special Errors

      • Code: InvalidTagError

      • Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.

      • Code: MalformedXMLError

      • Cause: The XML provided does not match the schema.

      • Code: OperationAbortedError

      • Cause: A conflicting conditional operation is currently in progress against this resource. Please try again.

      • Code: InternalError

      • Cause: The service was unable to apply the provided tag to the object.

    Related Resources

    ", + "httpChecksumRequired":true }, "PutPublicAccessBlock":{ "name":"PutPublicAccessBlock", @@ -921,7 +1010,8 @@ "requestUri":"/{Bucket}?publicAccessBlock" }, "input":{"shape":"PutPublicAccessBlockRequest"}, - "documentation":"

    Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket.

    " + "documentation":"

    Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

    When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

    For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
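
    A small sketch of blocking all public access with the AWS SDK for Java v2 (assumed S3Client named s3, placeholder bucket name):

      s3.putPublicAccessBlock(PutPublicAccessBlockRequest.builder()
          .bucket("my-bucket")
          .publicAccessBlockConfiguration(PublicAccessBlockConfiguration.builder()
              .blockPublicAcls(true)
              .ignorePublicAcls(true)
              .blockPublicPolicy(true)
              .restrictPublicBuckets(true)
              .build())
          .build());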

    Related Resources

    ", + "httpChecksumRequired":true }, "RestoreObject":{ "name":"RestoreObject", @@ -935,7 +1025,7 @@ {"shape":"ObjectAlreadyInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", - "documentation":"

    Restores an archived copy of an object back into Amazon S3

    ", + "documentation":"

    Restores an archived copy of an object back into Amazon S3.

    This action is not supported by Amazon S3 on Outposts.

    This action performs the following types of requests:

    • select - Perform a select query on an archived object

    • restore an archive - Restore an archived object

    To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    Querying Archives with Select Requests

    You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

    When making a select request, do the following:

    • Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

      For more information about the S3 structure in the request body, see the following:

    • Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.

      • The following expression returns all records from the specified object.

        SELECT * FROM Object

      • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

        SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

      • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

        SELECT s.Id, s.FirstName, s.SSN FROM S3Object s

    For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

    When making a select request, you can also do the following:

    • To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring objects,\" later in this topic.

    • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

    The following are additional important facts about the select feature:

    • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, either manually or through a lifecycle policy.

    • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.

    • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.

    Restoring objects

    Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.

    To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

    When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

    • Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

    • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

    • Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.

    For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.
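
    For illustration only, a Standard-tier restore of an archived object for 10 days with the AWS SDK for Java v2 might look like the following (assumed S3Client named s3, placeholder bucket and key):

      s3.restoreObject(RestoreObjectRequest.builder()
          .bucket("my-archive-bucket")
          .key("backups/2019/archive.tar")
          .restoreRequest(RestoreRequest.builder()
              .days(10)
              .glacierJobParameters(GlacierJobParameters.builder()
                  .tier(Tier.STANDARD)   // or Tier.EXPEDITED / Tier.BULK
                  .build())
              .build())
          .build());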

    You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service Developer Guide.

    To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.

    After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

    If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

    Responses

    A successful operation returns either the 200 OK or 202 Accepted status code.

    • If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.

    • If the object is previously restored, Amazon S3 returns 200 OK in the response.

    Special Errors

      • Code: RestoreAlreadyInProgress

      • Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: GlacierExpeditedRetrievalNotAvailable

      • Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

      • HTTP Status Code: 503

      • SOAP Fault Code Prefix: N/A

    Related Resources

    ", "alias":"PostObjectRestore" }, "SelectObjectContent":{ @@ -950,7 +1040,7 @@ "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "output":{"shape":"SelectObjectContentOutput"}, - "documentation":"

    This operation filters the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

    " + "documentation":"

    This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

    This action is not supported by Amazon S3 on Outposts.

    For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.

    For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

    Permissions

    You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.

    Object Data Formats

    You can use Amazon S3 Select to query objects that have the following format properties:

    • CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.

    • UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.

    • GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.

    • Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.

      For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon Simple Storage Service Developer Guide.

      For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.

    Working with the Response Body

    Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response .
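    As a hedged sketch of the request side of this operation in the AWS SDK for Java v2, the following fragment builds a SelectObjectContentRequest with CSV input and CSV output serialization. The bucket, key, and SQL expression are placeholders; consuming the streamed response requires the SDK's event-stream response handler on S3AsyncClient, which is omitted here.

        import software.amazon.awssdk.services.s3.model.*;

        // Request-construction fragment; pass the result to
        // S3AsyncClient#selectObjectContent together with a
        // SelectObjectContentResponseHandler to consume the streamed records.
        SelectObjectContentRequest select = SelectObjectContentRequest.builder()
                .bucket("my-bucket")                         // placeholder
                .key("reports/2020.csv")                     // placeholder
                .expressionType(ExpressionType.SQL)
                .expression("SELECT * FROM S3Object s")      // placeholder SQL
                .inputSerialization(InputSerialization.builder()
                        .csv(CSVInput.builder()
                                .fileHeaderInfo(FileHeaderInfo.USE)
                                .build())
                        .compressionType(CompressionType.NONE)
                        .build())
                .outputSerialization(OutputSerialization.builder()
                        .csv(CSVOutput.builder().build())
                        .build())
                .build();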

    GetObject Support

    The SelectObjectContent operation does not support the following GetObject functionality. For more information, see GetObject.

    • Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.

    • GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information about storage classes, see Storage Classes in the Amazon Simple Storage Service Developer Guide.

    Special Errors

    For a list of special errors for this operation, see List of SELECT Object Content Error Codes.

    Related Resources

    " }, "UploadPart":{ "name":"UploadPart", @@ -961,7 +1051,7 @@ "input":{"shape":"UploadPartRequest"}, "output":{"shape":"UploadPartOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html", - "documentation":"

    Uploads a part in a multipart upload.

    Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

    " + "documentation":"

    Uploads a part in a multipart upload.

    In this operation, you provide part data in your request. However, you can also specify an existing Amazon S3 object as the data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

    You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.

    Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.
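    As a rough illustration of the two paragraphs above, the following AWS SDK for Java v2 sketch initiates an upload, uploads a single part, and completes the upload. The bucket and key are placeholders, and a tiny single part is used only for brevity (a real part other than the last must be at least 5 MB).

        import software.amazon.awssdk.core.sync.RequestBody;
        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.*;

        S3Client s3 = S3Client.create();

        // 1. Initiate the upload and keep the upload ID.
        String uploadId = s3.createMultipartUpload(CreateMultipartUploadRequest.builder()
                .bucket("my-bucket").key("big/object.bin").build()).uploadId();

        // 2. Upload a part; the part number fixes its position in the final object.
        String eTag = s3.uploadPart(UploadPartRequest.builder()
                        .bucket("my-bucket").key("big/object.bin")
                        .uploadId(uploadId).partNumber(1).build(),
                RequestBody.fromString("part data goes here")).eTag();

        // 3. Complete (or abort) the upload so the parts stop accruing storage charges.
        s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                .bucket("my-bucket").key("big/object.bin").uploadId(uploadId)
                .multipartUpload(CompletedMultipartUpload.builder()
                        .parts(CompletedPart.builder().partNumber(1).eTag(eTag).build())
                        .build())
                .build());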

    To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.

    If the upload request is signed with Signature Version 4, then Amazon S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information, see Authenticating Requests: Using the Authorization Header (AWS Signature Version 4).

    Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.

    For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service Developer Guide.

    For information on the permissions required to use the multipart upload API, go to Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.

    You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.

    Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.

    If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    Special Errors

      • Code: NoSuchUpload

      • Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

    Related Resources

    " }, "UploadPartCopy":{ "name":"UploadPartCopy", @@ -972,7 +1062,7 @@ "input":{"shape":"UploadPartCopyRequest"}, "output":{"shape":"UploadPartCopyOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html", - "documentation":"

    Uploads a part by copying data from an existing object as data source.

    " + "documentation":"

    Uploads a part by copying data from an existing object as a data source. You specify the data source by adding the request header x-amz-copy-source to your request and a byte range by adding the request header x-amz-copy-source-range.

    The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service Developer Guide.

    Instead of using an existing object as part data, you might use the UploadPart operation and provide data in your request.

    You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.
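    A hedged AWS SDK for Java v2 sketch of this step, copying a byte range from an existing object into part 1 of an already-initiated upload, is shown below. The bucket names, key, upload ID, and range are placeholders, and the copy source value must be URL-encoded where needed.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.*;

        S3Client s3 = S3Client.create();

        UploadPartCopyResponse copied = s3.uploadPartCopy(UploadPartCopyRequest.builder()
                .bucket("destination-bucket")                 // destination of the multipart upload
                .key("assembled/object.bin")
                .uploadId("EXAMPLE-UPLOAD-ID")                // from CreateMultipartUpload
                .partNumber(1)
                .copySource("source-bucket/source-object")    // x-amz-copy-source
                .copySourceRange("bytes=0-5242879")           // x-amz-copy-source-range (first 5 MB)
                .build());

        System.out.println("Part ETag: " + copied.copyPartResult().eTag());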

    For more information about using the UploadPartCopy operation, see the following:

    • For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon Simple Storage Service Developer Guide.

    • For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.

    • For information about copying objects using a single atomic operation vs. the multipart upload, see Operations on Objects in the Amazon Simple Storage Service Developer Guide.

    • For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.

    Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:

    • Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request as follows:

      x-amz-copy-source-if-match condition evaluates to true, and

      x-amz-copy-source-if-unmodified-since condition evaluates to false;

      Amazon S3 returns 200 OK and copies the data.

    • Consideration 2 - If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request as follows:

      x-amz-copy-source-if-none-match condition evaluates to false, and

      x-amz-copy-source-if-modified-since condition evaluates to true;

      Amazon S3 returns a 412 Precondition Failed response code.

    Versioning

    If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source.

    You can optionally specify a specific version of the source object to copy by adding the versionId subresource as shown in the following example:

    x-amz-copy-source: /bucket/object?versionId=version id

    Special Errors

      • Code: NoSuchUpload

      • Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

      • HTTP Status Code: 404 Not Found

      • Code: InvalidRequest

      • Cause: The specified copy source is not supported as a byte-range copy source.

      • HTTP Status Code: 400 Bad Request

    Related Resources

    " } }, "shapes":{ @@ -1007,7 +1097,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    Name of the bucket to which the multipart upload was initiated.

    ", + "documentation":"

    The bucket name to which the upload was taking place.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -1027,6 +1117,12 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -1124,7 +1220,7 @@ "documentation":"

    Contains data related to access patterns to be collected and made available to analyze the tradeoffs between different storage classes.

    " } }, - "documentation":"

    Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket.

    For more information, see GET Bucket analytics in the Amazon Simple Storage Service API Reference.

    " + "documentation":"

    Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket.

    " }, "AnalyticsConfigurationList":{ "type":"list", @@ -1158,7 +1254,7 @@ "documentation":"

    A conjunction (logical AND) of predicates, which is used in evaluating an analytics filter. The operator must have at least two predicates.

    " } }, - "documentation":"

    " + "documentation":"

    The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis.

    " }, "AnalyticsId":{"type":"string"}, "AnalyticsS3BucketDestination":{ @@ -1174,7 +1270,7 @@ }, "BucketAccountId":{ "shape":"AccountId", - "documentation":"

    The account ID that owns the destination bucket. If no account ID is provided, the owner will not be validated prior to exporting data.

    " + "documentation":"

    The account ID that owns the destination S3 bucket. If no account ID is provided, the owner is not validated before exporting data.

    Although this value is optional, we strongly recommend that you set it to help prevent problems if the destination bucket ownership changes.

    " }, "Bucket":{ "shape":"BucketName", @@ -1185,12 +1281,19 @@ "documentation":"

    The prefix to use when exporting data. The prefix is prepended to all results.

    " } }, - "documentation":"

    " + "documentation":"

    Contains information about where to publish the analytics results.

    " }, "AnalyticsS3ExportFileFormat":{ "type":"string", "enum":["CSV"] }, + "ArchiveStatus":{ + "type":"string", + "enum":[ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS" + ] + }, "Body":{"type":"blob"}, "Bucket":{ "type":"structure", @@ -1201,10 +1304,10 @@ }, "CreationDate":{ "shape":"CreationDate", - "documentation":"

    Date the bucket was created.

    " + "documentation":"

    Date the bucket was created. This date can change when making changes to your bucket, such as editing its bucket policy.

    " } }, - "documentation":"

    " + "documentation":"

    In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is globally unique, and the namespace is shared by all AWS accounts.

    " }, "BucketAccelerateStatus":{ "type":"string", @@ -1217,14 +1320,14 @@ "type":"structure", "members":{ }, - "documentation":"

    The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.

    ", + "documentation":"

    The requested bucket name is not available. The bucket namespace is shared by all users of the system. Select a different name and try again.

    ", "exception":true }, "BucketAlreadyOwnedByYou":{ "type":"structure", "members":{ }, - "documentation":"

    ", + "documentation":"

    The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS Regions except in the North Virginia Region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).

    ", "exception":true }, "BucketCannedACL":{ @@ -1236,6 +1339,7 @@ "authenticated-read" ] }, + "BucketKeyEnabled":{"type":"boolean"}, "BucketLifecycleConfiguration":{ "type":"structure", "required":["Rules"], @@ -1251,28 +1355,39 @@ "BucketLocationConstraint":{ "type":"string", "enum":[ - "EU", - "eu-west-1", - "us-west-1", - "us-west-2", + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", "ap-south-1", "ap-southeast-1", "ap-southeast-2", - "ap-northeast-1", - "sa-east-1", + "ca-central-1", "cn-north-1", - "eu-central-1" + "cn-northwest-1", + "EU", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2" ] }, "BucketLoggingStatus":{ "type":"structure", "members":{ - "LoggingEnabled":{ - "shape":"LoggingEnabled", - "documentation":"

    " - } + "LoggingEnabled":{"shape":"LoggingEnabled"} }, - "documentation":"

    " + "documentation":"

    Container for logging status information.

    " }, "BucketLogsPermission":{ "type":"string", @@ -1307,7 +1422,7 @@ "members":{ "CORSRules":{ "shape":"CORSRules", - "documentation":"

    A set of allowed origins and methods.

    ", + "documentation":"

    A set of origins and methods (cross-origin access that you want to allow). You can add up to 100 rules to the configuration.

    ", "locationName":"CORSRule" } }, @@ -1357,60 +1472,60 @@ "members":{ "FileHeaderInfo":{ "shape":"FileHeaderInfo", - "documentation":"

    Describes the first line of input. Valid values: None, Ignore, Use.

    " + "documentation":"

    Describes the first line of input. Valid values are:

    • NONE: First line is not a header.

    • IGNORE: First line is a header, but you can't use the header values to indicate the column in an expression. You can use column position (such as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s).

    • USE: First line is a header, and you can use the header value to identify a column in an expression (SELECT \"name\" FROM OBJECT).

    " }, "Comments":{ "shape":"Comments", - "documentation":"

    The single character used to indicate a row should be ignored when present at the start of a row.

    " + "documentation":"

    A single character used to indicate that a row should be ignored when the character is present at the start of that row. You can specify any character to indicate a comment line.

    " }, "QuoteEscapeCharacter":{ "shape":"QuoteEscapeCharacter", - "documentation":"

    The single character used for escaping the quote character inside an already escaped value.

    " + "documentation":"

    A single character used for escaping the quotation mark character inside an already escaped value. For example, the value \"\"\" a , b \"\"\" is parsed as \" a , b \".

    " }, "RecordDelimiter":{ "shape":"RecordDelimiter", - "documentation":"

    The value used to separate individual records.

    " + "documentation":"

    A single character used to separate individual records in the input. Instead of the default value, you can specify an arbitrary delimiter.

    " }, "FieldDelimiter":{ "shape":"FieldDelimiter", - "documentation":"

    The value used to separate individual fields in a record.

    " + "documentation":"

    A single character used to separate individual fields in a record. You can specify an arbitrary delimiter.

    " }, "QuoteCharacter":{ "shape":"QuoteCharacter", - "documentation":"

    Value used for escaping where the field delimiter is part of the value.

    " + "documentation":"

    A single character used for escaping when the field delimiter is part of the value. For example, if the value is a, b, Amazon S3 wraps this field value in quotation marks, as follows: \" a , b \".

    Type: String

    Default: \"

    Ancestors: CSV

    " }, "AllowQuotedRecordDelimiter":{ "shape":"AllowQuotedRecordDelimiter", "documentation":"

    Specifies that CSV field values may contain quoted record delimiters and such records should be allowed. Default value is FALSE. Setting this value to TRUE may lower performance.

    " } }, - "documentation":"

    Describes how a CSV-formatted input object is formatted.

    " + "documentation":"

    Describes how an uncompressed comma-separated values (CSV)-formatted input object is formatted.

    " }, "CSVOutput":{ "type":"structure", "members":{ "QuoteFields":{ "shape":"QuoteFields", - "documentation":"

    Indicates whether or not all output fields should be quoted.

    " + "documentation":"

    Indicates whether to use quotation marks around output fields.

    • ALWAYS: Always use quotation marks for output fields.

    • ASNEEDED: Use quotation marks for output fields when needed.

    " }, "QuoteEscapeCharacter":{ "shape":"QuoteEscapeCharacter", - "documentation":"

    Th single character used for escaping the quote character inside an already escaped value.

    " + "documentation":"

    The single character used for escaping the quote character inside an already escaped value.

    " }, "RecordDelimiter":{ "shape":"RecordDelimiter", - "documentation":"

    The value used to separate individual records.

    " + "documentation":"

    A single character used to separate individual records in the output. Instead of the default value, you can specify an arbitrary delimiter.

    " }, "FieldDelimiter":{ "shape":"FieldDelimiter", - "documentation":"

    The value used to separate individual fields in a record.

    " + "documentation":"

    The value used to separate individual fields in a record. You can specify an arbitrary delimiter.

    " }, "QuoteCharacter":{ "shape":"QuoteCharacter", - "documentation":"

    The value used for escaping where the field delimiter is part of the value.

    " + "documentation":"

    A single character used for escaping when the field delimiter is part of the value. For example, if the value is a, b, Amazon S3 wraps this field value in quotation marks, as follows: \" a , b \".

    " } }, - "documentation":"

    Describes how CSV-formatted results are formatted.

    " + "documentation":"

    Describes how uncompressed comma-separated values (CSV)-formatted results are formatted.

    " }, "CacheControl":{"type":"string"}, "CloudFunction":{"type":"string"}, @@ -1424,19 +1539,19 @@ }, "Events":{ "shape":"EventList", - "documentation":"

    ", + "documentation":"

    Bucket events for which to send notifications.

    ", "locationName":"Event" }, "CloudFunction":{ "shape":"CloudFunction", - "documentation":"

    " + "documentation":"

    Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type.

    " }, "InvocationRole":{ "shape":"CloudFunctionInvocationRole", - "documentation":"

    " + "documentation":"

    The role supporting the invocation of the Lambda function.

    " } }, - "documentation":"

    " + "documentation":"

    Container for specifying the AWS Lambda notification configuration.

    " }, "CloudFunctionInvocationRole":{"type":"string"}, "Code":{"type":"string"}, @@ -1446,10 +1561,10 @@ "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"

    " + "documentation":"

    Container for the specified common prefix.

    " } }, - "documentation":"

    " + "documentation":"

    Container for all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter. CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix. For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/.

    " }, "CommonPrefixList":{ "type":"list", @@ -1461,15 +1576,15 @@ "members":{ "Location":{ "shape":"Location", - "documentation":"

    " + "documentation":"

    The URI that identifies the newly created object.

    " }, "Bucket":{ "shape":"BucketName", - "documentation":"

    " + "documentation":"

    The name of the bucket that contains the newly created object.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    " }, "Key":{ "shape":"ObjectKey", - "documentation":"

    " + "documentation":"

    The object key of the newly created object.

    " }, "Expiration":{ "shape":"Expiration", @@ -1479,26 +1594,32 @@ }, "ETag":{ "shape":"ETag", - "documentation":"

    Entity tag of the object.

    " + "documentation":"

    Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits.

    " }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    If you specified server-side encryption either with an Amazon S3-managed encryption key or an AWS KMS customer master key (CMK) in your initiate multipart upload request, the response includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"

    Version of the object.

    ", + "documentation":"

    Version ID of the newly created object, in case the bucket has versioning turned on.

    ", "location":"header", "locationName":"x-amz-version-id" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -1516,25 +1637,25 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    Name of the bucket to which the multipart upload was initiated.

    ", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Object key for which the multipart upload was initiated.

    ", "location":"uri", "locationName":"Key" }, "MultipartUpload":{ "shape":"CompletedMultipartUpload", - "documentation":"

    ", + "documentation":"

    The container for the multipart upload request information.

    ", "locationName":"CompleteMultipartUpload", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "UploadId":{ "shape":"MultipartUploadId", - "documentation":"

    ", + "documentation":"

    ID for the initiated multipart upload.

    ", "location":"querystring", "locationName":"uploadId" }, @@ -1542,6 +1663,12 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"MultipartUpload" @@ -1551,11 +1678,11 @@ "members":{ "Parts":{ "shape":"CompletedPartList", - "documentation":"

    ", + "documentation":"

    Array of CompletedPart data types.

    ", "locationName":"Part" } }, - "documentation":"

    " + "documentation":"

    The container for the completed multipart upload details.

    " }, "CompletedPart":{ "type":"structure", @@ -1569,7 +1696,7 @@ "documentation":"

    Part number that identifies the part. This is a positive integer between 1 and 10,000.

    " } }, - "documentation":"

    " + "documentation":"

    Details of the parts that were uploaded.

    " }, "CompletedPartList":{ "type":"list", @@ -1593,10 +1720,10 @@ }, "KeyPrefixEquals":{ "shape":"KeyPrefixEquals", - "documentation":"

    The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect request for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied.

    " + "documentation":"

    The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect requests for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied.

    " } }, - "documentation":"

    Specifies a condition that must be met for a redirect to apply.

    " + "documentation":"

    A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error.

    " }, "ConfirmRemoveSelfBucketAccess":{"type":"boolean"}, "ContentDisposition":{"type":"string"}, @@ -1618,7 +1745,7 @@ "members":{ "CopyObjectResult":{ "shape":"CopyObjectResult", - "documentation":"

    " + "documentation":"

    Container for all response elements.

    " }, "Expiration":{ "shape":"Expiration", @@ -1628,7 +1755,7 @@ }, "CopySourceVersionId":{ "shape":"CopySourceVersionId", - "documentation":"

    ", + "documentation":"

    Version of the copied object in the destination bucket.

    ", "location":"header", "locationName":"x-amz-copy-source-version-id" }, @@ -1640,7 +1767,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -1652,13 +1779,13 @@ }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

    ", + "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -1668,6 +1795,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -1686,13 +1819,13 @@ "members":{ "ACL":{ "shape":"ObjectCannedACL", - "documentation":"

    The canned ACL to apply to the object.

    ", + "documentation":"

    The canned ACL to apply to the object.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-acl" }, "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the destination bucket.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -1728,7 +1861,7 @@ }, "CopySource":{ "shape":"CopySource", - "documentation":"

    The name of the source bucket and key name of the source object, separated by a slash (/). Must be URL-encoded.

    ", + "documentation":"

    Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:

    • For objects not accessed through an access point, specify the name of the source bucket and the key of the source object, separated by a slash (/). For example, to copy the object reports/january.pdf from the bucket awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value must be URL encoded.

    • For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>. For example, to copy the object reports/january.pdf through access point my-access-point owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. The value must be URL encoded.

      Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same AWS Region.

      Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>. For example, to copy the object reports/january.pdf through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. The value must be URL encoded.

    To copy a specific version of an object, append ?versionId=<version-id> to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.
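    For illustration, a hypothetical AWS SDK for Java v2 fragment that copies a specific source version using the ?versionId form described above (the bucket names, key, and version ID are placeholders, and the copy source value must be URL-encoded where needed):

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.*;

        S3Client s3 = S3Client.create();

        CopyObjectResponse copy = s3.copyObject(CopyObjectRequest.builder()
                .copySource("awsexamplebucket/reports/january.pdf?versionId=EXAMPLE-VERSION-ID")
                .bucket("destination-bucket")     // destination bucket
                .key("reports/january-copy.pdf")  // destination key
                .build());

        System.out.println("New ETag: " + copy.copyObjectResult().eTag());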

    ", "location":"header", "locationName":"x-amz-copy-source" }, @@ -1764,31 +1897,31 @@ }, "GrantFullControl":{ "shape":"GrantFullControl", - "documentation":"

    Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

    ", + "documentation":"

    Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-full-control" }, "GrantRead":{ "shape":"GrantRead", - "documentation":"

    Allows grantee to read the object data and its metadata.

    ", + "documentation":"

    Allows grantee to read the object data and its metadata.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-read" }, "GrantReadACP":{ "shape":"GrantReadACP", - "documentation":"

    Allows grantee to read the object ACL.

    ", + "documentation":"

    Allows grantee to read the object ACL.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-read-acp" }, "GrantWriteACP":{ "shape":"GrantWriteACP", - "documentation":"

    Allows grantee to write the ACL for the applicable object.

    ", + "documentation":"

    Allows grantee to write the ACL for the applicable object.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-write-acp" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    The key of the destination object.

    ", "location":"uri", "locationName":"Key" }, @@ -1812,13 +1945,13 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

    The type of storage to use for the object. Defaults to 'STANDARD'.

    ", + "documentation":"

    By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

    ", "location":"header", "locationName":"x-amz-storage-class" }, @@ -1830,25 +1963,25 @@ }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"

    Specifies the algorithm to use to when encrypting the object (e.g., AES256).

    ", + "documentation":"

    Specifies the algorithm to use to when encrypting the object (for example, AES256).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.

    ", + "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

    ", + "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

    ", + "documentation":"

    Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -1858,9 +1991,15 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

    Specifying this header with a COPY operation doesn’t affect bucket-level settings for S3 Bucket Key.

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "CopySourceSSECustomerAlgorithm":{ "shape":"CopySourceSSECustomerAlgorithm", - "documentation":"

    Specifies the algorithm to use when decrypting the source object (e.g., AES256).

    ", + "documentation":"

    Specifies the algorithm to use when decrypting the source object (for example, AES256).

    ", "location":"header", "locationName":"x-amz-copy-source-server-side-encryption-customer-algorithm" }, @@ -1872,7 +2011,7 @@ }, "CopySourceSSECustomerKeyMD5":{ "shape":"CopySourceSSECustomerKeyMD5", - "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

    ", + "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

    ", "location":"header", "locationName":"x-amz-copy-source-server-side-encryption-customer-key-MD5" }, @@ -1883,19 +2022,19 @@ }, "Tagging":{ "shape":"TaggingHeader", - "documentation":"

    The tag-set for the object destination object this value must be used in conjunction with the TaggingDirective. The tag-set must be encoded as URL Query parameters

    ", + "documentation":"

    The tag-set for the destination object. This value must be used in conjunction with the TaggingDirective. The tag-set must be encoded as URL query parameters.

    ", "location":"header", "locationName":"x-amz-tagging" }, "ObjectLockMode":{ "shape":"ObjectLockMode", - "documentation":"

    The object lock mode that you want to apply to the copied object.

    ", + "documentation":"

    The Object Lock mode that you want to apply to the copied object.

    ", "location":"header", "locationName":"x-amz-object-lock-mode" }, "ObjectLockRetainUntilDate":{ "shape":"ObjectLockRetainUntilDate", - "documentation":"

    The date and time when you want the copied object's object lock to expire.

    ", + "documentation":"

    The date and time when you want the copied object's Object Lock to expire.

    ", "location":"header", "locationName":"x-amz-object-lock-retain-until-date" }, @@ -1904,6 +2043,18 @@ "documentation":"

    Specifies whether you want to apply a Legal Hold to the copied object.

    ", "location":"header", "locationName":"x-amz-object-lock-legal-hold" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected destination bucket owner. If the destination bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" + }, + "ExpectedSourceBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected source bucket owner. If the source bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-source-expected-bucket-owner" } } }, @@ -1912,14 +2063,14 @@ "members":{ "ETag":{ "shape":"ETag", - "documentation":"

    " + "documentation":"

    Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata. The source and destination ETags are identical for a successfully copied object.

    " }, "LastModified":{ "shape":"LastModified", - "documentation":"

    " + "documentation":"

    Returns the date that the object was last modified.

    " } }, - "documentation":"

    " + "documentation":"

    Container for all response elements.

    " }, "CopyPartResult":{ "type":"structure", @@ -1933,7 +2084,7 @@ "documentation":"

    Date and time at which the object was uploaded.

    " } }, - "documentation":"

    " + "documentation":"

    Container for all response elements.

    " }, "CopySource":{ "type":"string", @@ -1956,17 +2107,17 @@ "members":{ "LocationConstraint":{ "shape":"BucketLocationConstraint", - "documentation":"

    Specifies the region where the bucket will be created. If you don't specify a region, the bucket is created in US East (N. Virginia) Region (us-east-1).

    " + "documentation":"

    Specifies the Region where the bucket will be created. If you don't specify a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1).

    " } }, - "documentation":"

    " + "documentation":"

    The configuration information for the bucket.

    " }, "CreateBucketOutput":{ "type":"structure", "members":{ "Location":{ "shape":"Location", - "documentation":"

    ", + "documentation":"

    Specifies the Region where the bucket will be created. If you are creating a bucket in the US East (N. Virginia) Region (us-east-1), you do not need to specify the location.

    ", "location":"header", "locationName":"Location" } @@ -1984,13 +2135,13 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket to create.

    ", "location":"uri", "locationName":"Bucket" }, "CreateBucketConfiguration":{ "shape":"CreateBucketConfiguration", - "documentation":"

    ", + "documentation":"

    The configuration information for the bucket.

    ", "locationName":"CreateBucketConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, @@ -2026,7 +2177,7 @@ }, "ObjectLockEnabledForBucket":{ "shape":"ObjectLockEnabledForBucket", - "documentation":"

    Specifies whether you want Amazon S3 object lock to be enabled for the new bucket.

    ", + "documentation":"

    Specifies whether you want S3 Object Lock to be enabled for the new bucket.

    ", "location":"header", "locationName":"x-amz-bucket-object-lock-enabled" } @@ -2038,19 +2189,19 @@ "members":{ "AbortDate":{ "shape":"AbortDate", - "documentation":"

    Date when multipart upload will become eligible for abort operation by lifecycle.

    ", + "documentation":"

    If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, the response includes this header. The header indicates when the initiated multipart upload becomes eligible for an abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

    The response also includes the x-amz-abort-rule-id header that provides the ID of the lifecycle configuration rule that defines this action.

    ", "location":"header", "locationName":"x-amz-abort-date" }, "AbortRuleId":{ "shape":"AbortRuleId", - "documentation":"

    Id of the lifecycle rule that makes a multipart upload eligible for abort operation.

    ", + "documentation":"

    This header is returned along with the x-amz-abort-date header. It identifies the applicable lifecycle configuration rule that defines the action to abort incomplete multipart uploads.

    ", "location":"header", "locationName":"x-amz-abort-rule-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"

    Name of the bucket to which the multipart upload was initiated.

    ", + "documentation":"

    The name of the bucket to which the multipart upload was initiated.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "locationName":"Bucket" }, "Key":{ @@ -2063,7 +2214,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2075,13 +2226,13 @@ }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

    ", + "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2091,6 +2242,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -2107,13 +2264,13 @@ "members":{ "ACL":{ "shape":"ObjectCannedACL", - "documentation":"

    The canned ACL to apply to the object.

    ", + "documentation":"

    The canned ACL to apply to the object.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-acl" }, "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket in which to initiate the upload.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -2155,31 +2312,31 @@ }, "GrantFullControl":{ "shape":"GrantFullControl", - "documentation":"

    Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

    ", + "documentation":"

    Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-full-control" }, "GrantRead":{ "shape":"GrantRead", - "documentation":"

    Allows grantee to read the object data and its metadata.

    ", + "documentation":"

    Allows grantee to read the object data and its metadata.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-read" }, "GrantReadACP":{ "shape":"GrantReadACP", - "documentation":"

    Allows grantee to read the object ACL.

    ", + "documentation":"

    Allows grantee to read the object ACL.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-read-acp" }, "GrantWriteACP":{ "shape":"GrantWriteACP", - "documentation":"

    Allows grantee to write the ACL for the applicable object.

    ", + "documentation":"

    Allows grantee to write the ACL for the applicable object.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-write-acp" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Object key for which the multipart upload is to be initiated.

    ", "location":"uri", "locationName":"Key" }, @@ -2191,13 +2348,13 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

    The type of storage to use for the object. Defaults to 'STANDARD'.

    ", + "documentation":"

    By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

    ", "location":"header", "locationName":"x-amz-storage-class" }, @@ -2209,25 +2366,25 @@ }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"

    Specifies the algorithm to use to when encrypting the object (e.g., AES256).

    ", + "documentation":"

    Specifies the algorithm to use when encrypting the object (for example, AES256).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.

    ", + "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

    ", + "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

    ", + "documentation":"

    Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2237,6 +2394,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

    Specifying this header with an object operation doesn’t affect bucket-level settings for S3 Bucket Key.
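    Illustrative sketch, not part of the model diff: building this request with the AWS SDK for Java v2, assuming an SDK release that includes the bucketKeyEnabled member; the bucket, key name, and KMS key ID are hypothetical.

        import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
        import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

        public class BucketKeyExample {
            // Maps to x-amz-server-side-encryption: aws:kms, x-amz-server-side-encryption-aws-kms-key-id,
            // and x-amz-server-side-encryption-bucket-key-enabled: true on the wire.
            static CreateMultipartUploadRequest bucketKeyRequest() {
                return CreateMultipartUploadRequest.builder()
                        .bucket("my-bucket")                                  // hypothetical bucket
                        .key("large-object.bin")                              // hypothetical key
                        .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                        .ssekmsKeyId("1234abcd-12ab-34cd-56ef-1234567890ab")  // hypothetical symmetric CMK
                        .bucketKeyEnabled(true)
                        .build();
            }
        }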

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestPayer":{ "shape":"RequestPayer", "location":"header", @@ -2244,19 +2407,19 @@ }, "Tagging":{ "shape":"TaggingHeader", - "documentation":"

    The tag-set for the object. The tag-set must be encoded as URL Query parameters

    ", + "documentation":"

    The tag-set for the object. The tag-set must be encoded as URL Query parameters.

    ", "location":"header", "locationName":"x-amz-tagging" }, "ObjectLockMode":{ "shape":"ObjectLockMode", - "documentation":"

    Specifies the object lock mode that you want to apply to the uploaded object.

    ", + "documentation":"

    Specifies the Object Lock mode that you want to apply to the uploaded object.

    ", "location":"header", "locationName":"x-amz-object-lock-mode" }, "ObjectLockRetainUntilDate":{ "shape":"ObjectLockRetainUntilDate", - "documentation":"

    Specifies the date and time when you want the object lock to expire.

    ", + "documentation":"

    Specifies the date and time when you want the Object Lock to expire.

    ", "location":"header", "locationName":"x-amz-object-lock-retain-until-date" }, @@ -2265,6 +2428,12 @@ "documentation":"

    Specifies whether you want to apply a Legal Hold to the uploaded object.

    ", "location":"header", "locationName":"x-amz-object-lock-legal-hold" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
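    Illustrative sketch, not part of the model diff: setting the expected bucket owner with the AWS SDK for Java v2, assuming an SDK release that includes this member; the bucket, key, and account ID are hypothetical.

        import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;

        public class ExpectedOwnerExample {
            // Sends x-amz-expected-bucket-owner; S3 rejects the call with 403 (Access Denied)
            // if the bucket is owned by a different account.
            static CreateMultipartUploadRequest withExpectedOwner() {
                return CreateMultipartUploadRequest.builder()
                        .bucket("my-bucket")                  // hypothetical bucket
                        .key("data.csv")                      // hypothetical key
                        .expectedBucketOwner("111122223333")  // hypothetical 12-digit account ID
                        .build();
            }
        }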

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2280,7 +2449,7 @@ "members":{ "Mode":{ "shape":"ObjectLockRetentionMode", - "documentation":"

    The default object lock retention mode you want to apply to new objects placed in the specified bucket.

    " + "documentation":"

    The default Object Lock retention mode you want to apply to new objects placed in the specified bucket.

    " }, "Days":{ "shape":"Days", @@ -2291,7 +2460,7 @@ "documentation":"

    The number of years that you want to specify for the default retention period.

    " } }, - "documentation":"

    The container element for specifying the default object lock retention settings for new objects placed in the specified bucket.

    " + "documentation":"

    The container element for specifying the default Object Lock retention settings for new objects placed in the specified bucket.

    " }, "Delete":{ "type":"structure", @@ -2299,7 +2468,7 @@ "members":{ "Objects":{ "shape":"ObjectIdentifierList", - "documentation":"

    ", + "documentation":"

    The objects to delete.

    ", "locationName":"Object" }, "Quiet":{ @@ -2307,7 +2476,7 @@ "documentation":"

    Element to enable quiet mode for the request. When you add this element, you must set its value to true.

    " } }, - "documentation":"

    " + "documentation":"

    Container for the objects to delete.
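    Illustrative sketch, not part of the model diff: a quiet-mode batch delete built with the AWS SDK for Java v2, using the Objects list and Quiet element described above; the bucket and keys are hypothetical.

        import software.amazon.awssdk.services.s3.model.Delete;
        import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
        import software.amazon.awssdk.services.s3.model.ObjectIdentifier;

        public class QuietDeleteExample {
            // With Quiet set to true, the response lists only the keys that failed to delete.
            static DeleteObjectsRequest quietDelete() {
                return DeleteObjectsRequest.builder()
                        .bucket("my-bucket")   // hypothetical bucket
                        .delete(Delete.builder()
                                .objects(ObjectIdentifier.builder().key("logs/2020-10-01.gz").build(),
                                         ObjectIdentifier.builder().key("logs/2020-10-02.gz").build())
                                .quiet(true)
                                .build())
                        .build();
            }
        }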

    " }, "DeleteBucketAnalyticsConfigurationRequest":{ "type":"structure", @@ -2327,6 +2496,12 @@ "documentation":"

    The ID that identifies the analytics configuration.

    ", "location":"querystring", "locationName":"id" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2336,9 +2511,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    Specifies the bucket whose cors configuration is being deleted.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2351,6 +2532,33 @@ "documentation":"

    The name of the bucket containing the server-side encryption configuration to delete.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" + } + } + }, + "DeleteBucketIntelligentTieringConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Id" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

    ", + "location":"uri", + "locationName":"Bucket" + }, + "Id":{ + "shape":"IntelligentTieringId", + "documentation":"

    The ID used to identify the S3 Intelligent-Tiering configuration.

    ", + "location":"querystring", + "locationName":"id" } } }, @@ -2372,6 +2580,12 @@ "documentation":"

    The ID used to identify the inventory configuration.

    ", "location":"querystring", "locationName":"id" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2381,9 +2595,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name of the lifecycle to delete.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2405,6 +2625,30 @@ "documentation":"

    The ID used to identify the metrics configuration.

    ", "location":"querystring", "locationName":"id" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" + } + } + }, + "DeleteBucketOwnershipControlsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The Amazon S3 bucket whose OwnershipControls you want to delete.

    ", + "location":"uri", + "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2414,9 +2658,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2426,9 +2676,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    The bucket name.

    It can take a while to propagate the deletion of a replication configuration to all Amazon S3 systems.

    ", + "documentation":"

    The bucket name.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2438,9 +2694,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    Specifies the bucket being deleted.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2450,9 +2712,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket that has the tag set to be removed.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2462,9 +2730,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name for which you want to remove the website configuration.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2474,7 +2748,7 @@ "members":{ "Owner":{ "shape":"Owner", - "documentation":"

    " + "documentation":"

    The account that created the delete marker.

    " }, "Key":{ "shape":"ObjectKey", @@ -2493,17 +2767,17 @@ "documentation":"

    Date and time the object was last modified.

    " } }, - "documentation":"

    " + "documentation":"

    Information about the delete marker.

    " }, "DeleteMarkerReplication":{ "type":"structure", "members":{ "Status":{ "shape":"DeleteMarkerReplicationStatus", - "documentation":"

    The status of the delete marker replication.

    In the current implementation, Amazon S3 doesn't replicate the delete markers. The status must be Disabled.

    " + "documentation":"

    Indicates whether to replicate delete markers.

    " } }, - "documentation":"

    Specifies whether Amazon S3 should replicate delete makers.

    " + "documentation":"

    Specifies whether Amazon S3 replicates delete markers. If you specify a Filter in your replication configuration, you must also include a DeleteMarkerReplication element. If your Filter includes a Tag element, the DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does not support replicating delete markers for tag-based rules. For an example configuration, see Basic Rule Configuration.

    For more information about delete marker replication, see Basic Rule Configuration.

    If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.

    " }, "DeleteMarkerReplicationStatus":{ "type":"string", @@ -2549,19 +2823,19 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket containing the object.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Key name of the object to delete.

    ", "location":"uri", "locationName":"Key" }, "MFA":{ "shape":"MFA", - "documentation":"

    The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.

    ", + "documentation":"

    The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently delete a versioned object if versioning is configured with MFA delete enabled.
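    Illustrative sketch, not part of the model diff: passing the x-amz-mfa value when permanently deleting a version from a bucket with MFA delete enabled, using the AWS SDK for Java v2; the device ARN, token code, bucket, key, and version ID are hypothetical.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;

        public class MfaDeleteExample {
            public static void main(String[] args) {
                S3Client s3 = S3Client.create();
                s3.deleteObject(DeleteObjectRequest.builder()
                        .bucket("my-versioned-bucket")          // hypothetical bucket
                        .key("confidential.pdf")                // hypothetical key
                        .versionId("3HL4kqCxf3vjVBH40Nrjfkd")   // hypothetical version ID
                        // Device serial number (or MFA device ARN), a space, then the current token code.
                        .mfa("arn:aws:iam::123456789012:mfa/user 123456")
                        .build());
            }
        }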

    ", "location":"header", "locationName":"x-amz-mfa" }, @@ -2578,9 +2852,15 @@ }, "BypassGovernanceRetention":{ "shape":"BypassGovernanceRetention", - "documentation":"

    Indicates whether Amazon S3 object lock should bypass governance-mode restrictions to process this operation.

    ", + "documentation":"

    Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process this operation.

    ", "location":"header", "locationName":"x-amz-bypass-governance-retention" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2604,13 +2884,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name containing the objects from which to remove the tags.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Name of the object key.

    ", "location":"uri", "locationName":"Key" }, @@ -2619,6 +2899,12 @@ "documentation":"

    The versionId of the object that the tag-set will be removed from.

    ", "location":"querystring", "locationName":"versionId" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2627,7 +2913,7 @@ "members":{ "Deleted":{ "shape":"DeletedObjects", - "documentation":"

    " + "documentation":"

    Container element for a successful delete. It identifies the object that was successfully deleted.

    " }, "RequestCharged":{ "shape":"RequestCharged", @@ -2636,7 +2922,7 @@ }, "Errors":{ "shape":"Errors", - "documentation":"

    ", + "documentation":"

    Container for a failed delete operation that describes the object that Amazon S3 attempted to delete and the error it encountered.

    ", "locationName":"Error" } } @@ -2650,19 +2936,19 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name containing the objects to delete.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "Delete":{ "shape":"Delete", - "documentation":"

    ", + "documentation":"

    Container for the request.

    ", "locationName":"Delete", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "MFA":{ "shape":"MFA", - "documentation":"

    The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.

    ", + "documentation":"

    The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently delete a versioned object if versioning is configured with MFA delete enabled.

    ", "location":"header", "locationName":"x-amz-mfa" }, @@ -2673,9 +2959,15 @@ }, "BypassGovernanceRetention":{ "shape":"BypassGovernanceRetention", - "documentation":"

    Specifies whether you want to delete this object even if it has a Governance-type object lock in place. You must have sufficient permissions to perform this operation.

    ", + "documentation":"

    Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. You must have sufficient permissions to perform this operation.

    ", "location":"header", "locationName":"x-amz-bypass-governance-retention" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"Delete" @@ -2689,6 +2981,12 @@ "documentation":"

    The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2697,22 +2995,22 @@ "members":{ "Key":{ "shape":"ObjectKey", - "documentation":"

    " + "documentation":"

    The name of the deleted object.

    " }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"

    " + "documentation":"

    The version ID of the deleted object.

    " }, "DeleteMarker":{ "shape":"DeleteMarker", - "documentation":"

    " + "documentation":"

    Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker. In a simple DELETE, this header indicates whether (true) or not (false) a delete marker was created.

    " }, "DeleteMarkerVersionId":{ "shape":"DeleteMarkerVersionId", - "documentation":"

    " + "documentation":"

    The version ID of the delete marker created as a result of the DELETE operation. If you delete a specific object version, the value returned by this header is the version ID of the object version deleted.

    " } }, - "documentation":"

    " + "documentation":"

    Information about the deleted object.

    " }, "DeletedObjects":{ "type":"list", @@ -2727,15 +3025,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.

    A replication configuration can replicate objects to only one destination bucket. If there are multiple rules in your replication configuration, all rules must specify the same destination bucket.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store the results.

    " }, "Account":{ "shape":"AccountId", - "documentation":"

    Destination bucket owner account ID. In a cross-account scenario, if you direct Amazon S3 to change replica ownership to the AWS account that owns the destination bucket by specifying the AccessControlTranslation property, this is the account ID of the destination bucket owner. For more information, see Cross-Region Replication Additional Configuration: Change Replica Owner in the Amazon Simple Storage Service Developer Guide.

    " + "documentation":"

    Destination bucket owner account ID. In a cross-account scenario, if you direct Amazon S3 to change replica ownership to the AWS account that owns the destination bucket by specifying the AccessControlTranslation property, this is the account ID of the destination bucket owner. For more information, see Replication Additional Configuration: Changing the Replica Owner in the Amazon Simple Storage Service Developer Guide.

    " }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

    The storage class to use when replicating objects, such as standard or reduced redundancy. By default, Amazon S3 uses the storage class of the source object to create the object replica.

    For valid values, see the StorageClass element of the PUT Bucket replication action in the Amazon Simple Storage Service API Reference.

    " + "documentation":"

    The storage class to use when replicating objects, such as S3 Standard or reduced redundancy. By default, Amazon S3 uses the storage class of the source object to create the object replica.

    For valid values, see the StorageClass element of the PUT Bucket replication action in the Amazon Simple Storage Service API Reference.

    " }, "AccessControlTranslation":{ "shape":"AccessControlTranslation", @@ -2744,9 +3042,17 @@ "EncryptionConfiguration":{ "shape":"EncryptionConfiguration", "documentation":"

    A container that provides information about encryption. If SourceSelectionCriteria is specified, you must specify this element.

    " + }, + "ReplicationTime":{ + "shape":"ReplicationTime", + "documentation":"

    A container specifying S3 Replication Time Control (S3 RTC), including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated. Must be specified together with a Metrics block.

    " + }, + "Metrics":{ + "shape":"Metrics", + "documentation":"

    A container specifying replication metrics-related settings enabling replication metrics and events.

    " } }, - "documentation":"

    Specifies information about where to publish analysis or configuration results for an Amazon S3 bucket.

    " + "documentation":"

    Specifies information about where to publish analysis or configuration results for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).

    " }, "DisplayName":{"type":"string"}, "ETag":{"type":"string"}, @@ -2763,34 +3069,35 @@ "members":{ "EncryptionType":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing job results in Amazon S3 (e.g., AES256, aws:kms).

    " + "documentation":"

    The server-side encryption algorithm used when storing job results in Amazon S3 (for example, AES256, aws:kms).

    " }, "KMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If the encryption type is aws:kms, this optional value specifies the AWS KMS key ID to use for encryption of job results.

    " + "documentation":"

    If the encryption type is aws:kms, this optional value specifies the ID of the symmetric customer managed AWS KMS CMK to use for encryption of job results. Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

    " }, "KMSContext":{ "shape":"KMSContext", - "documentation":"

    If the encryption type is aws:kms, this optional value can be used to specify the encryption context for the restore results.

    " + "documentation":"

    If the encryption type is aws:kms, this optional value can be used to specify the encryption context for the restore results.

    " } }, - "documentation":"

    Describes the server-side encryption that will be applied to the restore results.

    " + "documentation":"

    Contains the type of server-side encryption used.

    " }, "EncryptionConfiguration":{ "type":"structure", "members":{ "ReplicaKmsKeyID":{ "shape":"ReplicaKmsKeyID", - "documentation":"

    Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket. Amazon S3 uses this key to encrypt replica objects.

    " + "documentation":"

    Specifies the ID (Key ARN or Alias ARN) of the customer managed customer master key (CMK) stored in AWS Key Management Service (KMS) for the destination bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only supports symmetric customer managed CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

    " } }, "documentation":"

    Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects.

    " }, + "End":{"type":"long"}, "EndEvent":{ "type":"structure", "members":{ }, - "documentation":"

    ", + "documentation":"

    A message that indicates the request is complete and no more messages will be sent. You should not assume that the request is complete until the client receives an EndEvent.

    ", "event":true }, "Error":{ @@ -2798,22 +3105,22 @@ "members":{ "Key":{ "shape":"ObjectKey", - "documentation":"

    " + "documentation":"

    The error key.

    " }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"

    " + "documentation":"

    The version ID of the error.

    " }, "Code":{ "shape":"Code", - "documentation":"

    " + "documentation":"

    The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.

    Amazon S3 error codes

      • Code: AccessDenied

      • Description: Access Denied

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: AccountProblem

      • Description: There is a problem with your AWS account that prevents the operation from completing successfully. Contact AWS Support for further assistance.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: AllAccessDisabled

      • Description: All access to this Amazon S3 resource has been disabled. Contact AWS Support for further assistance.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: AmbiguousGrantByEmailAddress

      • Description: The email address you provided is associated with more than one account.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: AuthorizationHeaderMalformed

      • Description: The authorization header you provided is invalid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: N/A

      • Code: BadDigest

      • Description: The Content-MD5 you specified did not match what we received.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: BucketAlreadyExists

      • Description: The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: BucketAlreadyOwnedByYou

      • Description: The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS Regions except in the North Virginia Region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).

      • HTTP Status Code: 409 Conflict (in all Regions except the North Virginia Region)

      • SOAP Fault Code Prefix: Client

      • Code: BucketNotEmpty

      • Description: The bucket you tried to delete is not empty.

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: CredentialsNotSupported

      • Description: This request does not support credentials.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: CrossLocationLoggingProhibited

      • Description: Cross-location logging not allowed. Buckets in one geographic location cannot log information to a bucket in another location.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: EntityTooSmall

      • Description: Your proposed upload is smaller than the minimum allowed object size.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: EntityTooLarge

      • Description: Your proposed upload exceeds the maximum allowed object size.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: ExpiredToken

      • Description: The provided token has expired.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: IllegalVersioningConfigurationException

      • Description: Indicates that the versioning configuration specified in the request is invalid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: IncompleteBody

      • Description: You did not provide the number of bytes specified by the Content-Length HTTP header

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: IncorrectNumberOfFilesInPostRequest

      • Description: POST requires exactly one file upload per request.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InlineDataTooLarge

      • Description: Inline data exceeds the maximum allowed size.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InternalError

      • Description: We encountered an internal error. Please try again.

      • HTTP Status Code: 500 Internal Server Error

      • SOAP Fault Code Prefix: Server

      • Code: InvalidAccessKeyId

      • Description: The AWS access key ID you provided does not exist in our records.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: InvalidAddressingHeader

      • Description: You must specify the Anonymous role.

      • HTTP Status Code: N/A

      • SOAP Fault Code Prefix: Client

      • Code: InvalidArgument

      • Description: Invalid Argument

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidBucketName

      • Description: The specified bucket is not valid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidBucketState

      • Description: The request is not valid with the current state of the bucket.

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: InvalidDigest

      • Description: The Content-MD5 you specified is not valid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidEncryptionAlgorithmError

      • Description: The encryption request you specified is not valid. The valid value is AES256.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidLocationConstraint

      • Description: The specified location constraint is not valid. For more information about Regions, see How to Select a Region for Your Buckets.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidObjectState

      • Description: The operation is not valid for the current state of the object.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: InvalidPart

      • Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidPartOrder

      • Description: The list of parts was not in ascending order. Parts list must be specified in order by part number.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidPayer

      • Description: All access to this object has been disabled. Please contact AWS Support for further assistance.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: InvalidPolicyDocument

      • Description: The content of the form does not meet the conditions specified in the policy document.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidRange

      • Description: The requested range cannot be satisfied.

      • HTTP Status Code: 416 Requested Range Not Satisfiable

      • SOAP Fault Code Prefix: Client

      • Code: InvalidRequest

      • Description: Please use AWS4-HMAC-SHA256.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: SOAP requests must be made over an HTTPS connection.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Acceleration is not supported for buckets with non-DNS compliant names.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Acceleration is not supported for buckets with periods (.) in their names.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style requests.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Accelerate is not configured on this bucket.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Accelerate is disabled on this bucket.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Acceleration is not supported on this bucket. Contact AWS Support for more information.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. Contact AWS Support for more information.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidSecurity

      • Description: The provided security credentials are not valid.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: InvalidSOAPRequest

      • Description: The SOAP request body is invalid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidStorageClass

      • Description: The storage class you specified is not valid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidTargetBucketForLogging

      • Description: The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidToken

      • Description: The provided token is malformed or otherwise invalid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidURI

      • Description: Couldn't parse the specified URI.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: KeyTooLongError

      • Description: Your key is too long.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MalformedACLError

      • Description: The XML you provided was not well-formed or did not validate against our published schema.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MalformedPOSTRequest

      • Description: The body of your POST request is not well-formed multipart/form-data.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MalformedXML

      • Description: This happens when the user sends malformed XML (XML that doesn't conform to the published XSD) for the configuration. The error message is, \"The XML you provided was not well-formed or did not validate against our published schema.\"

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MaxMessageLengthExceeded

      • Description: Your request was too big.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MaxPostPreDataLengthExceededError

      • Description: Your POST request fields preceding the upload file were too large.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MetadataTooLarge

      • Description: Your metadata headers exceed the maximum allowed metadata size.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MethodNotAllowed

      • Description: The specified method is not allowed against this resource.

      • HTTP Status Code: 405 Method Not Allowed

      • SOAP Fault Code Prefix: Client

      • Code: MissingAttachment

      • Description: A SOAP attachment was expected, but none were found.

      • HTTP Status Code: N/A

      • SOAP Fault Code Prefix: Client

      • Code: MissingContentLength

      • Description: You must provide the Content-Length HTTP header.

      • HTTP Status Code: 411 Length Required

      • SOAP Fault Code Prefix: Client

      • Code: MissingRequestBodyError

      • Description: This happens when the user sends an empty XML document as a request. The error message is, \"Request body is empty.\"

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MissingSecurityElement

      • Description: The SOAP 1.1 request is missing a security element.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MissingSecurityHeader

      • Description: Your request is missing a required header.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: NoLoggingStatusForKey

      • Description: There is no such thing as a logging status subresource for a key.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchBucket

      • Description: The specified bucket does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchBucketPolicy

      • Description: The specified bucket does not have a bucket policy.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchKey

      • Description: The specified key does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchLifecycleConfiguration

      • Description: The lifecycle configuration does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchUpload

      • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchVersion

      • Description: Indicates that the version ID specified in the request does not match an existing version.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NotImplemented

      • Description: A header you provided implies functionality that is not implemented.

      • HTTP Status Code: 501 Not Implemented

      • SOAP Fault Code Prefix: Server

      • Code: NotSignedUp

      • Description: Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: OperationAborted

      • Description: A conflicting conditional operation is currently in progress against this resource. Try again.

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: PermanentRedirect

      • Description: The bucket you are attempting to access must be addressed using the specified endpoint. Send all future requests to this endpoint.

      • HTTP Status Code: 301 Moved Permanently

      • SOAP Fault Code Prefix: Client

      • Code: PreconditionFailed

      • Description: At least one of the preconditions you specified did not hold.

      • HTTP Status Code: 412 Precondition Failed

      • SOAP Fault Code Prefix: Client

      • Code: Redirect

      • Description: Temporary redirect.

      • HTTP Status Code: 307 Moved Temporarily

      • SOAP Fault Code Prefix: Client

      • Code: RestoreAlreadyInProgress

      • Description: Object restore is already in progress.

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: RequestIsNotMultiPartContent

      • Description: Bucket POST must be of the enclosure-type multipart/form-data.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: RequestTimeout

      • Description: Your socket connection to the server was not read from or written to within the timeout period.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: RequestTimeTooSkewed

      • Description: The difference between the request time and the server's time is too large.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: RequestTorrentOfBucketError

      • Description: Requesting the torrent file of a bucket is not permitted.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: SignatureDoesNotMatch

      • Description: The request signature we calculated does not match the signature you provided. Check your AWS secret access key and signing method. For more information, see REST Authentication and SOAP Authentication.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: ServiceUnavailable

      • Description: Reduce your request rate.

      • HTTP Status Code: 503 Service Unavailable

      • SOAP Fault Code Prefix: Server

      • Code: SlowDown

      • Description: Reduce your request rate.

      • HTTP Status Code: 503 Slow Down

      • SOAP Fault Code Prefix: Server

      • Code: TemporaryRedirect

      • Description: You are being redirected to the bucket while DNS updates.

      • HTTP Status Code: 307 Moved Temporarily

      • SOAP Fault Code Prefix: Client

      • Code: TokenRefreshRequired

      • Description: The provided token must be refreshed.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: TooManyBuckets

      • Description: You have attempted to create more buckets than allowed.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: UnexpectedContent

      • Description: This request does not support content.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: UnresolvableGrantByEmailAddress

      • Description: The email address you provided does not match any account on record.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: UserKeyMustBeSpecified

      • Description: The bucket POST must contain the specified field name. If it is specified, check the order of the fields.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

    " }, "Message":{ "shape":"Message", - "documentation":"

    " + "documentation":"

    The error message contains a generic description of the error condition in English. It is intended for a human audience. Simple programs display the message directly to the end user if they encounter an error condition they don't know how or don't care to handle. Sophisticated programs with more exhaustive error handling and proper internationalization are more likely to ignore the error message.
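    Illustrative sketch, not part of the model diff: with the AWS SDK for Java v2, programs typically branch on the machine-readable error code and treat the message as human-readable text; the bucket name below is hypothetical.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
        import software.amazon.awssdk.services.s3.model.S3Exception;

        public class ErrorHandlingExample {
            public static void main(String[] args) {
                S3Client s3 = S3Client.create();
                try {
                    s3.headBucket(HeadBucketRequest.builder().bucket("my-missing-bucket").build());
                } catch (S3Exception e) {
                    // errorCode() carries the code listed above (for example, NoSuchBucket);
                    // errorMessage() carries the human-readable description.
                    System.err.println(e.awsErrorDetails().errorCode() + ": " + e.awsErrorDetails().errorMessage());
                }
            }
        }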

    " } }, - "documentation":"

    " + "documentation":"

    Container for all error elements.

    " }, "ErrorDocument":{ "type":"structure", @@ -2824,7 +3131,7 @@ "documentation":"

    The object key name to use when a 4XX class error occurs.

    " } }, - "documentation":"

    " + "documentation":"

    The error information.

    " }, "Errors":{ "type":"list", @@ -2844,8 +3151,14 @@ "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", + "s3:ObjectRestore:*", "s3:ObjectRestore:Post", - "s3:ObjectRestore:Completed" + "s3:ObjectRestore:Completed", + "s3:Replication:*", + "s3:Replication:OperationFailedReplication", + "s3:Replication:OperationNotTracked", + "s3:Replication:OperationMissedThreshold", + "s3:Replication:OperationReplicatedAfterThreshold" ] }, "EventList":{ @@ -2853,6 +3166,24 @@ "member":{"shape":"Event"}, "flattened":true }, + "ExistingObjectReplication":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"ExistingObjectReplicationStatus", + "documentation":"

    " + } + }, + "documentation":"

    Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects in the Amazon S3 Developer Guide.

    " + }, + "ExistingObjectReplicationStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, "Expiration":{"type":"string"}, "ExpirationStatus":{ "type":"string", @@ -2901,7 +3232,7 @@ "FilterRuleList":{ "type":"list", "member":{"shape":"FilterRule"}, - "documentation":"

    A list of containers for the key value pair that defines the criteria for the filter rule.

    ", + "documentation":"

    A list of containers for the key-value pair that defines the criteria for the filter rule.

    ", "flattened":true }, "FilterRuleName":{ @@ -2927,9 +3258,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    Name of the bucket for which the accelerate configuration is retrieved.

    ", + "documentation":"

    The name of the bucket for which the accelerate configuration is retrieved.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2938,7 +3275,7 @@ "members":{ "Owner":{ "shape":"Owner", - "documentation":"

    " + "documentation":"

    Container for the bucket owner's display name and ID.

    " }, "Grants":{ "shape":"Grants", @@ -2953,9 +3290,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    Specifies the S3 bucket whose ACL is being requested.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2987,6 +3330,12 @@ "documentation":"

    The ID that identifies the analytics configuration.

    ", "location":"querystring", "locationName":"id" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -2995,7 +3344,7 @@ "members":{ "CORSRules":{ "shape":"CORSRules", - "documentation":"

    ", + "documentation":"

    A set of origins and methods (cross-origin access that you want to allow). You can add up to 100 rules to the configuration.

    ", "locationName":"CORSRule" } } @@ -3006,19 +3355,22 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name for which to get the cors configuration.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, "GetBucketEncryptionOutput":{ "type":"structure", "members":{ - "ServerSideEncryptionConfiguration":{ - "shape":"ServerSideEncryptionConfiguration", - "documentation":"

    " - } + "ServerSideEncryptionConfiguration":{"shape":"ServerSideEncryptionConfiguration"} }, "payload":"ServerSideEncryptionConfiguration" }, @@ -3031,20 +3383,26 @@ "documentation":"

    The name of the bucket from which the server-side encryption configuration is retrieved.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, - "GetBucketInventoryConfigurationOutput":{ + "GetBucketIntelligentTieringConfigurationOutput":{ "type":"structure", "members":{ - "InventoryConfiguration":{ - "shape":"InventoryConfiguration", - "documentation":"

    Specifies the inventory configuration.

    " + "IntelligentTieringConfiguration":{ + "shape":"IntelligentTieringConfiguration", + "documentation":"

    Container for S3 Intelligent-Tiering configuration.

    " } }, - "payload":"InventoryConfiguration" + "payload":"IntelligentTieringConfiguration" }, - "GetBucketInventoryConfigurationRequest":{ + "GetBucketIntelligentTieringConfigurationRequest":{ "type":"structure", "required":[ "Bucket", @@ -3053,15 +3411,52 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    The name of the bucket containing the inventory configuration to retrieve.

    ", + "documentation":"

    The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

    ", "location":"uri", "locationName":"Bucket" }, "Id":{ - "shape":"InventoryId", + "shape":"IntelligentTieringId", + "documentation":"

    The ID used to identify the S3 Intelligent-Tiering configuration.

    ", + "location":"querystring", + "locationName":"id" + } + } + }, + "GetBucketInventoryConfigurationOutput":{ + "type":"structure", + "members":{ + "InventoryConfiguration":{ + "shape":"InventoryConfiguration", + "documentation":"

    Specifies the inventory configuration.

    " + } + }, + "payload":"InventoryConfiguration" + }, + "GetBucketInventoryConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Id" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the bucket containing the inventory configuration to retrieve.

    ", + "location":"uri", + "locationName":"Bucket" + }, + "Id":{ + "shape":"InventoryId", "documentation":"

    The ID used to identify the inventory configuration.

    ", "location":"querystring", "locationName":"id" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3070,7 +3465,7 @@ "members":{ "Rules":{ "shape":"LifecycleRules", - "documentation":"

    ", + "documentation":"

    Container for a lifecycle rule.

    ", "locationName":"Rule" } } @@ -3081,9 +3476,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket for which to get the lifecycle information.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3092,7 +3493,7 @@ "members":{ "Rules":{ "shape":"Rules", - "documentation":"

    ", + "documentation":"

    Container for a lifecycle rule.

    ", "locationName":"Rule" } } @@ -3103,9 +3504,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket for which to get the lifecycle information.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3114,7 +3521,7 @@ "members":{ "LocationConstraint":{ "shape":"BucketLocationConstraint", - "documentation":"

    " + "documentation":"

    Specifies the Region where the bucket resides. For a list of all the Amazon S3 supported location constraints by Region, see Regions and Endpoints. Buckets in Region us-east-1 have a LocationConstraint of null.
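    A minimal sketch of mapping the null (or empty) LocationConstraint back to us-east-1 when calling the generated v2 client; the bucket name is a placeholder.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketLocationRequest;

    public class BucketLocationSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                String constraint = s3.getBucketLocation(
                        GetBucketLocationRequest.builder()
                                .bucket("my-bucket") // placeholder bucket name
                                .build())
                        .locationConstraintAsString();
                // Buckets in us-east-1 report a null or empty LocationConstraint, so map that case explicitly.
                String region = (constraint == null || constraint.isEmpty()) ? "us-east-1" : constraint;
                System.out.println("Bucket region: " + region);
            }
        }
    }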

    " } } }, @@ -3124,19 +3531,22 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket for which to get the location.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, "GetBucketLoggingOutput":{ "type":"structure", "members":{ - "LoggingEnabled":{ - "shape":"LoggingEnabled", - "documentation":"

    " - } + "LoggingEnabled":{"shape":"LoggingEnabled"} } }, "GetBucketLoggingRequest":{ @@ -3145,9 +3555,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name for which to get the logging information.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3179,6 +3595,12 @@ "documentation":"

    The ID used to identify the metrics configuration.

    ", "location":"querystring", "locationName":"id" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3188,9 +3610,43 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    Name of the bucket to get the notification configuration for.

    ", + "documentation":"

    The name of the bucket for which to get the notification configuration.

    ", + "location":"uri", + "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" + } + } + }, + "GetBucketOwnershipControlsOutput":{ + "type":"structure", + "members":{ + "OwnershipControls":{ + "shape":"OwnershipControls", + "documentation":"

    The OwnershipControls (BucketOwnerPreferred or ObjectWriter) currently in effect for this Amazon S3 bucket.

    " + } + }, + "payload":"OwnershipControls" + }, + "GetBucketOwnershipControlsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3210,9 +3666,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name for which to get the bucket policy.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3235,16 +3697,19 @@ "documentation":"

    The name of the Amazon S3 bucket whose policy status you want to retrieve.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, "GetBucketReplicationOutput":{ "type":"structure", "members":{ - "ReplicationConfiguration":{ - "shape":"ReplicationConfiguration", - "documentation":"

    " - } + "ReplicationConfiguration":{"shape":"ReplicationConfiguration"} }, "payload":"ReplicationConfiguration" }, @@ -3254,9 +3719,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name for which to get the replication information.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3275,9 +3746,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket for which to get the payment request configuration.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3287,7 +3764,7 @@ "members":{ "TagSet":{ "shape":"TagSet", - "documentation":"

    " + "documentation":"

    Contains the tag set.

    " } } }, @@ -3297,9 +3774,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket for which to get the tagging information.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3323,9 +3806,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket for which to get the versioning information.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3334,19 +3823,19 @@ "members":{ "RedirectAllRequestsTo":{ "shape":"RedirectAllRequestsTo", - "documentation":"

    " + "documentation":"

    Specifies the redirect behavior of all requests to a website endpoint of an Amazon S3 bucket.

    " }, "IndexDocument":{ "shape":"IndexDocument", - "documentation":"

    " + "documentation":"

    The name of the index document for the website (for example index.html).

    " }, "ErrorDocument":{ "shape":"ErrorDocument", - "documentation":"

    " + "documentation":"

    The object key name of the website error document to use for 4XX class errors.

    " }, "RoutingRules":{ "shape":"RoutingRules", - "documentation":"

    " + "documentation":"

    Rules that define when a redirect is applied and the redirect behavior.

    " } } }, @@ -3356,9 +3845,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name for which to get the website configuration.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3367,7 +3862,7 @@ "members":{ "Owner":{ "shape":"Owner", - "documentation":"

    " + "documentation":"

    Container for the bucket owner's display name and ID.

    " }, "Grants":{ "shape":"Grants", @@ -3390,13 +3885,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name that contains the object for which to get the ACL information.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    The key of the object for which to get the ACL information.

    ", "location":"uri", "locationName":"Key" }, @@ -3410,6 +3905,12 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3432,7 +3933,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    The bucket containing the object whose Legal Hold status you want to retrieve.

    ", + "documentation":"

    The bucket name containing the object whose Legal Hold status you want to retrieve.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -3452,6 +3953,12 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3460,7 +3967,7 @@ "members":{ "ObjectLockConfiguration":{ "shape":"ObjectLockConfiguration", - "documentation":"

    The specified bucket's object lock configuration.

    " + "documentation":"

    The specified bucket's Object Lock configuration.

    " } }, "payload":"ObjectLockConfiguration" @@ -3471,9 +3978,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    The bucket whose object lock configuration you want to retrieve.

    ", + "documentation":"

    The bucket whose Object Lock configuration you want to retrieve.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3493,13 +4006,13 @@ }, "AcceptRanges":{ "shape":"AcceptRanges", - "documentation":"

    ", + "documentation":"

    Indicates that a range of bytes was specified.

    ", "location":"header", "locationName":"accept-ranges" }, "Expiration":{ "shape":"Expiration", - "documentation":"

    If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.

    ", + "documentation":"

    If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key-value pairs providing object expiration information. The value of the rule-id is URL encoded.

    ", "location":"header", "locationName":"x-amz-expiration" }, @@ -3523,13 +4036,13 @@ }, "ETag":{ "shape":"ETag", - "documentation":"

    An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL

    ", + "documentation":"

    An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.

    ", "location":"header", "locationName":"ETag" }, "MissingMeta":{ "shape":"MissingMeta", - "documentation":"

    This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.

    ", + "documentation":"

    This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.

    ", "location":"header", "locationName":"x-amz-missing-meta" }, @@ -3589,7 +4102,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -3607,19 +4120,25 @@ }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

    ", + "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Indicates whether the object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

    ", + "documentation":"

    Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.

    ", "location":"header", "locationName":"x-amz-storage-class" }, @@ -3630,7 +4149,7 @@ }, "ReplicationStatus":{ "shape":"ReplicationStatus", - "documentation":"

    ", + "documentation":"

    Amazon S3 can return this if your request involves a bucket that is either a source or destination in a replication rule.

    ", "location":"header", "locationName":"x-amz-replication-status" }, @@ -3648,19 +4167,19 @@ }, "ObjectLockMode":{ "shape":"ObjectLockMode", - "documentation":"

    The object lock mode currently in place for this object.

    ", + "documentation":"

    The Object Lock mode currently in place for this object.

    ", "location":"header", "locationName":"x-amz-object-lock-mode" }, "ObjectLockRetainUntilDate":{ "shape":"ObjectLockRetainUntilDate", - "documentation":"

    The date and time when this object's object lock will expire.

    ", + "documentation":"

    The date and time when this object's Object Lock will expire.

    ", "location":"header", "locationName":"x-amz-object-lock-retain-until-date" }, "ObjectLockLegalHoldStatus":{ "shape":"ObjectLockLegalHoldStatus", - "documentation":"

    Indicates whether this object has an active legal hold. This field is only returned if you have permission to view an object's legal hold status.

    ", + "documentation":"

    Indicates whether this object has an active legal hold. This field is only returned if you have permission to view an object's legal hold status.

    ", "location":"header", "locationName":"x-amz-object-lock-legal-hold" } @@ -3676,7 +4195,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name containing the object.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
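    A minimal sketch of addressing an object through an access point with the v2 client, assuming (as described above) that the access point ARN can be passed in place of the bucket name; the ARN, object key, and download path are placeholders.

    import java.nio.file.Paths;

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetObjectRequest;

    public class AccessPointGetObjectSketch {
        public static void main(String[] args) {
            // Placeholder access point ARN; it goes wherever a bucket name would normally go.
            String accessPointArn = "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point";
            try (S3Client s3 = S3Client.create()) {
                s3.getObject(GetObjectRequest.builder()
                                .bucket(accessPointArn)
                                .key("photos/cat.jpg") // placeholder object key
                                .build(),
                        Paths.get("cat.jpg"));         // local file to write the object to
            }
        }
    }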

    ", "location":"uri", "locationName":"Bucket" }, @@ -3706,49 +4225,49 @@ }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Key of the object to get.

    ", "location":"uri", "locationName":"Key" }, "Range":{ "shape":"Range", - "documentation":"

    Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

    ", + "documentation":"

    Downloads the specified range of bytes of an object. For more information about the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

    Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
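    A minimal sketch of a single-range GET with the v2 client, reflecting the one-range-per-request limitation above; the bucket name and key are placeholders.

    import software.amazon.awssdk.core.ResponseBytes;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetObjectRequest;
    import software.amazon.awssdk.services.s3.model.GetObjectResponse;

    public class RangedGetSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // A single contiguous byte range; Amazon S3 doesn't support multiple ranges per GET request.
                GetObjectRequest request = GetObjectRequest.builder()
                        .bucket("my-bucket")     // placeholder bucket name
                        .key("large-object.bin") // placeholder object key
                        .range("bytes=0-1023")   // first 1,024 bytes only
                        .build();
                ResponseBytes<GetObjectResponse> firstChunk = s3.getObjectAsBytes(request);
                System.out.println("Fetched " + firstChunk.asByteArray().length + " bytes");
            }
        }
    }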

    ", "location":"header", "locationName":"Range" }, "ResponseCacheControl":{ "shape":"ResponseCacheControl", - "documentation":"

    Sets the Cache-Control header of the response.

    ", + "documentation":"

    Sets the Cache-Control header of the response.

    ", "location":"querystring", "locationName":"response-cache-control" }, "ResponseContentDisposition":{ "shape":"ResponseContentDisposition", - "documentation":"

    Sets the Content-Disposition header of the response

    ", + "documentation":"

    Sets the Content-Disposition header of the response.

    ", "location":"querystring", "locationName":"response-content-disposition" }, "ResponseContentEncoding":{ "shape":"ResponseContentEncoding", - "documentation":"

    Sets the Content-Encoding header of the response.

    ", + "documentation":"

    Sets the Content-Encoding header of the response.

    ", "location":"querystring", "locationName":"response-content-encoding" }, "ResponseContentLanguage":{ "shape":"ResponseContentLanguage", - "documentation":"

    Sets the Content-Language header of the response.

    ", + "documentation":"

    Sets the Content-Language header of the response.

    ", "location":"querystring", "locationName":"response-content-language" }, "ResponseContentType":{ "shape":"ResponseContentType", - "documentation":"

    Sets the Content-Type header of the response.

    ", + "documentation":"

    Sets the Content-Type header of the response.

    ", "location":"querystring", "locationName":"response-content-type" }, "ResponseExpires":{ "shape":"ResponseExpires", - "documentation":"

    Sets the Expires header of the response.

    ", + "documentation":"

    Sets the Expires header of the response.

    ", "location":"querystring", "locationName":"response-expires" }, @@ -3760,19 +4279,19 @@ }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"

    Specifies the algorithm to use to when encrypting the object (e.g., AES256).

    ", + "documentation":"

    Specifies the algorithm to use when encrypting the object (for example, AES256).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.

    ", + "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

    ", + "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.
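    A minimal sketch of sending all three SSE-C members on a GetObject call, assuming the object was uploaded with the same 256-bit customer key; the key bytes, bucket name, object key, and download path are placeholders.

    import java.nio.file.Paths;
    import java.security.MessageDigest;
    import java.util.Base64;

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetObjectRequest;

    public class SseCustomerKeySketch {
        public static void main(String[] args) throws Exception {
            byte[] customerKey = new byte[32]; // placeholder: the 256-bit key used when the object was uploaded
            String keyB64 = Base64.getEncoder().encodeToString(customerKey);
            String keyMd5B64 = Base64.getEncoder()
                    .encodeToString(MessageDigest.getInstance("MD5").digest(customerKey));

            GetObjectRequest request = GetObjectRequest.builder()
                    .bucket("my-bucket")            // placeholder bucket name
                    .key("encrypted-object")        // placeholder object key
                    .sseCustomerAlgorithm("AES256") // x-amz-server-side-encryption-customer-algorithm
                    .sseCustomerKey(keyB64)         // the key is used for this request and then discarded by S3
                    .sseCustomerKeyMD5(keyMd5B64)   // round-trip integrity check of the transmitted key
                    .build();

            try (S3Client s3 = S3Client.create()) {
                s3.getObject(request, Paths.get("decrypted-object")); // placeholder download path
            }
        }
    }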

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, @@ -3786,6 +4305,12 @@ "documentation":"

    Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. Useful for downloading just a part of an object.

    ", "location":"querystring", "locationName":"partNumber" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3808,7 +4333,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    The bucket containing the object whose retention settings you want to retrieve.

    ", + "documentation":"

    The bucket name containing the object whose retention settings you want to retrieve.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -3828,6 +4353,12 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3837,13 +4368,13 @@ "members":{ "VersionId":{ "shape":"ObjectVersionId", - "documentation":"

    ", + "documentation":"

    The versionId of the object for which you got the tagging information.

    ", "location":"header", "locationName":"x-amz-version-id" }, "TagSet":{ "shape":"TagSet", - "documentation":"

    " + "documentation":"

    Contains the tag set.

    " } } }, @@ -3856,21 +4387,27 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name containing the object for which to get the tagging information.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Object key for which to get the tagging information.

    ", "location":"uri", "locationName":"Key" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"

    ", + "documentation":"

    The versionId of the object for which to get the tagging information.

    ", "location":"querystring", "locationName":"versionId" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3879,7 +4416,7 @@ "members":{ "Body":{ "shape":"Body", - "documentation":"

    ", + "documentation":"

    A Bencoded dictionary as defined by the BitTorrent specification.

    ", "streaming":true }, "RequestCharged":{ @@ -3899,13 +4436,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket containing the object for which to get the torrent files.

    ", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    The object key for which to get the information.

    ", "location":"uri", "locationName":"Key" }, @@ -3913,6 +4450,12 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3935,6 +4478,12 @@ "documentation":"

    The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want to retrieve.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -3944,24 +4493,24 @@ "members":{ "Tier":{ "shape":"Tier", - "documentation":"

    Glacier retrieval tier at which the restore will be processed.

    " + "documentation":"

    Retrieval tier at which the restore will be processed.

    " } }, - "documentation":"

    " + "documentation":"

    Container for S3 Glacier job parameters.

    " }, "Grant":{ "type":"structure", "members":{ "Grantee":{ "shape":"Grantee", - "documentation":"

    " + "documentation":"

    The person being granted permissions.

    " }, "Permission":{ "shape":"Permission", "documentation":"

    Specifies the permission given to the grantee.

    " } }, - "documentation":"

    " + "documentation":"

    Container for grant information.

    " }, "GrantFullControl":{"type":"string"}, "GrantRead":{"type":"string"}, @@ -3978,7 +4527,7 @@ }, "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

    Email address of the grantee.

    " + "documentation":"

    Email address of the grantee.

    Using email addresses to specify a grantee is only supported in the following AWS Regions:

    • US East (N. Virginia)

    • US West (N. California)

    • US West (Oregon)

    • Asia Pacific (Singapore)

    • Asia Pacific (Sydney)

    • Asia Pacific (Tokyo)

    • Europe (Ireland)

    • South America (São Paulo)

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    " }, "ID":{ "shape":"ID", @@ -3995,7 +4544,7 @@ "documentation":"

    URI of the grantee group.

    " } }, - "documentation":"

    ", + "documentation":"

    Container for the person being granted permissions.

    ", "xmlNamespace":{ "prefix":"xsi", "uri":"http://www.w3.org/2001/XMLSchema-instance" @@ -4014,9 +4563,15 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -4031,22 +4586,28 @@ }, "AcceptRanges":{ "shape":"AcceptRanges", - "documentation":"

    ", + "documentation":"

    Indicates that a range of bytes was specified.

    ", "location":"header", "locationName":"accept-ranges" }, "Expiration":{ "shape":"Expiration", - "documentation":"

    If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.

    ", + "documentation":"

    If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key-value pairs providing object expiration information. The value of the rule-id is URL encoded.

    ", "location":"header", "locationName":"x-amz-expiration" }, "Restore":{ "shape":"Restore", - "documentation":"

    Provides information about object restoration operation and expiration time of the restored object copy.

    ", + "documentation":"

    If the object is an archived object (an object whose storage class is GLACIER), the response includes this header if either the archive restoration is in progress (see RestoreObject) or an archive copy is already restored.

    If an archive copy is already restored, the header value indicates when Amazon S3 is scheduled to delete the object copy. For example:

    x-amz-restore: ongoing-request=\"false\", expiry-date=\"Fri, 23 Dec 2012 00:00:00 GMT\"

    If the object restoration is in progress, the header returns the value ongoing-request=\"true\".

    For more information about archiving objects, see Transitioning Objects: General Considerations.

    ", "location":"header", "locationName":"x-amz-restore" }, + "ArchiveStatus":{ + "shape":"ArchiveStatus", + "documentation":"

    The archive state of the head object.

    ", + "location":"header", + "locationName":"x-amz-archive-status" + }, "LastModified":{ "shape":"LastModified", "documentation":"

    Last modified date of the object

    ", @@ -4061,13 +4622,13 @@ }, "ETag":{ "shape":"ETag", - "documentation":"

    An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL

    ", + "documentation":"

    An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.

    ", "location":"header", "locationName":"ETag" }, "MissingMeta":{ "shape":"MissingMeta", - "documentation":"

    This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.

    ", + "documentation":"

    This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.

    ", "location":"header", "locationName":"x-amz-missing-meta" }, @@ -4121,7 +4682,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    If the object is stored using server-side encryption either with an AWS KMS customer master key (CMK) or an Amazon S3-managed encryption key, the response includes this header with the value of the server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -4139,19 +4700,25 @@ }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

    ", + "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Indicates whether the object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

    ", + "documentation":"

    Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.

    For more information, see Storage Classes.

    ", "location":"header", "locationName":"x-amz-storage-class" }, @@ -4162,7 +4729,7 @@ }, "ReplicationStatus":{ "shape":"ReplicationStatus", - "documentation":"

    ", + "documentation":"

    Amazon S3 can return this header if your request involves a bucket that is either a source or a destination in a replication rule.

    In replication, you have a source bucket on which you configure replication and destination bucket or buckets where Amazon S3 stores object replicas. When you request an object (GetObject) or object metadata (HeadObject) from these buckets, Amazon S3 will return the x-amz-replication-status header in the response as follows:

    • If requesting an object from the source bucket — Amazon S3 will return the x-amz-replication-status header if the object in your request is eligible for replication.

      For example, suppose that in your replication configuration, you specify object prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix TaxDocs. Any objects you upload with this key name prefix, for example TaxDocs/document1.pdf, are eligible for replication. For any object request with this key name prefix, Amazon S3 will return the x-amz-replication-status header with value PENDING, COMPLETED or FAILED indicating object replication status.

    • If requesting an object from a destination bucket — Amazon S3 will return the x-amz-replication-status header with value REPLICA if the object in your request is a replica that Amazon S3 created and there is no replica modification replication in progress.

    • When replicating objects to multiple destination buckets, the x-amz-replication-status header acts differently. The header of the source object will only return a value of COMPLETED when replication is successful to all destinations. The header will remain at the value PENDING until replication has completed for all destinations. If one or more destinations fails replication, the header will return FAILED.

    For more information, see Replication.
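    A minimal sketch of reading this header from a HeadObject response with the v2 client; the bucket name and key (borrowing the TaxDocs prefix from the example above) are placeholders.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
    import software.amazon.awssdk.services.s3.model.HeadObjectResponse;

    public class ReplicationStatusSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                HeadObjectResponse head = s3.headObject(HeadObjectRequest.builder()
                        .bucket("source-bucket")      // placeholder bucket name
                        .key("TaxDocs/document1.pdf") // placeholder key under the replicated prefix
                        .build());
                // Null when the object is not covered by a replication rule;
                // otherwise PENDING, COMPLETED, FAILED, or REPLICA as described above.
                System.out.println("x-amz-replication-status: " + head.replicationStatus());
            }
        }
    }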

    ", "location":"header", "locationName":"x-amz-replication-status" }, @@ -4174,19 +4741,19 @@ }, "ObjectLockMode":{ "shape":"ObjectLockMode", - "documentation":"

    The object lock mode currently in place for this object.

    ", + "documentation":"

    The Object Lock mode, if any, that's in effect for this object. This header is only returned if the requester has the s3:GetObjectRetention permission. For more information about S3 Object Lock, see Object Lock.

    ", "location":"header", "locationName":"x-amz-object-lock-mode" }, "ObjectLockRetainUntilDate":{ "shape":"ObjectLockRetainUntilDate", - "documentation":"

    The date and time when this object's object lock expires.

    ", + "documentation":"

    The date and time when the Object Lock retention period expires. This header is only returned if the requester has the s3:GetObjectRetention permission.

    ", "location":"header", "locationName":"x-amz-object-lock-retain-until-date" }, "ObjectLockLegalHoldStatus":{ "shape":"ObjectLockLegalHoldStatus", - "documentation":"

    The Legal Hold status for the specified object.

    ", + "documentation":"

    Specifies whether a legal hold is in effect for this object. This header is only returned if the requester has the s3:GetObjectLegalHold permission. This header is not returned if the specified version of this object has never had a legal hold applied. For more information about S3 Object Lock, see Object Lock.

    ", "location":"header", "locationName":"x-amz-object-lock-legal-hold" } @@ -4201,7 +4768,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket containing the object.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -4231,13 +4798,13 @@ }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    The object key.

    ", "location":"uri", "locationName":"Key" }, "Range":{ "shape":"Range", - "documentation":"

    Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

    ", + "documentation":"

    Downloads the specified range of bytes of an object. For more information about the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

    Amazon S3 doesn't support retrieving multiple ranges of data per GET request.

    ", "location":"header", "locationName":"Range" }, @@ -4249,19 +4816,19 @@ }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"

    Specifies the algorithm to use to when encrypting the object (e.g., AES256).

    ", + "documentation":"

    Specifies the algorithm to use when encrypting the object (for example, AES256).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.

    ", + "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

    ", + "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, @@ -4275,6 +4842,12 @@ "documentation":"

    Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object.

    ", "location":"querystring", "locationName":"partNumber" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -4292,10 +4865,10 @@ "members":{ "Suffix":{ "shape":"Suffix", - "documentation":"

    A suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character.

    " + "documentation":"

    A suffix that is appended to a request that is for a directory on the website endpoint (for example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html). The suffix must not be empty and must not include a slash character.

    " } }, - "documentation":"

    " + "documentation":"

    Container for the Suffix element.

    " }, "Initiated":{"type":"timestamp"}, "Initiator":{ @@ -4310,7 +4883,7 @@ "documentation":"

    Name of the Principal.

    " } }, - "documentation":"

    " + "documentation":"

    Container element that identifies who initiated the multipart upload.

    " }, "InputSerialization":{ "type":"structure", @@ -4334,6 +4907,95 @@ }, "documentation":"

    Describes the serialization format of the object.

    " }, + "IntelligentTieringAccessTier":{ + "type":"string", + "enum":[ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS" + ] + }, + "IntelligentTieringAndOperator":{ + "type":"structure", + "members":{ + "Prefix":{ + "shape":"Prefix", + "documentation":"

    An object key name prefix that identifies the subset of objects to which the configuration applies.

    " + }, + "Tags":{ + "shape":"TagSet", + "documentation":"

    All of these tags must exist in the object's tag set in order for the configuration to apply.

    ", + "flattened":true, + "locationName":"Tag" + } + }, + "documentation":"

    A container for specifying S3 Intelligent-Tiering filters. The filters determine the subset of objects to which the rule applies.

    " + }, + "IntelligentTieringConfiguration":{ + "type":"structure", + "required":[ + "Id", + "Status", + "Tierings" + ], + "members":{ + "Id":{ + "shape":"IntelligentTieringId", + "documentation":"

    The ID used to identify the S3 Intelligent-Tiering configuration.

    " + }, + "Filter":{ + "shape":"IntelligentTieringFilter", + "documentation":"

    Specifies a bucket filter. The configuration only includes objects that meet the filter's criteria.

    " + }, + "Status":{ + "shape":"IntelligentTieringStatus", + "documentation":"

    Specifies the status of the configuration.

    " + }, + "Tierings":{ + "shape":"TieringList", + "documentation":"

    Specifies the S3 Intelligent-Tiering storage class tier of the configuration.

    ", + "locationName":"Tiering" + } + }, + "documentation":"

    Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket.

    For information about the S3 Intelligent-Tiering storage class, see Storage class for automatically optimizing frequently and infrequently accessed objects.
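    A minimal sketch of retrieving one of these configurations, assuming the generated v2 client exposes the GetBucketIntelligentTieringConfiguration operation that accompanies these shapes; the bucket name and configuration ID are placeholders.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketIntelligentTieringConfigurationRequest;
    import software.amazon.awssdk.services.s3.model.GetBucketIntelligentTieringConfigurationResponse;
    import software.amazon.awssdk.services.s3.model.Tiering;

    public class IntelligentTieringSketch {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                GetBucketIntelligentTieringConfigurationResponse response =
                        s3.getBucketIntelligentTieringConfiguration(
                                GetBucketIntelligentTieringConfigurationRequest.builder()
                                        .bucket("my-bucket")  // placeholder bucket name
                                        .id("archive-config") // placeholder configuration ID
                                        .build());
                // Each Tiering pairs an access tier with the number of days before objects move into it.
                for (Tiering tiering : response.intelligentTieringConfiguration().tierings()) {
                    System.out.println(tiering.accessTierAsString() + " after " + tiering.days() + " days");
                }
            }
        }
    }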

    " + }, + "IntelligentTieringConfigurationList":{ + "type":"list", + "member":{"shape":"IntelligentTieringConfiguration"}, + "flattened":true + }, + "IntelligentTieringDays":{"type":"integer"}, + "IntelligentTieringFilter":{ + "type":"structure", + "members":{ + "Prefix":{ + "shape":"Prefix", + "documentation":"

    An object key name prefix that identifies the subset of objects to which the rule applies.

    " + }, + "Tag":{"shape":"Tag"}, + "And":{ + "shape":"IntelligentTieringAndOperator", + "documentation":"

    A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.

    " + } + }, + "documentation":"

    The Filter is used to identify objects that the S3 Intelligent-Tiering configuration applies to.

    " + }, + "IntelligentTieringId":{"type":"string"}, + "IntelligentTieringStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "InvalidObjectState":{ + "type":"structure", + "members":{ + "StorageClass":{"shape":"StorageClass"}, + "AccessTier":{"shape":"IntelligentTieringAccessTier"} + }, + "documentation":"

    Object is archived and inaccessible until restored.

    ", + "exception":true + }, "InventoryConfiguration":{ "type":"structure", "required":[ @@ -4389,19 +5051,19 @@ "documentation":"

    Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published.

    " } }, - "documentation":"

    " + "documentation":"

    Specifies the inventory configuration for an Amazon S3 bucket.

    " }, "InventoryEncryption":{ "type":"structure", "members":{ "SSES3":{ "shape":"SSES3", - "documentation":"

    Specifies the use of SSE-S3 to encrypt delivered Inventory reports.

    ", + "documentation":"

    Specifies the use of SSE-S3 to encrypt delivered inventory reports.

    ", "locationName":"SSE-S3" }, "SSEKMS":{ "shape":"SSEKMS", - "documentation":"

    Specifies the use of SSE-KMS to encrypt delivered Inventory reports.

    ", + "documentation":"

    Specifies the use of SSE-KMS to encrypt delivered inventory reports.

    ", "locationName":"SSE-KMS" } }, @@ -4416,7 +5078,7 @@ "documentation":"

    The prefix that an object must have to be included in the inventory results.

    " } }, - "documentation":"

    " + "documentation":"

    Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria.

    " }, "InventoryFormat":{ "type":"string", @@ -4453,7 +5115,8 @@ "EncryptionStatus", "ObjectLockRetainUntilDate", "ObjectLockMode", - "ObjectLockLegalHoldStatus" + "ObjectLockLegalHoldStatus", + "IntelligentTieringAccessTier" ] }, "InventoryOptionalFields":{ @@ -4472,11 +5135,11 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

    The ID of the account that owns the destination bucket.

    " + "documentation":"

    The account ID that owns the destination S3 bucket. If no account ID is provided, the owner is not validated before exporting data.

    Although this value is optional, we strongly recommend that you set it to help prevent problems if the destination bucket ownership changes.

    " }, "Bucket":{ "shape":"BucketName", - "documentation":"

    The Amazon resource name (ARN) of the bucket where inventory results will be published.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the bucket where inventory results will be published.

    " }, "Format":{ "shape":"InventoryFormat", @@ -4491,7 +5154,7 @@ "documentation":"

    Contains the type of server-side encryption used to encrypt the inventory results.

    " } }, - "documentation":"

    " + "documentation":"

    Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published.

    " }, "InventorySchedule":{ "type":"structure", @@ -4502,7 +5165,7 @@ "documentation":"

    Specifies how frequently inventory results are produced.

    " } }, - "documentation":"

    " + "documentation":"

    Specifies the schedule for generating inventory results.
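
    The following is a minimal AWS SDK for Java v2 sketch that wires the destination, filter, and schedule shapes above into one inventory configuration. The bucket names, destination ARN, account ID, and report ID are hypothetical placeholders.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.*;

    public class PutInventoryConfig {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            InventoryConfiguration config = InventoryConfiguration.builder()
                    .id("daily-report")
                    .isEnabled(true)
                    .includedObjectVersions(InventoryIncludedObjectVersions.CURRENT)
                    .filter(InventoryFilter.builder().prefix("data/").build())
                    .schedule(InventorySchedule.builder().frequency(InventoryFrequency.DAILY).build())
                    .destination(InventoryDestination.builder()
                            .s3BucketDestination(InventoryS3BucketDestination.builder()
                                    .bucket("arn:aws:s3:::example-inventory-destination") // ARN of the destination bucket
                                    .accountId("111122223333")                             // expected destination bucket owner
                                    .format(InventoryFormat.CSV)
                                    .prefix("inventory")
                                    .build())
                            .build())
                    .build();
            s3.putBucketInventoryConfiguration(PutBucketInventoryConfigurationRequest.builder()
                    .bucket("my-bucket")
                    .id("daily-report")
                    .inventoryConfiguration(config)
                    .build());
        }
    }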

    " }, "IsEnabled":{"type":"boolean"}, "IsLatest":{"type":"boolean"}, @@ -4516,17 +5179,17 @@ "documentation":"

    The type of JSON. Valid values: Document, Lines.

    " } }, - "documentation":"

    " + "documentation":"

    Specifies JSON as the object's input serialization format.

    " }, "JSONOutput":{ "type":"structure", "members":{ "RecordDelimiter":{ "shape":"RecordDelimiter", - "documentation":"

    The value used to separate individual records in the output.

    " + "documentation":"

    The value used to separate individual records in the output. If no value is specified, Amazon S3 uses a newline character ('\\n').

    " } }, - "documentation":"

    " + "documentation":"

    Specifies JSON as the request's output serialization format.

    " }, "JSONType":{ "type":"string", @@ -4558,10 +5221,7 @@ "documentation":"

    The Amazon S3 bucket event for which to invoke the AWS Lambda function. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.

    ", "locationName":"Event" }, - "Filter":{ - "shape":"NotificationConfigurationFilter", - "documentation":"

    " - } + "Filter":{"shape":"NotificationConfigurationFilter"} }, "documentation":"

    A container for specifying the configuration for AWS Lambda notifications.
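
    A hedged sketch of how this Lambda notification shape could be used through the AWS SDK for Java v2; the Lambda function ARN, bucket name, and prefix filter value are hypothetical.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.*;

    public class PutLambdaNotification {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            LambdaFunctionConfiguration lambdaConfig = LambdaFunctionConfiguration.builder()
                    .lambdaFunctionArn("arn:aws:lambda:us-east-1:111122223333:function:ProcessUploads") // hypothetical ARN
                    .events(Event.S3_OBJECT_CREATED)
                    .filter(NotificationConfigurationFilter.builder()
                            .key(S3KeyFilter.builder()
                                    .filterRules(FilterRule.builder()
                                            .name(FilterRuleName.PREFIX)
                                            .value("uploads/")
                                            .build())
                                    .build())
                            .build())
                    .build();
            s3.putBucketNotificationConfiguration(PutBucketNotificationConfigurationRequest.builder()
                    .bucket("my-bucket")
                    .notificationConfiguration(NotificationConfiguration.builder()
                            .lambdaFunctionConfigurations(lambdaConfig)
                            .build())
                    .build());
        }
    }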

    " }, @@ -4577,11 +5237,11 @@ "members":{ "Rules":{ "shape":"Rules", - "documentation":"

    ", + "documentation":"

    Specifies lifecycle configuration rules for an Amazon S3 bucket.

    ", "locationName":"Rule" } }, - "documentation":"

    " + "documentation":"

    Container for lifecycle rules. You can add as many as 1000 rules.

    " }, "LifecycleExpiration":{ "type":"structure", @@ -4599,7 +5259,7 @@ "documentation":"

    Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.

    " } }, - "documentation":"

    " + "documentation":"

    Container for the expiration for the lifecycle of the object.

    " }, "LifecycleRule":{ "type":"structure", @@ -4607,7 +5267,7 @@ "members":{ "Expiration":{ "shape":"LifecycleExpiration", - "documentation":"

    " + "documentation":"

    Specifies the expiration for the lifecycle of the object in the form of date, days, and whether the object has a delete marker.

    " }, "ID":{ "shape":"ID", @@ -4615,44 +5275,35 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

    Prefix identifying one or more objects to which the rule applies. This is No longer used; use Filter instead.

    ", + "documentation":"

    Prefix identifying one or more objects to which the rule applies. This is no longer used; use Filter instead.

    ", "deprecated":true }, - "Filter":{ - "shape":"LifecycleRuleFilter", - "documentation":"

    " - }, + "Filter":{"shape":"LifecycleRuleFilter"}, "Status":{ "shape":"ExpirationStatus", "documentation":"

    If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied.

    " }, "Transitions":{ "shape":"TransitionList", - "documentation":"

    ", + "documentation":"

    Specifies when an Amazon S3 object transitions to a specified storage class.

    ", "locationName":"Transition" }, "NoncurrentVersionTransitions":{ "shape":"NoncurrentVersionTransitionList", - "documentation":"

    ", + "documentation":"

    Specifies the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to a specific storage class at a set period in the object's lifetime.

    ", "locationName":"NoncurrentVersionTransition" }, - "NoncurrentVersionExpiration":{ - "shape":"NoncurrentVersionExpiration", - "documentation":"

    " - }, - "AbortIncompleteMultipartUpload":{ - "shape":"AbortIncompleteMultipartUpload", - "documentation":"

    " - } + "NoncurrentVersionExpiration":{"shape":"NoncurrentVersionExpiration"}, + "AbortIncompleteMultipartUpload":{"shape":"AbortIncompleteMultipartUpload"} }, - "documentation":"

    " + "documentation":"

    A lifecycle rule for individual objects in an Amazon S3 bucket.
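
    For orientation, here is a small AWS SDK for Java v2 sketch that builds one such lifecycle rule (prefix filter, transition, expiration, and incomplete-upload abort) and applies it to a bucket. The bucket name, rule ID, prefix, and day counts are hypothetical.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.*;

    public class PutLifecycleConfig {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            LifecycleRule rule = LifecycleRule.builder()
                    .id("expire-logs")
                    .status(ExpirationStatus.ENABLED)
                    .filter(LifecycleRuleFilter.builder().prefix("logs/").build())
                    .transitions(Transition.builder()
                            .days(30)
                            .storageClass(TransitionStorageClass.STANDARD_IA)
                            .build())
                    .expiration(LifecycleExpiration.builder().days(365).build())
                    .abortIncompleteMultipartUpload(AbortIncompleteMultipartUpload.builder()
                            .daysAfterInitiation(7)
                            .build())
                    .build();
            s3.putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
                    .bucket("my-bucket")
                    .lifecycleConfiguration(BucketLifecycleConfiguration.builder().rules(rule).build())
                    .build());
        }
    }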

    " }, "LifecycleRuleAndOperator":{ "type":"structure", "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"

    " + "documentation":"

    Prefix identifying one or more objects to which the rule applies.

    " }, "Tags":{ "shape":"TagSet", @@ -4674,12 +5325,9 @@ "shape":"Tag", "documentation":"

    This tag must exist in the object's tag set in order for the rule to apply.

    " }, - "And":{ - "shape":"LifecycleRuleAndOperator", - "documentation":"

    " - } + "And":{"shape":"LifecycleRuleAndOperator"} }, - "documentation":"

    The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified.

    " + "documentation":"

    The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified.

    " }, "LifecycleRules":{ "type":"list", @@ -4695,11 +5343,11 @@ }, "ContinuationToken":{ "shape":"Token", - "documentation":"

    The ContinuationToken that represents where this request began.

    " + "documentation":"

    The marker that is used as a starting point for this analytics configuration list response. This value is present if it was sent in the request.

    " }, "NextContinuationToken":{ "shape":"NextToken", - "documentation":"

    NextContinuationToken is sent when isTruncated is true, which indicates that there are more analytics configurations to list. The next request must include this NextContinuationToken. The token is obfuscated and is not a usable value.

    " + "documentation":"

    NextContinuationToken is sent when isTruncated is true, which indicates that there are more analytics configurations to list. The next request must include this NextContinuationToken. The token is obfuscated and is not a usable value.

    " }, "AnalyticsConfigurationList":{ "shape":"AnalyticsConfigurationList", @@ -4718,6 +5366,52 @@ "location":"uri", "locationName":"Bucket" }, + "ContinuationToken":{ + "shape":"Token", + "documentation":"

    The ContinuationToken that represents a placeholder from where this request should begin.

    ", + "location":"querystring", + "locationName":"continuation-token" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" + } + } + }, + "ListBucketIntelligentTieringConfigurationsOutput":{ + "type":"structure", + "members":{ + "IsTruncated":{ + "shape":"IsTruncated", + "documentation":"

    Indicates whether the returned list of S3 Intelligent-Tiering configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.

    " + }, + "ContinuationToken":{ + "shape":"Token", + "documentation":"

    The ContinuationToken that represents a placeholder from where this request should begin.

    " + }, + "NextContinuationToken":{ + "shape":"NextToken", + "documentation":"

    The marker used to continue this S3 Intelligent-Tiering configuration listing. Use the NextContinuationToken from this response to continue the listing in a subsequent request. The continuation token is an opaque value that Amazon S3 understands.

    " + }, + "IntelligentTieringConfigurationList":{ + "shape":"IntelligentTieringConfigurationList", + "documentation":"

    The list of S3 Intelligent-Tiering configurations for a bucket.

    ", + "locationName":"IntelligentTieringConfiguration" + } + } + }, + "ListBucketIntelligentTieringConfigurationsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

    ", + "location":"uri", + "locationName":"Bucket" + }, "ContinuationToken":{ "shape":"Token", "documentation":"

    The ContinuationToken that represents a placeholder from where this request should begin.

    ", @@ -4740,11 +5434,11 @@ }, "IsTruncated":{ "shape":"IsTruncated", - "documentation":"

    Indicates whether the returned list of inventory configurations is truncated in this response. A value of true indicates that the list is truncated.

    " + "documentation":"

    Tells whether the returned list of inventory configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken is provided for a subsequent request.

    " }, "NextContinuationToken":{ "shape":"NextToken", - "documentation":"

    The marker used to continue this inventory configuration listing. Use the NextContinuationToken from this response to continue the listing in a subsequent request. The continuation token is an opaque value that Amazon S3 understands.

    " + "documentation":"

    The marker used to continue this inventory configuration listing. Use the NextContinuationToken from this response to continue the listing in a subsequent request. The continuation token is an opaque value that Amazon S3 understands.

    " } } }, @@ -4763,6 +5457,12 @@ "documentation":"

    The marker used to continue an inventory configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

    ", "location":"querystring", "locationName":"continuation-token" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -4779,7 +5479,7 @@ }, "NextContinuationToken":{ "shape":"NextToken", - "documentation":"

    The marker used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

    " + "documentation":"

    The marker used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

    " }, "MetricsConfigurationList":{ "shape":"MetricsConfigurationList", @@ -4803,6 +5503,12 @@ "documentation":"

    The marker that is used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

    ", "location":"querystring", "locationName":"continuation-token" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -4811,11 +5517,11 @@ "members":{ "Buckets":{ "shape":"Buckets", - "documentation":"

    " + "documentation":"

    The list of buckets owned by the requestor.

    " }, "Owner":{ "shape":"Owner", - "documentation":"

    " + "documentation":"

    The owner of the buckets listed.

    " } } }, @@ -4824,7 +5530,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    Name of the bucket to which the multipart upload was initiated.

    " + "documentation":"

    The name of the bucket to which the multipart upload was initiated.

    " }, "KeyMarker":{ "shape":"KeyMarker", @@ -4844,11 +5550,11 @@ }, "Delimiter":{ "shape":"Delimiter", - "documentation":"

    " + "documentation":"

    Contains the delimiter you specified in the request. If you don't specify a delimiter in your request, this element is absent from the response.

    " }, "NextUploadIdMarker":{ "shape":"NextUploadIdMarker", - "documentation":"

    When a list is truncated, this element specifies the value that should be used for the upload-id-marker request parameter in a subsequent request.

    " + "documentation":"

    When a list is truncated, this element specifies the value that should be used for the upload-id-marker request parameter in a subsequent request.

    " }, "MaxUploads":{ "shape":"MaxUploads", @@ -4860,16 +5566,16 @@ }, "Uploads":{ "shape":"MultipartUploadList", - "documentation":"

    ", + "documentation":"

    Container for elements related to a particular multipart upload. A response can contain zero or more Upload elements.

    ", "locationName":"Upload" }, "CommonPrefixes":{ "shape":"CommonPrefixList", - "documentation":"

    " + "documentation":"

    If you specify a delimiter in the request, then the result returns each distinct key prefix containing the delimiter in a CommonPrefixes element. The distinct key prefixes are returned in the Prefix child element.

    " }, "EncodingType":{ "shape":"EncodingType", - "documentation":"

    Encoding type used by Amazon S3 to encode object keys in the response.

    " + "documentation":"

    Encoding type used by Amazon S3 to encode object keys in the response.

    If you specify the encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

    Delimiter, KeyMarker, Prefix, NextKeyMarker, Key.

    " } } }, @@ -4879,13 +5585,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket to which the multipart upload was initiated.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "Delimiter":{ "shape":"Delimiter", - "documentation":"

    Character you use to group keys.

    ", + "documentation":"

    Character you use to group keys.

    All keys that contain the same string between the prefix, if specified, and the first occurrence of the delimiter after the prefix are grouped under a single result element, CommonPrefixes. If you don't specify the prefix parameter, then the substring starts at the beginning of the key. The keys that are grouped under CommonPrefixes result element are not returned elsewhere in the response.

    ", "location":"querystring", "locationName":"delimiter" }, @@ -4896,7 +5602,7 @@ }, "KeyMarker":{ "shape":"KeyMarker", - "documentation":"

    Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.

    ", + "documentation":"

    Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.

    If upload-id-marker is not specified, only the keys lexicographically greater than the specified key-marker will be included in the list.

    If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker might also be included, provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker.

    ", "location":"querystring", "locationName":"key-marker" }, @@ -4908,15 +5614,21 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

    Lists in-progress uploads only for those keys that begin with the specified prefix.

    ", + "documentation":"

    Lists in-progress uploads only for those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different groupings of keys. (You can think of using prefix to make groups in the same way you'd use a folder in a file system.)

    ", "location":"querystring", "locationName":"prefix" }, "UploadIdMarker":{ "shape":"UploadIdMarker", - "documentation":"

    Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored.

    ", + "documentation":"

    Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored. Otherwise, any multipart uploads for a key equal to the key-marker might be included in the list only if they have an upload ID lexicographically greater than the specified upload-id-marker.
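
    The key-marker / upload-id-marker pagination described above might be driven like this with the AWS SDK for Java v2; the bucket name and prefix are hypothetical placeholders.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.*;

    public class ListInProgressUploads {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            String keyMarker = null;
            String uploadIdMarker = null;
            ListMultipartUploadsResponse response;
            do {
                response = s3.listMultipartUploads(ListMultipartUploadsRequest.builder()
                        .bucket("my-bucket")
                        .prefix("uploads/")
                        .maxUploads(100)
                        .keyMarker(keyMarker)
                        .uploadIdMarker(uploadIdMarker)
                        .build());
                response.uploads().forEach(u -> System.out.println(u.key() + " " + u.uploadId()));
                // Both markers from the previous page are carried into the next request.
                keyMarker = response.nextKeyMarker();
                uploadIdMarker = response.nextUploadIdMarker();
            } while (Boolean.TRUE.equals(response.isTruncated()));
        }
    }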

    ", "location":"querystring", "locationName":"upload-id-marker" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -4925,57 +5637,57 @@ "members":{ "IsTruncated":{ "shape":"IsTruncated", - "documentation":"

    A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria. If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results.

    " + "documentation":"

    A flag that indicates whether Amazon S3 returned all of the results that satisfied the search criteria. If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results.

    " }, "KeyMarker":{ "shape":"KeyMarker", - "documentation":"

    Marks the last Key returned in a truncated response.

    " + "documentation":"

    Marks the last key returned in a truncated response.

    " }, "VersionIdMarker":{ "shape":"VersionIdMarker", - "documentation":"

    " + "documentation":"

    Marks the last version of the key returned in a truncated response.

    " }, "NextKeyMarker":{ "shape":"NextKeyMarker", - "documentation":"

    Use this value for the key marker request parameter in a subsequent request.

    " + "documentation":"

    When the number of responses exceeds the value of MaxKeys, NextKeyMarker specifies the first key not returned that satisfies the search criteria. Use this value for the key-marker request parameter in a subsequent request.

    " }, "NextVersionIdMarker":{ "shape":"NextVersionIdMarker", - "documentation":"

    Use this value for the next version id marker parameter in a subsequent request.

    " + "documentation":"

    When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker specifies the first object version not returned that satisfies the search criteria. Use this value for the version-id-marker request parameter in a subsequent request.

    " }, "Versions":{ "shape":"ObjectVersionList", - "documentation":"

    ", + "documentation":"

    Container for version information.

    ", "locationName":"Version" }, "DeleteMarkers":{ "shape":"DeleteMarkers", - "documentation":"

    ", + "documentation":"

    Container for an object that is a delete marker.

    ", "locationName":"DeleteMarker" }, "Name":{ "shape":"BucketName", - "documentation":"

    " + "documentation":"

    The bucket name.

    " }, "Prefix":{ "shape":"Prefix", - "documentation":"

    " + "documentation":"

    Selects objects that start with the value supplied by this parameter.

    " }, "Delimiter":{ "shape":"Delimiter", - "documentation":"

    " + "documentation":"

    The delimiter grouping the included keys. A delimiter is a character that you specify to group keys. All keys that contain the same string between the prefix and the first occurrence of the delimiter are grouped under a single result element in CommonPrefixes. These groups are counted as one result against the max-keys limitation. These keys are not returned elsewhere in the response.

    " }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

    " + "documentation":"

    Specifies the maximum number of objects to return.

    " }, "CommonPrefixes":{ "shape":"CommonPrefixList", - "documentation":"

    " + "documentation":"

    All of the keys rolled up into a common prefix count as a single return when calculating the number of returns.

    " }, "EncodingType":{ "shape":"EncodingType", - "documentation":"

    Encoding type used by Amazon S3 to encode object keys in the response.

    " + "documentation":"

    Encoding type used by Amazon S3 to encode object key names in the XML response.

    If you specify the encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

    KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.
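
    A short AWS SDK for Java v2 sketch of the KeyMarker / VersionIdMarker pagination described above, listing both versions and delete markers; the bucket name and prefix are hypothetical.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.*;

    public class ListVersionsAndDeleteMarkers {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            String keyMarker = null;
            String versionIdMarker = null;
            ListObjectVersionsResponse response;
            do {
                response = s3.listObjectVersions(ListObjectVersionsRequest.builder()
                        .bucket("my-bucket")
                        .prefix("reports/")
                        .keyMarker(keyMarker)
                        .versionIdMarker(versionIdMarker)
                        .build());
                response.versions().forEach(v ->
                        System.out.println(v.key() + " " + v.versionId() + (v.isLatest() ? " (latest)" : "")));
                response.deleteMarkers().forEach(d ->
                        System.out.println(d.key() + " " + d.versionId() + " (delete marker)"));
                keyMarker = response.nextKeyMarker();
                versionIdMarker = response.nextVersionIdMarker();
            } while (Boolean.TRUE.equals(response.isTruncated()));
        }
    }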

    " } } }, @@ -4985,13 +5697,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name that contains the objects.

    ", "location":"uri", "locationName":"Bucket" }, "Delimiter":{ "shape":"Delimiter", - "documentation":"

    A delimiter is a character you use to group keys.

    ", + "documentation":"

    A delimiter is a character that you specify to group keys. All keys that contain the same string between the prefix and the first occurrence of the delimiter are grouped under a single result element in CommonPrefixes. These groups are counted as one result against the max-keys limitation. These keys are not returned elsewhere in the response.

    ", "location":"querystring", "locationName":"delimiter" }, @@ -5008,13 +5720,13 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

    Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

    ", + "documentation":"

    Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.

    ", "location":"querystring", "locationName":"max-keys" }, "Prefix":{ "shape":"Prefix", - "documentation":"

    Limits the response to keys that begin with the specified prefix.

    ", + "documentation":"

    Use this parameter to select only those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different groupings of keys. (You can think of using prefix to make groups in the same way you'd use a folder in a file system.) You can use prefix with delimiter to roll up numerous objects into a single result under CommonPrefixes.

    ", "location":"querystring", "locationName":"prefix" }, @@ -5023,6 +5735,12 @@ "documentation":"

    Specifies the object version you want to start listing from.

    ", "location":"querystring", "locationName":"version-id-marker" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -5031,39 +5749,39 @@ "members":{ "IsTruncated":{ "shape":"IsTruncated", - "documentation":"

    A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.

    " + "documentation":"

    A flag that indicates whether Amazon S3 returned all of the results that satisfied the search criteria.

    " }, "Marker":{ "shape":"Marker", - "documentation":"

    " + "documentation":"

    Indicates where in the bucket listing begins. Marker is included in the response if it was sent with the request.

    " }, "NextMarker":{ "shape":"NextMarker", - "documentation":"

    When response is truncated (the IsTruncated element value in the response is true), you can use the key name in this field as marker in the subsequent request to get next set of objects. Amazon S3 lists objects in alphabetical order Note: This element is returned only if you have delimiter request parameter specified. If response does not include the NextMaker and it is truncated, you can use the value of the last Key in the response as the marker in the subsequent request to get the next set of object keys.

    " + "documentation":"

    When the response is truncated (the IsTruncated element value in the response is true), you can use the key name in this field as the marker in the subsequent request to get the next set of objects. Amazon S3 lists objects in alphabetical order. Note: This element is returned only if you have the delimiter request parameter specified. If the response does not include the NextMarker and it is truncated, you can use the value of the last Key in the response as the marker in the subsequent request to get the next set of object keys.

    " }, "Contents":{ "shape":"ObjectList", - "documentation":"

    " + "documentation":"

    Metadata about each object returned.

    " }, "Name":{ "shape":"BucketName", - "documentation":"

    " + "documentation":"

    The bucket name.

    " }, "Prefix":{ "shape":"Prefix", - "documentation":"

    " + "documentation":"

    Keys that begin with the indicated prefix.

    " }, "Delimiter":{ "shape":"Delimiter", - "documentation":"

    " + "documentation":"

    Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the MaxKeys value.

    " }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

    " + "documentation":"

    The maximum number of keys returned in the response body.

    " }, "CommonPrefixes":{ "shape":"CommonPrefixList", - "documentation":"

    " + "documentation":"

    All of the keys rolled up in a common prefix count as a single return when calculating the number of returns.

    A response can contain CommonPrefixes only if you specify a delimiter.

    CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by the delimiter.

    CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

    For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

    " }, "EncodingType":{ "shape":"EncodingType", @@ -5077,7 +5795,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket containing the objects.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -5100,7 +5818,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

    Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

    ", + "documentation":"

    Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

    ", "location":"querystring", "locationName":"max-keys" }, @@ -5115,6 +5833,12 @@ "documentation":"

    Confirms that the requester knows that she or he will be charged for the list objects request. Bucket owners need not specify this parameter in their requests.

    ", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -5123,7 +5847,7 @@ "members":{ "IsTruncated":{ "shape":"IsTruncated", - "documentation":"

    A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.

    " + "documentation":"

    Set to false if all of the results were returned. Set to true if more keys are available to return. If the number of results exceeds that specified by MaxKeys, all of the results might not be returned.

    " }, "Contents":{ "shape":"ObjectList", @@ -5131,27 +5855,27 @@ }, "Name":{ "shape":"BucketName", - "documentation":"

    Name of the bucket to list.

    " + "documentation":"

    The bucket name.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    " }, "Prefix":{ "shape":"Prefix", - "documentation":"

    Limits the response to keys that begin with the specified prefix.

    " + "documentation":"

    Keys that begin with the indicated prefix.

    " }, "Delimiter":{ "shape":"Delimiter", - "documentation":"

    A delimiter is a character you use to group keys.

    " + "documentation":"

    Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the MaxKeys value.

    " }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

    Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

    " + "documentation":"

    Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

    " }, "CommonPrefixes":{ "shape":"CommonPrefixList", - "documentation":"

    CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by delimiter

    " + "documentation":"

    All of the keys rolled up into a common prefix count as a single return when calculating the number of returns.

    A response can contain CommonPrefixes only if you specify a delimiter.

    CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter.

    CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

    For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

    " }, "EncodingType":{ "shape":"EncodingType", - "documentation":"

    Encoding type used by Amazon S3 to encode object keys in the response.

    " + "documentation":"

    Encoding type used by Amazon S3 to encode object key names in the XML response.

    If you specify the encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

    Delimiter, Prefix, Key, and StartAfter.

    " }, "KeyCount":{ "shape":"KeyCount", @@ -5159,15 +5883,15 @@ }, "ContinuationToken":{ "shape":"Token", - "documentation":"

    ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key

    " + "documentation":"

    If ContinuationToken was sent with the request, it is included in the response.

    " }, "NextContinuationToken":{ "shape":"NextToken", - "documentation":"

    NextContinuationToken is sent when isTruncated is true which means there are more keys in the bucket that can be listed. The next list requests to Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken is obfuscated and is not a real key

    " + "documentation":"

    NextContinuationToken is sent when isTruncated is true, which means there are more keys in the bucket that can be listed. The next list request to Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken is obfuscated and is not a real key.

    " }, "StartAfter":{ "shape":"StartAfter", - "documentation":"

    StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket

    " + "documentation":"

    If StartAfter was sent with the request, it is included in the response.
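
    The ContinuationToken / StartAfter flow above could look roughly like this with the AWS SDK for Java v2; the bucket name, prefix, and expected owner account ID are hypothetical placeholders, and the trailing paginator call shows the SDK driving the same token loop automatically.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.*;

    public class ListObjectsV2Tokens {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            String token = null;
            ListObjectsV2Response response;
            do {
                response = s3.listObjectsV2(ListObjectsV2Request.builder()
                        .bucket("my-bucket")
                        .prefix("photos/")
                        .delimiter("/")
                        .maxKeys(1000)
                        .continuationToken(token)
                        .expectedBucketOwner("111122223333") // request fails with 403 if another account owns the bucket
                        .build());
                response.contents().forEach(o -> System.out.println(o.key() + " " + o.size()));
                response.commonPrefixes().forEach(p -> System.out.println("prefix: " + p.prefix()));
                token = response.nextContinuationToken();
            } while (Boolean.TRUE.equals(response.isTruncated()));

            // The SDK can also drive the token loop itself via the paginator:
            s3.listObjectsV2Paginator(ListObjectsV2Request.builder().bucket("my-bucket").build())
                    .contents()
                    .forEach(o -> System.out.println(o.key()));
        }
    }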

    " } } }, @@ -5177,7 +5901,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    Name of the bucket to list.

    ", + "documentation":"

    Bucket name to list.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -5195,7 +5919,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

    Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

    ", + "documentation":"

    Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

    ", "location":"querystring", "locationName":"max-keys" }, @@ -5207,19 +5931,19 @@ }, "ContinuationToken":{ "shape":"Token", - "documentation":"

    ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key

    ", + "documentation":"

    ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key.

    ", "location":"querystring", "locationName":"continuation-token" }, "FetchOwner":{ "shape":"FetchOwner", - "documentation":"

    The owner field is not present in listV2 by default, if you want to return owner field with each key in the result then set the fetch owner field to true

    ", + "documentation":"

    The owner field is not present in ListObjectsV2 by default. If you want to return the owner field with each key in the result, set the fetch owner field to true.

    ", "location":"querystring", "locationName":"fetch-owner" }, "StartAfter":{ "shape":"StartAfter", - "documentation":"

    StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket

    ", + "documentation":"

    StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket.

    ", "location":"querystring", "locationName":"start-after" }, @@ -5228,6 +5952,12 @@ "documentation":"

    Confirms that the requester knows that she or he will be charged for the list objects request in V2 style. Bucket owners need not specify this parameter in their requests.

    ", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -5236,19 +5966,19 @@ "members":{ "AbortDate":{ "shape":"AbortDate", - "documentation":"

    Date when multipart upload will become eligible for abort operation by lifecycle.

    ", + "documentation":"

    If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, then the response includes this header indicating when the initiated multipart upload will become eligible for abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

    The response will also include the x-amz-abort-rule-id header that will provide the ID of the lifecycle configuration rule that defines this action.

    ", "location":"header", "locationName":"x-amz-abort-date" }, "AbortRuleId":{ "shape":"AbortRuleId", - "documentation":"

    Id of the lifecycle rule that makes a multipart upload eligible for abort operation.

    ", + "documentation":"

    This header is returned along with the x-amz-abort-date header. It identifies applicable lifecycle configuration rule that defines the action to abort incomplete multipart uploads.

    ", "location":"header", "locationName":"x-amz-abort-rule-id" }, "Bucket":{ "shape":"BucketName", - "documentation":"

    Name of the bucket to which the multipart upload was initiated.

    " + "documentation":"

    The name of the bucket to which the multipart upload was initiated.

    " }, "Key":{ "shape":"ObjectKey", @@ -5260,7 +5990,7 @@ }, "PartNumberMarker":{ "shape":"PartNumberMarker", - "documentation":"

    Part number after which listing begins.

    " + "documentation":"

    When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request.

    " }, "NextPartNumberMarker":{ "shape":"NextPartNumberMarker", @@ -5272,24 +6002,24 @@ }, "IsTruncated":{ "shape":"IsTruncated", - "documentation":"

    Indicates whether the returned list of parts is truncated.

    " + "documentation":"

    Indicates whether the returned list of parts is truncated. A true value indicates that the list was truncated. A list can be truncated if the number of parts exceeds the limit returned in the MaxParts element.

    " }, "Parts":{ "shape":"Parts", - "documentation":"

    ", + "documentation":"

    Container for elements related to a particular part. A response can contain zero or more Part elements.

    ", "locationName":"Part" }, "Initiator":{ "shape":"Initiator", - "documentation":"

    Identifies who initiated the multipart upload.

    " + "documentation":"

    Container element that identifies who initiated the multipart upload. If the initiator is an AWS account, this element provides the same information as the Owner element. If the initiator is an IAM User, this element provides the user ARN and display name.

    " }, "Owner":{ "shape":"Owner", - "documentation":"

    " + "documentation":"

    Container element that identifies the object owner, after the object is created. If multipart upload is initiated by an IAM user, this element provides the parent account ID and display name.

    " }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

    The class of storage used to store the object.

    " + "documentation":"

    Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded object.
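
    A sketch of the PartNumberMarker pagination for ListParts with the AWS SDK for Java v2; the bucket, key, and upload ID are hypothetical placeholders (the upload ID would normally come from CreateMultipartUpload).

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.*;

    public class ListUploadedParts {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            Integer marker = null;
            ListPartsResponse response;
            do {
                response = s3.listParts(ListPartsRequest.builder()
                        .bucket("my-bucket")
                        .key("backups/archive.bin")
                        .uploadId("EXAMPLE-UPLOAD-ID") // hypothetical; returned by CreateMultipartUpload
                        .maxParts(1000)
                        .partNumberMarker(marker)
                        .build());
                response.parts().forEach(p ->
                        System.out.println(p.partNumber() + " " + p.eTag() + " " + p.size()));
                marker = response.nextPartNumberMarker();
            } while (Boolean.TRUE.equals(response.isTruncated()));
        }
    }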

    " }, "RequestCharged":{ "shape":"RequestCharged", @@ -5308,13 +6038,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket to which the parts are being uploaded.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Object key for which the multipart upload was initiated.

    ", "location":"uri", "locationName":"Key" }, @@ -5340,6 +6070,12 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -5354,11 +6090,11 @@ "members":{ "TargetBucket":{ "shape":"TargetBucket", - "documentation":"

    Specifies the bucket where you want Amazon S3 to store server access logs. You can have your logs delivered to any bucket that you own, including the same bucket that is being logged. You can also configure multiple buckets to deliver their logs to the same target bucket. In this case you should choose a different TargetPrefix for each source bucket so that the delivered log files can be distinguished by key.

    " + "documentation":"

    Specifies the bucket where you want Amazon S3 to store server access logs. You can have your logs delivered to any bucket that you own, including the same bucket that is being logged. You can also configure multiple buckets to deliver their logs to the same target bucket. In this case, you should choose a different TargetPrefix for each source bucket so that the delivered log files can be distinguished by key.
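
    A minimal AWS SDK for Java v2 sketch of enabling server access logging with a distinct TargetPrefix, as recommended above; the source bucket, target bucket, and prefix are hypothetical.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.*;

    public class EnableAccessLogging {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            s3.putBucketLogging(PutBucketLoggingRequest.builder()
                    .bucket("my-source-bucket")
                    .bucketLoggingStatus(BucketLoggingStatus.builder()
                            .loggingEnabled(LoggingEnabled.builder()
                                    .targetBucket("my-log-bucket")
                                    .targetPrefix("logs/my-source-bucket/") // distinct prefix per source bucket
                                    .build())
                            .build())
                    .build());
        }
    }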

    " }, "TargetGrants":{ "shape":"TargetGrants", - "documentation":"

    " + "documentation":"

    Container for granting information.

    " }, "TargetPrefix":{ "shape":"TargetPrefix", @@ -5405,17 +6141,32 @@ "members":{ "Name":{ "shape":"MetadataKey", - "documentation":"

    " + "documentation":"

    Name of the Object.

    " }, "Value":{ "shape":"MetadataValue", - "documentation":"

    " + "documentation":"

    Value of the Object.

    " } }, "documentation":"

    A metadata key-value pair to store with an object.

    " }, "MetadataKey":{"type":"string"}, "MetadataValue":{"type":"string"}, + "Metrics":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"MetricsStatus", + "documentation":"

    Specifies whether the replication metrics are enabled.

    " + }, + "EventThreshold":{ + "shape":"ReplicationTimeValue", + "documentation":"

    A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold event.

    " + } + }, + "documentation":"

    A container specifying replication metrics-related settings enabling replication metrics and events.

    " + }, "MetricsAndOperator":{ "type":"structure", "members":{ @@ -5430,7 +6181,7 @@ "locationName":"Tag" } }, - "documentation":"

    " + "documentation":"

    A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.

    " }, "MetricsConfiguration":{ "type":"structure", @@ -5468,9 +6219,17 @@ "documentation":"

    A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.

    " } }, - "documentation":"

    " + "documentation":"

    Specifies a metrics configuration filter. The metrics configuration only includes objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction (MetricsAndOperator).

    " }, "MetricsId":{"type":"string"}, + "MetricsStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "Minutes":{"type":"integer"}, "MissingMeta":{"type":"integer"}, "MultipartUpload":{ "type":"structure", @@ -5493,14 +6252,14 @@ }, "Owner":{ "shape":"Owner", - "documentation":"

    " + "documentation":"

    Specifies the owner of the object that is part of the multipart upload.

    " }, "Initiator":{ "shape":"Initiator", "documentation":"

    Identifies who initiated the multipart upload.

    " } }, - "documentation":"

    " + "documentation":"

    Container for the MultipartUpload for the Amazon S3 object.

    " }, "MultipartUploadId":{"type":"string"}, "MultipartUploadList":{ @@ -5540,7 +6299,7 @@ "members":{ "NoncurrentDays":{ "shape":"Days", - "documentation":"

    Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.

    " + "documentation":"

    Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.

    " } }, "documentation":"

    Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.

    " @@ -5550,7 +6309,7 @@ "members":{ "NoncurrentDays":{ "shape":"Days", - "documentation":"

    Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.

    " + "documentation":"

    Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent in the Amazon Simple Storage Service Developer Guide.

    " }, "StorageClass":{ "shape":"TransitionStorageClass", @@ -5590,15 +6349,15 @@ "members":{ "TopicConfiguration":{ "shape":"TopicConfigurationDeprecated", - "documentation":"

    " + "documentation":"

    This data type is deprecated. A container for specifying the configuration for publication of messages to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects specified events.

    " }, "QueueConfiguration":{ "shape":"QueueConfigurationDeprecated", - "documentation":"

    " + "documentation":"

    This data type is deprecated. This data type specifies the configuration for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified events.

    " }, "CloudFunctionConfiguration":{ "shape":"CloudFunctionConfiguration", - "documentation":"

    " + "documentation":"

    Container for specifying the AWS Lambda notification configuration.

    " } } }, @@ -5607,7 +6366,6 @@ "members":{ "Key":{ "shape":"S3KeyFilter", - "documentation":"

    ", "locationName":"S3Key" } }, @@ -5622,19 +6380,19 @@ "members":{ "Key":{ "shape":"ObjectKey", - "documentation":"

    " + "documentation":"

    The name that you assign to an object. You use the object key to retrieve the object.

    " }, "LastModified":{ "shape":"LastModified", - "documentation":"

    " + "documentation":"

    The date the object was last modified.

    " }, "ETag":{ "shape":"ETag", - "documentation":"

    " + "documentation":"

    The entity tag is a hash of the object. The ETag reflects changes only to the contents of an object, not its metadata. The ETag may or may not be an MD5 digest of the object data. Whether or not it is depends on how the object was created and how it is encrypted as described below:

    • Objects created by the PUT Object, POST Object, or Copy operation, or through the AWS Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that are an MD5 digest of their object data.

    • Objects created by the PUT Object, POST Object, or Copy operation, or through the AWS Management Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object data.

    • If an object is created by either the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest, regardless of the method of encryption.

    " }, "Size":{ "shape":"Size", - "documentation":"

    " + "documentation":"

    Size in bytes of the object.

    " }, "StorageClass":{ "shape":"ObjectStorageClass", @@ -5642,16 +6400,16 @@ }, "Owner":{ "shape":"Owner", - "documentation":"

    " + "documentation":"

    The owner of the object.

    " } }, - "documentation":"

    " + "documentation":"

    An object consists of data and its descriptive metadata.

    " }, "ObjectAlreadyInActiveTierError":{ "type":"structure", "members":{ }, - "documentation":"

    This operation is not allowed against this storage tier

    ", + "documentation":"

    This operation is not allowed against this storage tier.

    ", "exception":true }, "ObjectCannedACL":{ @@ -5679,7 +6437,7 @@ "documentation":"

    VersionId for the specific version of the object to delete.

    " } }, - "documentation":"

    " + "documentation":"

    Object Identifier is a unique value to identify objects.

    " }, "ObjectIdentifierList":{ "type":"list", @@ -5700,14 +6458,14 @@ "members":{ "ObjectLockEnabled":{ "shape":"ObjectLockEnabled", - "documentation":"

    Indicates whether this bucket has an object lock configuration enabled.

    " + "documentation":"

    Indicates whether this bucket has an Object Lock configuration enabled.

    " }, "Rule":{ "shape":"ObjectLockRule", - "documentation":"

    The object lock rule in place for the specified object.

    " + "documentation":"

    The Object Lock rule in place for the specified object.

    " } }, - "documentation":"

    The container element for object lock configuration parameters.

    " + "documentation":"

    The container element for Object Lock configuration parameters.

    " }, "ObjectLockEnabled":{ "type":"string", @@ -5751,7 +6509,7 @@ }, "RetainUntilDate":{ "shape":"Date", - "documentation":"

    The date on which this object lock retention expires.

    " + "documentation":"

    The date on which this Object Lock Retention will expire.

    " } }, "documentation":"

    A Retention configuration for an object.

    " @@ -5771,16 +6529,24 @@ "documentation":"

    The default retention period that you want to apply to new objects placed in the specified bucket.

    " } }, - "documentation":"

    The container element for an object lock rule.

    " + "documentation":"

    The container element for an Object Lock rule.
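
    A hedged AWS SDK for Java v2 sketch that sets a default Object Lock retention rule using these container elements; it assumes a bucket that was created with Object Lock enabled, and the bucket name, mode, and day count are hypothetical.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.*;

    public class PutObjectLockDefaults {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();
            ObjectLockConfiguration config = ObjectLockConfiguration.builder()
                    .objectLockEnabled(ObjectLockEnabled.ENABLED)
                    .rule(ObjectLockRule.builder()
                            .defaultRetention(DefaultRetention.builder()
                                    .mode(ObjectLockRetentionMode.GOVERNANCE)
                                    .days(30)
                                    .build())
                            .build())
                    .build();
            s3.putObjectLockConfiguration(PutObjectLockConfigurationRequest.builder()
                    .bucket("my-locked-bucket")
                    .objectLockConfiguration(config)
                    .build());
        }
    }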

    " }, "ObjectLockToken":{"type":"string"}, "ObjectNotInActiveTierError":{ "type":"structure", "members":{ }, - "documentation":"

    The source object of the COPY operation is not in the active tier and is only stored in Amazon Glacier.

    ", + "documentation":"

    The source object of the COPY operation is not in the active tier and is only stored in Amazon S3 Glacier.

    ", "exception":true }, + "ObjectOwnership":{ + "type":"string", + "documentation":"

    The container element for object ownership for a bucket's ownership controls.

    BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the bucket owner if the objects are uploaded with the bucket-owner-full-control canned ACL.

    ObjectWriter - The uploading account will own the object if the object is uploaded with the bucket-owner-full-control canned ACL.

    ", + "enum":[ + "BucketOwnerPreferred", + "ObjectWriter" + ] + }, "ObjectStorageClass":{ "type":"string", "enum":[ @@ -5790,7 +6556,8 @@ "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", - "DEEP_ARCHIVE" + "DEEP_ARCHIVE", + "OUTPOSTS" ] }, "ObjectVersion":{ @@ -5798,7 +6565,7 @@ "members":{ "ETag":{ "shape":"ETag", - "documentation":"

    " + "documentation":"

    The entity tag is an MD5 hash of that version of the object.

    " }, "Size":{ "shape":"Size", @@ -5826,10 +6593,10 @@ }, "Owner":{ "shape":"Owner", - "documentation":"

    " + "documentation":"

    Specifies the owner of the object.

    " } }, - "documentation":"

    " + "documentation":"

    The version of an object.

    " }, "ObjectVersionId":{"type":"string"}, "ObjectVersionList":{ @@ -5870,24 +6637,49 @@ "members":{ "DisplayName":{ "shape":"DisplayName", - "documentation":"

    " + "documentation":"

    Container for the display name of the owner.

    " }, "ID":{ "shape":"ID", - "documentation":"

    " + "documentation":"

    Container for the ID of the owner.

    " } }, - "documentation":"

    " + "documentation":"

    Container for the owner's display name and ID.

    " }, "OwnerOverride":{ "type":"string", "enum":["Destination"] }, + "OwnershipControls":{ + "type":"structure", + "required":["Rules"], + "members":{ + "Rules":{ + "shape":"OwnershipControlsRules", + "documentation":"

    The container element for an ownership control rule.

    ", + "locationName":"Rule" + } + }, + "documentation":"

    The container element for a bucket's ownership controls.

    " + }, + "OwnershipControlsRule":{ + "type":"structure", + "required":["ObjectOwnership"], + "members":{ + "ObjectOwnership":{"shape":"ObjectOwnership"} + }, + "documentation":"

    The container element for an ownership control rule.

    " + }, + "OwnershipControlsRules":{ + "type":"list", + "member":{"shape":"OwnershipControlsRule"}, + "flattened":true + }, "ParquetInput":{ "type":"structure", "members":{ }, - "documentation":"

    " + "documentation":"

    Container for Parquet.

    " }, "Part":{ "type":"structure", @@ -5909,7 +6701,7 @@ "documentation":"

    Size in bytes of the uploaded part data.

    " } }, - "documentation":"

    " + "documentation":"

    Container for elements related to a part.

    " }, "PartNumber":{"type":"integer"}, "PartNumberMarker":{"type":"integer"}, @@ -5966,7 +6758,7 @@ "documentation":"

    The current number of bytes of records payload data returned.

    " } }, - "documentation":"

    " + "documentation":"

    This data type contains information about the progress of an operation.

    " }, "ProgressEvent":{ "type":"structure", @@ -5977,7 +6769,7 @@ "eventpayload":true } }, - "documentation":"

    ", + "documentation":"

    This data type contains information about the progress event of an operation.

    ", "event":true }, "Protocol":{ @@ -5992,7 +6784,7 @@ "members":{ "BlockPublicAcls":{ "shape":"Setting", - "documentation":"

    Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE causes the following behavior:

    • PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public.

    • PUT Object calls fail if the request includes a public ACL.

    Enabling this setting doesn't affect existing policies or ACLs.

    ", + "documentation":"

    Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE causes the following behavior:

    • PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public.

    • PUT Object calls fail if the request includes a public ACL.

    • PUT Bucket calls fail if the request includes a public ACL.

    Enabling this setting doesn't affect existing policies or ACLs.

    ", "locationName":"BlockPublicAcls" }, "IgnorePublicAcls":{ @@ -6007,11 +6799,11 @@ }, "RestrictPublicBuckets":{ "shape":"Setting", - "documentation":"

    Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only AWS services and authorized users within this account if the bucket has a public policy.

    Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

    ", + "documentation":"

    Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.

    Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

    ", "locationName":"RestrictPublicBuckets" } }, - "documentation":"

    Specifies the Block Public Access configuration for an Amazon S3 bucket.

    " + "documentation":"

    The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.
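    A minimal sketch of enabling all four Block Public Access protections with the v2 client; the bucket name is a placeholder.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.PublicAccessBlockConfiguration;
    import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockRequest;

    public class BlockPublicAccessExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Any combination of the four flags is allowed; this enables all of them.
                s3.putPublicAccessBlock(PutPublicAccessBlockRequest.builder()
                        .bucket("my-example-bucket") // placeholder
                        .publicAccessBlockConfiguration(PublicAccessBlockConfiguration.builder()
                                .blockPublicAcls(true)
                                .ignorePublicAcls(true)
                                .blockPublicPolicy(true)
                                .restrictPublicBuckets(true)
                                .build())
                        .build());
            }
        }
    }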

    " }, "PutBucketAccelerateConfigurationRequest":{ "type":"structure", @@ -6022,15 +6814,21 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    Name of the bucket for which the accelerate configuration is set.

    ", + "documentation":"

    The name of the bucket for which the accelerate configuration is set.

    ", "location":"uri", "locationName":"Bucket" }, "AccelerateConfiguration":{ "shape":"AccelerateConfiguration", - "documentation":"

    Specifies the Accelerate Configuration you want to set for the bucket.

    ", + "documentation":"

    Container for setting the transfer acceleration state.

    ", "locationName":"AccelerateConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
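    A minimal sketch of enabling transfer acceleration while asserting the expected bucket owner; the bucket name and account ID are placeholders, and the expectedBucketOwner builder method assumes the standard code generation for the header added in this change.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.AccelerateConfiguration;
    import software.amazon.awssdk.services.s3.model.BucketAccelerateStatus;
    import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationRequest;

    public class AccelerateWithOwnerCheckExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // The request fails with HTTP 403 (Access Denied) if the bucket
                // is owned by a different account than the one given here.
                s3.putBucketAccelerateConfiguration(PutBucketAccelerateConfigurationRequest.builder()
                        .bucket("my-example-bucket")         // placeholder
                        .expectedBucketOwner("111122223333") // placeholder account ID
                        .accelerateConfiguration(AccelerateConfiguration.builder()
                                .status(BucketAccelerateStatus.ENABLED)
                                .build())
                        .build());
            }
        }
    }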

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"AccelerateConfiguration" @@ -6053,13 +6851,13 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket to which to apply the ACL.

    ", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, @@ -6092,6 +6890,12 @@ "documentation":"

    Allows grantee to write the ACL for the applicable bucket.

    ", "location":"header", "locationName":"x-amz-grant-write-acp" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"AccessControlPolicy" @@ -6121,6 +6925,12 @@ "documentation":"

    The configuration and any analyses for the analytics filter.

    ", "locationName":"AnalyticsConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"AnalyticsConfiguration" @@ -6134,21 +6944,27 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    Specifies the bucket impacted by the CORS configuration.

    ", "location":"uri", "locationName":"Bucket" }, "CORSConfiguration":{ "shape":"CORSConfiguration", - "documentation":"

    ", + "documentation":"

    Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

    ", "locationName":"CORSConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"CORSConfiguration" @@ -6162,25 +6978,59 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

    ", + "documentation":"

    Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.
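    A minimal sketch of configuring SSE-KMS default encryption for a bucket with the v2 client; the bucket name and KMS key ARN are placeholders, and the builder names assume the standard code generation for these shapes.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.PutBucketEncryptionRequest;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryption;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryptionByDefault;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryptionConfiguration;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryptionRule;

    public class DefaultEncryptionExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // New objects uploaded without their own encryption headers
                // are encrypted with this KMS key by default.
                s3.putBucketEncryption(PutBucketEncryptionRequest.builder()
                        .bucket("my-example-bucket") // placeholder
                        .serverSideEncryptionConfiguration(ServerSideEncryptionConfiguration.builder()
                                .rules(ServerSideEncryptionRule.builder()
                                        .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
                                                .sseAlgorithm(ServerSideEncryption.AWS_KMS)
                                                .kmsMasterKeyID("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID") // placeholder
                                                .build())
                                        .build())
                                .build())
                        .build());
            }
        }
    }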

    ", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. This parameter is auto-populated when using the command from the CLI.

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the server-side encryption configuration.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, "ServerSideEncryptionConfiguration":{ "shape":"ServerSideEncryptionConfiguration", - "documentation":"

    ", "locationName":"ServerSideEncryptionConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"ServerSideEncryptionConfiguration" }, + "PutBucketIntelligentTieringConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Id", + "IntelligentTieringConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

    ", + "location":"uri", + "locationName":"Bucket" + }, + "Id":{ + "shape":"IntelligentTieringId", + "documentation":"

    The ID used to identify the S3 Intelligent-Tiering configuration.

    ", + "location":"querystring", + "locationName":"id" + }, + "IntelligentTieringConfiguration":{ + "shape":"IntelligentTieringConfiguration", + "documentation":"

    Container for S3 Intelligent-Tiering configuration.
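    A hedged sketch of putting an S3 Intelligent-Tiering configuration with the v2 client; the bucket name and configuration ID are placeholders, and the class names assume the standard code generation for the Intelligent-Tiering shapes introduced by this change.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.IntelligentTieringAccessTier;
    import software.amazon.awssdk.services.s3.model.IntelligentTieringConfiguration;
    import software.amazon.awssdk.services.s3.model.IntelligentTieringStatus;
    import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationRequest;
    import software.amazon.awssdk.services.s3.model.Tiering;

    public class IntelligentTieringExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Moves objects that have not been accessed for 90 days into the Archive Access tier.
                s3.putBucketIntelligentTieringConfiguration(PutBucketIntelligentTieringConfigurationRequest.builder()
                        .bucket("my-example-bucket") // placeholder
                        .id("archive-after-90-days") // placeholder configuration ID
                        .intelligentTieringConfiguration(IntelligentTieringConfiguration.builder()
                                .id("archive-after-90-days")
                                .status(IntelligentTieringStatus.ENABLED)
                                .tierings(Tiering.builder()
                                        .accessTier(IntelligentTieringAccessTier.ARCHIVE_ACCESS)
                                        .days(90)
                                        .build())
                                .build())
                        .build());
            }
        }
    }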

    ", + "locationName":"IntelligentTieringConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"IntelligentTieringConfiguration" + }, "PutBucketInventoryConfigurationRequest":{ "type":"structure", "required":[ @@ -6206,6 +7056,12 @@ "documentation":"

    Specifies the inventory configuration.

    ", "locationName":"InventoryConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"InventoryConfiguration" @@ -6216,15 +7072,21 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket for which to set the configuration.

    ", "location":"uri", "locationName":"Bucket" }, "LifecycleConfiguration":{ "shape":"BucketLifecycleConfiguration", - "documentation":"

    ", + "documentation":"

    Container for lifecycle rules. You can add as many as 1,000 rules.
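    A minimal sketch of setting a single lifecycle rule through the v2 client; the bucket name, rule ID, and prefix are placeholders.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.BucketLifecycleConfiguration;
    import software.amazon.awssdk.services.s3.model.ExpirationStatus;
    import software.amazon.awssdk.services.s3.model.LifecycleExpiration;
    import software.amazon.awssdk.services.s3.model.LifecycleRule;
    import software.amazon.awssdk.services.s3.model.LifecycleRuleFilter;
    import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationRequest;

    public class LifecycleExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // One rule that expires objects under the logs/ prefix after 365 days.
                s3.putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
                        .bucket("my-example-bucket") // placeholder
                        .lifecycleConfiguration(BucketLifecycleConfiguration.builder()
                                .rules(LifecycleRule.builder()
                                        .id("expire-old-logs")
                                        .status(ExpirationStatus.ENABLED)
                                        .filter(LifecycleRuleFilter.builder().prefix("logs/").build())
                                        .expiration(LifecycleExpiration.builder().days(365).build())
                                        .build())
                                .build())
                        .build());
            }
        }
    }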

    ", "locationName":"LifecycleConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"LifecycleConfiguration" @@ -6241,7 +7103,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, @@ -6250,6 +7112,12 @@ "documentation":"

    ", "locationName":"LifecycleConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"LifecycleConfiguration" @@ -6263,21 +7131,27 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket for which to set the logging parameters.

    ", "location":"uri", "locationName":"Bucket" }, "BucketLoggingStatus":{ "shape":"BucketLoggingStatus", - "documentation":"

    ", + "documentation":"

    Container for logging status information.

    ", "locationName":"BucketLoggingStatus", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The MD5 hash of the PutBucketLogging request body.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"BucketLoggingStatus" @@ -6307,6 +7181,12 @@ "documentation":"

    Specifies the metrics configuration.

    ", "locationName":"MetricsConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"MetricsConfiguration" @@ -6320,15 +7200,20 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket.

    ", "location":"uri", "locationName":"Bucket" }, "NotificationConfiguration":{ "shape":"NotificationConfiguration", - "documentation":"

    ", "locationName":"NotificationConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"NotificationConfiguration" @@ -6342,25 +7227,65 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket.

    ", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The MD5 hash of the PutBucketNotification request body.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, "NotificationConfiguration":{ "shape":"NotificationConfigurationDeprecated", - "documentation":"

    ", + "documentation":"

    The container for the configuration.

    ", "locationName":"NotificationConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"NotificationConfiguration" }, + "PutBucketOwnershipControlsRequest":{ + "type":"structure", + "required":[ + "Bucket", + "OwnershipControls" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the Amazon S3 bucket whose OwnershipControls you want to set.

    ", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "documentation":"

    The MD5 hash of the OwnershipControls request body.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", + "location":"header", + "locationName":"Content-MD5" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" + }, + "OwnershipControls":{ + "shape":"OwnershipControls", + "documentation":"

    The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want to apply to this Amazon S3 bucket.

    ", + "locationName":"OwnershipControls", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"OwnershipControls" + }, "PutBucketPolicyRequest":{ "type":"structure", "required":[ @@ -6370,13 +7295,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket.

    ", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The MD5 hash of the request body.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, @@ -6389,6 +7314,12 @@ "Policy":{ "shape":"Policy", "documentation":"

    The bucket policy as a JSON document.

    " + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"Policy" @@ -6402,27 +7333,32 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The name of the bucket.

    ", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit.

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, "ReplicationConfiguration":{ "shape":"ReplicationConfiguration", - "documentation":"

    ", "locationName":"ReplicationConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "Token":{ "shape":"ObjectLockToken", - "documentation":"

    A token that allows Amazon S3 object lock to be enabled for an existing bucket.

    ", + "documentation":"

    A token to allow Object Lock to be enabled for an existing bucket.

    ", "location":"header", "locationName":"x-amz-bucket-object-lock-token" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"ReplicationConfiguration" @@ -6436,21 +7372,27 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name.

    ", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, "RequestPaymentConfiguration":{ "shape":"RequestPaymentConfiguration", - "documentation":"

    ", + "documentation":"

    Container for Payer.

    ", "locationName":"RequestPaymentConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"RequestPaymentConfiguration" @@ -6464,21 +7406,27 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name.

    ", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, "Tagging":{ "shape":"Tagging", - "documentation":"

    ", + "documentation":"

    Container for the TagSet and Tag elements.

    ", "locationName":"Tagging", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"Tagging" @@ -6492,13 +7440,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name.

    ", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, @@ -6510,9 +7458,15 @@ }, "VersioningConfiguration":{ "shape":"VersioningConfiguration", - "documentation":"

    ", + "documentation":"

    Container for setting the versioning state.
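    A minimal sketch of enabling versioning on a bucket with the v2 client; the bucket name is a placeholder.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.BucketVersioningStatus;
    import software.amazon.awssdk.services.s3.model.PutBucketVersioningRequest;
    import software.amazon.awssdk.services.s3.model.VersioningConfiguration;

    public class VersioningExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Switches the bucket's versioning state to Enabled.
                s3.putBucketVersioning(PutBucketVersioningRequest.builder()
                        .bucket("my-example-bucket") // placeholder
                        .versioningConfiguration(VersioningConfiguration.builder()
                                .status(BucketVersioningStatus.ENABLED)
                                .build())
                        .build());
            }
        }
    }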

    ", "locationName":"VersioningConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"VersioningConfiguration" @@ -6526,21 +7480,27 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name.

    ", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, "WebsiteConfiguration":{ "shape":"WebsiteConfiguration", - "documentation":"

    ", + "documentation":"

    Container for the request.

    ", "locationName":"WebsiteConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"WebsiteConfiguration" @@ -6564,7 +7524,7 @@ "members":{ "ACL":{ "shape":"ObjectCannedACL", - "documentation":"

    The canned ACL to apply to the object.

    ", + "documentation":"

    The canned ACL to apply to the object. For more information, see Canned ACL.

    ", "location":"header", "locationName":"x-amz-acl" }, @@ -6576,31 +7536,31 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name that contains the object to which you want to attach the ACL.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, "GrantFullControl":{ "shape":"GrantFullControl", - "documentation":"

    Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.

    ", + "documentation":"

    Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-full-control" }, "GrantRead":{ "shape":"GrantRead", - "documentation":"

    Allows grantee to list the objects in the bucket.

    ", + "documentation":"

    Allows grantee to list the objects in the bucket.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-read" }, "GrantReadACP":{ "shape":"GrantReadACP", - "documentation":"

    Allows grantee to read the bucket ACL.

    ", + "documentation":"

    Allows grantee to read the bucket ACL.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-read-acp" }, @@ -6612,13 +7572,13 @@ }, "GrantWriteACP":{ "shape":"GrantWriteACP", - "documentation":"

    Allows grantee to write the ACL for the applicable bucket.

    ", + "documentation":"

    Allows grantee to write the ACL for the applicable bucket.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-write-acp" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Key for which the PUT operation was initiated.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Key" }, @@ -6632,6 +7592,12 @@ "documentation":"

    VersionId used to reference a specific version of the object.

    ", "location":"querystring", "locationName":"versionId" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"AccessControlPolicy" @@ -6655,7 +7621,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    The bucket containing the object that you want to place a Legal Hold on.

    ", + "documentation":"

    The bucket name containing the object that you want to place a Legal Hold on.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -6684,9 +7650,15 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    The MD5 hash for the request body.

    ", + "documentation":"

    The MD5 hash for the request body.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"LegalHold" @@ -6707,13 +7679,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    The bucket whose object lock configuration you want to create or replace.

    ", + "documentation":"

    The bucket whose Object Lock configuration you want to create or replace.

    ", "location":"uri", "locationName":"Bucket" }, "ObjectLockConfiguration":{ "shape":"ObjectLockConfiguration", - "documentation":"

    The object lock configuration that you want to apply to the specified bucket.

    ", + "documentation":"

    The Object Lock configuration that you want to apply to the specified bucket.
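    A minimal sketch of applying a default Object Lock retention rule with the v2 client; the bucket name is a placeholder, and the bucket must have been created with Object Lock enabled.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DefaultRetention;
    import software.amazon.awssdk.services.s3.model.ObjectLockConfiguration;
    import software.amazon.awssdk.services.s3.model.ObjectLockEnabled;
    import software.amazon.awssdk.services.s3.model.ObjectLockRetentionMode;
    import software.amazon.awssdk.services.s3.model.ObjectLockRule;
    import software.amazon.awssdk.services.s3.model.PutObjectLockConfigurationRequest;

    public class ObjectLockConfigurationExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Every new object version is locked in COMPLIANCE mode for 30 days by default.
                s3.putObjectLockConfiguration(PutObjectLockConfigurationRequest.builder()
                        .bucket("my-locked-bucket") // placeholder
                        .objectLockConfiguration(ObjectLockConfiguration.builder()
                                .objectLockEnabled(ObjectLockEnabled.ENABLED)
                                .rule(ObjectLockRule.builder()
                                        .defaultRetention(DefaultRetention.builder()
                                                .mode(ObjectLockRetentionMode.COMPLIANCE)
                                                .days(30)
                                                .build())
                                        .build())
                                .build())
                        .build());
            }
        }
    }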

    ", "locationName":"ObjectLockConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, @@ -6724,15 +7696,21 @@ }, "Token":{ "shape":"ObjectLockToken", - "documentation":"

    A token to allow Amazon S3 object lock to be enabled for an existing bucket.

    ", + "documentation":"

    A token to allow Object Lock to be enabled for an existing bucket.

    ", "location":"header", "locationName":"x-amz-bucket-object-lock-token" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    The MD5 hash for the request body.

    ", + "documentation":"

    The MD5 hash for the request body.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"ObjectLockConfiguration" @@ -6742,7 +7720,7 @@ "members":{ "Expiration":{ "shape":"Expiration", - "documentation":"

    If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.

    ", + "documentation":"

    If the expiration is configured for the object (see PutBucketLifecycleConfiguration), the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL encoded.

    ", "location":"header", "locationName":"x-amz-expiration" }, @@ -6754,7 +7732,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    If you specified server-side encryption either with an AWS KMS customer master key (CMK) or Amazon S3-managed encryption key in your PUT request, the response includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -6772,13 +7750,13 @@ }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

    ", + "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", + "documentation":"

    If x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -6788,6 +7766,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -6804,7 +7788,7 @@ "members":{ "ACL":{ "shape":"ObjectCannedACL", - "documentation":"

    The canned ACL to apply to the object.

    ", + "documentation":"

    The canned ACL to apply to the object. For more information, see Canned ACL.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-acl" }, @@ -6815,25 +7799,25 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

    Name of the bucket to which the PUT operation was initiated.

    ", + "documentation":"

    The bucket name to which the PUT operation was initiated.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "CacheControl":{ "shape":"CacheControl", - "documentation":"

    Specifies caching behavior along the request/reply chain.

    ", + "documentation":"

    Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.

    ", "location":"header", "locationName":"Cache-Control" }, "ContentDisposition":{ "shape":"ContentDisposition", - "documentation":"

    Specifies presentational information for the object.

    ", + "documentation":"

    Specifies presentational information for the object. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1.

    ", "location":"header", "locationName":"Content-Disposition" }, "ContentEncoding":{ "shape":"ContentEncoding", - "documentation":"

    Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.

    ", + "documentation":"

    Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11.

    ", "location":"header", "locationName":"Content-Encoding" }, @@ -6845,49 +7829,49 @@ }, "ContentLength":{ "shape":"ContentLength", - "documentation":"

    Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.

    ", + "documentation":"

    Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13.

    ", "location":"header", "locationName":"Content-Length" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameted is required if object lock parameters are specified.

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

    ", "location":"header", "locationName":"Content-MD5" }, "ContentType":{ "shape":"ContentType", - "documentation":"

    A standard MIME type describing the format of the object data.

    ", + "documentation":"

    A standard MIME type describing the format of the contents. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17.

    ", "location":"header", "locationName":"Content-Type" }, "Expires":{ "shape":"Expires", - "documentation":"

    The date and time at which the object is no longer cacheable.

    ", + "documentation":"

    The date and time at which the object is no longer cacheable. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21.

    ", "location":"header", "locationName":"Expires" }, "GrantFullControl":{ "shape":"GrantFullControl", - "documentation":"

    Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

    ", + "documentation":"

    Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-full-control" }, "GrantRead":{ "shape":"GrantRead", - "documentation":"

    Allows grantee to read the object data and its metadata.

    ", + "documentation":"

    Allows grantee to read the object data and its metadata.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-read" }, "GrantReadACP":{ "shape":"GrantReadACP", - "documentation":"

    Allows grantee to read the object ACL.

    ", + "documentation":"

    Allows grantee to read the object ACL.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-read-acp" }, "GrantWriteACP":{ "shape":"GrantWriteACP", - "documentation":"

    Allows grantee to write the ACL for the applicable object.

    ", + "documentation":"

    Allows grantee to write the ACL for the applicable object.

    This action is not supported by Amazon S3 on Outposts.

    ", "location":"header", "locationName":"x-amz-grant-write-acp" }, @@ -6905,43 +7889,43 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

    The type of storage to use for the object. Defaults to 'STANDARD'.

    ", + "documentation":"

    By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

    ", "location":"header", "locationName":"x-amz-storage-class" }, "WebsiteRedirectLocation":{ "shape":"WebsiteRedirectLocation", - "documentation":"

    If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.

    ", + "documentation":"

    If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see Object Key and Metadata.

    In the following example, the request header sets the redirect to an object (anotherPage.html) in the same bucket:

    x-amz-website-redirect-location: /anotherPage.html

    In the following example, the request header sets the object redirect to another website:

    x-amz-website-redirect-location: http://www.example.com/

    For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and How to Configure Website Page Redirects.
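    A minimal sketch of setting this redirect header when uploading an object with the v2 client; the bucket and key names are placeholders.

    import software.amazon.awssdk.core.sync.RequestBody;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.PutObjectRequest;

    public class WebsiteRedirectExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Requests for old-page.html on the bucket's website endpoint
                // are redirected to /anotherPage.html in the same bucket.
                s3.putObject(PutObjectRequest.builder()
                                .bucket("my-website-bucket")                  // placeholder
                                .key("old-page.html")                         // placeholder
                                .websiteRedirectLocation("/anotherPage.html") // stored as x-amz-website-redirect-location
                                .build(),
                        RequestBody.fromString(""));
            }
        }
    }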

    ", "location":"header", "locationName":"x-amz-website-redirect-location" }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"

    Specifies the algorithm to use to when encrypting the object (e.g., AES256).

    ", + "documentation":"

    Specifies the algorithm to use when encrypting the object (for example, AES256).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.

    ", + "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

    ", + "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

    ", + "documentation":"

    If x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

    If the value of x-amz-server-side-encryption is aws:kms, this header specifies the ID of the symmetric customer managed AWS KMS CMK that will be used for the object. If you specify x-amz-server-side-encryption:aws:kms, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -6951,6 +7935,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

    Specifying this header with a PUT operation doesn’t affect bucket-level settings for S3 Bucket Key.
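    A minimal sketch of an SSE-KMS upload that opts a single object into an S3 Bucket Key; the bucket, key, and KMS key ARN are placeholders, and the bucketKeyEnabled builder method assumes the standard code generation for the header added in this change.

    import software.amazon.awssdk.core.sync.RequestBody;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.PutObjectRequest;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

    public class SseKmsBucketKeyExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // SSE-KMS with an S3 Bucket Key for this object only;
                // bucket-level Bucket Key settings are not changed.
                s3.putObject(PutObjectRequest.builder()
                                .bucket("my-example-bucket") // placeholder
                                .key("reports/2020/q4.csv")  // placeholder
                                .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                                .ssekmsKeyId("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID") // placeholder
                                .bucketKeyEnabled(true)
                                .build(),
                        RequestBody.fromString("hello"));
            }
        }
    }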

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestPayer":{ "shape":"RequestPayer", "location":"header", @@ -6964,21 +7954,27 @@ }, "ObjectLockMode":{ "shape":"ObjectLockMode", - "documentation":"

    The object lock mode that you want to apply to this object.

    ", + "documentation":"

    The Object Lock mode that you want to apply to this object.

    ", "location":"header", "locationName":"x-amz-object-lock-mode" }, "ObjectLockRetainUntilDate":{ "shape":"ObjectLockRetainUntilDate", - "documentation":"

    The date and time when you want this object's object lock to expire.

    ", + "documentation":"

    The date and time when you want this object's Object Lock to expire.

    ", "location":"header", "locationName":"x-amz-object-lock-retain-until-date" }, "ObjectLockLegalHoldStatus":{ "shape":"ObjectLockLegalHoldStatus", - "documentation":"

    The Legal Hold status that you want to apply to the specified object.

    ", + "documentation":"

    Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see Object Lock.

    ", "location":"header", "locationName":"x-amz-object-lock-legal-hold" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"Body" @@ -7002,7 +7998,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    The bucket that contains the object you want to apply this Object Retention configuration to.

    ", + "documentation":"

    The bucket name that contains the object you want to apply this Object Retention configuration to.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -7031,15 +8027,21 @@ }, "BypassGovernanceRetention":{ "shape":"BypassGovernanceRetention", - "documentation":"

    Indicates whether this operation should bypass Governance-mode restrictions.j

    ", + "documentation":"

    Indicates whether this operation should bypass Governance-mode restrictions.
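    A minimal sketch of setting a GOVERNANCE-mode retention period on an object with the v2 client; the bucket and key names are placeholders, and the caller needs the s3:BypassGovernanceRetention permission for the bypass flag to take effect.

    import java.time.Instant;
    import java.time.temporal.ChronoUnit;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.ObjectLockRetention;
    import software.amazon.awssdk.services.s3.model.ObjectLockRetentionMode;
    import software.amazon.awssdk.services.s3.model.PutObjectRetentionRequest;

    public class ObjectRetentionExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Sets (or extends) a GOVERNANCE-mode lock that expires 90 days from now.
                s3.putObjectRetention(PutObjectRetentionRequest.builder()
                        .bucket("my-locked-bucket")    // placeholder
                        .key("statements/2020-11.pdf") // placeholder
                        .bypassGovernanceRetention(true)
                        .retention(ObjectLockRetention.builder()
                                .mode(ObjectLockRetentionMode.GOVERNANCE)
                                .retainUntilDate(Instant.now().plus(90, ChronoUnit.DAYS))
                                .build())
                        .build());
            }
        }
    }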

    ", "location":"header", "locationName":"x-amz-bypass-governance-retention" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    The MD5 hash for the request body.

    ", + "documentation":"

    The MD5 hash for the request body.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"Retention" @@ -7049,7 +8051,7 @@ "members":{ "VersionId":{ "shape":"ObjectVersionId", - "documentation":"

    ", + "documentation":"

    The versionId of the object the tag-set was added to.

    ", "location":"header", "locationName":"x-amz-version-id" } @@ -7065,33 +8067,39 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name containing the object.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Name of the object key.

    ", "location":"uri", "locationName":"Key" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"

    ", + "documentation":"

    The versionId of the object that the tag-set will be added to.

    ", "location":"querystring", "locationName":"versionId" }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    ", + "documentation":"

    The MD5 hash for the request body.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, "Tagging":{ "shape":"Tagging", - "documentation":"

    ", + "documentation":"

    Container for the TagSet and Tag elements.
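    A minimal sketch of replacing an object's tag set with the v2 client; the bucket, key, and tag values are placeholders.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.PutObjectTaggingRequest;
    import software.amazon.awssdk.services.s3.model.Tag;
    import software.amazon.awssdk.services.s3.model.Tagging;

    public class ObjectTaggingExample {
        public static void main(String[] args) {
            try (S3Client s3 = S3Client.create()) {
                // Replaces the object's existing tag set with a single project tag.
                s3.putObjectTagging(PutObjectTaggingRequest.builder()
                        .bucket("my-example-bucket") // placeholder
                        .key("reports/2020/q4.csv")  // placeholder
                        .tagging(Tagging.builder()
                                .tagSet(Tag.builder().key("project").value("blue").build())
                                .build())
                        .build());
            }
        }
    }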

    ", "locationName":"Tagging", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"Tagging" @@ -7111,7 +8119,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    The MD5 hash of the PutPublicAccessBlock request body.

    ", + "documentation":"

    The MD5 hash of the PutPublicAccessBlock request body.

    For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

    ", "location":"header", "locationName":"Content-MD5" }, @@ -7120,6 +8128,12 @@ "documentation":"

    The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

    ", "locationName":"PublicAccessBlockConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"PublicAccessBlockConfiguration" @@ -7140,13 +8154,10 @@ }, "Events":{ "shape":"EventList", - "documentation":"

    ", + "documentation":"

    A collection of bucket events for which to send notifications.

    ", "locationName":"Event" }, - "Filter":{ - "shape":"NotificationConfigurationFilter", - "documentation":"

    " - } + "Filter":{"shape":"NotificationConfigurationFilter"} }, "documentation":"

    Specifies the configuration for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified events.

    " }, @@ -7160,15 +8171,15 @@ }, "Events":{ "shape":"EventList", - "documentation":"

    ", + "documentation":"

    A collection of bucket events for which to send notifications.

    ", "locationName":"Event" }, "Queue":{ "shape":"QueueArn", - "documentation":"

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 publishes a message when it detects events of the specified type.

    " } }, - "documentation":"

    " + "documentation":"

    This data type is deprecated. Use QueueConfiguration for the same purposes. This data type specifies the configuration for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified events.

    " }, "QueueConfigurationList":{ "type":"list", @@ -7196,7 +8207,7 @@ "eventpayload":true } }, - "documentation":"

    ", + "documentation":"

    The container for the records event.

    ", "event":true }, "Redirect":{ @@ -7243,6 +8254,24 @@ "ReplaceKeyPrefixWith":{"type":"string"}, "ReplaceKeyWith":{"type":"string"}, "ReplicaKmsKeyID":{"type":"string"}, + "ReplicaModifications":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"ReplicaModificationsStatus", + "documentation":"

    Specifies whether Amazon S3 replicates modifications on replicas.

    " + } + }, + "documentation":"

    A filter that you can specify for selection for modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when Filter is specified), you can specify this element and set the status to Enabled to replicate modifications on replicas.

    If you don't specify the Filter element, Amazon S3 assumes that the replication configuration is the earlier version, V1. In the earlier version, this element is not allowed.

    " + }, + "ReplicaModificationsStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, "ReplicationConfiguration":{ "type":"structure", "required":[ @@ -7252,7 +8281,7 @@ "members":{ "Role":{ "shape":"Role", - "documentation":"

    The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that Amazon S3 assumes when replicating objects. For more information, see How to Set Up Cross-Region Replication in the Amazon Simple Storage Service Developer Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that Amazon S3 assumes when replicating objects. For more information, see How to Set Up Replication in the Amazon Simple Storage Service Developer Guide.

    " }, "Rules":{ "shape":"ReplicationRules", @@ -7275,33 +8304,31 @@ }, "Priority":{ "shape":"Priority", - "documentation":"

    The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:

    • Same object quality prefix based filter criteria If prefixes you specified in multiple rules overlap

    • Same object qualify tag based filter criteria specified in multiple rules

    For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

    " + "documentation":"

    The priority indicates which rule has precedence whenever two or more replication rules conflict. Amazon S3 will attempt to replicate objects according to all replication rules. However, if there are two or more rules with the same destination bucket, then objects will be replicated according to the rule with the highest priority. The higher the number, the higher the priority.

    For more information, see Replication in the Amazon Simple Storage Service Developer Guide.

    " }, "Prefix":{ "shape":"Prefix", - "documentation":"

    An object keyname prefix that identifies the object or objects to which the rule applies. The maximum prefix length is 1,024 characters. To include all objects in a bucket, specify an empty string.

    ", + "documentation":"

    An object key name prefix that identifies the object or objects to which the rule applies. The maximum prefix length is 1,024 characters. To include all objects in a bucket, specify an empty string.

    ", "deprecated":true }, - "Filter":{ - "shape":"ReplicationRuleFilter", - "documentation":"

    " - }, + "Filter":{"shape":"ReplicationRuleFilter"}, "Status":{ "shape":"ReplicationRuleStatus", "documentation":"

    Specifies whether the rule is enabled.

    " }, "SourceSelectionCriteria":{ "shape":"SourceSelectionCriteria", - "documentation":"

    A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using an AWS KMS-Managed Key (SSE-KMS).

    " + "documentation":"

    A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS).

    " + }, + "ExistingObjectReplication":{ + "shape":"ExistingObjectReplication", + "documentation":"

    " }, "Destination":{ "shape":"Destination", - "documentation":"

    A container for information about the replication destination.

    " + "documentation":"

    A container for information about the replication destination and its configurations including enabling the S3 Replication Time Control (S3 RTC).

    " }, - "DeleteMarkerReplication":{ - "shape":"DeleteMarkerReplication", - "documentation":"

    " - } + "DeleteMarkerReplication":{"shape":"DeleteMarkerReplication"} }, "documentation":"

    Specifies which Amazon S3 objects to replicate and where to store the replicas.
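    Illustrative sketch (not part of this change set) of a V2-style replication rule using the generated builders; the bucket names, role ARN, and prefix are placeholders.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.DeleteMarkerReplicationStatus;
        import software.amazon.awssdk.services.s3.model.ReplicationRule;
        import software.amazon.awssdk.services.s3.model.ReplicationRuleStatus;

        S3Client s3 = S3Client.create();

        ReplicationRule rule = ReplicationRule.builder()
                .id("replicate-documents")
                .priority(1)  // higher number wins when two or more rules match the same object
                .status(ReplicationRuleStatus.ENABLED)
                .filter(f -> f.prefix("documents/"))  // Filter (V2) instead of the deprecated Prefix member
                .deleteMarkerReplication(d -> d.status(DeleteMarkerReplicationStatus.DISABLED))
                .destination(d -> d.bucket("arn:aws:s3:::destination-bucket"))
                .build();

        s3.putBucketReplication(b -> b
                .bucket("source-bucket")
                .replicationConfiguration(c -> c
                        .role("arn:aws:iam::111122223333:role/replication-role")
                        .rules(rule)));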

    " }, @@ -7310,23 +8337,23 @@ "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"

    " + "documentation":"

    An object key name prefix that identifies the subset of objects to which the rule applies.

    " }, "Tags":{ "shape":"TagSet", - "documentation":"

    ", + "documentation":"

    An array of tags containing key and value pairs.

    ", "flattened":true, "locationName":"Tag" } }, - "documentation":"

    " + "documentation":"

    A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter.

    For example:

    • If you specify both a Prefix and a Tag filter, wrap these filters in an And tag.

    • If you specify a filter based on multiple tags, wrap the Tag elements in an And tag.

    " }, "ReplicationRuleFilter":{ "type":"structure", "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"

    An object keyname prefix that identifies the subset of objects to which the rule applies.

    " + "documentation":"

    An object key name prefix that identifies the subset of objects to which the rule applies.

    " }, "Tag":{ "shape":"Tag", @@ -7360,6 +8387,41 @@ "REPLICA" ] }, + "ReplicationTime":{ + "type":"structure", + "required":[ + "Status", + "Time" + ], + "members":{ + "Status":{ + "shape":"ReplicationTimeStatus", + "documentation":"

    Specifies whether the replication time is enabled.

    " + }, + "Time":{ + "shape":"ReplicationTimeValue", + "documentation":"

    A container specifying the time by which replication should be complete for all objects and operations on objects.

    " + } + }, + "documentation":"

    A container specifying S3 Replication Time Control (S3 RTC) related information, including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated. Must be specified together with a Metrics block.

    " + }, + "ReplicationTimeStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "ReplicationTimeValue":{ + "type":"structure", + "members":{ + "Minutes":{ + "shape":"Minutes", + "documentation":"

    Contains an integer specifying time in minutes.

    Valid values: 15 minutes.

    " + } + }, + "documentation":"

    A container specifying the time value for S3 Replication Time Control (S3 RTC) and replication metrics EventThreshold.
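    Illustrative sketch (not part of this change set) of attaching S3 RTC to a replication destination with the generated builders. The Metrics shape referenced here is not shown in this excerpt, so its builder members are an assumption; the destination ARN is a placeholder.

        import software.amazon.awssdk.services.s3.model.Destination;
        import software.amazon.awssdk.services.s3.model.MetricsStatus;
        import software.amazon.awssdk.services.s3.model.ReplicationTimeStatus;

        Destination destination = Destination.builder()
                .bucket("arn:aws:s3:::destination-bucket")
                .replicationTime(rt -> rt
                        .status(ReplicationTimeStatus.ENABLED)
                        .time(t -> t.minutes(15)))           // 15 minutes is the only documented value
                .metrics(m -> m
                        .status(MetricsStatus.ENABLED)       // RTC must be specified together with Metrics
                        .eventThreshold(t -> t.minutes(15)))
                .build();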

    " + }, "RequestCharged":{ "type":"string", "documentation":"

    If present, indicates that the requester was successfully charged for the request.

    ", @@ -7367,7 +8429,7 @@ }, "RequestPayer":{ "type":"string", - "documentation":"

    Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html

    ", + "documentation":"

    Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from requester pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 Developer Guide.

    ", "enum":["requester"] }, "RequestPaymentConfiguration":{ @@ -7379,7 +8441,7 @@ "documentation":"

    Specifies who pays for the download and request fees.

    " } }, - "documentation":"

    " + "documentation":"

    Container for Payer.

    " }, "RequestProgress":{ "type":"structure", @@ -7389,14 +8451,17 @@ "documentation":"

    Specifies whether periodic QueryProgress frames should be sent. Valid values: TRUE, FALSE. Default value: FALSE.

    " } }, - "documentation":"

    " + "documentation":"

    Container for specifying if periodic QueryProgress messages should be sent.

    " }, "ResponseCacheControl":{"type":"string"}, "ResponseContentDisposition":{"type":"string"}, "ResponseContentEncoding":{"type":"string"}, "ResponseContentLanguage":{"type":"string"}, "ResponseContentType":{"type":"string"}, - "ResponseExpires":{"type":"timestamp"}, + "ResponseExpires":{ + "type":"timestamp", + "timestampFormat":"rfc822" + }, "Restore":{"type":"string"}, "RestoreObjectOutput":{ "type":"structure", @@ -7423,25 +8488,24 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name containing the object to restore.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Object key for which the operation was initiated.

    ", "location":"uri", "locationName":"Key" }, "VersionId":{ "shape":"ObjectVersionId", - "documentation":"

    ", + "documentation":"

    VersionId used to reference a specific version of the object.

    ", "location":"querystring", "locationName":"versionId" }, "RestoreRequest":{ "shape":"RestoreRequest", - "documentation":"

    ", "locationName":"RestoreRequest", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, @@ -7449,6 +8513,12 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"RestoreRequest" @@ -7459,11 +8529,11 @@ "members":{ "Days":{ "shape":"Days", - "documentation":"

    Lifetime of the active copy in days. Do not use with restores that specify OutputLocation.

    " + "documentation":"

    Lifetime of the active copy in days. Do not use with restores that specify OutputLocation.

    The Days element is required for regular restores, and must not be provided for select requests.

    " }, "GlacierJobParameters":{ "shape":"GlacierJobParameters", - "documentation":"

    Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.

    " + "documentation":"

    S3 Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.

    " }, "Type":{ "shape":"RestoreRequestType", @@ -7471,7 +8541,7 @@ }, "Tier":{ "shape":"Tier", - "documentation":"

    Glacier retrieval tier at which the restore will be processed.

    " + "documentation":"

    Retrieval tier at which the restore will be processed.
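    Illustrative sketch (not part of this change set) of a regular restore request using the generated v2 builders; the bucket and key are placeholders.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.Tier;

        S3Client s3 = S3Client.create();

        // Restore an archived object copy for 7 days using the Standard retrieval tier.
        s3.restoreObject(b -> b
                .bucket("my-archive-bucket")
                .key("reports/2019/annual.csv")
                .restoreRequest(r -> r
                        .days(7)  // required for regular restores; must be omitted for select requests
                        .glacierJobParameters(g -> g.tier(Tier.STANDARD))));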

    " }, "Description":{ "shape":"Description", @@ -7506,7 +8576,7 @@ "documentation":"

    Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can specify a different error code to return.

    " } }, - "documentation":"

    Specifies the redirect behavior and when a redirect is applied.

    " + "documentation":"

    Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects in the Amazon Simple Storage Service Developer Guide.

    " }, "RoutingRules":{ "type":"list", @@ -7524,7 +8594,7 @@ "members":{ "Expiration":{ "shape":"LifecycleExpiration", - "documentation":"

    " + "documentation":"

    Specifies the expiration for the lifecycle of the object.

    " }, "ID":{ "shape":"ID", @@ -7540,22 +8610,13 @@ }, "Transition":{ "shape":"Transition", - "documentation":"

    " - }, - "NoncurrentVersionTransition":{ - "shape":"NoncurrentVersionTransition", - "documentation":"

    " - }, - "NoncurrentVersionExpiration":{ - "shape":"NoncurrentVersionExpiration", - "documentation":"

    " + "documentation":"

    Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

    " }, - "AbortIncompleteMultipartUpload":{ - "shape":"AbortIncompleteMultipartUpload", - "documentation":"

    " - } + "NoncurrentVersionTransition":{"shape":"NoncurrentVersionTransition"}, + "NoncurrentVersionExpiration":{"shape":"NoncurrentVersionExpiration"}, + "AbortIncompleteMultipartUpload":{"shape":"AbortIncompleteMultipartUpload"} }, - "documentation":"

    Specifies lifecycle rules for an Amazon S3 bucket. For more information, see PUT Bucket lifecycle in the Amazon Simple Storage Service API Reference.

    " + "documentation":"

    Specifies lifecycle rules for an Amazon S3 bucket. For more information, see Put Bucket Lifecycle Configuration in the Amazon Simple Storage Service API Reference. For examples, see Put Bucket Lifecycle Configuration Examples.

    " }, "Rules":{ "type":"list", @@ -7567,7 +8628,6 @@ "members":{ "FilterRules":{ "shape":"FilterRuleList", - "documentation":"

    ", "locationName":"FilterRule" } }, @@ -7588,10 +8648,7 @@ "shape":"LocationPrefix", "documentation":"

    The prefix that is prepended to the restore results for this request.

    " }, - "Encryption":{ - "shape":"Encryption", - "documentation":"

    " - }, + "Encryption":{"shape":"Encryption"}, "CannedACL":{ "shape":"ObjectCannedACL", "documentation":"

    The canned ACL to apply to the restore results.

    " @@ -7613,7 +8670,7 @@ "documentation":"

    The class of storage used to store the restore results.

    " } }, - "documentation":"

    Describes an S3 location that will receive the results of the restore request.

    " + "documentation":"

    Describes an Amazon S3 location that will receive the results of the restore request.

    " }, "SSECustomerAlgorithm":{"type":"string"}, "SSECustomerKey":{ @@ -7627,10 +8684,10 @@ "members":{ "KeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    Specifies the ID of the AWS Key Management Service (KMS) master encryption key to use for encrypting Inventory reports.

    " + "documentation":"

    Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) to use for encrypting inventory reports.

    " } }, - "documentation":"

    Specifies the use of SSE-KMS to encrypt delivered Inventory reports.

    ", + "documentation":"

    Specifies the use of SSE-KMS to encrypt delivered inventory reports.

    ", "locationName":"SSE-KMS" }, "SSEKMSEncryptionContext":{ @@ -7645,9 +8702,23 @@ "type":"structure", "members":{ }, - "documentation":"

    Specifies the use of SSE-S3 to encrypt delivered Inventory reports.

    ", + "documentation":"

    Specifies the use of SSE-S3 to encrypt delivered inventory reports.

    ", "locationName":"SSE-S3" }, + "ScanRange":{ + "type":"structure", + "members":{ + "Start":{ + "shape":"Start", + "documentation":"

    Specifies the start of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is 0. If only start is supplied, it means scan from that point to the end of the file. For example, <scanrange><start>50</start></scanrange> means scan from byte 50 until the end of the file.

    " + }, + "End":{ + "shape":"End", + "documentation":"

    Specifies the end of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is one less than the size of the object being queried. If only the End parameter is supplied, it is interpreted to mean scan the last N bytes of the file. For example, <scanrange><end>50</end></scanrange> means scan the last 50 bytes.

    " + } + }, + "documentation":"

    Specifies the byte range of the object to get the records from. A record is processed when its first byte is contained by the range. This parameter is optional, but when specified, it must not be empty. See RFC 2616, Section 14.35.1 about how to specify the start and end of the range.

    " + }, "SelectObjectContentEventStream":{ "type":"structure", "members":{ @@ -7672,7 +8743,7 @@ "documentation":"

    The End Event.

    " } }, - "documentation":"

    ", + "documentation":"

    The container for selecting objects from a content event stream.

    ", "eventstream":true }, "SelectObjectContentOutput":{ @@ -7680,7 +8751,7 @@ "members":{ "Payload":{ "shape":"SelectObjectContentEventStream", - "documentation":"

    " + "documentation":"

    The array of results.

    " } }, "payload":"Payload" @@ -7710,19 +8781,19 @@ }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"

    The SSE Algorithm used to encrypt the object. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys.

    ", + "documentation":"

    The SSE Algorithm used to encrypt the object. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"

    The SSE Customer Key. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys.

    ", + "documentation":"

    The SSE Customer Key. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    The SSE Customer Key MD5. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys.

    ", + "documentation":"

    The SSE Customer Key MD5. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, @@ -7732,7 +8803,7 @@ }, "ExpressionType":{ "shape":"ExpressionType", - "documentation":"

    The type of the provided expression (for example., SQL).

    " + "documentation":"

    The type of the provided expression (for example, SQL).

    " }, "RequestProgress":{ "shape":"RequestProgress", @@ -7745,6 +8816,16 @@ "OutputSerialization":{ "shape":"OutputSerialization", "documentation":"

    Describes the format of the data that you want Amazon S3 to return in response.

    " + }, + "ScanRange":{ + "shape":"ScanRange", + "documentation":"

    Specifies the byte range of the object to get the records from. A record is processed when its first byte is contained by the range. This parameter is optional, but when specified, it must not be empty. See RFC 2616, Section 14.35.1 about how to specify the start and end of the range.

    ScanRange may be used in the following ways:

    • <scanrange><start>50</start><end>100</end></scanrange> - process only the records starting between the bytes 50 and 100 (inclusive, counting from zero)

    • <scanrange><start>50</start></scanrange> - process only the records starting after the byte 50

    • <scanrange><end>50</end></scanrange> - process only the records within the last 50 bytes of the file.

    " + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "documentation":"

    Request to filter the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records. It returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. For more information, see S3Select API Documentation.
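    Illustrative sketch (not part of this change set) of building such a request with the generated v2 model classes, including the new ScanRange member; the bucket, key, and SQL expression are placeholders. Executing the request goes through the event-stream select API (for example, the async client with a response handler), which is outside this excerpt.

        import software.amazon.awssdk.services.s3.model.ExpressionType;
        import software.amazon.awssdk.services.s3.model.FileHeaderInfo;
        import software.amazon.awssdk.services.s3.model.SelectObjectContentRequest;

        SelectObjectContentRequest select = SelectObjectContentRequest.builder()
                .bucket("my-bucket")
                .key("logs/2020/requests.csv")
                .expression("SELECT s.status, s.latency FROM S3Object s WHERE s.status = '500'")
                .expressionType(ExpressionType.SQL)
                .inputSerialization(in -> in.csv(c -> c.fileHeaderInfo(FileHeaderInfo.USE)))
                .outputSerialization(out -> out.json(j -> j.recordDelimiter("\n")))
                .scanRange(r -> r.start(0L).end(1_048_576L))  // only records starting in the first 1 MiB
                .build();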

    " @@ -7764,7 +8845,7 @@ }, "ExpressionType":{ "shape":"ExpressionType", - "documentation":"

    The type of the provided expression (e.g., SQL).

    " + "documentation":"

    The type of the provided expression (for example, SQL).

    " }, "Expression":{ "shape":"Expression", @@ -7794,7 +8875,7 @@ }, "KMSMasterKeyID":{ "shape":"SSEKMSKeyId", - "documentation":"

    KMS master key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm is set to aws:kms.

    " + "documentation":"

    AWS Key Management Service (KMS) customer master key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm is set to aws:kms.

    You can specify the key ID or the Amazon Resource Name (ARN) of the CMK. However, if you are using encryption with cross-account operations, you must use a fully qualified CMK ARN. For more information, see Using encryption for cross-account operations.

    For example:

    • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

    • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

    Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

    " } }, "documentation":"

    Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. For more information, see PUT Bucket encryption in the Amazon Simple Storage Service API Reference.

    " @@ -7817,6 +8898,10 @@ "ApplyServerSideEncryptionByDefault":{ "shape":"ServerSideEncryptionByDefault", "documentation":"

    Specifies the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied.

    " + }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.

    For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

    " } }, "documentation":"

    Specifies the default server-side encryption configuration.
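    Illustrative sketch (not part of this change set) of setting an SSE-KMS default encryption rule together with the new BucketKeyEnabled member, assuming the regenerated builders expose it; the bucket name and CMK ARN are placeholders.

        import software.amazon.awssdk.services.s3.S3Client;
        import software.amazon.awssdk.services.s3.model.ServerSideEncryption;
        import software.amazon.awssdk.services.s3.model.ServerSideEncryptionRule;

        S3Client s3 = S3Client.create();

        s3.putBucketEncryption(b -> b
                .bucket("my-bucket")
                .serverSideEncryptionConfiguration(c -> c
                        .rules(ServerSideEncryptionRule.builder()
                                .applyServerSideEncryptionByDefault(d -> d
                                        .sseAlgorithm(ServerSideEncryption.AWS_KMS)
                                        .kmsMasterKeyID("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"))
                                .bucketKeyEnabled(true)  // new member added in this model update
                                .build())));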

    " @@ -7834,9 +8919,13 @@ "SseKmsEncryptedObjects":{ "shape":"SseKmsEncryptedObjects", "documentation":"

    A container for filter information for the selection of Amazon S3 objects encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication configuration, this element is required.

    " + }, + "ReplicaModifications":{ + "shape":"ReplicaModifications", + "documentation":"

    A filter that you can specify for selections for modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when Filter is specified), you can specify this element and set the status to Enabled to replicate modifications on replicas.

    If you don't specify the Filter element, Amazon S3 assumes that the replication configuration is the earlier version, V1. In the earlier version, this element is not allowed.

    " } }, - "documentation":"

    A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using an AWS KMS-Managed Key (SSE-KMS).

    " + "documentation":"

    A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS).

    " }, "SseKmsEncryptedObjects":{ "type":"structure", @@ -7844,7 +8933,7 @@ "members":{ "Status":{ "shape":"SseKmsEncryptedObjectsStatus", - "documentation":"

    Specifies whether Amazon S3 replicates objects created with server-side encryption using an AWS KMS-managed key.

    " + "documentation":"

    Specifies whether Amazon S3 replicates objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service.

    " } }, "documentation":"

    A container for filter information for the selection of S3 objects encrypted with AWS KMS.

    " @@ -7856,6 +8945,7 @@ "Disabled" ] }, + "Start":{"type":"long"}, "StartAfter":{"type":"string"}, "Stats":{ "type":"structure", @@ -7873,7 +8963,7 @@ "documentation":"

    The total number of bytes of records payload data returned.

    " } }, - "documentation":"

    " + "documentation":"

    Container for the stats details.

    " }, "StatsEvent":{ "type":"structure", @@ -7884,7 +8974,7 @@ "eventpayload":true } }, - "documentation":"

    ", + "documentation":"

    Container for the Stats Event.

    ", "event":true }, "StorageClass":{ @@ -7896,7 +8986,8 @@ "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", - "DEEP_ARCHIVE" + "DEEP_ARCHIVE", + "OUTPOSTS" ] }, "StorageClassAnalysis":{ @@ -7925,7 +9016,7 @@ "documentation":"

    The place to store the data for an analysis.

    " } }, - "documentation":"

    " + "documentation":"

    Container for data related to the storage class analysis for an Amazon S3 bucket for export.

    " }, "StorageClassAnalysisSchemaVersion":{ "type":"string", @@ -7941,14 +9032,14 @@ "members":{ "Key":{ "shape":"ObjectKey", - "documentation":"

    Name of the tag.

    " + "documentation":"

    Name of the object key.

    " }, "Value":{ "shape":"Value", "documentation":"

    Value of the tag.

    " } }, - "documentation":"

    " + "documentation":"

    A container of a key value name pair.

    " }, "TagCount":{"type":"integer"}, "TagSet":{ @@ -7964,10 +9055,10 @@ "members":{ "TagSet":{ "shape":"TagSet", - "documentation":"

    " + "documentation":"

    A collection for a set of tags.

    " } }, - "documentation":"

    " + "documentation":"

    Container for TagSet elements.

    " }, "TaggingDirective":{ "type":"string", @@ -7983,14 +9074,14 @@ "members":{ "Grantee":{ "shape":"Grantee", - "documentation":"

    " + "documentation":"

    Container for the person being granted permissions.

    " }, "Permission":{ "shape":"BucketLogsPermission", - "documentation":"

    Logging permissions assigned to the Grantee for the bucket.

    " + "documentation":"

    Logging permissions assigned to the grantee for the bucket.

    " } }, - "documentation":"

    " + "documentation":"

    Container for granting information.

    " }, "TargetGrants":{ "type":"list", @@ -8008,6 +9099,29 @@ "Expedited" ] }, + "Tiering":{ + "type":"structure", + "required":[ + "Days", + "AccessTier" + ], + "members":{ + "Days":{ + "shape":"IntelligentTieringDays", + "documentation":"

    The number of consecutive days of no access after which an object will be eligible to be transitioned to the corresponding tier. The minimum number of days specified for Archive Access tier must be at least 90 days and Deep Archive Access tier must be at least 180 days. The maximum can be up to 2 years (730 days).

    " + }, + "AccessTier":{ + "shape":"IntelligentTieringAccessTier", + "documentation":"

    S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing frequently and infrequently accessed objects for a list of access tiers in the S3 Intelligent-Tiering storage class.

    " + } + }, + "documentation":"

    The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead.
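    Illustrative sketch (not part of this change set) of building the new Tiering values with the generated enums; the resulting objects would be attached to an Intelligent-Tiering bucket configuration through the corresponding configuration API, which is not shown in this excerpt.

        import software.amazon.awssdk.services.s3.model.IntelligentTieringAccessTier;
        import software.amazon.awssdk.services.s3.model.Tiering;

        // Minimum waiting periods per the documentation above: 90 days for Archive Access,
        // 180 days for Deep Archive Access; the maximum for either is 730 days.
        Tiering archiveAccess = Tiering.builder()
                .accessTier(IntelligentTieringAccessTier.ARCHIVE_ACCESS)
                .days(90)
                .build();

        Tiering deepArchiveAccess = Tiering.builder()
                .accessTier(IntelligentTieringAccessTier.DEEP_ARCHIVE_ACCESS)
                .days(180)
                .build();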

    " + }, + "TieringList":{ + "type":"list", + "member":{"shape":"Tiering"}, + "flattened":true + }, "Token":{"type":"string"}, "TopicArn":{"type":"string"}, "TopicConfiguration":{ @@ -8028,10 +9142,7 @@ "documentation":"

    The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.

    ", "locationName":"Event" }, - "Filter":{ - "shape":"NotificationConfigurationFilter", - "documentation":"

    " - } + "Filter":{"shape":"NotificationConfigurationFilter"} }, "documentation":"

    A container for specifying the configuration for publication of messages to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects specified events.

    " }, @@ -8041,7 +9152,7 @@ "Id":{"shape":"NotificationId"}, "Events":{ "shape":"EventList", - "documentation":"

    ", + "documentation":"

    A collection of events related to objects.

    ", "locationName":"Event" }, "Event":{ @@ -8054,7 +9165,7 @@ "documentation":"

    Amazon SNS topic to which Amazon S3 will publish a message to report the specified events for the bucket.

    " } }, - "documentation":"

    " + "documentation":"

    A container for specifying the configuration for publication of messages to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects specified events. This data type is deprecated. Use TopicConfiguration instead.

    " }, "TopicConfigurationList":{ "type":"list", @@ -8077,7 +9188,7 @@ "documentation":"

    The storage class to which you want the object to transition.

    " } }, - "documentation":"

    Specifies when an object transitions to a specified storage class.

    " + "documentation":"

    Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

    " }, "TransitionList":{ "type":"list", @@ -8115,11 +9226,11 @@ }, "CopyPartResult":{ "shape":"CopyPartResult", - "documentation":"

    " + "documentation":"

    Container for all response elements.

    " }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -8131,16 +9242,22 @@ }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

    ", + "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -8161,13 +9278,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

    ", + "documentation":"

    The bucket name.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, "CopySource":{ "shape":"CopySource", - "documentation":"

    The name of the source bucket and key name of the source object, separated by a slash (/). Must be URL-encoded.

    ", + "documentation":"

    Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:

    • For objects not accessed through an access point, specify the name of the source bucket and key of the source object, separated by a slash (/). For example, to copy the object reports/january.pdf from the bucket awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value must be URL encoded.

    • For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>. For example, to copy the object reports/january.pdf through access point my-access-point owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. The value must be URL encoded.

      Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same AWS Region.

      Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>. For example, to copy the object reports/january.pdf through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. The value must be URL encoded.

    To copy a specific version of an object, append ?versionId=<version-id> to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.
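    Illustrative sketch (not part of this change set) of an upload-part-copy call using the simple bucket/key copy source format described above; bucket names, keys, and the byte range are placeholders.

        import software.amazon.awssdk.services.s3.S3Client;

        S3Client s3 = S3Client.create();

        String uploadId = s3.createMultipartUpload(c -> c
                        .bucket("destination-bucket")
                        .key("reports/january-copy.pdf"))
                .uploadId();

        // Copy the first 5 MiB of the source object as part 1; the copy source is the
        // URL-encoded "source-bucket/key" form described in the header documentation above.
        s3.uploadPartCopy(b -> b
                .copySource("awsexamplebucket/reports/january.pdf")
                .bucket("destination-bucket")
                .key("reports/january-copy.pdf")
                .uploadId(uploadId)
                .partNumber(1)
                .copySourceRange("bytes=0-5242879"));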

    ", "location":"header", "locationName":"x-amz-copy-source" }, @@ -8197,13 +9314,13 @@ }, "CopySourceRange":{ "shape":"CopySourceRange", - "documentation":"

    The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source. You can copy a range only if the source object is greater than 5 MB.

    ", + "documentation":"

    The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first 10 bytes of the source. You can copy a range only if the source object is greater than 5 MB.

    ", "location":"header", "locationName":"x-amz-copy-source-range" }, "Key":{ "shape":"ObjectKey", - "documentation":"

    ", + "documentation":"

    Object key for which the multipart upload was initiated.

    ", "location":"uri", "locationName":"Key" }, @@ -8221,25 +9338,25 @@ }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"

    Specifies the algorithm to use to when encrypting the object (e.g., AES256).

    ", + "documentation":"

    Specifies the algorithm to use when encrypting the object (for example, AES256).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.

    ", + "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

    ", + "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "CopySourceSSECustomerAlgorithm":{ "shape":"CopySourceSSECustomerAlgorithm", - "documentation":"

    Specifies the algorithm to use when decrypting the source object (e.g., AES256).

    ", + "documentation":"

    Specifies the algorithm to use when decrypting the source object (for example, AES256).

    ", "location":"header", "locationName":"x-amz-copy-source-server-side-encryption-customer-algorithm" }, @@ -8251,7 +9368,7 @@ }, "CopySourceSSECustomerKeyMD5":{ "shape":"CopySourceSSECustomerKeyMD5", - "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

    ", + "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

    ", "location":"header", "locationName":"x-amz-copy-source-server-side-encryption-customer-key-MD5" }, @@ -8259,6 +9376,18 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected destination bucket owner. If the destination bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" + }, + "ExpectedSourceBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected source bucket owner. If the source bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-source-expected-bucket-owner" } } }, @@ -8267,7 +9396,7 @@ "members":{ "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -8285,16 +9414,22 @@ }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.

    ", + "documentation":"

    If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

    ", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -8318,7 +9453,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

    Name of the bucket to which the multipart upload was initiated.

    ", + "documentation":"

    The name of the bucket to which the multipart upload was initiated.

    When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

    When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    ", "location":"uri", "locationName":"Bucket" }, @@ -8330,7 +9465,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

    The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameted is required if object lock parameters are specified.

    ", + "documentation":"

    The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameter is required if object lock parameters are specified.

    ", "location":"header", "locationName":"Content-MD5" }, @@ -8354,19 +9489,19 @@ }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"

    Specifies the algorithm to use to when encrypting the object (e.g., AES256).

    ", + "documentation":"

    Specifies the algorithm to use when encrypting the object (for example, AES256).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.

    ", + "documentation":"

    Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, "SSECustomerKeyMD5":{ "shape":"SSECustomerKeyMD5", - "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.

    ", + "documentation":"

    Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key-MD5" }, @@ -8374,6 +9509,12 @@ "shape":"RequestPayer", "location":"header", "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

    The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

    ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } }, "payload":"Body" diff --git a/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors b/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors index 535869feb1e0..d9d447cb955d 100644 --- a/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors +++ b/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors @@ -5,9 +5,9 @@ software.amazon.awssdk.services.s3.internal.handlers.CreateMultipartUploadReques software.amazon.awssdk.services.s3.internal.handlers.EnableChunkedEncodingInterceptor software.amazon.awssdk.services.s3.internal.handlers.DisableDoubleUrlEncodingInterceptor software.amazon.awssdk.services.s3.internal.handlers.DecodeUrlEncodedResponseInterceptor -software.amazon.awssdk.services.s3.internal.handlers.AddContentMd5HeaderInterceptor software.amazon.awssdk.services.s3.internal.handlers.GetBucketPolicyInterceptor software.amazon.awssdk.services.s3.internal.handlers.AsyncChecksumValidationInterceptor software.amazon.awssdk.services.s3.internal.handlers.SyncChecksumValidationInterceptor software.amazon.awssdk.services.s3.internal.handlers.EnableTrailingChecksumInterceptor software.amazon.awssdk.services.s3.internal.handlers.ExceptionTranslationInterceptor +software.amazon.awssdk.services.s3.internal.handlers.GetObjectInterceptor \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/AclTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/AclTest.java new file mode 100644 index 000000000000..bc558e57fae7 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/AclTest.java @@ -0,0 +1,105 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.put; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static org.assertj.core.api.Assertions.assertThat; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import com.github.tomakehurst.wiremock.matching.ContainsPattern; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.model.GetBucketAclResponse; +import software.amazon.awssdk.services.s3.model.Grant; +import software.amazon.awssdk.services.s3.model.Permission; +import software.amazon.awssdk.services.s3.model.PutBucketAclRequest; +import software.amazon.awssdk.services.s3.model.Type; + +public class AclTest { + private static final String OWNER_ID = "123456"; + private static final String OWNER_DISPLAY_NAME = "foobar"; + private static final String READ_ONLY_USER_ID = "7891011"; + private static final String READ_ONLY_USER_DISPLAY_NAME = "helloworld"; + private static final String MOCK_ACL_RESPONSE = String.format("%s%sFULL_CONTROL%s%sREAD%s%s", + OWNER_DISPLAY_NAME,OWNER_ID, READ_ONLY_USER_DISPLAY_NAME, READ_ONLY_USER_ID, OWNER_DISPLAY_NAME, OWNER_ID); + + @Rule + public WireMockRule mockServer = new WireMockRule(0); + + private S3Client s3Client; + + @Before + public void setup() { + URI endpoint = URI.create("http://localhost:" + mockServer.port()); + s3Client = S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.US_WEST_2) + .endpointOverride(endpoint) + .build(); + } + + @Test + public void putBucketAcl_marshalling() { + stubFor(put(anyUrl()) + .willReturn(aResponse().withStatus(200))); + + s3Client.putBucketAcl(request()); + verify(anyRequestedFor(anyUrl()).withRequestBody(new ContainsPattern(MOCK_ACL_RESPONSE))); + } + + @Test + public void getBucketAcl_shouldUnmarshallCorrectly() { + stubFor(get(anyUrl()) + .willReturn(aResponse().withBody(MOCK_ACL_RESPONSE).withStatus(200))); + + GetBucketAclResponse bucketAcl = s3Client.getBucketAcl(b -> b.bucket("test")); + assertThat(bucketAcl.owner()).isEqualTo(request().accessControlPolicy().owner()); + assertThat(bucketAcl.grants()).isEqualTo(request().accessControlPolicy().grants()); + } + + private PutBucketAclRequest request() { + + List grants = new ArrayList<>(); + grants.add(Grant.builder() + .grantee(g -> g.type(Type.CANONICAL_USER).id(OWNER_ID).displayName(OWNER_DISPLAY_NAME)) + .permission(Permission.FULL_CONTROL) + .build()); + grants.add(Grant.builder() + .grantee(g -> g.type(Type.CANONICAL_USER).id(READ_ONLY_USER_ID).displayName(READ_ONLY_USER_DISPLAY_NAME)) + .permission(Permission.READ) + .build()); + return PutBucketAclRequest.builder() + .bucket("bucket") + .accessControlPolicy(b -> b.grants(grants) + .owner(o -> 
o.id(OWNER_ID).displayName(OWNER_DISPLAY_NAME)) + .build()).build(); + + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/EndpointOverrideTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/EndpointOverrideTest.java index a552c0e32e56..0d58559d4e5d 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/EndpointOverrideTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/EndpointOverrideTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/InvalidRegionTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/InvalidRegionTest.java new file mode 100644 index 000000000000..82cc321f2bd6 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/InvalidRegionTest.java @@ -0,0 +1,89 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.time.Duration; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; + +public class InvalidRegionTest { + @Test + public void invalidS3UtilitiesRegionAtClientGivesHelpfulMessage() { + S3Utilities utilities = S3Utilities.builder().region(Region.of("US_EAST_1")).build(); + + assertThatThrownBy(() -> utilities.getUrl(r -> r.bucket("foo").key("bar"))) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_EAST_1") + .hasMessageContaining("region") + .hasMessageContaining("us-east-1"); + } + + @Test + public void invalidS3UtilitiesRegionAtRequestGivesHelpfulMessage() { + S3Utilities utilities = S3Utilities.builder().region(Region.of("us-east-1")).build(); + + assertThatThrownBy(() -> utilities.getUrl(r -> r.bucket("foo").key("bar").region(Region.of("US_WEST_2")))) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_WEST_2") + .hasMessageContaining("region") + .hasMessageContaining("us-west-2"); + } + + @Test + public void invalidS3ArnRegionAtRequestGivesHelpfulMessage() { + S3Client client = S3Client.builder() + .region(Region.of("us-east-1")) + .credentialsProvider(AnonymousCredentialsProvider.create()) + .serviceConfiguration(c -> c.useArnRegionEnabled(true)) + .build(); + + assertThatThrownBy(() -> client.getObject(r -> r.bucket("arn:aws:s3:US_EAST_1:123456789012:accesspoint/test") + .key("test"))) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_EAST_1") + 
.hasMessageContaining("region"); + } + + @Test + public void invalidS3PresignerRegionAtClientGivesHelpfulMessage() { + assertThatThrownBy(() -> S3Presigner.builder().region(Region.of("US_EAST_1")).build()) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_EAST_1") + .hasMessageContaining("region") + .hasMessageContaining("us-east-1"); + } + + @Test + public void invalidS3PresignerArnRegionAtRequestGivesHelpfulMessage() { + S3Presigner presigner = S3Presigner.builder() + .region(Region.of("us-east-1")) + .credentialsProvider(AnonymousCredentialsProvider.create()) + .serviceConfiguration(S3Configuration.builder().useArnRegionEnabled(true).build()) + .build(); + + String arn = "arn:aws:s3:US_EAST_1:123456789012:accesspoint/test"; + assertThatThrownBy(() -> presigner.presignGetObject(r -> r.getObjectRequest(g -> g.bucket(arn).key("test")) + .signatureDuration(Duration.ofMinutes(15)))) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_EAST_1") + .hasMessageContaining("region"); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/MultipartUploadTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/MultipartUploadTest.java index 2ff1986e660d..1c314ded4fc5 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/MultipartUploadTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/MultipartUploadTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3EndpointResolutionTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3EndpointResolutionTest.java new file mode 100644 index 000000000000..2da7f5ceeaa8 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3EndpointResolutionTest.java @@ -0,0 +1,523 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3; + + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.services.s3.S3MockUtils.mockListBucketsResponse; +import static software.amazon.awssdk.services.s3.S3MockUtils.mockListObjectsResponse; + +import java.io.UnsupportedEncodingException; +import java.net.URI; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; +import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.internal.handlers.EndpointAddressInterceptor; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; +import software.amazon.awssdk.utils.StringInputStream; + +/** + * Functional tests for various endpoint related behavior in S3. + */ +public class S3EndpointResolutionTest { + + private static final String BUCKET = "some-bucket"; + private static final String NON_DNS_COMPATIBLE_BUCKET = "SOME.BUCKET"; + private static final String ENDPOINT_WITHOUT_BUCKET = "https://s3.ap-south-1.amazonaws.com"; + private static final String ENDPOINT_WITH_BUCKET = String.format("https://%s.s3.ap-south-1.amazonaws.com", BUCKET); + + private MockSyncHttpClient mockHttpClient; + private Signer mockSigner; + + @Before + public void setup() { + mockHttpClient = new MockSyncHttpClient(); + mockSigner = (request, executionAttributes) -> request; + } + + /** + * Only APIs that operate on buckets uses virtual addressing. Service level operations like ListBuckets will use the normal + * endpoint. + */ + @Test + public void serviceLevelOperation_UsesStandardEndpoint() throws Exception { + mockHttpClient.stubNextResponse(mockListBucketsResponse()); + S3Client s3Client = buildClient(null); + + s3Client.listBuckets(); + + assertThat(mockHttpClient.getLastRequest().getUri()) + .as("Uses regional S3 endpoint without bucket") + .isEqualTo(URI.create(ENDPOINT_WITHOUT_BUCKET + "/")); + + assertThat(mockHttpClient.getLastRequest().encodedPath()) + .as("Bucket is not in resource path") + .isEqualTo("/"); + } + + /** + * Service level operations for dualstack mode should go to the dualstack endpoint (without virtual addressing). + */ + @Test + public void serviceLevelOperation_WithDualstackEnabled_UsesDualstackEndpoint() throws Exception { + mockHttpClient.stubNextResponse(mockListBucketsResponse()); + S3Client s3Client = buildClient(withDualstackEnabled()); + + s3Client.listBuckets(); + + assertThat(mockHttpClient.getLastRequest().getUri()) + .as("Uses regional S3 endpoint without bucket") + .isEqualTo(URI.create("https://s3.dualstack.ap-south-1.amazonaws.com/")); + } + + /** + * When a custom endpoint is provided via the builder we should honor that instead of trying to re-resolve it in the + * {@link EndpointAddressInterceptor}. 
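+ * The ListBuckets request below is therefore expected to hit https://foobar.amazonaws.com/ exactly as configured.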
+ */ + @Test + public void customEndpointProvided_UsesCustomEndpoint() throws Exception { + URI customEndpoint = URI.create("https://foobar.amazonaws.com"); + mockHttpClient.stubNextResponse(mockListBucketsResponse()); + S3Client s3Client = clientBuilder().endpointOverride(customEndpoint).build(); + + s3Client.listBuckets(); + + assertThat(mockHttpClient.getLastRequest().getUri()) + .as("Uses custom endpoint") + .isEqualTo(URI.create(customEndpoint + "/")); + } + + @Test + public void accessPointArn_correctlyRewritesEndpoint() throws Exception { + URI customEndpoint = URI.create("https://foobar-12345678910.s3-accesspoint.ap-south-1.amazonaws.com"); + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().build(); + String accessPointArn = "arn:aws:s3:ap-south-1:12345678910:accesspoint:foobar"; + + s3Client.listObjects(ListObjectsRequest.builder().bucket(accessPointArn).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), customEndpoint.toString()); + } + + @Test + public void accessPointArn_customEndpoint_throwsIllegalArgumentException() throws Exception { + URI customEndpoint = URI.create("https://foobar.amazonaws.com"); + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().endpointOverride(customEndpoint).build(); + String accessPointArn = "arn:aws:s3:ap-south-1:12345678910:accesspoint:foobar"; + + assertThatThrownBy(() -> s3Client.listObjects(ListObjectsRequest.builder().bucket(accessPointArn).build())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("endpoint override"); + } + + @Test + public void accessPointArn_differentRegion_useArnRegionFalse_throwsIllegalArgumentException() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().build(); + String accessPointArn = "arn:aws:s3:us-west-2:12345678910:accesspoint:foobar"; + + assertThatThrownBy(() -> s3Client.listObjects(ListObjectsRequest.builder().bucket(accessPointArn).build())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("region"); + } + + @Test + public void accessPointArn_differentRegion_useArnRegionTrue() throws Exception { + URI customEndpoint = URI.create("https://foobar-12345678910.s3-accesspoint.us-west-2.amazonaws.com"); + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().serviceConfiguration(b -> b.useArnRegionEnabled(true)).build(); + String accessPointArn = "arn:aws:s3:us-west-2:12345678910:accesspoint:foobar"; + + s3Client.listObjects(ListObjectsRequest.builder().bucket(accessPointArn).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), customEndpoint.toString()); + } + + /** + * If a custom, non-s3 endpoint is used we revert to path style addressing. This is useful for alternative S3 implementations + * like Ceph that do not support virtual style addressing. 
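+ * e.g. https://foobar.amazonaws.com/some-bucket rather than https://some-bucket.foobar.amazonaws.com.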
+ */ + @Test + public void nonS3EndpointProvided_DoesNotUseVirtualAddressing() throws Exception { + URI customEndpoint = URI.create("https://foobar.amazonaws.com"); + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().endpointOverride(customEndpoint).build(); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), customEndpoint.toString() + "/" + BUCKET); + } + + /** + * If a custom S3 endpoint is provided (like s3-external-1 or a FIPS endpoint) then we should still use virtual addressing + * when possible. + */ + @Test + public void customS3EndpointProvided_UsesVirtualAddressing() throws Exception { + URI customEndpoint = URI.create("https://s3-external-1.amazonaws.com"); + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().endpointOverride(customEndpoint).build(); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), + String.format("https://%s.s3-external-1.amazonaws.com", BUCKET)); + } + + /** + * If customer is using HTTP we need to preserve that scheme when switching to virtual addressing. + */ + @Test + public void customHttpEndpoint_PreservesSchemeWhenSwitchingToVirtualAddressing() throws Exception { + URI customEndpoint = URI.create("http://s3-external-1.amazonaws.com"); + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilderWithMockSigner().endpointOverride(customEndpoint).build(); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), + String.format("http://%s.s3-external-1.amazonaws.com", BUCKET)); + } + + /** + * In us-east-1 buckets can have non-DNS compliant names. For those buckets we must always use path style even when it + * is disabled per the advanced configuration. + */ + @Test + public void pathStyleDisabled_NonDnsCompatibleBucket_StillUsesPathStyleAddressing() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = buildClient(null); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(NON_DNS_COMPATIBLE_BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), ENDPOINT_WITHOUT_BUCKET + "/" + NON_DNS_COMPATIBLE_BUCKET); + } + + /** + * When path style is enabled in the advanced configuration we should always use it. + */ + @Test + public void pathStyleConfigured_UsesPathStyleAddressing() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = buildClient(withPathStyle()); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), ENDPOINT_WITHOUT_BUCKET + "/" + BUCKET); + } + + /** + * By default we use virtual addressing when possible. + */ + @Test + public void noServiceConfigurationProvided_UsesVirtualAddressingWithStandardEndpoint() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = buildClient(null); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), ENDPOINT_WITH_BUCKET); + } + + /** + * By default we use virtual addressing when possible. 
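+ * i.e. https://some-bucket.s3.ap-south-1.amazonaws.com rather than https://s3.ap-south-1.amazonaws.com/some-bucket.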
+ */ + @Test + public void emptyServiceConfigurationProvided_UsesVirtualAddressingWithStandardEndpoint() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = buildClient(S3Configuration.builder().build()); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), ENDPOINT_WITH_BUCKET); + } + + /** + * S3 accelerate has a global endpoint, we use that when accelerate mode is enabled in the advanced configuration. + */ + @Test + public void accelerateEnabled_UsesVirtualAddressingWithAccelerateEndpoint() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = buildClient(withAccelerateEnabled()); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), + String.format("https://%s.s3-accelerate.amazonaws.com", BUCKET)); + } + + /** + * Dualstack uses regional endpoints that support virtual addressing. + */ + @Test + public void dualstackEnabled_UsesVirtualAddressingWithDualstackEndpoint() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = buildClient(withDualstackEnabled()); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), + String.format("https://%s.s3.dualstack.ap-south-1.amazonaws.com", BUCKET)); + } + + /** + * Dualstack also supports path style endpoints just like the normal endpoints. + */ + @Test + public void dualstackAndPathStyleEnabled_UsesPathStyleAddressingWithDualstackEndpoint() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = buildClient(withDualstackAndPathStyleEnabled()); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), "https://s3.dualstack.ap-south-1.amazonaws.com/" + BUCKET); + } + + /** + * When dualstack and accelerate are both enabled there is a special, global dualstack endpoint we must use. + */ + @Test + public void dualstackAndAccelerateEnabled_UsesDualstackAccelerateEndpoint() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = buildClient(withDualstackAndAccelerateEnabled()); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + + assertEndpointMatches(mockHttpClient.getLastRequest(), + String.format("https://%s.s3-accelerate.dualstack.amazonaws.com", BUCKET)); + } + + /** + * Accelerate is not supported for several operations. For those we should go to the normal, regional endpoint. + */ + @Test + public void unsupportedAccelerateOption_UsesStandardEndpoint() throws Exception { + mockHttpClient.stubNextResponse(mockListBucketsResponse()); + S3Client s3Client = buildClient(withAccelerateEnabled()); + + s3Client.listBuckets(); + + assertThat(mockHttpClient.getLastRequest().getUri()) + .as("Uses regional S3 endpoint") + .isEqualTo(URI.create("https://s3.ap-south-1.amazonaws.com/")); + } + + /** + * Accelerate only supports virtual addressing. Path style cannot be used with accelerate enabled. 
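+ * Building a client with both pathStyleAccessEnabled and accelerateModeEnabled is expected to fail fast with an IllegalArgumentException.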
+ */ + @Test(expected = IllegalArgumentException.class) + public void accelerateAndPathStyleEnabled_ThrowsIllegalArgumentException() { + buildClient(S3Configuration.builder() + .pathStyleAccessEnabled(true) + .accelerateModeEnabled(true) + .build()); + } + + @Test + public void regionalSettingEnabled_usesRegionalIadEndpoint() throws UnsupportedEncodingException { + EnvironmentVariableHelper environmentVariableHelper = new EnvironmentVariableHelper(); + environmentVariableHelper.set(SdkSystemSetting.AWS_S3_US_EAST_1_REGIONAL_ENDPOINT.environmentVariable(), "regional"); + + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + + S3Client s3Client = S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .httpClient(mockHttpClient) + .region(Region.US_EAST_1) + .serviceConfiguration(S3Configuration.builder() + .pathStyleAccessEnabled(true) + .build()) + .build(); + try { + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + assertThat(mockHttpClient.getLastRequest().getUri().getHost()).isEqualTo("s3.us-east-1.amazonaws.com"); + } finally { + environmentVariableHelper.reset(); + } + } + + @Test + public void regionalSettingEnabledViaProfile_usesRegionalIadEndpoint() throws UnsupportedEncodingException { + String profile = + "[profile test]\n" + + "s3_us_east_1_regional_endpoint = regional"; + + ProfileFile profileFile = ProfileFile.builder() + .content(new StringInputStream(profile)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + + S3Client s3Client = S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .httpClient(mockHttpClient) + .region(Region.US_EAST_1) + .overrideConfiguration(c -> c.defaultProfileFile(profileFile) + .defaultProfileName("test")) + .serviceConfiguration(c -> c.pathStyleAccessEnabled(true)) + .build(); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + assertThat(mockHttpClient.getLastRequest().getUri().getHost()).isEqualTo("s3.us-east-1.amazonaws.com"); + } + + @Test + public void regionalSettingDisabled_usesGlobalEndpoint() throws UnsupportedEncodingException { + EnvironmentVariableHelper environmentVariableHelper = new EnvironmentVariableHelper(); + environmentVariableHelper.set(SdkSystemSetting.AWS_S3_US_EAST_1_REGIONAL_ENDPOINT.environmentVariable(), "nonregional"); + + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + + S3Client s3Client = S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .httpClient(mockHttpClient) + .region(Region.US_EAST_1) + .serviceConfiguration(S3Configuration.builder() + .pathStyleAccessEnabled(true) + .build()) + .build(); + try { + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + assertThat(mockHttpClient.getLastRequest().getUri().getHost()).isEqualTo("s3.amazonaws.com"); + } finally { + environmentVariableHelper.reset(); + } + } + + @Test + public void regionalSettingUnset_usesGlobalEndpoint() throws UnsupportedEncodingException { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + + S3Client s3Client = S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .httpClient(mockHttpClient) + .region(Region.US_EAST_1) + .serviceConfiguration(S3Configuration.builder() + 
.pathStyleAccessEnabled(true) + .build()) + .build(); + + s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); + assertThat(mockHttpClient.getLastRequest().getUri().getHost()).isEqualTo("s3.amazonaws.com"); + } + + /** + * Assert that the provided request would have gone to the given endpoint. + * + * @param capturedRequest Request captured by mock HTTP client. + * @param endpoint Expected endpoint. + */ + private void assertEndpointMatches(SdkHttpRequest capturedRequest, String endpoint) { + assertThat(capturedRequest.getUri()).isEqualTo(URI.create(endpoint)); + } + + /** + * @param s3ServiceConfiguration Advanced configuration to use for this client. + * @return A built client with the given advanced configuration. + */ + private S3Client buildClient(S3Configuration s3ServiceConfiguration) { + return clientBuilder() + .serviceConfiguration(s3ServiceConfiguration) + .build(); + } + + /** + * @return Client builder instance preconfigured with credentials and region using the {@link #mockHttpClient} for transport. + */ + private S3ClientBuilder clientBuilder() { + return S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.AP_SOUTH_1) + .httpClient(mockHttpClient); + } + + /** + * @return Client builder instance preconfigured with credentials and region using the {@link #mockHttpClient} for transport + * and {@link #mockSigner} for signing. Using actual AwsS3V4Signer results in NPE as the execution goes into payload signing + * due to "http" protocol and input stream is not mark supported. + */ + private S3ClientBuilder clientBuilderWithMockSigner() { + return S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.AP_SOUTH_1) + .overrideConfiguration(ClientOverrideConfiguration.builder() + .putAdvancedOption(SdkAdvancedClientOption.SIGNER, + mockSigner) + .build()) + .httpClient(mockHttpClient); + } + + /** + * @return S3Configuration with path style enabled. + */ + private S3Configuration withPathStyle() { + return S3Configuration.builder() + .pathStyleAccessEnabled(true) + .build(); + } + + /** + * @return S3Configuration with accelerate mode enabled. + */ + private S3Configuration withAccelerateEnabled() { + return S3Configuration.builder() + .accelerateModeEnabled(true) + .build(); + } + + /** + * @return S3Configuration with dualstack mode enabled. + */ + private S3Configuration withDualstackEnabled() { + return S3Configuration.builder() + .dualstackEnabled(true) + .build(); + } + + /** + * @return S3Configuration with dualstack mode and path style enabled. + */ + private S3Configuration withDualstackAndPathStyleEnabled() { + return S3Configuration.builder() + .dualstackEnabled(true) + .pathStyleAccessEnabled(true) + .build(); + } + + /** + * @return S3Configuration with dualstack mode and accelerate mode enabled. 
+ */ + private S3Configuration withDualstackAndAccelerateEnabled() { + return S3Configuration.builder() + .dualstackEnabled(true) + .accelerateModeEnabled(true) + .build(); + } + +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3MockUtils.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3MockUtils.java index fa0efc327a89..10e7d779e9bc 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3MockUtils.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3MockUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java new file mode 100644 index 000000000000..7ac86abc0170 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java @@ -0,0 +1,456 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.net.URI; +import java.time.Duration; +import org.assertj.core.data.Offset; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.signer.AwsS3V4Signer; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.core.signer.NoOpSigner; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.model.RequestPayer; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; + +@RunWith(MockitoJUnitRunner.class) +public class S3PresignerTest { + private static final URI FAKE_URL; + private static final String BUCKET = "some-bucket"; + + private S3Presigner presigner; + + static { + FAKE_URL = URI.create("https://localhost"); + } + + @Before + public void setUp() { + this.presigner = presignerBuilder().build(); + } + + @After + public void tearDown() { + this.presigner.close(); + } + + private S3Presigner.Builder presignerBuilder() { + return S3Presigner.builder() + .region(Region.US_WEST_2) + .credentialsProvider(() -> AwsBasicCredentials.create("x", "x")); + } + + + private S3Presigner generateMaximal() { + return S3Presigner.builder() + .serviceConfiguration(S3Configuration.builder() + .checksumValidationEnabled(false) + .build()) + .credentialsProvider(() -> AwsBasicCredentials.create("x", "x")) + .region(Region.US_EAST_1) + .endpointOverride(FAKE_URL) + .build(); + } + + private S3Presigner generateMinimal() { + return S3Presigner.builder() + .credentialsProvider(() -> AwsBasicCredentials.create("x", "x")) + .region(Region.US_EAST_1) + .build(); + } + + @Test + public void build_allProperties() { + generateMaximal(); + } + + @Test + public void build_minimalProperties() { + generateMinimal(); + } + + @Test + public void getObject_SignatureIsUrlCompatible() { + PresignedGetObjectRequest presigned = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket("foo34343434") + .key("bar") + .responseContentType("text/plain"))); + assertThat(presigned.isBrowserExecutable()).isTrue(); + assertThat(presigned.signedHeaders().keySet()).containsExactly("host"); + assertThat(presigned.signedPayload()).isEmpty(); + } + + @Test + public void getObject_RequesterPaysIsNotUrlCompatible() { + PresignedGetObjectRequest presigned = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket("foo34343434") + .key("bar") + .requestPayer(RequestPayer.REQUESTER))); + assertThat(presigned.isBrowserExecutable()).isFalse(); + assertThat(presigned.signedHeaders().keySet()).containsExactlyInAnyOrder("host", "x-amz-request-payer"); + assertThat(presigned.signedPayload()).isEmpty(); + } + + @Test + public void getObject_EndpointOverrideIsIncludedInPresignedUrl() { + S3Presigner presigner = presignerBuilder().endpointOverride(URI.create("http://foo.com")).build(); + PresignedGetObjectRequest presigned = + 
presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket("foo34343434") + .key("bar"))); + + assertThat(presigned.url().toString()).startsWith("http://foo.com/foo34343434/bar?"); + assertThat(presigned.isBrowserExecutable()).isTrue(); + assertThat(presigned.signedHeaders().get("host")).containsExactly("foo.com"); + assertThat(presigned.signedPayload()).isEmpty(); + } + + @Test + public void getObject_CredentialsCanBeOverriddenAtTheRequestLevel() { + AwsCredentials clientCredentials = AwsBasicCredentials.create("a", "a"); + AwsCredentials requestCredentials = AwsBasicCredentials.create("b", "b"); + + S3Presigner presigner = presignerBuilder().credentialsProvider(() -> clientCredentials).build(); + + + AwsRequestOverrideConfiguration overrideConfiguration = + AwsRequestOverrideConfiguration.builder() + .credentialsProvider(() -> requestCredentials) + .build(); + + PresignedGetObjectRequest presignedWithClientCredentials = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket("foo34343434") + .key("bar"))); + + PresignedGetObjectRequest presignedWithRequestCredentials = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket("foo34343434") + .key("bar") + .overrideConfiguration(overrideConfiguration))); + + System.out.println(presignedWithClientCredentials.url()); + + assertThat(presignedWithClientCredentials.httpRequest().rawQueryParameters().get("X-Amz-Credential").get(0)) + .startsWith("a"); + assertThat(presignedWithRequestCredentials.httpRequest().rawQueryParameters().get("X-Amz-Credential").get(0)) + .startsWith("b"); + } + + @Test + public void getObject_AdditionalHeadersAndQueryStringsCanBeAdded() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .putHeader("X-Amz-AdditionalHeader", "foo1") + .putRawQueryParameter("additionalQueryParam", "foo2") + .build(); + + PresignedGetObjectRequest presigned = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket("foo34343434") + .key("bar") + .overrideConfiguration(override))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + assertThat(presigned.signedHeaders()).containsOnlyKeys("host", "x-amz-additionalheader"); + assertThat(presigned.signedHeaders().get("x-amz-additionalheader")).containsExactly("foo1"); + assertThat(presigned.httpRequest().headers()).containsKeys("x-amz-additionalheader"); + assertThat(presigned.httpRequest().rawQueryParameters().get("additionalQueryParam").get(0)).isEqualTo("foo2"); + } + + @Test + public void getObject_NonSigV4SignersRaisesException() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .signer(new NoOpSigner()) + .build(); + + assertThatThrownBy(() -> presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket("foo34343434") + .key("bar") + .overrideConfiguration(override)))) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("NoOpSigner"); + } + + @Test + public void getObject_Sigv4PresignerHonorsSignatureDuration() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .signer(AwsS3V4Signer.create()) + .build(); + + PresignedGetObjectRequest presigned = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofSeconds(1234)) + .getObjectRequest(gor -> gor.bucket("a") + 
.key("b") + .overrideConfiguration(override))); + + assertThat(presigned.httpRequest().rawQueryParameters().get("X-Amz-Expires").get(0)).satisfies(expires -> { + assertThat(expires).containsOnlyDigits(); + assertThat(Integer.parseInt(expires)).isCloseTo(1234, Offset.offset(2)); + }); + } + + @Test + public void putObject_IsNotUrlCompatible() { + PresignedPutObjectRequest presigned = + presigner.presignPutObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .putObjectRequest(go -> go.bucket("foo34343434") + .key("bar"))); + assertThat(presigned.isBrowserExecutable()).isFalse(); + assertThat(presigned.signedHeaders().keySet()).containsExactlyInAnyOrder("host"); + assertThat(presigned.signedPayload()).isEmpty(); + } + + @Test + public void putObject_EndpointOverrideIsIncludedInPresignedUrl() { + S3Presigner presigner = presignerBuilder().endpointOverride(URI.create("http://foo.com")).build(); + PresignedPutObjectRequest presigned = + presigner.presignPutObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .putObjectRequest(go -> go.bucket("foo34343434") + .key("bar"))); + + assertThat(presigned.url().toString()).startsWith("http://foo.com/foo34343434/bar?"); + assertThat(presigned.isBrowserExecutable()).isFalse(); + assertThat(presigned.signedHeaders().get("host")).containsExactly("foo.com"); + assertThat(presigned.signedPayload()).isEmpty(); + } + + @Test + public void putObject_CredentialsCanBeOverriddenAtTheRequestLevel() { + AwsCredentials clientCredentials = AwsBasicCredentials.create("a", "a"); + AwsCredentials requestCredentials = AwsBasicCredentials.create("b", "b"); + + S3Presigner presigner = presignerBuilder().credentialsProvider(() -> clientCredentials).build(); + + + AwsRequestOverrideConfiguration overrideConfiguration = + AwsRequestOverrideConfiguration.builder() + .credentialsProvider(() -> requestCredentials) + .build(); + + PresignedPutObjectRequest presignedWithClientCredentials = + presigner.presignPutObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .putObjectRequest(go -> go.bucket("foo34343434") + .key("bar"))); + + PresignedPutObjectRequest presignedWithRequestCredentials = + presigner.presignPutObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .putObjectRequest(go -> go.bucket("foo34343434") + .key("bar") + .overrideConfiguration(overrideConfiguration))); + + System.out.println(presignedWithClientCredentials.url()); + + assertThat(presignedWithClientCredentials.httpRequest().rawQueryParameters().get("X-Amz-Credential").get(0)) + .startsWith("a"); + assertThat(presignedWithRequestCredentials.httpRequest().rawQueryParameters().get("X-Amz-Credential").get(0)) + .startsWith("b"); + } + + @Test + public void putObject_AdditionalHeadersAndQueryStringsCanBeAdded() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .putHeader("X-Amz-AdditionalHeader", "foo1") + .putRawQueryParameter("additionalQueryParam", "foo2") + .build(); + + PresignedPutObjectRequest presigned = + presigner.presignPutObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .putObjectRequest(go -> go.bucket("foo34343434") + .key("bar") + .overrideConfiguration(override))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + assertThat(presigned.signedHeaders()).containsOnlyKeys("host", "x-amz-additionalheader"); + assertThat(presigned.signedHeaders().get("x-amz-additionalheader")).containsExactly("foo1"); + assertThat(presigned.httpRequest().headers()).containsKeys("x-amz-additionalheader"); + 
assertThat(presigned.httpRequest().rawQueryParameters().get("additionalQueryParam").get(0)).isEqualTo("foo2"); + } + + @Test + public void putObject_NonSigV4SignersRaisesException() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .signer(new NoOpSigner()) + .build(); + + assertThatThrownBy(() -> presigner.presignPutObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .putObjectRequest(go -> go.bucket("foo34343434") + .key("bar") + .overrideConfiguration(override)))) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("NoOpSigner"); + } + + @Test + public void putObject_Sigv4PresignerHonorsSignatureDuration() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .signer(AwsS3V4Signer.create()) + .build(); + + PresignedPutObjectRequest presigned = + presigner.presignPutObject(r -> r.signatureDuration(Duration.ofSeconds(1234)) + .putObjectRequest(gor -> gor.bucket("a") + .key("b") + .overrideConfiguration(override))); + + assertThat(presigned.httpRequest().rawQueryParameters().get("X-Amz-Expires").get(0)).satisfies(expires -> { + assertThat(expires).containsOnlyDigits(); + assertThat(Integer.parseInt(expires)).isCloseTo(1234, Offset.offset(2)); + }); + } + + @Test + public void getObject_S3ConfigurationCanBeOverriddenToLeverageTransferAcceleration() { + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .accelerateModeEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket("foo34343434") + .key("bar"))); + + + System.out.println(presignedRequest.url()); + + assertThat(presignedRequest.httpRequest().host()).contains(".s3-accelerate."); + } + + + @Test + public void accelerateEnabled_UsesVirtualAddressingWithAccelerateEndpoint() { + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .accelerateModeEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(BUCKET) + .key("bar"))); + + assertThat(presignedRequest.httpRequest().host()).isEqualTo(String.format("%s.s3-accelerate.amazonaws.com", BUCKET)); + } + + /** + * Dualstack uses regional endpoints that support virtual addressing. + */ + @Test + public void dualstackEnabled_UsesVirtualAddressingWithDualstackEndpoint() throws Exception { + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .dualstackEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(BUCKET) + .key("bar"))); + + assertThat(presignedRequest.httpRequest().host()).contains(String.format("%s.s3.dualstack.us-west-2.amazonaws.com", BUCKET)); + } + + /** + * Dualstack also supports path style endpoints just like the normal endpoints. 
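+ * e.g. https://s3.dualstack.us-west-2.amazonaws.com/some-bucket/bar rather than https://some-bucket.s3.dualstack.us-west-2.amazonaws.com/bar.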
+ */ + @Test + public void dualstackAndPathStyleEnabled_UsesPathStyleAddressingWithDualstackEndpoint() throws Exception { + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .dualstackEnabled(true) + .pathStyleAccessEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(BUCKET) + .key("bar"))); + + assertThat(presignedRequest.httpRequest().host()).isEqualTo("s3.dualstack.us-west-2.amazonaws.com"); + assertThat(presignedRequest.url().toString()).startsWith(String.format("https://s3.dualstack.us-west-2.amazonaws.com/%s/%s?", BUCKET, "bar")); + } + + /** + * When dualstack and accelerate are both enabled there is a special, global dualstack endpoint we must use. + */ + @Test + public void dualstackAndAccelerateEnabled_UsesDualstackAccelerateEndpoint() throws Exception { + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .dualstackEnabled(true) + .accelerateModeEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(BUCKET) + .key("bar"))); + + assertThat(presignedRequest.httpRequest().host()).isEqualTo(String.format("%s.s3-accelerate.dualstack.amazonaws.com", BUCKET)); + } + + @Test + public void accessPointArn_differentRegion_useArnRegionTrue() throws Exception { + String customEndpoint = "https://foobar-12345678910.s3-accesspoint.us-west-2.amazonaws.com"; + String accessPointArn = "arn:aws:s3:us-west-2:12345678910:accesspoint:foobar"; + + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .useArnRegionEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(accessPointArn) + .key("bar"))); + + assertThat(presignedRequest.url().toString()).startsWith(customEndpoint); + } + + @Test + public void accessPointArn_differentRegion_useArnRegionFalse_throwsIllegalArgumentException() throws Exception { + String accessPointArn = "arn:aws:s3:us-east-1:12345678910:accesspoint:foobar"; + + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .useArnRegionEnabled(false) + .build()) + .build(); + + assertThatThrownBy(() -> presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(accessPointArn).key("bar")))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("region"); + } +} \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3UtilitiesTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3UtilitiesTest.java index 93a90bcf708c..c5f4f21132b5 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3UtilitiesTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3UtilitiesTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -9,7 +9,7 @@ * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governings3 + * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ @@ -19,6 +19,7 @@ import java.net.MalformedURLException; import java.net.URI; +import java.net.URL; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -159,6 +160,19 @@ public void failIfRegionIsNotSetOnS3UtilitiesObject() throws MalformedURLExcepti S3Utilities.builder().build(); } + @Test + public void getUrlWithVersionId() { + S3Utilities utilities = S3Utilities.builder().region(Region.US_WEST_2).build(); + + assertThat(utilities.getUrl(b -> b.bucket("foo").key("bar").versionId("1")) + .toExternalForm()) + .isEqualTo("https://foo.s3.us-west-2.amazonaws.com/bar?versionId=1"); + + assertThat(utilities.getUrl(b -> b.bucket("foo").key("bar").versionId("@1")) + .toExternalForm()) + .isEqualTo("https://foo.s3.us-west-2.amazonaws.com/bar?versionId=%401"); + } + private static GetUrlRequest requestWithoutSpaces() { return GetUrlRequest.builder() .bucket("foo-bucket") diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/bucketaddressingsep/VirtualHostAddressingSepTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/bucketaddressingsep/VirtualHostAddressingSepTest.java index 446427931f34..aeac510402ac 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/bucketaddressingsep/VirtualHostAddressingSepTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/bucketaddressingsep/VirtualHostAddressingSepTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -35,12 +35,12 @@ import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.ListObjectsRequest; -import software.amazon.awssdk.testutils.service.http.MockHttpClient; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; @RunWith(Parameterized.class) public class VirtualHostAddressingSepTest { private static final String TEST_FILE_PATH = "VirtualAddressingSepTestCases.json"; - private MockHttpClient mockHttpClient; + private MockSyncHttpClient mockHttpClient; private TestCaseModel testCaseModel; public VirtualHostAddressingSepTest(TestCaseModel testCaseModel) { @@ -49,7 +49,7 @@ public VirtualHostAddressingSepTest(TestCaseModel testCaseModel) { @Before public void setup() { - mockHttpClient = new MockHttpClient(); + mockHttpClient = new MockSyncHttpClient(); } @Parameterized.Parameters diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingInputStreamTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingInputStreamTest.java index f29d81c6e957..33b075be7142 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingInputStreamTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumCalculatingInputStreamTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumResetsOnRetryTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumResetsOnRetryTest.java new file mode 100644 index 000000000000..e3fccda697c7 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumResetsOnRetryTest.java @@ -0,0 +1,148 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.checksums; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.core.async.AsyncResponseTransformer.toBytes; + +import com.github.tomakehurst.wiremock.client.ResponseDefinitionBuilder; +import com.github.tomakehurst.wiremock.client.WireMock; +import com.github.tomakehurst.wiremock.common.ConsoleNotifier; +import com.github.tomakehurst.wiremock.common.Slf4jNotifier; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import com.github.tomakehurst.wiremock.stubbing.Scenario; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.function.Consumer; +import org.apache.commons.lang3.ArrayUtils; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.ResponseBytes; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.utils.BinaryUtils; + +/** + * Verifies that the checksum validators are reset on an HTTP retry. 
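+ * Each test stubs a 500 for the first attempt and a successful response for the retry; if the running checksum state were not reset between attempts, the recomputed value would no longer match the ETag returned on the retried request.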
+ */ +public class ChecksumResetsOnRetryTest { + @Rule + public WireMockRule mockServer = new WireMockRule(new WireMockConfiguration().port(0) + .notifier(new ConsoleNotifier(true))); + + private S3Client s3Client; + + private S3AsyncClient s3AsyncClient; + + private byte[] body; + + private byte[] bodyWithTrailingChecksum; + + private String bodyEtag; + + @Before + public void setup() { + StaticCredentialsProvider credentials = StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid")); + s3Client = S3Client.builder() + .credentialsProvider(credentials) + .region(Region.US_WEST_2) + .endpointOverride(URI.create("http://localhost:" + mockServer.port())) + .build(); + + s3AsyncClient = S3AsyncClient.builder() + .credentialsProvider(credentials) + .region(Region.US_WEST_2) + .endpointOverride(URI.create("http://localhost:" + mockServer.port())) + .build(); + + body = "foo".getBytes(StandardCharsets.UTF_8); + String checksumAsHexString = "acbd18db4cc2f85cedef654fccc4a4d8"; + bodyEtag = "\"" + checksumAsHexString + "\""; + bodyWithTrailingChecksum = ArrayUtils.addAll(body, BinaryUtils.fromHex(checksumAsHexString)); + } + + @Test + public void syncPutObject_resetsChecksumOnRetry() { + stubSuccessAfterOneRetry(r -> r.withHeader("ETag", bodyEtag)); + + PutObjectResponse response = s3Client.putObject(r -> r.bucket("foo").key("bar"), RequestBody.fromBytes(body)); + assertThat(response.eTag()).isEqualTo(bodyEtag); + } + + @Test + public void asyncPutObject_resetsChecksumOnRetry() { + stubSuccessAfterOneRetry(r -> r.withHeader("ETag", bodyEtag)); + + PutObjectResponse response = s3AsyncClient.putObject(r -> r.bucket("foo").key("bar"), AsyncRequestBody.fromBytes(body)).join(); + assertThat(response.eTag()).isEqualTo(bodyEtag); + } + + @Test + public void syncGetObject_resetsChecksumOnRetry() { + stubSuccessAfterOneRetry(r -> r.withHeader("ETag", bodyEtag) + .withHeader("x-amz-transfer-encoding", "append-md5") + .withHeader("content-length", Integer.toString(bodyWithTrailingChecksum.length)) + .withBody(bodyWithTrailingChecksum)); + + ResponseBytes response = s3Client.getObjectAsBytes(r -> r.bucket("foo").key("bar")); + assertThat(response.response().eTag()).isEqualTo(bodyEtag); + assertThat(response.asByteArray()).isEqualTo(body); + } + + @Test + public void asyncGetObject_resetsChecksumOnRetry() { + stubSuccessAfterOneRetry(r -> r.withHeader("ETag", bodyEtag) + .withHeader("x-amz-transfer-encoding", "append-md5") + .withHeader("content-length", Integer.toString(bodyWithTrailingChecksum.length)) + .withBody(bodyWithTrailingChecksum)); + + ResponseBytes response = s3AsyncClient.getObject(r -> r.bucket("foo").key("bar"), toBytes()).join(); + assertThat(response.response().eTag()).isEqualTo(bodyEtag); + assertThat(response.asByteArray()).isEqualTo(body); + } + + private void stubSuccessAfterOneRetry(Consumer successfulResponseModifier) { + WireMock.reset(); + + String scenario = "stubSuccessAfterOneRetry"; + stubFor(any(anyUrl()) + .willReturn(aResponse().withStatus(500).withBody("")) + .inScenario(scenario) + .whenScenarioStateIs(Scenario.STARTED) + .willSetStateTo("200")); + + ResponseDefinitionBuilder successfulResponse = aResponse().withStatus(200).withBody(""); + successfulResponseModifier.accept(successfulResponse); + stubFor(any(anyUrl()) + .willReturn(successfulResponse) + .inScenario(scenario) + .whenScenarioStateIs("200")); + } +} + diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisherTest.java 
b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisherTest.java index a89b547044b5..935b656d8539 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisherTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumValidatingPublisherTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumsEnabledValidatorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumsEnabledValidatorTest.java index ee951a62a367..b2e26e837ffa 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumsEnabledValidatorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumsEnabledValidatorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -25,13 +25,17 @@ import static software.amazon.awssdk.services.s3.checksums.ChecksumConstant.SERVER_SIDE_ENCRYPTION_HEADER; import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.getObjectChecksumEnabledPerRequest; import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.getObjectChecksumEnabledPerResponse; -import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.putObjectChecksumEnabled; +import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.responseChecksumIsValid; +import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.shouldRecordChecksum; import static software.amazon.awssdk.services.s3.model.ServerSideEncryption.AWS_KMS; import org.junit.Test; import software.amazon.awssdk.core.ClientType; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.services.s3.S3Configuration; import software.amazon.awssdk.services.s3.model.GetObjectAclRequest; @@ -77,65 +81,86 @@ public void getObjectChecksumEnabledPerResponse_responseNotContainsChecksumHeade } @Test - public void putObjectChecksumEnabled_defaultTrue() { - assertThat(putObjectChecksumEnabled(PutObjectRequest.builder().build(), - ClientType.SYNC, - getSyncExecutionAttributes(), - SdkHttpFullResponse.builder().build())).isTrue(); + public void putObjectChecksumEnabled_defaultShouldRecord() { + assertThat(shouldRecordChecksum(PutObjectRequest.builder().build(), + ClientType.SYNC, + getSyncExecutionAttributes(), + emptyHttpRequest().build())).isTrue(); } @Test public void putObjectChecksumEnabled_nonPutObjectRequest_false() { - assertThat(putObjectChecksumEnabled(PutBucketAclRequest.builder().build(), - ClientType.SYNC, - 
getSyncExecutionAttributes(), - SdkHttpFullResponse.builder().build())).isFalse(); + assertThat(shouldRecordChecksum(PutBucketAclRequest.builder().build(), + ClientType.SYNC, + getSyncExecutionAttributes(), + emptyHttpRequest().build())).isFalse(); } @Test public void putObjectChecksumEnabled_disabledFromConfig_false() { ExecutionAttributes executionAttributes = getExecutionAttributesWithChecksumDisabled(); - assertThat(putObjectChecksumEnabled(PutObjectRequest.builder().build(), - ClientType.SYNC, - executionAttributes, - SdkHttpFullResponse.builder().build())).isFalse(); + assertThat(shouldRecordChecksum(PutObjectRequest.builder().build(), + ClientType.SYNC, + executionAttributes, + emptyHttpRequest().build())).isFalse(); } @Test public void putObjectChecksumEnabled_wrongClientType_false() { ExecutionAttributes executionAttributes = getSyncExecutionAttributes(); - assertThat(putObjectChecksumEnabled(PutObjectRequest.builder().build(), - ClientType.ASYNC, - executionAttributes, - SdkHttpFullResponse.builder().build())).isFalse(); + assertThat(shouldRecordChecksum(PutObjectRequest.builder().build(), + ClientType.ASYNC, + executionAttributes, + emptyHttpRequest().build())).isFalse(); } @Test public void putObjectChecksumEnabled_serverSideCustomerEncryption_false() { ExecutionAttributes executionAttributes = getSyncExecutionAttributes(); - SdkHttpFullResponse response = SdkHttpFullResponse.builder() - .putHeader(SERVER_SIDE_CUSTOMER_ENCRYPTION_HEADER, "test") - .build(); + SdkHttpRequest response = emptyHttpRequest().putHeader(SERVER_SIDE_CUSTOMER_ENCRYPTION_HEADER, "test") + .build(); - assertThat(putObjectChecksumEnabled(PutObjectRequest.builder().build(), - ClientType.SYNC, - executionAttributes, - response)).isFalse(); + assertThat(shouldRecordChecksum(PutObjectRequest.builder().build(), + ClientType.SYNC, + executionAttributes, + response)).isFalse(); } @Test public void putObjectChecksumEnabled_serverSideEncryption_false() { ExecutionAttributes executionAttributes = getSyncExecutionAttributes(); - SdkHttpFullResponse response = SdkHttpFullResponse.builder() - .putHeader(SERVER_SIDE_ENCRYPTION_HEADER, AWS_KMS.toString()) - .build(); + SdkHttpRequest response = emptyHttpRequest().putHeader(SERVER_SIDE_ENCRYPTION_HEADER, AWS_KMS.toString()) + .build(); - assertThat(putObjectChecksumEnabled(PutObjectRequest.builder().build(), - ClientType.SYNC, - executionAttributes, - response)).isFalse(); + assertThat(shouldRecordChecksum(PutObjectRequest.builder().build(), + ClientType.SYNC, + executionAttributes, + response)).isFalse(); + } + + @Test + public void responseChecksumIsValid_defaultTrue() { + assertThat(responseChecksumIsValid(SdkHttpResponse.builder().build())).isTrue(); + } + + @Test + public void responseChecksumIsValid_serverSideCustomerEncryption_false() { + SdkHttpResponse response = SdkHttpResponse.builder() + .putHeader(SERVER_SIDE_CUSTOMER_ENCRYPTION_HEADER, "test") + .build(); + + assertThat(responseChecksumIsValid(response)).isFalse(); + } + + @Test + public void responseChecksumIsValid_serverSideEncryption_false() { + SdkHttpResponse response = SdkHttpResponse.builder() + .putHeader(SERVER_SIDE_ENCRYPTION_HEADER, AWS_KMS.toString()) + .build(); + + assertThat(responseChecksumIsValid(response)).isFalse(); } private ExecutionAttributes getSyncExecutionAttributes() { @@ -157,4 +182,12 @@ private SdkHttpResponse getSdkHttpResponseWithChecksumHeader() { .build(); } + private SdkHttpRequest.Builder emptyHttpRequest() { + return SdkHttpFullRequest.builder() + 
.method(SdkHttpMethod.GET) + .protocol("https") + .host("localhost") + .port(80); + } + } diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/CompleteMultipartUploadFunctionalTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/CompleteMultipartUploadFunctionalTest.java new file mode 100644 index 000000000000..c9488c9ca634 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/CompleteMultipartUploadFunctionalTest.java @@ -0,0 +1,305 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.functionaltests; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.net.URI; +import java.util.concurrent.CompletionException; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; + +import org.junit.Rule; +import org.junit.Test; + +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3ClientBuilder; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; + +public class CompleteMultipartUploadFunctionalTest { + private static final URI HTTP_LOCALHOST_URI = URI.create("http://localhost:8080/"); + + @Rule + public WireMockRule wireMock = new WireMockRule(); + + private S3ClientBuilder getSyncClientBuilder() { + + return S3Client.builder() + .region(Region.US_EAST_1) + .endpointOverride(HTTP_LOCALHOST_URI) + .credentialsProvider( + StaticCredentialsProvider.create(AwsBasicCredentials.create("key", "secret"))); + } + + private S3AsyncClientBuilder getAsyncClientBuilder() { + return S3AsyncClient.builder() + .region(Region.US_EAST_1) + .endpointOverride(HTTP_LOCALHOST_URI) + .credentialsProvider( + StaticCredentialsProvider.create(AwsBasicCredentials.create("key", "secret"))); + + } + + @Test + public void completeMultipartUpload_syncClient_completeResponse() { + String location = "http://Example-Bucket.s3.amazonaws.com/Example-Object"; + String bucket = "Example-Bucket"; + String key = "Example-Object"; + String eTag = "\"3858f62230ac3c915f300c664312c11f-9\""; + String xmlResponseBody = String.format( + "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n" + + "<Location>%s</Location>\n" + + "<Bucket>%s</Bucket>\n" +
"<Key>%s</Key>\n" + + "<ETag>%s</ETag>\n" + + "</CompleteMultipartUploadResult>", location, bucket, key, eTag); + + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(xmlResponseBody))); + + S3Client s3Client = getSyncClientBuilder().build(); + + CompleteMultipartUploadResponse response = s3Client.completeMultipartUpload( + r -> r.bucket(bucket).key(key).uploadId("upload-id")); + + assertThat(response.location()).isEqualTo(location); + assertThat(response.bucket()).isEqualTo(bucket); + assertThat(response.key()).isEqualTo(key); + assertThat(response.eTag()).isEqualTo(eTag); + } + + @Test + public void completeMultipartUpload_asyncClient_completeResponse() { + String location = "http://Example-Bucket.s3.amazonaws.com/Example-Object"; + String bucket = "Example-Bucket"; + String key = "Example-Object"; + String eTag = "\"3858f62230ac3c915f300c664312c11f-9\""; + String xmlResponseBody = String.format( + "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n" + + "<Location>%s</Location>\n" + + "<Bucket>%s</Bucket>\n" + + "<Key>%s</Key>\n" + + "<ETag>%s</ETag>\n" + + "</CompleteMultipartUploadResult>", location, bucket, key, eTag); + + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(xmlResponseBody))); + + S3AsyncClient s3Client = getAsyncClientBuilder().build(); + + CompleteMultipartUploadResponse response = s3Client.completeMultipartUpload( + r -> r.bucket(bucket).key(key).uploadId("upload-id")).join(); + + assertThat(response.location()).isEqualTo(location); + assertThat(response.bucket()).isEqualTo(bucket); + assertThat(response.key()).isEqualTo(key); + assertThat(response.eTag()).isEqualTo(eTag); + } + + @Test + public void completeMultipartUpload_syncClient_errorInResponseBody_correctType() { + String bucket = "Example-Bucket"; + String key = "Example-Object"; + String xmlResponseBody = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<Error>\n" + + "<Code>InternalError</Code>\n" + + "<Message>We encountered an internal error. Please try again.</Message>\n" + + "<RequestId>656c76696e6727732072657175657374</RequestId>\n" + + "<HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>\n" + + "</Error>"; + + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(xmlResponseBody))); + + S3Client s3Client = getSyncClientBuilder().build(); + + assertThatThrownBy(() -> s3Client.completeMultipartUpload(r -> r.bucket(bucket) + .key(key) + .uploadId("upload-id"))) + .isInstanceOf(S3Exception.class); + } + + @Test + public void completeMultipartUpload_asyncClient_errorInResponseBody_correctType() { + String bucket = "Example-Bucket"; + String key = "Example-Object"; + String xmlResponseBody = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<Error>\n" + + "<Code>InternalError</Code>\n" + + "<Message>We encountered an internal error. Please try again.</Message>\n" + + "<RequestId>656c76696e6727732072657175657374</RequestId>\n" + + "<HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>\n" + + "</Error>"; + + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(xmlResponseBody))); + + S3AsyncClient s3Client = getAsyncClientBuilder().build(); + + assertThatThrownBy(() -> s3Client.completeMultipartUpload(r -> r.bucket(bucket) + .key(key) + .uploadId("upload-id")) + .join()) + .isInstanceOf(CompletionException.class) + .hasCauseInstanceOf(S3Exception.class); + } + + @Test + public void completeMultipartUpload_syncClient_errorInResponseBody_correctCode() { + String bucket = "Example-Bucket"; + String key = "Example-Object"; + String xmlResponseBody = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<Error>\n" + + "<Code>CustomError</Code>\n" + + "<Message>We encountered an internal error. 
Please try again.</Message>\n" + + "<RequestId>656c76696e6727732072657175657374</RequestId>\n" + + "<HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>\n" + + "</Error>"; + + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(xmlResponseBody))); + + S3Client s3Client = getSyncClientBuilder().build(); + + assertThatThrownBy(() -> s3Client.completeMultipartUpload(r -> r.bucket(bucket) + .key(key) + .uploadId("upload-id"))) + .satisfies(e -> assertThat(((S3Exception)e).awsErrorDetails().errorCode()).isEqualTo("CustomError")); + } + + @Test + public void completeMultipartUpload_asyncClient_errorInResponseBody_correctCode() { + String bucket = "Example-Bucket"; + String key = "Example-Object"; + String xmlResponseBody = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<Error>\n" + + "<Code>CustomError</Code>\n" + + "<Message>We encountered an internal error. Please try again.</Message>\n" + + "<RequestId>656c76696e6727732072657175657374</RequestId>\n" + + "<HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>\n" + + "</Error>"; + + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(xmlResponseBody))); + + S3AsyncClient s3Client = getAsyncClientBuilder().build(); + + assertThatThrownBy(() -> s3Client.completeMultipartUpload(r -> r.bucket(bucket) + .key(key) + .uploadId("upload-id")) + .join()) + .satisfies(e -> { + S3Exception s3Exception = (S3Exception) e.getCause(); + assertThat(s3Exception.awsErrorDetails().errorCode()).isEqualTo("CustomError"); + }); + } + + @Test + public void completeMultipartUpload_syncClient_errorInResponseBody_correctMessage() { + String bucket = "Example-Bucket"; + String key = "Example-Object"; + String xmlResponseBody = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<Error>\n" + + "<Code>CustomError</Code>\n" + + "<Message>Foo bar</Message>\n" + + "<RequestId>656c76696e6727732072657175657374</RequestId>\n" + + "<HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>\n" + + "</Error>"; + + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(xmlResponseBody))); + + S3Client s3Client = getSyncClientBuilder().build(); + + assertThatThrownBy(() -> s3Client.completeMultipartUpload(r -> r.bucket(bucket) + .key(key) + .uploadId("upload-id"))) + .satisfies(e -> assertThat(((S3Exception)e).awsErrorDetails().errorMessage()).isEqualTo("Foo bar")); + } + + @Test + public void completeMultipartUpload_asyncClient_errorInResponseBody_correctMessage() { + String bucket = "Example-Bucket"; + String key = "Example-Object"; + String xmlResponseBody = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<Error>\n" + + "<Code>CustomError</Code>\n" + + "<Message>Foo bar</Message>\n" + + "<RequestId>656c76696e6727732072657175657374</RequestId>\n" + + "<HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>\n" + + "</Error>"; + + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(xmlResponseBody))); + + S3AsyncClient s3Client = getAsyncClientBuilder().build(); + + assertThatThrownBy(() -> s3Client.completeMultipartUpload(r -> r.bucket(bucket) + .key(key) + .uploadId("upload-id")) + .join()) + .satisfies(e -> { + S3Exception s3Exception = (S3Exception) e.getCause(); + assertThat(s3Exception.awsErrorDetails().errorMessage()).isEqualTo("Foo bar"); + }); + } + + @Test + public void completeMultipartUpload_syncClient_errorInResponseBody_invalidErrorXml() { + String bucket = "Example-Bucket"; + String key = "Example-Object"; + String xmlResponseBody = "\n" + + "\n" + + "" + + ""; + + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(xmlResponseBody))); + + S3Client s3Client = getSyncClientBuilder().build(); + + assertThatThrownBy(() -> s3Client.completeMultipartUpload(r -> r.bucket(bucket) + .key(key) + .uploadId("upload-id"))) + .isInstanceOf(S3Exception.class); + } + + @Test + public void 
completeMultipartUpload_asyncClient_errorInResponseBody_invalidErrorXml() { + String bucket = "Example-Bucket"; + String key = "Example-Object"; + String xmlResponseBody = "\n" + + "\n" + + "" + + ""; + + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(xmlResponseBody))); + + S3AsyncClient s3Client = getAsyncClientBuilder().build(); + + assertThatThrownBy(() -> s3Client.completeMultipartUpload(r -> r.bucket(bucket) + .key(key) + .uploadId("upload-id")) + .join()) + .isInstanceOf(CompletionException.class) + .hasCauseInstanceOf(S3Exception.class); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/GetBucketPolicyFunctionalTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/GetBucketPolicyFunctionalTest.java new file mode 100644 index 000000000000..6933e8726648 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/GetBucketPolicyFunctionalTest.java @@ -0,0 +1,93 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.functionaltests; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.net.URI; +import java.util.concurrent.CompletionException; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; + +import org.junit.Rule; +import org.junit.Test; + +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3ClientBuilder; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.GetBucketPolicyResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; + +public class GetBucketPolicyFunctionalTest { + private static final URI HTTP_LOCALHOST_URI = URI.create("http://localhost:8080/"); + private static final String EXAMPLE_BUCKET = "Example-Bucket"; + private static final String EXAMPLE_POLICY = + "{\"Version\":\"2012-10-17\",\"Id\":\"Policy1234\"," + + "\"Statement\":[{\"Sid\":\"Stmt1578431058575\",\"Effect\":\"Allow\"," + + "\"Principal\":{\"AWS\":\"arn:aws:iam::1234567890:root\"},\"Action\":\"s3:*\"," + + "\"Resource\":\"arn:aws:s3:::dummy-resource/*\"}]}"; + + @Rule + public WireMockRule wireMock = new WireMockRule(); + + private S3ClientBuilder getSyncClientBuilder() { + 
+ return S3Client.builder() + .region(Region.US_EAST_1) + .endpointOverride(HTTP_LOCALHOST_URI) + .credentialsProvider( + StaticCredentialsProvider.create(AwsBasicCredentials.create("key", "secret"))); + } + + private S3AsyncClientBuilder getAsyncClientBuilder() { + return S3AsyncClient.builder() + .region(Region.US_EAST_1) + .endpointOverride(HTTP_LOCALHOST_URI) + .credentialsProvider( + StaticCredentialsProvider.create(AwsBasicCredentials.create("key", "secret"))); + + } + + @Test + public void getBucketPolicy_syncClient() { + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(EXAMPLE_POLICY))); + + S3Client s3Client = getSyncClientBuilder().build(); + + GetBucketPolicyResponse response = s3Client.getBucketPolicy(r -> r.bucket(EXAMPLE_BUCKET)); + assertThat(response.policy()).isEqualTo(EXAMPLE_POLICY); + } + + @Test + public void getBucketPolicy_asyncClient() { + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(EXAMPLE_POLICY))); + + S3AsyncClient s3Client = getAsyncClientBuilder().build(); + + GetBucketPolicyResponse response = s3Client.getBucketPolicy(r -> r.bucket(EXAMPLE_BUCKET)).join(); + assertThat(response.policy()).isEqualTo(EXAMPLE_POLICY); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolverTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolverTest.java new file mode 100644 index 000000000000..b37df4beea9d --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolverTest.java @@ -0,0 +1,563 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.utils.http.SdkHttpUtils.urlEncode; + +import java.net.URI; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.utils.InterceptorTestUtils; + +public class S3AccessPointEndpointResolverTest { + + S3AccessPointEndpointResolver endpointResolver; + + @Before + public void setUp() { + endpointResolver = S3AccessPointEndpointResolver.create(); + } + + @Test + public void accesspointArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + S3Configuration.builder()); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint:foobar", + "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + S3Configuration.builder()); + } + + @Test + public void accesspointArn_futureUnknownRegion_US_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws:s3:us-future-1:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.us-future-1.amazonaws.com", + Region.of("us-future-1"), + S3Configuration.builder(), + Region.of("us-future-1")); + } + + @Test + public void accesspointArn_futureUnknownRegion_crossRegion_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws:s3:us-future-2:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.us-future-2.amazonaws.com", + Region.of("us-future-2"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("us-future-1")); + } + + @Test + public void accesspointArn_futureUnknownRegion_CN_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws-cn:s3:cn-future-1:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.cn-future-1.amazonaws.com.cn", + Region.of("cn-future-1"), + S3Configuration.builder(), + Region.of("cn-future-1")); + } + + @Test + public void accesspointArn_futureUnknownRegionAndPartition_defaultsToAws() { + verifyAccesspointArn("http", + "arn:aws:s3:unknown:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.unknown.amazonaws.com", + Region.of("unknown"), + S3Configuration.builder(), + Region.of("unknown")); + } + + @Test + public void malformedArn_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:foobar", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("ARN"); + } + + @Test + public void unsupportedArn_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:unsupported:foobar", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("ARN"); + } + + @Test + public void accesspointArn_invalidPartition_throwsIllegalArgumentException() { + 
assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:bar:s3:us-east-1:12345678910:accesspoint:foobar", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("bar"); + } + + @Test + public void bucketArn_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:bucket_name:foobar", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("bucket parameter"); + } + + + @Test + public void accesspointArn_withSlashes_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + S3Configuration.builder()); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + S3Configuration.builder()); + } + + @Test + public void accesspointArn_withDualStackEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.dualstack.us-east-1.amazonaws.com", + S3Configuration.builder().dualstackEnabled(true)); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.dualstack.us-east-1.amazonaws.com", + S3Configuration.builder().dualstackEnabled(true)); + } + + @Test + public void accesspointArn_withCnPartition_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws-cn:s3:cn-north-1:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.cn-north-1.amazonaws.com.cn", + Region.of("cn-north-1"), + S3Configuration.builder(), + Region.of("cn-north-1")); + verifyAccesspointArn("https", + "arn:aws-cn:s3:cn-north-1:12345678910:accesspoint:foobar", + "https://foobar-12345678910.s3-accesspoint.cn-north-1.amazonaws.com.cn", + Region.of("cn-north-1"), + S3Configuration.builder(), + Region.of("cn-north-1")); + } + + @Test + public void accesspointArn_withDifferentPartition_useArnRegionEnabled_shouldThrowIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws-cn:s3:cn-north-1:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.cn-north-1.amazonaws.com.cn", + Region.of("cn-north-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("partition"); + } + + @Test + public void accesspointArn_withFipsRegionPrefix_noFipsInArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("fips-us-east-1")); + } + + @Test + public void accesspointArn_withFipsRegionPrefix_FipsInArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + 
S3Configuration.builder(), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder(), + Region.of("fips-us-east-1")); + } + + @Test + public void accesspointArn_withFipsRegionPrefix_noFipsInArn_useArnRegionEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("fips-us-east-1")); + } + + + @Test + public void accesspointArn_withFipsRegionPrefix_FipsInArn_useArnRegionEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("fips-us-east-1")); + } + + + + @Test + public void accesspointArn_withFipsRegionPrefix_ArnRegionNotMatches_shouldThrowIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("fips-us-gov-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("The region field of the ARN being passed as a bucket parameter to an S3 operation does not match the region the client was configured with."); + assertThatThrownBy(() -> verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("fips-us-gov-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("The region field of the ARN being passed as a bucket parameter to an S3 operation does not match the region the client was configured with."); + } + + @Test + public void accesspointArn_withFipsRegionPrefix_noFipsInArn_DualstackEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().dualstackEnabled(true), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().dualstackEnabled(true), + Region.of("fips-us-east-1")); + } + + @Test + public void 
accesspointArn_withFipsRegionPrefix_FipsInArn_DualStackEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().dualstackEnabled(true), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().dualstackEnabled(true), + Region.of("fips-us-east-1")); + } + + @Test + public void accesspointArn_withFipsRegionSuffix_noFipsinArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("us-east-1-fips")); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("us-east-1-fips")); + } + + @Test + public void accesspointArn_noFipsRegionPrefix_FipsInArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder(), + Region.of("us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder(), + Region.of("us-east-1")); + } + + @Test + public void accesspointArn_noFipsRegionPrefix_FipsInArn_useArnRegionEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("us-east-1")); + } + + @Test + public void accesspointArn_noFipsRegionPrefix_FipsInArn_useArnRegionEnabled_DualstackEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true).dualstackEnabled(true), + Region.of("us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true).dualstackEnabled(true), + Region.of("us-east-1")); + } + + @Test + public void accesspointArn_withAccelerateEnabled_shouldThrowIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + 
"http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().accelerateModeEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accelerate"); + } + + + @Test + public void accesspointArn_withPathStyleAddressingEnabled_shouldThrowIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().pathStyleAccessEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("path style"); + } + + @Test + public void outpostAccessPointArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com", + Region.of("us-west-2"), + S3Configuration.builder(), + Region.of("us-west-2")); + + verifyAccesspointArn("https", + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "https://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com", + Region.of("us-west-2"), + S3Configuration.builder(), + Region.of("us-west-2")); + } + + @Test + public void outpostAccessPointArn_futureUnknownRegion_US_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-future-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-future-2.amazonaws.com", + Region.of("us-future-2"), + S3Configuration.builder(), + Region.of("us-future-2")); + } + + @Test + public void outpostAccessPointArn_futureUnknownRegion_crossRegion_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-future-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-future-2.amazonaws.com", + Region.of("us-future-2"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("us-future-1")); + } + + @Test + public void outpostAccessPointArn_futureUnknownRegion_CN_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws-cn:s3-outposts:cn-future-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.cn-future-1.amazonaws.com.cn", + Region.of("cn-future-1"), + S3Configuration.builder(), + Region.of("cn-future-1")); + } + + @Test + public void outpostAccessPointArn_futureUnknownRegionAndPartition_defaultsToAws() { + verifyAccesspointArn("http", + "arn:aws:s3-outposts:unknown:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.unknown.amazonaws.com", + Region.of("unknown"), + S3Configuration.builder(), + Region.of("unknown")); + } + + @Test + public void outpostAccessPointArn_invalidPartition_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:bar:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + 
.hasMessageContaining("bar"); + } + + @Test + public void outpostAccessPointArn_differentRegionWithoutUseArnRegion_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:bar:aws-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("region"); + } + + @Test + public void outpostAccessPointArn_fipsEnabled_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + null, + Region.of("us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("fips-us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("FIPS"); + } + + @Test + public void outpostAccessPointArn_dualStackEnabled_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + null, + Region.of("us-east-1"), + S3Configuration.builder().dualstackEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("dualstack"); + } + + @Test + public void outpostAccessPointArn_accelerateEnabled_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + null, + Region.of("us-east-1"), + S3Configuration.builder().accelerateModeEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accelerate"); + } + + @Test + public void outpostAccessPointArn_ArnMissingAccesspointName_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456", + null, + Region.of("us-east-1"), + S3Configuration.builder().accelerateModeEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Invalid format"); + } + + private void verifyAccesspointArn(String protocol, String accessPointArn, String expectedEndpoint, + S3Configuration.Builder builder) { + verifyAccesspointArn(protocol, accessPointArn, expectedEndpoint, Region.US_EAST_1, builder, Region.US_EAST_1); + } + + private void verifyAccesspointArn(String protocol, String accessPointArn, String expectedEndpoint, + Region expectedSigningRegion, + S3Configuration.Builder configBuilder, Region region) { + String key = "test-key"; + + URI customUri = URI.create(String.format("%s://s3-test.com/%s/%s", protocol, urlEncode(accessPointArn), key)); + URI expectedUri = URI.create(String.format("%s/%s", expectedEndpoint, key)); + PutObjectRequest putObjectRequest = PutObjectRequest.builder() + .bucket(accessPointArn) + .key(key) + .build(); + + S3EndpointResolverContext context = S3EndpointResolverContext.builder() + .request(InterceptorTestUtils.sdkHttpRequest(customUri)) + .originalRequest(putObjectRequest) + .region(region) + .serviceConfiguration(configBuilder.build()) + .build(); + + ConfiguredS3SdkHttpRequest sdkHttpFullRequest = endpointResolver.applyEndpointConfiguration(context); + + assertThat(sdkHttpFullRequest.sdkHttpRequest().getUri()).isEqualTo(expectedUri); + 
assertThat(sdkHttpFullRequest.signingRegionModification()).isPresent(); + assertThat(sdkHttpFullRequest.signingRegionModification().get()).isEqualTo(expectedSigningRegion); + assertSigningRegion(accessPointArn, sdkHttpFullRequest); + } + + private void assertSigningRegion(String accessPointArn, ConfiguredS3SdkHttpRequest sdkHttpFullRequest) { + if (accessPointArn.contains(":s3-outposts")) { + String expectedSigningName = "s3-outposts"; + assertThat(sdkHttpFullRequest.signingServiceModification()).isPresent(); + assertThat(sdkHttpFullRequest.signingServiceModification().get()).isEqualTo(expectedSigningName); + } else { + assertThat(sdkHttpFullRequest.signingServiceModification()).isEmpty(); + } + } + +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolverTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolverTest.java new file mode 100644 index 000000000000..9acfa4032800 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolverTest.java @@ -0,0 +1,159 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.net.URI; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.ListBucketsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.utils.InterceptorTestUtils; + +public class S3BucketEndpointResolverTest { + + S3BucketEndpointResolver endpointResolver; + + @Before + public void setUp() throws Exception { + endpointResolver = S3BucketEndpointResolver.create(); + } + + @Test + public void traditionalEndpoint_shouldNotConvertEndpoint() { + verifyEndpoint("http", "http://s3-test.com", S3Configuration.builder()); + verifyEndpoint("https", "https://s3-test.com", S3Configuration.builder()); + } + + @Test + public void accelerateEnabled_shouldConvertToAccelerateEndpoint() { + verifyEndpoint("http", + "http://s3-accelerate.amazonaws.com", + S3Configuration.builder().accelerateModeEnabled(true)); + verifyEndpoint("https", "https://s3-accelerate.amazonaws.com", + S3Configuration.builder().accelerateModeEnabled(true)); + } + + @Test + public void 
bothAccelerateDualstackEnabled_shouldConvertToAccelerateDualstackEndpoint() { + verifyEndpoint("http", + "http://s3-accelerate.dualstack.amazonaws.com", + S3Configuration.builder().accelerateModeEnabled(true).dualstackEnabled(true) + ); + verifyEndpoint("https", + "https://s3-accelerate.dualstack.amazonaws.com", + S3Configuration.builder().accelerateModeEnabled(true).dualstackEnabled(true)); + } + + @Test + public void pathStyleAccessEnabled_shouldNotConvertToDnsEndpoint() { + verifyEndpoint("http", + "http://s3-test.com", + S3Configuration.builder().pathStyleAccessEnabled(true)); + verifyEndpoint("https", + "https://s3-test.com", + S3Configuration.builder().pathStyleAccessEnabled(true)); + } + + @Test + public void dualstackEnabled_shouldConvertToDualstackEndpoint() { + verifyEndpoint("http", "http://s3.dualstack.us-east-1.amazonaws.com", + S3Configuration.builder().dualstackEnabled(true)); + verifyEndpoint("https", "https://s3.dualstack.us-east-1.amazonaws.com", + S3Configuration.builder().dualstackEnabled(true)); + } + + @Test + public void accelerateEnabled_ListBucketRequest_shouldNotConvertToAccelerateEndpoint() { + verifyAccelerateDisabledOperationsEndpointNotConverted(ListBucketsRequest.builder().build()); + } + + @Test + public void accelerateEnabled_CreateBucketsRequest_shouldNotConvertToAccelerateEndpoint() { + verifyAccelerateDisabledOperationsEndpointNotConverted(CreateBucketRequest.builder().build()); + } + + @Test + public void accelerateEnabled_DeleteBucketRequest_shouldNotConvertToAccelerateEndpoint() { + verifyAccelerateDisabledOperationsEndpointNotConverted(DeleteBucketRequest.builder().build()); + } + + @Test + public void virtualStyle_shouldConvertToDnsEndpoint() { + verifyVirtualStyleConvertDnsEndpoint("https"); + verifyVirtualStyleConvertDnsEndpoint("http"); + } + + private void verifyVirtualStyleConvertDnsEndpoint(String protocol) { + String bucketName = "test-bucket"; + String key = "test-key"; + URI customUri = URI.create(String.format("%s://s3-test.com/%s/%s", protocol, bucketName, key)); + URI expectedUri = URI.create(String.format("%s://%s.s3.dualstack.us-east-1.amazonaws.com/%s", protocol, + bucketName, key)); + S3EndpointResolverContext context = S3EndpointResolverContext.builder() + .request(InterceptorTestUtils.sdkHttpRequest(customUri)) + .originalRequest(ListObjectsV2Request.builder().bucket(bucketName).build()) + .region(Region.US_EAST_1) + .serviceConfiguration(S3Configuration.builder().dualstackEnabled(true).build()) + .build(); + ConfiguredS3SdkHttpRequest sdkHttpFullRequest = endpointResolver.applyEndpointConfiguration(context); + + assertThat(sdkHttpFullRequest.sdkHttpRequest().getUri()).isEqualTo(expectedUri); + } + + private void verifyAccelerateDisabledOperationsEndpointNotConverted(SdkRequest request) { + URI customUri = URI.create("http://s3-test.com"); + S3EndpointResolverContext context = S3EndpointResolverContext.builder() + .request(InterceptorTestUtils.sdkHttpRequest(customUri)) + .originalRequest(request) + .region(Region.US_EAST_1) + .serviceConfiguration(S3Configuration.builder().accelerateModeEnabled(true).build()) + .build(); + ConfiguredS3SdkHttpRequest sdkHttpFullRequest = endpointResolver.applyEndpointConfiguration(context); + assertThat(sdkHttpFullRequest.sdkHttpRequest().getUri()).isEqualTo(customUri); + } + + private void verifyEndpoint(String protocol, + String expectedEndpoint, + S3Configuration.Builder configBuilder) { + String bucket = "test-bucket"; + String key = "test-key"; + URI customUri = 
URI.create(String.format("%s://s3-test.com/%s/%s", protocol, bucket, key)); + URI expectedUri = URI.create(String.format("%s/%s/%s", expectedEndpoint, bucket, key)); + + S3EndpointResolverContext context = S3EndpointResolverContext.builder() + .request(InterceptorTestUtils.sdkHttpRequest(customUri)) + .originalRequest(PutObjectRequest.builder().build()) + .region(Region.US_EAST_1) + .serviceConfiguration(configBuilder.build()) + .build(); + + ConfiguredS3SdkHttpRequest sdkHttpFullRequest = endpointResolver.applyEndpointConfiguration(context); + assertThat(sdkHttpFullRequest.sdkHttpRequest().getUri()).isEqualTo(expectedUri); + } + +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContextTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContextTest.java new file mode 100644 index 000000000000..9b846ad9cda2 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContextTest.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; + +public class S3EndpointResolverContextTest { + + @Test + public void toBuilder_minimal() { + S3EndpointResolverContext context = S3EndpointResolverContext.builder().build(); + assertFalse(context.endpointOverridden()); + assertNull(context.originalRequest()); + assertNull(context.region()); + assertNull(context.serviceConfiguration()); + assertNull(context.request()); + } + + @Test + public void toBuilder_maximal() { + S3Configuration serviceConfiguration = S3Configuration.builder().build(); + SdkHttpFullRequest httpRequest = SdkHttpFullRequest.builder().protocol("http").host("host").method(SdkHttpMethod.POST).build(); + S3EndpointResolverContext context = S3EndpointResolverContext.builder() + .endpointOverridden(true) + .originalRequest(PutObjectRequest.builder().build()) + .region(Region.US_EAST_1) + .serviceConfiguration(serviceConfiguration) + .request(httpRequest) + .build(); + assertTrue(context.endpointOverridden()); + assertThat(context.originalRequest()).isInstanceOf(PutObjectRequest.class); + assertThat(context.region()).isEqualTo(Region.US_EAST_1); + assertThat(context.serviceConfiguration()).isEqualTo(serviceConfiguration); + assertThat(context.request()).isEqualTo(httpRequest); + } +} diff --git 
a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactoryTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactoryTest.java new file mode 100644 index 000000000000..ad5f86b8dd06 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactoryTest.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.Test; + +public class S3EndpointResolverFactoryTest { + + @Test + public void nullBucketName_returnsBucketEndpointResolver() { + assertThat(S3EndpointResolverFactory.getEndpointResolver(null)).isInstanceOf(S3BucketEndpointResolver.class); + } + + @Test + public void emptyBucketName_returnsBucketEndpointResolver() { + String bucketName = ""; + assertThat(S3EndpointResolverFactory.getEndpointResolver(bucketName)).isInstanceOf(S3BucketEndpointResolver.class); + } + + @Test + public void nonAccessPointBucketName_returnsBucketEndpointResolver() { + String bucketName = "test-bucket"; + assertThat(S3EndpointResolverFactory.getEndpointResolver(bucketName)).isInstanceOf(S3BucketEndpointResolver.class); + } + + @Test + public void accessPointBucketName_returnsAccessPointEndpointResolver() { + String bucketName = "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar"; + assertThat(S3EndpointResolverFactory.getEndpointResolver(bucketName)).isInstanceOf(S3AccessPointEndpointResolver.class); + } + +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtilsTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtilsTest.java new file mode 100644 index 000000000000..c2983aef2d12 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtilsTest.java @@ -0,0 +1,124 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.net.URI; +import org.junit.Test; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.model.ListBucketsRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; + +public class S3EndpointUtilsTest { + + @Test + public void removesFipsIfNeeded() { + assertThat(S3EndpointUtils.removeFipsIfNeeded("fips-us-east-1")).isEqualTo("us-east-1"); + assertThat(S3EndpointUtils.removeFipsIfNeeded("us-east-1-fips")).isEqualTo("us-east-1"); + } + + @Test + public void isFipsRegion() { + assertTrue(S3EndpointUtils.isFipsRegion("fips-us-east-1")); + assertTrue(S3EndpointUtils.isFipsRegion("us-east-1-fips")); + assertFalse(S3EndpointUtils.isFipsRegion("us-fips-1")); + } + + @Test + public void isFipsRegionProvided() { + assertTrue(S3EndpointUtils.isFipsRegionProvided("fips-us-east-1", "us-east-1", false)); + assertFalse(S3EndpointUtils.isFipsRegionProvided("us-east-1", "fips-us-east-1", false)); + assertTrue(S3EndpointUtils.isFipsRegionProvided("us-east-1", "us-east-1-fips", true)); + assertFalse(S3EndpointUtils.isFipsRegionProvided("us-east-1-fips", "us-east-1", true)); + } + + @Test + public void isAccelerateEnabled() { + assertFalse(S3EndpointUtils.isAccelerateEnabled(S3Configuration.builder().build())); + assertFalse(S3EndpointUtils.isAccelerateEnabled(null)); + assertFalse(S3EndpointUtils.isAccelerateEnabled(S3Configuration.builder().accelerateModeEnabled(false).build())); + assertTrue(S3EndpointUtils.isAccelerateEnabled(S3Configuration.builder().accelerateModeEnabled(true).build())); + } + + @Test + public void isAccelerateSupported() { + assertFalse(S3EndpointUtils.isAccelerateSupported(ListBucketsRequest.builder().build())); + assertTrue(S3EndpointUtils.isAccelerateSupported(PutObjectRequest.builder().build())); + } + + @Test + public void accelerateEndpoint() { + assertThat(S3EndpointUtils.accelerateEndpoint(S3Configuration.builder().build(), + "domain", + "https")) + .isEqualTo(URI.create("https://s3-accelerate.domain")); + + assertThat(S3EndpointUtils.accelerateEndpoint(S3Configuration.builder().dualstackEnabled(true).build(), + "domain", + "https")) + .isEqualTo(URI.create("https://s3-accelerate.dualstack.domain")); + } + + @Test + public void isDualstackEnabled() { + assertFalse(S3EndpointUtils.isDualstackEnabled(S3Configuration.builder().build())); + assertFalse(S3EndpointUtils.isDualstackEnabled(null)); + assertFalse(S3EndpointUtils.isDualstackEnabled(S3Configuration.builder().dualstackEnabled(false).build())); + assertTrue(S3EndpointUtils.isDualstackEnabled(S3Configuration.builder().dualstackEnabled(true).build())); + } + + @Test + public void dualStackEndpoint() { + assertThat(S3EndpointUtils.dualstackEndpoint("id", "domain", "https")) + .isEqualTo(URI.create("https://s3.dualstack.id.domain")); + } + + @Test + public void isPathstyleAccessEnabled() { + assertFalse(S3EndpointUtils.isPathStyleAccessEnabled(S3Configuration.builder().build())); + assertFalse(S3EndpointUtils.isPathStyleAccessEnabled(null)); + assertFalse(S3EndpointUtils.isPathStyleAccessEnabled(S3Configuration.builder().pathStyleAccessEnabled(false).build())); + 
assertTrue(S3EndpointUtils.isPathStyleAccessEnabled(S3Configuration.builder().pathStyleAccessEnabled(true).build())); + } + + @Test + public void isArnRegionEnabled() { + assertFalse(S3EndpointUtils.isArnRegionEnabled(S3Configuration.builder().build())); + assertFalse(S3EndpointUtils.isArnRegionEnabled(null)); + assertFalse(S3EndpointUtils.isArnRegionEnabled(S3Configuration.builder().useArnRegionEnabled(false).build())); + assertTrue(S3EndpointUtils.isArnRegionEnabled(S3Configuration.builder().useArnRegionEnabled(true).build())); + } + + @Test + public void changeToDnsEndpoint() { + SdkHttpRequest.Builder mutableRequest = SdkHttpFullRequest.builder().host("s3").encodedPath("/test-bucket"); + S3EndpointUtils.changeToDnsEndpoint(mutableRequest, "test-bucket"); + assertThat(mutableRequest.host()).isEqualTo("test-bucket.s3"); + assertThat(mutableRequest.encodedPath()).isEqualTo(""); + } + + @Test + public void isArn() { + assertFalse(S3EndpointUtils.isArn("bucketName")); + assertFalse(S3EndpointUtils.isArn("test:arn:")); + assertTrue(S3EndpointUtils.isArn("arn:test")); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/AsyncChecksumValidationInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/AsyncChecksumValidationInterceptorTest.java index 97cc088242f2..b8376597ca9f 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/AsyncChecksumValidationInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/AsyncChecksumValidationInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -39,6 +39,7 @@ import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.InterceptorContext; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.SdkHttpRequest; @@ -169,10 +170,18 @@ public void afterUnmarshalling_putObjectRequest_shouldValidateChecksum_throwExce .build(); Context.AfterUnmarshalling afterUnmarshallingContext = - InterceptorTestUtils.afterUnmarshallingContext(putObjectRequest, sdkHttpRequest, response, sdkHttpResponse); + InterceptorContext.builder() + .request(putObjectRequest) + .httpRequest(sdkHttpRequest) + .response(response) + .httpResponse(sdkHttpResponse) + .asyncRequestBody(AsyncRequestBody.fromString("Test")) + .build(); - assertThatThrownBy(() -> interceptor.afterUnmarshalling(afterUnmarshallingContext, getExecutionAttributesWithChecksum())) - .hasMessage("Data read has a different checksum than expected."); + ExecutionAttributes attributes = getExecutionAttributesWithChecksum(); + interceptor.modifyAsyncHttpContent(afterUnmarshallingContext, attributes); + assertThatThrownBy(() -> interceptor.afterUnmarshalling(afterUnmarshallingContext, attributes)) + .hasMessageContaining("Data read has a different checksum than expected."); } @Test diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CreateBucketInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CreateBucketInterceptorTest.java index 99583344a225..cf9f5014e351 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CreateBucketInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CreateBucketInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CreateMultipartUploadRequestInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CreateMultipartUploadRequestInterceptorTest.java index 13c034534fa4..586e62fa11e9 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CreateMultipartUploadRequestInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/CreateMultipartUploadRequestInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/DecodeUrlEncodedResponseInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/DecodeUrlEncodedResponseInterceptorTest.java index 8686ba0978d3..7f5a32dcae17 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/DecodeUrlEncodedResponseInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/DecodeUrlEncodedResponseInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -31,12 +31,17 @@ import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.CommonPrefix; import software.amazon.awssdk.services.s3.model.EncodingType; +import software.amazon.awssdk.services.s3.model.HeadObjectResponse; +import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse; +import software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse; import software.amazon.awssdk.services.s3.model.ListObjectsResponse; import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.MultipartUpload; +import software.amazon.awssdk.services.s3.model.ObjectVersion; import software.amazon.awssdk.services.s3.model.S3Object; /** @@ -50,6 +55,13 @@ */ public class DecodeUrlEncodedResponseInterceptorTest { private static final String TEST_URL_ENCODED = "foo+%3D+bar+baz+%CE%B1+%CE%B2+%F0%9F%98%8A"; + private static final String TEST_URL_ENCODED_DELIMITER = "foo+%3D+bar+baz+%CE%B1+%CE%B2+%F0%9F%98%8A+delimiter"; + private static final String TEST_URL_ENCODED_NEXT_MARKER = "foo+%3D+bar+baz+%CE%B1+%CE%B2+%F0%9F%98%8A+nextmarker"; + + private static final String TEST_URL_ENCODED_MARKER = "foo+%3D+bar+baz+%CE%B1+%CE%B2+%F0%9F%98%8A+marker"; + private static final String TEST_URL_ENCODED_PREFIX = "foo+%3D+bar+baz+%CE%B1+%CE%B2+%F0%9F%98%8A+prefix"; + private static final String TEST_URL_ENCODED_KEY = "foo+%3D+bar+baz+%CE%B1+%CE%B2+%F0%9F%98%8A+key"; + private static final String TEST_URL_ENCODED_START_AFTER = "foo+%3D+bar+baz+%CE%B1+%CE%B2+%F0%9F%98%8A+startafter"; // foo = bar baz α β 😊 private static final String TEST_URL_DECODED = "foo = bar baz α β \uD83D\uDE0A"; @@ -62,22 +74,53 @@ public class DecodeUrlEncodedResponseInterceptorTest { S3Object.builder().key(TEST_URL_ENCODED).build() ); + private static final List COMMON_PREFIXES = Arrays.asList(CommonPrefix.builder() + .prefix(TEST_URL_ENCODED_PREFIX) + .build()); private static final ListObjectsResponse V1_TEST_ENCODED_RESPONSE = ListObjectsResponse.builder() - .encodingType(EncodingType.URL) - .delimiter(TEST_URL_ENCODED) - .nextMarker(TEST_URL_ENCODED) - .prefix(TEST_URL_ENCODED) - .marker(TEST_URL_ENCODED) - .contents(TEST_CONTENTS) - .build(); + .encodingType(EncodingType.URL) + .delimiter(TEST_URL_ENCODED_DELIMITER) + .nextMarker(TEST_URL_ENCODED_NEXT_MARKER) + 
.prefix(TEST_URL_ENCODED_PREFIX) + .marker(TEST_URL_ENCODED_MARKER) + .contents(TEST_CONTENTS) + .commonPrefixes(COMMON_PREFIXES) + .build(); private static final ListObjectsV2Response V2_TEST_ENCODED_RESPONSE = ListObjectsV2Response.builder() - .encodingType(EncodingType.URL) - .delimiter(TEST_URL_ENCODED) - .prefix(TEST_URL_ENCODED) - .startAfter(TEST_URL_ENCODED) - .contents(TEST_CONTENTS) - .build(); + .encodingType(EncodingType.URL) + .delimiter(TEST_URL_ENCODED_DELIMITER) + .prefix(TEST_URL_ENCODED_PREFIX) + .startAfter(TEST_URL_ENCODED_START_AFTER) + .contents(TEST_CONTENTS) + .commonPrefixes(COMMON_PREFIXES) + .build(); + + private static final String TEST_URL_ENCODED_NEXT_KEY_MARKER = TEST_URL_ENCODED + "+nextKeyMarker"; + private static final String TEST_URL_ENCODED_KEY_MARKER = TEST_URL_ENCODED + "+keyMarker"; + private static final ListObjectVersionsResponse TEST_LIST_OBJECT_VERSION_RESPONSE = ListObjectVersionsResponse.builder() + .encodingType(EncodingType.URL) + .delimiter(TEST_URL_ENCODED_DELIMITER) + .prefix(TEST_URL_ENCODED_PREFIX) + .keyMarker(TEST_URL_ENCODED_KEY_MARKER) + .nextKeyMarker(TEST_URL_ENCODED_NEXT_KEY_MARKER) + .commonPrefixes(COMMON_PREFIXES) + .versions(ObjectVersion.builder() + .key(TEST_URL_ENCODED_KEY) + .build()) + .build(); + + + private static final ListMultipartUploadsResponse TEST_LIST_MULTIPART_UPLOADS_RESPONSE = + ListMultipartUploadsResponse.builder() + .encodingType(EncodingType.URL) + .delimiter(TEST_URL_ENCODED_DELIMITER) + .prefix(TEST_URL_ENCODED_PREFIX) + .keyMarker(TEST_URL_ENCODED_KEY_MARKER) + .nextKeyMarker(TEST_URL_ENCODED_NEXT_KEY_MARKER) + .uploads(MultipartUpload.builder().key(TEST_URL_ENCODED_KEY).build()) + .commonPrefixes(COMMON_PREFIXES) + .build(); @Test public void encodingTypeSet_decodesListObjectsResponseParts() { @@ -85,11 +128,12 @@ public void encodingTypeSet_decodesListObjectsResponseParts() { ListObjectsResponse decoded = (ListObjectsResponse) INTERCEPTOR.modifyResponse(ctx, new ExecutionAttributes()); - assertDecoded(decoded::delimiter); - assertDecoded(decoded::nextMarker); - assertDecoded(decoded::prefix); - assertDecoded(decoded::marker); + assertDecoded(decoded::delimiter, " delimiter"); + assertDecoded(decoded::nextMarker, " nextmarker"); + assertDecoded(decoded::prefix, " prefix"); + assertDecoded(decoded::marker, " marker"); assertKeysAreDecoded(decoded.contents()); + assertCommonPrefixesAreDecoded(decoded.commonPrefixes()); } @Test @@ -98,10 +142,40 @@ public void encodingTypeSet_decodesListObjectsV2ResponseParts() { ListObjectsV2Response decoded = (ListObjectsV2Response) INTERCEPTOR.modifyResponse(ctx, new ExecutionAttributes()); - assertDecoded(decoded::delimiter); - assertDecoded(decoded::prefix); - assertDecoded(decoded::startAfter); + assertDecoded(decoded::delimiter, " delimiter"); + assertDecoded(decoded::prefix, " prefix"); + assertDecoded(decoded::startAfter, " startafter"); assertKeysAreDecoded(decoded.contents()); + assertCommonPrefixesAreDecoded(decoded.commonPrefixes()); + } + + @Test + public void encodingTypeSet_decodesListObjectVersionsResponse() { + Context.ModifyResponse ctx = newContext(TEST_LIST_OBJECT_VERSION_RESPONSE); + + ListObjectVersionsResponse decoded = (ListObjectVersionsResponse) INTERCEPTOR.modifyResponse(ctx, new ExecutionAttributes()); + + assertDecoded(decoded::delimiter, " delimiter"); + assertDecoded(decoded::prefix, " prefix"); + assertDecoded(decoded::keyMarker, " keyMarker"); + assertDecoded(decoded::nextKeyMarker, " nextKeyMarker"); + 
assertCommonPrefixesAreDecoded(decoded.commonPrefixes()); + assertVersionsAreDecoded(decoded.versions()); + } + + @Test + public void encodingTypeSet_decodesListMultipartUploadsResponse() { + Context.ModifyResponse ctx = newContext(TEST_LIST_MULTIPART_UPLOADS_RESPONSE); + + ListMultipartUploadsResponse decoded = (ListMultipartUploadsResponse) INTERCEPTOR.modifyResponse(ctx, new ExecutionAttributes()); + + assertDecoded(decoded::delimiter, " delimiter"); + assertDecoded(decoded::prefix, " prefix"); + assertDecoded(decoded::keyMarker, " keyMarker"); + assertDecoded(decoded::nextKeyMarker, " nextKeyMarker"); + assertCommonPrefixesAreDecoded(decoded.commonPrefixes()); + assertUploadsAreDecoded(decoded.uploads()); } @Test @@ -130,14 +204,39 @@ public void encodingTypeNotSet_doesNotDecodeListObjectsV2ResponseParts() { assertThat(fromInterceptor).isEqualTo(original); } + @Test + public void otherResponses_shouldNotModifyResponse() { + HeadObjectResponse original = HeadObjectResponse.builder().build(); + Context.ModifyResponse ctx = newContext(original); + SdkResponse sdkResponse = INTERCEPTOR.modifyResponse(ctx, new ExecutionAttributes()); + assertThat(original.hashCode()).isEqualTo(sdkResponse.hashCode()); + } + private void assertKeysAreDecoded(List<S3Object> objects) { objects.forEach(o -> assertDecoded(o::key)); } + private void assertCommonPrefixesAreDecoded(List<CommonPrefix> commonPrefixes) { + commonPrefixes.forEach(c -> assertDecoded(c::prefix, " prefix")); + } + private void assertDecoded(Supplier<String> supplier) { - assertThat(supplier.get()).isEqualTo(TEST_URL_DECODED); + assertDecoded(supplier, ""); + } + + private void assertDecoded(Supplier<String> supplier, String suffix) { + assertThat(supplier.get()).isEqualTo(TEST_URL_DECODED + suffix); } + private void assertVersionsAreDecoded(List<ObjectVersion> versions) { + versions.forEach(v -> assertDecoded(v::key, " key")); + } + + private void assertUploadsAreDecoded(List<MultipartUpload> uploads) { + uploads.forEach(u -> assertDecoded(u::key, " key")); + } + + private static Context.ModifyResponse newContext(SdkResponse response) { return new Context.ModifyResponse() { @Override diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EnableChunkedEncodingInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EnableChunkedEncodingInterceptorTest.java index f3230392287f..4f03921cbb69 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EnableChunkedEncodingInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EnableChunkedEncodingInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License.
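The DecodeUrlEncodedResponseInterceptor tests above exercise S3's EncodingType.URL behaviour: when URL encoding is requested, keys, prefixes, delimiters and markers come back percent-encoded with spaces rendered as "+", and the interceptor decodes them before handing the response to the caller. A small standalone sketch of that decoding with java.net.URLDecoder follows; the interceptor itself presumably relies on the SDK's internal decoder, which is not shown here.

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;

// Decodes the sample value used by the tests: "+" becomes a space and the
// percent-escapes become UTF-8 characters, yielding "foo = bar baz α β 😊".
public final class UrlEncodedListResponseSketch {
    public static void main(String[] args) throws UnsupportedEncodingException {
        String encoded = "foo+%3D+bar+baz+%CE%B1+%CE%B2+%F0%9F%98%8A";
        String decoded = URLDecoder.decode(encoded, "UTF-8");
        System.out.println(decoded); // foo = bar baz α β 😊
    }
}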
diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EnableTrailingChecksumInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EnableTrailingChecksumInterceptorTest.java index c3ed27d0fe6f..400e20761b6f 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EnableTrailingChecksumInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EnableTrailingChecksumInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptorTest.java index a41382f9c0b1..c6c20fb17057 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -16,181 +16,92 @@ package software.amazon.awssdk.services.s3.internal.handlers; import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME; +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SIGNING_REGION; import static software.amazon.awssdk.awscore.AwsExecutionAttribute.AWS_REGION; import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.SERVICE_CONFIG; +import static software.amazon.awssdk.utils.http.SdkHttpUtils.urlEncode; import java.net.URI; -import java.util.Optional; +import org.junit.Before; import org.junit.Test; -import software.amazon.awssdk.core.SdkRequest; -import software.amazon.awssdk.core.async.AsyncRequestBody; + import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.SdkHttpFullRequest; -import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Configuration; -import software.amazon.awssdk.services.s3.model.CreateBucketRequest; -import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; -import software.amazon.awssdk.services.s3.model.ListBucketsRequest; -import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.utils.InterceptorTestUtils; public class EndpointAddressInterceptorTest { - private final EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + private static final String AP_ARN = 
"arn:aws:s3:us-west-2:123456789012:accesspoint:foobar"; + private static final String OUTPOSTS_ARN = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456" + + ":accesspoint:myaccesspoint"; + private static final String KEY = "test-key"; + private static final String DEFAULT_SIGNING_NAME = "s3"; + private static final String OUTPOSTS_SIGNING_NAME = "s3-outposts"; + private static final Region DEFAULT_REGION = Region.US_WEST_2; - @Test - public void traditionalEndpoint_shouldNotConvertEndpoint() { - verifyEndpoint("http", "http://s3-test.com", - S3Configuration.builder()); + private EndpointAddressInterceptor interceptor; - verifyEndpoint("https", "https://s3-test.com", - S3Configuration.builder()); + @Before + public void setUp() throws Exception { + interceptor = new EndpointAddressInterceptor(); } @Test - public void accelerateEnabled_shouldConvertToAccelerateEndpoint() { - verifyEndpoint("http", "http://s3-accelerate.amazonaws.com", - S3Configuration.builder().accelerateModeEnabled(true)); - verifyEndpoint("https", "https://s3-accelerate.amazonaws.com", - S3Configuration.builder().accelerateModeEnabled(true)); + public void accesspointArn_shouldReturnStandardRequest() { + ExecutionAttributes executionAttributes = createExecutionAttributes(S3Configuration.builder(), DEFAULT_REGION); + SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(createContext(AP_ARN), executionAttributes); + + String expectedEndpoint = "http://foobar-123456789012.s3-accesspoint.us-west-2.amazonaws.com"; + assertThat(sdkHttpFullRequest.getUri()).isEqualTo(uri(expectedEndpoint)); + assertThat(executionAttributes.getAttribute(SIGNING_REGION)).isEqualTo(Region.US_WEST_2); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo(DEFAULT_SIGNING_NAME); } @Test - public void bothAccelerateDualstackEnabled_shouldConvertToAccelerateDualstackEndpoint() { - S3Configuration.Builder configurationBuilder = S3Configuration.builder() - .dualstackEnabled(true) - .accelerateModeEnabled(true); - verifyEndpoint("http", - "http://s3-accelerate.dualstack.amazonaws.com", - S3Configuration.builder() - .accelerateModeEnabled(true) - .dualstackEnabled(true) - ); - verifyEndpoint("https", - "https://s3-accelerate.dualstack.amazonaws.com", - configurationBuilder); + public void outpostAccessPointArn_sameRegion_shouldRegion() { + ExecutionAttributes executionAttributes = createExecutionAttributes(S3Configuration.builder(), DEFAULT_REGION); + SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(createContext(OUTPOSTS_ARN), executionAttributes); + + String expectedEndpoint = "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com"; + assertThat(sdkHttpFullRequest.getUri()).isEqualTo(uri(expectedEndpoint)); + assertThat(executionAttributes.getAttribute(SIGNING_REGION)).isEqualTo(Region.US_WEST_2); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo(OUTPOSTS_SIGNING_NAME); } @Test - public void accelerateEnabled_ListBucketRequest_shouldNotConvertToAccelerateEndpoint() { - verifyAccelerateDisabledOperationsEndpointNotConverted(ListBucketsRequest.builder().build()); + public void outpostAccessPointArn_crossRegion_ArnRegionEnabled_correctlyInfersPartition() { + ExecutionAttributes executionAttributes = createExecutionAttributes(S3Configuration.builder().useArnRegionEnabled(true), + Region.US_EAST_1); + SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(createContext(OUTPOSTS_ARN), executionAttributes); + + 
String expectedEndpoint = "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com"; + assertThat(sdkHttpFullRequest.getUri()).isEqualTo(uri(expectedEndpoint)); + assertThat(executionAttributes.getAttribute(SIGNING_REGION)).isEqualTo(Region.US_WEST_2); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo(OUTPOSTS_SIGNING_NAME); } - @Test - public void accelerateEnabled_CreateBucketsRequest_shouldNotConvertToAccelerateEndpoint() { - verifyAccelerateDisabledOperationsEndpointNotConverted(CreateBucketRequest.builder().build()); - } + private Context.ModifyHttpRequest createContext(String accessPointArn) { + URI customUri = URI.create(String.format("http://s3-test.com/%s/%s", urlEncode(accessPointArn), KEY)); + PutObjectRequest request = PutObjectRequest.builder().bucket(accessPointArn).key(KEY).build(); - @Test - public void accelerateEnabled_DeleteBucketRequest_shouldNotConvertToAccelerateEndpoint() { - verifyAccelerateDisabledOperationsEndpointNotConverted(DeleteBucketRequest.builder().build()); + return InterceptorTestUtils.modifyHttpRequestContext(request, InterceptorTestUtils.sdkHttpRequest(customUri)); } - @Test - public void dualstackEnabled_shouldConvertToDualstackEndpoint() { - verifyEndpoint("http", "http://s3.dualstack.us-east-1.amazonaws.com", - S3Configuration.builder().dualstackEnabled(true)); - verifyEndpoint("https", "https://s3.dualstack.us-east-1.amazonaws.com", - S3Configuration.builder().dualstackEnabled(true)); - } - - @Test - public void virtualStyle_shouldConvertToDnsEndpoint() { - verifyVirtualStyleConvertDnsEndpoint("https"); - verifyVirtualStyleConvertDnsEndpoint("http"); - } - - @Test - public void pathStyleAccessEnabled_shouldNotConvertToDnsEndpoint() { - verifyEndpoint("http", "http://s3-test.com", - S3Configuration.builder().pathStyleAccessEnabled(true)); - verifyEndpoint("https", "https://s3-test.com", - S3Configuration.builder().pathStyleAccessEnabled(true)); - } - - private void verifyVirtualStyleConvertDnsEndpoint(String protocol) { - URI customUri = URI.create(String.format("%s://s3-test.com", protocol)); - String bucketName = "some-bucket"; - URI expectedUri = URI.create(String.format("%s://%s.s3.dualstack.us-east-1.amazonaws.com", protocol, bucketName)); - - - Context.ModifyHttpRequest ctx = context(ListObjectsV2Request.builder().bucket(bucketName).build(), - sdkHttpRequest(customUri)); + private ExecutionAttributes createExecutionAttributes(S3Configuration.Builder builder, Region region) { ExecutionAttributes executionAttributes = new ExecutionAttributes(); - S3Configuration s3Configuration = S3Configuration.builder().dualstackEnabled(true).build(); - - executionAttributes.putAttribute(SERVICE_CONFIG, s3Configuration); - executionAttributes.putAttribute(AWS_REGION, Region.US_EAST_1); - - SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(ctx, executionAttributes); - - assertThat(sdkHttpFullRequest.getUri()).isEqualTo(expectedUri); - } - - private SdkHttpRequest sdkHttpRequest(URI customUri) { - return SdkHttpFullRequest.builder() - .protocol(customUri.getScheme()) - .host(customUri.getHost()) - .port(customUri.getPort()) - .method(SdkHttpMethod.GET) - .build(); - } - - private void verifyAccelerateDisabledOperationsEndpointNotConverted(SdkRequest request) { - URI customUri = URI.create("http://s3-test.com"); - Context.ModifyHttpRequest ctx = context(request, sdkHttpRequest(customUri)); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - S3Configuration 
s3Configuration = S3Configuration.builder().accelerateModeEnabled(true).build(); - - executionAttributes.putAttribute(SERVICE_CONFIG, s3Configuration); - executionAttributes.putAttribute(AWS_REGION, Region.US_EAST_1); - - SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(ctx, executionAttributes); - - assertThat(sdkHttpFullRequest.getUri()).isEqualTo(customUri); - } - - private void verifyEndpoint(String protocol, String expectedEndpoint, - S3Configuration.Builder builder) { - URI customUri = URI.create(String.format("%s://s3-test.com", protocol)); - URI expectedUri = URI.create(expectedEndpoint); - Context.ModifyHttpRequest ctx = context(PutObjectRequest.builder().build(), sdkHttpRequest(customUri)); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - S3Configuration s3Configuration = builder.build(); - - executionAttributes.putAttribute(SERVICE_CONFIG, s3Configuration); - executionAttributes.putAttribute(AWS_REGION, Region.US_EAST_1); - - SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(ctx, executionAttributes); - - assertThat(sdkHttpFullRequest.getUri()).isEqualTo(expectedUri); + executionAttributes.putAttribute(SERVICE_CONFIG, builder.build()); + executionAttributes.putAttribute(AWS_REGION, region); + executionAttributes.putAttribute(SIGNING_REGION, region); + executionAttributes.putAttribute(SERVICE_SIGNING_NAME, DEFAULT_SIGNING_NAME); + return executionAttributes; } - private Context.ModifyHttpRequest context(SdkRequest request, SdkHttpRequest sdkHttpRequest) { - return new Context.ModifyHttpRequest() { - @Override - public SdkHttpRequest httpRequest() { - return sdkHttpRequest; - } - - @Override - public Optional requestBody() { - return null; - } - - @Override - public Optional asyncRequestBody() { - return null; - } - - @Override - public SdkRequest request() { - return request; - } - }; + private URI uri(String expectedEndpoint) { + return URI.create(String.format("%s/%s", expectedEndpoint, KEY)); } } diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/ExceptionTranslationInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/ExceptionTranslationInterceptorTest.java index 65dfad573661..b93b6b26851c 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/ExceptionTranslationInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/ExceptionTranslationInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/GetBucketPolicyInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/GetBucketPolicyInterceptorTest.java index bc6d20800e77..95ee7178e58f 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/GetBucketPolicyInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/GetBucketPolicyInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectHeaderTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectHeaderTest.java index e03ff18547f8..0203649a477c 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectHeaderTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectHeaderTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptorTest.java index db120eb29bcc..13bc671086b1 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/SyncChecksumValidationInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/SyncChecksumValidationInterceptorTest.java index 8a428d53e4af..90b779cdfd25 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/SyncChecksumValidationInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/SyncChecksumValidationInterceptorTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
@@ -17,17 +17,22 @@ import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Java6Assertions.assertThatThrownBy; import static software.amazon.awssdk.core.ClientType.SYNC; import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.CLIENT_TYPE; import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.SERVICE_CONFIG; import static software.amazon.awssdk.services.s3.checksums.ChecksumConstant.CHECKSUM_ENABLED_RESPONSE_HEADER; import static software.amazon.awssdk.services.s3.checksums.ChecksumConstant.CONTENT_LENGTH_HEADER; import static software.amazon.awssdk.services.s3.checksums.ChecksumConstant.ENABLE_MD5_CHECKSUM_HEADER_VALUE; +import static software.amazon.awssdk.services.s3.checksums.ChecksumConstant.SERVER_SIDE_ENCRYPTION_HEADER; import static software.amazon.awssdk.services.s3.checksums.ChecksumsEnabledValidator.CHECKSUM; +import static software.amazon.awssdk.services.s3.model.ServerSideEncryption.AWS_KMS; import java.io.IOException; import java.io.InputStream; +import java.net.URI; import java.nio.ByteBuffer; +import java.nio.charset.Charset; import java.util.ArrayList; import java.util.List; import java.util.Optional; @@ -36,8 +41,12 @@ import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.InterceptorContext; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.services.s3.S3Configuration; import software.amazon.awssdk.services.s3.checksums.ChecksumCalculatingInputStream; @@ -45,12 +54,18 @@ import software.amazon.awssdk.services.s3.internal.handlers.SyncChecksumValidationInterceptor.ChecksumCalculatingStreamProvider; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.s3.utils.InterceptorTestUtils; import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.StringInputStream; +import software.amazon.awssdk.utils.internal.Base16Lower; public class SyncChecksumValidationInterceptorTest { + private static final byte[] CONTENT_BYTES = "CONTENT".getBytes(Charset.forName("UTF-8")); + private static final String VALID_CHECKSUM = Base16Lower.encodeAsString(checkSumFor(CONTENT_BYTES).getChecksumBytes()); + private static final String INVALID_CHECKSUM = "3902ee7e149eb8313a34757e89e21af6"; + private SyncChecksumValidationInterceptor interceptor = new SyncChecksumValidationInterceptor(); @Test @@ -149,6 +164,82 @@ public void checksumCalculatingStreamProvider_shouldReturnNewStreamResetChecksum newStream.close(); } + @Test + public void afterUnmarshalling_putObjectRequest_shouldValidateChecksum() { + SdkHttpResponse sdkHttpResponse = getSdkHttpResponseWithChecksumHeader(); + + PutObjectResponse response = PutObjectResponse.builder() + .eTag(VALID_CHECKSUM) + .build(); + + PutObjectRequest putObjectRequest = PutObjectRequest.builder() + .build(); + + SdkHttpRequest sdkHttpRequest = SdkHttpFullRequest.builder() + .uri(URI.create("http://localhost:8080")) 
+ .method(SdkHttpMethod.PUT) + .build(); + + Context.AfterUnmarshalling afterUnmarshallingContext = + InterceptorTestUtils.afterUnmarshallingContext(putObjectRequest, sdkHttpRequest, response, sdkHttpResponse); + + interceptor.afterUnmarshalling(afterUnmarshallingContext, getExecutionAttributesWithChecksum()); + } + + @Test + public void afterUnmarshalling_putObjectRequest_shouldValidateChecksum_throwExceptionIfInvalid() { + SdkHttpResponse sdkHttpResponse = getSdkHttpResponseWithChecksumHeader(); + + PutObjectResponse response = PutObjectResponse.builder() + .eTag(INVALID_CHECKSUM) + .build(); + + PutObjectRequest putObjectRequest = PutObjectRequest.builder().build(); + + SdkHttpRequest sdkHttpRequest = SdkHttpFullRequest.builder() + .uri(URI.create("http://localhost:8080")) + .method(SdkHttpMethod.PUT) + .contentStreamProvider(() -> new StringInputStream("Test")) + .build(); + + Context.AfterUnmarshalling afterUnmarshallingContext = + InterceptorContext.builder() + .request(putObjectRequest) + .httpRequest(sdkHttpRequest) + .response(response) + .httpResponse(sdkHttpResponse) + .requestBody(RequestBody.fromString("Test")) + .build(); + + ExecutionAttributes attributes = getExecutionAttributesWithChecksum(); + interceptor.modifyHttpContent(afterUnmarshallingContext, attributes); + assertThatThrownBy(() -> interceptor.afterUnmarshalling(afterUnmarshallingContext, attributes)) + .hasMessageContaining("Data read has a different checksum than expected."); + } + + @Test + public void afterUnmarshalling_putObjectRequest_with_SSE_shouldNotValidateChecksum() { + SdkHttpResponse sdkHttpResponse = getSdkHttpResponseWithChecksumHeader(); + + PutObjectResponse response = PutObjectResponse.builder() + .eTag(INVALID_CHECKSUM) + .build(); + + PutObjectRequest putObjectRequest = PutObjectRequest.builder().build(); + + SdkHttpRequest sdkHttpRequest = SdkHttpFullRequest.builder() + .putHeader(SERVER_SIDE_ENCRYPTION_HEADER, AWS_KMS.toString()) + .putHeader("x-amz-server-side-encryption-aws-kms-key-id", ENABLE_MD5_CHECKSUM_HEADER_VALUE) + .uri(URI.create("http://localhost:8080")) + .method(SdkHttpMethod.PUT) + .build(); + + Context.AfterUnmarshalling afterUnmarshallingContext = + InterceptorTestUtils.afterUnmarshallingContext(putObjectRequest, sdkHttpRequest, response, sdkHttpResponse); + + interceptor.afterUnmarshalling(afterUnmarshallingContext, getExecutionAttributesWithChecksum()); + } + private static final class CloseAwareStream extends InputStream { private StringInputStream inputStream; private boolean isClosed; @@ -192,4 +283,15 @@ private ExecutionAttributes getExecutionAttributesWithChecksumDisabled() { executionAttributes.putAttribute(SERVICE_CONFIG, S3Configuration.builder().checksumValidationEnabled(false).build()); return executionAttributes; } + + private ExecutionAttributes getExecutionAttributesWithChecksum() { + SdkChecksum checksum = checkSumFor(CONTENT_BYTES); + return getExecutionAttributes().putAttribute(CHECKSUM, checksum); + } + + private static SdkChecksum checkSumFor(byte[] bytes) { + SdkChecksum checksum = new Md5Checksum(); + checksum.update(bytes, 0, bytes.length); + return checksum; + } } diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/OutpostAccessPointArnEndpointResolutionTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/OutpostAccessPointArnEndpointResolutionTest.java new file mode 100644 index 000000000000..95cef1ba878d --- /dev/null +++ 
b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/OutpostAccessPointArnEndpointResolutionTest.java @@ -0,0 +1,172 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.services.s3.S3MockUtils.mockListObjectsResponse; + +import java.net.URI; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3ClientBuilder; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; + +/** + * Functional tests for outpost access point ARN + */ +public class OutpostAccessPointArnEndpointResolutionTest { + + private MockSyncHttpClient mockHttpClient; + private Signer mockSigner; + + @Before + public void setup() { + mockHttpClient = new MockSyncHttpClient(); + mockSigner = (request, executionAttributes) -> request; + } + + @Test + public void outpostArn_correctlyRewritesEndpoint() throws Exception { + URI customEndpoint = URI.create("https://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.ap-south-1.amazonaws.com"); + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().build(); + String outpostArn = "arn:aws:s3-outposts:ap-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + s3Client.listObjects(ListObjectsRequest.builder().bucket(outpostArn).build()); + + assertThat(mockHttpClient.getLastRequest().firstMatchingHeader("Authorization").get()).contains("s3-outposts/aws4_request"); + assertEndpointMatches(mockHttpClient.getLastRequest(), customEndpoint.toString()); + } + + @Test + public void outpostArn_customEndpoint_throwsIllegalArgumentException() throws Exception { + URI customEndpoint = URI.create("https://foobar.amazonaws.com"); + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().endpointOverride(customEndpoint).build(); + String outpostArn = "arn:aws:s3-outposts:ap-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + assertThatThrownBy(() -> s3Client.listObjects(ListObjectsRequest.builder().bucket(outpostArn).build())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("endpoint override"); + } + + @Test + public void outpostArn_dualstackEnabled_throwsIllegalArgumentException() throws Exception { + 
mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().serviceConfiguration(S3Configuration.builder().dualstackEnabled(true).build()).build(); + String outpostArn = "arn:aws:s3-outposts:ap-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + assertThatThrownBy(() -> s3Client.listObjects(ListObjectsRequest.builder().bucket(outpostArn).build())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("dualstack"); + } + + @Test + public void outpostArn_fipsRegion_throwsIllegalArgumentException() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().region(Region.of("fips-us-east-1")).serviceConfiguration(S3Configuration.builder().dualstackEnabled(false).build()).build(); + String outpostArn = "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + assertThatThrownBy(() -> s3Client.listObjects(ListObjectsRequest.builder().bucket(outpostArn).build())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("FIPS"); + } + + @Test + public void outpostArn_accelerateEnabled_throwsIllegalArgumentException() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().serviceConfiguration(S3Configuration.builder().accelerateModeEnabled(true).build()).build(); + String outpostArn = "arn:aws:s3-outposts:ap-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + assertThatThrownBy(() -> s3Client.listObjects(ListObjectsRequest.builder().bucket(outpostArn).build())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accelerate"); + } + + @Test + public void outpostArn_pathStyle_throwsIllegalArgumentException() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().serviceConfiguration(S3Configuration.builder().pathStyleAccessEnabled(true).build()).build(); + String outpostArn = "arn:aws:s3-outposts:ap-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + assertThatThrownBy(() -> s3Client.listObjects(ListObjectsRequest.builder().bucket(outpostArn).build())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("path style addressing"); + } + + @Test + public void outpostArn_differentRegion_useArnRegionFalse_throwsIllegalArgumentException() throws Exception { + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().build(); + String outpostArn = "arn:aws:s3-outposts:us-west-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + assertThatThrownBy(() -> s3Client.listObjects(ListObjectsRequest.builder().bucket(outpostArn).build())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("region"); + } + + @Test + public void outpostArn_differentRegion_useArnRegionTrue() throws Exception { + URI customEndpoint = URI.create("https://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com"); + mockHttpClient.stubNextResponse(mockListObjectsResponse()); + S3Client s3Client = clientBuilder().serviceConfiguration(b -> b.useArnRegionEnabled(true)).build(); + String outpostArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + s3Client.listObjects(ListObjectsRequest.builder().bucket(outpostArn).build()); + + 
assertEndpointMatches(mockHttpClient.getLastRequest(), customEndpoint.toString()); + } + + /** + * Assert that the provided request would have gone to the given endpoint. + * + * @param capturedRequest Request captured by mock HTTP client. + * @param endpoint Expected endpoint. + */ + private void assertEndpointMatches(SdkHttpRequest capturedRequest, String endpoint) { + assertThat(capturedRequest.getUri()).isEqualTo(URI.create(endpoint)); + } + + /** + * @param s3ServiceConfiguration Advanced configuration to use for this client. + * @return A built client with the given advanced configuration. + */ + private S3Client buildClient(S3Configuration s3ServiceConfiguration) { + return clientBuilder() + .serviceConfiguration(s3ServiceConfiguration) + .build(); + } + + /** + * @return Client builder instance preconfigured with credentials and region using the {@link #mockHttpClient} for transport. + */ + private S3ClientBuilder clientBuilder() { + return S3Client.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.AP_SOUTH_1) + .httpClient(mockHttpClient); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointBuilderTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointBuilderTest.java new file mode 100644 index 000000000000..86332d685a33 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointBuilderTest.java @@ -0,0 +1,186 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; + +import java.net.URI; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class S3AccessPointBuilderTest { + private static final String LONG_STRING_64 = "1234567890123456789012345678901234567890123456789012345678901234"; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void toURI_noDualstack() { + URI result = S3AccessPointBuilder.create() + .accessPointName("access-point") + .accountId("account-id") + .region("region") + .protocol("protocol") + .domain("domain") + .toUri(); + + assertThat(result, is(URI.create("protocol://access-point-account-id.s3-accesspoint.region.domain"))); + } + + @Test + public void toURI_dualstack() { + URI result = S3AccessPointBuilder.create() + .accessPointName("access-point") + .accountId("account-id") + .region("region") + .protocol("protocol") + .domain("domain") + .dualstackEnabled(true) + .toUri(); + + assertThat(result, + is(URI.create("protocol://access-point-account-id.s3-accesspoint.dualstack.region.domain"))); + } + + @Test + public void toURI_FipsEnabled() { + URI result = S3AccessPointBuilder.create() + .accessPointName("access-point") + .accountId("account-id") + .region("region") + .protocol("protocol") + .domain("domain") + .fipsEnabled(true) + .toUri(); + + assertThat(result, is(URI.create("protocol://access-point-account-id.s3-accesspoint.fips-region.domain"))); + } + + @Test + public void toURI_accessPointNameWithSlashes_throwsIllegalArgumentException() { + assertThatThrownBy(() -> S3AccessPointBuilder.create() + .accessPointName("access/point") + .accountId("account-id") + .region("region") + .protocol("protocol") + .domain("domain") + .toUri()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accessPointName") + .hasMessageContaining("alphanumeric"); + } + + @Test + public void toURI_accountIdWithSlashes_throwsIllegalArgumentException() { + assertThatThrownBy(() -> S3AccessPointBuilder.create() + .accessPointName("accesspoint") + .accountId("account/id") + .region("region") + .protocol("protocol") + .domain("domain") + .toUri()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accountId") + .hasMessageContaining("alphanumeric"); + } + + @Test + public void toURI_accessPointNameWithTooLongString_throwsIllegalArgumentException() { + assertThatThrownBy(() -> S3AccessPointBuilder.create() + .accessPointName(LONG_STRING_64) + .accountId("account-id") + .region("region") + .protocol("protocol") + .domain("domain") + .toUri()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accessPointName") + .hasMessageContaining("63"); // max length + } + + @Test + public void toURI_accountIdWithTooLongString_throwsIllegalArgumentException() { + assertThatThrownBy(() -> S3AccessPointBuilder.create() + .accessPointName("accesspoint") + .accountId(LONG_STRING_64) + .region("region") + .protocol("protocol") + .domain("domain") + .toUri()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accountId") + .hasMessageContaining("63"); // max length + } + + @Test + public void toURI_accessPointNameWithEmptyString_throwsIllegalArgumentException() { + assertThatThrownBy(() -> S3AccessPointBuilder.create() + .accessPointName("") + .accountId("account-id") 
+ .region("region") + .protocol("protocol") + .domain("domain") + .toUri()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accessPointName") + .hasMessageContaining("missing"); + } + + @Test + public void toURI_accountIdWithEmptyString_throwsIllegalArgumentException() { + assertThatThrownBy(() -> S3AccessPointBuilder.create() + .accessPointName("accesspoint") + .accountId("") + .region("region") + .protocol("protocol") + .domain("domain") + .toUri()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accountId") + .hasMessageContaining("missing"); + } + + @Test + public void toURI_accessPointNameWithUrlEncodedCharacters_throwsIllegalArgumentException() { + assertThatThrownBy(() -> S3AccessPointBuilder.create() + .accessPointName("access%2fpoint") + .accountId("account-id") + .region("region") + .protocol("protocol") + .domain("domain") + .toUri()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accessPointName") + .hasMessageContaining("alphanumeric"); + } + + @Test + public void toURI_accountIdWithUrlEncodedCharacters_throwsIllegalArgumentException() { + assertThatThrownBy(() -> S3AccessPointBuilder.create() + .accessPointName("accesspoint") + .accountId("account%2fid") + .region("region") + .protocol("protocol") + .domain("domain") + .toUri()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accountId") + .hasMessageContaining("alphanumeric"); + } +} \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointResourceTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointResourceTest.java new file mode 100644 index 000000000000..968bca5ff32d --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointResourceTest.java @@ -0,0 +1,316 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + +import java.util.Optional; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class S3AccessPointResourceTest { + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void buildWithAllPropertiesSet() { + S3AccessPointResource s3AccessPointResource = S3AccessPointResource.builder() + .accessPointName("access_point-name") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + assertEquals("access_point-name", s3AccessPointResource.accessPointName()); + assertEquals(Optional.of("account-id"), s3AccessPointResource.accountId()); + assertEquals(Optional.of("partition"), s3AccessPointResource.partition()); + assertEquals(Optional.of("region"), s3AccessPointResource.region()); + assertEquals("accesspoint", s3AccessPointResource.type()); + } + + @Test + public void toBuilder() { + S3AccessPointResource s3AccessPointResource = S3AccessPointResource.builder() + .accessPointName("access_point-name") + .accountId("account-id") + .partition("partition") + .region("region") + .build() + .toBuilder() + .build(); + + assertEquals("access_point-name", s3AccessPointResource.accessPointName()); + assertEquals(Optional.of("account-id"), s3AccessPointResource.accountId()); + assertEquals(Optional.of("partition"), s3AccessPointResource.partition()); + assertEquals(Optional.of("region"), s3AccessPointResource.region()); + assertEquals("accesspoint", s3AccessPointResource.type()); + } + + @Test(expected = IllegalArgumentException.class) + public void buildWithBlankRegion() { + S3AccessPointResource.builder() + .accessPointName("access_point-name") + .accountId("account-id") + .partition("partition") + .region("") + .build(); + } + + @Test(expected = IllegalArgumentException.class) + public void buildWithBlankPartition() { + S3AccessPointResource.builder() + .accessPointName("access_point-name") + .accountId("account-id") + .region("region") + .partition("") + .build(); + } + + @Test(expected = IllegalArgumentException.class) + public void buildWithBlankAccountId() { + S3AccessPointResource.builder() + .accessPointName("access_point-name") + .partition("partition") + .region("region") + .accountId("") + .build(); + } + + @Test(expected = IllegalArgumentException.class) + public void buildWithBlankAccessPointName() { + S3AccessPointResource.builder() + .accountId("account-id") + .partition("partition") + .region("region") + .accessPointName("") + .build(); + } + + @Test(expected = NullPointerException.class) + public void buildWithMissingRegion() { + S3AccessPointResource.builder() + .accessPointName("access_point-name") + .accountId("account-id") + .partition("partition") + .build(); + } + + @Test(expected = NullPointerException.class) + public void buildWithMissingPartition() { + S3AccessPointResource.builder() + .accessPointName("access_point-name") + .accountId("account-id") + .region("region") + .build(); + } + + @Test(expected = NullPointerException.class) + public void buildWithMissingAccountId() { + S3AccessPointResource.builder() + .accessPointName("access_point-name") + .partition("partition") + .region("region") + .build(); + } + + @Test(expected = NullPointerException.class) + public void buildWithMissingAccessPointName() { + S3AccessPointResource.builder() + .accountId("account-id") + .partition("partition") + 
.region("region") + .build(); + } + + @Test + public void buildWithSetters() { + S3AccessPointResource.Builder builder = S3AccessPointResource.builder(); + builder.setAccessPointName("access_point-name"); + builder.setAccountId("account-id"); + builder.setPartition("partition"); + builder.setRegion("region"); + S3AccessPointResource s3AccessPointResource = builder.build(); + + assertEquals("access_point-name", s3AccessPointResource.accessPointName()); + assertEquals(Optional.of("account-id"), s3AccessPointResource.accountId()); + assertEquals(Optional.of("partition"), s3AccessPointResource.partition()); + assertEquals(Optional.of("region"), s3AccessPointResource.region()); + assertEquals("accesspoint", s3AccessPointResource.type()); + } + + @Test + public void equalsHashcode_withoutParent() { + S3AccessPointResource s3BucketResource1 = S3AccessPointResource.builder() + .accessPointName("access_point") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3AccessPointResource s3BucketResource2 = S3AccessPointResource.builder() + .accessPointName("access_point") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3AccessPointResource s3BucketResource3 = S3AccessPointResource.builder() + .accessPointName("access_point") + .accountId("account-id") + .partition("different-partition") + .region("region") + .build(); + + assertEquals(s3BucketResource1, s3BucketResource2); + assertEquals(s3BucketResource1.hashCode(), s3BucketResource2.hashCode()); + assertNotEquals(s3BucketResource1, s3BucketResource3); + assertNotEquals(s3BucketResource1.hashCode(), s3BucketResource3.hashCode()); + } + + @Test + public void equalsHashcode_withParent() { + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3OutpostResource parentResource2 = S3OutpostResource.builder() + .outpostId("5678") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3AccessPointResource s3BucketResource1 = S3AccessPointResource.builder() + .accessPointName("access_point") + .parentS3Resource(parentResource) + .build(); + + S3AccessPointResource s3BucketResource2 = S3AccessPointResource.builder() + .accessPointName("access_point") + .parentS3Resource(parentResource) + .build(); + + S3AccessPointResource s3BucketResource3 = S3AccessPointResource.builder() + .accessPointName("access_point") + .parentS3Resource(parentResource2) + .build(); + + + assertEquals(s3BucketResource1, s3BucketResource2); + assertEquals(s3BucketResource1.hashCode(), s3BucketResource2.hashCode()); + assertNotEquals(s3BucketResource1, s3BucketResource3); + assertNotEquals(s3BucketResource1.hashCode(), s3BucketResource3.hashCode()); + } + + @Test + public void buildWithOutpostParent() { + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + S3AccessPointResource s3AccessPointResource = S3AccessPointResource.builder() + .parentS3Resource(parentResource) + .accessPointName("access-point-name") + .build(); + + assertEquals("access-point-name", s3AccessPointResource.accessPointName()); + assertEquals(Optional.of("account-id"), s3AccessPointResource.accountId()); + assertEquals(Optional.of("partition"), s3AccessPointResource.partition()); + assertEquals(Optional.of("region"), s3AccessPointResource.region()); + 
assertEquals("accesspoint", s3AccessPointResource.type()); + assertEquals(Optional.of(parentResource), s3AccessPointResource.parentS3Resource()); + } + + @Test + public void buildWithInvalidParent_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("parentS3Resource"); + + S3BucketResource invalidParent = S3BucketResource.builder() + .bucketName("bucket") + .build(); + S3AccessPointResource.builder() + .parentS3Resource(invalidParent) + .accessPointName("access-point-name") + .build(); + } + + @Test + public void hasParentAndPartition_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("has parent resource"); + + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3AccessPointResource.builder() + .accessPointName("access_point") + .partition("partition") + .parentS3Resource(parentResource) + .build(); + } + + @Test + public void hasParentAndAccountId_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("has parent resource"); + + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3AccessPointResource.builder() + .accessPointName("access_point") + .accountId("account id") + .parentS3Resource(parentResource) + .build(); + } + + @Test + public void hasParentAndRegion_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("has parent resource"); + + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3AccessPointResource.builder() + .accessPointName("access_point") + .region("region") + .parentS3Resource(parentResource) + .build(); + } +} \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnConverterTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnConverterTest.java new file mode 100644 index 000000000000..323bc3c945ef --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnConverterTest.java @@ -0,0 +1,355 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +import java.util.Optional; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import software.amazon.awssdk.arns.Arn; + +public class S3ArnConverterTest { + private static final S3ArnConverter S3_ARN_PARSER = S3ArnConverter.create(); + private static final String ACCOUNT_ID = "123456789012"; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void parseArn_objectThroughAp_v2Arn() { + S3Resource resource = S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("accesspoint:test-ap/object/test-key") + .build()); + + assertThat(resource, instanceOf(S3ObjectResource.class)); + + S3ObjectResource s3ObjectResource = (S3ObjectResource) resource; + assertThat(s3ObjectResource.parentS3Resource().get(), instanceOf(S3AccessPointResource.class)); + S3AccessPointResource s3AccessPointResource = (S3AccessPointResource)s3ObjectResource.parentS3Resource().get(); + assertThat(s3AccessPointResource.accessPointName(), is("test-ap")); + assertThat(s3ObjectResource.key(), is("test-key")); + assertThat(s3ObjectResource.accountId(), is(Optional.of("123456789012"))); + assertThat(s3ObjectResource.partition(), is(Optional.of("aws"))); + assertThat(s3ObjectResource.region(), is(Optional.of("us-east-1"))); + assertThat(s3ObjectResource.type(), is(S3ResourceType.OBJECT.toString())); + } + + @Test + public void parseArn_object_v1Arn() { + S3Resource resource = S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .resource("bucket/key") + .build()); + + assertThat(resource, instanceOf(S3ObjectResource.class)); + S3ObjectResource s3ObjectResource = (S3ObjectResource) resource; + assertThat(s3ObjectResource.parentS3Resource().get(), instanceOf(S3BucketResource.class)); + S3BucketResource s3BucketResource = (S3BucketResource) s3ObjectResource.parentS3Resource().get(); + + assertThat(s3BucketResource.bucketName(), is("bucket")); + assertThat(s3ObjectResource.key(), is("key")); + assertThat(s3ObjectResource.accountId(), is(Optional.empty())); + assertThat(s3ObjectResource.partition(), is(Optional.of("aws"))); + assertThat(s3ObjectResource.region(), is(Optional.empty())); + assertThat(s3ObjectResource.type(), is(S3ResourceType.OBJECT.toString())); + } + + @Test + public void parseArn_accessPoint() { + S3Resource resource = S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("accesspoint:accesspoint-name") + .build()); + + assertThat(resource, instanceOf(S3AccessPointResource.class)); + + S3AccessPointResource s3EndpointResource = (S3AccessPointResource) resource; + assertThat(s3EndpointResource.accessPointName(), is("accesspoint-name")); + assertThat(s3EndpointResource.accountId(), is(Optional.of("123456789012"))); + assertThat(s3EndpointResource.partition(), is(Optional.of("aws"))); + assertThat(s3EndpointResource.region(), is(Optional.of("us-east-1"))); + assertThat(s3EndpointResource.type(), is(S3ResourceType.ACCESS_POINT.toString())); + } + + @Test + public void parseArn_accessPoint_withQualifier() { + S3Resource resource = S3_ARN_PARSER.convertArn(Arn.builder() + 
.partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("accesspoint:accesspoint-name:1214234234") + .build()); + + assertThat(resource, instanceOf(S3AccessPointResource.class)); + + S3AccessPointResource s3EndpointResource = (S3AccessPointResource) resource; + assertThat(s3EndpointResource.accessPointName(), is("accesspoint-name")); + assertThat(s3EndpointResource.accountId(), is(Optional.of("123456789012"))); + assertThat(s3EndpointResource.partition(), is(Optional.of("aws"))); + assertThat(s3EndpointResource.region(), is(Optional.of("us-east-1"))); + assertThat(s3EndpointResource.type(), is(S3ResourceType.ACCESS_POINT.toString())); + } + + @Test + public void parseArn_v1Bucket() { + S3Resource resource = S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .resource("bucket-name") + .build()); + + assertThat(resource, instanceOf(S3BucketResource.class)); + + S3BucketResource s3BucketResource = (S3BucketResource) resource; + assertThat(s3BucketResource.bucketName(), is("bucket-name")); + assertThat(s3BucketResource.accountId(), is(Optional.empty())); + assertThat(s3BucketResource.partition(), is(Optional.of("aws"))); + assertThat(s3BucketResource.region(), is(Optional.empty())); + assertThat(s3BucketResource.type(), is(S3ResourceType.BUCKET.toString())); + } + + @Test + public void parseArn_v2Bucket() { + S3Resource resource = S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("bucket_name:bucket-name") + .build()); + + assertThat(resource, instanceOf(S3BucketResource.class)); + + S3BucketResource s3BucketResource = (S3BucketResource) resource; + assertThat(s3BucketResource.bucketName(), is("bucket-name")); + assertThat(s3BucketResource.accountId(), is(Optional.of("123456789012"))); + assertThat(s3BucketResource.partition(), is(Optional.of("aws"))); + assertThat(s3BucketResource.region(), is(Optional.of("us-east-1"))); + assertThat(s3BucketResource.type(), is(S3ResourceType.BUCKET.toString())); + } + + @Test + public void parseArn_unknownResource() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("ARN type"); + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("unknown:foobar") + .build()); + } + + @Test + public void parseArn_bucket_noName() { + exception.expect(IllegalArgumentException.class); + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("bucket_name:") + .build()); + } + + @Test + public void parseArn_accesspoint_noName() { + exception.expect(IllegalArgumentException.class); + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("access_point:") + .build()); + } + + @Test + public void parseArn_object_v2Arn_noKey() { + exception.expect(IllegalArgumentException.class); + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("object:bucket") + .build()); + } + + @Test + public void parseArn_object_v2Arn_emptyBucket() { + exception.expect(IllegalArgumentException.class); + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("object:/key") + .build()); + } + + 
@Test + public void parseArn_object_v2Arn_emptyKey() { + exception.expect(IllegalArgumentException.class); + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("object:bucket/") + .build()); + } + + @Test + public void parseArn_object_v1Arn_emptyKey() { + exception.expect(IllegalArgumentException.class); + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .resource("bucket/") + .build()); + } + + @Test + public void parseArn_object_v1Arn_emptyBucket() { + exception.expect(IllegalArgumentException.class); + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .resource("/key") + .build()); + } + + @Test + public void parseArn_unknownType_throwsCorrectException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("invalidType"); + + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("invalidType:something") + .build()); + } + + @Test + public void parseArn_outpostAccessPoint_slash() { + S3Resource resource = S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId(ACCOUNT_ID) + .resource("outpost/22222/accesspoint/foobar") + .build()); + + assertThat(resource, instanceOf(S3AccessPointResource.class)); + S3AccessPointResource s3AccessPointResource = (S3AccessPointResource) resource; + assertThat(s3AccessPointResource.accessPointName(), is("foobar")); + assertThat(s3AccessPointResource.parentS3Resource().get(), instanceOf(S3OutpostResource.class)); + S3OutpostResource outpostResource = (S3OutpostResource)s3AccessPointResource.parentS3Resource().get(); + assertThat(outpostResource.accountId(), is(Optional.of(ACCOUNT_ID))); + assertThat(outpostResource.partition(), is(Optional.of("aws"))); + assertThat(outpostResource.region(), is(Optional.of("us-east-1"))); + assertThat(outpostResource.outpostId(), is("22222")); + assertThat(outpostResource.type(), is(S3ResourceType.OUTPOST.toString())); + } + + @Test + public void parseArn_outpostAccessPoint_colon() { + S3Resource resource = S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId(ACCOUNT_ID) + .resource("outpost:22222:accesspoint:foobar") + .build()); + + assertThat(resource, instanceOf(S3AccessPointResource.class)); + S3AccessPointResource s3AccessPointResource = (S3AccessPointResource) resource; + assertThat(s3AccessPointResource.accessPointName(), is("foobar")); + + assertThat(s3AccessPointResource.parentS3Resource().get(), instanceOf(S3OutpostResource.class)); + + S3OutpostResource outpostResource = (S3OutpostResource)s3AccessPointResource.parentS3Resource().get(); + + assertThat(outpostResource.accountId(), is(Optional.of(ACCOUNT_ID))); + assertThat(outpostResource.partition(), is(Optional.of("aws"))); + assertThat(outpostResource.region(), is(Optional.of("us-east-1"))); + assertThat(outpostResource.outpostId(), is("22222")); + assertThat(outpostResource.type(), is(S3ResourceType.OUTPOST.toString())); + } + + @Test + public void parseArn_invalidOutpostAccessPointMissingAccessPointName_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId(ACCOUNT_ID) + 
.resource("outpost:op-01234567890123456:accesspoint") + .build()); + } + + @Test + public void parseArn_invalidOutpostAccessPointMissingOutpostId_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId(ACCOUNT_ID) + .resource("outpost/myaccesspoint") + .build()); + } + + @Test + public void parseArn_malformedOutpostArn_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Unknown outpost ARN type"); + + S3_ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId(ACCOUNT_ID) + .resource("outpost:1:accesspoin1:1") + .build()); + } +} \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtilsTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtilsTest.java new file mode 100644 index 000000000000..1bd47ba02294 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ArnUtilsTest.java @@ -0,0 +1,134 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +import java.util.Optional; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.arns.ArnResource; + + +public class S3ArnUtilsTest { + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void parseS3AccessPointArn_shouldParseCorrectly() { + S3AccessPointResource s3AccessPointResource = S3ArnUtils.parseS3AccessPointArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("accesspoint:accesspoint-name") + .build()); + + assertThat(s3AccessPointResource.accessPointName(), is("accesspoint-name")); + assertThat(s3AccessPointResource.accountId(), is(Optional.of("123456789012"))); + assertThat(s3AccessPointResource.partition(), is(Optional.of("aws"))); + assertThat(s3AccessPointResource.region(), is(Optional.of("us-east-1"))); + assertThat(s3AccessPointResource.type(), is(S3ResourceType.ACCESS_POINT.toString())); + } + + @Test + public void parseOutpostArn_arnWithColon_shouldParseCorrectly() { + IntermediateOutpostResource intermediateOutpostResource = S3ArnUtils.parseOutpostArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("outpost:22222:accesspoint:foobar") + .build()); + + assertThat(intermediateOutpostResource.outpostId(), is("22222")); + assertThat(intermediateOutpostResource.outpostSubresource(), equalTo(ArnResource.fromString("accesspoint:foobar"))); + } + + @Test + public void parseOutpostArn_arnWithSlash_shouldParseCorrectly() { + IntermediateOutpostResource intermediateOutpostResource = S3ArnUtils.parseOutpostArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("outpost/22222/accesspoint/foobar") + .build()); + + assertThat(intermediateOutpostResource.outpostId(), is("22222")); + assertThat(intermediateOutpostResource.outpostSubresource(), equalTo(ArnResource.fromString("accesspoint/foobar"))); + } + + @Test + public void parseOutpostArn_shouldParseCorrectly() { + IntermediateOutpostResource intermediateOutpostResource = S3ArnUtils.parseOutpostArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("outpost:22222:futuresegment:foobar") + .build()); + + assertThat(intermediateOutpostResource.outpostId(), is("22222")); + assertThat(intermediateOutpostResource.outpostSubresource(), equalTo(ArnResource.fromString("futuresegment/foobar"))); + } + + @Test + public void parseOutpostArn_malformedArnNullSubresourceType_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + S3ArnUtils.parseOutpostArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("outpost/22222/") + .build()); + } + + @Test + public void parseOutpostArn_malformedArnNullSubresource_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format for S3 Outpost ARN"); + + S3ArnUtils.parseOutpostArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + 
.resource("outpost:op-01234567890123456:accesspoint") + .build()); + } + + @Test + public void parseOutpostArn_malformedArnEmptyOutpostId_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("resource must not be blank or empty"); + + S3ArnUtils.parseOutpostArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("outpost::accesspoint:name") + .build()); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3BucketResourceTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3BucketResourceTest.java new file mode 100644 index 000000000000..ed991c543d1e --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3BucketResourceTest.java @@ -0,0 +1,170 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.resource; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + +import java.util.Optional; + +import org.junit.Test; + +public class S3BucketResourceTest { + @Test + public void buildWithAllPropertiesSet() { + S3BucketResource s3BucketResource = S3BucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + assertEquals("bucket", s3BucketResource.bucketName()); + assertEquals(Optional.of("account-id"), s3BucketResource.accountId()); + assertEquals(Optional.of("partition"), s3BucketResource.partition()); + assertEquals(Optional.of("region"), s3BucketResource.region()); + assertEquals("bucket_name", s3BucketResource.type()); + } + + @Test + public void toBuilder() { + S3BucketResource s3BucketResource = S3BucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("partition") + .region("region") + .build() + .toBuilder() + .build(); + + assertEquals("bucket", s3BucketResource.bucketName()); + assertEquals(Optional.of("account-id"), s3BucketResource.accountId()); + assertEquals(Optional.of("partition"), s3BucketResource.partition()); + assertEquals(Optional.of("region"), s3BucketResource.region()); + assertEquals("bucket_name", s3BucketResource.type()); + } + + @Test + public void buildWithSetters() { + S3BucketResource.Builder builder = S3BucketResource.builder(); + builder.setBucketName("bucket"); + builder.setAccountId("account-id"); + builder.setPartition("partition"); + builder.setRegion("region"); + S3BucketResource s3BucketResource = builder.build(); + + assertEquals("bucket", s3BucketResource.bucketName()); + assertEquals(Optional.of("account-id"), s3BucketResource.accountId()); + assertEquals(Optional.of("partition"), s3BucketResource.partition()); + assertEquals(Optional.of("region"), s3BucketResource.region()); + assertEquals("bucket_name", s3BucketResource.type()); + } + + @Test + public void 
buildWithMinimalPropertiesSet() { + S3BucketResource s3BucketResource = S3BucketResource.builder() + .bucketName("bucket") + .build(); + + assertEquals("bucket", s3BucketResource.bucketName()); + assertEquals(Optional.empty(), s3BucketResource.accountId()); + assertEquals(Optional.empty(), s3BucketResource.partition()); + assertEquals(Optional.empty(), s3BucketResource.region()); + assertEquals("bucket_name", s3BucketResource.type()); + } + + @Test(expected = NullPointerException.class) + public void buildWithMissingBucketName() { + S3BucketResource.builder().build(); + } + + @Test(expected = IllegalArgumentException.class) + public void buildWithBlankBucketName() { + S3BucketResource.builder().bucketName("").build(); + } + + @Test + public void equals_allProperties() { + S3BucketResource s3BucketResource1 = S3BucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3BucketResource s3BucketResource2 = S3BucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3BucketResource s3BucketResource3 = S3BucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("different-partition") + .region("region") + .build(); + + assertEquals(s3BucketResource1, s3BucketResource2); + assertNotEquals(s3BucketResource1, s3BucketResource3); + } + + @Test + public void equals_minimalProperties() { + S3BucketResource s3BucketResource1 = S3BucketResource.builder() + .bucketName("bucket") + .build(); + + S3BucketResource s3BucketResource2 = S3BucketResource.builder() + .bucketName("bucket") + .build(); + + S3BucketResource s3BucketResource3 = S3BucketResource.builder() + .bucketName("another-bucket") + .build(); + + assertEquals(s3BucketResource1, s3BucketResource2); + assertNotEquals(s3BucketResource1, s3BucketResource3); + } + + @Test + public void hashcode_allProperties() { + S3BucketResource s3BucketResource1 = S3BucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3BucketResource s3BucketResource2 = S3BucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3BucketResource s3BucketResource3 = S3BucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("different-partition") + .region("region") + .build(); + + assertEquals(s3BucketResource1.hashCode(), s3BucketResource2.hashCode()); + assertNotEquals(s3BucketResource1.hashCode(), s3BucketResource3.hashCode()); + } +} \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ObjectResourceTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ObjectResourceTest.java new file mode 100644 index 000000000000..127488e831c7 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/resource/S3ObjectResourceTest.java @@ -0,0 +1,207 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package software.amazon.awssdk.services.s3.internal.resource;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+import java.util.Optional;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class S3ObjectResourceTest {
+    @Rule
+    public ExpectedException exception = ExpectedException.none();
+
+    @Test
+    public void buildWithBucketParent() {
+        S3BucketResource parentResource = S3BucketResource.builder()
+                .bucketName("bucket")
+                .accountId("account-id")
+                .partition("partition")
+                .region("region")
+                .build();
+
+        S3ObjectResource s3ObjectResource = S3ObjectResource.builder()
+                .key("key")
+                .parentS3Resource(parentResource)
+                .build();
+
+        assertEquals("key", s3ObjectResource.key());
+        assertEquals(Optional.of("account-id"), s3ObjectResource.accountId());
+        assertEquals(Optional.of("partition"), s3ObjectResource.partition());
+        assertEquals(Optional.of("region"), s3ObjectResource.region());
+        assertEquals("object", s3ObjectResource.type());
+        assertEquals(Optional.of(parentResource), s3ObjectResource.parentS3Resource());
+    }
+
+    @Test
+    public void buildWithAccessPointParent() {
+        S3AccessPointResource parentResource = S3AccessPointResource.builder()
+                .accessPointName("test-ap")
+                .accountId("account-id")
+                .partition("partition")
+                .region("region")
+                .build();
+
+        S3ObjectResource s3ObjectResource = S3ObjectResource.builder()
+                .key("key")
+                .parentS3Resource(parentResource)
+                .build();
+
+        assertEquals("key", s3ObjectResource.key());
+        assertEquals(Optional.of("account-id"), s3ObjectResource.accountId());
+        assertEquals(Optional.of("partition"), s3ObjectResource.partition());
+        assertEquals(Optional.of("region"), s3ObjectResource.region());
+        assertEquals("object", s3ObjectResource.type());
+        assertEquals(Optional.of(parentResource), s3ObjectResource.parentS3Resource());
+    }
+
+    @Test
+    public void buildWithInvalidParentType() {
+        S3Resource fakeS3ObjectResource = new S3Resource() {
+            @Override
+            public Optional<String> partition() {
+                return Optional.empty();
+            }
+
+            @Override
+            public Optional<String> region() {
+                return Optional.empty();
+            }
+
+            @Override
+            public Optional<String> accountId() {
+                return Optional.empty();
+            }
+
+            @Override
+            public String type() {
+                return null;
+            }
+        };
+
+        exception.expect(IllegalArgumentException.class);
+        exception.expectMessage("parentS3Resource");
+        S3ObjectResource.builder()
+                .parentS3Resource(fakeS3ObjectResource)
+                .key("key")
+                .build();
+    }
+
+    @Test
+    public void buildWithMissingKey() {
+        S3BucketResource parentResource = S3BucketResource.builder()
+                .bucketName("bucket")
+                .accountId("account-id")
+                .partition("partition")
+                .region("region")
+                .build();
+
+        exception.expect(NullPointerException.class);
+        exception.expectMessage("key");
+        S3ObjectResource.builder()
+                .parentS3Resource(parentResource)
+                .build();
+    }
+
+    @Test
+    public void buildWithMissingParent() {
+        exception.expect(NullPointerException.class);
+        exception.expectMessage("parentS3Resource");
+        S3ObjectResource.builder()
+                .key("test-key")
+                .build();
+    }
+
+    @Test
+    public void equalsAndHashCode_allPropertiesSame() {
+        S3BucketResource parentResource = S3BucketResource.builder()
+                .bucketName("bucket")
+                .accountId("account-id")
+                .partition("partition")
+
.region("region") + .build(); + S3ObjectResource s3ObjectResource1 = S3ObjectResource.builder() + .key("key") + .parentS3Resource(parentResource) + .build(); + + S3ObjectResource s3ObjectResource2 = S3ObjectResource.builder() + .key("key") + .parentS3Resource(parentResource) + .build(); + + assertEquals(s3ObjectResource1, s3ObjectResource2); + assertEquals(s3ObjectResource1.hashCode(), s3ObjectResource2.hashCode()); + } + + @Test + public void equalsAndHashCode_differentKey() { + S3BucketResource parentResource = S3BucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + S3ObjectResource s3ObjectResource1 = S3ObjectResource.builder() + .key("key1") + .parentS3Resource(parentResource) + .build(); + + S3ObjectResource s3ObjectResource2 = S3ObjectResource.builder() + .key("key2") + .parentS3Resource(parentResource) + .build(); + + + assertNotEquals(s3ObjectResource1, s3ObjectResource2); + assertNotEquals(s3ObjectResource1.hashCode(), s3ObjectResource2.hashCode()); + } + + @Test + public void equalsAndHashCode_differentParent() { + S3BucketResource parentResource = S3BucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3BucketResource parentResource2 = S3BucketResource.builder() + .bucketName("bucket2") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + S3ObjectResource s3ObjectResource1 = S3ObjectResource.builder() + .key("key") + .parentS3Resource(parentResource) + .build(); + + S3ObjectResource s3ObjectResource2 = S3ObjectResource.builder() + .key("key") + .parentS3Resource(parentResource2) + .build(); + + + assertNotEquals(s3ObjectResource1, s3ObjectResource2); + assertNotEquals(s3ObjectResource1.hashCode(), s3ObjectResource2.hashCode()); + } + +} \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/usearnregion/ProfileUseArnRegionProviderTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/usearnregion/ProfileUseArnRegionProviderTest.java new file mode 100644 index 000000000000..eb6b3b27cc33 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/usearnregion/ProfileUseArnRegionProviderTest.java @@ -0,0 +1,121 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.usearnregion; + +import static java.lang.Boolean.FALSE; +import static java.lang.Boolean.TRUE; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.in; +import static org.mockito.Matchers.any; +import static software.amazon.awssdk.profiles.ProfileFileSystemSetting.AWS_CONFIG_FILE; + +import java.util.Optional; +import java.util.StringJoiner; +import org.junit.After; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.utils.StringInputStream; + +public class ProfileUseArnRegionProviderTest { + private ProfileUseArnRegionProvider provider = ProfileUseArnRegionProvider.create(); + + @After + public void clearSystemProperty() { + System.clearProperty(AWS_CONFIG_FILE.property()); + } + + @Test + public void notSpecified_shouldReturnEmptyOptional() { + assertThat(provider.resolveUseArnRegion()).isEqualTo(Optional.empty()); + } + + @Test + public void specifiedInConfigFile_shouldResolve() { + String configFile = getClass().getResource("UseArnRegionSet_true").getFile(); + System.setProperty(AWS_CONFIG_FILE.property(), configFile); + + assertThat(provider.resolveUseArnRegion()).isEqualTo(Optional.of(TRUE)); + } + + @Test + public void configFile_mixedSpace() { + String configFile = getClass().getResource("UseArnRegionSet_mixedSpace").getFile(); + System.setProperty(AWS_CONFIG_FILE.property(), configFile); + + assertThat(provider.resolveUseArnRegion()).isEqualTo(Optional.of(FALSE)); + } + + @Test + public void unsupportedValue_shouldThrowException() { + String configFile = getClass().getResource("UseArnRegionSet_unsupportedValue").getFile(); + System.setProperty(AWS_CONFIG_FILE.property(), configFile); + + assertThatThrownBy(() -> provider.resolveUseArnRegion()).isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void commaNoSpace_shouldResolveCorrectly() { + String configFile = getClass().getResource("UseArnRegionSet_noSpace").getFile(); + System.setProperty(AWS_CONFIG_FILE.property(), configFile); + + assertThat(provider.resolveUseArnRegion()).isEqualTo(Optional.of(FALSE)); + } + + @Test + public void specifiedInOverrideConfig_shouldUse() { + ExecutionInterceptor interceptor = Mockito.spy(AbstractExecutionInterceptor.class); + + String profileFileContent = + "[default]\n" + + "s3_use_arn_region = true\n"; + + ProfileFile profileFile = ProfileFile.builder() + .type(ProfileFile.Type.CONFIGURATION) + .content(new StringInputStream(profileFileContent)) + .build(); + + S3Client s3 = S3Client.builder() + .region(Region.US_WEST_2) + .credentialsProvider(AnonymousCredentialsProvider.create()) + .overrideConfiguration(c -> c.defaultProfileFile(profileFile) + .defaultProfileName("default") + .addExecutionInterceptor(interceptor) + .retryPolicy(r -> r.numRetries(0))) + .build(); + + String arn = 
"arn:aws:s3:us-banana-46:12345567890:accesspoint:foo"; + assertThatThrownBy(() -> s3.getObject(r -> r.bucket(arn).key("bar"))).isInstanceOf(SdkException.class); + + ArgumentCaptor context = ArgumentCaptor.forClass(Context.BeforeTransmission.class); + Mockito.verify(interceptor).beforeTransmission(context.capture(), any()); + + String host = context.getValue().httpRequest().host(); + assertThat(host).contains("us-banana-46"); + } + + public static abstract class AbstractExecutionInterceptor implements ExecutionInterceptor {} +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/usearnregion/SystemSettingsUseArnRegionProviderTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/usearnregion/SystemSettingsUseArnRegionProviderTest.java new file mode 100644 index 000000000000..021fd91751bb --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/usearnregion/SystemSettingsUseArnRegionProviderTest.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.usearnregion; + +import static java.lang.Boolean.FALSE; +import static java.lang.Boolean.TRUE; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.services.s3.S3SystemSetting.AWS_S3_USE_ARN_REGION; + +import java.util.Optional; +import org.junit.After; +import org.junit.Test; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; + +public class SystemSettingsUseArnRegionProviderTest { + private final SystemsSettingsUseArnRegionProvider provider = SystemsSettingsUseArnRegionProvider.create(); + private final EnvironmentVariableHelper helper = new EnvironmentVariableHelper(); + + @After + public void clearSystemProperty() { + System.clearProperty(AWS_S3_USE_ARN_REGION.property()); + helper.reset(); + } + + @Test + public void notSpecified_shouldReturnEmptyOptional() { + assertThat(provider.resolveUseArnRegion()).isEqualTo(Optional.empty()); + } + + @Test + public void emptySystemProperties_shouldReturnEmptyOptional() { + System.setProperty(AWS_S3_USE_ARN_REGION.property(), ""); + assertThatThrownBy(() -> provider.resolveUseArnRegion()).isInstanceOf(IllegalStateException.class); + } + + @Test + public void specifiedInSystemProperties_shouldResolve() { + System.setProperty(AWS_S3_USE_ARN_REGION.property(), "false"); + assertThat(provider.resolveUseArnRegion()).isEqualTo(Optional.of(FALSE)); + } + + @Test + public void specifiedInEnvironmentVariables_shouldResolve() { + helper.set(AWS_S3_USE_ARN_REGION.environmentVariable(), "true"); + assertThat(provider.resolveUseArnRegion()).isEqualTo(Optional.of(TRUE)); + } + + @Test + public void specifiedInBothPlaces_SystemPropertiesShouldTakePrecedence() { + System.setProperty(AWS_S3_USE_ARN_REGION.property(), "true"); + helper.set(AWS_S3_USE_ARN_REGION.environmentVariable(), "false"); + 
assertThat(provider.resolveUseArnRegion()).isEqualTo(Optional.of(TRUE)); + } + + @Test + public void mixedSpace_shouldResolveCorrectly() { + System.setProperty(AWS_S3_USE_ARN_REGION.property(), "tRuE"); + assertThat(provider.resolveUseArnRegion()).isEqualTo(Optional.of(TRUE)); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionProviderChainTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionProviderChainTest.java new file mode 100644 index 000000000000..7ac12f7eda31 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionProviderChainTest.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.usearnregion; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.profiles.ProfileFileSystemSetting.AWS_CONFIG_FILE; +import static software.amazon.awssdk.services.s3.S3SystemSetting.AWS_S3_USE_ARN_REGION; + +import java.util.Optional; +import org.junit.After; +import org.junit.Test; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; + +public class UseArnRegionProviderChainTest { + private final EnvironmentVariableHelper helper = new EnvironmentVariableHelper(); + + @After + public void clearSystemProperty() { + System.clearProperty(AWS_S3_USE_ARN_REGION.property()); + System.clearProperty(AWS_CONFIG_FILE.property()); + helper.reset(); + } + + @Test + public void notSpecified_shouldReturnEmptyOptional() { + assertThat(UseArnRegionProviderChain.create().resolveUseArnRegion()).isEqualTo(Optional.empty()); + } + + @Test + public void specifiedInBothProviders_systemPropertiesShouldTakePrecedence() { + System.setProperty(AWS_S3_USE_ARN_REGION.property(), "false"); + String configFile = getClass().getResource("UseArnRegionSet_true").getFile(); + System.setProperty(AWS_CONFIG_FILE.property(), configFile); + + assertThat(UseArnRegionProviderChain.create().resolveUseArnRegion()).isEqualTo(Optional.of(Boolean.FALSE)); + } + + @Test + public void systemPropertiesThrowException_shouldUseConfigFile() { + System.setProperty(AWS_S3_USE_ARN_REGION.property(), "foobar"); + String configFile = getClass().getResource("UseArnRegionSet_true").getFile(); + System.setProperty(AWS_CONFIG_FILE.property(), configFile); + + assertThat(UseArnRegionProviderChain.create().resolveUseArnRegion()).isEqualTo(Optional.of(Boolean.TRUE)); + } + + @Test + public void bothProvidersThrowException_shouldReturnEmpty() { + System.setProperty(AWS_S3_USE_ARN_REGION.property(), "foobar"); + String configFile = getClass().getResource("UseArnRegionSet_unsupportedValue").getFile(); + System.setProperty(AWS_CONFIG_FILE.property(), configFile); + + assertThat(UseArnRegionProviderChain.create().resolveUseArnRegion()).isEqualTo(Optional.empty()); + } +} diff --git 
a/services/s3/src/test/java/software/amazon/awssdk/services/s3/presigner/model/GetObjectPresignRequestTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/presigner/model/GetObjectPresignRequestTest.java new file mode 100644 index 000000000000..4436e66cd8b7 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/presigner/model/GetObjectPresignRequestTest.java @@ -0,0 +1,132 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy; + +import java.time.Duration; +import org.junit.Test; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; + +public class GetObjectPresignRequestTest { + private static final GetObjectRequest GET_OBJECT_REQUEST = GetObjectRequest.builder() + .bucket("some-bucket") + .key("some-key") + .build(); + + @Test + public void build_minimal_maximal() { + GetObjectPresignRequest getObjectPresignRequest = + GetObjectPresignRequest.builder() + .getObjectRequest(GET_OBJECT_REQUEST) + .signatureDuration(Duration.ofSeconds(123L)) + .build(); + + assertThat(getObjectPresignRequest.getObjectRequest()).isEqualTo(GET_OBJECT_REQUEST); + assertThat(getObjectPresignRequest.signatureDuration()).isEqualTo(Duration.ofSeconds(123L)); + } + + @Test + public void build_missingProperty_getObjectRequest() { + assertThatThrownBy(() -> GetObjectPresignRequest.builder() + .signatureDuration(Duration.ofSeconds(123L)) + .build()) + .isInstanceOf(NullPointerException.class) + .hasMessageContaining("getObjectRequest"); + } + + @Test + public void build_missingProperty_signatureDuration() { + assertThatThrownBy(() -> GetObjectPresignRequest.builder() + .getObjectRequest(GET_OBJECT_REQUEST) + .build()) + .isInstanceOf(NullPointerException.class) + .hasMessageContaining("signatureDuration"); + } + + @Test + public void toBuilder() { + GetObjectPresignRequest getObjectPresignRequest = + GetObjectPresignRequest.builder() + .getObjectRequest(GET_OBJECT_REQUEST) + .signatureDuration(Duration.ofSeconds(123L)) + .build(); + + GetObjectPresignRequest otherGetObjectPresignRequest = getObjectPresignRequest.toBuilder().build(); + + assertThat(otherGetObjectPresignRequest.getObjectRequest()).isEqualTo(GET_OBJECT_REQUEST); + assertThat(otherGetObjectPresignRequest.signatureDuration()).isEqualTo(Duration.ofSeconds(123L)); + } + + @Test + public void equalsAndHashCode_allPropertiesEqual() { + GetObjectPresignRequest getObjectPresignRequest = + GetObjectPresignRequest.builder() + .getObjectRequest(GET_OBJECT_REQUEST) + .signatureDuration(Duration.ofSeconds(123L)) + .build(); + + GetObjectPresignRequest otherGetObjectPresignRequest = + GetObjectPresignRequest.builder() + .getObjectRequest(GET_OBJECT_REQUEST) + .signatureDuration(Duration.ofSeconds(123L)) + .build(); + + 
assertThat(otherGetObjectPresignRequest).isEqualTo(getObjectPresignRequest); + assertThat(otherGetObjectPresignRequest.hashCode()).isEqualTo(getObjectPresignRequest.hashCode()); + } + + @Test + public void equalsAndHashCode_differentProperty_getObjectRequest() { + GetObjectRequest otherGetObjectRequest = GetObjectRequest.builder() + .bucket("other-bucket") + .key("other-key") + .build(); + GetObjectPresignRequest getObjectPresignRequest = + GetObjectPresignRequest.builder() + .getObjectRequest(GET_OBJECT_REQUEST) + .signatureDuration(Duration.ofSeconds(123L)) + .build(); + + GetObjectPresignRequest otherGetObjectPresignRequest = + GetObjectPresignRequest.builder() + .getObjectRequest(otherGetObjectRequest) + .signatureDuration(Duration.ofSeconds(123L)) + .build(); + + assertThat(otherGetObjectPresignRequest).isNotEqualTo(getObjectPresignRequest); + assertThat(otherGetObjectPresignRequest.hashCode()).isNotEqualTo(getObjectPresignRequest.hashCode()); + } + + @Test + public void equalsAndHashCode_differentProperty_signatureDuration() { + GetObjectPresignRequest getObjectPresignRequest = + GetObjectPresignRequest.builder() + .getObjectRequest(GET_OBJECT_REQUEST) + .signatureDuration(Duration.ofSeconds(123L)) + .build(); + + GetObjectPresignRequest otherGetObjectPresignRequest = + GetObjectPresignRequest.builder() + .getObjectRequest(GET_OBJECT_REQUEST) + .signatureDuration(Duration.ofSeconds(321L)) + .build(); + + assertThat(otherGetObjectPresignRequest).isNotEqualTo(getObjectPresignRequest); + assertThat(otherGetObjectPresignRequest.hashCode()).isNotEqualTo(getObjectPresignRequest.hashCode()); + } +} \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/presigner/model/PresignedGetObjectRequestTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/presigner/model/PresignedGetObjectRequestTest.java new file mode 100644 index 000000000000..b1e6a70d21ce --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/presigner/model/PresignedGetObjectRequestTest.java @@ -0,0 +1,201 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package software.amazon.awssdk.services.s3.presigner.model;
+
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.runners.MockitoJUnitRunner;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpMethod;
+import software.amazon.awssdk.http.SdkHttpRequest;
+
+@RunWith(MockitoJUnitRunner.class)
+public class PresignedGetObjectRequestTest {
+    private static final Map<String, List<String>> FAKE_SIGNED_HEADERS;
+    private static final URL FAKE_URL;
+    private static final SdkBytes FAKE_SIGNED_PAYLOAD = SdkBytes.fromString("fake-payload", StandardCharsets.UTF_8);
+
+    static {
+        Map<String, List<String>> map = new HashMap<>();
+        map.put("fake-key", Collections.unmodifiableList(Arrays.asList("one", "two")));
+        FAKE_SIGNED_HEADERS = Collections.unmodifiableMap(map);
+
+
+        try {
+            FAKE_URL = new URL("https://localhost");
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Mock
+    private SdkHttpRequest mockSdkHttpRequest;
+
+    @Before
+    public void setup() throws URISyntaxException {
+        when(mockSdkHttpRequest.getUri()).thenReturn(FAKE_URL.toURI());
+    }
+
+    private PresignedGetObjectRequest generateMaximal() {
+        return PresignedGetObjectRequest.builder()
+                .expiration(Instant.MAX)
+                .httpRequest(mockSdkHttpRequest)
+                .signedHeaders(FAKE_SIGNED_HEADERS)
+                .signedPayload(FAKE_SIGNED_PAYLOAD)
+                .isBrowserExecutable(false)
+                .build();
+    }
+
+    private PresignedGetObjectRequest generateMinimal() {
+        return PresignedGetObjectRequest.builder()
+                .expiration(Instant.MAX)
+                .httpRequest(mockSdkHttpRequest)
+                .signedHeaders(FAKE_SIGNED_HEADERS)
+                .isBrowserExecutable(false)
+                .build();
+    }
+
+    @Test
+    public void build_allProperties() {
+        PresignedGetObjectRequest presignedGetObjectRequest = generateMaximal();
+
+        assertThat(presignedGetObjectRequest.expiration()).isEqualTo(Instant.MAX);
+        assertThat(presignedGetObjectRequest.httpRequest()).isEqualTo(mockSdkHttpRequest);
+        assertThat(presignedGetObjectRequest.signedHeaders()).isEqualTo(FAKE_SIGNED_HEADERS);
+        assertThat(presignedGetObjectRequest.signedPayload()).isEqualTo(Optional.of(FAKE_SIGNED_PAYLOAD));
+        assertThat(presignedGetObjectRequest.url()).isEqualTo(FAKE_URL);
+    }
+
+    @Test
+    public void build_minimalProperties() {
+        PresignedGetObjectRequest presignedGetObjectRequest = generateMinimal();
+
+        assertThat(presignedGetObjectRequest.expiration()).isEqualTo(Instant.MAX);
+        assertThat(presignedGetObjectRequest.httpRequest()).isEqualTo(mockSdkHttpRequest);
+        assertThat(presignedGetObjectRequest.url()).isEqualTo(FAKE_URL);
+        assertThat(presignedGetObjectRequest.signedHeaders()).isEqualTo(FAKE_SIGNED_HEADERS);
+        assertThat(presignedGetObjectRequest.signedPayload()).isEmpty();
+    }
+
+    @Test
+    public void build_missingProperty_expiration() {
+        assertThatThrownBy(() -> generateMinimal().toBuilder().expiration(null).build())
+            .isInstanceOf(NullPointerException.class)
+            .hasMessageContaining("expiration");
+    }
+
+    @Test
+    public void
build_missingProperty_httpRequest() {
+        assertThatThrownBy(() -> generateMinimal().toBuilder().httpRequest(null).build())
+            .isInstanceOf(NullPointerException.class)
+            .hasMessageContaining("httpRequest");
+    }
+
+    @Test
+    public void hasSignedPayload_false() {
+        PresignedGetObjectRequest presignedGetObjectRequest = generateMinimal();
+
+        assertThat(presignedGetObjectRequest.signedPayload()).isNotPresent();
+    }
+
+    @Test
+    public void hasSignedPayload_true() {
+        PresignedGetObjectRequest presignedGetObjectRequest = generateMaximal();
+
+        assertThat(presignedGetObjectRequest.signedPayload()).isPresent();
+    }
+
+    @Test
+    public void equalsAndHashCode_maximal() {
+        PresignedGetObjectRequest request = generateMaximal();
+        PresignedGetObjectRequest otherRequest = generateMaximal();
+
+        assertThat(request).isEqualTo(otherRequest);
+        assertThat(request.hashCode()).isEqualTo(otherRequest.hashCode());
+    }
+
+    @Test
+    public void equalsAndHashCode_minimal() {
+        PresignedGetObjectRequest request = generateMinimal();
+        PresignedGetObjectRequest otherRequest = generateMinimal();
+
+        assertThat(request).isEqualTo(otherRequest);
+        assertThat(request.hashCode()).isEqualTo(otherRequest.hashCode());
+    }
+
+    @Test
+    public void equalsAndHashCode_differentProperty_httpRequest() throws URISyntaxException {
+        SdkHttpRequest otherHttpRequest = mock(SdkHttpRequest.class);
+        when(otherHttpRequest.getUri()).thenReturn(FAKE_URL.toURI());
+
+        PresignedGetObjectRequest request = generateMaximal();
+        PresignedGetObjectRequest otherRequest = request.toBuilder().httpRequest(otherHttpRequest).build();
+
+        assertThat(request).isNotEqualTo(otherRequest);
+        assertThat(request.hashCode()).isNotEqualTo(otherRequest.hashCode());
+    }
+
+    @Test
+    public void equalsAndHashCode_differentProperty_expiration() {
+        PresignedGetObjectRequest request = generateMaximal();
+        PresignedGetObjectRequest otherRequest = request.toBuilder().expiration(Instant.MIN).build();
+
+        assertThat(request).isNotEqualTo(otherRequest);
+        assertThat(request.hashCode()).isNotEqualTo(otherRequest.hashCode());
+    }
+
+    @Test
+    public void equalsAndHashCode_differentProperty_signedPayload() {
+        SdkBytes otherSignedPayload = SdkBytes.fromString("other-payload", StandardCharsets.UTF_8);
+
+        PresignedGetObjectRequest request = generateMaximal();
+        PresignedGetObjectRequest otherRequest = request.toBuilder().signedPayload(otherSignedPayload).build();
+
+        assertThat(request).isNotEqualTo(otherRequest);
+        assertThat(request.hashCode()).isNotEqualTo(otherRequest.hashCode());
+    }
+
+    @Test
+    public void equalsAndHashCode_differentProperty_signedHeaders() {
+        Map<String, List<String>> otherSignedHeaders = new HashMap<>();
+        otherSignedHeaders.put("fake-key", Collections.unmodifiableList(Arrays.asList("other-one", "other-two")));
+
+        PresignedGetObjectRequest request = generateMaximal();
+        PresignedGetObjectRequest otherRequest = request.toBuilder().signedHeaders(otherSignedHeaders).build();
+
+        assertThat(request).isNotEqualTo(otherRequest);
+        assertThat(request.hashCode()).isNotEqualTo(otherRequest.hashCode());
+    }
+}
\ No newline at end of file
diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/InterceptorTestUtils.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/InterceptorTestUtils.java
index 3228d86825dd..83e462042437
--- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/InterceptorTestUtils.java
+++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/InterceptorTestUtils.java
@@ -1,5 +1,5 @@
/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -37,6 +37,23 @@ public final class InterceptorTestUtils { private InterceptorTestUtils() { } + public static SdkHttpFullRequest sdkHttpFullRequest() { + return SdkHttpFullRequest.builder() + .uri(URI.create("http://localhost:8080")) + .method(SdkHttpMethod.GET) + .build(); + } + + public static SdkHttpRequest sdkHttpRequest(URI customUri) { + return SdkHttpFullRequest.builder() + .protocol(customUri.getScheme()) + .host(customUri.getHost()) + .port(customUri.getPort()) + .method(SdkHttpMethod.GET) + .encodedPath(customUri.getPath()) + .build(); + } + public static Context.ModifyHttpResponse modifyHttpResponse(SdkRequest request, SdkHttpResponse sdkHttpResponse) { Publisher publisher = new EmptyPublisher<>(); @@ -116,13 +133,6 @@ public SdkRequest request() { }; } - public static SdkHttpFullRequest sdkHttpFullRequest() { - return SdkHttpFullRequest.builder() - .uri(URI.create("http://localhost:8080")) - .method(SdkHttpMethod.GET) - .build(); - } - public static Context.ModifyResponse modifyResponseContext(SdkRequest request, SdkResponse response, SdkHttpResponse sdkHttpResponse) { return new Context.ModifyResponse() { @Override diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/S3EndpointResolutionTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/S3EndpointResolutionTest.java deleted file mode 100644 index 2387ca5d78af..000000000000 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/S3EndpointResolutionTest.java +++ /dev/null @@ -1,385 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.s3.utils; - - -import static org.assertj.core.api.Assertions.assertThat; -import static software.amazon.awssdk.services.s3.S3MockUtils.mockListBucketsResponse; -import static software.amazon.awssdk.services.s3.S3MockUtils.mockListObjectsResponse; - -import java.net.URI; -import org.junit.Before; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; -import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; -import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; -import software.amazon.awssdk.core.signer.Signer; -import software.amazon.awssdk.http.SdkHttpFullRequest; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.S3Client; -import software.amazon.awssdk.services.s3.S3ClientBuilder; -import software.amazon.awssdk.services.s3.S3Configuration; -import software.amazon.awssdk.services.s3.internal.handlers.EndpointAddressInterceptor; -import software.amazon.awssdk.services.s3.model.ListObjectsRequest; -import software.amazon.awssdk.testutils.service.http.MockHttpClient; - -/** - * Functional tests for various endpoint related behavior in S3. - */ -public class S3EndpointResolutionTest { - - private static final String BUCKET = "some-bucket"; - private static final String NON_DNS_COMPATIBLE_BUCKET = "SOME.BUCKET"; - private static final String ENDPOINT_WITHOUT_BUCKET = "https://s3.ap-south-1.amazonaws.com"; - private static final String ENDPOINT_WITH_BUCKET = String.format("https://%s.s3.ap-south-1.amazonaws.com", BUCKET); - - private MockHttpClient mockHttpClient; - private Signer mockSigner; - - @Before - public void setup() { - mockHttpClient = new MockHttpClient(); - mockSigner = (request, executionAttributes) -> request; - } - - /** - * Only APIs that operate on buckets uses virtual addressing. Service level operations like ListBuckets will use the normal - * endpoint. - */ - @Test - public void serviceLevelOperation_UsesStandardEndpoint() throws Exception { - mockHttpClient.stubNextResponse(mockListBucketsResponse()); - S3Client s3Client = buildClient(null); - - s3Client.listBuckets(); - - assertThat(mockHttpClient.getLastRequest().getUri()) - .as("Uses regional S3 endpoint without bucket") - .isEqualTo(URI.create(ENDPOINT_WITHOUT_BUCKET + "/")); - - assertThat(mockHttpClient.getLastRequest().encodedPath()) - .as("Bucket is not in resource path") - .isEqualTo("/"); - } - - /** - * Service level operations for dualstack mode should go to the dualstack endpoint (without virtual addressing). - */ - @Test - public void serviceLevelOperation_WithDualstackEnabled_UsesDualstackEndpoint() throws Exception { - mockHttpClient.stubNextResponse(mockListBucketsResponse()); - S3Client s3Client = buildClient(withDualstackEnabled()); - - s3Client.listBuckets(); - - assertThat(mockHttpClient.getLastRequest().getUri()) - .as("Uses regional S3 endpoint without bucket") - .isEqualTo(URI.create("https://s3.dualstack.ap-south-1.amazonaws.com/")); - } - - /** - * When a custom endpoint is provided via the builder we should honor that instead of trying to re-resolve it in the - * {@link EndpointAddressInterceptor}. 
- */ - @Test - public void customEndpointProvided_UsesCustomEndpoint() throws Exception { - URI customEndpoint = URI.create("https://foobar.amazonaws.com"); - mockHttpClient.stubNextResponse(mockListBucketsResponse()); - S3Client s3Client = clientBuilder().endpointOverride(customEndpoint).build(); - - s3Client.listBuckets(); - - assertThat(mockHttpClient.getLastRequest().getUri()) - .as("Uses custom endpoint") - .isEqualTo(URI.create(customEndpoint + "/")); - } - - /** - * If a custom, non-s3 endpoint is used we revert to path style addressing. This is useful for alternative S3 implementations - * like Ceph that do not support virtual style addressing. - */ - @Test - public void nonS3EndpointProvided_DoesNotUseVirtualAddressing() throws Exception { - URI customEndpoint = URI.create("https://foobar.amazonaws.com"); - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = clientBuilder().endpointOverride(customEndpoint).build(); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), customEndpoint.toString() + "/" + BUCKET); - } - - /** - * If a custom S3 endpoint is provided (like s3-external-1 or a FIPS endpoint) then we should still use virtual addressing - * when possible. - */ - @Test - public void customS3EndpointProvided_UsesVirtualAddressing() throws Exception { - URI customEndpoint = URI.create("https://s3-external-1.amazonaws.com"); - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = clientBuilder().endpointOverride(customEndpoint).build(); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), - String.format("https://%s.s3-external-1.amazonaws.com", BUCKET)); - } - - /** - * If customer is using HTTP we need to preserve that scheme when switching to virtual addressing. - */ - @Test - public void customHttpEndpoint_PreservesSchemeWhenSwitchingToVirtualAddressing() throws Exception { - URI customEndpoint = URI.create("http://s3-external-1.amazonaws.com"); - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = clientBuilderWithMockSigner().endpointOverride(customEndpoint).build(); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), - String.format("http://%s.s3-external-1.amazonaws.com", BUCKET)); - } - - /** - * In us-east-1 buckets can have non-DNS compliant names. For those buckets we must always use path style even when it - * is disabled per the advanced configuration. - */ - @Test - public void pathStyleDisabled_NonDnsCompatibleBucket_StillUsesPathStyleAddressing() throws Exception { - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = buildClient(null); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(NON_DNS_COMPATIBLE_BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), ENDPOINT_WITHOUT_BUCKET + "/" + NON_DNS_COMPATIBLE_BUCKET); - } - - /** - * When path style is enabled in the advanced configuration we should always use it. 
- */ - @Test - public void pathStyleConfigured_UsesPathStyleAddressing() throws Exception { - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = buildClient(withPathStyle()); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), ENDPOINT_WITHOUT_BUCKET + "/" + BUCKET); - } - - /** - * By default we use virtual addressing when possible. - */ - @Test - public void noServiceConfigurationProvided_UsesVirtualAddressingWithStandardEndpoint() throws Exception { - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = buildClient(null); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), ENDPOINT_WITH_BUCKET); - } - - /** - * By default we use virtual addressing when possible. - */ - @Test - public void emptyServiceConfigurationProvided_UsesVirtualAddressingWithStandardEndpoint() throws Exception { - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = buildClient(S3Configuration.builder().build()); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), ENDPOINT_WITH_BUCKET); - } - - /** - * S3 accelerate has a global endpoint, we use that when accelerate mode is enabled in the advanced configuration. - */ - @Test - public void accelerateEnabled_UsesVirtualAddressingWithAccelerateEndpoint() throws Exception { - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = buildClient(withAccelerateEnabled()); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), - String.format("https://%s.s3-accelerate.amazonaws.com", BUCKET)); - } - - /** - * Dualstack uses regional endpoints that support virtual addressing. - */ - @Test - public void dualstackEnabled_UsesVirtualAddressingWithDualstackEndpoint() throws Exception { - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = buildClient(withDualstackEnabled()); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), - String.format("https://%s.s3.dualstack.ap-south-1.amazonaws.com", BUCKET)); - } - - /** - * Dualstack also supports path style endpoints just like the normal endpoints. - */ - @Test - public void dualstackAndPathStyleEnabled_UsesPathStyleAddressingWithDualstackEndpoint() throws Exception { - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = buildClient(withDualstackAndPathStyleEnabled()); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), "https://s3.dualstack.ap-south-1.amazonaws.com/" + BUCKET); - } - - /** - * When dualstack and accelerate are both enabled there is a special, global dualstack endpoint we must use. 
- */ - @Test - public void dualstackAndAccelerateEnabled_UsesDualstackAccelerateEndpoint() throws Exception { - mockHttpClient.stubNextResponse(mockListObjectsResponse()); - S3Client s3Client = buildClient(withDualstackAndAccelerateEnabled()); - - s3Client.listObjects(ListObjectsRequest.builder().bucket(BUCKET).build()); - - assertEndpointMatches(mockHttpClient.getLastRequest(), - String.format("https://%s.s3-accelerate.dualstack.amazonaws.com", BUCKET)); - } - - /** - * Accelerate is not supported for several operations. For those we should go to the normal, regional endpoint. - */ - @Test - public void unsupportedAccelerateOption_UsesStandardEndpoint() throws Exception { - mockHttpClient.stubNextResponse(mockListBucketsResponse()); - S3Client s3Client = buildClient(withAccelerateEnabled()); - - s3Client.listBuckets(); - - assertThat(mockHttpClient.getLastRequest().getUri()) - .as("Uses regional S3 endpoint") - .isEqualTo(URI.create("https://s3.ap-south-1.amazonaws.com/")); - } - - /** - * Accelerate only supports virtual addressing. Path style cannot be used with accelerate enabled. - */ - @Test(expected = IllegalArgumentException.class) - public void accelerateAndPathStyleEnabled_ThrowsIllegalArgumentException() { - buildClient(S3Configuration.builder() - .pathStyleAccessEnabled(true) - .accelerateModeEnabled(true) - .build()); - } - - /** - * Assert that the provided request would have gone to the given endpoint. - * - * @param capturedRequest Request captured by mock HTTP client. - * @param endpoint Expected endpoint. - */ - private void assertEndpointMatches(SdkHttpRequest capturedRequest, String endpoint) { - assertThat(capturedRequest.getUri()).isEqualTo(URI.create(endpoint)); - } - - /** - * @param s3ServiceConfiguration Advanced configuration to use for this client. - * @return A built client with the given advanced configuration. - */ - private S3Client buildClient(S3Configuration s3ServiceConfiguration) { - return clientBuilder() - .serviceConfiguration(s3ServiceConfiguration) - .build(); - } - - /** - * @return Client builder instance preconfigured with credentials and region using the {@link #mockHttpClient} for transport. - */ - private S3ClientBuilder clientBuilder() { - return S3Client.builder() - .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) - .region(Region.AP_SOUTH_1) - .httpClient(mockHttpClient); - } - - /** - * @return Client builder instance preconfigured with credentials and region using the {@link #mockHttpClient} for transport - * and {@link #mockSigner} for signing. Using actual AwsS3V4Signer results in NPE as the execution goes into payload signing - * due to "http" protocol and input stream is not mark supported. - */ - private S3ClientBuilder clientBuilderWithMockSigner() { - return S3Client.builder() - .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) - .region(Region.AP_SOUTH_1) - .overrideConfiguration(ClientOverrideConfiguration.builder() - .putAdvancedOption(SdkAdvancedClientOption.SIGNER, - mockSigner) - .build()) - .httpClient(mockHttpClient); - } - - /** - * @return S3Configuration with path style enabled. - */ - private S3Configuration withPathStyle() { - return S3Configuration.builder() - .pathStyleAccessEnabled(true) - .build(); - } - - /** - * @return S3Configuration with accelerate mode enabled. 
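For reference, the addressing behaviors exercised by the removed S3EndpointResolutionTest are all driven by S3Configuration on the client builder. A minimal sketch, illustrative rather than part of this change (the class name, region, and option values are placeholders):

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.S3Configuration;

    public class AddressingOptionsSketch {
        public static void main(String[] args) {
            // Path-style keeps the bucket in the request path instead of the host name;
            // dualstack routes requests to the IPv6-capable regional endpoint.
            S3Client s3 = S3Client.builder()
                    .region(Region.AP_SOUTH_1)
                    .serviceConfiguration(S3Configuration.builder()
                            .pathStyleAccessEnabled(true)
                            .dualstackEnabled(true)
                            .build())
                    .build();
            s3.listBuckets().buckets().forEach(bucket -> System.out.println(bucket.name()));
        }
    }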
- */ - private S3Configuration withAccelerateEnabled() { - return S3Configuration.builder() - .accelerateModeEnabled(true) - .build(); - } - - /** - * @return S3Configuration with dualstack mode enabled. - */ - private S3Configuration withDualstackEnabled() { - return S3Configuration.builder() - .dualstackEnabled(true) - .build(); - } - - /** - * @return S3Configuration with dualstack mode and path style enabled. - */ - private S3Configuration withDualstackAndPathStyleEnabled() { - return S3Configuration.builder() - .dualstackEnabled(true) - .pathStyleAccessEnabled(true) - .build(); - } - - /** - * @return S3Configuration with dualstack mode and accelerate mode enabled. - */ - private S3Configuration withDualstackAndAccelerateEnabled() { - return S3Configuration.builder() - .dualstackEnabled(true) - .accelerateModeEnabled(true) - .build(); - } - -} diff --git a/services/s3/src/test/resources/log4j.properties b/services/s3/src/test/resources/log4j.properties index b821297c6731..012eb6e372f3 100644 --- a/services/s3/src/test/resources/log4j.properties +++ b/services/s3/src/test/resources/log4j.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_mixedSpace b/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_mixedSpace new file mode 100644 index 000000000000..c7556917d2f2 --- /dev/null +++ b/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_mixedSpace @@ -0,0 +1,2 @@ +[default] +s3_use_arn_region=fAlSE diff --git a/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_noSpace b/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_noSpace new file mode 100644 index 000000000000..5a3114eaf6db --- /dev/null +++ b/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_noSpace @@ -0,0 +1,2 @@ +[default] +s3_use_arn_region=false diff --git a/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_true b/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_true new file mode 100644 index 000000000000..a70cbb59a9ed --- /dev/null +++ b/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_true @@ -0,0 +1,2 @@ +[default] +s3_use_arn_region = true diff --git a/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_unsupportedValue b/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_unsupportedValue new file mode 100644 index 000000000000..72a689233754 --- /dev/null +++ b/services/s3/src/test/resources/software/amazon/awssdk/services/s3/internal/usearnregion/UseArnRegionSet_unsupportedValue @@ -0,0 +1,2 @@ +[default] +s3_use_arn_region=unsupported-value diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml new file mode 100644 index 000000000000..85283bcd280b --- /dev/null +++ b/services/s3control/pom.xml @@ -0,0 +1,92 
@@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + s3control + AWS Java SDK :: Services :: Amazon S3 Control + The AWS Java SDK for Amazon S3 Control module holds the client classes that are used for communicating with + Amazon Simple Storage Service Control Plane + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.s3control + + + + + + + + + + software.amazon.awssdk + aws-xml-protocol + ${awsjavasdk.version} + + + software.amazon.awssdk + arns + ${awsjavasdk.version} + + + software.amazon.awssdk + s3 + ${awsjavasdk.version} + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + profiles + ${awsjavasdk.version} + + + + commons-io + commons-io + test + + + org.apache.commons + commons-lang3 + test + + + software.amazon.awssdk + sts + ${awsjavasdk.version} + test + + + diff --git a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AccessPointsIntegrationTest.java b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AccessPointsIntegrationTest.java new file mode 100644 index 000000000000..1ffb641bffdf --- /dev/null +++ b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AccessPointsIntegrationTest.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package software.amazon.awssdk.services.s3control; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertNotNull; +import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; + +import java.util.StringJoiner; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.sts.StsClient; + +public class S3AccessPointsIntegrationTest extends S3ControlIntegrationTestBase { + + private static final String BUCKET = temporaryBucketName(S3AccessPointsIntegrationTest.class); + + private static final String AP_NAME = "java-sdk-" + System.currentTimeMillis(); + + private static final String KEY = "some-key"; + + private static S3ControlClient s3control; + + private static StsClient sts; + + private static String accountId; + + @BeforeClass + public static void setupFixture() { + createBucket(BUCKET); + + s3control = S3ControlClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + + sts = StsClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + + accountId = sts.getCallerIdentity().account(); + s3control.createAccessPoint(r -> r.accountId(accountId) + .bucket(BUCKET) + .name(AP_NAME)); + } + + @AfterClass + public static void tearDown() { + deleteBucketAndAllContents(BUCKET); + s3control.deleteAccessPoint(b -> b.accountId(accountId).name(AP_NAME)); + } + + @Test + public void transfer_Succeeds_UsingAccessPoint() { + StringJoiner apArn = new StringJoiner(":"); + apArn.add("arn").add("aws").add("s3").add("us-west-2").add(accountId).add("accesspoint").add(AP_NAME); + + s3.putObject(PutObjectRequest.builder() + .bucket(apArn.toString()) + .key(KEY) + .build(), RequestBody.fromString("helloworld")); + + String objectContent = s3.getObjectAsBytes(GetObjectRequest.builder() + .bucket(apArn.toString()) + .key(KEY) + .build()).asUtf8String(); + + assertThat(objectContent).isEqualTo("helloworld"); + } + + @Test + public void transfer_Succeeds_UsingAccessPoint_CrossRegion() { + S3Client s3DifferentRegion = + s3ClientBuilder().region(Region.US_EAST_1).serviceConfiguration(c -> c.useArnRegionEnabled(true)).build(); + + StringJoiner apArn = new StringJoiner(":"); + apArn.add("arn").add("aws").add("s3").add("us-west-2").add(accountId).add("accesspoint").add(AP_NAME); + + s3DifferentRegion.putObject(PutObjectRequest.builder() + .bucket(apArn.toString()) + .key(KEY) + .build(), RequestBody.fromString("helloworld")); + + String objectContent = s3DifferentRegion.getObjectAsBytes(GetObjectRequest.builder() + .bucket(apArn.toString()) + .key(KEY) + .build()).asUtf8String(); + + assertThat(objectContent).isEqualTo("helloworld"); + } + + @Test + public void accessPointOperation_nonArns() { + assertNotNull(s3control.listAccessPoints(b -> b.bucket(BUCKET).accountId(accountId).maxResults(1))); + assertNotNull(s3control.getAccessPoint(b -> b.name(AP_NAME).accountId(accountId))); + } +} diff --git a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AsyncAccessPointsIntegrationTest.java 
b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AsyncAccessPointsIntegrationTest.java new file mode 100644 index 000000000000..48f537bece32 --- /dev/null +++ b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AsyncAccessPointsIntegrationTest.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package software.amazon.awssdk.services.s3control; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertNotNull; +import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; + +import java.util.StringJoiner; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.sts.StsClient; + +public class S3AsyncAccessPointsIntegrationTest extends S3ControlIntegrationTestBase { + + private static final String BUCKET = temporaryBucketName(S3AsyncAccessPointsIntegrationTest.class); + + private static final String AP_NAME = "java-sdk-" + System.currentTimeMillis(); + + private static final String KEY = "some-key"; + + private static S3ControlAsyncClient s3control; + + private static StsClient sts; + + private static String accountId; + + @BeforeClass + public static void setupFixture() { + createBucket(BUCKET); + + s3control = S3ControlAsyncClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + + sts = StsClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + + accountId = sts.getCallerIdentity().account(); + s3control.createAccessPoint(r -> r.accountId(accountId) + .bucket(BUCKET) + .name(AP_NAME)) + .join(); + } + + @AfterClass + public static void tearDown() { + deleteBucketAndAllContents(BUCKET); + s3control.deleteAccessPoint(b -> b.accountId(accountId).name(AP_NAME)).join(); + } + + @Test + public void accessPointOperation_nonArns() { + assertNotNull(s3control.listAccessPoints(b -> b.bucket(BUCKET).accountId(accountId).maxResults(1)).join()); + assertNotNull(s3control.getAccessPoint(b -> b.name(AP_NAME).accountId(accountId)).join()); + } + + @Test + public void transfer_Succeeds_UsingAccessPoint() { + StringJoiner apArn = new StringJoiner(":"); + apArn.add("arn").add("aws").add("s3").add("us-west-2").add(accountId).add("accesspoint").add(AP_NAME); + + s3.putObject(PutObjectRequest.builder() + .bucket(apArn.toString()) + .key(KEY) + .build(), RequestBody.fromString("helloworld")); + + String objectContent = s3.getObjectAsBytes(GetObjectRequest.builder() + .bucket(apArn.toString()) + .key(KEY) + .build()).asUtf8String(); + + assertThat(objectContent).isEqualTo("helloworld"); + } +} diff --git 
a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3ControlIntegrationTest.java b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3ControlIntegrationTest.java new file mode 100644 index 000000000000..10bb77d58858 --- /dev/null +++ b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3ControlIntegrationTest.java @@ -0,0 +1,127 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package software.amazon.awssdk.services.s3control; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.services.s3control.model.DeletePublicAccessBlockRequest; +import software.amazon.awssdk.services.s3control.model.GetPublicAccessBlockResponse; +import software.amazon.awssdk.services.s3control.model.NoSuchPublicAccessBlockConfigurationException; +import software.amazon.awssdk.services.s3control.model.PutPublicAccessBlockResponse; +import software.amazon.awssdk.services.s3control.model.S3ControlException; +import software.amazon.awssdk.services.sts.StsClient; +import software.amazon.awssdk.testutils.service.AwsIntegrationTestBase; + +public class S3ControlIntegrationTest extends AwsIntegrationTestBase { + + private String accountId; + + private static final String INVALID_ACCOUNT_ID = "1"; + + private S3ControlClient client; + + @Before + public void setup() { + StsClient sts = StsClient.create(); + accountId = sts.getCallerIdentity().account(); + client = S3ControlClient.builder() + .overrideConfiguration(o -> o.addExecutionInterceptor(new AssertPayloadIsSignedExecutionInterceptor())) + .build(); + } + + @After + public void tearDown() { + try { + client.deletePublicAccessBlock(DeletePublicAccessBlockRequest.builder().accountId(accountId).build()); + } catch (Exception ignore) { + + } + } + + @Test + public void putGetAndDeletePublicAccessBlock_ValidAccount() throws InterruptedException { + PutPublicAccessBlockResponse result = + client.putPublicAccessBlock(r -> r.accountId(accountId) + .publicAccessBlockConfiguration(r2 -> r2.blockPublicAcls(true) + .ignorePublicAcls(true))); + assertNotNull(result); + + // Wait a bit for the put to take affect + Thread.sleep(5000); + + GetPublicAccessBlockResponse config = client.getPublicAccessBlock(r -> r.accountId(accountId)); + assertTrue(config.publicAccessBlockConfiguration().blockPublicAcls()); + assertTrue(config.publicAccessBlockConfiguration().ignorePublicAcls()); + + assertNotNull(client.deletePublicAccessBlock(r -> 
r.accountId(accountId))); + } + + @Test + public void putPublicAccessBlock_NoSuchAccount() { + try { + assertNotNull(client.putPublicAccessBlock(r -> r.accountId(INVALID_ACCOUNT_ID) + .publicAccessBlockConfiguration(r2 -> r2.restrictPublicBuckets(true)))); + fail("Expected exception"); + } catch (S3ControlException e) { + assertEquals("AccessDenied", e.awsErrorDetails().errorCode()); + assertNotNull(e.requestId()); + } + } + + @Test + public void getPublicAccessBlock_NoSuchAccount() { + try { + client.getPublicAccessBlock(r -> r.accountId(INVALID_ACCOUNT_ID)); + fail("Expected exception"); + } catch (S3ControlException e) { + assertEquals("AccessDenied", e.awsErrorDetails().errorCode()); + assertNotNull(e.requestId()); + } + } + + @Test + public void deletePublicAccessBlock_NoSuchAccount() { + try { + client.deletePublicAccessBlock(r -> r.accountId(INVALID_ACCOUNT_ID)); + fail("Expected exception"); + } catch (S3ControlException e) { + assertEquals("AccessDenied", e.awsErrorDetails().errorCode()); + assertNotNull(e.requestId()); + } + } + + /** + * Request handler to assert that payload signing is enabled. + */ + private static final class AssertPayloadIsSignedExecutionInterceptor implements ExecutionInterceptor { + @Override + public void afterTransmission(Context.AfterTransmission context, ExecutionAttributes executionAttributes) { + SdkHttpFullRequest request = (SdkHttpFullRequest) context.httpRequest(); + assertThat(context.httpRequest().headers().get("x-amz-content-sha256").get(0)).doesNotContain("UNSIGNED-PAYLOAD"); + } + } + +} \ No newline at end of file diff --git a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3ControlIntegrationTestBase.java b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3ControlIntegrationTestBase.java new file mode 100644 index 000000000000..07359e68dd36 --- /dev/null +++ b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3ControlIntegrationTestBase.java @@ -0,0 +1,194 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3control; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Iterator; +import java.util.List; +import org.junit.BeforeClass; +import software.amazon.awssdk.core.ClientType; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3ClientBuilder; +import software.amazon.awssdk.services.s3.model.BucketLocationConstraint; +import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.NoSuchBucketException; +import software.amazon.awssdk.services.s3.model.S3Exception; +import software.amazon.awssdk.services.s3.model.S3Object; +import software.amazon.awssdk.testutils.Waiter; +import software.amazon.awssdk.testutils.service.AwsTestBase; + +/** + * Base class for S3 Control integration tests. Loads AWS credentials from a properties + * file and creates an S3 client for callers to use. + */ +public class S3ControlIntegrationTestBase extends AwsTestBase { + + protected static final Region DEFAULT_REGION = Region.US_WEST_2; + /** + * The S3 client for all tests to use. + */ + protected static S3Client s3; + + protected static S3AsyncClient s3Async; + + /** + * Loads the AWS account info for the integration tests and creates an S3 + * client for tests to use. 
+ */ + @BeforeClass + public static void setUp() throws Exception { + s3 = s3ClientBuilder().build(); + s3Async = s3AsyncClientBuilder().build(); + } + + protected static S3ClientBuilder s3ClientBuilder() { + return S3Client.builder() + .region(DEFAULT_REGION) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .overrideConfiguration(o -> o.addExecutionInterceptor( + new UserAgentVerifyingExecutionInterceptor("Apache", ClientType.SYNC))); + } + + protected static S3AsyncClientBuilder s3AsyncClientBuilder() { + return S3AsyncClient.builder() + .region(DEFAULT_REGION) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .overrideConfiguration(o -> o.addExecutionInterceptor( + new UserAgentVerifyingExecutionInterceptor("NettyNio", ClientType.ASYNC))); + } + + protected static void createBucket(String bucketName) { + createBucket(bucketName, 0); + } + + private static void createBucket(String bucketName, int retryCount) { + try { + s3.createBucket( + CreateBucketRequest.builder() + .bucket(bucketName) + .createBucketConfiguration( + CreateBucketConfiguration.builder() + .locationConstraint(BucketLocationConstraint.US_WEST_2) + .build()) + .build()); + } catch (S3Exception e) { + System.err.println("Error attempting to create bucket: " + bucketName); + if (e.awsErrorDetails().errorCode().equals("BucketAlreadyOwnedByYou")) { + System.err.printf("%s bucket already exists, likely leaked by a previous run\n", bucketName); + } else if (e.awsErrorDetails().errorCode().equals("TooManyBuckets")) { + System.err.println("Printing all buckets for debug:"); + s3.listBuckets().buckets().forEach(System.err::println); + if (retryCount < 2) { + System.err.println("Retrying..."); + createBucket(bucketName, retryCount + 1); + } else { + throw e; + } + } else { + throw e; + } + } + } + + protected static void deleteBucketAndAllContents(String bucketName) { + deleteBucketAndAllContents(s3, bucketName); + } + + private static class UserAgentVerifyingExecutionInterceptor implements ExecutionInterceptor { + + private final String clientName; + private final ClientType clientType; + + public UserAgentVerifyingExecutionInterceptor(String clientName, ClientType clientType) { + this.clientName = clientName; + this.clientType = clientType; + } + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + assertThat(context.httpRequest().firstMatchingHeader("User-Agent").get()).containsIgnoringCase("io/" + clientType.name()); + assertThat(context.httpRequest().firstMatchingHeader("User-Agent").get()).containsIgnoringCase("http/" + clientName); + } + } + + public static void deleteBucketAndAllContents(S3Client s3, String bucketName) { + try { + System.out.println("Deleting S3 bucket: " + bucketName); + ListObjectsResponse response = Waiter.run(() -> s3.listObjects(r -> r.bucket(bucketName))) + .ignoringException(NoSuchBucketException.class) + .orFail(); + List objectListing = response.contents(); + + if (objectListing != null) { + while (true) { + for (Iterator iterator = objectListing.iterator(); iterator.hasNext(); ) { + S3Object objectSummary = (S3Object) iterator.next(); + s3.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key(objectSummary.key()).build()); + } + + if (response.isTruncated()) { + objectListing = s3.listObjects(ListObjectsRequest.builder() + .bucket(bucketName) + .marker(response.marker()) + .build()) + .contents(); + } else { + break; + } + } + } + + + ListObjectVersionsResponse versions = s3 + 
.listObjectVersions(ListObjectVersionsRequest.builder().bucket(bucketName).build()); + + if (versions.deleteMarkers() != null) { + versions.deleteMarkers().forEach(v -> s3.deleteObject(DeleteObjectRequest.builder() + .versionId(v.versionId()) + .bucket(bucketName) + .key(v.key()) + .build())); + } + + if (versions.versions() != null) { + versions.versions().forEach(v -> s3.deleteObject(DeleteObjectRequest.builder() + .versionId(v.versionId()) + .bucket(bucketName) + .key(v.key()) + .build())); + } + + s3.deleteBucket(DeleteBucketRequest.builder().bucket(bucketName).build()); + } catch (Exception e) { + System.err.println("Failed to delete bucket: " + bucketName); + e.printStackTrace(); + } + } +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/S3ControlBucketResource.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/S3ControlBucketResource.java new file mode 100644 index 000000000000..ca50b327e816 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/S3ControlBucketResource.java @@ -0,0 +1,228 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control; + +import java.util.Objects; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.s3.internal.resource.S3OutpostResource; +import software.amazon.awssdk.services.s3.internal.resource.S3Resource; +import software.amazon.awssdk.services.s3.internal.resource.S3ResourceType; +import software.amazon.awssdk.services.s3control.internal.S3ControlResourceType; +import software.amazon.awssdk.utils.Validate; + +/** + * An {@link S3Resource} that represents an bucket. + */ +@SdkInternalApi +public final class S3ControlBucketResource implements S3Resource { + + private final String partition; + private final String region; + private final String accountId; + private final String bucketName; + private final S3Resource parentS3Resource; + + private S3ControlBucketResource(Builder b) { + this.bucketName = Validate.notBlank(b.bucketName, "bucketName"); + if (b.parentS3Resource == null) { + this.parentS3Resource = null; + this.partition = b.partition; + this.region = b.region; + this.accountId = b.accountId; + } else { + this.parentS3Resource = validateParentS3Resource(b.parentS3Resource); + Validate.isNull(b.partition, "partition cannot be set on builder if it has parent resource"); + Validate.isNull(b.region, "region cannot be set on builder if it has parent resource"); + Validate.isNull(b.accountId, "accountId cannot be set on builder if it has parent resource"); + this.partition = parentS3Resource.partition().orElse(null); + this.region = parentS3Resource.region().orElse(null); + this.accountId = parentS3Resource.accountId().orElse(null); + } + } + + /** + * Get a new builder for this class. + * @return A newly initialized instance of a builder. 
+ */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the resource type for this bucket. + * @return This will always return "bucket_name". + */ + @Override + public String type() { + return S3ControlResourceType.BUCKET.toString(); + } + + /** + * Gets the AWS partition name associated with this bucket (e.g.: 'aws'). + * @return the name of the partition. + */ + @Override + public Optional partition() { + return Optional.of(this.partition); + } + + /** + * Gets the AWS region name associated with this bucket (e.g.: 'us-east-1'). + * @return the name of the region or null if the region has not been specified (e.g. the resource is in the + * global namespace). + */ + @Override + public Optional region() { + return Optional.of(this.region); + } + + /** + * Gets the AWS account ID associated with this bucket. + * @return the AWS account ID or null if the account ID has not been specified. + */ + @Override + public Optional accountId() { + return Optional.of(this.accountId); + } + + /** + * Gets the name of the bucket. + * @return the name of the bucket. + */ + public String bucketName() { + return this.bucketName; + } + + /** + * Gets the optional parent s3 resource + * @return the parent s3 resource if exists, otherwise null + */ + @Override + public Optional parentS3Resource() { + return Optional.ofNullable(parentS3Resource); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + S3ControlBucketResource that = (S3ControlBucketResource) o; + + if (!Objects.equals(partition, that.partition)) { + return false; + } + if (!Objects.equals(region, that.region)) { + return false; + } + if (!Objects.equals(accountId, that.accountId)) { + return false; + } + if (!bucketName.equals(that.bucketName)) { + return false; + } + return Objects.equals(parentS3Resource, that.parentS3Resource); + } + + @Override + public int hashCode() { + int result = partition != null ? partition.hashCode() : 0; + result = 31 * result + (region != null ? region.hashCode() : 0); + result = 31 * result + (accountId != null ? accountId.hashCode() : 0); + result = 31 * result + bucketName.hashCode(); + result = 31 * result + (parentS3Resource != null ? parentS3Resource.hashCode() : 0); + return result; + } + + /** + * A builder for {@link S3ControlBucketResource} objects. + */ + public static final class Builder { + private String partition; + private String region; + private String accountId; + private String bucketName; + private S3Resource parentS3Resource; + + private Builder() { + } + + /** + * The AWS partition associated with the bucket. + */ + public Builder partition(String partition) { + this.partition = partition; + return this; + } + + public void setRegion(String region) { + this.region = region; + } + + /** + * The AWS region associated with the bucket. This property is optional. + */ + public Builder region(String region) { + this.region = region; + return this; + } + + /** + * The AWS account ID associated with the bucket. This property is optional. + */ + public Builder accountId(String accountId) { + this.accountId = accountId; + return this; + } + + /** + * The name of the S3 bucket. + */ + public Builder bucketName(String bucketName) { + this.bucketName = bucketName; + return this; + } + + /** + * The S3 resource this access point is associated with (contained within). Only {@link S3OutpostResource} and + * is a valid parent resource types. 
+ */ + public Builder parentS3Resource(S3Resource parentS3Resource) { + this.parentS3Resource = parentS3Resource; + return this; + } + + /** + * Builds an instance of {@link S3ControlBucketResource}. + */ + public S3ControlBucketResource build() { + return new S3ControlBucketResource(this); + } + } + + private S3Resource validateParentS3Resource(S3Resource parentS3Resource) { + if (!S3ResourceType.OUTPOST.toString().equals(parentS3Resource.type())) { + throw new IllegalArgumentException("Invalid 'parentS3Resource' type. An S3 bucket resource must be " + + "associated with an outpost parent resource."); + } + return parentS3Resource; + } +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/S3ControlConfiguration.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/S3ControlConfiguration.java new file mode 100644 index 000000000000..a19d1859613d --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/S3ControlConfiguration.java @@ -0,0 +1,256 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control; + +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.core.ServiceConfiguration; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * S3 Control specific configuration allowing customers to enabled FIPS or + * dualstack. + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class S3ControlConfiguration implements ServiceConfiguration, + ToCopyableBuilder { + /** + * S3 FIPS mode is by default not enabled + */ + private static final boolean DEFAULT_FIPS_MODE_ENABLED = false; + + /** + * S3 Dualstack endpoint is by default not enabled + */ + private static final boolean DEFAULT_DUALSTACK_ENABLED = false; + + private static final boolean DEFAULT_USE_ARN_REGION_ENABLED = false; + + private final Boolean fipsModeEnabled; + private final Boolean dualstackEnabled; + private final Boolean useArnRegionEnabled; + private final ProfileFile profileFile; + private final String profileName; + + private S3ControlConfiguration(DefaultS3ServiceConfigurationBuilder builder) { + this.dualstackEnabled = builder.dualstackEnabled; + this.fipsModeEnabled = builder.fipsModeEnabled; + this.profileFile = builder.profileFile; + this.profileName = builder.profileName; + this.useArnRegionEnabled = builder.useArnRegionEnabled; + } + + /** + * Create a {@link Builder}, used to create a {@link S3ControlConfiguration}. + */ + public static Builder builder() { + return new DefaultS3ServiceConfigurationBuilder(); + } + + /** + *
<p>
    + * Returns whether the client has enabled fips mode for accessing S3 Control. + * + * @return True if client will use FIPS mode. + */ + public boolean fipsModeEnabled() { + return resolveBoolean(fipsModeEnabled, DEFAULT_FIPS_MODE_ENABLED); + } + + /** + *
<p>
    + * Returns whether the client is configured to use dualstack mode for + * accessing S3. If you want to use IPv6 when accessing S3, dualstack + * must be enabled. + *
</p>
    + * + *
<p>
    + * Dualstack endpoints are disabled by default. + *
</p>
    + * + * @return True if the client will use the dualstack endpoints + */ + public boolean dualstackEnabled() { + return resolveBoolean(dualstackEnabled, DEFAULT_DUALSTACK_ENABLED); + } + + /** + * Returns whether the client is configured to make calls to a region specified in an ARN that represents an + * S3 resource even if that region is different to the region the client was initialized with. This setting is disabled by + * default. + * + * @return true if use arn region is enabled. + */ + public boolean useArnRegionEnabled() { + return resolveBoolean(useArnRegionEnabled, DEFAULT_USE_ARN_REGION_ENABLED); + } + + private boolean resolveBoolean(Boolean suppliedValue, boolean defaultValue) { + return suppliedValue == null ? defaultValue : suppliedValue; + } + + @Override + public Builder toBuilder() { + return builder() + .dualstackEnabled(dualstackEnabled) + .fipsModeEnabled(fipsModeEnabled) + .useArnRegionEnabled(useArnRegionEnabled) + .profileFile(profileFile) + .profileName(profileName); + } + + @NotThreadSafe + public interface Builder extends CopyableBuilder { + Boolean dualstackEnabled(); + + /** + * Option to enable using the dualstack endpoints when accessing S3. Dualstack + * should be enabled if you want to use IPv6. + * + *
<p>
    + * Dualstack endpoints are disabled by default. + *
</p>
    + * + * @see S3ControlConfiguration#dualstackEnabled(). + */ + Builder dualstackEnabled(Boolean dualstackEnabled); + + Boolean fipsModeEnabled(); + + /** + * Option to enable using the fips endpoint when accessing S3 Control. + * + *
<p>
    + * FIPS mode is disabled by default. + *
</p>
    + * + * @see S3ControlConfiguration#fipsModeEnabled(). + */ + Builder fipsModeEnabled(Boolean fipsModeEnabled); + + /** + * Option to enable the client to make calls to a region specified in an ARN that represents an S3 resource + * even if that region is different to the region the client was initialized with. This setting is disabled by + * default. + */ + Builder useArnRegionEnabled(Boolean arnRegionEnabled); + + /** + * Option to enable the client to make calls to a region specified in an ARN that represents an S3 resource + * even if that region is different to the region the client was initialized with. This setting is disabled by + * default. + */ + Boolean useArnRegionEnabled(); + + ProfileFile profileFile(); + + /** + * The profile file that should be consulted to determine the service-specific default configuration. This is not + * currently used by S3 control, but may be in a future SDK version. + */ + Builder profileFile(ProfileFile profileFile); + + String profileName(); + + /** + * The profile name that should be consulted to determine the service-specific default configuration. This is not + * currently used by S3 control, but may be in a future SDK version. + */ + Builder profileName(String profileName); + } + + private static final class DefaultS3ServiceConfigurationBuilder implements Builder { + + private Boolean dualstackEnabled; + private Boolean fipsModeEnabled; + private ProfileFile profileFile; + private String profileName; + private Boolean useArnRegionEnabled; + + public Boolean dualstackEnabled() { + return dualstackEnabled; + } + + public Builder dualstackEnabled(Boolean dualstackEnabled) { + this.dualstackEnabled = dualstackEnabled; + return this; + } + + public void setDualstackEnabled(Boolean dualstackEnabled) { + dualstackEnabled(dualstackEnabled); + } + + public Boolean fipsModeEnabled() { + return fipsModeEnabled; + } + + public Builder fipsModeEnabled(Boolean fipsModeEnabled) { + this.fipsModeEnabled = fipsModeEnabled; + return this; + } + + public void setFipsModeEnabled(Boolean fipsModeEnabled) { + fipsModeEnabled(fipsModeEnabled); + } + + @Override + public Builder useArnRegionEnabled(Boolean arnRegionEnabled) { + this.useArnRegionEnabled = arnRegionEnabled; + return this; + } + + @Override + public Boolean useArnRegionEnabled() { + return useArnRegionEnabled; + } + + public void setUseArnRegionEnabled(Boolean useArnRegionEnabled) { + useArnRegionEnabled(useArnRegionEnabled); + } + + @Override + public ProfileFile profileFile() { + return profileFile; + } + + @Override + public Builder profileFile(ProfileFile profileFile) { + this.profileFile = profileFile; + return this; + } + + @Override + public String profileName() { + return profileName; + } + + @Override + public Builder profileName(String profileName) { + this.profileName = profileName; + return this; + } + + public S3ControlConfiguration build() { + return new S3ControlConfiguration(this); + } + } +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/ArnHandler.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/ArnHandler.java new file mode 100644 index 000000000000..6a1376201c00 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/ArnHandler.java @@ -0,0 +1,151 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
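The S3ControlConfiguration above is applied through the client builder like any other service configuration. A minimal sketch, assuming the generated S3ControlClient builder exposes the usual serviceConfiguration(...) hook (the class name and region are placeholders):

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.S3ControlConfiguration;

    public class UseArnRegionSketch {
        public static void main(String[] args) {
            // useArnRegionEnabled(true) lets the client honor the region embedded in a
            // resource ARN even when it differs from the client's configured region.
            S3ControlClient s3Control = S3ControlClient.builder()
                    .region(Region.US_WEST_2)
                    .serviceConfiguration(S3ControlConfiguration.builder()
                            .useArnRegionEnabled(true)
                            .build())
                    .build();
            System.out.println(s3Control.serviceName());
        }
    }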
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control.internal; + +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME; +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SIGNING_REGION; +import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.ENDPOINT_OVERRIDDEN; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.S3_OUTPOSTS; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isDualstackEnabled; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isFipsEnabledInClientConfig; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isFipsRegionProvided; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isUseArnRegionEnabledInClientConfig; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.PartitionMetadata; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.internal.resource.S3OutpostResource; +import software.amazon.awssdk.services.s3.internal.resource.S3Resource; +import software.amazon.awssdk.services.s3.internal.usearnregion.UseArnRegionProviderChain; +import software.amazon.awssdk.services.s3control.S3ControlConfiguration; + +@SdkInternalApi +public final class ArnHandler { + private static final String X_AMZ_OUTPOST_ID_HEADER = "x-amz-outpost-id"; + private static final ArnHandler INSTANCE = new ArnHandler(); + private static final UseArnRegionProviderChain USE_ARN_REGION_RESOLVER = UseArnRegionProviderChain.create(); + + private ArnHandler() { + } + + public static ArnHandler getInstance() { + return INSTANCE; + } + + public SdkHttpRequest resolveHostForArn(SdkHttpRequest request, + S3ControlConfiguration configuration, + Arn arn, + ExecutionAttributes executionAttributes) { + + S3Resource s3Resource = S3ControlArnConverter.getInstance().convertArn(arn); + + String clientRegion = executionAttributes.getAttribute(SIGNING_REGION).id(); + String originalArnRegion = s3Resource.region().orElseThrow(() -> new IllegalArgumentException("Region is missing")); + + boolean isFipsEnabled = isFipsEnabledInClientConfig(configuration) || isFipsRegionProvided(clientRegion, + originalArnRegion, + useArnRegion(configuration)); + + String arnRegion = removeFipsIfNeeded(originalArnRegion); + validateConfiguration(executionAttributes, arn.partition(), arnRegion, configuration); + + executionAttributes.putAttribute(SIGNING_REGION, Region.of(arnRegion)); + + S3Resource parentS3Resource = s3Resource.parentS3Resource().orElse(null); + if (parentS3Resource instanceof S3OutpostResource) { + return handleOutpostArn(request, (S3OutpostResource) parentS3Resource, isFipsEnabled, configuration, + executionAttributes); + } else { + throw new IllegalArgumentException("Parent resource invalid, outpost resource 
expected."); + } + + } + + private SdkHttpRequest handleOutpostArn(SdkHttpRequest request, + S3OutpostResource outpostResource, + boolean isFipsEnabled, + S3ControlConfiguration configuration, + ExecutionAttributes executionAttributes) { + if (isFipsEnabled) { + throw new IllegalArgumentException("FIPS endpoints are not supported for outpost ARNs"); + } + + if (isDualstackEnabled(configuration)) { + throw new IllegalArgumentException("Dualstack endpoints are not supported for outpost ARNs"); + } + + executionAttributes.putAttribute(SERVICE_SIGNING_NAME, S3_OUTPOSTS); + + SdkHttpRequest.Builder requestBuilder = request.toBuilder().appendHeader(X_AMZ_OUTPOST_ID_HEADER, + outpostResource.outpostId()); + String arnRegion = outpostResource.region().orElseThrow(() -> new IllegalArgumentException("arn region is missing")); + String dnsSuffix = PartitionMetadata.of(Region.of(arnRegion)).dnsSuffix(); + + String host = String.format("s3-outposts.%s.%s", arnRegion, dnsSuffix); + return requestBuilder.host(host).build(); + } + + private void validateConfiguration(ExecutionAttributes executionAttributes, String arnPartition, String arnRegion, + S3ControlConfiguration configuration) { + String clientRegionString = removeFipsIfNeeded(executionAttributes.getAttribute(SIGNING_REGION).id()); + Region clientRegion = Region.of(clientRegionString); + + if (Boolean.TRUE.equals(executionAttributes.getAttribute(ENDPOINT_OVERRIDDEN))) { + throw new IllegalArgumentException("An ARN cannot be passed to an " + + " operation if the client has been configured with an endpoint " + + "override."); + } + String clientPartition = PartitionMetadata.of(clientRegion).id(); + + if (!arnPartition.equals(clientPartition)) { + throw new IllegalArgumentException("The partition field of the ARN being passed as a bucket parameter to " + + "an S3 operation does not match the partition the client has been configured " + + "with. Provided " + + "partition: '" + arnPartition + "'; client partition: " + + "'" + clientPartition + "'."); + } + + if (!arnRegion.equals(clientRegionString) && !useArnRegion(configuration)) { + throw new IllegalArgumentException("The region field of the ARN being passed as a bucket parameter to an " + + "operation does not match the region the client was configured " + + "with. Provided region: '" + arnRegion + "'; client " + + "region: '" + clientRegionString + "'."); + } + } + + private String removeFipsIfNeeded(String region) { + if (region.startsWith("fips-")) { + return region.replace("fips-", ""); + } + + if (region.endsWith("-fips")) { + return region.replace("-fips", ""); + } + return region; + } + + private boolean useArnRegion(S3ControlConfiguration configuration) { + // If useArnRegion is false, it was not set to false by the customer, it was simply not enabled + if (isUseArnRegionEnabledInClientConfig(configuration)) { + return true; + } + + return USE_ARN_REGION_RESOLVER.resolveUseArnRegion().orElse(false); + } +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/HandlerUtils.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/HandlerUtils.java new file mode 100644 index 000000000000..5743ac39aa52 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/HandlerUtils.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
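To make the routing in ArnHandler above concrete, here is a hypothetical outpost access point ARN and the effect the handler has on a request that carries it (the account and outpost IDs are invented):

    // Hypothetical outpost access point ARN; account ID and outpost ID are made up.
    String apArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/my-ap";
    // For a request using this ARN, ArnHandler (per the code above):
    //   - switches the signing name to "s3-outposts",
    //   - appends the header x-amz-outpost-id: op-01234567890123456,
    //   - rewrites the host to s3-outposts.us-west-2.amazonaws.com,
    // and rejects the call if FIPS or dualstack endpoints are enabled.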
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.s3control.S3ControlConfiguration; +import software.amazon.awssdk.utils.StringUtils; + +@SdkInternalApi +public final class HandlerUtils { + public static final String X_AMZ_ACCOUNT_ID = "x-amz-account-id"; + public static final String ENDPOINT_PREFIX = "s3-control"; + public static final String S3_OUTPOSTS = "s3-outposts"; + + private HandlerUtils() { + } + + public static boolean isDualstackEnabled(S3ControlConfiguration configuration) { + return configuration != null && configuration.dualstackEnabled(); + } + + public static boolean isFipsEnabledInClientConfig(S3ControlConfiguration configuration) { + return configuration != null && configuration.fipsModeEnabled(); + } + + public static boolean isUseArnRegionEnabledInClientConfig(S3ControlConfiguration configuration) { + return configuration != null && configuration.useArnRegionEnabled(); + } + + /** + * Returns whether a FIPS pseudo region is provided. + */ + public static boolean isFipsRegionProvided(String clientRegion, String arnRegion, boolean useArnRegion) { + if (useArnRegion) { + return isFipsRegion(arnRegion); + } + + return isFipsRegion(clientRegion); + } + + /** + * Returns whether a region is a FIPS pseudo region. + */ + public static boolean isFipsRegion(String regionName) { + if (StringUtils.isEmpty(regionName)) { + return false; + } + + return regionName.startsWith("fips-") || regionName.endsWith("-fips"); + } +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ArnableField.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ArnableField.java new file mode 100644 index 000000000000..9414212e21b2 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ArnableField.java @@ -0,0 +1,67 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3control.internal; + + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.arns.Arn; + +/** + * Indicating a field that can be an ARN + */ +@SdkInternalApi +public final class S3ArnableField { + private final Arn arn; + + private S3ArnableField(Builder builder) { + this.arn = builder.arn; + } + + public static Builder builder() { + return new Builder(); + } + + /** + * @return the ARN + */ + public Arn arn() { + return arn; + } + + public static final class Builder { + private Arn arn; + + private Builder() { + } + + /** + * Sets the arn + * + * @param arn The new arn value. + * @return This object for method chaining. + */ + public Builder arn(Arn arn) { + this.arn = arn; + return this; + } + + + public S3ArnableField build() { + return new S3ArnableField(this); + } + } + +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ControlArnConverter.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ControlArnConverter.java new file mode 100644 index 000000000000..cff5b4240501 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ControlArnConverter.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control.internal; + + +import static software.amazon.awssdk.services.s3.internal.resource.S3ArnUtils.parseOutpostArn; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.arns.ArnResource; +import software.amazon.awssdk.services.s3.internal.resource.ArnConverter; +import software.amazon.awssdk.services.s3.internal.resource.IntermediateOutpostResource; +import software.amazon.awssdk.services.s3.internal.resource.OutpostResourceType; +import software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource; +import software.amazon.awssdk.services.s3.internal.resource.S3OutpostResource; +import software.amazon.awssdk.services.s3.internal.resource.S3Resource; +import software.amazon.awssdk.services.s3control.S3ControlBucketResource; + +@SdkInternalApi +public final class S3ControlArnConverter implements ArnConverter { + private static final S3ControlArnConverter INSTANCE = new S3ControlArnConverter(); + + private S3ControlArnConverter() { + } + + /** + * Gets a static singleton instance of an {@link S3ControlArnConverter}. + * + * @return A static instance of an {@link S3ControlArnConverter}. 
+ */ + public static S3ControlArnConverter getInstance() { + return INSTANCE; + } + + @Override + public S3Resource convertArn(Arn arn) { + S3ControlResourceType s3ResourceType; + + try { + s3ResourceType = + arn.resource().resourceType().map(S3ControlResourceType::fromValue) + .orElseThrow(() -> new IllegalArgumentException("resource type cannot be null")); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Unknown ARN type '" + arn.resource().resourceType() + "'"); + } + + switch (s3ResourceType) { + case OUTPOST: + return parseS3OutpostArn(arn); + default: + throw new IllegalArgumentException("Unknown ARN type '" + arn.resource().resourceType() + "'"); + } + + } + + private S3Resource parseS3OutpostArn(Arn arn) { + IntermediateOutpostResource intermediateOutpostResource = parseOutpostArn(arn); + ArnResource outpostSubresource = intermediateOutpostResource.outpostSubresource(); + String subResource = outpostSubresource.resource(); + OutpostResourceType outpostResourceType; + try { + outpostResourceType = outpostSubresource.resourceType().map(OutpostResourceType::fromValue) + .orElseThrow(() -> new IllegalArgumentException("resource type cannot be " + + "null")); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Unknown outpost ARN type '" + outpostSubresource.resourceType() + "'"); + } + + String outpostId = intermediateOutpostResource.outpostId(); + + switch (outpostResourceType) { + case OUTPOST_BUCKET: + return S3ControlBucketResource.builder() + .bucketName(subResource) + .parentS3Resource(S3OutpostResource.builder() + .partition(arn.partition()) + .region(arn.region().orElse(null)) + .accountId(arn.accountId().orElse(null)) + .outpostId(outpostId) + .build()) + .build(); + + case OUTPOST_ACCESS_POINT: + return S3AccessPointResource.builder() + .accessPointName(subResource) + .parentS3Resource(S3OutpostResource.builder() + .partition(arn.partition()) + .region(arn.region().orElse(null)) + .accountId(arn.accountId().orElse(null)) + .outpostId(outpostId) + .build()) + .build(); + default: + throw new IllegalArgumentException("Unknown outpost ARN type '" + outpostSubresource.resourceType() + "'"); + } + } + +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ControlInternalExecutionAttribute.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ControlInternalExecutionAttribute.java new file mode 100644 index 000000000000..407b64f67e97 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ControlInternalExecutionAttribute.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3control.internal; + +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.interceptor.ExecutionAttribute; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; + +@SdkProtectedApi +public final class S3ControlInternalExecutionAttribute extends SdkExecutionAttribute { + + /** + * The optional value contains metadata for a request with a field that contains an ARN + */ + public static final ExecutionAttribute S3_ARNABLE_FIELD = new ExecutionAttribute<>("S3_ARNABLE_FIELD"); +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ControlResourceType.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ControlResourceType.java new file mode 100644 index 000000000000..b496b05da038 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/S3ControlResourceType.java @@ -0,0 +1,66 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.s3.internal.resource.S3Resource; + +/** + * An enum representing the types of resources supported by S3 control. Each resource type below will have a + * concrete implementation of {@link S3Resource}. + */ +@SdkInternalApi +public enum S3ControlResourceType { + + BUCKET("bucket"), + + OUTPOST("outpost"); + + private final String value; + + S3ControlResourceType(String value) { + this.value = value; + } + + /** + * @return The canonical string value of this resource type. + */ + @Override + public String toString() { + return value; + } + + /** + * Use this in place of valueOf. + * + * @param value real value + * @return S3ResourceType corresponding to the value + * @throws IllegalArgumentException If the specified value does not map to one of the known values in this enum. 
+ */ + public static S3ControlResourceType fromValue(String value) { + if (value == null || "".equals(value)) { + throw new IllegalArgumentException("Value cannot be null or empty!"); + } + + for (S3ControlResourceType enumEntry : S3ControlResourceType.values()) { + if (enumEntry.toString().equals(value)) { + return enumEntry; + } + } + + throw new IllegalArgumentException("Cannot create enum from " + value + " value!"); + } +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/DisableDoubleUrlEncodingForSigningInterceptor.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/DisableDoubleUrlEncodingForSigningInterceptor.java new file mode 100644 index 000000000000..01c8cbcbbc38 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/DisableDoubleUrlEncodingForSigningInterceptor.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control.internal.interceptors; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; + +/** + * Execution interceptor which modifies the HTTP request to S3 Control to + * add a signer attribute that will instruct the signer to not double-url-encode path elements. + * S3 Control expects path elements to be encoded only once in the canonical URI. + * Similar functionality exists for S3. + */ +@SdkInternalApi +public final class DisableDoubleUrlEncodingForSigningInterceptor implements ExecutionInterceptor { + + @Override + public void beforeExecution(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { + executionAttributes.putAttribute(AwsSignerExecutionAttribute.SIGNER_DOUBLE_URL_ENCODE, Boolean.FALSE); + } +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java new file mode 100644 index 000000000000..66589f922d03 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java @@ -0,0 +1,142 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control.internal.interceptors; + + +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME; +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SIGNING_REGION; +import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.ENDPOINT_OVERRIDDEN; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.ENDPOINT_PREFIX; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.S3_OUTPOSTS; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isDualstackEnabled; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isFipsEnabledInClientConfig; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isFipsRegion; +import static software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute.S3_ARNABLE_FIELD; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.PartitionMetadata; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3control.S3ControlConfiguration; +import software.amazon.awssdk.services.s3control.internal.ArnHandler; +import software.amazon.awssdk.services.s3control.internal.S3ArnableField; +import software.amazon.awssdk.services.s3control.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3control.model.ListRegionalBucketsRequest; +import software.amazon.awssdk.utils.StringUtils; + +/** + * Execution interceptor which modifies the HTTP request to S3 Control to + * change the endpoint to the correct endpoint. This includes prefixing the AWS + * account identifier and, when enabled, adding in FIPS and dualstack. 
+ */ +@SdkInternalApi +public final class EndpointAddressInterceptor implements ExecutionInterceptor { + private final ArnHandler arnHandler; + + public EndpointAddressInterceptor() { + arnHandler = ArnHandler.getInstance(); + } + + @Override + public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, + ExecutionAttributes executionAttributes) { + SdkHttpRequest request = context.httpRequest(); + + S3ControlConfiguration config = (S3ControlConfiguration) executionAttributes.getAttribute( + AwsSignerExecutionAttribute.SERVICE_CONFIG); + + S3ArnableField arnableField = executionAttributes.getAttribute(S3_ARNABLE_FIELD); + + if (arnableField != null && arnableField.arn() != null) { + return arnHandler.resolveHostForArn(request, config, arnableField.arn(), executionAttributes); + } + + String host; + + // If the request is an non-arn outpost request + if (isNonArnOutpostRequest(context.request())) { + host = resolveHostForNonArnOutpostRequest(config, executionAttributes); + } else { + host = resolveHost(request, config); + } + + return request.toBuilder() + .host(host) + .build(); + } + + private String resolveHostForNonArnOutpostRequest(S3ControlConfiguration configuration, + ExecutionAttributes executionAttributes) { + if (Boolean.TRUE.equals(executionAttributes.getAttribute(ENDPOINT_OVERRIDDEN))) { + throw new IllegalArgumentException("Endpoint must not be overridden"); + } + + if (isDualstackEnabled(configuration)) { + throw new IllegalArgumentException("Dualstack endpoints are not supported"); + } + + Region region = executionAttributes.getAttribute(SIGNING_REGION); + if (isFipsEnabledInClientConfig(configuration) || isFipsRegion(region.id())) { + throw new IllegalArgumentException("FIPS endpoints are not supported"); + } + + executionAttributes.putAttribute(SERVICE_SIGNING_NAME, S3_OUTPOSTS); + + String dnsSuffix = PartitionMetadata.of(region).dnsSuffix(); + + return String.format("s3-outposts.%s.%s", region, dnsSuffix); + } + + /** + * It should redirect signer if the request is CreateBucketRequest or ListRegionalBucketsRequest with outpostId present + */ + private boolean isNonArnOutpostRequest(SdkRequest request) { + if (request instanceof CreateBucketRequest && (StringUtils.isNotBlank(((CreateBucketRequest) request).outpostId()))) { + return true; + } + + return request instanceof ListRegionalBucketsRequest && + (StringUtils.isNotBlank(((ListRegionalBucketsRequest) request).outpostId())); + } + + private String resolveHost(SdkHttpRequest request, S3ControlConfiguration configuration) { + if (isDualstackEnabled(configuration) && isFipsEnabledInClientConfig(configuration)) { + throw SdkClientException.create("Cannot use both Dual-Stack endpoints and FIPS endpoints"); + } + String host = request.getUri().getHost(); + if (isDualstackEnabled(configuration)) { + if (!host.contains(ENDPOINT_PREFIX)) { + throw SdkClientException.create(String.format("The Dual-Stack option cannot be used with custom endpoints (%s)", + request.getUri())); + } + host = host.replace(ENDPOINT_PREFIX, String.format("%s.%s", ENDPOINT_PREFIX, "dualstack")); + } else if (isFipsEnabledInClientConfig(configuration)) { + if (!host.contains(ENDPOINT_PREFIX)) { + throw SdkClientException.create(String.format("The FIPS option cannot be used with custom endpoints (%s)", + request.getUri())); + } + host = host.replace(ENDPOINT_PREFIX, String.format("%s-%s", ENDPOINT_PREFIX, "fips")); + + } + return host; + } +} diff --git 
a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptor.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptor.java new file mode 100644 index 000000000000..38e1a815aee3 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptor.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control.internal.interceptors; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.SdkHttpMethod; + +/** + * Turns on payload signing and prevents moving query params to body during a POST which S3 doesn't like. + */ +@SdkInternalApi +public class PayloadSigningInterceptor implements ExecutionInterceptor { + + public Optional modifyHttpContent(Context.ModifyHttpRequest context, + ExecutionAttributes executionAttributes) { + executionAttributes.putAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING, true); + if (!context.requestBody().isPresent() && context.httpRequest().method().equals(SdkHttpMethod.POST)) { + return Optional.of(RequestBody.fromBytes(new byte[0])); + } + + return context.requestBody(); + } +} diff --git a/services/s3control/src/main/resources/codegen-resources/customization.config b/services/s3control/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..d5badfef0228 --- /dev/null +++ b/services/s3control/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,225 @@ +{ + "serviceSpecificClientConfigClass": "S3ControlConfiguration", + "customResponseMetadata": { + "EXTENDED_REQUEST_ID": "x-amz-id-2", + "REQUEST_ID": "x-amz-request-id" + }, + "s3ArnableFields": { + "CreateAccessPointRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "arnResourceSubstitutionGetter": "bucketName", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "DeleteAccessPointRequest": { + 
"field": "name", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource", + "arnResourceSubstitutionGetter": "accessPointName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "GetAccessPointRequest": { + "field": "name", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource", + "arnResourceSubstitutionGetter": "accessPointName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "ListAccessPointsRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "PutAccessPointPolicyRequest": { + "field": "name", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource", + "arnResourceSubstitutionGetter": "accessPointName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "GetAccessPointPolicyRequest": { + "field": "name", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource", + "arnResourceSubstitutionGetter": "accessPointName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": 
"software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "DeleteAccessPointPolicyRequest": { + "field": "name", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource", + "arnResourceSubstitutionGetter": "accessPointName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "DeleteBucketRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "DeleteBucketLifecycleConfigurationRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "GetBucketLifecycleConfigurationRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "PutBucketLifecycleConfigurationRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": 
"bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "GetBucketRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "GetBucketPolicyRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "DeleteBucketPolicyRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "PutBucketPolicyRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "GetBucketTaggingRequest": { + "field": "bucket", + "arnConverterFqcn": 
"software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "PutBucketTaggingRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + }, + "DeleteBucketTaggingRequest": { + "field": "bucket", + "arnConverterFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter", + "arnResourceFqcn": "software.amazon.awssdk.services.s3control.S3ControlBucketResource", + "arnResourceSubstitutionGetter": "bucketName", + "baseArnResourceFqcn": "software.amazon.awssdk.services.s3.internal.resource.S3Resource", + "executionAttributeKeyFqcn": "software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute", + "executionAttributeValueFqcn": "software.amazon.awssdk.services.s3control.internal.S3ArnableField", + "otherFieldsToPopulate": { + "accountId": "accountId().orElseThrow(() -> new IllegalArgumentException(\"accountId cannot be null\"))" + } + } + } +} \ No newline at end of file diff --git a/services/s3control/src/main/resources/codegen-resources/paginators-1.json b/services/s3control/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..d4d35a1a3952 --- /dev/null +++ b/services/s3control/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,19 @@ +{ + "pagination": { + "ListAccessPoints": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListRegionalBuckets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/s3control/src/main/resources/codegen-resources/service-2.json b/services/s3control/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..721dc2a3c630 --- /dev/null +++ b/services/s3control/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3708 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-08-20", + "endpointPrefix":"s3-control", + "protocol":"rest-xml", + "serviceFullName":"AWS S3 Control", + "serviceId":"S3 Control", + "signatureVersion":"s3v4", + "signingName":"s3", + "uid":"s3control-2018-08-20" + }, + "operations":{ + "CreateAccessPoint":{ + 
"name":"CreateAccessPoint", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/accesspoint/{name}" + }, + "input":{ + "shape":"CreateAccessPointRequest", + "locationName":"CreateAccessPointRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "output":{"shape":"CreateAccessPointResult"}, + "documentation":"

    Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

    Using this action with Amazon S3 on Outposts

    This action:

    • Requires a virtual private cloud (VPC) configuration as S3 on Outposts only supports VPC style access points.

    • Does not support ACL on S3 on Outposts buckets.

    • Does not support Public Access on S3 on Outposts buckets.

    • Does not support object lock for S3 on Outposts buckets.

    For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
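
    A minimal sketch of calling CreateAccessPoint through the generated S3ControlClient in this SDK; the client and builder method names are assumed to follow the standard codegen for this model, and the account ID, bucket, and access point names are illustrative placeholders:

        import software.amazon.awssdk.regions.Region;
        import software.amazon.awssdk.services.s3control.S3ControlClient;
        import software.amazon.awssdk.services.s3control.model.CreateAccessPointRequest;

        // Build a client in the Region that owns the bucket.
        S3ControlClient s3control = S3ControlClient.builder()
                .region(Region.US_WEST_2)
                .build();

        // Create an access point named "my-access-point" on the bucket "my-bucket".
        s3control.createAccessPoint(CreateAccessPointRequest.builder()
                .accountId("123456789012")
                .name("my-access-point")
                .bucket("my-bucket")
                .build());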

    The following actions are related to CreateAccessPoint:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "CreateBucket":{ + "name":"CreateBucket", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/bucket/{name}" + }, + "input":{"shape":"CreateBucketRequest"}, + "output":{"shape":"CreateBucketResult"}, + "errors":[ + {"shape":"BucketAlreadyExists"}, + {"shape":"BucketAlreadyOwnedByYou"} + ], + "documentation":"

    This API operation creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.

    Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

    Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.

    S3 on Outposts buckets do not support:

    • ACLs. Instead, configure access point policies to manage access to buckets.

    • Public access.

    • Object Lock

    • Bucket Location constraint

    For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your API request, see the Examples section.
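
    As a sketch of the non-ARN Outposts path handled by the EndpointAddressInterceptor added in this change: a CreateBucketRequest that carries an outpostId is routed to the s3-outposts endpoint and signed with the s3-outposts signing name. This assumes the s3control client from the earlier sketch; the bucket name and outpost ID are illustrative placeholders:

        import software.amazon.awssdk.services.s3control.model.CreateBucketRequest;

        // Supplying outpostId makes the interceptor resolve an
        // s3-outposts.<region>.<dnsSuffix> host instead of the regular s3-control host.
        s3control.createBucket(CreateBucketRequest.builder()
                .bucket("my-outposts-bucket")
                .outpostId("op-01234567890123456")   // illustrative outpost ID
                .build());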

    The following actions are related to CreateBucket for Amazon S3 on Outposts:

    ", + "httpChecksumRequired":true + }, + "CreateJob":{ + "name":"CreateJob", + "http":{ + "method":"POST", + "requestUri":"/v20180820/jobs" + }, + "input":{ + "shape":"CreateJobRequest", + "locationName":"CreateJobRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "output":{"shape":"CreateJobResult"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"IdempotencyException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

    S3 Batch Operations performs large-scale Batch Operations on Amazon S3 objects. Batch Operations can run a single operation or action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

    This operation creates an S3 Batch Operations job.

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeleteAccessPoint":{ + "name":"DeleteAccessPoint", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/accesspoint/{name}" + }, + "input":{"shape":"DeleteAccessPointRequest"}, + "documentation":"

    Deletes the specified access point.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
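
    The s3ArnableFields customization in this change lets the name member carry an S3 on Outposts access point ARN, and the ArnHandler then derives the account ID, outpost ID, and Region from it. A minimal sketch, assuming the s3control client from the earlier sketch and an illustrative ARN:

        import software.amazon.awssdk.services.s3control.model.DeleteAccessPointRequest;

        // The access point is addressed by its Outposts ARN; accountId and the
        // x-amz-outpost-id header are populated from the ARN by the new handlers.
        String accessPointArn = "arn:aws:s3-outposts:us-west-2:123456789012:"
                + "outpost/op-01234567890123456/accesspoint/my-access-point";

        s3control.deleteAccessPoint(DeleteAccessPointRequest.builder()
                .name(accessPointArn)
                .build());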

    The following actions are related to DeleteAccessPoint:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeleteAccessPointPolicy":{ + "name":"DeleteAccessPointPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/accesspoint/{name}/policy" + }, + "input":{"shape":"DeleteAccessPointPolicyRequest"}, + "documentation":"

    Deletes the access point policy for the specified access point.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

    The following actions are related to DeleteAccessPointPolicy:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeleteBucket":{ + "name":"DeleteBucket", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/bucket/{name}" + }, + "input":{"shape":"DeleteBucketRequest"}, + "documentation":"

    This API operation deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.

    Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

    Related Resources

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeleteBucketLifecycleConfiguration":{ + "name":"DeleteBucketLifecycleConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/bucket/{name}/lifecycleconfiguration" + }, + "input":{"shape":"DeleteBucketLifecycleConfigurationRequest"}, + "documentation":"

    This API action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.

    Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

    To use this operation, you must have permission to perform the s3-outposts:DeleteLifecycleConfiguration action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

    For more information about object expiration, see Elements to Describe Lifecycle Actions.

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeleteBucketPolicy":{ + "name":"DeleteBucketPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/bucket/{name}/policy" + }, + "input":{"shape":"DeleteBucketPolicyRequest"}, + "documentation":"

    This API operation deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.

    This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this operation. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

    If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

    As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

    For more information about bucket policies, see Using Bucket Policies and User Policies.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

    The following actions are related to DeleteBucketPolicy:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeleteBucketTagging":{ + "name":"DeleteBucketTagging", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/bucket/{name}/tagging", + "responseCode":204 + }, + "input":{"shape":"DeleteBucketTaggingRequest"}, + "documentation":"

    This operation deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket's tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.

    Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

    To use this operation, you must have permission to perform the PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

    The following actions are related to DeleteBucketTagging:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeleteJobTagging":{ + "name":"DeleteJobTagging", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/jobs/{id}/tagging" + }, + "input":{"shape":"DeleteJobTaggingRequest"}, + "output":{"shape":"DeleteJobTaggingResult"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Removes the entire tag set from the specified S3 Batch Operations job. To use this operation, you must have permission to perform the s3:DeleteJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeletePublicAccessBlock":{ + "name":"DeletePublicAccessBlock", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/configuration/publicAccessBlock" + }, + "input":{"shape":"DeletePublicAccessBlockRequest"}, + "documentation":"

    Removes the PublicAccessBlock configuration for an AWS account. For more information, see Using Amazon S3 block public access.
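
    This is an account-level call that needs only the account ID. A minimal sketch, assuming the s3control client from the earlier sketch and a placeholder account ID:

        import software.amazon.awssdk.services.s3control.model.DeletePublicAccessBlockRequest;

        // Removes the account-level public access block configuration.
        s3control.deletePublicAccessBlock(DeletePublicAccessBlockRequest.builder()
                .accountId("123456789012")
                .build());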

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeleteStorageLensConfiguration":{ + "name":"DeleteStorageLensConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/storagelens/{storagelensid}" + }, + "input":{"shape":"DeleteStorageLensConfigurationRequest"}, + "documentation":"

    Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeleteStorageLensConfigurationTagging":{ + "name":"DeleteStorageLensConfigurationTagging", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/storagelens/{storagelensid}/tagging" + }, + "input":{"shape":"DeleteStorageLensConfigurationTaggingRequest"}, + "output":{"shape":"DeleteStorageLensConfigurationTaggingResult"}, + "documentation":"

    Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DescribeJob":{ + "name":"DescribeJob", + "http":{ + "method":"GET", + "requestUri":"/v20180820/jobs/{id}" + }, + "input":{"shape":"DescribeJobRequest"}, + "output":{"shape":"DescribeJobResult"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

    Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetAccessPoint":{ + "name":"GetAccessPoint", + "http":{ + "method":"GET", + "requestUri":"/v20180820/accesspoint/{name}" + }, + "input":{"shape":"GetAccessPointRequest"}, + "output":{"shape":"GetAccessPointResult"}, + "documentation":"

    Returns configuration information about the specified access point.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
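
    To illustrate how the internal converter added in this change interprets such an ARN (internal API, shown only to explain the resolution, not intended for application use), with an illustrative ARN:

        import software.amazon.awssdk.arns.Arn;
        import software.amazon.awssdk.services.s3.internal.resource.S3Resource;
        import software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter;

        Arn arn = Arn.fromString("arn:aws:s3-outposts:us-west-2:123456789012:"
                + "outpost/op-01234567890123456/accesspoint/my-access-point");

        // Yields an S3AccessPointResource whose parent S3OutpostResource carries
        // the outpost ID, account ID, and Region parsed from the ARN.
        S3Resource resource = S3ControlArnConverter.getInstance().convertArn(arn);
        resource.parentS3Resource()
                .flatMap(S3Resource::region)
                .ifPresent(System.out::println);   // prints "us-west-2"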

    The following actions are related to GetAccessPoint:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetAccessPointPolicy":{ + "name":"GetAccessPointPolicy", + "http":{ + "method":"GET", + "requestUri":"/v20180820/accesspoint/{name}/policy" + }, + "input":{"shape":"GetAccessPointPolicyRequest"}, + "output":{"shape":"GetAccessPointPolicyResult"}, + "documentation":"

    Returns the access point policy associated with the specified access point.

    The following actions are related to GetAccessPointPolicy:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetAccessPointPolicyStatus":{ + "name":"GetAccessPointPolicyStatus", + "http":{ + "method":"GET", + "requestUri":"/v20180820/accesspoint/{name}/policyStatus" + }, + "input":{"shape":"GetAccessPointPolicyStatusRequest"}, + "output":{"shape":"GetAccessPointPolicyStatusResult"}, + "documentation":"

    Indicates whether the specified access point currently has a policy that allows public access. For more information about public access through access points, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetBucket":{ + "name":"GetBucket", + "http":{ + "method":"GET", + "requestUri":"/v20180820/bucket/{name}" + }, + "input":{"shape":"GetBucketRequest"}, + "output":{"shape":"GetBucketResult"}, + "documentation":"

    Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:GetBucket permissions on the specified bucket and belong to the bucket owner's account in order to use this operation. Only users from the Outposts bucket owner's account with the right permissions can perform actions on an Outposts bucket.

    If you don't have s3-outposts:GetBucket permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

    The following actions are related to GetBucket for Amazon S3 on Outposts:

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetBucketLifecycleConfiguration":{ + "name":"GetBucketLifecycleConfiguration", + "http":{ + "method":"GET", + "requestUri":"/v20180820/bucket/{name}/lifecycleconfiguration" + }, + "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, + "output":{"shape":"GetBucketLifecycleConfigurationResult"}, + "documentation":"

    This operation gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

    Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and, for information about lifecycle configuration, Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

    To use this operation, you must have permission to perform the s3-outposts:GetLifecycleConfiguration action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
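
    Not part of the service model: a minimal sketch of reading an Outposts bucket's lifecycle configuration with the generated SDK for Java v2 client. The account ID and bucket ARN are hypothetical.

        import software.amazon.awssdk.services.s3control.S3ControlClient;
        import software.amazon.awssdk.services.s3control.model.GetBucketLifecycleConfigurationRequest;
        import software.amazon.awssdk.services.s3control.model.GetBucketLifecycleConfigurationResponse;

        public class GetBucketLifecycleConfigurationExample {
            public static void main(String[] args) {
                S3ControlClient s3Control = S3ControlClient.create();
                GetBucketLifecycleConfigurationResponse response = s3Control.getBucketLifecycleConfiguration(
                        GetBucketLifecycleConfigurationRequest.builder()
                                .accountId("123456789012")  // hypothetical account ID
                                // SDK callers pass the Outposts bucket ARN; hypothetical ARN below
                                .bucket("arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports")
                                .build());
                // Each element is one lifecycle rule set on the Outposts bucket.
                response.rules().forEach(System.out::println);
            }
        }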

    GetBucketLifecycleConfiguration has the following special error:

    • Error code: NoSuchLifecycleConfiguration

      • Description: The lifecycle configuration does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

    The following actions are related to GetBucketLifecycleConfiguration:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetBucketPolicy":{ + "name":"GetBucketPolicy", + "http":{ + "method":"GET", + "requestUri":"/v20180820/bucket/{name}/policy" + }, + "input":{"shape":"GetBucketPolicyRequest"}, + "output":{"shape":"GetBucketPolicyResult"}, + "documentation":"

    This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.

    Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

    Only users from the Outposts bucket owner's account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

    As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

    For more information about bucket policies, see Using Bucket Policies and User Policies.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

    The following actions are related to GetBucketPolicy:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetBucketTagging":{ + "name":"GetBucketTagging", + "http":{ + "method":"GET", + "requestUri":"/v20180820/bucket/{name}/tagging" + }, + "input":{"shape":"GetBucketTaggingRequest"}, + "output":{"shape":"GetBucketTaggingResult"}, + "documentation":"

    This operation gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket's tags, see GetBucketTagging in the Amazon Simple Storage Service API.

    Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    To use this operation, you must have permission to perform the GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

    GetBucketTagging has the following special error:

    • Error code: NoSuchTagSetError

      • Description: There is no tag set associated with the bucket.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

    The following actions are related to GetBucketTagging:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetJobTagging":{ + "name":"GetJobTagging", + "http":{ + "method":"GET", + "requestUri":"/v20180820/jobs/{id}/tagging" + }, + "input":{"shape":"GetJobTaggingRequest"}, + "output":{"shape":"GetJobTaggingResult"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Returns the tags on an S3 Batch Operations job. To use this operation, you must have permission to perform the s3:GetJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetPublicAccessBlock":{ + "name":"GetPublicAccessBlock", + "http":{ + "method":"GET", + "requestUri":"/v20180820/configuration/publicAccessBlock" + }, + "input":{"shape":"GetPublicAccessBlockRequest"}, + "output":{"shape":"GetPublicAccessBlockOutput"}, + "errors":[ + {"shape":"NoSuchPublicAccessBlockConfiguration"} + ], + "documentation":"

    Retrieves the PublicAccessBlock configuration for an AWS account. For more information, see Using Amazon S3 block public access.

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetStorageLensConfiguration":{ + "name":"GetStorageLensConfiguration", + "http":{ + "method":"GET", + "requestUri":"/v20180820/storagelens/{storagelensid}" + }, + "input":{"shape":"GetStorageLensConfigurationRequest"}, + "output":{"shape":"GetStorageLensConfigurationResult"}, + "documentation":"

    Gets the Amazon S3 Storage Lens configuration. For more information, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    To use this action, you must have permission to perform the s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetStorageLensConfigurationTagging":{ + "name":"GetStorageLensConfigurationTagging", + "http":{ + "method":"GET", + "requestUri":"/v20180820/storagelens/{storagelensid}/tagging" + }, + "input":{"shape":"GetStorageLensConfigurationTaggingRequest"}, + "output":{"shape":"GetStorageLensConfigurationTaggingResult"}, + "documentation":"

    Gets the tags of the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "ListAccessPoints":{ + "name":"ListAccessPoints", + "http":{ + "method":"GET", + "requestUri":"/v20180820/accesspoint" + }, + "input":{"shape":"ListAccessPointsRequest"}, + "output":{"shape":"ListAccessPointsResult"}, + "documentation":"

    Returns a list of the access points currently associated with the specified bucket. You can retrieve up to 1,000 access points per call. If the specified bucket has more than 1,000 access points (or the number specified in maxResults, whichever is less), the response will include a continuation token that you can use to list the additional access points.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
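
    Not part of the service model: a minimal sketch of paging through access points with the continuation token described above. The bucket name and account ID are hypothetical, and the maxResults, nextToken, and accessPointList member names are assumed from the request and result shapes.

        import software.amazon.awssdk.services.s3control.S3ControlClient;
        import software.amazon.awssdk.services.s3control.model.ListAccessPointsRequest;
        import software.amazon.awssdk.services.s3control.model.ListAccessPointsResponse;

        public class ListAccessPointsExample {
            public static void main(String[] args) {
                S3ControlClient s3Control = S3ControlClient.create();
                String nextToken = null;
                do {
                    ListAccessPointsResponse page = s3Control.listAccessPoints(ListAccessPointsRequest.builder()
                            .accountId("123456789012")  // hypothetical account ID
                            .bucket("example-bucket")   // hypothetical bucket name
                            .maxResults(100)            // assumed member; see maxResults above
                            .nextToken(nextToken)       // assumed member; the continuation token described above
                            .build());
                    // accessPointList() is assumed to expose the AccessPointList result member.
                    page.accessPointList().forEach(ap -> System.out.println(ap.name()));
                    nextToken = page.nextToken();
                } while (nextToken != null);
            }
        }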

    The following actions are related to ListAccessPoints:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "ListJobs":{ + "name":"ListJobs", + "http":{ + "method":"GET", + "requestUri":"/v20180820/jobs" + }, + "input":{"shape":"ListJobsRequest"}, + "output":{"shape":"ListJobsResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

    Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "ListRegionalBuckets":{ + "name":"ListRegionalBuckets", + "http":{ + "method":"GET", + "requestUri":"/v20180820/bucket" + }, + "input":{"shape":"ListRegionalBucketsRequest"}, + "output":{"shape":"ListRegionalBucketsResult"}, + "documentation":"

    Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your request, see the Examples section.

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "ListStorageLensConfigurations":{ + "name":"ListStorageLensConfigurations", + "http":{ + "method":"GET", + "requestUri":"/v20180820/storagelens" + }, + "input":{"shape":"ListStorageLensConfigurationsRequest"}, + "output":{"shape":"ListStorageLensConfigurationsResult"}, + "documentation":"

    Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    To use this action, you must have permission to perform the s3:ListStorageLensConfigurations action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "PutAccessPointPolicy":{ + "name":"PutAccessPointPolicy", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/accesspoint/{name}/policy" + }, + "input":{ + "shape":"PutAccessPointPolicyRequest", + "locationName":"PutAccessPointPolicyRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "documentation":"

    Associates an access policy with the specified access point. Each access point can have only one policy, so a request made to this API replaces any existing policy associated with the specified access point.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

    The following actions are related to PutAccessPointPolicy:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "PutBucketLifecycleConfiguration":{ + "name":"PutBucketLifecycleConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/bucket/{name}/lifecycleconfiguration" + }, + "input":{"shape":"PutBucketLifecycleConfigurationRequest"}, + "documentation":"

    This action puts a lifecycle configuration to an Amazon S3 on Outposts bucket. To put a lifecycle configuration to an S3 bucket, see PutBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

    Creates a new lifecycle configuration for the Outposts bucket or replaces an existing lifecycle configuration. Outposts buckets only support lifecycle configurations that delete/expire objects after a certain period of time and abort incomplete multipart uploads. For more information, see Managing Lifecycle Permissions for Amazon S3 on Outposts.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

    The following actions are related to PutBucketLifecycleConfiguration:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + }, + "httpChecksumRequired":true + }, + "PutBucketPolicy":{ + "name":"PutBucketPolicy", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/bucket/{name}/policy" + }, + "input":{ + "shape":"PutBucketPolicyRequest", + "locationName":"PutBucketPolicyRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "documentation":"

    This action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.

    Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this operation.

    If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

    As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

    For more information about bucket policies, see Using Bucket Policies and User Policies.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
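
    Not part of the service model: a minimal sketch of applying a policy to an Outposts bucket with the generated SDK for Java v2 client. The account ID, bucket ARN, and policy document are hypothetical, and the Policy member name on the request is assumed.

        import software.amazon.awssdk.services.s3control.S3ControlClient;
        import software.amazon.awssdk.services.s3control.model.PutBucketPolicyRequest;

        public class PutBucketPolicyExample {
            public static void main(String[] args) {
                S3ControlClient s3Control = S3ControlClient.create();
                String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports";
                // Hypothetical policy document granting s3-outposts:GetObject to another account.
                String policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\","
                        + "\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:root\"},"
                        + "\"Action\":\"s3-outposts:GetObject\",\"Resource\":\"" + bucketArn + "/object/*\"}]}";
                s3Control.putBucketPolicy(PutBucketPolicyRequest.builder()
                        .accountId("123456789012")  // hypothetical account ID
                        .bucket(bucketArn)
                        .policy(policy)             // assumed Policy member on the request shape
                        .build());
            }
        }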

    The following actions are related to PutBucketPolicy:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + }, + "httpChecksumRequired":true + }, + "PutBucketTagging":{ + "name":"PutBucketTagging", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/bucket/{name}/tagging" + }, + "input":{"shape":"PutBucketTaggingRequest"}, + "documentation":"

    This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.

    Sets the tags for an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

    Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

    Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

    To use this operation, you must have permissions to perform the s3-outposts:PutBucketTagging action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    PutBucketTagging has the following special errors:

    • Error code: InvalidTagError

    • Error code: MalformedXMLError

      • Description: The XML provided does not match the schema.

    • Error code: OperationAbortedError

      • Description: A conflicting conditional operation is currently in progress against this resource. Try again.

    • Error code: InternalError

      • Description: The service was unable to apply the provided tag to the bucket.

    All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
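
    Not part of the service model: a minimal sketch of setting cost-allocation tags on an Outposts bucket with the generated SDK for Java v2 client. The account ID, bucket ARN, and tag values are hypothetical, and the Tagging/TagSet member names are assumed from the request shape.

        import software.amazon.awssdk.services.s3control.S3ControlClient;
        import software.amazon.awssdk.services.s3control.model.PutBucketTaggingRequest;
        import software.amazon.awssdk.services.s3control.model.S3Tag;
        import software.amazon.awssdk.services.s3control.model.Tagging;

        public class PutBucketTaggingExample {
            public static void main(String[] args) {
                S3ControlClient s3Control = S3ControlClient.create();
                s3Control.putBucketTagging(PutBucketTaggingRequest.builder()
                        .accountId("123456789012")  // hypothetical account ID
                        .bucket("arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports")
                        .tagging(Tagging.builder()  // assumed Tagging member wrapping the tag set
                                .tagSet(S3Tag.builder().key("application").value("billing-reports").build())
                                .build())
                        .build());
            }
        }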

    The following actions are related to PutBucketTagging:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + }, + "httpChecksumRequired":true + }, + "PutJobTagging":{ + "name":"PutJobTagging", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/jobs/{id}/tagging" + }, + "input":{ + "shape":"PutJobTaggingRequest", + "locationName":"PutJobTaggingRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "output":{"shape":"PutJobTaggingResult"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyTagsException"} + ], + "documentation":"

    Sets the supplied tag-set on an S3 Batch Operations job.

    A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace it entirely, or make changes within it by retrieving the existing tag set using GetJobTagging, modifying that tag set, and then using this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

    • If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing.

    • To delete the existing tags for your Batch Operations job, a DeleteJobTagging request is preferred because it achieves the same result without incurring charges.

    • A few things to consider about using tags:

      • Amazon S3 limits the maximum number of tags to 50 tags per job.

      • You can associate up to 50 tags with a job as long as they have unique tag keys.

      • A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.

      • The key and values are case sensitive.

      • For tagging-related restrictions related to characters and encodings, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.

    To use this operation, you must have permission to perform the s3:PutJobTagging action.
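
    Not part of the service model: a minimal sketch of the retrieve-modify-replace flow described above, using GetJobTagging followed by PutJobTagging with the generated SDK for Java v2 client. The account ID, job ID, and tag values are hypothetical.

        import java.util.ArrayList;
        import java.util.List;
        import software.amazon.awssdk.services.s3control.S3ControlClient;
        import software.amazon.awssdk.services.s3control.model.GetJobTaggingRequest;
        import software.amazon.awssdk.services.s3control.model.PutJobTaggingRequest;
        import software.amazon.awssdk.services.s3control.model.S3Tag;

        public class PutJobTaggingExample {
            public static void main(String[] args) {
                S3ControlClient s3Control = S3ControlClient.create();
                String accountId = "123456789012";                      // hypothetical account ID
                String jobId = "00e123a4-c0d8-41f4-a0eb-b46f9ba5b07c";  // hypothetical job ID

                // Retrieve the existing tag set, modify it locally, then replace it in full.
                List<S3Tag> tags = new ArrayList<>(s3Control.getJobTagging(
                        GetJobTaggingRequest.builder().accountId(accountId).jobId(jobId).build()).tags());
                tags.add(S3Tag.builder().key("department").value("analytics").build());

                s3Control.putJobTagging(PutJobTaggingRequest.builder()
                        .accountId(accountId)
                        .jobId(jobId)
                        .tags(tags)
                        .build());
            }
        }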

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "PutPublicAccessBlock":{ + "name":"PutPublicAccessBlock", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/configuration/publicAccessBlock" + }, + "input":{"shape":"PutPublicAccessBlockRequest"}, + "documentation":"

    Creates or modifies the PublicAccessBlock configuration for an AWS account. For more information, see Using Amazon S3 block public access.
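
    Not part of the service model: a minimal sketch of setting the account-level PublicAccessBlock configuration with the generated SDK for Java v2 client. The account ID is hypothetical, and the flag member names are assumed to be the standard block-public-access settings.

        import software.amazon.awssdk.services.s3control.S3ControlClient;
        import software.amazon.awssdk.services.s3control.model.PublicAccessBlockConfiguration;
        import software.amazon.awssdk.services.s3control.model.PutPublicAccessBlockRequest;

        public class PutPublicAccessBlockExample {
            public static void main(String[] args) {
                S3ControlClient s3Control = S3ControlClient.create();
                s3Control.putPublicAccessBlock(PutPublicAccessBlockRequest.builder()
                        .accountId("123456789012")  // hypothetical account ID
                        .publicAccessBlockConfiguration(PublicAccessBlockConfiguration.builder()
                                // Flag names assumed from the account-level block public access settings
                                .blockPublicAcls(true)
                                .ignorePublicAcls(true)
                                .blockPublicPolicy(true)
                                .restrictPublicBuckets(true)
                                .build())
                        .build());
            }
        }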

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "PutStorageLensConfiguration":{ + "name":"PutStorageLensConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/storagelens/{storagelensid}" + }, + "input":{ + "shape":"PutStorageLensConfigurationRequest", + "locationName":"PutStorageLensConfigurationRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "documentation":"

    Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    To use this action, you must have permission to perform the s3:PutStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "PutStorageLensConfigurationTagging":{ + "name":"PutStorageLensConfigurationTagging", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/storagelens/{storagelensid}/tagging" + }, + "input":{ + "shape":"PutStorageLensConfigurationTaggingRequest", + "locationName":"PutStorageLensConfigurationTaggingRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "output":{"shape":"PutStorageLensConfigurationTaggingResult"}, + "documentation":"

    Puts or replaces tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "UpdateJobPriority":{ + "name":"UpdateJobPriority", + "http":{ + "method":"POST", + "requestUri":"/v20180820/jobs/{id}/priority" + }, + "input":{"shape":"UpdateJobPriorityRequest"}, + "output":{"shape":"UpdateJobPriorityResult"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

    Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "UpdateJobStatus":{ + "name":"UpdateJobStatus", + "http":{ + "method":"POST", + "requestUri":"/v20180820/jobs/{id}/status" + }, + "input":{"shape":"UpdateJobStatusRequest"}, + "output":{"shape":"UpdateJobStatusResult"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"JobStatusException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

    Updates the status for the specified job. Use this operation to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
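
    Not part of the service model: a minimal sketch of confirming a job so that it starts running, using the generated SDK for Java v2 client. The account ID and job ID are hypothetical, and the RequestedJobStatus and StatusUpdateReason member names are assumed from the Batch Operations model.

        import software.amazon.awssdk.services.s3control.S3ControlClient;
        import software.amazon.awssdk.services.s3control.model.UpdateJobStatusRequest;

        public class UpdateJobStatusExample {
            public static void main(String[] args) {
                S3ControlClient s3Control = S3ControlClient.create();
                s3Control.updateJobStatus(UpdateJobStatusRequest.builder()
                        .accountId("123456789012")                      // hypothetical account ID
                        .jobId("00e123a4-c0d8-41f4-a0eb-b46f9ba5b07c")  // hypothetical job ID
                        .requestedJobStatus("Ready")                    // assumed member; "Ready" confirms, "Cancelled" cancels
                        .statusUpdateReason("Confirmed after review")   // assumed optional member
                        .build());
            }
        }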

    Related actions include:

    ", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + } + }, + "shapes":{ + "AbortIncompleteMultipartUpload":{ + "type":"structure", + "members":{ + "DaysAfterInitiation":{ + "shape":"DaysAfterInitiation", + "documentation":"

    Specifies the number of days after which Amazon S3 aborts an incomplete multipart upload to the Outposts bucket.

    " + } + }, + "documentation":"

    The container for aborting an incomplete multipart upload.

    " + }, + "AccessPoint":{ + "type":"structure", + "required":[ + "Name", + "NetworkOrigin", + "Bucket" + ], + "members":{ + "Name":{ + "shape":"AccessPointName", + "documentation":"

    The name of this access point.

    " + }, + "NetworkOrigin":{ + "shape":"NetworkOrigin", + "documentation":"

    Indicates whether this access point allows access from the public internet. If VpcConfiguration is specified for this access point, then NetworkOrigin is VPC, and the access point doesn't allow access from the public internet. Otherwise, NetworkOrigin is Internet, and the access point allows access from the public internet, subject to the access point and bucket access policies.

    " + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

    The virtual private cloud (VPC) configuration for this access point, if one exists.

    " + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the bucket associated with this access point.

    " + }, + "AccessPointArn":{ + "shape":"S3AccessPointArn", + "documentation":"

    The ARN for the access point.

    " + } + }, + "documentation":"

    An access point used to access a bucket.

    " + }, + "AccessPointList":{ + "type":"list", + "member":{ + "shape":"AccessPoint", + "locationName":"AccessPoint" + } + }, + "AccessPointName":{ + "type":"string", + "max":50, + "min":3 + }, + "AccountId":{ + "type":"string", + "max":64, + "pattern":"^\\d{12}$" + }, + "AccountLevel":{ + "type":"structure", + "required":["BucketLevel"], + "members":{ + "ActivityMetrics":{ + "shape":"ActivityMetrics", + "documentation":"

    A container for the S3 Storage Lens activity metrics.

    " + }, + "BucketLevel":{ + "shape":"BucketLevel", + "documentation":"

    A container for the S3 Storage Lens bucket-level configuration.

    " + } + }, + "documentation":"

    A container for the account-level Amazon S3 Storage Lens configuration.

    " + }, + "ActivityMetrics":{ + "type":"structure", + "members":{ + "IsEnabled":{ + "shape":"IsEnabled", + "documentation":"

    A container for whether the activity metrics are enabled.

    " + } + }, + "documentation":"

    A container for the activity metrics.

    " + }, + "AwsOrgArn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:[a-z\\-]+:organizations::\\d{12}:organization\\/o-[a-z0-9]{10,32}" + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "Boolean":{"type":"boolean"}, + "BucketAlreadyExists":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The requested Outposts bucket name is not available. The bucket namespace is shared by all users of the AWS Outposts in this Region. Select a different name and try again.

    ", + "exception":true + }, + "BucketAlreadyOwnedByYou":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The Outposts bucket you tried to create already exists, and you own it.

    ", + "exception":true + }, + "BucketCannedACL":{ + "type":"string", + "enum":[ + "private", + "public-read", + "public-read-write", + "authenticated-read" + ] + }, + "BucketLevel":{ + "type":"structure", + "members":{ + "ActivityMetrics":{ + "shape":"ActivityMetrics", + "documentation":"

    A container for the bucket-level activity metrics for Amazon S3 Storage Lens.

    " + }, + "PrefixLevel":{ + "shape":"PrefixLevel", + "documentation":"

    A container for the bucket-level prefix-level metrics for S3 Storage Lens.

    " + } + }, + "documentation":"

    A container for the bucket-level configuration.

    " + }, + "BucketLocationConstraint":{ + "type":"string", + "enum":[ + "EU", + "eu-west-1", + "us-west-1", + "us-west-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "sa-east-1", + "cn-north-1", + "eu-central-1" + ] + }, + "BucketName":{ + "type":"string", + "max":255, + "min":3 + }, + "Buckets":{ + "type":"list", + "member":{ + "shape":"S3BucketArnString", + "locationName":"Arn" + } + }, + "ConfigId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9\\-\\_\\.]+" + }, + "ConfirmRemoveSelfBucketAccess":{"type":"boolean"}, + "ConfirmationRequired":{"type":"boolean"}, + "ContinuationToken":{"type":"string"}, + "CreateAccessPointRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name", + "Bucket" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID for the owner of the bucket for which you want to create an access point.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"AccessPointName", + "documentation":"

    The name you want to assign to this access point.

    ", + "location":"uri", + "locationName":"name" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the bucket that you want to associate this access point with.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.
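
    Not part of the service model: a minimal sketch of passing this parameter as an Outposts bucket ARN when creating an access point through the generated SDK for Java v2 client. The account ID, names, ARN, and VPC ID are hypothetical, and the VpcId member name is assumed.

        import software.amazon.awssdk.services.s3control.S3ControlClient;
        import software.amazon.awssdk.services.s3control.model.CreateAccessPointRequest;
        import software.amazon.awssdk.services.s3control.model.CreateAccessPointResponse;
        import software.amazon.awssdk.services.s3control.model.VpcConfiguration;

        public class CreateAccessPointExample {
            public static void main(String[] args) {
                S3ControlClient s3Control = S3ControlClient.create();
                CreateAccessPointResponse response = s3Control.createAccessPoint(CreateAccessPointRequest.builder()
                        .accountId("123456789012")  // hypothetical account ID
                        .name("reports-ap")         // hypothetical access point name
                        // SDK callers pass the Outposts bucket ARN described above
                        .bucket("arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports")
                        // A VPC configuration is required for S3 on Outposts; the vpcId member is assumed
                        .vpcConfiguration(VpcConfiguration.builder().vpcId("vpc-0123456789abcdef0").build())
                        .build());
                System.out.println(response.accessPointArn());
            }
        }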

    " + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

    If you include this field, Amazon S3 restricts access to this access point to requests from the specified virtual private cloud (VPC).

    This is required for creating an access point for Amazon S3 on Outposts buckets.

    " + }, + "PublicAccessBlockConfiguration":{"shape":"PublicAccessBlockConfiguration"} + } + }, + "CreateAccessPointResult":{ + "type":"structure", + "members":{ + "AccessPointArn":{ + "shape":"S3AccessPointArn", + "documentation":"

    The ARN of the access point.

    This is only supported by Amazon S3 on Outposts.

    " + } + } + }, + "CreateBucketConfiguration":{ + "type":"structure", + "members":{ + "LocationConstraint":{ + "shape":"BucketLocationConstraint", + "documentation":"

    Specifies the Region where the bucket will be created. If you are creating a bucket in the US East (N. Virginia) Region (us-east-1), you do not need to specify the location.

    This is not supported by Amazon S3 on Outposts buckets.

    " + } + }, + "documentation":"

    The container for the bucket configuration.

    This is not supported by Amazon S3 on Outposts buckets.

    " + }, + "CreateBucketRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "ACL":{ + "shape":"BucketCannedACL", + "documentation":"

    The canned ACL to apply to the bucket.

    This is not supported by Amazon S3 on Outposts buckets.

    ", + "location":"header", + "locationName":"x-amz-acl" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the bucket.

    ", + "location":"uri", + "locationName":"name" + }, + "CreateBucketConfiguration":{ + "shape":"CreateBucketConfiguration", + "documentation":"

    The configuration information for the bucket.

    This is not supported by Amazon S3 on Outposts buckets.

    ", + "locationName":"CreateBucketConfiguration", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "documentation":"

    Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.

    This is not supported by Amazon S3 on Outposts buckets.

    ", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "documentation":"

    Allows grantee to list the objects in the bucket.

    This is not supported by Amazon S3 on Outposts buckets.

    ", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "documentation":"

    Allows grantee to read the bucket ACL.

    This is not supported by Amazon S3 on Outposts buckets.

    ", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWrite":{ + "shape":"GrantWrite", + "documentation":"

    Allows grantee to create, overwrite, and delete any object in the bucket.

    This is not supported by Amazon S3 on Outposts buckets.

    ", + "location":"header", + "locationName":"x-amz-grant-write" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "documentation":"

    Allows grantee to write the ACL for the applicable bucket.

    This is not supported by Amazon S3 on Outposts buckets.

    ", + "location":"header", + "locationName":"x-amz-grant-write-acp" + }, + "ObjectLockEnabledForBucket":{ + "shape":"ObjectLockEnabledForBucket", + "documentation":"

    Specifies whether you want S3 Object Lock to be enabled for the new bucket.

    This is not supported by Amazon S3 on Outposts buckets.

    ", + "location":"header", + "locationName":"x-amz-bucket-object-lock-enabled" + }, + "OutpostId":{ + "shape":"NonEmptyMaxLength64String", + "documentation":"

    The ID of the Outpost where the bucket is being created.

    This is required by Amazon S3 on Outposts buckets.

    ", + "location":"header", + "locationName":"x-amz-outpost-id" + } + }, + "payload":"CreateBucketConfiguration" + }, + "CreateBucketResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"Location", + "documentation":"

    The location of the bucket.

    ", + "location":"header", + "locationName":"Location" + }, + "BucketArn":{ + "shape":"S3RegionalBucketArn", + "documentation":"

    The Amazon Resource Name (ARN) of the bucket.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    " + } + } + }, + "CreateJobRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Operation", + "Report", + "ClientRequestToken", + "Manifest", + "Priority", + "RoleArn" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID that creates the job.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "ConfirmationRequired":{ + "shape":"ConfirmationRequired", + "documentation":"

    Indicates whether confirmation is required before Amazon S3 runs the job. Confirmation is only required for jobs created through the Amazon S3 console.

    ", + "box":true + }, + "Operation":{ + "shape":"JobOperation", + "documentation":"

    The operation that you want this job to perform on each object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service Developer Guide.

    " + }, + "Report":{ + "shape":"JobReport", + "documentation":"

    Configuration parameters for the optional job-completion report.

    " + }, + "ClientRequestToken":{ + "shape":"NonEmptyMaxLength64String", + "documentation":"

    An idempotency token to ensure that you don't accidentally submit the same request twice. You can use any string up to the maximum length.

    ", + "idempotencyToken":true + }, + "Manifest":{ + "shape":"JobManifest", + "documentation":"

    Configuration parameters for the manifest.

    " + }, + "Description":{ + "shape":"NonEmptyMaxLength256String", + "documentation":"

    A description for this job. You can use any string within the permitted length. Descriptions don't need to be unique and can be used for multiple jobs.

    " + }, + "Priority":{ + "shape":"JobPriority", + "documentation":"

    The numerical priority for this job. Higher numbers indicate higher priority.

    ", + "box":true + }, + "RoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

    The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to run this job's operation on each object in the manifest.

    " + }, + "Tags":{ + "shape":"S3TagSet", + "documentation":"

    A set of tags to associate with the S3 Batch Operations job. This is an optional parameter.

    " + } + } + }, + "CreateJobResult":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID for this job. Amazon S3 generates this ID automatically and returns it after a successful Create Job request.

    " + } + } + }, + "CreationDate":{"type":"timestamp"}, + "Date":{"type":"timestamp"}, + "Days":{"type":"integer"}, + "DaysAfterInitiation":{"type":"integer"}, + "DeleteAccessPointPolicyRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID for the account that owns the specified access point.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"AccessPointName", + "documentation":"

    The name of the access point whose policy you want to delete.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteAccessPointRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID for the account that owns the specified access point.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"AccessPointName", + "documentation":"

    The name of the access point you want to delete.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteBucketLifecycleConfigurationRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Outposts bucket whose lifecycle configuration you want to delete.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    Specifies the bucket.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteBucketPolicyRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID of the Outposts bucket.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    Specifies the bucket.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteBucketRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID that owns the Outposts bucket.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    Specifies the bucket being deleted.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteBucketTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Outposts bucket tag set to be removed.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The bucket ARN that has the tag set to be removed.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteJobTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID associated with the S3 Batch Operations job.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID for the S3 Batch Operations job whose tags you want to delete.

    ", + "location":"uri", + "locationName":"id" + } + } + }, + "DeleteJobTaggingResult":{ + "type":"structure", + "members":{ + } + }, + "DeletePublicAccessBlockRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID for the AWS account whose PublicAccessBlock configuration you want to remove.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + } + }, + "DeleteStorageLensConfigurationRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

    The ID of the S3 Storage Lens configuration.

    ", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID of the requester.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + } + }, + "DeleteStorageLensConfigurationTaggingRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

    The ID of the S3 Storage Lens configuration.

    ", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID of the requester.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + } + }, + "DeleteStorageLensConfigurationTaggingResult":{ + "type":"structure", + "members":{ + } + }, + "DescribeJobRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID for the job whose information you want to retrieve.

    ", + "location":"uri", + "locationName":"id" + } + } + }, + "DescribeJobResult":{ + "type":"structure", + "members":{ + "Job":{ + "shape":"JobDescriptor", + "documentation":"

    Contains the configuration parameters and status for the job specified in the Describe Job request.

    " + } + } + }, + "ExceptionMessage":{ + "type":"string", + "max":1024, + "min":1 + }, + "Exclude":{ + "type":"structure", + "members":{ + "Buckets":{ + "shape":"Buckets", + "documentation":"

    A container for the S3 Storage Lens bucket excludes.

    " + }, + "Regions":{ + "shape":"Regions", + "documentation":"

    A container for the S3 Storage Lens Region excludes.

    " + } + }, + "documentation":"

    A container for what Amazon S3 Storage Lens will exclude.

    " + }, + "ExpirationStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "ExpiredObjectDeleteMarker":{"type":"boolean"}, + "Format":{ + "type":"string", + "enum":[ + "CSV", + "Parquet" + ] + }, + "FunctionArnString":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" + }, + "GetAccessPointPolicyRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID for the account that owns the specified access point.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"AccessPointName", + "documentation":"

    The name of the access point whose policy you want to retrieve.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "GetAccessPointPolicyResult":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"Policy", + "documentation":"

    The access point policy associated with the specified access point.

    " + } + } + }, + "GetAccessPointPolicyStatusRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID for the account that owns the specified access point.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"AccessPointName", + "documentation":"

    The name of the access point whose policy status you want to retrieve.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "GetAccessPointPolicyStatusResult":{ + "type":"structure", + "members":{ + "PolicyStatus":{ + "shape":"PolicyStatus", + "documentation":"

    Indicates the current policy status of the specified access point.

    " + } + } + }, + "GetAccessPointRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID for the account that owns the specified access point.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"AccessPointName", + "documentation":"

    The name of the access point whose configuration information you want to retrieve.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "GetAccessPointResult":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"AccessPointName", + "documentation":"

    The name of the specified access point.

    " + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the bucket associated with the specified access point.

    " + }, + "NetworkOrigin":{ + "shape":"NetworkOrigin", + "documentation":"

    Indicates whether this access point allows access from the public internet. If VpcConfiguration is specified for this access point, then NetworkOrigin is VPC, and the access point doesn't allow access from the public internet. Otherwise, NetworkOrigin is Internet, and the access point allows access from the public internet, subject to the access point and bucket access policies.

    This will always be true for an Amazon S3 on Outposts access point.

    " + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

    Contains the virtual private cloud (VPC) configuration for the specified access point.

    " + }, + "PublicAccessBlockConfiguration":{"shape":"PublicAccessBlockConfiguration"}, + "CreationDate":{ + "shape":"CreationDate", + "documentation":"

    The date and time when the specified access point was created.

    " + } + } + }, + "GetBucketLifecycleConfigurationRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Outposts bucket.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The Amazon Resource Name (ARN) of the bucket.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "GetBucketLifecycleConfigurationResult":{ + "type":"structure", + "members":{ + "Rules":{ + "shape":"LifecycleRules", + "documentation":"

    Container for the lifecycle rule of the Outposts bucket.

    " + } + } + }, + "GetBucketPolicyRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Outposts bucket.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    Specifies the bucket.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "GetBucketPolicyResult":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"Policy", + "documentation":"

    The policy of the Outposts bucket.

    " + } + } + }, + "GetBucketRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Outposts bucket.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    Specifies the bucket.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "GetBucketResult":{ + "type":"structure", + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The Outposts bucket requested.

    " + }, + "PublicAccessBlockEnabled":{ + "shape":"PublicAccessBlockEnabled", + "documentation":"

    " + }, + "CreationDate":{ + "shape":"CreationDate", + "documentation":"

    The creation date of the Outposts bucket.

    " + } + } + }, + "GetBucketTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Outposts bucket.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    Specifies the bucket.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "GetBucketTaggingResult":{ + "type":"structure", + "required":["TagSet"], + "members":{ + "TagSet":{ + "shape":"S3TagSet", + "documentation":"

    The tag set of the Outposts bucket.

    " + } + } + }, + "GetJobTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID associated with the S3 Batch Operations job.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID for the S3 Batch Operations job whose tags you want to retrieve.

    ", + "location":"uri", + "locationName":"id" + } + } + }, + "GetJobTaggingResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"S3TagSet", + "documentation":"

    The set of tags associated with the S3 Batch Operations job.

    " + } + } + }, + "GetPublicAccessBlockOutput":{ + "type":"structure", + "members":{ + "PublicAccessBlockConfiguration":{ + "shape":"PublicAccessBlockConfiguration", + "documentation":"

    The PublicAccessBlock configuration currently in effect for this AWS account.

    " + } + }, + "payload":"PublicAccessBlockConfiguration" + }, + "GetPublicAccessBlockRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID for the AWS account whose PublicAccessBlock configuration you want to retrieve.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + } + }, + "GetStorageLensConfigurationRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

    The ID of the Amazon S3 Storage Lens configuration.

    ", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID of the requester.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + } + }, + "GetStorageLensConfigurationResult":{ + "type":"structure", + "members":{ + "StorageLensConfiguration":{ + "shape":"StorageLensConfiguration", + "documentation":"

    The S3 Storage Lens configuration requested.

    " + } + }, + "payload":"StorageLensConfiguration" + }, + "GetStorageLensConfigurationTaggingRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

    The ID of the Amazon S3 Storage Lens configuration.

    ", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID of the requester.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + } + }, + "GetStorageLensConfigurationTaggingResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"StorageLensTags", + "documentation":"

    The tags of the S3 Storage Lens configuration requested.

    " + } + } + }, + "GrantFullControl":{"type":"string"}, + "GrantRead":{"type":"string"}, + "GrantReadACP":{"type":"string"}, + "GrantWrite":{"type":"string"}, + "GrantWriteACP":{"type":"string"}, + "IAMRoleArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:[^:]+:iam::\\d{12}:role/.*" + }, + "ID":{"type":"string"}, + "IdempotencyException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "Include":{ + "type":"structure", + "members":{ + "Buckets":{ + "shape":"Buckets", + "documentation":"

    A container for the S3 Storage Lens bucket includes.

    " + }, + "Regions":{ + "shape":"Regions", + "documentation":"

    A container for the S3 Storage Lens Region includes.

    " + } + }, + "documentation":"

    A container for what the Amazon S3 Storage Lens configuration includes.

    " + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    ", + "exception":true, + "fault":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "IsEnabled":{"type":"boolean"}, + "IsPublic":{"type":"boolean"}, + "JobArn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:[^:]+:s3:[a-zA-Z0-9\\-]+:\\d{12}:job\\/.*" + }, + "JobCreationTime":{"type":"timestamp"}, + "JobDescriptor":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID for the specified job.

    " + }, + "ConfirmationRequired":{ + "shape":"ConfirmationRequired", + "documentation":"

    Indicates whether confirmation is required before Amazon S3 begins running the specified job. Confirmation is required only for jobs created through the Amazon S3 console.

    ", + "box":true + }, + "Description":{ + "shape":"NonEmptyMaxLength256String", + "documentation":"

    The description for this job, if one was provided in this job's Create Job request.

    ", + "box":true + }, + "JobArn":{ + "shape":"JobArn", + "documentation":"

    The Amazon Resource Name (ARN) for this job.

    ", + "box":true + }, + "Status":{ + "shape":"JobStatus", + "documentation":"

    The current status of the specified job.

    " + }, + "Manifest":{ + "shape":"JobManifest", + "documentation":"

    The configuration information for the specified job's manifest object.

    ", + "box":true + }, + "Operation":{ + "shape":"JobOperation", + "documentation":"

    The operation that the specified job is configured to run on the objects listed in the manifest.

    ", + "box":true + }, + "Priority":{ + "shape":"JobPriority", + "documentation":"

    The priority of the specified job.

    " + }, + "ProgressSummary":{ + "shape":"JobProgressSummary", + "documentation":"

    Describes the total number of tasks that the specified job has run, the number of tasks that succeeded, and the number of tasks that failed.

    ", + "box":true + }, + "StatusUpdateReason":{ + "shape":"JobStatusUpdateReason", + "documentation":"

    The reason for updating the job.

    ", + "box":true + }, + "FailureReasons":{ + "shape":"JobFailureList", + "documentation":"

    If the specified job failed, this field contains information describing the failure.

    ", + "box":true + }, + "Report":{ + "shape":"JobReport", + "documentation":"

    Contains the configuration information for the job-completion report if you requested one in the Create Job request.

    ", + "box":true + }, + "CreationTime":{ + "shape":"JobCreationTime", + "documentation":"

    A timestamp indicating when this job was created.

    " + }, + "TerminationDate":{ + "shape":"JobTerminationDate", + "documentation":"

    A timestamp indicating when this job terminated. A job's termination date is the date and time when it succeeded, failed, or was canceled.

    ", + "box":true + }, + "RoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

    The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role assigned to run the tasks for this job.

    ", + "box":true + }, + "SuspendedDate":{ + "shape":"SuspendedDate", + "documentation":"

    The timestamp when this job was suspended, if it has been suspended.

    ", + "box":true + }, + "SuspendedCause":{ + "shape":"SuspendedCause", + "documentation":"

    The reason why the specified job was suspended. A job is only suspended if you create it through the Amazon S3 console. When you create the job, it enters the Suspended state to await confirmation before running. After you confirm the job, it automatically exits the Suspended state.

    ", + "box":true + } + }, + "documentation":"

    A container element for the job configuration and status information returned by a Describe Job request.
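
    A brief sketch of reading these fields from a Describe Job response, assuming the DescribeJobRequest/DescribeJobResponse classes defined elsewhere in this model and the usual generated accessor names; the account ID is a placeholder and the job ID is taken from the command line.

```java
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.DescribeJobRequest;
import software.amazon.awssdk.services.s3control.model.JobDescriptor;

public class DescribeBatchJob {
    public static void main(String[] args) {
        try (S3ControlClient s3Control = S3ControlClient.create()) {
            JobDescriptor job = s3Control.describeJob(DescribeJobRequest.builder()
                            .accountId("111122223333")   // placeholder account ID
                            .jobId(args[0])              // job ID passed on the command line
                            .build())
                    .job();

            System.out.println("Status:   " + job.statusAsString());
            System.out.println("Priority: " + job.priority());
            if (job.progressSummary() != null) {
                System.out.println("Succeeded: " + job.progressSummary().numberOfTasksSucceeded());
                System.out.println("Failed:    " + job.progressSummary().numberOfTasksFailed());
            }
        }
    }
}
```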

    " + }, + "JobFailure":{ + "type":"structure", + "members":{ + "FailureCode":{ + "shape":"JobFailureCode", + "documentation":"

    The failure code, if any, for the specified job.

    " + }, + "FailureReason":{ + "shape":"JobFailureReason", + "documentation":"

    The failure reason, if any, for the specified job.

    " + } + }, + "documentation":"

    If this job failed, this element indicates why the job failed.

    " + }, + "JobFailureCode":{ + "type":"string", + "max":64, + "min":1 + }, + "JobFailureList":{ + "type":"list", + "member":{"shape":"JobFailure"} + }, + "JobFailureReason":{ + "type":"string", + "max":256, + "min":1 + }, + "JobId":{ + "type":"string", + "max":36, + "min":5, + "pattern":"[a-zA-Z0-9\\-\\_]+" + }, + "JobListDescriptor":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID for the specified job.

    " + }, + "Description":{ + "shape":"NonEmptyMaxLength256String", + "documentation":"

    The user-specified description that was included in the specified job's Create Job request.

    " + }, + "Operation":{ + "shape":"OperationName", + "documentation":"

    The operation that the specified job is configured to run on each object listed in the manifest.

    " + }, + "Priority":{ + "shape":"JobPriority", + "documentation":"

    The current priority for the specified job.

    " + }, + "Status":{ + "shape":"JobStatus", + "documentation":"

    The specified job's current status.

    " + }, + "CreationTime":{ + "shape":"JobCreationTime", + "documentation":"

    A timestamp indicating when the specified job was created.

    " + }, + "TerminationDate":{ + "shape":"JobTerminationDate", + "documentation":"

    A timestamp indicating when the specified job terminated. A job's termination date is the date and time when it succeeded, failed, or was canceled.

    " + }, + "ProgressSummary":{ + "shape":"JobProgressSummary", + "documentation":"

    Describes the total number of tasks that the specified job has run, the number of tasks that succeeded, and the number of tasks that failed.

    " + } + }, + "documentation":"

    Contains the configuration and status information for a single job retrieved as part of a job list.

    " + }, + "JobListDescriptorList":{ + "type":"list", + "member":{"shape":"JobListDescriptor"} + }, + "JobManifest":{ + "type":"structure", + "required":[ + "Spec", + "Location" + ], + "members":{ + "Spec":{ + "shape":"JobManifestSpec", + "documentation":"

    Describes the format of the specified job's manifest. If the manifest is in CSV format, also describes the columns contained within the manifest.

    " + }, + "Location":{ + "shape":"JobManifestLocation", + "documentation":"

    Contains the information required to locate the specified job's manifest.

    " + } + }, + "documentation":"

    Contains the configuration information for a job's manifest.

    " + }, + "JobManifestFieldList":{ + "type":"list", + "member":{"shape":"JobManifestFieldName"} + }, + "JobManifestFieldName":{ + "type":"string", + "enum":[ + "Ignore", + "Bucket", + "Key", + "VersionId" + ] + }, + "JobManifestFormat":{ + "type":"string", + "enum":[ + "S3BatchOperations_CSV_20180820", + "S3InventoryReport_CSV_20161130" + ] + }, + "JobManifestLocation":{ + "type":"structure", + "required":[ + "ObjectArn", + "ETag" + ], + "members":{ + "ObjectArn":{ + "shape":"S3KeyArnString", + "documentation":"

    The Amazon Resource Name (ARN) for a manifest object.

    " + }, + "ObjectVersionId":{ + "shape":"S3ObjectVersionId", + "documentation":"

    The optional version ID to identify a specific version of the manifest object.

    ", + "box":true + }, + "ETag":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    The ETag for the specified manifest object.

    " + } + }, + "documentation":"

    Contains the information required to locate a manifest object.

    " + }, + "JobManifestSpec":{ + "type":"structure", + "required":["Format"], + "members":{ + "Format":{ + "shape":"JobManifestFormat", + "documentation":"

    Indicates which of the available formats the specified manifest uses.

    " + }, + "Fields":{ + "shape":"JobManifestFieldList", + "documentation":"

    If the specified manifest object is in the S3BatchOperations_CSV_20180820 format, this element describes which columns contain the required data.

    ", + "box":true + } + }, + "documentation":"

    Describes the format of a manifest. If the manifest is in CSV format, also describes the columns contained within the manifest.
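
    For example, a manifest in the S3BatchOperations_CSV_20180820 format with Bucket and Key columns might be described as in this sketch; the manifest object ARN and ETag are placeholders, and the enum constant names assume the SDK's usual generated naming.

```java
import software.amazon.awssdk.services.s3control.model.JobManifest;
import software.amazon.awssdk.services.s3control.model.JobManifestFieldName;
import software.amazon.awssdk.services.s3control.model.JobManifestLocation;
import software.amazon.awssdk.services.s3control.model.JobManifestSpec;

final class ManifestExample {
    // A CSV manifest whose rows carry the bucket name and object key for each task.
    static JobManifest csvManifest() {
        return JobManifest.builder()
                .spec(JobManifestSpec.builder()
                        .format("S3BatchOperations_CSV_20180820")   // raw enum value from this model
                        .fields(JobManifestFieldName.BUCKET, JobManifestFieldName.KEY)
                        .build())
                .location(JobManifestLocation.builder()
                        .objectArn("arn:aws:s3:::example-manifests/manifest.csv")  // placeholder ARN
                        .eTag("60e460c9d1046e73f7dde5043ac3ae85")                  // placeholder ETag
                        .build())
                .build();
    }
}
```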

    " + }, + "JobNumberOfTasksFailed":{ + "type":"long", + "min":0 + }, + "JobNumberOfTasksSucceeded":{ + "type":"long", + "min":0 + }, + "JobOperation":{ + "type":"structure", + "members":{ + "LambdaInvoke":{ + "shape":"LambdaInvokeOperation", + "documentation":"

    Directs the specified job to invoke an AWS Lambda function on each object in the manifest.

    ", + "box":true + }, + "S3PutObjectCopy":{ + "shape":"S3CopyObjectOperation", + "documentation":"

    Directs the specified job to run a PUT Copy object call on each object in the manifest.

    ", + "box":true + }, + "S3PutObjectAcl":{ + "shape":"S3SetObjectAclOperation", + "documentation":"

    Directs the specified job to run a PUT Object acl call on each object in the manifest.

    ", + "box":true + }, + "S3PutObjectTagging":{ + "shape":"S3SetObjectTaggingOperation", + "documentation":"

    Directs the specified job to run a PUT Object tagging call on each object in the manifest.

    ", + "box":true + }, + "S3InitiateRestoreObject":{ + "shape":"S3InitiateRestoreObjectOperation", + "documentation":"

    Directs the specified job to run an Initiate Glacier Restore call on each object in the manifest.

    ", + "box":true + }, + "S3PutObjectLegalHold":{ + "shape":"S3SetObjectLegalHoldOperation", + "box":true + }, + "S3PutObjectRetention":{ + "shape":"S3SetObjectRetentionOperation", + "box":true + } + }, + "documentation":"

    The operation that you want this job to perform on each object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service Developer Guide.

    " + }, + "JobPriority":{ + "type":"integer", + "max":2147483647, + "min":0 + }, + "JobProgressSummary":{ + "type":"structure", + "members":{ + "TotalNumberOfTasks":{ + "shape":"JobTotalNumberOfTasks", + "documentation":"

    ", + "box":true + }, + "NumberOfTasksSucceeded":{ + "shape":"JobNumberOfTasksSucceeded", + "documentation":"

    ", + "box":true + }, + "NumberOfTasksFailed":{ + "shape":"JobNumberOfTasksFailed", + "documentation":"

    ", + "box":true + } + }, + "documentation":"

    Describes the total number of tasks that the specified job has started, the number of tasks that succeeded, and the number of tasks that failed.

    " + }, + "JobReport":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Bucket":{ + "shape":"S3BucketArnString", + "documentation":"

    The Amazon Resource Name (ARN) for the bucket where specified job-completion report will be stored.

    ", + "box":true + }, + "Format":{ + "shape":"JobReportFormat", + "documentation":"

    The format of the specified job-completion report.

    ", + "box":true + }, + "Enabled":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the specified job will generate a job-completion report.

    " + }, + "Prefix":{ + "shape":"ReportPrefixString", + "documentation":"

    An optional prefix to describe where in the specified bucket the job-completion report will be stored. Amazon S3 stores the job-completion report at <prefix>/job-<job-id>/report.json.

    ", + "box":true + }, + "ReportScope":{ + "shape":"JobReportScope", + "documentation":"

    Indicates whether the job-completion report will include details of all tasks or only failed tasks.

    ", + "box":true + } + }, + "documentation":"

    Contains the configuration parameters for a job-completion report.
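
    A small sketch of such a report configuration, assuming the generated JobReport class; the report bucket ARN and prefix are placeholders, and enum values are passed as the raw strings defined in this model.

```java
import software.amazon.awssdk.services.s3control.model.JobReport;

final class ReportExample {
    // Write a CSV report covering only failed tasks; it is stored at
    // <prefix>/job-<job-id>/report.json in the placeholder bucket below.
    static JobReport failedTasksReport() {
        return JobReport.builder()
                .enabled(true)
                .bucket("arn:aws:s3:::example-reports")   // placeholder bucket ARN
                .prefix("batch")
                .format("Report_CSV_20180820")
                .reportScope("FailedTasksOnly")
                .build();
    }
}
```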

    " + }, + "JobReportFormat":{ + "type":"string", + "enum":["Report_CSV_20180820"] + }, + "JobReportScope":{ + "type":"string", + "enum":[ + "AllTasks", + "FailedTasksOnly" + ] + }, + "JobStatus":{ + "type":"string", + "enum":[ + "Active", + "Cancelled", + "Cancelling", + "Complete", + "Completing", + "Failed", + "Failing", + "New", + "Paused", + "Pausing", + "Preparing", + "Ready", + "Suspended" + ] + }, + "JobStatusException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "JobStatusList":{ + "type":"list", + "member":{"shape":"JobStatus"} + }, + "JobStatusUpdateReason":{ + "type":"string", + "max":256, + "min":1 + }, + "JobTerminationDate":{"type":"timestamp"}, + "JobTotalNumberOfTasks":{ + "type":"long", + "min":0 + }, + "KmsKeyArnString":{ + "type":"string", + "max":2000, + "min":1 + }, + "LambdaInvokeOperation":{ + "type":"structure", + "members":{ + "FunctionArn":{ + "shape":"FunctionArnString", + "documentation":"

    The Amazon Resource Name (ARN) for the AWS Lambda function that the specified job will invoke for each object in the manifest.

    " + } + }, + "documentation":"

    Contains the configuration parameters for a Lambda Invoke operation.

    " + }, + "LifecycleConfiguration":{ + "type":"structure", + "members":{ + "Rules":{ + "shape":"LifecycleRules", + "documentation":"

    A lifecycle rule for individual objects in an Outposts bucket.

    " + } + }, + "documentation":"

    The container for the Outposts bucket lifecycle configuration.

    " + }, + "LifecycleExpiration":{ + "type":"structure", + "members":{ + "Date":{ + "shape":"Date", + "documentation":"

    Indicates the date when the object is to be deleted. Should be in GMT ISO 8601 format.

    " + }, + "Days":{ + "shape":"Days", + "documentation":"

    Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.

    " + }, + "ExpiredObjectDeleteMarker":{ + "shape":"ExpiredObjectDeleteMarker", + "documentation":"

    Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired. If set to false, the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.

    " + } + }, + "documentation":"

    The container of the Outposts bucket lifecycle expiration.

    " + }, + "LifecycleRule":{ + "type":"structure", + "required":["Status"], + "members":{ + "Expiration":{ + "shape":"LifecycleExpiration", + "documentation":"

    Specifies the expiration for the lifecycle of the object in the form of date, days, and whether the object has a delete marker.

    " + }, + "ID":{ + "shape":"ID", + "documentation":"

    Unique identifier for the rule. The value cannot be longer than 255 characters.

    " + }, + "Filter":{ + "shape":"LifecycleRuleFilter", + "documentation":"

    The container for the filter of lifecycle rule.

    " + }, + "Status":{ + "shape":"ExpirationStatus", + "documentation":"

    If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied.

    " + }, + "Transitions":{ + "shape":"TransitionList", + "documentation":"

    Specifies when an Amazon S3 object transitions to a specified storage class.

    This is not supported by Amazon S3 on Outposts buckets.

    " + }, + "NoncurrentVersionTransitions":{ + "shape":"NoncurrentVersionTransitionList", + "documentation":"

    Specifies the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to a specific storage class at a set period in the object's lifetime.

    This is not supported by Amazon S3 on Outposts buckets.

    " + }, + "NoncurrentVersionExpiration":{ + "shape":"NoncurrentVersionExpiration", + "documentation":"

    The noncurrent version expiration of the lifecycle rule.

    This is not supported by Amazon S3 on Outposts buckets.

    " + }, + "AbortIncompleteMultipartUpload":{ + "shape":"AbortIncompleteMultipartUpload", + "documentation":"

    Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 waits before permanently removing all parts of the upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy in the Amazon Simple Storage Service Developer Guide.

    " + } + }, + "documentation":"

    The container for the Outposts bucket lifecycle rule.

    " + }, + "LifecycleRuleAndOperator":{ + "type":"structure", + "members":{ + "Prefix":{ + "shape":"Prefix", + "documentation":"

    Prefix identifying one or more objects to which the rule applies.

    " + }, + "Tags":{ + "shape":"S3TagSet", + "documentation":"

    All of these tags must exist in the object's tag set in order for the rule to apply.

    " + } + }, + "documentation":"

    The container for the Outposts bucket lifecycle rule and operator.

    " + }, + "LifecycleRuleFilter":{ + "type":"structure", + "members":{ + "Prefix":{ + "shape":"Prefix", + "documentation":"

    Prefix identifying one or more objects to which the rule applies.

    " + }, + "Tag":{"shape":"S3Tag"}, + "And":{ + "shape":"LifecycleRuleAndOperator", + "documentation":"

    The container for the AND condition for the lifecycle rule.

    " + } + }, + "documentation":"

    The container for the filter of the lifecycle rule.

    " + }, + "LifecycleRules":{ + "type":"list", + "member":{ + "shape":"LifecycleRule", + "locationName":"Rule" + } + }, + "ListAccessPointsRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID for the owner of the bucket whose access points you want to list.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the bucket whose associated access points you want to list.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"querystring", + "locationName":"bucket" + }, + "NextToken":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    A continuation token. If a previous call to ListAccessPoints returned a continuation token in the NextToken field, then providing that value here causes Amazon S3 to retrieve the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of access points that you want to include in the list. If the specified bucket has more than this number of access points, then the response will include a continuation token in the NextToken field that you can use to retrieve the next page of access points.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAccessPointsResult":{ + "type":"structure", + "members":{ + "AccessPointList":{ + "shape":"AccessPointList", + "documentation":"

    Contains identification and configuration information for one or more access points associated with the specified bucket.

    " + }, + "NextToken":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    If the specified bucket has more access points than can be returned in one call to this API, this field contains a continuation token that you can provide in subsequent calls to this API to retrieve additional access points.

    " + } + } + }, + "ListJobsRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobStatuses":{ + "shape":"JobStatusList", + "documentation":"

    The List Jobs request returns jobs that match the statuses listed in this element.

    ", + "location":"querystring", + "locationName":"jobStatuses" + }, + "NextToken":{ + "shape":"StringForNextToken", + "documentation":"

    A pagination token to request the next page of results. Use the token that Amazon S3 returned in the NextToken element of the ListJobsResult from the previous List Jobs request.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of jobs that Amazon S3 will include in the List Jobs response. If there are more jobs than this number, the response will include a pagination token in the NextToken field to enable you to retrieve the next page of results.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListJobsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"StringForNextToken", + "documentation":"

    If the List Jobs request produced more than the maximum number of results, you can pass this value into a subsequent List Jobs request in order to retrieve the next page of results.

    " + }, + "Jobs":{ + "shape":"JobListDescriptorList", + "documentation":"

    The list of current jobs and jobs that have ended within the last 30 days.
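
    A minimal pagination sketch over this result, assuming the generated S3ControlClient; the account ID is a placeholder.

```java
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.ListJobsRequest;
import software.amazon.awssdk.services.s3control.model.ListJobsResponse;

public class ListAllJobs {
    public static void main(String[] args) {
        try (S3ControlClient s3Control = S3ControlClient.create()) {
            String nextToken = null;
            do {
                // Each page returns up to MaxResults jobs plus a NextToken when more remain.
                ListJobsResponse page = s3Control.listJobs(ListJobsRequest.builder()
                        .accountId("111122223333")   // placeholder account ID
                        .nextToken(nextToken)
                        .build());
                page.jobs().forEach(job ->
                        System.out.println(job.jobId() + " " + job.statusAsString()));
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }
}
```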

    " + } + } + }, + "ListRegionalBucketsRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Outposts bucket.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "NextToken":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "OutpostId":{ + "shape":"NonEmptyMaxLength64String", + "documentation":"

    The ID of the AWS Outposts.

    This is required by Amazon S3 on Outposts buckets.

    ", + "location":"header", + "locationName":"x-amz-outpost-id" + } + } + }, + "ListRegionalBucketsResult":{ + "type":"structure", + "members":{ + "RegionalBucketList":{ + "shape":"RegionalBucketList", + "documentation":"

    " + }, + "NextToken":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    NextToken is sent when isTruncated is true, which means there are more buckets that can be listed. The next list request to Amazon S3 can be continued with this NextToken. NextToken is obfuscated and is not a real key.

    " + } + } + }, + "ListStorageLensConfigurationEntry":{ + "type":"structure", + "required":[ + "Id", + "StorageLensArn", + "HomeRegion" + ], + "members":{ + "Id":{ + "shape":"ConfigId", + "documentation":"

    A container for the S3 Storage Lens configuration ID.

    " + }, + "StorageLensArn":{ + "shape":"StorageLensArn", + "documentation":"

    The ARN of the S3 Storage Lens configuration. This property is read-only.

    " + }, + "HomeRegion":{ + "shape":"S3AWSRegion", + "documentation":"

    A container for the S3 Storage Lens home Region. Your metrics data is stored and retained in your designated S3 Storage Lens home Region.

    " + }, + "IsEnabled":{ + "shape":"IsEnabled", + "documentation":"

    A container for whether the S3 Storage Lens configuration is enabled. This property is required.

    " + } + }, + "documentation":"

    Part of ListStorageLensConfigurationsResult. Each entry includes the description of the S3 Storage Lens configuration, its home Region, whether it is enabled, its Amazon Resource Name (ARN), and its config ID.

    " + }, + "ListStorageLensConfigurationsRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID of the requester.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "NextToken":{ + "shape":"ContinuationToken", + "documentation":"

    A pagination token to request the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListStorageLensConfigurationsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"ContinuationToken", + "documentation":"

    If the request produced more than the maximum number of S3 Storage Lens configuration results, you can pass this value into a subsequent request to retrieve the next page of results.

    " + }, + "StorageLensConfigurationList":{ + "shape":"StorageLensConfigurationList", + "documentation":"

    A list of S3 Storage Lens configurations.

    " + } + } + }, + "Location":{"type":"string"}, + "MaxLength1024String":{ + "type":"string", + "max":1024 + }, + "MaxResults":{ + "type":"integer", + "max":1000, + "min":0 + }, + "MinStorageBytesPercentage":{ + "type":"double", + "max":100, + "min":0.1 + }, + "NetworkOrigin":{ + "type":"string", + "enum":[ + "Internet", + "VPC" + ] + }, + "NoSuchPublicAccessBlockConfiguration":{ + "type":"structure", + "members":{ + "Message":{"shape":"NoSuchPublicAccessBlockConfigurationMessage"} + }, + "documentation":"

    Amazon S3 throws this exception if you make a GetPublicAccessBlock request against an account that doesn't have a PublicAccessBlockConfiguration set.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchPublicAccessBlockConfigurationMessage":{"type":"string"}, + "NonEmptyMaxLength1024String":{ + "type":"string", + "max":1024, + "min":1 + }, + "NonEmptyMaxLength2048String":{ + "type":"string", + "max":2048, + "min":1 + }, + "NonEmptyMaxLength256String":{ + "type":"string", + "max":256, + "min":1 + }, + "NonEmptyMaxLength64String":{ + "type":"string", + "max":64, + "min":1 + }, + "NoncurrentVersionExpiration":{ + "type":"structure", + "members":{ + "NoncurrentDays":{ + "shape":"Days", + "documentation":"

    Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.

    " + } + }, + "documentation":"

    The container of the noncurrent version expiration.

    " + }, + "NoncurrentVersionTransition":{ + "type":"structure", + "members":{ + "NoncurrentDays":{ + "shape":"Days", + "documentation":"

    Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent in the Amazon Simple Storage Service Developer Guide.

    " + }, + "StorageClass":{ + "shape":"TransitionStorageClass", + "documentation":"

    The class of storage used to store the object.

    " + } + }, + "documentation":"

    The container for the noncurrent version transition.

    " + }, + "NoncurrentVersionTransitionList":{ + "type":"list", + "member":{ + "shape":"NoncurrentVersionTransition", + "locationName":"NoncurrentVersionTransition" + } + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "ObjectLockEnabledForBucket":{"type":"boolean"}, + "OperationName":{ + "type":"string", + "enum":[ + "LambdaInvoke", + "S3PutObjectCopy", + "S3PutObjectAcl", + "S3PutObjectTagging", + "S3InitiateRestoreObject", + "S3PutObjectLegalHold", + "S3PutObjectRetention" + ] + }, + "OutputSchemaVersion":{ + "type":"string", + "enum":["V_1"] + }, + "Policy":{"type":"string"}, + "PolicyStatus":{ + "type":"structure", + "members":{ + "IsPublic":{ + "shape":"IsPublic", + "documentation":"

    ", + "locationName":"IsPublic" + } + }, + "documentation":"

    Indicates whether this access point policy is public. For more information about how Amazon S3 evaluates policies to determine whether they are public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

    " + }, + "Prefix":{"type":"string"}, + "PrefixLevel":{ + "type":"structure", + "required":["StorageMetrics"], + "members":{ + "StorageMetrics":{ + "shape":"PrefixLevelStorageMetrics", + "documentation":"

    A container for the prefix-level storage metrics for S3 Storage Lens.

    " + } + }, + "documentation":"

    A container for the prefix-level configuration.

    " + }, + "PrefixLevelStorageMetrics":{ + "type":"structure", + "members":{ + "IsEnabled":{ + "shape":"IsEnabled", + "documentation":"

    A container for whether prefix-level storage metrics are enabled.

    " + }, + "SelectionCriteria":{"shape":"SelectionCriteria"} + }, + "documentation":"

    A container for the prefix-level storage metrics for S3 Storage Lens.

    " + }, + "PublicAccessBlockConfiguration":{ + "type":"structure", + "members":{ + "BlockPublicAcls":{ + "shape":"Setting", + "documentation":"

    Specifies whether Amazon S3 should block public access control lists (ACLs) for buckets in this account. Setting this element to TRUE causes the following behavior:

    • PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public.

    • PUT Object calls fail if the request includes a public ACL.

    • PUT Bucket calls fail if the request includes a public ACL.

    Enabling this setting doesn't affect existing policies or ACLs.

    This is not supported for Amazon S3 on Outposts.

    ", + "locationName":"BlockPublicAcls" + }, + "IgnorePublicAcls":{ + "shape":"Setting", + "documentation":"

    Specifies whether Amazon S3 should ignore public ACLs for buckets in this account. Setting this element to TRUE causes Amazon S3 to ignore all public ACLs on buckets in this account and any objects that they contain.

    Enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.

    This is not supported for Amazon S3 on Outposts.

    ", + "locationName":"IgnorePublicAcls" + }, + "BlockPublicPolicy":{ + "shape":"Setting", + "documentation":"

    Specifies whether Amazon S3 should block public bucket policies for buckets in this account. Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.

    Enabling this setting doesn't affect existing bucket policies.

    This is not supported for Amazon S3 on Outposts.

    ", + "locationName":"BlockPublicPolicy" + }, + "RestrictPublicBuckets":{ + "shape":"Setting", + "documentation":"

    Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to TRUE restricts access to buckets with public policies to only AWS service principals and authorized users within this account.

    Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

    This is not supported for Amazon S3 on Outposts.

    ", + "locationName":"RestrictPublicBuckets" + } + }, + "documentation":"

    The PublicAccessBlock configuration that you want to apply to this Amazon S3 account. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

    This is not supported for Amazon S3 on Outposts.
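
    As an account-level example of applying all four settings, here is a hedged sketch using the generated client; the account ID is a placeholder.

```java
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.PublicAccessBlockConfiguration;
import software.amazon.awssdk.services.s3control.model.PutPublicAccessBlockRequest;

public class BlockAllPublicAccess {
    public static void main(String[] args) {
        try (S3ControlClient s3Control = S3ControlClient.create()) {
            // Turn on every public-access restriction for the account.
            s3Control.putPublicAccessBlock(PutPublicAccessBlockRequest.builder()
                    .accountId("111122223333")   // placeholder account ID
                    .publicAccessBlockConfiguration(PublicAccessBlockConfiguration.builder()
                            .blockPublicAcls(true)
                            .ignorePublicAcls(true)
                            .blockPublicPolicy(true)
                            .restrictPublicBuckets(true)
                            .build())
                    .build());
        }
    }
}
```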

    " + }, + "PublicAccessBlockEnabled":{"type":"boolean"}, + "PutAccessPointPolicyRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name", + "Policy" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID for the owner of the bucket associated with the specified access point.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"AccessPointName", + "documentation":"

    The name of the access point that you want to associate with the specified policy.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + }, + "Policy":{ + "shape":"Policy", + "documentation":"

    The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.
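
    A short sketch of attaching a policy to an access point, assuming the generated client; the account ID, access point name, and policy document are placeholders.

```java
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.PutAccessPointPolicyRequest;

public class AttachAccessPointPolicy {
    public static void main(String[] args) {
        // Placeholder policy that allows GetObject through the access point.
        String policy = "{"
                + "\"Version\":\"2012-10-17\","
                + "\"Statement\":[{"
                + "\"Effect\":\"Allow\","
                + "\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:root\"},"
                + "\"Action\":\"s3:GetObject\","
                + "\"Resource\":\"arn:aws:s3:us-west-2:111122223333:accesspoint/example-ap/object/*\""
                + "}]}";

        try (S3ControlClient s3Control = S3ControlClient.create()) {
            s3Control.putAccessPointPolicy(PutAccessPointPolicyRequest.builder()
                    .accountId("111122223333")   // placeholder account ID
                    .name("example-ap")          // placeholder access point name
                    .policy(policy)
                    .build());
        }
    }
}
```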

    " + } + } + }, + "PutBucketLifecycleConfigurationRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Outposts bucket.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The name of the bucket for which to set the configuration.

    ", + "location":"uri", + "locationName":"name" + }, + "LifecycleConfiguration":{ + "shape":"LifecycleConfiguration", + "documentation":"

    Container for lifecycle rules. You can add as many as 1,000 rules.
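
    A hedged sketch of setting a single rule, assuming the generated client and model classes; the account ID and Outposts bucket ARN are placeholders.

```java
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.LifecycleConfiguration;
import software.amazon.awssdk.services.s3control.model.LifecycleExpiration;
import software.amazon.awssdk.services.s3control.model.LifecycleRule;
import software.amazon.awssdk.services.s3control.model.LifecycleRuleFilter;
import software.amazon.awssdk.services.s3control.model.PutBucketLifecycleConfigurationRequest;

public class PutOutpostsLifecycle {
    public static void main(String[] args) {
        // Expire objects under the logs/ prefix after 30 days.
        LifecycleRule rule = LifecycleRule.builder()
                .id("expire-logs")
                .status("Enabled")
                .filter(LifecycleRuleFilter.builder().prefix("logs/").build())
                .expiration(LifecycleExpiration.builder().days(30).build())
                .build();

        try (S3ControlClient s3Control = S3ControlClient.create()) {
            s3Control.putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
                    .accountId("111122223333")   // placeholder account ID
                    .bucket("arn:aws:s3-outposts:us-west-2:111122223333:outpost/my-outpost/bucket/logs-bucket")
                    .lifecycleConfiguration(LifecycleConfiguration.builder().rules(rule).build())
                    .build());
        }
    }
}
```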

    ", + "locationName":"LifecycleConfiguration", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + } + }, + "payload":"LifecycleConfiguration" + }, + "PutBucketPolicyRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket", + "Policy" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Outposts bucket.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    Specifies the bucket.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + }, + "ConfirmRemoveSelfBucketAccess":{ + "shape":"ConfirmRemoveSelfBucketAccess", + "documentation":"

    Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future.

    This is not supported by Amazon S3 on Outposts buckets.

    ", + "location":"header", + "locationName":"x-amz-confirm-remove-self-bucket-access" + }, + "Policy":{ + "shape":"Policy", + "documentation":"

    The bucket policy as a JSON document.

    " + } + } + }, + "PutBucketTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Bucket", + "Tagging" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID of the Outposts bucket.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"

    The Amazon Resource Name (ARN) of the bucket.

    For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

    For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

    ", + "location":"uri", + "locationName":"name" + }, + "Tagging":{ + "shape":"Tagging", + "documentation":"

    ", + "locationName":"Tagging", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + } + }, + "payload":"Tagging" + }, + "PutJobTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId", + "Tags" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The AWS account ID associated with the S3 Batch Operations job.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID for the S3 Batch Operations job whose tags you want to replace.

    ", + "location":"uri", + "locationName":"id" + }, + "Tags":{ + "shape":"S3TagSet", + "documentation":"

    The set of tags to associate with the S3 Batch Operations job.
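
    A minimal sketch of replacing a job's tag set, assuming the generated client and the S3Tag shape defined elsewhere in this model; the account ID and tag values are placeholders and the job ID is taken from the command line.

```java
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.PutJobTaggingRequest;
import software.amazon.awssdk.services.s3control.model.S3Tag;

public class TagBatchJob {
    public static void main(String[] args) {
        try (S3ControlClient s3Control = S3ControlClient.create()) {
            // Replaces the full tag set on the job; existing tags are overwritten.
            s3Control.putJobTagging(PutJobTaggingRequest.builder()
                    .accountId("111122223333")   // placeholder account ID
                    .jobId(args[0])              // job ID passed on the command line
                    .tags(S3Tag.builder().key("Department").value("Finance").build())
                    .build());
        }
    }
}
```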

    " + } + } + }, + "PutJobTaggingResult":{ + "type":"structure", + "members":{ + } + }, + "PutPublicAccessBlockRequest":{ + "type":"structure", + "required":[ + "PublicAccessBlockConfiguration", + "AccountId" + ], + "members":{ + "PublicAccessBlockConfiguration":{ + "shape":"PublicAccessBlockConfiguration", + "documentation":"

    The PublicAccessBlock configuration that you want to apply to the specified AWS account.

    ", + "locationName":"PublicAccessBlockConfiguration", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID for the AWS account whose PublicAccessBlock configuration you want to set.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + }, + "payload":"PublicAccessBlockConfiguration" + }, + "PutStorageLensConfigurationRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId", + "StorageLensConfiguration" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

    The ID of the S3 Storage Lens configuration.

    ", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID of the requester.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "StorageLensConfiguration":{ + "shape":"StorageLensConfiguration", + "documentation":"

    The S3 Storage Lens configuration.

    " + }, + "Tags":{ + "shape":"StorageLensTags", + "documentation":"

    The tag set of the S3 Storage Lens configuration.

    You can set a maximum of 50 tags.
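
    A hedged sketch of creating a tagged configuration, assuming the generated client plus the AccountLevel, BucketLevel, and StorageLensTag shapes defined elsewhere in this model; IDs and names are placeholders.

```java
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.AccountLevel;
import software.amazon.awssdk.services.s3control.model.BucketLevel;
import software.amazon.awssdk.services.s3control.model.PutStorageLensConfigurationRequest;
import software.amazon.awssdk.services.s3control.model.StorageLensConfiguration;
import software.amazon.awssdk.services.s3control.model.StorageLensTag;

public class PutLensConfiguration {
    public static void main(String[] args) {
        StorageLensConfiguration config = StorageLensConfiguration.builder()
                .id("example-dashboard")         // placeholder configuration ID
                .isEnabled(true)
                .accountLevel(AccountLevel.builder()
                        .bucketLevel(BucketLevel.builder().build())
                        .build())
                .build();

        try (S3ControlClient s3Control = S3ControlClient.create()) {
            s3Control.putStorageLensConfiguration(PutStorageLensConfigurationRequest.builder()
                    .configId("example-dashboard")
                    .accountId("111122223333")   // placeholder account ID
                    .storageLensConfiguration(config)
                    .tags(StorageLensTag.builder().key("team").value("analytics").build())
                    .build());
        }
    }
}
```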

    " + } + } + }, + "PutStorageLensConfigurationTaggingRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId", + "Tags" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

    The ID of the S3 Storage Lens configuration.

    ", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID of the requester.

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Tags":{ + "shape":"StorageLensTags", + "documentation":"

    The tag set of the S3 Storage Lens configuration.

    You can set a maximum of 50 tags.

    " + } + } + }, + "PutStorageLensConfigurationTaggingResult":{ + "type":"structure", + "members":{ + } + }, + "RegionalBucket":{ + "type":"structure", + "required":[ + "Bucket", + "PublicAccessBlockEnabled", + "CreationDate" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

    " + }, + "BucketArn":{ + "shape":"S3RegionalBucketArn", + "documentation":"

    The Amazon Resource Name (ARN) for the regional bucket.

    " + }, + "PublicAccessBlockEnabled":{ + "shape":"PublicAccessBlockEnabled", + "documentation":"

    " + }, + "CreationDate":{ + "shape":"CreationDate", + "documentation":"

    The creation date of the regional bucket.

    " + }, + "OutpostId":{ + "shape":"NonEmptyMaxLength64String", + "documentation":"

    The AWS Outposts ID of the regional bucket.

    " + } + }, + "documentation":"

    The container for the regional bucket.

    " + }, + "RegionalBucketList":{ + "type":"list", + "member":{ + "shape":"RegionalBucket", + "locationName":"RegionalBucket" + } + }, + "Regions":{ + "type":"list", + "member":{ + "shape":"S3AWSRegion", + "locationName":"Region" + } + }, + "ReportPrefixString":{ + "type":"string", + "max":512, + "min":1 + }, + "RequestedJobStatus":{ + "type":"string", + "enum":[ + "Cancelled", + "Ready" + ] + }, + "S3AWSRegion":{ + "type":"string", + "max":30, + "min":5, + "pattern":"[a-z0-9\\-]+" + }, + "S3AccessControlList":{ + "type":"structure", + "required":["Owner"], + "members":{ + "Owner":{ + "shape":"S3ObjectOwner", + "documentation":"

    " + }, + "Grants":{ + "shape":"S3GrantList", + "documentation":"

    " + } + }, + "documentation":"

    " + }, + "S3AccessControlPolicy":{ + "type":"structure", + "members":{ + "AccessControlList":{ + "shape":"S3AccessControlList", + "documentation":"

    ", + "box":true + }, + "CannedAccessControlList":{ + "shape":"S3CannedAccessControlList", + "documentation":"

    ", + "box":true + } + }, + "documentation":"

    " + }, + "S3AccessPointArn":{ + "type":"string", + "max":128, + "min":4 + }, + "S3BucketArnString":{ + "type":"string", + "max":128, + "min":1, + "pattern":"arn:[^:]+:s3:.*" + }, + "S3BucketDestination":{ + "type":"structure", + "required":[ + "Format", + "OutputSchemaVersion", + "AccountId", + "Arn" + ], + "members":{ + "Format":{ + "shape":"Format", + "documentation":"

    " + }, + "OutputSchemaVersion":{ + "shape":"OutputSchemaVersion", + "documentation":"

    The schema version of the export file.

    " + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The account ID of the owner of the S3 Storage Lens metrics export bucket.

    " + }, + "Arn":{ + "shape":"S3BucketArnString", + "documentation":"

    The Amazon Resource Name (ARN) of the bucket. This property is read-only and follows the format arn:aws:s3:us-east-1:example-account-id:bucket/your-destination-bucket-name.

    " + }, + "Prefix":{ + "shape":"Prefix", + "documentation":"

    The prefix of the destination bucket where the metrics export will be delivered.

    " + }, + "Encryption":{ + "shape":"StorageLensDataExportEncryption", + "documentation":"

    The container for the type encryption of the metrics exports in this bucket.

    " + } + }, + "documentation":"

    A container for the bucket where the Amazon S3 Storage Lens metrics export files are located.

    " + }, + "S3CannedAccessControlList":{ + "type":"string", + "enum":[ + "private", + "public-read", + "public-read-write", + "aws-exec-read", + "authenticated-read", + "bucket-owner-read", + "bucket-owner-full-control" + ] + }, + "S3ContentLength":{ + "type":"long", + "min":0 + }, + "S3CopyObjectOperation":{ + "type":"structure", + "members":{ + "TargetResource":{ + "shape":"S3BucketArnString", + "documentation":"

    Specifies the destination bucket ARN for the batch copy operation. For example, to copy objects to a bucket named \"destinationBucket\", set the TargetResource to \"arn:aws:s3:::destinationBucket\".

    " + }, + "CannedAccessControlList":{ + "shape":"S3CannedAccessControlList", + "documentation":"

    ", + "box":true + }, + "AccessControlGrants":{ + "shape":"S3GrantList", + "documentation":"

    ", + "box":true + }, + "MetadataDirective":{ + "shape":"S3MetadataDirective", + "documentation":"

    " + }, + "ModifiedSinceConstraint":{ + "shape":"TimeStamp", + "documentation":"

    " + }, + "NewObjectMetadata":{ + "shape":"S3ObjectMetadata", + "documentation":"

    " + }, + "NewObjectTagging":{ + "shape":"S3TagSet", + "documentation":"

    " + }, + "RedirectLocation":{ + "shape":"NonEmptyMaxLength2048String", + "documentation":"

    Specifies an optional metadata property for website redirects, x-amz-website-redirect-location. Allows webpage redirects if the object is accessed through a website endpoint.

    " + }, + "RequesterPays":{ + "shape":"Boolean", + "documentation":"

    " + }, + "StorageClass":{ + "shape":"S3StorageClass", + "documentation":"

    " + }, + "UnModifiedSinceConstraint":{ + "shape":"TimeStamp", + "documentation":"

    " + }, + "SSEAwsKmsKeyId":{ + "shape":"KmsKeyArnString", + "documentation":"

    " + }, + "TargetKeyPrefix":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    Specifies the folder prefix into which you would like the objects to be copied. For example, to copy objects into a folder named \"Folder1\" in the destination bucket, set the TargetKeyPrefix to \"Folder1/\".

    " + }, + "ObjectLockLegalHoldStatus":{ + "shape":"S3ObjectLockLegalHoldStatus", + "documentation":"

    The legal hold status to be applied to all objects in the Batch Operations job.

    " + }, + "ObjectLockMode":{ + "shape":"S3ObjectLockMode", + "documentation":"

    The retention mode to be applied to all objects in the Batch Operations job.

    " + }, + "ObjectLockRetainUntilDate":{ + "shape":"TimeStamp", + "documentation":"

    The date when the applied object retention configuration expires on all objects in the Batch Operations job.

    " + } + }, + "documentation":"

    Contains the configuration parameters for a PUT Copy object operation. S3 Batch Operations passes each value through to the underlying PUT Copy object API. For more information about the parameters for this operation, see PUT Object - Copy.
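
    A short sketch of such a copy operation wrapped in a JobOperation, assuming the generated model classes; the destination bucket ARN and prefix are placeholders, and the storage class is passed as a raw string.

```java
import software.amazon.awssdk.services.s3control.model.JobOperation;
import software.amazon.awssdk.services.s3control.model.S3CopyObjectOperation;

final class CopyOperationExample {
    // Copy every manifest entry into destinationBucket under Folder1/ as STANDARD_IA.
    static JobOperation copyToFolder1() {
        return JobOperation.builder()
                .s3PutObjectCopy(S3CopyObjectOperation.builder()
                        .targetResource("arn:aws:s3:::destinationBucket")  // placeholder destination ARN
                        .targetKeyPrefix("Folder1/")
                        .storageClass("STANDARD_IA")
                        .build())
                .build();
    }
}
```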

    " + }, + "S3ExpirationInDays":{ + "type":"integer", + "min":0 + }, + "S3GlacierJobTier":{ + "type":"string", + "enum":[ + "BULK", + "STANDARD" + ] + }, + "S3Grant":{ + "type":"structure", + "members":{ + "Grantee":{ + "shape":"S3Grantee", + "documentation":"

    " + }, + "Permission":{ + "shape":"S3Permission", + "documentation":"

    " + } + }, + "documentation":"

    " + }, + "S3GrantList":{ + "type":"list", + "member":{"shape":"S3Grant"} + }, + "S3Grantee":{ + "type":"structure", + "members":{ + "TypeIdentifier":{ + "shape":"S3GranteeTypeIdentifier", + "documentation":"

    " + }, + "Identifier":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    ", + "box":true + }, + "DisplayName":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    " + } + }, + "documentation":"

    " + }, + "S3GranteeTypeIdentifier":{ + "type":"string", + "enum":[ + "id", + "emailAddress", + "uri" + ] + }, + "S3InitiateRestoreObjectOperation":{ + "type":"structure", + "members":{ + "ExpirationInDays":{ + "shape":"S3ExpirationInDays", + "documentation":"

    " + }, + "GlacierJobTier":{ + "shape":"S3GlacierJobTier", + "documentation":"

    " + } + }, + "documentation":"

    Contains the configuration parameters for an Initiate Glacier Restore job. S3 Batch Operations passes each value through to the underlying POST Object restore API. For more information about the parameters for this operation, see RestoreObject.

    " + }, + "S3KeyArnString":{ + "type":"string", + "max":2000, + "min":1, + "pattern":"arn:[^:]+:s3:.*" + }, + "S3MetadataDirective":{ + "type":"string", + "enum":[ + "COPY", + "REPLACE" + ] + }, + "S3ObjectLockLegalHold":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"S3ObjectLockLegalHoldStatus", + "documentation":"

    The Object Lock legal hold status to be applied to all objects in the Batch Operations job.

    " + } + }, + "documentation":"

    Whether S3 Object Lock legal hold will be applied to objects in an S3 Batch Operations job.

    " + }, + "S3ObjectLockLegalHoldStatus":{ + "type":"string", + "enum":[ + "OFF", + "ON" + ] + }, + "S3ObjectLockMode":{ + "type":"string", + "enum":[ + "COMPLIANCE", + "GOVERNANCE" + ] + }, + "S3ObjectLockRetentionMode":{ + "type":"string", + "enum":[ + "COMPLIANCE", + "GOVERNANCE" + ] + }, + "S3ObjectMetadata":{ + "type":"structure", + "members":{ + "CacheControl":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    " + }, + "ContentDisposition":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    " + }, + "ContentEncoding":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    " + }, + "ContentLanguage":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    " + }, + "UserMetadata":{ + "shape":"S3UserMetadata", + "documentation":"

    " + }, + "ContentLength":{ + "shape":"S3ContentLength", + "documentation":"

    ", + "box":true + }, + "ContentMD5":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    " + }, + "ContentType":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    " + }, + "HttpExpiresDate":{ + "shape":"TimeStamp", + "documentation":"

    " + }, + "RequesterCharged":{ + "shape":"Boolean", + "documentation":"

    " + }, + "SSEAlgorithm":{ + "shape":"S3SSEAlgorithm", + "documentation":"

    " + } + }, + "documentation":"

    " + }, + "S3ObjectOwner":{ + "type":"structure", + "members":{ + "ID":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    " + }, + "DisplayName":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

    " + } + }, + "documentation":"

    " + }, + "S3ObjectVersionId":{ + "type":"string", + "max":2000, + "min":1 + }, + "S3Permission":{ + "type":"string", + "enum":[ + "FULL_CONTROL", + "READ", + "WRITE", + "READ_ACP", + "WRITE_ACP" + ] + }, + "S3RegionalBucketArn":{ + "type":"string", + "max":128, + "min":4 + }, + "S3Retention":{ + "type":"structure", + "members":{ + "RetainUntilDate":{ + "shape":"TimeStamp", + "documentation":"

    The date when the applied Object Lock retention will expire on all objects set by the Batch Operations job.

    " + }, + "Mode":{ + "shape":"S3ObjectLockRetentionMode", + "documentation":"

    The Object Lock retention mode to be applied to all objects in the Batch Operations job.

    " + } + }, + "documentation":"

    Contains the S3 Object Lock retention mode to be applied to all objects in the S3 Batch Operations job. If you don't provide Mode and RetainUntilDate in your operation, the retention is removed from your objects. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
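    A minimal sketch of building this shape with the generated v2 builders (the method names are assumed from the Mode and RetainUntilDate members above; the date is a hypothetical value):

    import java.time.Instant;
    import software.amazon.awssdk.services.s3control.model.S3ObjectLockRetentionMode;
    import software.amazon.awssdk.services.s3control.model.S3Retention;
    import software.amazon.awssdk.services.s3control.model.S3SetObjectRetentionOperation;

    // Apply GOVERNANCE-mode retention to every object the Batch Operations job touches.
    S3SetObjectRetentionOperation retentionOperation = S3SetObjectRetentionOperation.builder()
        .retention(S3Retention.builder()
            .mode(S3ObjectLockRetentionMode.GOVERNANCE)               // COMPLIANCE or GOVERNANCE
            .retainUntilDate(Instant.parse("2025-01-01T00:00:00Z"))   // omitting Mode and RetainUntilDate removes retention
            .build())
        .build();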

    " + }, + "S3SSEAlgorithm":{ + "type":"string", + "enum":[ + "AES256", + "KMS" + ] + }, + "S3SetObjectAclOperation":{ + "type":"structure", + "members":{ + "AccessControlPolicy":{ + "shape":"S3AccessControlPolicy", + "documentation":"

    " + } + }, + "documentation":"

    Contains the configuration parameters for a Set Object ACL operation. S3 Batch Operations passes each value through to the underlying PUT Object acl API. For more information about the parameters for this operation, see PUT Object acl.

    " + }, + "S3SetObjectLegalHoldOperation":{ + "type":"structure", + "required":["LegalHold"], + "members":{ + "LegalHold":{ + "shape":"S3ObjectLockLegalHold", + "documentation":"

    Contains the Object Lock legal hold status to be applied to all objects in the Batch Operations job.

    " + } + }, + "documentation":"

    Contains the configuration for an S3 Object Lock legal hold operation. S3 Batch Operations passes each value through to the underlying PutObjectLegalHold API. For more information, see Using S3 Object Lock legal hold with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
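    For illustration, the legal hold operation could be built like this (a sketch assuming the generated builder names for the LegalHold and Status members above):

    import software.amazon.awssdk.services.s3control.model.S3ObjectLockLegalHold;
    import software.amazon.awssdk.services.s3control.model.S3ObjectLockLegalHoldStatus;
    import software.amazon.awssdk.services.s3control.model.S3SetObjectLegalHoldOperation;

    // ON places a legal hold on each object in the job; OFF removes it.
    S3SetObjectLegalHoldOperation legalHoldOperation = S3SetObjectLegalHoldOperation.builder()
        .legalHold(S3ObjectLockLegalHold.builder()
            .status(S3ObjectLockLegalHoldStatus.ON)
            .build())
        .build();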

    " + }, + "S3SetObjectRetentionOperation":{ + "type":"structure", + "required":["Retention"], + "members":{ + "BypassGovernanceRetention":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the action should be applied to objects in the Batch Operations job even if they have GOVERNANCE-mode Object Lock retention in place.

    ", + "box":true + }, + "Retention":{ + "shape":"S3Retention", + "documentation":"

    Contains the Object Lock retention mode to be applied to all objects in the Batch Operations job. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

    " + } + }, + "documentation":"

    Contains the configuration parameters for the Object Lock retention action for an S3 Batch Operations job. Batch Operations passes each value through to the underlying PutObjectRetention API. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

    " + }, + "S3SetObjectTaggingOperation":{ + "type":"structure", + "members":{ + "TagSet":{ + "shape":"S3TagSet", + "documentation":"

    " + } + }, + "documentation":"

    Contains the configuration parameters for a Set Object Tagging operation. S3 Batch Operations passes each value through to the underlying PUT Object tagging API. For more information about the parameters for this operation, see PUT Object tagging.

    " + }, + "S3StorageClass":{ + "type":"string", + "enum":[ + "STANDARD", + "STANDARD_IA", + "ONEZONE_IA", + "GLACIER", + "INTELLIGENT_TIERING", + "DEEP_ARCHIVE" + ] + }, + "S3Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKeyString", + "documentation":"

    " + }, + "Value":{ + "shape":"TagValueString", + "documentation":"

    " + } + }, + "documentation":"

    " + }, + "S3TagSet":{ + "type":"list", + "member":{"shape":"S3Tag"} + }, + "S3UserMetadata":{ + "type":"map", + "key":{"shape":"NonEmptyMaxLength1024String"}, + "value":{"shape":"MaxLength1024String"}, + "max":8192 + }, + "SSEKMS":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{ + "shape":"SSEKMSKeyId", + "documentation":"

    A container for the ARN of the SSE-KMS encryption key. This property is read-only and uses the following format: arn:aws:kms:us-east-1:example-account-id:key/example-9a73-4afc-8d29-8f5900cef44e

    " + } + }, + "documentation":"

    ", + "locationName":"SSE-KMS" + }, + "SSEKMSKeyId":{"type":"string"}, + "SSES3":{ + "type":"structure", + "members":{ + }, + "documentation":"

    ", + "locationName":"SSE-S3" + }, + "SelectionCriteria":{ + "type":"structure", + "members":{ + "Delimiter":{ + "shape":"StorageLensPrefixLevelDelimiter", + "documentation":"

    A container for the delimiter of the selection criteria being used.

    " + }, + "MaxDepth":{ + "shape":"StorageLensPrefixLevelMaxDepth", + "documentation":"

    The maximum depth of the selection criteria.

    " + }, + "MinStorageBytesPercentage":{ + "shape":"MinStorageBytesPercentage", + "documentation":"

    The minimum storage bytes percentage whose metrics will be selected.

    You must choose a value greater than or equal to 1.0.

    " + } + }, + "documentation":"

    " + }, + "Setting":{"type":"boolean"}, + "StorageLensArn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:[a-z\\-]+:s3:[a-z0-9\\-]+:\\d{12}:storage\\-lens\\/.*" + }, + "StorageLensAwsOrg":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"AwsOrgArn", + "documentation":"

    A container for the Amazon Resource Name (ARN) of the AWS organization. This property is read-only and uses the following format: arn:aws:organizations:us-east-1:example-account-id:organization/o-ex2l495dck

    " + } + }, + "documentation":"

    The AWS organization for your S3 Storage Lens.

    " + }, + "StorageLensConfiguration":{ + "type":"structure", + "required":[ + "Id", + "AccountLevel", + "IsEnabled" + ], + "members":{ + "Id":{ + "shape":"ConfigId", + "documentation":"

    A container for the Amazon S3 Storage Lens configuration ID.

    " + }, + "AccountLevel":{ + "shape":"AccountLevel", + "documentation":"

    A container for all the account-level configurations of your S3 Storage Lens configuration.

    " + }, + "Include":{ + "shape":"Include", + "documentation":"

    A container for what is included in this configuration. This container is valid only if it is not empty and no Exclude container is submitted.

    " + }, + "Exclude":{ + "shape":"Exclude", + "documentation":"

    A container for what is excluded in this configuration. This container is valid only if it is not empty and no Include container is submitted.

    " + }, + "DataExport":{ + "shape":"StorageLensDataExport", + "documentation":"

    A container to specify the properties of your S3 Storage Lens metrics export, including the destination, schema, and format.

    " + }, + "IsEnabled":{ + "shape":"IsEnabled", + "documentation":"

    A container for whether the S3 Storage Lens configuration is enabled.

    " + }, + "AwsOrg":{ + "shape":"StorageLensAwsOrg", + "documentation":"

    A container for the AWS organization for this S3 Storage Lens configuration.

    " + }, + "StorageLensArn":{ + "shape":"StorageLensArn", + "documentation":"

    The Amazon Resource Name (ARN) of the S3 Storage Lens configuration. This property is read-only and uses the following format: arn:aws:s3:us-east-1:example-account-id:storage-lens/your-dashboard-name

    " + } + }, + "documentation":"

    A container for the Amazon S3 Storage Lens configuration.
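    A hedged sketch of creating such a configuration follows; it assumes the PutStorageLensConfiguration operation and the AccountLevel/BucketLevel shapes defined elsewhere in this model, and the IDs are hypothetical:

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.AccountLevel;
    import software.amazon.awssdk.services.s3control.model.BucketLevel;
    import software.amazon.awssdk.services.s3control.model.StorageLensConfiguration;

    S3ControlClient s3Control = S3ControlClient.create();

    // Required members per this model: Id, AccountLevel, IsEnabled.
    StorageLensConfiguration configuration = StorageLensConfiguration.builder()
        .id("my-dashboard")                                   // hypothetical configuration ID
        .isEnabled(true)
        .accountLevel(AccountLevel.builder()
            .bucketLevel(BucketLevel.builder().build())       // default bucket-level metrics
            .build())
        .build();

    s3Control.putStorageLensConfiguration(b -> b.accountId("123456789012")   // hypothetical account ID
                                                .configId("my-dashboard")
                                                .storageLensConfiguration(configuration));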

    " + }, + "StorageLensConfigurationList":{ + "type":"list", + "member":{ + "shape":"ListStorageLensConfigurationEntry", + "locationName":"StorageLensConfiguration" + }, + "flattened":true + }, + "StorageLensDataExport":{ + "type":"structure", + "required":["S3BucketDestination"], + "members":{ + "S3BucketDestination":{ + "shape":"S3BucketDestination", + "documentation":"

    A container for the bucket where the S3 Storage Lens metrics export will be located.

    " + } + }, + "documentation":"

    A container to specify the properties of your S3 Storage Lens metrics export, including the destination, schema, and format.

    " + }, + "StorageLensDataExportEncryption":{ + "type":"structure", + "members":{ + "SSES3":{ + "shape":"SSES3", + "documentation":"

    ", + "locationName":"SSE-S3" + }, + "SSEKMS":{ + "shape":"SSEKMS", + "documentation":"

    ", + "locationName":"SSE-KMS" + } + }, + "documentation":"

    A container for the encryption of the S3 Storage Lens metrics exports.

    " + }, + "StorageLensPrefixLevelDelimiter":{ + "type":"string", + "max":1 + }, + "StorageLensPrefixLevelMaxDepth":{ + "type":"integer", + "max":10, + "min":1 + }, + "StorageLensTag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKeyString", + "documentation":"

    " + }, + "Value":{ + "shape":"TagValueString", + "documentation":"

    " + } + }, + "documentation":"

    " + }, + "StorageLensTags":{ + "type":"list", + "member":{ + "shape":"StorageLensTag", + "locationName":"Tag" + } + }, + "StringForNextToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[A-Za-z0-9\\+\\:\\/\\=\\?\\#-_]+$" + }, + "SuspendedCause":{ + "type":"string", + "max":1024, + "min":1 + }, + "SuspendedDate":{"type":"timestamp"}, + "TagKeyString":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:=+\\-@%]*)$" + }, + "TagValueString":{ + "type":"string", + "max":1024, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:=+\\-@%]*)$" + }, + "Tagging":{ + "type":"structure", + "required":["TagSet"], + "members":{ + "TagSet":{ + "shape":"S3TagSet", + "documentation":"

    A collection for a set of tags.

    " + } + }, + "documentation":"

    " + }, + "TimeStamp":{"type":"timestamp"}, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    Amazon S3 throws this exception if you have too many tags in your tag set.

    ", + "exception":true + }, + "Transition":{ + "type":"structure", + "members":{ + "Date":{ + "shape":"Date", + "documentation":"

    Indicates when objects are transitioned to the specified storage class. The date value must be in ISO 8601 format. The time is always midnight UTC.

    " + }, + "Days":{ + "shape":"Days", + "documentation":"

    Indicates the number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer.

    " + }, + "StorageClass":{ + "shape":"TransitionStorageClass", + "documentation":"

    The storage class to which you want the object to transition.

    " + } + }, + "documentation":"

    Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning objects using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.
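    As a sketch, a 30-day transition to Glacier could be expressed with the generated builder (names assumed from the Days and StorageClass members above):

    import software.amazon.awssdk.services.s3control.model.Transition;
    import software.amazon.awssdk.services.s3control.model.TransitionStorageClass;

    // Move objects to the GLACIER storage class 30 days after creation.
    Transition transition = Transition.builder()
        .days(30)
        .storageClass(TransitionStorageClass.GLACIER)
        .build();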

    " + }, + "TransitionList":{ + "type":"list", + "member":{ + "shape":"Transition", + "locationName":"Transition" + } + }, + "TransitionStorageClass":{ + "type":"string", + "enum":[ + "GLACIER", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "DEEP_ARCHIVE" + ] + }, + "UpdateJobPriorityRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId", + "Priority" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID for the job whose priority you want to update.

    ", + "location":"uri", + "locationName":"id" + }, + "Priority":{ + "shape":"JobPriority", + "documentation":"

    The priority you want to assign to this job.
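    A minimal sketch of the corresponding client call, assuming the generated updateJobPriority method on S3ControlClient; the account and job IDs are hypothetical:

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.UpdateJobPriorityResponse;

    S3ControlClient s3Control = S3ControlClient.create();

    UpdateJobPriorityResponse response = s3Control.updateJobPriority(b -> b
        .accountId("123456789012")                        // hypothetical account ID
        .jobId("00e123a4-c0d8-41f4-a0eb-b7159c591278")    // hypothetical job ID
        .priority(10));                                   // higher values indicate higher priority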

    ", + "location":"querystring", + "locationName":"priority" + } + } + }, + "UpdateJobPriorityResult":{ + "type":"structure", + "required":[ + "JobId", + "Priority" + ], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID for the job whose priority Amazon S3 updated.

    " + }, + "Priority":{ + "shape":"JobPriority", + "documentation":"

    The new priority assigned to the specified job.

    " + } + } + }, + "UpdateJobStatusRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId", + "RequestedJobStatus" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    ", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID of the job whose status you want to update.

    ", + "location":"uri", + "locationName":"id" + }, + "RequestedJobStatus":{ + "shape":"RequestedJobStatus", + "documentation":"

    The status that you want to move the specified job to.

    ", + "location":"querystring", + "locationName":"requestedJobStatus" + }, + "StatusUpdateReason":{ + "shape":"JobStatusUpdateReason", + "documentation":"

    A description of the reason why you want to change the specified job's status. This field can be any string up to the maximum length.
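    Similarly, a hedged sketch of requesting a status change; it assumes the generated updateJobStatus method and the RequestedJobStatus enum defined earlier in this model, and the IDs are hypothetical:

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.RequestedJobStatus;

    S3ControlClient s3Control = S3ControlClient.create();

    s3Control.updateJobStatus(b -> b
        .accountId("123456789012")                        // hypothetical account ID
        .jobId("00e123a4-c0d8-41f4-a0eb-b7159c591278")    // hypothetical job ID
        .requestedJobStatus(RequestedJobStatus.CANCELLED) // assumed enum constant
        .statusUpdateReason("No longer needed"));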

    ", + "location":"querystring", + "locationName":"statusUpdateReason" + } + } + }, + "UpdateJobStatusResult":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID for the job whose status was updated.

    " + }, + "Status":{ + "shape":"JobStatus", + "documentation":"

    The current status for the specified job.

    " + }, + "StatusUpdateReason":{ + "shape":"JobStatusUpdateReason", + "documentation":"

    The reason that the specified job's status was updated.

    " + } + } + }, + "VpcConfiguration":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "VpcId":{ + "shape":"VpcId", + "documentation":"

    If this field is specified, this access point will only allow connections from the specified VPC ID.
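    For illustration, an access point restricted to a VPC might be created like this (a sketch assuming the generated createAccessPoint method; all literal values are hypothetical):

    import software.amazon.awssdk.services.s3control.S3ControlClient;
    import software.amazon.awssdk.services.s3control.model.VpcConfiguration;

    S3ControlClient s3Control = S3ControlClient.create();

    s3Control.createAccessPoint(b -> b
        .accountId("123456789012")                 // hypothetical account ID
        .bucket("my-bucket")                       // hypothetical bucket
        .name("my-vpc-only-access-point")
        .vpcConfiguration(VpcConfiguration.builder()
            .vpcId("vpc-0123456789abcdef0")        // hypothetical VPC ID
            .build()));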

    " + } + }, + "documentation":"

    The virtual private cloud (VPC) configuration for an access point.

    " + }, + "VpcId":{ + "type":"string", + "max":1024, + "min":1 + } + }, + "documentation":"

    AWS S3 Control provides access to Amazon S3 control plane operations.

    " +} diff --git a/services/s3control/src/main/resources/software/amazon/awssdk/services/s3control/execution.interceptors b/services/s3control/src/main/resources/software/amazon/awssdk/services/s3control/execution.interceptors new file mode 100644 index 000000000000..bc59d1fe2d3e --- /dev/null +++ b/services/s3control/src/main/resources/software/amazon/awssdk/services/s3control/execution.interceptors @@ -0,0 +1,3 @@ +software.amazon.awssdk.services.s3control.internal.interceptors.DisableDoubleUrlEncodingForSigningInterceptor +software.amazon.awssdk.services.s3control.internal.interceptors.EndpointAddressInterceptor +software.amazon.awssdk.services.s3control.internal.interceptors.PayloadSigningInterceptor \ No newline at end of file diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/ArnHandlerTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/ArnHandlerTest.java new file mode 100644 index 000000000000..28a71688b32b --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/ArnHandlerTest.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control.internal; + + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME; +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SIGNING_REGION; +import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.X_AMZ_ACCOUNT_ID; + +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.core.Protocol; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3control.S3ControlClient; +import software.amazon.awssdk.services.s3control.S3ControlConfiguration; + +public class ArnHandlerTest { + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + private SdkHttpRequest request; + private S3ControlConfiguration configuration; + private ExecutionAttributes executionAttributes; + + private final ArnHandler arnHandler = ArnHandler.getInstance(); + private static final String ACCOUNT_ID = "123456789012"; + + @Before + public void setup() { + request = SdkHttpFullRequest.builder() + .appendHeader(X_AMZ_ACCOUNT_ID, ACCOUNT_ID) + .protocol(Protocol.HTTPS.toString()) + .method(SdkHttpMethod.POST) + .host(S3ControlClient.serviceMetadata().endpointFor(Region.US_WEST_2).toString()) + .build(); + configuration = S3ControlConfiguration.builder().build(); + 
executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3-control"); + executionAttributes.putAttribute(SIGNING_REGION, Region.of("us-west-2")); + } + + @Test + public void outpostBucketArn_shouldResolveHost() { + Arn arn = Arn.fromString("arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket"); + SdkHttpRequest modifiedRequest = arnHandler.resolveHostForArn(request, configuration, arn, executionAttributes); + + assertThat(modifiedRequest.host(), is("s3-outposts.us-west-2.amazonaws.com")); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME), is("s3-outposts")); + assertThat(modifiedRequest.headers().get("x-amz-outpost-id").get(0), is("op-01234567890123456")); + assertThat(modifiedRequest.headers().get("x-amz-account-id").get(0), is(ACCOUNT_ID)); + } + + @Test + public void outpostAccessPointArn_shouldResolveHost() { + Arn arn = Arn.fromString("arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"); + SdkHttpRequest modifiedRequest = arnHandler.resolveHostForArn(request, configuration, arn, executionAttributes); + + assertThat(modifiedRequest.host(), is("s3-outposts.us-west-2.amazonaws.com")); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME), is("s3-outposts")); + assertThat(modifiedRequest.headers().get("x-amz-outpost-id").get(0), is("op-01234567890123456")); + assertThat(modifiedRequest.headers().get("x-amz-account-id").get(0), is(ACCOUNT_ID)); + } + + @Test + public void outpostArnWithFipsEnabled_shouldThrowException() { + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("FIPS"); + + Arn arn = Arn.fromString("arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket"); + arnHandler.resolveHostForArn(request, enableFips(), arn, executionAttributes); + } + + @Test + public void outpostArnWithDualstackEnabled_shouldThrowException() { + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("Dualstack"); + + Arn arn = Arn.fromString("arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket"); + arnHandler.resolveHostForArn(request, enableDualstack(), arn, executionAttributes); + } + + private S3ControlConfiguration enableDualstack() { + return S3ControlConfiguration.builder() + .dualstackEnabled(true) + .build(); + } + + private S3ControlConfiguration enableFips() { + return S3ControlConfiguration.builder() + .fipsModeEnabled(true) + .build(); + } +} diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/S3ControlArnConverterTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/S3ControlArnConverterTest.java new file mode 100644 index 000000000000..9ec15936be18 --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/S3ControlArnConverterTest.java @@ -0,0 +1,156 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ +package software.amazon.awssdk.services.s3control.internal; + + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +import java.util.Optional; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource; +import software.amazon.awssdk.services.s3.internal.resource.S3OutpostResource; +import software.amazon.awssdk.services.s3.internal.resource.S3Resource; +import software.amazon.awssdk.services.s3control.S3ControlBucketResource; + +public class S3ControlArnConverterTest { + + private static final S3ControlArnConverter ARN_PARSER = S3ControlArnConverter.getInstance(); + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void parseArn_outpostBucketArn() { + S3Resource resource = ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("outpost/1234/bucket/myBucket") + .build()); + + assertThat(resource, instanceOf(S3ControlBucketResource.class)); + + S3ControlBucketResource bucketResource = (S3ControlBucketResource) resource; + assertThat(bucketResource.bucketName(), is("myBucket")); + + assertThat(bucketResource.parentS3Resource().get(), instanceOf(S3OutpostResource.class)); + S3OutpostResource outpostResource = (S3OutpostResource) bucketResource.parentS3Resource().get(); + + assertThat(outpostResource.accountId(), is(Optional.of("123456789012"))); + assertThat(outpostResource.partition(), is(Optional.of("aws"))); + assertThat(outpostResource.region(), is(Optional.of("us-east-1"))); + assertThat(outpostResource.type(), is(S3ControlResourceType.OUTPOST.toString())); + assertThat(outpostResource.outpostId(), is("1234")); + } + + @Test + public void parseArn_outpostAccessPointArn() { + S3Resource resource = ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3-outposts") + .region("us-east-1") + .accountId("123456789012") + .resource("outpost/1234/accesspoint/myAccessPoint") + .build()); + + assertThat(resource, instanceOf(S3AccessPointResource.class)); + + S3AccessPointResource accessPointResource = (S3AccessPointResource) resource; + assertThat(accessPointResource.accessPointName(), is("myAccessPoint")); + + assertThat(accessPointResource.parentS3Resource().get(), instanceOf(S3OutpostResource.class)); + S3OutpostResource outpostResource = (S3OutpostResource) accessPointResource.parentS3Resource().get(); + + assertThat(outpostResource.outpostId(), is("1234")); + assertThat(outpostResource.accountId(), is(Optional.of("123456789012"))); + assertThat(outpostResource.partition(), is(Optional.of("aws"))); + assertThat(outpostResource.region(), is(Optional.of("us-east-1"))); + } + + @Test + public void parseArn_invalidOutpostAccessPointMissingAccessPointName_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + + ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("outpost:op-01234567890123456:accesspoint") + .build()); + } + + @Test + public void parseArn_invalidOutpostAccessPointMissingOutpostId_shouldThrowException() { + 
exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + + ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("outpost/myaccesspoint") + .build()); + } + + @Test + public void parseArn_malformedOutpostArn_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Unknown outpost ARN"); + + ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("outpost:1:accesspoin1:1") + .build()); + } + + @Test + public void parseArn_unknownResource() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("ARN type"); + ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("unknown:foobar") + .build()); + } + + @Test + public void parseArn_unknownType_throwsCorrectException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("invalidType"); + + ARN_PARSER.convertArn(Arn.builder() + .partition("aws") + .service("s3") + .region("us-east-1") + .accountId("123456789012") + .resource("invalidType:something") + .build()); + } +} diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/S3ControlBucketResourceTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/S3ControlBucketResourceTest.java new file mode 100644 index 000000000000..63cb73d67620 --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/S3ControlBucketResourceTest.java @@ -0,0 +1,201 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package software.amazon.awssdk.services.s3control.internal; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + +import java.util.Optional; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.services.s3.internal.resource.S3BucketResource; +import software.amazon.awssdk.services.s3.internal.resource.S3OutpostResource; +import software.amazon.awssdk.services.s3control.S3ControlBucketResource; + +public class S3ControlBucketResourceTest { + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void buildWithAllPropertiesSet() { + S3ControlBucketResource bucketResource = S3ControlBucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + assertEquals("bucket", bucketResource.bucketName()); + assertEquals(Optional.of("account-id"), bucketResource.accountId()); + assertEquals(Optional.of("partition"), bucketResource.partition()); + assertEquals(Optional.of("region"), bucketResource.region()); + assertEquals(S3ControlResourceType.BUCKET.toString(), bucketResource.type()); + } + + @Test(expected = NullPointerException.class) + public void buildWithMissingBucketName() { + S3ControlBucketResource.builder().build(); + } + + @Test + public void equals_allProperties() { + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + S3ControlBucketResource bucketResource1 = S3ControlBucketResource.builder() + .bucketName("bucket") + .parentS3Resource(parentResource) + .build(); + + S3ControlBucketResource bucketResource2 = S3ControlBucketResource.builder() + .bucketName("bucket") + .parentS3Resource(parentResource) + .build(); + + S3ControlBucketResource bucketResource3 = S3ControlBucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("different-partition") + .region("region") + .build(); + + assertEquals(bucketResource1, bucketResource2); + assertNotEquals(bucketResource1, bucketResource3); + } + + @Test + public void hashcode_allProperties() { + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + S3ControlBucketResource bucketResource1 = S3ControlBucketResource.builder() + .bucketName("bucket") + .parentS3Resource(parentResource) + .build(); + + S3ControlBucketResource bucketResource2 = S3ControlBucketResource.builder() + .bucketName("bucket") + .parentS3Resource(parentResource) + .build(); + + S3ControlBucketResource bucketResource3 = S3ControlBucketResource.builder() + .bucketName("bucket") + .accountId("account-id") + .partition("different-partition") + .region("region") + .build(); + + assertEquals(bucketResource1.hashCode(), bucketResource2.hashCode()); + assertNotEquals(bucketResource1.hashCode(), bucketResource3.hashCode()); + } + + @Test + public void buildWithOutpostParent() { + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + + S3ControlBucketResource bucketResource = S3ControlBucketResource.builder() + .bucketName("bucket") + .parentS3Resource(parentResource) + .build(); + + assertEquals(Optional.of("account-id"), bucketResource.accountId()); + 
assertEquals(Optional.of("partition"), bucketResource.partition()); + assertEquals(Optional.of("region"), bucketResource.region()); + assertEquals("bucket", bucketResource.bucketName()); + assertEquals("bucket", bucketResource.type()); + assertEquals(Optional.of(parentResource), bucketResource.parentS3Resource()); + } + + @Test + public void buildWithInvalidParent_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("parentS3Resource"); + + S3BucketResource invalidParent = S3BucketResource.builder() + .bucketName("bucket") + .build(); + S3ControlBucketResource.builder() + .parentS3Resource(invalidParent) + .bucketName("bucketName") + .build(); + } + + @Test + public void hasParentAndRegion_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("has parent resource"); + + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + S3ControlBucketResource.builder() + .parentS3Resource(parentResource) + .region("us-east-1") + .bucketName("bucketName") + .build(); + } + + @Test + public void hasParentAndPartition_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("has parent resource"); + + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + S3ControlBucketResource.builder() + .parentS3Resource(parentResource) + .partition("partition") + .bucketName("bucketName") + .build(); + } + + @Test + public void hasParentAndAccountId_shouldThrowException() { + exception.expect(IllegalArgumentException.class); + exception.expectMessage("has parent resource"); + + S3OutpostResource parentResource = S3OutpostResource.builder() + .outpostId("1234") + .accountId("account-id") + .partition("partition") + .region("region") + .build(); + S3ControlBucketResource.builder() + .parentS3Resource(parentResource) + .accountId("account-id") + .bucketName("bucketName") + .build(); + } +} \ No newline at end of file diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/S3ControlWireMockTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/S3ControlWireMockTest.java new file mode 100644 index 000000000000..78aa57674f78 --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/S3ControlWireMockTest.java @@ -0,0 +1,55 @@ +/* + * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3control.internal; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3control.S3ControlClient; +import software.amazon.awssdk.utils.builder.SdkBuilder; + +public class S3ControlWireMockTest { + + @Rule + public WireMockRule mockServer = new WireMockRule(new WireMockConfiguration().port(0).httpsPort(0)); + + private S3ControlClient client; + + @Before + public void setup() { + client = S3ControlClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(() -> AwsBasicCredentials.create("test", "test")) + .build(); + } + + @Test + public void invalidAccountId_shouldThrowException() { + assertThatThrownBy(() -> client.getPublicAccessBlock(b -> b.accountId("1234#"))).isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("must only contain alphanumeric characters and dashes"); + } + + @Test + public void nullAccountId_shouldThrowException() { + assertThatThrownBy(() -> client.getPublicAccessBlock(SdkBuilder::build)).isInstanceOf(IllegalArgumentException.class).hasMessageContaining("component is missing"); + } +} diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/NonArnOutpostRequestTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/NonArnOutpostRequestTest.java new file mode 100644 index 000000000000..12534ca47bbe --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/NonArnOutpostRequestTest.java @@ -0,0 +1,98 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3control.internal.functionaltests.arns; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.containing; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.put; +import static com.github.tomakehurst.wiremock.client.WireMock.putRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; + +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3control.S3ControlClient; + + +public class NonArnOutpostRequestTest extends S3ControlWireMockTestBase { + private S3ControlClient s3; + private static final String EXPECTED_URL = "/v20180820/bucket"; + + @Before + public void methodSetUp() { + s3 = buildClient(); + } + + @Test + public void listRegionalBuckets_outpostIdNotNull_shouldRedirect() { + S3ControlClient s3Control = initializedBuilder().region(Region.of("us-west-2")).build(); + stubFor(get(urlMatching("/v20180820/bucket")).willReturn(aResponse().withBody("").withStatus(200))); + + s3Control.listRegionalBuckets(b -> b.outpostId("op-01234567890123456").accountId("123456789012")); + String expectedHost = "s3-outposts.us-west-2.amazonaws.com"; + verifyOutpostRequest("us-west-2", expectedHost); + } + + @Test + public void listRegionalBuckets_outpostIdNull_shouldNotRedirect() { + S3ControlClient s3Control = initializedBuilder().region(Region.of("us-west-2")).build(); + stubFor(get(urlMatching("/v20180820/bucket")).willReturn(aResponse().withBody("").withStatus(200))); + + s3Control.listRegionalBuckets(b -> b.accountId("123456789012")); + String expectedHost = "123456789012.s3-control.us-west-2.amazonaws.com"; + verifyS3ControlRequest("us-west-2", expectedHost); + } + + @Test + public void createBucketRequest_outpostIdNotNull_shouldRedirect() { + S3ControlClient s3Control = initializedBuilder().region(Region.of("us-west-2")).build(); + stubFor(put(urlMatching("/v20180820/bucket/test")).willReturn(aResponse().withBody("").withStatus(200))); + + s3Control.createBucket(b -> b.outpostId("op-01234567890123456").bucket("test")); + String expectedHost = "s3-outposts.us-west-2.amazonaws.com"; + verify(putRequestedFor(urlEqualTo("/v20180820/bucket/test")) + .withHeader("Authorization", containing("us-west-2/s3-outposts/aws4_request")) + .withHeader("x-amz-outpost-id", equalTo("op-01234567890123456"))); + assertThat(getRecordedEndpoints().size(), is(1)); + assertThat(getRecordedEndpoints().get(0).getHost(), is(expectedHost)); + } + + @Test + public void createBucketRequest_outpostIdNull_shouldNotRedirect() { + S3ControlClient s3Control = initializedBuilder().region(Region.of("us-west-2")).build(); + stubFor(put(urlMatching("/v20180820/bucket/test")).willReturn(aResponse().withBody("").withStatus(200))); + + s3Control.createBucket(b -> b.bucket("test")); + String expectedHost = "s3-control.us-west-2.amazonaws.com"; + + verify(putRequestedFor(urlEqualTo("/v20180820/bucket/test")).withHeader("Authorization", 
containing("us-west-2/s3/aws4_request"))); + assertThat(getRecordedEndpoints().size(), is(1)); + assertThat(getRecordedEndpoints().get(0).getHost(), is(expectedHost)); + } + + + @Override + String expectedUrl() { + return EXPECTED_URL; + } +} diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3AccessPointArnTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3AccessPointArnTest.java new file mode 100644 index 000000000000..b22ede8fec84 --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3AccessPointArnTest.java @@ -0,0 +1,91 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package software.amazon.awssdk.services.s3control.internal.functionaltests.arns; + + +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3control.S3ControlClient; + +public class S3AccessPointArnTest extends S3ControlWireMockTestBase { + private S3ControlClient s3Control; + private static final String EXPECTED_URL = "/v20180820/accesspoint/myendpoint"; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Before + public void methodSetUp() { + s3Control = buildClient(); + } + + @Test + public void malformedArn_MissingOutpostSegment_shouldThrowException() { + String accessPointArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Unknown ARN type"); + s3Control.getAccessPoint(b -> b.name(accessPointArn)); + } + + @Test + public void malformedArn_MissingAccessPointSegment_shouldThrowException() { + String accessPointArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + s3Control.getAccessPoint(b -> b.name(accessPointArn)); + } + + @Test + public void malformedArn_MissingAccessPointName_shouldThrowException() { + String accessPointArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:myaccesspoint"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + s3Control.getAccessPoint(b -> b.name(accessPointArn)); + } + + @Test + public void accessPointArn_ClientHasCustomEndpoint_throwsIllegalArgumentException() { + S3ControlClient s3ControlCustom = buildClientWithCustomEndpoint("https://foo.bar", "us-east-1"); + + String accessPointArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint" + + ":myaccesspoint"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("endpoint"); + s3ControlCustom.getAccessPoint(b -> b.name(accessPointArn)); + } + + @Test + public void 
bucketArnDifferentRegionNoConfigFlag_throwsIllegalArgumentException() { + S3ControlClient s3ControlCustom = initializedBuilder().region(Region.of("us-east-1")).build(); + String accessPointArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint" + + ":myaccesspoint"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("does not match the region the client was configured with"); + s3ControlCustom.getAccessPoint(b -> b.name(accessPointArn)); + } + + @Override + String expectedUrl() { + return EXPECTED_URL; + } +} diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3ControlWireMockRerouteInterceptor.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3ControlWireMockRerouteInterceptor.java new file mode 100644 index 000000000000..a561c919bab2 --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3ControlWireMockRerouteInterceptor.java @@ -0,0 +1,41 @@ +package software.amazon.awssdk.services.s3control.internal.functionaltests.arns; + +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.SdkHttpRequest; + +/** + * Class javadoc + */ +public class S3ControlWireMockRerouteInterceptor implements ExecutionInterceptor { + + private final URI rerouteEndpoint; + private final List recordedRequests = new ArrayList<>(); + private final List recordedEndpoints = new ArrayList(); + + S3ControlWireMockRerouteInterceptor(URI rerouteEndpoint) { + this.rerouteEndpoint = rerouteEndpoint; + } + + @Override + public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { + + SdkHttpRequest request = context.httpRequest(); + recordedEndpoints.add(request.getUri()); + recordedRequests.add(request); + + return request.toBuilder().uri(rerouteEndpoint).build(); + } + + public List getRecordedRequests() { + return recordedRequests; + } + + public List getRecordedEndpoints() { + return recordedEndpoints; + } +} diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3ControlWireMockTestBase.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3ControlWireMockTestBase.java new file mode 100644 index 000000000000..b8b16152dca0 --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3ControlWireMockTestBase.java @@ -0,0 +1,106 @@ +/* + * Copyright 2011-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package software.amazon.awssdk.services.s3control.internal.functionaltests.arns; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.containing; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; + +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.URI; +import java.util.List; +import org.junit.Rule; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3control.S3ControlClient; +import software.amazon.awssdk.services.s3control.S3ControlClientBuilder; + +/** + * Base class for tests that use a WireMock server + */ +public abstract class S3ControlWireMockTestBase { + private S3ControlWireMockRerouteInterceptor s3ControlWireMockRequestHandler; + + @Rule + public WireMockRule mockServer = new WireMockRule(new WireMockConfiguration().port(0).httpsPort(0)); + + protected String getEndpoint() { + return "http://localhost:" + mockServer.port(); + } + + protected S3ControlClient buildClient() { + this.s3ControlWireMockRequestHandler = new S3ControlWireMockRerouteInterceptor(URI.create(getEndpoint())); + + return initializedBuilder().build(); + } + + protected S3ControlClientBuilder buildClientCustom() { + this.s3ControlWireMockRequestHandler = new S3ControlWireMockRerouteInterceptor(URI.create(getEndpoint())); + + return initializedBuilder(); + } + + protected S3ControlClient buildClientWithCustomEndpoint(String serviceEndpoint, String signingRegion) { + this.s3ControlWireMockRequestHandler = new S3ControlWireMockRerouteInterceptor(URI.create(getEndpoint())); + return initializedBuilder().region(Region.of(signingRegion)).endpointOverride(URI.create(serviceEndpoint)).build(); + } + + protected S3ControlClientBuilder initializedBuilder() { + return S3ControlClient.builder() + .credentialsProvider(() -> AwsBasicCredentials.create("test", "test")) + .region(Region.US_WEST_2) + .overrideConfiguration(o -> o.addExecutionInterceptor(this.s3ControlWireMockRequestHandler)); + } + + protected List getRecordedRequests() { + return this.s3ControlWireMockRequestHandler.getRecordedRequests(); + } + + protected List getRecordedEndpoints() { + return this.s3ControlWireMockRequestHandler.getRecordedEndpoints(); + } + + protected void verifyOutpostRequest(String region, String expectedHost) { + verify(getRequestedFor(urlEqualTo(expectedUrl())) + .withHeader("Authorization", containing(String.format("%s/s3-outposts/aws4_request", region))) + .withHeader("x-amz-outpost-id", equalTo("op-01234567890123456")) + .withHeader("x-amz-account-id", equalTo("123456789012"))); + assertThat(getRecordedEndpoints().size(), is(1)); + assertThat(getRecordedEndpoints().get(0).getHost(), is(expectedHost)); + } + + protected void stubResponse() { + 
stubFor(get(urlMatching(expectedUrl())).willReturn(aResponse().withBody("").withStatus(200))); + } + + protected void verifyS3ControlRequest(String region, String expectedHost) { + verify(getRequestedFor(urlEqualTo(expectedUrl())).withHeader("Authorization", containing(String.format("%s/s3/aws4_request", region))) + .withHeader("x-amz-account-id", equalTo("123456789012"))); + assertThat(getRecordedEndpoints().size(), is(1)); + assertThat(getRecordedEndpoints().get(0).getHost(), is(expectedHost)); + } + + abstract String expectedUrl(); +} diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3OutpostAccessPointArnTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3OutpostAccessPointArnTest.java new file mode 100644 index 000000000000..aba2a8b49b23 --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3OutpostAccessPointArnTest.java @@ -0,0 +1,260 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package software.amazon.awssdk.services.s3control.internal.functionaltests.arns; + + +import static com.github.tomakehurst.wiremock.client.WireMock.containing; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; + +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3control.S3ControlClient; +import software.amazon.awssdk.services.s3control.S3ControlClientBuilder; + +public class S3OutpostAccessPointArnTest extends S3ControlWireMockTestBase { + private S3ControlClient s3; + private static final String EXPECTED_URL = "/v20180820/accesspoint/myaccesspoint"; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Before + public void methodSetUp() { + s3 = buildClient(); + } + + @Test + public void fipsEnabledOnClientSide_shouldThrowException() { + S3ControlClient s3ControlForTest = + buildClientCustom().region(Region.of("us-gov-east-1")).serviceConfiguration(b -> b.fipsModeEnabled(true)).build(); + + String outpostArn = "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint" + + ":myaccesspoint"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("FIPS"); + s3ControlForTest.getAccessPoint(b -> b.name(outpostArn)); + } + + @Test + public void regionWithFipsProvided_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().region(Region.of("fips-us-gov-east-1")).build(); + 
+ String outpostArn = "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint" + + ":myaccesspoint"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("FIPS"); + s3ControlForTest.getAccessPoint(b -> b.name(outpostArn)); + } + + @Test + public void dualstackEnabled_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().serviceConfiguration(b -> b.dualstackEnabled(true)).build(); + + String outpostArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Dualstack"); + s3ControlForTest.getAccessPoint(b -> b.name(outpostArn)); + } + + @Test + public void malformedArn_MissingOutpostSegment_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().build(); + + String outpostArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Unknown ARN type"); + s3ControlForTest.getAccessPoint(b -> b.name(outpostArn)); + } + + @Test + public void malformedArn_MissingAccessPointSegment_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().build(); + + String outpostArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + s3ControlForTest.getAccessPoint(b -> b.name(outpostArn)); + } + + @Test + public void malformedArn_MissingAccessPointName_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().build(); + + String outpostArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:myaccesspoint"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + s3ControlForTest.getAccessPoint(b -> b.name(outpostArn)); + } + + @Test + public void outpostArnClientHasCustomEndpoint_throwsIllegalArgumentException() { + S3ControlClient s3Control = buildClientWithCustomEndpoint("https://foo.bar", "us-east-1"); + + String outpostArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("endpoint"); + s3Control.getAccessPoint(b -> b.name(outpostArn)); + } + + @Test + public void bucketArnDifferentRegionNoConfigFlag_throwsIllegalArgumentException() { + S3ControlClient s3ControlForTest = initializedBuilder().region(Region.of("us-west-2")).build(); + String outpostArn = "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("does not match the region the client was configured with"); + s3ControlForTest.getAccessPoint(b -> b.name(outpostArn)); + } + + @Test + public void outpostArn_accountIdPresent_shouldThrowException() { + S3ControlClient s3Control = initializedBuilderForAccessPoint().region(Region.of("us-future-1")).build(); + + String outpostArn = "arn:aws:s3-outposts:us-future-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + exception.expect(IllegalArgumentException.class); + exception.expectMessage("accountId"); + + s3Control.getAccessPoint(b -> b.name(outpostArn).accountId("1234")); + } + + @Test + public void nonArn_shouldNotRedirect() { + S3ControlClient s3Control = 
initializedBuilderForAccessPoint().region(Region.of("us-west-2")).build(); + String name = "myaccesspoint"; + stubResponse(); + s3Control.getAccessPoint(b -> b.name(name).accountId("123456789012")); + String expectedHost = "123456789012.s3-control.us-west-2.amazonaws.com"; + + verify(getRequestedFor(urlEqualTo(expectedUrl())) + .withHeader("authorization", containing("us-west-2/s3/aws4_request")) + .withHeader("x-amz-account-id", equalTo("123456789012"))); + assertThat(getRecordedEndpoints().size(), is(1)); + assertThat(getRecordedEndpoints().get(0).getHost(), is(expectedHost)); + } + + @Test + public void outpostArnUSRegion() { + S3ControlClient s3Control = initializedBuilderForAccessPoint().region(Region.of("us-west-2")).build(); + + String outpostArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + String expectedHost = "s3-outposts.us-west-2.amazonaws.com"; + + stubResponse(); + s3Control.getAccessPoint(b -> b.name(outpostArn).accountId("123456789012")); + + verifyOutpostRequest("us-west-2", expectedHost); + } + + @Test + public void outpostArn_GovRegion() { + S3ControlClient s3Control = initializedBuilderForAccessPoint().region(Region.of("us-gov-east-1")).build(); + + String outpostArn = "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint" + + ":myaccesspoint"; + String expectedHost = "s3-outposts.us-gov-east-1.amazonaws.com"; + + stubResponse(); + + s3Control.getAccessPoint(b -> b.name(outpostArn)); + + verifyOutpostRequest("us-gov-east-1", expectedHost); + } + + @Test + public void outpostArn_futureRegion_US() { + S3ControlClient s3Control = initializedBuilderForAccessPoint().region(Region.of("us-future-1")).build(); + + + String outpostArn = "arn:aws:s3-outposts:us-future-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + String expectedHost = "s3-outposts.us-future-1.amazonaws.com"; + + stubResponse(); + + s3Control.getAccessPoint(b -> b.name(outpostArn)); + + verifyOutpostRequest("us-future-1", expectedHost); + } + + @Test + public void outpostArn_futureRegion_CN() { + S3ControlClient s3Control = initializedBuilderForAccessPoint().region(Region.of("cn-future-1")).build(); + String outpostArn = "arn:aws-cn:s3-outposts:cn-future-1:123456789012:outpost:op-01234567890123456:accesspoint" + + ":myaccesspoint"; + + String expectedHost = "s3-outposts.cn-future-1.amazonaws.com.cn"; + + stubResponse(); + + s3Control.getAccessPoint(b -> b.name(outpostArn)); + verifyOutpostRequest("cn-future-1", expectedHost); + } + + @Test + public void outpostArnDifferentRegion_useArnRegionSet_shouldUseRegionFromArn() { + + String outpostArn = "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"; + + String expectedHost = "s3-outposts.us-east-1.amazonaws.com"; + stubResponse(); + + S3ControlClient s3WithUseArnRegion = + initializedBuilderForAccessPoint().region(Region.of("us-west-2")).serviceConfiguration(b -> b.useArnRegionEnabled(true)).build(); + + s3WithUseArnRegion.getAccessPoint(b -> b.name(outpostArn)); + + verifyOutpostRequest("us-east-1", expectedHost); + } + + @Test + public void clientFipsRegion_outpostArnDifferentRegion_useArnRegionSet_shouldUseRegionFromArn() { + + String outpostArn = "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint" + + ":myaccesspoint"; + + String expectedHost = "s3-outposts.us-gov-east-1.amazonaws.com"; + stubResponse(); + + S3ControlClient s3WithUseArnRegion = 
initializedBuilderForAccessPoint().region(Region.of("fips-us-gov-east-1")) + .serviceConfiguration(b -> b.useArnRegionEnabled(true)).build(); + + s3WithUseArnRegion.getAccessPoint(b -> b.name(outpostArn)); + + verifyOutpostRequest("us-gov-east-1", expectedHost); + } + + private S3ControlClientBuilder initializedBuilderForAccessPoint() { + return initializedBuilder(); + } + + @Override + String expectedUrl() { + return EXPECTED_URL; + } +} diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3OutpostBucketArnTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3OutpostBucketArnTest.java new file mode 100644 index 000000000000..b4c308915157 --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3OutpostBucketArnTest.java @@ -0,0 +1,288 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package software.amazon.awssdk.services.s3control.internal.functionaltests.arns; + + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.containing; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.put; +import static com.github.tomakehurst.wiremock.client.WireMock.putRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; + +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3control.S3ControlClient; + +public class S3OutpostBucketArnTest extends S3ControlWireMockTestBase { + private S3ControlClient s3Control; + + @Rule + public ExpectedException exception = ExpectedException.none(); + private static final String EXPECTED_URL = "/v20180820/bucket/mybucket"; + private static final String EXPECTED_HOST = "s3-outposts.%s.amazonaws.com"; + + @Before + public void methodSetUp() { + s3Control = buildClient(); + } + + @Test + public void fipsEnabledInConfig_shouldThrowException() { + S3ControlClient s3ControlForTest = + buildClientCustom().region(Region.of("us-gov-east-1")).serviceConfiguration(b -> b.fipsModeEnabled(true)).build(); + + String bucketArn = "arn:aws-us-gov:s3-outposts:fips-us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket" + + ":mybucket"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("FIPS"); + 
s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + } + + @Test + public void fipsRegionProvided_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().region(Region.of("fips-us-gov-east-1")).build(); + + String bucketArn = "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("FIPS"); + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + } + + @Test + public void dualstackEnabled_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().serviceConfiguration(b -> b.dualstackEnabled(true)).build(); + + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Dualstack"); + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + } + + @Test + public void malformedArn_MissingBucketSegment_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().serviceConfiguration(b -> b.dualstackEnabled(true)).build(); + + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + } + + @Test + public void malformedArn_missingOutpostId_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().serviceConfiguration(b -> b.dualstackEnabled(true)).build(); + + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Unknown ARN type"); + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + } + + @Test + public void malformedArn_missingOutpostIdAndBucketName_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().serviceConfiguration(b -> b.dualstackEnabled(true)).build(); + + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:bucket"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + } + + @Test + public void malformedArn_missingBucketName_shouldThrowException() { + S3ControlClient s3ControlForTest = buildClientCustom().serviceConfiguration(b -> b.dualstackEnabled(true)).build(); + + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("Invalid format"); + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + } + + @Test + public void bucketArnDifferentRegionNoConfigFlag_throwsIllegalArgumentException() { + S3ControlClient s3ControlForTest = initializedBuilder().region(Region.of("us-west-2")).build(); + String bucketArn = "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("does not match the region the client was configured with"); + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + } + + @Test + public void bucketArnInvalidPartition_throwsIllegalArgumentException() { + S3ControlClient s3ControlForTest = + initializedBuilder().region(Region.of("us-west-2")).serviceConfiguration(b -> b.useArnRegionEnabled(true)).build(); + String bucketArn = 
"arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("does not match the partition the client has been configured with"); + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + } + + @Test + public void bucketArnWithCustomEndpoint_throwsIllegalArgumentException() { + S3ControlClient s3ControlForTest = buildClientWithCustomEndpoint("https://foo.bar", "us-west-2"); + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("has been configured with an endpoint override"); + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + } + + @Test + public void bucketArn_conflictingAccountIdPresent_shouldThrowException() { + S3ControlClient s3ControlForTest = initializedBuilder().region(Region.of("us-west-2")).build(); + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("accountId"); + s3ControlForTest.getBucket(b -> b.bucket(bucketArn).accountId("1234")); + } + + @Test + public void bucketArnUSRegion() { + S3ControlClient s3ControlForTest = initializedBuilder().region(Region.of("us-west-2")).build(); + + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + stubResponse(); + + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + verifyRequest("us-west-2"); + } + + @Test + public void bucketArn_GovRegion() { + S3ControlClient s3ControlForTest = initializedBuilder().region(Region.of("us-gov-east-1")).build(); + + String bucketArn = "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + stubResponse(); + + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + + verifyRequest("us-gov-east-1"); + } + + @Test + public void bucketArn_futureRegion_US() { + S3ControlClient s3ControlForTest = initializedBuilder().region(Region.of("us-future-1")).build(); + + String bucketArn = "arn:aws:s3-outposts:us-future-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + stubResponse(); + + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + + verifyRequest("us-future-1"); + } + + @Test + public void bucketArn_futureRegion_CN() { + S3ControlClient s3ControlForTest = initializedBuilder().region(Region.of("cn-future-1")).build(); + String bucketArn = "arn:aws-cn:s3-outposts:cn-future-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + stubResponse(); + + s3ControlForTest.getBucket(b -> b.bucket(bucketArn)); + verifyOutpostRequest("cn-future-1", "s3-outposts.cn-future-1.amazonaws.com.cn"); + } + + @Test + public void bucketArnDifferentRegion_useArnRegionSet_shouldUseRegionFromArn() { + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + stubResponse(); + + S3ControlClient s3WithUseArnRegion = + initializedBuilder().region(Region.of("us-east-1")).serviceConfiguration(b -> b.useArnRegionEnabled(true)).build(); + + s3WithUseArnRegion.getBucket(b -> b.bucket(bucketArn)); + + verifyRequest("us-west-2"); + } + + @Test + public void fipsClientRegion_bucketArnDifferentRegion_useArnRegionSet_shouldUseRegionFromArn() { + String bucketArn = "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"; 
+ stubResponse(); + + S3ControlClient s3WithUseArnRegion = + initializedBuilder().region(Region.of("fips-us-gov-east-1")).serviceConfiguration(b -> b.useArnRegionEnabled(true)).build(); + + s3WithUseArnRegion.getBucket(b -> b.bucket(bucketArn)); + + verifyRequest("us-gov-east-1"); + } + + @Test + public void outpostBucketArn_listAccessPoints() { + S3ControlClient s3ControlForTest = initializedBuilder().region(Region.of("us-west-2")).build(); + + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + stubFor(get(urlEqualTo("/v20180820/accesspoint?bucket=mybucket")).willReturn(aResponse().withBody("").withStatus(200))); + + s3ControlForTest.listAccessPoints(b -> b.bucket(bucketArn)); + verify(getRequestedFor(urlEqualTo("/v20180820/accesspoint?bucket=mybucket")) + .withHeader("authorization", containing("us-west-2/s3-outposts/aws4_request")) + .withHeader("x-amz-outpost-id", equalTo("op-01234567890123456")) + .withHeader("x-amz-account-id", equalTo("123456789012"))); + assertThat(getRecordedEndpoints().size(), is(1)); + assertThat(getRecordedEndpoints().get(0).getHost(), is(String.format(EXPECTED_HOST, "us-west-2"))); + } + + @Test + public void outpostBucketArn_createAccessPoint() { + S3ControlClient s3ControlForTest = initializedBuilder().region(Region.of("us-west-2")).build(); + + String bucketArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket"; + + stubFor(put(urlEqualTo("/v20180820/accesspoint/name")).willReturn(aResponse().withBody("").withStatus(200))); + + s3ControlForTest.createAccessPoint(b -> b.bucket(bucketArn).name("name")); + verify(putRequestedFor(urlEqualTo("/v20180820/accesspoint/name")) + .withRequestBody(containing("mybucket AwsBasicCredentials.create("test", "test")) + .region(Region.US_WEST_2) + .overrideConfiguration(o -> o.addExecutionInterceptor(this.interceptor)) + .build(); + } + + @Before + public void methodSetUp() { + s3Control = buildClient(); + } + + @Test + public void any_request_should_set_double_url_encode_to_false() { + stubFor(get(urlMatching(EXPECTED_URL)).willReturn(aResponse().withBody("").withStatus(200))); + + s3Control.describeJob(b -> b.accountId("123456789012").jobId("id")); + + assertThat(interceptor.signerDoubleUrlEncode()).isNotNull(); + assertThat(interceptor.signerDoubleUrlEncode()).isFalse(); + } + + /** + * In addition to checking the signing attribute, the interceptor sets the endpoint since + * S3 control prepends the account id to the host name and wiremock won't intercept the request + */ + private static class ExecutionAttributeInterceptor implements ExecutionInterceptor { + private final URI rerouteEndpoint; + private Boolean signerDoubleUrlEncode; + + ExecutionAttributeInterceptor(URI rerouteEndpoint) { + this.rerouteEndpoint = rerouteEndpoint; + } + + @Override + public void beforeExecution(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { + signerDoubleUrlEncode = executionAttributes.getAttribute(AwsSignerExecutionAttribute.SIGNER_DOUBLE_URL_ENCODE); + } + + @Override + public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { + SdkHttpRequest request = context.httpRequest(); + return request.toBuilder().uri(rerouteEndpoint).build(); + } + + public Boolean signerDoubleUrlEncode() { + return signerDoubleUrlEncode; + } + } +} diff --git 
a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java new file mode 100644 index 000000000000..a909573972d7 --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java @@ -0,0 +1,265 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package software.amazon.awssdk.services.s3control.internal.interceptors; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.verifyZeroInteractions; +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME; +import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SIGNING_REGION; + +import java.net.URI; +import java.util.Optional; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.Protocol; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.internal.presigner.DefaultS3Presigner; +import software.amazon.awssdk.services.s3control.S3ControlClient; +import software.amazon.awssdk.services.s3control.S3ControlConfiguration; +import software.amazon.awssdk.services.s3control.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3control.model.ListRegionalBucketsRequest; + +public class EndpointAddressInterceptorTest { + + private static final String X_AMZ_ACCOUNT_ID = "x-amz-account-id"; + private static final String ACCOUNT_ID = "123456789012"; + + private SdkHttpRequest request; + + @Before + public void setup() { + request = SdkHttpFullRequest.builder() + .appendHeader(X_AMZ_ACCOUNT_ID, ACCOUNT_ID) + .protocol(Protocol.HTTPS.toString()) + .method(SdkHttpMethod.POST) + .host(S3ControlClient.serviceMetadata().endpointFor(Region.US_EAST_1).toString()) + .build(); + } + + @Test + public void modifyHttpRequest_ResolvesCorrectHost_StandardSettings() { + EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request), new ExecutionAttributes()); + 
assertThat(modified.host()).isEqualTo("s3-control.us-east-1.amazonaws.com"); + } + + @Test + public void modifyHttpRequest_ResolvesCorrectHost_Dualstack() { + EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + + S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().dualstackEnabled(true).build(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); + + SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request), executionAttributes); + assertThat(modified.host()).isEqualTo("s3-control.dualstack.us-east-1.amazonaws.com"); + } + + @Test + public void modifyHttpRequest_ResolvesCorrectHost_Fips() { + EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + + S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().fipsModeEnabled(true).build(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); + + SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request), executionAttributes); + assertThat(modified.host()).isEqualTo("s3-control-fips.us-east-1.amazonaws.com"); + } + + @Test + public void createBucketRequestWithOutpostId_shouldRedirect() { + EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + CreateBucketRequest createBucketRequest = CreateBucketRequest.builder().outpostId("1234").build(); + + S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().build(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); + executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); + + SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(createBucketRequest), + executionAttributes); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3-outposts"); + assertThat(modified.host()).isEqualTo("s3-outposts.us-east-1.amazonaws.com"); + } + + @Test + public void listRegionalBucketsRequestsWithOutpostId_shouldRedirect() { + EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder().outpostId("1234").build(); + + S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().build(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); + executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); + executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); + + SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), + executionAttributes); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3-outposts"); + assertThat(modified.host()).isEqualTo("s3-outposts.us-east-1.amazonaws.com"); + } + + @Test + public void listRegionalBucketsRequestsWithoutOutpostId_shouldNotRedirect() { + EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder().build(); + + S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder() + 
.dualstackEnabled(true) + .build(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); + executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); + executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); + + SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), + executionAttributes); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3"); + assertThat(modified.host()).isEqualTo("s3-control.dualstack.us-east-1.amazonaws.com"); + } + + @Test + public void createBucketRequestsWithoutOutpostId_shouldNotRedirect() { + EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder() + .build(); + + S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder() + .fipsModeEnabled(true) + .build(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); + executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); + executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); + + SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), + executionAttributes); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3"); + assertThat(modified.host()).isEqualTo("s3-control-fips.us-east-1.amazonaws.com"); + } + + @Test + public void listRegionalBucketsRequestWithOutpostId_fipsEnabled_shouldThrowException() { + EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder() + .outpostId("123") + .build(); + + S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().fipsModeEnabled(true).build(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); + executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); + executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); + + assertThatThrownBy(() -> interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), + executionAttributes)).hasMessageContaining("FIPS endpoints are " + + "not supported"); + } + + @Test + public void listRegionalBucketsRequestWithOutpostId_fipsDualsackEnabled_shouldThrowException() { + EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder() + .outpostId("123") + .build(); + + S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().dualstackEnabled(true).build(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); + executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); + executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); + + assertThatThrownBy(() -> interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), + executionAttributes)).hasMessageContaining("Dualstack endpoints are " + + "not supported"); + } + + @Test(expected = SdkClientException.class) + public void modifyHttpRequest_ThrowsException_FipsAndDualstack() { + 
EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + + S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder() + .fipsModeEnabled(true) + .dualstackEnabled(true) + .build(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); + + interceptor.modifyHttpRequest(new Context(request), executionAttributes); + } + + @Test(expected = SdkClientException.class) + public void modifyHttpRequest_ThrowsException_NonStandardEndpoint() { + EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + + S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder() + .dualstackEnabled(true) + .build(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); + + interceptor.modifyHttpRequest(new Context(request.toBuilder().host("some-garbage").build()), + executionAttributes); + } + + public final class Context implements software.amazon.awssdk.core.interceptor.Context.ModifyHttpRequest { + + private final SdkHttpRequest request; + private SdkRequest sdkRequest; + + public Context(SdkHttpRequest request) { + this.request = request; + } + + public Context request(SdkRequest sdkRequest) { + this.sdkRequest = sdkRequest; + return this; + } + + @Override + public SdkRequest request() { + return sdkRequest; + } + + @Override + public SdkHttpRequest httpRequest() { + return request; + } + + @Override + public Optional requestBody() { + return Optional.empty(); + } + + @Override + public Optional asyncRequestBody() { + return Optional.empty(); + } + } +} diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptorTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptorTest.java new file mode 100644 index 000000000000..f64cb21f188a --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptorTest.java @@ -0,0 +1,102 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package software.amazon.awssdk.services.s3control.internal.interceptors; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Optional; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; +import software.amazon.awssdk.core.Protocol; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3control.S3ControlClient; + +public class PayloadSigningInterceptorTest { + + private SdkHttpRequest request; + + @Before + public void setup() { + request = SdkHttpFullRequest.builder() + .protocol(Protocol.HTTPS.toString()) + .method(SdkHttpMethod.POST) + .host(S3ControlClient.serviceMetadata().endpointFor(Region.US_EAST_1).toString()) + .build(); + } + + @Test + public void modifyHttpContent_AddsExecutionAttributeAndPayload() { + PayloadSigningInterceptor interceptor = new PayloadSigningInterceptor(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + Optional modified = interceptor.modifyHttpContent(new Context(request, null), + executionAttributes); + + assertThat(modified.isPresent()).isTrue(); + assertThat(modified.get().contentLength()).isEqualTo(0); + assertThat(executionAttributes.getAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING)).isTrue(); + } + + @Test + public void modifyHttpContent_DoesNotReplaceBody() { + PayloadSigningInterceptor interceptor = new PayloadSigningInterceptor(); + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + Optional modified = interceptor.modifyHttpContent(new Context(request, RequestBody.fromString("hello")), + executionAttributes); + + assertThat(modified.isPresent()).isTrue(); + assertThat(modified.get().contentLength()).isEqualTo(5); + assertThat(executionAttributes.getAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING)).isTrue(); + } + + public final class Context implements software.amazon.awssdk.core.interceptor.Context.ModifyHttpRequest { + + private final SdkHttpRequest request; + private final RequestBody requestBody; + + public Context(SdkHttpRequest request, + RequestBody requestBody) { + this.request = request; + this.requestBody = requestBody; + } + + @Override + public SdkRequest request() { + return null; + } + + @Override + public SdkHttpRequest httpRequest() { + return request; + } + + @Override + public Optional requestBody() { + return Optional.ofNullable(requestBody); + } + + @Override + public Optional asyncRequestBody() { + return Optional.empty(); + } + } +} diff --git a/services/s3control/src/test/resources/log4j.properties b/services/s3control/src/test/resources/log4j.properties new file mode 100644 index 000000000000..012eb6e372f3 --- /dev/null +++ b/services/s3control/src/test/resources/log4j.properties @@ -0,0 +1,33 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. 
This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +log4j.rootLogger=WARN, A1 +log4j.appender.A1=org.apache.log4j.ConsoleAppender +log4j.appender.A1.layout=org.apache.log4j.PatternLayout + +# Print the date in ISO 8601 format +log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n + +# Adjust to see more / less logging +#log4j.logger.com.amazonaws.ec2=DEBUG + +# HttpClient 3 Wire Logging +#log4j.logger.httpclient.wire=DEBUG + +# HttpClient 4 Wire Logging +#log4j.logger.org.apache.http.wire=DEBUG +#log4j.logger.org.apache.http=DEBUG +#log4j.logger.org.apache.http.wire=WARN +#log4j.logger.software.amazon.awssdk=DEBUG diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml new file mode 100644 index 000000000000..9fd9e48cd6df --- /dev/null +++ b/services/s3outposts/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + s3outposts + AWS Java SDK :: Services :: S3 Outposts + The AWS Java SDK for S3 Outposts module holds the client classes that are used for + communicating with S3 Outposts. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.s3outposts + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/s3outposts/src/main/resources/codegen-resources/paginators-1.json b/services/s3outposts/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..ad678fe468b7 --- /dev/null +++ b/services/s3outposts/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListEndpoints": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Endpoints" + } + } +} \ No newline at end of file diff --git a/services/s3outposts/src/main/resources/codegen-resources/service-2.json b/services/s3outposts/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..a344e3715111 --- /dev/null +++ b/services/s3outposts/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,304 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-07-25", + "endpointPrefix":"s3-outposts", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon S3 Outposts", + "serviceFullName":"Amazon S3 on Outposts", + "serviceId":"S3Outposts", + "signatureVersion":"v4", + "signingName":"s3-outposts", + "uid":"s3outposts-2017-07-25" + }, + "operations":{ + "CreateEndpoint":{ + "name":"CreateEndpoint", + "http":{ + "method":"POST", + "requestUri":"/S3Outposts/CreateEndpoint" + }, + "input":{"shape":"CreateEndpointRequest"}, + "output":{"shape":"CreateEndpointResult"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    S3 on Outposts access points simplify managing data access at scale for shared datasets in Amazon S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC).

    This action creates an endpoint and associates it with the specified Outpost.

    Related actions include:

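As a rough usage sketch of the operation modeled above (assuming the SDK's usual codegen conventions for this new s3outposts module, i.e. an S3OutpostsClient with consumer-builder request overloads; the Outpost, subnet, and security group IDs are placeholders):

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3outposts.S3OutpostsClient;
import software.amazon.awssdk.services.s3outposts.model.CreateEndpointResponse;

public class CreateEndpointSketch {
    public static void main(String[] args) {
        // Placeholder IDs; substitute your own Outpost, subnet, and security group.
        try (S3OutpostsClient s3Outposts = S3OutpostsClient.builder()
                                                           .region(Region.US_WEST_2)
                                                           .build()) {
            CreateEndpointResponse response = s3Outposts.createEndpoint(r -> r
                    .outpostId("op-01234567890123456")
                    .subnetId("subnet-0123456789abcdef0")
                    .securityGroupId("sg-0123456789abcdef0"));
            System.out.println("Created endpoint: " + response.endpointArn());
        }
    }
}

Only the three required members of CreateEndpointRequest are set here; credentials and region resolution work the same way as for any other generated v2 client.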
    " + }, + "DeleteEndpoint":{ + "name":"DeleteEndpoint", + "http":{ + "method":"DELETE", + "requestUri":"/S3Outposts/DeleteEndpoint" + }, + "input":{"shape":"DeleteEndpointRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    S3 on Outposts access points simplify managing data access at scale for shared datasets in Amazon S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC).

    This action deletes an endpoint.

    Related actions include:

    " + }, + "ListEndpoints":{ + "name":"ListEndpoints", + "http":{ + "method":"GET", + "requestUri":"/S3Outposts/ListEndpoints" + }, + "input":{"shape":"ListEndpointsRequest"}, + "output":{"shape":"ListEndpointsResult"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    S3 on Outposts access points simplify managing data access at scale for shared datasets in Amazon S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC).

    This action lists endpoints associated with the Outpost.

    Related actions include:

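The ListEndpoints entry in paginators-1.json earlier in this patch implies the codegen also emits an auto-paginating variant. A minimal sketch under the same naming assumptions (a listEndpointsPaginator() helper that follows NextToken automatically):

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3outposts.S3OutpostsClient;

public class ListEndpointsSketch {
    public static void main(String[] args) {
        try (S3OutpostsClient s3Outposts = S3OutpostsClient.builder()
                                                           .region(Region.US_WEST_2)
                                                           .build()) {
            // The paginator requests up to MaxResults per page and follows NextToken for you.
            s3Outposts.listEndpointsPaginator(r -> r.maxResults(50))
                      .endpoints()
                      .forEach(endpoint -> System.out.printf("%s (%s)%n",
                                                             endpoint.endpointArn(),
                                                             endpoint.statusAsString()));
        }
    }
}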
    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Access was denied for this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "CidrBlock":{ + "type":"string", + "max":20, + "min":1 + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    There was a conflict with this action, and it could not be completed.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateEndpointRequest":{ + "type":"structure", + "required":[ + "OutpostId", + "SubnetId", + "SecurityGroupId" + ], + "members":{ + "OutpostId":{ + "shape":"OutpostId", + "documentation":"

    The ID of the AWS Outpost.

    " + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

    The ID of the subnet in the selected VPC.

    " + }, + "SecurityGroupId":{ + "shape":"SecurityGroupId", + "documentation":"

    The ID of the security group to use with the endpoint.

    " + } + } + }, + "CreateEndpointResult":{ + "type":"structure", + "members":{ + "EndpointArn":{ + "shape":"EndpointArn", + "documentation":"

    The Amazon Resource Name (ARN) of the endpoint.

    " + } + } + }, + "CreationTime":{"type":"timestamp"}, + "DeleteEndpointRequest":{ + "type":"structure", + "required":[ + "EndpointId", + "OutpostId" + ], + "members":{ + "EndpointId":{ + "shape":"EndpointId", + "documentation":"

    The ID of the endpoint.

    ", + "location":"querystring", + "locationName":"endpointId" + }, + "OutpostId":{ + "shape":"OutpostId", + "documentation":"

    The ID of the AWS Outpost.

    ", + "location":"querystring", + "locationName":"outpostId" + } + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "EndpointArn":{ + "shape":"EndpointArn", + "documentation":"

    The Amazon Resource Name (ARN) of the endpoint.

    " + }, + "OutpostsId":{ + "shape":"OutpostId", + "documentation":"

    The ID of the AWS Outpost.

    " + }, + "CidrBlock":{ + "shape":"CidrBlock", + "documentation":"

    The VPC CIDR committed by this endpoint.

    " + }, + "Status":{ + "shape":"EndpointStatus", + "documentation":"

    The status of the endpoint.

    " + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

    The time the endpoint was created.

    " + }, + "NetworkInterfaces":{ + "shape":"NetworkInterfaces", + "documentation":"

    The network interfaces of the endpoint.

    " + } + }, + "documentation":"

    S3 on Outposts access points simplify managing data access at scale for shared datasets in Amazon S3 on Outposts. S3 on Outposts uses endpoints to connect to Outposts buckets so that you can perform actions within your virtual private cloud (VPC).

    " + }, + "EndpointArn":{ + "type":"string", + "max":500, + "min":5, + "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):s3-outposts:[a-z\\-0-9]*:[0-9]{12}:outpost/(op-[a-f0-9]{17}|ec2)/endpoint/[a-zA-Z0-9]{19}$" + }, + "EndpointId":{ + "type":"string", + "max":500, + "min":5, + "pattern":"^[a-zA-Z0-9]{19}$" + }, + "EndpointStatus":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE" + ] + }, + "Endpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, + "ErrorMessage":{"type":"string"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    There was an exception with the internal server.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListEndpointsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The next endpoint requested in the list.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of endpoints that can be returned for the request.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListEndpointsResult":{ + "type":"structure", + "members":{ + "Endpoints":{ + "shape":"Endpoints", + "documentation":"

    Returns an array of endpoints associated with the AWS Outpost.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The next endpoint returned in the list.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":0 + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"NetworkInterfaceId", + "documentation":"

    The ID for the network interface.

    " + } + }, + "documentation":"

    The container for the network interface.

    " + }, + "NetworkInterfaceId":{ + "type":"string", + "max":100, + "min":1 + }, + "NetworkInterfaces":{ + "type":"list", + "member":{"shape":"NetworkInterface"} + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[A-Za-z0-9\\+\\:\\/\\=\\?\\#-_]+$" + }, + "OutpostId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^(op-[a-f0-9]{17}|\\d{12}|ec2)$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The requested resource was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SecurityGroupId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^sg-([0-9a-f]{8}|[0-9a-f]{17})$" + }, + "SubnetId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^subnet-([0-9a-f]{8}|[0-9a-f]{17})$" + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    There was an exception validating this data.

    ", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

    Amazon S3 on Outposts provides access to S3 on Outposts operations.

    " +} diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index bcc9a939573f..8a50f87ee3cf 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + sagemakera2iruntime + AWS Java SDK :: Services :: SageMaker A2I Runtime + The AWS Java SDK for SageMaker A2I Runtime module holds the client classes that are used for + communicating with SageMaker A2I Runtime. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.sagemakera2iruntime + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/sagemakera2iruntime/src/main/resources/codegen-resources/paginators-1.json b/services/sagemakera2iruntime/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..b19128c2626a --- /dev/null +++ b/services/sagemakera2iruntime/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListHumanLoops": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "HumanLoopSummaries" + } + } +} diff --git a/services/sagemakera2iruntime/src/main/resources/codegen-resources/service-2.json b/services/sagemakera2iruntime/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..bc9ad98eb866 --- /dev/null +++ b/services/sagemakera2iruntime/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,462 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-11-07", + "endpointPrefix":"a2i-runtime.sagemaker", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Augmented AI Runtime", + "serviceId":"SageMaker A2I Runtime", + "signatureVersion":"v4", + "signingName":"sagemaker", + "uid":"sagemaker-a2i-runtime-2019-11-07" + }, + "operations":{ + "DeleteHumanLoop":{ + "name":"DeleteHumanLoop", + "http":{ + "method":"DELETE", + "requestUri":"/human-loops/{HumanLoopName}" + }, + "input":{"shape":"DeleteHumanLoopRequest"}, + "output":{"shape":"DeleteHumanLoopResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the specified human loop for a flow definition.

    " + }, + "DescribeHumanLoop":{ + "name":"DescribeHumanLoop", + "http":{ + "method":"GET", + "requestUri":"/human-loops/{HumanLoopName}" + }, + "input":{"shape":"DescribeHumanLoopRequest"}, + "output":{"shape":"DescribeHumanLoopResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns information about the specified human loop.

    " + }, + "ListHumanLoops":{ + "name":"ListHumanLoops", + "http":{ + "method":"GET", + "requestUri":"/human-loops" + }, + "input":{"shape":"ListHumanLoopsRequest"}, + "output":{"shape":"ListHumanLoopsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns information about human loops, given the specified parameters. If a human loop was deleted, it will not be included.

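A minimal sketch of this operation, assuming the conventional generated client name SageMakerA2IRuntimeClient and the listHumanLoopsPaginator() helper implied by the ListHumanLoops entry in paginators-1.json; the flow definition ARN is a placeholder:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.sagemakera2iruntime.SageMakerA2IRuntimeClient;

public class ListHumanLoopsSketch {
    public static void main(String[] args) {
        // FlowDefinitionArn is a required filter; this value is a placeholder.
        String flowDefinitionArn =
                "arn:aws:sagemaker:us-west-2:123456789012:flow-definition/my-flow-definition";

        try (SageMakerA2IRuntimeClient a2i = SageMakerA2IRuntimeClient.builder()
                                                                      .region(Region.US_WEST_2)
                                                                      .build()) {
            a2i.listHumanLoopsPaginator(r -> r.flowDefinitionArn(flowDefinitionArn))
               .humanLoopSummaries()
               .forEach(summary -> System.out.printf("%s -> %s%n",
                                                     summary.humanLoopName(),
                                                     summary.humanLoopStatusAsString()));
        }
    }
}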
    " + }, + "StartHumanLoop":{ + "name":"StartHumanLoop", + "http":{ + "method":"POST", + "requestUri":"/human-loops" + }, + "input":{"shape":"StartHumanLoopRequest"}, + "output":{"shape":"StartHumanLoopResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Starts a human loop, provided that at least one activation condition is met.

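A sketch of starting a loop under the same naming assumptions; InputContent is simply an application-defined JSON document serialized to a string, and the loop name and flow definition ARN below are placeholders:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.sagemakera2iruntime.SageMakerA2IRuntimeClient;
import software.amazon.awssdk.services.sagemakera2iruntime.model.StartHumanLoopResponse;

public class StartHumanLoopSketch {
    public static void main(String[] args) {
        // Application-defined payload, serialized to a JSON string per the HumanLoopInput shape.
        String inputContent = "{\"taskObject\":\"s3://amzn-s3-demo-bucket/image.jpg\"}";

        try (SageMakerA2IRuntimeClient a2i = SageMakerA2IRuntimeClient.builder()
                                                                      .region(Region.US_WEST_2)
                                                                      .build()) {
            StartHumanLoopResponse response = a2i.startHumanLoop(r -> r
                    .humanLoopName("my-loop-001") // lowercase letters, digits, and hyphens only
                    .flowDefinitionArn("arn:aws:sagemaker:us-west-2:123456789012:flow-definition/my-flow-definition")
                    .humanLoopInput(in -> in.inputContent(inputContent)));
            System.out.println("Started human loop: " + response.humanLoopArn());
        }
    }
}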
    " + }, + "StopHumanLoop":{ + "name":"StopHumanLoop", + "http":{ + "method":"POST", + "requestUri":"/human-loops/stop" + }, + "input":{"shape":"StopHumanLoopRequest"}, + "output":{"shape":"StopHumanLoopResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Stops the specified human loop.

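A sketch combining DescribeHumanLoop with StopHumanLoop, again under the same naming assumptions and with a placeholder loop name; per the HumanLoopStatus enum below, a stopped loop passes through Stopping before reaching Stopped:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.sagemakera2iruntime.SageMakerA2IRuntimeClient;
import software.amazon.awssdk.services.sagemakera2iruntime.model.DescribeHumanLoopResponse;
import software.amazon.awssdk.services.sagemakera2iruntime.model.HumanLoopStatus;

public class StopHumanLoopSketch {
    public static void main(String[] args) {
        try (SageMakerA2IRuntimeClient a2i = SageMakerA2IRuntimeClient.builder()
                                                                      .region(Region.US_WEST_2)
                                                                      .build()) {
            DescribeHumanLoopResponse loop = a2i.describeHumanLoop(r -> r.humanLoopName("my-loop-001"));
            if (loop.humanLoopStatus() == HumanLoopStatus.IN_PROGRESS) {
                // The stop is not instantaneous: the loop transitions through Stopping to Stopped.
                a2i.stopHumanLoop(r -> r.humanLoopName("my-loop-001"));
            }
        }
    }
}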
    " + } + }, + "shapes":{ + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"FailureReason"} + }, + "documentation":"

    Your request has the same name as another active human loop but has different input data. You cannot start two human loops with the same name and different input data.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ContentClassifier":{ + "type":"string", + "enum":[ + "FreeOfPersonallyIdentifiableInformation", + "FreeOfAdultContent" + ] + }, + "ContentClassifiers":{ + "type":"list", + "member":{"shape":"ContentClassifier"}, + "max":256 + }, + "DeleteHumanLoopRequest":{ + "type":"structure", + "required":["HumanLoopName"], + "members":{ + "HumanLoopName":{ + "shape":"HumanLoopName", + "documentation":"

    The name of the human loop that you want to delete.

    ", + "location":"uri", + "locationName":"HumanLoopName" + } + } + }, + "DeleteHumanLoopResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeHumanLoopRequest":{ + "type":"structure", + "required":["HumanLoopName"], + "members":{ + "HumanLoopName":{ + "shape":"HumanLoopName", + "documentation":"

    The name of the human loop that you want information about.

    ", + "location":"uri", + "locationName":"HumanLoopName" + } + } + }, + "DescribeHumanLoopResponse":{ + "type":"structure", + "required":[ + "CreationTime", + "HumanLoopStatus", + "HumanLoopName", + "HumanLoopArn", + "FlowDefinitionArn" + ], + "members":{ + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    The creation time when Amazon Augmented AI created the human loop.

    " + }, + "FailureReason":{ + "shape":"String", + "documentation":"

    The reason why a human loop failed. The failure reason is returned when the status of the human loop is Failed.

    " + }, + "FailureCode":{ + "shape":"String", + "documentation":"

    A failure code that identifies the type of failure.

    " + }, + "HumanLoopStatus":{ + "shape":"HumanLoopStatus", + "documentation":"

    The status of the human loop.

    " + }, + "HumanLoopName":{ + "shape":"HumanLoopName", + "documentation":"

    The name of the human loop. The name must be lowercase, unique within the Region in your account, and can have up to 63 characters. Valid characters: a-z, 0-9, and - (hyphen).

    " + }, + "HumanLoopArn":{ + "shape":"HumanLoopArn", + "documentation":"

    The Amazon Resource Name (ARN) of the human loop.

    " + }, + "FlowDefinitionArn":{ + "shape":"FlowDefinitionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the flow definition.

    " + }, + "HumanLoopOutput":{ + "shape":"HumanLoopOutput", + "documentation":"

    An object that contains information about the output of the human loop.

    " + } + } + }, + "FailureReason":{ + "type":"string", + "max":1024 + }, + "FlowDefinitionArn":{ + "type":"string", + "max":1024, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:flow-definition/.*" + }, + "HumanLoopArn":{ + "type":"string", + "max":1024, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:human-loop/.*" + }, + "HumanLoopDataAttributes":{ + "type":"structure", + "required":["ContentClassifiers"], + "members":{ + "ContentClassifiers":{ + "shape":"ContentClassifiers", + "documentation":"

    Declares that your content is free of personally identifiable information or adult content.

    Amazon SageMaker can restrict the Amazon Mechanical Turk workers who can view your task based on this information.

    " + } + }, + "documentation":"

    Attributes of the data specified by the customer. Use these to describe the data to be labeled.

    " + }, + "HumanLoopInput":{ + "type":"structure", + "required":["InputContent"], + "members":{ + "InputContent":{ + "shape":"InputContent", + "documentation":"

    Serialized input to the human loop. The input must be a string representation of a file in JSON format.

    " + } + }, + "documentation":"

    An object containing the human loop input in JSON format.

    " + }, + "HumanLoopName":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[a-z0-9](-*[a-z0-9])*$" + }, + "HumanLoopOutput":{ + "type":"structure", + "required":["OutputS3Uri"], + "members":{ + "OutputS3Uri":{ + "shape":"String", + "documentation":"

    The location of the Amazon S3 object where Amazon Augmented AI stores your human loop output.

    " + } + }, + "documentation":"

    Information about where the human output will be stored.

    " + }, + "HumanLoopStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Failed", + "Completed", + "Stopped", + "Stopping" + ] + }, + "HumanLoopSummaries":{ + "type":"list", + "member":{"shape":"HumanLoopSummary"} + }, + "HumanLoopSummary":{ + "type":"structure", + "members":{ + "HumanLoopName":{ + "shape":"HumanLoopName", + "documentation":"

    The name of the human loop.

    " + }, + "HumanLoopStatus":{ + "shape":"HumanLoopStatus", + "documentation":"

    The status of the human loop.

    " + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

    When Amazon Augmented AI created the human loop.

    " + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

    The reason why the human loop failed. A failure reason is returned when the status of the human loop is Failed.

    " + }, + "FlowDefinitionArn":{ + "shape":"FlowDefinitionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the flow definition used to configure the human loop.

    " + } + }, + "documentation":"

    Summary information about the human loop.

    " + }, + "InputContent":{ + "type":"string", + "max":3145728 + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"FailureReason"} + }, + "documentation":"

    We couldn't process your request because of an issue with the server. Try again later.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "ListHumanLoopsRequest":{ + "type":"structure", + "required":["FlowDefinitionArn"], + "members":{ + "CreationTimeAfter":{ + "shape":"Timestamp", + "documentation":"

    (Optional) The timestamp of the date after which you want the human loops to have been created, in ISO 8601 format. For example, 2020-02-24.

    ", + "location":"querystring", + "locationName":"CreationTimeAfter" + }, + "CreationTimeBefore":{ + "shape":"Timestamp", + "documentation":"

    (Optional) The timestamp of the date before which you want the human loops to have been created, in ISO 8601 format. For example, 2020-02-24.

    ", + "location":"querystring", + "locationName":"CreationTimeBefore" + }, + "FlowDefinitionArn":{ + "shape":"FlowDefinitionArn", + "documentation":"

    The Amazon Resource Name (ARN) of a flow definition.

    ", + "location":"querystring", + "locationName":"FlowDefinitionArn" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

    Optional. The order for displaying results. Valid values: Ascending and Descending.

    ", + "location":"querystring", + "locationName":"SortOrder" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token to display the next page of results.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken is returned in the output. You can use this token to display the next page of results.
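
    For illustration only, a minimal sketch of paging through results with the AWS SDK for Java v2. The client and model class names (SageMakerA2IRuntimeClient, ListHumanLoopsRequest, ListHumanLoopsResponse) assume the SDK's standard code generation for this model, and the flow definition ARN is a placeholder.

        SageMakerA2IRuntimeClient a2i = SageMakerA2IRuntimeClient.create();
        String nextToken = null;
        do {
            ListHumanLoopsRequest request = ListHumanLoopsRequest.builder()
                    // Placeholder ARN; use the ARN of your own flow definition.
                    .flowDefinitionArn("arn:aws:sagemaker:us-east-1:123456789012:flow-definition/example")
                    .maxResults(100)
                    .nextToken(nextToken)   // null on the first call
                    .build();
            ListHumanLoopsResponse response = a2i.listHumanLoops(request);
            response.humanLoopSummaries().forEach(summary ->
                    System.out.println(summary.humanLoopName() + " -> " + summary.humanLoopStatusAsString()));
            nextToken = response.nextToken();
        } while (nextToken != null);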

    ", + "box":true, + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListHumanLoopsResponse":{ + "type":"structure", + "required":["HumanLoopSummaries"], + "members":{ + "HumanLoopSummaries":{ + "shape":"HumanLoopSummaries", + "documentation":"

    An array of objects that contain information about the human loops.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token to display the next page of results.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":8192, + "pattern":".*" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"FailureReason"} + }, + "documentation":"

    We couldn't find the requested resource.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"FailureReason"} + }, + "documentation":"

    You exceeded your service quota. Delete some resources or request an increase in your service quota.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "SortOrder":{ + "type":"string", + "enum":[ + "Ascending", + "Descending" + ] + }, + "StartHumanLoopRequest":{ + "type":"structure", + "required":[ + "HumanLoopName", + "FlowDefinitionArn", + "HumanLoopInput" + ], + "members":{ + "HumanLoopName":{ + "shape":"HumanLoopName", + "documentation":"

    The name of the human loop.

    " + }, + "FlowDefinitionArn":{ + "shape":"FlowDefinitionArn", + "documentation":"

    The Amazon Resource Name (ARN) of the flow definition associated with this human loop.

    " + }, + "HumanLoopInput":{ + "shape":"HumanLoopInput", + "documentation":"

    An object that contains information about the human loop.

    " + }, + "DataAttributes":{ + "shape":"HumanLoopDataAttributes", + "documentation":"

    Attributes of the specified data. Use DataAttributes to specify if your data is free of personally identifiable information and/or free of adult content.

    " + } + } + }, + "StartHumanLoopResponse":{ + "type":"structure", + "members":{ + "HumanLoopArn":{ + "shape":"HumanLoopArn", + "documentation":"

    The Amazon Resource Name (ARN) of the human loop.

    " + } + } + }, + "StopHumanLoopRequest":{ + "type":"structure", + "required":["HumanLoopName"], + "members":{ + "HumanLoopName":{ + "shape":"HumanLoopName", + "documentation":"

    The name of the human loop that you want to stop.

    " + } + } + }, + "StopHumanLoopResponse":{ + "type":"structure", + "members":{ + } + }, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"FailureReason"} + }, + "documentation":"

    You exceeded the maximum number of requests.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"FailureReason"} + }, + "documentation":"

    The request isn't valid. Check the syntax and try again.

    ", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

    Amazon Augmented AI is in preview release and is subject to change. We do not recommend using this product in production environments.

    Amazon Augmented AI (Amazon A2I) adds the benefit of human judgment to any machine learning application. When an AI application can't evaluate data with a high degree of confidence, human reviewers can take over. This human review is called a human review workflow. To create and start a human review workflow, you need three resources: a worker task template, a flow definition, and a human loop.

    For information about these resources and prerequisites for using Amazon A2I, see Get Started with Amazon Augmented AI in the Amazon SageMaker Developer Guide.

    This API reference includes information about API actions and data types that you can use to interact with Amazon A2I programmatically. Use this guide to:

    • Start a human loop with the StartHumanLoop operation when using Amazon A2I with a custom task type. To learn more about the difference between custom and built-in task types, see Use Task Types. To learn how to start a human loop using this API, see Create and Start a Human Loop for a Custom Task Type in the Amazon SageMaker Developer Guide.

    • Manage your human loops. You can list all human loops that you have created, describe individual human loops, and stop and delete human loops. To learn more, see Monitor and Manage Your Human Loop in the Amazon SageMaker Developer Guide.

    Amazon A2I integrates APIs from various AWS services to create and start human review workflows for those services. To learn how Amazon A2I uses these APIs, see Use APIs in Amazon A2I in the Amazon SageMaker Developer Guide.
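
    As a rough illustration of the custom-task flow described above, the following sketch starts a human loop with the AWS SDK for Java v2. The class names (SageMakerA2IRuntimeClient, StartHumanLoopRequest, HumanLoopInput) assume the SDK's standard code generation for this model; the loop name, flow definition ARN, and JSON payload are placeholders.

        SageMakerA2IRuntimeClient a2i = SageMakerA2IRuntimeClient.create();

        // InputContent must be a JSON string; its keys are defined by your worker task template.
        String inputContent = "{\"taskObject\":\"s3://my-bucket/input.txt\"}";

        StartHumanLoopResponse started = a2i.startHumanLoop(StartHumanLoopRequest.builder()
                .humanLoopName("example-loop-1")   // placeholder name (lowercase letters, digits, hyphens)
                .flowDefinitionArn("arn:aws:sagemaker:us-east-1:123456789012:flow-definition/example") // placeholder
                .humanLoopInput(HumanLoopInput.builder().inputContent(inputContent).build())
                .build());

        System.out.println("Started human loop: " + started.humanLoopArn());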

    " +} diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml new file mode 100644 index 000000000000..3675f0940208 --- /dev/null +++ b/services/sagemakeredge/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + sagemakeredge + AWS Java SDK :: Services :: Sagemaker Edge + The AWS Java SDK for Sagemaker Edge module holds the client classes that are used for + communicating with Sagemaker Edge. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.sagemakeredge + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/sagemakeredge/src/main/resources/codegen-resources/paginators-1.json b/services/sagemakeredge/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/sagemakeredge/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/sagemakeredge/src/main/resources/codegen-resources/service-2.json b/services/sagemakeredge/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..502ead13478d --- /dev/null +++ b/services/sagemakeredge/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,217 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-09-23", + "endpointPrefix":"edge.sagemaker", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Sagemaker Edge Manager", + "serviceId":"Sagemaker Edge", + "signatureVersion":"v4", + "signingName":"sagemaker", + "uid":"sagemaker-edge-2020-09-23" + }, + "operations":{ + "GetDeviceRegistration":{ + "name":"GetDeviceRegistration", + "http":{ + "method":"POST", + "requestUri":"/GetDeviceRegistration" + }, + "input":{"shape":"GetDeviceRegistrationRequest"}, + "output":{"shape":"GetDeviceRegistrationResult"}, + "errors":[ + {"shape":"InternalServiceException"} + ], + "documentation":"

    Use to check if a device is registered with SageMaker Edge Manager.

    " + }, + "SendHeartbeat":{ + "name":"SendHeartbeat", + "http":{ + "method":"POST", + "requestUri":"/SendHeartbeat" + }, + "input":{"shape":"SendHeartbeatRequest"}, + "errors":[ + {"shape":"InternalServiceException"} + ], + "documentation":"

    Use to get the current status of devices registered on SageMaker Edge Manager.

    " + } + }, + "shapes":{ + "CacheTTLSeconds":{ + "type":"string", + "max":1000, + "min":1 + }, + "DeviceFleetName":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[a-zA-Z0-9](-*_*[a-zA-Z0-9])*$" + }, + "DeviceName":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[a-zA-Z0-9](-*_*[a-zA-Z0-9])*$" + }, + "DeviceRegistration":{ + "type":"string", + "max":1000, + "min":1 + }, + "Dimension":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9\\/])*$" + }, + "EdgeMetric":{ + "type":"structure", + "members":{ + "Dimension":{ + "shape":"Dimension", + "documentation":"

    The dimension of metrics published.

    " + }, + "MetricName":{ + "shape":"Metric", + "documentation":"

    Returns the name of the metric.

    " + }, + "Value":{ + "shape":"Value", + "documentation":"

    Returns the value of the metric.

    " + }, + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

    Timestamp of when the metric was requested.

    " + } + }, + "documentation":"

    Information required for edge device metrics.

    " + }, + "EdgeMetrics":{ + "type":"list", + "member":{"shape":"EdgeMetric"} + }, + "ErrorMessage":{"type":"string"}, + "GetDeviceRegistrationRequest":{ + "type":"structure", + "required":[ + "DeviceName", + "DeviceFleetName" + ], + "members":{ + "DeviceName":{ + "shape":"DeviceName", + "documentation":"

    The unique name of the device for which you want to get the registration status.

    " + }, + "DeviceFleetName":{ + "shape":"DeviceFleetName", + "documentation":"

    The name of the fleet that the device belongs to.

    " + } + } + }, + "GetDeviceRegistrationResult":{ + "type":"structure", + "members":{ + "DeviceRegistration":{ + "shape":"DeviceRegistration", + "documentation":"

    Describes whether the device is currently registered with SageMaker Edge Manager.

    " + }, + "CacheTTL":{ + "shape":"CacheTTLSeconds", + "documentation":"

    The amount of time, in seconds, that the registration status is stored on the device’s cache before it is refreshed.

    " + } + } + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An internal failure occurred. Try your request again. If the problem persists, contact AWS customer support.

    ", + "exception":true + }, + "Metric":{ + "type":"string", + "max":100, + "min":4, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + }, + "Model":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

    The name of the model.

    " + }, + "ModelVersion":{ + "shape":"Version", + "documentation":"

    The version of the model.

    " + }, + "LatestSampleTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of the last data sample taken.

    " + }, + "LatestInference":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of the last inference that was made.

    " + }, + "ModelMetrics":{ + "shape":"EdgeMetrics", + "documentation":"

    Information required for model metrics.

    " + } + }, + "documentation":"

    Information about a model deployed on an edge device that is registered with SageMaker Edge Manager.

    " + }, + "ModelName":{ + "type":"string", + "max":255, + "min":4, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + }, + "Models":{ + "type":"list", + "member":{"shape":"Model"} + }, + "SendHeartbeatRequest":{ + "type":"structure", + "required":[ + "AgentVersion", + "DeviceName", + "DeviceFleetName" + ], + "members":{ + "AgentMetrics":{ + "shape":"EdgeMetrics", + "documentation":"

    For internal use. Returns a list of SageMaker Edge Manager agent operating metrics.

    " + }, + "Models":{ + "shape":"Models", + "documentation":"

    Returns a list of models deployed on the device.

    " + }, + "AgentVersion":{ + "shape":"Version", + "documentation":"

    Returns the version of the agent.

    " + }, + "DeviceName":{ + "shape":"DeviceName", + "documentation":"

    The unique name of the device.

    " + }, + "DeviceFleetName":{ + "shape":"DeviceFleetName", + "documentation":"

    The name of the fleet that the device belongs to.

    " + } + } + }, + "Timestamp":{"type":"timestamp"}, + "Value":{"type":"double"}, + "Version":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9\\ \\_\\.]+" + } + }, + "documentation":"

    SageMaker Edge Manager dataplane service for communicating with active agents.
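
    As a rough illustration, the sketch below shows how an on-device agent might check its registration and send a heartbeat with the AWS SDK for Java v2. The class names (SagemakerEdgeClient, GetDeviceRegistrationRequest, SendHeartbeatRequest) assume the SDK's standard code generation for this model; the device and fleet names are placeholders.

        SagemakerEdgeClient edge = SagemakerEdgeClient.create();

        GetDeviceRegistrationResponse registration = edge.getDeviceRegistration(
                GetDeviceRegistrationRequest.builder()
                        .deviceName("factory-camera-01")   // placeholder device
                        .deviceFleetName("demo-fleet")     // placeholder fleet
                        .build());
        System.out.println("Registration: " + registration.deviceRegistration()
                + " (cached for " + registration.cacheTTL() + " seconds)");

        // Report agent status; Models and AgentMetrics are optional and omitted here.
        edge.sendHeartbeat(SendHeartbeatRequest.builder()
                .agentVersion("1.0")
                .deviceName("factory-camera-01")
                .deviceFleetName("demo-fleet")
                .build());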

    " +} diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml new file mode 100644 index 000000000000..df51a40b3fde --- /dev/null +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + sagemakerfeaturestoreruntime + AWS Java SDK :: Services :: Sage Maker Feature Store Runtime + The AWS Java SDK for Sage Maker Feature Store Runtime module holds the client classes that are used for + communicating with Sage Maker Feature Store Runtime. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.sagemakerfeaturestoreruntime + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/paginators-1.json b/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/service-2.json b/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..f7a71ea7f971 --- /dev/null +++ b/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,249 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-07-01", + "endpointPrefix":"featurestore-runtime.sagemaker", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon SageMaker Feature Store Runtime", + "serviceId":"SageMaker FeatureStore Runtime", + "signatureVersion":"v4", + "signingName":"sagemaker", + "uid":"sagemaker-featurestore-runtime-2020-07-01" + }, + "operations":{ + "DeleteRecord":{ + "name":"DeleteRecord", + "http":{ + "method":"DELETE", + "requestUri":"/FeatureGroup/{FeatureGroupName}" + }, + "input":{"shape":"DeleteRecordRequest"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"InternalFailure"}, + {"shape":"ServiceUnavailable"}, + {"shape":"AccessForbidden"} + ], + "documentation":"

    Deletes a Record from a FeatureGroup. A new record will show up in the OfflineStore when the DeleteRecord API is called. This record will have a value of True in the is_deleted column.

    " + }, + "GetRecord":{ + "name":"GetRecord", + "http":{ + "method":"GET", + "requestUri":"/FeatureGroup/{FeatureGroupName}" + }, + "input":{"shape":"GetRecordRequest"}, + "output":{"shape":"GetRecordResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"ResourceNotFound"}, + {"shape":"InternalFailure"}, + {"shape":"ServiceUnavailable"}, + {"shape":"AccessForbidden"} + ], + "documentation":"

    Use for OnlineStore serving from a FeatureStore. Only the latest records stored in the OnlineStore can be retrieved. If no Record with RecordIdentifierValue is found, then an empty result is returned.
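
    For illustration only, a minimal online-serving sketch with the AWS SDK for Java v2, assuming the SDK's standard generated names (SageMakerFeatureStoreRuntimeClient, GetRecordRequest); the feature group and record identifier are placeholders.

        SageMakerFeatureStoreRuntimeClient featureStore = SageMakerFeatureStoreRuntimeClient.create();

        GetRecordResponse response = featureStore.getRecord(GetRecordRequest.builder()
                .featureGroupName("customers")               // placeholder feature group
                .recordIdentifierValueAsString("customer-1") // placeholder record identifier
                .build());

        // An empty list means no record with this identifier exists in the OnlineStore.
        response.record().forEach(featureValue ->
                System.out.println(featureValue.featureName() + " = " + featureValue.valueAsString()));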

    " + }, + "PutRecord":{ + "name":"PutRecord", + "http":{ + "method":"PUT", + "requestUri":"/FeatureGroup/{FeatureGroupName}" + }, + "input":{"shape":"PutRecordRequest"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"InternalFailure"}, + {"shape":"ServiceUnavailable"}, + {"shape":"AccessForbidden"} + ], + "documentation":"

    Used for data ingestion into the FeatureStore. The PutRecord API writes to both the OnlineStore and OfflineStore. If the record is the latest record for the recordIdentifier, the record is written to both the OnlineStore and OfflineStore. If the record is a historic record, it is written only to the OfflineStore.

    " + } + }, + "shapes":{ + "AccessForbidden":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    You do not have permission to perform an action.

    ", + "error":{"httpStatusCode":403}, + "exception":true, + "synthetic":true + }, + "DeleteRecordRequest":{ + "type":"structure", + "required":[ + "FeatureGroupName", + "RecordIdentifierValueAsString", + "EventTime" + ], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

    The name of the feature group to delete the record from.

    ", + "location":"uri", + "locationName":"FeatureGroupName" + }, + "RecordIdentifierValueAsString":{ + "shape":"ValueAsString", + "documentation":"

    The value for the RecordIdentifier that uniquely identifies the record, in string format.

    ", + "location":"querystring", + "locationName":"RecordIdentifierValueAsString" + }, + "EventTime":{ + "shape":"ValueAsString", + "documentation":"

    Timestamp indicating when the deletion event occurred. EventTime can be used to query data at a certain point in time.

    ", + "location":"querystring", + "locationName":"EventTime" + } + } + }, + "FeatureGroupName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, + "FeatureName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9]([-_]*[a-zA-Z0-9])*" + }, + "FeatureNames":{ + "type":"list", + "member":{"shape":"FeatureName"}, + "min":1 + }, + "FeatureValue":{ + "type":"structure", + "required":[ + "FeatureName", + "ValueAsString" + ], + "members":{ + "FeatureName":{ + "shape":"FeatureName", + "documentation":"

    The name of a feature that a feature value corresponds to.

    " + }, + "ValueAsString":{ + "shape":"ValueAsString", + "documentation":"

    The value associated with a feature, in string format. Note that feature types can be String, Integral, or Fractional. This value represents all three types as a string.

    " + } + }, + "documentation":"

    The value associated with a feature.

    " + }, + "GetRecordRequest":{ + "type":"structure", + "required":[ + "FeatureGroupName", + "RecordIdentifierValueAsString" + ], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

    The name of the feature group from which you want to retrieve the record.

    ", + "location":"uri", + "locationName":"FeatureGroupName" + }, + "RecordIdentifierValueAsString":{ + "shape":"ValueAsString", + "documentation":"

    The value that corresponds to RecordIdentifier type and uniquely identifies the record in the FeatureGroup.

    ", + "location":"querystring", + "locationName":"RecordIdentifierValueAsString" + }, + "FeatureNames":{ + "shape":"FeatureNames", + "documentation":"

    List of names of Features to be retrieved. If not specified, the latest values for all the Features are returned.

    ", + "location":"querystring", + "locationName":"FeatureName" + } + } + }, + "GetRecordResponse":{ + "type":"structure", + "members":{ + "Record":{ + "shape":"Record", + "documentation":"

    The record you requested. A list of FeatureValues.

    " + } + } + }, + "InternalFailure":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    An internal failure occurred. Try your request again. If the problem persists, contact AWS customer support.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "synthetic":true + }, + "Message":{ + "type":"string", + "max":2048 + }, + "PutRecordRequest":{ + "type":"structure", + "required":[ + "FeatureGroupName", + "Record" + ], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

    The name of the feature group that you want to insert the record into.

    ", + "location":"uri", + "locationName":"FeatureGroupName" + }, + "Record":{ + "shape":"Record", + "documentation":"

    List of FeatureValues to be inserted. This is a full overwrite. If you want to update only a few of the feature values, do the following (a sketch follows this list):

    • Use GetRecord to retrieve the latest record.

    • Update the record returned from GetRecord.

    • Use PutRecord to update feature values.
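
    As a rough sketch of the read-modify-write steps above, using the AWS SDK for Java v2 and assuming the standard generated names (SageMakerFeatureStoreRuntimeClient, FeatureValue); the feature group, record identifier, and the "email" feature are placeholders.

        SageMakerFeatureStoreRuntimeClient featureStore = SageMakerFeatureStoreRuntimeClient.create();

        // 1. Retrieve the latest record.
        GetRecordResponse current = featureStore.getRecord(GetRecordRequest.builder()
                .featureGroupName("customers")               // placeholder feature group
                .recordIdentifierValueAsString("customer-1") // placeholder record identifier
                .build());

        // 2. Copy the existing feature values, replacing only the ones you want to change.
        java.util.List<FeatureValue> updated = new java.util.ArrayList<>();
        for (FeatureValue featureValue : current.record()) {
            if (featureValue.featureName().equals("email")) {   // hypothetical feature name
                updated.add(featureValue.toBuilder().valueAsString("new@example.com").build());
            } else {
                updated.add(featureValue);
            }
        }

        // 3. Write the full record back; PutRecord overwrites the entire record.
        featureStore.putRecord(PutRecordRequest.builder()
                .featureGroupName("customers")
                .record(updated)
                .build());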

    " + } + } + }, + "Record":{ + "type":"list", + "member":{"shape":"FeatureValue"}, + "min":1 + }, + "ResourceNotFound":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    A resource that is required to perform an action was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceUnavailable":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The service is currently unavailable.

    ", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true, + "synthetic":true + }, + "ValidationError":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    There was an error validating your request.

    ", + "error":{"httpStatusCode":400}, + "exception":true, + "synthetic":true + }, + "ValueAsString":{ + "type":"string", + "max":358400, + "pattern":".*" + } + }, + "documentation":"

    Contains all data plane API operations and data types for the Amazon SageMaker Feature Store. Use this API to put, delete, and retrieve (get) features from a feature store.

    Use the Amazon SageMaker API operations to configure your OnlineStore and OfflineStore features, and to create and manage feature groups.

    " +} diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index 14a253f7fd97..54624bd7d591 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + savingsplans + AWS Java SDK :: Services :: Savingsplans + The AWS Java SDK for Savingsplans module holds the client classes that are used for + communicating with Savingsplans. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.savingsplans + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/savingsplans/src/main/resources/codegen-resources/paginators-1.json b/services/savingsplans/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/savingsplans/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/savingsplans/src/main/resources/codegen-resources/service-2.json b/services/savingsplans/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..d08f4423923d --- /dev/null +++ b/services/savingsplans/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1173 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-06-28", + "endpointPrefix":"savingsplans", + "globalEndpoint":"savingsplans.amazonaws.com", + "jsonVersion":"1.0", + "protocol":"rest-json", + "serviceAbbreviation":"AWSSavingsPlans", + "serviceFullName":"AWS Savings Plans", + "serviceId":"savingsplans", + "signatureVersion":"v4", + "uid":"savingsplans-2019-06-28" + }, + "operations":{ + "CreateSavingsPlan":{ + "name":"CreateSavingsPlan", + "http":{ + "method":"POST", + "requestUri":"/CreateSavingsPlan" + }, + "input":{"shape":"CreateSavingsPlanRequest"}, + "output":{"shape":"CreateSavingsPlanResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Creates a Savings Plan.

    " + }, + "DeleteQueuedSavingsPlan":{ + "name":"DeleteQueuedSavingsPlan", + "http":{ + "method":"POST", + "requestUri":"/DeleteQueuedSavingsPlan" + }, + "input":{"shape":"DeleteQueuedSavingsPlanRequest"}, + "output":{"shape":"DeleteQueuedSavingsPlanResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Deletes the queued purchase for the specified Savings Plan.

    " + }, + "DescribeSavingsPlanRates":{ + "name":"DescribeSavingsPlanRates", + "http":{ + "method":"POST", + "requestUri":"/DescribeSavingsPlanRates" + }, + "input":{"shape":"DescribeSavingsPlanRatesRequest"}, + "output":{"shape":"DescribeSavingsPlanRatesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Describes the specified Savings Plans rates.

    " + }, + "DescribeSavingsPlans":{ + "name":"DescribeSavingsPlans", + "http":{ + "method":"POST", + "requestUri":"/DescribeSavingsPlans" + }, + "input":{"shape":"DescribeSavingsPlansRequest"}, + "output":{"shape":"DescribeSavingsPlansResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Describes the specified Savings Plans.

    " + }, + "DescribeSavingsPlansOfferingRates":{ + "name":"DescribeSavingsPlansOfferingRates", + "http":{ + "method":"POST", + "requestUri":"/DescribeSavingsPlansOfferingRates" + }, + "input":{"shape":"DescribeSavingsPlansOfferingRatesRequest"}, + "output":{"shape":"DescribeSavingsPlansOfferingRatesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes the specified Savings Plans offering rates.

    " + }, + "DescribeSavingsPlansOfferings":{ + "name":"DescribeSavingsPlansOfferings", + "http":{ + "method":"POST", + "requestUri":"/DescribeSavingsPlansOfferings" + }, + "input":{"shape":"DescribeSavingsPlansOfferingsRequest"}, + "output":{"shape":"DescribeSavingsPlansOfferingsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Describes the specified Savings Plans offerings.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/ListTagsForResource" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the tags for the specified resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/TagResource" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Adds the specified tags to the specified resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/UntagResource" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes the specified tags from the specified resource.

    " + } + }, + "shapes":{ + "Amount":{"type":"string"}, + "ClientToken":{"type":"string"}, + "CreateSavingsPlanRequest":{ + "type":"structure", + "required":[ + "savingsPlanOfferingId", + "commitment" + ], + "members":{ + "savingsPlanOfferingId":{ + "shape":"SavingsPlanOfferingId", + "documentation":"

    The ID of the offering.

    " + }, + "commitment":{ + "shape":"Amount", + "documentation":"

    The hourly commitment, in USD. This is a value between 0.001 and 1 million. You cannot specify more than three digits after the decimal point.

    " + }, + "upfrontPaymentAmount":{ + "shape":"Amount", + "documentation":"

    The up-front payment amount. This is a whole number between 50 and 99 percent of the total value of the Savings Plan. This parameter is supported only if the payment option is Partial Upfront.

    " + }, + "purchaseTime":{ + "shape":"DateTime", + "documentation":"

    The time at which to purchase the Savings Plan, in UTC format (YYYY-MM-DDTHH:MM:SSZ).

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    One or more tags.

    " + } + } + }, + "CreateSavingsPlanResponse":{ + "type":"structure", + "members":{ + "savingsPlanId":{ + "shape":"SavingsPlanId", + "documentation":"

    The ID of the Savings Plan.

    " + } + } + }, + "CurrencyCode":{ + "type":"string", + "enum":[ + "CNY", + "USD" + ] + }, + "CurrencyList":{ + "type":"list", + "member":{"shape":"CurrencyCode"} + }, + "DateTime":{"type":"timestamp"}, + "DeleteQueuedSavingsPlanRequest":{ + "type":"structure", + "required":["savingsPlanId"], + "members":{ + "savingsPlanId":{ + "shape":"SavingsPlanId", + "documentation":"

    The ID of the Savings Plan.

    " + } + } + }, + "DeleteQueuedSavingsPlanResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeSavingsPlanRatesRequest":{ + "type":"structure", + "required":["savingsPlanId"], + "members":{ + "savingsPlanId":{ + "shape":"SavingsPlanId", + "documentation":"

    The ID of the Savings Plan.

    " + }, + "filters":{ + "shape":"SavingsPlanRateFilterList", + "documentation":"

    The filters.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next page of results.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.

    " + } + } + }, + "DescribeSavingsPlanRatesResponse":{ + "type":"structure", + "members":{ + "savingsPlanId":{ + "shape":"SavingsPlanId", + "documentation":"

    The ID of the Savings Plan.

    " + }, + "searchResults":{ + "shape":"SavingsPlanRateList", + "documentation":"

    Information about the Savings Plans rates.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "DescribeSavingsPlansOfferingRatesRequest":{ + "type":"structure", + "members":{ + "savingsPlanOfferingIds":{ + "shape":"UUIDs", + "documentation":"

    The IDs of the offerings.

    " + }, + "savingsPlanPaymentOptions":{ + "shape":"SavingsPlanPaymentOptionList", + "documentation":"

    The payment options.

    " + }, + "savingsPlanTypes":{ + "shape":"SavingsPlanTypeList", + "documentation":"

    The plan types.

    " + }, + "products":{ + "shape":"SavingsPlanProductTypeList", + "documentation":"

    The AWS products.

    " + }, + "serviceCodes":{ + "shape":"SavingsPlanRateServiceCodeList", + "documentation":"

    The services.

    " + }, + "usageTypes":{ + "shape":"SavingsPlanRateUsageTypeList", + "documentation":"

    The usage details of the line item in the billing report.

    " + }, + "operations":{ + "shape":"SavingsPlanRateOperationList", + "documentation":"

    The specific AWS operation for the line item in the billing report.

    " + }, + "filters":{ + "shape":"SavingsPlanOfferingRateFiltersList", + "documentation":"

    The filters.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next page of results.

    " + }, + "maxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.

    " + } + } + }, + "DescribeSavingsPlansOfferingRatesResponse":{ + "type":"structure", + "members":{ + "searchResults":{ + "shape":"SavingsPlanOfferingRatesList", + "documentation":"

    Information about the Savings Plans offering rates.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "DescribeSavingsPlansOfferingsRequest":{ + "type":"structure", + "members":{ + "offeringIds":{ + "shape":"UUIDs", + "documentation":"

    The IDs of the offerings.

    " + }, + "paymentOptions":{ + "shape":"SavingsPlanPaymentOptionList", + "documentation":"

    The payment options.

    " + }, + "productType":{ + "shape":"SavingsPlanProductType", + "documentation":"

    The product type.

    " + }, + "planTypes":{ + "shape":"SavingsPlanTypeList", + "documentation":"

    The plan type.

    " + }, + "durations":{ + "shape":"DurationsList", + "documentation":"

    The durations, in seconds.

    " + }, + "currencies":{ + "shape":"CurrencyList", + "documentation":"

    The currencies.

    " + }, + "descriptions":{ + "shape":"SavingsPlanDescriptionsList", + "documentation":"

    The descriptions.

    " + }, + "serviceCodes":{ + "shape":"SavingsPlanServiceCodeList", + "documentation":"

    The services.

    " + }, + "usageTypes":{ + "shape":"SavingsPlanUsageTypeList", + "documentation":"

    The usage details of the line item in the billing report.

    " + }, + "operations":{ + "shape":"SavingsPlanOperationList", + "documentation":"

    The specific AWS operation for the line item in the billing report.

    " + }, + "filters":{ + "shape":"SavingsPlanOfferingFiltersList", + "documentation":"

    The filters.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next page of results.

    " + }, + "maxResults":{ + "shape":"PageSize", + "documentation":"

    The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.

    " + } + } + }, + "DescribeSavingsPlansOfferingsResponse":{ + "type":"structure", + "members":{ + "searchResults":{ + "shape":"SavingsPlanOfferingsList", + "documentation":"

    Information about the Savings Plans offerings.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "DescribeSavingsPlansRequest":{ + "type":"structure", + "members":{ + "savingsPlanArns":{ + "shape":"SavingsPlanArnList", + "documentation":"

    The Amazon Resource Names (ARNs) of the Savings Plans.

    " + }, + "savingsPlanIds":{ + "shape":"SavingsPlanIdList", + "documentation":"

    The IDs of the Savings Plans.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next page of results.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.

    " + }, + "states":{ + "shape":"SavingsPlanStateList", + "documentation":"

    The states.

    " + }, + "filters":{ + "shape":"SavingsPlanFilterList", + "documentation":"

    The filters.

    " + } + } + }, + "DescribeSavingsPlansResponse":{ + "type":"structure", + "members":{ + "savingsPlans":{ + "shape":"SavingsPlanList", + "documentation":"

    Information about the Savings Plans.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + } + }, + "DurationsList":{ + "type":"list", + "member":{"shape":"SavingsPlansDuration"} + }, + "EC2InstanceFamily":{"type":"string"}, + "FilterValuesList":{ + "type":"list", + "member":{"shape":"JsonSafeFilterValueString"} + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    An unexpected error occurred.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "JsonSafeFilterValueString":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_ \\/.\\:\\-\\(\\)]+$" + }, + "ListOfStrings":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"SavingsPlanArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    Information about the tags.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "PageSize":{ + "type":"integer", + "max":1000, + "min":0 + }, + "PaginationToken":{ + "type":"string", + "max":1024, + "pattern":"^[A-Za-z0-9/=\\+]+$" + }, + "ParentSavingsPlanOffering":{ + "type":"structure", + "members":{ + "offeringId":{ + "shape":"UUID", + "documentation":"

    The ID of the offering.

    " + }, + "paymentOption":{ + "shape":"SavingsPlanPaymentOption", + "documentation":"

    The payment option.

    " + }, + "planType":{ + "shape":"SavingsPlanType", + "documentation":"

    The plan type.

    " + }, + "durationSeconds":{ + "shape":"SavingsPlansDuration", + "documentation":"

    The duration, in seconds.

    " + }, + "currency":{ + "shape":"CurrencyCode", + "documentation":"

    The currency.

    " + }, + "planDescription":{ + "shape":"SavingsPlanDescription", + "documentation":"

    The description.

    " + } + }, + "documentation":"

    Information about a Savings Plan offering.

    " + }, + "Region":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The specified resource was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SavingsPlan":{ + "type":"structure", + "members":{ + "offeringId":{ + "shape":"SavingsPlanOfferingId", + "documentation":"

    The ID of the offering.

    " + }, + "savingsPlanId":{ + "shape":"SavingsPlanId", + "documentation":"

    The ID of the Savings Plan.

    " + }, + "savingsPlanArn":{ + "shape":"SavingsPlanArn", + "documentation":"

    The Amazon Resource Name (ARN) of the Savings Plan.

    " + }, + "description":{ + "shape":"String", + "documentation":"

    The description.

    " + }, + "start":{ + "shape":"String", + "documentation":"

    The start time.

    " + }, + "end":{ + "shape":"String", + "documentation":"

    The end time.

    " + }, + "state":{ + "shape":"SavingsPlanState", + "documentation":"

    The state.

    " + }, + "region":{ + "shape":"Region", + "documentation":"

    The AWS Region.

    " + }, + "ec2InstanceFamily":{ + "shape":"EC2InstanceFamily", + "documentation":"

    The EC2 instance family.

    " + }, + "savingsPlanType":{ + "shape":"SavingsPlanType", + "documentation":"

    The plan type.

    " + }, + "paymentOption":{ + "shape":"SavingsPlanPaymentOption", + "documentation":"

    The payment option.

    " + }, + "productTypes":{ + "shape":"SavingsPlanProductTypeList", + "documentation":"

    The product types.

    " + }, + "currency":{ + "shape":"CurrencyCode", + "documentation":"

    The currency.

    " + }, + "commitment":{ + "shape":"Amount", + "documentation":"

    The hourly commitment, in USD.

    " + }, + "upfrontPaymentAmount":{ + "shape":"Amount", + "documentation":"

    The up-front payment amount.

    " + }, + "recurringPaymentAmount":{ + "shape":"Amount", + "documentation":"

    The recurring payment amount.

    " + }, + "termDurationInSeconds":{ + "shape":"TermDurationInSeconds", + "documentation":"

    The duration of the term, in seconds.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    One or more tags.

    " + } + }, + "documentation":"

    Information about a Savings Plan.

    " + }, + "SavingsPlanArn":{ + "type":"string", + "pattern":"arn:aws:[a-z]+:([a-z]{2}-[a-z]+-\\d{1}|):(\\d{12}):savingsplan\\/([0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12})$" + }, + "SavingsPlanArnList":{ + "type":"list", + "member":{"shape":"SavingsPlanArn"}, + "max":100 + }, + "SavingsPlanDescription":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_\\- ]+$" + }, + "SavingsPlanDescriptionsList":{ + "type":"list", + "member":{"shape":"SavingsPlanDescription"} + }, + "SavingsPlanFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"SavingsPlansFilterName", + "documentation":"

    The filter name.

    " + }, + "values":{ + "shape":"ListOfStrings", + "documentation":"

    The filter values.

    " + } + }, + "documentation":"

    Information about a filter.

    " + }, + "SavingsPlanFilterList":{ + "type":"list", + "member":{"shape":"SavingsPlanFilter"} + }, + "SavingsPlanId":{"type":"string"}, + "SavingsPlanIdList":{ + "type":"list", + "member":{"shape":"SavingsPlanId"} + }, + "SavingsPlanList":{ + "type":"list", + "member":{"shape":"SavingsPlan"} + }, + "SavingsPlanOffering":{ + "type":"structure", + "members":{ + "offeringId":{ + "shape":"UUID", + "documentation":"

    The ID of the offering.

    " + }, + "productTypes":{ + "shape":"SavingsPlanProductTypeList", + "documentation":"

    The product type.

    " + }, + "planType":{ + "shape":"SavingsPlanType", + "documentation":"

    The plan type.

    " + }, + "description":{ + "shape":"SavingsPlanDescription", + "documentation":"

    The description.

    " + }, + "paymentOption":{ + "shape":"SavingsPlanPaymentOption", + "documentation":"

    The payment option.

    " + }, + "durationSeconds":{ + "shape":"SavingsPlansDuration", + "documentation":"

    The duration, in seconds.

    " + }, + "currency":{ + "shape":"CurrencyCode", + "documentation":"

    The currency.

    " + }, + "serviceCode":{ + "shape":"SavingsPlanServiceCode", + "documentation":"

    The service.

    " + }, + "usageType":{ + "shape":"SavingsPlanUsageType", + "documentation":"

    The usage details of the line item in the billing report.

    " + }, + "operation":{ + "shape":"SavingsPlanOperation", + "documentation":"

    The specific AWS operation for the line item in the billing report.

    " + }, + "properties":{ + "shape":"SavingsPlanOfferingPropertyList", + "documentation":"

    The properties.

    " + } + }, + "documentation":"

    Information about a Savings Plan offering.

    " + }, + "SavingsPlanOfferingFilterAttribute":{ + "type":"string", + "enum":[ + "region", + "instanceFamily" + ] + }, + "SavingsPlanOfferingFilterElement":{ + "type":"structure", + "members":{ + "name":{ + "shape":"SavingsPlanOfferingFilterAttribute", + "documentation":"

    The filter name.

    " + }, + "values":{ + "shape":"FilterValuesList", + "documentation":"

    The filter values.

    " + } + }, + "documentation":"

    Information about a filter.

    " + }, + "SavingsPlanOfferingFiltersList":{ + "type":"list", + "member":{"shape":"SavingsPlanOfferingFilterElement"} + }, + "SavingsPlanOfferingId":{"type":"string"}, + "SavingsPlanOfferingProperty":{ + "type":"structure", + "members":{ + "name":{ + "shape":"SavingsPlanOfferingPropertyKey", + "documentation":"

    The property name.

    " + }, + "value":{ + "shape":"JsonSafeFilterValueString", + "documentation":"

    The property value.

    " + } + }, + "documentation":"

    Information about a property.

    " + }, + "SavingsPlanOfferingPropertyKey":{ + "type":"string", + "enum":[ + "region", + "instanceFamily" + ] + }, + "SavingsPlanOfferingPropertyList":{ + "type":"list", + "member":{"shape":"SavingsPlanOfferingProperty"} + }, + "SavingsPlanOfferingRate":{ + "type":"structure", + "members":{ + "savingsPlanOffering":{ + "shape":"ParentSavingsPlanOffering", + "documentation":"

    The Savings Plan offering.

    " + }, + "rate":{ + "shape":"SavingsPlanRatePricePerUnit", + "documentation":"

    The Savings Plan rate.

    " + }, + "unit":{ + "shape":"SavingsPlanRateUnit", + "documentation":"

    The unit.

    " + }, + "productType":{ + "shape":"SavingsPlanProductType", + "documentation":"

    The product type.

    " + }, + "serviceCode":{ + "shape":"SavingsPlanRateServiceCode", + "documentation":"

    The service.

    " + }, + "usageType":{ + "shape":"SavingsPlanRateUsageType", + "documentation":"

    The usage details of the line item in the billing report.

    " + }, + "operation":{ + "shape":"SavingsPlanRateOperation", + "documentation":"

    The specific AWS operation for the line item in the billing report.

    " + }, + "properties":{ + "shape":"SavingsPlanOfferingRatePropertyList", + "documentation":"

    The properties.

    " + } + }, + "documentation":"

    Information about a Savings Plan offering rate.

    " + }, + "SavingsPlanOfferingRateFilterElement":{ + "type":"structure", + "members":{ + "name":{ + "shape":"SavingsPlanRateFilterAttribute", + "documentation":"

    The filter name.

    " + }, + "values":{ + "shape":"FilterValuesList", + "documentation":"

    The filter values.

    " + } + }, + "documentation":"

    Information about a filter.

    " + }, + "SavingsPlanOfferingRateFiltersList":{ + "type":"list", + "member":{"shape":"SavingsPlanOfferingRateFilterElement"} + }, + "SavingsPlanOfferingRateProperty":{ + "type":"structure", + "members":{ + "name":{ + "shape":"JsonSafeFilterValueString", + "documentation":"

    The property name.

    " + }, + "value":{ + "shape":"JsonSafeFilterValueString", + "documentation":"

    The property value.

    " + } + }, + "documentation":"

    Information about a property.

    " + }, + "SavingsPlanOfferingRatePropertyList":{ + "type":"list", + "member":{"shape":"SavingsPlanOfferingRateProperty"} + }, + "SavingsPlanOfferingRatesList":{ + "type":"list", + "member":{"shape":"SavingsPlanOfferingRate"} + }, + "SavingsPlanOfferingsList":{ + "type":"list", + "member":{"shape":"SavingsPlanOffering"} + }, + "SavingsPlanOperation":{ + "type":"string", + "max":255, + "pattern":"^[a-zA-Z0-9_ \\/.:-]*$" + }, + "SavingsPlanOperationList":{ + "type":"list", + "member":{"shape":"SavingsPlanOperation"} + }, + "SavingsPlanPaymentOption":{ + "type":"string", + "enum":[ + "All Upfront", + "Partial Upfront", + "No Upfront" + ] + }, + "SavingsPlanPaymentOptionList":{ + "type":"list", + "member":{"shape":"SavingsPlanPaymentOption"} + }, + "SavingsPlanProductType":{ + "type":"string", + "enum":[ + "EC2", + "Fargate", + "Lambda" + ] + }, + "SavingsPlanProductTypeList":{ + "type":"list", + "member":{"shape":"SavingsPlanProductType"} + }, + "SavingsPlanRate":{ + "type":"structure", + "members":{ + "rate":{ + "shape":"Amount", + "documentation":"

    The rate.

    " + }, + "currency":{ + "shape":"CurrencyCode", + "documentation":"

    The currency.

    " + }, + "unit":{ + "shape":"SavingsPlanRateUnit", + "documentation":"

    The unit.

    " + }, + "productType":{ + "shape":"SavingsPlanProductType", + "documentation":"

    The product type.

    " + }, + "serviceCode":{ + "shape":"SavingsPlanRateServiceCode", + "documentation":"

    The service.

    " + }, + "usageType":{ + "shape":"SavingsPlanRateUsageType", + "documentation":"

    The usage details of the line item in the billing report.

    " + }, + "operation":{ + "shape":"SavingsPlanRateOperation", + "documentation":"

    The specific AWS operation for the line item in the billing report.

    " + }, + "properties":{ + "shape":"SavingsPlanRatePropertyList", + "documentation":"

    The properties.

    " + } + }, + "documentation":"

    Information about a Savings Plan rate.

    " + }, + "SavingsPlanRateFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"SavingsPlanRateFilterName", + "documentation":"

    The filter name.

    " + }, + "values":{ + "shape":"ListOfStrings", + "documentation":"

    The filter values.

    " + } + }, + "documentation":"

    Information about a filter.

    " + }, + "SavingsPlanRateFilterAttribute":{ + "type":"string", + "enum":[ + "region", + "instanceFamily", + "instanceType", + "productDescription", + "tenancy", + "productId" + ] + }, + "SavingsPlanRateFilterList":{ + "type":"list", + "member":{"shape":"SavingsPlanRateFilter"} + }, + "SavingsPlanRateFilterName":{ + "type":"string", + "enum":[ + "region", + "instanceType", + "productDescription", + "tenancy", + "productType", + "serviceCode", + "usageType", + "operation" + ] + }, + "SavingsPlanRateList":{ + "type":"list", + "member":{"shape":"SavingsPlanRate"} + }, + "SavingsPlanRateOperation":{ + "type":"string", + "max":255, + "pattern":"^[a-zA-Z0-9_ \\/.:-]*$" + }, + "SavingsPlanRateOperationList":{ + "type":"list", + "member":{"shape":"SavingsPlanRateOperation"} + }, + "SavingsPlanRatePricePerUnit":{"type":"string"}, + "SavingsPlanRateProperty":{ + "type":"structure", + "members":{ + "name":{ + "shape":"SavingsPlanRatePropertyKey", + "documentation":"

    The property name.

    " + }, + "value":{ + "shape":"JsonSafeFilterValueString", + "documentation":"

    The property value.

    " + } + }, + "documentation":"

    Information about a property.

    " + }, + "SavingsPlanRatePropertyKey":{ + "type":"string", + "enum":[ + "region", + "instanceType", + "instanceFamily", + "productDescription", + "tenancy" + ] + }, + "SavingsPlanRatePropertyList":{ + "type":"list", + "member":{"shape":"SavingsPlanRateProperty"} + }, + "SavingsPlanRateServiceCode":{ + "type":"string", + "enum":[ + "AmazonEC2", + "AmazonECS", + "AWSLambda" + ] + }, + "SavingsPlanRateServiceCodeList":{ + "type":"list", + "member":{"shape":"SavingsPlanRateServiceCode"} + }, + "SavingsPlanRateUnit":{ + "type":"string", + "enum":[ + "Hrs", + "Lambda-GB-Second", + "Request" + ] + }, + "SavingsPlanRateUsageType":{ + "type":"string", + "max":255, + "pattern":"^[a-zA-Z0-9_ \\/.:-]+$" + }, + "SavingsPlanRateUsageTypeList":{ + "type":"list", + "member":{"shape":"SavingsPlanRateUsageType"} + }, + "SavingsPlanServiceCode":{ + "type":"string", + "max":255, + "pattern":"^[a-zA-Z]+$" + }, + "SavingsPlanServiceCodeList":{ + "type":"list", + "member":{"shape":"SavingsPlanServiceCode"} + }, + "SavingsPlanState":{ + "type":"string", + "enum":[ + "payment-pending", + "payment-failed", + "active", + "retired", + "queued", + "queued-deleted" + ] + }, + "SavingsPlanStateList":{ + "type":"list", + "member":{"shape":"SavingsPlanState"} + }, + "SavingsPlanType":{ + "type":"string", + "enum":[ + "Compute", + "EC2Instance" + ] + }, + "SavingsPlanTypeList":{ + "type":"list", + "member":{"shape":"SavingsPlanType"} + }, + "SavingsPlanUsageType":{ + "type":"string", + "max":255, + "pattern":"^[a-zA-Z0-9_ \\/.:-]+$" + }, + "SavingsPlanUsageTypeList":{ + "type":"list", + "member":{"shape":"SavingsPlanUsageType"} + }, + "SavingsPlansDuration":{ + "type":"long", + "min":0 + }, + "SavingsPlansFilterName":{ + "type":"string", + "enum":[ + "region", + "ec2-instance-family", + "commitment", + "upfront", + "term", + "savings-plan-type", + "payment-option", + "start", + "end" + ] + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    A service quota has been exceeded.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "String":{"type":"string"}, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"SavingsPlanArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    One or more tags. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{"type":"string"}, + "TermDurationInSeconds":{"type":"long"}, + "UUID":{ + "type":"string", + "pattern":"^(([0-9a-f]+)(-?))+$" + }, + "UUIDs":{ + "type":"list", + "member":{"shape":"UUID"} + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"SavingsPlanArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tag keys.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    One of the input parameters is not valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

    Savings Plans are a pricing model that offers significant savings on AWS usage (for example, on Amazon EC2 instances). You commit to a consistent amount of usage, in USD per hour, for a term of 1 or 3 years, and receive a lower price for that usage. For more information, see the AWS Savings Plans User Guide.
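
    For illustration only, a minimal sketch that lists active Savings Plans with the AWS SDK for Java v2, assuming the SDK's standard generated names (SavingsplansClient, DescribeSavingsPlansRequest, SavingsPlanState).

        SavingsplansClient savingsPlans = SavingsplansClient.create();

        DescribeSavingsPlansResponse response = savingsPlans.describeSavingsPlans(
                DescribeSavingsPlansRequest.builder()
                        .states(SavingsPlanState.ACTIVE)   // only plans that are currently active
                        .maxResults(100)
                        .build());

        response.savingsPlans().forEach(plan ->
                System.out.println(plan.savingsPlanId() + ": " + plan.commitment() + " "
                        + plan.currencyAsString() + " per hour, " + plan.savingsPlanTypeAsString()));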

    " +} diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml new file mode 100644 index 000000000000..c48aa90937e8 --- /dev/null +++ b/services/schemas/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + schemas + AWS Java SDK :: Services :: Schemas + The AWS Java SDK for Schemas module holds the client classes that are used for + communicating with Schemas. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.schemas + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/schemas/src/main/resources/codegen-resources/paginators-1.json b/services/schemas/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..ef2fe19d1957 --- /dev/null +++ b/services/schemas/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,34 @@ +{ + "pagination": { + "ListDiscoverers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "Limit", + "result_key": "Discoverers" + }, + "ListRegistries": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "Limit", + "result_key": "Registries" + }, + "ListSchemaVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "Limit", + "result_key": "SchemaVersions" + }, + "ListSchemas": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "Limit", + "result_key": "Schemas" + }, + "SearchSchemas": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "Limit", + "result_key": "Schemas" + } + } +} diff --git a/services/schemas/src/main/resources/codegen-resources/service-2.json b/services/schemas/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..dfdcbff38c62 --- /dev/null +++ b/services/schemas/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3494 @@ +{ + "metadata": { + "apiVersion": "2019-12-02", + "endpointPrefix": "schemas", + "signingName": "schemas", + "serviceFullName": "Schemas", + "serviceId": "schemas", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "schemas-2019-12-02", + "signatureVersion": "v4" + }, + "operations": { + "CreateDiscoverer": { + "name": "CreateDiscoverer", + "http": { + "method": "POST", + "requestUri": "/v1/discoverers", + "responseCode": 201 + }, + "input": { + "shape": "CreateDiscovererRequest" + }, + "output": { + "shape": "CreateDiscovererResponse", + "documentation": "

    201 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + }, + { + "shape": "ConflictException", + "documentation": "

    409 response

    " + } + ], + "documentation": "

    Creates a discoverer.

    " + }, + "CreateRegistry": { + "name": "CreateRegistry", + "http": { + "method": "POST", + "requestUri": "/v1/registries/name/{registryName}", + "responseCode": 201 + }, + "input": { + "shape": "CreateRegistryRequest" + }, + "output": { + "shape": "CreateRegistryResponse", + "documentation": "

    201 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + }, + { + "shape": "ConflictException", + "documentation": "

    409 response

    " + } + ], + "documentation": "

    Creates a registry.

    " + }, + "CreateSchema": { + "name": "CreateSchema", + "http": { + "method": "POST", + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}", + "responseCode": 201 + }, + "input": { + "shape": "CreateSchemaRequest" + }, + "output": { + "shape": "CreateSchemaResponse", + "documentation": "

    201 response

    " + }, + "errors": [ + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    Creates a schema definition.

    Inactive schemas will be deleted after two years.

    " + }, + "DeleteDiscoverer": { + "name": "DeleteDiscoverer", + "http": { + "method": "DELETE", + "requestUri": "/v1/discoverers/id/{discovererId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteDiscovererRequest" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Deletes a discoverer.

    " + }, + "DeleteRegistry": { + "name": "DeleteRegistry", + "http": { + "method": "DELETE", + "requestUri": "/v1/registries/name/{registryName}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteRegistryRequest" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Deletes a registry.

    " + }, + "DeleteResourcePolicy": { + "name": "DeleteResourcePolicy", + "http": { + "method": "DELETE", + "requestUri": "/v1/policy", + "responseCode": 204 + }, + "input": { + "shape": "DeleteResourcePolicyRequest" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Delete the resource-based policy attached to the specified registry.

    " + }, + "DeleteSchema": { + "name": "DeleteSchema", + "http": { + "method": "DELETE", + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteSchemaRequest" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Delete a schema definition.

    " + }, + "DeleteSchemaVersion": { + "name": "DeleteSchemaVersion", + "http": { + "method": "DELETE", + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}/version/{schemaVersion}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteSchemaVersionRequest" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Delete the schema version definition.

    " + }, + "DescribeCodeBinding": { + "name": "DescribeCodeBinding", + "http": { + "method": "GET", + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeCodeBindingRequest" + }, + "output": { + "shape": "DescribeCodeBindingResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + } + ], + "documentation": "

    Describe the code binding URI.

    " + }, + "DescribeDiscoverer": { + "name": "DescribeDiscoverer", + "http": { + "method": "GET", + "requestUri": "/v1/discoverers/id/{discovererId}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeDiscovererRequest" + }, + "output": { + "shape": "DescribeDiscovererResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Describes the discoverer.

    " + }, + "DescribeRegistry": { + "name": "DescribeRegistry", + "http": { + "method": "GET", + "requestUri": "/v1/registries/name/{registryName}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeRegistryRequest" + }, + "output": { + "shape": "DescribeRegistryResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Describes the registry.

    " + }, + "DescribeSchema": { + "name": "DescribeSchema", + "http": { + "method": "GET", + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeSchemaRequest" + }, + "output": { + "shape": "DescribeSchemaResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Retrieve the schema definition.

    " + }, + "ExportSchema": { + "name": "ExportSchema", + "http": { + "method": "GET", + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}/export", + "responseCode": 200 + }, + "input": { + "shape": "ExportSchemaRequest" + }, + "output": { + "shape": "ExportSchemaResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + } + ] + }, + "GetCodeBindingSource": { + "name": "GetCodeBindingSource", + "http": { + "method": "GET", + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}/source", + "responseCode": 200 + }, + "input": { + "shape": "GetCodeBindingSourceRequest" + }, + "output": { + "shape": "GetCodeBindingSourceResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + } + ], + "documentation": "

    Get the code binding source URI.

    " + }, + "GetDiscoveredSchema": { + "name": "GetDiscoveredSchema", + "http": { + "method": "POST", + "requestUri": "/v1/discover", + "responseCode": 200 + }, + "input": { + "shape": "GetDiscoveredSchemaRequest" + }, + "output": { + "shape": "GetDiscoveredSchemaResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    Get the discovered schema that was generated based on sampled events.

    " + }, + "GetResourcePolicy": { + "name": "GetResourcePolicy", + "http": { + "method": "GET", + "requestUri": "/v1/policy", + "responseCode": 200 + }, + "input": { + "shape": "GetResourcePolicyRequest" + }, + "output": { + "shape": "GetResourcePolicyResponse", + "documentation": "

    Get Resource-Based Policy Response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Retrieves the resource-based policy attached to a given registry.

    " + }, + "ListDiscoverers": { + "name": "ListDiscoverers", + "http": { + "method": "GET", + "requestUri": "/v1/discoverers", + "responseCode": 200 + }, + "input": { + "shape": "ListDiscoverersRequest" + }, + "output": { + "shape": "ListDiscoverersResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    List the discoverers.

    " + }, + "ListRegistries": { + "name": "ListRegistries", + "http": { + "method": "GET", + "requestUri": "/v1/registries", + "responseCode": 200 + }, + "input": { + "shape": "ListRegistriesRequest" + }, + "output": { + "shape": "ListRegistriesResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    List the registries.

    " + }, + "ListSchemaVersions": { + "name": "ListSchemaVersions", + "http": { + "method": "GET", + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}/versions", + "responseCode": 200 + }, + "input": { + "shape": "ListSchemaVersionsRequest" + }, + "output": { + "shape": "ListSchemaVersionsResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Provides a list of the schema versions and related information.

    " + }, + "ListSchemas": { + "name": "ListSchemas", + "http": { + "method": "GET", + "requestUri": "/v1/registries/name/{registryName}/schemas", + "responseCode": 200 + }, + "input": { + "shape": "ListSchemasRequest" + }, + "output": { + "shape": "ListSchemasResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    List the schemas.

    " + }, + "ListTagsForResource": { + "name": "ListTagsForResource", + "http": { + "method": "GET", + "requestUri": "/tags/{resource-arn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceRequest" + }, + "output": { + "shape": "ListTagsForResourceResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    Get tags for a resource.

    " + }, + "PutCodeBinding": { + "name": "PutCodeBinding", + "http": { + "method": "POST", + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}", + "responseCode": 202 + }, + "input": { + "shape": "PutCodeBindingRequest" + }, + "output": { + "shape": "PutCodeBindingResponse", + "documentation": "

    202 response

    " + }, + "errors": [ + { + "shape": "GoneException", + "documentation": "

    410 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    429 response

    " + } + ], + "documentation": "

    Put code binding URI.

    " + }, + "PutResourcePolicy": { + "name": "PutResourcePolicy", + "http": { + "method": "PUT", + "requestUri": "/v1/policy", + "responseCode": 200 + }, + "input": { + "shape": "PutResourcePolicyRequest" + }, + "output": { + "shape": "PutResourcePolicyResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "PreconditionFailedException", + "documentation": "

    412 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Adds a resource-based policy to a registry.

    " + }, + "SearchSchemas": { + "name": "SearchSchemas", + "http": { + "method": "GET", + "requestUri": "/v1/registries/name/{registryName}/schemas/search", + "responseCode": 200 + }, + "input": { + "shape": "SearchSchemasRequest" + }, + "output": { + "shape": "SearchSchemasResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    Search the schemas.

    " + }, + "StartDiscoverer": { + "name": "StartDiscoverer", + "http": { + "method": "POST", + "requestUri": "/v1/discoverers/id/{discovererId}/start", + "responseCode": 200 + }, + "input": { + "shape": "StartDiscovererRequest" + }, + "output": { + "shape": "StartDiscovererResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Starts the discoverer.

    " + }, + "StopDiscoverer": { + "name": "StopDiscoverer", + "http": { + "method": "POST", + "requestUri": "/v1/discoverers/id/{discovererId}/stop", + "responseCode": 200 + }, + "input": { + "shape": "StopDiscovererRequest" + }, + "output": { + "shape": "StopDiscovererResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Stops the discoverer.

    " + }, + "TagResource": { + "name": "TagResource", + "http": { + "method": "POST", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "TagResourceRequest" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    Add tags to a resource.

    " + }, + "UntagResource": { + "name": "UntagResource", + "http": { + "method": "DELETE", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + } + ], + "documentation": "

    Removes tags from a resource.

    " + }, + "UpdateDiscoverer": { + "name": "UpdateDiscoverer", + "http": { + "method": "PUT", + "requestUri": "/v1/discoverers/id/{discovererId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateDiscovererRequest" + }, + "output": { + "shape": "UpdateDiscovererResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Updates the discoverer.

    " + }, + "UpdateRegistry": { + "name": "UpdateRegistry", + "http": { + "method": "PUT", + "requestUri": "/v1/registries/name/{registryName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateRegistryRequest" + }, + "output": { + "shape": "UpdateRegistryResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "UnauthorizedException", + "documentation": "

    401 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Updates a registry.

    " + }, + "UpdateSchema": { + "name": "UpdateSchema", + "http": { + "method": "PUT", + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateSchemaRequest" + }, + "output": { + "shape": "UpdateSchemaResponse", + "documentation": "

    200 response

    " + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

    400 response

    " + }, + { + "shape": "InternalServerErrorException", + "documentation": "

    500 response

    " + }, + { + "shape": "ForbiddenException", + "documentation": "

    403 response

    " + }, + { + "shape": "NotFoundException", + "documentation": "

    404 response

    " + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

    503 response

    " + } + ], + "documentation": "

    Updates the schema definition.

    Inactive schemas will be deleted after two years.

    " + } + }, + "shapes": { + "BadRequestException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 400 + } + }, + "CodeBindingOutput": { + "type": "structure", + "members": { + "CreationDate": { + "shape": "__timestampIso8601", + "documentation": "

    The time and date that the code binding was created.

    " + }, + "LastModified": { + "shape": "__timestampIso8601", + "documentation": "

    The date and time that code bindings were modified.

    " + }, + "SchemaVersion": { + "shape": "__string", + "documentation": "

    The version number of the schema.

    " + }, + "Status": { + "shape": "CodeGenerationStatus", + "documentation": "

    The current status of code binding generation.

    " + } + } + }, + "CodeGenerationStatus": { + "type": "string", + "enum": [ + "CREATE_IN_PROGRESS", + "CREATE_COMPLETE", + "CREATE_FAILED" + ] + }, + "ConflictException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 409 + } + }, + "CreateDiscovererInput": { + "type": "structure", + "members": { + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    A description for the discoverer.

    " + }, + "SourceArn": { + "shape": "__stringMin20Max1600", + "documentation": "

    The ARN of the event bus.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + } + }, + "required": [ + "SourceArn" + ] + }, + "CreateDiscovererRequest": { + "type": "structure", + "members": { + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    A description for the discoverer.

    " + }, + "SourceArn": { + "shape": "__stringMin20Max1600", + "documentation": "

    The ARN of the event bus.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + } + }, + "documentation": "", + "required": [ + "SourceArn" + ] + }, + "CreateDiscovererResponse": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the discoverer.

    " + }, + "DiscovererArn": { + "shape": "__string", + "documentation": "

    The ARN of the discoverer.

    " + }, + "DiscovererId": { + "shape": "__string", + "documentation": "

    The ID of the discoverer.

    " + }, + "SourceArn": { + "shape": "__string", + "documentation": "

    The ARN of the event bus.

    " + }, + "State": { + "shape": "DiscovererState", + "documentation": "

    The state of the discoverer.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + } + } + }, + "CreateRegistryInput": { + "type": "structure", + "members": { + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    A description of the registry to be created.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags to associate with the registry.

    " + } + } + }, + "CreateRegistryRequest": { + "type": "structure", + "members": { + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    A description of the registry to be created.

    " + }, + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags to associate with the registry.

    " + } + }, + "required": [ + "RegistryName" + ] + }, + "CreateRegistryResponse": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the registry.

    " + }, + "RegistryArn": { + "shape": "__string", + "documentation": "

    The ARN of the registry.

    " + }, + "RegistryName": { + "shape": "__string", + "documentation": "

    The name of the registry.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the registry.

    " + } + } + }, + "CreateSchemaInput": { + "type": "structure", + "members": { + "Content": { + "shape": "__stringMin1Max100000", + "documentation": "

    The source of the schema definition.

    " + }, + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    A description of the schema.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the schema.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The type of schema.

    " + } + }, + "required": [ + "Type", + "Content" + ] + }, + "CreateSchemaRequest": { + "type": "structure", + "members": { + "Content": { + "shape": "__stringMin1Max100000", + "documentation": "

    The source of the schema definition.

    " + }, + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    A description of the schema.

    " + }, + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaName": { + "shape": "__string", + "location": "uri", + "locationName": "schemaName", + "documentation": "

    The name of the schema.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the schema.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The type of schema.

    " + } + }, + "required": [ + "RegistryName", + "SchemaName", + "Type", + "Content" + ] + }, + "CreateSchemaResponse": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the schema.

    " + }, + "LastModified": { + "shape": "__timestampIso8601", + "documentation": "

    The date and time that the schema was modified.

    " + }, + "SchemaArn": { + "shape": "__string", + "documentation": "

    The ARN of the schema.

    " + }, + "SchemaName": { + "shape": "__string", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "documentation": "

    The version number of the schema.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + }, + "Type": { + "shape": "__string", + "documentation": "

    The type of the schema.

    " + }, + "VersionCreatedDate": { + "shape": "__timestampIso8601", + "documentation": "

    The date the schema version was created.

    " + } + } + }, + "DeleteDiscovererRequest": { + "type": "structure", + "members": { + "DiscovererId": { + "shape": "__string", + "location": "uri", + "locationName": "discovererId", + "documentation": "

    The ID of the discoverer.

    " + } + }, + "required": [ + "DiscovererId" + ] + }, + "DeleteRegistryRequest": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + } + }, + "required": [ + "RegistryName" + ] + }, + "DeleteResourcePolicyRequest": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "location": "querystring", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + } + } + }, + "DeleteSchemaRequest": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaName": { + "shape": "__string", + "location": "uri", + "locationName": "schemaName", + "documentation": "

    The name of the schema.

    " + } + }, + "required": [ + "RegistryName", + "SchemaName" + ] + }, + "DeleteSchemaVersionRequest": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaName": { + "shape": "__string", + "location": "uri", + "locationName": "schemaName", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "location": "uri", + "locationName": "schemaVersion", + "documentation": "The version number of the schema" + } + }, + "required": [ + "SchemaVersion", + "RegistryName", + "SchemaName" + ] + }, + "DescribeCodeBindingRequest": { + "type": "structure", + "members": { + "Language": { + "shape": "__string", + "location": "uri", + "locationName": "language", + "documentation": "

    The language of the code binding.

    " + }, + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaName": { + "shape": "__string", + "location": "uri", + "locationName": "schemaName", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "location": "querystring", + "locationName": "schemaVersion", + "documentation": "

    Specifying this limits the results to only this schema version.

    " + } + }, + "required": [ + "RegistryName", + "SchemaName", + "Language" + ] + }, + "DescribeCodeBindingResponse": { + "type": "structure", + "members": { + "CreationDate": { + "shape": "__timestampIso8601", + "documentation": "

    The time and date that the code binding was created.

    " + }, + "LastModified": { + "shape": "__timestampIso8601", + "documentation": "

    The date and time that code bindings were modified.

    " + }, + "SchemaVersion": { + "shape": "__string", + "documentation": "

    The version number of the schema.

    " + }, + "Status": { + "shape": "CodeGenerationStatus", + "documentation": "

    The current status of code binding generation.

    " + } + } + }, + "DescribeDiscovererRequest": { + "type": "structure", + "members": { + "DiscovererId": { + "shape": "__string", + "location": "uri", + "locationName": "discovererId", + "documentation": "

    The ID of the discoverer.

    " + } + }, + "required": [ + "DiscovererId" + ] + }, + "DescribeDiscovererResponse": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the discoverer.

    " + }, + "DiscovererArn": { + "shape": "__string", + "documentation": "

    The ARN of the discoverer.

    " + }, + "DiscovererId": { + "shape": "__string", + "documentation": "

    The ID of the discoverer.

    " + }, + "SourceArn": { + "shape": "__string", + "documentation": "

    The ARN of the event bus.

    " + }, + "State": { + "shape": "DiscovererState", + "documentation": "

    The state of the discoverer.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + } + } + }, + "DescribeRegistryRequest": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + } + }, + "required": [ + "RegistryName" + ] + }, + "DescribeRegistryResponse": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the registry.

    " + }, + "RegistryArn": { + "shape": "__string", + "documentation": "

    The ARN of the registry.

    " + }, + "RegistryName": { + "shape": "__string", + "documentation": "

    The name of the registry.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the registry.

    " + } + } + }, + "DescribeSchemaOutput": { + "type": "structure", + "members": { + "Content": { + "shape": "__string", + "documentation": "

    The source of the schema definition.

    " + }, + "Description": { + "shape": "__string", + "documentation": "

    The description of the schema.

    " + }, + "LastModified": { + "shape": "__timestampIso8601", + "documentation": "

    The date and time that the schema was modified.

    " + }, + "SchemaArn": { + "shape": "__string", + "documentation": "

    The ARN of the schema.

    " + }, + "SchemaName": { + "shape": "__string", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "documentation": "

    The version number of the schema.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + }, + "Type": { + "shape": "__string", + "documentation": "

    The type of the schema.

    " + }, + "VersionCreatedDate": { + "shape": "__timestampIso8601", + "documentation": "

    The date the schema version was created.

    " + } + } + }, + "DescribeSchemaRequest": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaName": { + "shape": "__string", + "location": "uri", + "locationName": "schemaName", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "location": "querystring", + "locationName": "schemaVersion", + "documentation": "

    Specifying this limits the results to only this schema version.

    " + } + }, + "required": [ + "RegistryName", + "SchemaName" + ] + }, + "DescribeSchemaResponse": { + "type": "structure", + "members": { + "Content": { + "shape": "__string", + "documentation": "

    The source of the schema definition.

    " + }, + "Description": { + "shape": "__string", + "documentation": "

    The description of the schema.

    " + }, + "LastModified": { + "shape": "__timestampIso8601", + "documentation": "

    The date and time that the schema was modified.

    " + }, + "SchemaArn": { + "shape": "__string", + "documentation": "

    The ARN of the schema.

    " + }, + "SchemaName": { + "shape": "__string", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "documentation": "

    The version number of the schema.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + }, + "Type": { + "shape": "__string", + "documentation": "

    The type of the schema.

    " + }, + "VersionCreatedDate": { + "shape": "__timestampIso8601", + "documentation": "

    The date the schema version was created.

    " + } + } + }, + "DiscovererOutput": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the discoverer.

    " + }, + "DiscovererArn": { + "shape": "__string", + "documentation": "

    The ARN of the discoverer.

    " + }, + "DiscovererId": { + "shape": "__string", + "documentation": "

    The ID of the discoverer.

    " + }, + "SourceArn": { + "shape": "__string", + "documentation": "

    The ARN of the event bus.

    " + }, + "State": { + "shape": "DiscovererState", + "documentation": "

    The state of the discoverer.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + } + } + }, + "DiscovererState": { + "type": "string", + "enum": [ + "STARTED", + "STOPPED" + ] + }, + "DiscovererStateOutput": { + "type": "structure", + "members": { + "DiscovererId": { + "shape": "__string", + "documentation": "

    The ID of the discoverer.

    " + }, + "State": { + "shape": "DiscovererState", + "documentation": "

    The state of the discoverer.

    " + } + } + }, + "DiscovererSummary": { + "type": "structure", + "members": { + "DiscovererArn": { + "shape": "__string", + "documentation": "

    The ARN of the discoverer.

    " + }, + "DiscovererId": { + "shape": "__string", + "documentation": "

    The ID of the discoverer.

    " + }, + "SourceArn": { + "shape": "__string", + "documentation": "

    The ARN of the event bus.

    " + }, + "State": { + "shape": "DiscovererState", + "documentation": "

    The state of the discoverer.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + } + } + }, + "ErrorOutput": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ] + }, + "ExportSchemaOutput": { + "type": "structure", + "members": { + "Content": { + "shape": "__string" + }, + "SchemaArn": { + "shape": "__string" + }, + "SchemaName": { + "shape": "__string" + }, + "SchemaVersion": { + "shape": "__string" + }, + "Type": { + "shape": "__string" + } + } + }, + "ExportSchemaRequest": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaName": { + "shape": "__string", + "location": "uri", + "locationName": "schemaName", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "location": "querystring", + "locationName": "schemaVersion", + "documentation": "

    Specifying this limits the results to only this schema version.

    " + }, + "Type": { + "shape": "__string", + "location": "querystring", + "locationName": "type" + } + }, + "required": [ + "RegistryName", + "SchemaName", + "Type" + ] + }, + "ExportSchemaResponse": { + "type": "structure", + "members": { + "Content": { + "shape": "__string" + }, + "SchemaArn": { + "shape": "__string" + }, + "SchemaName": { + "shape": "__string" + }, + "SchemaVersion": { + "shape": "__string" + }, + "Type": { + "shape": "__string" + } + } + }, + "ForbiddenException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 403 + } + }, + "GetCodeBindingSourceOutput": { + "type": "string" + }, + "GetCodeBindingSourceRequest": { + "type": "structure", + "members": { + "Language": { + "shape": "__string", + "location": "uri", + "locationName": "language", + "documentation": "

    The language of the code binding.

    " + }, + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaName": { + "shape": "__string", + "location": "uri", + "locationName": "schemaName", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "location": "querystring", + "locationName": "schemaVersion", + "documentation": "

    Specifying this limits the results to only this schema version.

    " + } + }, + "required": [ + "RegistryName", + "SchemaName", + "Language" + ] + }, + "GetCodeBindingSourceResponse": { + "type": "structure", + "members": { + "Body": { + "shape": "Body" + } + }, + "payload": "Body" + }, + "GetDiscoveredSchemaInput": { + "type": "structure", + "members": { + "Events": { + "shape": "__listOfGetDiscoveredSchemaVersionItemInput", + "documentation": "

    An array of strings where each string is a JSON event. These are the events that were used to generate the schema. The array includes a single type of event and has a maximum size of 10 events.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The type of event.

    " + } + }, + "required": [ + "Type", + "Events" + ] + }, + "GetDiscoveredSchemaOutput": { + "type": "structure", + "members": { + "Content": { + "shape": "__string", + "documentation": "

    The source of the schema definition.

    " + } + }, + "documentation": "

    " + }, + "GetDiscoveredSchemaRequest": { + "type": "structure", + "members": { + "Events": { + "shape": "__listOfGetDiscoveredSchemaVersionItemInput", + "documentation": "

    An array of strings where each string is a JSON event. These are the events that were used to generate the schema. The array includes a single type of event and has a maximum size of 10 events.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The type of event.

    " + } + }, + "required": [ + "Type", + "Events" + ] + }, + "GetDiscoveredSchemaResponse": { + "type": "structure", + "members": { + "Content": { + "shape": "__string", + "documentation": "

    The source of the schema definition.

    " + } + } + }, + "GetDiscoveredSchemaVersionItemInput": { + "type": "string", + "min": 1, + "max": 100000 + }, + "GetResourcePolicyOutput": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

    The resource-based policy.

    ", + "jsonvalue": true + }, + "RevisionId": { + "shape": "__string", + "documentation": "

    The revision ID.

    " + } + }, + "documentation": "

    Information about the policy.

    " + }, + "GetResourcePolicyRequest": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "location": "querystring", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + } + } + }, + "GetResourcePolicyResponse": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

    The resource-based policy.

    ", + "jsonvalue": true + }, + "RevisionId": { + "shape": "__string", + "documentation": "

    The revision ID.

    " + } + } + }, + "GoneException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 410 + } + }, + "InternalServerErrorException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 500 + } + }, + "Limit": { + "type": "integer", + "min": 1, + "max": 100 + }, + "ListDiscoverersOutput": { + "type": "structure", + "members": { + "Discoverers": { + "shape": "__listOfDiscovererSummary", + "documentation": "

    An array of DiscovererSummary information.

    " + }, + "NextToken": { + "shape": "__string", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + } + } + }, + "ListDiscoverersRequest": { + "type": "structure", + "members": { + "DiscovererIdPrefix": { + "shape": "__string", + "location": "querystring", + "locationName": "discovererIdPrefix", + "documentation": "

    Specifying this limits the results to only those discoverer IDs that start with the specified prefix.

    " + }, + "Limit": { + "shape": "__integer", + "location": "querystring", + "locationName": "limit" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "SourceArnPrefix": { + "shape": "__string", + "location": "querystring", + "locationName": "sourceArnPrefix", + "documentation": "

    Specifying this limits the results to only those ARNs that start with the specified prefix.

    " + } + } + }, + "ListDiscoverersResponse": { + "type": "structure", + "members": { + "Discoverers": { + "shape": "__listOfDiscovererSummary", + "documentation": "

    An array of DiscovererSummary information.

    " + }, + "NextToken": { + "shape": "__string", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + } + } + }, + "ListRegistriesOutput": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "Registries": { + "shape": "__listOfRegistrySummary", + "documentation": "

    An array of registry summaries.

    " + } + }, + "documentation": "

    List the registries.

    " + }, + "ListRegistriesRequest": { + "type": "structure", + "members": { + "Limit": { + "shape": "__integer", + "location": "querystring", + "locationName": "limit" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "RegistryNamePrefix": { + "shape": "__string", + "location": "querystring", + "locationName": "registryNamePrefix", + "documentation": "

    Specifying this limits the results to only those registry names that start with the specified prefix.

    " + }, + "Scope": { + "shape": "__string", + "location": "querystring", + "locationName": "scope", + "documentation": "

    Can be set to Local or AWS to limit responses to your custom registries, or the ones provided by AWS.

    " + } + } + }, + "ListRegistriesResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "Registries": { + "shape": "__listOfRegistrySummary", + "documentation": "

    An array of registry summaries.

    " + } + } + }, + "ListSchemaVersionsOutput": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "SchemaVersions": { + "shape": "__listOfSchemaVersionSummary", + "documentation": "

    An array of schema version summaries.

    " + } + } + }, + "ListSchemaVersionsRequest": { + "type": "structure", + "members": { + "Limit": { + "shape": "__integer", + "location": "querystring", + "locationName": "limit" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaName": { + "shape": "__string", + "location": "uri", + "locationName": "schemaName", + "documentation": "

    The name of the schema.

    " + } + }, + "required": [ + "RegistryName", + "SchemaName" + ] + }, + "ListSchemaVersionsResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "SchemaVersions": { + "shape": "__listOfSchemaVersionSummary", + "documentation": "

    An array of schema version summaries.

    " + } + } + }, + "ListSchemasOutput": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "Schemas": { + "shape": "__listOfSchemaSummary", + "documentation": "

    An array of schema summaries.

    " + } + } + }, + "ListSchemasRequest": { + "type": "structure", + "members": { + "Limit": { + "shape": "__integer", + "location": "querystring", + "locationName": "limit" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaNamePrefix": { + "shape": "__string", + "location": "querystring", + "locationName": "schemaNamePrefix", + "documentation": "

    Specifying this limits the results to only those schema names that start with the specified prefix.

    " + } + }, + "required": [ + "RegistryName" + ] + }, + "ListSchemasResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "Schemas": { + "shape": "__listOfSchemaSummary", + "documentation": "

    An array of schema summaries.

    " + } + } + }, + "ListTagsForResourceOutput": { + "type": "structure", + "members": { + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "

    The ARN of the resource.

    " + } + }, + "required": [ + "ResourceArn" + ] + }, + "ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, + "LockServiceLinkedRoleInput": { + "type": "structure", + "members": { + "RoleArn": { + "shape": "__stringMin1Max1600" + }, + "Timeout": { + "shape": "__integerMin1Max29000" + } + }, + "required": [ + "Timeout", + "RoleArn" + ] + }, + "LockServiceLinkedRoleOutput": { + "type": "structure", + "members": { + "CanBeDeleted": { + "shape": "__boolean" + }, + "ReasonOfFailure": { + "shape": "__stringMin1Max1600" + }, + "RelatedResources": { + "shape": "__listOfDiscovererSummary" + } + } + }, + "LockServiceLinkedRoleRequest": { + "type": "structure", + "members": { + "RoleArn": { + "shape": "__stringMin1Max1600" + }, + "Timeout": { + "shape": "__integerMin1Max29000" + } + }, + "documentation": "", + "required": [ + "Timeout", + "RoleArn" + ] + }, + "LockServiceLinkedRoleResponse": { + "type": "structure", + "members": { + "CanBeDeleted": { + "shape": "__boolean" + }, + "ReasonOfFailure": { + "shape": "__stringMin1Max1600" + }, + "RelatedResources": { + "shape": "__listOfDiscovererSummary" + } + } + }, + "NotFoundException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 404 + } + }, + "PreconditionFailedException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 412 + } + }, + "PutCodeBindingRequest": { + "type": "structure", + "members": { + "Language": { + "shape": "__string", + "location": "uri", + "locationName": "language", + "documentation": "

    The language of the code binding.

    " + }, + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaName": { + "shape": "__string", + "location": "uri", + "locationName": "schemaName", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "location": "querystring", + "locationName": "schemaVersion", + "documentation": "

    Specifying this limits the results to only this schema version.

    " + } + }, + "required": [ + "RegistryName", + "SchemaName", + "Language" + ] + }, + "PutCodeBindingResponse": { + "type": "structure", + "members": { + "CreationDate": { + "shape": "__timestampIso8601", + "documentation": "

    The time and date that the code binding was created.

    " + }, + "LastModified": { + "shape": "__timestampIso8601", + "documentation": "

    The date and time that code bindings were modified.

    " + }, + "SchemaVersion": { + "shape": "__string", + "documentation": "

    The version number of the schema.

    " + }, + "Status": { + "shape": "CodeGenerationStatus", + "documentation": "

    The current status of code binding generation.

    " + } + } + }, + "PutResourcePolicyInput": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

    The resource-based policy.

    ", + "jsonvalue": true + }, + "RevisionId": { + "shape": "__string", + "documentation": "

    The revision ID of the policy.

    " + } + }, + "documentation": "

    Only update the policy if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.

    ", + "required": [ + "Policy" + ] + }, + "PutResourcePolicyOutput": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

    The resource-based policy.

    ", + "jsonvalue": true + }, + "RevisionId": { + "shape": "__string", + "documentation": "

    The revision ID of the policy.

    " + } + }, + "documentation": "

    The resource-based policy.

    " + }, + "PutResourcePolicyRequest": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

    The resource-based policy.

    ", + "jsonvalue": true + }, + "RegistryName": { + "shape": "__string", + "location": "querystring", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "RevisionId": { + "shape": "__string", + "documentation": "

    The revision ID of the policy.

    " + } + }, + "documentation": "

    The name of the policy.

    ", + "required": [ + "Policy" + ] + }, + "PutResourcePolicyResponse": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

    The resource-based policy.

    ", + "jsonvalue": true + }, + "RevisionId": { + "shape": "__string", + "documentation": "

    The revision ID of the policy.

    " + } + } + }, + "RegistryOutput": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the registry.

    " + }, + "RegistryArn": { + "shape": "__string", + "documentation": "

    The ARN of the registry.

    " + }, + "RegistryName": { + "shape": "__string", + "documentation": "

    The name of the registry.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the registry.

    " + } + } + }, + "RegistrySummary": { + "type": "structure", + "members": { + "RegistryArn": { + "shape": "__string", + "documentation": "

    The ARN of the registry.

    " + }, + "RegistryName": { + "shape": "__string", + "documentation": "

    The name of the registry.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the registry.

    " + } + } + }, + "SchemaOutput": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the schema.

    " + }, + "LastModified": { + "shape": "__timestampIso8601", + "documentation": "

    The date and time that the schema was modified.

    " + }, + "SchemaArn": { + "shape": "__string", + "documentation": "

    The ARN of the schema.

    " + }, + "SchemaName": { + "shape": "__string", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "documentation": "

    The version number of the schema.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + }, + "Type": { + "shape": "__string", + "documentation": "

    The type of the schema.

    " + }, + "VersionCreatedDate": { + "shape": "__timestampIso8601", + "documentation": "

    The date the schema version was created.

    " + } + } + }, + "SchemaSummary": { + "type": "structure", + "members": { + "LastModified": { + "shape": "__timestampIso8601", + "documentation": "

    The date and time that the schema was modified.

    " + }, + "SchemaArn": { + "shape": "__string", + "documentation": "

    The ARN of the schema.

    " + }, + "SchemaName": { + "shape": "__string", + "documentation": "

    The name of the schema.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the schema.

    " + }, + "VersionCount": { + "shape": "__long", + "documentation": "

    The number of versions available for the schema.

    " + } + }, + "documentation": "

    A summary of schema details.

    " + }, + "SchemaVersionSummary": { + "type": "structure", + "members": { + "SchemaArn": { + "shape": "__string", + "documentation": "

    The ARN of the schema version.

    " + }, + "SchemaName": { + "shape": "__string", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "documentation": "

    The version number of the schema.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The type of schema.

    " + } + } + }, + "SearchSchemaSummary": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "documentation": "

    The name of the registry.

    " + }, + "SchemaArn": { + "shape": "__string", + "documentation": "

    The ARN of the schema.

    " + }, + "SchemaName": { + "shape": "__string", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersions": { + "shape": "__listOfSearchSchemaVersionSummary", + "documentation": "

    An array of schema version summaries.

    " + } + } + }, + "SearchSchemaVersionSummary": { + "type": "structure", + "members": { + "CreatedDate": { + "shape": "__timestampIso8601", + "documentation": "

    The date the schema version was created.

    " + }, + "SchemaVersion": { + "shape": "__string", + "documentation": "

    The version number of the schema.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The type of schema.

    " + } + } + }, + "SearchSchemasOutput": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "Schemas": { + "shape": "__listOfSearchSchemaSummary", + "documentation": "

    An array of SearchSchemaSummary objects.

    " + } + } + }, + "SearchSchemasRequest": { + "type": "structure", + "members": { + "Keywords": { + "shape": "__string", + "location": "querystring", + "locationName": "keywords", + "documentation": "

    Specifying this limits the results to only schemas that include the provided keywords.

    " + }, + "Limit": { + "shape": "__integer", + "location": "querystring", + "locationName": "limit" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + } + }, + "required": [ + "RegistryName", + "Keywords" + ] + }, + "SearchSchemasResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "documentation": "

    The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

    " + }, + "Schemas": { + "shape": "__listOfSearchSchemaSummary", + "documentation": "

    An array of SearchSchemaSummary objects.

    " + } + } + }, + "ServiceUnavailableException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 503 + } + }, + "StartDiscovererRequest": { + "type": "structure", + "members": { + "DiscovererId": { + "shape": "__string", + "location": "uri", + "locationName": "discovererId", + "documentation": "

    The ID of the discoverer.

    " + } + }, + "required": [ + "DiscovererId" + ] + }, + "StartDiscovererResponse": { + "type": "structure", + "members": { + "DiscovererId": { + "shape": "__string", + "documentation": "

    The ID of the discoverer.

    " + }, + "State": { + "shape": "DiscovererState", + "documentation": "

    The state of the discoverer.

    " + } + } + }, + "StopDiscovererRequest": { + "type": "structure", + "members": { + "DiscovererId": { + "shape": "__string", + "location": "uri", + "locationName": "discovererId", + "documentation": "

    The ID of the discoverer.

    " + } + }, + "required": [ + "DiscovererId" + ] + }, + "StopDiscovererResponse": { + "type": "structure", + "members": { + "DiscovererId": { + "shape": "__string", + "documentation": "

    The ID of the discoverer.

    " + }, + "State": { + "shape": "DiscovererState", + "documentation": "

    The state of the discoverer.

    " + } + } + }, + "TagResourceInput": { + "type": "structure", + "members": { + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + } + }, + "required": [ + "Tags" + ] + }, + "TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "

    The ARN of the resource.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + } + }, + "documentation": "

    ", + "required": [ + "ResourceArn", + "Tags" + ] + }, + "Tags": { + "type": "map", + "documentation": "

    Key-value pairs associated with a resource.

    ", + "key": { + "shape": "__string" + }, + "value": { + "shape": "__string" + } + }, + "TooManyRequestsException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 429 + } + }, + "Type": { + "type": "string", + "enum": [ + "OpenApi3", + "JSONSchemaDraft4" + ] + }, + "UnauthorizedException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

    The error code.

    " + }, + "Message": { + "shape": "__string", + "documentation": "

    The message string of the error output.

    " + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 401 + } + }, + "UnlockServiceLinkedRoleInput": { + "type": "structure", + "members": { + "RoleArn": { + "shape": "__stringMin1Max1600" + } + }, + "required": [ + "RoleArn" + ] + }, + "UnlockServiceLinkedRoleRequest": { + "type": "structure", + "members": { + "RoleArn": { + "shape": "__stringMin1Max1600" + } + }, + "required": [ + "RoleArn" + ] + }, + "UnlockServiceLinkedRoleResponse": { + "type": "structure", + "members": {} + }, + "UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn", + "documentation": "

    The ARN of the resource.

    " + }, + "TagKeys": { + "shape": "__listOf__string", + "location": "querystring", + "locationName": "tagKeys", + "documentation": "

    Keys of key-value pairs.

    " + } + }, + "required": [ + "TagKeys", + "ResourceArn" + ] + }, + "UpdateDiscovererInput": { + "type": "structure", + "members": { + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    The description of the discoverer to update.

    " + } + } + }, + "UpdateDiscovererRequest": { + "type": "structure", + "members": { + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    The description of the discoverer to update.

    " + }, + "DiscovererId": { + "shape": "__string", + "location": "uri", + "locationName": "discovererId", + "documentation": "

    The ID of the discoverer.

    " + } + }, + "required": [ + "DiscovererId" + ] + }, + "UpdateDiscovererResponse": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the discoverer.

    " + }, + "DiscovererArn": { + "shape": "__string", + "documentation": "

    The ARN of the discoverer.

    " + }, + "DiscovererId": { + "shape": "__string", + "documentation": "

    The ID of the discoverer.

    " + }, + "SourceArn": { + "shape": "__string", + "documentation": "

    The ARN of the event bus.

    " + }, + "State": { + "shape": "DiscovererState", + "documentation": "

    The state of the discoverer.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the resource.

    " + } + } + }, + "UpdateRegistryInput": { + "type": "structure", + "members": { + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    The description of the registry to update.

    " + } + } + }, + "UpdateRegistryRequest": { + "type": "structure", + "members": { + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    The description of the registry to update.

    " + }, + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + } + }, + "documentation": "

    Updates the registry.

    ", + "required": [ + "RegistryName" + ] + }, + "UpdateRegistryResponse": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the registry.

    " + }, + "RegistryArn": { + "shape": "__string", + "documentation": "

    The ARN of the registry.

    " + }, + "RegistryName": { + "shape": "__string", + "documentation": "

    The name of the registry.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "

    Tags associated with the registry.

    " + } + } + }, + "UpdateSchemaInput": { + "type": "structure", + "members": { + "ClientTokenId": { + "shape": "__stringMin0Max36", + "documentation": "

    The ID of the client token.

    ", + "idempotencyToken": true + }, + "Content": { + "shape": "__stringMin1Max100000", + "documentation": "

    The source of the schema definition.

    " + }, + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    The description of the schema.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The schema type for the events schema.

    " + } + } + }, + "UpdateSchemaRequest": { + "type": "structure", + "members": { + "ClientTokenId": { + "shape": "__stringMin0Max36", + "documentation": "

    The ID of the client token.

    ", + "idempotencyToken": true + }, + "Content": { + "shape": "__stringMin1Max100000", + "documentation": "

    The source of the schema definition.

    " + }, + "Description": { + "shape": "__stringMin0Max256", + "documentation": "

    The description of the schema.

    " + }, + "RegistryName": { + "shape": "__string", + "location": "uri", + "locationName": "registryName", + "documentation": "

    The name of the registry.

    " + }, + "SchemaName": { + "shape": "__string", + "location": "uri", + "locationName": "schemaName", + "documentation": "

    The name of the schema.

    " + }, + "Type": { + "shape": "Type", + "documentation": "

    The schema type for the events schema.

    " + } + }, + "required": [ + "RegistryName", + "SchemaName" + ] + }, + "UpdateSchemaResponse": { + "type": "structure", + "members": { + "Description": { + "shape": "__string", + "documentation": "

    The description of the schema.

    " + }, + "LastModified": { + "shape": "__timestampIso8601", + "documentation": "

    The date and time that the schema was modified.

    " + }, + "SchemaArn": { + "shape": "__string", + "documentation": "

    The ARN of the schema.

    " + }, + "SchemaName": { + "shape": "__string", + "documentation": "

    The name of the schema.

    " + }, + "SchemaVersion": { + "shape": "__string", + "documentation": "

    The version number of the schema.

    " + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + }, + "Type": { + "shape": "__string", + "documentation": "

    The type of the schema.

    " + }, + "VersionCreatedDate": { + "shape": "__timestampIso8601", + "documentation": "

    The date the schema version was created.

    " + } + } + }, + "__boolean": { + "type": "boolean" + }, + "__double": { + "type": "double" + }, + "__integer": { + "type": "integer" + }, + "__integerMin1Max29000": { + "type": "integer", + "min": 1, + "max": 29000 + }, + "__listOfDiscovererSummary": { + "type": "list", + "member": { + "shape": "DiscovererSummary" + } + }, + "__listOfGetDiscoveredSchemaVersionItemInput": { + "type": "list", + "min": 1, + "max": 10, + "member": { + "shape": "GetDiscoveredSchemaVersionItemInput" + } + }, + "__listOfRegistrySummary": { + "type": "list", + "member": { + "shape": "RegistrySummary" + } + }, + "__listOfSchemaSummary": { + "type": "list", + "member": { + "shape": "SchemaSummary" + } + }, + "__listOfSchemaVersionSummary": { + "type": "list", + "member": { + "shape": "SchemaVersionSummary" + } + }, + "__listOfSearchSchemaSummary": { + "type": "list", + "member": { + "shape": "SearchSchemaSummary" + } + }, + "__listOfSearchSchemaVersionSummary": { + "type": "list", + "member": { + "shape": "SearchSchemaVersionSummary" + } + }, + "__listOf__string": { + "type": "list", + "member": { + "shape": "__string" + } + }, + "__long": { + "type": "long" + }, + "__string": { + "type": "string" + }, + "__stringMin0Max256": { + "type": "string", + "min": 0, + "max": 256 + }, + "__stringMin0Max36": { + "type": "string", + "min": 0, + "max": 36 + }, + "__stringMin1Max100000": { + "type": "string", + "min": 1, + "max": 100000 + }, + "__stringMin1Max1600": { + "type": "string", + "min": 1, + "max": 1600 + }, + "__stringMin20Max1600": { + "type": "string", + "min": 20, + "max": 1600 + }, + "__timestampIso8601": { + "type": "timestamp", + "timestampFormat": "iso8601" + }, + "__timestampUnix": { + "type": "timestamp", + "timestampFormat": "unixTimestamp" + }, + "Body": { + "type": "blob" + } + }, + "documentation": "

    Amazon EventBridge Schema Registry

    " +} diff --git a/services/schemas/src/main/resources/codegen-resources/waiters-2.json b/services/schemas/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..4f642f615c44 --- /dev/null +++ b/services/schemas/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,36 @@ +{ + "version": 2, + "waiters": { + "CodeBindingExists": { + "description": "Wait until code binding is generated", + "delay": 2, + "operation": "DescribeCodeBinding", + "maxAttempts": 30, + "acceptors": [ + { + "expected": "CREATE_COMPLETE", + "matcher": "path", + "state": "success", + "argument": "Status" + }, + { + "expected": "CREATE_IN_PROGRESS", + "matcher": "path", + "state": "retry", + "argument": "Status" + }, + { + "expected": "CREATE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "Status" + }, + { + "matcher": "error", + "expected": "NotFoundException", + "state": "failure" + } + ] + } + } +} diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index 287e40ae5966..e82ff45f5472 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + servicecatalogappregistry + AWS Java SDK :: Services :: Service Catalog App Registry + The AWS Java SDK for Service Catalog App Registry module holds the client classes that are used for + communicating with Service Catalog App Registry. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.servicecatalogappregistry + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/servicecatalogappregistry/src/main/resources/codegen-resources/paginators-1.json b/services/servicecatalogappregistry/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..8c9a3bd7c9f8 --- /dev/null +++ b/services/servicecatalogappregistry/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListApplications": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "applications" + }, + "ListAssociatedAttributeGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "attributeGroups" + }, + "ListAssociatedResources": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "resources" + }, + "ListAttributeGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "attributeGroups" + } + } +} diff --git a/services/servicecatalogappregistry/src/main/resources/codegen-resources/service-2.json b/services/servicecatalogappregistry/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..cbc2de63d1cd --- /dev/null +++ b/services/servicecatalogappregistry/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1328 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-06-24", + "endpointPrefix":"servicecatalog-appregistry", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"AppRegistry", + "serviceFullName":"AWS Service Catalog App Registry", + "serviceId":"Service Catalog AppRegistry", + "signatureVersion":"v4", + "signingName":"servicecatalog", 
+ "uid":"AWS242AppRegistry-2020-06-24" + }, + "operations":{ + "AssociateAttributeGroup":{ + "name":"AssociateAttributeGroup", + "http":{ + "method":"PUT", + "requestUri":"/applications/{application}/attribute-groups/{attributeGroup}" + }, + "input":{"shape":"AssociateAttributeGroupRequest"}, + "output":{"shape":"AssociateAttributeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Associates an attribute group with an application to augment the application's metadata with the group's attributes. This feature enables applications to be described with user-defined details that are machine-readable, such as third-party integrations.

    " + }, + "AssociateResource":{ + "name":"AssociateResource", + "http":{ + "method":"PUT", + "requestUri":"/applications/{application}/resources/{resourceType}/{resource}" + }, + "input":{"shape":"AssociateResourceRequest"}, + "output":{"shape":"AssociateResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Associates a resource with an application. Both the resource and the application can be specified either by ID or name.

    " + }, + "CreateApplication":{ + "name":"CreateApplication", + "http":{ + "method":"POST", + "requestUri":"/applications", + "responseCode":201 + }, + "input":{"shape":"CreateApplicationRequest"}, + "output":{"shape":"CreateApplicationResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new application that is the top-level node in a hierarchy of related cloud resource abstractions.

    " + }, + "CreateAttributeGroup":{ + "name":"CreateAttributeGroup", + "http":{ + "method":"POST", + "requestUri":"/attribute-groups", + "responseCode":201 + }, + "input":{"shape":"CreateAttributeGroupRequest"}, + "output":{"shape":"CreateAttributeGroupResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new attribute group as a container for user-defined attributes. This feature enables users to have full control over their cloud application's metadata in a rich machine-readable format to facilitate integration with automated workflows and third-party tools.

    " + }, + "DeleteApplication":{ + "name":"DeleteApplication", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{application}" + }, + "input":{"shape":"DeleteApplicationRequest"}, + "output":{"shape":"DeleteApplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an application that is specified either by its application ID or name. All associated attribute groups and resources must be disassociated before the application can be deleted.

    " + }, + "DeleteAttributeGroup":{ + "name":"DeleteAttributeGroup", + "http":{ + "method":"DELETE", + "requestUri":"/attribute-groups/{attributeGroup}" + }, + "input":{"shape":"DeleteAttributeGroupRequest"}, + "output":{"shape":"DeleteAttributeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an attribute group, specified either by its attribute group ID or name.

    " + }, + "DisassociateAttributeGroup":{ + "name":"DisassociateAttributeGroup", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{application}/attribute-groups/{attributeGroup}" + }, + "input":{"shape":"DisassociateAttributeGroupRequest"}, + "output":{"shape":"DisassociateAttributeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Disassociates an attribute group from an application to remove the extra attributes contained in the attribute group from the application's metadata. This operation reverts AssociateAttributeGroup.

    " + }, + "DisassociateResource":{ + "name":"DisassociateResource", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{application}/resources/{resourceType}/{resource}" + }, + "input":{"shape":"DisassociateResourceRequest"}, + "output":{"shape":"DisassociateResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Disassociates a resource from an application. Both the resource and the application can be specified either by ID or name.

    " + }, + "GetApplication":{ + "name":"GetApplication", + "http":{ + "method":"GET", + "requestUri":"/applications/{application}" + }, + "input":{"shape":"GetApplicationRequest"}, + "output":{"shape":"GetApplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves metadata information about one of your applications. The application can be specified either by its unique ID or by its name (which is unique within one account in one region at a given point in time). Specify by ID in automated workflows if you want to make sure that the exact same application is returned or a ResourceNotFoundException is thrown, avoiding the ABA addressing problem.

    " + }, + "GetAttributeGroup":{ + "name":"GetAttributeGroup", + "http":{ + "method":"GET", + "requestUri":"/attribute-groups/{attributeGroup}" + }, + "input":{"shape":"GetAttributeGroupRequest"}, + "output":{"shape":"GetAttributeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves an attribute group. The attribute group can be specified either by its unique ID or by its name.

    " + }, + "ListApplications":{ + "name":"ListApplications", + "http":{ + "method":"GET", + "requestUri":"/applications" + }, + "input":{"shape":"ListApplicationsRequest"}, + "output":{"shape":"ListApplicationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves a list of all of your applications. Results are paginated.

    ", + "idempotent":true + }, + "ListAssociatedAttributeGroups":{ + "name":"ListAssociatedAttributeGroups", + "http":{ + "method":"GET", + "requestUri":"/applications/{application}/attribute-groups" + }, + "input":{"shape":"ListAssociatedAttributeGroupsRequest"}, + "output":{"shape":"ListAssociatedAttributeGroupsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all attribute groups that are associated with the specified application. Results are paginated.

    ", + "idempotent":true + }, + "ListAssociatedResources":{ + "name":"ListAssociatedResources", + "http":{ + "method":"GET", + "requestUri":"/applications/{application}/resources" + }, + "input":{"shape":"ListAssociatedResourcesRequest"}, + "output":{"shape":"ListAssociatedResourcesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all resources that are associated with the specified application. Results are paginated.

    ", + "idempotent":true + }, + "ListAttributeGroups":{ + "name":"ListAttributeGroups", + "http":{ + "method":"GET", + "requestUri":"/attribute-groups" + }, + "input":{"shape":"ListAttributeGroupsRequest"}, + "output":{"shape":"ListAttributeGroupsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all attribute groups that you have access to. Results are paginated.

    ", + "idempotent":true + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists all of the tags on the resource.

    " + }, + "SyncResource":{ + "name":"SyncResource", + "http":{ + "method":"POST", + "requestUri":"/sync/{resourceType}/{resource}" + }, + "input":{"shape":"SyncResourceRequest"}, + "output":{"shape":"SyncResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Syncs the resource with what is currently recorded in AppRegistry. Specifically, the resource’s AppRegistry system tags are synced with its associated application. The resource’s system tags are removed if it is not associated with the application. The caller must have permissions to read and update the resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Assigns one or more tags (key-value pairs) to the specified resource.

    Each tag consists of a key and an optional value. If a tag with the same key is already associated with the resource, this action updates its value.

    This operation returns an empty response if the call was successful.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes tags from a resource.

    This operation returns an empty response if the call was successful.

    " + }, + "UpdateApplication":{ + "name":"UpdateApplication", + "http":{ + "method":"PATCH", + "requestUri":"/applications/{application}" + }, + "input":{"shape":"UpdateApplicationRequest"}, + "output":{"shape":"UpdateApplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates an existing application with new attributes.

    " + }, + "UpdateAttributeGroup":{ + "name":"UpdateAttributeGroup", + "http":{ + "method":"PATCH", + "requestUri":"/attribute-groups/{attributeGroup}" + }, + "input":{"shape":"UpdateAttributeGroupRequest"}, + "output":{"shape":"UpdateAttributeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates an existing attribute group with new details.

    " + } + }, + "shapes":{ + "Application":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ApplicationId", + "documentation":"

    The identifier of the application.

    " + }, + "arn":{ + "shape":"ApplicationArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the application across services.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the application. The name must be unique in the region in which you are creating the application.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the application.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment when the application was created.

    " + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment when the application was last updated.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Key-value pairs that you can associate with the application.

    " + } + }, + "documentation":"

    Represents a Service Catalog AppRegistry application that is the top-level node in a hierarchy of related cloud resource abstractions.

    " + }, + "ApplicationArn":{ + "type":"string", + "pattern":"arn:aws[-a-z]*:servicecatalog:[a-z]{2}(-gov)?-[a-z]+-\\d:\\d{12}:/applications/[a-z0-9]+" + }, + "ApplicationId":{ + "type":"string", + "pattern":"[a-z0-9]{12}" + }, + "ApplicationSpecifier":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\w+" + }, + "ApplicationSummaries":{ + "type":"list", + "member":{"shape":"ApplicationSummary"} + }, + "ApplicationSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ApplicationId", + "documentation":"

    The identifier of the application.

    " + }, + "arn":{ + "shape":"ApplicationArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the application across services.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the application. The name must be unique in the region in which you are creating the application.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the application.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment when the application was created.

    " + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment when the application was last updated.

    " + } + }, + "documentation":"

    Summary of a Service Catalog AppRegistry application.

    " + }, + "Arn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" + }, + "AssociateAttributeGroupRequest":{ + "type":"structure", + "required":[ + "application", + "attributeGroup" + ], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

    The name or ID of the application.

    ", + "location":"uri", + "locationName":"application" + }, + "attributeGroup":{ + "shape":"AttributeGroupSpecifier", + "documentation":"

    The name or ID of the attribute group that holds the attributes to describe the application.

    ", + "location":"uri", + "locationName":"attributeGroup" + } + } + }, + "AssociateAttributeGroupResponse":{ + "type":"structure", + "members":{ + "applicationArn":{ + "shape":"ApplicationArn", + "documentation":"

    The Amazon resource name (ARN) of the application that was augmented with attributes.

    " + }, + "attributeGroupArn":{ + "shape":"AttributeGroupArn", + "documentation":"

    The Amazon resource name (ARN) of the attribute group that contains the application's new attributes.

    " + } + } + }, + "AssociateResourceRequest":{ + "type":"structure", + "required":[ + "application", + "resourceType", + "resource" + ], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

    The name or ID of the application.

    ", + "location":"uri", + "locationName":"application" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource with which the application will be associated.

    ", + "location":"uri", + "locationName":"resourceType" + }, + "resource":{ + "shape":"ResourceSpecifier", + "documentation":"

    The name or ID of the resource with which the application will be associated.

    ", + "location":"uri", + "locationName":"resource" + } + } + }, + "AssociateResourceResponse":{ + "type":"structure", + "members":{ + "applicationArn":{ + "shape":"ApplicationArn", + "documentation":"

    The Amazon resource name (ARN) of the application with which the resource was associated.

    " + }, + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon resource name (ARN) that specifies the resource.

    " + } + } + }, + "AssociationCount":{ + "type":"integer", + "min":0 + }, + "AttributeGroup":{ + "type":"structure", + "members":{ + "id":{ + "shape":"AttributeGroupId", + "documentation":"

    The globally unique identifier of the attribute group.

    " + }, + "arn":{ + "shape":"AttributeGroupArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the attribute group across services.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the attribute group.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the attribute group that the user provides.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment the attribute group was created.

    " + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment the attribute group was last updated. This time is the same as the creationTime for a newly created attribute group.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Key-value pairs that you can associate with the attribute group.

    " + } + }, + "documentation":"

    Represents a Service Catalog AppRegistry attribute group, which is rich metadata that describes an application and its components.

    " + }, + "AttributeGroupArn":{ + "type":"string", + "pattern":"arn:aws[-a-z]*:servicecatalog:[a-z]{2}(-gov)?-[a-z]+-\\d:\\d{12}:/attribute-groups/[a-z0-9]+" + }, + "AttributeGroupId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-z0-9]{12}" + }, + "AttributeGroupIds":{ + "type":"list", + "member":{"shape":"AttributeGroupId"} + }, + "AttributeGroupSpecifier":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\w+" + }, + "AttributeGroupSummaries":{ + "type":"list", + "member":{"shape":"AttributeGroupSummary"} + }, + "AttributeGroupSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"AttributeGroupId", + "documentation":"

    The globally unique identifier of the attribute group.

    " + }, + "arn":{ + "shape":"AttributeGroupArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the attribute group across services.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the attribute group.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the attribute group that the user provides.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment the attribute group was created.

    " + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment the attribute group was last updated. This time is the same as the creationTime for a newly created attribute group.

    " + } + }, + "documentation":"

    Summary of a Service Catalog AppRegistry attribute group.

    " + }, + "Attributes":{ + "type":"string", + "max":8000, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "ClientToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    There was a conflict when processing the request (for example, a resource with the given name already exists within the account).

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateApplicationRequest":{ + "type":"structure", + "required":[ + "name", + "clientToken" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

    The name of the application. The name must be unique in the region in which you are creating the application.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the application.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Key-value pairs that you can associate with the application.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique identifier that you provide to ensure idempotency. If you retry a request that completed successfully using the same client token and the same parameters, the retry succeeds without performing any further actions. If you retry a successful request using the same client token, but one or more of the parameters are different, the retry fails.

    ", + "idempotencyToken":true + } + } + }, + "CreateApplicationResponse":{ + "type":"structure", + "members":{ + "application":{ + "shape":"Application", + "documentation":"

    Information about the application.

    " + } + } + }, + "CreateAttributeGroupRequest":{ + "type":"structure", + "required":[ + "name", + "attributes", + "clientToken" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

    The name of the attribute group.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the attribute group that the user provides.

    " + }, + "attributes":{ + "shape":"Attributes", + "documentation":"

    A JSON string in the form of nested key-value pairs that represent the attributes in the group and describe an application and its components.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Key-value pairs that you can associate with the attribute group.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    A unique identifier that you provide to ensure idempotency. If you retry a request that completed successfully using the same client token and the same parameters, the retry succeeds without performing any further actions. If you retry a successful request using the same client token, but one or more of the parameters are different, the retry fails.

    ", + "idempotencyToken":true + } + } + }, + "CreateAttributeGroupResponse":{ + "type":"structure", + "members":{ + "attributeGroup":{ + "shape":"AttributeGroup", + "documentation":"

    Information about the attribute group.

    " + } + } + }, + "DeleteApplicationRequest":{ + "type":"structure", + "required":["application"], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

    The name or ID of the application.

    ", + "location":"uri", + "locationName":"application" + } + } + }, + "DeleteApplicationResponse":{ + "type":"structure", + "members":{ + "application":{ + "shape":"ApplicationSummary", + "documentation":"

    Information about the deleted application.

    " + } + } + }, + "DeleteAttributeGroupRequest":{ + "type":"structure", + "required":["attributeGroup"], + "members":{ + "attributeGroup":{ + "shape":"AttributeGroupSpecifier", + "documentation":"

    The name or ID of the attribute group that holds the attributes to describe the application.

    ", + "location":"uri", + "locationName":"attributeGroup" + } + } + }, + "DeleteAttributeGroupResponse":{ + "type":"structure", + "members":{ + "attributeGroup":{ + "shape":"AttributeGroupSummary", + "documentation":"

    Information about the deleted attribute group.

    " + } + } + }, + "Description":{ + "type":"string", + "max":1024 + }, + "DisassociateAttributeGroupRequest":{ + "type":"structure", + "required":[ + "application", + "attributeGroup" + ], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

    The name or ID of the application.

    ", + "location":"uri", + "locationName":"application" + }, + "attributeGroup":{ + "shape":"AttributeGroupSpecifier", + "documentation":"

    The name or ID of the attribute group that holds the attributes to describe the application.

    ", + "location":"uri", + "locationName":"attributeGroup" + } + } + }, + "DisassociateAttributeGroupResponse":{ + "type":"structure", + "members":{ + "applicationArn":{ + "shape":"ApplicationArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the application.

    " + }, + "attributeGroupArn":{ + "shape":"AttributeGroupArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the attribute group.

    " + } + } + }, + "DisassociateResourceRequest":{ + "type":"structure", + "required":[ + "application", + "resourceType", + "resource" + ], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

    The name or ID of the application.

    ", + "location":"uri", + "locationName":"application" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of the resource that is being disassociated.

    ", + "location":"uri", + "locationName":"resourceType" + }, + "resource":{ + "shape":"ResourceSpecifier", + "documentation":"

    The name or ID of the resource.

    ", + "location":"uri", + "locationName":"resource" + } + } + }, + "DisassociateResourceResponse":{ + "type":"structure", + "members":{ + "applicationArn":{ + "shape":"ApplicationArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the application.

    " + }, + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon resource name (ARN) that specifies the resource.

    " + } + } + }, + "GetApplicationRequest":{ + "type":"structure", + "required":["application"], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

    The name or ID of the application.

    ", + "location":"uri", + "locationName":"application" + } + } + }, + "GetApplicationResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ApplicationId", + "documentation":"

    The identifier of the application.

    " + }, + "arn":{ + "shape":"ApplicationArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the application across services.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the application. The name must be unique in the region in which you are creating the application.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the application.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment when the application was created.

    " + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment when the application was last updated.

    " + }, + "associatedResourceCount":{ + "shape":"AssociationCount", + "documentation":"

    The number of top-level resources that were registered as part of this application.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Key-value pairs associated with the application.

    " + } + } + }, + "GetAttributeGroupRequest":{ + "type":"structure", + "required":["attributeGroup"], + "members":{ + "attributeGroup":{ + "shape":"AttributeGroupSpecifier", + "documentation":"

    The name or ID of the attribute group that holds the attributes to describe the application.

    ", + "location":"uri", + "locationName":"attributeGroup" + } + } + }, + "GetAttributeGroupResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"AttributeGroupId", + "documentation":"

    The identifier of the attribute group.

    " + }, + "arn":{ + "shape":"AttributeGroupArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the attribute group across services.

    " + }, + "name":{ + "shape":"Name", + "documentation":"

    The name of the attribute group.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the attribute group that the user provides.

    " + }, + "attributes":{ + "shape":"Attributes", + "documentation":"

    A JSON string in the form of nested key-value pairs that represent the attributes in the group and describe an application and its components.

    " + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment the attribute group was created.

    " + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The ISO-8601 formatted timestamp of the moment the attribute group was last updated. This time is the same as the creationTime for a newly created attribute group.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    Key-value pairs associated with the attribute group.

    " + } + } + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The service is experiencing internal problems.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListApplicationsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next page of results after a previous API call.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The upper bound of the number of results to return (cannot exceed 25). If this parameter is omitted, it defaults to 25. This value is optional.
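
    To illustrate the nextToken/maxResults paging described here, the following is a minimal sketch using the AWS SDK for Java v2. It is not part of this change set; the generated client class name (ServiceCatalogAppRegistryClient) and package are assumptions, and credentials and region are assumed to be configured.

    import software.amazon.awssdk.services.servicecatalogappregistry.ServiceCatalogAppRegistryClient;
    import software.amazon.awssdk.services.servicecatalogappregistry.model.ListApplicationsRequest;
    import software.amazon.awssdk.services.servicecatalogappregistry.model.ListApplicationsResponse;

    public class ListAllApplications {
        public static void main(String[] args) {
            try (ServiceCatalogAppRegistryClient client = ServiceCatalogAppRegistryClient.create()) {
                String nextToken = null;
                do {
                    // maxResults is capped at 25; nextToken continues a previous call.
                    ListApplicationsRequest request = ListApplicationsRequest.builder()
                            .maxResults(25)
                            .nextToken(nextToken)
                            .build();
                    ListApplicationsResponse response = client.listApplications(request);
                    response.applications().forEach(app -> System.out.println(app.name()));
                    nextToken = response.nextToken();
                } while (nextToken != null);
            }
        }
    }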

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListApplicationsResponse":{ + "type":"structure", + "members":{ + "applications":{ + "shape":"ApplicationSummaries", + "documentation":"

    The list of applications.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next page of results after a previous API call.

    " + } + } + }, + "ListAssociatedAttributeGroupsRequest":{ + "type":"structure", + "required":["application"], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

    The name or ID of the application.

    ", + "location":"uri", + "locationName":"application" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next page of results after a previous API call.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The upper bound of the number of results to return (cannot exceed 25). If this parameter is omitted, it defaults to 25. This value is optional.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssociatedAttributeGroupsResponse":{ + "type":"structure", + "members":{ + "attributeGroups":{ + "shape":"AttributeGroupIds", + "documentation":"

    A list of attribute group IDs.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next page of results after a previous API call.

    " + } + } + }, + "ListAssociatedResourcesRequest":{ + "type":"structure", + "required":["application"], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

    The name or ID of the application.

    ", + "location":"uri", + "locationName":"application" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next page of results after a previous API call.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The upper bound of the number of results to return (cannot exceed 25). If this parameter is omitted, it defaults to 25. This value is optional.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssociatedResourcesResponse":{ + "type":"structure", + "members":{ + "resources":{ + "shape":"Resources", + "documentation":"

    Information about the resources.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next page of results after a previous API call.

    " + } + } + }, + "ListAttributeGroupsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next page of results after a previous API call.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The upper bound of the number of results to return (cannot exceed 25). If this parameter is omitted, it defaults to 25. This value is optional.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAttributeGroupsResponse":{ + "type":"structure", + "members":{ + "attributeGroups":{ + "shape":"AttributeGroupSummaries", + "documentation":"

    The list of attribute groups.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to get the next page of results after a previous API call.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon resource name (ARN) that specifies the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"Tags", + "documentation":"

    The tags on the resource.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "max":25, + "min":1 + }, + "Name":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\w+" + }, + "NextToken":{ + "type":"string", + "max":2024, + "min":1, + "pattern":"[A-Za-z0-9+/=]+" + }, + "ResourceInfo":{ + "type":"structure", + "members":{ + "name":{ + "shape":"ResourceSpecifier", + "documentation":"

    The name of the resource.

    " + }, + "arn":{ + "shape":"StackArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the resource across services.

    " + } + }, + "documentation":"

    Information about the resource.

    " + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The specified resource does not exist.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceSpecifier":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\S+" + }, + "ResourceType":{ + "type":"string", + "enum":["CFN_STACK"] + }, + "Resources":{ + "type":"list", + "member":{"shape":"ResourceInfo"} + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The maximum number of resources per account has been reached.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "StackArn":{ + "type":"string", + "pattern":"arn:aws[-a-z]*:cloudformation:[a-z]{2}(-gov)?-[a-z]+-\\d:\\d{12}:stack/[a-zA-Z][-A-Za-z0-9]{0,127}/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}" + }, + "String":{"type":"string"}, + "SyncAction":{ + "type":"string", + "enum":[ + "START_SYNC", + "NO_ACTION" + ] + }, + "SyncResourceRequest":{ + "type":"structure", + "required":[ + "resourceType", + "resource" + ], + "members":{ + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of resource with which the application will be associated.

    ", + "location":"uri", + "locationName":"resourceType" + }, + "resource":{ + "shape":"ResourceSpecifier", + "documentation":"

    An entity you can work with and specify with a name or ID. Examples include an Amazon EC2 instance, an AWS CloudFormation stack, or an Amazon S3 bucket.

    ", + "location":"uri", + "locationName":"resource" + } + } + }, + "SyncResourceResponse":{ + "type":"structure", + "members":{ + "applicationArn":{ + "shape":"ApplicationArn", + "documentation":"

    The Amazon resource name (ARN) that specifies the application.

    " + }, + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon resource name (ARN) that specifies the resource.

    " + }, + "actionTaken":{ + "shape":"SyncAction", + "documentation":"

    The action that the service took as a result of the request: either START_SYNC or NO_ACTION.

    " + } + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"(?!aws:)[a-zA-Z+-=._:/]+" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon resource name (ARN) that specifies the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"Tags", + "documentation":"

    The new or modified tags for the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "pattern":"[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*" + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":0 + }, + "Timestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon resource name (ARN) that specifies the resource.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

    A list of the tag keys to remove from the specified resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateApplicationRequest":{ + "type":"structure", + "required":["application"], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

    The name or ID of the application that will be updated.

    ", + "location":"uri", + "locationName":"application" + }, + "name":{ + "shape":"Name", + "documentation":"

    The new name of the application. The name must be unique in the region in which you are updating the application.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The new description of the application.

    " + } + } + }, + "UpdateApplicationResponse":{ + "type":"structure", + "members":{ + "application":{ + "shape":"Application", + "documentation":"

    The updated information of the application.

    " + } + } + }, + "UpdateAttributeGroupRequest":{ + "type":"structure", + "required":["attributeGroup"], + "members":{ + "attributeGroup":{ + "shape":"AttributeGroupSpecifier", + "documentation":"

    The name or ID of the attribute group that holds the attributes to describe the application.

    ", + "location":"uri", + "locationName":"attributeGroup" + }, + "name":{ + "shape":"Name", + "documentation":"

    The new name of the attribute group. The name must be unique in the region in which you are updating the attribute group.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of the attribute group that the user provides.

    " + }, + "attributes":{ + "shape":"Attributes", + "documentation":"

    A JSON string of nested key-value pairs that represents the attributes in the group and describes an application and its components.

    " + } + } + }, + "UpdateAttributeGroupResponse":{ + "type":"structure", + "members":{ + "attributeGroup":{ + "shape":"AttributeGroup", + "documentation":"

    The updated information of the attribute group.

    " + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The request has invalid or missing parameters.

    ", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

    AWS Service Catalog AppRegistry enables organizations to understand the application context of their AWS resources. AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise.

    " +} diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index ab4a4d18d7fb..3782999165f0 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + sesv2 + AWS Java SDK :: Services :: SESv2 + The AWS Java SDK for SESv2 module holds the client classes that are used for + communicating with SESv2. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.sesv2 + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/sesv2/src/main/resources/codegen-resources/paginators-1.json b/services/sesv2/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..3d39be48f44a --- /dev/null +++ b/services/sesv2/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,64 @@ +{ + "pagination": { + "GetDedicatedIps": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListConfigurationSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListContactLists": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListContacts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListCustomVerificationEmailTemplates": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListDedicatedIpPools": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListDeliverabilityTestReports": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListDomainDeliverabilityCampaigns": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListEmailIdentities": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListEmailTemplates": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListImportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListSuppressedDestinations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + } + } +} diff --git a/services/sesv2/src/main/resources/codegen-resources/service-2.json b/services/sesv2/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..f165ed3bb91b --- /dev/null +++ b/services/sesv2/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,5646 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-09-27", + "endpointPrefix":"email", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon SES V2", + "serviceFullName":"Amazon Simple Email Service", + "serviceId":"SESv2", + "signatureVersion":"v4", + "signingName":"ses", + "uid":"sesv2-2019-09-27" + }, + "operations":{ + "CreateConfigurationSet":{ + "name":"CreateConfigurationSet", + "http":{ + "method":"POST", + "requestUri":"/v2/email/configuration-sets" + }, + "input":{"shape":"CreateConfigurationSetRequest"}, + "output":{"shape":"CreateConfigurationSetResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"NotFoundException"}, + 
{"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Create a configuration set. Configuration sets are groups of rules that you can apply to the emails that you send. You apply a configuration set to an email by specifying the name of the configuration set when you call the Amazon SES API v2. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
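
    As a hedged illustration of this operation (not part of this change set), a configuration set can be created with the v2 SesV2Client roughly as follows; the configuration set name is a placeholder.

    import software.amazon.awssdk.services.sesv2.SesV2Client;
    import software.amazon.awssdk.services.sesv2.model.CreateConfigurationSetRequest;

    public class CreateConfigSetExample {
        public static void main(String[] args) {
            try (SesV2Client ses = SesV2Client.create()) {
                // The set is then referenced by name when sending email through SES v2.
                ses.createConfigurationSet(CreateConfigurationSetRequest.builder()
                        .configurationSetName("my-config-set")   // placeholder name
                        .build());
            }
        }
    }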

    " + }, + "CreateConfigurationSetEventDestination":{ + "name":"CreateConfigurationSetEventDestination", + "http":{ + "method":"POST", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations" + }, + "input":{"shape":"CreateConfigurationSetEventDestinationRequest"}, + "output":{"shape":"CreateConfigurationSetEventDestinationResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    A single configuration set can include more than one event destination.

    " + }, + "CreateContact":{ + "name":"CreateContact", + "http":{ + "method":"POST", + "requestUri":"/v2/email/contact-lists/{ContactListName}/contacts" + }, + "input":{"shape":"CreateContactRequest"}, + "output":{"shape":"CreateContactResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"AlreadyExistsException"} + ], + "documentation":"

    Creates a contact, which is an end-user who is receiving the email, and adds them to a contact list.

    " + }, + "CreateContactList":{ + "name":"CreateContactList", + "http":{ + "method":"POST", + "requestUri":"/v2/email/contact-lists" + }, + "input":{"shape":"CreateContactListRequest"}, + "output":{"shape":"CreateContactListResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a contact list.

    " + }, + "CreateCustomVerificationEmailTemplate":{ + "name":"CreateCustomVerificationEmailTemplate", + "http":{ + "method":"POST", + "requestUri":"/v2/email/custom-verification-email-templates" + }, + "input":{"shape":"CreateCustomVerificationEmailTemplateRequest"}, + "output":{"shape":"CreateCustomVerificationEmailTemplateResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates a new custom verification email template.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "CreateDedicatedIpPool":{ + "name":"CreateDedicatedIpPool", + "http":{ + "method":"POST", + "requestUri":"/v2/email/dedicated-ip-pools" + }, + "input":{"shape":"CreateDedicatedIpPoolRequest"}, + "output":{"shape":"CreateDedicatedIpPoolResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Create a new pool of dedicated IP addresses. A pool can include one or more dedicated IP addresses that are associated with your AWS account. You can associate a pool with a configuration set. When you send an email that uses that configuration set, the message is sent from one of the addresses in the associated pool.

    " + }, + "CreateDeliverabilityTestReport":{ + "name":"CreateDeliverabilityTestReport", + "http":{ + "method":"POST", + "requestUri":"/v2/email/deliverability-dashboard/test" + }, + "input":{"shape":"CreateDeliverabilityTestReportRequest"}, + "output":{"shape":"CreateDeliverabilityTestReportResponse"}, + "errors":[ + {"shape":"AccountSuspendedException"}, + {"shape":"SendingPausedException"}, + {"shape":"MessageRejected"}, + {"shape":"MailFromDomainNotVerifiedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Create a new predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. Amazon SES then sends that message to special email addresses spread across several major email providers. After about 24 hours, the test is complete, and you can use the GetDeliverabilityTestReport operation to view the results of the test.

    " + }, + "CreateEmailIdentity":{ + "name":"CreateEmailIdentity", + "http":{ + "method":"POST", + "requestUri":"/v2/email/identities" + }, + "input":{"shape":"CreateEmailIdentityRequest"}, + "output":{"shape":"CreateEmailIdentityResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Starts the process of verifying an email identity. An identity is an email address or domain that you use when you send email. Before you can use an identity to send email, you first have to verify it. By verifying an identity, you demonstrate that you're the owner of the identity, and that you've given Amazon SES API v2 permission to send email from the identity.

    When you verify an email address, Amazon SES sends an email to the address. Your email address is verified as soon as you follow the link in the verification email.

    When you verify a domain without specifying the DkimSigningAttributes object, this operation provides a set of DKIM tokens. You can convert these tokens into CNAME records, which you then add to the DNS configuration for your domain. Your domain is verified when Amazon SES detects these records in the DNS configuration for your domain. This verification method is known as Easy DKIM.

    Alternatively, you can perform the verification process by providing your own public-private key pair. This verification method is known as Bring Your Own DKIM (BYODKIM). To use BYODKIM, your call to the CreateEmailIdentity operation has to include the DkimSigningAttributes object. When you specify this object, you provide a selector (a component of the DNS record name that identifies the public key that you want to use for DKIM authentication) and a private key.
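
    A minimal sketch of domain verification with Easy DKIM follows (not part of this change set); the domain is a placeholder and credentials/region are assumed to be configured. The tokens returned are the values to publish as CNAME records.

    import software.amazon.awssdk.services.sesv2.SesV2Client;
    import software.amazon.awssdk.services.sesv2.model.CreateEmailIdentityRequest;
    import software.amazon.awssdk.services.sesv2.model.CreateEmailIdentityResponse;

    public class VerifyDomainExample {
        public static void main(String[] args) {
            try (SesV2Client ses = SesV2Client.create()) {
                CreateEmailIdentityResponse response = ses.createEmailIdentity(
                        CreateEmailIdentityRequest.builder()
                                .emailIdentity("example.com")   // placeholder domain
                                .build());
                // Without DkimSigningAttributes, SES returns Easy DKIM tokens.
                // Publish each token as a CNAME record in the domain's DNS zone.
                response.dkimAttributes().tokens().forEach(System.out::println);
            }
        }
    }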

    " + }, + "CreateEmailIdentityPolicy":{ + "name":"CreateEmailIdentityPolicy", + "http":{ + "method":"POST", + "requestUri":"/v2/email/identities/{EmailIdentity}/policies/{PolicyName}" + }, + "input":{"shape":"CreateEmailIdentityPolicyRequest"}, + "output":{"shape":"CreateEmailIdentityPolicyResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Creates the specified sending authorization policy for the given identity (an email address or a domain).

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "CreateEmailTemplate":{ + "name":"CreateEmailTemplate", + "http":{ + "method":"POST", + "requestUri":"/v2/email/templates" + }, + "input":{"shape":"CreateEmailTemplateRequest"}, + "output":{"shape":"CreateEmailTemplateResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates an email template. Email templates enable you to send personalized email to one or more destinations in a single API operation. For more information, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "CreateImportJob":{ + "name":"CreateImportJob", + "http":{ + "method":"POST", + "requestUri":"/v2/email/import-jobs" + }, + "input":{"shape":"CreateImportJobRequest"}, + "output":{"shape":"CreateImportJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Creates an import job for a data destination.

    " + }, + "DeleteConfigurationSet":{ + "name":"DeleteConfigurationSet", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}" + }, + "input":{"shape":"DeleteConfigurationSetRequest"}, + "output":{"shape":"DeleteConfigurationSetResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Delete an existing configuration set.

    Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.

    " + }, + "DeleteConfigurationSetEventDestination":{ + "name":"DeleteConfigurationSetEventDestination", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations/{EventDestinationName}" + }, + "input":{"shape":"DeleteConfigurationSetEventDestinationRequest"}, + "output":{"shape":"DeleteConfigurationSetEventDestinationResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Delete an event destination.

    Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    " + }, + "DeleteContact":{ + "name":"DeleteContact", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/contact-lists/{ContactListName}/contacts/{EmailAddress}" + }, + "input":{"shape":"DeleteContactRequest"}, + "output":{"shape":"DeleteContactResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Removes a contact from a contact list.

    " + }, + "DeleteContactList":{ + "name":"DeleteContactList", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/contact-lists/{ContactListName}" + }, + "input":{"shape":"DeleteContactListRequest"}, + "output":{"shape":"DeleteContactListResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Deletes a contact list and all of the contacts on that list.

    " + }, + "DeleteCustomVerificationEmailTemplate":{ + "name":"DeleteCustomVerificationEmailTemplate", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/custom-verification-email-templates/{TemplateName}" + }, + "input":{"shape":"DeleteCustomVerificationEmailTemplateRequest"}, + "output":{"shape":"DeleteCustomVerificationEmailTemplateResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Deletes an existing custom verification email template.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "DeleteDedicatedIpPool":{ + "name":"DeleteDedicatedIpPool", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/dedicated-ip-pools/{PoolName}" + }, + "input":{"shape":"DeleteDedicatedIpPoolRequest"}, + "output":{"shape":"DeleteDedicatedIpPoolResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Delete a dedicated IP pool.

    " + }, + "DeleteEmailIdentity":{ + "name":"DeleteEmailIdentity", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/identities/{EmailIdentity}" + }, + "input":{"shape":"DeleteEmailIdentityRequest"}, + "output":{"shape":"DeleteEmailIdentityResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Deletes an email identity. An identity can be either an email address or a domain name.

    " + }, + "DeleteEmailIdentityPolicy":{ + "name":"DeleteEmailIdentityPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/identities/{EmailIdentity}/policies/{PolicyName}" + }, + "input":{"shape":"DeleteEmailIdentityPolicyRequest"}, + "output":{"shape":"DeleteEmailIdentityPolicyResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Deletes the specified sending authorization policy for the given identity (an email address or a domain). This API returns successfully even if a policy with the specified name does not exist.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "DeleteEmailTemplate":{ + "name":"DeleteEmailTemplate", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/templates/{TemplateName}" + }, + "input":{"shape":"DeleteEmailTemplateRequest"}, + "output":{"shape":"DeleteEmailTemplateResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Deletes an email template.

    You can execute this operation no more than once per second.

    " + }, + "DeleteSuppressedDestination":{ + "name":"DeleteSuppressedDestination", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/suppression/addresses/{EmailAddress}" + }, + "input":{"shape":"DeleteSuppressedDestinationRequest"}, + "output":{"shape":"DeleteSuppressedDestinationResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Removes an email address from the suppression list for your account.

    " + }, + "GetAccount":{ + "name":"GetAccount", + "http":{ + "method":"GET", + "requestUri":"/v2/email/account" + }, + "input":{"shape":"GetAccountRequest"}, + "output":{"shape":"GetAccountResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Obtain information about the email-sending status and capabilities of your Amazon SES account in the current AWS Region.

    " + }, + "GetBlacklistReports":{ + "name":"GetBlacklistReports", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/blacklist-report" + }, + "input":{"shape":"GetBlacklistReportsRequest"}, + "output":{"shape":"GetBlacklistReportsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Retrieve a list of the blacklists that your dedicated IP addresses appear on.

    " + }, + "GetConfigurationSet":{ + "name":"GetConfigurationSet", + "http":{ + "method":"GET", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}" + }, + "input":{"shape":"GetConfigurationSetRequest"}, + "output":{"shape":"GetConfigurationSetResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Get information about an existing configuration set, including the dedicated IP pool that it's associated with, whether or not it's enabled for sending email, and more.

    Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.

    " + }, + "GetConfigurationSetEventDestinations":{ + "name":"GetConfigurationSetEventDestinations", + "http":{ + "method":"GET", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations" + }, + "input":{"shape":"GetConfigurationSetEventDestinationsRequest"}, + "output":{"shape":"GetConfigurationSetEventDestinationsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Retrieve a list of event destinations that are associated with a configuration set.

    Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    " + }, + "GetContact":{ + "name":"GetContact", + "http":{ + "method":"GET", + "requestUri":"/v2/email/contact-lists/{ContactListName}/contacts/{EmailAddress}" + }, + "input":{"shape":"GetContactRequest"}, + "output":{"shape":"GetContactResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Returns a contact from a contact list.

    " + }, + "GetContactList":{ + "name":"GetContactList", + "http":{ + "method":"GET", + "requestUri":"/v2/email/contact-lists/{ContactListName}" + }, + "input":{"shape":"GetContactListRequest"}, + "output":{"shape":"GetContactListResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Returns contact list metadata. It does not return any information about the contacts present in the list.

    " + }, + "GetCustomVerificationEmailTemplate":{ + "name":"GetCustomVerificationEmailTemplate", + "http":{ + "method":"GET", + "requestUri":"/v2/email/custom-verification-email-templates/{TemplateName}" + }, + "input":{"shape":"GetCustomVerificationEmailTemplateRequest"}, + "output":{"shape":"GetCustomVerificationEmailTemplateResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Returns the custom email verification template for the template name you specify.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "GetDedicatedIp":{ + "name":"GetDedicatedIp", + "http":{ + "method":"GET", + "requestUri":"/v2/email/dedicated-ips/{IP}" + }, + "input":{"shape":"GetDedicatedIpRequest"}, + "output":{"shape":"GetDedicatedIpResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Get information about a dedicated IP address, including the name of the dedicated IP pool that it's associated with, as well as information about the automatic warm-up process for the address.

    " + }, + "GetDedicatedIps":{ + "name":"GetDedicatedIps", + "http":{ + "method":"GET", + "requestUri":"/v2/email/dedicated-ips" + }, + "input":{"shape":"GetDedicatedIpsRequest"}, + "output":{"shape":"GetDedicatedIpsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    List the dedicated IP addresses that are associated with your AWS account.

    " + }, + "GetDeliverabilityDashboardOptions":{ + "name":"GetDeliverabilityDashboardOptions", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard" + }, + "input":{"shape":"GetDeliverabilityDashboardOptionsRequest"}, + "output":{"shape":"GetDeliverabilityDashboardOptionsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Retrieve information about the status of the Deliverability dashboard for your account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

    When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon SES Pricing.

    " + }, + "GetDeliverabilityTestReport":{ + "name":"GetDeliverabilityTestReport", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/test-reports/{ReportId}" + }, + "input":{"shape":"GetDeliverabilityTestReportRequest"}, + "output":{"shape":"GetDeliverabilityTestReportResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Retrieve the results of a predictive inbox placement test.

    " + }, + "GetDomainDeliverabilityCampaign":{ + "name":"GetDomainDeliverabilityCampaign", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/campaigns/{CampaignId}" + }, + "input":{"shape":"GetDomainDeliverabilityCampaignRequest"}, + "output":{"shape":"GetDomainDeliverabilityCampaignResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Retrieve all the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for.

    " + }, + "GetDomainStatisticsReport":{ + "name":"GetDomainStatisticsReport", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/statistics-report/{Domain}" + }, + "input":{"shape":"GetDomainStatisticsReportRequest"}, + "output":{"shape":"GetDomainStatisticsReportResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Retrieve inbox placement and engagement rates for the domains that you use to send email.

    " + }, + "GetEmailIdentity":{ + "name":"GetEmailIdentity", + "http":{ + "method":"GET", + "requestUri":"/v2/email/identities/{EmailIdentity}" + }, + "input":{"shape":"GetEmailIdentityRequest"}, + "output":{"shape":"GetEmailIdentityResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Provides information about a specific identity, including the identity's verification status, sending authorization policies, its DKIM authentication status, and its custom Mail-From settings.

    " + }, + "GetEmailIdentityPolicies":{ + "name":"GetEmailIdentityPolicies", + "http":{ + "method":"GET", + "requestUri":"/v2/email/identities/{EmailIdentity}/policies" + }, + "input":{"shape":"GetEmailIdentityPoliciesRequest"}, + "output":{"shape":"GetEmailIdentityPoliciesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Returns the requested sending authorization policies for the given identity (an email address or a domain). The policies are returned as a map of policy names to policy contents. You can retrieve a maximum of 20 policies at a time.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "GetEmailTemplate":{ + "name":"GetEmailTemplate", + "http":{ + "method":"GET", + "requestUri":"/v2/email/templates/{TemplateName}" + }, + "input":{"shape":"GetEmailTemplateRequest"}, + "output":{"shape":"GetEmailTemplateResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Displays the template object (which includes the subject line, HTML part and text part) for the template you specify.

    You can execute this operation no more than once per second.

    " + }, + "GetImportJob":{ + "name":"GetImportJob", + "http":{ + "method":"GET", + "requestUri":"/v2/email/import-jobs/{JobId}" + }, + "input":{"shape":"GetImportJobRequest"}, + "output":{"shape":"GetImportJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Provides information about an import job.

    " + }, + "GetSuppressedDestination":{ + "name":"GetSuppressedDestination", + "http":{ + "method":"GET", + "requestUri":"/v2/email/suppression/addresses/{EmailAddress}" + }, + "input":{"shape":"GetSuppressedDestinationRequest"}, + "output":{"shape":"GetSuppressedDestinationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Retrieves information about a specific email address that's on the suppression list for your account.

    " + }, + "ListConfigurationSets":{ + "name":"ListConfigurationSets", + "http":{ + "method":"GET", + "requestUri":"/v2/email/configuration-sets" + }, + "input":{"shape":"ListConfigurationSetsRequest"}, + "output":{"shape":"ListConfigurationSetsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    List all of the configuration sets associated with your account in the current region.

    Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.

    " + }, + "ListContactLists":{ + "name":"ListContactLists", + "http":{ + "method":"GET", + "requestUri":"/v2/email/contact-lists" + }, + "input":{"shape":"ListContactListsRequest"}, + "output":{"shape":"ListContactListsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Lists all of the contact lists available.

    " + }, + "ListContacts":{ + "name":"ListContacts", + "http":{ + "method":"GET", + "requestUri":"/v2/email/contact-lists/{ContactListName}/contacts" + }, + "input":{"shape":"ListContactsRequest"}, + "output":{"shape":"ListContactsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Lists the contacts present in a specific contact list.

    " + }, + "ListCustomVerificationEmailTemplates":{ + "name":"ListCustomVerificationEmailTemplates", + "http":{ + "method":"GET", + "requestUri":"/v2/email/custom-verification-email-templates" + }, + "input":{"shape":"ListCustomVerificationEmailTemplatesRequest"}, + "output":{"shape":"ListCustomVerificationEmailTemplatesResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Lists the existing custom verification email templates for your account in the current AWS Region.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "ListDedicatedIpPools":{ + "name":"ListDedicatedIpPools", + "http":{ + "method":"GET", + "requestUri":"/v2/email/dedicated-ip-pools" + }, + "input":{"shape":"ListDedicatedIpPoolsRequest"}, + "output":{"shape":"ListDedicatedIpPoolsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    List all of the dedicated IP pools that exist in your AWS account in the current Region.

    " + }, + "ListDeliverabilityTestReports":{ + "name":"ListDeliverabilityTestReports", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/test-reports" + }, + "input":{"shape":"ListDeliverabilityTestReportsRequest"}, + "output":{"shape":"ListDeliverabilityTestReportsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Show a list of the predictive inbox placement tests that you've performed, regardless of their statuses. For predictive inbox placement tests that are complete, you can use the GetDeliverabilityTestReport operation to view the results.

    " + }, + "ListDomainDeliverabilityCampaigns":{ + "name":"ListDomainDeliverabilityCampaigns", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/domains/{SubscribedDomain}/campaigns" + }, + "input":{"shape":"ListDomainDeliverabilityCampaignsRequest"}, + "output":{"shape":"ListDomainDeliverabilityCampaignsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

    Retrieve deliverability data for all the campaigns that used a specific domain to send email during a specified time range. This data is available for a domain only if you enabled the Deliverability dashboard for the domain.

    " + }, + "ListEmailIdentities":{ + "name":"ListEmailIdentities", + "http":{ + "method":"GET", + "requestUri":"/v2/email/identities" + }, + "input":{"shape":"ListEmailIdentitiesRequest"}, + "output":{"shape":"ListEmailIdentitiesResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Returns a list of all of the email identities that are associated with your AWS account. An identity can be either an email address or a domain. This operation returns identities that are verified as well as those that aren't. This operation returns identities that are associated with Amazon SES and Amazon Pinpoint.
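
    As a sketch only, all identities can be enumerated with the generated paginator; this assumes a listEmailIdentitiesPaginator method is generated from the paginators-1.json added in this change, and uses a placeholder page size.

    import software.amazon.awssdk.services.sesv2.SesV2Client;
    import software.amazon.awssdk.services.sesv2.model.ListEmailIdentitiesRequest;

    public class ListIdentitiesExample {
        public static void main(String[] args) {
            try (SesV2Client ses = SesV2Client.create()) {
                // The paginator follows NextToken automatically; PageSize bounds each page.
                ses.listEmailIdentitiesPaginator(ListEmailIdentitiesRequest.builder()
                                .pageSize(10)
                                .build())
                        .stream()
                        .flatMap(page -> page.emailIdentities().stream())
                        .forEach(identity -> System.out.println(
                                identity.identityName() + " " + identity.sendingEnabled()));
            }
        }
    }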

    " + }, + "ListEmailTemplates":{ + "name":"ListEmailTemplates", + "http":{ + "method":"GET", + "requestUri":"/v2/email/templates" + }, + "input":{"shape":"ListEmailTemplatesRequest"}, + "output":{"shape":"ListEmailTemplatesResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Lists the email templates present in your Amazon SES account in the current AWS Region.

    You can execute this operation no more than once per second.

    " + }, + "ListImportJobs":{ + "name":"ListImportJobs", + "http":{ + "method":"GET", + "requestUri":"/v2/email/import-jobs" + }, + "input":{"shape":"ListImportJobsRequest"}, + "output":{"shape":"ListImportJobsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Lists all of the import jobs.

    " + }, + "ListSuppressedDestinations":{ + "name":"ListSuppressedDestinations", + "http":{ + "method":"GET", + "requestUri":"/v2/email/suppression/addresses" + }, + "input":{"shape":"ListSuppressedDestinationsRequest"}, + "output":{"shape":"ListSuppressedDestinationsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

    Retrieves a list of email addresses that are on the suppression list for your account.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/v2/email/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Retrieve a list of the tags (keys and values) that are associated with a specified resource. A tag is a label that you optionally define and associate with a resource. Each tag consists of a required tag key and an optional associated tag value. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.
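
    A small hedged example of reading the tags on a resource (not part of this change set); the ARN is a placeholder.

    import software.amazon.awssdk.services.sesv2.SesV2Client;
    import software.amazon.awssdk.services.sesv2.model.ListTagsForResourceRequest;

    public class ListTagsExample {
        public static void main(String[] args) {
            try (SesV2Client ses = SesV2Client.create()) {
                ses.listTagsForResource(ListTagsForResourceRequest.builder()
                                .resourceArn("arn:aws:ses:us-east-1:123456789012:configuration-set/my-config-set") // placeholder ARN
                                .build())
                        .tags()
                        .forEach(tag -> System.out.println(tag.key() + "=" + tag.value()));
            }
        }
    }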

    " + }, + "PutAccountDedicatedIpWarmupAttributes":{ + "name":"PutAccountDedicatedIpWarmupAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/account/dedicated-ips/warmup" + }, + "input":{"shape":"PutAccountDedicatedIpWarmupAttributesRequest"}, + "output":{"shape":"PutAccountDedicatedIpWarmupAttributesResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Enable or disable the automatic warm-up feature for dedicated IP addresses.

    " + }, + "PutAccountDetails":{ + "name":"PutAccountDetails", + "http":{ + "method":"POST", + "requestUri":"/v2/email/account/details" + }, + "input":{"shape":"PutAccountDetailsRequest"}, + "output":{"shape":"PutAccountDetailsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Update your Amazon SES account details.

    " + }, + "PutAccountSendingAttributes":{ + "name":"PutAccountSendingAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/account/sending" + }, + "input":{"shape":"PutAccountSendingAttributesRequest"}, + "output":{"shape":"PutAccountSendingAttributesResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Enable or disable the ability of your account to send email.

    " + }, + "PutAccountSuppressionAttributes":{ + "name":"PutAccountSuppressionAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/account/suppression" + }, + "input":{"shape":"PutAccountSuppressionAttributesRequest"}, + "output":{"shape":"PutAccountSuppressionAttributesResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Change the settings for the account-level suppression list.

    " + }, + "PutConfigurationSetDeliveryOptions":{ + "name":"PutConfigurationSetDeliveryOptions", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/delivery-options" + }, + "input":{"shape":"PutConfigurationSetDeliveryOptionsRequest"}, + "output":{"shape":"PutConfigurationSetDeliveryOptionsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Associate a configuration set with a dedicated IP pool. You can use dedicated IP pools to create groups of dedicated IP addresses for sending specific types of email.

    " + }, + "PutConfigurationSetReputationOptions":{ + "name":"PutConfigurationSetReputationOptions", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/reputation-options" + }, + "input":{"shape":"PutConfigurationSetReputationOptionsRequest"}, + "output":{"shape":"PutConfigurationSetReputationOptionsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Enable or disable collection of reputation metrics for emails that you send using a particular configuration set in a specific AWS Region.

    " + }, + "PutConfigurationSetSendingOptions":{ + "name":"PutConfigurationSetSendingOptions", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/sending" + }, + "input":{"shape":"PutConfigurationSetSendingOptionsRequest"}, + "output":{"shape":"PutConfigurationSetSendingOptionsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Enable or disable email sending for messages that use a particular configuration set in a specific AWS Region.

    " + }, + "PutConfigurationSetSuppressionOptions":{ + "name":"PutConfigurationSetSuppressionOptions", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/suppression-options" + }, + "input":{"shape":"PutConfigurationSetSuppressionOptionsRequest"}, + "output":{"shape":"PutConfigurationSetSuppressionOptionsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Specify the account suppression list preferences for a configuration set.

    " + }, + "PutConfigurationSetTrackingOptions":{ + "name":"PutConfigurationSetTrackingOptions", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/tracking-options" + }, + "input":{"shape":"PutConfigurationSetTrackingOptionsRequest"}, + "output":{"shape":"PutConfigurationSetTrackingOptionsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Specify a custom domain to use for open and click tracking elements in email that you send.

    " + }, + "PutDedicatedIpInPool":{ + "name":"PutDedicatedIpInPool", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/dedicated-ips/{IP}/pool" + }, + "input":{"shape":"PutDedicatedIpInPoolRequest"}, + "output":{"shape":"PutDedicatedIpInPoolResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Move a dedicated IP address to an existing dedicated IP pool.

    The dedicated IP address that you specify must already exist, and must be associated with your AWS account.

    The dedicated IP pool you specify must already exist. You can create a new pool by using the CreateDedicatedIpPool operation.

    " + }, + "PutDedicatedIpWarmupAttributes":{ + "name":"PutDedicatedIpWarmupAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/dedicated-ips/{IP}/warmup" + }, + "input":{"shape":"PutDedicatedIpWarmupAttributesRequest"}, + "output":{"shape":"PutDedicatedIpWarmupAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    " + }, + "PutDeliverabilityDashboardOption":{ + "name":"PutDeliverabilityDashboardOption", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/deliverability-dashboard" + }, + "input":{"shape":"PutDeliverabilityDashboardOptionRequest"}, + "output":{"shape":"PutDeliverabilityDashboardOptionResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

    When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon SES Pricing.

    " + }, + "PutEmailIdentityDkimAttributes":{ + "name":"PutEmailIdentityDkimAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/identities/{EmailIdentity}/dkim" + }, + "input":{"shape":"PutEmailIdentityDkimAttributesRequest"}, + "output":{"shape":"PutEmailIdentityDkimAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Used to enable or disable DKIM authentication for an email identity.

    " + }, + "PutEmailIdentityDkimSigningAttributes":{ + "name":"PutEmailIdentityDkimSigningAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v1/email/identities/{EmailIdentity}/dkim/signing" + }, + "input":{"shape":"PutEmailIdentityDkimSigningAttributesRequest"}, + "output":{"shape":"PutEmailIdentityDkimSigningAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Used to configure or change the DKIM authentication settings for an email domain identity. You can use this operation to do any of the following:

    • Update the signing attributes for an identity that uses Bring Your Own DKIM (BYODKIM).

    • Change from using no DKIM authentication to using Easy DKIM.

    • Change from using no DKIM authentication to using BYODKIM.

    • Change from using Easy DKIM to using BYODKIM.

    • Change from using BYODKIM to using Easy DKIM.

    " + }, + "PutEmailIdentityFeedbackAttributes":{ + "name":"PutEmailIdentityFeedbackAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/identities/{EmailIdentity}/feedback" + }, + "input":{"shape":"PutEmailIdentityFeedbackAttributesRequest"}, + "output":{"shape":"PutEmailIdentityFeedbackAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Used to enable or disable feedback forwarding for an identity. This setting determines what happens when an identity is used to send an email that results in a bounce or complaint event.

    If the value is true, you receive email notifications when bounce or complaint events occur. These notifications are sent to the address that you specified in the Return-Path header of the original email.

    You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications (for example, by setting up an event destination), you receive an email notification when these events occur (even if this setting is disabled).

    " + }, + "PutEmailIdentityMailFromAttributes":{ + "name":"PutEmailIdentityMailFromAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/identities/{EmailIdentity}/mail-from" + }, + "input":{"shape":"PutEmailIdentityMailFromAttributesRequest"}, + "output":{"shape":"PutEmailIdentityMailFromAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Used to enable or disable the custom Mail-From domain configuration for an email identity.

    " + }, + "PutSuppressedDestination":{ + "name":"PutSuppressedDestination", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/suppression/addresses" + }, + "input":{"shape":"PutSuppressedDestinationRequest"}, + "output":{"shape":"PutSuppressedDestinationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Adds an email address to the suppression list for your account.

    " + }, + "SendBulkEmail":{ + "name":"SendBulkEmail", + "http":{ + "method":"POST", + "requestUri":"/v2/email/outbound-bulk-emails" + }, + "input":{"shape":"SendBulkEmailRequest"}, + "output":{"shape":"SendBulkEmailResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccountSuspendedException"}, + {"shape":"SendingPausedException"}, + {"shape":"MessageRejected"}, + {"shape":"MailFromDomainNotVerifiedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Composes an email message to multiple destinations.

    " + }, + "SendCustomVerificationEmail":{ + "name":"SendCustomVerificationEmail", + "http":{ + "method":"POST", + "requestUri":"/v2/email/outbound-custom-verification-emails" + }, + "input":{"shape":"SendCustomVerificationEmailRequest"}, + "output":{"shape":"SendCustomVerificationEmailResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"MessageRejected"}, + {"shape":"SendingPausedException"}, + {"shape":"MailFromDomainNotVerifiedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Adds an email address to the list of identities for your Amazon SES account in the current AWS Region and attempts to verify it. As a result of executing this operation, a customized verification email is sent to the specified address.

    To use this operation, you must first create a custom verification email template. For more information about creating and using custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "SendEmail":{ + "name":"SendEmail", + "http":{ + "method":"POST", + "requestUri":"/v2/email/outbound-emails" + }, + "input":{"shape":"SendEmailRequest"}, + "output":{"shape":"SendEmailResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccountSuspendedException"}, + {"shape":"SendingPausedException"}, + {"shape":"MessageRejected"}, + {"shape":"MailFromDomainNotVerifiedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Sends an email message. You can use the Amazon SES API v2 to send the following types of messages:

    • Simple – A standard email message. When you create this type of message, you specify the sender, the recipient, and the message body, and Amazon SES assembles the message for you.

    • Raw – A raw, MIME-formatted email message. When you send this type of email, you have to specify all of the message headers, as well as the message body. You can use this message type to send messages that contain attachments. The message that you specify has to be a valid MIME message.

    • Templated – A message that contains personalization tags. When you send this type of email, Amazon SES API v2 automatically replaces the tags with values that you specify.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/v2/email/tags" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Add one or more tags (keys and values) to a specified resource. A tag is a label that you optionally define and associate with a resource. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.

    Each tag consists of a required tag key and an associated tag value, both of which you define. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.

    " + }, + "TestRenderEmailTemplate":{ + "name":"TestRenderEmailTemplate", + "http":{ + "method":"POST", + "requestUri":"/v2/email/templates/{TemplateName}/render" + }, + "input":{"shape":"TestRenderEmailTemplateRequest"}, + "output":{"shape":"TestRenderEmailTemplateResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Creates a preview of the MIME content of an email when provided with a template and a set of replacement data.

    You can execute this operation no more than once per second.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/tags" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Remove one or more tags (keys and values) from a specified resource.

    " + }, + "UpdateConfigurationSetEventDestination":{ + "name":"UpdateConfigurationSetEventDestination", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations/{EventDestinationName}" + }, + "input":{"shape":"UpdateConfigurationSetEventDestinationRequest"}, + "output":{"shape":"UpdateConfigurationSetEventDestinationResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Update the configuration of an event destination for a configuration set.

    Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    " + }, + "UpdateContact":{ + "name":"UpdateContact", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/contact-lists/{ContactListName}/contacts/{EmailAddress}" + }, + "input":{"shape":"UpdateContactRequest"}, + "output":{"shape":"UpdateContactResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Updates a contact's preferences for a list. It is not necessary to specify all existing topic preferences in the TopicPreferences object, just the ones that need updating.

    " + }, + "UpdateContactList":{ + "name":"UpdateContactList", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/contact-lists/{ContactListName}" + }, + "input":{"shape":"UpdateContactListRequest"}, + "output":{"shape":"UpdateContactListResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

    Updates contact list metadata. This operation does a complete replacement.

    " + }, + "UpdateCustomVerificationEmailTemplate":{ + "name":"UpdateCustomVerificationEmailTemplate", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/custom-verification-email-templates/{TemplateName}" + }, + "input":{"shape":"UpdateCustomVerificationEmailTemplateRequest"}, + "output":{"shape":"UpdateCustomVerificationEmailTemplateResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Updates an existing custom verification email template.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "UpdateEmailIdentityPolicy":{ + "name":"UpdateEmailIdentityPolicy", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/identities/{EmailIdentity}/policies/{PolicyName}" + }, + "input":{"shape":"UpdateEmailIdentityPolicyRequest"}, + "output":{"shape":"UpdateEmailIdentityPolicyResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Updates the specified sending authorization policy for the given identity (an email address or a domain). This API returns successfully even if a policy with the specified name does not exist.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + }, + "UpdateEmailTemplate":{ + "name":"UpdateEmailTemplate", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/templates/{TemplateName}" + }, + "input":{"shape":"UpdateEmailTemplateRequest"}, + "output":{"shape":"UpdateEmailTemplateResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Updates an email template. Email templates enable you to send personalized email to one or more destinations in a single API operation. For more information, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + } + }, + "shapes":{ + "AccountDetails":{ + "type":"structure", + "members":{ + "MailType":{ + "shape":"MailType", + "documentation":"

    The type of email your account is sending. The mail type can be one of the following:

    • MARKETING – Most of your sending traffic is to keep your customers informed of your latest offering.

    • TRANSACTIONAL – Most of your sending traffic is to communicate during a transaction with a customer.

    " + }, + "WebsiteURL":{ + "shape":"WebsiteURL", + "documentation":"

    The URL of your website. This information helps us better understand the type of content that you plan to send.

    " + }, + "ContactLanguage":{ + "shape":"ContactLanguage", + "documentation":"

    The language you would prefer for the case. The contact language can be one of ENGLISH or JAPANESE.

    " + }, + "UseCaseDescription":{ + "shape":"UseCaseDescription", + "documentation":"

    A description of the types of email that you plan to send.

    " + }, + "AdditionalContactEmailAddresses":{ + "shape":"AdditionalContactEmailAddresses", + "documentation":"

    Additional email addresses where updates are sent about your account review process.

    " + }, + "ReviewDetails":{ + "shape":"ReviewDetails", + "documentation":"

    Information about the review of the latest details you submitted.

    " + } + }, + "documentation":"

    An object that contains information about your account details.

    " + }, + "AccountSuspendedException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The message can't be sent because the account's ability to send email has been permanently restricted.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "AdditionalContactEmailAddress":{ + "type":"string", + "max":254, + "min":6, + "pattern":"^(.+)@(.+)$", + "sensitive":true + }, + "AdditionalContactEmailAddresses":{ + "type":"list", + "member":{"shape":"AdditionalContactEmailAddress"}, + "max":4, + "min":1, + "sensitive":true + }, + "AlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The resource specified in your request already exists.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "AmazonResourceName":{"type":"string"}, + "AttributesData":{"type":"string"}, + "BadRequestException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The input you provided is invalid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "BehaviorOnMxFailure":{ + "type":"string", + "documentation":"

    The action that you want to take if the required MX record can't be found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

    These behaviors apply when the custom MAIL FROM domain configuration is in the Pending, Failed, or TemporaryFailure state.

    ", + "enum":[ + "USE_DEFAULT_VALUE", + "REJECT_MESSAGE" + ] + }, + "BlacklistEntries":{ + "type":"list", + "member":{"shape":"BlacklistEntry"} + }, + "BlacklistEntry":{ + "type":"structure", + "members":{ + "RblName":{ + "shape":"RblName", + "documentation":"

    The name of the blacklist that the IP address appears on.

    " + }, + "ListingTime":{ + "shape":"Timestamp", + "documentation":"

    The time when the blacklisting event occurred, shown in Unix time format.

    " + }, + "Description":{ + "shape":"BlacklistingDescription", + "documentation":"

    Additional information about the blacklisting event, as provided by the blacklist maintainer.

    " + } + }, + "documentation":"

    An object that contains information about a blacklisting event that impacts one of the dedicated IP addresses that is associated with your account.

    " + }, + "BlacklistItemName":{ + "type":"string", + "documentation":"

    An IP address that you want to obtain blacklist information for.

    " + }, + "BlacklistItemNames":{ + "type":"list", + "member":{"shape":"BlacklistItemName"} + }, + "BlacklistReport":{ + "type":"map", + "key":{"shape":"BlacklistItemName"}, + "value":{"shape":"BlacklistEntries"} + }, + "BlacklistingDescription":{ + "type":"string", + "documentation":"

    A description of the blacklisting event.

    " + }, + "Body":{ + "type":"structure", + "members":{ + "Text":{ + "shape":"Content", + "documentation":"

    An object that represents the version of the message that is displayed in email clients that don't support HTML, or clients where the recipient has disabled HTML rendering.

    " + }, + "Html":{ + "shape":"Content", + "documentation":"

    An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.

    " + } + }, + "documentation":"

    Represents the body of the email message.

    " + }, + "BulkEmailContent":{ + "type":"structure", + "members":{ + "Template":{ + "shape":"Template", + "documentation":"

    The template to use for the bulk email message.

    " + } + }, + "documentation":"

    An object that contains the body of the message. You can specify a template message.

    " + }, + "BulkEmailEntry":{ + "type":"structure", + "required":["Destination"], + "members":{ + "Destination":{ + "shape":"Destination", + "documentation":"

    Represents the destination of the message, consisting of To:, CC:, and BCC: fields.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a destination email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492.

    " + }, + "ReplacementTags":{ + "shape":"MessageTagList", + "documentation":"

    A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendBulkTemplatedEmail operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events.

    " + }, + "ReplacementEmailContent":{ + "shape":"ReplacementEmailContent", + "documentation":"

    The ReplacementEmailContent associated with a BulkEmailEntry.

    " + } + } + }, + "BulkEmailEntryList":{ + "type":"list", + "member":{"shape":"BulkEmailEntry"}, + "documentation":"

    A list of BulkEmailEntry objects.

    " + }, + "BulkEmailEntryResult":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"BulkEmailStatus", + "documentation":"

    The status of a message sent using the SendBulkTemplatedEmail operation.

    Possible values for this parameter include:

    • SUCCESS: Amazon SES accepted the message, and will attempt to deliver it to the recipients.

    • MESSAGE_REJECTED: The message was rejected because it contained a virus.

    • MAIL_FROM_DOMAIN_NOT_VERIFIED: The sender's email address or domain was not verified.

    • CONFIGURATION_SET_DOES_NOT_EXIST: The configuration set you specified does not exist.

    • TEMPLATE_DOES_NOT_EXIST: The template you specified does not exist.

    • ACCOUNT_SUSPENDED: Your account has been shut down because of issues related to your email sending practices.

    • ACCOUNT_THROTTLED: The number of emails you can send has been reduced because your account has exceeded its allocated sending limit.

    • ACCOUNT_DAILY_QUOTA_EXCEEDED: You have reached or exceeded the maximum number of emails you can send from your account in a 24-hour period.

    • INVALID_SENDING_POOL_NAME: The configuration set you specified refers to an IP pool that does not exist.

    • ACCOUNT_SENDING_PAUSED: Email sending for the Amazon SES account was disabled using the UpdateAccountSendingEnabled operation.

    • CONFIGURATION_SET_SENDING_PAUSED: Email sending for this configuration set was disabled using the UpdateConfigurationSetSendingEnabled operation.

    • INVALID_PARAMETER_VALUE: One or more of the parameters you specified when calling this operation was invalid. See the error message for additional information.

    • TRANSIENT_FAILURE: Amazon SES was unable to process your request because of a temporary issue.

    • FAILED: Amazon SES was unable to process your request. See the error message for additional information.

    " + }, + "Error":{ + "shape":"ErrorMessage", + "documentation":"

    A description of an error that prevented a message being sent using the SendBulkTemplatedEmail operation.

    " + }, + "MessageId":{ + "shape":"OutboundMessageId", + "documentation":"

    The unique message identifier returned from the SendBulkTemplatedEmail operation.

    " + } + }, + "documentation":"

    The result of the SendBulkEmail operation for each specified BulkEmailEntry.

    " + }, + "BulkEmailEntryResultList":{ + "type":"list", + "member":{"shape":"BulkEmailEntryResult"}, + "documentation":"

    A list of BulkEmailEntryResult objects.

    " + }, + "BulkEmailStatus":{ + "type":"string", + "enum":[ + "SUCCESS", + "MESSAGE_REJECTED", + "MAIL_FROM_DOMAIN_NOT_VERIFIED", + "CONFIGURATION_SET_NOT_FOUND", + "TEMPLATE_NOT_FOUND", + "ACCOUNT_SUSPENDED", + "ACCOUNT_THROTTLED", + "ACCOUNT_DAILY_QUOTA_EXCEEDED", + "INVALID_SENDING_POOL_NAME", + "ACCOUNT_SENDING_PAUSED", + "CONFIGURATION_SET_SENDING_PAUSED", + "INVALID_PARAMETER", + "TRANSIENT_FAILURE", + "FAILED" + ] + }, + "CampaignId":{"type":"string"}, + "CaseId":{"type":"string"}, + "Charset":{"type":"string"}, + "CloudWatchDestination":{ + "type":"structure", + "required":["DimensionConfigurations"], + "members":{ + "DimensionConfigurations":{ + "shape":"CloudWatchDimensionConfigurations", + "documentation":"

    An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.

    " + } + }, + "documentation":"

    An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.

    " + }, + "CloudWatchDimensionConfiguration":{ + "type":"structure", + "required":[ + "DimensionName", + "DimensionValueSource", + "DefaultDimensionValue" + ], + "members":{ + "DimensionName":{ + "shape":"DimensionName", + "documentation":"

    The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + }, + "DimensionValueSource":{ + "shape":"DimensionValueSource", + "documentation":"

    The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. If you want to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail or SendRawEmail API, choose messageTag. If you want to use your own email headers, choose emailHeader. If you want to use link tags, choose linkTags.

    " + }, + "DefaultDimensionValue":{ + "shape":"DefaultDimensionValue", + "documentation":"

    The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. This value has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + } + }, + "documentation":"

    An object that defines the dimension configuration to use when you send email events to Amazon CloudWatch.

    " + }, + "CloudWatchDimensionConfigurations":{ + "type":"list", + "member":{"shape":"CloudWatchDimensionConfiguration"} + }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The resource is being modified by another operation or thread.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ConfigurationSetName":{ + "type":"string", + "documentation":"

    The name of a configuration set.

    Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.

    " + }, + "ConfigurationSetNameList":{ + "type":"list", + "member":{"shape":"ConfigurationSetName"} + }, + "ConflictException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This exception is thrown when there is already an ongoing account details update under review.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "Contact":{ + "type":"structure", + "members":{ + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The contact's email address.

    " + }, + "TopicPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

    The contact's preference for being opted-in to or opted-out of a topic.

    " + }, + "TopicDefaultPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

    The default topic preferences applied to the contact.

    " + }, + "UnsubscribeAll":{ + "shape":"UnsubscribeAll", + "documentation":"

    A boolean value indicating whether the contact is unsubscribed from all contact list topics.

    " + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    A timestamp noting the last time the contact's information was updated.

    " + } + }, + "documentation":"

    A contact is the end-user who is receiving the email.

    " + }, + "ContactLanguage":{ + "type":"string", + "enum":[ + "EN", + "JA" + ] + }, + "ContactList":{ + "type":"structure", + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list.

    " + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    A timestamp noting the last time the contact list was updated.

    " + } + }, + "documentation":"

    A list that contains contacts that have subscribed to a particular topic or topics.

    " + }, + "ContactListDestination":{ + "type":"structure", + "required":[ + "ContactListName", + "ContactListImportAction" + ], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list.

    " + }, + "ContactListImportAction":{ + "shape":"ContactListImportAction", + "documentation":"

    The type of action that you want to perform on the addresses. Acceptable values:

    • PUT: add the addresses to the contact list. If the record already exists, it is overwritten with the new value.

    • DELETE: remove the addresses from the contact list.

    " + } + }, + "documentation":"

    An object that contains details about the action of a contact list.

    " + }, + "ContactListImportAction":{ + "type":"string", + "enum":[ + "DELETE", + "PUT" + ] + }, + "ContactListName":{"type":"string"}, + "Content":{ + "type":"structure", + "required":["Data"], + "members":{ + "Data":{ + "shape":"MessageData", + "documentation":"

    The content of the message itself.

    " + }, + "Charset":{ + "shape":"Charset", + "documentation":"

    The character set for the content. Because of the constraints of the SMTP protocol, Amazon SES uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8, ISO-8859-1, or Shift_JIS.

    " + } + }, + "documentation":"

    An object that represents the content of the email, and optionally a character set specification.

    " + }, + "CreateConfigurationSetEventDestinationRequest":{ + "type":"structure", + "required":[ + "ConfigurationSetName", + "EventDestinationName", + "EventDestination" + ], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that you want to add an event destination to.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "EventDestinationName":{ + "shape":"EventDestinationName", + "documentation":"

    A name that identifies the event destination within the configuration set.

    " + }, + "EventDestination":{ + "shape":"EventDestinationDefinition", + "documentation":"

    An object that defines the event destination.

    " + } + }, + "documentation":"

    A request to add an event destination to a configuration set.

    " + }, + "CreateConfigurationSetEventDestinationResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "CreateConfigurationSetRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set.

    " + }, + "TrackingOptions":{ + "shape":"TrackingOptions", + "documentation":"

    An object that defines the open and click tracking options for emails that you send using the configuration set.

    " + }, + "DeliveryOptions":{ + "shape":"DeliveryOptions", + "documentation":"

    An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.

    " + }, + "ReputationOptions":{ + "shape":"ReputationOptions", + "documentation":"

    An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set.

    " + }, + "SendingOptions":{ + "shape":"SendingOptions", + "documentation":"

    An object that defines whether or not Amazon SES can send email that you send using the configuration set.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of objects that define the tags (keys and values) that you want to associate with the configuration set.

    " + }, + "SuppressionOptions":{"shape":"SuppressionOptions"} + }, + "documentation":"

    A request to create a configuration set.

    " + }, + "CreateConfigurationSetResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "CreateContactListRequest":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list.

    " + }, + "Topics":{ + "shape":"Topics", + "documentation":"

    An interest group, theme, or label within a list. A contact list can have multiple topics.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of what the contact list is about.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags associated with a contact list.

    " + } + } + }, + "CreateContactListResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateContactRequest":{ + "type":"structure", + "required":[ + "ContactListName", + "EmailAddress" + ], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list to which the contact should be added.

    ", + "location":"uri", + "locationName":"ContactListName" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The contact's email address.

    " + }, + "TopicPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

    The contact's preferences for being opted-in to or opted-out of topics.

    " + }, + "UnsubscribeAll":{ + "shape":"UnsubscribeAll", + "documentation":"

    A boolean value indicating whether the contact is unsubscribed from all contact list topics.

    " + }, + "AttributesData":{ + "shape":"AttributesData", + "documentation":"

    The attribute data attached to a contact.

    " + } + } + }, + "CreateContactResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateCustomVerificationEmailTemplateRequest":{ + "type":"structure", + "required":[ + "TemplateName", + "FromEmailAddress", + "TemplateSubject", + "TemplateContent", + "SuccessRedirectionURL", + "FailureRedirectionURL" + ], + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the custom verification email template.

    " + }, + "FromEmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that the custom verification email is sent from.

    " + }, + "TemplateSubject":{ + "shape":"EmailTemplateSubject", + "documentation":"

    The subject line of the custom verification email.

    " + }, + "TemplateContent":{ + "shape":"TemplateContent", + "documentation":"

    The content of the custom verification email. The total size of the email must be less than 10 MB. The message body may contain HTML, with some limitations. For more information, see Custom Verification Email Frequently Asked Questions in the Amazon SES Developer Guide.

    " + }, + "SuccessRedirectionURL":{ + "shape":"SuccessRedirectionURL", + "documentation":"

    The URL that the recipient of the verification email is sent to if his or her address is successfully verified.

    " + }, + "FailureRedirectionURL":{ + "shape":"FailureRedirectionURL", + "documentation":"

    The URL that the recipient of the verification email is sent to if his or her address is not successfully verified.

    " + } + }, + "documentation":"

    Represents a request to create a custom verification email template.

    " + }, + "CreateCustomVerificationEmailTemplateResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

    " + }, + "CreateDedicatedIpPoolRequest":{ + "type":"structure", + "required":["PoolName"], + "members":{ + "PoolName":{ + "shape":"PoolName", + "documentation":"

    The name of the dedicated IP pool.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An object that defines the tags (keys and values) that you want to associate with the pool.

    " + } + }, + "documentation":"

    A request to create a new dedicated IP pool.

    " + }, + "CreateDedicatedIpPoolResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "CreateDeliverabilityTestReportRequest":{ + "type":"structure", + "required":[ + "FromEmailAddress", + "Content" + ], + "members":{ + "ReportName":{ + "shape":"ReportName", + "documentation":"

    A unique name that helps you to identify the predictive inbox placement test when you retrieve the results.

    " + }, + "FromEmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that the predictive inbox placement test email was sent from.

    " + }, + "Content":{ + "shape":"EmailContent", + "documentation":"

    The HTML body of the message that you sent when you performed the predictive inbox placement test.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of objects that define the tags (keys and values) that you want to associate with the predictive inbox placement test.

    " + } + }, + "documentation":"

    A request to perform a predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. We send that message to special email addresses spread across several major email providers around the world. The test takes about 24 hours to complete. When the test is complete, you can use the GetDeliverabilityTestReport operation to view the results of the test.

    " + }, + "CreateDeliverabilityTestReportResponse":{ + "type":"structure", + "required":[ + "ReportId", + "DeliverabilityTestStatus" + ], + "members":{ + "ReportId":{ + "shape":"ReportId", + "documentation":"

    A unique string that identifies the predictive inbox placement test.

    " + }, + "DeliverabilityTestStatus":{ + "shape":"DeliverabilityTestStatus", + "documentation":"

    The status of the predictive inbox placement test. If the status is IN_PROGRESS, then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is COMPLETE, then the test is finished, and you can use the GetDeliverabilityTestReport operation to view the results of the test.

    " + } + }, + "documentation":"

    Information about the predictive inbox placement test that you created.

    " + }, + "CreateEmailIdentityPolicyRequest":{ + "type":"structure", + "required":[ + "EmailIdentity", + "PolicyName", + "Policy" + ], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The email identity for which you want to create a policy.

    ", + "location":"uri", + "locationName":"EmailIdentity" + }, + "PolicyName":{ + "shape":"PolicyName", + "documentation":"

    The name of the policy.

    The policy name cannot exceed 64 characters and can only include alphanumeric characters, dashes, and underscores.

    ", + "location":"uri", + "locationName":"PolicyName" + }, + "Policy":{ + "shape":"Policy", + "documentation":"

    The text of the policy in JSON format. The policy cannot exceed 4 KB.

    For information about the syntax of sending authorization policies, see the Amazon SES Developer Guide.

    " + } + }, + "documentation":"

    Represents a request to create a sending authorization policy for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " + }, + "CreateEmailIdentityPolicyResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "CreateEmailIdentityRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The email address or domain that you want to verify.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of objects that define the tags (keys and values) that you want to associate with the email identity.

    " + }, + "DkimSigningAttributes":{ + "shape":"DkimSigningAttributes", + "documentation":"

    If your request includes this object, Amazon SES configures the identity to use Bring Your Own DKIM (BYODKIM) for DKIM authentication purposes, as opposed to the default method, Easy DKIM.

    You can only specify this object if the email identity is a domain, as opposed to an address.

    " + } + }, + "documentation":"

    A request to begin the verification process for an email identity (an email address or domain).

    " + }, + "CreateEmailIdentityResponse":{ + "type":"structure", + "members":{ + "IdentityType":{ + "shape":"IdentityType", + "documentation":"

    The email identity type.

    " + }, + "VerifiedForSendingStatus":{ + "shape":"Enabled", + "documentation":"

    Specifies whether or not the identity is verified. You can only send email from verified email addresses or domains. For more information about verifying identities, see the Amazon SES Developer Guide.

    " + }, + "DkimAttributes":{ + "shape":"DkimAttributes", + "documentation":"

    An object that contains information about the DKIM attributes for the identity.

    " + } + }, + "documentation":"

    If the email identity is a domain, this object contains information about the DKIM verification status for the domain.

    If the email identity is an email address, this object is empty.

    " + }, + "CreateEmailTemplateRequest":{ + "type":"structure", + "required":[ + "TemplateName", + "TemplateContent" + ], + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the template you want to create.

    " + }, + "TemplateContent":{ + "shape":"EmailTemplateContent", + "documentation":"

    The content of the email template, composed of a subject line, an HTML part, and a text-only part.

    " + } + }, + "documentation":"

    Represents a request to create an email template. For more information, see the Amazon SES Developer Guide.

    " + }, + "CreateEmailTemplateResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

    " + }, + "CreateImportJobRequest":{ + "type":"structure", + "required":[ + "ImportDestination", + "ImportDataSource" + ], + "members":{ + "ImportDestination":{ + "shape":"ImportDestination", + "documentation":"

    The destination for the import job.

    " + }, + "ImportDataSource":{ + "shape":"ImportDataSource", + "documentation":"

    The data source for the import job.

    " + } + }, + "documentation":"

    Represents a request to create an import job from a data source for a data destination.

    " + }, + "CreateImportJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    A string that represents the import job ID.

    " + } + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "CustomRedirectDomain":{ + "type":"string", + "documentation":"

    The domain that you want to use for tracking open and click events.

    " + }, + "CustomVerificationEmailTemplateMetadata":{ + "type":"structure", + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the custom verification email template.

    " + }, + "FromEmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that the custom verification email is sent from.

    " + }, + "TemplateSubject":{ + "shape":"EmailTemplateSubject", + "documentation":"

    The subject line of the custom verification email.

    " + }, + "SuccessRedirectionURL":{ + "shape":"SuccessRedirectionURL", + "documentation":"

    The URL that the recipient of the verification email is sent to if his or her address is successfully verified.

    " + }, + "FailureRedirectionURL":{ + "shape":"FailureRedirectionURL", + "documentation":"

    The URL that the recipient of the verification email is sent to if his or her address is not successfully verified.

    " + } + }, + "documentation":"

    Contains information about a custom verification email template.

    " + }, + "CustomVerificationEmailTemplatesList":{ + "type":"list", + "member":{"shape":"CustomVerificationEmailTemplateMetadata"}, + "documentation":"

    A list of the custom verification email templates that exist in your account.

    " + }, + "DailyVolume":{ + "type":"structure", + "members":{ + "StartDate":{ + "shape":"Timestamp", + "documentation":"

    The date that the DailyVolume metrics apply to, in Unix time.

    " + }, + "VolumeStatistics":{ + "shape":"VolumeStatistics", + "documentation":"

    An object that contains inbox placement metrics for a specific day in the analysis period.

    " + }, + "DomainIspPlacements":{ + "shape":"DomainIspPlacements", + "documentation":"

    An object that contains inbox placement metrics for a specified day in the analysis period, broken out by the recipient's email provider.

    " + } + }, + "documentation":"

    An object that contains information about the volume of email sent on each day of the analysis period.

    " + }, + "DailyVolumes":{ + "type":"list", + "member":{"shape":"DailyVolume"} + }, + "DataFormat":{ + "type":"string", + "documentation":"

    The data format of the import job's data source.

    ", + "enum":[ + "CSV", + "JSON" + ] + }, + "DedicatedIp":{ + "type":"structure", + "required":[ + "Ip", + "WarmupStatus", + "WarmupPercentage" + ], + "members":{ + "Ip":{ + "shape":"Ip", + "documentation":"

    An IPv4 address.

    " + }, + "WarmupStatus":{ + "shape":"WarmupStatus", + "documentation":"

    The warm-up status of a dedicated IP address. The status can have one of the following values:

    • IN_PROGRESS – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.

    • DONE – The dedicated IP warm-up process is complete, and the IP address is ready to use.

    " + }, + "WarmupPercentage":{ + "shape":"Percentage100Wrapper", + "documentation":"

    Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.

    " + }, + "PoolName":{ + "shape":"PoolName", + "documentation":"

    The name of the dedicated IP pool that the IP address is associated with.

    " + } + }, + "documentation":"

    Contains information about a dedicated IP address that is associated with your Amazon SES account.

    To learn more about requesting dedicated IP addresses, see Requesting and Relinquishing Dedicated IP Addresses in the Amazon SES Developer Guide.

    " + }, + "DedicatedIpList":{ + "type":"list", + "member":{"shape":"DedicatedIp"}, + "documentation":"

    A list of dedicated IP addresses that are associated with your AWS account.

    " + }, + "DefaultDimensionValue":{ + "type":"string", + "documentation":"

    The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. This value has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + }, + "DeleteConfigurationSetEventDestinationRequest":{ + "type":"structure", + "required":[ + "ConfigurationSetName", + "EventDestinationName" + ], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that contains the event destination that you want to delete.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "EventDestinationName":{ + "shape":"EventDestinationName", + "documentation":"

    The name of the event destination that you want to delete.

    ", + "location":"uri", + "locationName":"EventDestinationName" + } + }, + "documentation":"

    A request to delete an event destination from a configuration set.

    " + }, + "DeleteConfigurationSetEventDestinationResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "DeleteConfigurationSetRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that you want to delete.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + } + }, + "documentation":"

    A request to delete a configuration set.

    " + }, + "DeleteConfigurationSetResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "DeleteContactListRequest":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list.

    ", + "location":"uri", + "locationName":"ContactListName" + } + } + }, + "DeleteContactListResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteContactRequest":{ + "type":"structure", + "required":[ + "ContactListName", + "EmailAddress" + ], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list from which the contact should be removed.

    ", + "location":"uri", + "locationName":"ContactListName" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The contact's email address.

    ", + "location":"uri", + "locationName":"EmailAddress" + } + } + }, + "DeleteContactResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteCustomVerificationEmailTemplateRequest":{ + "type":"structure", + "required":["TemplateName"], + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the custom verification email template that you want to delete.

    ", + "location":"uri", + "locationName":"TemplateName" + } + }, + "documentation":"

    Represents a request to delete an existing custom verification email template.

    " + }, + "DeleteCustomVerificationEmailTemplateResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

    " + }, + "DeleteDedicatedIpPoolRequest":{ + "type":"structure", + "required":["PoolName"], + "members":{ + "PoolName":{ + "shape":"PoolName", + "documentation":"

    The name of the dedicated IP pool that you want to delete.

    ", + "location":"uri", + "locationName":"PoolName" + } + }, + "documentation":"

    A request to delete a dedicated IP pool.

    " + }, + "DeleteDedicatedIpPoolResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "DeleteEmailIdentityPolicyRequest":{ + "type":"structure", + "required":[ + "EmailIdentity", + "PolicyName" + ], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The email identity for which you want to delete a policy.

    ", + "location":"uri", + "locationName":"EmailIdentity" + }, + "PolicyName":{ + "shape":"PolicyName", + "documentation":"

    The name of the policy.

    The policy name cannot exceed 64 characters and can only include alphanumeric characters, dashes, and underscores.

    ", + "location":"uri", + "locationName":"PolicyName" + } + }, + "documentation":"

    Represents a request to delete a sending authorization policy for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " + }, + "DeleteEmailIdentityPolicyResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "DeleteEmailIdentityRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The identity (that is, the email address or domain) that you want to delete.

    ", + "location":"uri", + "locationName":"EmailIdentity" + } + }, + "documentation":"

    A request to delete an existing email identity. When you delete an identity, you lose the ability to send email from that identity. You can restore your ability to send email by completing the verification process for the identity again.

    " + }, + "DeleteEmailIdentityResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "DeleteEmailTemplateRequest":{ + "type":"structure", + "required":["TemplateName"], + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the template to be deleted.

    ", + "location":"uri", + "locationName":"TemplateName" + } + }, + "documentation":"

    Represents a request to delete an email template. For more information, see the Amazon SES Developer Guide.

    " + }, + "DeleteEmailTemplateResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

    " + }, + "DeleteSuppressedDestinationRequest":{ + "type":"structure", + "required":["EmailAddress"], + "members":{ + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The suppressed email destination to remove from the account suppression list.

    ", + "location":"uri", + "locationName":"EmailAddress" + } + }, + "documentation":"

    A request to remove an email address from the suppression list for your account.

    " + }, + "DeleteSuppressedDestinationResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "DeliverabilityDashboardAccountStatus":{ + "type":"string", + "documentation":"

    The current status of your Deliverability dashboard subscription. If this value is PENDING_EXPIRATION, your subscription is scheduled to expire at the end of the current calendar month.

    ", + "enum":[ + "ACTIVE", + "PENDING_EXPIRATION", + "DISABLED" + ] + }, + "DeliverabilityTestReport":{ + "type":"structure", + "members":{ + "ReportId":{ + "shape":"ReportId", + "documentation":"

    A unique string that identifies the predictive inbox placement test.

    " + }, + "ReportName":{ + "shape":"ReportName", + "documentation":"

    A name that helps you identify a predictive inbox placement test report.

    " + }, + "Subject":{ + "shape":"DeliverabilityTestSubject", + "documentation":"

    The subject line for an email that you submitted in a predictive inbox placement test.

    " + }, + "FromEmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The sender address that you specified for the predictive inbox placement test.

    " + }, + "CreateDate":{ + "shape":"Timestamp", + "documentation":"

    The date and time when the predictive inbox placement test was created, in Unix time format.

    " + }, + "DeliverabilityTestStatus":{ + "shape":"DeliverabilityTestStatus", + "documentation":"

    The status of the predictive inbox placement test. If the status is IN_PROGRESS, then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is COMPLETE, then the test is finished, and you can use the GetDeliverabilityTestReport operation to view the results of the test.

    " + } + }, + "documentation":"

    An object that contains metadata related to a predictive inbox placement test.

    " + }, + "DeliverabilityTestReports":{ + "type":"list", + "member":{"shape":"DeliverabilityTestReport"} + }, + "DeliverabilityTestStatus":{ + "type":"string", + "documentation":"

    The status of a predictive inbox placement test. If the status is IN_PROGRESS, then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is COMPLETE, then the test is finished, and you can use the GetDeliverabilityTestReport operation to view the results of the test.

    ", + "enum":[ + "IN_PROGRESS", + "COMPLETED" + ] + }, + "DeliverabilityTestSubject":{ + "type":"string", + "documentation":"

    The subject line for an email that you submitted in a predictive inbox placement test.

    " + }, + "DeliveryOptions":{ + "type":"structure", + "members":{ + "TlsPolicy":{ + "shape":"TlsPolicy", + "documentation":"

    Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established.

    " + }, + "SendingPoolName":{ + "shape":"PoolName", + "documentation":"

    The name of the dedicated IP pool that you want to associate with the configuration set.

    " + } + }, + "documentation":"

    Used to associate a configuration set with a dedicated IP pool.

    " + }, + "Description":{"type":"string"}, + "Destination":{ + "type":"structure", + "members":{ + "ToAddresses":{ + "shape":"EmailAddressList", + "documentation":"

    An array that contains the email addresses of the \"To\" recipients for the email.

    " + }, + "CcAddresses":{ + "shape":"EmailAddressList", + "documentation":"

    An array that contains the email addresses of the \"CC\" (carbon copy) recipients for the email.

    " + }, + "BccAddresses":{ + "shape":"EmailAddressList", + "documentation":"

    An array that contains the email addresses of the \"BCC\" (blind carbon copy) recipients for the email.

    " + } + }, + "documentation":"

    An object that describes the recipients for an email.

    " + }, + "DimensionName":{ + "type":"string", + "documentation":"

    The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:

    • It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + }, + "DimensionValueSource":{ + "type":"string", + "documentation":"

    The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. If you want to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail or SendRawEmail API, choose messageTag. If you want to use your own email headers, choose emailHeader. If you want to use link tags, choose linkTags.

    ", + "enum":[ + "MESSAGE_TAG", + "EMAIL_HEADER", + "LINK_TAG" + ] + }, + "DisplayName":{"type":"string"}, + "DkimAttributes":{ + "type":"structure", + "members":{ + "SigningEnabled":{ + "shape":"Enabled", + "documentation":"

    If the value is true, then the messages that you send from the identity are signed using DKIM. If the value is false, then the messages that you send from the identity aren't DKIM-signed.

    " + }, + "Status":{ + "shape":"DkimStatus", + "documentation":"

    Describes whether or not Amazon SES has successfully located the DKIM records in the DNS records for the domain. The status can be one of the following:

    • PENDING – The verification process was initiated, but Amazon SES hasn't yet detected the DKIM records in the DNS configuration for the domain.

    • SUCCESS – The verification process completed successfully.

    • FAILED – The verification process failed. This typically occurs when Amazon SES fails to find the DKIM records in the DNS configuration of the domain.

    • TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining the DKIM authentication status of the domain.

    • NOT_STARTED – The DKIM verification process hasn't been initiated for the domain.

    " + }, + "Tokens":{ + "shape":"DnsTokenList", + "documentation":"

    If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete.

    If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector for the public key.

    Regardless of the DKIM authentication method you use, Amazon SES searches for the appropriate records in the DNS configuration of the domain for up to 72 hours.

    " + }, + "SigningAttributesOrigin":{ + "shape":"DkimSigningAttributesOrigin", + "documentation":"

    A string that indicates how DKIM was configured for the identity. There are two possible values:

    • AWS_SES – Indicates that DKIM was configured for the identity by using Easy DKIM.

    • EXTERNAL – Indicates that DKIM was configured for the identity by using Bring Your Own DKIM (BYODKIM).

    " + } + }, + "documentation":"

    An object that contains information about the DKIM authentication status for an email identity.

    Amazon SES determines the authentication status by searching for specific records in the DNS configuration for the domain. If you used Easy DKIM to set up DKIM authentication, Amazon SES tries to find three unique CNAME records in the DNS configuration for your domain. If you provided a public key to perform DKIM authentication, Amazon SES tries to find a TXT record that uses the selector that you specified. The value of the TXT record must be a public key that's paired with the private key that you specified in the process of creating the identity.

    " + }, + "DkimSigningAttributes":{ + "type":"structure", + "required":[ + "DomainSigningSelector", + "DomainSigningPrivateKey" + ], + "members":{ + "DomainSigningSelector":{ + "shape":"Selector", + "documentation":"

    A string that's used to identify a public key in the DNS configuration for a domain.

    " + }, + "DomainSigningPrivateKey":{ + "shape":"PrivateKey", + "documentation":"

    A private key that's used to generate a DKIM signature.

    The private key must use 1024-bit RSA encryption, and must be encoded using base64 encoding.

    " + } + }, + "documentation":"

    An object that contains information about the tokens used for setting up Bring Your Own DKIM (BYODKIM).

    " + }, + "DkimSigningAttributesOrigin":{ + "type":"string", + "enum":[ + "AWS_SES", + "EXTERNAL" + ] + }, + "DkimStatus":{ + "type":"string", + "documentation":"

    The DKIM authentication status of the identity. The status can be one of the following:

    • PENDING – The verification process was initiated, but Amazon SES hasn't yet detected the DKIM records in the DNS configuration for the domain.

    • SUCCESS – The verification process completed successfully.

    • FAILED – The verification process failed. This typically occurs when Amazon SES fails to find the DKIM records in the DNS configuration of the domain.

    • TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining the DKIM authentication status of the domain.

    • NOT_STARTED – The DKIM verification process hasn't been initiated for the domain.

    ", + "enum":[ + "PENDING", + "SUCCESS", + "FAILED", + "TEMPORARY_FAILURE", + "NOT_STARTED" + ] + }, + "DnsToken":{"type":"string"}, + "DnsTokenList":{ + "type":"list", + "member":{"shape":"DnsToken"} + }, + "Domain":{"type":"string"}, + "DomainDeliverabilityCampaign":{ + "type":"structure", + "members":{ + "CampaignId":{ + "shape":"CampaignId", + "documentation":"

    The unique identifier for the campaign. The Deliverability dashboard automatically generates and assigns this identifier to a campaign.

    " + }, + "ImageUrl":{ + "shape":"ImageUrl", + "documentation":"

    The URL of an image that contains a snapshot of the email message that was sent.

    " + }, + "Subject":{ + "shape":"Subject", + "documentation":"

    The subject line, or title, of the email message.

    " + }, + "FromAddress":{ + "shape":"Identity", + "documentation":"

    The verified email address that the email message was sent from.

    " + }, + "SendingIps":{ + "shape":"IpList", + "documentation":"

    The IP addresses that were used to send the email message.

    " + }, + "FirstSeenDateTime":{ + "shape":"Timestamp", + "documentation":"

    The first time, in Unix time format, when the email message was delivered to any recipient's inbox. This value can help you determine how long it took for a campaign to deliver an email message.

    " + }, + "LastSeenDateTime":{ + "shape":"Timestamp", + "documentation":"

    The last time, in Unix time format, when the email message was delivered to any recipient's inbox. This value can help you determine how long it took for a campaign to deliver an email message.

    " + }, + "InboxCount":{ + "shape":"Volume", + "documentation":"

    The number of email messages that were delivered to recipients’ inboxes.

    " + }, + "SpamCount":{ + "shape":"Volume", + "documentation":"

    The number of email messages that were delivered to recipients' spam or junk mail folders.

    " + }, + "ReadRate":{ + "shape":"Percentage", + "documentation":"

    The percentage of email messages that were opened by recipients. Due to technical limitations, this value only includes recipients who opened the message by using an email client that supports images.

    " + }, + "DeleteRate":{ + "shape":"Percentage", + "documentation":"

    The percentage of email messages that were deleted by recipients, without being opened first. Due to technical limitations, this value only includes recipients who opened the message by using an email client that supports images.

    " + }, + "ReadDeleteRate":{ + "shape":"Percentage", + "documentation":"

    The percentage of email messages that were opened and then deleted by recipients. Due to technical limitations, this value only includes recipients who opened the message by using an email client that supports images.

    " + }, + "ProjectedVolume":{ + "shape":"Volume", + "documentation":"

    The projected number of recipients that the email message was sent to.

    " + }, + "Esps":{ + "shape":"Esps", + "documentation":"

    The major email providers who handled the email message.

    " + } + }, + "documentation":"

    An object that contains the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for (PutDeliverabilityDashboardOption operation).

    " + }, + "DomainDeliverabilityCampaignList":{ + "type":"list", + "member":{"shape":"DomainDeliverabilityCampaign"}, + "documentation":"

    " + }, + "DomainDeliverabilityTrackingOption":{ + "type":"structure", + "members":{ + "Domain":{ + "shape":"Domain", + "documentation":"

    A verified domain that’s associated with your AWS account and currently has an active Deliverability dashboard subscription.

    " + }, + "SubscriptionStartDate":{ + "shape":"Timestamp", + "documentation":"

    The date, in Unix time format, when you enabled the Deliverability dashboard for the domain.

    " + }, + "InboxPlacementTrackingOption":{ + "shape":"InboxPlacementTrackingOption", + "documentation":"

    An object that contains information about the inbox placement data settings for the domain.

    " + } + }, + "documentation":"

    An object that contains information about the Deliverability dashboard subscription for a verified domain that you use to send email and currently has an active Deliverability dashboard subscription. If a Deliverability dashboard subscription is active for a domain, you gain access to reputation, inbox placement, and other metrics for the domain.

    " + }, + "DomainDeliverabilityTrackingOptions":{ + "type":"list", + "member":{"shape":"DomainDeliverabilityTrackingOption"}, + "documentation":"

    An object that contains information about the Deliverability dashboard subscription for a verified domain that you use to send email and currently has an active Deliverability dashboard subscription. If a Deliverability dashboard subscription is active for a domain, you gain access to reputation, inbox placement, and other metrics for the domain.

    " + }, + "DomainIspPlacement":{ + "type":"structure", + "members":{ + "IspName":{ + "shape":"IspName", + "documentation":"

    The name of the email provider that the inbox placement data applies to.

    " + }, + "InboxRawCount":{ + "shape":"Volume", + "documentation":"

    The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.

    " + }, + "SpamRawCount":{ + "shape":"Volume", + "documentation":"

    The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.

    " + }, + "InboxPercentage":{ + "shape":"Percentage", + "documentation":"

    The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.

    " + }, + "SpamPercentage":{ + "shape":"Percentage", + "documentation":"

    The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.

    " + } + }, + "documentation":"

    An object that contains inbox placement data for email sent from one of your email domains to a specific email provider.

    " + }, + "DomainIspPlacements":{ + "type":"list", + "member":{"shape":"DomainIspPlacement"} + }, + "EmailAddress":{"type":"string"}, + "EmailAddressList":{ + "type":"list", + "member":{"shape":"EmailAddress"} + }, + "EmailContent":{ + "type":"structure", + "members":{ + "Simple":{ + "shape":"Message", + "documentation":"

    The simple email message. The message consists of a subject and a message body.

    " + }, + "Raw":{ + "shape":"RawMessage", + "documentation":"

    The raw email message. The message has to meet the following criteria:

    • The message has to contain a header and a body, separated by one blank line.

    • All of the required header fields must be present in the message.

    • Each part of a multipart MIME message must be formatted properly.

    • If you include attachments, they must be in a file format that the Amazon SES API v2 supports.

    • The entire message must be Base64 encoded.

    • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.

    • The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.

    " + }, + "Template":{ + "shape":"Template", + "documentation":"

    The template to use for the email message.

    " + } + }, + "documentation":"

    An object that defines the entire content of the email, including the message headers and the body content. You can create a simple email message, in which you specify the subject and the text and HTML versions of the message body. You can also create raw messages, in which you specify a complete MIME-formatted message. Raw messages can include attachments and custom headers.

    " + }, + "EmailTemplateContent":{ + "type":"structure", + "members":{ + "Subject":{ + "shape":"EmailTemplateSubject", + "documentation":"

    The subject line of the email.

    " + }, + "Text":{ + "shape":"EmailTemplateText", + "documentation":"

    The email body that will be visible to recipients whose email clients do not display HTML.

    " + }, + "Html":{ + "shape":"EmailTemplateHtml", + "documentation":"

    The HTML body of the email.

    " + } + }, + "documentation":"

    The content of the email, composed of a subject line, an HTML part, and a text-only part.

    " + }, + "EmailTemplateData":{ + "type":"string", + "documentation":"

    An object that defines the values to use for message variables in the template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the value to use for that variable.

    ", + "max":262144 + }, + "EmailTemplateHtml":{ + "type":"string", + "documentation":"

    The HTML body of the email.

    " + }, + "EmailTemplateMetadata":{ + "type":"structure", + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the template.

    " + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time and date the template was created.

    " + } + }, + "documentation":"

    Contains information about an email template.

    " + }, + "EmailTemplateMetadataList":{ + "type":"list", + "member":{"shape":"EmailTemplateMetadata"}, + "documentation":"

    A list of EmailTemplateMetadata objects.

    " + }, + "EmailTemplateName":{ + "type":"string", + "documentation":"

    The name of the template. You will refer to this name when you send email using the SendTemplatedEmail or SendBulkTemplatedEmail operations.

    ", + "min":1 + }, + "EmailTemplateSubject":{ + "type":"string", + "documentation":"

    The subject line of the email.

    " + }, + "EmailTemplateText":{ + "type":"string", + "documentation":"

    The email body that will be visible to recipients whose email clients do not display HTML.

    " + }, + "Enabled":{"type":"boolean"}, + "EnabledWrapper":{"type":"boolean"}, + "ErrorMessage":{"type":"string"}, + "Esp":{"type":"string"}, + "Esps":{ + "type":"list", + "member":{"shape":"Esp"} + }, + "EventDestination":{ + "type":"structure", + "required":[ + "Name", + "MatchingEventTypes" + ], + "members":{ + "Name":{ + "shape":"EventDestinationName", + "documentation":"

    A name that identifies the event destination.

    " + }, + "Enabled":{ + "shape":"Enabled", + "documentation":"

    If true, the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this EventDestinationDefinition.

    If false, the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations.

    " + }, + "MatchingEventTypes":{ + "shape":"EventTypes", + "documentation":"

    The types of events that Amazon SES sends to the specified event destinations.

    " + }, + "KinesisFirehoseDestination":{ + "shape":"KinesisFirehoseDestination", + "documentation":"

    An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.

    " + }, + "CloudWatchDestination":{ + "shape":"CloudWatchDestination", + "documentation":"

    An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.

    " + }, + "SnsDestination":{ + "shape":"SnsDestination", + "documentation":"

    An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.

    " + }, + "PinpointDestination":{ + "shape":"PinpointDestination", + "documentation":"

    An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to an Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.

    " + } + }, + "documentation":"

    In the Amazon SES API v2, events include message sends, deliveries, opens, clicks, bounces, complaints and delivery delays. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    " + }, + "EventDestinationDefinition":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Enabled", + "documentation":"

    If true, the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this EventDestinationDefinition.

    If false, the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations.

    " + }, + "MatchingEventTypes":{ + "shape":"EventTypes", + "documentation":"

    An array that specifies which events the Amazon SES API v2 should send to the destinations in this EventDestinationDefinition.

    " + }, + "KinesisFirehoseDestination":{ + "shape":"KinesisFirehoseDestination", + "documentation":"

    An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.

    " + }, + "CloudWatchDestination":{ + "shape":"CloudWatchDestination", + "documentation":"

    An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.

    " + }, + "SnsDestination":{ + "shape":"SnsDestination", + "documentation":"

    An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.

    " + }, + "PinpointDestination":{ + "shape":"PinpointDestination", + "documentation":"

    An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to an Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.

    " + } + }, + "documentation":"

    An object that defines the event destination. Specifically, it defines which services receive events from emails sent using the configuration set that the event destination is associated with. Also defines the types of events that are sent to the event destination.

    " + }, + "EventDestinationName":{ + "type":"string", + "documentation":"

    The name of an event destination.

    Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    " + }, + "EventDestinations":{ + "type":"list", + "member":{"shape":"EventDestination"} + }, + "EventType":{ + "type":"string", + "documentation":"

    An email sending event type. For example, email sends, opens, and bounces are all email events.

    ", + "enum":[ + "SEND", + "REJECT", + "BOUNCE", + "COMPLAINT", + "DELIVERY", + "OPEN", + "CLICK", + "RENDERING_FAILURE", + "DELIVERY_DELAY", + "SUBSCRIPTION" + ] + }, + "EventTypes":{ + "type":"list", + "member":{"shape":"EventType"} + }, + "FailedRecordsCount":{"type":"integer"}, + "FailedRecordsS3Url":{"type":"string"}, + "FailureInfo":{ + "type":"structure", + "members":{ + "FailedRecordsS3Url":{ + "shape":"FailedRecordsS3Url", + "documentation":"

    An Amazon S3 presigned URL that contains all the failed records and related information.

    " + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    A message about why the import job failed.

    " + } + }, + "documentation":"

    An object that contains the failure details about an import job.

    " + }, + "FailureRedirectionURL":{ + "type":"string", + "documentation":"

    The URL that the recipient of the verification email is sent to if his or her address is not successfully verified.

    " + }, + "FeedbackId":{"type":"string"}, + "GeneralEnforcementStatus":{"type":"string"}, + "GetAccountRequest":{ + "type":"structure", + "members":{ + }, + "documentation":"

    A request to obtain information about the email-sending capabilities of your Amazon SES account.

    " + }, + "GetAccountResponse":{ + "type":"structure", + "members":{ + "DedicatedIpAutoWarmupEnabled":{ + "shape":"Enabled", + "documentation":"

    Indicates whether or not the automatic warm-up feature is enabled for dedicated IP addresses that are associated with your account.

    " + }, + "EnforcementStatus":{ + "shape":"GeneralEnforcementStatus", + "documentation":"

    The reputation status of your Amazon SES account. The status can be one of the following:

    • HEALTHY – There are no reputation-related issues that currently impact your account.

    • PROBATION – We've identified potential issues with your Amazon SES account. We're placing your account under review while you work on correcting these issues.

    • SHUTDOWN – Your account's ability to send email is currently paused because of an issue with the email sent from your account. When you correct the issue, you can contact us and request that your account's ability to send email is resumed.

    " + }, + "ProductionAccessEnabled":{ + "shape":"Enabled", + "documentation":"

    Indicates whether or not your account has production access in the current AWS Region.

    If the value is false, then your account is in the sandbox. When your account is in the sandbox, you can only send email to verified identities. Additionally, the maximum number of emails you can send in a 24-hour period (your sending quota) is 200, and the maximum number of emails you can send per second (your maximum sending rate) is 1.

    If the value is true, then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case.

    " + }, + "SendQuota":{ + "shape":"SendQuota", + "documentation":"

    An object that contains information about the per-day and per-second sending limits for your Amazon SES account in the current AWS Region.

    " + }, + "SendingEnabled":{ + "shape":"Enabled", + "documentation":"

    Indicates whether or not email sending is enabled for your Amazon SES account in the current AWS Region.

    " + }, + "SuppressionAttributes":{ + "shape":"SuppressionAttributes", + "documentation":"

    An object that contains information about the email address suppression preferences for your account in the current AWS Region.

    " + }, + "Details":{ + "shape":"AccountDetails", + "documentation":"

    An object that defines your account details.

    " + } + }, + "documentation":"

    A list of details about the email-sending capabilities of your Amazon SES account in the current AWS Region.

    " + }, + "GetBlacklistReportsRequest":{ + "type":"structure", + "required":["BlacklistItemNames"], + "members":{ + "BlacklistItemNames":{ + "shape":"BlacklistItemNames", + "documentation":"

    A list of IP addresses that you want to retrieve blacklist information about. You can only specify the dedicated IP addresses that you use to send email using Amazon SES or Amazon Pinpoint.

    ", + "location":"querystring", + "locationName":"BlacklistItemNames" + } + }, + "documentation":"

    A request to retrieve a list of the blacklists that your dedicated IP addresses appear on.

    " + }, + "GetBlacklistReportsResponse":{ + "type":"structure", + "required":["BlacklistReport"], + "members":{ + "BlacklistReport":{ + "shape":"BlacklistReport", + "documentation":"

    An object that contains information about a blacklist that one of your dedicated IP addresses appears on.

    " + } + }, + "documentation":"

    An object that contains information about blacklist events.

    " + }, + "GetConfigurationSetEventDestinationsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that contains the event destination.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + } + }, + "documentation":"

    A request to obtain information about the event destinations for a configuration set.

    " + }, + "GetConfigurationSetEventDestinationsResponse":{ + "type":"structure", + "members":{ + "EventDestinations":{ + "shape":"EventDestinations", + "documentation":"

    An array that includes all of the event destinations that have been configured for the configuration set.

    " + } + }, + "documentation":"

    Information about an event destination for a configuration set.

    " + }, + "GetConfigurationSetRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that you want to obtain more information about.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + } + }, + "documentation":"

    A request to obtain information about a configuration set.

    " + }, + "GetConfigurationSetResponse":{ + "type":"structure", + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set.

    " + }, + "TrackingOptions":{ + "shape":"TrackingOptions", + "documentation":"

    An object that defines the open and click tracking options for emails that you send using the configuration set.

    " + }, + "DeliveryOptions":{ + "shape":"DeliveryOptions", + "documentation":"

    An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.

    " + }, + "ReputationOptions":{ + "shape":"ReputationOptions", + "documentation":"

    An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set.

    " + }, + "SendingOptions":{ + "shape":"SendingOptions", + "documentation":"

    An object that defines whether or not Amazon SES can send email that you send using the configuration set.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of objects that define the tags (keys and values) that are associated with the configuration set.

    " + }, + "SuppressionOptions":{ + "shape":"SuppressionOptions", + "documentation":"

    An object that contains information about the suppression list preferences for your account.

    " + } + }, + "documentation":"

    Information about a configuration set.

    " + }, + "GetContactListRequest":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list.

    ", + "location":"uri", + "locationName":"ContactListName" + } + } + }, + "GetContactListResponse":{ + "type":"structure", + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list.

    " + }, + "Topics":{ + "shape":"Topics", + "documentation":"

    An interest group, theme, or label within a list. A contact list can have multiple topics.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of what the contact list is about.

    " + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    A timestamp noting when the contact list was created.

    " + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    A timestamp noting the last time the contact list was updated.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags associated with a contact list.

    " + } + } + }, + "GetContactRequest":{ + "type":"structure", + "required":[ + "ContactListName", + "EmailAddress" + ], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list to which the contact belongs.

    ", + "location":"uri", + "locationName":"ContactListName" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The contact's email address.

    ", + "location":"uri", + "locationName":"EmailAddress" + } + } + }, + "GetContactResponse":{ + "type":"structure", + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list to which the contact belongs.

    " + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The contact's email address.

    " + }, + "TopicPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

    The contact's preference for being opted-in to or opted-out of a topic.

    " + }, + "TopicDefaultPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

    The default topic preferences applied to the contact.

    " + }, + "UnsubscribeAll":{ + "shape":"UnsubscribeAll", + "documentation":"

    A boolean value indicating whether the contact is unsubscribed from all contact list topics.

    " + }, + "AttributesData":{ + "shape":"AttributesData", + "documentation":"

    The attribute data attached to a contact.

    " + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    A timestamp noting when the contact was created.

    " + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    A timestamp noting the last time the contact's information was updated.

    " + } + } + }, + "GetCustomVerificationEmailTemplateRequest":{ + "type":"structure", + "required":["TemplateName"], + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the custom verification email template that you want to retrieve.

    ", + "location":"uri", + "locationName":"TemplateName" + } + }, + "documentation":"

    Represents a request to retrieve an existing custom verification email template.

    " + }, + "GetCustomVerificationEmailTemplateResponse":{ + "type":"structure", + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the custom verification email template.

    " + }, + "FromEmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that the custom verification email is sent from.

    " + }, + "TemplateSubject":{ + "shape":"EmailTemplateSubject", + "documentation":"

    The subject line of the custom verification email.

    " + }, + "TemplateContent":{ + "shape":"TemplateContent", + "documentation":"

    The content of the custom verification email.

    " + }, + "SuccessRedirectionURL":{ + "shape":"SuccessRedirectionURL", + "documentation":"

    The URL that the recipient of the verification email is sent to if his or her address is successfully verified.

    " + }, + "FailureRedirectionURL":{ + "shape":"FailureRedirectionURL", + "documentation":"

    The URL that the recipient of the verification email is sent to if his or her address is not successfully verified.

    " + } + }, + "documentation":"

    The following elements are returned by the service.

    " + }, + "GetDedicatedIpRequest":{ + "type":"structure", + "required":["Ip"], + "members":{ + "Ip":{ + "shape":"Ip", + "documentation":"

    The IP address that you want to obtain more information about. The value you specify has to be a dedicated IP address that's associated with your AWS account.

    ", + "location":"uri", + "locationName":"IP" + } + }, + "documentation":"

    A request to obtain more information about a dedicated IP address.

    " + }, + "GetDedicatedIpResponse":{ + "type":"structure", + "members":{ + "DedicatedIp":{ + "shape":"DedicatedIp", + "documentation":"

    An object that contains information about a dedicated IP address.

    " + } + }, + "documentation":"

    Information about a dedicated IP address.

    " + }, + "GetDedicatedIpsRequest":{ + "type":"structure", + "members":{ + "PoolName":{ + "shape":"PoolName", + "documentation":"

    The name of the IP pool that the dedicated IP address is associated with.

    ", + "location":"querystring", + "locationName":"PoolName" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token returned from a previous call to GetDedicatedIps to indicate the position of the dedicated IP pool in the list of IP pools.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    The number of results to show in a single call to GetDedicatedIps. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    ", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

    A request to obtain more information about dedicated IP pools.

    " + }, + "GetDedicatedIpsResponse":{ + "type":"structure", + "members":{ + "DedicatedIps":{ + "shape":"DedicatedIpList", + "documentation":"

    A list of dedicated IP addresses that are associated with your AWS account.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token that indicates that there are additional dedicated IP addresses to list. To view additional addresses, issue another request to GetDedicatedIps, passing this token in the NextToken parameter.

    " + } + }, + "documentation":"

    Information about the dedicated IP addresses that are associated with your AWS account.

    " + }, + "GetDeliverabilityDashboardOptionsRequest":{ + "type":"structure", + "members":{ + }, + "documentation":"

    Retrieve information about the status of the Deliverability dashboard for your AWS account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for your domains. You also gain the ability to perform predictive inbox placement tests.

    When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

    " + }, + "GetDeliverabilityDashboardOptionsResponse":{ + "type":"structure", + "required":["DashboardEnabled"], + "members":{ + "DashboardEnabled":{ + "shape":"Enabled", + "documentation":"

    Specifies whether the Deliverability dashboard is enabled. If this value is true, the dashboard is enabled.

    " + }, + "SubscriptionExpiryDate":{ + "shape":"Timestamp", + "documentation":"

    The date, in Unix time format, when your current subscription to the Deliverability dashboard is scheduled to expire, if your subscription is scheduled to expire at the end of the current calendar month. This value is null if you have an active subscription that isn’t due to expire at the end of the month.

    " + }, + "AccountStatus":{ + "shape":"DeliverabilityDashboardAccountStatus", + "documentation":"

    The current status of your Deliverability dashboard subscription. If this value is PENDING_EXPIRATION, your subscription is scheduled to expire at the end of the current calendar month.

    " + }, + "ActiveSubscribedDomains":{ + "shape":"DomainDeliverabilityTrackingOptions", + "documentation":"

    An array of objects, one for each verified domain that you use to send email and currently has an active Deliverability dashboard subscription that isn’t scheduled to expire at the end of the current calendar month.

    " + }, + "PendingExpirationSubscribedDomains":{ + "shape":"DomainDeliverabilityTrackingOptions", + "documentation":"

    An array of objects, one for each verified domain that you use to send email and currently has an active Deliverability dashboard subscription that's scheduled to expire at the end of the current calendar month.

    " + } + }, + "documentation":"

    An object that shows the status of the Deliverability dashboard.

    " + }, + "GetDeliverabilityTestReportRequest":{ + "type":"structure", + "required":["ReportId"], + "members":{ + "ReportId":{ + "shape":"ReportId", + "documentation":"

    A unique string that identifies the predictive inbox placement test.

    ", + "location":"uri", + "locationName":"ReportId" + } + }, + "documentation":"

    A request to retrieve the results of a predictive inbox placement test.

    " + }, + "GetDeliverabilityTestReportResponse":{ + "type":"structure", + "required":[ + "DeliverabilityTestReport", + "OverallPlacement", + "IspPlacements" + ], + "members":{ + "DeliverabilityTestReport":{ + "shape":"DeliverabilityTestReport", + "documentation":"

    An object that contains the results of the predictive inbox placement test.

    " + }, + "OverallPlacement":{ + "shape":"PlacementStatistics", + "documentation":"

    An object that specifies how many test messages that were sent during the predictive inbox placement test were delivered to recipients' inboxes, how many were sent to recipients' spam folders, and how many weren't delivered.

    " + }, + "IspPlacements":{ + "shape":"IspPlacements", + "documentation":"

    An object that describes how the test email was handled by several email providers, including Gmail, Hotmail, Yahoo, AOL, and others.

    " + }, + "Message":{ + "shape":"MessageContent", + "documentation":"

    An object that contains the message that you sent when you performed this predictive inbox placement test.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of objects that define the tags (keys and values) that are associated with the predictive inbox placement test.

    " + } + }, + "documentation":"

    The results of the predictive inbox placement test.

    " + }, + "GetDomainDeliverabilityCampaignRequest":{ + "type":"structure", + "required":["CampaignId"], + "members":{ + "CampaignId":{ + "shape":"CampaignId", + "documentation":"

    The unique identifier for the campaign. The Deliverability dashboard automatically generates and assigns this identifier to a campaign.

    ", + "location":"uri", + "locationName":"CampaignId" + } + }, + "documentation":"

    Retrieve all the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for (PutDeliverabilityDashboardOption operation).

    " + }, + "GetDomainDeliverabilityCampaignResponse":{ + "type":"structure", + "required":["DomainDeliverabilityCampaign"], + "members":{ + "DomainDeliverabilityCampaign":{ + "shape":"DomainDeliverabilityCampaign", + "documentation":"

    An object that contains the deliverability data for the campaign.

    " + } + }, + "documentation":"

    An object that contains all the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for.

    " + }, + "GetDomainStatisticsReportRequest":{ + "type":"structure", + "required":[ + "Domain", + "StartDate", + "EndDate" + ], + "members":{ + "Domain":{ + "shape":"Identity", + "documentation":"

    The domain that you want to obtain deliverability metrics for.

    ", + "location":"uri", + "locationName":"Domain" + }, + "StartDate":{ + "shape":"Timestamp", + "documentation":"

    The first day (in Unix time) that you want to obtain domain deliverability metrics for.

    ", + "location":"querystring", + "locationName":"StartDate" + }, + "EndDate":{ + "shape":"Timestamp", + "documentation":"

    The last day (in Unix time) that you want to obtain domain deliverability metrics for. The EndDate that you specify has to be less than or equal to 30 days after the StartDate.

    ", + "location":"querystring", + "locationName":"EndDate" + } + }, + "documentation":"

    A request to obtain deliverability metrics for a domain.

    " + }, + "GetDomainStatisticsReportResponse":{ + "type":"structure", + "required":[ + "OverallVolume", + "DailyVolumes" + ], + "members":{ + "OverallVolume":{ + "shape":"OverallVolume", + "documentation":"

    An object that contains deliverability metrics for the domain that you specified. The data in this object is a summary of all of the data that was collected from the StartDate to the EndDate.

    " + }, + "DailyVolumes":{ + "shape":"DailyVolumes", + "documentation":"

    An object that contains deliverability metrics for the domain that you specified. This object contains data for each day, starting on the StartDate and ending on the EndDate.

    " + } + }, + "documentation":"

    An object that includes statistics that are related to the domain that you specified.

    " + }, + "GetEmailIdentityPoliciesRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The email identity that you want to retrieve policies for.

    ", + "location":"uri", + "locationName":"EmailIdentity" + } + }, + "documentation":"

    A request to return the policies of an email identity.

    " + }, + "GetEmailIdentityPoliciesResponse":{ + "type":"structure", + "members":{ + "Policies":{ + "shape":"PolicyMap", + "documentation":"

    A map of policy names to policies.

    " + } + }, + "documentation":"

    Identity policies associated with email identity.

    " + }, + "GetEmailIdentityRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The email identity that you want to retrieve details for.

    ", + "location":"uri", + "locationName":"EmailIdentity" + } + }, + "documentation":"

    A request to return details about an email identity.

    " + }, + "GetEmailIdentityResponse":{ + "type":"structure", + "members":{ + "IdentityType":{ + "shape":"IdentityType", + "documentation":"

    The email identity type.

    " + }, + "FeedbackForwardingStatus":{ + "shape":"Enabled", + "documentation":"

    The feedback forwarding configuration for the identity.

    If the value is true, you receive email notifications when bounce or complaint events occur. These notifications are sent to the address that you specified in the Return-Path header of the original email.

    You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications (for example, by setting up an event destination), you receive an email notification when these events occur (even if this setting is disabled).

    " + }, + "VerifiedForSendingStatus":{ + "shape":"Enabled", + "documentation":"

    Specifies whether or not the identity is verified. You can only send email from verified email addresses or domains. For more information about verifying identities, see the Amazon Pinpoint User Guide.

    " + }, + "DkimAttributes":{ + "shape":"DkimAttributes", + "documentation":"

    An object that contains information about the DKIM attributes for the identity.

    " + }, + "MailFromAttributes":{ + "shape":"MailFromAttributes", + "documentation":"

    An object that contains information about the Mail-From attributes for the email identity.

    " + }, + "Policies":{ + "shape":"PolicyMap", + "documentation":"

    A map of policy names to policies.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of objects that define the tags (keys and values) that are associated with the email identity.

    " + } + }, + "documentation":"

    Details about an email identity.

    " + }, + "GetEmailTemplateRequest":{ + "type":"structure", + "required":["TemplateName"], + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the template you want to retrieve.

    ", + "location":"uri", + "locationName":"TemplateName" + } + }, + "documentation":"

    Represents a request to display the template object (which includes the subject line, HTML part and text part) for the template you specify.

    " + }, + "GetEmailTemplateResponse":{ + "type":"structure", + "required":[ + "TemplateName", + "TemplateContent" + ], + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the template you want to retrieve.

    " + }, + "TemplateContent":{ + "shape":"EmailTemplateContent", + "documentation":"

    The content of the email template, composed of a subject line, an HTML part, and a text-only part.

    " + } + }, + "documentation":"

    The following element is returned by the service.

    " + }, + "GetImportJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The ID of the import job.

    ", + "location":"uri", + "locationName":"JobId" + } + }, + "documentation":"

    Represents a request for information about an import job using the import job ID.

    " + }, + "GetImportJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    A string that represents the import job ID.

    " + }, + "ImportDestination":{ + "shape":"ImportDestination", + "documentation":"

    The destination of the import job.

    " + }, + "ImportDataSource":{ + "shape":"ImportDataSource", + "documentation":"

    The data source of the import job.

    " + }, + "FailureInfo":{ + "shape":"FailureInfo", + "documentation":"

    The failure details about an import job.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    The status of the import job.

    " + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time stamp of when the import job was created.

    " + }, + "CompletedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time stamp of when the import job was completed.

    " + }, + "ProcessedRecordsCount":{ + "shape":"ProcessedRecordsCount", + "documentation":"

    The current number of records processed.

    " + }, + "FailedRecordsCount":{ + "shape":"FailedRecordsCount", + "documentation":"

    The number of records that failed processing because of invalid input or other reasons.

    " + } + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "GetSuppressedDestinationRequest":{ + "type":"structure", + "required":["EmailAddress"], + "members":{ + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that's on the account suppression list.

    ", + "location":"uri", + "locationName":"EmailAddress" + } + }, + "documentation":"

    A request to retrieve information about an email address that's on the suppression list for your account.

    " + }, + "GetSuppressedDestinationResponse":{ + "type":"structure", + "required":["SuppressedDestination"], + "members":{ + "SuppressedDestination":{ + "shape":"SuppressedDestination", + "documentation":"

    An object containing information about the suppressed email address.

    " + } + }, + "documentation":"

    Information about the suppressed email address.

    " + }, + "Identity":{ + "type":"string", + "min":1 + }, + "IdentityInfo":{ + "type":"structure", + "members":{ + "IdentityType":{ + "shape":"IdentityType", + "documentation":"

    The email identity type. The identity type can be one of the following:

    • EMAIL_ADDRESS – The identity is an email address.

    • DOMAIN – The identity is a domain.

    • MANAGED_DOMAIN – The identity is a domain that is managed by AWS.

    " + }, + "IdentityName":{ + "shape":"Identity", + "documentation":"

    The address or domain of the identity.

    " + }, + "SendingEnabled":{ + "shape":"Enabled", + "documentation":"

    Indicates whether or not you can send email from the identity.

    An identity is an email address or domain that you send email from. Before you can send email from an identity, you have to demonstrate that you own the identity, and that you authorize Amazon SES to send email from that identity.

    " + } + }, + "documentation":"

    Information about an email identity.

    " + }, + "IdentityInfoList":{ + "type":"list", + "member":{"shape":"IdentityInfo"} + }, + "IdentityType":{ + "type":"string", + "documentation":"

    The email identity type. The identity type can be one of the following:

    • EMAIL_ADDRESS – The identity is an email address.

    • DOMAIN – The identity is a domain.

    ", + "enum":[ + "EMAIL_ADDRESS", + "DOMAIN", + "MANAGED_DOMAIN" + ] + }, + "ImageUrl":{"type":"string"}, + "ImportDataSource":{ + "type":"structure", + "required":[ + "S3Url", + "DataFormat" + ], + "members":{ + "S3Url":{ + "shape":"S3Url", + "documentation":"

    An Amazon S3 URL in the format s3://<bucket_name>/<object>.

    " + }, + "DataFormat":{ + "shape":"DataFormat", + "documentation":"

    The data format of the import job's data source.

    " + } + }, + "documentation":"

    An object that contains details about the data source of the import job.

    " + }, + "ImportDestination":{ + "type":"structure", + "members":{ + "SuppressionListDestination":{ + "shape":"SuppressionListDestination", + "documentation":"

    An object that contains the action of the import job towards the suppression list.

    " + }, + "ContactListDestination":{ + "shape":"ContactListDestination", + "documentation":"

    An object that contains the action of the import job towards a contact list.

    " + } + }, + "documentation":"

    An object that contains details about the resource destination the import job is going to target.

    " + }, + "ImportDestinationType":{ + "type":"string", + "documentation":"

    The destination of the import job, which can be used to list import jobs that have a certain ImportDestinationType.

    ", + "enum":[ + "SUPPRESSION_LIST", + "CONTACT_LIST" + ] + }, + "ImportJobSummary":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "ImportDestination":{"shape":"ImportDestination"}, + "JobStatus":{"shape":"JobStatus"}, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The date and time when the import job was created.

    " + } + }, + "documentation":"

    A summary of the import job.

    " + }, + "ImportJobSummaryList":{ + "type":"list", + "member":{"shape":"ImportJobSummary"}, + "documentation":"

    A list of the import job summaries.

    " + }, + "InboxPlacementTrackingOption":{ + "type":"structure", + "members":{ + "Global":{ + "shape":"Enabled", + "documentation":"

    Specifies whether inbox placement data is being tracked for the domain.

    " + }, + "TrackedIsps":{ + "shape":"IspNameList", + "documentation":"

    An array of strings, one for each major email provider that the inbox placement data applies to.

    " + } + }, + "documentation":"

    An object that contains information about the inbox placement data settings for a verified domain that’s associated with your AWS account. This data is available only if you enabled the Deliverability dashboard for the domain.

    " + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The specified request includes an invalid or expired token.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Ip":{ + "type":"string", + "documentation":"

    An IPv4 address.

    " + }, + "IpList":{ + "type":"list", + "member":{"shape":"Ip"} + }, + "IspName":{ + "type":"string", + "documentation":"

    The name of an email provider.

    " + }, + "IspNameList":{ + "type":"list", + "member":{"shape":"IspName"} + }, + "IspPlacement":{ + "type":"structure", + "members":{ + "IspName":{ + "shape":"IspName", + "documentation":"

    The name of the email provider that the inbox placement data applies to.

    " + }, + "PlacementStatistics":{ + "shape":"PlacementStatistics", + "documentation":"

    An object that contains inbox placement metrics for a specific email provider.

    " + } + }, + "documentation":"

    An object that describes how email sent during the predictive inbox placement test was handled by a certain email provider.

    " + }, + "IspPlacements":{ + "type":"list", + "member":{"shape":"IspPlacement"} + }, + "JobId":{ + "type":"string", + "documentation":"

    A string that represents the import job ID.

    ", + "min":1 + }, + "JobStatus":{ + "type":"string", + "documentation":"

    The status of the import job.

    ", + "enum":[ + "CREATED", + "PROCESSING", + "COMPLETED", + "FAILED" + ] + }, + "KinesisFirehoseDestination":{ + "type":"structure", + "required":[ + "IamRoleArn", + "DeliveryStreamArn" + ], + "members":{ + "IamRoleArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role that the Amazon SES API v2 uses to send email events to the Amazon Kinesis Data Firehose stream.

    " + }, + "DeliveryStreamArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that the Amazon SES API v2 sends email events to.

    " + } + }, + "documentation":"

    An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.

    " + }, + "LastFreshStart":{ + "type":"timestamp", + "documentation":"

    The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.

    " + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    There are too many instances of the specified resource type.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListConfigurationSetsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token returned from a previous call to ListConfigurationSets to indicate the position in the list of configuration sets.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    The number of results to show in a single call to ListConfigurationSets. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    ", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

    A request to obtain a list of configuration sets for your Amazon SES account in the current AWS Region.

    " + }, + "ListConfigurationSetsResponse":{ + "type":"structure", + "members":{ + "ConfigurationSets":{ + "shape":"ConfigurationSetNameList", + "documentation":"

    An array that contains all of the configuration sets in your Amazon SES account in the current AWS Region.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token that indicates that there are additional configuration sets to list. To view additional configuration sets, issue another request to ListConfigurationSets, and pass this token in the NextToken parameter.

    " + } + }, + "documentation":"

    A list of configuration sets in your Amazon SES account in the current AWS Region.

    " + }, + "ListContactListsRequest":{ + "type":"structure", + "members":{ + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    Maximum number of contact lists to return at once. Use this parameter to paginate results. If additional contact lists exist beyond the specified limit, the NextToken element is sent in the response. Use the NextToken value in subsequent requests to retrieve additional lists.

    ", + "location":"querystring", + "locationName":"PageSize" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A string token indicating that there might be additional contact lists available to be listed. Use the token provided in the response in a subsequent call to ListContactLists with the same parameters to retrieve the next page of contact lists.

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListContactListsResponse":{ + "type":"structure", + "members":{ + "ContactLists":{ + "shape":"ListOfContactLists", + "documentation":"

    The available contact lists.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A string token indicating that there might be additional contact lists available to be listed. Copy this token to a subsequent call to ListContactLists with the same parameters to retrieve the next page of contact lists.

    " + } + } + }, + "ListContactsFilter":{ + "type":"structure", + "members":{ + "FilteredStatus":{ + "shape":"SubscriptionStatus", + "documentation":"

    The status by which you are filtering: OPT_IN or OPT_OUT.

    " + }, + "TopicFilter":{ + "shape":"TopicFilter", + "documentation":"

    Used for filtering by a specific topic preference.

    " + } + }, + "documentation":"

    A filter that can be applied to a list of contacts.

    " + }, + "ListContactsRequest":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list.

    ", + "location":"uri", + "locationName":"ContactListName" + }, + "Filter":{ + "shape":"ListContactsFilter", + "documentation":"

    A filter that can be applied to a list of contacts.

    " + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    The maximum number of contacts to return at once; fewer are returned if the contact list contains fewer contacts than the PageSize value. Use this parameter to paginate results. If additional contacts exist beyond the specified limit, the NextToken element is sent in the response. Use the NextToken value in subsequent requests to retrieve additional contacts.

    ", + "location":"querystring", + "locationName":"PageSize" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A string token indicating that there might be additional contacts available to be listed. Use the token provided in the response in a subsequent call to ListContacts with the same parameters to retrieve the next page of contacts.

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListContactsResponse":{ + "type":"structure", + "members":{ + "Contacts":{ + "shape":"ListOfContacts", + "documentation":"

    The contacts present in a specific contact list.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A string token indicating that there might be additional contacts available to be listed. Copy this token to a subsequent call to ListContacts with the same parameters to retrieve the next page of contacts.

    " + } + } + }, + "ListCustomVerificationEmailTemplatesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token returned from a previous call to ListCustomVerificationEmailTemplates to indicate the position in the list of custom verification email templates.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    The number of results to show in a single call to ListCustomVerificationEmailTemplates. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    The value you specify has to be at least 1, and can be no more than 50.

    ", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

    Represents a request to list the existing custom verification email templates for your account.

    " + }, + "ListCustomVerificationEmailTemplatesResponse":{ + "type":"structure", + "members":{ + "CustomVerificationEmailTemplates":{ + "shape":"CustomVerificationEmailTemplatesList", + "documentation":"

    A list of the custom verification email templates that exist in your account.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token indicating that there are additional custom verification email templates available to be listed. Pass this token to a subsequent call to ListCustomVerificationEmailTemplates to retrieve the next 50 custom verification email templates.

    " + } + }, + "documentation":"

    The following elements are returned by the service.

    " + }, + "ListDedicatedIpPoolsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token returned from a previous call to ListDedicatedIpPools to indicate the position in the list of dedicated IP pools.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    The number of results to show in a single call to ListDedicatedIpPools. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    ", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

    A request to obtain a list of dedicated IP pools.

    " + }, + "ListDedicatedIpPoolsResponse":{ + "type":"structure", + "members":{ + "DedicatedIpPools":{ + "shape":"ListOfDedicatedIpPools", + "documentation":"

    A list of all of the dedicated IP pools that are associated with your AWS account in the current Region.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token that indicates that there are additional IP pools to list. To view additional IP pools, issue another request to ListDedicatedIpPools, passing this token in the NextToken parameter.

    " + } + }, + "documentation":"

    A list of dedicated IP pools.

    " + }, + "ListDeliverabilityTestReportsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token returned from a previous call to ListDeliverabilityTestReports to indicate the position in the list of predictive inbox placement tests.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    The number of results to show in a single call to ListDeliverabilityTestReports. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    The value you specify has to be at least 0, and can be no more than 1000.

    ", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

    A request to list all of the predictive inbox placement tests that you've performed.

    " + }, + "ListDeliverabilityTestReportsResponse":{ + "type":"structure", + "required":["DeliverabilityTestReports"], + "members":{ + "DeliverabilityTestReports":{ + "shape":"DeliverabilityTestReports", + "documentation":"

    An object that contains a list of predictive inbox placement tests that you've performed.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token that indicates that there are additional predictive inbox placement tests to list. To view additional predictive inbox placement tests, issue another request to ListDeliverabilityTestReports, and pass this token in the NextToken parameter.

    " + } + }, + "documentation":"

    A list of the predictive inbox placement test reports that are available for your account, regardless of whether or not those tests are complete.

    " + }, + "ListDomainDeliverabilityCampaignsRequest":{ + "type":"structure", + "required":[ + "StartDate", + "EndDate", + "SubscribedDomain" + ], + "members":{ + "StartDate":{ + "shape":"Timestamp", + "documentation":"

    The first day, in Unix time format, that you want to obtain deliverability data for.

    ", + "location":"querystring", + "locationName":"StartDate" + }, + "EndDate":{ + "shape":"Timestamp", + "documentation":"

    The last day, in Unix time format, that you want to obtain deliverability data for. This value has to be less than or equal to 30 days after the value of the StartDate parameter.

    ", + "location":"querystring", + "locationName":"EndDate" + }, + "SubscribedDomain":{ + "shape":"Domain", + "documentation":"

    The domain to obtain deliverability data for.

    ", + "location":"uri", + "locationName":"SubscribedDomain" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token that’s returned from a previous call to the ListDomainDeliverabilityCampaigns operation. This token indicates the position of a campaign in the list of campaigns.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    The maximum number of results to include in response to a single call to the ListDomainDeliverabilityCampaigns operation. If the number of results is larger than the number that you specify in this parameter, the response includes a NextToken element, which you can use to obtain additional results.

    ", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

    Retrieve deliverability data for all the campaigns that used a specific domain to send email during a specified time range. This data is available for a domain only if you enabled the Deliverability dashboard.

    " + }, + "ListDomainDeliverabilityCampaignsResponse":{ + "type":"structure", + "required":["DomainDeliverabilityCampaigns"], + "members":{ + "DomainDeliverabilityCampaigns":{ + "shape":"DomainDeliverabilityCampaignList", + "documentation":"

    An array of responses, one for each campaign that used the domain to send email during the specified time range.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token that’s returned from a previous call to the ListDomainDeliverabilityCampaigns operation. This token indicates the position of the campaign in the list of campaigns.

    " + } + }, + "documentation":"

    An array of objects that provide deliverability data for all the campaigns that used a specific domain to send email during a specified time range. This data is available for a domain only if you enabled the Deliverability dashboard for the domain.

    " + }, + "ListEmailIdentitiesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token returned from a previous call to ListEmailIdentities to indicate the position in the list of identities.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    The number of results to show in a single call to ListEmailIdentities. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    The value you specify has to be at least 0, and can be no more than 1000.

    ", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

    A request to list all of the email identities associated with your AWS account. This list includes identities that you've already verified, identities that are unverified, and identities that were verified in the past, but are no longer verified.

    " + }, + "ListEmailIdentitiesResponse":{ + "type":"structure", + "members":{ + "EmailIdentities":{ + "shape":"IdentityInfoList", + "documentation":"

    An array that includes all of the email identities associated with your AWS account.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token that indicates that there are additional identities to list. To view additional identities, issue another request to ListEmailIdentities, and pass this token in the NextToken parameter.

    " + } + }, + "documentation":"

    A list of all of the identities that you've attempted to verify, regardless of whether or not those identities were successfully verified.

    " + }, + "ListEmailTemplatesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token returned from a previous call to ListEmailTemplates to indicate the position in the list of email templates.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    The number of results to show in a single call to ListEmailTemplates. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    The value you specify has to be at least 1, and can be no more than 10.

    ", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

    Represents a request to list the email templates present in your Amazon SES account in the current AWS Region. For more information, see the Amazon SES Developer Guide.

    " + }, + "ListEmailTemplatesResponse":{ + "type":"structure", + "members":{ + "TemplatesMetadata":{ + "shape":"EmailTemplateMetadataList", + "documentation":"

    An array that contains the name and creation timestamp for each template in your Amazon SES account.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token indicating that there are additional email templates available to be listed. Pass this token to a subsequent ListEmailTemplates call to retrieve the next 10 email templates.

    " + } + }, + "documentation":"

    The following elements are returned by the service.

    " + }, + "ListImportJobsRequest":{ + "type":"structure", + "members":{ + "ImportDestinationType":{ + "shape":"ImportDestinationType", + "documentation":"

    The destination of the import job, which can be used to list import jobs that have a certain ImportDestinationType.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A string token indicating that there might be additional import jobs available to be listed. Copy this token to a subsequent call to ListImportJobs with the same parameters to retrieve the next page of import jobs.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    Maximum number of import jobs to return at once. Use this parameter to paginate results. If additional import jobs exist beyond the specified limit, the NextToken element is sent in the response. Use the NextToken value in subsequent requests to retrieve additional import jobs.

    ", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

    Represents a request to list all of the import jobs for a data destination within the specified maximum number of import jobs.

    " + }, + "ListImportJobsResponse":{ + "type":"structure", + "members":{ + "ImportJobs":{ + "shape":"ImportJobSummaryList", + "documentation":"

    A list of the import job summaries.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A string token indicating that there might be additional import jobs available to be listed. Copy this token to a subsequent call to ListImportJobs with the same parameters to retrieve the next page of import jobs.

    " + } + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "ListManagementOptions":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list.

    " + }, + "TopicName":{ + "shape":"TopicName", + "documentation":"

    The name of the topic.

    " + } + }, + "documentation":"

    An object used to specify a list or topic to which an email belongs, which will be used when a contact chooses to unsubscribe.

    " + }, + "ListOfContactLists":{ + "type":"list", + "member":{"shape":"ContactList"} + }, + "ListOfContacts":{ + "type":"list", + "member":{"shape":"Contact"} + }, + "ListOfDedicatedIpPools":{ + "type":"list", + "member":{"shape":"PoolName"}, + "documentation":"

    A list of dedicated IP pools that are associated with your AWS account.

    " + }, + "ListSuppressedDestinationsRequest":{ + "type":"structure", + "members":{ + "Reasons":{ + "shape":"SuppressionListReasons", + "documentation":"

    The factors that caused the email address to be added to the suppression list for your account.

    ", + "location":"querystring", + "locationName":"Reason" + }, + "StartDate":{ + "shape":"Timestamp", + "documentation":"

    Used to filter the list of suppressed email destinations so that it only includes addresses that were added to the list after a specific date. The date that you specify should be in Unix time format.

    ", + "location":"querystring", + "locationName":"StartDate" + }, + "EndDate":{ + "shape":"Timestamp", + "documentation":"

    Used to filter the list of suppressed email destinations so that it only includes addresses that were added to the list before a specific date. The date that you specify should be in Unix time format.

    ", + "location":"querystring", + "locationName":"EndDate" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token returned from a previous call to ListSuppressedDestinations to indicate the position in the list of suppressed email addresses.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    The number of results to show in a single call to ListSuppressedDestinations. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    ", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

    A request to obtain a list of email destinations that are on the suppression list for your account.

    " + }, + "ListSuppressedDestinationsResponse":{ + "type":"structure", + "members":{ + "SuppressedDestinationSummaries":{ + "shape":"SuppressedDestinationSummaries", + "documentation":"

    A list of summaries, each containing a summary for a suppressed email destination.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token that indicates that there are additional email addresses on the suppression list for your account. To view additional suppressed addresses, issue another request to ListSuppressedDestinations, and pass this token in the NextToken parameter.

    " + } + }, + "documentation":"

    A list of suppressed email addresses.

    " + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to retrieve tag information for.

    ", + "location":"querystring", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

    An array that lists all the tags that are associated with the resource. Each tag consists of a required tag key (Key) and an associated tag value (Value).

    " + } + } + }, + "MailFromAttributes":{ + "type":"structure", + "required":[ + "MailFromDomain", + "MailFromDomainStatus", + "BehaviorOnMxFailure" + ], + "members":{ + "MailFromDomain":{ + "shape":"MailFromDomainName", + "documentation":"

    The name of a domain that an email identity uses as a custom MAIL FROM domain.

    " + }, + "MailFromDomainStatus":{ + "shape":"MailFromDomainStatus", + "documentation":"

    The status of the MAIL FROM domain. This status can have the following values:

    • PENDING – Amazon SES hasn't started searching for the MX record yet.

    • SUCCESS – Amazon SES detected the required MX record for the MAIL FROM domain.

    • FAILED – Amazon SES can't find the required MX record, or the record no longer exists.

    • TEMPORARY_FAILURE – A temporary issue occurred, which prevented Amazon SES from determining the status of the MAIL FROM domain.

    " + }, + "BehaviorOnMxFailure":{ + "shape":"BehaviorOnMxFailure", + "documentation":"

    The action that you want to take if the required MX record can't be found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

    These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.

    " + } + }, + "documentation":"

    A list of attributes that are associated with a MAIL FROM domain.

    " + }, + "MailFromDomainName":{ + "type":"string", + "documentation":"

    The domain that you want to use as a MAIL FROM domain.

    " + }, + "MailFromDomainNotVerifiedException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The message can't be sent because the sending domain isn't verified.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "MailFromDomainStatus":{ + "type":"string", + "documentation":"

    The status of the MAIL FROM domain. This status can have the following values:

    • PENDING – Amazon SES hasn't started searching for the MX record yet.

    • SUCCESS – Amazon SES detected the required MX record for the MAIL FROM domain.

    • FAILED – Amazon SES can't find the required MX record, or the record no longer exists.

    • TEMPORARY_FAILURE – A temporary issue occurred, which prevented Amazon SES from determining the status of the MAIL FROM domain.

    ", + "enum":[ + "PENDING", + "SUCCESS", + "FAILED", + "TEMPORARY_FAILURE" + ] + }, + "MailType":{ + "type":"string", + "enum":[ + "MARKETING", + "TRANSACTIONAL" + ] + }, + "Max24HourSend":{"type":"double"}, + "MaxItems":{"type":"integer"}, + "MaxSendRate":{"type":"double"}, + "Message":{ + "type":"structure", + "required":[ + "Subject", + "Body" + ], + "members":{ + "Subject":{ + "shape":"Content", + "documentation":"

    The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in RFC 2047.

    " + }, + "Body":{ + "shape":"Body", + "documentation":"

    The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.

    " + } + }, + "documentation":"

    Represents the email message that you're sending. The Message object consists of a subject line and a message body.

    " + }, + "MessageContent":{ + "type":"string", + "documentation":"

    The body of an email message.

    " + }, + "MessageData":{"type":"string"}, + "MessageRejected":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The message can't be sent because it contains invalid content.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "MessageTag":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"MessageTagName", + "documentation":"

    The name of the message tag. The message tag name has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + }, + "Value":{ + "shape":"MessageTagValue", + "documentation":"

    The value of the message tag. The message tag value has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + } + }, + "documentation":"

    Contains the name and value of a tag that you apply to an email. You can use message tags when you publish email sending events.

    " + }, + "MessageTagList":{ + "type":"list", + "member":{"shape":"MessageTag"}, + "documentation":"

    A list of message tags.

    " + }, + "MessageTagName":{ + "type":"string", + "documentation":"

    The name of the message tag. The message tag name has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + }, + "MessageTagValue":{ + "type":"string", + "documentation":"

    The value of the message tag. The message tag value has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + }, + "NextToken":{"type":"string"}, + "NotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The resource you attempted to access doesn't exist.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "OutboundMessageId":{"type":"string"}, + "OverallVolume":{ + "type":"structure", + "members":{ + "VolumeStatistics":{ + "shape":"VolumeStatistics", + "documentation":"

    An object that contains information about the numbers of messages that arrived in recipients' inboxes and junk mail folders.

    " + }, + "ReadRatePercent":{ + "shape":"Percentage", + "documentation":"

    The percentage of emails that were sent from the domain that were read by their recipients.

    " + }, + "DomainIspPlacements":{ + "shape":"DomainIspPlacements", + "documentation":"

    An object that contains inbox and junk mail placement metrics for individual email providers.

    " + } + }, + "documentation":"

    An object that contains information about email that was sent from the selected domain.

    " + }, + "Percentage":{ + "type":"double", + "documentation":"

    An object that contains information about inbox placement percentages.

    " + }, + "Percentage100Wrapper":{"type":"integer"}, + "PinpointDestination":{ + "type":"structure", + "members":{ + "ApplicationArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.

    " + } + }, + "documentation":"

    An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to an Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built into Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.

    " + }, + "PlacementStatistics":{ + "type":"structure", + "members":{ + "InboxPercentage":{ + "shape":"Percentage", + "documentation":"

    The percentage of emails that arrived in recipients' inboxes during the predictive inbox placement test.

    " + }, + "SpamPercentage":{ + "shape":"Percentage", + "documentation":"

    The percentage of emails that arrived in recipients' spam or junk mail folders during the predictive inbox placement test.

    " + }, + "MissingPercentage":{ + "shape":"Percentage", + "documentation":"

    The percentage of emails that didn't arrive in recipients' inboxes at all during the predictive inbox placement test.

    " + }, + "SpfPercentage":{ + "shape":"Percentage", + "documentation":"

    The percentage of emails that were authenticated by using Sender Policy Framework (SPF) during the predictive inbox placement test.

    " + }, + "DkimPercentage":{ + "shape":"Percentage", + "documentation":"

    The percentage of emails that were authenticated by using DomainKeys Identified Mail (DKIM) during the predictive inbox placement test.

    " + } + }, + "documentation":"

    An object that contains inbox placement data for an email provider.

    " + }, + "Policy":{ + "type":"string", + "documentation":"

    The text of the policy in JSON format. The policy cannot exceed 4 KB.

    For information about the syntax of sending authorization policies, see the Amazon SES Developer Guide.

    ", + "min":1 + }, + "PolicyMap":{ + "type":"map", + "key":{"shape":"PolicyName"}, + "value":{"shape":"Policy"}, + "documentation":"

    An object that contains a mapping between PolicyName and Policy text.

    " + }, + "PolicyName":{ + "type":"string", + "documentation":"

    The name of the policy.

    The policy name cannot exceed 64 characters and can only include alphanumeric characters, dashes, and underscores.

    ", + "max":64, + "min":1 + }, + "PoolName":{ + "type":"string", + "documentation":"

    The name of a dedicated IP pool.

    " + }, + "PrivateKey":{ + "type":"string", + "max":20480, + "min":1, + "pattern":"^[a-zA-Z0-9+\\/]+={0,2}$", + "sensitive":true + }, + "ProcessedRecordsCount":{"type":"integer"}, + "PutAccountDedicatedIpWarmupAttributesRequest":{ + "type":"structure", + "members":{ + "AutoWarmupEnabled":{ + "shape":"Enabled", + "documentation":"

    Enables or disables the automatic warm-up feature for dedicated IP addresses that are associated with your Amazon SES account in the current AWS Region. Set to true to enable the automatic warm-up feature, or set to false to disable it.

    " + } + }, + "documentation":"

    A request to enable or disable the automatic IP address warm-up feature.

    " + }, + "PutAccountDedicatedIpWarmupAttributesResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutAccountDetailsRequest":{ + "type":"structure", + "required":[ + "MailType", + "WebsiteURL", + "UseCaseDescription" + ], + "members":{ + "MailType":{ + "shape":"MailType", + "documentation":"

    The type of email your account will send.

    " + }, + "WebsiteURL":{ + "shape":"WebsiteURL", + "documentation":"

    The URL of your website. This information helps us better understand the type of content that you plan to send.

    " + }, + "ContactLanguage":{ + "shape":"ContactLanguage", + "documentation":"

    The language you would prefer to be contacted with.

    " + }, + "UseCaseDescription":{ + "shape":"UseCaseDescription", + "documentation":"

    A description of the types of email that you plan to send.

    " + }, + "AdditionalContactEmailAddresses":{ + "shape":"AdditionalContactEmailAddresses", + "documentation":"

    Additional email addresses that you want to receive notifications regarding Amazon SES matters.

    " + }, + "ProductionAccessEnabled":{ + "shape":"EnabledWrapper", + "documentation":"

    Indicates whether or not your account should have production access in the current AWS Region.

    If the value is false, then your account is in the sandbox. When your account is in the sandbox, you can only send email to verified identities. Additionally, the maximum number of emails you can send in a 24-hour period (your sending quota) is 200, and the maximum number of emails you can send per second (your maximum sending rate) is 1.

    If the value is true, then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case.

    " + } + }, + "documentation":"

    A request to submit new account details.

    " + }, + "PutAccountDetailsResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutAccountSendingAttributesRequest":{ + "type":"structure", + "members":{ + "SendingEnabled":{ + "shape":"Enabled", + "documentation":"

    Enables or disables your account's ability to send email. Set to true to enable email sending, or set to false to disable email sending.

    If AWS paused your account's ability to send email, you can't use this operation to resume your account's ability to send email.

    " + } + }, + "documentation":"

    A request to change the ability of your account to send email.

    " + }, + "PutAccountSendingAttributesResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutAccountSuppressionAttributesRequest":{ + "type":"structure", + "members":{ + "SuppressedReasons":{ + "shape":"SuppressionListReasons", + "documentation":"

    A list that contains the reasons that email addresses will be automatically added to the suppression list for your account. This list can contain any or all of the following:

    • COMPLAINT – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a complaint.

    • BOUNCE – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a hard bounce.

    " + } + }, + "documentation":"

    A request to change your account's suppression preferences.

    " + }, + "PutAccountSuppressionAttributesResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutConfigurationSetDeliveryOptionsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that you want to associate with a dedicated IP pool.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "TlsPolicy":{ + "shape":"TlsPolicy", + "documentation":"

    Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established.

    " + }, + "SendingPoolName":{ + "shape":"SendingPoolName", + "documentation":"

    The name of the dedicated IP pool that you want to associate with the configuration set.

    " + } + }, + "documentation":"

    A request to associate a configuration set with a dedicated IP pool.

    " + }, + "PutConfigurationSetDeliveryOptionsResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutConfigurationSetReputationOptionsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that you want to enable or disable reputation metric tracking for.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "ReputationMetricsEnabled":{ + "shape":"Enabled", + "documentation":"

    If true, tracking of reputation metrics is enabled for the configuration set. If false, tracking of reputation metrics is disabled for the configuration set.

    " + } + }, + "documentation":"

    A request to enable or disable tracking of reputation metrics for a configuration set.

    " + }, + "PutConfigurationSetReputationOptionsResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutConfigurationSetSendingOptionsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that you want to enable or disable email sending for.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "SendingEnabled":{ + "shape":"Enabled", + "documentation":"

    If true, email sending is enabled for the configuration set. If false, email sending is disabled for the configuration set.

    " + } + }, + "documentation":"

    A request to enable or disable the ability of Amazon SES to send emails that use a specific configuration set.

    " + }, + "PutConfigurationSetSendingOptionsResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutConfigurationSetSuppressionOptionsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that you want to change the suppression list preferences for.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "SuppressedReasons":{ + "shape":"SuppressionListReasons", + "documentation":"

    A list that contains the reasons that email addresses are automatically added to the suppression list for your account. This list can contain any or all of the following:

    • COMPLAINT – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a complaint.

    • BOUNCE – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a hard bounce.

    " + } + }, + "documentation":"

    A request to change the account suppression list preferences for a specific configuration set.

    " + }, + "PutConfigurationSetSuppressionOptionsResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutConfigurationSetTrackingOptionsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that you want to add a custom tracking domain to.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "CustomRedirectDomain":{ + "shape":"CustomRedirectDomain", + "documentation":"

    The domain that you want to use to track open and click events.

    " + } + }, + "documentation":"

    A request to add a custom domain for tracking open and click events to a configuration set.

    " + }, + "PutConfigurationSetTrackingOptionsResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutDedicatedIpInPoolRequest":{ + "type":"structure", + "required":[ + "Ip", + "DestinationPoolName" + ], + "members":{ + "Ip":{ + "shape":"Ip", + "documentation":"

    The IP address that you want to move to the dedicated IP pool. The value you specify has to be a dedicated IP address that's associated with your AWS account.

    ", + "location":"uri", + "locationName":"IP" + }, + "DestinationPoolName":{ + "shape":"PoolName", + "documentation":"

    The name of the IP pool that you want to add the dedicated IP address to. You have to specify an IP pool that already exists.

    " + } + }, + "documentation":"

    A request to move a dedicated IP address to a dedicated IP pool.

    " + }, + "PutDedicatedIpInPoolResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutDedicatedIpWarmupAttributesRequest":{ + "type":"structure", + "required":[ + "Ip", + "WarmupPercentage" + ], + "members":{ + "Ip":{ + "shape":"Ip", + "documentation":"

    The dedicated IP address that you want to update the warm-up attributes for.

    ", + "location":"uri", + "locationName":"IP" + }, + "WarmupPercentage":{ + "shape":"Percentage100Wrapper", + "documentation":"

    The warm-up percentage that you want to associate with the dedicated IP address.

    " + } + }, + "documentation":"

    A request to change the warm-up attributes for a dedicated IP address. This operation is useful when you want to resume the warm-up process for an existing IP address.

    " + }, + "PutDedicatedIpWarmupAttributesResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutDeliverabilityDashboardOptionRequest":{ + "type":"structure", + "required":["DashboardEnabled"], + "members":{ + "DashboardEnabled":{ + "shape":"Enabled", + "documentation":"

    Specifies whether to enable the Deliverability dashboard. To enable the dashboard, set this value to true.

    " + }, + "SubscribedDomains":{ + "shape":"DomainDeliverabilityTrackingOptions", + "documentation":"

    An array of objects, one for each verified domain that you use to send email and for which you enabled the Deliverability dashboard.

    " + } + }, + "documentation":"

    Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email using Amazon SES API v2. You also gain the ability to perform predictive inbox placement tests.

    When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

    " + }, + "PutDeliverabilityDashboardOptionResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    A response that indicates whether the Deliverability dashboard is enabled.

    " + }, + "PutEmailIdentityDkimAttributesRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The email identity that you want to change the DKIM settings for.

    ", + "location":"uri", + "locationName":"EmailIdentity" + }, + "SigningEnabled":{ + "shape":"Enabled", + "documentation":"

    Sets the DKIM signing configuration for the identity.

    When you set this value to true, the messages that are sent from the identity are signed using DKIM. If you set this value to false, your messages are sent without DKIM signing.

    " + } + }, + "documentation":"

    A request to enable or disable DKIM signing of email that you send from an email identity.

    " + }, + "PutEmailIdentityDkimAttributesResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutEmailIdentityDkimSigningAttributesRequest":{ + "type":"structure", + "required":[ + "EmailIdentity", + "SigningAttributesOrigin" + ], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The email identity that you want to configure DKIM for.

    ", + "location":"uri", + "locationName":"EmailIdentity" + }, + "SigningAttributesOrigin":{ + "shape":"DkimSigningAttributesOrigin", + "documentation":"

    The method that you want to use to configure DKIM for the identity. There are two possible values:

    • AWS_SES – Configure DKIM for the identity by using Easy DKIM.

    • EXTERNAL – Configure DKIM for the identity by using Bring Your Own DKIM (BYODKIM).

    " + }, + "SigningAttributes":{ + "shape":"DkimSigningAttributes", + "documentation":"

    An object that contains information about the private key and selector that you want to use to configure DKIM for the identity. This object is only required if you want to configure Bring Your Own DKIM (BYODKIM) for the identity.

    " + } + }, + "documentation":"

    A request to change the DKIM attributes for an email identity.

    " + }, + "PutEmailIdentityDkimSigningAttributesResponse":{ + "type":"structure", + "members":{ + "DkimStatus":{ + "shape":"DkimStatus", + "documentation":"

    The DKIM authentication status of the identity. Amazon SES determines the authentication status by searching for specific records in the DNS configuration for your domain. If you used Easy DKIM to set up DKIM authentication, Amazon SES tries to find three unique CNAME records in the DNS configuration for your domain.

    If you provided a public key to perform DKIM authentication, Amazon SES tries to find a TXT record that uses the selector that you specified. The value of the TXT record must be a public key that's paired with the private key that you specified in the process of creating the identity.

    The status can be one of the following:

    • PENDING – The verification process was initiated, but Amazon SES hasn't yet detected the DKIM records in the DNS configuration for the domain.

    • SUCCESS – The verification process completed successfully.

    • FAILED – The verification process failed. This typically occurs when Amazon SES fails to find the DKIM records in the DNS configuration of the domain.

    • TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining the DKIM authentication status of the domain.

    • NOT_STARTED – The DKIM verification process hasn't been initiated for the domain.

    " + }, + "DkimTokens":{ + "shape":"DnsTokenList", + "documentation":"

    If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete.

    If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector that's associated with your public key.

    Regardless of the DKIM authentication method you use, Amazon SES searches for the appropriate records in the DNS configuration of the domain for up to 72 hours.

    " + } + }, + "documentation":"

    If the action is successful, the service sends back an HTTP 200 response.

    The following data is returned in JSON format by the service.

    " + }, + "PutEmailIdentityFeedbackAttributesRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The email identity that you want to configure bounce and complaint feedback forwarding for.

    ", + "location":"uri", + "locationName":"EmailIdentity" + }, + "EmailForwardingEnabled":{ + "shape":"Enabled", + "documentation":"

    Sets the feedback forwarding configuration for the identity.

    If the value is true, you receive email notifications when bounce or complaint events occur. These notifications are sent to the address that you specified in the Return-Path header of the original email.

    You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications (for example, by setting up an event destination), you receive an email notification when these events occur (even if this setting is disabled).

    " + } + }, + "documentation":"

    A request to set the attributes that control how bounce and complaint events are processed.

    " + }, + "PutEmailIdentityFeedbackAttributesResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutEmailIdentityMailFromAttributesRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The verified email identity that you want to set up the custom MAIL FROM domain for.

    ", + "location":"uri", + "locationName":"EmailIdentity" + }, + "MailFromDomain":{ + "shape":"MailFromDomainName", + "documentation":"

    The custom MAIL FROM domain that you want the verified identity to use. The MAIL FROM domain must meet the following criteria:

    • It has to be a subdomain of the verified identity.

    • It can't be used to receive email.

    • It can't be used in a \"From\" address if the MAIL FROM domain is a destination for feedback forwarding emails.

    " + }, + "BehaviorOnMxFailure":{ + "shape":"BehaviorOnMxFailure", + "documentation":"

    The action that you want to take if the required MX record isn't found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

    These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.

    " + } + }, + "documentation":"

    A request to configure the custom MAIL FROM domain for a verified identity.

    " + }, + "PutEmailIdentityMailFromAttributesResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "PutSuppressedDestinationRequest":{ + "type":"structure", + "required":[ + "EmailAddress", + "Reason" + ], + "members":{ + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that should be added to the suppression list for your account.

    " + }, + "Reason":{ + "shape":"SuppressionListReason", + "documentation":"

    The factors that should cause the email address to be added to the suppression list for your account.

    " + } + }, + "documentation":"

    A request to add an email destination to the suppression list for your account.

    " + }, + "PutSuppressedDestinationResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "RawMessage":{ + "type":"structure", + "required":["Data"], + "members":{ + "Data":{ + "shape":"RawMessageData", + "documentation":"

    The raw email message. The message has to meet the following criteria:

    • The message has to contain a header and a body, separated by one blank line.

    • All of the required header fields must be present in the message.

    • Each part of a multipart MIME message must be formatted properly.

    • Attachments must be in a file format that Amazon SES supports.

    • The entire message must be Base64 encoded.

    • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.

    • The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.

    " + } + }, + "documentation":"

    Represents the raw content of an email message.

    " + }, + "RawMessageData":{ + "type":"blob", + "documentation":"

    The raw email message. The message has to meet the following criteria:

    • The message has to contain a header and a body, separated by one blank line.

    • All of the required header fields must be present in the message.

    • Each part of a multipart MIME message must be formatted properly.

    • Attachments must be in a file format that the Amazon SES API v2 supports.

    • The entire message must be Base64 encoded.

    • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.

    • The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.

    " + }, + "RblName":{ + "type":"string", + "documentation":"

    The name of a blacklist that an IP address was found on.

    " + }, + "RenderedEmailTemplate":{ + "type":"string", + "documentation":"

    The complete MIME message rendered by applying the data in the TemplateData parameter to the template specified in the TemplateName parameter.

    " + }, + "ReplacementEmailContent":{ + "type":"structure", + "members":{ + "ReplacementTemplate":{ + "shape":"ReplacementTemplate", + "documentation":"

    The ReplacementTemplate associated with ReplacementEmailContent.

    " + } + }, + "documentation":"

    The ReplacementEmailContent object to be used for a specific BulkEmailEntry. The ReplacementTemplate can be specified within this object.

    " + }, + "ReplacementTemplate":{ + "type":"structure", + "members":{ + "ReplacementTemplateData":{ + "shape":"EmailTemplateData", + "documentation":"

    A list of replacement values to apply to the template. This parameter is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template.

    " + } + }, + "documentation":"

    An object which contains ReplacementTemplateData to be used for a specific BulkEmailEntry.

    " + }, + "ReportId":{ + "type":"string", + "documentation":"

    A unique string that identifies a Deliverability dashboard report.

    " + }, + "ReportName":{ + "type":"string", + "documentation":"

    A name that helps you identify a report generated by the Deliverability dashboard.

    " + }, + "ReputationOptions":{ + "type":"structure", + "members":{ + "ReputationMetricsEnabled":{ + "shape":"Enabled", + "documentation":"

    If true, tracking of reputation metrics is enabled for the configuration set. If false, tracking of reputation metrics is disabled for the configuration set.

    " + }, + "LastFreshStart":{ + "shape":"LastFreshStart", + "documentation":"

    The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.

    " + } + }, + "documentation":"

    Enable or disable collection of reputation metrics for emails that you send using this configuration set in the current AWS Region.

    " + }, + "ReviewDetails":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"ReviewStatus", + "documentation":"

    The status of the latest review of your account. The status can be one of the following:

    • PENDING – We have received your appeal and are in the process of reviewing it.

    • GRANTED – Your appeal has been reviewed and your production access has been granted.

    • DENIED – Your appeal has been reviewed and your production access has been denied.

    • FAILED – An internal error occurred and we didn't receive your appeal. You can submit your appeal again.

    " + }, + "CaseId":{ + "shape":"CaseId", + "documentation":"

    The associated support center case ID (if any).

    " + } + }, + "documentation":"

    An object that contains information about your account details review.

    " + }, + "ReviewStatus":{ + "type":"string", + "enum":[ + "PENDING", + "FAILED", + "GRANTED", + "DENIED" + ] + }, + "S3Url":{ + "type":"string", + "documentation":"

    An Amazon S3 URL in the format s3://<bucket_name>/<object>.

    ", + "pattern":"^s3:\\/\\/([^\\/]+)\\/(.*?([^\\/]+)\\/?)$" + }, + "Selector":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9]))$" + }, + "SendBulkEmailRequest":{ + "type":"structure", + "required":[ + "DefaultContent", + "BulkEmailEntries" + ], + "members":{ + "FromEmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that you want to use as the \"From\" address for the email. The address that you specify has to be verified.

    " + }, + "FromEmailAddressIdentityArn":{ + "shape":"AmazonResourceName", + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the FromEmailAddress parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use sender@example.com, then you would specify the FromEmailAddressIdentityArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the FromEmailAddress to be sender@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " + }, + "ReplyToAddresses":{ + "shape":"EmailAddressList", + "documentation":"

    The \"Reply-to\" email addresses for the message. When the recipient replies to the message, each Reply-to address receives the reply.

    " + }, + "FeedbackForwardingEmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The address that you want bounce and complaint notifications to be sent to.

    " + }, + "FeedbackForwardingEmailAddressIdentityArn":{ + "shape":"AmazonResourceName", + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the FeedbackForwardingEmailAddress parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the FeedbackForwardingEmailAddressIdentityArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the FeedbackForwardingEmailAddress to be feedback@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " + }, + "DefaultEmailTags":{ + "shape":"MessageTagList", + "documentation":"

    A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendEmail operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events.

    " + }, + "DefaultContent":{ + "shape":"BulkEmailContent", + "documentation":"

    An object that contains the body of the message. You can specify a template message.

    " + }, + "BulkEmailEntries":{ + "shape":"BulkEmailEntryList", + "documentation":"

    The list of bulk email entry objects.

    " + }, + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that you want to use when sending the email.

    " + } + }, + "documentation":"

    Represents a request to send email messages to multiple destinations using Amazon SES. For more information, see the Amazon SES Developer Guide.
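    A hedged sketch of this request with the generated Java client (shape names taken from this model; the template name, replacement data, and addresses are placeholders):

    import software.amazon.awssdk.services.sesv2.SesV2Client;
    import software.amazon.awssdk.services.sesv2.model.BulkEmailContent;
    import software.amazon.awssdk.services.sesv2.model.BulkEmailEntry;
    import software.amazon.awssdk.services.sesv2.model.Destination;
    import software.amazon.awssdk.services.sesv2.model.ReplacementEmailContent;
    import software.amazon.awssdk.services.sesv2.model.ReplacementTemplate;
    import software.amazon.awssdk.services.sesv2.model.SendBulkEmailRequest;
    import software.amazon.awssdk.services.sesv2.model.Template;

    public class SendBulkEmailExample {
        public static void main(String[] args) {
            // DefaultContent applies to every entry unless a replacement is provided.
            BulkEmailContent defaultContent = BulkEmailContent.builder()
                    .template(Template.builder()
                            .templateName("WelcomeTemplate")
                            .templateData("{\"name\":\"friend\"}")
                            .build())
                    .build();

            // One entry per recipient, with per-recipient replacement data.
            BulkEmailEntry entry = BulkEmailEntry.builder()
                    .destination(Destination.builder().toAddresses("recipient@example.com").build())
                    .replacementEmailContent(ReplacementEmailContent.builder()
                            .replacementTemplate(ReplacementTemplate.builder()
                                    .replacementTemplateData("{\"name\":\"Alice\"}")
                                    .build())
                            .build())
                    .build();

            try (SesV2Client ses = SesV2Client.create()) {
                ses.sendBulkEmail(SendBulkEmailRequest.builder()
                        .fromEmailAddress("sender@example.com")
                        .defaultContent(defaultContent)
                        .bulkEmailEntries(entry)
                        .build());
            }
        }
    }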

    " + }, + "SendBulkEmailResponse":{ + "type":"structure", + "required":["BulkEmailEntryResults"], + "members":{ + "BulkEmailEntryResults":{"shape":"BulkEmailEntryResultList"} + }, + "documentation":"

    The following data is returned in JSON format by the service.

    " + }, + "SendCustomVerificationEmailRequest":{ + "type":"structure", + "required":[ + "EmailAddress", + "TemplateName" + ], + "members":{ + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address to verify.

    " + }, + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the custom verification email template to use when sending the verification email.

    " + }, + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    Name of a configuration set to use when sending the verification email.

    " + } + }, + "documentation":"

    Represents a request to send a custom verification email to a specified recipient.

    " + }, + "SendCustomVerificationEmailResponse":{ + "type":"structure", + "members":{ + "MessageId":{ + "shape":"OutboundMessageId", + "documentation":"

    The unique message identifier returned from the SendCustomVerificationEmail operation.

    " + } + }, + "documentation":"

    The following element is returned by the service.

    " + }, + "SendEmailRequest":{ + "type":"structure", + "required":["Content"], + "members":{ + "FromEmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that you want to use as the \"From\" address for the email. The address that you specify has to be verified.

    " + }, + "FromEmailAddressIdentityArn":{ + "shape":"AmazonResourceName", + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the FromEmailAddress parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use sender@example.com, then you would specify the FromEmailAddressIdentityArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the FromEmailAddress to be sender@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    For Raw emails, the FromEmailAddressIdentityArn value overrides the X-SES-SOURCE-ARN and X-SES-FROM-ARN headers specified in raw email message content.

    " + }, + "Destination":{ + "shape":"Destination", + "documentation":"

    An object that contains the recipients of the email message.

    " + }, + "ReplyToAddresses":{ + "shape":"EmailAddressList", + "documentation":"

    The \"Reply-to\" email addresses for the message. When the recipient replies to the message, each Reply-to address receives the reply.

    " + }, + "FeedbackForwardingEmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The address that you want bounce and complaint notifications to be sent to.

    " + }, + "FeedbackForwardingEmailAddressIdentityArn":{ + "shape":"AmazonResourceName", + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the FeedbackForwardingEmailAddress parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the FeedbackForwardingEmailAddressIdentityArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the FeedbackForwardingEmailAddress to be feedback@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " + }, + "Content":{ + "shape":"EmailContent", + "documentation":"

    An object that contains the body of the message. You can send a simple message, a raw message, or a templated message.

    " + }, + "EmailTags":{ + "shape":"MessageTagList", + "documentation":"

    A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendEmail operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events.

    " + }, + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that you want to use when sending the email.

    " + }, + "ListManagementOptions":{ + "shape":"ListManagementOptions", + "documentation":"

    An object used to specify a list or topic to which an email belongs, which will be used when a contact chooses to unsubscribe.

    " + } + }, + "documentation":"

    Represents a request to send a single formatted email using Amazon SES. For more information, see the Amazon SES Developer Guide.
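    For illustration, a minimal sketch of a simple (formatted) SendEmail call using the generated Java client (class names assumed from this model; addresses are placeholders):

    import software.amazon.awssdk.services.sesv2.SesV2Client;
    import software.amazon.awssdk.services.sesv2.model.Body;
    import software.amazon.awssdk.services.sesv2.model.Content;
    import software.amazon.awssdk.services.sesv2.model.Destination;
    import software.amazon.awssdk.services.sesv2.model.EmailContent;
    import software.amazon.awssdk.services.sesv2.model.Message;
    import software.amazon.awssdk.services.sesv2.model.SendEmailRequest;
    import software.amazon.awssdk.services.sesv2.model.SendEmailResponse;

    public class SendSimpleEmailExample {
        public static void main(String[] args) {
            EmailContent content = EmailContent.builder()
                    .simple(Message.builder()
                            .subject(Content.builder().data("Hello").build())
                            .body(Body.builder()
                                    .text(Content.builder().data("Sent with the SES API v2.").build())
                                    .build())
                            .build())
                    .build();

            try (SesV2Client ses = SesV2Client.create()) {
                SendEmailResponse response = ses.sendEmail(SendEmailRequest.builder()
                        .fromEmailAddress("sender@example.com")   // must be a verified identity
                        .destination(Destination.builder().toAddresses("recipient@example.com").build())
                        .content(content)
                        .build());
                // The message ID is assigned when the message is accepted for sending.
                System.out.println("MessageId: " + response.messageId());
            }
        }
    }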

    " + }, + "SendEmailResponse":{ + "type":"structure", + "members":{ + "MessageId":{ + "shape":"OutboundMessageId", + "documentation":"

    A unique identifier for the message that is generated when the message is accepted.

    It's possible for Amazon SES to accept a message without sending it. This can happen when the message that you're trying to send has an attachment that contains a virus, or when you send a templated email that contains invalid personalization content, for example.

    " + } + }, + "documentation":"

    A unique message ID that you receive when an email is accepted for sending.

    " + }, + "SendQuota":{ + "type":"structure", + "members":{ + "Max24HourSend":{ + "shape":"Max24HourSend", + "documentation":"

    The maximum number of emails that you can send in the current AWS Region over a 24-hour period. This value is also called your sending quota.

    " + }, + "MaxSendRate":{ + "shape":"MaxSendRate", + "documentation":"

    The maximum number of emails that you can send per second in the current AWS Region. This value is also called your maximum sending rate or your maximum TPS (transactions per second) rate.

    " + }, + "SentLast24Hours":{ + "shape":"SentLast24Hours", + "documentation":"

    The number of emails sent from your Amazon SES account in the current AWS Region over the past 24 hours.

    " + } + }, + "documentation":"

    An object that contains information about the per-day and per-second sending limits for your Amazon SES account in the current AWS Region.

    " + }, + "SendingOptions":{ + "type":"structure", + "members":{ + "SendingEnabled":{ + "shape":"Enabled", + "documentation":"

    If true, email sending is enabled for the configuration set. If false, email sending is disabled for the configuration set.

    " + } + }, + "documentation":"

    Used to enable or disable email sending for messages that use this configuration set in the current AWS Region.

    " + }, + "SendingPausedException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The message can't be sent because the account's ability to send email is currently paused.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "SendingPoolName":{ + "type":"string", + "documentation":"

    The name of the dedicated IP pool that you want to associate with the configuration set.

    " + }, + "SentLast24Hours":{"type":"double"}, + "SnsDestination":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " + } + }, + "documentation":"

    An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.

    " + }, + "Subject":{"type":"string"}, + "SubscriptionStatus":{ + "type":"string", + "enum":[ + "OPT_IN", + "OPT_OUT" + ] + }, + "SuccessRedirectionURL":{ + "type":"string", + "documentation":"

    The URL that the recipient of the verification email is sent to if his or her address is successfully verified.

    " + }, + "SuppressedDestination":{ + "type":"structure", + "required":[ + "EmailAddress", + "Reason", + "LastUpdateTime" + ], + "members":{ + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that is on the suppression list for your account.

    " + }, + "Reason":{ + "shape":"SuppressionListReason", + "documentation":"

    The reason that the address was added to the suppression list for your account.

    " + }, + "LastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time when the suppressed destination was last updated, shown in Unix time format.

    " + }, + "Attributes":{ + "shape":"SuppressedDestinationAttributes", + "documentation":"

    An optional value that can contain additional information about the reasons that the address was added to the suppression list for your account.

    " + } + }, + "documentation":"

    An object that contains information about an email address that is on the suppression list for your account.

    " + }, + "SuppressedDestinationAttributes":{ + "type":"structure", + "members":{ + "MessageId":{ + "shape":"OutboundMessageId", + "documentation":"

    The unique identifier of the email message that caused the email address to be added to the suppression list for your account.

    " + }, + "FeedbackId":{ + "shape":"FeedbackId", + "documentation":"

    A unique identifier that's generated when an email address is added to the suppression list for your account.

    " + } + }, + "documentation":"

    An object that contains additional attributes that are related to an email address that is on the suppression list for your account.

    " + }, + "SuppressedDestinationSummaries":{ + "type":"list", + "member":{"shape":"SuppressedDestinationSummary"} + }, + "SuppressedDestinationSummary":{ + "type":"structure", + "required":[ + "EmailAddress", + "Reason", + "LastUpdateTime" + ], + "members":{ + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that's on the suppression list for your account.

    " + }, + "Reason":{ + "shape":"SuppressionListReason", + "documentation":"

    The reason that the address was added to the suppression list for your account.

    " + }, + "LastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

    The date and time when the suppressed destination was last updated, shown in Unix time format.

    " + } + }, + "documentation":"

    A summary that describes the suppressed email address.

    " + }, + "SuppressionAttributes":{ + "type":"structure", + "members":{ + "SuppressedReasons":{ + "shape":"SuppressionListReasons", + "documentation":"

    A list that contains the reasons that email addresses will be automatically added to the suppression list for your account. This list can contain any or all of the following:

    • COMPLAINT – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a complaint.

    • BOUNCE – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a hard bounce.

    " + } + }, + "documentation":"

    An object that contains information about the email address suppression preferences for your account in the current AWS Region.

    " + }, + "SuppressionListDestination":{ + "type":"structure", + "required":["SuppressionListImportAction"], + "members":{ + "SuppressionListImportAction":{ + "shape":"SuppressionListImportAction", + "documentation":"

    The type of action that you want to perform on the address. Acceptable values:

    • PUT: add the addresses to the suppression list. If the record already exists, it is overwritten with the new value.

    • DELETE: remove the addresses from the suppression list.

    " + } + }, + "documentation":"

    An object that contains details about the action to perform on the suppression list.

    " + }, + "SuppressionListImportAction":{ + "type":"string", + "documentation":"

    The type of action that you want to perform on the address. Acceptable values:

    • PUT: add the addresses to the suppression list.

    • DELETE: remove the address from the suppression list.

    ", + "enum":[ + "DELETE", + "PUT" + ] + }, + "SuppressionListReason":{ + "type":"string", + "documentation":"

    The reason that the address was added to the suppression list for your account. The value can be one of the following:

    • COMPLAINT – Amazon SES added an email address to the suppression list for your account because a message sent to that address resulted in a complaint.

    • BOUNCE – Amazon SES added an email address to the suppression list for your account because a message sent to that address resulted in a hard bounce.

    ", + "enum":[ + "BOUNCE", + "COMPLAINT" + ] + }, + "SuppressionListReasons":{ + "type":"list", + "member":{"shape":"SuppressionListReason"} + }, + "SuppressionOptions":{ + "type":"structure", + "members":{ + "SuppressedReasons":{ + "shape":"SuppressionListReasons", + "documentation":"

    A list that contains the reasons that email addresses are automatically added to the suppression list for your account. This list can contain any or all of the following:

    • COMPLAINT – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a complaint.

    • BOUNCE – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a hard bounce.

    " + } + }, + "documentation":"

    An object that contains information about the suppression list preferences for your account.

    " + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don't want a resource to have a specific tag value, don't specify a value for this parameter. If you don't specify a value, Amazon SES sets the value to an empty string.

    " + } + }, + "documentation":"

    An object that defines the tags that are associated with a resource. A tag is a label that you optionally define and associate with a resource. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.

    Each tag consists of a required tag key and an associated tag value, both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:

    • Tag keys and values are case sensitive.

    • For each associated resource, each tag key must be unique and it can have only one value.

    • The aws: prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can't edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.

    • You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.

    " + }, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to add one or more tags to.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    A list of the tags that you want to add to the resource. A tag consists of a required tag key (Key) and an associated tag value (Value). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.
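    A short illustrative sketch with the generated Java client (the resource ARN and tag are placeholders):

    import software.amazon.awssdk.services.sesv2.SesV2Client;
    import software.amazon.awssdk.services.sesv2.model.Tag;
    import software.amazon.awssdk.services.sesv2.model.TagResourceRequest;

    public class TagResourceExample {
        public static void main(String[] args) {
            try (SesV2Client ses = SesV2Client.create()) {
                ses.tagResource(TagResourceRequest.builder()
                        .resourceArn("arn:aws:ses:us-east-1:123456789012:configuration-set/my-config-set")
                        // Tag keys and values are case sensitive.
                        .tags(Tag.builder().key("environment").value("test").build())
                        .build());
            }
        }
    }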

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{"type":"string"}, + "Template":{ + "type":"structure", + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the template. You will refer to this name when you send email using the SendTemplatedEmail or SendBulkTemplatedEmail operations.

    " + }, + "TemplateArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the template.

    " + }, + "TemplateData":{ + "shape":"EmailTemplateData", + "documentation":"

    An object that defines the values to use for message variables in the template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the value to use for that variable.

    " + } + }, + "documentation":"

    An object that defines the email template to use for an email message, and the values to use for any message variables in that template. An email template is a type of message template that contains content that you want to define, save, and reuse in email messages that you send.
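    A hedged sketch of sending with a stored template via the generated Java client (the template name, replacement data, and addresses are placeholders):

    import software.amazon.awssdk.services.sesv2.SesV2Client;
    import software.amazon.awssdk.services.sesv2.model.Destination;
    import software.amazon.awssdk.services.sesv2.model.EmailContent;
    import software.amazon.awssdk.services.sesv2.model.SendEmailRequest;
    import software.amazon.awssdk.services.sesv2.model.Template;

    public class SendTemplatedEmailExample {
        public static void main(String[] args) {
            // TemplateData keys correspond to the message variables defined in the template.
            Template template = Template.builder()
                    .templateName("OrderConfirmation")
                    .templateData("{\"customer\":\"Alice\",\"order\":\"42\"}")
                    .build();

            try (SesV2Client ses = SesV2Client.create()) {
                ses.sendEmail(SendEmailRequest.builder()
                        .fromEmailAddress("sender@example.com")
                        .destination(Destination.builder().toAddresses("recipient@example.com").build())
                        .content(EmailContent.builder().template(template).build())
                        .build());
            }
        }
    }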

    " + }, + "TemplateContent":{ + "type":"string", + "documentation":"

    The content of the custom verification email template.

    " + }, + "TestRenderEmailTemplateRequest":{ + "type":"structure", + "required":[ + "TemplateName", + "TemplateData" + ], + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the template that you want to render.

    ", + "location":"uri", + "locationName":"TemplateName" + }, + "TemplateData":{ + "shape":"EmailTemplateData", + "documentation":"

    A list of replacement values to apply to the template. This parameter is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template.

    " + } + }, + "documentation":"

    Represents a request to create a preview of the MIME content of an email when provided with a template and a set of replacement data.

    " + }, + "TestRenderEmailTemplateResponse":{ + "type":"structure", + "required":["RenderedTemplate"], + "members":{ + "RenderedTemplate":{ + "shape":"RenderedEmailTemplate", + "documentation":"

    The complete MIME message rendered by applying the data in the TemplateData parameter to the template specified in the TemplateName parameter.

    " + } + }, + "documentation":"

    The following element is returned by the service.

    " + }, + "Timestamp":{"type":"timestamp"}, + "TlsPolicy":{ + "type":"string", + "documentation":"

    Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established.

    ", + "enum":[ + "REQUIRE", + "OPTIONAL" + ] + }, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    Too many requests have been made to the operation.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Topic":{ + "type":"structure", + "required":[ + "TopicName", + "DisplayName", + "DefaultSubscriptionStatus" + ], + "members":{ + "TopicName":{ + "shape":"TopicName", + "documentation":"

    The name of the topic.

    " + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

    The name of the topic the contact will see.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of what the topic is about, which the contact will see.

    " + }, + "DefaultSubscriptionStatus":{ + "shape":"SubscriptionStatus", + "documentation":"

    The default subscription status to be applied to a contact if the contact has not noted their preference for subscribing to a topic.

    " + } + }, + "documentation":"

    An interest group, theme, or label within a list. Lists can have multiple topics.

    " + }, + "TopicFilter":{ + "type":"structure", + "members":{ + "TopicName":{ + "shape":"TopicName", + "documentation":"

    The name of a topic on which you wish to apply the filter.

    " + }, + "UseDefaultIfPreferenceUnavailable":{ + "shape":"UseDefaultIfPreferenceUnavailable", + "documentation":"

    Notes that the default subscription status should be applied to a contact because the contact has not noted their preference for subscribing to a topic.

    " + } + }, + "documentation":"

    Used for filtering by a specific topic preference.

    " + }, + "TopicName":{"type":"string"}, + "TopicPreference":{ + "type":"structure", + "required":[ + "TopicName", + "SubscriptionStatus" + ], + "members":{ + "TopicName":{ + "shape":"TopicName", + "documentation":"

    The name of the topic.

    " + }, + "SubscriptionStatus":{ + "shape":"SubscriptionStatus", + "documentation":"

    The contact's subscription status for a topic, which is either OPT_IN or OPT_OUT.

    " + } + }, + "documentation":"

    The contact's preference for being opted-in to or opted-out of a topic.

    " + }, + "TopicPreferenceList":{ + "type":"list", + "member":{"shape":"TopicPreference"} + }, + "Topics":{ + "type":"list", + "member":{"shape":"Topic"} + }, + "TrackingOptions":{ + "type":"structure", + "required":["CustomRedirectDomain"], + "members":{ + "CustomRedirectDomain":{ + "shape":"CustomRedirectDomain", + "documentation":"

    The domain that you want to use for tracking open and click events.

    " + } + }, + "documentation":"

    An object that defines the tracking options for a configuration set. When you use the Amazon SES API v2 to send an email, it contains an invisible image that's used to track when recipients open your email. If your email contains links, those links are changed slightly in order to track when recipients click them.

    These images and links include references to a domain operated by AWS. You can optionally configure Amazon SES to use a domain that you operate for these images and links.

    " + }, + "UnsubscribeAll":{"type":"boolean"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to remove one or more tags from.

    ", + "location":"querystring", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The tags (tag keys) that you want to remove from the resource. When you specify a tag key, the action removes both that key and its associated tag value.

    To remove more than one tag from the resource, append the TagKeys parameter and argument for each additional tag to remove, separated by an ampersand. For example: /v2/email/tags?ResourceArn=ResourceArn&TagKeys=Key1&TagKeys=Key2

    ", + "location":"querystring", + "locationName":"TagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateConfigurationSetEventDestinationRequest":{ + "type":"structure", + "required":[ + "ConfigurationSetName", + "EventDestinationName", + "EventDestination" + ], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

    The name of the configuration set that contains the event destination that you want to modify.

    ", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "EventDestinationName":{ + "shape":"EventDestinationName", + "documentation":"

    The name of the event destination that you want to modify.

    ", + "location":"uri", + "locationName":"EventDestinationName" + }, + "EventDestination":{ + "shape":"EventDestinationDefinition", + "documentation":"

    An object that defines the event destination.

    " + } + }, + "documentation":"

    A request to change the settings for an event destination for a configuration set.

    " + }, + "UpdateConfigurationSetEventDestinationResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "UpdateContactListRequest":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list.

    ", + "location":"uri", + "locationName":"ContactListName" + }, + "Topics":{ + "shape":"Topics", + "documentation":"

    An interest group, theme, or label within a list. A contact list can have multiple topics.

    " + }, + "Description":{ + "shape":"Description", + "documentation":"

    A description of what the contact list is about.

    " + } + } + }, + "UpdateContactListResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateContactRequest":{ + "type":"structure", + "required":[ + "ContactListName", + "EmailAddress" + ], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

    The name of the contact list.

    ", + "location":"uri", + "locationName":"ContactListName" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The contact's email address.

    ", + "location":"uri", + "locationName":"EmailAddress" + }, + "TopicPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

    The contact's preference for being opted-in to or opted-out of a topic.

    " + }, + "UnsubscribeAll":{ + "shape":"UnsubscribeAll", + "documentation":"

    A boolean value indicating whether the contact is unsubscribed from all contact list topics.

    " + }, + "AttributesData":{ + "shape":"AttributesData", + "documentation":"

    The attribute data attached to a contact.
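    For illustration, a minimal sketch of this request with the generated Java client (the contact list, topic, and address are placeholders):

    import software.amazon.awssdk.services.sesv2.SesV2Client;
    import software.amazon.awssdk.services.sesv2.model.SubscriptionStatus;
    import software.amazon.awssdk.services.sesv2.model.TopicPreference;
    import software.amazon.awssdk.services.sesv2.model.UpdateContactRequest;

    public class UpdateContactExample {
        public static void main(String[] args) {
            try (SesV2Client ses = SesV2Client.create()) {
                // Opt the contact out of one topic without unsubscribing from the whole list.
                ses.updateContact(UpdateContactRequest.builder()
                        .contactListName("Newsletter")
                        .emailAddress("subscriber@example.com")
                        .topicPreferences(TopicPreference.builder()
                                .topicName("ProductUpdates")
                                .subscriptionStatus(SubscriptionStatus.OPT_OUT)
                                .build())
                        .unsubscribeAll(false)
                        .build());
            }
        }
    }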

    " + } + } + }, + "UpdateContactResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateCustomVerificationEmailTemplateRequest":{ + "type":"structure", + "required":[ + "TemplateName", + "FromEmailAddress", + "TemplateSubject", + "TemplateContent", + "SuccessRedirectionURL", + "FailureRedirectionURL" + ], + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the custom verification email template that you want to update.

    ", + "location":"uri", + "locationName":"TemplateName" + }, + "FromEmailAddress":{ + "shape":"EmailAddress", + "documentation":"

    The email address that the custom verification email is sent from.

    " + }, + "TemplateSubject":{ + "shape":"EmailTemplateSubject", + "documentation":"

    The subject line of the custom verification email.

    " + }, + "TemplateContent":{ + "shape":"TemplateContent", + "documentation":"

    The content of the custom verification email. The total size of the email must be less than 10 MB. The message body may contain HTML, with some limitations. For more information, see Custom Verification Email Frequently Asked Questions in the Amazon SES Developer Guide.

    " + }, + "SuccessRedirectionURL":{ + "shape":"SuccessRedirectionURL", + "documentation":"

    The URL that the recipient of the verification email is sent to if his or her address is successfully verified.

    " + }, + "FailureRedirectionURL":{ + "shape":"FailureRedirectionURL", + "documentation":"

    The URL that the recipient of the verification email is sent to if his or her address is not successfully verified.

    " + } + }, + "documentation":"

    Represents a request to update an existing custom verification email template.

    " + }, + "UpdateCustomVerificationEmailTemplateResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

    " + }, + "UpdateEmailIdentityPolicyRequest":{ + "type":"structure", + "required":[ + "EmailIdentity", + "PolicyName", + "Policy" + ], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "documentation":"

    The email identity for which you want to update the policy.

    ", + "location":"uri", + "locationName":"EmailIdentity" + }, + "PolicyName":{ + "shape":"PolicyName", + "documentation":"

    The name of the policy.

    The policy name cannot exceed 64 characters and can only include alphanumeric characters, dashes, and underscores.

    ", + "location":"uri", + "locationName":"PolicyName" + }, + "Policy":{ + "shape":"Policy", + "documentation":"

    The text of the policy in JSON format. The policy cannot exceed 4 KB.

    For information about the syntax of sending authorization policies, see the Amazon SES Developer Guide.

    " + } + }, + "documentation":"

    Represents a request to update a sending authorization policy for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " + }, + "UpdateEmailIdentityPolicyResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, + "UpdateEmailTemplateRequest":{ + "type":"structure", + "required":[ + "TemplateName", + "TemplateContent" + ], + "members":{ + "TemplateName":{ + "shape":"EmailTemplateName", + "documentation":"

    The name of the template you want to update.

    ", + "location":"uri", + "locationName":"TemplateName" + }, + "TemplateContent":{ + "shape":"EmailTemplateContent", + "documentation":"

    The content of the email template, composed of a subject line, an HTML part, and a text-only part.

    " + } + }, + "documentation":"

    Represents a request to update an email template. For more information, see the Amazon SES Developer Guide.

    " + }, + "UpdateEmailTemplateResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

    " + }, + "UseCaseDescription":{ + "type":"string", + "max":5000, + "min":1, + "sensitive":true + }, + "UseDefaultIfPreferenceUnavailable":{"type":"boolean"}, + "Volume":{ + "type":"long", + "documentation":"

    An object that contains information about inbox placement volume.

    " + }, + "VolumeStatistics":{ + "type":"structure", + "members":{ + "InboxRawCount":{ + "shape":"Volume", + "documentation":"

    The total number of emails that arrived in recipients' inboxes.

    " + }, + "SpamRawCount":{ + "shape":"Volume", + "documentation":"

    The total number of emails that arrived in recipients' spam or junk mail folders.

    " + }, + "ProjectedInbox":{ + "shape":"Volume", + "documentation":"

    An estimate of the percentage of emails sent from the current domain that will arrive in recipients' inboxes.

    " + }, + "ProjectedSpam":{ + "shape":"Volume", + "documentation":"

    An estimate of the percentage of emails sent from the current domain that will arrive in recipients' spam or junk mail folders.

    " + } + }, + "documentation":"

    An object that contains information about the amount of email that was delivered to recipients.

    " + }, + "WarmupStatus":{ + "type":"string", + "documentation":"

    The warmup status of a dedicated IP.

    ", + "enum":[ + "IN_PROGRESS", + "DONE" + ] + }, + "WebsiteURL":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\\?([^#]*))?(#(.*))?", + "sensitive":true + } + }, + "documentation":"Amazon SES API v2

    Welcome to the Amazon SES API v2 Reference. This guide provides information about the Amazon SES API v2, including supported operations, data types, parameters, and schemas.

    Amazon SES is an AWS service that you can use to send email messages to your customers.

    If you're new to Amazon SES API v2, you might find it helpful to also review the Amazon Simple Email Service Developer Guide. The Amazon SES Developer Guide provides information and code samples that demonstrate how to use Amazon SES API v2 features programmatically.

    The Amazon SES API v2 is available in several AWS Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see AWS Service Endpoints in the Amazon Web Services General Reference. To learn more about AWS Regions, see Managing AWS Regions in the Amazon Web Services General Reference.

    In each Region, AWS maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see AWS Global Infrastructure.

    " +} diff --git a/services/sfn/build.properties b/services/sfn/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/sfn/build.properties +++ b/services/sfn/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 5c39d4833479..ed569ee6ab5d 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + sso + AWS Java SDK :: Services :: SSO + The AWS Java SDK for SSO module holds the client classes that are used for + communicating with SSO. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.sso + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + software.amazon.awssdk + profiles + ${awsjavasdk.version} + compile + + + + + com.google.jimfs + jimfs + test + + + com.google.guava + guava + test + + + diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/ExpiredTokenException.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/ExpiredTokenException.java new file mode 100644 index 000000000000..9284bcace31e --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/ExpiredTokenException.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.auth; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.exception.SdkClientException; + +/** + *

    + * The session token that was passed is expired or is not valid. + *

    + */ +@SdkPublicApi +public final class ExpiredTokenException extends SdkClientException { + + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList()); + + private ExpiredTokenException(Builder b) { + super(b); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public interface Builder extends SdkPojo, SdkClientException.Builder { + @Override + Builder message(String message); + + @Override + Builder cause(Throwable cause); + + @Override + ExpiredTokenException build(); + } + + static final class BuilderImpl extends SdkClientException.BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(ExpiredTokenException model) { + super(model); + } + + @Override + public BuilderImpl message(String message) { + this.message = message; + return this; + } + + @Override + public BuilderImpl cause(Throwable cause) { + this.cause = cause; + return this; + } + + @Override + public ExpiredTokenException build() { + return new ExpiredTokenException(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProvider.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProvider.java new file mode 100644 index 000000000000..3663771fe572 --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProvider.java @@ -0,0 +1,256 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.auth; + +import static software.amazon.awssdk.utils.Validate.notNull; + +import java.time.Duration; +import java.time.Instant; +import java.util.Optional; +import java.util.function.Consumer; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.services.sso.SsoClient; +import software.amazon.awssdk.services.sso.internal.SessionCredentialsHolder; +import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest; +import software.amazon.awssdk.services.sso.model.RoleCredentials; +import software.amazon.awssdk.utils.SdkAutoCloseable; +import software.amazon.awssdk.utils.cache.CachedSupplier; +import software.amazon.awssdk.utils.cache.NonBlocking; +import software.amazon.awssdk.utils.cache.RefreshResult; + +/** + *

    + * An implementation of {@link AwsCredentialsProvider} that is extended within this package to provide support for + * periodically updating session credentials. This credential provider maintains a {@link Supplier} + * for a {@link SsoClient#getRoleCredentials(Consumer)} call to retrieve the credentials needed. + *

    + * + *

    + * While creating the {@link GetRoleCredentialsRequest}, an access token needs to be resolved from a token file. + * By default, the token is assumed to be unexpired; if it has expired, an {@link ExpiredTokenException} is thrown. + * To change this behavior, implement your own token resolving logic and override + * {@link Builder#refreshRequest(Supplier)}. + *

    + * + *

    + * When credentials get close to expiration, this class will attempt to update them asynchronously. If the credentials + * end up expiring, this class will block all calls to {@link #resolveCredentials()} until the credentials can be updated. + *
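    For illustration, a hedged usage sketch of this provider (the account ID, role name, and access token literal are placeholders; in practice the token comes from the cached SSO token file, for example via SsoAccessTokenProvider):

    import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider;
    import software.amazon.awssdk.auth.credentials.AwsCredentials;
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.sso.SsoClient;
    import software.amazon.awssdk.services.sso.auth.SsoCredentialsProvider;
    import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest;

    public class SsoCredentialsProviderUsageExample {
        public static void main(String[] args) {
            // The SSO client is called anonymously; authorization comes from the access token.
            SsoClient ssoClient = SsoClient.builder()
                    .credentialsProvider(AnonymousCredentialsProvider.create())
                    .region(Region.US_EAST_1)
                    .build();

            SsoCredentialsProvider provider = SsoCredentialsProvider.builder()
                    .ssoClient(ssoClient)
                    .refreshRequest(() -> GetRoleCredentialsRequest.builder()
                            .accountId("123456789012")                 // placeholder account ID
                            .roleName("MyPermissionSetRole")           // placeholder role name
                            .accessToken("resolved-sso-access-token")  // placeholder; resolve from the token cache
                            .build())
                    .build();

            AwsCredentials credentials = provider.resolveCredentials();
            System.out.println(credentials.accessKeyId());

            provider.close();
            ssoClient.close();
        }
    }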

    + */ +@SdkPublicApi +public final class SsoCredentialsProvider implements AwsCredentialsProvider, SdkAutoCloseable { + + private static final Duration DEFAULT_STALE_TIME = Duration.ofMinutes(1); + private static final Duration DEFAULT_PREFETCH_TIME = Duration.ofMinutes(5); + + private static final String ASYNC_THREAD_NAME = "sdk-sso-credentials-provider"; + + private final Supplier getRoleCredentialsRequestSupplier; + + private final SsoClient ssoClient; + private final Duration staleTime; + private final Duration prefetchTime; + + private final CachedSupplier credentialCache; + + /** + * @see #builder() + */ + private SsoCredentialsProvider(BuilderImpl builder) { + this.ssoClient = notNull(builder.ssoClient, "SSO client must not be null."); + this.getRoleCredentialsRequestSupplier = builder.getRoleCredentialsRequestSupplier; + + this.staleTime = Optional.ofNullable(builder.staleTime).orElse(DEFAULT_STALE_TIME); + this.prefetchTime = Optional.ofNullable(builder.prefetchTime).orElse(DEFAULT_PREFETCH_TIME); + + CachedSupplier.Builder cacheBuilder = CachedSupplier.builder(this::updateSsoCredentials); + if (builder.asyncCredentialUpdateEnabled) { + cacheBuilder.prefetchStrategy(new NonBlocking(ASYNC_THREAD_NAME)); + } + + this.credentialCache = cacheBuilder.build(); + } + + /** + * Update the expiring session SSO credentials by calling SSO. Invoked by {@link CachedSupplier} when the credentials + * are close to expiring. + */ + private RefreshResult updateSsoCredentials() { + SessionCredentialsHolder credentials = getUpdatedCredentials(ssoClient); + Instant acutalTokenExpiration = credentials.sessionCredentialsExpiration(); + + return RefreshResult.builder(credentials) + .staleTime(acutalTokenExpiration.minus(staleTime)) + .prefetchTime(acutalTokenExpiration.minus(prefetchTime)) + .build(); + } + + private SessionCredentialsHolder getUpdatedCredentials(SsoClient ssoClient) { + GetRoleCredentialsRequest request = getRoleCredentialsRequestSupplier.get(); + notNull(request, "GetRoleCredentialsRequest can't be null."); + RoleCredentials roleCredentials = ssoClient.getRoleCredentials(request).roleCredentials(); + AwsSessionCredentials sessionCredentials = AwsSessionCredentials.create(roleCredentials.accessKeyId(), + roleCredentials.secretAccessKey(), + roleCredentials.sessionToken()); + return new SessionCredentialsHolder(sessionCredentials, Instant.ofEpochMilli(roleCredentials.expiration())); + } + + /** + * The amount of time, relative to session token expiration, that the cached credentials are considered stale and + * should no longer be used. All threads will block until the value is updated. + */ + public Duration staleTime() { + return staleTime; + } + + /** + * The amount of time, relative to session token expiration, that the cached credentials are considered close to stale + * and should be updated. + */ + public Duration prefetchTime() { + return prefetchTime; + } + + /** + * Get a builder for creating a custom {@link SsoCredentialsProvider}. + */ + public static BuilderImpl builder() { + return new BuilderImpl(); + } + + @Override + public AwsCredentials resolveCredentials() { + return credentialCache.get().sessionCredentials(); + } + + @Override + public void close() { + credentialCache.close(); + } + + /** + * A builder for creating a custom {@link SsoCredentialsProvider}. + */ + public interface Builder { + + /** + * Configure the {@link SsoClient} to use when calling SSO to update the session. 
 This client should not be shut + * down as long as this credentials provider is in use. + */ + Builder ssoClient(SsoClient ssoClient); + + /** + * Configure whether the provider should fetch credentials asynchronously in the background. If this is true, + * threads are less likely to block when credentials are loaded, but additional resources are used to maintain + * the provider. + * + *

    By default, this is disabled.

    + */ + Builder asyncCredentialUpdateEnabled(Boolean asyncCredentialUpdateEnabled); + + /** + * Configure the amount of time, relative to SSO session token expiration, that the cached credentials are considered + * stale and should no longer be used. All threads will block until the value is updated. + * + *

    By default, this is 1 minute.

    + */ + Builder staleTime(Duration staleTime); + + /** + * Configure the amount of time, relative to SSO session token expiration, that the cached credentials are considered + * close to stale and should be updated. See {@link #asyncCredentialUpdateEnabled}. + * + *

    By default, this is 5 minutes.

    + */ + Builder prefetchTime(Duration prefetchTime); + + /** + * Configure the {@link GetRoleCredentialsRequest} that should be periodically sent to the SSO service to update the + * credentials. + */ + Builder refreshRequest(GetRoleCredentialsRequest getRoleCredentialsRequest); + + /** + * Similar to {@link #refreshRequest(GetRoleCredentialsRequest)}, but takes a {@link Supplier} to supply the request to + * SSO. + */ + Builder refreshRequest(Supplier getRoleCredentialsRequestSupplier); + + /** + * Create a {@link SsoCredentialsProvider} using the configuration applied to this builder. + * @return + */ + SsoCredentialsProvider build(); + + } + + protected static final class BuilderImpl implements Builder { + private Boolean asyncCredentialUpdateEnabled = false; + private SsoClient ssoClient; + private Duration staleTime; + private Duration prefetchTime; + private Supplier getRoleCredentialsRequestSupplier; + + BuilderImpl() { + + } + + @Override + public Builder ssoClient(SsoClient ssoClient) { + this.ssoClient = ssoClient; + return this; + } + + @Override + public Builder asyncCredentialUpdateEnabled(Boolean asyncCredentialUpdateEnabled) { + this.asyncCredentialUpdateEnabled = asyncCredentialUpdateEnabled; + return this; + } + + @Override + public Builder staleTime(Duration staleTime) { + this.staleTime = staleTime; + return this; + } + + @Override + public Builder prefetchTime(Duration prefetchTime) { + this.prefetchTime = prefetchTime; + return this; + } + + @Override + public Builder refreshRequest(GetRoleCredentialsRequest getRoleCredentialsRequest) { + return refreshRequest(() -> getRoleCredentialsRequest); + } + + @Override + public Builder refreshRequest(Supplier getRoleCredentialsRequestSupplier) { + this.getRoleCredentialsRequestSupplier = getRoleCredentialsRequestSupplier; + return this; + } + + @Override + public SsoCredentialsProvider build() { + return new SsoCredentialsProvider(this); + } + + } +} diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactory.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactory.java new file mode 100644 index 000000000000..c7745714f258 --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactory.java @@ -0,0 +1,112 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.sso.auth; + +import static software.amazon.awssdk.services.sso.internal.SsoTokenFileUtils.generateCachedTokenPath; +import static software.amazon.awssdk.utils.UserHomeDirectoryUtils.userHomeDirectory; + +import java.nio.file.Paths; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.ProfileCredentialsProviderFactory; +import software.amazon.awssdk.profiles.Profile; +import software.amazon.awssdk.profiles.ProfileProperty; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.sso.SsoClient; +import software.amazon.awssdk.services.sso.internal.SsoAccessTokenProvider; +import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest; +import software.amazon.awssdk.utils.IoUtils; +import software.amazon.awssdk.utils.SdkAutoCloseable; + +/** + * An implementation of {@link ProfileCredentialsProviderFactory} that allows users to get SSO role credentials using the startUrl + * specified in either a {@link Profile} or environment variables. + */ +@SdkProtectedApi +public class SsoProfileCredentialsProviderFactory implements ProfileCredentialsProviderFactory { + + private static final String TOKEN_DIRECTORY = Paths.get(userHomeDirectory(), ".aws", "sso", "cache").toString(); + + /** + * Default method to create the {@link SsoProfileCredentialsProvider} with a {@link SsoAccessTokenProvider} + * object created with the start url from {@link Profile} or environment variables and the default token file directory. + */ + public AwsCredentialsProvider create(Profile profile) { + return create(profile, new SsoAccessTokenProvider( + generateCachedTokenPath(profile.properties().get(ProfileProperty.SSO_START_URL), TOKEN_DIRECTORY))); + } + + /** + * Alternative method to create the {@link SsoProfileCredentialsProvider} with a customized + * {@link SsoAccessTokenProvider}. This method is only used for testing. + */ + @SdkTestInternalApi + public AwsCredentialsProvider create(Profile profile, + SsoAccessTokenProvider tokenProvider) { + return new SsoProfileCredentialsProvider(profile, tokenProvider); + } + + /** + * A wrapper for a {@link SsoCredentialsProvider} that is returned by this factory when {@link #create(Profile)} or + * {@link #create(Profile, SsoAccessTokenProvider)} is invoked. This wrapper is important because it ensures the parent + * credentials provider is closed when the sso credentials provider is no longer needed. 
+ */ + private static final class SsoProfileCredentialsProvider implements AwsCredentialsProvider, SdkAutoCloseable { + private final SsoClient ssoClient; + private final SsoCredentialsProvider credentialsProvider; + + private SsoProfileCredentialsProvider(Profile profile, + SsoAccessTokenProvider tokenProvider) { + String ssoAccountId = profile.properties().get(ProfileProperty.SSO_ACCOUNT_ID); + String ssoRoleName = profile.properties().get(ProfileProperty.SSO_ROLE_NAME); + String ssoRegion = profile.properties().get(ProfileProperty.SSO_REGION); + + this.ssoClient = SsoClient.builder() + .credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.of(ssoRegion)) + .build(); + + GetRoleCredentialsRequest request = GetRoleCredentialsRequest.builder() + .accountId(ssoAccountId) + .roleName(ssoRoleName) + .build(); + + Supplier supplier = () -> request.toBuilder() + .accessToken(tokenProvider.resolveAccessToken()).build(); + + + this.credentialsProvider = SsoCredentialsProvider.builder() + .ssoClient(ssoClient) + .refreshRequest(supplier) + .build(); + } + + @Override + public AwsCredentials resolveCredentials() { + return this.credentialsProvider.resolveCredentials(); + } + + @Override + public void close() { + IoUtils.closeQuietly(credentialsProvider, null); + IoUtils.closeQuietly(ssoClient, null); + } + } +} diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SessionCredentialsHolder.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SessionCredentialsHolder.java new file mode 100644 index 000000000000..b114917a81bb --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SessionCredentialsHolder.java @@ -0,0 +1,45 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.internal; + +import java.time.Instant; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; + +/** + * Holder class used to atomically store a session with its expiration time. 
+ */ +@SdkInternalApi +@ThreadSafe +public final class SessionCredentialsHolder { + + private final AwsSessionCredentials sessionCredentials; + private final Instant sessionCredentialsExpiration; + + public SessionCredentialsHolder(AwsSessionCredentials credentials, Instant expiration) { + this.sessionCredentials = credentials; + this.sessionCredentialsExpiration = expiration; + } + + public AwsSessionCredentials sessionCredentials() { + return sessionCredentials; + } + + public Instant sessionCredentialsExpiration() { + return sessionCredentialsExpiration; + } +} diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProvider.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProvider.java new file mode 100644 index 000000000000..242208af9356 --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProvider.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.internal; + +import static java.time.temporal.ChronoUnit.MINUTES; + +import com.fasterxml.jackson.databind.JsonNode; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Instant; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.util.json.JacksonUtils; +import software.amazon.awssdk.services.sso.auth.ExpiredTokenException; +import software.amazon.awssdk.services.sso.auth.SsoCredentialsProvider; +import software.amazon.awssdk.utils.IoUtils; + +/** + * Resolve the access token from the cached token file. If the token has expired then throw out an exception to ask the users to + * update the token. This provider can also be replaced by any other implementation of resolving the access token. The users can + * resolve the access token in their own way and add it to the {@link SsoCredentialsProvider.Builder#refreshRequest}. + */ +@SdkInternalApi +public final class SsoAccessTokenProvider { + + private Path cachedTokenFilePath; + + public SsoAccessTokenProvider(Path cachedTokenFilePath) { + this.cachedTokenFilePath = cachedTokenFilePath; + } + + public String resolveAccessToken() { + try (InputStream cachedTokenStream = Files.newInputStream(cachedTokenFilePath)) { + return getTokenFromJson(IoUtils.toUtf8String(cachedTokenStream)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private String getTokenFromJson(String json) { + JsonNode jsonNode = JacksonUtils.sensitiveJsonNodeOf(json); + + if (validateToken(jsonNode.get("expiresAt").asText())) { + throw ExpiredTokenException.builder().message("The SSO session associated with this profile has expired or is" + + " otherwise invalid. 
To refresh this SSO session run aws sso" + + " login with the corresponding profile.").build(); + } + + return jsonNode.get("accessToken").asText(); + } + + private boolean validateToken(String expirationTime) { + return Instant.now().isAfter(Instant.parse(expirationTime).minus(15, MINUTES)); + } + +} diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtils.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtils.java new file mode 100644 index 000000000000..7ddd353bb7b2 --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtils.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.internal; + +import static software.amazon.awssdk.utils.UserHomeDirectoryUtils.userHomeDirectory; + +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystems; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.regex.Pattern; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.Validate; + +/** + * A tool class helps generating the path of cached token file. + */ +@SdkInternalApi +public class SsoTokenFileUtils { + + private static final Pattern HOME_DIRECTORY_PATTERN = + Pattern.compile("^~(/|" + Pattern.quote(FileSystems.getDefault().getSeparator()) + ").*$"); + + private SsoTokenFileUtils() { + + } + + /** + * Generate the cached file name by generating the SHA1 Hex Digest of the UTF-8 encoded start url bytes. + */ + public static Path generateCachedTokenPath(String startUrl, String tokenDirectory) { + Validate.notNull(startUrl, "The start url shouldn't be null."); + byte[] startUrlBytes = startUrl.getBytes(StandardCharsets.UTF_8); + String encodedUrl = new String(startUrlBytes, StandardCharsets.UTF_8); + return resolveProfileFilePath(Paths.get(tokenDirectory, sha1Hex(encodedUrl) + ".json").toString()); + } + + /** + * Use {@link MessageDigest} instance to encrypt the input String. + */ + private static String sha1Hex(String input) { + MessageDigest md; + try { + md = MessageDigest.getInstance("SHA-1"); + md.update(input.getBytes(StandardCharsets.UTF_8)); + } catch (NoSuchAlgorithmException e) { + throw SdkClientException.builder().message("Unable to use \"SHA-1\" algorithm.").cause(e).build(); + } + + return BinaryUtils.toHex(md.digest()); + } + + private static Path resolveProfileFilePath(String path) { + // Resolve ~ using the CLI's logic, not whatever Java decides to do with it. 
+ if (HOME_DIRECTORY_PATTERN.matcher(path).matches()) { + path = userHomeDirectory() + path.substring(1); + } + + return Paths.get(path); + } +} diff --git a/services/sso/src/main/resources/codegen-resources/paginators-1.json b/services/sso/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..daaed6fe69df --- /dev/null +++ b/services/sso/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,16 @@ +{ + "pagination": { + "ListAccountRoles": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "roleList" + }, + "ListAccounts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "accountList" + } + } +} diff --git a/services/sso/src/main/resources/codegen-resources/service-2.json b/services/sso/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..84e4fa40f792 --- /dev/null +++ b/services/sso/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,346 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-06-10", + "endpointPrefix":"portal.sso", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"SSO", + "serviceFullName":"AWS Single Sign-On", + "serviceId":"SSO", + "signatureVersion":"v4", + "signingName":"awsssoportal", + "uid":"sso-2019-06-10" + }, + "operations":{ + "GetRoleCredentials":{ + "name":"GetRoleCredentials", + "http":{ + "method":"GET", + "requestUri":"/federation/credentials" + }, + "input":{"shape":"GetRoleCredentialsRequest"}, + "output":{"shape":"GetRoleCredentialsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns the STS short-term credentials for a given role name that is assigned to the user.
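    [Editor's aside] A minimal, hypothetical sketch of calling this operation with the generated client added in this diff. The account ID, role name, region, and token are placeholders; the anonymous-credentials wiring mirrors SsoProfileCredentialsProvider above, since portal calls authenticate with the bearer token header rather than SigV4.

```java
import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.sso.SsoClient;
import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest;
import software.amazon.awssdk.services.sso.model.GetRoleCredentialsResponse;
import software.amazon.awssdk.services.sso.model.RoleCredentials;

public class GetRoleCredentialsExample {
    public static void main(String[] args) {
        // Portal calls are authorized by the x-amz-sso_bearer_token header,
        // so the client is built with anonymous credentials, as the factory above does.
        try (SsoClient sso = SsoClient.builder()
                .credentialsProvider(AnonymousCredentialsProvider.create())
                .region(Region.US_EAST_1)                  // placeholder sso_region
                .build()) {

            GetRoleCredentialsResponse response = sso.getRoleCredentials(
                    GetRoleCredentialsRequest.builder()
                            .accountId("123456789012")     // placeholder sso_account_id
                            .roleName("SampleRole")        // placeholder sso_role_name
                            .accessToken("base64string")   // token from the cached token file
                            .build());

            RoleCredentials credentials = response.roleCredentials();
            System.out.println("Access key: " + credentials.accessKeyId());
        }
    }
}
```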

    ", + "authtype":"none" + }, + "ListAccountRoles":{ + "name":"ListAccountRoles", + "http":{ + "method":"GET", + "requestUri":"/assignment/roles" + }, + "input":{"shape":"ListAccountRolesRequest"}, + "output":{"shape":"ListAccountRolesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists all roles that are assigned to the user for a given AWS account.

    ", + "authtype":"none" + }, + "ListAccounts":{ + "name":"ListAccounts", + "http":{ + "method":"GET", + "requestUri":"/assignment/accounts" + }, + "input":{"shape":"ListAccountsRequest"}, + "output":{"shape":"ListAccountsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists all AWS accounts assigned to the user. These AWS accounts are assigned by the administrator of the account. For more information, see Assign User Access in the AWS SSO User Guide. This operation returns a paginated response.
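    [Editor's aside] Because this operation is paginated via nextToken/maxResults (see paginators-1.json above), here is a hedged sketch of walking all pages manually with the generated client; the client and access token are placeholders.

```java
import software.amazon.awssdk.services.sso.SsoClient;
import software.amazon.awssdk.services.sso.model.ListAccountsRequest;
import software.amazon.awssdk.services.sso.model.ListAccountsResponse;

public final class ListAccountsExample {
    // Walks every page of ListAccounts using the nextToken/maxResults contract
    // declared in paginators-1.json. `accessToken` is a placeholder value.
    static void printAllAccounts(SsoClient sso, String accessToken) {
        String nextToken = null;
        do {
            ListAccountsResponse page = sso.listAccounts(ListAccountsRequest.builder()
                    .accessToken(accessToken)
                    .maxResults(100)          // the model allows 1..100 items per page
                    .nextToken(nextToken)     // null (unset) on the first request
                    .build());
            page.accountList().forEach(account ->
                    System.out.println(account.accountId() + "\t" + account.accountName()));
            nextToken = page.nextToken();
        } while (nextToken != null);
    }
}
```

    The codegen also emits a paginator helper for this operation, which would replace the manual loop; the explicit version is shown only to make the token contract visible.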

    ", + "authtype":"none" + }, + "Logout":{ + "name":"Logout", + "http":{ + "method":"POST", + "requestUri":"/logout" + }, + "input":{"shape":"LogoutRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Removes the client- and server-side session that is associated with the user.
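    [Editor's aside] A correspondingly small, hypothetical sketch of ending the portal session; the access token is a placeholder.

```java
import software.amazon.awssdk.services.sso.SsoClient;
import software.amazon.awssdk.services.sso.model.LogoutRequest;

public final class LogoutExample {
    // Invalidates the client- and server-side session tied to the bearer token.
    static void logout(SsoClient sso, String accessToken) {
        sso.logout(LogoutRequest.builder()
                .accessToken(accessToken)
                .build());
    }
}
```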

    ", + "authtype":"none" + } + }, + "shapes":{ + "AccessKeyType":{"type":"string"}, + "AccessTokenType":{ + "type":"string", + "sensitive":true + }, + "AccountIdType":{"type":"string"}, + "AccountInfo":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"AccountIdType", + "documentation":"

    The identifier of the AWS account that is assigned to the user.

    " + }, + "accountName":{ + "shape":"AccountNameType", + "documentation":"

    The display name of the AWS account that is assigned to the user.

    " + }, + "emailAddress":{ + "shape":"EmailAddressType", + "documentation":"

    The email address of the AWS account that is assigned to the user.

    " + } + }, + "documentation":"

    Provides information about your AWS account.

    " + }, + "AccountListType":{ + "type":"list", + "member":{"shape":"AccountInfo"} + }, + "AccountNameType":{"type":"string"}, + "EmailAddressType":{ + "type":"string", + "max":254, + "min":1 + }, + "ErrorDescription":{"type":"string"}, + "ExpirationTimestampType":{"type":"long"}, + "GetRoleCredentialsRequest":{ + "type":"structure", + "required":[ + "roleName", + "accountId", + "accessToken" + ], + "members":{ + "roleName":{ + "shape":"RoleNameType", + "documentation":"

    The friendly name of the role that is assigned to the user.

    ", + "location":"querystring", + "locationName":"role_name" + }, + "accountId":{ + "shape":"AccountIdType", + "documentation":"

    The identifier for the AWS account that is assigned to the user.

    ", + "location":"querystring", + "locationName":"account_id" + }, + "accessToken":{ + "shape":"AccessTokenType", + "documentation":"

    The token issued by the CreateToken API call. For more information, see CreateToken in the AWS SSO OIDC API Reference Guide.
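    [Editor's aside] This is the token that SsoProfileCredentialsProviderFactory earlier in the diff injects into each refresh request. Below is a minimal sketch of the same wiring outside the factory, assuming the refreshRequest builder overload that takes a request supplier (as the factory and tests use); the cache path, account ID, and role name are placeholders, and SsoAccessTokenProvider is an internal class shown only to illustrate the flow.

```java
import java.nio.file.Paths;
import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.services.sso.SsoClient;
import software.amazon.awssdk.services.sso.auth.SsoCredentialsProvider;
import software.amazon.awssdk.services.sso.internal.SsoAccessTokenProvider;
import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest;

public final class RefreshRequestExample {
    static AwsCredentials resolve(SsoClient sso) {
        // Reads the cached token file; throws ExpiredTokenException when the token is stale.
        SsoAccessTokenProvider tokenProvider = new SsoAccessTokenProvider(
                Paths.get("/home/user/.aws/sso/cache/6a888bdb653a4ba345dd68f21b896ec2e218c6f4.json"));

        SsoCredentialsProvider provider = SsoCredentialsProvider.builder()
                .ssoClient(sso)
                .refreshRequest(() -> GetRoleCredentialsRequest.builder()
                        .accountId("123456789012")   // placeholder sso_account_id
                        .roleName("SampleRole")      // placeholder sso_role_name
                        .accessToken(tokenProvider.resolveAccessToken())
                        .build())
                .build();

        return provider.resolveCredentials();
    }
}
```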

    ", + "location":"header", + "locationName":"x-amz-sso_bearer_token" + } + } + }, + "GetRoleCredentialsResponse":{ + "type":"structure", + "members":{ + "roleCredentials":{ + "shape":"RoleCredentials", + "documentation":"

    The credentials for the role that is assigned to the user.

    " + } + } + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that a problem occurred with the input to the request. For example, a required parameter might be missing or out of range.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListAccountRolesRequest":{ + "type":"structure", + "required":[ + "accessToken", + "accountId" + ], + "members":{ + "nextToken":{ + "shape":"NextTokenType", + "documentation":"

    The page token from the previous response output when you request subsequent pages.

    ", + "location":"querystring", + "locationName":"next_token" + }, + "maxResults":{ + "shape":"MaxResultType", + "documentation":"

    The number of items that clients can request per page.

    ", + "location":"querystring", + "locationName":"max_result" + }, + "accessToken":{ + "shape":"AccessTokenType", + "documentation":"

    The token issued by the CreateToken API call. For more information, see CreateToken in the AWS SSO OIDC API Reference Guide.

    ", + "location":"header", + "locationName":"x-amz-sso_bearer_token" + }, + "accountId":{ + "shape":"AccountIdType", + "documentation":"

    The identifier for the AWS account that is assigned to the user.

    ", + "location":"querystring", + "locationName":"account_id" + } + } + }, + "ListAccountRolesResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextTokenType", + "documentation":"

    The page token that the client uses to retrieve the next set of results.

    " + }, + "roleList":{ + "shape":"RoleListType", + "documentation":"

    A paginated response with the list of roles and the next token if more results are available.

    " + } + } + }, + "ListAccountsRequest":{ + "type":"structure", + "required":["accessToken"], + "members":{ + "nextToken":{ + "shape":"NextTokenType", + "documentation":"

    (Optional) When requesting subsequent pages, this is the page token from the previous response output.

    ", + "location":"querystring", + "locationName":"next_token" + }, + "maxResults":{ + "shape":"MaxResultType", + "documentation":"

    This is the number of items clients can request per page.

    ", + "location":"querystring", + "locationName":"max_result" + }, + "accessToken":{ + "shape":"AccessTokenType", + "documentation":"

    The token issued by the CreateToken API call. For more information, see CreateToken in the AWS SSO OIDC API Reference Guide.

    ", + "location":"header", + "locationName":"x-amz-sso_bearer_token" + } + } + }, + "ListAccountsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextTokenType", + "documentation":"

    The page token that the client uses to retrieve the next set of results.

    " + }, + "accountList":{ + "shape":"AccountListType", + "documentation":"

    A paginated response with the list of account information and the next token if more results are available.

    " + } + } + }, + "LogoutRequest":{ + "type":"structure", + "required":["accessToken"], + "members":{ + "accessToken":{ + "shape":"AccessTokenType", + "documentation":"

    The token issued by the CreateToken API call. For more information, see CreateToken in the AWS SSO OIDC API Reference Guide.

    ", + "location":"header", + "locationName":"x-amz-sso_bearer_token" + } + } + }, + "MaxResultType":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "NextTokenType":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorDescription"} + }, + "documentation":"

    The specified resource doesn't exist.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "RoleCredentials":{ + "type":"structure", + "members":{ + "accessKeyId":{ + "shape":"AccessKeyType", + "documentation":"

    The identifier used for the temporary security credentials. For more information, see Using Temporary Security Credentials to Request Access to AWS Resources in the AWS IAM User Guide.

    " + }, + "secretAccessKey":{ + "shape":"SecretAccessKeyType", + "documentation":"

    The key that is used to sign the request. For more information, see Using Temporary Security Credentials to Request Access to AWS Resources in the AWS IAM User Guide.

    " + }, + "sessionToken":{ + "shape":"SessionTokenType", + "documentation":"

    The token used for temporary credentials. For more information, see Using Temporary Security Credentials to Request Access to AWS Resources in the AWS IAM User Guide.

    " + }, + "expiration":{ + "shape":"ExpirationTimestampType", + "documentation":"

    The date on which temporary security credentials expire.

    " + } + }, + "documentation":"

    Provides information about the role credentials that are assigned to the user.
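    [Editor's aside] One detail worth calling out from this shape: expiration is an epoch-millisecond long (ExpirationTimestampType), which is why SsoCredentialsProviderTest below builds it with toEpochMilli(). A small sketch of adapting the shape into session credentials, with placeholder names:

```java
import java.time.Instant;
import software.amazon.awssdk.auth.credentials.AwsSessionCredentials;
import software.amazon.awssdk.services.sso.model.GetRoleCredentialsResponse;
import software.amazon.awssdk.services.sso.model.RoleCredentials;

public final class RoleCredentialsExample {
    static AwsSessionCredentials toSessionCredentials(GetRoleCredentialsResponse response) {
        RoleCredentials role = response.roleCredentials();
        // expiration is epoch milliseconds, not seconds.
        Instant expiresAt = Instant.ofEpochMilli(role.expiration());
        System.out.println("Credentials expire at " + expiresAt);
        return AwsSessionCredentials.create(
                role.accessKeyId(), role.secretAccessKey(), role.sessionToken());
    }
}
```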

    " + }, + "RoleInfo":{ + "type":"structure", + "members":{ + "roleName":{ + "shape":"RoleNameType", + "documentation":"

    The friendly name of the role that is assigned to the user.

    " + }, + "accountId":{ + "shape":"AccountIdType", + "documentation":"

    The identifier of the AWS account assigned to the user.

    " + } + }, + "documentation":"

    Provides information about the role that is assigned to the user.

    " + }, + "RoleListType":{ + "type":"list", + "member":{"shape":"RoleInfo"} + }, + "RoleNameType":{"type":"string"}, + "SecretAccessKeyType":{ + "type":"string", + "sensitive":true + }, + "SessionTokenType":{ + "type":"string", + "sensitive":true + }, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that the request is being made too frequently and is more than what the server can handle.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "UnauthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that the request is not authorized. This can happen due to an invalid access token in the request.
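    [Editor's aside] A hedged sketch of handling this case with the generated model exception (the class name assumes the standard sso.model codegen layout, which is not shown verbatim in this excerpt). When the cached token is rejected, the practical remedy is the same one SsoAccessTokenProvider's error message suggests: run aws sso login again.

```java
import software.amazon.awssdk.services.sso.SsoClient;
import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest;
import software.amazon.awssdk.services.sso.model.RoleCredentials;
import software.amazon.awssdk.services.sso.model.UnauthorizedException;

public final class UnauthorizedHandlingExample {
    static RoleCredentials fetch(SsoClient sso, GetRoleCredentialsRequest request) {
        try {
            return sso.getRoleCredentials(request).roleCredentials();
        } catch (UnauthorizedException e) {
            // The bearer token in x-amz-sso_bearer_token was rejected (HTTP 401);
            // the user needs to re-authenticate, e.g. with `aws sso login`.
            throw new IllegalStateException("SSO access token was rejected; run `aws sso login`.", e);
        }
    }
}
```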

    ", + "error":{"httpStatusCode":401}, + "exception":true + } + }, + "documentation":"

    AWS Single Sign-On Portal is a web service that makes it easy for you to assign user access to AWS SSO resources such as the user portal. Users can get AWS account applications and roles assigned to them and get federated into the application.

    For general information about AWS SSO, see What is AWS Single Sign-On? in the AWS SSO User Guide.

    This API reference guide describes the AWS SSO Portal operations that you can call programmatically and includes detailed information on data types and errors.

    AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs provide a convenient way to create programmatic access to AWS SSO and other AWS services. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

    " +} diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProviderTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProviderTest.java new file mode 100644 index 000000000000..9d15a6cfc7be --- /dev/null +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProviderTest.java @@ -0,0 +1,159 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.auth; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.time.Duration; +import java.time.Instant; +import java.util.function.Supplier; +import org.junit.Test; +import org.mockito.Mockito; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.services.sso.SsoClient; +import software.amazon.awssdk.services.sso.auth.SsoCredentialsProvider; +import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest; +import software.amazon.awssdk.services.sso.model.GetRoleCredentialsResponse; +import software.amazon.awssdk.services.sso.model.RoleCredentials; + +/** + * Validates the functionality of {@link SsoCredentialsProvider}. 
+ */ +public class SsoCredentialsProviderTest { + + private SsoClient ssoClient; + + @Test + public void cachingDoesNotApplyToExpiredSession() { + callClientWithCredentialsProvider(Instant.now().minus(Duration.ofSeconds(5)), 2, false); + callClient(verify(ssoClient, times(2)), Mockito.any()); + } + + @Test + public void cachingDoesNotApplyToExpiredSession_OverridePrefetchAndStaleTimes() { + callClientWithCredentialsProvider(Instant.now().minus(Duration.ofSeconds(5)), 2, true); + callClient(verify(ssoClient, times(2)), Mockito.any()); + } + + @Test + public void cachingAppliesToNonExpiredSession() { + callClientWithCredentialsProvider(Instant.now().plus(Duration.ofHours(5)), 2, false); + callClient(verify(ssoClient, times(1)), Mockito.any()); + } + + @Test + public void cachingAppliesToNonExpiredSession_OverridePrefetchAndStaleTimes() { + callClientWithCredentialsProvider(Instant.now().plus(Duration.ofHours(5)), 2, true); + callClient(verify(ssoClient, times(1)), Mockito.any()); + } + + @Test + public void distantExpiringCredentialsUpdatedInBackground() throws InterruptedException { + callClientWithCredentialsProvider(Instant.now().plusSeconds(90), 2, false); + + Instant endCheckTime = Instant.now().plus(Duration.ofSeconds(5)); + while (Mockito.mockingDetails(ssoClient).getInvocations().size() < 2 && endCheckTime.isAfter(Instant.now())) { + Thread.sleep(100); + } + + callClient(verify(ssoClient, times(2)), Mockito.any()); + } + + @Test + public void distantExpiringCredentialsUpdatedInBackground_OverridePrefetchAndStaleTimes() throws InterruptedException { + callClientWithCredentialsProvider(Instant.now().plusSeconds(90), 2, true); + + Instant endCheckTime = Instant.now().plus(Duration.ofSeconds(5)); + while (Mockito.mockingDetails(ssoClient).getInvocations().size() < 2 && endCheckTime.isAfter(Instant.now())) { + Thread.sleep(100); + } + + callClient(verify(ssoClient, times(2)), Mockito.any()); + } + + + + private GetRoleCredentialsRequestSupplier getRequestSupplier() { + return new GetRoleCredentialsRequestSupplier(GetRoleCredentialsRequest.builder().build(), "cachedToken"); + } + + private GetRoleCredentialsResponse getResponse(RoleCredentials roleCredentials) { + return GetRoleCredentialsResponse.builder().roleCredentials(roleCredentials).build(); + } + + private GetRoleCredentialsResponse callClient(SsoClient ssoClient, GetRoleCredentialsRequest request) { + return ssoClient.getRoleCredentials(request); + } + + private void callClientWithCredentialsProvider(Instant credentialsExpirationDate, int numTimesInvokeCredentialsProvider, + boolean overrideStaleAndPrefetchTimes) { + ssoClient = mock(SsoClient.class); + RoleCredentials credentials = RoleCredentials.builder().accessKeyId("a").secretAccessKey("b").sessionToken("c") + .expiration(credentialsExpirationDate.toEpochMilli()).build(); + + Supplier supplier = getRequestSupplier(); + GetRoleCredentialsResponse response = getResponse(credentials); + + when(ssoClient.getRoleCredentials(supplier.get())).thenReturn(response); + + SsoCredentialsProvider.Builder ssoCredentialsProviderBuilder = SsoCredentialsProvider.builder().refreshRequest(supplier); + + if(overrideStaleAndPrefetchTimes) { + ssoCredentialsProviderBuilder.staleTime(Duration.ofMinutes(2)); + ssoCredentialsProviderBuilder.prefetchTime(Duration.ofMinutes(4)); + } + + try (SsoCredentialsProvider credentialsProvider = ssoCredentialsProviderBuilder.ssoClient(ssoClient).build()) { + if(overrideStaleAndPrefetchTimes) { + assertThat(credentialsProvider.staleTime()).as("stale 
time").isEqualTo(Duration.ofMinutes(2)); + assertThat(credentialsProvider.prefetchTime()).as("prefetch time").isEqualTo(Duration.ofMinutes(4)); + } else { + assertThat(credentialsProvider.staleTime()).as("stale time").isEqualTo(Duration.ofMinutes(1)); + assertThat(credentialsProvider.prefetchTime()).as("prefetch time").isEqualTo(Duration.ofMinutes(5)); + } + + for (int i = 0; i < numTimesInvokeCredentialsProvider; ++i) { + AwsSessionCredentials actualCredentials = (AwsSessionCredentials) credentialsProvider.resolveCredentials(); + assertThat(actualCredentials.accessKeyId()).isEqualTo("a"); + assertThat(actualCredentials.secretAccessKey()).isEqualTo("b"); + assertThat(actualCredentials.sessionToken()).isEqualTo("c"); + } + } + + } + + private static final class GetRoleCredentialsRequestSupplier implements Supplier { + private final GetRoleCredentialsRequest request; + private final String cachedToken; + + GetRoleCredentialsRequestSupplier(GetRoleCredentialsRequest request, + String cachedToken) { + this.request = request; + this.cachedToken = cachedToken; + } + + @Override + public Object get() { + return request.toBuilder().accessToken(cachedToken).build(); + } + + } + +} \ No newline at end of file diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactoryTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactoryTest.java new file mode 100644 index 000000000000..bcc47d576ccd --- /dev/null +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactoryTest.java @@ -0,0 +1,75 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.auth; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableList; +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.profiles.Profile; +import software.amazon.awssdk.services.sso.internal.SsoAccessTokenProvider; + +/** + * Validate the code path of creating the {@link SsoCredentialsProvider} with {@link SsoProfileCredentialsProviderFactory}. 
+ */ +public class SsoProfileCredentialsProviderFactoryTest { + + @Test + public void createSsoCredentialsProviderWithFactorySucceed() throws IOException { + String startUrl = "https//d-abc123.awsapps.com/start"; + String generatedTokenFileName = "6a888bdb653a4ba345dd68f21b896ec2e218c6f4.json"; + + Map properties = new HashMap<>(); + properties.put("sso_account_id", "accountId"); + properties.put("sso_region", "region"); + properties.put("sso_role_name", "roleName"); + properties.put("sso_start_url", "https//d-abc123.awsapps.com/start"); + Profile profile = Profile.builder().name("foo").properties(properties).build(); + + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2090-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\", \n" + + "\"startUrl\": \""+ startUrl +"\"\n" + + "}"; + SsoAccessTokenProvider tokenProvider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, generatedTokenFileName)); + + SsoProfileCredentialsProviderFactory factory = new SsoProfileCredentialsProviderFactory(); + assertThat(factory.create(profile, tokenProvider)).isInstanceOf(AwsCredentialsProvider.class); + } + + private Path prepareTestCachedTokenFile(String tokenFileContent, String generatedTokenFileName) throws IOException { + FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); + Path fileDirectory = fs.getPath("./foo"); + + Files.createDirectory(fileDirectory); + Path cachedTokenFilePath = fileDirectory.resolve(generatedTokenFileName); + Files.write(cachedTokenFilePath, ImmutableList.of(tokenFileContent), StandardCharsets.UTF_8); + + return cachedTokenFilePath; + } +} \ No newline at end of file diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileTest.java new file mode 100644 index 000000000000..ee08c541eaef --- /dev/null +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileTest.java @@ -0,0 +1,94 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.auth; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.internal.ProfileCredentialsUtils; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.utils.StringInputStream; + +/** + * Validate the completeness of sso profile properties consumed by the {@link ProfileCredentialsUtils}. 
+ */ +public class SsoProfileTest { + + @Test + public void createSsoCredentialsProvider_SsoAccountIdMissing_throwException() { + String profileContent = "[profile foo]\n" + + "sso_region=us-east-1\n" + + "sso_role_name=SampleRole\n" + + "sso_start_url=https://d-abc123.awsapps.com/start-beta\n"; + ProfileFile profiles = ProfileFile.builder() + .content(new StringInputStream(profileContent)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { + assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + .hasMessageContaining("Profile property 'sso_account_id' was not configured"); + }); + } + + @Test + public void createSsoCredentialsProvider_SsoRegionMissing_throwException() { + String profileContent = "[profile foo]\n" + + "sso_account_id=012345678901\n" + + "sso_role_name=SampleRole\n" + + "sso_start_url=https://d-abc123.awsapps.com/start-beta\n"; + ProfileFile profiles = ProfileFile.builder() + .content(new StringInputStream(profileContent)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { + assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + .hasMessageContaining("Profile property 'sso_region' was not configured"); + }); + } + + @Test + public void createSsoCredentialsProvider_SsoRoleNameMissing_throwException() { + String profileContent = "[profile foo]\n" + + "sso_account_id=012345678901\n" + + "sso_region=us-east-1\n" + + "sso_start_url=https://d-abc123.awsapps.com/start-beta\n"; + ProfileFile profiles = ProfileFile.builder() + .content(new StringInputStream(profileContent)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { + assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + .hasMessageContaining("Profile property 'sso_role_name' was not configured"); + }); + } + + @Test + public void createSsoCredentialsProvider_SsoStartUrlMissing_throwException() { + String profileContent = "[profile foo]\n" + + "sso_account_id=012345678901\n" + + "sso_region=us-east-1\n" + + "sso_role_name=SampleRole\n"; + ProfileFile profiles = ProfileFile.builder() + .content(new StringInputStream(profileContent)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { + assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + .hasMessageContaining("Profile property 'sso_start_url' was not configured"); + }); + } +} diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProviderTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProviderTest.java new file mode 100644 index 000000000000..678bd8f129bb --- /dev/null +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProviderTest.java @@ -0,0 +1,145 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.google.common.collect.ImmutableList; +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import org.junit.Test; + +/** + * Check if the behavior of {@link SsoAccessTokenProvider} is correct while consuming different formats of cached token + * file. + */ +public class SsoAccessTokenProviderTest { + + private static final String START_URL = "https//d-abc123.awsapps.com/start"; + private static final String GENERATED_TOKEN_FILE_NAME = "6a888bdb653a4ba345dd68f21b896ec2e218c6f4.json"; + private static final String WRONG_TOKEN_FILE_NAME = "wrong-token-file.json"; + + @Test + public void cachedTokenFile_correctFormat_resolveAccessTokenCorrectly() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2090-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\", \n" + + "\"startUrl\": \""+ START_URL +"\"\n" + + "}"; + SsoAccessTokenProvider provider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThat(provider.resolveAccessToken()).isEqualTo("base64string"); + } + + @Test + public void cachedTokenFile_accessTokenMissing_throwNullPointerException() throws IOException { + String tokenFile = "{\n" + + "\"expiresAt\": \"2090-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\", \n" + + "\"startUrl\": \""+ START_URL +"\"\n" + + "}"; + SsoAccessTokenProvider provider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThatThrownBy(provider::resolveAccessToken).isInstanceOf(NullPointerException.class); + } + + @Test + public void cachedTokenFile_expiresAtMissing_throwNullPointerException() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"region\": \"us-west-2\", \n" + + "\"startUrl\": \""+ START_URL +"\"\n" + + "}"; + + SsoAccessTokenProvider provider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThatThrownBy(provider::resolveAccessToken).isInstanceOf(NullPointerException.class); + } + + @Test + public void cachedTokenFile_optionalRegionMissing_resolveAccessTokenCorrectly() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2090-01-01T00:00:00Z\",\n" + + "\"startUrl\": \""+ START_URL +"\"\n" + + "}"; + SsoAccessTokenProvider provider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThat(provider.resolveAccessToken()).isEqualTo("base64string"); + } + + @Test + public void cachedTokenFile_optionalStartUrlMissing_resolveAccessTokenCorrectly() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2090-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\"\n" + + "}"; + SsoAccessTokenProvider provider = 
new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThat(provider.resolveAccessToken()).isEqualTo("base64string"); + } + + @Test + public void cachedTokenFile_alreadyExpired_resolveAccessTokenCorrectly() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2019-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\"\n" + + "}"; + SsoAccessTokenProvider provider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThatThrownBy(provider::resolveAccessToken).hasMessageContaining("The SSO session associated with this profile " + + "has expired or is otherwise invalid."); + } + + @Test + public void cachedTokenFile_tokenFileNotExist_throwNullPointerException() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2019-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\"\n" + + "}"; + prepareTestCachedTokenFile(tokenFile, WRONG_TOKEN_FILE_NAME); + SsoAccessTokenProvider provider = new SsoAccessTokenProvider(createTestCachedTokenFilePath( + Jimfs.newFileSystem(Configuration.unix()).getPath("./foo"), GENERATED_TOKEN_FILE_NAME)); + assertThatThrownBy(provider::resolveAccessToken).isInstanceOf(UncheckedIOException.class); + } + + private Path prepareTestCachedTokenFile(String tokenFileContent, String generatedTokenFileName) throws IOException { + FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); + Path fileDirectory = fs.getPath("./foo"); + + Files.createDirectory(fileDirectory); + Path cachedTokenFilePath = createTestCachedTokenFilePath(fileDirectory, generatedTokenFileName); + Files.write(cachedTokenFilePath, ImmutableList.of(tokenFileContent), StandardCharsets.UTF_8); + + return cachedTokenFilePath; + } + + private Path createTestCachedTokenFilePath(Path directory, String tokenFileName) { + return directory.resolve(tokenFileName); + } + +} diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtilsTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtilsTest.java new file mode 100644 index 000000000000..1028fa397759 --- /dev/null +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtilsTest.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.sso.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.services.sso.internal.SsoTokenFileUtils.generateCachedTokenPath; +import static software.amazon.awssdk.utils.UserHomeDirectoryUtils.userHomeDirectory; + +import org.junit.Test; + +public class SsoTokenFileUtilsTest { + + @Test + public void generateTheCorrectPathTest() { + String startUrl = "https//d-abc123.awsapps.com/start"; + String directory = "~/.aws/sso/cache"; + assertThat(generateCachedTokenPath(startUrl, directory).toString()) + .isEqualTo(userHomeDirectory() + "/.aws/sso/cache/6a888bdb653a4ba345dd68f21b896ec2e218c6f4.json"); + } + +} \ No newline at end of file diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml new file mode 100644 index 000000000000..21557b6c7fe8 --- /dev/null +++ b/services/ssoadmin/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + ssoadmin + AWS Java SDK :: Services :: SSO Admin + The AWS Java SDK for SSO Admin module holds the client classes that are used for + communicating with SSO Admin. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.ssoadmin + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/ssoadmin/src/main/resources/codegen-resources/paginators-1.json b/services/ssoadmin/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..e47ffc104743 --- /dev/null +++ b/services/ssoadmin/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,63 @@ +{ + "pagination": { + "ListAccountAssignmentCreationStatus": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AccountAssignmentsCreationStatus" + }, + "ListAccountAssignmentDeletionStatus": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AccountAssignmentsDeletionStatus" + }, + "ListAccountAssignments": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AccountAssignments" + }, + "ListAccountsForProvisionedPermissionSet": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AccountIds" + }, + "ListInstances": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Instances" + }, + "ListManagedPoliciesInPermissionSet": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AttachedManagedPolicies" + }, + "ListPermissionSetProvisioningStatus": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PermissionSetsProvisioningStatus" + }, + "ListPermissionSets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PermissionSets" + }, + "ListPermissionSetsProvisionedToAccount": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PermissionSets" + }, + "ListTagsForResource": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Tags" + } + } +} \ No newline at end of file diff --git 
a/services/ssoadmin/src/main/resources/codegen-resources/service-2.json b/services/ssoadmin/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..e886ccab6c71 --- /dev/null +++ b/services/ssoadmin/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2056 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-07-20", + "endpointPrefix":"sso", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"SSO Admin", + "serviceFullName":"AWS Single Sign-On Admin", + "serviceId":"SSO Admin", + "signatureVersion":"v4", + "signingName":"sso", + "targetPrefix":"SWBExternalService", + "uid":"sso-admin-2020-07-20" + }, + "operations":{ + "AttachManagedPolicyToPermissionSet":{ + "name":"AttachManagedPolicyToPermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachManagedPolicyToPermissionSetRequest"}, + "output":{"shape":"AttachManagedPolicyToPermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Attaches an IAM managed policy ARN to a permission set.

    If the permission set is already referenced by one or more account assignments, you will need to call ProvisionPermissionSet after this action to apply the corresponding IAM policy updates to all assigned accounts.

    " + }, + "CreateAccountAssignment":{ + "name":"CreateAccountAssignment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAccountAssignmentRequest"}, + "output":{"shape":"CreateAccountAssignmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Assigns access to a principal for a specified AWS account using a specified permission set.

    The term principal here refers to a user or group that is defined in AWS SSO.

    As part of a successful CreateAccountAssignment call, the specified permission set will automatically be provisioned to the account in the form of an IAM policy attached to the SSO-created IAM role. If the permission set is subsequently updated, the corresponding IAM policies attached to roles in your accounts will not be updated automatically. In this case, you will need to call ProvisionPermissionSet to make these updates.
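    [Editor's aside] For completeness, a heavily hedged sketch of the assignment call from the ssoadmin module added later in this diff. The request members are not visible in this excerpt, so every name below (SsoAdminClient, the ARNs, the TargetType/PrincipalType enums, the response accessor) is an assumption based on the public CreateAccountAssignment API rather than on code shown here.

```java
import software.amazon.awssdk.services.ssoadmin.SsoAdminClient;
import software.amazon.awssdk.services.ssoadmin.model.CreateAccountAssignmentRequest;
import software.amazon.awssdk.services.ssoadmin.model.CreateAccountAssignmentResponse;
import software.amazon.awssdk.services.ssoadmin.model.PrincipalType;
import software.amazon.awssdk.services.ssoadmin.model.TargetType;

public final class CreateAccountAssignmentExample {
    public static void main(String[] args) {
        // All identifiers below are placeholders.
        try (SsoAdminClient admin = SsoAdminClient.create()) {
            CreateAccountAssignmentResponse result = admin.createAccountAssignment(
                    CreateAccountAssignmentRequest.builder()
                            .instanceArn("arn:aws:sso:::instance/ssoins-EXAMPLE")
                            .targetType(TargetType.AWS_ACCOUNT)
                            .targetId("123456789012")
                            .permissionSetArn("arn:aws:sso:::permissionSet/ssoins-EXAMPLE/ps-EXAMPLE")
                            .principalType(PrincipalType.USER)
                            .principalId("906a1c85-example-identity-store-id")
                            .build());
            // If the permission set is later changed, ProvisionPermissionSet must be
            // called to push updated IAM policies to accounts with existing assignments.
            System.out.println(result.accountAssignmentCreationStatus());
        }
    }
}
```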

    " + }, + "CreateInstanceAccessControlAttributeConfiguration":{ + "name":"CreateInstanceAccessControlAttributeConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceAccessControlAttributeConfigurationRequest"}, + "output":{"shape":"CreateInstanceAccessControlAttributeConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Enables the attributes-based access control (ABAC) feature for the specified AWS SSO instance. You can also specify new attributes to add to your ABAC configuration during the enabling process. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

    " + }, + "CreatePermissionSet":{ + "name":"CreatePermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePermissionSetRequest"}, + "output":{"shape":"CreatePermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Creates a permission set within a specified SSO instance.

    To grant users and groups access to AWS account resources, use CreateAccountAssignment.

    " + }, + "DeleteAccountAssignment":{ + "name":"DeleteAccountAssignment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAccountAssignmentRequest"}, + "output":{"shape":"DeleteAccountAssignmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes a principal's access from a specified AWS account using a specified permission set.

    " + }, + "DeleteInlinePolicyFromPermissionSet":{ + "name":"DeleteInlinePolicyFromPermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInlinePolicyFromPermissionSetRequest"}, + "output":{"shape":"DeleteInlinePolicyFromPermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes the inline policy from a specified permission set.

    " + }, + "DeleteInstanceAccessControlAttributeConfiguration":{ + "name":"DeleteInstanceAccessControlAttributeConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceAccessControlAttributeConfigurationRequest"}, + "output":{"shape":"DeleteInstanceAccessControlAttributeConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Disables the attributes-based access control (ABAC) feature for the specified AWS SSO instance and deletes all of the attribute mappings that have been configured. Once deleted, any attributes that are received from an identity source and any custom attributes you have previously configured will not be passed. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

    " + }, + "DeletePermissionSet":{ + "name":"DeletePermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePermissionSetRequest"}, + "output":{"shape":"DeletePermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes the specified permission set.

    " + }, + "DescribeAccountAssignmentCreationStatus":{ + "name":"DescribeAccountAssignmentCreationStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountAssignmentCreationStatusRequest"}, + "output":{"shape":"DescribeAccountAssignmentCreationStatusResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Describes the status of the assignment creation request.

    " + }, + "DescribeAccountAssignmentDeletionStatus":{ + "name":"DescribeAccountAssignmentDeletionStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountAssignmentDeletionStatusRequest"}, + "output":{"shape":"DescribeAccountAssignmentDeletionStatusResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Describes the status of the assignment deletion request.

    " + }, + "DescribeInstanceAccessControlAttributeConfiguration":{ + "name":"DescribeInstanceAccessControlAttributeConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceAccessControlAttributeConfigurationRequest"}, + "output":{"shape":"DescribeInstanceAccessControlAttributeConfigurationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns the list of AWS SSO identity store attributes that have been configured to work with attributes-based access control (ABAC) for the specified AWS SSO instance. This will not return attributes configured and sent by an external identity provider. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

    " + }, + "DescribePermissionSet":{ + "name":"DescribePermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePermissionSetRequest"}, + "output":{"shape":"DescribePermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Gets the details of the permission set.

    " + }, + "DescribePermissionSetProvisioningStatus":{ + "name":"DescribePermissionSetProvisioningStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePermissionSetProvisioningStatusRequest"}, + "output":{"shape":"DescribePermissionSetProvisioningStatusResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Describes the status for the given permission set provisioning request.

    " + }, + "DetachManagedPolicyFromPermissionSet":{ + "name":"DetachManagedPolicyFromPermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachManagedPolicyFromPermissionSetRequest"}, + "output":{"shape":"DetachManagedPolicyFromPermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Detaches the attached IAM managed policy ARN from the specified permission set.

    " + }, + "GetInlinePolicyForPermissionSet":{ + "name":"GetInlinePolicyForPermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetInlinePolicyForPermissionSetRequest"}, + "output":{"shape":"GetInlinePolicyForPermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Obtains the inline policy assigned to the permission set.

    " + }, + "ListAccountAssignmentCreationStatus":{ + "name":"ListAccountAssignmentCreationStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAccountAssignmentCreationStatusRequest"}, + "output":{"shape":"ListAccountAssignmentCreationStatusResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the status of the AWS account assignment creation requests for a specified SSO instance.

    " + }, + "ListAccountAssignmentDeletionStatus":{ + "name":"ListAccountAssignmentDeletionStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAccountAssignmentDeletionStatusRequest"}, + "output":{"shape":"ListAccountAssignmentDeletionStatusResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the status of the AWS account assignment deletion requests for a specified SSO instance.

    " + }, + "ListAccountAssignments":{ + "name":"ListAccountAssignments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAccountAssignmentsRequest"}, + "output":{"shape":"ListAccountAssignmentsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the assignee of the specified AWS account with the specified permission set.

    " + }, + "ListAccountsForProvisionedPermissionSet":{ + "name":"ListAccountsForProvisionedPermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAccountsForProvisionedPermissionSetRequest"}, + "output":{"shape":"ListAccountsForProvisionedPermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists all the AWS accounts where the specified permission set is provisioned.

    " + }, + "ListInstances":{ + "name":"ListInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstancesRequest"}, + "output":{"shape":"ListInstancesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Lists the SSO instances that the caller has access to.

    " + }, + "ListManagedPoliciesInPermissionSet":{ + "name":"ListManagedPoliciesInPermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListManagedPoliciesInPermissionSetRequest"}, + "output":{"shape":"ListManagedPoliciesInPermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the IAM managed policies that are attached to a specified permission set.

    " + }, + "ListPermissionSetProvisioningStatus":{ + "name":"ListPermissionSetProvisioningStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPermissionSetProvisioningStatusRequest"}, + "output":{"shape":"ListPermissionSetProvisioningStatusResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the status of the permission set provisioning requests for a specified SSO instance.

    " + }, + "ListPermissionSets":{ + "name":"ListPermissionSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPermissionSetsRequest"}, + "output":{"shape":"ListPermissionSetsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the PermissionSets in an SSO instance.

    " + }, + "ListPermissionSetsProvisionedToAccount":{ + "name":"ListPermissionSetsProvisionedToAccount", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPermissionSetsProvisionedToAccountRequest"}, + "output":{"shape":"ListPermissionSetsProvisionedToAccountResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists all the permission sets that are provisioned to a specified AWS account.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the tags that are attached to a specified resource.

    " + }, + "ProvisionPermissionSet":{ + "name":"ProvisionPermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ProvisionPermissionSetRequest"}, + "output":{"shape":"ProvisionPermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Provisions the specified permission set to the specified target.

    " + }, + "PutInlinePolicyToPermissionSet":{ + "name":"PutInlinePolicyToPermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutInlinePolicyToPermissionSetRequest"}, + "output":{"shape":"PutInlinePolicyToPermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Attaches an IAM inline policy to a permission set.

    If the permission set is already referenced by one or more account assignments, you will need to call ProvisionPermissionSet after this action to apply the corresponding IAM policy updates to all assigned accounts.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Associates a set of tags with a specified resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Disassociates a set of tags from a specified resource.

    " + }, + "UpdateInstanceAccessControlAttributeConfiguration":{ + "name":"UpdateInstanceAccessControlAttributeConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateInstanceAccessControlAttributeConfigurationRequest"}, + "output":{"shape":"UpdateInstanceAccessControlAttributeConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Updates the AWS SSO identity store attributes to use with the AWS SSO instance for attribute-based access control (ABAC). When using an external identity provider as an identity source, you can pass attributes through the SAML assertion as an alternative to configuring attributes from the AWS SSO identity store. If a SAML assertion passes any of these attributes, AWS SSO will replace the attribute value with the value from the AWS SSO identity store. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

    " + }, + "UpdatePermissionSet":{ + "name":"UpdatePermissionSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePermissionSetRequest"}, + "output":{"shape":"UpdatePermissionSetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Updates an existing permission set.

    " + } + }, + "shapes":{ + "AccessControlAttribute":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"AccessControlAttributeKey", + "documentation":"

    The name of the attribute associated with your identities in your identity source. This is used to map a specified attribute in your identity source with an attribute in AWS SSO.

    " + }, + "Value":{ + "shape":"AccessControlAttributeValue", + "documentation":"

    The value used for mapping a specified attribute to an identity source.

    " + } + }, + "documentation":"

    These are AWS SSO identity store attributes that you can configure for use in attribute-based access control (ABAC). You can create permission policies that determine who can access your AWS resources based on the configured attribute values. When you enable ABAC and specify AccessControlAttributes, AWS SSO passes the attribute values of the authenticated user into IAM for use in policy evaluation.

    " + }, + "AccessControlAttributeKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]+" + }, + "AccessControlAttributeList":{ + "type":"list", + "member":{"shape":"AccessControlAttribute"}, + "max":50, + "min":0 + }, + "AccessControlAttributeValue":{ + "type":"structure", + "required":["Source"], + "members":{ + "Source":{ + "shape":"AccessControlAttributeValueSourceList", + "documentation":"

    The identity source to use when mapping a specified attribute to AWS SSO.

    " + } + }, + "documentation":"

    The value used for mapping a specified attribute to an identity source.

    " + }, + "AccessControlAttributeValueSource":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@\\[\\]\\{\\}\\$\\\\\"]*" + }, + "AccessControlAttributeValueSourceList":{ + "type":"list", + "member":{"shape":"AccessControlAttributeValueSource"}, + "max":1, + "min":1 + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"AccessDeniedExceptionMessage"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "exception":true + }, + "AccessDeniedExceptionMessage":{"type":"string"}, + "AccountAssignment":{ + "type":"structure", + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier of the AWS account.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PrincipalType":{ + "shape":"PrincipalType", + "documentation":"

    The entity type for which the assignment will be created.

    " + }, + "PrincipalId":{ + "shape":"PrincipalId", + "documentation":"

    An identifier for an object in AWS SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in AWS SSO, see the AWS SSO Identity Store API Reference.

    " + } + }, + "documentation":"

    The assignment that indicates a principal's limited access to a specified AWS account with a specified permission set.

    The term principal here refers to a user or group that is defined in AWS SSO.

    " + }, + "AccountAssignmentList":{ + "type":"list", + "member":{"shape":"AccountAssignment"} + }, + "AccountAssignmentOperationStatus":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusValues", + "documentation":"

    The status of the permission set provisioning process.

    " + }, + "RequestId":{ + "shape":"UUId", + "documentation":"

    The identifier for tracking the request operation that is generated by the universally unique identifier (UUID) workflow.

    " + }, + "FailureReason":{ + "shape":"Reason", + "documentation":"

    The message that contains an error or exception in case of an operation failure.

    " + }, + "TargetId":{ + "shape":"TargetId", + "documentation":"

    TargetID is an AWS account identifier, typically a 10-12 digit string (For example, 123456789012).

    " + }, + "TargetType":{ + "shape":"TargetType", + "documentation":"

    The entity type for which the assignment will be created.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PrincipalType":{ + "shape":"PrincipalType", + "documentation":"

    The entity type for which the assignment will be created.

    " + }, + "PrincipalId":{ + "shape":"PrincipalId", + "documentation":"

    An identifier for an object in AWS SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in AWS SSO, see the AWS SSO Identity Store API Reference.

    " + }, + "CreatedDate":{ + "shape":"Date", + "documentation":"

    The date that the permission set was created.

    " + } + }, + "documentation":"

    The status of the creation or deletion operation of an assignment that a principal needs to access an account.

    " + }, + "AccountAssignmentOperationStatusList":{ + "type":"list", + "member":{"shape":"AccountAssignmentOperationStatusMetadata"} + }, + "AccountAssignmentOperationStatusMetadata":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusValues", + "documentation":"

    The status of the permission set provisioning process.

    " + }, + "RequestId":{ + "shape":"UUId", + "documentation":"

    The identifier for tracking the request operation that is generated by the universally unique identifier (UUID) workflow.

    " + }, + "CreatedDate":{ + "shape":"Date", + "documentation":"

    The date that the permission set was created.

    " + } + }, + "documentation":"

    Provides information about the AccountAssignment creation request.

    " + }, + "AccountId":{ + "type":"string", + "pattern":"\\d{12}" + }, + "AccountList":{ + "type":"list", + "member":{"shape":"AccountId"} + }, + "AttachManagedPolicyToPermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn", + "ManagedPolicyArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the PermissionSet that the managed policy should be attached to.

    " + }, + "ManagedPolicyArn":{ + "shape":"ManagedPolicyArn", + "documentation":"

    The IAM managed policy ARN to be attached to a permission set.

    " + } + } + }, + "AttachManagedPolicyToPermissionSetResponse":{ + "type":"structure", + "members":{ + } + }, + "AttachedManagedPolicy":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

    The name of the IAM managed policy.

    " + }, + "Arn":{ + "shape":"ManagedPolicyArn", + "documentation":"

    The ARN of the IAM managed policy. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + } + }, + "documentation":"

    A structure that stores the details of the IAM managed policy.

    " + }, + "AttachedManagedPolicyList":{ + "type":"list", + "member":{"shape":"AttachedManagedPolicy"} + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ConflictExceptionMessage"} + }, + "documentation":"

    Occurs when a conflict with a previous successful write is detected. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

    ", + "exception":true + }, + "ConflictExceptionMessage":{"type":"string"}, + "CreateAccountAssignmentRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "TargetId", + "TargetType", + "PermissionSetArn", + "PrincipalType", + "PrincipalId" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "TargetId":{ + "shape":"TargetId", + "documentation":"

    TargetID is an AWS account identifier, typically a 10-12 digit string (For example, 123456789012).

    " + }, + "TargetType":{ + "shape":"TargetType", + "documentation":"

    The entity type for which the assignment will be created.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set that the admin wants to grant the principal access to.

    " + }, + "PrincipalType":{ + "shape":"PrincipalType", + "documentation":"

    The entity type for which the assignment will be created.

    " + }, + "PrincipalId":{ + "shape":"PrincipalId", + "documentation":"

    An identifier for an object in AWS SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in AWS SSO, see the AWS SSO Identity Store API Reference.

    " + } + } + }, + "CreateAccountAssignmentResponse":{ + "type":"structure", + "members":{ + "AccountAssignmentCreationStatus":{ + "shape":"AccountAssignmentOperationStatus", + "documentation":"

    The status object for the account assignment creation operation.

    " + } + } + }, + "CreateInstanceAccessControlAttributeConfigurationRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "InstanceAccessControlAttributeConfiguration" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed.

    " + }, + "InstanceAccessControlAttributeConfiguration":{ + "shape":"InstanceAccessControlAttributeConfiguration", + "documentation":"

    Specifies the AWS SSO identity store attributes to add to your ABAC configuration. When using an external identity provider as an identity source, you can pass attributes through the SAML assertion as an alternative to configuring attributes from the AWS SSO identity store. If a SAML assertion passes any of these attributes, AWS SSO will replace the attribute value with the value from the AWS SSO identity store.

    " + } + } + }, + "CreateInstanceAccessControlAttributeConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "CreatePermissionSetRequest":{ + "type":"structure", + "required":[ + "Name", + "InstanceArn" + ], + "members":{ + "Name":{ + "shape":"PermissionSetName", + "documentation":"

    The name of the PermissionSet.

    " + }, + "Description":{ + "shape":"PermissionSetDescription", + "documentation":"

    The description of the PermissionSet.

    " + }, + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "SessionDuration":{ + "shape":"Duration", + "documentation":"

    The length of time that the application user sessions are valid in the ISO-8601 standard.

    " + }, + "RelayState":{ + "shape":"RelayState", + "documentation":"

    Used to redirect users within the application during the federation authentication process.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to attach to the new PermissionSet.

    " + } + } + }, + "CreatePermissionSetResponse":{ + "type":"structure", + "members":{ + "PermissionSet":{ + "shape":"PermissionSet", + "documentation":"

    Defines the level of access on an AWS account.

    " + } + } + }, + "Date":{"type":"timestamp"}, + "DeleteAccountAssignmentRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "TargetId", + "TargetType", + "PermissionSetArn", + "PrincipalType", + "PrincipalId" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "TargetId":{ + "shape":"TargetId", + "documentation":"

    TargetID is an AWS account identifier, typically a 10-12 digit string (For example, 123456789012).

    " + }, + "TargetType":{ + "shape":"TargetType", + "documentation":"

    The entity type for which the assignment will be deleted.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set that will be used to remove access.

    " + }, + "PrincipalType":{ + "shape":"PrincipalType", + "documentation":"

    The entity type for which the assignment will be deleted.

    " + }, + "PrincipalId":{ + "shape":"PrincipalId", + "documentation":"

    An identifier for an object in AWS SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about PrincipalIds in AWS SSO, see the AWS SSO Identity Store API Reference.

    " + } + } + }, + "DeleteAccountAssignmentResponse":{ + "type":"structure", + "members":{ + "AccountAssignmentDeletionStatus":{ + "shape":"AccountAssignmentOperationStatus", + "documentation":"

    The status object for the account assignment deletion operation.

    " + } + } + }, + "DeleteInlinePolicyFromPermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set that will be used to remove access.

    " + } + } + }, + "DeleteInlinePolicyFromPermissionSetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteInstanceAccessControlAttributeConfigurationRequest":{ + "type":"structure", + "required":["InstanceArn"], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed.

    " + } + } + }, + "DeleteInstanceAccessControlAttributeConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "DeletePermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set that should be deleted.

    " + } + } + }, + "DeletePermissionSetResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeAccountAssignmentCreationStatusRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "AccountAssignmentCreationRequestId" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "AccountAssignmentCreationRequestId":{ + "shape":"UUId", + "documentation":"

    The identifier that is used to track the request operation progress.

    " + } + } + }, + "DescribeAccountAssignmentCreationStatusResponse":{ + "type":"structure", + "members":{ + "AccountAssignmentCreationStatus":{ + "shape":"AccountAssignmentOperationStatus", + "documentation":"

    The status object for the account assignment creation operation.

    " + } + } + }, + "DescribeAccountAssignmentDeletionStatusRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "AccountAssignmentDeletionRequestId" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "AccountAssignmentDeletionRequestId":{ + "shape":"UUId", + "documentation":"

    The identifier that is used to track the request operation progress.

    " + } + } + }, + "DescribeAccountAssignmentDeletionStatusResponse":{ + "type":"structure", + "members":{ + "AccountAssignmentDeletionStatus":{ + "shape":"AccountAssignmentOperationStatus", + "documentation":"

    The status object for the account assignment deletion operation.

    " + } + } + }, + "DescribeInstanceAccessControlAttributeConfigurationRequest":{ + "type":"structure", + "required":["InstanceArn"], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed.

    " + } + } + }, + "DescribeInstanceAccessControlAttributeConfigurationResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"InstanceAccessControlAttributeConfigurationStatus", + "documentation":"

    The status of the attribute configuration process.

    " + }, + "StatusReason":{ + "shape":"InstanceAccessControlAttributeConfigurationStatusReason", + "documentation":"

    Provides more details about the current status of the specified attribute.

    " + }, + "InstanceAccessControlAttributeConfiguration":{ + "shape":"InstanceAccessControlAttributeConfiguration", + "documentation":"

    Gets the list of AWS SSO identity store attributes added to your ABAC configuration.

    " + } + } + }, + "DescribePermissionSetProvisioningStatusRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "ProvisionPermissionSetRequestId" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "ProvisionPermissionSetRequestId":{ + "shape":"UUId", + "documentation":"

    The identifier that is provided by the ProvisionPermissionSet call to retrieve the current status of the provisioning workflow.

    " + } + } + }, + "DescribePermissionSetProvisioningStatusResponse":{ + "type":"structure", + "members":{ + "PermissionSetProvisioningStatus":{ + "shape":"PermissionSetProvisioningStatus", + "documentation":"

    The status object for the permission set provisioning operation.

    " + } + } + }, + "DescribePermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set.

    " + } + } + }, + "DescribePermissionSetResponse":{ + "type":"structure", + "members":{ + "PermissionSet":{ + "shape":"PermissionSet", + "documentation":"

    Describes the level of access on an AWS account.

    " + } + } + }, + "DetachManagedPolicyFromPermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn", + "ManagedPolicyArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the PermissionSet from which the policy should be detached.

    " + }, + "ManagedPolicyArn":{ + "shape":"ManagedPolicyArn", + "documentation":"

    The IAM managed policy ARN to be detached from the specified permission set.

    " + } + } + }, + "DetachManagedPolicyFromPermissionSetResponse":{ + "type":"structure", + "members":{ + } + }, + "Duration":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^(-?)P(?=\\d|T\\d)(?:(\\d+)Y)?(?:(\\d+)M)?(?:(\\d+)([DW]))?(?:T(?:(\\d+)H)?(?:(\\d+)M)?(?:(\\d+(?:\\.\\d+)?)S)?)?$" + }, + "GeneralArn":{ + "type":"string", + "max":2048, + "min":10, + "pattern":"arn:aws:sso:([a-zA-Z0-9-]+)?:(\\d{12})?:[a-zA-Z0-9-]+/[a-zA-Z0-9-/.]+" + }, + "GetInlinePolicyForPermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set.

    " + } + } + }, + "GetInlinePolicyForPermissionSetResponse":{ + "type":"structure", + "members":{ + "InlinePolicy":{ + "shape":"PermissionSetPolicyDocument", + "documentation":"

    The IAM inline policy that is attached to the permission set.

    " + } + } + }, + "Id":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-]*" + }, + "InstanceAccessControlAttributeConfiguration":{ + "type":"structure", + "required":["AccessControlAttributes"], + "members":{ + "AccessControlAttributes":{ + "shape":"AccessControlAttributeList", + "documentation":"

    Lists the attributes that are configured for ABAC in the specified AWS SSO instance.

    " + } + }, + "documentation":"

    Specifies the attributes to add to your attribute-based access control (ABAC) configuration.

    " + }, + "InstanceAccessControlAttributeConfigurationStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "CREATION_IN_PROGRESS", + "CREATION_FAILED" + ] + }, + "InstanceAccessControlAttributeConfigurationStatusReason":{"type":"string"}, + "InstanceArn":{ + "type":"string", + "max":1224, + "min":10, + "pattern":"arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}" + }, + "InstanceList":{ + "type":"list", + "member":{"shape":"InstanceMetadata"} + }, + "InstanceMetadata":{ + "type":"structure", + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "IdentityStoreId":{ + "shape":"Id", + "documentation":"

    The identifier of the identity store that is connected to the SSO instance.

    " + } + }, + "documentation":"

    Provides information about the SSO instance.

    " + }, + "InternalFailureMessage":{"type":"string"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"InternalFailureMessage"} + }, + "documentation":"

    The request processing has failed because of an unknown error, exception, or failure with an internal server.

    ", + "exception":true + }, + "ListAccountAssignmentCreationStatusRequest":{ + "type":"structure", + "required":["InstanceArn"], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to display for the assignment.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + }, + "Filter":{ + "shape":"OperationStatusFilter", + "documentation":"

    Filters results based on the passed attribute value.

    " + } + } + }, + "ListAccountAssignmentCreationStatusResponse":{ + "type":"structure", + "members":{ + "AccountAssignmentsCreationStatus":{ + "shape":"AccountAssignmentOperationStatusList", + "documentation":"

    The status object for the account assignment creation operation.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListAccountAssignmentDeletionStatusRequest":{ + "type":"structure", + "required":["InstanceArn"], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to display for the assignment.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + }, + "Filter":{ + "shape":"OperationStatusFilter", + "documentation":"

    Filters results based on the passed attribute value.

    " + } + } + }, + "ListAccountAssignmentDeletionStatusResponse":{ + "type":"structure", + "members":{ + "AccountAssignmentsDeletionStatus":{ + "shape":"AccountAssignmentOperationStatusList", + "documentation":"

    The status object for the account assignment deletion operation.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListAccountAssignmentsRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "AccountId", + "PermissionSetArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "AccountId":{ + "shape":"TargetId", + "documentation":"

    The identifier of the AWS account from which to list the assignments.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set from which to list assignments.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to display for the assignment.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListAccountAssignmentsResponse":{ + "type":"structure", + "members":{ + "AccountAssignments":{ + "shape":"AccountAssignmentList", + "documentation":"

    The list of assignments that match the input AWS account and permission set.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListAccountsForProvisionedPermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the PermissionSet from which the associated AWS accounts will be listed.

    " + }, + "ProvisioningStatus":{ + "shape":"ProvisioningStatus", + "documentation":"

    The permission set provisioning status for an AWS account.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to display for the PermissionSet.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListAccountsForProvisionedPermissionSetResponse":{ + "type":"structure", + "members":{ + "AccountIds":{ + "shape":"AccountList", + "documentation":"

    The list of AWS AccountIds.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListInstancesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to display for the instance.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListInstancesResponse":{ + "type":"structure", + "members":{ + "Instances":{ + "shape":"InstanceList", + "documentation":"

    Lists the SSO instances that the caller has access to.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListManagedPoliciesInPermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the PermissionSet whose managed policies will be listed.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to display for the PermissionSet.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListManagedPoliciesInPermissionSetResponse":{ + "type":"structure", + "members":{ + "AttachedManagedPolicies":{ + "shape":"AttachedManagedPolicyList", + "documentation":"

    The array of AttachedManagedPolicy data type objects.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListPermissionSetProvisioningStatusRequest":{ + "type":"structure", + "required":["InstanceArn"], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to display for the assignment.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + }, + "Filter":{ + "shape":"OperationStatusFilter", + "documentation":"

    Filters results based on the passed attribute value.

    " + } + } + }, + "ListPermissionSetProvisioningStatusResponse":{ + "type":"structure", + "members":{ + "PermissionSetsProvisioningStatus":{ + "shape":"PermissionSetProvisioningStatusList", + "documentation":"

    The status object for the permission set provisioning operation.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListPermissionSetsProvisionedToAccountRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "AccountId" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier of the AWS account from which to list the assignments.

    " + }, + "ProvisioningStatus":{ + "shape":"ProvisioningStatus", + "documentation":"

    The status object for the permission set provisioning operation.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to display for the assignment.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListPermissionSetsProvisionedToAccountResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + }, + "PermissionSets":{ + "shape":"PermissionSetList", + "documentation":"

    Defines the level of access that an AWS account has.

    " + } + } + }, + "ListPermissionSetsRequest":{ + "type":"structure", + "required":["InstanceArn"], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to display for the assignment.

    " + } + } + }, + "ListPermissionSetsResponse":{ + "type":"structure", + "members":{ + "PermissionSets":{ + "shape":"PermissionSetList", + "documentation":"

    Defines the level of access on an AWS account.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "ResourceArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "ResourceArn":{ + "shape":"GeneralArn", + "documentation":"

    The ARN of the resource with the tags to be listed.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

    A set of key-value pairs that are used to manage the resource.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + }, + "ManagedPolicyArn":{ + "type":"string", + "max":2048, + "min":20 + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "Name":{ + "type":"string", + "max":100, + "min":1 + }, + "OperationStatusFilter":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusValues", + "documentation":"

    Filters the list operations result based on the status attribute.

    " + } + }, + "documentation":"

    Filters the operation status list based on the passed attribute value.

    " + }, + "PermissionSet":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"PermissionSetName", + "documentation":"

    The name of the permission set.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "Description":{ + "shape":"PermissionSetDescription", + "documentation":"

    The description of the PermissionSet.

    " + }, + "CreatedDate":{ + "shape":"Date", + "documentation":"

    The date that the permission set was created.

    " + }, + "SessionDuration":{ + "shape":"Duration", + "documentation":"

    The length of time that the application user sessions are valid for in the ISO-8601 standard.

    " + }, + "RelayState":{ + "shape":"RelayState", + "documentation":"

    Used to redirect users within the application during the federation authentication process.

    " + } + }, + "documentation":"

    An entity that contains IAM policies.

    " + }, + "PermissionSetArn":{ + "type":"string", + "max":1224, + "min":10, + "pattern":"arn:aws:sso:::permissionSet/(sso)?ins-[a-zA-Z0-9-.]{16}/ps-[a-zA-Z0-9-./]{16}" + }, + "PermissionSetDescription":{ + "type":"string", + "max":700, + "min":1, + "pattern":"[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]*" + }, + "PermissionSetList":{ + "type":"list", + "member":{"shape":"PermissionSetArn"} + }, + "PermissionSetName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "PermissionSetPolicyDocument":{ + "type":"string", + "max":10240, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+", + "sensitive":true + }, + "PermissionSetProvisioningStatus":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusValues", + "documentation":"

    The status of the permission set provisioning process.

    " + }, + "RequestId":{ + "shape":"UUId", + "documentation":"

    The identifier for tracking the request operation that is generated by the universally unique identifier (UUID) workflow.

    " + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

    The identifier of the AWS account for which the permission set is being provisioned.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set that is being provisioned. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "FailureReason":{ + "shape":"Reason", + "documentation":"

    The message that contains an error or exception in case of an operation failure.

    " + }, + "CreatedDate":{ + "shape":"Date", + "documentation":"

    The date that the permission set was created.

    " + } + }, + "documentation":"

    A structure that is used to provide the status of the provisioning operation for a specified permission set.

    " + }, + "PermissionSetProvisioningStatusList":{ + "type":"list", + "member":{"shape":"PermissionSetProvisioningStatusMetadata"} + }, + "PermissionSetProvisioningStatusMetadata":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusValues", + "documentation":"

    The status of the permission set provisioning process.

    " + }, + "RequestId":{ + "shape":"UUId", + "documentation":"

    The identifier for tracking the request operation that is generated by the universally unique identifier (UUID) workflow.

    " + }, + "CreatedDate":{ + "shape":"Date", + "documentation":"

    The date that the permission set was created.

    " + } + }, + "documentation":"

    Provides information about the permission set provisioning status.

    " + }, + "PrincipalId":{ + "type":"string", + "max":47, + "min":1, + "pattern":"^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$" + }, + "PrincipalType":{ + "type":"string", + "enum":[ + "USER", + "GROUP" + ] + }, + "ProvisionPermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn", + "TargetType" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set.

    " + }, + "TargetId":{ + "shape":"TargetId", + "documentation":"

    TargetID is an AWS account identifier, typically a 10-12 digit string (For example, 123456789012).

    " + }, + "TargetType":{ + "shape":"ProvisionTargetType", + "documentation":"

    The entity type for which the assignment will be created.

    " + } + } + }, + "ProvisionPermissionSetResponse":{ + "type":"structure", + "members":{ + "PermissionSetProvisioningStatus":{ + "shape":"PermissionSetProvisioningStatus", + "documentation":"

    The status object for the permission set provisioning operation.

    " + } + } + }, + "ProvisionTargetType":{ + "type":"string", + "enum":[ + "AWS_ACCOUNT", + "ALL_PROVISIONED_ACCOUNTS" + ] + }, + "ProvisioningStatus":{ + "type":"string", + "enum":[ + "LATEST_PERMISSION_SET_PROVISIONED", + "LATEST_PERMISSION_SET_NOT_PROVISIONED" + ] + }, + "PutInlinePolicyToPermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn", + "InlinePolicy" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set.

    " + }, + "InlinePolicy":{ + "shape":"PermissionSetPolicyDocument", + "documentation":"

    The IAM inline policy to attach to a PermissionSet.

    " + } + } + }, + "PutInlinePolicyToPermissionSetResponse":{ + "type":"structure", + "members":{ + } + }, + "Reason":{ + "type":"string", + "pattern":"[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]*" + }, + "RelayState":{ + "type":"string", + "max":240, + "min":1, + "pattern":"[a-zA-Z0-9&$@#\\\\\\/%?=~\\-_'\"|!:,.;*+\\[\\]\\ \\(\\)\\{\\}]+" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ResourceNotFoundMessage"} + }, + "documentation":"

    Indicates that a requested resource is not found.

    ", + "exception":true + }, + "ResourceNotFoundMessage":{"type":"string"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ServiceQuotaExceededMessage"} + }, + "documentation":"

    Indicates that the principal has exceeded the permitted number of resources that can be created.

    ", + "exception":true + }, + "ServiceQuotaExceededMessage":{"type":"string"}, + "StatusValues":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "FAILED", + "SUCCEEDED" + ] + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    The key for the tag.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    The value of the tag.

    " + } + }, + "documentation":"

    A set of key-value pairs that are used to manage the resource. Tags can only be applied to permission sets and cannot be applied to corresponding roles that AWS SSO creates in AWS accounts.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "ResourceArn", + "Tags" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "ResourceArn":{ + "shape":"GeneralArn", + "documentation":"

    The ARN of the resource to which the tags will be attached.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    A set of key-value pairs that are used to manage the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TargetId":{ + "type":"string", + "pattern":"\\d{12}" + }, + "TargetType":{ + "type":"string", + "enum":["AWS_ACCOUNT"] + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ThrottlingExceptionMessage"} + }, + "documentation":"

    Indicates that the principal has exceeded the throttling limits of the API operations.

    ", + "exception":true + }, + "ThrottlingExceptionMessage":{"type":"string"}, + "Token":{ + "type":"string", + "max":2048, + "pattern":"^[-a-zA-Z0-9+=/]*" + }, + "UUId":{ + "type":"string", + "pattern":"\\b[0-9a-f]{8}\\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\\b[0-9a-f]{12}\\b" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "ResourceArn", + "TagKeys" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "ResourceArn":{ + "shape":"GeneralArn", + "documentation":"

    The ARN of the resource from which the tags will be removed.

    " + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The keys of tags that are attached to the resource.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateInstanceAccessControlAttributeConfigurationRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "InstanceAccessControlAttributeConfiguration" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed.

    " + }, + "InstanceAccessControlAttributeConfiguration":{ + "shape":"InstanceAccessControlAttributeConfiguration", + "documentation":"

    Updates the attributes for your ABAC configuration.

    " + } + } + }, + "UpdateInstanceAccessControlAttributeConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdatePermissionSetRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "PermissionSetArn" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    " + }, + "PermissionSetArn":{ + "shape":"PermissionSetArn", + "documentation":"

    The ARN of the permission set.

    " + }, + "Description":{ + "shape":"PermissionSetDescription", + "documentation":"

    The description of the PermissionSet.

    " + }, + "SessionDuration":{ + "shape":"Duration", + "documentation":"

    The length of time that the application user sessions are valid for in the ISO-8601 standard.

    " + }, + "RelayState":{ + "shape":"RelayState", + "documentation":"

    Used to redirect users within the application during the federation authentication process.

    " + } + } + }, + "UpdatePermissionSetResponse":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ValidationExceptionMessage"} + }, + "documentation":"

    The request failed because it contains a syntax error.

    ", + "exception":true + }, + "ValidationExceptionMessage":{"type":"string"} + } +} diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml new file mode 100644 index 000000000000..c199eac7f528 --- /dev/null +++ b/services/ssooidc/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + ssooidc + AWS Java SDK :: Services :: SSO OIDC + The AWS Java SDK for SSO OIDC module holds the client classes that are used for + communicating with SSO OIDC. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.ssooidc + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/ssooidc/src/main/resources/codegen-resources/customization.config b/services/ssooidc/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..919e7a0f70fc --- /dev/null +++ b/services/ssooidc/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,125 @@ +{ + "shapeModifiers": { + // Do not keep adding to this list. Make the service team do the right thing across all SDKs. + "AccessDeniedException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "AuthorizationPendingException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "ExpiredTokenException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InternalServerException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InvalidClientException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InvalidClientMetadataException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InvalidGrantException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InvalidRequestException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InvalidScopeException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "SlowDownException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "UnauthorizedClientException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "UnsupportedGrantTypeException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + } + } +} \ No newline at end of file diff --git a/services/ssooidc/src/main/resources/codegen-resources/paginators-1.json b/services/ssooidc/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ 
b/services/ssooidc/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/ssooidc/src/main/resources/codegen-resources/service-2.json b/services/ssooidc/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..79b2efe1304d --- /dev/null +++ b/services/ssooidc/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,392 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-06-10", + "endpointPrefix":"oidc", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"SSO OIDC", + "serviceFullName":"AWS SSO OIDC", + "serviceId":"SSO OIDC", + "signatureVersion":"v4", + "signingName":"awsssooidc", + "uid":"sso-oidc-2019-06-10" + }, + "operations":{ + "CreateToken":{ + "name":"CreateToken", + "http":{ + "method":"POST", + "requestUri":"/token" + }, + "input":{"shape":"CreateTokenRequest"}, + "output":{"shape":"CreateTokenResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidClientException"}, + {"shape":"InvalidGrantException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"UnsupportedGrantTypeException"}, + {"shape":"InvalidScopeException"}, + {"shape":"AuthorizationPendingException"}, + {"shape":"SlowDownException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ExpiredTokenException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates and returns an access token for the authorized client. The access token issued will be used to fetch short-term credentials for the assigned roles in the AWS account.

    ", + "authtype":"none" + }, + "RegisterClient":{ + "name":"RegisterClient", + "http":{ + "method":"POST", + "requestUri":"/client/register" + }, + "input":{"shape":"RegisterClientRequest"}, + "output":{"shape":"RegisterClientResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidScopeException"}, + {"shape":"InvalidClientMetadataException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Registers a client with AWS SSO. This allows clients to initiate device authorization. The output should be persisted for reuse through many authentication requests.

    ", + "authtype":"none" + }, + "StartDeviceAuthorization":{ + "name":"StartDeviceAuthorization", + "http":{ + "method":"POST", + "requestUri":"/device_authorization" + }, + "input":{"shape":"StartDeviceAuthorizationRequest"}, + "output":{"shape":"StartDeviceAuthorizationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"SlowDownException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Initiates device authorization by requesting a pair of verification codes from the authorization service.

    ", + "authtype":"none" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "AccessToken":{"type":"string"}, + "AuthCode":{"type":"string"}, + "AuthorizationPendingException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that a request to authorize a client with an access user session token is pending.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ClientId":{"type":"string"}, + "ClientName":{"type":"string"}, + "ClientSecret":{"type":"string"}, + "ClientType":{"type":"string"}, + "CreateTokenRequest":{ + "type":"structure", + "required":[ + "clientId", + "clientSecret", + "grantType", + "deviceCode" + ], + "members":{ + "clientId":{ + "shape":"ClientId", + "documentation":"

    The unique identifier string for each client. This value should come from the persisted result of the RegisterClient API.

    " + }, + "clientSecret":{ + "shape":"ClientSecret", + "documentation":"

    A secret string generated for the client. This value should come from the persisted result of the RegisterClient API.

    " + }, + "grantType":{ + "shape":"GrantType", + "documentation":"

    Supports grant types for authorization code, refresh token, and device code request.

    " + }, + "deviceCode":{ + "shape":"DeviceCode", + "documentation":"

    Used only when calling this API for the device code grant type. This short-term code is used to identify this authentication attempt. This should come from an in-memory reference to the result of the StartDeviceAuthorization API.

    " + }, + "code":{ + "shape":"AuthCode", + "documentation":"

    The authorization code received from the authorization service. This parameter is required to perform an authorization grant request to get access to a token.

    " + }, + "refreshToken":{ + "shape":"RefreshToken", + "documentation":"

    The token used to obtain an access token in the event that the access token is invalid or expired. This token is not issued by the service.

    " + }, + "scope":{ + "shape":"Scopes", + "documentation":"

    The list of scopes that is defined by the client. Upon authorization, this list is used to restrict permissions when granting an access token.

    " + }, + "redirectUri":{ + "shape":"URI", + "documentation":"

    The location of the application that will receive the authorization code. Users authorize the service to send the request to this location.

    " + } + } + }, + "CreateTokenResponse":{ + "type":"structure", + "members":{ + "accessToken":{ + "shape":"AccessToken", + "documentation":"

    An opaque token to access AWS SSO resources assigned to a user.

    " + }, + "tokenType":{ + "shape":"TokenType", + "documentation":"

    Used to notify the client that the returned token is an access token. The supported type is BearerToken.

    " + }, + "expiresIn":{ + "shape":"ExpirationInSeconds", + "documentation":"

    Indicates the time in seconds when an access token will expire.

    " + }, + "refreshToken":{ + "shape":"RefreshToken", + "documentation":"

    A token that, if present, can be used to refresh a previously issued access token that might have expired.

    " + }, + "idToken":{ + "shape":"IdToken", + "documentation":"

    The identifier of the user that is associated with the access token, if present.

    " + } + } + }, + "DeviceCode":{"type":"string"}, + "Error":{"type":"string"}, + "ErrorDescription":{"type":"string"}, + "ExpirationInSeconds":{"type":"integer"}, + "ExpiredTokenException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that the token issued by the service is expired and is no longer valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "GrantType":{"type":"string"}, + "IdToken":{"type":"string"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that an error from the service occurred while trying to process a request.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "IntervalInSeconds":{"type":"integer"}, + "InvalidClientException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that the clientId or clientSecret in the request is invalid. For example, this can occur when a client sends an incorrect clientId or an expired clientSecret.

    ", + "error":{"httpStatusCode":401}, + "exception":true + }, + "InvalidClientMetadataException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that the client information sent in the request during registration is invalid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGrantException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that a request contains an invalid grant. This can occur if a client makes a CreateToken request with an invalid grant type.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that something is wrong with the input to the request. For example, a required parameter might be missing or out of range.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidScopeException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that the scope provided in the request is invalid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LongTimeStampType":{"type":"long"}, + "RefreshToken":{"type":"string"}, + "RegisterClientRequest":{ + "type":"structure", + "required":[ + "clientName", + "clientType" + ], + "members":{ + "clientName":{ + "shape":"ClientName", + "documentation":"

    The friendly name of the client.

    " + }, + "clientType":{ + "shape":"ClientType", + "documentation":"

    The type of client. The service supports only public as a client type. Anything other than public will be rejected by the service.

    " + }, + "scopes":{ + "shape":"Scopes", + "documentation":"

    The list of scopes that are defined by the client. Upon authorization, this list is used to restrict permissions when granting an access token.

    " + } + } + }, + "RegisterClientResponse":{ + "type":"structure", + "members":{ + "clientId":{ + "shape":"ClientId", + "documentation":"

    The unique identifier string for each client. This client uses this identifier to get authenticated by the service in subsequent calls.

    " + }, + "clientSecret":{ + "shape":"ClientSecret", + "documentation":"

    A secret string generated for the client. The client will use this string to get authenticated by the service in subsequent calls.

    " + }, + "clientIdIssuedAt":{ + "shape":"LongTimeStampType", + "documentation":"

    Indicates the time at which the clientId and clientSecret were issued.

    " + }, + "clientSecretExpiresAt":{ + "shape":"LongTimeStampType", + "documentation":"

    Indicates the time at which the clientId and clientSecret will become invalid.

    " + }, + "authorizationEndpoint":{ + "shape":"URI", + "documentation":"

    The endpoint where the client can request authorization.

    " + }, + "tokenEndpoint":{ + "shape":"URI", + "documentation":"

    The endpoint where the client can get an access token.

    " + } + } + }, + "Scope":{"type":"string"}, + "Scopes":{ + "type":"list", + "member":{"shape":"Scope"} + }, + "SlowDownException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that the client is making requests too frequently and is exceeding what the service can handle.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "StartDeviceAuthorizationRequest":{ + "type":"structure", + "required":[ + "clientId", + "clientSecret", + "startUrl" + ], + "members":{ + "clientId":{ + "shape":"ClientId", + "documentation":"

    The unique identifier string for the client that is registered with AWS SSO. This value should come from the persisted result of the RegisterClient API operation.

    " + }, + "clientSecret":{ + "shape":"ClientSecret", + "documentation":"

    A secret string that is generated for the client. This value should come from the persisted result of the RegisterClient API operation.

    " + }, + "startUrl":{ + "shape":"URI", + "documentation":"

    The URL for the AWS SSO user portal. For more information, see Using the User Portal in the AWS Single Sign-On User Guide.

    " + } + } + }, + "StartDeviceAuthorizationResponse":{ + "type":"structure", + "members":{ + "deviceCode":{ + "shape":"DeviceCode", + "documentation":"

    The short-lived code that is used by the device when polling for a session token.

    " + }, + "userCode":{ + "shape":"UserCode", + "documentation":"

    A one-time user verification code. This is needed to authorize an in-use device.

    " + }, + "verificationUri":{ + "shape":"URI", + "documentation":"

    The URI of the verification page that takes the userCode to authorize the device.

    " + }, + "verificationUriComplete":{ + "shape":"URI", + "documentation":"

    An alternate URL that the client can use to automatically launch a browser. This process skips the manual step in which the user visits the verification page and enters their code.

    " + }, + "expiresIn":{ + "shape":"ExpirationInSeconds", + "documentation":"

    Indicates the number of seconds in which the verification code will become invalid.

    " + }, + "interval":{ + "shape":"IntervalInSeconds", + "documentation":"

    Indicates the number of seconds the client must wait between attempts when polling for a session.

    " + } + } + }, + "TokenType":{"type":"string"}, + "URI":{"type":"string"}, + "UnauthorizedClientException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that the client is not currently authorized to make the request. This can happen when a clientId is not issued for a public client.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "UnsupportedGrantTypeException":{ + "type":"structure", + "members":{ + "error":{"shape":"Error"}, + "error_description":{"shape":"ErrorDescription"} + }, + "documentation":"

    Indicates that the grant type in the request is not supported by the service.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "UserCode":{"type":"string"} + }, + "documentation":"

    AWS Single Sign-On (SSO) OpenID Connect (OIDC) is a web service that enables a client (such as AWS CLI or a native application) to register with AWS SSO. The service also enables the client to fetch the user’s access token upon successful authentication and authorization with AWS SSO. This service conforms with the OAuth 2.0 based implementation of the device authorization grant standard (https://tools.ietf.org/html/rfc8628).

    For general information about AWS SSO, see What is AWS Single Sign-On? in the AWS SSO User Guide.

    This API reference guide describes the AWS SSO OIDC operations that you can call programmatically and includes detailed information on data types and errors.

    AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms such as Java, Ruby, .NET, iOS, and Android. The SDKs provide a convenient way to create programmatic access to AWS SSO and other AWS services. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

    " +} diff --git a/services/storagegateway/build.properties b/services/storagegateway/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/storagegateway/build.properties +++ b/services/storagegateway/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index 39e1a548d1e1..f8c13cabbe3a 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + synthetics + AWS Java SDK :: Services :: Synthetics + The AWS Java SDK for Synthetics module holds the client classes that are used for + communicating with Synthetics. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.synthetics + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/synthetics/src/main/resources/codegen-resources/paginators-1.json b/services/synthetics/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..e5412aa47fd0 --- /dev/null +++ b/services/synthetics/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,24 @@ +{ + "pagination": { + "DescribeCanaries": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "DescribeCanariesLastRun": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "DescribeRuntimeVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "GetCanaryRuns": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + } + } +} \ No newline at end of file diff --git a/services/synthetics/src/main/resources/codegen-resources/service-2.json b/services/synthetics/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..255bc9bba01d --- /dev/null +++ b/services/synthetics/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1125 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-10-11", + "endpointPrefix":"synthetics", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Synthetics", + "serviceFullName":"Synthetics", + "serviceId":"synthetics", + "signatureVersion":"v4", + "signingName":"synthetics", + "uid":"synthetics-2017-10-11" + }, + "operations":{ + "CreateCanary":{ + "name":"CreateCanary", + "http":{ + "method":"POST", + "requestUri":"/canary" + }, + "input":{"shape":"CreateCanaryRequest"}, + "output":{"shape":"CreateCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates a canary. Canaries are scripts that monitor your endpoints and APIs from the outside-in. Canaries help you check the availability and latency of your web services and troubleshoot anomalies by investigating load time data, screenshots of the UI, logs, and metrics. You can set up a canary to run continuously or just once.

    Do not use CreateCanary to modify an existing canary. Use UpdateCanary instead.

    To create canaries, you must have the CloudWatchSyntheticsFullAccess policy. If you are creating a new IAM role for the canary, you also need the iam:CreateRole, iam:CreatePolicy, and iam:AttachRolePolicy permissions. For more information, see Necessary Roles and Permissions.

    Do not include secrets or proprietary information in your canary names. The canary name makes up part of the Amazon Resource Name (ARN) for the canary, and the ARN is included in outbound calls over the internet. For more information, see Security Considerations for Synthetics Canaries.

    " + }, + "DeleteCanary":{ + "name":"DeleteCanary", + "http":{ + "method":"DELETE", + "requestUri":"/canary/{name}" + }, + "input":{"shape":"DeleteCanaryRequest"}, + "output":{"shape":"DeleteCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Permanently deletes the specified canary.

    When you delete a canary, resources used and created by the canary are not automatically deleted. After you delete a canary that you do not intend to use again, you should also delete the following:

    • The Lambda functions and layers used by this canary. These have the prefix cwsyn-MyCanaryName .

    • The CloudWatch alarms created for this canary. These alarms have a name of Synthetics-SharpDrop-Alarm-MyCanaryName .

    • Amazon S3 objects and buckets, such as the canary's artifact location.

    • IAM roles created for the canary. If they were created in the console, these roles have the name role/service-role/CloudWatchSyntheticsRole-MyCanaryName .

    • CloudWatch Logs log groups created for the canary. These log groups have the name /aws/lambda/cwsyn-MyCanaryName .

    Before you delete a canary, you might want to use GetCanary to display the information about this canary. Make note of the information returned by this operation so that you can delete these resources after you delete the canary.

    " + }, + "DescribeCanaries":{ + "name":"DescribeCanaries", + "http":{ + "method":"POST", + "requestUri":"/canaries" + }, + "input":{"shape":"DescribeCanariesRequest"}, + "output":{"shape":"DescribeCanariesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    This operation returns a list of the canaries in your account, along with full details about each canary.

    This operation does not have resource-level authorization, so if a user is able to use DescribeCanaries, the user can see all of the canaries in the account. A deny policy can only be used to restrict access to all canaries. It cannot be used on specific resources.

    " + }, + "DescribeCanariesLastRun":{ + "name":"DescribeCanariesLastRun", + "http":{ + "method":"POST", + "requestUri":"/canaries/last-run" + }, + "input":{"shape":"DescribeCanariesLastRunRequest"}, + "output":{"shape":"DescribeCanariesLastRunResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Use this operation to see information from the most recent run of each canary that you have created.

    " + }, + "DescribeRuntimeVersions":{ + "name":"DescribeRuntimeVersions", + "http":{ + "method":"POST", + "requestUri":"/runtime-versions" + }, + "input":{"shape":"DescribeRuntimeVersionsRequest"}, + "output":{"shape":"DescribeRuntimeVersionsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Returns a list of Synthetics canary runtime versions. For more information, see Canary Runtime Versions.

    " + }, + "GetCanary":{ + "name":"GetCanary", + "http":{ + "method":"GET", + "requestUri":"/canary/{name}" + }, + "input":{"shape":"GetCanaryRequest"}, + "output":{"shape":"GetCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Retrieves complete information about one canary. You must specify the name of the canary that you want. To get a list of canaries and their names, use DescribeCanaries.

    " + }, + "GetCanaryRuns":{ + "name":"GetCanaryRuns", + "http":{ + "method":"POST", + "requestUri":"/canary/{name}/runs" + }, + "input":{"shape":"GetCanaryRunsRequest"}, + "output":{"shape":"GetCanaryRunsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieves a list of runs for a specified canary.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Displays the tags associated with a canary.

    " + }, + "StartCanary":{ + "name":"StartCanary", + "http":{ + "method":"POST", + "requestUri":"/canary/{name}/start" + }, + "input":{"shape":"StartCanaryRequest"}, + "output":{"shape":"StartCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Use this operation to run a canary that has already been created. The frequency of the canary runs is determined by the value of the canary's Schedule. To see a canary's schedule, use GetCanary.

    " + }, + "StopCanary":{ + "name":"StopCanary", + "http":{ + "method":"POST", + "requestUri":"/canary/{name}/stop" + }, + "input":{"shape":"StopCanaryRequest"}, + "output":{"shape":"StopCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Stops the canary to prevent all future runs. If the canary is currently running, Synthetics stops waiting for the current run of the specified canary to complete. The run that is in progress completes on its own, publishes metrics, and uploads artifacts, but it is not recorded in Synthetics as a completed run.

    You can use StartCanary to start it running again with the canary’s current schedule at any point in the future.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Assigns one or more tags (key-value pairs) to the specified canary.

    Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.

    Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

    You can use the TagResource action with a canary that already has tags. If you specify a new tag key for the canary, this tag is appended to the list of tags associated with the canary. If you specify a tag key that is already associated with the canary, the new tag value that you specify replaces the previous value for that tag.

    You can associate as many as 50 tags with a canary.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Removes one or more tags from the specified canary.

    " + }, + "UpdateCanary":{ + "name":"UpdateCanary", + "http":{ + "method":"PATCH", + "requestUri":"/canary/{name}" + }, + "input":{"shape":"UpdateCanaryRequest"}, + "output":{"shape":"UpdateCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Use this operation to change the settings of a canary that has already been created.

    You can't use this operation to update the tags of an existing canary. To change the tags of an existing canary, use TagResource.

    " + } + }, + "shapes":{ + "Blob":{ + "type":"blob", + "max":10000000, + "min":1 + }, + "Canaries":{ + "type":"list", + "member":{"shape":"Canary"} + }, + "CanariesLastRun":{ + "type":"list", + "member":{"shape":"CanaryLastRun"} + }, + "Canary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"UUID", + "documentation":"

    The unique ID of this canary.

    " + }, + "Name":{ + "shape":"CanaryName", + "documentation":"

    The name of the canary.

    " + }, + "Code":{"shape":"CanaryCodeOutput"}, + "ExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of the IAM role used to run the canary. This role must include lambda.amazonaws.com as a principal in the trust policy.

    " + }, + "Schedule":{ + "shape":"CanaryScheduleOutput", + "documentation":"

    A structure that contains information about how often the canary is to run, and when these runs are to stop.

    " + }, + "RunConfig":{"shape":"CanaryRunConfigOutput"}, + "SuccessRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

    The number of days to retain data about successful runs of this canary.

    " + }, + "FailureRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

    The number of days to retain data about failed runs of this canary.

    " + }, + "Status":{ + "shape":"CanaryStatus", + "documentation":"

    A structure that contains information about the canary's status.

    " + }, + "Timeline":{ + "shape":"CanaryTimeline", + "documentation":"

    A structure that contains information about when the canary was created, modified, and most recently run.

    " + }, + "ArtifactS3Location":{ + "shape":"String", + "documentation":"

    The location in Amazon S3 where Synthetics stores artifacts from the runs of this canary. Artifacts include the log file, screenshots, and HAR files.

    " + }, + "EngineArn":{ + "shape":"FunctionArn", + "documentation":"

    The ARN of the Lambda function that is used as your canary's engine. For more information about Lambda ARN format, see Resources and Conditions for Lambda Actions.

    " + }, + "RuntimeVersion":{ + "shape":"String", + "documentation":"

    Specifies the runtime version to use for the canary. For more information about runtime versions, see Canary Runtime Versions.

    " + }, + "VpcConfig":{"shape":"VpcConfigOutput"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The list of key-value pairs that are associated with the canary.

    " + } + }, + "documentation":"

    This structure contains all information about one canary in your account.

    " + }, + "CanaryArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,21}" + }, + "CanaryCodeInput":{ + "type":"structure", + "required":["Handler"], + "members":{ + "S3Bucket":{ + "shape":"String", + "documentation":"

    If your canary script is located in S3, specify the full bucket name here. The bucket must already exist. Specify the full bucket name, including s3:// as the start of the bucket name.

    " + }, + "S3Key":{ + "shape":"String", + "documentation":"

    The S3 key of your script. For more information, see Working with Amazon S3 Objects.

    " + }, + "S3Version":{ + "shape":"String", + "documentation":"

    The S3 version ID of your script.

    " + }, + "ZipFile":{ + "shape":"Blob", + "documentation":"

    If you input your canary script directly into the canary instead of referring to an S3 location, the value of this parameter is the .zip file that contains the script. It can be up to 5 MB.

    " + }, + "Handler":{ + "shape":"String", + "documentation":"

    The entry point to use for the source code when running the canary. This value must end with the string .handler.

    " + } + }, + "documentation":"

    Use this structure to input your script code for the canary. This structure contains the Lambda handler with the location where the canary should start running the script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included. If the script was passed into the canary directly, the script code is contained in the value of ZipFile.

    " + }, + "CanaryCodeOutput":{ + "type":"structure", + "members":{ + "SourceLocationArn":{ + "shape":"String", + "documentation":"

    The ARN of the Lambda layer where Synthetics stores the canary script code.

    " + }, + "Handler":{ + "shape":"String", + "documentation":"

    The entry point to use for the source code when running the canary.

    " + } + }, + "documentation":"

    This structure contains information about the canary's Lambda handler and where its code is stored by CloudWatch Synthetics.

    " + }, + "CanaryLastRun":{ + "type":"structure", + "members":{ + "CanaryName":{ + "shape":"CanaryName", + "documentation":"

    The name of the canary.

    " + }, + "LastRun":{ + "shape":"CanaryRun", + "documentation":"

    The results from this canary's most recent run.

    " + } + }, + "documentation":"

    This structure contains information about the most recent run of a single canary.

    " + }, + "CanaryName":{ + "type":"string", + "max":21, + "min":1, + "pattern":"^[0-9a-z_\\-]+$" + }, + "CanaryRun":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"UUID", + "documentation":"

    A unique ID that identifies this canary run.

    " + }, + "Name":{ + "shape":"CanaryName", + "documentation":"

    The name of the canary.

    " + }, + "Status":{ + "shape":"CanaryRunStatus", + "documentation":"

    The status of this run.

    " + }, + "Timeline":{ + "shape":"CanaryRunTimeline", + "documentation":"

    A structure that contains the start and end times of this run.

    " + }, + "ArtifactS3Location":{ + "shape":"String", + "documentation":"

    The location where the canary stored artifacts from the run. Artifacts include the log file, screenshots, and HAR files.

    " + } + }, + "documentation":"

    This structure contains the details about one run of one canary.

    " + }, + "CanaryRunConfigInput":{ + "type":"structure", + "members":{ + "TimeoutInSeconds":{ + "shape":"MaxFifteenMinutesInSeconds", + "documentation":"

    How long the canary is allowed to run before it must stop. You can't set this time to be longer than the frequency of the runs of this canary.

    If you omit this field, the frequency of the canary is used as this value, up to a maximum of 14 minutes.

    " + }, + "MemoryInMB":{ + "shape":"MaxSize3008", + "documentation":"

    The maximum amount of memory available to the canary while it is running, in MB. This value must be a multiple of 64.

    " + }, + "ActiveTracing":{ + "shape":"NullableBoolean", + "documentation":"

    Specifies whether this canary is to use active AWS X-Ray tracing when it runs. Active tracing enables this canary run to be displayed in the ServiceLens and X-Ray service maps even if the canary does not hit an endpoint that has X-Ray tracing enabled. Using X-Ray tracing incurs charges. For more information, see Canaries and X-Ray tracing.

    You can enable active tracing only for canaries that use version syn-nodejs-2.0 or later for their canary runtime.

    " + }, + "EnvironmentVariables":{ + "shape":"EnvironmentVariablesMap", + "documentation":"

    Specifies the keys and values to use for any environment variables used in the canary script. Use the following format:

    { \"key1\" : \"value1\", \"key2\" : \"value2\", ...}

    Keys must start with a letter and be at least two characters. The total size of your environment variables cannot exceed 4 KB. You can't specify any Lambda reserved environment variables as the keys for your environment variables. For more information about reserved keys, see Runtime environment variables.

    " + } + }, + "documentation":"

    A structure that contains input information for a canary run.

    " + }, + "CanaryRunConfigOutput":{ + "type":"structure", + "members":{ + "TimeoutInSeconds":{ + "shape":"MaxFifteenMinutesInSeconds", + "documentation":"

    How long the canary is allowed to run before it must stop.

    " + }, + "MemoryInMB":{ + "shape":"MaxSize3008", + "documentation":"

    The maximum amount of memory available to the canary while it is running, in MB. This value must be a multiple of 64.

    " + }, + "ActiveTracing":{ + "shape":"NullableBoolean", + "documentation":"

    Displays whether this canary run used active AWS X-Ray tracing.

    " + } + }, + "documentation":"

    A structure that contains information about a canary run.

    " + }, + "CanaryRunState":{ + "type":"string", + "enum":[ + "RUNNING", + "PASSED", + "FAILED" + ] + }, + "CanaryRunStateReasonCode":{ + "type":"string", + "enum":[ + "CANARY_FAILURE", + "EXECUTION_FAILURE" + ] + }, + "CanaryRunStatus":{ + "type":"structure", + "members":{ + "State":{ + "shape":"CanaryRunState", + "documentation":"

    The current state of the run.

    " + }, + "StateReason":{ + "shape":"String", + "documentation":"

    If this run of the canary failed, this field contains the reason for the error.

    " + }, + "StateReasonCode":{ + "shape":"CanaryRunStateReasonCode", + "documentation":"

    If this value is CANARY_FAILURE, an exception occurred in the canary code. If this value is EXECUTION_FAILURE, an exception occurred in CloudWatch Synthetics.

    " + } + }, + "documentation":"

    This structure contains the status information about a canary run.

    " + }, + "CanaryRunTimeline":{ + "type":"structure", + "members":{ + "Started":{ + "shape":"Timestamp", + "documentation":"

    The start time of the run.

    " + }, + "Completed":{ + "shape":"Timestamp", + "documentation":"

    The end time of the run.

    " + } + }, + "documentation":"

    This structure contains the start and end times of a single canary run.

    " + }, + "CanaryRuns":{ + "type":"list", + "member":{"shape":"CanaryRun"} + }, + "CanaryScheduleInput":{ + "type":"structure", + "required":["Expression"], + "members":{ + "Expression":{ + "shape":"String", + "documentation":"

    A rate expression that defines how often the canary is to run. The syntax is rate(number unit). unit can be minute, minutes, or hour.

    For example, rate(1 minute) runs the canary once a minute, rate(10 minutes) runs it once every 10 minutes, and rate(1 hour) runs it once every hour. You can specify a frequency between rate(1 minute) and rate(1 hour).

    Specifying rate(0 minute) or rate(0 hour) is a special value that causes the canary to run only once when it is started.

    " + }, + "DurationInSeconds":{ + "shape":"MaxOneYearInSeconds", + "documentation":"

    How long, in seconds, for the canary to continue making regular runs according to the schedule in the Expression value. If you specify 0, the canary continues making runs until you stop it. If you omit this field, the default of 0 is used.

    " + } + }, + "documentation":"

    This structure specifies how often a canary is to make runs and the date and time when it should stop making runs.

    " + }, + "CanaryScheduleOutput":{ + "type":"structure", + "members":{ + "Expression":{ + "shape":"String", + "documentation":"

    A rate expression that defines how often the canary is to run. The syntax is rate(number unit). unit can be minute, minutes, or hour.

    For example, rate(1 minute) runs the canary once a minute, rate(10 minutes) runs it once every 10 minutes, and rate(1 hour) runs it once every hour.

    Specifying rate(0 minute) or rate(0 hour) is a special value that causes the canary to run only once when it is started.

    " + }, + "DurationInSeconds":{ + "shape":"MaxOneYearInSeconds", + "documentation":"

    How long, in seconds, for the canary to continue making regular runs after it was created. The runs are performed according to the schedule in the Expression value.

    " + } + }, + "documentation":"

    How long, in seconds, for the canary to continue making regular runs according to the schedule in the Expression value.

    " + }, + "CanaryState":{ + "type":"string", + "enum":[ + "CREATING", + "READY", + "STARTING", + "RUNNING", + "UPDATING", + "STOPPING", + "STOPPED", + "ERROR", + "DELETING" + ] + }, + "CanaryStateReasonCode":{ + "type":"string", + "enum":["INVALID_PERMISSIONS"] + }, + "CanaryStatus":{ + "type":"structure", + "members":{ + "State":{ + "shape":"CanaryState", + "documentation":"

    The current state of the canary.

    " + }, + "StateReason":{ + "shape":"String", + "documentation":"

    If the canary has insufficient permissions to run, this field provides more details.

    " + }, + "StateReasonCode":{ + "shape":"CanaryStateReasonCode", + "documentation":"

    If the canary cannot run or has failed, this field displays the reason.

    " + } + }, + "documentation":"

    A structure that contains the current state of the canary.

    " + }, + "CanaryTimeline":{ + "type":"structure", + "members":{ + "Created":{ + "shape":"Timestamp", + "documentation":"

    The date and time the canary was created.

    " + }, + "LastModified":{ + "shape":"Timestamp", + "documentation":"

    The date and time the canary was most recently modified.

    " + }, + "LastStarted":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the canary's most recent run started.

    " + }, + "LastStopped":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the canary's most recent run ended.

    " + } + }, + "documentation":"

    This structure contains information about when the canary was created and modified.

    " + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    A conflicting operation is already in progress.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateCanaryRequest":{ + "type":"structure", + "required":[ + "Name", + "Code", + "ArtifactS3Location", + "ExecutionRoleArn", + "Schedule", + "RuntimeVersion" + ], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

    The name for this canary. Be sure to give it a descriptive name that distinguishes it from other canaries in your account.

    Do not include secrets or proprietary information in your canary names. The canary name makes up part of the canary ARN, and the ARN is included in outbound calls over the internet. For more information, see Security Considerations for Synthetics Canaries.

    " + }, + "Code":{ + "shape":"CanaryCodeInput", + "documentation":"

    A structure that includes the entry point from which the canary should start running your script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included.

    " + }, + "ArtifactS3Location":{ + "shape":"String", + "documentation":"

    The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files.

    " + }, + "ExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of the IAM role to be used to run the canary. This role must already exist, and must include lambda.amazonaws.com as a principal in the trust policy. The role must also have the following permissions:

    • s3:PutObject

    • s3:GetBucketLocation

    • s3:ListAllMyBuckets

    • cloudwatch:PutMetricData

    • logs:CreateLogGroup

    • logs:CreateLogStream

    • logs:PutLogEvents

    " + }, + "Schedule":{ + "shape":"CanaryScheduleInput", + "documentation":"

    A structure that contains information about how often the canary is to run and when these test runs are to stop.

    " + }, + "RunConfig":{ + "shape":"CanaryRunConfigInput", + "documentation":"

    A structure that contains the configuration for individual canary runs, such as timeout value.

    " + }, + "SuccessRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

    The number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.

    " + }, + "FailureRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

    The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.

    " + }, + "RuntimeVersion":{ + "shape":"String", + "documentation":"

    Specifies the runtime version to use for the canary. For a list of valid runtime versions and more information about runtime versions, see Canary Runtime Versions.

    " + }, + "VpcConfig":{ + "shape":"VpcConfigInput", + "documentation":"

    If this canary is to test an endpoint in a VPC, this structure contains information about the subnet and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    A list of key-value pairs to associate with the canary. You can associate as many as 50 tags with a canary.

    Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only the resources that have certain tag values.

    " + } + } + }, + "CreateCanaryResponse":{ + "type":"structure", + "members":{ + "Canary":{ + "shape":"Canary", + "documentation":"

    The full details about the canary you have created.

    " + } + } + }, + "DeleteCanaryRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

    The name of the canary that you want to delete. To find the names of your canaries, use DescribeCanaries.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteCanaryResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeCanariesLastRunRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

    A token that indicates that there is more data available. You can use this token in a subsequent DescribeCanariesLastRun operation to retrieve the next set of results.

    " + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

    Specify this parameter to limit how many runs are returned each time you use the DescribeCanariesLastRun operation. If you omit this parameter, the default of 100 is used.

    " + } + } + }, + "DescribeCanariesLastRunResponse":{ + "type":"structure", + "members":{ + "CanariesLastRun":{ + "shape":"CanariesLastRun", + "documentation":"

    An array that contains the information from the most recent run of each canary.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A token that indicates that there is more data available. You can use this token in a subsequent DescribeCanariesLastRun operation to retrieve the next set of results.

    " + } + } + }, + "DescribeCanariesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

    A token that indicates that there is more data available. You can use this token in a subsequent operation to retrieve the next set of results.

    " + }, + "MaxResults":{ + "shape":"MaxCanaryResults", + "documentation":"

    Specify this parameter to limit how many canaries are returned each time you use the DescribeCanaries operation. If you omit this parameter, the default of 100 is used.

    " + } + } + }, + "DescribeCanariesResponse":{ + "type":"structure", + "members":{ + "Canaries":{ + "shape":"Canaries", + "documentation":"

    Returns an array. Each item in the array contains the full information about one canary.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A token that indicates that there is more data available. You can use this token in a subsequent DescribeCanaries operation to retrieve the next set of results.

    " + } + } + }, + "DescribeRuntimeVersionsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

    A token that indicates that there is more data available. You can use this token in a subsequent DescribeRuntimeVersions operation to retrieve the next set of results.

    " + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

    Specify this parameter to limit how many runtime versions are returned each time you use the DescribeRuntimeVersions operation. If you omit this parameter, the default of 100 is used.

    " + } + } + }, + "DescribeRuntimeVersionsResponse":{ + "type":"structure", + "members":{ + "RuntimeVersions":{ + "shape":"RuntimeVersionList", + "documentation":"

    An array of objects that display the details about each Synthetics canary runtime version.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A token that indicates that there is more data available. You can use this token in a subsequent DescribeRuntimeVersions operation to retrieve the next set of results.

    " + } + } + }, + "EnvironmentVariableName":{ + "type":"string", + "pattern":"[a-zA-Z]([a-zA-Z0-9_])+" + }, + "EnvironmentVariableValue":{"type":"string"}, + "EnvironmentVariablesMap":{ + "type":"map", + "key":{"shape":"EnvironmentVariableName"}, + "value":{"shape":"EnvironmentVariableValue"} + }, + "ErrorMessage":{"type":"string"}, + "FunctionArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" + }, + "GetCanaryRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

    The name of the canary that you want details for.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "GetCanaryResponse":{ + "type":"structure", + "members":{ + "Canary":{ + "shape":"Canary", + "documentation":"

    A structure that contains the full information about the canary.

    " + } + } + }, + "GetCanaryRunsRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

    The name of the canary that you want to see runs for.

    ", + "location":"uri", + "locationName":"name" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A token that indicates that there is more data available. You can use this token in a subsequent GetCanaryRuns operation to retrieve the next set of results.

    " + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

    Specify this parameter to limit how many runs are returned each time you use the GetCanaryRuns operation. If you omit this parameter, the default of 100 is used.

    " + } + } + }, + "GetCanaryRunsResponse":{ + "type":"structure", + "members":{ + "CanaryRuns":{ + "shape":"CanaryRuns", + "documentation":"

    An array of structures. Each structure contains the details of one of the retrieved canary runs.

    " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

    A token that indicates that there is more data available. You can use this token in a subsequent GetCanaryRuns operation to retrieve the next set of results.

    " + } + } + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An unknown internal error occurred.

    ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"CanaryArn", + "documentation":"

    The ARN of the canary that you want to view tags for.

    The ARN format of a canary is arn:aws:synthetics:Region:account-id:canary:canary-name .

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

    The list of tag keys and values associated with the canary that you specified.

    " + } + } + }, + "MaxCanaryResults":{ + "type":"integer", + "max":20, + "min":1 + }, + "MaxFifteenMinutesInSeconds":{ + "type":"integer", + "max":840, + "min":3 + }, + "MaxOneYearInSeconds":{ + "type":"long", + "max":31622400, + "min":0 + }, + "MaxSize100":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxSize1024":{ + "type":"integer", + "max":1024, + "min":1 + }, + "MaxSize3008":{ + "type":"integer", + "max":3008, + "min":960 + }, + "NullableBoolean":{"type":"boolean"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    One of the specified resources was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" + }, + "RuntimeVersion":{ + "type":"structure", + "members":{ + "VersionName":{ + "shape":"String", + "documentation":"

    The name of the runtime version. For a list of valid runtime versions, see Canary Runtime Versions.

    " + }, + "Description":{ + "shape":"String", + "documentation":"

    A description of the runtime version, created by Amazon.

    " + }, + "ReleaseDate":{ + "shape":"Timestamp", + "documentation":"

    The date that the runtime version was released.

    " + }, + "DeprecationDate":{ + "shape":"Timestamp", + "documentation":"

    If this runtime version is deprecated, this value is the date of deprecation.

    " + } + }, + "documentation":"

    This structure contains information about one canary runtime version. For more information about runtime versions, see Canary Runtime Versions.

    " + }, + "RuntimeVersionList":{ + "type":"list", + "member":{"shape":"RuntimeVersion"} + }, + "SecurityGroupId":{"type":"string"}, + "SecurityGroupIds":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":5, + "min":0 + }, + "StartCanaryRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

    The name of the canary that you want to run. To find canary names, use DescribeCanaries.

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "StartCanaryResponse":{ + "type":"structure", + "members":{ + } + }, + "StopCanaryRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

    The name of the canary that you want to stop. To find the names of your canaries, use DescribeCanaries.
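    A minimal sketch of starting and then stopping a canary by name, assuming the standard generated SDK v2 client for this model; "my-canary" is a placeholder. Both responses are empty structures.

```java
import software.amazon.awssdk.services.synthetics.SyntheticsClient;
import software.amazon.awssdk.services.synthetics.model.StartCanaryRequest;
import software.amazon.awssdk.services.synthetics.model.StopCanaryRequest;

public class StartStopCanaryExample {
    public static void main(String[] args) {
        try (SyntheticsClient synthetics = SyntheticsClient.create()) {
            // Begin running the canary on its configured schedule.
            synthetics.startCanary(StartCanaryRequest.builder().name("my-canary").build());

            // ... later, stop it again.
            synthetics.stopCanary(StopCanaryRequest.builder().name("my-canary").build());
        }
    }
}
```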

    ", + "location":"uri", + "locationName":"name" + } + } + }, + "StopCanaryResponse":{ + "type":"structure", + "members":{ + } + }, + "String":{ + "type":"string", + "max":1024, + "min":1 + }, + "SubnetId":{"type":"string"}, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"}, + "max":16, + "min":0 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"CanaryArn", + "documentation":"

    The ARN of the canary that you're adding tags to.

    The ARN format of a canary is arn:aws:synthetics:Region:account-id:canary:canary-name .

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The list of key-value pairs to associate with the canary.
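    A minimal sketch of adding tags to a canary by ARN, assuming the standard generated SDK v2 client; the Region, account ID, canary name, and tag values are placeholders.

```java
import java.util.Map;
import software.amazon.awssdk.services.synthetics.SyntheticsClient;
import software.amazon.awssdk.services.synthetics.model.TagResourceRequest;

public class TagCanaryExample {
    public static void main(String[] args) {
        try (SyntheticsClient synthetics = SyntheticsClient.create()) {
            synthetics.tagResource(TagResourceRequest.builder()
                    // ARN format: arn:aws:synthetics:Region:account-id:canary:canary-name
                    .resourceArn("arn:aws:synthetics:us-east-1:123456789012:canary:my-canary")
                    .tags(Map.of("team", "web", "stage", "prod"))
                    .build());
        }
    }
}
```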

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "Timestamp":{"type":"timestamp"}, + "Token":{ + "type":"string", + "max":252, + "min":4 + }, + "UUID":{ + "type":"string", + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"CanaryArn", + "documentation":"

    The ARN of the canary that you're removing tags from.

    The ARN format of a canary is arn:aws:synthetics:Region:account-id:canary:canary-name .

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The list of tag keys to remove from the resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateCanaryRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

    The name of the canary that you want to update. To find the names of your canaries, use DescribeCanaries.

    You cannot change the name of a canary that has already been created.

    ", + "location":"uri", + "locationName":"name" + }, + "Code":{ + "shape":"CanaryCodeInput", + "documentation":"

    A structure that includes the entry point from which the canary should start running your script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included.

    " + }, + "ExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of the IAM role to be used to run the canary. This role must already exist, and must include lambda.amazonaws.com as a principal in the trust policy. The role must also have the following permissions:

    • s3:PutObject

    • s3:GetBucketLocation

    • s3:ListAllMyBuckets

    • cloudwatch:PutMetricData

    • logs:CreateLogGroup

    • logs:CreateLogStream

    • logs:PutLogEvents

    " + }, + "RuntimeVersion":{ + "shape":"String", + "documentation":"

    Specifies the runtime version to use for the canary. For a list of valid runtime versions and for more information about runtime versions, see Canary Runtime Versions.

    " + }, + "Schedule":{ + "shape":"CanaryScheduleInput", + "documentation":"

    A structure that contains information about how often the canary is to run, and when these runs are to stop.

    " + }, + "RunConfig":{ + "shape":"CanaryRunConfigInput", + "documentation":"

    A structure that contains the timeout value that is used for each individual run of the canary.

    " + }, + "SuccessRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

    The number of days to retain data about successful runs of this canary.

    " + }, + "FailureRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

    The number of days to retain data about failed runs of this canary.

    " + }, + "VpcConfig":{ + "shape":"VpcConfigInput", + "documentation":"

    If this canary is to test an endpoint in a VPC, this structure contains information about the subnet and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.
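    A minimal sketch of an UpdateCanary call that changes only the retention periods and runtime version of the canary described above, leaving Code, Schedule, RunConfig, and VpcConfig unset; the canary name and runtime version string are placeholders.

```java
import software.amazon.awssdk.services.synthetics.SyntheticsClient;
import software.amazon.awssdk.services.synthetics.model.UpdateCanaryRequest;

public class UpdateCanaryExample {
    public static void main(String[] args) {
        try (SyntheticsClient synthetics = SyntheticsClient.create()) {
            synthetics.updateCanary(UpdateCanaryRequest.builder()
                    .name("my-canary")                 // cannot be changed after creation
                    .runtimeVersion("syn-nodejs-2.0")  // placeholder; see Canary Runtime Versions
                    .successRetentionPeriodInDays(31)  // days to keep data about successful runs
                    .failureRetentionPeriodInDays(31)  // days to keep data about failed runs
                    .build());
        }
    }
}
```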

    " + } + } + }, + "UpdateCanaryResponse":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    A parameter could not be validated.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "VpcConfigInput":{ + "type":"structure", + "members":{ + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

    The IDs of the subnets where this canary is to run.

    " + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

    The IDs of the security groups for this canary.

    " + } + }, + "documentation":"

    If this canary is to test an endpoint in a VPC, this structure contains information about the subnets and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.

    " + }, + "VpcConfigOutput":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"VpcId", + "documentation":"

    The ID of the VPC where this canary is to run.

    " + }, + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

    The IDs of the subnets where this canary is to run.

    " + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

    The IDs of the security groups for this canary.

    " + } + }, + "documentation":"

    If this canary is to test an endpoint in a VPC, this structure contains information about the subnets and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.

    " + }, + "VpcId":{"type":"string"} + }, + "documentation":"Amazon CloudWatch Synthetics

    You can use Amazon CloudWatch Synthetics to continually monitor your services. You can create and manage canaries, which are modular, lightweight scripts that monitor your endpoints and APIs from the outside-in. You can set up your canaries to run 24 hours a day, as often as once per minute. The canaries help you check the availability and latency of your web services and troubleshoot anomalies by investigating load time data, screenshots of the UI, logs, and metrics. The canaries seamlessly integrate with CloudWatch ServiceLens to help you trace the causes of impacted nodes in your applications. For more information, see Using ServiceLens to Monitor the Health of Your Applications in the Amazon CloudWatch User Guide.

    Before you create and manage canaries, be aware of the security considerations. For more information, see Security Considerations for Synthetics Canaries.

    " +} diff --git a/services/textract/pom.xml b/services/textract/pom.xml index 8cb763c9e7e1..b6415e147ec9 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + timestreamquery + AWS Java SDK :: Services :: Timestream Query + The AWS Java SDK for Timestream Query module holds the client classes that are used for + communicating with Timestream Query. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.timestreamquery + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/timestreamquery/src/main/resources/codegen-resources/customization.config b/services/timestreamquery/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..8175efc28858 --- /dev/null +++ b/services/timestreamquery/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "allowEndpointOverrideForEndpointDiscoveryRequiredOperations": true +} \ No newline at end of file diff --git a/services/timestreamquery/src/main/resources/codegen-resources/paginators-1.json b/services/timestreamquery/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..dcc93dac8de3 --- /dev/null +++ b/services/timestreamquery/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,15 @@ +{ + "pagination": { + "Query": { + "input_token": "NextToken", + "limit_key": "MaxRows", + "non_aggregate_keys": [ + "ColumnInfo", + "QueryId", + "QueryStatus" + ], + "output_token": "NextToken", + "result_key": "Rows" + } + } +} \ No newline at end of file diff --git a/services/timestreamquery/src/main/resources/codegen-resources/service-2.json b/services/timestreamquery/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..fd388619dca7 --- /dev/null +++ b/services/timestreamquery/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,418 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-11-01", + "endpointPrefix":"query.timestream", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"Timestream Query", + "serviceFullName":"Amazon Timestream Query", + "serviceId":"Timestream Query", + "signatureVersion":"v4", + "signingName":"timestream", + "targetPrefix":"Timestream_20181101", + "uid":"timestream-query-2018-11-01" + }, + "operations":{ + "CancelQuery":{ + "name":"CancelQuery", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelQueryRequest"}, + "output":{"shape":"CancelQueryResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Cancels a query that has been issued. Cancellation is guaranteed only if the query has not completed execution before the cancellation request was issued. Because cancellation is an idempotent operation, subsequent cancellation requests will return a CancellationMessage, indicating that the query has already been canceled.

    ", + "endpointdiscovery":{"required":true}, + "idempotent":true + }, + "DescribeEndpoints":{ + "name":"DescribeEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEndpointsRequest"}, + "output":{"shape":"DescribeEndpointsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    DescribeEndpoints returns a list of available endpoints to make Timestream API calls against. This API is available through both Write and Query.

    Because Timestream’s SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, it is not recommended that you use this API unless:

    • Your application uses a programming language that does not yet have SDK support

    • You require better control over the client-side implementation

    For detailed information on how to use DescribeEndpoints, see The Endpoint Discovery Pattern and REST APIs.

    ", + "endpointoperation":true + }, + "Query":{ + "name":"Query", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"QueryRequest"}, + "output":{"shape":"QueryResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"QueryExecutionException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Query is a synchronous operation that enables you to execute a query. Query will time out after 60 seconds. You must update the default timeout in the SDK to support a timeout of 60 seconds. The result set will be truncated to 1 MB. Service quotas apply. For more information, see Quotas in the Timestream Developer Guide.
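    A minimal sketch of raising the SDK-side timeouts before calling Query, since the service-side query timeout is 60 seconds and the SDK defaults are shorter. ClientOverrideConfiguration and ApacheHttpClient are the standard SDK v2 configuration points used here; the query string and the specific durations are placeholders.

```java
import java.time.Duration;
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient;
import software.amazon.awssdk.services.timestreamquery.model.QueryRequest;
import software.amazon.awssdk.services.timestreamquery.model.QueryResponse;

public class TimestreamQueryTimeoutExample {
    public static void main(String[] args) {
        TimestreamQueryClient query = TimestreamQueryClient.builder()
                .httpClientBuilder(ApacheHttpClient.builder()
                        .socketTimeout(Duration.ofSeconds(70)))  // allow the full 60 s query time
                .overrideConfiguration(ClientOverrideConfiguration.builder()
                        .apiCallAttemptTimeout(Duration.ofSeconds(70))
                        .apiCallTimeout(Duration.ofMinutes(2))
                        .build())
                .build();

        QueryResponse response = query.query(QueryRequest.builder()
                .queryString("SELECT * FROM \"myDatabase\".\"myTable\" LIMIT 10")
                .build());
        System.out.println("Rows returned: " + response.rows().size());
        query.close();
    }
}
```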

    ", + "endpointdiscovery":{"required":true}, + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ServiceErrorMessage"} + }, + "documentation":"

    You are not authorized to perform this action.

    ", + "exception":true, + "synthetic":true + }, + "CancelQueryRequest":{ + "type":"structure", + "required":["QueryId"], + "members":{ + "QueryId":{ + "shape":"QueryId", + "documentation":"

    The id of the query that needs to be cancelled. QueryID is returned as part of QueryResult.

    " + } + } + }, + "CancelQueryResponse":{ + "type":"structure", + "members":{ + "CancellationMessage":{ + "shape":"String", + "documentation":"

    A CancellationMessage is returned when a CancelQuery request for the query specified by QueryId has already been issued.

    " + } + } + }, + "ClientRequestToken":{ + "type":"string", + "max":128, + "min":32, + "sensitive":true + }, + "ColumnInfo":{ + "type":"structure", + "required":["Type"], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

    The name of the result set column. The column name is available for columns of all data types except for arrays.

    " + }, + "Type":{ + "shape":"Type", + "documentation":"

    The data type of the result set column. The data type can be a scalar or complex. Scalar data types are integers, strings, doubles, booleans, and others. Complex data types are types such as arrays, rows, and others.

    " + } + }, + "documentation":"

    Contains the metadata for query results such as the column names, data types, and other attributes.

    " + }, + "ColumnInfoList":{ + "type":"list", + "member":{"shape":"ColumnInfo"} + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Unable to poll results for a cancelled query.

    ", + "exception":true + }, + "Datum":{ + "type":"structure", + "members":{ + "ScalarValue":{ + "shape":"ScalarValue", + "documentation":"

    Indicates if the data point is a scalar value such as integer, string, double, or boolean.

    " + }, + "TimeSeriesValue":{ + "shape":"TimeSeriesDataPointList", + "documentation":"

    Indicates if the data point is of timeseries data type.

    " + }, + "ArrayValue":{ + "shape":"DatumList", + "documentation":"

    Indicates if the data point is an array.

    " + }, + "RowValue":{ + "shape":"Row", + "documentation":"

    Indicates if the data point is a row.

    " + }, + "NullValue":{ + "shape":"NullableBoolean", + "documentation":"

    Indicates if the data point is null.

    " + } + }, + "documentation":"

    Datum represents a single data point in a query result.

    " + }, + "DatumList":{ + "type":"list", + "member":{"shape":"Datum"} + }, + "DescribeEndpointsRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeEndpointsResponse":{ + "type":"structure", + "required":["Endpoints"], + "members":{ + "Endpoints":{ + "shape":"Endpoints", + "documentation":"

    An Endpoints object is returned when a DescribeEndpoints request is made.

    " + } + } + }, + "Double":{"type":"double"}, + "Endpoint":{ + "type":"structure", + "required":[ + "Address", + "CachePeriodInMinutes" + ], + "members":{ + "Address":{ + "shape":"String", + "documentation":"

    An endpoint address.

    " + }, + "CachePeriodInMinutes":{ + "shape":"Long", + "documentation":"

    The TTL for the endpoint, in minutes.

    " + } + }, + "documentation":"

    Represents an available endpoint against which to make API calls, as well as the TTL for that endpoint.

    " + }, + "Endpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, + "ErrorMessage":{"type":"string"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Timestream was unable to fully process this request because of an internal server error.

    ", + "exception":true + }, + "InvalidEndpointException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The requested endpoint was invalid.

    ", + "exception":true + }, + "Long":{"type":"long"}, + "MaxQueryResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "NullableBoolean":{ + "type":"boolean", + "box":true + }, + "QueryExecutionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Timestream was unable to run the query successfully.

    ", + "exception":true + }, + "QueryId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9]+" + }, + "QueryRequest":{ + "type":"structure", + "required":["QueryString"], + "members":{ + "QueryString":{ + "shape":"QueryString", + "documentation":"

    The query to be executed by Timestream.

    " + }, + "ClientToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Unique, case-sensitive string of up to 64 ASCII characters that you specify when you make a Query request. Providing a ClientToken makes the call to Query idempotent, meaning that multiple identical calls have the same effect as one single call.

    Your query request will fail in the following cases:

    • If you submit a request with the same client token outside the 5-minute idempotency window.

    • If you submit a request with the same client token but a change in other parameters within the 5-minute idempotency window.

    After 4 hours, any request with the same client token is treated as a new request.

    ", + "idempotencyToken":true + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A pagination token passed to get a set of results.

    " + }, + "MaxRows":{ + "shape":"MaxQueryResults", + "documentation":"

    The total number of rows to return in the output. If the total number of rows available is more than the value specified, a NextToken is provided in the output. To resume pagination, provide the NextToken value in a subsequent request.
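    A minimal sketch of paging Query results with MaxRows and NextToken. It uses the queryPaginator helper that the SDK generates from the paginator definition added in this diff, which follows NextToken automatically; the query string and page size are placeholders.

```java
import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient;
import software.amazon.awssdk.services.timestreamquery.model.QueryRequest;

public class TimestreamQueryPaginationExample {
    public static void main(String[] args) {
        try (TimestreamQueryClient client = TimestreamQueryClient.create()) {
            QueryRequest request = QueryRequest.builder()
                    .queryString("SELECT * FROM \"myDatabase\".\"myTable\"")
                    .maxRows(100)  // rows per page
                    .build();

            // The generated paginator streams Rows across pages, following NextToken for us.
            client.queryPaginator(request).rows()
                    .forEach(row -> System.out.println(row.data()));
        }
    }
}
```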

    " + } + } + }, + "QueryResponse":{ + "type":"structure", + "required":[ + "QueryId", + "Rows", + "ColumnInfo" + ], + "members":{ + "QueryId":{ + "shape":"QueryId", + "documentation":"

    A unique ID for the given query.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A pagination token that can be used again on a Query call to get the next set of results.

    " + }, + "Rows":{ + "shape":"RowList", + "documentation":"

    The result set rows returned by the query.

    " + }, + "ColumnInfo":{ + "shape":"ColumnInfoList", + "documentation":"

    The column data types of the returned result set.

    " + }, + "QueryStatus":{ + "shape":"QueryStatus", + "documentation":"

    Information about the status of the query, including progress and bytes scanned.

    " + } + } + }, + "QueryStatus":{ + "type":"structure", + "members":{ + "ProgressPercentage":{ + "shape":"Double", + "documentation":"

    The progress of the query, expressed as a percentage.

    " + }, + "CumulativeBytesScanned":{ + "shape":"Long", + "documentation":"

    The amount of data scanned by the query in bytes. This is a cumulative sum and represents the total amount of bytes scanned since the query was started.

    " + }, + "CumulativeBytesMetered":{ + "shape":"Long", + "documentation":"

    The amount of data scanned by the query in bytes that you will be charged for. This is a cumulative sum and represents the total amount of data that you will be charged for since the query was started. The charge is applied only once and is either applied when the query completes execution or when the query is cancelled.

    " + } + }, + "documentation":"

    Information about the status of the query, including progress and bytes scanned.

    " + }, + "QueryString":{ + "type":"string", + "sensitive":true + }, + "Row":{ + "type":"structure", + "required":["Data"], + "members":{ + "Data":{ + "shape":"DatumList", + "documentation":"

    List of data points in a single row of the result set.

    " + } + }, + "documentation":"

    Represents a single row in the query results.

    " + }, + "RowList":{ + "type":"list", + "member":{"shape":"Row"} + }, + "ScalarType":{ + "type":"string", + "enum":[ + "VARCHAR", + "BOOLEAN", + "BIGINT", + "DOUBLE", + "TIMESTAMP", + "DATE", + "TIME", + "INTERVAL_DAY_TO_SECOND", + "INTERVAL_YEAR_TO_MONTH", + "UNKNOWN", + "INTEGER" + ] + }, + "ScalarValue":{"type":"string"}, + "ServiceErrorMessage":{"type":"string"}, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "exception":true + }, + "TimeSeriesDataPoint":{ + "type":"structure", + "required":[ + "Time", + "Value" + ], + "members":{ + "Time":{ + "shape":"Timestamp", + "documentation":"

    The timestamp when the measure value was collected.

    " + }, + "Value":{ + "shape":"Datum", + "documentation":"

    The measure value for the data point.

    " + } + }, + "documentation":"

    The timeseries datatype represents the values of a measure over time. A time series is an array of rows of timestamps and measure values, with rows sorted in ascending order of time. A TimeSeriesDataPoint is a single data point in the timeseries. It represents a tuple of (time, measure value) in a timeseries.

    " + }, + "TimeSeriesDataPointList":{ + "type":"list", + "member":{"shape":"TimeSeriesDataPoint"} + }, + "Timestamp":{"type":"string"}, + "Type":{ + "type":"structure", + "members":{ + "ScalarType":{ + "shape":"ScalarType", + "documentation":"

    Indicates if the column is of type string, integer, boolean, double, timestamp, date, time.

    " + }, + "ArrayColumnInfo":{ + "shape":"ColumnInfo", + "documentation":"

    Indicates if the column is an array.

    " + }, + "TimeSeriesMeasureValueColumnInfo":{ + "shape":"ColumnInfo", + "documentation":"

    Indicates if the column is a timeseries data type.

    " + }, + "RowColumnInfo":{ + "shape":"ColumnInfoList", + "documentation":"

    Indicates if the column is a row.

    " + } + }, + "documentation":"

    Contains the data type of a column in a query result set. The data type can be scalar or complex. The supported scalar data types are integers, boolean, string, double, timestamp, date, time, and intervals. The supported complex data types are arrays, rows, and timeseries.

    " + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Invalid or malformed request.

    ", + "exception":true + } + }, + "documentation":"

    " +} diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml new file mode 100644 index 000000000000..9e02714bb3a2 --- /dev/null +++ b/services/timestreamwrite/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + timestreamwrite + AWS Java SDK :: Services :: Timestream Write + The AWS Java SDK for Timestream Write module holds the client classes that are used for + communicating with Timestream Write. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.timestreamwrite + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/timestreamwrite/src/main/resources/codegen-resources/customization.config b/services/timestreamwrite/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..8175efc28858 --- /dev/null +++ b/services/timestreamwrite/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "allowEndpointOverrideForEndpointDiscoveryRequiredOperations": true +} \ No newline at end of file diff --git a/services/timestreamwrite/src/main/resources/codegen-resources/paginators-1.json b/services/timestreamwrite/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..45728c71e593 --- /dev/null +++ b/services/timestreamwrite/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,14 @@ +{ + "pagination": { + "ListDatabases": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTables": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json b/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..9ced833c8668 --- /dev/null +++ b/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1041 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-11-01", + "endpointPrefix":"ingest.timestream", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"Timestream Write", + "serviceFullName":"Amazon Timestream Write", + "serviceId":"Timestream Write", + "signatureVersion":"v4", + "signingName":"timestream", + "targetPrefix":"Timestream_20181101", + "uid":"timestream-write-2018-11-01" + }, + "operations":{ + "CreateDatabase":{ + "name":"CreateDatabase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDatabaseRequest"}, + "output":{"shape":"CreateDatabaseResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidEndpointException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a new Timestream database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to AWS managed KMS keys for more info. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

    ", + "endpointdiscovery":{"required":true} + }, + "CreateTable":{ + "name":"CreateTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTableRequest"}, + "output":{"shape":"CreateTableResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    The CreateTable operation adds a new table to an existing database in your account. In an AWS account, table names must be unique within each Region if they are in the same database. You may have identical table names in the same Region if the tables are in separate databases. While creating the table, you must specify the table name, database name, and the retention properties. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.
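    A minimal sketch of creating a database (encrypted with a Timestream managed KMS key, since no KmsKeyId is given) and a table with explicit retention properties, using the generated SDK v2 client; the database, table, and retention values are placeholders.

```java
import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient;
import software.amazon.awssdk.services.timestreamwrite.model.CreateDatabaseRequest;
import software.amazon.awssdk.services.timestreamwrite.model.CreateTableRequest;
import software.amazon.awssdk.services.timestreamwrite.model.RetentionProperties;

public class CreateDatabaseAndTableExample {
    public static void main(String[] args) {
        try (TimestreamWriteClient write = TimestreamWriteClient.create()) {
            // No KmsKeyId, so Timestream uses a managed KMS key in this account.
            write.createDatabase(CreateDatabaseRequest.builder()
                    .databaseName("myDatabase")
                    .build());

            write.createTable(CreateTableRequest.builder()
                    .databaseName("myDatabase")
                    .tableName("myTable")
                    .retentionProperties(RetentionProperties.builder()
                            .memoryStoreRetentionPeriodInHours(24L)    // memory store: 24 hours
                            .magneticStoreRetentionPeriodInDays(365L)  // magnetic store: 1 year
                            .build())
                    .build());
        }
    }
}
```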

    ", + "endpointdiscovery":{"required":true} + }, + "DeleteDatabase":{ + "name":"DeleteDatabase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDatabaseRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered.

    All tables in the database must be deleted first, or a ValidationException error will be thrown.

    Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.

    ", + "endpointdiscovery":{"required":true} + }, + "DeleteTable":{ + "name":"DeleteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTableRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered.

    Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.

    ", + "endpointdiscovery":{"required":true} + }, + "DescribeDatabase":{ + "name":"DescribeDatabase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDatabaseRequest"}, + "output":{"shape":"DescribeDatabaseResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Returns information about the database, including the database name, time that the database was created, and the total number of tables found within the database. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

    ", + "endpointdiscovery":{"required":true} + }, + "DescribeEndpoints":{ + "name":"DescribeEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEndpointsRequest"}, + "output":{"shape":"DescribeEndpointsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    DescribeEndpoints returns a list of available endpoints to make Timestream API calls against. This API is available through both Write and Query.

    Because Timestream’s SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, it is not recommended that you use this API unless:

    • Your application uses a programming language that does not yet have SDK support

    • You require better control over the client-side implementation

    For detailed information on how to use DescribeEndpoints, see The Endpoint Discovery Pattern and REST APIs.

    ", + "endpointoperation":true + }, + "DescribeTable":{ + "name":"DescribeTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTableRequest"}, + "output":{"shape":"DescribeTableResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Returns information about the table, including the table name, database name, retention duration of the memory store and the magnetic store. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

    ", + "endpointdiscovery":{"required":true} + }, + "ListDatabases":{ + "name":"ListDatabases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatabasesRequest"}, + "output":{"shape":"ListDatabasesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Returns a list of your Timestream databases. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

    ", + "endpointdiscovery":{"required":true} + }, + "ListTables":{ + "name":"ListTables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTablesRequest"}, + "output":{"shape":"ListTablesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Returns a list of tables, along with the name, status, and retention properties of each table.

    ", + "endpointdiscovery":{"required":true} + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    List all tags on a Timestream resource.

    ", + "endpointdiscovery":{"required":true} + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Associate a set of tags with a Timestream resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking.

    ", + "endpointdiscovery":{"required":true} + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Removes the association of tags from a Timestream resource.

    ", + "endpointdiscovery":{"required":true} + }, + "UpdateDatabase":{ + "name":"UpdateDatabase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDatabaseRequest"}, + "output":{"shape":"UpdateDatabaseResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Modifies the KMS key for an existing database. While updating the database, you must specify the database name and the identifier of the new KMS key to be used (KmsKeyId). If there are any concurrent UpdateDatabase requests, first writer wins.

    ", + "endpointdiscovery":{"required":true} + }, + "UpdateTable":{ + "name":"UpdateTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTableRequest"}, + "output":{"shape":"UpdateTableResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    Modifies the retention duration of the memory store and magnetic store for your Timestream table. Note that the change in retention duration takes effect immediately. For example, if the retention period of the memory store was initially set to 2 hours and then changed to 24 hours, the memory store will be capable of holding 24 hours of data, but will be populated with 24 hours of data 22 hours after this change was made. Timestream does not retrieve data from the magnetic store to populate the memory store.

    Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.
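    A minimal sketch of changing a table's retention with UpdateTable; per the note above, the new memory store window takes effect immediately but is only fully populated as new data is ingested. The names and retention values are placeholders.

```java
import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient;
import software.amazon.awssdk.services.timestreamwrite.model.RetentionProperties;
import software.amazon.awssdk.services.timestreamwrite.model.UpdateTableRequest;

public class UpdateTableRetentionExample {
    public static void main(String[] args) {
        try (TimestreamWriteClient write = TimestreamWriteClient.create()) {
            write.updateTable(UpdateTableRequest.builder()
                    .databaseName("myDatabase")
                    .tableName("myTable")
                    .retentionProperties(RetentionProperties.builder()
                            .memoryStoreRetentionPeriodInHours(24L)  // e.g. raised from 2 to 24 hours
                            .magneticStoreRetentionPeriodInDays(30L)
                            .build())
                    .build());
        }
    }
}
```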

    ", + "endpointdiscovery":{"required":true} + }, + "WriteRecords":{ + "name":"WriteRecords", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"WriteRecordsRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RejectedRecordsException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

    The WriteRecords operation enables you to write your time series data into Timestream. You can specify a single data point or a batch of data points to be inserted into the system. Timestream offers you a flexible schema that auto detects the column names and data types for your Timestream tables based on the dimension names and data types of the data points you specify when invoking writes into the database. Timestream supports eventual consistency read semantics. This means that when you query data immediately after writing a batch of data into Timestream, the query results might not reflect the results of a recently completed write operation. The results may also include some stale data. If you repeat the query request after a short time, the results should return the latest data. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.
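    A minimal sketch of writing a single data point with WriteRecords using the generated SDK v2 client; the database, table, dimension, and measure names are placeholders, and the timestamp is expressed in milliseconds since the epoch.

```java
import java.util.Collections;
import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient;
import software.amazon.awssdk.services.timestreamwrite.model.Dimension;
import software.amazon.awssdk.services.timestreamwrite.model.MeasureValueType;
import software.amazon.awssdk.services.timestreamwrite.model.Record;
import software.amazon.awssdk.services.timestreamwrite.model.TimeUnit;
import software.amazon.awssdk.services.timestreamwrite.model.WriteRecordsRequest;

public class WriteRecordsExample {
    public static void main(String[] args) {
        try (TimestreamWriteClient write = TimestreamWriteClient.create()) {
            Record cpuUtilization = Record.builder()
                    .dimensions(Dimension.builder().name("host").value("host-1").build())
                    .measureName("cpu_utilization")
                    .measureValue("42.5")
                    .measureValueType(MeasureValueType.DOUBLE)
                    .time(String.valueOf(System.currentTimeMillis()))  // time value plus unit
                    .timeUnit(TimeUnit.MILLISECONDS)
                    .build();

            write.writeRecords(WriteRecordsRequest.builder()
                    .databaseName("myDatabase")
                    .tableName("myTable")
                    .records(Collections.singletonList(cpuUtilization))
                    .build());
        }
    }
}
```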

    ", + "endpointdiscovery":{"required":true} + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You are not authorized to perform this action.

    ", + "exception":true + }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, + "ConflictException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Timestream was unable to process this request because it contains a resource that already exists.

    ", + "exception":true + }, + "CreateDatabaseRequest":{ + "type":"structure", + "required":["DatabaseName"], + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream database.

    " + }, + "KmsKeyId":{ + "shape":"StringValue2048", + "documentation":"

    The KMS key for the database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to AWS managed KMS keys for more info.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    A list of key-value pairs to label the database.

    " + } + } + }, + "CreateDatabaseResponse":{ + "type":"structure", + "members":{ + "Database":{ + "shape":"Database", + "documentation":"

    The newly created Timestream database.

    " + } + } + }, + "CreateTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream database.

    " + }, + "TableName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream table.

    " + }, + "RetentionProperties":{ + "shape":"RetentionProperties", + "documentation":"

    The duration for which your time series data must be stored in the memory store and the magnetic store.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    A list of key-value pairs to label the table.

    " + } + } + }, + "CreateTableResponse":{ + "type":"structure", + "members":{ + "Table":{ + "shape":"Table", + "documentation":"

    The newly created Timestream table.

    " + } + } + }, + "Database":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name that uniquely identifies this database.

    " + }, + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream database.

    " + }, + "TableCount":{ + "shape":"Long", + "documentation":"

    The total number of tables found within a Timestream database.

    " + }, + "KmsKeyId":{ + "shape":"StringValue2048", + "documentation":"

    The identifier of the KMS key used to encrypt the data stored in the database.

    " + }, + "CreationTime":{ + "shape":"Date", + "documentation":"

    The time when the database was created, calculated from the Unix epoch time.

    " + }, + "LastUpdatedTime":{ + "shape":"Date", + "documentation":"

    The last time that this database was updated.

    " + } + }, + "documentation":"

    A top level container for a table. Databases and tables are the fundamental management concepts in Amazon Timestream. All tables in a database are encrypted with the same KMS key.

    " + }, + "DatabaseList":{ + "type":"list", + "member":{"shape":"Database"} + }, + "Date":{"type":"timestamp"}, + "DeleteDatabaseRequest":{ + "type":"structure", + "required":["DatabaseName"], + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream database to be deleted.

    " + } + } + }, + "DeleteTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the database where the Timestream table is to be deleted.

    " + }, + "TableName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream table to be deleted.

    " + } + } + }, + "DescribeDatabaseRequest":{ + "type":"structure", + "required":["DatabaseName"], + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream database.

    " + } + } + }, + "DescribeDatabaseResponse":{ + "type":"structure", + "members":{ + "Database":{ + "shape":"Database", + "documentation":"

    The name of the Timestream database.

    " + } + } + }, + "DescribeEndpointsRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeEndpointsResponse":{ + "type":"structure", + "required":["Endpoints"], + "members":{ + "Endpoints":{ + "shape":"Endpoints", + "documentation":"

    An Endpoints object is returned when a DescribeEndpoints request is made.

    " + } + } + }, + "DescribeTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream database.

    " + }, + "TableName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream table.

    " + } + } + }, + "DescribeTableResponse":{ + "type":"structure", + "members":{ + "Table":{ + "shape":"Table", + "documentation":"

    The Timestream table.

    " + } + } + }, + "Dimension":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"StringValue256", + "documentation":"

    Dimension represents the metadata attributes of the time series. For example, the name and availability zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions.

    For constraints on Dimension names, see Naming Constraints.

    " + }, + "Value":{ + "shape":"StringValue2048", + "documentation":"

    The value of the dimension.

    " + }, + "DimensionValueType":{ + "shape":"DimensionValueType", + "documentation":"

    The data type of the dimension for the time series data point.

    " + } + }, + "documentation":"

    Dimension represents the metadata attributes of the time series. For example, the name and availability zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions.

    " + }, + "DimensionValueType":{ + "type":"string", + "enum":["VARCHAR"] + }, + "Dimensions":{ + "type":"list", + "member":{"shape":"Dimension"}, + "max":128 + }, + "Endpoint":{ + "type":"structure", + "required":[ + "Address", + "CachePeriodInMinutes" + ], + "members":{ + "Address":{ + "shape":"String", + "documentation":"

    An endpoint address.

    " + }, + "CachePeriodInMinutes":{ + "shape":"Long", + "documentation":"

    The TTL for the endpoint, in minutes.

    " + } + }, + "documentation":"

    Represents an available endpoint against which to make API calls, as well as the TTL for that endpoint.

    " + }, + "Endpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, + "ErrorMessage":{"type":"string"}, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Timestream was unable to fully process this request because of an internal server error.

    ", + "exception":true, + "fault":true + }, + "InvalidEndpointException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The requested endpoint was invalid.

    ", + "exception":true + }, + "ListDatabasesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"

    The pagination token. To resume pagination, provide the NextToken value as argument of a subsequent API invocation.

    " + }, + "MaxResults":{ + "shape":"PaginationLimit", + "documentation":"

    The total number of items to return in the output. If the total number of items available is more than the value specified, a NextToken is provided in the output. To resume pagination, provide the NextToken value as argument of a subsequent API invocation.

    " + } + } + }, + "ListDatabasesResponse":{ + "type":"structure", + "members":{ + "Databases":{ + "shape":"DatabaseList", + "documentation":"

    A list of database names.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The pagination token. This parameter is returned when the response is truncated.

    " + } + } + }, + "ListTablesRequest":{ + "type":"structure", + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream database.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The pagination token. To resume pagination, provide the NextToken value as argument of a subsequent API invocation.

    " + }, + "MaxResults":{ + "shape":"PaginationLimit", + "documentation":"

    The total number of items to return in the output. If the total number of items available is more than the value specified, a NextToken is provided in the output. To resume pagination, provide the NextToken value as argument of a subsequent API invocation.

    " + } + } + }, + "ListTablesResponse":{ + "type":"structure", + "members":{ + "Tables":{ + "shape":"TableList", + "documentation":"

    A list of tables.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The pagination token. This parameter is returned when the response is truncated.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    The Timestream resource with tags to be listed. This value is an Amazon Resource Name (ARN).

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags currently associated with the Timestream resource.

    " + } + } + }, + "Long":{"type":"long"}, + "MagneticStoreRetentionPeriodInDays":{ + "type":"long", + "max":73000, + "min":1 + }, + "MeasureValueType":{ + "type":"string", + "enum":[ + "DOUBLE", + "BIGINT", + "VARCHAR", + "BOOLEAN" + ] + }, + "MemoryStoreRetentionPeriodInHours":{ + "type":"long", + "max":8766, + "min":1 + }, + "PaginationLimit":{ + "type":"integer", + "box":true, + "max":20, + "min":1 + }, + "Record":{ + "type":"structure", + "members":{ + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

    Contains the list of dimensions for time series data points.

    " + }, + "MeasureName":{ + "shape":"StringValue256", + "documentation":"

    Measure represents the data attribute of the time series. For example, the CPU utilization of an EC2 instance or the RPM of a wind turbine are measures.

    " + }, + "MeasureValue":{ + "shape":"StringValue2048", + "documentation":"

    Contains the measure value for the time series data point.

    " + }, + "MeasureValueType":{ + "shape":"MeasureValueType", + "documentation":"

    Contains the data type of the measure value for the time series data point.

    " + }, + "Time":{ + "shape":"StringValue256", + "documentation":"

    Contains the time at which the measure value for the data point was collected. The time value plus the unit provides the time elapsed since the epoch. For example, if the time value is 12345 and the unit is ms, then 12345 ms have elapsed since the epoch.

    " + }, + "TimeUnit":{ + "shape":"TimeUnit", + "documentation":"

    The granularity of the timestamp unit. It indicates if the time value is in seconds, milliseconds, nanoseconds or other supported values.

    " + }, + "Version":{ + "shape":"RecordVersion", + "documentation":"

    A 64-bit attribute used for record updates. Write requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, Version will still be updated. The default value is 1.

    ", + "box":true + } + }, + "documentation":"

    Record represents a time series data point being written into Timestream. Each record contains an array of dimensions. Dimensions represent the metadata attributes of a time series data point, such as the instance name or availability zone of an EC2 instance. A record also contains the measure name, which is the name of the measure being collected (for example, the CPU utilization of an EC2 instance), along with the measure value and the value type, which is the data type of the measure value. In addition, the record contains the timestamp of when the measure was collected and the timestamp unit, which represents the granularity of the timestamp.

    " + }, + "RecordIndex":{"type":"integer"}, + "RecordVersion":{"type":"long"}, + "Records":{ + "type":"list", + "member":{"shape":"Record"}, + "max":100, + "min":1 + }, + "RejectedRecord":{ + "type":"structure", + "members":{ + "RecordIndex":{ + "shape":"RecordIndex", + "documentation":"

    The index of the record in the input request for WriteRecords. Indexes begin with 0.

    " + }, + "Reason":{ + "shape":"ErrorMessage", + "documentation":"

    The reason why a record was not successfully inserted into Timestream. Possible causes of failure include:

    • Records with duplicate data where there are multiple records with the same dimensions, timestamps, and measure names but different measure values.

    • Records with timestamps that lie outside the retention duration of the memory store

    When the retention window is updated, you will receive a RejectedRecords exception if you immediately try to ingest data within the new window. To avoid a RejectedRecords exception, wait until the duration of the new window has elapsed before ingesting new data. For further information, see Best Practices for Configuring Timestream and the explanation of how storage works in Timestream.

    • Records with dimensions or measures that exceed the Timestream defined limits.

    For more information, see Access Management in the Timestream Developer Guide.

    " + }, + "ExistingVersion":{ + "shape":"RecordVersion", + "documentation":"

    The existing version of the record. This value is populated in scenarios where an identical record exists with a higher version than the version in the write request.

    ", + "box":true + } + }, + "documentation":"

    Records that were not successfully inserted into Timestream due to data validation issues that must be resolved prior to reinserting time series data into the system.

    " + }, + "RejectedRecords":{ + "type":"list", + "member":{"shape":"RejectedRecord"} + }, + "RejectedRecordsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "RejectedRecords":{"shape":"RejectedRecords"} + }, + "documentation":"

    WriteRecords would throw this exception in the following cases:

    • Records with duplicate data where there are multiple records with the same dimensions, timestamps, and measure names but different measure values.

    • Records with timestamps that lie outside the retention duration of the memory store

    • Records with dimensions or measures that exceed the Timestream defined limits.

    For more information, see Access Management in the Timestream Developer Guide.
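    A minimal sketch of catching RejectedRecordsException and inspecting which records were rejected and why; `request` is assumed to be a WriteRecordsRequest built as in the earlier WriteRecords sketch.

```java
import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient;
import software.amazon.awssdk.services.timestreamwrite.model.RejectedRecordsException;
import software.amazon.awssdk.services.timestreamwrite.model.WriteRecordsRequest;

public class RejectedRecordsHandlingExample {
    static void writeWithDiagnostics(TimestreamWriteClient write, WriteRecordsRequest request) {
        try {
            write.writeRecords(request);
        } catch (RejectedRecordsException e) {
            // Each entry names the zero-based index of the offending record and the reason.
            e.rejectedRecords().forEach(rejected ->
                    System.err.printf("record %d rejected: %s (existing version: %s)%n",
                            rejected.recordIndex(), rejected.reason(), rejected.existingVersion()));
        }
    }
}
```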

    ", + "exception":true + }, + "ResourceName":{ + "type":"string", + "max":64, + "min":3, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The operation tried to access a nonexistent resource. The resource might not be specified correctly, or its status might not be ACTIVE.

    ", + "exception":true + }, + "RetentionProperties":{ + "type":"structure", + "required":[ + "MemoryStoreRetentionPeriodInHours", + "MagneticStoreRetentionPeriodInDays" + ], + "members":{ + "MemoryStoreRetentionPeriodInHours":{ + "shape":"MemoryStoreRetentionPeriodInHours", + "documentation":"

    The duration for which data must be stored in the memory store.

    " + }, + "MagneticStoreRetentionPeriodInDays":{ + "shape":"MagneticStoreRetentionPeriodInDays", + "documentation":"

    The duration for which data must be stored in the magnetic store.

    " + } + }, + "documentation":"

    Retention properties contain the duration for which your time series data must be stored in the magnetic store and the memory store.

    " + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The instance quota of the resource has been exceeded for this account.

    ", + "exception":true + }, + "String":{"type":"string"}, + "StringValue2048":{ + "type":"string", + "max":2048, + "min":1 + }, + "StringValue256":{ + "type":"string", + "max":256, + "min":1 + }, + "Table":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name that uniquely identifies this table.

    " + }, + "TableName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream table.

    " + }, + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream database that contains this table.

    " + }, + "TableStatus":{ + "shape":"TableStatus", + "documentation":"

    The current state of the table:

    • DELETING - The table is being deleted.

    • ACTIVE - The table is ready for use.

    " + }, + "RetentionProperties":{ + "shape":"RetentionProperties", + "documentation":"

    The retention duration for the memory store and magnetic store.

    " + }, + "CreationTime":{ + "shape":"Date", + "documentation":"

    The time when the Timestream table was created.

    " + }, + "LastUpdatedTime":{ + "shape":"Date", + "documentation":"

    The time when the Timestream table was last updated.

    " + } + }, + "documentation":"

    Table represents a database table in Timestream. Tables contain one or more related time series. You can modify the retention duration of the memory store and the magnetic store for a table.

    " + }, + "TableList":{ + "type":"list", + "member":{"shape":"Table"} + }, + "TableStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    The key of the tag. Tag keys are case sensitive.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    The value of the tag. Tag values are case-sensitive and can be null.

    " + } + }, + "documentation":"

    A tag is a label that you assign to a Timestream database and/or table. Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize databases and/or tables, for example, by purpose, owner, or environment.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    Identifies the Timestream resource to which tags should be added. This value is an Amazon Resource Name (ARN).

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags to be assigned to the Timestream resource.
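
    A short hedged sketch of adding a tag through this request. The ARN and tag values are placeholders, and accessor names follow the ResourceARN, Key, and Value members defined in this model:

```java
// Sketch only: tag a Timestream resource. The ARN and tag values are placeholders.
import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient;
import software.amazon.awssdk.services.timestreamwrite.model.Tag;
import software.amazon.awssdk.services.timestreamwrite.model.TagResourceRequest;

final class TagSketch {
    static void tag(TimestreamWriteClient client, String resourceArn) {
        client.tagResource(TagResourceRequest.builder()
                .resourceARN(resourceArn)   // accessor name mirrors the ResourceARN member
                .tags(Tag.builder().key("environment").value("production").build())
                .build());
    }
}
```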

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Too many requests were made by a user exceeding service quotas. The request was throttled.

    ", + "exception":true + }, + "TimeUnit":{ + "type":"string", + "enum":[ + "MILLISECONDS", + "SECONDS", + "MICROSECONDS", + "NANOSECONDS" + ] + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    The Timestream resource that the tags will be removed from. This value is an Amazon Resource Name (ARN).

    " + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    A list of tag keys. Existing tags of the resource whose keys are members of this list will be removed from the Timestream resource.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDatabaseRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "KmsKeyId" + ], + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the database.

    " + }, + "KmsKeyId":{ + "shape":"StringValue2048", + "documentation":"

    The identifier of the new KMS key (KmsKeyId) to be used to encrypt the data stored in the database. If the KmsKeyId currently registered with the database is the same as the KmsKeyId in the request, there will not be any update.

    You can specify the KmsKeyId using any of the following:

    • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

    • Key ARN: arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

    • Alias name: alias/ExampleAlias

    • Alias ARN: arn:aws:kms:us-east-1:111122223333:alias/ExampleAlias
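
    Any of the four identifier formats above can be passed as the KmsKeyId string. A hedged sketch using the alias form (the database name is a hypothetical placeholder; the alias is the example shown above):

```java
// Sketch only: switch the database to a new KMS key identified by its alias.
// "myDatabase" is a hypothetical placeholder.
import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient;
import software.amazon.awssdk.services.timestreamwrite.model.UpdateDatabaseRequest;

final class UpdateDatabaseSketch {
    static void rotateKey(TimestreamWriteClient client) {
        client.updateDatabase(UpdateDatabaseRequest.builder()
                .databaseName("myDatabase")
                .kmsKeyId("alias/ExampleAlias")   // alias form from the list above
                .build());
    }
}
```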

    " + } + } + }, + "UpdateDatabaseResponse":{ + "type":"structure", + "members":{ + "Database":{"shape":"Database"} + } + }, + "UpdateTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "RetentionProperties" + ], + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream database.

    " + }, + "TableName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream table.

    " + }, + "RetentionProperties":{ + "shape":"RetentionProperties", + "documentation":"

    The retention duration of the memory store and the magnetic store.

    " + } + } + }, + "UpdateTableResponse":{ + "type":"structure", + "members":{ + "Table":{ + "shape":"Table", + "documentation":"

    The updated Timestream table.

    " + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Invalid or malformed request.

    ", + "exception":true + }, + "WriteRecordsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "Records" + ], + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream database.

    " + }, + "TableName":{ + "shape":"ResourceName", + "documentation":"

    The name of the Timestream table.

    " + }, + "CommonAttributes":{ + "shape":"Record", + "documentation":"

    A record containing the common measure and dimension attributes shared across all the records in the request. The measure and dimension attributes specified here will be merged with the measure and dimension attributes in the records object when the data is written into Timestream, as illustrated in the sketch below.

    " + }, + "Records":{ + "shape":"Records", + "documentation":"

    An array of records containing the unique dimension and measure attributes for each time series data point.
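
    To illustrate how CommonAttributes merge with per-record attributes, here is a hedged sketch. The dimension, measure, database, and table names are hypothetical placeholders, and generated class names come from this model:

```java
// Sketch only: attributes shared by every data point go in CommonAttributes,
// while each Record carries only what differs. All names below are placeholders.
import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient;
import software.amazon.awssdk.services.timestreamwrite.model.Dimension;
import software.amazon.awssdk.services.timestreamwrite.model.MeasureValueType;
import software.amazon.awssdk.services.timestreamwrite.model.Record;
import software.amazon.awssdk.services.timestreamwrite.model.TimeUnit;
import software.amazon.awssdk.services.timestreamwrite.model.WriteRecordsRequest;

final class WriteRecordsSketch {
    static void write(TimestreamWriteClient client) {
        Record common = Record.builder()
                .dimensions(Dimension.builder().name("host").value("host-1").build())
                .measureValueType(MeasureValueType.DOUBLE)
                .timeUnit(TimeUnit.MILLISECONDS)
                .build();
        Record point = Record.builder()
                .measureName("cpu_utilization")
                .measureValue("13.5")
                .time(String.valueOf(System.currentTimeMillis()))
                .build();
        client.writeRecords(WriteRecordsRequest.builder()
                .databaseName("myDatabase")
                .tableName("myTable")
                .commonAttributes(common)   // merged into every record below
                .records(point)
                .build());
    }
}
```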

    " + } + } + } + }, + "documentation":"

    Amazon Timestream is a fast, scalable, fully managed time series database service that makes it easy to store and analyze trillions of time series data points per day. With Timestream, you can easily store and analyze IoT sensor data to derive insights from your IoT applications. You can analyze industrial telemetry to streamline equipment management and maintenance. You can also store and analyze log data and metrics to improve the performance and availability of your applications. Timestream is built from the ground up to effectively ingest, process, and store time series data. It organizes data to optimize query processing. It automatically scales based on the volume of data ingested and on the query volume to ensure you receive optimal performance while inserting and querying data. As your data grows over time, Timestream’s adaptive query processing engine spans across storage tiers to provide fast analysis while reducing costs.

    " +} diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index 366cba32a0ec..c0f53842d9f4 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + wafv2 + AWS Java SDK :: Services :: WAFV2 + The AWS Java SDK for WAFV2 module holds the client classes that are used for + communicating with WAFV2. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.wafv2 + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/wafv2/src/main/resources/codegen-resources/paginators-1.json b/services/wafv2/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/wafv2/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/wafv2/src/main/resources/codegen-resources/service-2.json b/services/wafv2/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..72470f4da7d6 --- /dev/null +++ b/services/wafv2/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3818 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-07-29", + "endpointPrefix":"wafv2", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"WAFV2", + "serviceFullName":"AWS WAFV2", + "serviceId":"WAFV2", + "signatureVersion":"v4", + "targetPrefix":"AWSWAF_20190729", + "uid":"wafv2-2019-07-29" + }, + "operations":{ + "AssociateWebACL":{ + "name":"AssociateWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateWebACLRequest"}, + "output":{"shape":"AssociateWebACLResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFUnavailableEntityException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Associates a Web ACL with a regional application resource, to protect the resource. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    For AWS CloudFront, don't use this call. Instead, use your CloudFront distribution configuration. To associate a Web ACL, in the CloudFront call UpdateDistribution, set the web ACL ID to the Amazon Resource Name (ARN) of the Web ACL. For information, see UpdateDistribution.
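
    A hedged sketch of the regional association described above. Both ARNs are placeholders, and the WAFV2 class, method, and accessor names are written exactly as they appear in this model (for example WebACLArn), so the generated Java casing may differ:

```java
// Sketch only: associate a web ACL with a regional resource such as an ALB.
// Both ARNs are hypothetical placeholders supplied by the caller.
import software.amazon.awssdk.services.wafv2.Wafv2Client;
import software.amazon.awssdk.services.wafv2.model.AssociateWebACLRequest;

final class AssociateSketch {
    static void associate(Wafv2Client waf, String webAclArn, String albArn) {
        waf.associateWebACL(AssociateWebACLRequest.builder()
                .webACLArn(webAclArn)     // accessor mirrors the WebACLArn member
                .resourceArn(albArn)      // accessor mirrors the ResourceArn member
                .build());
    }
}
```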

    " + }, + "CheckCapacity":{ + "name":"CheckCapacity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CheckCapacityRequest"}, + "output":{"shape":"CheckCapacityResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFLimitsExceededException"}, + {"shape":"WAFInvalidResourceException"}, + {"shape":"WAFUnavailableEntityException"}, + {"shape":"WAFSubscriptionNotFoundException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Returns the web ACL capacity unit (WCU) requirements for a specified scope and set of rules. You can use this to check the capacity requirements for the rules you want to use in a RuleGroup or WebACL.

    AWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.

    " + }, + "CreateIPSet":{ + "name":"CreateIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateIPSetRequest"}, + "output":{"shape":"CreateIPSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFDuplicateItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFLimitsExceededException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Creates an IPSet, which you use to identify web requests that originate from specific IP addresses or ranges of IP addresses. For example, if you're receiving a lot of requests from a range of IP addresses, you can configure AWS WAF to block them using an IPSet that lists those IP addresses.

    " + }, + "CreateRegexPatternSet":{ + "name":"CreateRegexPatternSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRegexPatternSetRequest"}, + "output":{"shape":"CreateRegexPatternSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFDuplicateItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFLimitsExceededException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Creates a RegexPatternSet, which you reference in a RegexPatternSetReferenceStatement, to have AWS WAF inspect a web request component for the specified patterns.

    " + }, + "CreateRuleGroup":{ + "name":"CreateRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRuleGroupRequest"}, + "output":{"shape":"CreateRuleGroupResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFDuplicateItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFLimitsExceededException"}, + {"shape":"WAFUnavailableEntityException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFSubscriptionNotFoundException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Creates a RuleGroup per the specifications provided.

    A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL. When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.

    " + }, + "CreateWebACL":{ + "name":"CreateWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWebACLRequest"}, + "output":{"shape":"CreateWebACLResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFDuplicateItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFLimitsExceededException"}, + {"shape":"WAFInvalidResourceException"}, + {"shape":"WAFUnavailableEntityException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFSubscriptionNotFoundException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Creates a WebACL per the specifications provided.

    A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API.

    " + }, + "DeleteFirewallManagerRuleGroups":{ + "name":"DeleteFirewallManagerRuleGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFirewallManagerRuleGroupsRequest"}, + "output":{"shape":"DeleteFirewallManagerRuleGroupsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    Deletes all rule groups that are managed by AWS Firewall Manager for the specified web ACL.

    You can only use this if ManagedByFirewallManager is false in the specified WebACL.

    " + }, + "DeleteIPSet":{ + "name":"DeleteIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIPSetRequest"}, + "output":{"shape":"DeleteIPSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFAssociatedItemException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Deletes the specified IPSet.

    " + }, + "DeleteLoggingConfiguration":{ + "name":"DeleteLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLoggingConfigurationRequest"}, + "output":{"shape":"DeleteLoggingConfigurationResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Deletes the LoggingConfiguration from the specified web ACL.

    " + }, + "DeletePermissionPolicy":{ + "name":"DeletePermissionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePermissionPolicyRequest"}, + "output":{"shape":"DeletePermissionPolicyResponse"}, + "errors":[ + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"} + ], + "documentation":"

    Permanently deletes an IAM policy from the specified rule group.

    You must be the owner of the rule group to perform this operation.

    " + }, + "DeleteRegexPatternSet":{ + "name":"DeleteRegexPatternSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRegexPatternSetRequest"}, + "output":{"shape":"DeleteRegexPatternSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFAssociatedItemException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Deletes the specified RegexPatternSet.

    " + }, + "DeleteRuleGroup":{ + "name":"DeleteRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleGroupRequest"}, + "output":{"shape":"DeleteRuleGroupResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFAssociatedItemException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Deletes the specified RuleGroup.

    " + }, + "DeleteWebACL":{ + "name":"DeleteWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteWebACLRequest"}, + "output":{"shape":"DeleteWebACLResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFAssociatedItemException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Deletes the specified WebACL.

    You can only use this if ManagedByFirewallManager is false in the specified WebACL.

    " + }, + "DescribeManagedRuleGroup":{ + "name":"DescribeManagedRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeManagedRuleGroupRequest"}, + "output":{"shape":"DescribeManagedRuleGroupResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidResourceException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Provides high-level information for a managed rule group, including descriptions of the rules.

    " + }, + "DisassociateWebACL":{ + "name":"DisassociateWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateWebACLRequest"}, + "output":{"shape":"DisassociateWebACLResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Disassociates a Web ACL from a regional application resource. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    For AWS CloudFront, don't use this call. Instead, use your CloudFront distribution configuration. To disassociate a Web ACL, provide an empty web ACL ID in the CloudFront call UpdateDistribution. For information, see UpdateDistribution.

    " + }, + "GetIPSet":{ + "name":"GetIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIPSetRequest"}, + "output":{"shape":"GetIPSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves the specified IPSet.

    " + }, + "GetLoggingConfiguration":{ + "name":"GetLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLoggingConfigurationRequest"}, + "output":{"shape":"GetLoggingConfigurationResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Returns the LoggingConfiguration for the specified web ACL.

    " + }, + "GetPermissionPolicy":{ + "name":"GetPermissionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPermissionPolicyRequest"}, + "output":{"shape":"GetPermissionPolicyResponse"}, + "errors":[ + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"} + ], + "documentation":"

    Returns the IAM policy that is attached to the specified rule group.

    You must be the owner of the rule group to perform this operation.

    " + }, + "GetRateBasedStatementManagedKeys":{ + "name":"GetRateBasedStatementManagedKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRateBasedStatementManagedKeysRequest"}, + "output":{"shape":"GetRateBasedStatementManagedKeysResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves the keys that are currently blocked by a rate-based rule. The maximum number of managed keys that can be blocked for a single rate-based rule is 10,000. If more than 10,000 addresses exceed the rate limit, those with the highest rates are blocked.

    " + }, + "GetRegexPatternSet":{ + "name":"GetRegexPatternSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRegexPatternSetRequest"}, + "output":{"shape":"GetRegexPatternSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves the specified RegexPatternSet.

    " + }, + "GetRuleGroup":{ + "name":"GetRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRuleGroupRequest"}, + "output":{"shape":"GetRuleGroupResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves the specified RuleGroup.

    " + }, + "GetSampledRequests":{ + "name":"GetSampledRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSampledRequestsRequest"}, + "output":{"shape":"GetSampledRequestsResponse"}, + "errors":[ + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 500 requests, and you can specify any time range in the previous three hours.

    GetSampledRequests returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, GetSampledRequests returns an updated time range. This new time range indicates the actual period during which AWS WAF selected the requests in the sample.
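
    A hedged sketch of sampling the last hour of traffic for one rule. The web ACL ARN and rule metric name are placeholders, and the member names are taken from this model, so generated Java casing may differ:

```java
// Sketch only: request up to 100 sampled requests from the last hour.
// The web ACL ARN and rule metric name are hypothetical placeholders.
import java.time.Duration;
import java.time.Instant;
import software.amazon.awssdk.services.wafv2.Wafv2Client;
import software.amazon.awssdk.services.wafv2.model.GetSampledRequestsRequest;
import software.amazon.awssdk.services.wafv2.model.GetSampledRequestsResponse;
import software.amazon.awssdk.services.wafv2.model.Scope;
import software.amazon.awssdk.services.wafv2.model.TimeWindow;

final class SampledRequestsSketch {
    static GetSampledRequestsResponse sample(Wafv2Client waf, String webAclArn) {
        Instant end = Instant.now();
        return waf.getSampledRequests(GetSampledRequestsRequest.builder()
                .webAclArn(webAclArn)
                .ruleMetricName("myRuleMetricName")   // placeholder metric name
                .scope(Scope.REGIONAL)
                .timeWindow(TimeWindow.builder()
                        .startTime(end.minus(Duration.ofHours(1)))
                        .endTime(end)
                        .build())
                .maxItems(100L)
                .build());
    }
}
```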

    " + }, + "GetWebACL":{ + "name":"GetWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetWebACLRequest"}, + "output":{"shape":"GetWebACLResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves the specified WebACL.

    " + }, + "GetWebACLForResource":{ + "name":"GetWebACLForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetWebACLForResourceRequest"}, + "output":{"shape":"GetWebACLForResourceResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFUnavailableEntityException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves the WebACL for the specified resource.

    " + }, + "ListAvailableManagedRuleGroups":{ + "name":"ListAvailableManagedRuleGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAvailableManagedRuleGroupsRequest"}, + "output":{"shape":"ListAvailableManagedRuleGroupsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves an array of managed rule groups that are available for you to use. This list includes all AWS Managed Rules rule groups and the AWS Marketplace managed rule groups that you're subscribed to.

    " + }, + "ListIPSets":{ + "name":"ListIPSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIPSetsRequest"}, + "output":{"shape":"ListIPSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves an array of IPSetSummary objects for the IP sets that you manage.

    " + }, + "ListLoggingConfigurations":{ + "name":"ListLoggingConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLoggingConfigurationsRequest"}, + "output":{"shape":"ListLoggingConfigurationsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves an array of your LoggingConfiguration objects.

    " + }, + "ListRegexPatternSets":{ + "name":"ListRegexPatternSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRegexPatternSetsRequest"}, + "output":{"shape":"ListRegexPatternSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves an array of RegexPatternSetSummary objects for the regex pattern sets that you manage.

    " + }, + "ListResourcesForWebACL":{ + "name":"ListResourcesForWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListResourcesForWebACLRequest"}, + "output":{"shape":"ListResourcesForWebACLResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves an array of the Amazon Resource Names (ARNs) for the regional resources that are associated with the specified web ACL. If you want the list of AWS CloudFront resources, use the AWS CloudFront call ListDistributionsByWebACLId.

    " + }, + "ListRuleGroups":{ + "name":"ListRuleGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRuleGroupsRequest"}, + "output":{"shape":"ListRuleGroupsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves an array of RuleGroupSummary objects for the rule groups that you manage.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves the TagInfoForResource for the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

    You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

    " + }, + "ListWebACLs":{ + "name":"ListWebACLs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListWebACLsRequest"}, + "output":{"shape":"ListWebACLsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Retrieves an array of WebACLSummary objects for the web ACLs that you manage.

    " + }, + "PutLoggingConfiguration":{ + "name":"PutLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutLoggingConfigurationRequest"}, + "output":{"shape":"PutLoggingConfigurationResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFServiceLinkedRoleErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFLimitsExceededException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Enables the specified LoggingConfiguration, to start logging from a web ACL, according to the configuration provided.

    You can access information about all traffic that AWS WAF inspects using the following steps:

    1. Create an Amazon Kinesis Data Firehose.

      Create the data firehose with a PUT source and in the Region that you are operating. If you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

      Give the data firehose a name that starts with the prefix aws-waf-logs-. For example, aws-waf-logs-us-east-2-analytics.

      Do not create the data firehose using a Kinesis stream as your source.

    2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

    When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.
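
    Step 2 above translates to a single call. A hedged sketch, using the example delivery-stream name from step 1 with a placeholder account id and a caller-supplied web ACL ARN (generated names may differ):

```java
// Sketch only: point web ACL logging at an existing aws-waf-logs- Kinesis Data Firehose.
// The account id and web ACL ARN are hypothetical placeholders.
import software.amazon.awssdk.services.wafv2.Wafv2Client;
import software.amazon.awssdk.services.wafv2.model.LoggingConfiguration;
import software.amazon.awssdk.services.wafv2.model.PutLoggingConfigurationRequest;

final class LoggingSketch {
    static void enableLogging(Wafv2Client waf, String webAclArn) {
        waf.putLoggingConfiguration(PutLoggingConfigurationRequest.builder()
                .loggingConfiguration(LoggingConfiguration.builder()
                        .resourceArn(webAclArn)
                        .logDestinationConfigs(
                                "arn:aws:firehose:us-east-2:111122223333:deliverystream/aws-waf-logs-us-east-2-analytics")
                        .build())
                .build());
    }
}
```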

    " + }, + "PutPermissionPolicy":{ + "name":"PutPermissionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutPermissionPolicyRequest"}, + "output":{"shape":"PutPermissionPolicyResponse"}, + "errors":[ + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidPermissionPolicyException"} + ], + "documentation":"

    Attaches an IAM policy to the specified resource. Use this to share a rule group across accounts.

    You must be the owner of the rule group to perform this operation.

    This action is subject to the following restrictions:

    • You can attach only one policy with each PutPermissionPolicy request.

    • The ARN in the request must be a valid WAF RuleGroup ARN and the rule group must exist in the same region.

    • The user making the request must be the owner of the rule group.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFLimitsExceededException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Associates tags with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

    You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFTagOperationException"}, + {"shape":"WAFTagOperationInternalErrorException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Disassociates tags from an AWS resource. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be \"customer\" and the tag value might be \"companyA.\" You can specify one or more tags to add to each AWS resource, and you can add up to 50 tags to each resource.

    " + }, + "UpdateIPSet":{ + "name":"UpdateIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateIPSetRequest"}, + "output":{"shape":"UpdateIPSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFDuplicateItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFLimitsExceededException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Updates the specified IPSet.

    " + }, + "UpdateRegexPatternSet":{ + "name":"UpdateRegexPatternSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRegexPatternSetRequest"}, + "output":{"shape":"UpdateRegexPatternSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFDuplicateItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFLimitsExceededException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Updates the specified RegexPatternSet.

    " + }, + "UpdateRuleGroup":{ + "name":"UpdateRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRuleGroupRequest"}, + "output":{"shape":"UpdateRuleGroupResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFDuplicateItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFLimitsExceededException"}, + {"shape":"WAFUnavailableEntityException"}, + {"shape":"WAFSubscriptionNotFoundException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Updates the specified RuleGroup.

    A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL. When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.

    " + }, + "UpdateWebACL":{ + "name":"UpdateWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateWebACLRequest"}, + "output":{"shape":"UpdateWebACLResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFDuplicateItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFLimitsExceededException"}, + {"shape":"WAFInvalidResourceException"}, + {"shape":"WAFUnavailableEntityException"}, + {"shape":"WAFSubscriptionNotFoundException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Updates the specified WebACL.

    A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API.

    " + } + }, + "shapes":{ + "Action":{"type":"string"}, + "AllQueryArguments":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    All query arguments of a web request.

    This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

    " + }, + "AllowAction":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Specifies that AWS WAF should allow requests.

    This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

    " + }, + "AndStatement":{ + "type":"structure", + "required":["Statements"], + "members":{ + "Statements":{ + "shape":"Statements", + "documentation":"

    The statements to combine with AND logic. You can use any statements that can be nested.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement.

    " + }, + "AssociateWebACLRequest":{ + "type":"structure", + "required":[ + "WebACLArn", + "ResourceArn" + ], + "members":{ + "WebACLArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the Web ACL that you want to associate with the resource.

    " + }, + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource to associate with the web ACL.

    The ARN must be in one of the following formats:

    • For an Application Load Balancer: arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id

    • For an API Gateway REST API: arn:aws:apigateway:region::/restapis/api-id/stages/stage-name

    • For an AppSync GraphQL API: arn:aws:appsync:region:account-id:apis/GraphQLApiId

    " + } + } + }, + "AssociateWebACLResponse":{ + "type":"structure", + "members":{ + } + }, + "BlockAction":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Specifies that AWS WAF should block requests.

    This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

    " + }, + "Body":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    The body of a web request. This immediately follows the request headers.

    This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

    " + }, + "Boolean":{"type":"boolean"}, + "ByteMatchStatement":{ + "type":"structure", + "required":[ + "SearchString", + "FieldToMatch", + "TextTransformations", + "PositionalConstraint" + ], + "members":{ + "SearchString":{ + "shape":"SearchString", + "documentation":"

    A string value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in FieldToMatch. The maximum length of the value is 50 bytes.

    Valid values depend on the component that you specify for inspection in FieldToMatch:

    • Method: The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.

    • UriPath: The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg.

    If SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

    If you're using the AWS WAF API

    Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.

    For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90, in the value of SearchString.

    If you're using the AWS CLI or one of the AWS SDKs

    The value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.
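
    Continuing the User-Agent/BadBot example above, a hedged SDK sketch. The SDK accepts SearchString as bytes and handles the base64 encoding for you; class names follow this model and generated casing may differ:

```java
// Sketch only: match requests whose User-Agent header contains "BadBot".
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.wafv2.model.ByteMatchStatement;
import software.amazon.awssdk.services.wafv2.model.FieldToMatch;
import software.amazon.awssdk.services.wafv2.model.PositionalConstraint;
import software.amazon.awssdk.services.wafv2.model.SingleHeader;
import software.amazon.awssdk.services.wafv2.model.TextTransformation;
import software.amazon.awssdk.services.wafv2.model.TextTransformationType;

final class ByteMatchSketch {
    static ByteMatchStatement userAgentContainsBadBot() {
        return ByteMatchStatement.builder()
                .searchString(SdkBytes.fromUtf8String("BadBot"))   // SDK base64-encodes this for the API
                .fieldToMatch(FieldToMatch.builder()
                        .singleHeader(SingleHeader.builder().name("User-Agent").build())
                        .build())
                .textTransformations(TextTransformation.builder()
                        .priority(0)
                        .type(TextTransformationType.NONE)
                        .build())
                .positionalConstraint(PositionalConstraint.CONTAINS)
                .build();
    }
}
```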

    " + }, + "FieldToMatch":{ + "shape":"FieldToMatch", + "documentation":"

    The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch.

    " + }, + "TextTransformations":{ + "shape":"TextTransformations", + "documentation":"

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

    " + }, + "PositionalConstraint":{ + "shape":"PositionalConstraint", + "documentation":"

    The area within the portion of a web request that you want AWS WAF to search for SearchString. Valid values include the following:

    CONTAINS

    The specified part of the web request must include the value of SearchString, but the location doesn't matter.

    CONTAINS_WORD

    The specified part of the web request must include the value of SearchString, and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true:

    • SearchString is at the beginning of the specified part of the web request or is preceded by a character other than an alphanumeric character or underscore (_). Examples include the value of a header and ;BadBot.

    • SearchString is at the end of the specified part of the web request or is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot; and -BadBot;.

    EXACTLY

    The value of the specified part of the web request must exactly match the value of SearchString.

    STARTS_WITH

    The value of SearchString must appear at the beginning of the specified part of the web request.

    ENDS_WITH

    The value of SearchString must appear at the end of the specified part of the web request.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is referred to as a string match statement.

    " + }, + "CapacityUnit":{ + "type":"long", + "min":1 + }, + "CheckCapacityRequest":{ + "type":"structure", + "required":[ + "Scope", + "Rules" + ], + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.
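
    For the SDK bullet above, "use the Region endpoint us-east-1" simply means configuring the client for US East (N. Virginia) when the scope is CLOUDFRONT. A hedged sketch:

```java
// Sketch only: CloudFront-scoped WAFV2 calls must go to the us-east-1 endpoint.
// For REGIONAL scope, build the client in the Region of your ALB, REST API, or GraphQL API instead.
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.wafv2.Wafv2Client;

final class ScopeSketch {
    static Wafv2Client cloudFrontClient() {
        return Wafv2Client.builder()
                .region(Region.US_EAST_1)   // pair this client with Scope.CLOUDFRONT in requests
                .build();
    }
}
```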

    " + }, + "Rules":{ + "shape":"Rules", + "documentation":"

    An array of Rule that you're configuring to use in a rule group or web ACL.

    " + } + } + }, + "CheckCapacityResponse":{ + "type":"structure", + "members":{ + "Capacity":{ + "shape":"ConsumedCapacity", + "documentation":"

    The capacity required by the rules and scope.

    " + } + } + }, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "EQ", + "NE", + "LE", + "LT", + "GE", + "GT" + ] + }, + "ConsumedCapacity":{ + "type":"long", + "min":0 + }, + "CountAction":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Specifies that AWS WAF should count requests.

    This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

    " + }, + "Country":{"type":"string"}, + "CountryCode":{ + "type":"string", + "enum":[ + "AF", + "AX", + "AL", + "DZ", + "AS", + "AD", + "AO", + "AI", + "AQ", + "AG", + "AR", + "AM", + "AW", + "AU", + "AT", + "AZ", + "BS", + "BH", + "BD", + "BB", + "BY", + "BE", + "BZ", + "BJ", + "BM", + "BT", + "BO", + "BQ", + "BA", + "BW", + "BV", + "BR", + "IO", + "BN", + "BG", + "BF", + "BI", + "KH", + "CM", + "CA", + "CV", + "KY", + "CF", + "TD", + "CL", + "CN", + "CX", + "CC", + "CO", + "KM", + "CG", + "CD", + "CK", + "CR", + "CI", + "HR", + "CU", + "CW", + "CY", + "CZ", + "DK", + "DJ", + "DM", + "DO", + "EC", + "EG", + "SV", + "GQ", + "ER", + "EE", + "ET", + "FK", + "FO", + "FJ", + "FI", + "FR", + "GF", + "PF", + "TF", + "GA", + "GM", + "GE", + "DE", + "GH", + "GI", + "GR", + "GL", + "GD", + "GP", + "GU", + "GT", + "GG", + "GN", + "GW", + "GY", + "HT", + "HM", + "VA", + "HN", + "HK", + "HU", + "IS", + "IN", + "ID", + "IR", + "IQ", + "IE", + "IM", + "IL", + "IT", + "JM", + "JP", + "JE", + "JO", + "KZ", + "KE", + "KI", + "KP", + "KR", + "KW", + "KG", + "LA", + "LV", + "LB", + "LS", + "LR", + "LY", + "LI", + "LT", + "LU", + "MO", + "MK", + "MG", + "MW", + "MY", + "MV", + "ML", + "MT", + "MH", + "MQ", + "MR", + "MU", + "YT", + "MX", + "FM", + "MD", + "MC", + "MN", + "ME", + "MS", + "MA", + "MZ", + "MM", + "NA", + "NR", + "NP", + "NL", + "NC", + "NZ", + "NI", + "NE", + "NG", + "NU", + "NF", + "MP", + "NO", + "OM", + "PK", + "PW", + "PS", + "PA", + "PG", + "PY", + "PE", + "PH", + "PN", + "PL", + "PT", + "PR", + "QA", + "RE", + "RO", + "RU", + "RW", + "BL", + "SH", + "KN", + "LC", + "MF", + "PM", + "VC", + "WS", + "SM", + "ST", + "SA", + "SN", + "RS", + "SC", + "SL", + "SG", + "SX", + "SK", + "SI", + "SB", + "SO", + "ZA", + "GS", + "SS", + "ES", + "LK", + "SD", + "SR", + "SJ", + "SZ", + "SE", + "CH", + "SY", + "TW", + "TJ", + "TZ", + "TH", + "TL", + "TG", + "TK", + "TO", + "TT", + "TN", + "TR", + "TM", + "TC", + "TV", + "UG", + "UA", + "AE", + "GB", + "US", + "UM", + "UY", + "UZ", + "VU", + "VE", + "VN", + "VG", + "VI", + "WF", + "EH", + "YE", + "ZM", + "ZW" + ] + }, + "CountryCodes":{ + "type":"list", + "member":{"shape":"CountryCode"}, + "min":1 + }, + "CreateIPSetRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "IPAddressVersion", + "Addresses" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the IP set. You cannot change the name of an IPSet after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.

    " + }, + "IPAddressVersion":{ + "shape":"IPAddressVersion", + "documentation":"

    Specify IPV4 or IPV6.

    " + }, + "Addresses":{ + "shape":"IPAddresses", + "documentation":"

    Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all address ranges for IP versions IPv4 and IPv6.

    Examples:

    • To configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32.

    • To configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

    • To configure AWS WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128.

    • To configure AWS WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64.

    For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
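    The CIDR examples above map directly onto a CreateIPSet call. Below is a minimal sketch with the SDK for Java v2; the method, enum, and getter spellings mirror the model shapes in this file (CreateIPSet, Scope, Summary, ARN) and are assumptions about the generated code, which may normalize acronym casing (for example createIpSet).

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.wafv2.Wafv2Client;
import software.amazon.awssdk.services.wafv2.model.Scope;

// Sketch only: names mirror the model shapes; check the generated client for exact casing.
Wafv2Client wafv2 = Wafv2Client.builder().region(Region.US_EAST_1).build();

var created = wafv2.createIPSet(b -> b
        .name("blocked-addresses")
        .scope(Scope.CLOUDFRONT)                    // hence the us-east-1 client above
        .ipAddressVersion("IPV4")
        .addresses("192.0.2.44/32", "192.0.2.0/24") // one host address, one /24 block
        .description("Sample blocked ranges"));

// The returned summary carries the Id (used by get/update/delete) and the ARN
// (used by an IPSetReferenceStatement inside a Rule).
String ipSetArn = created.summary().arn();
```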

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of key:value pairs to associate with the resource.

    " + } + } + }, + "CreateIPSetResponse":{ + "type":"structure", + "members":{ + "Summary":{ + "shape":"IPSetSummary", + "documentation":"

    High-level information about an IPSet, returned by operations like create and list. This provides information like the ID, which you can use to retrieve and manage an IPSet, and the ARN, which you provide to the IPSetReferenceStatement to use the address set in a Rule.

    " + } + } + }, + "CreateRegexPatternSetRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "RegularExpressionList" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the set. You cannot change the name after you create the set.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the set that helps with identification. You cannot change the description of a set after you create it.

    " + }, + "RegularExpressionList":{ + "shape":"RegularExpressionList", + "documentation":"

    Array of regular expression strings.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of key:value pairs to associate with the resource.

    " + } + } + }, + "CreateRegexPatternSetResponse":{ + "type":"structure", + "members":{ + "Summary":{ + "shape":"RegexPatternSetSummary", + "documentation":"

    High-level information about a RegexPatternSet, returned by operations like create and list. This provides information like the ID, which you can use to retrieve and manage a RegexPatternSet, and the ARN, which you provide to the RegexPatternSetReferenceStatement to use the pattern set in a Rule.

    " + } + } + }, + "CreateRuleGroupRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Capacity", + "VisibilityConfig" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the rule group. You cannot change the name of a rule group after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Capacity":{ + "shape":"CapacityUnit", + "documentation":"

    The web ACL capacity units (WCUs) required for this rule group.

    When you create your own rule group, you define this, and you cannot change it after creation. When you add or modify the rules in a rule group, AWS WAF enforces this limit. You can check the capacity for a set of rules using CheckCapacity.

    AWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.
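    Because the capacity is fixed at creation time, a common pattern is to run CheckCapacity over the intended rules first and feed the result into CreateRuleGroup. The sketch below assumes the SDK for Java v2 client generated from this model (shape-derived names such as checkCapacity, Rule, and CountryCode, whose generated casing may differ) and assumes VisibilityConfig's usual members (SampledRequestsEnabled, CloudWatchMetricsEnabled, MetricName).

```java
// Sketch only: names mirror the model shapes; model classes live in
// software.amazon.awssdk.services.wafv2.model (Rule, CountryCode, Scope, ...).
Wafv2Client wafv2 = Wafv2Client.builder().region(Region.US_EAST_1).build();

Rule blockCn = Rule.builder()
        .name("block-cn")
        .priority(0)
        .action(a -> a.block(block -> {}))
        .statement(s -> s.geoMatchStatement(g -> g.countryCodes(CountryCode.CN)))
        .visibilityConfig(v -> v
                .sampledRequestsEnabled(true)
                .cloudWatchMetricsEnabled(true)
                .metricName("block-cn"))
        .build();

// Ask AWS WAF how many WCUs the rules need, then create the group with exactly that capacity.
long wcus = wafv2.checkCapacity(b -> b.scope(Scope.CLOUDFRONT).rules(blockCn)).capacity();

wafv2.createRuleGroup(b -> b
        .name("geo-rules")
        .scope(Scope.CLOUDFRONT)
        .capacity(wcus)
        .rules(blockCn)
        .visibilityConfig(v -> v
                .sampledRequestsEnabled(true)
                .cloudWatchMetricsEnabled(true)
                .metricName("geo-rules")));
```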

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.

    " + }, + "Rules":{ + "shape":"Rules", + "documentation":"

    The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.

    " + }, + "VisibilityConfig":{ + "shape":"VisibilityConfig", + "documentation":"

    Defines and enables Amazon CloudWatch metrics and web request sample collection.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of key:value pairs to associate with the resource.

    " + } + } + }, + "CreateRuleGroupResponse":{ + "type":"structure", + "members":{ + "Summary":{ + "shape":"RuleGroupSummary", + "documentation":"

    High-level information about a RuleGroup, returned by operations like create and list. This provides information like the ID, which you can use to retrieve and manage a RuleGroup, and the ARN, which you provide to the RuleGroupReferenceStatement to use the rule group in a Rule.

    " + } + } + }, + "CreateWebACLRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "DefaultAction", + "VisibilityConfig" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the Web ACL. You cannot change the name of a Web ACL after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "DefaultAction":{ + "shape":"DefaultAction", + "documentation":"

    The action to perform if none of the Rules contained in the WebACL match.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.

    " + }, + "Rules":{ + "shape":"Rules", + "documentation":"

    The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.

    " + }, + "VisibilityConfig":{ + "shape":"VisibilityConfig", + "documentation":"

    Defines and enables Amazon CloudWatch metrics and web request sample collection.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of key:value pairs to associate with the resource.

    " + } + } + }, + "CreateWebACLResponse":{ + "type":"structure", + "members":{ + "Summary":{ + "shape":"WebACLSummary", + "documentation":"

    High-level information about a WebACL, returned by operations like create and list. This provides information like the ID, which you can use to retrieve and manage a WebACL, and the ARN, which you provide to operations like AssociateWebACL.

    " + } + } + }, + "DefaultAction":{ + "type":"structure", + "members":{ + "Block":{ + "shape":"BlockAction", + "documentation":"

    Specifies that AWS WAF should block requests by default.

    " + }, + "Allow":{ + "shape":"AllowAction", + "documentation":"

    Specifies that AWS WAF should allow requests by default.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    In a WebACL, this is the action that you want AWS WAF to perform when a web request doesn't match any of the rules in the WebACL. The default action must be a terminating action, so count is not allowed.
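    For instance, a web ACL whose default action allows anything the rules don't match could be created roughly as follows. This is an SDK for Java v2 sketch with shape-derived names; the generated casing may differ (for example createWebACL vs createWebAcl), and the VisibilityConfig member names are assumed.

```java
// Sketch only: names mirror the model shapes.
Wafv2Client wafv2 = Wafv2Client.builder().region(Region.US_EAST_1).build();

wafv2.createWebACL(b -> b
        .name("edge-acl")
        .scope(Scope.CLOUDFRONT)
        // The default action must terminate: Allow or Block, never Count.
        .defaultAction(d -> d.allow(allow -> {}))
        .visibilityConfig(v -> v
                .sampledRequestsEnabled(true)
                .cloudWatchMetricsEnabled(true)
                .metricName("edge-acl")));
```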

    " + }, + "DeleteFirewallManagerRuleGroupsRequest":{ + "type":"structure", + "required":[ + "WebACLArn", + "WebACLLockToken" + ], + "members":{ + "WebACLArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the web ACL.

    " + }, + "WebACLLockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "DeleteFirewallManagerRuleGroupsResponse":{ + "type":"structure", + "members":{ + "NextWebACLLockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "DeleteIPSetRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id", + "LockToken" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the IP set. You cannot change the name of an IPSet after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.
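    In practice the token flow is: fetch the entity to obtain the current token, then pass that token to the mutating call. SDK for Java v2 sketch with shape-derived names (getIPSet, deleteIPSet); the generated casing may differ, and the identifiers used here are placeholders.

```java
// Sketch only: names mirror the model shapes.
Wafv2Client wafv2 = Wafv2Client.builder().region(Region.US_EAST_1).build();
String id = "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d";   // Id returned by create/list

var current = wafv2.getIPSet(b -> b
        .name("blocked-addresses")
        .scope(Scope.CLOUDFRONT)
        .id(id));

// If another writer changed the set after the get, this fails with the
// optimistic-lock error described above; re-run the get and retry with the new token.
wafv2.deleteIPSet(b -> b
        .name("blocked-addresses")
        .scope(Scope.CLOUDFRONT)
        .id(id)
        .lockToken(current.lockToken()));
```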

    " + } + } + }, + "DeleteIPSetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteLoggingConfigurationRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the web ACL from which you want to delete the LoggingConfiguration.

    " + } + } + }, + "DeleteLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "DeletePermissionPolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the rule group from which you want to delete the policy.

    You must be the owner of the rule group to perform this operation.

    " + } + } + }, + "DeletePermissionPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteRegexPatternSetRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id", + "LockToken" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the set. You cannot change the name after you create the set.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "DeleteRegexPatternSetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteRuleGroupRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id", + "LockToken" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the rule group. You cannot change the name of a rule group after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "DeleteRuleGroupResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteWebACLRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id", + "LockToken" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the Web ACL. You cannot change the name of a Web ACL after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    The unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "DeleteWebACLResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeManagedRuleGroupRequest":{ + "type":"structure", + "required":[ + "VendorName", + "Name", + "Scope" + ], + "members":{ + "VendorName":{ + "shape":"VendorName", + "documentation":"

    The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.

    " + }, + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + } + } + }, + "DescribeManagedRuleGroupResponse":{ + "type":"structure", + "members":{ + "Capacity":{ + "shape":"CapacityUnit", + "documentation":"

    The web ACL capacity units (WCUs) required for this rule group. AWS WAF uses web ACL capacity units (WCU) to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect each rule's relative cost. Rule group capacity is fixed at creation, so users can plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.

    " + }, + "Rules":{ + "shape":"RuleSummaries", + "documentation":"

    " + } + } + }, + "DisassociateWebACLRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource to disassociate from the web ACL.

    The ARN must be in one of the following formats:

    • For an Application Load Balancer: arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id

    • For an API Gateway REST API: arn:aws:apigateway:region::/restapis/api-id/stages/stage-name

    • For an AppSync GraphQL API: arn:aws:appsync:region:account-id:apis/GraphQLApiId

    " + } + } + }, + "DisassociateWebACLResponse":{ + "type":"structure", + "members":{ + } + }, + "EntityDescription":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[\\w+=:#@/\\-,\\.][\\w+=:#@/\\-,\\.\\s]+[\\w+=:#@/\\-,\\.]$" + }, + "EntityId":{ + "type":"string", + "max":36, + "min":1, + "pattern":"^[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}$" + }, + "EntityName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\w\\-]+$" + }, + "ErrorMessage":{"type":"string"}, + "ErrorReason":{"type":"string"}, + "ExcludedRule":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the rule to exclude.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT. This effectively excludes the rule from acting on web requests.

    " + }, + "ExcludedRules":{ + "type":"list", + "member":{"shape":"ExcludedRule"} + }, + "FallbackBehavior":{ + "type":"string", + "enum":[ + "MATCH", + "NO_MATCH" + ] + }, + "FieldToMatch":{ + "type":"structure", + "members":{ + "SingleHeader":{ + "shape":"SingleHeader", + "documentation":"

    Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer. This setting isn't case sensitive.

    " + }, + "SingleQueryArgument":{ + "shape":"SingleQueryArgument", + "documentation":"

    Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion. The name can be up to 30 characters long and isn't case sensitive.

    This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

    " + }, + "AllQueryArguments":{ + "shape":"AllQueryArguments", + "documentation":"

    Inspect all query arguments.

    " + }, + "UriPath":{ + "shape":"UriPath", + "documentation":"

    Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg.

    " + }, + "QueryString":{ + "shape":"QueryString", + "documentation":"

    Inspect the query string. This is the part of a URL that appears after a ? character, if any.

    " + }, + "Body":{ + "shape":"Body", + "documentation":"

    Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.

    Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement, with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.

    " + }, + "Method":{ + "shape":"Method", + "documentation":"

    Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    The part of a web request that you want AWS WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of a web request, create a separate rule statement for each component.
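    As a small sketch (model-shape names; generated casing may differ), two common FieldToMatch choices look like this. The resulting object is then embedded in a ByteMatchStatement, RegexPatternSetReferenceStatement, SizeConstraintStatement, and so on.

```java
import software.amazon.awssdk.services.wafv2.model.FieldToMatch;

// Inspect a single header; header-name matching isn't case sensitive.
FieldToMatch userAgent = FieldToMatch.builder()
        .singleHeader(h -> h.name("User-Agent"))
        .build();

// Inspect the query string (everything after the '?', if present).
FieldToMatch queryString = FieldToMatch.builder()
        .queryString(qs -> {})
        .build();
```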

    " + }, + "FieldToMatchData":{ + "type":"string", + "max":64, + "min":1, + "pattern":".*\\S.*" + }, + "FirewallManagerRuleGroup":{ + "type":"structure", + "required":[ + "Name", + "Priority", + "FirewallManagerStatement", + "OverrideAction", + "VisibilityConfig" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the rule group. You cannot change the name of a rule group after you create it.

    " + }, + "Priority":{ + "shape":"RulePriority", + "documentation":"

    If you define more than one rule group in the first or last Firewall Manager rule groups, AWS WAF evaluates each request against the rule groups in order, starting from the lowest priority setting. The priorities don't need to be consecutive, but they must all be different.

    " + }, + "FirewallManagerStatement":{ + "shape":"FirewallManagerStatement", + "documentation":"

    The processing guidance for an AWS Firewall Manager rule. This is like a regular rule Statement, but it can only contain a rule group reference.

    " + }, + "OverrideAction":{"shape":"OverrideAction"}, + "VisibilityConfig":{"shape":"VisibilityConfig"} + }, + "documentation":"

    A rule group that's defined for an AWS Firewall Manager WAF policy.

    " + }, + "FirewallManagerRuleGroups":{ + "type":"list", + "member":{"shape":"FirewallManagerRuleGroup"} + }, + "FirewallManagerStatement":{ + "type":"structure", + "members":{ + "ManagedRuleGroupStatement":{"shape":"ManagedRuleGroupStatement"}, + "RuleGroupReferenceStatement":{"shape":"RuleGroupReferenceStatement"} + }, + "documentation":"

    The processing guidance for an AWS Firewall Manager rule. This is like a regular rule Statement, but it can only contain a rule group reference.

    " + }, + "ForwardedIPConfig":{ + "type":"structure", + "required":[ + "HeaderName", + "FallbackBehavior" + ], + "members":{ + "HeaderName":{ + "shape":"ForwardedIPHeaderName", + "documentation":"

    The name of the HTTP header to use for the IP address. For example, to use the X-Forwarded-For (XFF) header, set this to X-Forwarded-For.

    If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

    " + }, + "FallbackBehavior":{ + "shape":"FallbackBehavior", + "documentation":"

    The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.

    If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

    You can specify the following fallback behaviors:

    • MATCH - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.

    • NO_MATCH - Treat the web request as not matching the rule statement.

    " + } + }, + "documentation":"

    The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

    If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

    This configuration is used for GeoMatchStatement and RateBasedStatement. For IPSetReferenceStatement, use IPSetForwardedIPConfig instead.

    AWS WAF only evaluates the first IP address found in the specified HTTP header.

    " + }, + "ForwardedIPHeaderName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "ForwardedIPPosition":{ + "type":"string", + "enum":[ + "FIRST", + "LAST", + "ANY" + ] + }, + "GeoMatchStatement":{ + "type":"structure", + "members":{ + "CountryCodes":{ + "shape":"CountryCodes", + "documentation":"

    An array of two-character country codes, for example, [ \"US\", \"CN\" ], from the alpha-2 country ISO codes of the ISO 3166 international standard.

    " + }, + "ForwardedIPConfig":{ + "shape":"ForwardedIPConfig", + "documentation":"

    The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

    If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A rule statement used to identify web requests based on country of origin.
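    A sketch of such a statement, matching US- and CN-origin requests and reading the origin from X-Forwarded-For (model-shape names such as Statement, CountryCode, and forwardedIPConfig; generated casing may differ):

```java
import software.amazon.awssdk.services.wafv2.model.CountryCode;
import software.amazon.awssdk.services.wafv2.model.Statement;

Statement geo = Statement.builder()
        .geoMatchStatement(g -> g
                .countryCodes(CountryCode.US, CountryCode.CN)
                // Use the first X-Forwarded-For address instead of the connection source IP.
                // A missing header means the rule doesn't apply at all; MATCH only applies
                // when the header is present but doesn't hold a valid address.
                .forwardedIPConfig(f -> f
                        .headerName("X-Forwarded-For")
                        .fallbackBehavior("MATCH")))
        .build();
```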

    " + }, + "GetIPSetRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the IP set. You cannot change the name of an IPSet after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + } + } + }, + "GetIPSetResponse":{ + "type":"structure", + "members":{ + "IPSet":{ + "shape":"IPSet", + "documentation":"

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "GetLoggingConfigurationRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the web ACL for which you want to get the LoggingConfiguration.

    " + } + } + }, + "GetLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

    The LoggingConfiguration for the specified web ACL.

    " + } + } + }, + "GetPermissionPolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the rule group for which you want to get the policy.

    " + } + } + }, + "GetPermissionPolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"PolicyString", + "documentation":"

    The IAM policy that is attached to the specified rule group.

    " + } + } + }, + "GetRateBasedStatementManagedKeysRequest":{ + "type":"structure", + "required":[ + "Scope", + "WebACLName", + "WebACLId", + "RuleName" + ], + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "WebACLName":{ + "shape":"EntityName", + "documentation":"

    The name of the Web ACL. You cannot change the name of a Web ACL after you create it.

    " + }, + "WebACLId":{ + "shape":"EntityId", + "documentation":"

    The unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "RuleName":{ + "shape":"EntityName", + "documentation":"

    The name of the rate-based rule to get the keys for.

    " + } + } + }, + "GetRateBasedStatementManagedKeysResponse":{ + "type":"structure", + "members":{ + "ManagedKeysIPV4":{ + "shape":"RateBasedStatementManagedKeysIPSet", + "documentation":"

    The keys that are of Internet Protocol version 4 (IPv4).

    " + }, + "ManagedKeysIPV6":{ + "shape":"RateBasedStatementManagedKeysIPSet", + "documentation":"

    The keys that are of Internet Protocol version 6 (IPv6).

    " + } + } + }, + "GetRegexPatternSetRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the set. You cannot change the name after you create the set.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + } + } + }, + "GetRegexPatternSetResponse":{ + "type":"structure", + "members":{ + "RegexPatternSet":{ + "shape":"RegexPatternSet", + "documentation":"

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "GetRuleGroupRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the rule group. You cannot change the name of a rule group after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + } + } + }, + "GetRuleGroupResponse":{ + "type":"structure", + "members":{ + "RuleGroup":{ + "shape":"RuleGroup", + "documentation":"

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "GetSampledRequestsRequest":{ + "type":"structure", + "required":[ + "WebAclArn", + "RuleMetricName", + "Scope", + "TimeWindow", + "MaxItems" + ], + "members":{ + "WebAclArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the WebACL for which you want a sample of requests.

    " + }, + "RuleMetricName":{ + "shape":"MetricName", + "documentation":"

    The metric name assigned to the Rule or RuleGroup for which you want a sample of requests.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "TimeWindow":{ + "shape":"TimeWindow", + "documentation":"

    The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

    " + }, + "MaxItems":{ + "shape":"ListMaxItems", + "documentation":"

    The number of requests that you want AWS WAF to return from among the first 5,000 requests that your AWS resource received during the time range. If your resource received fewer requests than the value of MaxItems, GetSampledRequests returns information about all of them.
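    A sketch of a request that samples the last hour for one rule's metric. This assumes the SDK for Java v2 surface generated from this model and that TimeWindow carries StartTime/EndTime members; names mirror the model shapes and the generated casing may differ. The ARN is a placeholder.

```java
import java.time.Instant;
import java.time.temporal.ChronoUnit;

Wafv2Client wafv2 = Wafv2Client.builder().region(Region.US_EAST_1).build();
Instant now = Instant.now();

var sample = wafv2.getSampledRequests(b -> b
        .webAclArn("<web ACL ARN>")
        .ruleMetricName("block-cn")
        .scope(Scope.CLOUDFRONT)
        // Any window inside the previous three hours, in UTC.
        .timeWindow(t -> t.startTime(now.minus(1, ChronoUnit.HOURS)).endTime(now))
        .maxItems(100L));

sample.sampledRequests().forEach(System.out::println);
```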

    " + } + } + }, + "GetSampledRequestsResponse":{ + "type":"structure", + "members":{ + "SampledRequests":{ + "shape":"SampledHTTPRequests", + "documentation":"

    A complex type that contains detailed information about each of the requests in the sample.

    " + }, + "PopulationSize":{ + "shape":"PopulationSize", + "documentation":"

    The total number of requests from which GetSampledRequests got a sample of MaxItems requests. If PopulationSize is less than MaxItems, the sample includes every request that your AWS resource received during the specified time range.

    " + }, + "TimeWindow":{ + "shape":"TimeWindow", + "documentation":"

    Usually, TimeWindow is the time range that you specified in the GetSampledRequests request. However, if your AWS resource received more than 5,000 requests during the time range that you specified in the request, GetSampledRequests returns the time range for the first 5,000 requests. Times are in Coordinated Universal Time (UTC) format.

    " + } + } + }, + "GetWebACLForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN (Amazon Resource Name) of the resource.

    " + } + } + }, + "GetWebACLForResourceResponse":{ + "type":"structure", + "members":{ + "WebACL":{ + "shape":"WebACL", + "documentation":"

    The Web ACL that is associated with the resource. If there is no associated web ACL, AWS WAF returns a null Web ACL.

    " + } + } + }, + "GetWebACLRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the Web ACL. You cannot change the name of a Web ACL after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    The unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + } + } + }, + "GetWebACLResponse":{ + "type":"structure", + "members":{ + "WebACL":{ + "shape":"WebACL", + "documentation":"

    The Web ACL specification. You can modify the settings in this Web ACL and use it to update this Web ACL or create a new one.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "HTTPHeader":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"HeaderName", + "documentation":"

    The name of the HTTP header.

    " + }, + "Value":{ + "shape":"HeaderValue", + "documentation":"

    The value of the HTTP header.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Part of the response from GetSampledRequests. This is a complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests.

    " + }, + "HTTPHeaders":{ + "type":"list", + "member":{"shape":"HTTPHeader"} + }, + "HTTPMethod":{"type":"string"}, + "HTTPRequest":{ + "type":"structure", + "members":{ + "ClientIP":{ + "shape":"IPString", + "documentation":"

    The IP address that the request originated from. If the web ACL is associated with a CloudFront distribution, this is the value of one of the following fields in CloudFront access logs:

    • c-ip, if the viewer did not use an HTTP proxy or a load balancer to send the request

    • x-forwarded-for, if the viewer did use an HTTP proxy or a load balancer to send the request

    " + }, + "Country":{ + "shape":"Country", + "documentation":"

    The two-letter country code for the country that the request originated from. For a current list of country codes, see the Wikipedia entry ISO 3166-1 alpha-2.

    " + }, + "URI":{ + "shape":"URIString", + "documentation":"

    The URI path of the request, which identifies the resource, for example, /images/daily-ad.jpg.

    " + }, + "Method":{ + "shape":"HTTPMethod", + "documentation":"

    The HTTP method specified in the sampled web request.

    " + }, + "HTTPVersion":{ + "shape":"HTTPVersion", + "documentation":"

    The HTTP version specified in the sampled web request, for example, HTTP/1.1.

    " + }, + "Headers":{ + "shape":"HTTPHeaders", + "documentation":"

    A complex type that contains the name and value for each header in the sampled web request.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Part of the response from GetSampledRequests. This is a complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests.

    " + }, + "HTTPVersion":{"type":"string"}, + "HeaderName":{"type":"string"}, + "HeaderValue":{"type":"string"}, + "IPAddress":{ + "type":"string", + "max":50, + "min":1, + "pattern":".*\\S.*" + }, + "IPAddressVersion":{ + "type":"string", + "enum":[ + "IPV4", + "IPV6" + ] + }, + "IPAddresses":{ + "type":"list", + "member":{"shape":"IPAddress"} + }, + "IPSet":{ + "type":"structure", + "required":[ + "Name", + "Id", + "ARN", + "IPAddressVersion", + "Addresses" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the IP set. You cannot change the name of an IPSet after you create it.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the entity.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.

    " + }, + "IPAddressVersion":{ + "shape":"IPAddressVersion", + "documentation":"

    Specify IPV4 or IPV6.

    " + }, + "Addresses":{ + "shape":"IPAddresses", + "documentation":"

    Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all address ranges for IP versions IPv4 and IPv6.

    Examples:

    • To configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32.

    • To configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

    • To configure AWS WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128.

    • To configure AWS WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64.

    For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports any CIDR range. For information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

    AWS WAF assigns an ARN to each IPSet that you create. To use an IP set in a rule, you provide the ARN to the Rule statement IPSetReferenceStatement.

    " + }, + "IPSetForwardedIPConfig":{ + "type":"structure", + "required":[ + "HeaderName", + "FallbackBehavior", + "Position" + ], + "members":{ + "HeaderName":{ + "shape":"ForwardedIPHeaderName", + "documentation":"

    The name of the HTTP header to use for the IP address. For example, to use the X-Forwarded-For (XFF) header, set this to X-Forwarded-For.

    If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

    " + }, + "FallbackBehavior":{ + "shape":"FallbackBehavior", + "documentation":"

    The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.

    If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

    You can specify the following fallback behaviors:

    • MATCH - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.

    • NO_MATCH - Treat the web request as not matching the rule statement.

    " + }, + "Position":{ + "shape":"ForwardedIPPosition", + "documentation":"

    The position in the header to search for the IP address. The header can contain IP addresses of the original client and also of proxies. For example, the header value could be 10.1.1.1, 127.0.0.0, 10.10.10.10 where the first IP address identifies the original client and the rest identify proxies that the request went through.

    The options for this setting are the following:

    • FIRST - Inspect the first IP address in the list of IP addresses in the header. This is usually the client's original IP.

    • LAST - Inspect the last IP address in the list of IP addresses in the header.

    • ANY - Inspect all IP addresses in the header for a match. If the header contains more than 10 IP addresses, AWS WAF inspects the last 10.

    " + } + }, + "documentation":"

    The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

    If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

    This configuration is used only for IPSetReferenceStatement. For GeoMatchStatement and RateBasedStatement, use ForwardedIPConfig instead.

    " + }, + "IPSetReferenceStatement":{ + "type":"structure", + "required":["ARN"], + "members":{ + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the IPSet that this statement references.

    " + }, + "IPSetForwardedIPConfig":{ + "shape":"IPSetForwardedIPConfig", + "documentation":"

    The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

    If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet.

    Each IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.
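    Here is a sketch of a reference statement that points at an existing IP set by ARN and inspects the last X-Forwarded-For address (model-shape names; generated casing may differ). The ARN string is a placeholder for the value returned by CreateIPSet or ListIPSets.

```java
import software.amazon.awssdk.services.wafv2.model.Statement;

String ipSetArn = "<ARN returned by CreateIPSet>";

Statement ipMatch = Statement.builder()
        .ipSetReferenceStatement(r -> r
                .arn(ipSetArn)
                .ipSetForwardedIPConfig(f -> f
                        .headerName("X-Forwarded-For")
                        .fallbackBehavior("NO_MATCH")
                        .position("LAST")))
        .build();
```

    Because the rule only holds the ARN, updating the referenced IP set immediately affects every rule that uses it.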

    " + }, + "IPSetSummaries":{ + "type":"list", + "member":{"shape":"IPSetSummary"} + }, + "IPSetSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the IP set. You cannot change the name of an IPSet after you create it.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + }, + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the entity.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    High-level information about an IPSet, returned by operations like create and list. This provides information like the ID, which you can use to retrieve and manage an IPSet, and the ARN, which you provide to the IPSetReferenceStatement to use the address set in a Rule.

    " + }, + "IPString":{"type":"string"}, + "ListAvailableManagedRuleGroupsRequest":{ + "type":"structure", + "required":["Scope"], + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

    The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.

    " + } + } + }, + "ListAvailableManagedRuleGroupsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "ManagedRuleGroups":{ + "shape":"ManagedRuleGroupSummaries", + "documentation":"

    " + } + } + }, + "ListIPSetsRequest":{ + "type":"structure", + "required":["Scope"], + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

    The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.

    " + } + } + }, + "ListIPSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "IPSets":{ + "shape":"IPSetSummaries", + "documentation":"

    Array of IPSets. This may not be the full list of IPSets that you have defined. See the Limit specification for this request.

    " + } + } + }, + "ListLoggingConfigurationsRequest":{ + "type":"structure", + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

    The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.

    " + } + } + }, + "ListLoggingConfigurationsResponse":{ + "type":"structure", + "members":{ + "LoggingConfigurations":{ + "shape":"LoggingConfigurations", + "documentation":"

    " + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + } + } + }, + "ListMaxItems":{ + "type":"long", + "max":500, + "min":1 + }, + "ListRegexPatternSetsRequest":{ + "type":"structure", + "required":["Scope"], + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

    The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.

    " + } + } + }, + "ListRegexPatternSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "RegexPatternSets":{ + "shape":"RegexPatternSetSummaries", + "documentation":"

    " + } + } + }, + "ListResourcesForWebACLRequest":{ + "type":"structure", + "required":["WebACLArn"], + "members":{ + "WebACLArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the Web ACL.

    " + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    Used for web ACLs that are scoped for regional applications. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    " + } + } + }, + "ListResourcesForWebACLResponse":{ + "type":"structure", + "members":{ + "ResourceArns":{ + "shape":"ResourceArns", + "documentation":"

    The array of Amazon Resource Names (ARNs) of the associated resources.

    " + } + } + }, + "ListRuleGroupsRequest":{ + "type":"structure", + "required":["Scope"], + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

    The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.

    " + } + } + }, + "ListRuleGroupsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "RuleGroups":{ + "shape":"RuleGroupSummaries", + "documentation":"

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

    The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.

    " + }, + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "TagInfoForResource":{ + "shape":"TagInfoForResource", + "documentation":"

    The collection of tagging definitions for the resource.

    " + } + } + }, + "ListWebACLsRequest":{ + "type":"structure", + "required":["Scope"], + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

    The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.

    " + } + } + }, + "ListWebACLsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

    When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.

    " + }, + "WebACLs":{ + "shape":"WebACLSummaries", + "documentation":"

    " + } + } + }, + "LockToken":{ + "type":"string", + "max":36, + "min":1, + "pattern":"^[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}$" + }, + "LogDestinationConfigs":{ + "type":"list", + "member":{"shape":"ResourceArn"}, + "max":100, + "min":1 + }, + "LoggingConfiguration":{ + "type":"structure", + "required":[ + "ResourceArn", + "LogDestinationConfigs" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the web ACL that you want to associate with LogDestinationConfigs.

    " + }, + "LogDestinationConfigs":{ + "shape":"LogDestinationConfigs", + "documentation":"

    The Amazon Kinesis Data Firehose Amazon Resource Names (ARNs) that you want to associate with the web ACL.

    " + }, + "RedactedFields":{ + "shape":"RedactedFields", + "documentation":"

    The parts of the request that you want to keep out of the logs. For example, if you redact the HEADER field, the HEADER field in the logs delivered by the firehose will be xxx.

    You must use one of the following values: URI, QUERY_STRING, HEADER, or METHOD.

    " + }, + "ManagedByFirewallManager":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the logging configuration was created by AWS Firewall Manager, as part of an AWS WAF policy configuration. If true, only Firewall Manager can modify or delete the configuration.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Defines an association between Amazon Kinesis Data Firehose destinations and a web ACL resource, for logging from AWS WAF. As part of the association, you can specify parts of the standard logging fields to keep out of the logs.

    " + }, + "LoggingConfigurations":{ + "type":"list", + "member":{"shape":"LoggingConfiguration"} + }, + "ManagedRuleGroupStatement":{ + "type":"structure", + "required":[ + "VendorName", + "Name" + ], + "members":{ + "VendorName":{ + "shape":"VendorName", + "documentation":"

    The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.

    " + }, + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.

    " + }, + "ExcludedRules":{ + "shape":"ExcludedRules", + "documentation":"

    The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

    You can't nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

    " + }, + "ManagedRuleGroupSummaries":{ + "type":"list", + "member":{"shape":"ManagedRuleGroupSummary"} + }, + "ManagedRuleGroupSummary":{ + "type":"structure", + "members":{ + "VendorName":{ + "shape":"VendorName", + "documentation":"

    The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.

    " + }, + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    The description of the managed rule group, provided by AWS Managed Rules or the AWS Marketplace seller who manages it.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include AWS Managed Rules rule groups, which are free of charge to AWS WAF customers, and AWS Marketplace managed rule groups, which you can subscribe to through AWS Marketplace.

    " + }, + "Method":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    The HTTP method of a web request. The method indicates the type of operation that the request is asking the origin to perform.

    This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

    " + }, + "MetricName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[\\w#:\\.\\-/]+$" + }, + "NextMarker":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "NoneAction":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Specifies that AWS WAF should do nothing. This is generally used to try out a rule without performing any actions. You set the OverrideAction on the Rule.

    This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

    " + }, + "NotStatement":{ + "type":"structure", + "required":["Statement"], + "members":{ + "Statement":{ + "shape":"Statement", + "documentation":"

    The statement to negate. You can use any statement that can be nested.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement.

    " + }, + "OrStatement":{ + "type":"structure", + "required":["Statements"], + "members":{ + "Statements":{ + "shape":"Statements", + "documentation":"

    The statements to combine with OR logic. You can use any statements that can be nested.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement.

    " + }, + "OverrideAction":{ + "type":"structure", + "members":{ + "Count":{ + "shape":"CountAction", + "documentation":"

    Override the rule action setting to count.

    " + }, + "None":{ + "shape":"NoneAction", + "documentation":"

    Don't override the rule action setting.

    " + } + }, + "documentation":"

    The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement.

    Set the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.

    In a Rule, you must specify either this OverrideAction setting or the rule Action setting, but not both:

    • If the rule statement references a rule group, use this override action setting and not the action setting.

    • If the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.

    " + }, + "PaginationLimit":{ + "type":"integer", + "max":100, + "min":1 + }, + "ParameterExceptionField":{ + "type":"string", + "enum":[ + "WEB_ACL", + "RULE_GROUP", + "REGEX_PATTERN_SET", + "IP_SET", + "MANAGED_RULE_SET", + "RULE", + "EXCLUDED_RULE", + "STATEMENT", + "BYTE_MATCH_STATEMENT", + "SQLI_MATCH_STATEMENT", + "XSS_MATCH_STATEMENT", + "SIZE_CONSTRAINT_STATEMENT", + "GEO_MATCH_STATEMENT", + "RATE_BASED_STATEMENT", + "RULE_GROUP_REFERENCE_STATEMENT", + "REGEX_PATTERN_REFERENCE_STATEMENT", + "IP_SET_REFERENCE_STATEMENT", + "MANAGED_RULE_SET_STATEMENT", + "AND_STATEMENT", + "OR_STATEMENT", + "NOT_STATEMENT", + "IP_ADDRESS", + "IP_ADDRESS_VERSION", + "FIELD_TO_MATCH", + "TEXT_TRANSFORMATION", + "SINGLE_QUERY_ARGUMENT", + "SINGLE_HEADER", + "DEFAULT_ACTION", + "RULE_ACTION", + "ENTITY_LIMIT", + "OVERRIDE_ACTION", + "SCOPE_VALUE", + "RESOURCE_ARN", + "RESOURCE_TYPE", + "TAGS", + "TAG_KEYS", + "METRIC_NAME", + "FIREWALL_MANAGER_STATEMENT", + "FALLBACK_BEHAVIOR", + "POSITION", + "FORWARDED_IP_CONFIG", + "IP_SET_FORWARDED_IP_CONFIG", + "HEADER_NAME" + ] + }, + "ParameterExceptionParameter":{ + "type":"string", + "min":1 + }, + "PolicyString":{ + "type":"string", + "max":395000, + "min":1, + "pattern":".*\\S.*" + }, + "PopulationSize":{"type":"long"}, + "PositionalConstraint":{ + "type":"string", + "enum":[ + "EXACTLY", + "STARTS_WITH", + "ENDS_WITH", + "CONTAINS", + "CONTAINS_WORD" + ] + }, + "PutLoggingConfigurationRequest":{ + "type":"structure", + "required":["LoggingConfiguration"], + "members":{ + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

    " + } + } + }, + "PutLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

    " + } + } + }, + "PutPermissionPolicyRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Policy" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the RuleGroup to which you want to attach the policy.

    " + }, + "Policy":{ + "shape":"PolicyString", + "documentation":"

    The policy to attach to the specified rule group.

    The policy specifications must conform to the following:

    • The policy must be composed using IAM Policy version 2012-10-17 or version 2015-01-01.

    • The policy must include specifications for Effect, Action, and Principal.

    • Effect must specify Allow.

    • Action must specify wafv2:CreateWebACL, wafv2:UpdateWebACL, and wafv2:PutFirewallManagerRuleGroups. AWS WAF rejects any extra actions or wildcard actions in the policy.

    • The policy must not include a Resource parameter.

    For more information, see IAM Policies.

    " + } + } + }, + "PutPermissionPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "QueryString":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    The query string of a web request. This is the part of a URL that appears after a ? character, if any.

    This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

    " + }, + "RateBasedStatement":{ + "type":"structure", + "required":[ + "Limit", + "AggregateKeyType" + ], + "members":{ + "Limit":{ + "shape":"RateLimit", + "documentation":"

    The limit on requests per 5-minute period for a single originating IP address. If the statement includes a ScopeDownStatement, this limit is applied only to the requests that match the statement.

    " + }, + "AggregateKeyType":{ + "shape":"RateBasedStatementAggregateKeyType", + "documentation":"

    Setting that indicates how to aggregate the request counts. The options are the following:

    • IP - Aggregate the request counts on the IP address from the web request origin.

    • FORWARDED_IP - Aggregate the request counts on the first IP address in an HTTP header. If you use this, configure the ForwardedIPConfig, to specify the header to use.

    " + }, + "ScopeDownStatement":{ + "shape":"Statement", + "documentation":"

    An optional nested statement that narrows the scope of the rate-based statement to matching web requests. This can be any nestable statement, and you can nest statements at any level below this scope-down statement.

    " + }, + "ForwardedIPConfig":{ + "shape":"ForwardedIPConfig", + "documentation":"

    The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

    If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

    This is required if AggregateKeyType is set to FORWARDED_IP.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

    When the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.

    You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

    • An IP match statement with an IP set that specifies the address 192.0.2.44.

    • A string match statement that searches in the User-Agent header for the string BadBot.

    In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.

    You cannot nest a RateBasedStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

    " + }, + "RateBasedStatementAggregateKeyType":{ + "type":"string", + "enum":[ + "IP", + "FORWARDED_IP" + ] + }, + "RateBasedStatementManagedKeysIPSet":{ + "type":"structure", + "members":{ + "IPAddressVersion":{"shape":"IPAddressVersion"}, + "Addresses":{ + "shape":"IPAddresses", + "documentation":"

    The IP addresses that are currently blocked.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    The set of IP addresses that are currently blocked for a rate-based statement.

    " + }, + "RateLimit":{ + "type":"long", + "max":2000000000, + "min":100 + }, + "RedactedFields":{ + "type":"list", + "member":{"shape":"FieldToMatch"}, + "max":100 + }, + "Regex":{ + "type":"structure", + "members":{ + "RegexString":{ + "shape":"RegexPatternString", + "documentation":"

    The string representing the regular expression.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A single regular expression. This is used in a RegexPatternSet.

    " + }, + "RegexPatternSet":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the set. You cannot change the name after you create the set.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the entity.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the set that helps with identification. You cannot change the description of a set after you create it.

    " + }, + "RegularExpressionList":{ + "shape":"RegularExpressionList", + "documentation":"

    The regular expression patterns in the set.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Contains one or more regular expressions.

    AWS WAF assigns an ARN to each RegexPatternSet that you create. To use a set in a rule, you provide the ARN to the Rule statement RegexPatternSetReferenceStatement.

    " + }, + "RegexPatternSetReferenceStatement":{ + "type":"structure", + "required":[ + "ARN", + "FieldToMatch", + "TextTransformations" + ], + "members":{ + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the RegexPatternSet that this statement references.

    " + }, + "FieldToMatch":{ + "shape":"FieldToMatch", + "documentation":"

    The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch.

    " + }, + "TextTransformations":{ + "shape":"TextTransformations", + "documentation":"

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet.

    Each regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.

    " + }, + "RegexPatternSetSummaries":{ + "type":"list", + "member":{"shape":"RegexPatternSetSummary"} + }, + "RegexPatternSetSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the data type instance. You cannot change the name after you create the instance.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the set that helps with identification. You cannot change the description of a set after you create it.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + }, + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the entity.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    High-level information about a RegexPatternSet, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a RegexPatternSet, and the ARN, that you provide to the RegexPatternSetReferenceStatement to use the pattern set in a Rule.

    " + }, + "RegexPatternString":{ + "type":"string", + "max":512, + "min":1, + "pattern":".*" + }, + "RegularExpressionList":{ + "type":"list", + "member":{"shape":"Regex"} + }, + "ResourceArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":".*\\S.*" + }, + "ResourceArns":{ + "type":"list", + "member":{"shape":"ResourceArn"} + }, + "ResourceType":{ + "type":"string", + "enum":[ + "APPLICATION_LOAD_BALANCER", + "API_GATEWAY", + "APPSYNC" + ] + }, + "Rule":{ + "type":"structure", + "required":[ + "Name", + "Priority", + "Statement", + "VisibilityConfig" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the rule. You can't change the name of a Rule after you create it.

    " + }, + "Priority":{ + "shape":"RulePriority", + "documentation":"

    If you define more than one Rule in a WebACL, AWS WAF evaluates each request against the Rules in order based on the value of Priority. AWS WAF processes rules with lower priority first. The priorities don't need to be consecutive, but they must all be different.

    " + }, + "Statement":{ + "shape":"Statement", + "documentation":"

    The AWS WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement.

    " + }, + "Action":{ + "shape":"RuleAction", + "documentation":"

    The action that AWS WAF should take on a web request when it matches the rule statement. Settings at the web ACL level can override the rule action setting.

    This is used only for rules whose statements do not reference a rule group. Rule statements that reference a rule group include RuleGroupReferenceStatement and ManagedRuleGroupStatement.

    You must specify either this Action setting or the rule OverrideAction setting, but not both:

    • If the rule statement does not reference a rule group, use this rule action setting and not the rule override action setting.

    • If the rule statement references a rule group, use the override action setting and not this action setting.

    " + }, + "OverrideAction":{ + "shape":"OverrideAction", + "documentation":"

    The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement.

    Set the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.

    In a Rule, you must specify either this OverrideAction setting or the rule Action setting, but not both:

    • If the rule statement references a rule group, use this override action setting and not the action setting.

    • If the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.

    " + }, + "VisibilityConfig":{ + "shape":"VisibilityConfig", + "documentation":"

    Defines and enables Amazon CloudWatch metrics and web request sample collection.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.

    " + }, + "RuleAction":{ + "type":"structure", + "members":{ + "Block":{ + "shape":"BlockAction", + "documentation":"

    Instructs AWS WAF to block the web request.

    " + }, + "Allow":{ + "shape":"AllowAction", + "documentation":"

    Instructs AWS WAF to allow the web request.

    " + }, + "Count":{ + "shape":"CountAction", + "documentation":"

    Instructs AWS WAF to count the web request and allow it.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    The action that AWS WAF should take on a web request when it matches a rule's statement. Settings at the web ACL level can override the rule action setting.

    " + }, + "RuleGroup":{ + "type":"structure", + "required":[ + "Name", + "Id", + "Capacity", + "ARN", + "VisibilityConfig" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the rule group. You cannot change the name of a rule group after you create it.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "Capacity":{ + "shape":"CapacityUnit", + "documentation":"

    The web ACL capacity units (WCUs) required for this rule group.

    When you create your own rule group, you define this, and you cannot change it after creation. When you add or modify the rules in a rule group, AWS WAF enforces this limit. You can check the capacity for a set of rules using CheckCapacity.

    AWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.

    " + }, + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the entity.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.

    " + }, + "Rules":{ + "shape":"Rules", + "documentation":"

    The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.

    " + }, + "VisibilityConfig":{ + "shape":"VisibilityConfig", + "documentation":"

    Defines and enables Amazon CloudWatch metrics and web request sample collection.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL. When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.

    " + }, + "RuleGroupReferenceStatement":{ + "type":"structure", + "required":["ARN"], + "members":{ + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the entity.

    " + }, + "ExcludedRules":{ + "shape":"ExcludedRules", + "documentation":"

    The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

    You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

    " + }, + "RuleGroupSummaries":{ + "type":"list", + "member":{"shape":"RuleGroupSummary"} + }, + "RuleGroupSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the data type instance. You cannot change the name after you create the instance.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + }, + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the entity.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    High-level information about a RuleGroup, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a RuleGroup, and the ARN, that you provide to the RuleGroupReferenceStatement to use the rule group in a Rule.

    " + }, + "RulePriority":{ + "type":"integer", + "min":0 + }, + "RuleSummaries":{ + "type":"list", + "member":{"shape":"RuleSummary"} + }, + "RuleSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the rule.

    " + }, + "Action":{"shape":"RuleAction"} + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    High-level information about a Rule, returned by operations like DescribeManagedRuleGroup. This provides information like the ID, that you can use to retrieve and manage a RuleGroup, and the ARN, that you provide to the RuleGroupReferenceStatement to use the rule group in a Rule.

    " + }, + "Rules":{ + "type":"list", + "member":{"shape":"Rule"} + }, + "SampleWeight":{ + "type":"long", + "min":0 + }, + "SampledHTTPRequest":{ + "type":"structure", + "required":[ + "Request", + "Weight" + ], + "members":{ + "Request":{ + "shape":"HTTPRequest", + "documentation":"

    A complex type that contains detailed information about the request.

    " + }, + "Weight":{ + "shape":"SampleWeight", + "documentation":"

    A value that indicates how one result in the response relates proportionally to other results in the response. For example, a result that has a weight of 2 represents roughly twice as many web requests as a result that has a weight of 1.

    " + }, + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

    The time at which AWS WAF received the request from your AWS resource, in Unix time format (in seconds).

    " + }, + "Action":{ + "shape":"Action", + "documentation":"

    The action for the Rule that the request matched: ALLOW, BLOCK, or COUNT.

    " + }, + "RuleNameWithinRuleGroup":{ + "shape":"EntityName", + "documentation":"

    The name of the Rule that the request matched. For managed rule groups, the format for this name is <vendor name>#<managed rule group name>#<rule name>. For your own rule groups, the format for this name is <rule group name>#<rule name>. If the rule is not in a rule group, this field is absent.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Represents a single sampled web request. The response from GetSampledRequests includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains an array of SampledHTTPRequest objects.

    " + }, + "SampledHTTPRequests":{ + "type":"list", + "member":{"shape":"SampledHTTPRequest"} + }, + "Scope":{ + "type":"string", + "enum":[ + "CLOUDFRONT", + "REGIONAL" + ] + }, + "SearchString":{"type":"blob"}, + "SingleHeader":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"FieldToMatchData", + "documentation":"

    The name of the HTTP header to inspect.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    One of the headers in a web request, identified by name, for example, User-Agent or Referer. This setting isn't case sensitive.

    This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

    " + }, + "SingleQueryArgument":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"FieldToMatchData", + "documentation":"

    The name of the query argument to inspect.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    One query argument in a web request, identified by name, for example UserName or SalesRegion. The name can be up to 30 characters long and isn't case sensitive.

    " + }, + "Size":{ + "type":"long", + "max":21474836480, + "min":0 + }, + "SizeConstraintStatement":{ + "type":"structure", + "required":[ + "FieldToMatch", + "ComparisonOperator", + "Size", + "TextTransformations" + ], + "members":{ + "FieldToMatch":{ + "shape":"FieldToMatch", + "documentation":"

    The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch.

    " + }, + "ComparisonOperator":{ + "shape":"ComparisonOperator", + "documentation":"

    The operator to use to compare the request part to the size setting.

    " + }, + "Size":{ + "shape":"Size", + "documentation":"

    The size, in bytes, to compare to the request part, after any transformations.

    " + }, + "TextTransformations":{ + "shape":"TextTransformations", + "documentation":"

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.

    If you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.

    If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

    " + }, + "SqliMatchStatement":{ + "type":"structure", + "required":[ + "FieldToMatch", + "TextTransformations" + ], + "members":{ + "FieldToMatch":{ + "shape":"FieldToMatch", + "documentation":"

    The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch.

    " + }, + "TextTransformations":{ + "shape":"TextTransformations", + "documentation":"

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.

    " + }, + "Statement":{ + "type":"structure", + "members":{ + "ByteMatchStatement":{ + "shape":"ByteMatchStatement", + "documentation":"

    A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is referred to as a string match statement.

    " + }, + "SqliMatchStatement":{ + "shape":"SqliMatchStatement", + "documentation":"

    Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.

    " + }, + "XssMatchStatement":{ + "shape":"XssMatchStatement", + "documentation":"

    A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-side scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.

    " + }, + "SizeConstraintStatement":{ + "shape":"SizeConstraintStatement", + "documentation":"

    A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.

    If you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.

    If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

    " + }, + "GeoMatchStatement":{ + "shape":"GeoMatchStatement", + "documentation":"

    A rule statement used to identify web requests based on country of origin.

    " + }, + "RuleGroupReferenceStatement":{ + "shape":"RuleGroupReferenceStatement", + "documentation":"

    A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

    You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

    " + }, + "IPSetReferenceStatement":{ + "shape":"IPSetReferenceStatement", + "documentation":"

    A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet.

    Each IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.

    " + }, + "RegexPatternSetReferenceStatement":{ + "shape":"RegexPatternSetReferenceStatement", + "documentation":"

    A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet.

    Each regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.

    " + }, + "RateBasedStatement":{ + "shape":"RateBasedStatement", + "documentation":"

    A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

    When the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.

    You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

    • An IP match statement with an IP set that specifies the address 192.0.2.44.

    • A string match statement that searches in the User-Agent header for the string BadBot.

    In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.

    You cannot nest a RateBasedStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.
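
    As a hedged sketch of the scenario above using the SDK for Java v2 builders: the example is simplified to a single nested string (byte) match rather than the full AND combination, and the member names scopeDownStatement and aggregateKeyType are assumed from the SDK's codegen conventions rather than taken from this model.

```java
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.wafv2.model.*;

public class RateBasedRuleExample {
    public static void main(String[] args) {
        // Nested statement: only count requests whose User-Agent header contains "BadBot".
        Statement scopeDown = Statement.builder()
                .byteMatchStatement(ByteMatchStatement.builder()
                        .searchString(SdkBytes.fromUtf8String("BadBot"))
                        .fieldToMatch(FieldToMatch.builder()
                                .singleHeader(SingleHeader.builder().name("User-Agent").build())
                                .build())
                        .positionalConstraint(PositionalConstraint.CONTAINS)
                        .textTransformations(TextTransformation.builder()
                                .priority(0)
                                .type(TextTransformationType.NONE)
                                .build())
                        .build())
                .build();

        // Trigger the rule action once an IP sends more than 1,000 matching requests
        // in any 5-minute window.
        RateBasedStatement rateBased = RateBasedStatement.builder()
                .limit(1000L)
                .aggregateKeyType(RateBasedStatementAggregateKeyType.IP)
                .scopeDownStatement(scopeDown)
                .build();
        System.out.println(rateBased);
    }
}
```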

    " + }, + "AndStatement":{ + "shape":"AndStatement", + "documentation":"

    A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement.

    " + }, + "OrStatement":{ + "shape":"OrStatement", + "documentation":"

    A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement.

    " + }, + "NotStatement":{ + "shape":"NotStatement", + "documentation":"

    A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement.

    " + }, + "ManagedRuleGroupStatement":{ + "shape":"ManagedRuleGroupStatement", + "documentation":"

    A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

    You can't nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    The processing guidance for a Rule, used by AWS WAF to determine whether a web request matches the rule.

    " + }, + "Statements":{ + "type":"list", + "member":{"shape":"Statement"} + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    Part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as \"customer.\" Tag keys are case-sensitive.

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

    You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.
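
    A minimal sketch of constructing one such key:value pair with the SDK for Java v2 builder (standard codegen naming assumed):

```java
import software.amazon.awssdk.services.wafv2.model.Tag;

public class TagExample {
    public static void main(String[] args) {
        // A key:value tag; keys and values are case-sensitive.
        Tag environmentTag = Tag.builder()
                .key("environment")
                .value("production")
                .build();
        System.out.println(environmentTag);
    }
}
```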

    " + }, + "TagInfoForResource":{ + "type":"structure", + "members":{ + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + }, + "TagList":{ + "shape":"TagList", + "documentation":"

    The array of Tag objects defined for the resource.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    The collection of tagging definitions for an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

    You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    An array of key:value pairs to associate with the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TextTransformation":{ + "type":"structure", + "required":[ + "Priority", + "Type" + ], + "members":{ + "Priority":{ + "shape":"TextTransformationPriority", + "documentation":"

    Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.

    " + }, + "Type":{ + "shape":"TextTransformationType", + "documentation":"

    You can specify the following transformation types:

    CMD_LINE

    When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

    • Delete the following characters: \\ \" ' ^

    • Delete spaces before the following characters: / (

    • Replace the following characters with a space: , ;

    • Replace multiple spaces with one space

    • Convert uppercase letters (A-Z) to lowercase (a-z)

    COMPRESS_WHITE_SPACE

    Use this option to replace the following characters with a space character (decimal 32):

    • \\f, formfeed, decimal 12

    • \\t, tab, decimal 9

    • \\n, newline, decimal 10

    • \\r, carriage return, decimal 13

    • \\v, vertical tab, decimal 11

    • non-breaking space, decimal 160

    COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

    HTML_ENTITY_DECODE

    Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

    • Replaces (ampersand)quot; with \"

    • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

    • Replaces (ampersand)lt; with a \"less than\" symbol

    • Replaces (ampersand)gt; with >

    • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

    • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

    LOWERCASE

    Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

    URL_DECODE

    Use this option to decode a URL-encoded value.

    NONE

    Specify NONE if you don't want any text transformations.
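
    A short sketch, assuming the SDK for Java v2 codegen builder names, showing two transformations applied in priority order (URL-decode first, then lowercase):

```java
import java.util.Arrays;
import java.util.List;
import software.amazon.awssdk.services.wafv2.model.TextTransformation;
import software.amazon.awssdk.services.wafv2.model.TextTransformationType;

public class TextTransformationExample {
    public static void main(String[] args) {
        // AWS WAF applies transformations from lowest priority to highest:
        // first URL-decode the value, then lowercase it, before inspecting it.
        List<TextTransformation> transformations = Arrays.asList(
                TextTransformation.builder()
                        .priority(0)
                        .type(TextTransformationType.URL_DECODE)
                        .build(),
                TextTransformation.builder()
                        .priority(1)
                        .type(TextTransformationType.LOWERCASE)
                        .build());
        System.out.println(transformations);
    }
}
```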

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.

    " + }, + "TextTransformationPriority":{ + "type":"integer", + "min":0 + }, + "TextTransformationType":{ + "type":"string", + "enum":[ + "NONE", + "COMPRESS_WHITE_SPACE", + "HTML_ENTITY_DECODE", + "LOWERCASE", + "CMD_LINE", + "URL_DECODE" + ] + }, + "TextTransformations":{ + "type":"list", + "member":{"shape":"TextTransformation"}, + "min":1 + }, + "TimeWindow":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime" + ], + "members":{ + "StartTime":{ + "shape":"Timestamp", + "documentation":"

    The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

    " + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

    The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

    You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

    In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.
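
    A minimal sketch with the SDK for Java v2 builder, assuming the usual codegen mapping of Timestamp members to java.time.Instant, requesting a sample window covering the last hour (within the permitted previous three hours):

```java
import java.time.Instant;
import software.amazon.awssdk.services.wafv2.model.TimeWindow;

public class TimeWindowExample {
    public static void main(String[] args) {
        // Sample web requests received over the last hour.
        Instant end = Instant.now();
        Instant start = end.minusSeconds(3600);
        TimeWindow window = TimeWindow.builder()
                .startTime(start)
                .endTime(end)
                .build();
        System.out.println(window);
    }
}
```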

    " + }, + "Timestamp":{"type":"timestamp"}, + "URIString":{"type":"string"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    " + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    An array of keys identifying the tags to disassociate from the resource.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateIPSetRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id", + "Addresses", + "LockToken" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the IP set. You cannot change the name of an IPSet after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.

    " + }, + "Addresses":{ + "shape":"IPAddresses", + "documentation":"

    Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all address ranges for IP versions IPv4 and IPv6.

    Examples:

    • To configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32.

    • To configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

    • To configure AWS WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128.

    • To configure AWS WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64.

    For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.
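
    A hedged sketch of the get-then-update pattern this describes, using the SDK for Java v2. The client, request, and exception class names (Wafv2Client, GetIpSetRequest, UpdateIpSetRequest, WafOptimisticLockException) and the getIPSet/updateIPSet method names follow the SDK's usual codegen naming and are assumptions here, as are the placeholder set name and ID.

```java
import java.util.Arrays;
import software.amazon.awssdk.services.wafv2.Wafv2Client;
import software.amazon.awssdk.services.wafv2.model.GetIpSetRequest;
import software.amazon.awssdk.services.wafv2.model.GetIpSetResponse;
import software.amazon.awssdk.services.wafv2.model.Scope;
import software.amazon.awssdk.services.wafv2.model.UpdateIpSetRequest;
import software.amazon.awssdk.services.wafv2.model.WafOptimisticLockException;

public class UpdateIpSetWithLockToken {
    public static void main(String[] args) {
        try (Wafv2Client waf = Wafv2Client.create()) {
            boolean updated = false;
            while (!updated) {
                // Get the current state of the IP set, including its lock token.
                GetIpSetResponse current = waf.getIPSet(GetIpSetRequest.builder()
                        .name("my-ip-set")
                        .scope(Scope.REGIONAL)
                        .id("example-ip-set-id")
                        .build());
                try {
                    // Submit the update together with the token from the get call.
                    waf.updateIPSet(UpdateIpSetRequest.builder()
                            .name("my-ip-set")
                            .scope(Scope.REGIONAL)
                            .id("example-ip-set-id")
                            .addresses(Arrays.asList("192.0.2.0/24", "192.0.2.44/32"))
                            .lockToken(current.lockToken())
                            .build());
                    updated = true;
                } catch (WafOptimisticLockException e) {
                    // The set changed since we read it; loop to fetch the new token and retry.
                }
            }
        }
    }
}
```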

    " + } + } + }, + "UpdateIPSetResponse":{ + "type":"structure", + "members":{ + "NextLockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

    " + } + } + }, + "UpdateRegexPatternSetRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id", + "RegularExpressionList", + "LockToken" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the set. You cannot change the name after you create the set.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the set that helps with identification. You cannot change the description of a set after you create it.

    " + }, + "RegularExpressionList":{ + "shape":"RegularExpressionList", + "documentation":"

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "UpdateRegexPatternSetResponse":{ + "type":"structure", + "members":{ + "NextLockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

    " + } + } + }, + "UpdateRuleGroupRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id", + "VisibilityConfig", + "LockToken" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the rule group. You cannot change the name of a rule group after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.

    " + }, + "Rules":{ + "shape":"Rules", + "documentation":"

    The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.

    " + }, + "VisibilityConfig":{ + "shape":"VisibilityConfig", + "documentation":"

    Defines and enables Amazon CloudWatch metrics and web request sample collection.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "UpdateRuleGroupResponse":{ + "type":"structure", + "members":{ + "NextLockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

    " + } + } + }, + "UpdateWebACLRequest":{ + "type":"structure", + "required":[ + "Name", + "Scope", + "Id", + "DefaultAction", + "VisibilityConfig", + "LockToken" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the Web ACL. You cannot change the name of a Web ACL after you create it.

    " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

    Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

    • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

    • API and SDKs - For all calls, use the Region endpoint us-east-1.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    The unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "DefaultAction":{ + "shape":"DefaultAction", + "documentation":"

    The action to perform if none of the Rules contained in the WebACL match.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.

    " + }, + "Rules":{ + "shape":"Rules", + "documentation":"

    The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.

    " + }, + "VisibilityConfig":{ + "shape":"VisibilityConfig", + "documentation":"

    Defines and enables Amazon CloudWatch metrics and web request sample collection.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + } + } + }, + "UpdateWebACLResponse":{ + "type":"structure", + "members":{ + "NextLockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.

    " + } + } + }, + "UriPath":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    The path component of the URI of a web request. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg.

    This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

    " + }, + "VendorName":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*\\S.*" + }, + "VisibilityConfig":{ + "type":"structure", + "required":[ + "SampledRequestsEnabled", + "CloudWatchMetricsEnabled", + "MetricName" + ], + "members":{ + "SampledRequestsEnabled":{ + "shape":"Boolean", + "documentation":"

    A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.

    " + }, + "CloudWatchMetricsEnabled":{ + "shape":"Boolean", + "documentation":"

    A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics.

    " + }, + "MetricName":{ + "shape":"MetricName", + "documentation":"

    A name of the CloudWatch metric. The name can contain only the characters: A-Z, a-z, 0-9, - (hyphen), and _ (underscore). The name can be from one to 128 characters long. It can't contain whitespace or metric names reserved for AWS WAF, for example \"All\" and \"Default_Action.\"

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    Defines and enables Amazon CloudWatch metrics and web request sample collection.
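
    A minimal sketch with the SDK for Java v2 builder, enabling both request sampling and CloudWatch metrics under an assumed metric name:

```java
import software.amazon.awssdk.services.wafv2.model.VisibilityConfig;

public class VisibilityConfigExample {
    public static void main(String[] args) {
        // Enable sampling and metrics; the metric name must follow the constraints above.
        VisibilityConfig config = VisibilityConfig.builder()
                .sampledRequestsEnabled(true)
                .cloudWatchMetricsEnabled(true)
                .metricName("MyRuleGroupMetric")
                .build();
        System.out.println(config);
    }
}
```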

    " + }, + "WAFAssociatedItemException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS WAF couldn’t perform the operation because your resource is being used by another resource or it’s associated with another resource.

    ", + "exception":true + }, + "WAFDuplicateItemException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS WAF couldn’t perform the operation because the resource that you tried to save is a duplicate of an existing one.

    ", + "exception":true + }, + "WAFInternalErrorException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    Your request is valid, but AWS WAF couldn’t perform the operation because of a system problem. Retry your request.

    ", + "exception":true, + "fault":true + }, + "WAFInvalidOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The operation isn't valid.

    ", + "exception":true + }, + "WAFInvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "Field":{"shape":"ParameterExceptionField"}, + "Parameter":{"shape":"ParameterExceptionParameter"}, + "Reason":{"shape":"ErrorReason"} + }, + "documentation":"

    The operation failed because AWS WAF didn't recognize a parameter in the request. For example:

    • You specified an invalid parameter name or value.

    • Your nested statement isn't valid. You might have tried to nest a statement that can’t be nested.

    • You tried to update a WebACL with a DefaultAction that isn't among the types available at DefaultAction.

    • Your request references an ARN that is malformed, or corresponds to a resource with which a Web ACL cannot be associated.

    ", + "exception":true + }, + "WAFInvalidPermissionPolicyException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The operation failed because the specified policy isn't in the proper format.

    The policy specifications must conform to the following:

    • The policy must be composed using IAM Policy version 2012-10-17 or version 2015-01-01.

    • The policy must include specifications for Effect, Action, and Principal.

    • Effect must specify Allow.

    • Action must specify wafv2:CreateWebACL, wafv2:UpdateWebACL, and wafv2:PutFirewallManagerRuleGroups. AWS WAF rejects any extra actions or wildcard actions in the policy.

    • The policy must not include a Resource parameter.

    For more information, see IAM Policies.
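
    A sketch of a policy document that satisfies these requirements, written as a Java string. The principal account ID is a placeholder, and the PutPermissionPolicy call that would accept the document is assumed and not shown here.

```java
public class RuleGroupSharingPolicy {
    public static void main(String[] args) {
        // Version 2012-10-17; Effect, Action, and Principal present; Effect is Allow;
        // only the three permitted wafv2 actions; no Resource element.
        String policy = "{"
                + "\"Version\":\"2012-10-17\","
                + "\"Statement\":[{"
                +   "\"Effect\":\"Allow\","
                +   "\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"},"
                +   "\"Action\":[\"wafv2:CreateWebACL\",\"wafv2:UpdateWebACL\","
                +   "\"wafv2:PutFirewallManagerRuleGroups\"]"
                + "}]}";
        System.out.println(policy);
    }
}
```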

    ", + "exception":true + }, + "WAFInvalidResourceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS WAF couldn’t perform the operation because the resource that you requested isn’t valid. Check the resource, and try again.

    ", + "exception":true + }, + "WAFLimitsExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS WAF couldn’t perform the operation because you exceeded your resource limit, for example, the maximum number of WebACL objects that you can create for an AWS account. For more information, see Limits in the AWS WAF Developer Guide.

    ", + "exception":true + }, + "WAFNonexistentItemException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS WAF couldn’t perform the operation because your resource doesn’t exist.

    ", + "exception":true + }, + "WAFOptimisticLockException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS WAF couldn’t save your changes because you tried to update or delete a resource that has changed since you last retrieved it. Get the resource again, make any changes you need to make to the new copy, and retry your operation.

    ", + "exception":true + }, + "WAFServiceLinkedRoleErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS WAF is not able to access the service linked role. This can be caused by a previous PutLoggingConfiguration request, which can lock the service linked role for about 20 seconds. Please try your request again. The service linked role can also be locked by a previous DeleteServiceLinkedRole request, which can lock the role for 15 minutes or more. If you recently made a call to DeleteServiceLinkedRole, wait at least 15 minutes and try the request again. If you receive this same exception again, you will have to wait additional time until the role is unlocked.

    ", + "exception":true + }, + "WAFSubscriptionNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    ", + "exception":true + }, + "WAFTagOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An error occurred during the tagging operation. Retry your request.

    ", + "exception":true + }, + "WAFTagOperationInternalErrorException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS WAF couldn’t perform your tagging operation because of an internal error. Retry your request.

    ", + "exception":true, + "fault":true + }, + "WAFUnavailableEntityException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    AWS WAF couldn’t retrieve the resource that you requested. Retry your request.

    ", + "exception":true + }, + "WebACL":{ + "type":"structure", + "required":[ + "Name", + "Id", + "ARN", + "DefaultAction", + "VisibilityConfig" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the Web ACL. You cannot change the name of a Web ACL after you create it.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    A unique identifier for the WebACL. This ID is returned in the responses to create and list commands. You use this ID to do things like get, update, and delete a WebACL.

    " + }, + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the Web ACL that you want to associate with the resource.

    " + }, + "DefaultAction":{ + "shape":"DefaultAction", + "documentation":"

    The action to perform if none of the Rules contained in the WebACL match.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.

    " + }, + "Rules":{ + "shape":"Rules", + "documentation":"

    The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.

    " + }, + "VisibilityConfig":{ + "shape":"VisibilityConfig", + "documentation":"

    Defines and enables Amazon CloudWatch metrics and web request sample collection.

    " + }, + "Capacity":{ + "shape":"ConsumedCapacity", + "documentation":"

    The web ACL capacity units (WCUs) currently being used by this web ACL.

    AWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.

    " + }, + "PreProcessFirewallManagerRuleGroups":{ + "shape":"FirewallManagerRuleGroups", + "documentation":"

    The first set of rules for AWS WAF to process in the web ACL. This is defined in an AWS Firewall Manager WAF policy and contains only rule group references. You can't alter these. Any rules and rule groups that you define for the web ACL are prioritized after these.

    In the Firewall Manager WAF policy, the Firewall Manager administrator can define a set of rule groups to run first in the web ACL and a set of rule groups to run last. Within each set, the administrator prioritizes the rule groups, to determine their relative processing order.

    " + }, + "PostProcessFirewallManagerRuleGroups":{ + "shape":"FirewallManagerRuleGroups", + "documentation":"

    The last set of rules for AWS WAF to process in the web ACL. This is defined in an AWS Firewall Manager WAF policy and contains only rule group references. You can't alter these. Any rules and rule groups that you define for the web ACL are prioritized before these.

    In the Firewall Manager WAF policy, the Firewall Manager administrator can define a set of rule groups to run first in the web ACL and a set of rule groups to run last. Within each set, the administrator prioritizes the rule groups, to determine their relative processing order.

    " + }, + "ManagedByFirewallManager":{ + "shape":"Boolean", + "documentation":"

    Indicates whether this web ACL is managed by AWS Firewall Manager. If true, then only AWS Firewall Manager can delete the web ACL or any Firewall Manager rule groups in the web ACL.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API.

    " + }, + "WebACLSummaries":{ + "type":"list", + "member":{"shape":"WebACLSummary"} + }, + "WebACLSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

    The name of the Web ACL. You cannot change the name of a Web ACL after you create it.

    " + }, + "Id":{ + "shape":"EntityId", + "documentation":"

    The unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

    " + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

    A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.

    " + }, + "LockToken":{ + "shape":"LockToken", + "documentation":"

    A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.

    " + }, + "ARN":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the entity.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    High-level information about a WebACL, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a WebACL, and the ARN, that you provide to operations like AssociateWebACL.

    " + }, + "XssMatchStatement":{ + "type":"structure", + "required":[ + "FieldToMatch", + "TextTransformations" + ], + "members":{ + "FieldToMatch":{ + "shape":"FieldToMatch", + "documentation":"

    The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch.

    " + }, + "TextTransformations":{ + "shape":"TextTransformations", + "documentation":"

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

    " + } + }, + "documentation":"

    This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

    A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-side scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.

    " + } + }, + "documentation":"

    This is the latest version of the AWS WAF API, released in November, 2019. The names of the entities that you use to access this API, like endpoints and namespaces, all have the versioning information added, like \"V2\" or \"v2\", to distinguish from the prior version. We recommend migrating your resources to this version, because it has a number of significant improvements.

    If you used AWS WAF prior to this release, you can't use this AWS WAFV2 API to access any AWS WAF resources that you created before. You can access your old rules, web ACLs, and other AWS WAF resources only through the AWS WAF Classic APIs. The AWS WAF Classic APIs have retained the prior names, endpoints, and namespaces.

    For information, including how to migrate your AWS WAF resources to this version, see the AWS WAF Developer Guide.

    AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API. AWS WAF also lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, the API Gateway REST API, CloudFront distribution, the Application Load Balancer, or the AWS AppSync GraphQL API responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You also can configure CloudFront to return a custom error page when a request is blocked.

    This API guide is for developers who need detailed information about AWS WAF API actions, data types, and errors. For detailed information about AWS WAF features and an overview of how to use AWS WAF, see the AWS WAF Developer Guide.

    You can make calls using the endpoints listed in AWS Service Endpoints for AWS WAF.

    • For regional applications, you can use any of the endpoints in the list. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

    • For AWS CloudFront applications, you must use the API endpoint listed for US East (N. Virginia): us-east-1.

    Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

    We currently provide two versions of the AWS WAF API: this API and the prior versions, the classic AWS WAF APIs. This new API provides the same functionality as the older versions, with the following major improvements:

    • You use one API for both global and regional applications. Where you need to distinguish the scope, you specify a Scope parameter and set it to CLOUDFRONT or REGIONAL.

    • You can define a Web ACL or rule group with a single call, and update it with a single call. You define all rule specifications in JSON format, and pass them to your rule group or Web ACL calls.

    • The limits AWS WAF places on the use of rules more closely reflect the cost of running each type of rule. Rule groups include capacity settings, so you know the maximum cost of a rule group when you use it.
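
    A hedged sketch of the endpoint and Scope guidance above using the SDK for Java v2. The ListWebAclsRequest class and listWebACLs method names are assumed from the SDK's codegen conventions and are not taken from this model.

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.wafv2.Wafv2Client;
import software.amazon.awssdk.services.wafv2.model.ListWebAclsRequest;
import software.amazon.awssdk.services.wafv2.model.Scope;

public class Wafv2ClientScopes {
    public static void main(String[] args) {
        // For CloudFront (global) resources, use the us-east-1 endpoint and the CLOUDFRONT scope.
        try (Wafv2Client cloudFrontWaf = Wafv2Client.builder().region(Region.US_EAST_1).build()) {
            cloudFrontWaf.listWebACLs(ListWebAclsRequest.builder()
                    .scope(Scope.CLOUDFRONT)
                    .build());
        }

        // For regional resources (ALB, API Gateway REST API, AppSync GraphQL API),
        // use the resource's Region and the REGIONAL scope.
        try (Wafv2Client regionalWaf = Wafv2Client.builder().region(Region.US_WEST_2).build()) {
            regionalWaf.listWebACLs(ListWebAclsRequest.builder()
                    .scope(Scope.REGIONAL)
                    .build());
        }
    }
}
```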

    " +} diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml new file mode 100644 index 000000000000..c204199197b1 --- /dev/null +++ b/services/wellarchitected/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + wellarchitected + AWS Java SDK :: Services :: Well Architected + The AWS Java SDK for Well Architected module holds the client classes that are used for + communicating with Well Architected. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.wellarchitected + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json b/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..279d4ee705a1 --- /dev/null +++ b/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,49 @@ +{ + "pagination": { + "ListAnswers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListLensReviewImprovements": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListLensReviews": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListLenses": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListMilestones": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListNotifications": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListShareInvitations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListWorkloadShares": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListWorkloads": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/wellarchitected/src/main/resources/codegen-resources/service-2.json b/services/wellarchitected/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..124f80ce87e3 --- /dev/null +++ b/services/wellarchitected/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2240 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-03-31", + "endpointPrefix":"wellarchitected", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Well-Architected", + "serviceFullName":"AWS Well-Architected Tool", + "serviceId":"WellArchitected", + "signatureVersion":"v4", + "signingName":"wellarchitected", + "uid":"wellarchitected-2020-03-31" + }, + "operations":{ + "AssociateLenses":{ + "name":"AssociateLenses", + "http":{ + "method":"PATCH", + "requestUri":"/workloads/{WorkloadId}/associateLenses" + }, + "input":{"shape":"AssociateLensesInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Associate a lens to a workload.

    " + }, + "CreateMilestone":{ + "name":"CreateMilestone", + "http":{ + "method":"POST", + "requestUri":"/workloads/{WorkloadId}/milestones" + }, + "input":{"shape":"CreateMilestoneInput"}, + "output":{"shape":"CreateMilestoneOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Create a milestone for an existing workload.

    " + }, + "CreateWorkload":{ + "name":"CreateWorkload", + "http":{ + "method":"POST", + "requestUri":"/workloads" + }, + "input":{"shape":"CreateWorkloadInput"}, + "output":{"shape":"CreateWorkloadOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Create a new workload.

    The owner of a workload can share the workload with other AWS accounts and IAM users in the same AWS Region. Only the owner of a workload can delete it.

    For more information, see Defining a Workload in the AWS Well-Architected Tool User Guide.

    " + }, + "CreateWorkloadShare":{ + "name":"CreateWorkloadShare", + "http":{ + "method":"POST", + "requestUri":"/workloads/{WorkloadId}/shares" + }, + "input":{"shape":"CreateWorkloadShareInput"}, + "output":{"shape":"CreateWorkloadShareOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Create a workload share.

    The owner of a workload can share it with other AWS accounts and IAM users in the same AWS Region. Shared access to a workload is not removed until the workload invitation is deleted.

    For more information, see Sharing a Workload in the AWS Well-Architected Tool User Guide.

    " + }, + "DeleteWorkload":{ + "name":"DeleteWorkload", + "http":{ + "method":"DELETE", + "requestUri":"/workloads/{WorkloadId}" + }, + "input":{"shape":"DeleteWorkloadInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Delete an existing workload.

    " + }, + "DeleteWorkloadShare":{ + "name":"DeleteWorkloadShare", + "http":{ + "method":"DELETE", + "requestUri":"/workloads/{WorkloadId}/shares/{ShareId}" + }, + "input":{"shape":"DeleteWorkloadShareInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Delete a workload share.

    " + }, + "DisassociateLenses":{ + "name":"DisassociateLenses", + "http":{ + "method":"PATCH", + "requestUri":"/workloads/{WorkloadId}/disassociateLenses" + }, + "input":{"shape":"DisassociateLensesInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Disassociate a lens from a workload.

    The AWS Well-Architected Framework lens (wellarchitected) cannot be removed from a workload.

    " + }, + "GetAnswer":{ + "name":"GetAnswer", + "http":{ + "method":"GET", + "requestUri":"/workloads/{WorkloadId}/lensReviews/{LensAlias}/answers/{QuestionId}" + }, + "input":{"shape":"GetAnswerInput"}, + "output":{"shape":"GetAnswerOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get the answer to a specific question in a workload review.

    " + }, + "GetLensReview":{ + "name":"GetLensReview", + "http":{ + "method":"GET", + "requestUri":"/workloads/{WorkloadId}/lensReviews/{LensAlias}" + }, + "input":{"shape":"GetLensReviewInput"}, + "output":{"shape":"GetLensReviewOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get lens review.

    " + }, + "GetLensReviewReport":{ + "name":"GetLensReviewReport", + "http":{ + "method":"GET", + "requestUri":"/workloads/{WorkloadId}/lensReviews/{LensAlias}/report" + }, + "input":{"shape":"GetLensReviewReportInput"}, + "output":{"shape":"GetLensReviewReportOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get lens review report.

    " + }, + "GetLensVersionDifference":{ + "name":"GetLensVersionDifference", + "http":{ + "method":"GET", + "requestUri":"/lenses/{LensAlias}/versionDifference" + }, + "input":{"shape":"GetLensVersionDifferenceInput"}, + "output":{"shape":"GetLensVersionDifferenceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get lens version differences.

    " + }, + "GetMilestone":{ + "name":"GetMilestone", + "http":{ + "method":"GET", + "requestUri":"/workloads/{WorkloadId}/milestones/{MilestoneNumber}" + }, + "input":{"shape":"GetMilestoneInput"}, + "output":{"shape":"GetMilestoneOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get a milestone for an existing workload.

    " + }, + "GetWorkload":{ + "name":"GetWorkload", + "http":{ + "method":"GET", + "requestUri":"/workloads/{WorkloadId}" + }, + "input":{"shape":"GetWorkloadInput"}, + "output":{"shape":"GetWorkloadOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get an existing workload.

    " + }, + "ListAnswers":{ + "name":"ListAnswers", + "http":{ + "method":"GET", + "requestUri":"/workloads/{WorkloadId}/lensReviews/{LensAlias}/answers" + }, + "input":{"shape":"ListAnswersInput"}, + "output":{"shape":"ListAnswersOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List answers.

    " + }, + "ListLensReviewImprovements":{ + "name":"ListLensReviewImprovements", + "http":{ + "method":"GET", + "requestUri":"/workloads/{WorkloadId}/lensReviews/{LensAlias}/improvements" + }, + "input":{"shape":"ListLensReviewImprovementsInput"}, + "output":{"shape":"ListLensReviewImprovementsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List lens review improvements.

    " + }, + "ListLensReviews":{ + "name":"ListLensReviews", + "http":{ + "method":"GET", + "requestUri":"/workloads/{WorkloadId}/lensReviews" + }, + "input":{"shape":"ListLensReviewsInput"}, + "output":{"shape":"ListLensReviewsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List lens reviews.

    " + }, + "ListLenses":{ + "name":"ListLenses", + "http":{ + "method":"GET", + "requestUri":"/lenses" + }, + "input":{"shape":"ListLensesInput"}, + "output":{"shape":"ListLensesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List the available lenses.

    " + }, + "ListMilestones":{ + "name":"ListMilestones", + "http":{ + "method":"POST", + "requestUri":"/workloads/{WorkloadId}/milestonesSummaries" + }, + "input":{"shape":"ListMilestonesInput"}, + "output":{"shape":"ListMilestonesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List all milestones for an existing workload.

    " + }, + "ListNotifications":{ + "name":"ListNotifications", + "http":{ + "method":"POST", + "requestUri":"/notifications" + }, + "input":{"shape":"ListNotificationsInput"}, + "output":{"shape":"ListNotificationsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List lens notifications.

    " + }, + "ListShareInvitations":{ + "name":"ListShareInvitations", + "http":{ + "method":"GET", + "requestUri":"/shareInvitations" + }, + "input":{"shape":"ListShareInvitationsInput"}, + "output":{"shape":"ListShareInvitationsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List the workload invitations.

    " + }, + "ListWorkloadShares":{ + "name":"ListWorkloadShares", + "http":{ + "method":"GET", + "requestUri":"/workloads/{WorkloadId}/shares" + }, + "input":{"shape":"ListWorkloadSharesInput"}, + "output":{"shape":"ListWorkloadSharesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List the workload shares associated with the workload.

    " + }, + "ListWorkloads":{ + "name":"ListWorkloads", + "http":{ + "method":"POST", + "requestUri":"/workloadsSummaries" + }, + "input":{"shape":"ListWorkloadsInput"}, + "output":{"shape":"ListWorkloadsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List workloads. Paginated.

    " + }, + "UpdateAnswer":{ + "name":"UpdateAnswer", + "http":{ + "method":"PATCH", + "requestUri":"/workloads/{WorkloadId}/lensReviews/{LensAlias}/answers/{QuestionId}" + }, + "input":{"shape":"UpdateAnswerInput"}, + "output":{"shape":"UpdateAnswerOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Update the answer.

    " + }, + "UpdateLensReview":{ + "name":"UpdateLensReview", + "http":{ + "method":"PATCH", + "requestUri":"/workloads/{WorkloadId}/lensReviews/{LensAlias}" + }, + "input":{"shape":"UpdateLensReviewInput"}, + "output":{"shape":"UpdateLensReviewOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Update lens review.

    " + }, + "UpdateShareInvitation":{ + "name":"UpdateShareInvitation", + "http":{ + "method":"PATCH", + "requestUri":"/shareInvitations/{ShareInvitationId}" + }, + "input":{"shape":"UpdateShareInvitationInput"}, + "output":{"shape":"UpdateShareInvitationOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Update a workload invitation.

    " + }, + "UpdateWorkload":{ + "name":"UpdateWorkload", + "http":{ + "method":"PATCH", + "requestUri":"/workloads/{WorkloadId}" + }, + "input":{"shape":"UpdateWorkloadInput"}, + "output":{"shape":"UpdateWorkloadOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Update an existing workload.

    " + }, + "UpdateWorkloadShare":{ + "name":"UpdateWorkloadShare", + "http":{ + "method":"PATCH", + "requestUri":"/workloads/{WorkloadId}/shares/{ShareId}" + }, + "input":{"shape":"UpdateWorkloadShareInput"}, + "output":{"shape":"UpdateWorkloadShareOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Update a workload share.

    " + }, + "UpgradeLensReview":{ + "name":"UpgradeLensReview", + "http":{ + "method":"PUT", + "requestUri":"/workloads/{WorkloadId}/lensReviews/{LensAlias}/upgrade" + }, + "input":{"shape":"UpgradeLensReviewInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Upgrade lens review.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    User does not have sufficient access to perform this action.

    ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "Answer":{ + "type":"structure", + "members":{ + "QuestionId":{"shape":"QuestionId"}, + "PillarId":{"shape":"PillarId"}, + "QuestionTitle":{"shape":"QuestionTitle"}, + "QuestionDescription":{"shape":"QuestionDescription"}, + "ImprovementPlanUrl":{"shape":"ImprovementPlanUrl"}, + "HelpfulResourceUrl":{"shape":"HelpfulResourceUrl"}, + "Choices":{"shape":"Choices"}, + "SelectedChoices":{"shape":"SelectedChoices"}, + "IsApplicable":{"shape":"IsApplicable"}, + "Risk":{"shape":"Risk"}, + "Notes":{"shape":"Notes"} + }, + "documentation":"

    An answer to a question.

    " + }, + "AnswerSummaries":{ + "type":"list", + "member":{"shape":"AnswerSummary"}, + "documentation":"

    List of answer summaries of a lens review in a workload.

    " + }, + "AnswerSummary":{ + "type":"structure", + "members":{ + "QuestionId":{"shape":"QuestionId"}, + "PillarId":{"shape":"PillarId"}, + "QuestionTitle":{"shape":"QuestionTitle"}, + "Choices":{"shape":"Choices"}, + "SelectedChoices":{"shape":"SelectedChoices"}, + "IsApplicable":{"shape":"IsApplicable"}, + "Risk":{"shape":"Risk"} + }, + "documentation":"

    An answer summary of a lens review in a workload.

    " + }, + "AssociateLensesInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "LensAliases" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "LensAliases":{"shape":"LensAliases"} + }, + "documentation":"

    Input to associate lens reviews.

    " + }, + "AwsAccountId":{ + "type":"string", + "documentation":"

    An AWS account ID.

    ", + "pattern":"[0-9]{12}" + }, + "AwsRegion":{ + "type":"string", + "documentation":"

    An AWS Region, for example, us-west-2 or ap-northeast-1.

    ", + "max":100 + }, + "Base64String":{ + "type":"string", + "documentation":"

    The Base64-encoded string representation of a lens review report.

    This data can be used to create a PDF file.

    " + }, + "Choice":{ + "type":"structure", + "members":{ + "ChoiceId":{"shape":"ChoiceId"}, + "Title":{"shape":"ChoiceTitle"}, + "Description":{"shape":"ChoiceDescription"} + }, + "documentation":"

    A choice available to answer a question.

    " + }, + "ChoiceDescription":{ + "type":"string", + "documentation":"

    The description of a choice.

    ", + "max":1024, + "min":1 + }, + "ChoiceId":{ + "type":"string", + "documentation":"

    The ID of a choice.

    ", + "max":64, + "min":1 + }, + "ChoiceTitle":{ + "type":"string", + "documentation":"

    The title of a choice.

    ", + "max":512, + "min":1 + }, + "Choices":{ + "type":"list", + "member":{"shape":"Choice"}, + "documentation":"

    List of choices available for a question.

    " + }, + "ClientRequestToken":{ + "type":"string", + "documentation":"

    A unique case-sensitive string used to ensure that this request is idempotent (executes only once).

    You should not reuse the same token for other requests. If you retry a request with the same client request token and the same parameters after it has completed successfully, the result of the original request is returned.

    This token is listed as required; however, if you do not specify it, the AWS SDKs automatically generate one for you. If you are not using the AWS SDK or the AWS CLI, you must provide this token or the request will fail.

    " + }, + "ConflictException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "ResourceId":{"shape":"ExceptionResourceId"}, + "ResourceType":{"shape":"ExceptionResourceType"} + }, + "documentation":"

    The resource already exists.

    ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "Count":{ + "type":"integer", + "documentation":"

    A non-negative integer that denotes a count.

    ", + "min":0 + }, + "CreateMilestoneInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "MilestoneName", + "ClientRequestToken" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "MilestoneName":{"shape":"MilestoneName"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + }, + "documentation":"

    Input for milestone creation.

    " + }, + "CreateMilestoneOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "MilestoneNumber":{"shape":"MilestoneNumber"} + }, + "documentation":"

    Output of a create milestone call.

    " + }, + "CreateWorkloadInput":{ + "type":"structure", + "required":[ + "WorkloadName", + "Description", + "Environment", + "ReviewOwner", + "Lenses", + "ClientRequestToken" + ], + "members":{ + "WorkloadName":{"shape":"WorkloadName"}, + "Description":{"shape":"WorkloadDescription"}, + "Environment":{"shape":"WorkloadEnvironment"}, + "AccountIds":{"shape":"WorkloadAccountIds"}, + "AwsRegions":{"shape":"WorkloadAwsRegions"}, + "NonAwsRegions":{"shape":"WorkloadNonAwsRegions"}, + "PillarPriorities":{"shape":"WorkloadPillarPriorities"}, + "ArchitecturalDesign":{"shape":"WorkloadArchitecturalDesign"}, + "ReviewOwner":{"shape":"WorkloadReviewOwner"}, + "IndustryType":{"shape":"WorkloadIndustryType"}, + "Industry":{"shape":"WorkloadIndustry"}, + "Lenses":{"shape":"WorkloadLenses"}, + "Notes":{"shape":"Notes"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + }, + "documentation":"

    Input for workload creation.

    " + }, + "CreateWorkloadOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "WorkloadArn":{"shape":"WorkloadArn"} + }, + "documentation":"

    Output of a create workload call.

    " + }, + "CreateWorkloadShareInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "SharedWith", + "PermissionType", + "ClientRequestToken" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "SharedWith":{"shape":"SharedWith"}, + "PermissionType":{"shape":"PermissionType"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + }, + "documentation":"

    Input for Create Workload Share

    " + }, + "CreateWorkloadShareOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "ShareId":{"shape":"ShareId"} + }, + "documentation":"

    Output of a create workload share call.

    " + }, + "DeleteWorkloadInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "ClientRequestToken" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true, + "location":"querystring", + "locationName":"ClientRequestToken" + } + }, + "documentation":"

    Input for workload deletion.

    " + }, + "DeleteWorkloadShareInput":{ + "type":"structure", + "required":[ + "ShareId", + "WorkloadId", + "ClientRequestToken" + ], + "members":{ + "ShareId":{ + "shape":"ShareId", + "location":"uri", + "locationName":"ShareId" + }, + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true, + "location":"querystring", + "locationName":"ClientRequestToken" + } + }, + "documentation":"

    Input for Delete Workload Share

    " + }, + "DifferenceStatus":{ + "type":"string", + "enum":[ + "UPDATED", + "NEW", + "DELETED" + ] + }, + "DisassociateLensesInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "LensAliases" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "LensAliases":{"shape":"LensAliases"} + }, + "documentation":"

    Input to disassociate lens reviews.

    " + }, + "ExceptionMessage":{ + "type":"string", + "documentation":"

    Description of the error.

    " + }, + "ExceptionResourceId":{ + "type":"string", + "documentation":"

    Identifier of the resource affected.

    " + }, + "ExceptionResourceType":{ + "type":"string", + "documentation":"

    Type of the resource affected.

    " + }, + "GetAnswerInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "LensAlias", + "QuestionId" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "LensAlias":{ + "shape":"LensAlias", + "location":"uri", + "locationName":"LensAlias" + }, + "QuestionId":{ + "shape":"QuestionId", + "location":"uri", + "locationName":"QuestionId" + }, + "MilestoneNumber":{ + "shape":"MilestoneNumber", + "location":"querystring", + "locationName":"MilestoneNumber" + } + }, + "documentation":"

    Input to get answer.

    " + }, + "GetAnswerOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "MilestoneNumber":{"shape":"MilestoneNumber"}, + "LensAlias":{"shape":"LensAlias"}, + "Answer":{"shape":"Answer"} + }, + "documentation":"

    Output of a get answer call.

    " + }, + "GetLensReviewInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "LensAlias" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "LensAlias":{ + "shape":"LensAlias", + "location":"uri", + "locationName":"LensAlias" + }, + "MilestoneNumber":{ + "shape":"MilestoneNumber", + "location":"querystring", + "locationName":"MilestoneNumber" + } + }, + "documentation":"

    Input to get lens review.

    " + }, + "GetLensReviewOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "MilestoneNumber":{"shape":"MilestoneNumber"}, + "LensReview":{"shape":"LensReview"} + }, + "documentation":"

    Output of a get lens review call.

    " + }, + "GetLensReviewReportInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "LensAlias" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "LensAlias":{ + "shape":"LensAlias", + "location":"uri", + "locationName":"LensAlias" + }, + "MilestoneNumber":{ + "shape":"MilestoneNumber", + "location":"querystring", + "locationName":"MilestoneNumber" + } + }, + "documentation":"

    Input to get lens review report.

    " + }, + "GetLensReviewReportOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "MilestoneNumber":{"shape":"MilestoneNumber"}, + "LensReviewReport":{"shape":"LensReviewReport"} + }, + "documentation":"

    Output of a get lens review report call.

    " + }, + "GetLensVersionDifferenceInput":{ + "type":"structure", + "required":[ + "LensAlias", + "BaseLensVersion" + ], + "members":{ + "LensAlias":{ + "shape":"LensAlias", + "location":"uri", + "locationName":"LensAlias" + }, + "BaseLensVersion":{ + "shape":"LensVersion", + "documentation":"

    The base version of the lens.

    ", + "location":"querystring", + "locationName":"BaseLensVersion" + } + } + }, + "GetLensVersionDifferenceOutput":{ + "type":"structure", + "members":{ + "LensAlias":{"shape":"LensAlias"}, + "BaseLensVersion":{ + "shape":"LensVersion", + "documentation":"

    The base version of the lens.

    " + }, + "LatestLensVersion":{ + "shape":"LensVersion", + "documentation":"

    The latest version of the lens.

    " + }, + "VersionDifferences":{"shape":"VersionDifferences"} + } + }, + "GetMilestoneInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "MilestoneNumber" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "MilestoneNumber":{ + "shape":"MilestoneNumber", + "location":"uri", + "locationName":"MilestoneNumber" + } + }, + "documentation":"

    Input to get a milestone.

    " + }, + "GetMilestoneOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "Milestone":{"shape":"Milestone"} + }, + "documentation":"

    Output of a get milestone call.

    " + }, + "GetWorkloadInput":{ + "type":"structure", + "required":["WorkloadId"], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + } + }, + "documentation":"

    Input to get a workload.

    " + }, + "GetWorkloadOutput":{ + "type":"structure", + "members":{ + "Workload":{"shape":"Workload"} + }, + "documentation":"

    Output of a get workload call.

    " + }, + "HelpfulResourceUrl":{ + "type":"string", + "documentation":"

    The helpful resource URL for a question.

    ", + "max":2048, + "min":1 + }, + "ImprovementPlanUrl":{ + "type":"string", + "documentation":"

    The improvement plan URL for a question.

    ", + "max":2048, + "min":1 + }, + "ImprovementSummaries":{ + "type":"list", + "member":{"shape":"ImprovementSummary"}, + "documentation":"

    List of improvement summaries of a lens review in a workload.

    " + }, + "ImprovementSummary":{ + "type":"structure", + "members":{ + "QuestionId":{"shape":"QuestionId"}, + "PillarId":{"shape":"PillarId"}, + "QuestionTitle":{"shape":"QuestionTitle"}, + "Risk":{"shape":"Risk"}, + "ImprovementPlanUrl":{"shape":"ImprovementPlanUrl"} + }, + "documentation":"

    An improvement summary of a lens review in a workload.

    " + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    There is a problem with the AWS Well-Architected Tool API service.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "IsApplicable":{ + "type":"boolean", + "documentation":"

    Defines whether this question is applicable to a lens review.

    " + }, + "IsReviewOwnerUpdateAcknowledged":{"type":"boolean"}, + "LensAlias":{ + "type":"string", + "documentation":"

    The alias of the lens, for example, serverless.

    Each lens is identified by its LensSummary$LensAlias.

    ", + "max":64, + "min":1 + }, + "LensAliases":{ + "type":"list", + "member":{"shape":"LensAlias"}, + "documentation":"

    List of lens aliases to associate or disassociate with a workload.

    Identify a lens using its LensSummary$LensAlias.

    ", + "min":1 + }, + "LensDescription":{ + "type":"string", + "documentation":"

    The description of the lens.

    ", + "max":1024, + "min":1 + }, + "LensName":{ + "type":"string", + "documentation":"

    The full name of the lens.

    ", + "max":128, + "min":1 + }, + "LensReview":{ + "type":"structure", + "members":{ + "LensAlias":{"shape":"LensAlias"}, + "LensVersion":{ + "shape":"LensVersion", + "documentation":"

    The version of the lens.

    " + }, + "LensName":{"shape":"LensName"}, + "LensStatus":{ + "shape":"LensStatus", + "documentation":"

    The status of the lens.

    " + }, + "PillarReviewSummaries":{"shape":"PillarReviewSummaries"}, + "UpdatedAt":{"shape":"Timestamp"}, + "Notes":{"shape":"Notes"}, + "RiskCounts":{"shape":"RiskCounts"}, + "NextToken":{"shape":"NextToken"} + }, + "documentation":"

    A lens review of a workload.

    " + }, + "LensReviewReport":{ + "type":"structure", + "members":{ + "LensAlias":{"shape":"LensAlias"}, + "Base64String":{"shape":"Base64String"} + }, + "documentation":"

    A report of a lens review.

    " + }, + "LensReviewSummaries":{ + "type":"list", + "member":{"shape":"LensReviewSummary"}, + "documentation":"

    List of lens review summaries of a workload.

    " + }, + "LensReviewSummary":{ + "type":"structure", + "members":{ + "LensAlias":{"shape":"LensAlias"}, + "LensVersion":{ + "shape":"LensVersion", + "documentation":"

    The version of the lens.

    " + }, + "LensName":{"shape":"LensName"}, + "LensStatus":{ + "shape":"LensStatus", + "documentation":"

    The status of the lens.

    " + }, + "UpdatedAt":{"shape":"Timestamp"}, + "RiskCounts":{"shape":"RiskCounts"} + }, + "documentation":"

    A lens review summary of a workload.

    " + }, + "LensStatus":{ + "type":"string", + "enum":[ + "CURRENT", + "NOT_CURRENT", + "DEPRECATED" + ] + }, + "LensSummaries":{ + "type":"list", + "member":{"shape":"LensSummary"}, + "documentation":"

    List of lens summaries of available lenses.

    " + }, + "LensSummary":{ + "type":"structure", + "members":{ + "LensAlias":{"shape":"LensAlias"}, + "LensVersion":{ + "shape":"LensVersion", + "documentation":"

    The version of the lens.

    " + }, + "LensName":{"shape":"LensName"}, + "Description":{"shape":"LensDescription"} + }, + "documentation":"

    A lens summary of a lens.

    " + }, + "LensUpgradeSummary":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "WorkloadName":{"shape":"WorkloadName"}, + "LensAlias":{"shape":"LensAlias"}, + "CurrentLensVersion":{ + "shape":"LensVersion", + "documentation":"

    The current version of the lens.

    " + }, + "LatestLensVersion":{ + "shape":"LensVersion", + "documentation":"

    The latest version of the lens.

    " + } + }, + "documentation":"

    Lens upgrade summary return object.

    " + }, + "LensVersion":{ + "type":"string", + "max":128, + "min":1 + }, + "ListAnswersInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "LensAlias" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "LensAlias":{ + "shape":"LensAlias", + "location":"uri", + "locationName":"LensAlias" + }, + "PillarId":{ + "shape":"PillarId", + "location":"querystring", + "locationName":"PillarId" + }, + "MilestoneNumber":{ + "shape":"MilestoneNumber", + "location":"querystring", + "locationName":"MilestoneNumber" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"ListAnswersMaxResults", + "documentation":"

    The maximum number of results to return for this request.

    ", + "location":"querystring", + "locationName":"MaxResults" + } + }, + "documentation":"

    Input to list answers.

    " + }, + "ListAnswersMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "ListAnswersOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "MilestoneNumber":{"shape":"MilestoneNumber"}, + "LensAlias":{"shape":"LensAlias"}, + "AnswerSummaries":{"shape":"AnswerSummaries"}, + "NextToken":{"shape":"NextToken"} + }, + "documentation":"

    Output of a list answers call.

    " + }, + "ListLensReviewImprovementsInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "LensAlias" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "LensAlias":{ + "shape":"LensAlias", + "location":"uri", + "locationName":"LensAlias" + }, + "PillarId":{ + "shape":"PillarId", + "location":"querystring", + "locationName":"PillarId" + }, + "MilestoneNumber":{ + "shape":"MilestoneNumber", + "location":"querystring", + "locationName":"MilestoneNumber" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"ListLensReviewImprovementsMaxResults", + "documentation":"

    The maximum number of results to return for this request.

    ", + "location":"querystring", + "locationName":"MaxResults" + } + }, + "documentation":"

    Input to list lens review improvements.

    " + }, + "ListLensReviewImprovementsMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListLensReviewImprovementsOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "MilestoneNumber":{"shape":"MilestoneNumber"}, + "LensAlias":{"shape":"LensAlias"}, + "ImprovementSummaries":{"shape":"ImprovementSummaries"}, + "NextToken":{"shape":"NextToken"} + }, + "documentation":"

    Output of a list lens review improvements call.

    " + }, + "ListLensReviewsInput":{ + "type":"structure", + "required":["WorkloadId"], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "MilestoneNumber":{ + "shape":"MilestoneNumber", + "location":"querystring", + "locationName":"MilestoneNumber" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"MaxResults" + } + }, + "documentation":"

    Input to list lens reviews.

    " + }, + "ListLensReviewsOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "MilestoneNumber":{"shape":"MilestoneNumber"}, + "LensReviewSummaries":{"shape":"LensReviewSummaries"}, + "NextToken":{"shape":"NextToken"} + }, + "documentation":"

    Output of a list lens reviews call.

    " + }, + "ListLensesInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"MaxResults" + } + }, + "documentation":"

    Input to list lenses.

    " + }, + "ListLensesOutput":{ + "type":"structure", + "members":{ + "LensSummaries":{"shape":"LensSummaries"}, + "NextToken":{"shape":"NextToken"} + }, + "documentation":"

    Output of a list lenses call.

    " + }, + "ListMilestonesInput":{ + "type":"structure", + "required":["WorkloadId"], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"} + }, + "documentation":"

    Input to list all milestones for a workload.

    " + }, + "ListMilestonesOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "MilestoneSummaries":{"shape":"MilestoneSummaries"}, + "NextToken":{"shape":"NextToken"} + }, + "documentation":"

    Output of a list milestones call.

    " + }, + "ListNotificationsInput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{ + "shape":"ListNotificationsMaxResults", + "documentation":"

    The maximum number of results to return for this request.

    " + } + } + }, + "ListNotificationsMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "ListNotificationsOutput":{ + "type":"structure", + "members":{ + "NotificationSummaries":{ + "shape":"NotificationSummaries", + "documentation":"

    List of lens notification summaries in a workload.

    " + }, + "NextToken":{"shape":"NextToken"} + } + }, + "ListShareInvitationsInput":{ + "type":"structure", + "members":{ + "WorkloadNamePrefix":{ + "shape":"WorkloadNamePrefix", + "location":"querystring", + "locationName":"WorkloadNamePrefix" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"ListShareInvitationsMaxResults", + "documentation":"

    The maximum number of results to return for this request.

    ", + "location":"querystring", + "locationName":"MaxResults" + } + }, + "documentation":"

    Input for List Share Invitations

    " + }, + "ListShareInvitationsMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "ListShareInvitationsOutput":{ + "type":"structure", + "members":{ + "ShareInvitationSummaries":{ + "shape":"ShareInvitationSummaries", + "documentation":"

    List of share invitation summaries in a workload.

    " + }, + "NextToken":{"shape":"NextToken"} + }, + "documentation":"

    Output of a list share invitations call.

    " + }, + "ListWorkloadSharesInput":{ + "type":"structure", + "required":["WorkloadId"], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "SharedWithPrefix":{ + "shape":"SharedWithPrefix", + "documentation":"

    The AWS account ID or IAM role with which the workload is shared.

    ", + "location":"querystring", + "locationName":"SharedWithPrefix" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"ListWorkloadSharesMaxResults", + "documentation":"

    The maximum number of results to return for this request.

    ", + "location":"querystring", + "locationName":"MaxResults" + } + }, + "documentation":"

    Input for List Workload Share

    " + }, + "ListWorkloadSharesMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "ListWorkloadSharesOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "WorkloadShareSummaries":{"shape":"WorkloadShareSummaries"}, + "NextToken":{"shape":"NextToken"} + }, + "documentation":"

    Output of a list workload shares call.

    " + }, + "ListWorkloadsInput":{ + "type":"structure", + "members":{ + "WorkloadNamePrefix":{"shape":"WorkloadNamePrefix"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{ + "shape":"ListWorkloadsMaxResults", + "documentation":"

    The maximum number of results to return for this request.

    " + } + }, + "documentation":"

    Input to list all workloads.

    " + }, + "ListWorkloadsMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "ListWorkloadsOutput":{ + "type":"structure", + "members":{ + "WorkloadSummaries":{"shape":"WorkloadSummaries"}, + "NextToken":{"shape":"NextToken"} + }, + "documentation":"

    Output of a list workloads call.

    " + }, + "MaxResults":{ + "type":"integer", + "documentation":"

    The maximum number of results to return for this request.

    ", + "max":50, + "min":1 + }, + "Milestone":{ + "type":"structure", + "members":{ + "MilestoneNumber":{"shape":"MilestoneNumber"}, + "MilestoneName":{"shape":"MilestoneName"}, + "RecordedAt":{"shape":"Timestamp"}, + "Workload":{"shape":"Workload"} + }, + "documentation":"

    A milestone return object.

    " + }, + "MilestoneName":{ + "type":"string", + "documentation":"

    The name of the milestone in a workload.

    Milestone names must be unique within a workload.

    ", + "max":100, + "min":3 + }, + "MilestoneNumber":{ + "type":"integer", + "documentation":"

    The milestone number.

    A workload can have a maximum of 100 milestones.

    ", + "max":100, + "min":1 + }, + "MilestoneSummaries":{ + "type":"list", + "member":{"shape":"MilestoneSummary"}, + "documentation":"

    A list of milestone summaries.

    " + }, + "MilestoneSummary":{ + "type":"structure", + "members":{ + "MilestoneNumber":{"shape":"MilestoneNumber"}, + "MilestoneName":{"shape":"MilestoneName"}, + "RecordedAt":{"shape":"Timestamp"}, + "WorkloadSummary":{"shape":"WorkloadSummary"} + }, + "documentation":"

    A milestone summary return object.

    " + }, + "NextToken":{ + "type":"string", + "documentation":"

    The token to use to retrieve the next set of results.

    " + }, + "Notes":{ + "type":"string", + "documentation":"

    The notes associated with the workload.

    ", + "max":2084 + }, + "NotificationSummaries":{ + "type":"list", + "member":{"shape":"NotificationSummary"} + }, + "NotificationSummary":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"NotificationType", + "documentation":"

    The type of notification.

    " + }, + "LensUpgradeSummary":{ + "shape":"LensUpgradeSummary", + "documentation":"

    Summary of lens upgrade.

    " + } + }, + "documentation":"

    A notification summary return object.

    " + }, + "NotificationType":{ + "type":"string", + "enum":[ + "LENS_VERSION_UPGRADED", + "LENS_VERSION_DEPRECATED" + ] + }, + "PermissionType":{ + "type":"string", + "documentation":"

    Permission granted on a workload share.

    ", + "enum":[ + "READONLY", + "CONTRIBUTOR" + ] + }, + "PillarDifference":{ + "type":"structure", + "members":{ + "PillarId":{"shape":"PillarId"}, + "DifferenceStatus":{ + "shape":"DifferenceStatus", + "documentation":"

    Indicates the type of change to the pillar.

    " + }, + "QuestionDifferences":{ + "shape":"QuestionDifferences", + "documentation":"

    List of question differences.

    " + } + }, + "documentation":"

    A pillar difference return object.

    " + }, + "PillarDifferences":{ + "type":"list", + "member":{"shape":"PillarDifference"} + }, + "PillarId":{ + "type":"string", + "documentation":"

    The ID used to identify a pillar, for example, security.

    A pillar is identified by its PillarReviewSummary$PillarId.

    ", + "max":64, + "min":1 + }, + "PillarName":{ + "type":"string", + "documentation":"

    The name of the pillar.

    ", + "max":128, + "min":1 + }, + "PillarNotes":{ + "type":"map", + "key":{"shape":"PillarId"}, + "value":{"shape":"Notes"}, + "documentation":"

    Map from pillar IDs to the pillar notes of a lens review in a workload.

    " + }, + "PillarReviewSummaries":{ + "type":"list", + "member":{"shape":"PillarReviewSummary"}, + "documentation":"

    List of pillar review summaries of a lens review in a workload.

    " + }, + "PillarReviewSummary":{ + "type":"structure", + "members":{ + "PillarId":{"shape":"PillarId"}, + "PillarName":{"shape":"PillarName"}, + "Notes":{"shape":"Notes"}, + "RiskCounts":{"shape":"RiskCounts"} + }, + "documentation":"

    A pillar review summary of a lens review.

    " + }, + "QuestionDescription":{ + "type":"string", + "documentation":"

    The description of the question.

    ", + "max":1024, + "min":1 + }, + "QuestionDifference":{ + "type":"structure", + "members":{ + "QuestionId":{"shape":"QuestionId"}, + "QuestionTitle":{"shape":"QuestionTitle"}, + "DifferenceStatus":{ + "shape":"DifferenceStatus", + "documentation":"

    Indicates the type of change to the question.

    " + } + }, + "documentation":"

    A question difference return object.

    " + }, + "QuestionDifferences":{ + "type":"list", + "member":{"shape":"QuestionDifference"} + }, + "QuestionId":{ + "type":"string", + "documentation":"

    The ID of the question.

    ", + "max":128, + "min":1 + }, + "QuestionTitle":{ + "type":"string", + "documentation":"

    The title of the question.

    ", + "max":512, + "min":1 + }, + "QuotaCode":{ + "type":"string", + "documentation":"

    Service Quotas requirement to identify originating quota.

    " + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "ResourceId":{"shape":"ExceptionResourceId"}, + "ResourceType":{"shape":"ExceptionResourceType"} + }, + "documentation":"

    The requested resource was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "Risk":{ + "type":"string", + "documentation":"

    The risk for a given workload, lens review, pillar, or question.

    ", + "enum":[ + "UNANSWERED", + "HIGH", + "MEDIUM", + "NONE", + "NOT_APPLICABLE" + ] + }, + "RiskCounts":{ + "type":"map", + "key":{"shape":"Risk"}, + "value":{"shape":"Count"}, + "documentation":"

    A map from risk names to the count of questions that have that rating.

    " + }, + "SelectedChoices":{ + "type":"list", + "member":{"shape":"ChoiceId"}, + "documentation":"

    List of selected choice IDs in a question answer.

    " + }, + "ServiceCode":{ + "type":"string", + "documentation":"

    Service Quotas requirement to identify originating service.

    " + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "Message", + "QuotaCode", + "ServiceCode" + ], + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "ResourceId":{"shape":"ExceptionResourceId"}, + "ResourceType":{"shape":"ExceptionResourceType"}, + "QuotaCode":{"shape":"QuotaCode"}, + "ServiceCode":{"shape":"ServiceCode"} + }, + "documentation":"

    The user has reached their resource quota.

    ", + "error":{"httpStatusCode":402}, + "exception":true + }, + "ShareId":{ + "type":"string", + "documentation":"

    The ID associated with the workload share.

    ", + "pattern":"[0-9a-f]{32}" + }, + "ShareInvitation":{ + "type":"structure", + "members":{ + "ShareInvitationId":{ + "shape":"ShareInvitationId", + "documentation":"

    The ID assigned to the share invitation.

    " + }, + "WorkloadId":{"shape":"WorkloadId"} + }, + "documentation":"

    The share invitation.

    " + }, + "ShareInvitationAction":{ + "type":"string", + "documentation":"

    Share invitation action taken by contributor.

    ", + "enum":[ + "ACCEPT", + "REJECT" + ] + }, + "ShareInvitationId":{ + "type":"string", + "pattern":"[0-9a-f]{32}" + }, + "ShareInvitationSummaries":{ + "type":"list", + "member":{"shape":"ShareInvitationSummary"} + }, + "ShareInvitationSummary":{ + "type":"structure", + "members":{ + "ShareInvitationId":{ + "shape":"ShareInvitationId", + "documentation":"

    The ID assigned to the share invitation.

    " + }, + "SharedBy":{"shape":"AwsAccountId"}, + "SharedWith":{"shape":"SharedWith"}, + "PermissionType":{"shape":"PermissionType"}, + "WorkloadName":{"shape":"WorkloadName"}, + "WorkloadId":{"shape":"WorkloadId"} + }, + "documentation":"

    A share invitation summary return object.

    " + }, + "ShareStatus":{ + "type":"string", + "documentation":"

    The status of a workload share.

    ", + "enum":[ + "ACCEPTED", + "REJECTED", + "PENDING", + "REVOKED", + "EXPIRED" + ] + }, + "SharedWith":{ + "type":"string", + "documentation":"

    The AWS account ID or IAM role with which the workload is shared.

    ", + "max":2048, + "min":12 + }, + "SharedWithPrefix":{ + "type":"string", + "max":100 + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "QuotaCode":{"shape":"QuotaCode"}, + "ServiceCode":{"shape":"ServiceCode"} + }, + "documentation":"

    Request was denied due to request throttling.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{ + "type":"timestamp", + "documentation":"

    The date and time recorded.

    " + }, + "UpdateAnswerInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "LensAlias", + "QuestionId" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "LensAlias":{ + "shape":"LensAlias", + "location":"uri", + "locationName":"LensAlias" + }, + "QuestionId":{ + "shape":"QuestionId", + "location":"uri", + "locationName":"QuestionId" + }, + "SelectedChoices":{"shape":"SelectedChoices"}, + "Notes":{"shape":"Notes"}, + "IsApplicable":{"shape":"IsApplicable"} + }, + "documentation":"

    Input to update answer.

    " + }, + "UpdateAnswerOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "LensAlias":{"shape":"LensAlias"}, + "Answer":{"shape":"Answer"} + }, + "documentation":"

    Output of an update answer call.

    " + }, + "UpdateLensReviewInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "LensAlias" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "LensAlias":{ + "shape":"LensAlias", + "location":"uri", + "locationName":"LensAlias" + }, + "LensNotes":{"shape":"Notes"}, + "PillarNotes":{"shape":"PillarNotes"} + }, + "documentation":"

    Input for update lens review.

    " + }, + "UpdateLensReviewOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "LensReview":{"shape":"LensReview"} + }, + "documentation":"

    Output of an update lens review call.

    " + }, + "UpdateShareInvitationInput":{ + "type":"structure", + "required":[ + "ShareInvitationId", + "ShareInvitationAction" + ], + "members":{ + "ShareInvitationId":{ + "shape":"ShareInvitationId", + "documentation":"

    The ID assigned to the share invitation.

    ", + "location":"uri", + "locationName":"ShareInvitationId" + }, + "ShareInvitationAction":{"shape":"ShareInvitationAction"} + }, + "documentation":"

    Input for Update Share Invitation

    " + }, + "UpdateShareInvitationOutput":{ + "type":"structure", + "members":{ + "ShareInvitation":{ + "shape":"ShareInvitation", + "documentation":"

    The updated workload share invitation.

    " + } + } + }, + "UpdateWorkloadInput":{ + "type":"structure", + "required":["WorkloadId"], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "WorkloadName":{"shape":"WorkloadName"}, + "Description":{"shape":"WorkloadDescription"}, + "Environment":{"shape":"WorkloadEnvironment"}, + "AccountIds":{"shape":"WorkloadAccountIds"}, + "AwsRegions":{"shape":"WorkloadAwsRegions"}, + "NonAwsRegions":{"shape":"WorkloadNonAwsRegions"}, + "PillarPriorities":{"shape":"WorkloadPillarPriorities"}, + "ArchitecturalDesign":{"shape":"WorkloadArchitecturalDesign"}, + "ReviewOwner":{"shape":"WorkloadReviewOwner"}, + "IsReviewOwnerUpdateAcknowledged":{ + "shape":"IsReviewOwnerUpdateAcknowledged", + "documentation":"

    Flag indicating whether the workload owner has acknowledged that the Review owner field is required.

    If a Review owner is not added to the workload within 60 days of acknowledgement, access to the workload is restricted until an owner is added.

    " + }, + "IndustryType":{"shape":"WorkloadIndustryType"}, + "Industry":{"shape":"WorkloadIndustry"}, + "Notes":{"shape":"Notes"}, + "ImprovementStatus":{"shape":"WorkloadImprovementStatus"} + }, + "documentation":"

    Input to update a workload.

    " + }, + "UpdateWorkloadOutput":{ + "type":"structure", + "members":{ + "Workload":{"shape":"Workload"} + }, + "documentation":"

    Output of an update workload call.

    " + }, + "UpdateWorkloadShareInput":{ + "type":"structure", + "required":[ + "ShareId", + "WorkloadId", + "PermissionType" + ], + "members":{ + "ShareId":{ + "shape":"ShareId", + "location":"uri", + "locationName":"ShareId" + }, + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "PermissionType":{"shape":"PermissionType"} + }, + "documentation":"

    Input for Update Workload Share

    " + }, + "UpdateWorkloadShareOutput":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "WorkloadShare":{"shape":"WorkloadShare"} + }, + "documentation":"

    Output of an update workload share call.

    " + }, + "UpgradeLensReviewInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "LensAlias", + "MilestoneName" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "LensAlias":{ + "shape":"LensAlias", + "location":"uri", + "locationName":"LensAlias" + }, + "MilestoneName":{"shape":"MilestoneName"}, + "ClientRequestToken":{"shape":"ClientRequestToken"} + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "Reason":{"shape":"ValidationExceptionReason"}, + "Fields":{"shape":"ValidationExceptionFieldList"} + }, + "documentation":"

    The user input is not valid.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "Name", + "Message" + ], + "members":{ + "Name":{"shape":"ValidationExceptionFieldName"}, + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

    Stores information about a field passed inside a request that resulted in an exception.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"}, + "documentation":"

    The fields that caused the error, if applicable.

    " + }, + "ValidationExceptionFieldName":{ + "type":"string", + "documentation":"

    The field name for which validation failed.

    " + }, + "ValidationExceptionReason":{ + "type":"string", + "documentation":"

    The reason why the request failed validation.

    ", + "enum":[ + "UNKNOWN_OPERATION", + "CANNOT_PARSE", + "FIELD_VALIDATION_FAILED", + "OTHER" + ] + }, + "VersionDifferences":{ + "type":"structure", + "members":{ + "PillarDifferences":{ + "shape":"PillarDifferences", + "documentation":"

    The differences between the base and latest versions of the lens.

    " + } + }, + "documentation":"

    The differences between the base and latest versions of the lens.

    " + }, + "Workload":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "WorkloadArn":{"shape":"WorkloadArn"}, + "WorkloadName":{"shape":"WorkloadName"}, + "Description":{"shape":"WorkloadDescription"}, + "Environment":{"shape":"WorkloadEnvironment"}, + "UpdatedAt":{"shape":"Timestamp"}, + "AccountIds":{"shape":"WorkloadAccountIds"}, + "AwsRegions":{"shape":"WorkloadAwsRegions"}, + "NonAwsRegions":{"shape":"WorkloadNonAwsRegions"}, + "ArchitecturalDesign":{"shape":"WorkloadArchitecturalDesign"}, + "ReviewOwner":{"shape":"WorkloadReviewOwner"}, + "ReviewRestrictionDate":{"shape":"Timestamp"}, + "IsReviewOwnerUpdateAcknowledged":{ + "shape":"IsReviewOwnerUpdateAcknowledged", + "documentation":"

    Flag indicating whether the workload owner has acknowledged that the Review owner field is required.

    If a Review owner is not added to the workload within 60 days of acknowledgement, access to the workload is restricted until an owner is added.

    " + }, + "IndustryType":{"shape":"WorkloadIndustryType"}, + "Industry":{"shape":"WorkloadIndustry"}, + "Notes":{"shape":"Notes"}, + "ImprovementStatus":{"shape":"WorkloadImprovementStatus"}, + "RiskCounts":{"shape":"RiskCounts"}, + "PillarPriorities":{"shape":"WorkloadPillarPriorities"}, + "Lenses":{"shape":"WorkloadLenses"}, + "Owner":{"shape":"AwsAccountId"}, + "ShareInvitationId":{ + "shape":"ShareInvitationId", + "documentation":"

    The ID assigned to the share invitation.

    " + } + }, + "documentation":"

    A workload return object.

    " + }, + "WorkloadAccountIds":{ + "type":"list", + "member":{"shape":"AwsAccountId"}, + "documentation":"

    The list of AWS account IDs associated with the workload.

    ", + "max":100 + }, + "WorkloadArchitecturalDesign":{ + "type":"string", + "documentation":"

    The URL of the architectural design for the workload.

    ", + "max":2048 + }, + "WorkloadArn":{ + "type":"string", + "documentation":"

    The ARN for the workload.

    " + }, + "WorkloadAwsRegions":{ + "type":"list", + "member":{"shape":"AwsRegion"}, + "documentation":"

    The list of AWS Regions associated with the workload, for example, us-east-2 or ca-central-1.

    ", + "max":50 + }, + "WorkloadDescription":{ + "type":"string", + "documentation":"

    The description for the workload.

    ", + "max":250, + "min":3 + }, + "WorkloadEnvironment":{ + "type":"string", + "documentation":"

    The environment for the workload.

    ", + "enum":[ + "PRODUCTION", + "PREPRODUCTION" + ] + }, + "WorkloadId":{ + "type":"string", + "documentation":"

    The ID assigned to the workload. This ID is unique within an AWS Region.

    ", + "pattern":"[0-9a-f]{32}" + }, + "WorkloadImprovementStatus":{ + "type":"string", + "documentation":"

    The improvement status for a workload.

    ", + "enum":[ + "NOT_APPLICABLE", + "NOT_STARTED", + "IN_PROGRESS", + "COMPLETE", + "RISK_ACKNOWLEDGED" + ] + }, + "WorkloadIndustry":{ + "type":"string", + "documentation":"

    The industry for the workload.

    ", + "max":100 + }, + "WorkloadIndustryType":{ + "type":"string", + "documentation":"

    The industry type for the workload.

    If specified, must be one of the following:

    • Agriculture

    • Automobile

    • Defense

    • Design and Engineering

    • Digital Advertising

    • Education

    • Environmental Protection

    • Financial Services

    • Gaming

    • General Public Services

    • Healthcare

    • Hospitality

    • InfoTech

    • Justice and Public Safety

    • Life Sciences

    • Manufacturing

    • Media & Entertainment

    • Mining & Resources

    • Oil & Gas

    • Power & Utilities

    • Professional Services

    • Real Estate & Construction

    • Retail & Wholesale

    • Social Protection

    • Telecommunications

    • Travel, Transportation & Logistics

    • Other

    ", + "max":100 + }, + "WorkloadLenses":{ + "type":"list", + "member":{"shape":"LensAlias"}, + "documentation":"

    The list of lenses associated with the workload. Each lens is identified by its LensSummary$LensAlias.

    " + }, + "WorkloadName":{ + "type":"string", + "documentation":"

    The name of the workload.

    The name must be unique within an account within a Region. Spaces and capitalization are ignored when checking for uniqueness.

    ", + "max":100, + "min":3 + }, + "WorkloadNamePrefix":{ + "type":"string", + "documentation":"

    An optional prefix for the workload name; only workload names that begin with this prefix are returned in the results.

    ", + "max":100 + }, + "WorkloadNonAwsRegion":{ + "type":"string", + "max":25, + "min":3 + }, + "WorkloadNonAwsRegions":{ + "type":"list", + "member":{"shape":"WorkloadNonAwsRegion"}, + "documentation":"

    The list of non-AWS Regions associated with the workload.

    ", + "max":5 + }, + "WorkloadPillarPriorities":{ + "type":"list", + "member":{"shape":"PillarId"}, + "documentation":"

    The priorities of the pillars, which are used to order items in the improvement plan. Each pillar is represented by its PillarReviewSummary$PillarId.

    " + }, + "WorkloadReviewOwner":{ + "type":"string", + "documentation":"

    The review owner of the workload. The name, email address, or identifier for the primary group or individual that owns the workload review process.

    ", + "max":255, + "min":3 + }, + "WorkloadShare":{ + "type":"structure", + "members":{ + "ShareId":{"shape":"ShareId"}, + "SharedBy":{"shape":"AwsAccountId"}, + "SharedWith":{"shape":"SharedWith"}, + "PermissionType":{"shape":"PermissionType"}, + "Status":{"shape":"ShareStatus"}, + "WorkloadName":{"shape":"WorkloadName"}, + "WorkloadId":{"shape":"WorkloadId"} + }, + "documentation":"

    A workload share return object.

    " + }, + "WorkloadShareSummaries":{ + "type":"list", + "member":{"shape":"WorkloadShareSummary"}, + "documentation":"

    A list of workload share summaries.

    " + }, + "WorkloadShareSummary":{ + "type":"structure", + "members":{ + "ShareId":{"shape":"ShareId"}, + "SharedWith":{"shape":"SharedWith"}, + "PermissionType":{"shape":"PermissionType"}, + "Status":{"shape":"ShareStatus"} + }, + "documentation":"

    A workload share summary return object.

    " + }, + "WorkloadSummaries":{ + "type":"list", + "member":{"shape":"WorkloadSummary"}, + "documentation":"

    A list of workload summaries.

    " + }, + "WorkloadSummary":{ + "type":"structure", + "members":{ + "WorkloadId":{"shape":"WorkloadId"}, + "WorkloadArn":{"shape":"WorkloadArn"}, + "WorkloadName":{"shape":"WorkloadName"}, + "Owner":{"shape":"AwsAccountId"}, + "UpdatedAt":{"shape":"Timestamp"}, + "Lenses":{"shape":"WorkloadLenses"}, + "RiskCounts":{"shape":"RiskCounts"}, + "ImprovementStatus":{"shape":"WorkloadImprovementStatus"} + }, + "documentation":"

    A workload summary return object.

    " + } + }, + "documentation":"AWS Well-Architected Tool

    This is the AWS Well-Architected Tool API Reference.

    The AWS Well-Architected Tool API provides programmatic access to the AWS Well-Architected Tool in the AWS Management Console.

    Managing workloads:

    Managing milestones:

    Managing lenses:

    Managing reviews:

    Managing workload shares:

    Managing workload share invitations:

    For information about the AWS Well-Architected Tool, see the AWS Well-Architected Tool User Guide.

    " +} diff --git a/services/workdocs/build.properties b/services/workdocs/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/workdocs/build.properties +++ b/services/workdocs/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index 3d994bf5c3a7..b7cd1572be1d 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -1,6 +1,6 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.62-SNAPSHOT + + workmailmessageflow + AWS Java SDK :: Services :: WorkMailMessageFlow + The AWS Java SDK for WorkMailMessageFlow module holds the client classes that are used for + communicating with WorkMailMessageFlow. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.workmailmessageflow + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/workmailmessageflow/src/main/resources/codegen-resources/paginators-1.json b/services/workmailmessageflow/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/workmailmessageflow/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/workmailmessageflow/src/main/resources/codegen-resources/service-2.json b/services/workmailmessageflow/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..6d4d4fdad2f7 --- /dev/null +++ b/services/workmailmessageflow/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,74 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-05-01", + "endpointPrefix":"workmailmessageflow", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon WorkMail Message Flow", + "serviceId":"WorkMailMessageFlow", + "signatureVersion":"v4", + "uid":"workmailmessageflow-2019-05-01" + }, + "operations":{ + "GetRawMessageContent":{ + "name":"GetRawMessageContent", + "http":{ + "method":"GET", + "requestUri":"/messages/{messageId}" + }, + "input":{"shape":"GetRawMessageContentRequest"}, + "output":{"shape":"GetRawMessageContentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieves the raw content of an in-transit email message, in MIME format.

    " + } + }, + "shapes":{ + "GetRawMessageContentRequest":{ + "type":"structure", + "required":["messageId"], + "members":{ + "messageId":{ + "shape":"messageIdType", + "documentation":"

    The identifier of the email message to retrieve.

    ", + "location":"uri", + "locationName":"messageId" + } + } + }, + "GetRawMessageContentResponse":{ + "type":"structure", + "required":["messageContent"], + "members":{ + "messageContent":{ + "shape":"messageContentBlob", + "documentation":"

    The raw content of the email message, in MIME format.

    " + } + }, + "payload":"messageContent" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

    The requested email message was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "errorMessage":{"type":"string"}, + "messageContentBlob":{ + "type":"blob", + "streaming":true + }, + "messageIdType":{ + "type":"string", + "max":120, + "min":1, + "pattern":"[a-z0-9\\-]*" + } + }, + "documentation":"

    The WorkMail Message Flow API provides access to email messages as they are being sent and received by a WorkMail organization.

    " +} diff --git a/services/workspaces/build.properties b/services/workspaces/build.properties index ecf2dae6fcb1..15ec2da1fc71 100644 --- a/services/workspaces/build.properties +++ b/services/workspaces/build.properties @@ -1,5 +1,5 @@ # -# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index e84edb8f40b4..0c79eb0a97a9 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -1,6 +1,6 @@ + @@ -6,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.7.16-SNAPSHOT + 2.15.62-SNAPSHOT ../../pom.xml @@ -17,10 +32,6 @@ https://aws.amazon.com/sdkforjava - - ../.. - - @@ -64,6 +75,11 @@ http-client-spi ${awsjavasdk.version} + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + software.amazon.awssdk sdk-core @@ -89,6 +105,11 @@ utils ${awsjavasdk.version} + + software.amazon.awssdk + profiles + ${awsjavasdk.version} + netty-nio-client software.amazon.awssdk @@ -158,6 +179,22 @@ assertj-core test + + software.amazon.awssdk + service-test-utils + ${awsjavasdk.version} + test + + + io.reactivex.rxjava2 + rxjava + test + + + software.amazon.eventstream + eventstream + test + diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/customization.config b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/customization.config deleted file mode 100644 index 53b007a003d5..000000000000 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/customization.config +++ /dev/null @@ -1,8 +0,0 @@ -{ - "blacklistedSimpleMethods" : [ - "allTypes", - "nestedContainers", - "operationWithNoInputOrOutput" - ], - "useAutoConstructList": true -} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/service-2.json deleted file mode 100644 index 6bda6e0c936f..000000000000 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/service-2.json +++ /dev/null @@ -1,268 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2016-03-11", - "endpointPrefix":"autoconstructlists", - "jsonVersion":"1.1", - "protocol":"json", - "serviceAbbreviation":"AmazonCodeGenerationJsonRpcCustomized", - "serviceFullName":"Amazon Code Generation Json Rpc Customized", - "serviceId":"AmazonCodeGenerationJsonRpcCustomized", - "signatureVersion":"v4", - "targetPrefix":"AmazonCodeGenerationJsonRpcCustomized", - "timestampFormat":"unixTimestamp" - }, - "operations":{ - "AllTypes":{ - "name":"AllTypes", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"AllTypesStructure"}, - "output":{"shape":"AllTypesStructure"}, - "errors":[ - {"shape":"EmptyModeledException"} - ] - }, - "NestedContainers":{ - "name":"NestedContainers", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"NestedContainersStructure"}, - "output":{"shape":"NestedContainersStructure"} - }, - "OperationWithNoInputOrOutput":{ - "name":"OperationWithNoInputOrOutput", - "http":{ - "method":"POST", - "requestUri":"/" - } - }, - 
"StreamingInputOperation":{ - "name":"StreamingInputOperation", - "http":{ - "method":"POST", - "requestUri":"/2016-03-11/streamingInputOperation" - }, - "input":{"shape":"StructureWithStreamingMember"} - }, - "StreamingOutputOperation":{ - "name":"StreamingOutputOperation", - "http":{ - "method":"POST", - "requestUri":"/2016-03-11/streamingOutputOperation" - }, - "output":{"shape":"StructureWithStreamingMember"} - } - }, - "shapes":{ - "AllTypesStructure":{ - "type":"structure", - "members":{ - "StringMember":{"shape":"String"}, - "IntegerMember":{"shape":"Integer"}, - "BooleanMember":{"shape":"Boolean"}, - "FloatMember":{"shape":"Float"}, - "DoubleMember":{"shape":"Double"}, - "LongMember":{"shape":"Long"}, - "SimpleList":{"shape":"ListOfStrings"}, - "ListOfEnums":{"shape":"ListOfEnums"}, - "ListOfMaps":{"shape":"ListOfMapStringToString"}, - "ListOfStructs":{"shape":"ListOfSimpleStructs"}, - "MapOfStringToIntegerList":{"shape":"MapOfStringToIntegerList"}, - "MapOfStringToString":{"shape":"MapOfStringToString"}, - "MapOfStringToSimpleStruct":{"shape":"MapOfStringToSimpleStruct"}, - "MapOfEnumToEnum":{"shape":"MapOfEnumToEnum"}, - "MapOfEnumToString":{"shape":"MapOfEnumToString"}, - "MapOfStringToEnum":{"shape":"MapOfStringToEnum"}, - "MapOfEnumToSimpleStruct":{"shape":"MapOfEnumToSimpleStruct"}, - "TimestampMember":{"shape":"Timestamp"}, - "StructWithNestedTimestampMember":{"shape":"StructWithTimestamp"}, - "BlobArg":{"shape":"BlobType"}, - "StructWithNestedBlob":{"shape":"StructWithNestedBlobType"}, - "BlobMap":{"shape":"BlobMapType"}, - "ListOfBlobs":{"shape":"ListOfBlobsType"}, - "RecursiveStruct":{"shape":"RecursiveStructType"}, - "PolymorphicTypeWithSubTypes":{"shape":"BaseType"}, - "PolymorphicTypeWithoutSubTypes":{"shape":"SubTypeOne"}, - "EnumType":{"shape":"EnumType"} - } - }, - "BaseType":{ - "type":"structure", - "members":{ - "BaseMember":{"shape":"String"} - } - }, - "BlobMapType":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"BlobType"} - }, - "BlobType":{"type":"blob"}, - "Boolean":{"type":"boolean"}, - "Double":{"type":"double"}, - "EmptyModeledException":{ - "type":"structure", - "members":{ - }, - "exception":true - }, - "Float":{"type":"float"}, - "IdempotentOperationStructure":{ - "type":"structure", - "members":{ - "IdempotencyToken":{ - "shape":"String", - "idempotencyToken":true - } - } - }, - "Integer":{"type":"integer"}, - "ListOfBlobsType":{ - "type":"list", - "member":{"shape":"BlobType"} - }, - "ListOfIntegers":{ - "type":"list", - "member":{"shape":"Integer"} - }, - "ListOfListOfListOfStrings":{ - "type":"list", - "member":{"shape":"ListOfListOfStrings"} - }, - "ListOfListOfStrings":{ - "type":"list", - "member":{"shape":"ListOfStrings"} - }, - "ListOfMapStringToString":{ - "type":"list", - "member":{"shape":"MapOfStringToString"} - }, - "ListOfSimpleStructs":{ - "type":"list", - "member":{"shape":"SimpleStruct"} - }, - "ListOfStrings":{ - "type":"list", - "member":{"shape":"String"} - }, - "ListOfEnums":{ - "type":"list", - "member":{"shape":"EnumType"} - }, - "Long":{"type":"long"}, - "MapOfStringToIntegerList":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"ListOfIntegers"} - }, - "MapOfStringToListOfListOfStrings":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"ListOfListOfStrings"} - }, - "MapOfStringToSimpleStruct":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"SimpleStruct"} - }, - "MapOfStringToString":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"String"} - 
}, - "MapOfEnumToEnum":{ - "type":"map", - "key":{"shape":"EnumType"}, - "value":{"shape":"EnumType"} - }, - "MapOfEnumToString":{ - "type":"map", - "key":{"shape":"EnumType"}, - "value":{"shape":"String"} - }, - "MapOfStringToEnum":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"EnumType"} - }, - "MapOfEnumToSimpleStruct":{ - "type":"map", - "key":{"shape":"EnumType"}, - "value":{"shape":"SimpleStruct"} - }, - "NestedContainersStructure":{ - "type":"structure", - "members":{ - "ListOfListOfStrings":{"shape":"ListOfListOfStrings"}, - "ListOfListOfListOfStrings":{"shape":"ListOfListOfListOfStrings"}, - "MapOfStringToListOfListOfStrings":{"shape":"MapOfStringToListOfListOfStrings"} - } - }, - "RecursiveListType":{ - "type":"list", - "member":{"shape":"RecursiveStructType"} - }, - "RecursiveMapType":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"RecursiveStructType"} - }, - "RecursiveStructType":{ - "type":"structure", - "members":{ - "NoRecurse":{"shape":"String"}, - "RecursiveStruct":{"shape":"RecursiveStructType"}, - "RecursiveList":{"shape":"RecursiveListType"}, - "RecursiveMap":{"shape":"RecursiveMapType"} - } - }, - "SimpleStruct":{ - "type":"structure", - "members":{ - "StringMember":{"shape":"String"} - } - }, - "StreamType":{ - "type":"blob", - "streaming":true - }, - "String":{"type":"string"}, - "StructWithNestedBlobType":{ - "type":"structure", - "members":{ - "NestedBlob":{"shape":"BlobType"} - } - }, - "StructWithTimestamp":{ - "type":"structure", - "members":{ - "NestedTimestamp":{"shape":"Timestamp"} - } - }, - "StructureWithStreamingMember":{ - "type":"structure", - "members":{ - "StreamingMember":{"shape":"StreamType"} - }, - "payload":"StreamingMember" - }, - "SubTypeOne":{ - "type":"structure", - "members":{ - "SubTypeOneMember":{"shape":"String"} - } - }, - "EnumType": { - "type":"string", - "enum": [ - "EnumValue1", "EnumValue2" - ] - }, - "Timestamp":{"type":"timestamp"} - } -} diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json index a805c6567572..db2252d4653d 100644 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json @@ -151,6 +151,26 @@ }, "input":{"shape":"QueryParamWithoutValueInput"} }, + "StreamingInputOperationWithRequiredChecksum":{ + "name":"OperationWithRequiredChecksum", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/allTypes" + }, + "input":{"shape":"StructureWithStreamingMember"}, + "output":{"shape":"AllTypesStructure"}, + "httpChecksumRequired": true + }, + "OperationWithRequiredChecksum":{ + "name":"OperationWithRequiredChecksum", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/allTypes" + }, + "input":{"shape":"AllTypesStructure"}, + "output":{"shape":"AllTypesStructure"}, + "httpChecksumRequired": true + }, "StreamingInputOperation":{ "name":"StreamingInputOperation", "http":{ @@ -179,6 +199,19 @@ "output": { "shape": "EventStreamOutput" } + }, + "OperationWithHostPrefix": { + "name": "OperationWithHostPrefix", + "http": { + "method": "POST", + "requestUri": "/2016-03-11/OperationWithHostPrefix" + }, + "endpoint": { + "hostPrefix": "{StringMember}-foo." 
+ }, + "input": { + "shape": "AllTypesStructure" + } } }, "shapes":{ diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscovery/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscovery/service-2.json new file mode 100644 index 000000000000..3ba86efbbb2d --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscovery/service-2.json @@ -0,0 +1,135 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-08-31", + "endpointPrefix":"awsendpointdiscoverytestservice", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"AwsEndpointDiscoveryTest", + "serviceFullName":"AwsEndpointDiscoveryTest", + "serviceId":"AwsEndpointDiscoveryTest", + "signatureVersion":"v4", + "signingName":"awsendpointdiscoverytestservice", + "targetPrefix":"AwsEndpointDiscoveryTestService" + }, + "operations":{ + "DescribeEndpoints":{ + "name":"DescribeEndpoints", + "http":{ + "method":"POST", + "requestUri":"/DescribeEndpoints" + }, + "input":{"shape":"DescribeEndpointsRequest"}, + "output":{"shape":"DescribeEndpointsResponse"}, + "endpointoperation":true + }, + "TestDiscoveryIdentifiersRequired":{ + "name":"TestDiscoveryIdentifiersRequired", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestDiscoveryIdentifiersRequiredRequest"}, + "output":{"shape":"TestDiscoveryIdentifiersRequiredResponse"}, + "endpointdiscovery":{"required":true} + }, + "TestDiscoveryOptional":{ + "name":"TestDiscoveryOptional", + "http":{ + "method":"POST", + "requestUri":"/TestDiscoveryOptional" + }, + "input":{"shape":"TestDiscoveryOptionalRequest"}, + "output":{"shape":"TestDiscoveryOptionalResponse"}, + "endpointdiscovery":{ + } + }, + "TestDiscoveryRequired":{ + "name":"TestDiscoveryRequired", + "http":{ + "method":"POST", + "requestUri":"/TestDiscoveryRequired" + }, + "input":{"shape":"TestDiscoveryRequiredRequest"}, + "output":{"shape":"TestDiscoveryRequiredResponse"}, + "endpointdiscovery":{"required":true} + } + }, + "shapes":{ + "Boolean":{"type":"boolean"}, + "DescribeEndpointsRequest":{ + "type":"structure", + "members":{ + "Operation":{"shape":"String"}, + "Identifiers":{"shape":"Identifiers"} + } + }, + "DescribeEndpointsResponse":{ + "type":"structure", + "required":["Endpoints"], + "members":{ + "Endpoints":{"shape":"Endpoints"} + } + }, + "Endpoint":{ + "type":"structure", + "required":[ + "Address", + "CachePeriodInMinutes" + ], + "members":{ + "Address":{"shape":"String"}, + "CachePeriodInMinutes":{"shape":"Long"} + } + }, + "Endpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, + "Identifiers":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Long":{"type":"long"}, + "String":{"type":"string"}, + "TestDiscoveryIdentifiersRequiredRequest":{ + "type":"structure", + "required":["Sdk"], + "members":{ + "Sdk":{ + "shape":"String", + "endpointdiscoveryid":true + } + } + }, + "TestDiscoveryIdentifiersRequiredResponse":{ + "type":"structure", + "members":{ + "DiscoveredEndpoint":{"shape":"Boolean"} + } + }, + "TestDiscoveryOptionalRequest":{ + "type":"structure", + "members":{ + } + }, + "TestDiscoveryOptionalResponse":{ + "type":"structure", + "members":{ + "DiscoveredEndpoint":{"shape":"Boolean"} + } + }, + "TestDiscoveryRequiredRequest":{ + "type":"structure", + "members":{ + } + }, + "TestDiscoveryRequiredResponse":{ + "type":"structure", + "members":{ + "DiscoveredEndpoint":{"shape":"Boolean"} + } + } + 
} +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscoveryrequired-withcustomization/customization.config b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscoveryrequired-withcustomization/customization.config new file mode 100644 index 000000000000..8175efc28858 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscoveryrequired-withcustomization/customization.config @@ -0,0 +1,3 @@ +{ + "allowEndpointOverrideForEndpointDiscoveryRequiredOperations": true +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscoveryrequired-withcustomization/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscoveryrequired-withcustomization/service-2.json new file mode 100644 index 000000000000..1adcc3ac2d95 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscoveryrequired-withcustomization/service-2.json @@ -0,0 +1,113 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-08-31", + "endpointPrefix":"awsendpointdiscoveryrequiredwithcustomizationtestservice", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"AwsEndpointDiscoveryRequiredWithCustomizationTest", + "serviceFullName":"AwsEndpointDiscoveryRequiredWithCustomizationTest", + "serviceId":"AwsEndpointDiscoveryRequiredWithCustomizationTest", + "signatureVersion":"v4", + "signingName":"awsendpointdiscoveryrequiredtestwithcustomizationservice", + "targetPrefix":"AwsEndpointDiscoveryRequiredWithCustomizationTestService" + }, + "operations":{ + "DescribeEndpoints":{ + "name":"DescribeEndpoints", + "http":{ + "method":"POST", + "requestUri":"/DescribeEndpoints" + }, + "input":{"shape":"DescribeEndpointsRequest"}, + "output":{"shape":"DescribeEndpointsResponse"}, + "endpointoperation":true + }, + "TestDiscoveryIdentifiersRequired":{ + "name":"TestDiscoveryIdentifiersRequired", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestDiscoveryIdentifiersRequiredRequest"}, + "output":{"shape":"TestDiscoveryIdentifiersRequiredResponse"}, + "endpointdiscovery":{"required":true} + }, + "TestDiscoveryRequired":{ + "name":"TestDiscoveryRequired", + "http":{ + "method":"POST", + "requestUri":"/TestDiscoveryRequired" + }, + "input":{"shape":"TestDiscoveryRequiredRequest"}, + "output":{"shape":"TestDiscoveryRequiredResponse"}, + "endpointdiscovery":{"required":true} + } + }, + "shapes":{ + "Boolean":{"type":"boolean"}, + "DescribeEndpointsRequest":{ + "type":"structure", + "members":{ + "Operation":{"shape":"String"}, + "Identifiers":{"shape":"Identifiers"} + } + }, + "DescribeEndpointsResponse":{ + "type":"structure", + "required":["Endpoints"], + "members":{ + "Endpoints":{"shape":"Endpoints"} + } + }, + "Endpoint":{ + "type":"structure", + "required":[ + "Address", + "CachePeriodInMinutes" + ], + "members":{ + "Address":{"shape":"String"}, + "CachePeriodInMinutes":{"shape":"Long"} + } + }, + "Endpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, + "Identifiers":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Long":{"type":"long"}, + "String":{"type":"string"}, + "TestDiscoveryIdentifiersRequiredRequest":{ + "type":"structure", + "required":["Sdk"], + "members":{ + "Sdk":{ + "shape":"String", + "endpointdiscoveryid":true + } + } + }, + 
"TestDiscoveryIdentifiersRequiredResponse":{ + "type":"structure", + "members":{ + "DiscoveredEndpoint":{"shape":"Boolean"} + } + }, + "TestDiscoveryRequiredRequest":{ + "type":"structure", + "members":{ + } + }, + "TestDiscoveryRequiredResponse":{ + "type":"structure", + "members":{ + "DiscoveredEndpoint":{"shape":"Boolean"} + } + } + } +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscoveryrequired/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscoveryrequired/service-2.json new file mode 100644 index 000000000000..580005251e25 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscoveryrequired/service-2.json @@ -0,0 +1,113 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-08-31", + "endpointPrefix":"awsendpointdiscoveryrequiredtestservice", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"AwsEndpointDiscoveryRequiredTest", + "serviceFullName":"AwsEndpointDiscoveryRequiredTest", + "serviceId":"AwsEndpointDiscoveryRequiredTest", + "signatureVersion":"v4", + "signingName":"awsendpointdiscoveryrequiredtestservice", + "targetPrefix":"AwsEndpointDiscoveryRequiredTestService" + }, + "operations":{ + "DescribeEndpoints":{ + "name":"DescribeEndpoints", + "http":{ + "method":"POST", + "requestUri":"/DescribeEndpoints" + }, + "input":{"shape":"DescribeEndpointsRequest"}, + "output":{"shape":"DescribeEndpointsResponse"}, + "endpointoperation":true + }, + "TestDiscoveryIdentifiersRequired":{ + "name":"TestDiscoveryIdentifiersRequired", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestDiscoveryIdentifiersRequiredRequest"}, + "output":{"shape":"TestDiscoveryIdentifiersRequiredResponse"}, + "endpointdiscovery":{"required":true} + }, + "TestDiscoveryRequired":{ + "name":"TestDiscoveryRequired", + "http":{ + "method":"POST", + "requestUri":"/TestDiscoveryRequired" + }, + "input":{"shape":"TestDiscoveryRequiredRequest"}, + "output":{"shape":"TestDiscoveryRequiredResponse"}, + "endpointdiscovery":{"required":true} + } + }, + "shapes":{ + "Boolean":{"type":"boolean"}, + "DescribeEndpointsRequest":{ + "type":"structure", + "members":{ + "Operation":{"shape":"String"}, + "Identifiers":{"shape":"Identifiers"} + } + }, + "DescribeEndpointsResponse":{ + "type":"structure", + "required":["Endpoints"], + "members":{ + "Endpoints":{"shape":"Endpoints"} + } + }, + "Endpoint":{ + "type":"structure", + "required":[ + "Address", + "CachePeriodInMinutes" + ], + "members":{ + "Address":{"shape":"String"}, + "CachePeriodInMinutes":{"shape":"Long"} + } + }, + "Endpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, + "Identifiers":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Long":{"type":"long"}, + "String":{"type":"string"}, + "TestDiscoveryIdentifiersRequiredRequest":{ + "type":"structure", + "required":["Sdk"], + "members":{ + "Sdk":{ + "shape":"String", + "endpointdiscoveryid":true + } + } + }, + "TestDiscoveryIdentifiersRequiredResponse":{ + "type":"structure", + "members":{ + "DiscoveredEndpoint":{"shape":"Boolean"} + } + }, + "TestDiscoveryRequiredRequest":{ + "type":"structure", + "members":{ + } + }, + "TestDiscoveryRequiredResponse":{ + "type":"structure", + "members":{ + "DiscoveredEndpoint":{"shape":"Boolean"} + } + } + } +} \ No newline at end of file diff --git 
a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/eventstreams/customization.config b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/eventstreams/customization.config new file mode 100644 index 000000000000..23fadd2c7c57 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/eventstreams/customization.config @@ -0,0 +1,5 @@ +{ + "useLegacyEventGenerationScheme": { + "EventStream": ["LegacyGeneratedEvent"] + } +} diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/eventstreams/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/eventstreams/service-2.json new file mode 100644 index 000000000000..0936a9605520 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/eventstreams/service-2.json @@ -0,0 +1,132 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-03-11", + "endpointPrefix":"eventstream-rest-json", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"EventStreamRestJson", + "serviceFullName":"Event Stream Rest Json", + "serviceId":"EventStreamRestJson", + "signatureVersion":"v4", + "targetPrefix":"ProtocolTestsService", + "timestampFormat":"unixTimestamp", + "uid":"restjson-2016-03-11" + }, + "operations":{ + "EventStreamOperation": { + "name": "EventStreamOperation", + "http": { + "method": "POST", + "requestUri": "/2016-03-11/eventStreamOperation" + }, + "input": { + "shape": "EventStreamOperationRequest" + }, + "output": { + "shape": "EventStreamOutput" + } + } + }, + "shapes":{ + "EventStreamOperationRequest": { + "type": "structure", + "required": [ + "InputEventStream" + ], + "members": { + "InputEventStream": { + "shape": "InputEventStream" + } + }, + "payload":"InputEventStream" + }, + "EventStreamOutput": { + "type": "structure", + "required": [ + "EventStream" + ], + "members": { + "EventStream": { + "shape": "EventStream" + } + } + }, + "InputEventStream": { + "type": "structure", + "members": { + "InputEvent": { + "shape": "InputEvent" + }, + "InputEventB": { + "shape": "InputEvent" + }, + "InputEventTwo": { + "shape": "InputEventTwo" + } + }, + "eventstream": true + }, + "InputEvent": { + "type": "structure", + "members": { + "ExplicitPayloadMember": { + "shape":"ExplicitPayloadMember", + "eventpayload":true + } + }, + "event": true + }, + "InputEventTwo": { + "type": "structure", + "members": { + "ExplicitPayloadMember": { + "shape":"ExplicitPayloadMember", + "eventpayload":true + } + }, + "event": true + }, + + "ExplicitPayloadMember":{"type":"blob"}, + "EventStream": { + "type": "structure", + "members": { + "TheEventOne": { + "shape": "EventOne" + }, + "LegacyGeneratedEvent": { + "shape": "EventOne" + }, + "EventTwo": { + "shape": "EventTwo" + }, + "SecondEventTwo": { + "shape": "EventTwo" + } + }, + "eventstream": true + }, + "EventOne": { + "type": "structure", + "members": { + "Foo": { + "shape": "String" + } + }, + "event": true + }, + "EventTwo": { + "type": "structure", + "members": { + "Bar": { + "shape": "String" + } + }, + "event": true + }, + "String": { + "type":"string" + } + } +} diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/waiters/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/waiters/service-2.json new file mode 100644 index 000000000000..a5e3a6c46462 --- /dev/null +++ 
b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/waiters/service-2.json @@ -0,0 +1,816 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-03-11", + "endpointPrefix":"restjson", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"RestJsonProtocolTests", + "serviceFullName":"AWS DR Tools Rest JSON Protocol Tests", + "serviceId":"RestJsonWithWaiters", + "signatureVersion":"v4", + "targetPrefix":"ProtocolTestsService", + "uid":"restjson-2016-03-11" + }, + "operations":{ + "AllTypes":{ + "name":"AllTypes", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/allTypes" + }, + "input":{"shape":"AllTypesStructure"}, + "output":{"shape":"AllTypesStructure"}, + "errors":[ + {"shape":"EmptyModeledException"}, + {"shape":"ExplicitPayloadAndHeadersException"}, + {"shape":"ImplicitPayloadException"} + ] + }, + "DeleteOperation":{ + "name":"DeleteOperation", + "http":{ + "method":"DELETE", + "requestUri":"/2016-03-11/deleteOperation" + } + }, + "FurtherNestedContainers":{ + "name":"FurtherNestedContainers", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/furtherNestedContainers" + }, + "input":{"shape":"FurtherNestedContainersStructure"}, + "output":{"shape":"FurtherNestedContainersStructure"} + }, + "GetOperationWithBody":{ + "name":"GetOperationWithBody", + "http":{ + "method":"GET", + "requestUri":"/2016-03-11/getOperationWithBody" + }, + "input":{"shape":"GetOperationWithBodyInput"} + }, + "HeadOperation":{ + "name":"HeadOperation", + "http":{ + "method":"HEAD", + "requestUri":"/2016-03-11/headOperation" + } + }, + "IdempotentOperation":{ + "name":"IdempotentOperation", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/idempotentOperation/{PathParam}" + }, + "input":{"shape":"IdempotentOperationStructure"} + }, + "JsonValuesOperation":{ + "name":"JsonValuesOperation", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/JsonValuesStructure" + }, + "input":{"shape":"JsonValuesStructure"}, + "output":{"shape":"JsonValuesStructure"}, + "errors":[ + {"shape":"EmptyModeledException"} + ] + }, + "MapOfStringToListOfStringInQueryParams":{ + "name":"MapOfStringToListOfStringInQueryParams", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/mapOfStringToListOfStringInQueryParams" + }, + "input":{"shape":"MapOfStringToListOfStringInQueryParamsInput"} + }, + "MembersInHeaders":{ + "name":"MembersInHeaders", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/membersInHeaders" + }, + "input":{"shape":"MembersInHeadersStructure"}, + "output":{"shape":"MembersInHeadersStructure"} + }, + "MembersInQueryParams":{ + "name":"MembersInQueryParams", + "http":{ + "method":"GET", + "requestUri":"/2016-03-11/membersInQueryParams?StaticQueryParam=foo" + }, + "input":{"shape":"MembersInQueryParamsInput"}, + "output":{"shape":"MembersInQueryParamsInput"} + }, + "MultiLocationOperation":{ + "name":"MultiLocationOperation", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/multiLocationOperation/{PathParam}" + }, + "input":{"shape":"MultiLocationOperationInput"}, + "output":{"shape":"MultiLocationOperationInput"} + }, + "NestedContainers":{ + "name":"NestedContainers", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/nestedContainers" + }, + "input":{"shape":"NestedContainersStructure"}, + "output":{"shape":"NestedContainersStructure"} + }, + "OperationWithExplicitPayloadBlob":{ + "name":"OperationWithExplicitPayloadBlob", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/operationWithExplicitPayloadBlob" 
+ }, + "input":{"shape":"OperationWithExplicitPayloadBlobInput"}, + "output":{"shape":"OperationWithExplicitPayloadBlobInput"} + }, + "OperationWithExplicitPayloadStructure":{ + "name":"OperationWithExplicitPayloadStructure", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/operationWithExplicitPayloadStructure" + }, + "input":{"shape":"OperationWithExplicitPayloadStructureInput"}, + "output":{"shape":"OperationWithExplicitPayloadStructureInput"} + }, + "OperationWithGreedyLabel":{ + "name":"OperationWithGreedyLabel", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/operationWithGreedyLabel/{NonGreedyPathParam}/{GreedyPathParam+}" + }, + "input":{"shape":"OperationWithGreedyLabelInput"} + }, + "OperationWithModeledContentType":{ + "name":"OperationWithModeledContentType", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/operationWithModeledContentType" + }, + "input":{"shape":"OperationWithModeledContentTypeInput"} + }, + "OperationWithNoInputOrOutput":{ + "name":"OperationWithNoInputOrOutput", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/operationWithNoInputOrOutput" + } + }, + "QueryParamWithoutValue":{ + "name":"QueryParamWithoutValue", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/queryParamWithoutValue?param" + }, + "input":{"shape":"QueryParamWithoutValueInput"} + }, + "StatusCodeInOutputOperation":{ + "name":"StatusCodeInOutputOperation", + "http":{ + "method":"GET", + "requestUri":"/2016-03-11/statusCodeInOutput" + }, + "output":{"shape":"StatusCodeInOutputStructure"} + }, + "StreamingInputOperation":{ + "name":"StreamingInputOperation", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/streamingInputOperation" + }, + "input":{"shape":"StructureWithStreamingMember"} + }, + "StreamingOutputOperation":{ + "name":"StreamingOutputOperation", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/streamingOutputOperation" + }, + "output":{"shape":"StructureWithStreamingMember"} + }, + "EventStreamOperation": { + "name": "EventStreamOperation", + "http": { + "method": "POST", + "requestUri": "/2016-03-11/eventStreamOperation" + }, + "input": { + "shape": "EventStreamOperationRequest" + }, + "output": { + "shape": "EventStreamOutput" + } + } + }, + "shapes":{ + "AllTypesStructure":{ + "type":"structure", + "members":{ + "StringMember":{"shape":"String"}, + "IntegerMember":{"shape":"Integer"}, + "BooleanMember":{"shape":"Boolean"}, + "FloatMember":{"shape":"Float"}, + "DoubleMember":{"shape":"Double"}, + "LongMember":{"shape":"Long"}, + "BigDecimalMember":{"shape":"NumericValue"}, + "SimpleList":{"shape":"ListOfStrings"}, + "ListOfMaps":{"shape":"ListOfMapStringToString"}, + "ListOfStructs":{"shape":"ListOfSimpleStructs"}, + "MapOfStringToIntegerList":{"shape":"MapOfStringToIntegerList"}, + "MapOfStringToString":{"shape":"MapOfStringToString"}, + "MapOfStringToStruct":{"shape":"MapOfStringToSimpleStruct"}, + "TimestampMember":{"shape":"Timestamp"}, + "StructWithNestedTimestampMember":{"shape":"StructWithTimestamp"}, + "TimestampFormatMember":{"shape":"IsoTimestamp"}, + "BlobArg":{"shape":"BlobType"}, + "StructWithNestedBlob":{"shape":"StructWithNestedBlobType"}, + "BlobMap":{"shape":"BlobMapType"}, + "ListOfBlobs":{"shape":"ListOfBlobsType"}, + "RecursiveStruct":{"shape":"RecursiveStructType"}, + "PolymorphicTypeWithSubTypes":{"shape":"BaseType"}, + "PolymorphicTypeWithoutSubTypes":{"shape":"SubTypeOne"}, + "EnumMember":{"shape":"EnumType"}, + "ListOfEnums":{"shape":"ListOfEnums"}, + 
"MapOfEnumToEnum":{"shape":"MapOfEnumToEnum"} + } + }, + "BaseType":{ + "type":"structure", + "members":{ + "BaseMember":{"shape":"String"} + } + }, + "BlobMapType":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"BlobType"} + }, + "BlobType":{"type":"blob"}, + "Boolean":{"type":"boolean"}, + "Double":{"type":"double"}, + "EmptyModeledException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "EnumType":{ + "type":"string", + "enum":[ + "EnumValue1", + "EnumValue2" + ] + }, + "ExplicitPayloadAndHeadersException":{ + "type":"structure", + "members":{ + "StringHeader":{ + "shape":"String", + "location":"header", + "locationName":"x-amz-string" + }, + "IntegerHeader":{ + "shape":"Integer", + "location":"header", + "locationName":"x-amz-integer" + }, + "LongHeader":{ + "shape":"Long", + "location":"header", + "locationName":"x-amz-long" + }, + "DoubleHeader":{ + "shape":"Double", + "location":"header", + "locationName":"x-amz-double" + }, + "FloatHeader":{ + "shape":"Float", + "location":"header", + "locationName":"x-amz-float" + }, + "TimestampHeader":{ + "shape":"Timestamp", + "location":"header", + "locationName":"x-amz-timestamp" + }, + "BooleanHeader":{ + "shape":"Boolean", + "location":"header", + "locationName":"x-amz-boolean" + }, + "PayloadMember":{"shape":"SimpleStruct"} + }, + "exception":true, + "payload":"PayloadMember" + }, + "Float":{"type":"float"}, + "FurtherNestedContainersStructure":{ + "type":"structure", + "members":{ + "ListOfNested":{"shape":"ListOfNested"} + } + }, + "GetOperationWithBodyInput":{ + "type":"structure", + "members":{ + "StringMember":{"shape":"String"} + } + }, + "IdempotentOperationStructure":{ + "type":"structure", + "required":["PathIdempotentToken"], + "members":{ + "PathIdempotentToken":{ + "shape":"String", + "idempotencyToken":true, + "location":"uri", + "locationName":"PathParam" + }, + "QueryIdempotentToken":{ + "shape":"String", + "idempotencyToken":true, + "location":"querystring", + "locationName":"QueryParam" + }, + "HeaderIdempotentToken":{ + "shape":"String", + "idempotencyToken":true, + "location":"header", + "locationName":"x-amz-idempotent-header" + } + } + }, + "ImplicitPayloadException":{ + "type":"structure", + "members":{ + "StringMember":{"shape":"String"}, + "IntegerMember":{"shape":"Integer"}, + "LongMember":{"shape":"Long"}, + "DoubleMember":{"shape":"Double"}, + "FloatMember":{"shape":"Float"}, + "TimestampMember":{"shape":"Timestamp"}, + "BooleanMember":{"shape":"Boolean"}, + "BlobMember":{"shape":"BlobType"}, + "ListMember":{"shape":"ListOfStrings"}, + "MapMember":{"shape":"MapOfStringToString"}, + "SimpleStructMember":{"shape":"SimpleStruct"} + }, + "exception":true + }, + "Integer":{"type":"integer"}, + // Shape is customized to BigDecimal in customization.config + "NumericValue": { + "type": "string", + "pattern":"([0-9]*\\.)?[0-9]+" + }, + "IsoTimestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "JsonValuesStructure":{ + "type":"structure", + "members":{ + "JsonValueHeaderMember":{ + "shape":"String", + "jsonvalue":true, + "location":"header", + "locationName":"Encoded-Header" + }, + "JsonValueMember":{ + "shape":"String", + "jsonvalue":true + } + } + }, + "ListOfAllTypesStructs":{ + "type":"list", + "member":{"shape":"AllTypesStructure"} + }, + "ListOfBlobsType":{ + "type":"list", + "member":{"shape":"BlobType"} + }, + "ListOfEnums":{ + "type":"list", + "member":{"shape":"EnumType"} + }, + "ListOfIntegers":{ + "type":"list", + "member":{"shape":"Integer"} + }, + 
"ListOfListOfListsOfStrings":{ + "type":"list", + "member":{"shape":"ListOfListsOfStrings"} + }, + "ListOfListsOfAllTypesStructs":{ + "type":"list", + "member":{"shape":"ListOfAllTypesStructs"} + }, + "ListOfListsOfStrings":{ + "type":"list", + "member":{"shape":"ListOfStrings"} + }, + "ListOfListsOfStructs":{ + "type":"list", + "member":{"shape":"ListOfSimpleStructs"} + }, + "ListOfMapStringToString":{ + "type":"list", + "member":{"shape":"MapOfStringToString"} + }, + "ListOfNested":{ + "type":"list", + "member":{"shape":"NestedContainersStructure"} + }, + "ListOfSimpleStructs":{ + "type":"list", + "member":{"shape":"SimpleStruct"} + }, + "ListOfStrings":{ + "type":"list", + "member":{"shape":"String"} + }, + "Long":{"type":"long"}, + "MapOfEnumToEnum":{ + "type":"map", + "key":{"shape":"EnumType"}, + "value":{"shape":"EnumType"} + }, + "MapOfStringToIntegerList":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"ListOfIntegers"} + }, + "MapOfStringToListOfListsOfStrings":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"ListOfListsOfStrings"} + }, + "MapOfStringToListOfStringInQueryParamsInput":{ + "type":"structure", + "members":{ + "MapOfStringToListOfStrings":{ + "shape":"MapOfStringToListOfStrings", + "location":"querystring" + } + } + }, + "MapOfStringToListOfStrings":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"ListOfStrings"} + }, + "MapOfStringToSimpleStruct":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"SimpleStruct"} + }, + "MapOfStringToString":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "MembersInHeadersStructure":{ + "type":"structure", + "members":{ + "StringMember":{ + "shape":"String", + "location":"header", + "locationName":"x-amz-string" + }, + "BooleanMember":{ + "shape":"Boolean", + "location":"header", + "locationName":"x-amz-boolean" + }, + "IntegerMember":{ + "shape":"Integer", + "location":"header", + "locationName":"x-amz-integer" + }, + "LongMember":{ + "shape":"Long", + "location":"header", + "locationName":"x-amz-long" + }, + "FloatMember":{ + "shape":"Float", + "location":"header", + "locationName":"x-amz-float" + }, + "DoubleMember":{ + "shape":"Double", + "location":"header", + "locationName":"x-amz-double" + }, + "TimestampMember":{ + "shape":"Timestamp", + "location":"header", + "locationName":"x-amz-timestamp" + }, + "IsoTimestampMember":{ + "shape":"IsoTimestamp", + "location":"header", + "locationName":"x-amz-iso-timestamp" + } + } + }, + "MembersInQueryParamsInput":{ + "type":"structure", + "members":{ + "StringQueryParam":{ + "shape":"String", + "location":"querystring", + "locationName":"String" + }, + "BooleanQueryParam":{ + "shape":"Boolean", + "location":"querystring", + "locationName":"Boolean" + }, + "IntegerQueryParam":{ + "shape":"Integer", + "location":"querystring", + "locationName":"Integer" + }, + "LongQueryParam":{ + "shape":"Long", + "location":"querystring", + "locationName":"Long" + }, + "FloatQueryParam":{ + "shape":"Float", + "location":"querystring", + "locationName":"Float" + }, + "DoubleQueryParam":{ + "shape":"Double", + "location":"querystring", + "locationName":"Double" + }, + "TimestampQueryParam":{ + "shape":"Timestamp", + "location":"querystring", + "locationName":"Timestamp" + }, + "ListOfStrings":{ + "shape":"ListOfStrings", + "location":"querystring", + "locationName":"item" + }, + "MapOfStringToString":{ + "shape":"MapOfStringToString", + "location":"querystring" + } + } + }, + "MultiLocationOperationInput":{ + 
"type":"structure", + "required":["PathParam"], + "members":{ + "PathParam":{ + "shape":"String", + "location":"uri", + "locationName":"PathParam" + }, + "QueryParamOne":{ + "shape":"String", + "location":"querystring", + "locationName":"QueryParamOne" + }, + "QueryParamTwo":{ + "shape":"String", + "location":"querystring", + "locationName":"QueryParamTwo" + }, + "StringHeaderMember":{ + "shape":"String", + "location":"header", + "locationName":"x-amz-header-string" + }, + "TimestampHeaderMember":{ + "shape":"Timestamp", + "location":"header", + "locationName":"x-amz-timearg" + }, + "PayloadStructParam":{"shape":"PayloadStructType"} + } + }, + "NestedContainersStructure":{ + "type":"structure", + "members":{ + "ListOfListsOfStrings":{"shape":"ListOfListsOfStrings"}, + "ListOfListsOfStructs":{"shape":"ListOfListsOfStructs"}, + "ListOfListsOfAllTypesStructs":{"shape":"ListOfListsOfAllTypesStructs"}, + "ListOfListOfListsOfStrings":{"shape":"ListOfListOfListsOfStrings"}, + "MapOfStringToListOfListsOfStrings":{"shape":"MapOfStringToListOfListsOfStrings"}, + "StringMember":{"shape":"String"} + } + }, + "OperationWithExplicitPayloadBlobInput":{ + "type":"structure", + "members":{ + "PayloadMember":{"shape":"BlobType"} + }, + "payload":"PayloadMember" + }, + "OperationWithExplicitPayloadStructureInput":{ + "type":"structure", + "members":{ + "PayloadMember":{"shape":"SimpleStruct"} + }, + "payload":"PayloadMember" + }, + "OperationWithGreedyLabelInput":{ + "type":"structure", + "required":[ + "NonGreedyPathParam", + "GreedyPathParam" + ], + "members":{ + "NonGreedyPathParam":{ + "shape":"String", + "location":"uri", + "locationName":"NonGreedyPathParam" + }, + "GreedyPathParam":{ + "shape":"String", + "location":"uri", + "locationName":"GreedyPathParam" + } + } + }, + "OperationWithModeledContentTypeInput":{ + "type":"structure", + "members":{ + "ContentType":{ + "shape":"String", + "location":"header", + "locationName":"Content-Type" + } + } + }, + "PayloadStructType":{ + "type":"structure", + "members":{ + "PayloadMemberOne":{"shape":"String"}, + "PayloadMemberTwo":{"shape":"String"} + } + }, + "QueryParamWithoutValueInput":{ + "type":"structure", + "members":{ + } + }, + "RecursiveListType":{ + "type":"list", + "member":{"shape":"RecursiveStructType"} + }, + "RecursiveMapType":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"RecursiveStructType"} + }, + "RecursiveStructType":{ + "type":"structure", + "members":{ + "NoRecurse":{"shape":"String"}, + "RecursiveStruct":{"shape":"RecursiveStructType"}, + "RecursiveList":{"shape":"RecursiveListType"}, + "RecursiveMap":{"shape":"RecursiveMapType"} + } + }, + "SimpleStruct":{ + "type":"structure", + "members":{ + "StringMember":{"shape":"String"} + } + }, + "StatusCodeInOutputStructure":{ + "type":"structure", + "members":{ + "StatusCodeMember":{ + "shape":"Integer", + "location":"statusCode" + } + } + }, + "StreamType":{ + "type":"blob", + "streaming":true, + "requiresLength":true + }, + "String":{"type":"string"}, + "StructWithNestedBlobType":{ + "type":"structure", + "members":{ + "NestedBlob":{"shape":"BlobType"} + } + }, + "StructWithTimestamp":{ + "type":"structure", + "members":{ + "NestedTimestamp":{"shape":"Timestamp"} + } + }, + "StructureWithStreamingMember":{ + "type":"structure", + "members":{ + "StreamingMember":{"shape":"StreamType"} + }, + "payload":"StreamingMember" + }, + "SubTypeOne":{ + "type":"structure", + "members":{ + "SubTypeOneMember":{"shape":"String"} + } + }, + "Timestamp":{"type":"timestamp"}, + 
"EventStreamOperationRequest": { + "type": "structure", + "required": [ + "InputEventStream" + ], + "members": { + "InputEventStream": { + "shape": "InputEventStream" + } + }, + "payload":"InputEventStream" + }, + "EventStreamOutput": { + "type": "structure", + "required": [ + "EventStream" + ], + "members": { + "EventStream": { + "shape": "EventStream" + } + } + }, + "InputEventStream": { + "type": "structure", + "members": { + "InputEvent": { + "shape": "InputEvent" + } + }, + "eventstream": true + }, + "InputEvent": { + "type": "structure", + "members": { + "ExplicitPayloadMember": { + "shape":"ExplicitPayloadMember", + "eventpayload":true + }, + "HeaderMember": { + "shape": "String", + "eventheader": true + } + }, + "event": true + }, + "ExplicitPayloadMember":{"type":"blob"}, + "EventStream": { + "type": "structure", + "members": { + "EventOne": { + "shape": "EventOne" + }, + "EventTwo": { + "shape": "EventTwo" + } + }, + "eventstream": true + }, + "EventOne": { + "type": "structure", + "members": { + "Foo": { + "shape": "String" + } + }, + "event": true + }, + "EventTwo": { + "type": "structure", + "members": { + "Bar": { + "shape": "String" + } + }, + "event": true + } + } +} diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/waiters/waiters-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/waiters/waiters-2.json new file mode 100644 index 000000000000..b629e680af14 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/waiters/waiters-2.json @@ -0,0 +1,32 @@ +{ + "version": 2, + "waiters": { + "AllTypesSuccess": { + "delay": 1, + "operation": "AllTypes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": 200, + "matcher": "status", + "state": "success" + }, + { + "state": "retry", + "matcher": "status", + "expected": 404 + }, + { + "matcher": "error", + "expected": "EmptyModeledException", + "state": "failure" + }, + { + "state": "failure", + "matcher": "status", + "expected": 500 + } + ] + } + } +} diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/xml/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/xml/service-2.json index fef93c63e449..44a7a4395de4 100644 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/xml/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/xml/service-2.json @@ -151,6 +151,26 @@ }, "input":{"shape":"QueryParamWithoutValueInput"} }, + "StreamingInputOperationWithRequiredChecksum":{ + "name":"OperationWithRequiredChecksum", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/allTypes" + }, + "input":{"shape":"StructureWithStreamingMember"}, + "output":{"shape":"AllTypesStructure"}, + "httpChecksumRequired": true + }, + "OperationWithRequiredChecksum":{ + "name":"OperationWithRequiredChecksum", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/allTypes" + }, + "input":{"shape":"AllTypesStructure"}, + "output":{"shape":"AllTypesStructure"}, + "httpChecksumRequired": true + }, "StreamingInputOperation":{ "name":"StreamingInputOperation", "http":{ diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncSignerOverrideTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncSignerOverrideTest.java new file mode 100644 index 000000000000..1762d341fb08 --- /dev/null +++ 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncSignerOverrideTest.java @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.verify; +import static software.amazon.awssdk.core.client.config.SdkAdvancedClientOption.SIGNER; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.model.StreamingInputOperationRequest; + +/** + * Test to ensure that operations that use the {@link software.amazon.awssdk.auth.signer.AsyncAws4Signer} don't apply + * the override when the signer is overridden by the customer. 
+ */ +@RunWith(MockitoJUnitRunner.class) +public class AsyncSignerOverrideTest { + @Mock + public Signer mockSigner; + + @Test + public void test_signerOverriddenForStreamingInput_takesPrecedence() { + ProtocolRestJsonAsyncClient asyncClient = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.US_WEST_2) + .overrideConfiguration(o -> o.putAdvancedOption(SIGNER, mockSigner)) + .build(); + + try { + asyncClient.streamingInputOperation(StreamingInputOperationRequest.builder().build(), + AsyncRequestBody.fromString("test")).join(); + } catch (Exception expected) { + } + + verify(mockSigner).sign(any(SdkHttpFullRequest.class), any(ExecutionAttributes.class)); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/EndpointDiscoveryAndEndpointOverrideTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/EndpointDiscoveryAndEndpointOverrideTest.java new file mode 100644 index 000000000000..80688c00b345 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/EndpointDiscoveryAndEndpointOverrideTest.java @@ -0,0 +1,237 @@ +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.net.URI; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CompletionException; +import java.util.function.Consumer; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder; +import software.amazon.awssdk.core.client.builder.SdkClientBuilder; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.endpointdiscoveryrequiredtest.EndpointDiscoveryRequiredTestAsyncClient; +import software.amazon.awssdk.services.endpointdiscoveryrequiredtest.EndpointDiscoveryRequiredTestClient; +import software.amazon.awssdk.services.endpointdiscoveryrequiredwithcustomizationtest.EndpointDiscoveryRequiredWithCustomizationTestAsyncClient; +import software.amazon.awssdk.services.endpointdiscoveryrequiredwithcustomizationtest.EndpointDiscoveryRequiredWithCustomizationTestClient; +import software.amazon.awssdk.services.endpointdiscoverytest.EndpointDiscoveryTestAsyncClient; +import software.amazon.awssdk.services.endpointdiscoverytest.EndpointDiscoveryTestClient; + +/** + * Verify the behavior of endpoint discovery when combined with endpoint override configuration. 
+ */ +@RunWith(Parameterized.class) +public class EndpointDiscoveryAndEndpointOverrideTest { + private static final String OPTIONAL_SERVICE_ENDPOINT = "https://awsendpointdiscoverytestservice.us-west-2.amazonaws.com"; + private static final String REQUIRED_SERVICE_ENDPOINT = "https://awsendpointdiscoveryrequiredtestservice.us-west-2.amazonaws.com"; + private static final String REQUIRED_CUSTOMIZED_SERVICE_ENDPOINT = "https://awsendpointdiscoveryrequiredwithcustomizationtestservice.us-west-2.amazonaws.com"; + private static final String ENDPOINT_OVERRIDE = "https://endpointoverride"; + + private static final List> ALL_TEST_CASES = new ArrayList<>(); + + private final TestCase testCase; + + static { + // This first case (case 0/1) is different than other SDKs/the SEP. This should probably actually throw an exception. + ALL_TEST_CASES.addAll(endpointDiscoveryOptionalCases(true, true, ENDPOINT_OVERRIDE + "/DescribeEndpoints", ENDPOINT_OVERRIDE + "/TestDiscoveryOptional")); + ALL_TEST_CASES.addAll(endpointDiscoveryOptionalCases(true, false, OPTIONAL_SERVICE_ENDPOINT + "/DescribeEndpoints", OPTIONAL_SERVICE_ENDPOINT + "/TestDiscoveryOptional")); + ALL_TEST_CASES.addAll(endpointDiscoveryOptionalCases(false, true, ENDPOINT_OVERRIDE + "/TestDiscoveryOptional")); + ALL_TEST_CASES.addAll(endpointDiscoveryOptionalCases(false, false, OPTIONAL_SERVICE_ENDPOINT + "/TestDiscoveryOptional")); + + ALL_TEST_CASES.addAll(endpointDiscoveryRequiredCases(true, true)); + ALL_TEST_CASES.addAll(endpointDiscoveryRequiredCases(true, false, REQUIRED_SERVICE_ENDPOINT + "/DescribeEndpoints")); + ALL_TEST_CASES.addAll(endpointDiscoveryRequiredCases(false, true)); + ALL_TEST_CASES.addAll(endpointDiscoveryRequiredCases(false, false)); + + // These cases are different from what one would expect. Even though endpoint discovery is required (based on the model), + // if the customer specifies an endpoint override AND the service is customized, we actually bypass endpoint discovery. + ALL_TEST_CASES.addAll(endpointDiscoveryRequiredAndCustomizedCases(true, true, ENDPOINT_OVERRIDE + "/TestDiscoveryRequired")); + ALL_TEST_CASES.addAll(endpointDiscoveryRequiredAndCustomizedCases(true, false, REQUIRED_CUSTOMIZED_SERVICE_ENDPOINT + "/DescribeEndpoints")); + ALL_TEST_CASES.addAll(endpointDiscoveryRequiredAndCustomizedCases(false, true, ENDPOINT_OVERRIDE + "/TestDiscoveryRequired")); + ALL_TEST_CASES.addAll(endpointDiscoveryRequiredAndCustomizedCases(false, false)); + } + + public EndpointDiscoveryAndEndpointOverrideTest(TestCase testCase) { + this.testCase = testCase; + } + + @Before + public void reset() { + EndpointCapturingInterceptor.reset(); + } + + @Parameterized.Parameters(name = "{index} - {0}") + public static List> testCases() { + return ALL_TEST_CASES; + } + + @Test(timeout = 5_000) + public void invokeTestCase() { + try { + testCase.callClient(); + Assert.fail(); + } catch (Throwable e) { + // Unwrap async exceptions so that they can be tested the same as async ones. + if (e instanceof CompletionException) { + e = e.getCause(); + } + + if (testCase.expectedPaths.length > 0) { + // We're using fake endpoints, so we expect even "valid" requests to fail because of unknown host exceptions. + assertThat(e.getCause()).hasRootCauseInstanceOf(UnknownHostException.class); + } else { + // If the requests are not expected to go through, we expect to see illegal state exceptions because the + // client is configured incorrectly. 
+ assertThat(e).isInstanceOf(IllegalStateException.class); + } + } + + if (testCase.enforcePathOrder) { + assertThat(EndpointCapturingInterceptor.ENDPOINTS).containsExactly(testCase.expectedPaths); + } else { + // Async is involved when order doesn't matter, so wait a little while until the expected number of paths arrive. + while (EndpointCapturingInterceptor.ENDPOINTS.size() < testCase.expectedPaths.length) { + Thread.yield(); + } + assertThat(EndpointCapturingInterceptor.ENDPOINTS).containsExactlyInAnyOrder(testCase.expectedPaths); + } + } + + private static List> endpointDiscoveryOptionalCases(boolean endpointDiscoveryEnabled, + boolean endpointOverridden, + String... expectedEndpoints) { + TestCase syncCase = new TestCase<>(createClient(EndpointDiscoveryTestClient.builder().endpointDiscoveryEnabled(endpointDiscoveryEnabled), + endpointOverridden), + c -> c.testDiscoveryOptional(r -> {}), + caseName(EndpointDiscoveryTestClient.class, endpointDiscoveryEnabled, endpointOverridden, expectedEndpoints), + false, + expectedEndpoints); + + TestCase asyncCase = new TestCase<>(createClient(EndpointDiscoveryTestAsyncClient.builder().endpointDiscoveryEnabled(endpointDiscoveryEnabled), + endpointOverridden), + c -> c.testDiscoveryOptional(r -> {}).join(), + caseName(EndpointDiscoveryTestAsyncClient.class, endpointDiscoveryEnabled, endpointOverridden, expectedEndpoints), + false, + expectedEndpoints); + + return Arrays.asList(syncCase, asyncCase); + } + + private static List> endpointDiscoveryRequiredCases(boolean endpointDiscoveryEnabled, + boolean endpointOverridden, + String... expectedEndpoints) { + TestCase syncCase = new TestCase<>(createClient(EndpointDiscoveryRequiredTestClient.builder().endpointDiscoveryEnabled(endpointDiscoveryEnabled), + endpointOverridden), + c -> c.testDiscoveryRequired(r -> {}), + caseName(EndpointDiscoveryRequiredTestClient.class, endpointDiscoveryEnabled, endpointOverridden, expectedEndpoints), + true, + expectedEndpoints); + + TestCase asyncCase = new TestCase<>(createClient(EndpointDiscoveryRequiredTestAsyncClient.builder().endpointDiscoveryEnabled(endpointDiscoveryEnabled), + endpointOverridden), + c -> c.testDiscoveryRequired(r -> {}).join(), + caseName(EndpointDiscoveryRequiredTestAsyncClient.class, endpointDiscoveryEnabled, endpointOverridden, expectedEndpoints), + true, + expectedEndpoints); + + return Arrays.asList(syncCase, asyncCase); + } + + private static List> endpointDiscoveryRequiredAndCustomizedCases(boolean endpointDiscoveryEnabled, + boolean endpointOverridden, + String... 
expectedEndpoints) { + TestCase syncCase = new TestCase<>(createClient(EndpointDiscoveryRequiredWithCustomizationTestClient.builder().endpointDiscoveryEnabled(endpointDiscoveryEnabled), + endpointOverridden), + c -> c.testDiscoveryRequired(r -> {}), + caseName(EndpointDiscoveryRequiredWithCustomizationTestClient.class, endpointDiscoveryEnabled, endpointOverridden, expectedEndpoints), + true, + expectedEndpoints); + + TestCase asyncCase = new TestCase<>(createClient(EndpointDiscoveryRequiredWithCustomizationTestAsyncClient.builder().endpointDiscoveryEnabled(endpointDiscoveryEnabled), + endpointOverridden), + c -> c.testDiscoveryRequired(r -> {}).join(), + caseName(EndpointDiscoveryRequiredWithCustomizationTestAsyncClient.class, endpointDiscoveryEnabled, endpointOverridden, expectedEndpoints), + true, + expectedEndpoints); + + return Arrays.asList(syncCase, asyncCase); + } + + private static T createClient(AwsClientBuilder clientBuilder, + boolean endpointOverridden) { + return clientBuilder.region(Region.US_WEST_2) + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .applyMutation(c -> addEndpointOverride(c, endpointOverridden)) + .overrideConfiguration(c -> c.retryPolicy(p -> p.numRetries(0)) + .addExecutionInterceptor(new EndpointCapturingInterceptor())) + .build(); + } + + private static String caseName(Class client, + boolean endpointDiscoveryEnabled, + boolean endpointOverridden, + String... expectedEndpoints) { + return "(Client=" + client.getSimpleName() + + ", DiscoveryEnabled=" + endpointDiscoveryEnabled + + ", EndpointOverridden=" + endpointOverridden + + ") => (ExpectedEndpoints=" + Arrays.toString(expectedEndpoints) + ")"; + } + + private static void addEndpointOverride(SdkClientBuilder builder, boolean endpointOverridden) { + if (endpointOverridden) { + builder.endpointOverride(URI.create(ENDPOINT_OVERRIDE)); + } + } + + private static class TestCase { + private final T client; + private final Consumer methodCall; + private final String caseName; + private final boolean enforcePathOrder; + private final String[] expectedPaths; + + private TestCase(T client, Consumer methodCall, String caseName, boolean enforcePathOrder, String... expectedPaths) { + this.client = client; + this.methodCall = methodCall; + this.caseName = caseName; + this.enforcePathOrder = enforcePathOrder; + this.expectedPaths = expectedPaths; + } + + private void callClient() { + methodCall.accept(client); + } + + @Override + public String toString() { + return caseName; + } + } + + private static class EndpointCapturingInterceptor implements ExecutionInterceptor { + private static final List ENDPOINTS = Collections.synchronizedList(new ArrayList<>()); + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + ENDPOINTS.add(context.httpRequest().getUri().toString()); + } + + private static void reset() { + ENDPOINTS.clear(); + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/EndpointDiscoveryTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/EndpointDiscoveryTest.java new file mode 100644 index 000000000000..3a78b0e356a9 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/EndpointDiscoveryTest.java @@ -0,0 +1,164 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.URI; +import java.util.concurrent.ExecutionException; +import org.assertj.core.api.AbstractThrowableAssert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryFailedException; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.internal.SdkInternalTestAdvancedClientOption; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.endpointdiscoverytest.EndpointDiscoveryTestAsyncClient; +import software.amazon.awssdk.services.endpointdiscoverytest.EndpointDiscoveryTestClient; +import software.amazon.awssdk.services.endpointdiscoverytest.model.EndpointDiscoveryTestException; + +public class EndpointDiscoveryTest { + + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + private EndpointDiscoveryTestClient client; + + private EndpointDiscoveryTestAsyncClient asyncClient; + + @Before + public void setupClient() { + client = EndpointDiscoveryTestClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .endpointDiscoveryEnabled(true) + .overrideConfiguration(c -> c.putAdvancedOption( + SdkInternalTestAdvancedClientOption.ENDPOINT_OVERRIDDEN_OVERRIDE, false)) + .build(); + + asyncClient = EndpointDiscoveryTestAsyncClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .endpointDiscoveryEnabled(true) + .overrideConfiguration(c -> c.putAdvancedOption( + SdkInternalTestAdvancedClientOption.ENDPOINT_OVERRIDDEN_OVERRIDE, false)) + .build(); + } + + @Test + public void syncRequiredOperation_EmptyEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubEmptyResponse(); + assertThatThrownBy(() -> client.testDiscoveryRequired(r -> {})) + .isInstanceOf(EndpointDiscoveryFailedException.class); + } + + @Test + public void asyncRequiredOperation_EmptyEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubEmptyResponse(); + 
assertAsyncRequiredOperationCallThrowable() + .isInstanceOf(EndpointDiscoveryFailedException.class) + .hasCauseInstanceOf(IllegalArgumentException.class); + } + + @Test + public void syncRequiredOperation_NonRetryableEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubDescribeEndpointsResponse(404); + assertThatThrownBy(() -> client.testDiscoveryRequired(r -> {})) + .isInstanceOf(EndpointDiscoveryFailedException.class) + .hasCauseInstanceOf(EndpointDiscoveryTestException.class); + } + + @Test + public void asyncRequiredOperation_NonRetryableEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubDescribeEndpointsResponse(404); + assertAsyncRequiredOperationCallThrowable() + .isInstanceOf(EndpointDiscoveryFailedException.class); + } + + @Test + public void syncRequiredOperation_RetryableEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubDescribeEndpointsResponse(500); + assertThatThrownBy(() -> client.testDiscoveryRequired(r -> {})) + .isInstanceOf(EndpointDiscoveryFailedException.class) + .hasCauseInstanceOf(EndpointDiscoveryTestException.class); + } + + @Test + public void asyncRequiredOperation_RetryableEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubDescribeEndpointsResponse(500); + assertAsyncRequiredOperationCallThrowable() + .isInstanceOf(EndpointDiscoveryFailedException.class) + .hasCauseInstanceOf(EndpointDiscoveryTestException.class); + } + + @Test + public void syncRequiredOperation_InvalidEndpointEndpointDiscoveryResponse_CausesSdkException() { + stubDescribeEndpointsResponse(200, "invalid", 15); + assertThatThrownBy(() -> client.testDiscoveryRequired(r -> {})) + .isInstanceOf(SdkClientException.class); + } + + @Test + public void asyncRequiredOperation_InvalidEndpointEndpointDiscoveryResponse_CausesSdkException() { + stubDescribeEndpointsResponse(200, "invalid", 15); + assertAsyncRequiredOperationCallThrowable() + .isInstanceOf(SdkClientException.class); + } + + private void stubEmptyResponse() { + stubFor(post(anyUrl()) + .willReturn(aResponse().withStatus(200) + .withBody("{}"))); + } + + private void stubDescribeEndpointsResponse(int status) { + stubDescribeEndpointsResponse(status, "localhost", 60); + } + + private void stubDescribeEndpointsResponse(int status, String address, long cachePeriodInMinutes) { + stubFor(post(urlPathEqualTo("/DescribeEndpoints")) + .willReturn(aResponse().withStatus(status) + .withBody("{" + + " \"Endpoints\": [{" + + " \"Address\": \"" + address + "\"," + + " \"CachePeriodInMinutes\": " + cachePeriodInMinutes + + " }]" + + "}"))); + } + + private AbstractThrowableAssert assertAsyncRequiredOperationCallThrowable() { + try { + asyncClient.testDiscoveryRequired(r -> {}).get(); + throw new AssertionError(); + } catch (InterruptedException e) { + return assertThat(e); + } catch (ExecutionException e) { + return assertThat(e.getCause()); + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HostPrefixTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HostPrefixTest.java new file mode 100644 index 000000000000..0a12dc428fb3 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HostPrefixTest.java @@ -0,0 +1,106 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.net.URI; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; +import software.amazon.awssdk.utils.StringInputStream; +import software.amazon.awssdk.utils.builder.SdkBuilder; + +public class HostPrefixTest { + + private MockSyncHttpClient mockHttpClient; + private ProtocolRestJsonClient client; + private MockAsyncHttpClient mockAsyncClient; + + private ProtocolRestJsonAsyncClient asyncClient; + + @Before + public void setupClient() { + mockHttpClient = new MockSyncHttpClient(); + mockAsyncClient = new MockAsyncHttpClient(); + client = ProtocolRestJsonClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", + "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost")) + .httpClient(mockHttpClient) + .build(); + + asyncClient = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost")) + .httpClient(mockAsyncClient) + .build(); + } + + @Test + public void invalidHostPrefix_shouldThrowException() { + assertThatThrownBy(() -> client.operationWithHostPrefix(b -> b.stringMember("123#"))) + .isInstanceOf(IllegalArgumentException.class).hasMessageContaining("must only contain alphanumeric characters and " + + "dashes"); + + assertThatThrownBy(() -> asyncClient.operationWithHostPrefix(b -> b.stringMember("123#")).join()).hasCauseInstanceOf(IllegalArgumentException.class).hasMessageContaining("must only contain alphanumeric characters and dashes"); + } + + @Test + public void nullHostPrefix_shouldThrowException() { + assertThatThrownBy(() -> client.operationWithHostPrefix(SdkBuilder::build)) + .isInstanceOf(IllegalArgumentException.class).hasMessageContaining("component is missing"); + + assertThatThrownBy(() -> asyncClient.operationWithHostPrefix(SdkBuilder::build).join()) + .hasCauseInstanceOf(IllegalArgumentException.class).hasMessageContaining("component is missing"); + } + + @Test + public void syncValidHostPrefix_shouldPrefixEndpoint() { + mockHttpClient.stubNextResponse(HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200) + .build()) + 
.responseBody(AbortableInputStream.create(new StringInputStream(""))) + .build()); + client.operationWithHostPrefix(b -> b.stringMember("123")); + assertThat(mockHttpClient.getLastRequest().getUri().getHost()).isEqualTo("123-foo.localhost"); + + } + + @Test + public void asyncValidHostPrefix_shouldPrefixEndpoint() { + mockAsyncClient.stubNextResponse(HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200) + .build()) + .responseBody(AbortableInputStream.create(new StringInputStream(""))) + .build()); + asyncClient.operationWithHostPrefix(b -> b.stringMember("123")).join(); + assertThat(mockAsyncClient.getLastRequest().getUri().getHost()).isEqualTo("123-foo.localhost"); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumRequiredTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumRequiredTest.java new file mode 100644 index 000000000000..f481d4ebcd3f --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumRequiredTest.java @@ -0,0 +1,162 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; + +import io.reactivex.Flowable; +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.awscore.client.builder.AwsAsyncClientBuilder; +import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder; +import software.amazon.awssdk.awscore.client.builder.AwsSyncClientBuilder; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.ExecutableHttpRequest; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestxml.ProtocolRestXmlAsyncClient; +import software.amazon.awssdk.services.protocolrestxml.ProtocolRestXmlClient; + +/** + * Verify that the "httpChecksumRequired" C2J trait results in a valid MD5 
checksum of the payload being included in the HTTP + * request. + */ +public class HttpChecksumRequiredTest { + private SdkHttpClient httpClient; + private SdkAsyncHttpClient httpAsyncClient; + + private ProtocolRestJsonClient jsonClient; + private ProtocolRestJsonAsyncClient jsonAsyncClient; + private ProtocolRestXmlClient xmlClient; + private ProtocolRestXmlAsyncClient xmlAsyncClient; + + @Before + public void setup() throws IOException { + httpClient = Mockito.mock(SdkHttpClient.class); + httpAsyncClient = Mockito.mock(SdkAsyncHttpClient.class); + + jsonClient = initializeSync(ProtocolRestJsonClient.builder()).build(); + jsonAsyncClient = initializeAsync(ProtocolRestJsonAsyncClient.builder()).build(); + xmlClient = initializeSync(ProtocolRestXmlClient.builder()).build(); + xmlAsyncClient = initializeAsync(ProtocolRestXmlAsyncClient.builder()).build(); + + SdkHttpFullResponse successfulHttpResponse = SdkHttpResponse.builder() + .statusCode(200) + .putHeader("Content-Length", "0") + .build(); + + ExecutableHttpRequest request = Mockito.mock(ExecutableHttpRequest.class); + Mockito.when(request.call()).thenReturn(HttpExecuteResponse.builder() + .response(successfulHttpResponse) + .build()); + Mockito.when(httpClient.prepareRequest(any())).thenReturn(request); + + Mockito.when(httpAsyncClient.execute(any())).thenAnswer(invocation -> { + AsyncExecuteRequest asyncExecuteRequest = invocation.getArgumentAt(0, AsyncExecuteRequest.class); + asyncExecuteRequest.responseHandler().onHeaders(successfulHttpResponse); + asyncExecuteRequest.responseHandler().onStream(Flowable.empty()); + return CompletableFuture.completedFuture(null); + }); + } + + private & AwsClientBuilder> T initializeSync(T syncClientBuilder) { + return initialize(syncClientBuilder.httpClient(httpClient)); + } + + private & AwsClientBuilder> T initializeAsync(T asyncClientBuilder) { + return initialize(asyncClientBuilder.httpClient(httpAsyncClient)); + } + + private > T initialize(T clientBuilder) { + return clientBuilder.credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.US_WEST_2); + } + + @Test + public void syncJsonSupportsChecksumRequiredTrait() { + jsonClient.operationWithRequiredChecksum(r -> r.stringMember("foo")); + assertThat(getSyncRequest().firstMatchingHeader("Content-MD5")).hasValue("g8VCvPTPCMoU01rBlBVt9w=="); + } + + @Test + public void syncStreamingInputJsonSupportsChecksumRequiredTrait() { + jsonClient.streamingInputOperationWithRequiredChecksum(r -> {}, RequestBody.fromString("foo")); + assertThat(getSyncRequest().firstMatchingHeader("Content-MD5")).hasValue("rL0Y20zC+Fzt72VPzMSk2A=="); + } + + @Test + public void syncStreamingInputXmlSupportsChecksumRequiredTrait() { + xmlClient.streamingInputOperationWithRequiredChecksum(r -> {}, RequestBody.fromString("foo")); + assertThat(getSyncRequest().firstMatchingHeader("Content-MD5")).hasValue("rL0Y20zC+Fzt72VPzMSk2A=="); + } + + @Test + public void syncXmlSupportsChecksumRequiredTrait() { + xmlClient.operationWithRequiredChecksum(r -> r.stringMember("foo")); + assertThat(getSyncRequest().firstMatchingHeader("Content-MD5")).hasValue("vqm481l+Lv0zEvdu+duE6Q=="); + } + + @Test + public void asyncJsonSupportsChecksumRequiredTrait() { + jsonAsyncClient.operationWithRequiredChecksum(r -> r.stringMember("foo")).join(); + assertThat(getAsyncRequest().firstMatchingHeader("Content-MD5")).hasValue("g8VCvPTPCMoU01rBlBVt9w=="); + } + + @Test + public void asyncXmlSupportsChecksumRequiredTrait() { + xmlAsyncClient.operationWithRequiredChecksum(r -> 
r.stringMember("foo")).join(); + assertThat(getAsyncRequest().firstMatchingHeader("Content-MD5")).hasValue("vqm481l+Lv0zEvdu+duE6Q=="); + } + + @Test(expected = CompletionException.class) + public void asyncStreamingInputJsonFailsWithChecksumRequiredTrait() { + jsonAsyncClient.streamingInputOperationWithRequiredChecksum(r -> {}, AsyncRequestBody.fromString("foo")).join(); + } + + @Test(expected = CompletionException.class) + public void asyncStreamingInputXmlFailsWithChecksumRequiredTrait() { + xmlAsyncClient.streamingInputOperationWithRequiredChecksum(r -> {}, AsyncRequestBody.fromString("foo")).join(); + } + + private SdkHttpRequest getSyncRequest() { + ArgumentCaptor captor = ArgumentCaptor.forClass(HttpExecuteRequest.class); + Mockito.verify(httpClient).prepareRequest(captor.capture()); + return captor.getValue().httpRequest(); + } + + private SdkHttpRequest getAsyncRequest() { + ArgumentCaptor captor = ArgumentCaptor.forClass(AsyncExecuteRequest.class); + Mockito.verify(httpAsyncClient).execute(captor.capture()); + return captor.getValue().request(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/InvalidRegionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/InvalidRegionTest.java new file mode 100644 index 000000000000..623f7cbc0386 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/InvalidRegionTest.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; + +public class InvalidRegionTest { + @Test + public void invalidClientRegionGivesHelpfulMessage() { + assertThatThrownBy(() -> ProtocolRestJsonClient.builder() + .region(Region.of("US_EAST_1")) + .credentialsProvider(AnonymousCredentialsProvider.create()) + .build()) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_EAST_1") + .hasMessageContaining("region"); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/ModelSerializationTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/ModelSerializationTest.java new file mode 100644 index 000000000000..b70fc339db3c --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/ModelSerializationTest.java @@ -0,0 +1,119 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.module.SimpleModule; +import java.io.IOException; +import java.time.Instant; +import org.junit.Test; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.services.protocolrestjson.model.AllTypesRequest; +import software.amazon.awssdk.services.protocolrestjson.model.BaseType; +import software.amazon.awssdk.services.protocolrestjson.model.RecursiveStructType; +import software.amazon.awssdk.services.protocolrestjson.model.SimpleStruct; +import software.amazon.awssdk.services.protocolrestjson.model.StructWithNestedBlobType; +import software.amazon.awssdk.services.protocolrestjson.model.StructWithTimestamp; +import software.amazon.awssdk.services.protocolrestjson.model.SubTypeOne; + +/** + * Verify that modeled objects can be marshalled using Jackson. 
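+ * Each request is round-tripped through Jackson as its serializable builder: the
+ * builder is written to JSON with an ObjectMapper (registering a small module so
+ * Instant members survive the trip), read back, rebuilt, and compared to the
+ * original request for equality.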
+ */ +public class ModelSerializationTest { + @Test + public void jacksonSerializationWorksForEmptyRequestObjects() throws IOException { + validateJacksonSerialization(AllTypesRequest.builder().build()); + } + + @Test + public void jacksonSerializationWorksForPopulatedRequestModels() throws IOException { + SdkBytes blob = SdkBytes.fromUtf8String("foo"); + + SimpleStruct simpleStruct = SimpleStruct.builder().stringMember("foo").build(); + StructWithTimestamp structWithTimestamp = StructWithTimestamp.builder().nestedTimestamp(Instant.EPOCH).build(); + StructWithNestedBlobType structWithNestedBlob = StructWithNestedBlobType.builder().nestedBlob(blob).build(); + RecursiveStructType recursiveStruct = RecursiveStructType.builder() + .recursiveStruct(RecursiveStructType.builder().build()) + .build(); + BaseType baseType = BaseType.builder().baseMember("foo").build(); + SubTypeOne subtypeOne = SubTypeOne.builder().subTypeOneMember("foo").build(); + + validateJacksonSerialization(AllTypesRequest.builder() + .stringMember("foo") + .integerMember(5) + .booleanMember(true) + .floatMember(5F) + .doubleMember(5D) + .longMember(5L) + .simpleList("foo", "bar") + .listOfMaps(singletonList(singletonMap("foo", "bar"))) + .listOfStructs(simpleStruct) + .mapOfStringToIntegerList(singletonMap("foo", singletonList(5))) + .mapOfStringToStruct(singletonMap("foo", simpleStruct)) + .timestampMember(Instant.EPOCH) + .structWithNestedTimestampMember(structWithTimestamp) + .blobArg(blob) + .structWithNestedBlob(structWithNestedBlob) + .blobMap(singletonMap("foo", blob)) + .listOfBlobs(blob, blob) + .recursiveStruct(recursiveStruct) + .polymorphicTypeWithSubTypes(baseType) + .polymorphicTypeWithoutSubTypes(subtypeOne) + .enumMember("foo") + .listOfEnumsWithStrings("foo", "bar") + .mapOfEnumToEnumWithStrings(singletonMap("foo", "bar")) + .build()); + } + + private void validateJacksonSerialization(AllTypesRequest original) throws IOException { + SimpleModule instantModule = new SimpleModule(); + instantModule.addSerializer(Instant.class, new InstantSerializer()); + instantModule.addDeserializer(Instant.class, new InstantDeserializer()); + + ObjectMapper mapper = new ObjectMapper(); + mapper.registerModule(instantModule); + + String serialized = mapper.writeValueAsString(original.toBuilder()); + AllTypesRequest deserialized = mapper.readValue(serialized, AllTypesRequest.serializableBuilderClass()).build(); + assertThat(deserialized).isEqualTo(original); + + } + + private class InstantSerializer extends JsonSerializer { + @Override + public void serialize(Instant t, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) + throws IOException { + jsonGenerator.writeString(t.toString()); + } + } + + private class InstantDeserializer extends JsonDeserializer { + @Override + public Instant deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException { + return Instant.parse(jsonParser.getText()); + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/ProfileFileConfigurationTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/ProfileFileConfigurationTest.java new file mode 100644 index 000000000000..a70bb0f2a40f --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/ProfileFileConfigurationTest.java @@ -0,0 +1,100 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; + +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; +import software.amazon.awssdk.awscore.AwsExecutionAttribute; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.signer.NoOpSigner; +import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; +import software.amazon.awssdk.utils.StringInputStream; + +public class ProfileFileConfigurationTest { + @Test + public void profileIsHonoredForCredentialsAndRegion() { + EnvironmentVariableHelper.run(env -> { + env.remove(SdkSystemSetting.AWS_REGION); + env.remove(SdkSystemSetting.AWS_ACCESS_KEY_ID); + env.remove(SdkSystemSetting.AWS_SECRET_ACCESS_KEY); + + String profileContent = "[profile foo]\n" + + "region = us-banana-46\n" + + "aws_access_key_id = profileIsHonoredForCredentials_akid\n" + + "aws_secret_access_key = profileIsHonoredForCredentials_skid"; + String profileName = "foo"; + Signer signer = mock(NoOpSigner.class); + + ProtocolRestJsonClient client = + ProtocolRestJsonClient.builder() + .overrideConfiguration(overrideConfig(profileContent, profileName, signer)) + .build(); + + Mockito.when(signer.sign(any(), any())).thenCallRealMethod(); + + try { + client.allTypes(); + } catch (SdkClientException e) { + // expected + } + + ArgumentCaptor httpRequest = ArgumentCaptor.forClass(SdkHttpFullRequest.class); + ArgumentCaptor attributes = ArgumentCaptor.forClass(ExecutionAttributes.class); + Mockito.verify(signer).sign(httpRequest.capture(), attributes.capture()); + + AwsCredentials credentials = attributes.getValue().getAttribute(AwsSignerExecutionAttribute.AWS_CREDENTIALS); + assertThat(credentials.accessKeyId()).isEqualTo("profileIsHonoredForCredentials_akid"); + assertThat(credentials.secretAccessKey()).isEqualTo("profileIsHonoredForCredentials_skid"); + + Region region = attributes.getValue().getAttribute(AwsExecutionAttribute.AWS_REGION); + assertThat(region.id()).isEqualTo("us-banana-46"); + + assertThat(httpRequest.getValue().getUri().getHost()).contains("us-banana-46"); + }); + } + + private ClientOverrideConfiguration overrideConfig(String profileContent, String 
profileName, Signer signer) { + return ClientOverrideConfiguration.builder() + .defaultProfileFile(profileFile(profileContent)) + .defaultProfileName(profileName) + .retryPolicy(r -> r.numRetries(0)) + .putAdvancedOption(SdkAdvancedClientOption.SIGNER, signer) + .build(); + } + + private ProfileFile profileFile(String content) { + return ProfileFile.builder() + .content(new StringInputStream(content)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/customresponsemetadata/CustomResponseMetadataTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/customresponsemetadata/CustomResponseMetadataTest.java index 85af47742483..e0104a254ff3 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/customresponsemetadata/CustomResponseMetadataTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/customresponsemetadata/CustomResponseMetadataTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/eventstreams/EventDispatchTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/eventstreams/EventDispatchTest.java new file mode 100644 index 000000000000..62daf1df6062 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/eventstreams/EventDispatchTest.java @@ -0,0 +1,169 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.eventstreams; + +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +import java.util.function.Consumer; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.services.eventstreamrestjson.model.EventOne; +import software.amazon.awssdk.services.eventstreamrestjson.model.EventStream; +import software.amazon.awssdk.services.eventstreamrestjson.model.EventStreamOperationResponseHandler; +import software.amazon.awssdk.services.eventstreamrestjson.model.EventTwo; + +/** + * Tests to ensure that the generated classes that represent each event on an + * event stream call the correct visitor methods; i.e. that the double + * dispatching works as expected. 
+ */ +@RunWith(MockitoJUnitRunner.class) +public class EventDispatchTest { + + @Mock + private EventStreamOperationResponseHandler.Visitor visitor; + + @Mock + private Consumer onDefaultConsumer; + + @Mock + private Consumer theEventOneConsumer; + + @Mock + private Consumer legacyGeneratedEventConsumer; + + @Mock + private Consumer eventTwoConsumer; + + @Mock + private Consumer secondEventTwoConsumer; + + @Rule + public ExpectedException expected = ExpectedException.none(); + + @Test + public void test_acceptTheEventOne_correctVisitorMethodCalled() { + EventStream eventStream = EventStream.theEventOneBuilder().build(); + eventStream.accept(visitor); + + verify(visitor).visitTheEventOne(Mockito.eq((EventOne) eventStream)); + verifyNoMoreInteractions(visitor); + } + + @Test + public void test_acceptTheEventOne_visitorBuiltWithBuilder_correctVisitorMethodCalled() { + EventStreamOperationResponseHandler.Visitor visitor = visitorBuiltWithBuilder(); + EventStream eventStream = EventStream.theEventOneBuilder().build(); + + eventStream.accept(visitor); + + verify(theEventOneConsumer).accept(eq((EventOne) eventStream)); + verifyNoMoreConsumerInteractions(); + } + + @Test + public void test_acceptLegacyGeneratedEvent_correctVisitorMethodCalled() { + EventStream eventStream = EventOne.builder().build(); + eventStream.accept(visitor); + + // Note: notice the visit() method rather than visitLegacyGeneratedEvent() + verify(visitor).visit(Mockito.eq((EventOne) eventStream)); + verifyNoMoreInteractions(visitor); + } + + @Test + public void test_acceptLegacyGeneratedEvent_visitorBuiltWithBuilder_correctVisitorMethodCalled() { + EventStreamOperationResponseHandler.Visitor visitor = visitorBuiltWithBuilder(); + EventStream eventStream = EventOne.builder().build(); + + eventStream.accept(visitor); + + verify(legacyGeneratedEventConsumer).accept(eq((EventOne) eventStream)); + verifyNoMoreConsumerInteractions(); + } + + @Test + public void test_acceptEventTwo_correctVisitorMethodCalled() { + EventStream eventStream = EventStream.eventTwoBuilder().build(); + eventStream.accept(visitor); + + verify(visitor).visitEventTwo(Mockito.eq((EventTwo) eventStream)); + verifyNoMoreInteractions(visitor); + } + + @Test + public void test_acceptEvenTwo_visitorBuiltWithBuilder_correctVisitorMethodCalled() { + EventStreamOperationResponseHandler.Visitor visitor = visitorBuiltWithBuilder(); + + EventStream eventStream = EventStream.eventTwoBuilder().build(); + + eventStream.accept(visitor); + + verify(eventTwoConsumer).accept(eq((EventTwo) eventStream)); + verifyNoMoreConsumerInteractions(); + } + + @Test + public void test_acceptSecondEventTwo_correctVisitorMethodCalled() { + EventStream eventStream = EventStream.secondEventTwoBuilder().build(); + eventStream.accept(visitor); + + verify(visitor).visitSecondEventTwo(Mockito.eq((EventTwo) eventStream)); + verifyNoMoreInteractions(visitor); + } + + @Test + public void test_acceptSecondEvenTwo_visitorBuiltWithBuilder_correctVisitorMethodCalled() { + EventStreamOperationResponseHandler.Visitor visitor = visitorBuiltWithBuilder(); + + EventStream eventStream = EventStream.secondEventTwoBuilder().build(); + + eventStream.accept(visitor); + + verify(secondEventTwoConsumer).accept(eq((EventTwo) eventStream)); + verifyNoMoreConsumerInteractions(); + } + + @Test + public void test_acceptOnBaseClass_UnCustomizedEvent_throwsException() { + expected.expect(UnsupportedOperationException.class); + + EventTwo eventTwo = EventTwo.builder().build(); + eventTwo.accept(visitor); + } + + private 
EventStreamOperationResponseHandler.Visitor visitorBuiltWithBuilder() { + return EventStreamOperationResponseHandler.Visitor.builder() + .onDefault(onDefaultConsumer) + .onTheEventOne(theEventOneConsumer) + .onEventTwo(eventTwoConsumer) + .onEventOne(legacyGeneratedEventConsumer) + .onSecondEventTwo(secondEventTwoConsumer) + .build(); + } + + private void verifyNoMoreConsumerInteractions() { + verifyNoMoreInteractions(onDefaultConsumer, theEventOneConsumer, eventTwoConsumer, legacyGeneratedEventConsumer, + secondEventTwoConsumer); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/eventstreams/EventMarshallingTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/eventstreams/EventMarshallingTest.java new file mode 100644 index 000000000000..c439f66e17fc --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/eventstreams/EventMarshallingTest.java @@ -0,0 +1,146 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.eventstreams; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.when; +import io.reactivex.Flowable; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkHttpContentPublisher; +import software.amazon.awssdk.services.eventstreamrestjson.EventStreamRestJsonAsyncClient; +import software.amazon.awssdk.services.eventstreamrestjson.model.EventStream; +import software.amazon.awssdk.services.eventstreamrestjson.model.EventStreamOperationRequest; +import software.amazon.awssdk.services.eventstreamrestjson.model.EventStreamOperationResponseHandler; +import software.amazon.awssdk.services.eventstreamrestjson.model.InputEventStream; +import software.amazon.eventstream.Message; +import software.amazon.eventstream.MessageDecoder; + +@RunWith(MockitoJUnitRunner.class) +public class EventMarshallingTest { + @Mock + public SdkAsyncHttpClient mockHttpClient; + + private EventStreamRestJsonAsyncClient client; + + private List marshalledEvents; + + private MessageDecoder chunkDecoder; + private MessageDecoder eventDecoder; 
+ + @Before + public void setup() { + when(mockHttpClient.execute(any(AsyncExecuteRequest.class))).thenAnswer(this::mockExecute); + client = EventStreamRestJsonAsyncClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .httpClient(mockHttpClient) + .build(); + + marshalledEvents = new ArrayList<>(); + + chunkDecoder = new MessageDecoder(); + eventDecoder = new MessageDecoder(); + } + + @Test + public void testMarshalling_setsCorrectEventType() { + List inputEvents = Stream.of( + InputEventStream.inputEventBuilder().build(), + InputEventStream.inputEventBBuilder().build(), + InputEventStream.inputEventTwoBuilder().build() + ).collect(Collectors.toList()); + + Flowable inputStream = Flowable.fromIterable(inputEvents); + + client.eventStreamOperation(EventStreamOperationRequest.builder().build(), inputStream, EventStreamOperationResponseHandler.builder() + .subscriber(() -> new Subscriber() { + @Override + public void onSubscribe(Subscription subscription) { + + } + + @Override + public void onNext(EventStream eventStream) { + + } + + @Override + public void onError(Throwable throwable) { + + } + + @Override + public void onComplete() { + + } + }) + .build()).join(); + + List expectedTypes = Stream.of( + "InputEvent", + "InputEventB", + "InputEventTwo" + ).collect(Collectors.toList());; + + assertThat(marshalledEvents).hasSize(inputEvents.size()); + + for (int i = 0; i < marshalledEvents.size(); ++i) { + Message marshalledEvent = marshalledEvents.get(i); + String expectedType = expectedTypes.get(i); + assertThat(marshalledEvent.getHeaders().get(":event-type").getString()) + .isEqualTo(expectedType); + } + } + + private CompletableFuture mockExecute(InvocationOnMock invocation) { + AsyncExecuteRequest request = invocation.getArgumentAt(0, AsyncExecuteRequest.class); + SdkHttpContentPublisher content = request.requestContentPublisher(); + List chunks = Flowable.fromPublisher(content).toList().blockingGet(); + + for (ByteBuffer c : chunks) { + chunkDecoder.feed(c); + } + + for (Message m : chunkDecoder.getDecodedMessages()) { + eventDecoder.feed(m.getPayload()); + } + + marshalledEvents.addAll(eventDecoder.getDecodedMessages()); + + request.responseHandler().onHeaders(SdkHttpResponse.builder().statusCode(200).build()); + request.responseHandler().onStream(Flowable.empty()); + + return CompletableFuture.completedFuture(null); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java new file mode 100644 index 000000000000..12541ba5e417 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java @@ -0,0 +1,247 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.metrics; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.time.Duration; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.ExecutableHttpRequest; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.model.EmptyModeledException; + +@RunWith(MockitoJUnitRunner.class) +public class CoreMetricsTest { + private static final String SERVICE_ID = "AmazonProtocolRestJson"; + private static final String REQUEST_ID = "req-id"; + private static final String EXTENDED_REQUEST_ID = "extended-id"; + private static final int MAX_RETRIES = 2; + + private static ProtocolRestJsonClient client; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Mock + private SdkHttpClient mockHttpClient; + + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + @Mock + private MetricPublisher mockPublisher; + + @Before + public void setup() throws IOException { + client = ProtocolRestJsonClient.builder() + .httpClient(mockHttpClient) + .credentialsProvider(mockCredentialsProvider) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .build(); + AbortableInputStream content = contentStream("{}"); + SdkHttpFullResponse httpResponse = SdkHttpFullResponse.builder() + .statusCode(200) + .putHeader("x-amz-request-id", REQUEST_ID) + .putHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .content(content) + .build(); + + HttpExecuteResponse mockResponse = mockExecuteResponse(httpResponse); + + ExecutableHttpRequest mockExecuteRequest = mock(ExecutableHttpRequest.class); + when(mockExecuteRequest.call()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return mockResponse; + }); + + when(mockHttpClient.prepareRequest(any(HttpExecuteRequest.class))) + .thenReturn(mockExecuteRequest); + + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + } + + @After + public void teardown() { + if (client != null) { + client.close(); + } + client = null; + } + + @Test + 
public void testApiCall_noConfiguredPublisher_succeeds() { + ProtocolRestJsonClient noPublisher = ProtocolRestJsonClient.builder() + .credentialsProvider(mockCredentialsProvider) + .httpClient(mockHttpClient) + .build(); + + noPublisher.allTypes(); + } + + @Test + public void testApiCall_publisherOverriddenOnRequest_requestPublisherTakesPrecedence() { + MetricPublisher requestMetricPublisher = mock(MetricPublisher.class); + + client.allTypes(r -> r.overrideConfiguration(o -> o.addMetricPublisher(requestMetricPublisher))); + + verify(requestMetricPublisher).publish(any(MetricCollection.class)); + verifyZeroInteractions(mockPublisher); + } + + @Test + public void testApiCall_operationSuccessful_addsMetrics() { + client.allTypes(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(mockPublisher).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + + assertThat(capturedCollection.name()).isEqualTo("ApiCall"); + assertThat(capturedCollection.metricValues(CoreMetric.SERVICE_ID)) + .containsExactly(SERVICE_ID); + assertThat(capturedCollection.metricValues(CoreMetric.OPERATION_NAME)) + .containsExactly("AllTypes"); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(true); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_DURATION).get(0)) + .isGreaterThan(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.CREDENTIALS_FETCH_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.MARSHALLING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(0); + + assertThat(capturedCollection.children()).hasSize(1); + MetricCollection attemptCollection = capturedCollection.children().get(0); + + assertThat(attemptCollection.name()).isEqualTo("ApiCallAttempt"); + assertThat(attemptCollection.metricValues(CoreMetric.BACKOFF_DELAY_DURATION)) + .containsExactly(Duration.ZERO); + assertThat(attemptCollection.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .containsExactly(200); + assertThat(attemptCollection.metricValues(CoreMetric.SIGNING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(attemptCollection.metricValues(CoreMetric.AWS_REQUEST_ID)) + .containsExactly(REQUEST_ID); + assertThat(attemptCollection.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) + .containsExactly(EXTENDED_REQUEST_ID); + assertThat(attemptCollection.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ofMillis(100)); + assertThat(attemptCollection.metricValues(CoreMetric.UNMARSHALLING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + } + + @Test + public void testApiCall_serviceReturnsError_errorInfoIncludedInMetrics() throws IOException { + AbortableInputStream content = contentStream("{}"); + + SdkHttpFullResponse httpResponse = SdkHttpFullResponse.builder() + .statusCode(500) + .putHeader("x-amz-request-id", REQUEST_ID) + .putHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .putHeader("X-Amzn-Errortype", "EmptyModeledException") + .content(content) + .build(); + + HttpExecuteResponse response = mockExecuteResponse(httpResponse); + + ExecutableHttpRequest mockExecuteRequest = mock(ExecutableHttpRequest.class); + when(mockExecuteRequest.call()).thenReturn(response); + + when(mockHttpClient.prepareRequest(any(HttpExecuteRequest.class))) + 
.thenReturn(mockExecuteRequest); + + thrown.expect(EmptyModeledException.class); + try { + client.allTypes(); + } finally { + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(mockPublisher).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + + assertThat(capturedCollection.children()).hasSize(MAX_RETRIES + 1); + assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(MAX_RETRIES); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(false); + + for (MetricCollection requestMetrics : capturedCollection.children()) { + // A service exception is still a successful HTTP execution so + // we should still have HTTP metrics as well. + assertThat(requestMetrics.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .containsExactly(500); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_REQUEST_ID)) + .containsExactly(REQUEST_ID); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) + .containsExactly(EXTENDED_REQUEST_ID); + assertThat(requestMetrics.metricValues(CoreMetric.SERVICE_CALL_DURATION)).hasOnlyOneElementSatisfying(d -> { + assertThat(d).isGreaterThanOrEqualTo(Duration.ZERO); + }); + assertThat(requestMetrics.metricValues(CoreMetric.UNMARSHALLING_DURATION)).hasOnlyOneElementSatisfying(d -> { + assertThat(d).isGreaterThanOrEqualTo(Duration.ZERO); + }); + } + } + } + + private static HttpExecuteResponse mockExecuteResponse(SdkHttpFullResponse httpResponse) { + HttpExecuteResponse mockResponse = mock(HttpExecuteResponse.class); + when(mockResponse.httpResponse()).thenReturn(httpResponse); + when(mockResponse.responseBody()).thenReturn(httpResponse.content()); + return mockResponse; + } + + private static AbortableInputStream contentStream(String content) { + ByteArrayInputStream baos = new ByteArrayInputStream(content.getBytes()); + return AbortableInputStream.create(baos); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/SyncClientMetricPublisherResolutionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/SyncClientMetricPublisherResolutionTest.java new file mode 100644 index 000000000000..9006ec7d6c44 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/SyncClientMetricPublisherResolutionTest.java @@ -0,0 +1,176 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.metrics; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.Arrays; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.ExecutableHttpRequest; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClientBuilder; + +@RunWith(MockitoJUnitRunner.class) +public class SyncClientMetricPublisherResolutionTest { + + @Mock + private SdkHttpClient mockHttpClient; + + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + private ProtocolRestJsonClient client; + + @After + public void teardown() { + if (client != null) { + client.close(); + } + + client = null; + } + + @Test + public void testApiCall_noPublishersSet_noException() throws IOException { + client = clientWithPublishers(); + client.allTypes(); + } + + @Test + public void testApiCall_publishersSetOnClient_clientPublishersInvoked() throws IOException { + MetricPublisher publisher1 = mock(MetricPublisher.class); + MetricPublisher publisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(publisher1, publisher2); + + try { + client.allTypes(); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(publisher1).publish(any(MetricCollection.class)); + verify(publisher2).publish(any(MetricCollection.class)); + } + } + + @Test + public void testApiCall_publishersSetOnRequest_requestPublishersInvoked() throws IOException { + MetricPublisher publisher1 = mock(MetricPublisher.class); + MetricPublisher publisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(); + + try { + client.allTypes(r -> r.overrideConfiguration(o -> + o.addMetricPublisher(publisher1).addMetricPublisher(publisher2))); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(publisher1).publish(any(MetricCollection.class)); + verify(publisher2).publish(any(MetricCollection.class)); + } + } + + @Test + public void testApiCall_publishersSetOnClientAndRequest_requestPublishersInvoked() throws IOException { + MetricPublisher clientPublisher1 = mock(MetricPublisher.class); + MetricPublisher clientPublisher2 = mock(MetricPublisher.class); + + MetricPublisher requestPublisher1 = mock(MetricPublisher.class); + MetricPublisher requestPublisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(clientPublisher1, clientPublisher2); + + try { + client.allTypes(r -> r.overrideConfiguration(o -> + 
o.addMetricPublisher(requestPublisher1).addMetricPublisher(requestPublisher2))); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(requestPublisher1).publish(any(MetricCollection.class)); + verify(requestPublisher2).publish(any(MetricCollection.class)); + verifyZeroInteractions(clientPublisher1); + verifyZeroInteractions(clientPublisher2); + } + } + + private ProtocolRestJsonClient clientWithPublishers(MetricPublisher... metricPublishers) throws IOException { + ProtocolRestJsonClientBuilder builder = ProtocolRestJsonClient.builder() + .httpClient(mockHttpClient) + .credentialsProvider(mockCredentialsProvider); + + AbortableInputStream content = AbortableInputStream.create(new ByteArrayInputStream("{}".getBytes())); + SdkHttpFullResponse httpResponse = SdkHttpFullResponse.builder() + .statusCode(200) + .content(content) + .build(); + + HttpExecuteResponse mockResponse = mockExecuteResponse(httpResponse); + + ExecutableHttpRequest mockExecuteRequest = mock(ExecutableHttpRequest.class); + when(mockExecuteRequest.call()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return mockResponse; + }); + + when(mockHttpClient.prepareRequest(any(HttpExecuteRequest.class))) + .thenReturn(mockExecuteRequest); + + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + + if (metricPublishers != null) { + builder.overrideConfiguration(o -> o.metricPublishers(Arrays.asList(metricPublishers))); + } + + return builder.build(); + } + + private static HttpExecuteResponse mockExecuteResponse(SdkHttpFullResponse httpResponse) { + HttpExecuteResponse mockResponse = mock(HttpExecuteResponse.class); + when(mockResponse.httpResponse()).thenReturn(httpResponse); + when(mockResponse.responseBody()).thenReturn(httpResponse.content()); + return mockResponse; + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncClientMetricPublisherResolutionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncClientMetricPublisherResolutionTest.java new file mode 100644 index 000000000000..64ea187f46f8 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncClientMetricPublisherResolutionTest.java @@ -0,0 +1,160 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.metrics.async; + +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.io.IOException; +import java.net.URI; +import java.util.Arrays; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClientBuilder; +import software.amazon.awssdk.services.protocolrestjson.model.ProtocolRestJsonException; + +@RunWith(MockitoJUnitRunner.class) +public class AsyncClientMetricPublisherResolutionTest { + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + private ProtocolRestJsonAsyncClient client; + + + @Before + public void setup() { + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + } + + @After + public void teardown() { + wireMock.resetAll(); + if (client != null) { + client.close(); + } + client = null; + } + + @Test + public void testApiCall_noPublishersSet_noNpe() { + client = clientWithPublishers(); + // This is thrown because all the requests to our wiremock are + // nonsense, it's just important that we don't get NPE because we + // don't have publishers set + thrown.expectCause(instanceOf(ProtocolRestJsonException.class)); + client.allTypes().join(); + } + + @Test + public void testApiCall_publishersSetOnClient_clientPublishersInvoked() throws IOException { + MetricPublisher publisher1 = mock(MetricPublisher.class); + MetricPublisher publisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(publisher1, publisher2); + + try { + client.allTypes().join(); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(publisher1).publish(any(MetricCollection.class)); + verify(publisher2).publish(any(MetricCollection.class)); + } + } + + @Test + public void testApiCall_publishersSetOnRequest_requestPublishersInvoked() throws IOException { + MetricPublisher publisher1 = mock(MetricPublisher.class); + MetricPublisher publisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(); + + try { + client.allTypes(r -> r.overrideConfiguration(o -> + o.addMetricPublisher(publisher1).addMetricPublisher(publisher2))) + .join(); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(publisher1).publish(any(MetricCollection.class)); + verify(publisher2).publish(any(MetricCollection.class)); + } + } + + 
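The resolution rule exercised by these tests is that request-level publishers, when present, are used instead of client-level publishers. As a minimal sketch (not part of this change), a publisher that records collections in memory would satisfy the same MetricPublisher contract the Mockito mocks verify; RecordingMetricPublisher is a hypothetical name, not an SDK class:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import software.amazon.awssdk.metrics.MetricCollection;
import software.amazon.awssdk.metrics.MetricPublisher;

/**
 * Minimal in-memory publisher: publish(MetricCollection) is invoked once per API call,
 * which is exactly what the verify(...) assertions above count.
 */
public final class RecordingMetricPublisher implements MetricPublisher {
    private final Queue<MetricCollection> collections = new ConcurrentLinkedQueue<>();

    @Override
    public void publish(MetricCollection metricCollection) {
        // Keep the collection so a test (or application) can inspect it later.
        collections.add(metricCollection);
    }

    public Queue<MetricCollection> collections() {
        return collections;
    }

    @Override
    public void close() {
        collections.clear();
    }
}

Attaching it follows the two paths these tests resolve between: builder.overrideConfiguration(o -> o.addMetricPublisher(recorder)) at the client level, or r.overrideConfiguration(o -> o.addMetricPublisher(recorder)) on an individual request. When both are set, only the request-level publisher receives the collection, which is what the client-and-request test below asserts with verifyZeroInteractions on the client publishers.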
@Test + public void testApiCall_publishersSetOnClientAndRequest_requestPublishersInvoked() throws IOException { + MetricPublisher clientPublisher1 = mock(MetricPublisher.class); + MetricPublisher clientPublisher2 = mock(MetricPublisher.class); + + MetricPublisher requestPublisher1 = mock(MetricPublisher.class); + MetricPublisher requestPublisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(clientPublisher1, clientPublisher2); + + try { + client.allTypes(r -> r.overrideConfiguration(o -> + o.addMetricPublisher(requestPublisher1).addMetricPublisher(requestPublisher2))) + .join(); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(requestPublisher1).publish(any(MetricCollection.class)); + verify(requestPublisher2).publish(any(MetricCollection.class)); + verifyZeroInteractions(clientPublisher1); + verifyZeroInteractions(clientPublisher2); + } + } + + private ProtocolRestJsonAsyncClient clientWithPublishers(MetricPublisher... metricPublishers) { + ProtocolRestJsonAsyncClientBuilder builder = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(mockCredentialsProvider) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())); + + if (metricPublishers != null) { + builder.overrideConfiguration(o -> o.metricPublishers(Arrays.asList(metricPublishers))); + } + + return builder.build(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncCoreMetricsTest.java new file mode 100644 index 000000000000..1a0852da47af --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncCoreMetricsTest.java @@ -0,0 +1,123 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.metrics.async; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; + +/** + * Core metrics test for async non-streaming API + */ +@RunWith(MockitoJUnitRunner.class) +public class AsyncCoreMetricsTest extends BaseAsyncCoreMetricsTest { + + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + @Mock + private MetricPublisher mockPublisher; + + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + private ProtocolRestJsonAsyncClient client; + + + @Before + public void setup() throws IOException { + client = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(mockCredentialsProvider) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .build(); + + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + } + + @After + public void teardown() { + wireMock.resetAll(); + if (client != null) { + client.close(); + } + client = null; + } + + @Override + String operationName() { + return "AllTypes"; + } + + @Override + Supplier> callable() { + return () -> client.allTypes(); + } + + @Override + MetricPublisher publisher() { + return mockPublisher; + } + + @Test + public void apiCall_noConfiguredPublisher_succeeds() { + stubSuccessfulResponse(); + ProtocolRestJsonAsyncClient noPublisher = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(mockCredentialsProvider) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .build(); + + noPublisher.allTypes().join(); + } + + @Test + public void apiCall_publisherOverriddenOnRequest_requestPublisherTakesPrecedence() { + stubSuccessfulResponse(); + MetricPublisher requestMetricPublisher = mock(MetricPublisher.class); + + client.allTypes(r -> r.overrideConfiguration(o -> o.addMetricPublisher(requestMetricPublisher))).join(); + + verify(requestMetricPublisher).publish(any(MetricCollection.class)); + verifyZeroInteractions(mockPublisher); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncEventStreamingCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncEventStreamingCoreMetricsTest.java new file mode 100644 index 000000000000..8641e9db4e39 --- /dev/null +++ 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncEventStreamingCoreMetricsTest.java @@ -0,0 +1,102 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.metrics.async; + +import static org.mockito.Mockito.when; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.URI; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.async.EmptyPublisher; +import software.amazon.awssdk.core.signer.NoOpSigner; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.model.EventStreamOperationRequest; +import software.amazon.awssdk.services.protocolrestjson.model.EventStreamOperationResponseHandler; + +/** + * Core metrics test for async streaming API + */ +@RunWith(MockitoJUnitRunner.class) +public class AsyncEventStreamingCoreMetricsTest extends BaseAsyncCoreMetricsTest { + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + @Mock + private MetricPublisher mockPublisher; + + + private ProtocolRestJsonAsyncClient client; + + @Before + public void setup() { + client = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(mockCredentialsProvider) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher) + .retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .build(); + + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + } + + @After + public void teardown() { + wireMock.resetAll(); + if (client != null) { + client.close(); + } + client = null; + } + + @Override + String operationName() { + return "EventStreamOperation"; + } + + @Override + Supplier> callable() { + return () -> client.eventStreamOperation(EventStreamOperationRequest.builder().overrideConfiguration(b -> b.signer(new NoOpSigner())).build(), + new EmptyPublisher<>(), + EventStreamOperationResponseHandler.builder() + .subscriber(b -> {}) + .build()); + } + + @Override + MetricPublisher publisher() { + return mockPublisher; + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncStreamingCoreMetricsTest.java 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncStreamingCoreMetricsTest.java new file mode 100644 index 000000000000..5b6b148046f1 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncStreamingCoreMetricsTest.java @@ -0,0 +1,97 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.metrics.async; + +import static org.mockito.Mockito.when; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.model.StreamingInputOperationRequest; + +/** + * Core metrics test for async streaming API + */ +@RunWith(MockitoJUnitRunner.class) +public class AsyncStreamingCoreMetricsTest extends BaseAsyncCoreMetricsTest { + + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + @Mock + private MetricPublisher mockPublisher; + + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + private ProtocolRestJsonAsyncClient client; + + @Before + public void setup() throws IOException { + client = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(mockCredentialsProvider) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .build(); + + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + } + + @After + public void teardown() { + wireMock.resetAll(); + if (client != null) { + client.close(); + } + client = null; + } + + @Override + String operationName() { + return "StreamingInputOperation"; + } + + @Override + Supplier> callable() { + return () -> client.streamingInputOperation(StreamingInputOperationRequest.builder().build(), + AsyncRequestBody.fromBytes("helloworld".getBytes())); + } + + @Override + MetricPublisher publisher() { + return mockPublisher; + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java new file mode 100644 index 000000000000..bf31b0fa4ad2 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java @@ -0,0 +1,254 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.metrics.async; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Mockito.verify; + +import com.github.tomakehurst.wiremock.http.Fault; +import com.github.tomakehurst.wiremock.stubbing.Scenario; +import java.time.Duration; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.model.EmptyModeledException; + +@RunWith(MockitoJUnitRunner.class) +public abstract class BaseAsyncCoreMetricsTest { + private static final String SERVICE_ID = "AmazonProtocolRestJson"; + private static final String REQUEST_ID = "req-id"; + private static final String EXTENDED_REQUEST_ID = "extended-id"; + static final int MAX_RETRIES = 2; + public static final Duration FIXED_DELAY = Duration.ofMillis(500); + + @Test + public void apiCall_operationSuccessful_addsMetrics() { + stubSuccessfulResponse(); + callable().get().join(); + addDelayIfNeeded(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(publisher()).publish(collectionCaptor.capture()); + MetricCollection capturedCollection = collectionCaptor.getValue(); + + verifySuccessfulApiCallCollection(capturedCollection); + + assertThat(capturedCollection.children()).hasSize(1); + MetricCollection attemptCollection = capturedCollection.children().get(0); + + assertThat(attemptCollection.name()).isEqualTo("ApiCallAttempt"); + + verifySuccessfulApiCallAttemptCollection(attemptCollection); + assertThat(attemptCollection.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) + .isGreaterThanOrEqualTo(FIXED_DELAY); + } + + @Test + public void apiCall_allRetryAttemptsFailedOf500() { + stubErrorResponse(); + assertThatThrownBy(() -> 
callable().get().join()).hasCauseInstanceOf(EmptyModeledException.class); + addDelayIfNeeded(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(publisher()).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + verifyFailedApiCallCollection(capturedCollection); + assertThat(capturedCollection.children()).hasSize(MAX_RETRIES + 1); + + capturedCollection.children().forEach(this::verifyFailedApiCallAttemptCollection); + } + + @Test + public void apiCall_allRetryAttemptsFailedOfNetworkError() { + stubNetworkError(); + assertThatThrownBy(() -> callable().get().join()).hasCauseInstanceOf(SdkClientException.class); + addDelayIfNeeded(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(publisher()).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + verifyFailedApiCallCollection(capturedCollection); + assertThat(capturedCollection.children()).hasSize(MAX_RETRIES + 1); + + capturedCollection.children().forEach(requestMetrics -> { + assertThat(requestMetrics.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .isEmpty(); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_REQUEST_ID)) + .isEmpty(); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) + .isEmpty(); + assertThat(requestMetrics.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) + .isGreaterThanOrEqualTo(FIXED_DELAY); + }); + } + + @Test + public void apiCall_firstAttemptFailedRetrySucceeded() { + stubSuccessfulRetry(); + callable().get().join(); + addDelayIfNeeded(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(publisher()).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + verifyApiCallCollection(capturedCollection); + assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(1); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(true); + + assertThat(capturedCollection.children()).hasSize(2); + + MetricCollection failedAttempt = capturedCollection.children().get(0); + verifyFailedApiCallAttemptCollection(failedAttempt); + + MetricCollection successfulAttempt = capturedCollection.children().get(1); + verifySuccessfulApiCallAttemptCollection(successfulAttempt); + } + + /** + * Adds delay after calling CompletableFuture.join to wait for publisher to get metrics. 
+ */ + void addDelayIfNeeded() { + try { + Thread.sleep(200); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + } + + abstract String operationName(); + + abstract Supplier> callable(); + + abstract MetricPublisher publisher(); + + private void verifyFailedApiCallAttemptCollection(MetricCollection requestMetrics) { + assertThat(requestMetrics.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .containsExactly(500); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_REQUEST_ID)) + .containsExactly(REQUEST_ID); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) + .containsExactly(EXTENDED_REQUEST_ID); + assertThat(requestMetrics.metricValues(CoreMetric.BACKOFF_DELAY_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(requestMetrics.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + } + + private void verifySuccessfulApiCallAttemptCollection(MetricCollection attemptCollection) { + assertThat(attemptCollection.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .containsExactly(200); + assertThat(attemptCollection.metricValues(CoreMetric.AWS_REQUEST_ID)) + .containsExactly(REQUEST_ID); + assertThat(attemptCollection.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) + .containsExactly(EXTENDED_REQUEST_ID); + assertThat(attemptCollection.metricValues(CoreMetric.BACKOFF_DELAY_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(attemptCollection.metricValues(CoreMetric.SIGNING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + } + + private void verifyFailedApiCallCollection(MetricCollection capturedCollection) { + verifyApiCallCollection(capturedCollection); + assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(MAX_RETRIES); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(false); + } + + private void verifySuccessfulApiCallCollection(MetricCollection capturedCollection) { + verifyApiCallCollection(capturedCollection); + assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(0); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(true); + } + + private void verifyApiCallCollection(MetricCollection capturedCollection) { + assertThat(capturedCollection.name()).isEqualTo("ApiCall"); + assertThat(capturedCollection.metricValues(CoreMetric.SERVICE_ID)) + .containsExactly(SERVICE_ID); + assertThat(capturedCollection.metricValues(CoreMetric.OPERATION_NAME)) + .containsExactly(operationName()); + assertThat(capturedCollection.metricValues(CoreMetric.CREDENTIALS_FETCH_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.MARSHALLING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_DURATION).get(0)) + .isGreaterThan(FIXED_DELAY); + } + + void stubSuccessfulResponse() { + stubFor(post(anyUrl()) + .willReturn(aResponse().withStatus(200) + .withHeader("x-amz-request-id", REQUEST_ID) + .withFixedDelay((int) FIXED_DELAY.toMillis()) + .withHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .withBody("{}"))); + } + + void stubErrorResponse() { + stubFor(post(anyUrl()) + .willReturn(aResponse().withStatus(500) + .withHeader("x-amz-request-id", REQUEST_ID) + .withHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .withFixedDelay((int) FIXED_DELAY.toMillis()) + .withHeader("X-Amzn-Errortype", 
"EmptyModeledException") + .withBody("{}"))); + } + + void stubNetworkError() { + stubFor(post(anyUrl()) + .willReturn(aResponse().withFault(Fault.CONNECTION_RESET_BY_PEER) + .withFixedDelay((int) FIXED_DELAY.toMillis()) + )); + } + + void stubSuccessfulRetry() { + stubFor(post(anyUrl()) + .inScenario("retry at 500") + .whenScenarioStateIs(Scenario.STARTED) + .willSetStateTo("first attempt") + .willReturn(aResponse() + .withHeader("x-amz-request-id", REQUEST_ID) + .withHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .withFixedDelay((int) FIXED_DELAY.toMillis()) + .withHeader("X-Amzn-Errortype", "EmptyModeledException") + .withStatus(500))); + + stubFor(post(anyUrl()) + .inScenario("retry at 500") + .whenScenarioStateIs("first attempt") + .willSetStateTo("second attempt") + .willReturn(aResponse() + .withStatus(200) + .withHeader("x-amz-request-id", REQUEST_ID) + .withHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .withFixedDelay((int) FIXED_DELAY.toMillis()) + .withBody("{}"))); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolquery/AsyncOperationCancelTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolquery/AsyncOperationCancelTest.java index b5cf7c96734c..4cff1fa28f0b 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolquery/AsyncOperationCancelTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolquery/AsyncOperationCancelTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -63,7 +63,7 @@ public void setUp() { } @Test - public void testNonStreamingOperation() { + public void testNonStreamingOperation() throws InterruptedException { CompletableFuture responseFuture = client.allTypes(r -> { }); responseFuture.cancel(true); diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/AsyncOperationCancelTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/AsyncOperationCancelTest.java index 096c2853f3e6..5af311ab04eb 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/AsyncOperationCancelTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/AsyncOperationCancelTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/HashCodeEqualsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/HashCodeEqualsTest.java index 338085fb4455..46da01f4cb14 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/HashCodeEqualsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/HashCodeEqualsTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/GetValueForFieldTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/GetValueForFieldTest.java similarity index 89% rename from test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/GetValueForFieldTest.java rename to test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/GetValueForFieldTest.java index 73a311ab5981..33275739d2e2 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/GetValueForFieldTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/GetValueForFieldTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.services.codegenerationjsonrpccustomized.model; +package software.amazon.awssdk.services.protocolrestjson.model; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ListCopierTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ListCopierTest.java similarity index 89% rename from test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ListCopierTest.java rename to test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ListCopierTest.java index dc6a5a69ba6f..feaf6793d43f 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ListCopierTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ListCopierTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.services.codegenerationjsonrpccustomized.model; +package software.amazon.awssdk.services.protocolrestjson.model; import static org.assertj.core.api.Assertions.assertThat; diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/MapCopierTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/MapCopierTest.java similarity index 91% rename from test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/MapCopierTest.java rename to test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/MapCopierTest.java index 0e0acbefa72e..98102618dad9 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/MapCopierTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/MapCopierTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -13,8 +13,9 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.services.codegenerationjsonrpccustomized.model; +package software.amazon.awssdk.services.protocolrestjson.model; +import org.assertj.core.api.Assertions; import org.junit.Test; import software.amazon.awssdk.core.util.DefaultSdkAutoConstructMap; import software.amazon.awssdk.core.util.SdkAutoConstructMap; @@ -69,7 +70,7 @@ public void unknownEnumKeyNotAddedToCopiedMap() { Map mapOfEnumToEnum = new HashMap<>(); mapOfEnumToEnum.put("foo", "bar"); Map copy = MapOfEnumToEnumCopier.copyStringToEnum(mapOfEnumToEnum); - assertThat(copy).isEmpty(); + Assertions.assertThat(copy).isEmpty(); } @Test @@ -77,6 +78,6 @@ public void knownEnumKeyAddedToCopiedMap() { Map mapOfEnumToEnum = new HashMap<>(); mapOfEnumToEnum.put(EnumType.ENUM_VALUE1.toString(), "bar"); Map copy = MapOfEnumToEnumCopier.copyStringToEnum(mapOfEnumToEnum); - assertThat(copy).hasSize(1); + Assertions.assertThat(copy).hasSize(1); } } diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderListMemberTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderListMemberTest.java similarity index 92% rename from test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderListMemberTest.java rename to test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderListMemberTest.java index bae54df6f057..64281a760651 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderListMemberTest.java +++ 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderListMemberTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.services.codegenerationjsonrpccustomized.model; +package software.amazon.awssdk.services.protocolrestjson.model; import static org.assertj.core.api.Assertions.assertThat; diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderMapMemberTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderMapMemberTest.java similarity index 92% rename from test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderMapMemberTest.java rename to test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderMapMemberTest.java index febc413ad718..7a91e323e39c 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderMapMemberTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderMapMemberTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.services.codegenerationjsonrpccustomized.model; +package software.amazon.awssdk.services.protocolrestjson.model; import org.junit.Test; import software.amazon.awssdk.core.util.SdkAutoConstructMap; diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestxml/AsyncOperationCancelTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestxml/AsyncOperationCancelTest.java index 42edb6ed190b..ce17d2380ba1 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestxml/AsyncOperationCancelTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestxml/AsyncOperationCancelTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
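The MapCopierTest context above asserts that copyStringToEnum drops entries whose key does not correspond to a known enum constant and keeps entries whose key does. MapOfEnumToEnumCopier itself is generated code; the following is only a hypothetical sketch of that filtering behavior, with made-up names (Color, EnumKeyedMapCopier):

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical stand-in for an SDK-style enum: fromValue(...) maps unrecognized
// strings to UNKNOWN_TO_SDK_VERSION instead of throwing.
enum Color {
    RED, BLUE, UNKNOWN_TO_SDK_VERSION;

    static Color fromValue(String value) {
        for (Color c : values()) {
            if (c != UNKNOWN_TO_SDK_VERSION && c.name().equals(value)) {
                return c;
            }
        }
        return UNKNOWN_TO_SDK_VERSION;
    }
}

final class EnumKeyedMapCopier {
    // Mirrors the behavior the test asserts: entries whose key does not map to a
    // known enum constant are dropped; entries with known keys are retained.
    static Map<Color, String> copyStringToEnum(Map<String, String> source) {
        Map<Color, String> copy = new LinkedHashMap<>();
        source.forEach((key, value) -> {
            Color enumKey = Color.fromValue(key);
            if (enumKey != Color.UNKNOWN_TO_SDK_VERSION) {
                copy.put(enumKey, value);
            }
        });
        return copy;
    }
}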
diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncClientRetryModeTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncClientRetryModeTest.java new file mode 100644 index 000000000000..1151107bb359 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncClientRetryModeTest.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.retry; + +import java.util.concurrent.CompletionException; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClientBuilder; +import software.amazon.awssdk.services.protocolrestjson.model.AllTypesResponse; + +public class AsyncClientRetryModeTest + extends ClientRetryModeTestSuite { + @Override + protected ProtocolRestJsonAsyncClientBuilder newClientBuilder() { + return ProtocolRestJsonAsyncClient.builder(); + } + + @Override + protected AllTypesResponse callAllTypes(ProtocolRestJsonAsyncClient client) { + try { + return client.allTypes().join(); + } catch (CompletionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } + + throw e; + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetryHeaderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetryHeaderTest.java new file mode 100644 index 000000000000..4b344f994059 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetryHeaderTest.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.retry; + +import java.net.URI; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; + +public class AsyncRetryHeaderTest extends RetryHeaderTestSuite { + private final ProtocolRestJsonAsyncClient client; + + public AsyncRetryHeaderTest() { + super(new MockAsyncHttpClient()); + client = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost")) + .httpClient(mockHttpClient) + .build(); + } + + @Override + protected void callAllTypesOperation() { + client.allTypes().join(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/ClientRetryModeTestSuite.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/ClientRetryModeTestSuite.java new file mode 100644 index 000000000000..35d5d70e23f8 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/ClientRetryModeTestSuite.java @@ -0,0 +1,131 @@ +/* + * Copyright 2010-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.retry; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.proxyAllTo; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.URI; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.retry.RetryMode; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.model.AllTypesResponse; +import software.amazon.awssdk.utils.StringInputStream; + +public abstract class ClientRetryModeTestSuite> { + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + @Test + public void legacyRetryModeIsFourAttempts() { + stubThrottlingResponse(); + ClientT client = clientBuilder().overrideConfiguration(o -> o.retryPolicy(RetryMode.LEGACY)).build(); + assertThatThrownBy(() -> callAllTypes(client)).isInstanceOf(SdkException.class); + verifyRequestCount(4); + } + + @Test + public void standardRetryModeIsThreeAttempts() { + stubThrottlingResponse(); + ClientT client = clientBuilder().overrideConfiguration(o -> o.retryPolicy(RetryMode.STANDARD)).build(); + assertThatThrownBy(() -> callAllTypes(client)).isInstanceOf(SdkException.class); + verifyRequestCount(3); + } + + @Test + public void retryModeCanBeSetByProfileFile() { + ProfileFile profileFile = ProfileFile.builder() + .content(new StringInputStream("[profile foo]\n" + + "retry_mode = standard")) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + stubThrottlingResponse(); + ClientT client = clientBuilder().overrideConfiguration(o -> o.defaultProfileFile(profileFile) + .defaultProfileName("foo")).build(); + assertThatThrownBy(() -> callAllTypes(client)).isInstanceOf(SdkException.class); + verifyRequestCount(3); + } + + @Test + public void legacyRetryModeExcludesThrottlingExceptions() throws InterruptedException { + stubThrottlingResponse(); + + ExecutorService executor = Executors.newFixedThreadPool(51); + ClientT client = clientBuilder().overrideConfiguration(o -> o.retryPolicy(RetryMode.LEGACY)).build(); + + for (int i = 0; i < 51; ++i) { + executor.execute(() -> assertThatThrownBy(() -> callAllTypes(client)).isInstanceOf(SdkException.class)); + } + executor.shutdown(); + assertThat(executor.awaitTermination(30, TimeUnit.SECONDS)).isTrue(); + + // 51 requests * 4 attempts = 204 requests + verifyRequestCount(204); + } + + @Test + public void standardRetryModeIncludesThrottlingExceptions() throws InterruptedException { + stubThrottlingResponse(); + + ExecutorService executor = 
Executors.newFixedThreadPool(51); + ClientT client = clientBuilder().overrideConfiguration(o -> o.retryPolicy(RetryMode.STANDARD)).build(); + + for (int i = 0; i < 51; ++i) { + executor.execute(() -> assertThatThrownBy(() -> callAllTypes(client)).isInstanceOf(SdkException.class)); + } + executor.shutdown(); + assertThat(executor.awaitTermination(30, TimeUnit.SECONDS)).isTrue(); + + // Would receive 153 without throttling (51 requests * 3 attempts = 153 requests) + verifyRequestCount(151); + } + + private BuilderT clientBuilder() { + return newClientBuilder().credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())); + } + + protected abstract BuilderT newClientBuilder(); + + protected abstract AllTypesResponse callAllTypes(ClientT client); + + private void verifyRequestCount(int count) { + verify(count, anyRequestedFor(anyUrl())); + } + + private void stubThrottlingResponse() { + stubFor(post(anyUrl()) + .willReturn(aResponse().withStatus(429))); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java new file mode 100644 index 000000000000..9b683170a5c0 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java @@ -0,0 +1,126 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.retry; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.List; +import java.util.stream.Stream; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.testutils.service.http.MockHttpClient; + +/** + * A set of tests that verify the behavior of retry-related headers (amz-sdk-invocation-id and amz-sdk-request). 
+ */ +public abstract class RetryHeaderTestSuite { + protected final T mockHttpClient; + + protected RetryHeaderTestSuite(T mockHttpClient) { + this.mockHttpClient = mockHttpClient; + } + + @Before + public void setupClient() { + mockHttpClient.reset(); + } + + protected abstract void callAllTypesOperation(); + + @Test + public void invocationIdSharedBetweenRetries() { + mockHttpClient.stubResponses(retryableFailure(), retryableFailure(), success()); + + callAllTypesOperation(); + + List requests = mockHttpClient.getRequests(); + + assertThat(requests).hasSize(3); + String firstInvocationId = invocationId(requests.get(0)); + assertThat(invocationId(requests.get(1))).isEqualTo(firstInvocationId); + assertThat(invocationId(requests.get(2))).isEqualTo(firstInvocationId); + } + + @Test + public void invocationIdDifferentBetweenApiCalls() { + mockHttpClient.stubResponses(success()); + + callAllTypesOperation(); + callAllTypesOperation(); + + List requests = mockHttpClient.getRequests(); + + assertThat(requests).hasSize(2); + String firstInvocationId = invocationId(requests.get(0)); + assertThat(invocationId(requests.get(1))).isNotEqualTo(firstInvocationId); + } + + @Test + public void retryAttemptAndMaxAreCorrect() { + mockHttpClient.stubResponses(retryableFailure(), success()); + + callAllTypesOperation(); + + List requests = mockHttpClient.getRequests(); + + assertThat(requests).hasSize(2); + assertThat(retryComponent(requests.get(0), "attempt")).isEqualTo("1"); + assertThat(retryComponent(requests.get(1), "attempt")).isEqualTo("2"); + assertThat(retryComponent(requests.get(0), "max")).isEqualTo("3"); + assertThat(retryComponent(requests.get(1), "max")).isEqualTo("3"); + } + + private String invocationId(SdkHttpRequest request) { + return request.firstMatchingHeader("amz-sdk-invocation-id") + .orElseThrow(() -> new AssertionError("Expected aws-sdk-invocation-id in " + request)); + } + + private String retryComponent(SdkHttpRequest request, String componentName) { + return retryComponent(request.firstMatchingHeader("amz-sdk-request") + .orElseThrow(() -> new AssertionError("Expected amz-sdk-request in " + request)), + componentName); + } + + private String retryComponent(String amzSdkRequestHeader, String componentName) { + return Stream.of(amzSdkRequestHeader.split(";")) + .map(h -> h.split("=")) + .filter(h -> h[0].trim().equals(componentName)) + .map(h -> h[1].trim()) + .findAny() + .orElseThrow(() -> new AssertionError("Expected " + componentName + " in " + amzSdkRequestHeader)); + } + + private HttpExecuteResponse retryableFailure() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder() + .statusCode(500) + .putHeader("content-length", "0") + .build()) + .build(); + } + + private HttpExecuteResponse success() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder() + .statusCode(200) + .putHeader("content-length", "0") + .build()) + .build(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncClientRetryModeTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncClientRetryModeTest.java new file mode 100644 index 000000000000..1d6f4e60adb4 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncClientRetryModeTest.java @@ -0,0 +1,32 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.retry; + +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClientBuilder; +import software.amazon.awssdk.services.protocolrestjson.model.AllTypesResponse; + +public class SyncClientRetryModeTest extends ClientRetryModeTestSuite { + @Override + protected ProtocolRestJsonClientBuilder newClientBuilder() { + return ProtocolRestJsonClient.builder(); + } + + @Override + protected AllTypesResponse callAllTypes(ProtocolRestJsonClient client) { + return client.allTypes(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetryHeaderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetryHeaderTest.java new file mode 100644 index 000000000000..e697a5c65124 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetryHeaderTest.java @@ -0,0 +1,43 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.retry; + +import java.net.URI; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; + +public class SyncRetryHeaderTest extends RetryHeaderTestSuite { + private final ProtocolRestJsonClient client; + + public SyncRetryHeaderTest() { + super(new MockSyncHttpClient()); + client = ProtocolRestJsonClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", + "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost")) + .httpClient(mockHttpClient) + .build(); + } + + @Override + protected void callAllTypesOperation() { + client.allTypes(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/tostring/SensitiveDataRedactedTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/tostring/SensitiveDataRedactedTest.java index 5eda86056e17..48b4fc5deba0 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/tostring/SensitiveDataRedactedTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/tostring/SensitiveDataRedactedTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaiterResourceTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaiterResourceTest.java new file mode 100644 index 000000000000..298cac2447ee --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaiterResourceTest.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.waiters; + +import java.util.concurrent.ScheduledExecutorService; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.services.restjsonwithwaiters.RestJsonWithWaitersAsyncClient; +import software.amazon.awssdk.services.restjsonwithwaiters.RestJsonWithWaitersClient; +import software.amazon.awssdk.services.restjsonwithwaiters.waiters.RestJsonWithWaitersAsyncWaiter; +import software.amazon.awssdk.services.restjsonwithwaiters.waiters.RestJsonWithWaitersWaiter; + +@RunWith(MockitoJUnitRunner.class) +public class WaiterResourceTest { + @Mock + private RestJsonWithWaitersClient client; + + @Mock + private RestJsonWithWaitersAsyncClient asyncClient; + + @Mock + private ScheduledExecutorService executorService; + + @Test + public void closeSyncWaiter_customizedClientProvided_shouldNotCloseClient() { + RestJsonWithWaitersWaiter waiter = RestJsonWithWaitersWaiter.builder() + .client(client) + .build(); + + waiter.close(); + Mockito.verify(client, Mockito.never()).close(); + } + + @Test + public void closeAsyncWaiter_customizedClientAndExecutorServiceProvided_shouldNotClose() { + RestJsonWithWaitersAsyncWaiter waiter = RestJsonWithWaitersAsyncWaiter.builder() + .client(asyncClient) + .scheduledExecutorService(executorService) + .build(); + + waiter.close(); + Mockito.verify(asyncClient, Mockito.never()).close(); + Mockito.verify(executorService, Mockito.never()).shutdown(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersAsyncFunctionalTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersAsyncFunctionalTest.java new file mode 100644 index 000000000000..e2bde5eebf77 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersAsyncFunctionalTest.java @@ -0,0 +1,222 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.services.waiters; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.awscore.exception.AwsErrorDetails; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; +import software.amazon.awssdk.core.waiters.WaiterOverrideConfiguration; +import software.amazon.awssdk.core.waiters.WaiterResponse; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.services.restjsonwithwaiters.RestJsonWithWaitersAsyncClient; +import software.amazon.awssdk.services.restjsonwithwaiters.model.AllTypesRequest; +import software.amazon.awssdk.services.restjsonwithwaiters.model.AllTypesResponse; +import software.amazon.awssdk.services.restjsonwithwaiters.model.EmptyModeledException; +import software.amazon.awssdk.services.restjsonwithwaiters.waiters.RestJsonWithWaitersAsyncWaiter; +import software.amazon.awssdk.utils.CompletableFutureUtils; +import software.amazon.awssdk.utils.builder.SdkBuilder; + +public class WaitersAsyncFunctionalTest { + + public RestJsonWithWaitersAsyncClient asyncClient; + public RestJsonWithWaitersAsyncWaiter asyncWaiter; + + @Before + public void setup() { + asyncClient = mock(RestJsonWithWaitersAsyncClient.class); + asyncWaiter = RestJsonWithWaitersAsyncWaiter.builder() + .client(asyncClient) + .overrideConfiguration(WaiterOverrideConfiguration.builder() + .maxAttempts(3) + .backoffStrategy(BackoffStrategy.none()) + .build()) + .build(); + } + + @After + public void cleanup() { + asyncClient.close(); + asyncWaiter.close(); + } + + @Test + public void allTypeOperation_withAsyncWaiter_shouldReturnResponse() throws ExecutionException, InterruptedException { + AllTypesResponse response = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(200) + .build()) + .build(); + + CompletableFuture<AllTypesResponse> serviceFuture = new CompletableFuture<>(); + + + when(asyncClient.allTypes(any(AllTypesRequest.class))).thenReturn(serviceFuture); + CompletableFuture<WaiterResponse<AllTypesResponse>> responseFuture = asyncWaiter.waitUntilAllTypesSuccess(AllTypesRequest.builder() + .integerMember(1) + .build()); + serviceFuture.complete(response); + + assertThat(responseFuture.get().attemptsExecuted()).isEqualTo(1); + assertThat(responseFuture.get().matched().response()).hasValueSatisfying(r -> assertThat(r).isEqualTo(response)); + } + + @Test + public void allTypeOperationFailed_withAsyncWaiter_shouldReturnException() throws ExecutionException, InterruptedException { + CompletableFuture<AllTypesResponse> serviceFuture = new CompletableFuture<>(); + + when(asyncClient.allTypes(any(AllTypesRequest.class))).thenReturn(serviceFuture); + CompletableFuture<WaiterResponse<AllTypesResponse>> responseFuture = asyncWaiter.waitUntilAllTypesSuccess(AllTypesRequest.builder().build()); + + serviceFuture.completeExceptionally(SdkServiceException.builder().statusCode(200).build()); +
assertThat(responseFuture.get().attemptsExecuted()).isEqualTo(1); + assertThat(responseFuture.get().matched().exception()).hasValueSatisfying(r -> assertThat(r).isInstanceOf(SdkServiceException.class)); + } + + @Test + public void allTypeOperationRetry_withAsyncWaiter_shouldReturnResponseAfterException() throws ExecutionException, InterruptedException { + AllTypesResponse response2 = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(200) + .build()) + .build(); + + CompletableFuture<AllTypesResponse> serviceFuture1 = + CompletableFutureUtils.failedFuture(SdkServiceException.builder().statusCode(404).build()); + CompletableFuture<AllTypesResponse> serviceFuture2 = new CompletableFuture<>(); + + when(asyncClient.allTypes(any(AllTypesRequest.class))).thenReturn(serviceFuture1, serviceFuture2); + + CompletableFuture<WaiterResponse<AllTypesResponse>> responseFuture = asyncWaiter.waitUntilAllTypesSuccess(AllTypesRequest.builder().build()); + + serviceFuture2.complete(response2); + + assertThat(responseFuture.get().attemptsExecuted()).isEqualTo(2); + assertThat(responseFuture.get().matched().response()).hasValueSatisfying(r -> assertThat(r).isEqualTo(response2)); + } + + @Test + public void requestOverrideConfig_shouldTakePrecedence() { + AllTypesResponse response = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(200) + .build()) + .build(); + + CompletableFuture<AllTypesResponse> serviceFuture1 = CompletableFutureUtils.failedFuture(SdkServiceException.builder().statusCode(404).build()); + CompletableFuture<AllTypesResponse> serviceFuture2 = CompletableFuture.completedFuture(response); + + when(asyncClient.allTypes(any(AllTypesRequest.class))).thenReturn(serviceFuture1, serviceFuture2); + + assertThatThrownBy(() -> + asyncWaiter.waitUntilAllTypesSuccess(b -> b.build(), o -> o.maxAttempts(1)).join()) + .hasMessageContaining("exceeded the max retry attempts").hasCauseInstanceOf(SdkClientException.class); + } + + @Test + public void unexpectedException_shouldNotRetry() { + CompletableFuture<AllTypesResponse> failedFuture = CompletableFutureUtils.failedFuture(new RuntimeException("")); + when(asyncClient.allTypes(any(AllTypesRequest.class))).thenReturn(failedFuture); + + assertThatThrownBy(() -> asyncWaiter.waitUntilAllTypesSuccess(b -> b.build()).join()) + .hasMessageContaining("An exception was thrown and did not match any waiter acceptors") + .hasCauseInstanceOf(SdkClientException.class); + } + + @Test + public void unexpectedResponse_shouldRetry() { + AllTypesResponse response = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(200) + .build()).build(); + CompletableFuture<AllTypesResponse> future1 = + CompletableFuture.completedFuture((AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(202) + .build()).build()); + + CompletableFuture<AllTypesResponse> future2 = + CompletableFuture.completedFuture(response); + + when(asyncClient.allTypes(any(AllTypesRequest.class))).thenReturn(future1, future2); + + WaiterResponse<AllTypesResponse> waiterResponse = asyncWaiter.waitUntilAllTypesSuccess(b -> b.build()).join(); + assertThat(waiterResponse.attemptsExecuted()).isEqualTo(2); + assertThat(waiterResponse.matched().response()).hasValueSatisfying(r -> assertThat(r).isEqualTo(response)); + } + + @Test + public void failureResponse_shouldThrowException() { + CompletableFuture<AllTypesResponse> future = + CompletableFuture.completedFuture((AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(500) + .build()) + .build()); +
when(asyncClient.allTypes(any(AllTypesRequest.class))).thenReturn(future); + assertThatThrownBy(() -> asyncWaiter.waitUntilAllTypesSuccess(SdkBuilder::build).join()) + .hasMessageContaining("transitioned the waiter to failure state") + .hasCauseInstanceOf(SdkClientException.class); + } + + @Test + public void failureException_shouldThrowException() { + when(asyncClient.allTypes(any(AllTypesRequest.class))).thenReturn(CompletableFutureUtils.failedFuture(EmptyModeledException.builder() + .awsErrorDetails(AwsErrorDetails.builder() + .errorCode("EmptyModeledException") + .build()) + .build())); + assertThatThrownBy(() -> asyncWaiter.waitUntilAllTypesSuccess(SdkBuilder::build).join()) + .hasMessageContaining("transitioned the waiter to failure state") + .hasCauseInstanceOf(SdkClientException.class); + } + + @Test + public void closeWaiterCreatedWithClient_clientDoesNotClose() { + asyncWaiter.close(); + verify(asyncClient, never()).close(); + } + + @Test + public void closeWaiterCreatedWithExecutorService_executorServiceDoesNotClose() { + ScheduledExecutorService executorService = mock(ScheduledExecutorService.class); + RestJsonWithWaitersAsyncWaiter newWaiter = RestJsonWithWaitersAsyncWaiter.builder() + .scheduledExecutorService(executorService) + .overrideConfiguration(WaiterOverrideConfiguration.builder() + .maxAttempts(3) + .backoffStrategy(BackoffStrategy.none()) + .build()) + .build(); + + newWaiter.close(); + verify(executorService, never()).shutdown(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersRuntimeDefaultAcceptorsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersRuntimeDefaultAcceptorsTest.java new file mode 100644 index 000000000000..36b43a64db09 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersRuntimeDefaultAcceptorsTest.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.waiters; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import org.junit.Test; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.waiters.WaiterState; +import software.amazon.awssdk.services.restjsonwithwaiters.waiters.internal.WaitersRuntime; + +/** + * Verify the accuracy of {@link WaitersRuntime#DEFAULT_ACCEPTORS}. 
+ */ +public class WaitersRuntimeDefaultAcceptorsTest { + + @Test + public void defaultAcceptorsRetryOnUnrecognizedResponse() { + assertThat(WaitersRuntime.DEFAULT_ACCEPTORS.stream().filter(acceptor -> acceptor.matches(new Object())).findFirst()) + .hasValueSatisfying(v -> assertThat(v.waiterState()).isEqualTo(WaiterState.RETRY)); + } +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersRuntimeResponseStatusAcceptorTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersRuntimeResponseStatusAcceptorTest.java new file mode 100644 index 000000000000..977104a719b7 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersRuntimeResponseStatusAcceptorTest.java @@ -0,0 +1,101 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.waiters; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.List; +import org.junit.Test; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkResponse; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.core.waiters.WaiterState; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.services.restjsonwithwaiters.waiters.internal.WaitersRuntime.ResponseStatusAcceptor; + +/** + * Verify the accuracy of {@link ResponseStatusAcceptor}. 
+ */ +public class WaitersRuntimeResponseStatusAcceptorTest { + @Test + public void usesStatus() { + assertThat(new ResponseStatusAcceptor(200, WaiterState.RETRY).waiterState()).isEqualTo(WaiterState.RETRY); + assertThat(new ResponseStatusAcceptor(200, WaiterState.FAILURE).waiterState()).isEqualTo(WaiterState.FAILURE); + assertThat(new ResponseStatusAcceptor(200, WaiterState.SUCCESS).waiterState()).isEqualTo(WaiterState.SUCCESS); + } + + @Test + public void checksStatusOnResponse() { + SdkHttpFullResponse http200 = SdkHttpResponse.builder().statusCode(200).build(); + SdkHttpFullResponse http500 = SdkHttpResponse.builder().statusCode(500).build(); + + assertThat(new ResponseStatusAcceptor(200, WaiterState.SUCCESS).matches(new ExampleSdkResponse(http200))).isTrue(); + assertThat(new ResponseStatusAcceptor(200, WaiterState.SUCCESS).matches(new ExampleSdkResponse(http500))).isFalse(); + assertThat(new ResponseStatusAcceptor(500, WaiterState.SUCCESS).matches(new ExampleSdkResponse(http500))).isTrue(); + assertThat(new ResponseStatusAcceptor(500, WaiterState.SUCCESS).matches(new ExampleSdkResponse(http200))).isFalse(); + } + + @Test + public void checksStatusOnException() { + assertThat(new ResponseStatusAcceptor(200, WaiterState.SUCCESS).matches((Throwable) null)).isFalse(); + assertThat(new ResponseStatusAcceptor(200, WaiterState.SUCCESS).matches(new Throwable())).isFalse(); + assertThat(new ResponseStatusAcceptor(200, WaiterState.SUCCESS).matches(SdkException.create("", null))).isFalse(); + assertThat(new ResponseStatusAcceptor(200, WaiterState.SUCCESS).matches(SdkServiceException.create("", null))).isFalse(); + assertThat(new ResponseStatusAcceptor(200, WaiterState.SUCCESS).matches(SdkServiceException.builder() + .message("") + .statusCode(500) + .build())) + .isFalse(); + assertThat(new ResponseStatusAcceptor(200, WaiterState.SUCCESS).matches(SdkServiceException.builder() + .message("") + .statusCode(200) + .build())) + .isTrue(); + } + + private static class ExampleSdkResponse extends SdkResponse { + protected ExampleSdkResponse(SdkHttpResponse httpResponse) { + super(new Builder() { + @Override + public Builder sdkHttpResponse(SdkHttpResponse sdkHttpResponse) { + throw new UnsupportedOperationException(); + } + + @Override + public SdkHttpResponse sdkHttpResponse() { + return httpResponse; + } + + @Override + public SdkResponse build() { + throw new UnsupportedOperationException(); + } + }); + } + + @Override + public Builder toBuilder() { + throw new UnsupportedOperationException(); + } + + @Override + public List<SdkField<?>> sdkFields() { + throw new UnsupportedOperationException(); + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersRuntimeValueTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersRuntimeValueTest.java new file mode 100644 index 000000000000..47837f343e1f --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersRuntimeValueTest.java @@ -0,0 +1,312 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file.
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.waiters; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.junit.Test; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.protocol.MarshallingType; +import software.amazon.awssdk.core.traits.LocationTrait; +import software.amazon.awssdk.services.restjsonwithwaiters.waiters.internal.WaitersRuntime.Value; +import software.amazon.awssdk.utils.Pair; + +public class WaitersRuntimeValueTest { + @Test + public void valueReturnsConstructorInput() { + assertThat(new Value(null).value()).isEqualTo(null); + assertThat(new Value(sdkPojo()).value()).isEqualTo(sdkPojo()); + assertThat(new Value(5).value()).isEqualTo(5); + assertThat(new Value("").value()).isEqualTo(""); + assertThat(new Value(true).value()).isEqualTo(true); + assertThat(new Value(emptyList()).value()).isEqualTo(emptyList()); + } + + @Test + public void valuesReturnsListForm() { + assertThat(new Value(null).values()).isEqualTo(emptyList()); + assertThat(new Value(5).values()).isEqualTo(singletonList(5)); + assertThat(new Value("").values()).isEqualTo(singletonList("")); + assertThat(new Value(true).values()).isEqualTo(singletonList(true)); + assertThat(new Value(singletonList("a")).values()).isEqualTo(singletonList("a")); + assertThat(new Value(sdkPojo()).values()).isEqualTo(singletonList(sdkPojo())); + } + + @Test + public void andBehavesWithBooleans() { + assertThat(booleanTrue().and(booleanTrue())).isEqualTo(booleanTrue()); + assertThat(booleanFalse().and(booleanTrue())).isEqualTo(booleanFalse()); + assertThat(booleanTrue().and(booleanFalse())).isEqualTo(booleanFalse()); + assertThat(booleanFalse().and(booleanFalse())).isEqualTo(booleanFalse()); + } + + @Test + public void andBehavesWithPojos() { + Value truePojo1 = sdkPojoValue(Pair.of("foo", "bar")); + Value truePojo2 = sdkPojoValue(Pair.of("foo", "bar")); + Value falsePojo1 = sdkPojoValue(); + Value falsePojo2 = sdkPojoValue(); + + assertThat(truePojo1.and(truePojo2)).isSameAs(truePojo2); + assertThat(falsePojo1.and(truePojo1)).isSameAs(falsePojo1); + assertThat(truePojo1.and(falsePojo1)).isSameAs(falsePojo1); + assertThat(falsePojo1.and(falsePojo2)).isSameAs(falsePojo1); + } + + @Test + public void andBehavesWithLists() { + Value trueList1 = new Value(singletonList("foo")); + Value trueList2 = new Value(singletonList("foo")); + Value falseList1 = new Value(emptyList()); + Value falseList2 = new Value(emptyList()); + + assertThat(trueList1.and(trueList2)).isSameAs(trueList2); + assertThat(falseList1.and(trueList1)).isSameAs(falseList1); + assertThat(trueList1.and(falseList1)).isSameAs(falseList1); + assertThat(falseList1.and(falseList2)).isSameAs(falseList1); + } + + @Test + public void andBehavesWithStrings() { + Value trueList1 = new Value("foo"); + Value trueList2 = new Value("foo"); + Value falseList1 = new Value(""); + Value falseList2 = new Value(""); + + 
assertThat(trueList1.and(trueList2)).isSameAs(trueList2); + assertThat(falseList1.and(trueList1)).isSameAs(falseList1); + assertThat(trueList1.and(falseList1)).isSameAs(falseList1); + assertThat(falseList1.and(falseList2)).isSameAs(falseList1); + } + + @Test + public void orBehavesWithBooleans() { + assertThat(booleanTrue().or(booleanTrue())).isEqualTo(booleanTrue()); + assertThat(booleanFalse().or(booleanTrue())).isEqualTo(booleanTrue()); + assertThat(booleanTrue().or(booleanFalse())).isEqualTo(booleanTrue()); + assertThat(booleanFalse().or(booleanFalse())).isEqualTo(new Value(null)); + } + + @Test + public void orBehavesWithPojos() { + Value truePojo1 = sdkPojoValue(Pair.of("foo", "bar")); + Value truePojo2 = sdkPojoValue(Pair.of("foo", "bar")); + Value falsePojo1 = sdkPojoValue(); + Value falsePojo2 = sdkPojoValue(); + + assertThat(truePojo1.or(truePojo2)).isSameAs(truePojo1); + assertThat(falsePojo1.or(truePojo1)).isSameAs(truePojo1); + assertThat(truePojo1.or(falsePojo1)).isSameAs(truePojo1); + assertThat(falsePojo1.or(falsePojo2)).isEqualTo(new Value(null)); + } + + @Test + public void orBehavesWithLists() { + Value trueList1 = new Value(singletonList("foo")); + Value trueList2 = new Value(singletonList("foo")); + Value falseList1 = new Value(emptyList()); + Value falseList2 = new Value(emptyList()); + + assertThat(trueList1.or(trueList2)).isSameAs(trueList1); + assertThat(falseList1.or(trueList1)).isSameAs(trueList1); + assertThat(trueList1.or(falseList1)).isSameAs(trueList1); + assertThat(falseList1.or(falseList2)).isEqualTo(new Value(null)); + } + + @Test + public void orBehavesWithStrings() { + Value trueList1 = new Value("foo"); + Value trueList2 = new Value("foo"); + Value falseList1 = new Value(""); + Value falseList2 = new Value(""); + + assertThat(trueList1.or(trueList2)).isSameAs(trueList1); + assertThat(falseList1.or(trueList1)).isSameAs(trueList1); + assertThat(trueList1.or(falseList1)).isSameAs(trueList1); + assertThat(falseList1.or(falseList2)).isEqualTo(new Value(null)); + } + + @Test + public void notBehaves() { + assertThat(booleanTrue().not()).isEqualTo(booleanFalse()); + assertThat(booleanFalse().not()).isEqualTo(booleanTrue()); + assertThat(new Value("").not()).isEqualTo(booleanTrue()); + } + + @Test + public void constantBehaves() { + assertThat(new Value(null).constant(new Value(5))).isEqualTo(new Value(5)); + assertThat(new Value(null).constant(5)).isEqualTo(new Value(5)); + } + + @Test + public void wildcardBehavesWithNull() { + assertThat(new Value(null).wildcard()).isEqualTo(new Value(null)); + } + + @Test + public void wildcardBehavesWithPojos() { + assertThat(sdkPojoValue(Pair.of("foo", "bar"), + Pair.of("foo2", singletonList("bar")), + Pair.of("foo3", sdkPojo(Pair.of("x", "y")))) + .wildcard()) + .isEqualTo(new Value(asList("bar", + singletonList("bar"), + sdkPojo(Pair.of("x", "y"))))); + } + + @Test + public void flattenBehavesWithNull() { + assertThat(new Value(null).flatten()).isEqualTo(new Value(null)); + } + + @Test + public void flattenBehavesWithLists() { + assertThat(new Value(asList("bar", + singletonList("bar"), + sdkPojo(Pair.of("x", "y")))) + .flatten()) + .isEqualTo(new Value(asList("bar", + "bar", + sdkPojo(Pair.of("x", "y"))))); + } + + @Test + public void fieldBehaves() { + assertThat(new Value(null).field("foo")).isEqualTo(new Value(null)); + assertThat(sdkPojoValue(Pair.of("foo", "bar")).field("foo")).isEqualTo(new Value("bar")); + } + + @Test + public void filterBehaves() { + assertThat(new Value(null).filter(x -> new 
Value(true))).isEqualTo(new Value(null)); + + Value listValue = new Value(asList("foo", "bar")); + assertThat(listValue.filter(x -> new Value(Objects.equals(x.value(), "foo")))).isEqualTo(new Value(asList("foo"))); + assertThat(listValue.filter(x -> new Value(false))).isEqualTo(new Value(emptyList())); + assertThat(listValue.filter(x -> new Value(true))).isEqualTo(listValue); + } + + @Test + public void lengthBehaves() { + assertThat(new Value(null).length()).isEqualTo(new Value(null)); + assertThat(new Value("a").length()).isEqualTo(new Value(1)); + assertThat(sdkPojoValue(Pair.of("a", "b")).length()).isEqualTo(new Value(1)); + assertThat(new Value(singletonList("a")).length()).isEqualTo(new Value(1)); + } + + @Test + public void containsBehaves() { + assertThat(new Value(null).length()).isEqualTo(new Value(null)); + assertThat(new Value("abcde").contains(new Value("bcd"))).isEqualTo(new Value(true)); + assertThat(new Value("abcde").contains(new Value("f"))).isEqualTo(new Value(false)); + assertThat(new Value(asList("a", "b")).contains(new Value("a"))).isEqualTo(new Value(true)); + assertThat(new Value(asList("a", "b")).contains(new Value("c"))).isEqualTo(new Value(false)); + } + + @Test + public void compareIntegerBehaves() { + assertThat(new Value(1).compare(">", new Value(2))).isEqualTo(new Value(false)); + assertThat(new Value(1).compare(">=", new Value(2))).isEqualTo(new Value(false)); + assertThat(new Value(1).compare("<=", new Value(2))).isEqualTo(new Value(true)); + assertThat(new Value(1).compare("<", new Value(2))).isEqualTo(new Value(true)); + assertThat(new Value(1).compare("==", new Value(2))).isEqualTo(new Value(false)); + assertThat(new Value(1).compare("!=", new Value(2))).isEqualTo(new Value(true)); + + assertThat(new Value(1).compare(">", new Value(1))).isEqualTo(new Value(false)); + assertThat(new Value(1).compare(">=", new Value(1))).isEqualTo(new Value(true)); + assertThat(new Value(1).compare("<=", new Value(1))).isEqualTo(new Value(true)); + assertThat(new Value(1).compare("<", new Value(1))).isEqualTo(new Value(false)); + assertThat(new Value(1).compare("==", new Value(1))).isEqualTo(new Value(true)); + assertThat(new Value(1).compare("!=", new Value(1))).isEqualTo(new Value(false)); + } + + @Test + public void multiSelectListBehaves() { + assertThat(new Value(5).multiSelectList(x -> new Value(1), x -> new Value(2))) + .isEqualTo(new Value(asList(1, 2))); + } + + private Value booleanTrue() { + return new Value(true); + } + + private Value booleanFalse() { + return new Value(false); + } + + @SafeVarargs + private final Value sdkPojoValue(Pair... entry) { + return new Value(sdkPojo(entry)); + } + + @SafeVarargs + private final SdkPojo sdkPojo(Pair... 
entry) { + Map<String, Object> result = new HashMap<>(); + Stream.of(entry).forEach(e -> result.put(e.left(), e.right())); + return new MockSdkPojo(result); + } + + private static class MockSdkPojo implements SdkPojo { + private final Map<String, Object> map; + + private MockSdkPojo(Map<String, Object> map) { + this.map = map; + } + + @Override + public List<SdkField<?>> sdkFields() { + return map.entrySet().stream().map(this::sdkField).collect(Collectors.toList()); + } + + private SdkField<?> sdkField(Map.Entry<String, Object> e) { + @SuppressWarnings("unchecked") + Class<Object> valueClass = (Class<Object>) e.getValue().getClass(); + return SdkField.builder(MarshallingType.newType(valueClass)) + .memberName(e.getKey()) + .getter(x -> e.getValue()) + .traits(LocationTrait.builder().build()) + .build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MockSdkPojo that = (MockSdkPojo) o; + return Objects.equals(map, that.map); + } + + @Override + public int hashCode() { + return Objects.hash(map); + } + } +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersSyncFunctionalTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersSyncFunctionalTest.java new file mode 100644 index 000000000000..3da0198d74f7 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/waiters/WaitersSyncFunctionalTest.java @@ -0,0 +1,197 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License.
+ */ + +package software.amazon.awssdk.services.waiters; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.awscore.exception.AwsErrorDetails; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; +import software.amazon.awssdk.core.waiters.WaiterOverrideConfiguration; +import software.amazon.awssdk.core.waiters.WaiterResponse; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.services.restjsonwithwaiters.RestJsonWithWaitersClient; +import software.amazon.awssdk.services.restjsonwithwaiters.model.AllTypesRequest; +import software.amazon.awssdk.services.restjsonwithwaiters.model.AllTypesResponse; +import software.amazon.awssdk.services.restjsonwithwaiters.model.EmptyModeledException; +import software.amazon.awssdk.services.restjsonwithwaiters.waiters.RestJsonWithWaitersWaiter; +import software.amazon.awssdk.utils.builder.SdkBuilder; + +public class WaitersSyncFunctionalTest { + + private RestJsonWithWaitersClient client; + private RestJsonWithWaitersWaiter waiter; + + @Before + public void setup() { + client = mock(RestJsonWithWaitersClient.class); + waiter = RestJsonWithWaitersWaiter.builder() + .client(client) + .overrideConfiguration(WaiterOverrideConfiguration.builder() + .maxAttempts(3) + .backoffStrategy(BackoffStrategy.none()) + .build()) + .build(); + } + + @After + public void cleanup() { + client.close(); + waiter.close(); + } + + @Test + public void allTypeOperation_withSyncWaiter_shouldReturnResponse() { + AllTypesResponse response = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(200) + .build()) + .build(); + + + when(client.allTypes(any(AllTypesRequest.class))).thenReturn(response); + WaiterResponse<AllTypesResponse> waiterResponse = waiter.waitUntilAllTypesSuccess(AllTypesRequest.builder().build()); + + assertThat(waiterResponse.attemptsExecuted()).isEqualTo(1); + assertThat(waiterResponse.matched().response()).hasValueSatisfying(r -> assertThat(r).isEqualTo(response)); + } + + @Test + public void allTypeOperationFailed_withSyncWaiter_shouldThrowException() { + when(client.allTypes(any(AllTypesRequest.class))).thenThrow(SdkServiceException.builder().statusCode(200).build()); + + WaiterResponse<AllTypesResponse> waiterResponse = waiter.waitUntilAllTypesSuccess(AllTypesRequest.builder().build()); + + assertThat(waiterResponse.attemptsExecuted()).isEqualTo(1); + assertThat(waiterResponse.matched().exception()).hasValueSatisfying(r -> assertThat(r).isInstanceOf(SdkServiceException.class)); + } + + @Test + public void allTypeOperationRetry_withSyncWaiter_shouldReturnResponseAfterException() { + AllTypesResponse response = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(200) + .build()) + .build(); + when(client.allTypes(any(AllTypesRequest.class))).thenThrow(SdkServiceException.builder().statusCode(404).build()) + .thenReturn(response); + + WaiterResponse<AllTypesResponse> waiterResponse = waiter.waitUntilAllTypesSuccess(AllTypesRequest.builder().build()); +
+ assertThat(waiterResponse.attemptsExecuted()).isEqualTo(2); + assertThat(waiterResponse.matched().response()).hasValueSatisfying(r -> assertThat(r).isEqualTo(response)); + } + + @Test + public void allTypeOperationRetryMoreThanMaxAttempts_withSyncWaiter_shouldThrowException() { + SdkServiceException exception = SdkServiceException.builder().statusCode(404).build(); + AllTypesResponse response = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(200) + .build()) + .build(); + when(client.allTypes(any(AllTypesRequest.class))).thenThrow(exception) + .thenThrow(exception) + .thenThrow(exception) + .thenReturn(response); + assertThatThrownBy(() -> waiter.waitUntilAllTypesSuccess(AllTypesRequest.builder().build())) + .isInstanceOf(SdkClientException.class).hasMessageContaining("exceeded the max retry attempts"); + } + + @Test + public void requestOverrideConfig_shouldTakePrecedence() { + AllTypesResponse response = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(200) + .build()) + .build(); + when(client.allTypes(any(AllTypesRequest.class))).thenThrow(SdkServiceException.builder().statusCode(404).build()) + .thenReturn(response); + assertThatThrownBy(() -> waiter.waitUntilAllTypesSuccess(b -> b.build(), o -> o.maxAttempts(1))) + .isInstanceOf(SdkClientException.class).hasMessageContaining("exceeded the max retry attempts"); + } + + @Test + public void unexpectedException_shouldNotRetry() { + when(client.allTypes(any(AllTypesRequest.class))).thenThrow(new RuntimeException("blah")); + + assertThatThrownBy(() -> waiter.waitUntilAllTypesSuccess(b -> b.build())) + .hasMessageContaining("An exception was thrown and did not match any waiter acceptors") + .isInstanceOf(SdkClientException.class); + } + + @Test + public void failureException_shouldThrowException() { + when(client.allTypes(any(AllTypesRequest.class))).thenThrow(EmptyModeledException.builder() + .awsErrorDetails(AwsErrorDetails.builder() + .errorCode("EmptyModeledException") + .build()) + .build()); + assertThatThrownBy(() -> waiter.waitUntilAllTypesSuccess(SdkBuilder::build)) + .hasMessageContaining("transitioned the waiter to failure state") + .isInstanceOf(SdkClientException.class); + } + + @Test + public void unexpectedResponse_shouldRetry() { + AllTypesResponse response1 = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(202) + .build()) + .build(); + AllTypesResponse response2 = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(200) + .build()) + .build(); + when(client.allTypes(any(AllTypesRequest.class))).thenReturn(response1, response2); + + WaiterResponse<AllTypesResponse> waiterResponse = waiter.waitUntilAllTypesSuccess(AllTypesRequest.builder().build()); + + assertThat(waiterResponse.attemptsExecuted()).isEqualTo(2); + assertThat(waiterResponse.matched().response()).hasValueSatisfying(r -> assertThat(r).isEqualTo(response2)); + } + + @Test + public void failureResponse_shouldThrowException() { + AllTypesResponse response = (AllTypesResponse) AllTypesResponse.builder() + .sdkHttpResponse(SdkHttpResponse.builder() + .statusCode(500) + .build()) + .build(); + when(client.allTypes(any(AllTypesRequest.class))).thenReturn(response); + assertThatThrownBy(() -> waiter.waitUntilAllTypesSuccess(SdkBuilder::build)) + .hasMessageContaining("A waiter acceptor was matched and transitioned the waiter to failure state") +
.isInstanceOf(SdkClientException.class); + } + + @Test + public void closeWaiterCreatedWithClient_clientDoesNotClose() { + waiter.close(); + verify(client, never()).close(); + } + +} diff --git a/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body.gz b/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body.gz new file mode 100644 index 000000000000..5e0eb47d6729 Binary files /dev/null and b/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body.gz differ diff --git a/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body_with_extra_data.gz b/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body_with_extra_data.gz new file mode 100644 index 000000000000..6dfa2fe08732 Binary files /dev/null and b/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body_with_extra_data.gz differ diff --git a/test/codegen-generated-classes-test/src/test/resources/jetty-logging.properties b/test/codegen-generated-classes-test/src/test/resources/jetty-logging.properties new file mode 100644 index 000000000000..4ee410e7fa92 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/resources/jetty-logging.properties @@ -0,0 +1,18 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +# Set up logging implementation +org.eclipse.jetty.util.log.class=org.eclipse.jetty.util.log.StdErrLog +org.eclipse.jetty.LEVEL=OFF diff --git a/test/codegen-generated-classes-test/src/test/resources/log4j.properties b/test/codegen-generated-classes-test/src/test/resources/log4j.properties new file mode 100644 index 000000000000..2f52be5df856 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/resources/log4j.properties @@ -0,0 +1,33 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +log4j.rootLogger=ERROR, A1 +log4j.appender.A1=org.apache.log4j.ConsoleAppender +log4j.appender.A1.layout=org.apache.log4j.PatternLayout + +# Print the date in ISO 8601 format +log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n + +# Adjust to see more / less logging +#log4j.logger.com.amazonaws.ec2=DEBUG + +# HttpClient 3 Wire Logging +#log4j.logger.httpclient.wire=DEBUG + +# HttpClient 4 Wire Logging +# log4j.logger.org.apache.http.wire=INFO +# log4j.logger.org.apache.http=DEBUG +# log4j.logger.org.apache.http.wire=DEBUG +# log4j.logger.software.amazon.awssdk=DEBUG diff --git a/test/dynamodbdocument-v1/pom.xml b/test/dynamodbdocument-v1/pom.xml deleted file mode 100644 index c3a0f0dd5006..000000000000 --- a/test/dynamodbdocument-v1/pom.xml +++ /dev/null @@ -1,108 +0,0 @@ - - - - - 4.0.0 - - aws-sdk-java-pom - software.amazon.awssdk - 2.7.16-SNAPSHOT - ../../pom.xml - - dynamodbdocument-v1 - AWS Java SDK :: Test :: Amazon DynamoDB Document API v1 - DynamoDB Document API largely unchanged from v1. The v1 API is kept for testing purposes only. All classes are in the test directories to prevent use in application code. - https://aws.amazon.com/sdkforjava - - - ../.. - - - - - - software.amazon.awssdk - bom-internal - ${project.version} - pom - import - - - - - - - software.amazon.awssdk - regions - ${awsjavasdk.version} - test - - - software.amazon.awssdk - annotations - ${awsjavasdk.version} - test - - - software.amazon.awssdk - utils - ${awsjavasdk.version} - test - - - software.amazon.awssdk - sdk-core - ${awsjavasdk.version} - test - - - software.amazon.awssdk - aws-core - ${awsjavasdk.version} - test - - - dynamodb - software.amazon.awssdk - ${awsjavasdk.version} - test - - - s3 - software.amazon.awssdk - ${awsjavasdk.version} - test - - - service-test-utils - software.amazon.awssdk - ${awsjavasdk.version} - test - - - junit - junit - test - - - mockito-core - org.mockito - test - - - diff --git a/test/dynamodbdocument-v1/src/it/java/software/amazon/awssdk/services/dynamodb/NestedJsonDocumentIntegrationTest.java b/test/dynamodbdocument-v1/src/it/java/software/amazon/awssdk/services/dynamodb/NestedJsonDocumentIntegrationTest.java deleted file mode 100644 index 05846a36a350..000000000000 --- a/test/dynamodbdocument-v1/src/it/java/software/amazon/awssdk/services/dynamodb/NestedJsonDocumentIntegrationTest.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.core.exception.SdkServiceException; -import software.amazon.awssdk.core.util.SdkAutoConstructMap; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.services.dynamodb.util.TableUtils; -import software.amazon.awssdk.testutils.service.AwsTestBase; - -/** - * DynamoDB supports nested attributes up to 32 levels deep. - * http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html - */ -public class NestedJsonDocumentIntegrationTest extends AwsTestBase { - - private static final String TABLE = "java-sdk-nested-json-document-" + System.currentTimeMillis(); - private static final String HASH = "hash"; - private static final String JSON_MAP_ATTRIBUTE = "json"; - private static final String JSON_MAP_NESTED_KEY = "key"; - /* - * DynamoDB supports nested attributes up to 32 levels deep. 
- * http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html - */ - private static final int MAX_JSON_PATH_DEPTH = 32; - private static DynamoDbClient ddb; - - @BeforeClass - public static void setup() throws Exception { - setUpCredentials(); - ddb = DynamoDbClient.builder().credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); - - ddb.createTable(CreateTableRequest.builder() - .tableName(TABLE) - .keySchema(KeySchemaElement.builder().attributeName(HASH).keyType(KeyType.HASH).build()) - .attributeDefinitions(AttributeDefinition.builder().attributeName(HASH).attributeType(ScalarAttributeType.S).build()) - .provisionedThroughput(ProvisionedThroughput.builder().readCapacityUnits(1L).writeCapacityUnits(1L).build()).build()); - - TableUtils.waitUntilActive(ddb, TABLE); - } - - @AfterClass - public static void tearDown() { - ddb.deleteTable(DeleteTableRequest.builder().tableName(TABLE).build()); - } - - @Test - public void testMaxNestedDepth() { - // minus 1 to account for the top-level attribute - int MAX_MAP_DEPTH = MAX_JSON_PATH_DEPTH - 1; - - AttributeValue nestedJson = buildNestedMapAttribute(MAX_MAP_DEPTH); - - Map item = new HashMap(); - item.put(HASH, AttributeValue.builder().s("foo").build()); - item.put(JSON_MAP_ATTRIBUTE, nestedJson); - - ddb.putItem(PutItemRequest.builder() - .tableName(TABLE) - .item(item) - .build()); - - // Make sure we can read the max-depth item - GetItemResponse itemResult = ddb.getItem(GetItemRequest.builder() - .tableName(TABLE) - .key(Collections.singletonMap(HASH, - AttributeValue.builder().s("foo").build())) - .build()); - int mapDepth = computeDepthOfNestedMapAttribute( - itemResult.item().get(JSON_MAP_ATTRIBUTE)); - Assert.assertEquals(MAX_MAP_DEPTH, mapDepth); - - - // Attempt to put a JSON document with over-limit depth - AttributeValue nestedJson_OverLimit = buildNestedMapAttribute(MAX_MAP_DEPTH + 1); - - Map item_OverLimit = new HashMap(); - item_OverLimit.put(HASH, AttributeValue.builder().s("foo").build()); - item_OverLimit.put("json", nestedJson_OverLimit); - - try { - ddb.putItem(PutItemRequest.builder() - .tableName(TABLE) - .item(item_OverLimit).build()); - Assert.fail("ValidationException is expected, since the depth exceeds the service limit."); - } catch (SdkServiceException expected) { - // Ignored or expected. - } - } - - private AttributeValue buildNestedMapAttribute(int depth) { - AttributeValue value = AttributeValue.builder().s("foo").build(); - while (depth-- > 0) { - value = AttributeValue.builder().m(Collections.singletonMap(JSON_MAP_NESTED_KEY, value)).build(); - } - return value; - } - - private int computeDepthOfNestedMapAttribute(AttributeValue mapAttr) { - int depth = 0; - while (mapAttr != null && mapAttr.m() != null && !(mapAttr.m() instanceof SdkAutoConstructMap)) { - depth++; - mapAttr = mapAttr.m().get(JSON_MAP_NESTED_KEY); - } - return depth; - } -} diff --git a/test/dynamodbdocument-v1/src/it/java/software/amazon/awssdk/services/dynamodb/TableUtilsIntegrationTest.java b/test/dynamodbdocument-v1/src/it/java/software/amazon/awssdk/services/dynamodb/TableUtilsIntegrationTest.java deleted file mode 100644 index 28a574b32300..000000000000 --- a/test/dynamodbdocument-v1/src/it/java/software/amazon/awssdk/services/dynamodb/TableUtilsIntegrationTest.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -import org.junit.After; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.services.dynamodb.model.TableStatus; -import software.amazon.awssdk.services.dynamodb.util.TableUtils; -import software.amazon.awssdk.services.dynamodb.util.TableUtils.TableNeverTransitionedToStateException; -import software.amazon.awssdk.testutils.service.AwsIntegrationTestBase; - -public class TableUtilsIntegrationTest extends AwsIntegrationTestBase { - - private static final int CUSTOM_TIMEOUT = 5 * 1000; - - /** - * Wait a generous amount of time after the custom timeout to account for - * variance due to polling interval. 
This is only used in tests that use - * {@link TableUtilsIntegrationTest#CUSTOM_TIMEOUT} - */ - private static final int TEST_TIMEOUT = CUSTOM_TIMEOUT * 2; - - private static final int CUSTOM_POLLING_INTERVAL = 1 * 1000; - private static final long READ_CAPACITY = 5L; - private static final long WRITE_CAPACITY = 5L; - private static final String HASH_KEY_NAME = "someHash"; - - private static DynamoDbClient ddb; - private String tableName; - - @BeforeClass - public static void setupFixture() { - ddb = DynamoDbClient.builder().credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); - } - - private CreateTableRequest createTableRequest() { - return CreateTableRequest.builder() - .tableName(tableName) - .keySchema(KeySchemaElement.builder() - .keyType(KeyType.HASH) - .attributeName(HASH_KEY_NAME).build()) - .attributeDefinitions(AttributeDefinition.builder() - .attributeName(HASH_KEY_NAME) - .attributeType(ScalarAttributeType.S).build()) - .provisionedThroughput(ProvisionedThroughput.builder() - .readCapacityUnits(READ_CAPACITY) - .writeCapacityUnits(WRITE_CAPACITY).build()) - .build(); - } - - private DeleteTableRequest deleteTableRequest() { - return DeleteTableRequest.builder().tableName(tableName).build(); - } - - private void createTable() { - ddb.createTable(createTableRequest()); - } - - @Before - public void setup() { - tableName = "TableUtilsTest-" + System.currentTimeMillis(); - } - - @After - public void tearDown() throws InterruptedException { - if (tableStatus() != null) { - if (!tableStatus().equals(TableStatus.DELETING)) { - TableUtils.waitUntilActive(ddb, tableName); - ddb.deleteTable(DeleteTableRequest.builder().tableName(tableName).build()); - } - waitUntilTableDeleted(); - } - } - - /** - * @return Table status or null if it doesn't exist. - */ - private String tableStatus() { - try { - return ddb.describeTable(DescribeTableRequest.builder().tableName(tableName).build()).table().tableStatusAsString(); - } catch (ResourceNotFoundException e) { - return null; - } - } - - // TODO replace with waiters when available. - private void waitUntilTableDeleted() throws InterruptedException { - long startTime = System.currentTimeMillis(); - // Wait up to five minutes for a table to be deleted. 
- long endTime = startTime + 5 * 60 * 1000; - while (System.currentTimeMillis() < endTime) { - try { - ddb.describeTable(DescribeTableRequest.builder().tableName(tableName).build()); - Thread.sleep(1000); - } catch (ResourceNotFoundException e) { - return; - } - } - } - - @Test(expected = IllegalArgumentException.class) - public void waitUntilActive_InvalidTimeout_ThrowsException() throws Exception { - TableUtils.waitUntilActive(ddb, tableName, -1, 10); - } - - @Test(expected = IllegalArgumentException.class) - public void waitUntilActive_InvalidInterval_ThrowsException() throws Exception { - TableUtils.waitUntilActive(ddb, tableName, 10, -1); - } - - @Test(expected = IllegalArgumentException.class) - public void waitUntilActive_IntervalGreaterThanTimeout_ThrowsException() throws Exception { - TableUtils.waitUntilActive(ddb, tableName, 10, 100); - } - - @Test - public void waitUntilActive_MethodBlocksUntilTableIsActive() throws Exception { - createTable(); - TableUtils.waitUntilActive(ddb, tableName); - assertEquals(TableStatus.ACTIVE, - ddb.describeTable(DescribeTableRequest.builder().tableName(tableName).build()).table().tableStatus()); - } - - @Test(expected = TableNeverTransitionedToStateException.class, timeout = TEST_TIMEOUT) - public void waitUntilActive_TableNeverTransitionsToActive_ThrowsException() throws Exception { - createTable(); - // We wait long enough for DescribeTable to return something but not - // long enough for the table to transition to active - TableUtils.waitUntilActive(ddb, tableName, 1 * 1000, 500); - } - - @Test(expected = TableNeverTransitionedToStateException.class, timeout = TEST_TIMEOUT) - public void waitUntilActive_NoSuchTable_BlocksUntilTimeoutThenThrowsException() throws - InterruptedException { - TableUtils.waitUntilActive(ddb, tableName, CUSTOM_TIMEOUT, CUSTOM_POLLING_INTERVAL); - } - - @Test - public void waitUntilExists_MethodBlocksUntilTableExists() throws InterruptedException { - createTable(); - TableUtils.waitUntilExists(ddb, tableName); - assertNotNull(ddb.describeTable(DescribeTableRequest.builder().tableName(tableName).build())); - } - - @Test(expected = SdkClientException.class, timeout = TEST_TIMEOUT) - public void waitUntilExists_NoSuchTable_BlocksUntilTimeoutThenThrowsException() throws - InterruptedException { - TableUtils.waitUntilExists(ddb, tableName, CUSTOM_TIMEOUT, CUSTOM_POLLING_INTERVAL); - } - - @Test - public void testCreateTableIfNotExists() throws InterruptedException { - assertTrue(TableUtils.createTableIfNotExists(ddb, createTableRequest())); - TableUtils.waitUntilExists(ddb, tableName); - assertFalse(TableUtils.createTableIfNotExists(ddb, createTableRequest())); - } - - @Test - public void testDeleteTableIfExists() throws InterruptedException { - assertFalse(TableUtils.deleteTableIfExists(ddb, deleteTableRequest())); - createTable(); - TableUtils.waitUntilActive(ddb, tableName); - assertTrue(TableUtils.deleteTableIfExists(ddb, deleteTableRequest())); - waitUntilTableDeleted(); - } - -} diff --git a/test/dynamodbdocument-v1/src/it/java/software/amazon/awssdk/services/dynamodb/document/UpdateItemIntegrationTest.java b/test/dynamodbdocument-v1/src/it/java/software/amazon/awssdk/services/dynamodb/document/UpdateItemIntegrationTest.java deleted file mode 100644 index 195b905c9a89..000000000000 --- a/test/dynamodbdocument-v1/src/it/java/software/amazon/awssdk/services/dynamodb/document/UpdateItemIntegrationTest.java +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.core.exception.SdkServiceException; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.spec.GetItemSpec; -import software.amazon.awssdk.services.dynamodb.document.utils.NameMap; -import software.amazon.awssdk.services.dynamodb.document.utils.ValueMap; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.services.dynamodb.model.TableDescription; -import software.amazon.awssdk.testutils.service.AwsIntegrationTestBase; - -public class UpdateItemIntegrationTest { - - private static final long READ_CAPACITY = 1; - private static final long WRITE_CAPACITY = 1; - private static final Long FIRST_CUSTOMER_ID = 1000L; - private static final String ADDRESS_TYPE_HOME = "home"; - private static final String ADDRESS_TYPE_WORK = "work"; - private static DynamoDb dynamoDb; - private static String TABLE_NAME = "UpdateItemIntegrationTest"; - private static String HASH_KEY = "customer_id"; - private static String RANGE_KEY = "address_type"; - - @BeforeClass - public static void setUp() throws Exception { - DynamoDbClient client = DynamoDbClient.builder() - .credentialsProvider(AwsIntegrationTestBase.CREDENTIALS_PROVIDER_CHAIN) - .build(); - dynamoDb = new DynamoDb(client); - - createTable(); - fillInData(); - } - - private static void createTable() throws Exception { - Table table = dynamoDb.getTable(TABLE_NAME); - TableDescription desc = table.waitForActiveOrDelete(); - if (desc == null) { - // table doesn't exist; let's create it - KeySchemaElement hashKey = - KeySchemaElement.builder().attributeName(HASH_KEY).keyType(KeyType.HASH).build(); - KeySchemaElement rangeKey = - KeySchemaElement.builder().attributeName(RANGE_KEY).keyType(KeyType.RANGE).build(); - CreateTableRequest createTableRequest = CreateTableRequest.builder(). 
- tableName(TABLE_NAME) - .keySchema(Arrays.asList(hashKey, rangeKey)) - .attributeDefinitions( - AttributeDefinition.builder().attributeName(HASH_KEY).attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName(RANGE_KEY).attributeType(ScalarAttributeType.S).build()) - .provisionedThroughput( - ProvisionedThroughput.builder().readCapacityUnits(READ_CAPACITY).writeCapacityUnits(WRITE_CAPACITY).build()) - .build(); - table = dynamoDb.createTable(createTableRequest); - table.waitForActive(); - } - } - - private static void fillInData() { - Table table = dynamoDb.getTable(TABLE_NAME); - table.putItem(new Item().with(HASH_KEY, FIRST_CUSTOMER_ID) - .with(RANGE_KEY, ADDRESS_TYPE_WORK) - .with("AddressLine1", "1918 8th Aven") - .with("city", "seattle") - .with("state", "WA") - .with("zipcode", 98104)); - table.putItem(new Item().with(HASH_KEY, FIRST_CUSTOMER_ID) - .with(RANGE_KEY, ADDRESS_TYPE_HOME) - .with("AddressLine1", "15606 NE 40th ST") - .with("city", "redmond") - .with("state", "WA") - .with("zipcode", 98052)); - } - - @AfterClass - public static void shutDown() throws Exception { - // Table table = dynamoDB.getTable(TABLE_NAME); - // table.delete(); - dynamoDb.shutdown(); - } - - /** - * This test case tests the various methods in AttributeUpdate class. At - * each phase, retrieves the items and compares its values. - */ - @Test - public void testAddingNewAttributeToExistingRow() { - final String phoneNumber1 = "123-456-7890"; - final Set phoneNumbers = new HashSet(); - phoneNumbers.add(phoneNumber1); - - // Adds a new attribute to the row. - Table table = dynamoDb.getTable(TABLE_NAME); - table.updateItem(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK, - new AttributeUpdate("phone").put(phoneNumbers)); - Item item = table.getItem(new GetItemSpec() - .withPrimaryKey(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK) - .withConsistentRead(true) - ); - Set phoneNumbersRetrieved = item.getStringSet("phone"); - assertEquals(phoneNumbers, phoneNumbersRetrieved); - assertTrue(phoneNumbersRetrieved.contains(phoneNumber1)); - assertTrue(phoneNumbersRetrieved.size() == 1); - - // Adds a new element to the attribute - final String phoneNumber2 = "987-654-3210"; - table.updateItem(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK, - new AttributeUpdate("phone").addElements(phoneNumber2)); - item = table.getItem(new GetItemSpec() - .withPrimaryKey(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK) - .withConsistentRead(true)); - phoneNumbersRetrieved = item.getStringSet("phone"); - assertTrue(phoneNumbersRetrieved.contains(phoneNumber2)); - assertTrue(phoneNumbersRetrieved.contains(phoneNumber1)); - assertTrue(phoneNumbersRetrieved.size() == 2); - - // removes an element from the attribute - table.updateItem(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK, - new AttributeUpdate("phone").removeElements(phoneNumber2)); - item = table.getItem(new GetItemSpec() - .withPrimaryKey(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK) - .withConsistentRead(true)); - phoneNumbersRetrieved = item.getStringSet("phone"); - assertFalse(phoneNumbersRetrieved.contains(phoneNumber2)); - assertTrue(phoneNumbersRetrieved.contains(phoneNumber1)); - assertTrue(phoneNumbersRetrieved.size() == 1); - - // deletes the attribute - table.updateItem(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK, new AttributeUpdate("phone").delete()); - item = table.getItem(new GetItemSpec() - .withPrimaryKey(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, 
ADDRESS_TYPE_WORK) - .withConsistentRead(true)); - phoneNumbersRetrieved = item.getStringSet("phone"); - assertNull(phoneNumbersRetrieved); - - final Number oldValue = item.getNumber("zipcode"); - - // Increments the zip code attribute - table.updateItem(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK, - new AttributeUpdate("zipcode").addNumeric(1)); - item = table.getItem(new GetItemSpec() - .withPrimaryKey(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK) - .withConsistentRead(true)); - Number newValue = item.getNumber("zipcode"); - assertEquals(oldValue.longValue() + 1, newValue.longValue()); - - // Decrements the zip code attribute - table.updateItem(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK, - new AttributeUpdate("zipcode").addNumeric(-1)); - item = table.getItem(new GetItemSpec() - .withPrimaryKey(HASH_KEY, FIRST_CUSTOMER_ID, RANGE_KEY, ADDRESS_TYPE_WORK) - .withConsistentRead(true)); - newValue = item.getNumber("zipcode"); - assertEquals(oldValue.longValue(), newValue.longValue()); - } - - /** - * This test cases performs an update item with expected set. The update - * must fail as the expected condition is not met. - */ - @Test - public void testUpdateItemWithExpectedSet() { - final String phoneNumber1 = "123-456-7890"; - final String phoneNumber2 = "987-654-3210"; - final Set phoneNumbers = new HashSet(); - phoneNumbers.add(phoneNumber1); - Table table = dynamoDb.getTable(TABLE_NAME); - try { - table.updateItem( - HASH_KEY, FIRST_CUSTOMER_ID, - RANGE_KEY, ADDRESS_TYPE_WORK, - Arrays.asList(new Expected("phone").eq(phoneNumbers)), - new AttributeUpdate("phone").addElements(phoneNumber2)); - fail("Update Should fail as the phone number attribute is not present in the row"); - } catch (Exception e) { - assertTrue(e instanceof SdkServiceException); - } - } - - /** - * Performs an update using the update expression. Asserts by retrieving the - * item and checking if the update values are present in the record. - */ - @Test - public void testUpdateItemWithUpdateExpression() { - final String phoneNumber1 = "123-456-7890"; - final String phoneNumber2 = "987-654-3210"; - final Set phoneNumbers = new HashSet(); - phoneNumbers.add(phoneNumber1); - phoneNumbers.add(phoneNumber2); - final String updateExpression = "set #phoneAttributeName = :phoneAtributeValue"; - - final Map nameMap = new HashMap(); - nameMap.put("#phoneAttributeName", "phone"); - final Map valueMap = new HashMap(); - valueMap.put(":phoneAtributeValue", phoneNumbers); - - Table table = dynamoDb.getTable(TABLE_NAME); - table.updateItem( - HASH_KEY, FIRST_CUSTOMER_ID, - RANGE_KEY, ADDRESS_TYPE_WORK, - updateExpression, nameMap, valueMap); - Item item = table.getItem(new GetItemSpec() - .withPrimaryKey( - HASH_KEY, FIRST_CUSTOMER_ID, - RANGE_KEY, ADDRESS_TYPE_WORK) - .withConsistentRead(true)); - Set phoneNumbersRetrieved = item.getStringSet("phone"); - assertNotNull(phoneNumbersRetrieved); - assertTrue(phoneNumbersRetrieved.size() == 2); - assertTrue(phoneNumbersRetrieved.contains(phoneNumber1)); - assertTrue(phoneNumbersRetrieved.contains(phoneNumber2)); - } - - /** - * Performs an update using the update and conditional expression. The - * update should fail as the conditional expression fails to true. 
- */ - @Test - public void testUpdateItemWithConditionExpression() { - Table table = dynamoDb.getTable(TABLE_NAME); - try { - table.updateItem( - HASH_KEY, FIRST_CUSTOMER_ID, - RANGE_KEY, ADDRESS_TYPE_WORK, - "set #mno = list_append(:phoneNumber, :phoneNumber)", - "zipcode = :zipcode", - new NameMap().with("#mno", "phone"), - new ValueMap() - .withList(":phoneNumber", "987-654-3210") - // compare zipecode, which is of type int, to string - // leading to an intentional failure in the update condition - .withString(":zipcode", "98104") - ); - fail("Update Should fail as the zip code mentioned in the condition expression doesn't match"); - } catch (SdkServiceException e) { - assertTrue(e.getMessage().contains("conditional request failed")); - } - } - -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Attribute.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Attribute.java deleted file mode 100644 index 43c0678426ff..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Attribute.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; - -/** - * A key/value pair. - */ -public class Attribute { - private final String name; - private final Object value; - - public Attribute(String attrName, Object value) { - InternalUtils.checkInvalidAttrName(attrName); - this.name = attrName; - this.value = value; - } - - public String name() { - return name; - } - - public Object value() { - return value; - } - - @Override - public String toString() { - return "{" + name + ": " + value + "}"; - } - - @Override - public int hashCode() { - final int prime = 31; - int hashCode = 1; - // attribute name is never null as enforced in ctor - hashCode = prime * hashCode + name().hashCode(); - hashCode = prime * hashCode - + ((value() == null) ? 0 : value().hashCode()); - return hashCode; - } - - @Override - public boolean equals(Object in) { - if (in instanceof Attribute) { - Attribute that = (Attribute) in; - if (this.name.equals(that.name)) { - if (this.value == null) { - return that.value == null; - } else { - return this.value.equals(that.value); - } - } - } - return false; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/AttributeTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/AttributeTest.java deleted file mode 100644 index 1de470ca0d02..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/AttributeTest.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.util.HashSet; -import java.util.Set; -import org.junit.Test; - -public class AttributeTest { - - @Test(expected = IllegalArgumentException.class) - public void nullAttributeName() { - new Attribute(null, "invalid attribute name"); - } - - @Test - public void nullAttributeValue() { - Attribute a = new Attribute("null attribute value is fine", null); - assertTrue(a.hashCode() != 0); - } - - @Test - public void testHashCode() { - Attribute a1 = new Attribute("name", null); - Attribute a2 = new Attribute("name", "a2"); - Attribute a3 = new Attribute("name", "a3"); - Attribute a4 = new Attribute("name4", "a3"); - Set checkUniqueness = new HashSet(); - checkUniqueness.add(a1.hashCode()); - checkUniqueness.add(a2.hashCode()); - checkUniqueness.add(a3.hashCode()); - checkUniqueness.add(a4.hashCode()); - assertTrue(checkUniqueness.size() == 4); - } - - @Test - public void testEquals() { - Attribute a1 = new Attribute("name", null); - Attribute a2 = new Attribute("name", "a2"); - Attribute a3 = new Attribute("name", "a3"); - Attribute a4 = new Attribute("name4", "a3"); - Set checkUniqueness = new HashSet(); - checkUniqueness.add(a1); - checkUniqueness.add(a2); - checkUniqueness.add(a3); - checkUniqueness.add(a4); - assertTrue(checkUniqueness.size() == 4); - - assertTrue(checkUniqueness.contains(new Attribute("name", null))); - assertTrue(checkUniqueness.contains(new Attribute("name", "a2"))); - assertTrue(checkUniqueness.contains(new Attribute("name", "a3"))); - assertTrue(checkUniqueness.contains(new Attribute("name4", "a3"))); - - assertFalse(checkUniqueness.contains(new Attribute("not", "exist"))); - assertFalse(a1.equals("name")); - assertFalse(a1.equals(null)); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/AttributeUpdate.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/AttributeUpdate.java deleted file mode 100644 index b50afc0da81c..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/AttributeUpdate.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.model.AttributeAction; - -/** - * Used to update an attribute. Each instance of AttributeUpdate includes the - * name, action and new value to be used for modifying the attribute. - *

- * Typical usages:
- * new AttributeUpdate("strAttr").put("attrValue");
- * new AttributeUpdate("intAttr").addNumeric(42);
- * ...
- *
    - * See http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/ - * API_UpdateItem.html#DDB-UpdateItem-request-AttributeUpdates - */ -public class AttributeUpdate { - - private final String attributeName; - - private AttributeAction action; - - private Set attributeValues; - - private Object value; - - /** - * Used to update an attribute. Each instance of AttributeUpdate includes the - * name, action and new value to be used for modifying the attribute. - *

- * Typical usages:
- * new AttributeUpdate("strAttr").put("attrValue");
- * new AttributeUpdate("intAttr").addNumeric(42);
- * ...
- *
    - * See http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/ - * API_UpdateItem.html#DDB-UpdateItem-request-AttributeUpdates - */ - public AttributeUpdate(String attributeName) { - this.attributeName = attributeName; - } - - /** - * Used to update an attribute. Each instance of AttributeUpdate includes the - * name, action and new value to be used for modifying the attribute. - *

- * Typical usages:
- * new AttributeUpdate("strAttr").put("attrValue");
- * new AttributeUpdate("intAttr").addNumeric(42);
- * ...
- *
    - * See http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/ - * API_UpdateItem.html#DDB-UpdateItem-request-AttributeUpdates - */ - public AttributeUpdate put(Object attributeValue) { - action = AttributeAction.PUT; - this.value = attributeValue; - return this; - } - - public AttributeUpdate delete() { - action = AttributeAction.DELETE; - return this; - } - - public AttributeUpdate removeElements(Object... elementsToBeRemoved) { - action = AttributeAction.DELETE; - this.attributeValues = Collections.unmodifiableSet(new LinkedHashSet( - Arrays.asList(elementsToBeRemoved))); - return this; - } - - public AttributeUpdate addNumeric(Number value) { - action = AttributeAction.ADD; - this.value = value; - return this; - } - - public AttributeUpdate addElements(Object... newElements) { - action = AttributeAction.ADD; - this.attributeValues = Collections.unmodifiableSet(new LinkedHashSet( - Arrays.asList(newElements))); - return this; - } - - public String getAttributeName() { - return attributeName; - } - - public AttributeAction getAction() { - return action; - } - - public Set getAttributeValues() { - return attributeValues; - } - - public Object value() { - return value; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/BatchGetItemOutcome.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/BatchGetItemOutcome.java deleted file mode 100644 index 3e141fc9fb8d..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/BatchGetItemOutcome.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.document.api.BatchGetItemApi; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.document.spec.BatchGetItemSpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemResponse; -import software.amazon.awssdk.services.dynamodb.model.KeysAndAttributes; - -/** - * The outcome of a batch get-item operation from DynamoDB. 
- */ -public class BatchGetItemOutcome { - private final BatchGetItemResponse result; - - /** - * @param result the low-level result; must not be null - */ - public BatchGetItemOutcome(BatchGetItemResponse result) { - if (result == null) { - throw new IllegalArgumentException(); - } - this.result = result; - } - - /** - * Returns a map of table name to the list of retrieved items - */ - public Map> getTableItems() { - Map>> res = - result.responses(); - Map> map = new LinkedHashMap>(res.size()); - for (Map.Entry>> e - : res.entrySet()) { - String tableName = e.getKey(); - List> items = e.getValue(); - map.put(tableName, InternalUtils.toItemList(items)); - } - return map; - } - - /** - * Convenient method to return the low-level unprocessed keys. - * - * @see BatchGetItemApi#batchGetItemUnprocessed(Map) - * @see BatchGetItemApi#batchGetItemUnprocessed(software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity, - * Map) - * @see BatchGetItemSpec#withUnprocessedKeys(Map) - */ - public Map getUnprocessedKeys() { - return result.unprocessedKeys(); - } - - /** - * Returns a non-null low-level result returned from the server side. - */ - public BatchGetItemResponse batchGetItemResponse() { - return result; - } - - @Override - public String toString() { - return String.valueOf(result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/BatchWriteItemOutcome.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/BatchWriteItemOutcome.java deleted file mode 100644 index 64749efaef06..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/BatchWriteItemOutcome.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.document.api.BatchWriteItemApi; -import software.amazon.awssdk.services.dynamodb.document.spec.BatchWriteItemSpec; -import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemResponse; -import software.amazon.awssdk.services.dynamodb.model.WriteRequest; - -/** - * The outcome of a batch write-item operation from DynamoDB. - */ -public class BatchWriteItemOutcome { - private final BatchWriteItemResponse result; - - /** - * @param result the low-level result; must not be null - */ - public BatchWriteItemOutcome(BatchWriteItemResponse result) { - if (result == null) { - throw new IllegalArgumentException(); - } - this.result = result; - } - - /** - * Convenient method to return the low-level unprocessed items. - * - * @see BatchWriteItemApi#batchWriteItemUnprocessed(Map) - * @see BatchWriteItemSpec#withUnprocessedItems(Map) - */ - public Map> getUnprocessedItems() { - return result.unprocessedItems(); - } - - /** - * Returns a non-null low-level result returned from the server side. 
- */ - public BatchWriteItemResponse batchWriteItemResult() { - return result; - } - - @Override - public String toString() { - return String.valueOf(result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/DeleteItemOutcome.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/DeleteItemOutcome.java deleted file mode 100644 index d726ee198238..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/DeleteItemOutcome.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.model.DeleteItemResponse; - -/** - * The outcome of deleting an item from a DynamoDB table. - */ -public class DeleteItemOutcome { - private final DeleteItemResponse result; - - /** - * @param result the low-level result; must not be null - */ - public DeleteItemOutcome(DeleteItemResponse result) { - if (result == null) { - throw new IllegalArgumentException(); - } - this.result = result; - } - - /** - * Returns all the returned attributes as a (non-null) {@link Item}. - */ - public Item getItem() { - Map attributes = - InternalUtils.toSimpleMapValue(result.attributes()); - Item item = Item.fromMap(attributes); - return item; - } - - /** - * Returns a non-null low-level result returned from the server side. - */ - public DeleteItemResponse getDeleteItemResponse() { - return result; - } - - @Override - public String toString() { - return String.valueOf(result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/DynamoDb.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/DynamoDb.java deleted file mode 100644 index eae87598278e..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/DynamoDb.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.api.BatchGetItemApi; -import software.amazon.awssdk.services.dynamodb.document.api.BatchWriteItemApi; -import software.amazon.awssdk.services.dynamodb.document.api.ListTablesApi; -import software.amazon.awssdk.services.dynamodb.document.internal.BatchGetItemImpl; -import software.amazon.awssdk.services.dynamodb.document.internal.BatchWriteItemImpl; -import software.amazon.awssdk.services.dynamodb.document.internal.ListTablesImpl; -import software.amazon.awssdk.services.dynamodb.document.spec.BatchGetItemSpec; -import software.amazon.awssdk.services.dynamodb.document.spec.BatchWriteItemSpec; -import software.amazon.awssdk.services.dynamodb.document.spec.ListTablesSpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.CreateTableResponse; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeysAndAttributes; -import software.amazon.awssdk.services.dynamodb.model.ListTablesResponse; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.WriteRequest; - -/** - * DynamoDB Document API. This class is the entry point to make use of this - * library. - */ -@ThreadSafe -public class DynamoDb implements ListTablesApi, BatchGetItemApi, - BatchWriteItemApi { - private final DynamoDbClient client; - - private final ListTablesImpl listTablesDelegate; - private final BatchGetItemImpl batchGetItemDelegate; - private final BatchWriteItemImpl batchWriteItemDelegate; - - public DynamoDb(DynamoDbClient client) { - if (client == null) { - throw new IllegalArgumentException(); - } - this.client = client; - this.listTablesDelegate = new ListTablesImpl(client); - this.batchGetItemDelegate = new BatchGetItemImpl(client); - this.batchWriteItemDelegate = new BatchWriteItemImpl(client); - } - - /** - * Create a DynamoDB object that talks to the specified AWS region. The - * underlying service client will use all the default client configurations, - * including the default credentials provider chain. See - * {@link DynamoDbClient#DynamoDbClient()} for more information. - *

    BatchWriteRetryStrategyTest - * If you need more control over the client configuration, use - * {@link DynamoDb#DynamoDb(DynamoDbClient)} instead. - * - * @param regionEnum - * the AWS region enum - * @see DynamoDbClient#DynamoDbClient() - */ - public DynamoDb(Region regionEnum) { - this(DynamoDbClient.builder().region(regionEnum).build()); - } - - /** - * Returns the specified DynamoDB table. No network call is involved. - */ - public Table getTable(String tableName) { - return new Table(client, tableName); - } - - /** - * Creates the specified table in DynamoDB. - */ - public Table createTable(CreateTableRequest req) { - CreateTableResponse result = client.createTable(req); - return new Table(client, req.tableName(), - result.tableDescription()); - } - - /** - * Creates the specified table in DynamoDB. - */ - public Table createTable(String tableName, - List keySchema, - List attributeDefinitions, - ProvisionedThroughput provisionedThroughput) { - return createTable(CreateTableRequest.builder() - .tableName(tableName) - .keySchema(keySchema) - .attributeDefinitions(attributeDefinitions) - .provisionedThroughput(provisionedThroughput) - .build()); - } - - @Override - public TableCollection listTables() { - return listTablesDelegate.listTables(); - } - - @Override - public TableCollection listTables(String exclusiveStartTableName) { - return listTablesDelegate.listTables(exclusiveStartTableName); - } - - @Override - public TableCollection listTables(String exclusiveStartTableName, - int maxResultSize) { - return listTablesDelegate.listTables(exclusiveStartTableName, - maxResultSize); - } - - @Override - public TableCollection listTables(int maxResultSize) { - return listTablesDelegate.listTables(maxResultSize); - } - - @Override - public TableCollection listTables(ListTablesSpec spec) { - return listTablesDelegate.listTables(spec); - } - - @Override - public BatchGetItemOutcome batchGetItem( - ReturnConsumedCapacity returnConsumedCapacity, - TableKeysAndAttributes... tableKeysAndAttributes) { - return batchGetItemDelegate.batchGetItem(returnConsumedCapacity, - tableKeysAndAttributes); - } - - @Override - public BatchGetItemOutcome batchGetItem( - TableKeysAndAttributes... tableKeysAndAttributes) { - return batchGetItemDelegate.batchGetItem(tableKeysAndAttributes); - } - - @Override - public BatchGetItemOutcome batchGetItem(BatchGetItemSpec spec) { - return batchGetItemDelegate.batchGetItem(spec); - } - - @Override - public BatchGetItemOutcome batchGetItemUnprocessed( - ReturnConsumedCapacity returnConsumedCapacity, - Map unprocessedKeys) { - return batchGetItemDelegate.batchGetItemUnprocessed( - returnConsumedCapacity, unprocessedKeys); - } - - @Override - public BatchGetItemOutcome batchGetItemUnprocessed( - Map unprocessedKeys) { - return batchGetItemDelegate.batchGetItemUnprocessed(unprocessedKeys); - } - - @Override - public BatchWriteItemOutcome batchWriteItem( - TableWriteItems... tableWriteItems) { - return batchWriteItemDelegate.batchWriteItem(tableWriteItems); - } - - @Override - public BatchWriteItemOutcome batchWriteItem(BatchWriteItemSpec spec) { - return batchWriteItemDelegate.batchWriteItem(spec); - } - - @Override - public BatchWriteItemOutcome batchWriteItemUnprocessed( - Map> unprocessedItems) { - return batchWriteItemDelegate.batchWriteItemUnprocessed(unprocessedItems); - } - - /** - * Shuts down and release all resources. 
- */ - public void shutdown() throws Exception { - client.close(); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Expected.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Expected.java deleted file mode 100644 index f1d0e718c563..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Expected.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; - - -/** - * Represents a condition to be compared with an attribute value. - *

- * Typical usages:
- * new Expected("strAttr").eq("attrValue");
- * new Expected("intAttr").gt(42);
- * ...
- *
    - * See - * http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/ - * API_ExpectedAttributeValue.html. - */ -public class Expected { - private final String attribute; - private ComparisonOperator op; - private Object[] values; - - public Expected(String attrName) { - InternalUtils.checkInvalidAttrName(attrName); - this.attribute = attrName; - } - - /** Returns the attribute. */ - public String getAttribute() { - return attribute; - } - - public ComparisonOperator getComparisonOperator() { - return op; - } - - public Object[] values() { - return values == null ? null : values.clone(); - } - - private Expected values(Object... values) { - this.values = values.clone(); - return this; - } - - private Expected withComparisonOperator(ComparisonOperator op) { - this.op = op; - return this; - } - - /** - * Creates and returns a condition of the range key being equal to the given - * value. - */ - public Expected eq(Object val) { - return withComparisonOperator(ComparisonOperator.EQ).values(val); - } - - public Expected ne(Object val) { - return withComparisonOperator(ComparisonOperator.NE).values(val); - } - - /** - * Expects the attribute be an existing attribute. - */ - public Expected exists() { - return withComparisonOperator(ComparisonOperator.NOT_NULL); - } - - /** - * Expects the attribute be non-existing. - */ - public Expected notExist() { - return withComparisonOperator(ComparisonOperator.NULL); - } - - public Expected contains(Object val) { - return withComparisonOperator(ComparisonOperator.CONTAINS).values(val); - } - - public Expected notContains(Object val) { - return withComparisonOperator(ComparisonOperator.NOT_CONTAINS).values(val); - } - - /** - * Creates and returns a condition of the range key with a value that begins - * with the given value. - */ - public Expected beginsWith(String val) { - return withComparisonOperator(ComparisonOperator.BEGINS_WITH).values(val); - } - - public Expected in(Object... values) { - if (values == null || values.length == 0) { - throw new IllegalArgumentException("values must not be null or empty."); - } - - return withComparisonOperator(ComparisonOperator.IN).values(values); - } - - /** - * Creates and returns a condition of the range key that has a value between - * the given values. - */ - public Expected between(Object low, Object hi) { - return withComparisonOperator(ComparisonOperator.BETWEEN).values(low, hi); - } - - /** - * Creates and returns a condition of the range key being greater than or - * equal to the given value. - */ - public Expected ge(Object val) { - return withComparisonOperator(ComparisonOperator.GE).values(val); - } - - /** - * Creates and returns a condition of the range key being greater than the - * given value. - */ - public Expected gt(Object val) { - return withComparisonOperator(ComparisonOperator.GT).values(val); - } - - /** - * Creates and returns a condition of the range key being less than or equal - * to the given value. - */ - public Expected le(Object val) { - return withComparisonOperator(ComparisonOperator.LE).values(val); - } - - /** - * Creates and returns a condition of the range key being less than the - * given value. 
- */ - public Expected lt(Object val) { - return withComparisonOperator(ComparisonOperator.LT).values(val); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ExpectedTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ExpectedTest.java deleted file mode 100644 index 83e8625deea6..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ExpectedTest.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; -import org.junit.Assert; -import org.junit.Test; -import software.amazon.awssdk.core.util.SdkAutoConstructList; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; - -public class ExpectedTest { - - private static Entry toExpectedAttributeValue(Expected expected) { - Map map = InternalUtils - .toExpectedAttributeValueMap(Arrays.asList(expected)); - Assert.assertEquals(1, map.size()); - - Iterator> iter = map.entrySet().iterator(); - return iter.next(); - } - - @Test - public void testExpected_EQ() { - Expected expected = new Expected("foo").eq("bar"); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.EQ, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - - expected = new Expected("foo").eq(null); - ddbExpected = toExpectedAttributeValue(expected); - ddbExpected_attrName = ddbExpected.getKey(); - ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.EQ, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals(true, ddbExpected_value.attributeValueList().get(0).nul()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_NE() { - Expected expected = new Expected("foo").ne("bar"); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = 
ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.NE, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_EXISTS() { - Expected expected = new Expected("foo").exists(); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.NOT_NULL, ddbExpected_value.comparisonOperator()); - Assert.assertTrue(ddbExpected_value.attributeValueList() instanceof SdkAutoConstructList); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_NOTEXISTS() { - Expected expected = new Expected("foo").notExist(); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.NULL, ddbExpected_value.comparisonOperator()); - Assert.assertTrue(ddbExpected_value.attributeValueList() instanceof SdkAutoConstructList); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_CONTAINS() { - Expected expected = new Expected("foo").contains("bar"); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.CONTAINS, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_NOTCONTAINS() { - Expected expected = new Expected("foo").notContains("bar"); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.NOT_CONTAINS, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_BEGINSWITH() { - Expected expected = new Expected("foo").beginsWith("bar"); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - 
Assert.assertEquals(ComparisonOperator.BEGINS_WITH, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_IN() { - // Single value - Expected expected = new Expected("foo").in("bar"); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.IN, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - - // Multi-value - expected = new Expected("foo").in("bar", "charlie", null); - ddbExpected = toExpectedAttributeValue(expected); - ddbExpected_attrName = ddbExpected.getKey(); - ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(3, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals("charlie", ddbExpected_value.attributeValueList().get(1).s()); - Assert.assertEquals(true, ddbExpected_value.attributeValueList().get(2).nul()); - Assert.assertEquals(ComparisonOperator.IN, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - - // Null values - try { - expected = new Expected("foo").in((Object[]) null); - Assert.fail(); - } catch (IllegalArgumentException e) { - // Ignored or expected. - } - - // Empty values - try { - expected = new Expected("foo").in(); - Assert.fail(); - } catch (IllegalArgumentException e) { - // Ignored or expected. 
- } - } - - @Test - public void testExpected_BETWEEN() { - Expected expected = new Expected("foo").between(0, 100); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(2, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("0", ddbExpected_value.attributeValueList().get(0).n()); - Assert.assertEquals("100", ddbExpected_value.attributeValueList().get(1).n()); - Assert.assertEquals(ComparisonOperator.BETWEEN, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_GE() { - Expected expected = new Expected("foo").ge("bar"); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.GE, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_GT() { - Expected expected = new Expected("foo").gt("bar"); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.GT, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_LE() { - Expected expected = new Expected("foo").le("bar"); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.LE, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - @Test - public void testExpected_LT() { - Expected expected = new Expected("foo").lt("bar"); - Entry ddbExpected = toExpectedAttributeValue(expected); - String ddbExpected_attrName = ddbExpected.getKey(); - ExpectedAttributeValue ddbExpected_value = ddbExpected.getValue(); - - Assert.assertEquals("foo", ddbExpected_attrName); - Assert.assertEquals(ComparisonOperator.LT, ddbExpected_value.comparisonOperator()); - Assert.assertEquals(1, ddbExpected_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbExpected_value.attributeValueList().get(0).s()); - Assert.assertEquals(null, ddbExpected_value.value()); - Assert.assertEquals(null, ddbExpected_value.exists()); - } - - 
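Every test in this deleted ExpectedTest class follows the same pattern: build a document-API Expected condition, convert it with InternalUtils.toExpectedAttributeValueMap, and inspect the resulting comparison operator and operand list. A minimal standalone sketch of that conversion follows; it assumes the same package and InternalUtils helper as the deleted classes, and the class name, attribute name, and values are illustrative rather than part of the original file.

    package software.amazon.awssdk.services.dynamodb.document;

    import java.util.Arrays;
    import java.util.Map;
    import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils;
    import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue;

    public class ExpectedConversionSketch {
        public static void main(String[] args) {
            // Document-API condition: attribute "price" must lie between 10 and 20.
            Expected condition = new Expected("price").between(10, 20);

            // Convert to the low-level representation, exactly as the tests above do.
            Map<String, ExpectedAttributeValue> lowLevel =
                    InternalUtils.toExpectedAttributeValueMap(Arrays.asList(condition));

            ExpectedAttributeValue value = lowLevel.get("price");
            // The comparison operator plus the operand list carry the whole condition.
            System.out.println(value.comparisonOperator());            // BETWEEN
            System.out.println(value.attributeValueList().get(0).n()); // "10"
            System.out.println(value.attributeValueList().get(1).n()); // "20"
        }
    }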
@Test - public void testExpected_EmptyAttributeName() { - try { - new Expected(null); - Assert.fail(); - } catch (IllegalArgumentException expected) { - // Ignored or expected. - } - - try { - new Expected(""); - Assert.fail(); - } catch (IllegalArgumentException expected) { - // Ignored or expected. - } - } - - @Test - public void testExpected_DuplicateAttribute() { - Table fakeTable = new Table(DynamoDbClient.builder().region(Region.US_WEST_2).build(), "fake-table"); - try { - fakeTable.putItem(new Item(), - new Expected("foo").eq("bar"), - new Expected("foo").eq("charlie")); - Assert.fail(); - } catch (IllegalArgumentException expected) { - // Ignored or expected. - } - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/FilterConditionTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/FilterConditionTest.java deleted file mode 100644 index 42f8c4e7a822..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/FilterConditionTest.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; -import org.junit.Assert; -import org.junit.Test; -import software.amazon.awssdk.core.util.SdkAutoConstructList; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; - -/** - * Covers ScanFilter, which shares the same underlying implementation as QueryFilter. 
- */ -public class FilterConditionTest { - - private static Entry toAttributeCondition(ScanFilter ScanFilter) { - Map map = InternalUtils - .toAttributeConditionMap(Arrays.asList(ScanFilter)); - Assert.assertEquals(1, map.size()); - - Iterator> iter = map.entrySet().iterator(); - return iter.next(); - } - - @Test - public void testScanFilter_EQ() { - ScanFilter ScanFilter = new ScanFilter("foo").eq("bar"); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.EQ, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - - ScanFilter = new ScanFilter("foo").eq(null); - ddbscanFilter = toAttributeCondition(ScanFilter); - ddbscanFilter_attrName = ddbscanFilter.getKey(); - ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.EQ, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals(true, ddbscanFilter_value.attributeValueList().get(0).nul()); - } - - @Test - public void testScanFilter_NE() { - ScanFilter ScanFilter = new ScanFilter("foo").ne("bar"); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.NE, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - } - - @Test - public void testScanFilter_EXISTS() { - ScanFilter ScanFilter = new ScanFilter("foo").exists(); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.NOT_NULL, ddbscanFilter_value.comparisonOperator()); - Assert.assertTrue(ddbscanFilter_value.attributeValueList() instanceof SdkAutoConstructList); - } - - @Test - public void testScanFilter_NOTEXISTS() { - ScanFilter ScanFilter = new ScanFilter("foo").notExist(); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.NULL, ddbscanFilter_value.comparisonOperator()); - Assert.assertTrue(ddbscanFilter_value.attributeValueList() instanceof SdkAutoConstructList); - } - - @Test - public void testScanFilter_CONTAINS() { - ScanFilter ScanFilter = new ScanFilter("foo").contains("bar"); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.CONTAINS, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, 
ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - } - - @Test - public void testScanFilter_NOTCONTAINS() { - ScanFilter ScanFilter = new ScanFilter("foo").notContains("bar"); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.NOT_CONTAINS, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - } - - @Test - public void testScanFilter_BEGINSWITH() { - ScanFilter ScanFilter = new ScanFilter("foo").beginsWith("bar"); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.BEGINS_WITH, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - } - - @Test - public void testScanFilter_IN() { - // Single value - ScanFilter ScanFilter = new ScanFilter("foo").in("bar"); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.IN, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - - // Multi-value - ScanFilter = new ScanFilter("foo").in("bar", "charlie", null); - ddbscanFilter = toAttributeCondition(ScanFilter); - ddbscanFilter_attrName = ddbscanFilter.getKey(); - ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(3, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - Assert.assertEquals("charlie", ddbscanFilter_value.attributeValueList().get(1).s()); - Assert.assertEquals(true, ddbscanFilter_value.attributeValueList().get(2).nul()); - Assert.assertEquals(ComparisonOperator.IN, ddbscanFilter_value.comparisonOperator()); - - // Null values - try { - ScanFilter = new ScanFilter("foo").in((Object[]) null); - Assert.fail(); - } catch (IllegalArgumentException e) { - // Ignored or expected. - } - - // Empty values - try { - ScanFilter = new ScanFilter("foo").in(); - Assert.fail(); - } catch (IllegalArgumentException e) { - // Ignored or expected. 
- } - } - - @Test - public void testScanFilter_BETWEEN() { - ScanFilter ScanFilter = new ScanFilter("foo").between(0, 100); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(2, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("0", ddbscanFilter_value.attributeValueList().get(0).n()); - Assert.assertEquals("100", ddbscanFilter_value.attributeValueList().get(1).n()); - Assert.assertEquals(ComparisonOperator.BETWEEN, ddbscanFilter_value.comparisonOperator()); - } - - @Test - public void testScanFilter_GE() { - ScanFilter ScanFilter = new ScanFilter("foo").ge("bar"); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.GE, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - } - - @Test - public void testScanFilter_GT() { - ScanFilter ScanFilter = new ScanFilter("foo").gt("bar"); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.GT, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - } - - @Test - public void testScanFilter_LE() { - ScanFilter ScanFilter = new ScanFilter("foo").le("bar"); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.LE, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - } - - @Test - public void testScanFilter_LT() { - ScanFilter ScanFilter = new ScanFilter("foo").lt("bar"); - Entry ddbscanFilter = toAttributeCondition(ScanFilter); - String ddbscanFilter_attrName = ddbscanFilter.getKey(); - Condition ddbscanFilter_value = ddbscanFilter.getValue(); - - Assert.assertEquals("foo", ddbscanFilter_attrName); - Assert.assertEquals(ComparisonOperator.LT, ddbscanFilter_value.comparisonOperator()); - Assert.assertEquals(1, ddbscanFilter_value.attributeValueList().size()); - Assert.assertEquals("bar", ddbscanFilter_value.attributeValueList().get(0).s()); - } - - @Test - public void testScanFilter_EmptyAttributeName() { - try { - new ScanFilter(null); - Assert.fail(); - } catch (IllegalArgumentException ScanFilter) { - // Ignored or expected. - } - - try { - new ScanFilter(""); - Assert.fail(); - } catch (IllegalArgumentException ScanFilter) { - // Ignored or expected. 
- } - } - - @Test - public void testScanFilter_DuplicateAttribute() { - Table fakeTable = new Table(DynamoDbClient.builder().region(Region.US_WEST_2).build(), "fake-table"); - try { - fakeTable.scan( - new ScanFilter("foo").eq("bar"), - new ScanFilter("foo").eq("charlie")); - Assert.fail(); - } catch (IllegalArgumentException ScanFilter) { - // Ignored or expected. - } - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/GetItemOutcome.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/GetItemOutcome.java deleted file mode 100644 index 8f542961c5de..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/GetItemOutcome.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; - -/** - * The outcome of getting an item from DynamoDB table. - */ -public class GetItemOutcome { - private final GetItemResponse result; - - /** - * @param result the low-level result; must not be null - */ - public GetItemOutcome(GetItemResponse result) { - if (result == null) { - throw new IllegalArgumentException(); - } - this.result = result; - } - - /** - * Returns all the returned attributes as an {@link Item}; or null if the - * item doesn't exist. - */ - public Item getItem() { - Map attributes = - InternalUtils.toSimpleMapValue(result.item()); - Item item = Item.fromMap(attributes); - return item; - } - - /** - * Returns a non-null low-level result returned from the server side. - */ - public GetItemResponse getGetItemResponse() { - return result; - } - - @Override - public String toString() { - return String.valueOf(result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/IncompatibleTypeException.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/IncompatibleTypeException.java deleted file mode 100644 index 3bb5918b3d55..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/IncompatibleTypeException.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import software.amazon.awssdk.core.exception.SdkClientException; - -/** - * Thrown upon incompatible type during data conversion. - */ -public class IncompatibleTypeException extends SdkClientException { - private static final long serialVersionUID = 1L; - - public IncompatibleTypeException(String message) { - super(SdkClientException.builder().message(message)); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Index.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Index.java deleted file mode 100644 index dee44e58f814..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Index.java +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.api.QueryApi; -import software.amazon.awssdk.services.dynamodb.document.api.ScanApi; -import software.amazon.awssdk.services.dynamodb.document.internal.IndexQueryImpl; -import software.amazon.awssdk.services.dynamodb.document.internal.IndexScanImpl; -import software.amazon.awssdk.services.dynamodb.document.internal.ScanImpl; -import software.amazon.awssdk.services.dynamodb.document.spec.QuerySpec; -import software.amazon.awssdk.services.dynamodb.document.spec.ScanSpec; -import software.amazon.awssdk.services.dynamodb.document.spec.UpdateTableSpec; -import software.amazon.awssdk.services.dynamodb.model.DeleteGlobalSecondaryIndexAction; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndexDescription; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndexUpdate; -import software.amazon.awssdk.services.dynamodb.model.IndexStatus; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; -import software.amazon.awssdk.services.dynamodb.model.TableDescription; -import software.amazon.awssdk.services.dynamodb.model.UpdateGlobalSecondaryIndexAction; - -/** - * Represents a secondary index on a DynamoDB table. This covers - * both GSI (Global Secondary Index) and LSI (Local Secondary Index). Instance - * of this class can be obtained via {@link Table#getIndex(String)}. 
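For reference, a minimal usage sketch of the Index API described in the javadoc above; the table name, index name, and key attribute below are hypothetical, and the sketch assumes it sits alongside the document classes in software.amazon.awssdk.services.dynamodb.document:

import software.amazon.awssdk.services.dynamodb.DynamoDbClient;

public class IndexUsageSketch {
    public static void main(String[] args) {
        DynamoDbClient client = DynamoDbClient.create();
        Table table = new Table(client, "orders");            // hypothetical table name
        Index index = table.getIndex("customer-index");       // hypothetical GSI name
        // Query the index by its hash key and print each matching item as JSON.
        ItemCollection<QueryOutcome> results = index.query("customerId", "12345");
        for (Item item : results) {
            System.out.println(item.toJson());
        }
    }
}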
- */ -@ThreadSafe -public class Index implements QueryApi, ScanApi { - private static final long SLEEP_TIME_MILLIS = 5000; - private final Table table; - private final String indexName; - private final QueryApi queryDelegate; - private final ScanImpl scanDelegate; - - Index(DynamoDbClient client, String indexName, Table table) { - if (client == null) { - throw new IllegalArgumentException("client must be specified"); - } - if (indexName == null || indexName.trim().length() == 0) { - throw new IllegalArgumentException("index name must not be null or empty"); - } - if (table == null) { - throw new IllegalArgumentException("table must be specified"); - } - this.table = table; - this.indexName = indexName; - this.queryDelegate = new IndexQueryImpl(client, this); - this.scanDelegate = new IndexScanImpl(client, this); - } - - /** - * Returns the owning table. - */ - public final Table getTable() { - return table; - } - - /** - * @return the name of this index - */ - public final String getIndexName() { - return indexName; - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition) { - return queryDelegate.query(hashKey, rangeKeyCondition); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, QueryFilter... queryFilters) { - return queryDelegate.query(hashKey, rangeKeyCondition, queryFilters); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, String filterExpression, - Map nameMap, Map valueMap) { - return queryDelegate.query(hashKey, rangeKeyCondition, - filterExpression, nameMap, valueMap); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, String projectionExpression, - String filterExpression, Map nameMap, - Map valueMap) { - return queryDelegate.query(hashKey, rangeKeyCondition, - projectionExpression, filterExpression, nameMap, valueMap); - } - - @Override - public ItemCollection query(QuerySpec spec) { - return queryDelegate.query(spec); - } - - @Override - public ItemCollection query( - String hashKeyName, Object hashKeyValue) { - return queryDelegate.query(hashKeyName, hashKeyValue); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition) { - return queryDelegate.query(hashKeyName, hashKeyValue, rangeKeyCondition); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition, - QueryFilter... 
queryFilters) { - return queryDelegate.query(hashKeyName, hashKeyValue, - rangeKeyCondition, queryFilters); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition, - String filterExpression, Map nameMap, - Map valueMap) { - return queryDelegate.query(hashKeyName, hashKeyValue, - rangeKeyCondition, filterExpression, nameMap, valueMap); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition, - String filterExpression, String projectionExpression, - Map nameMap, Map valueMap) { - return queryDelegate.query(hashKeyName, hashKeyValue, - rangeKeyCondition, filterExpression, projectionExpression, - nameMap, valueMap); - } - - @Override - public ItemCollection query(KeyAttribute hashKey) { - return queryDelegate.query(hashKey); - } - - /** - * Updates the provisioned throughput for this global secondary index (GSI). - * Setting the throughput for an index helps you manage performance and is - * part of the provisioned throughput feature of DynamoDB. - *
- * <p>
    - * The provisioned throughput values can be upgraded or downgraded based on - * the maximums and minimums listed in the Limits section in the Amazon DynamoDB Developer Guide. - *
- * <p>
    - * This index must be a global secondary index and in the - * ACTIVE state for this operation to succeed. Updating a GSI - * is an asynchronous operation; while executing the operation, the index is - * in the UPDATING state. While the index is in the - * UPDATING state, the index still has the provisioned - * throughput from before the call. The new provisioned throughput setting - * is in effect only when the index returns to the ACTIVE state - * after the update is complete. - * - * @param provisionedThroughput - * target provisioned throughput - * - * @return the updated table description returned from DynamoDB. - */ - public TableDescription updateGsi( - ProvisionedThroughput provisionedThroughput) { - return table.updateTable(new UpdateTableSpec() - .withGlobalSecondaryIndexUpdates(GlobalSecondaryIndexUpdate.builder() - .update(UpdateGlobalSecondaryIndexAction.builder() - .indexName(indexName) - .provisionedThroughput(provisionedThroughput).build()) - .build())); - } - - /** - * Deletes this global secondary index (GSI) from the DynamoDB table. - * Involves network calls. - *
- * <p>
    - * This index must be a global secondary index and in the - * ACTIVE state for this operation to succeed. Deleting a GSI - * is an asynchronous operation; while executing the operation, the index is - * in the DELETING state. - * - * @return the updated table description returned from DynamoDB. - */ - public TableDescription deleteGsi() { - return table.updateTable(new UpdateTableSpec() - .withGlobalSecondaryIndexUpdates( - GlobalSecondaryIndexUpdate.builder() - .delete(DeleteGlobalSecondaryIndexAction.builder() - .indexName(indexName).build()) - .build())); - } - - /** - * A convenient blocking call that can be used, typically during index - * creation, to wait for the index to become active by polling the table - * every 5 seconds. - *
- * <p>
    - * Currently online index creation is only supported for Global Secondary - * Index (GSI). Calling this method on a Local Secondary Index (LSI) would - * result in IllegalArgumentException. - * - * @return the table description when the index has become active - * - * @throws IllegalArgumentException if the table is being deleted, or if - * the GSI is not being created or updated, or if the GSI doesn't exist - * @throws ResourceNotFoundException if the table doesn't exist - */ - public TableDescription waitForActive() throws InterruptedException { - final Table table = getTable(); - final String tableName = table.getTableName(); - final String indexName = getIndexName(); - retry: - for (; ; ) { - TableDescription desc = table.waitForActive(); - final List list = desc.globalSecondaryIndexes(); - if (list != null) { - for (GlobalSecondaryIndexDescription d : list) { - if (d.indexName().equals(indexName)) { - switch (d.indexStatus()) { - case ACTIVE: - return desc; - case CREATING: - case UPDATING: - Thread.sleep(SLEEP_TIME_MILLIS); - continue retry; - default: - throw new IllegalArgumentException( - "Global Secondary Index " - + indexName - + " is not being created or updated (with status=" - + d.indexStatusAsString() + ")"); - } - } - } - } - throw new IllegalArgumentException("Global Secondary Index " - + indexName + " does not exist in Table " + tableName + ")"); - } - } - - /** - * A convenient blocking call that can be used, typically during index - * deletion on an active table, to wait for the index to become deleted by - * polling the table every 5 seconds. - *
- * <p>
    - * Currently online index deletion is only supported for Global Secondary - * Index (GSI). The behavior of calling this method on a Local Secondary - * Index (LSI) would result in returning the latest table description. - * - * @return the table description if this GSI has been deleted; or null if - * the underlying table has been deleted. - * - * @throws IllegalArgumentException if the table is being deleted, or if the - * GSI is not being deleted. - * @throws ResourceNotFoundException if the table doesn't exist - */ - public TableDescription waitForDelete() throws InterruptedException { - final String indexName = getIndexName(); - retry: - for (; ; ) { - final TableDescription desc = getTable().waitForActive(); - List list = desc.globalSecondaryIndexes(); - if (list != null) { - for (GlobalSecondaryIndexDescription d : list) { - if (d.indexName().equals(indexName)) { - if (d.indexStatus() == IndexStatus.DELETING) { - Thread.sleep(SLEEP_TIME_MILLIS); - continue retry; - } - throw new IllegalArgumentException( - "Global Secondary Index " + indexName - + " is not being deleted (with status=" + d.indexStatusAsString() + ")"); - } - } - } - return desc; - } - } - - /** - * A convenient blocking call that can be used to wait on an index until it - * has either become active or deleted (ie no longer exists) by polling the - * table every 5 seconds. - *
- * <p>
    - * Currently online index creation/deletion is only supported for Global - * Secondary Index (GSI). The behavior of calling this method on a Local - * Secondary Index (LSI) would result in returning the latest table - * description. - * - * @return the table description when the index has become either active - * or deleted - * - * @throws IllegalArgumentException if the table is being deleted - * @throws ResourceNotFoundException if the table doesn't exist - */ - public TableDescription waitForActiveOrDelete() throws InterruptedException { - final Table table = getTable(); - final String indexName = getIndexName(); - retry: - for (; ; ) { - TableDescription desc = table.waitForActive(); - List list = desc.globalSecondaryIndexes(); - if (list != null) { - for (GlobalSecondaryIndexDescription d : desc.globalSecondaryIndexes()) { - if (d.indexName().equals(indexName)) { - if (d.indexStatus() == IndexStatus.ACTIVE) { - return desc; - } - Thread.sleep(SLEEP_TIME_MILLIS); - continue retry; - } - } - } - return desc; - } - } - - @Override - public ItemCollection scan(ScanFilter... scanFilters) { - return scanDelegate.scan(scanFilters); - } - - @Override - public ItemCollection scan(String filterExpression, - Map nameMap, Map valueMap) { - return scanDelegate.scan(filterExpression, nameMap, valueMap); - } - - @Override - public ItemCollection scan(String filterExpression, - String projectionExpression, Map nameMap, - Map valueMap) { - return scanDelegate.scan(filterExpression, projectionExpression, nameMap, valueMap); - } - - @Override - public ItemCollection scan(ScanSpec params) { - return scanDelegate.scan(params); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Item.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Item.java deleted file mode 100644 index 5c135de56989..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Item.java +++ /dev/null @@ -1,1445 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document; - -import static java.util.Arrays.asList; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.checkInvalidAttrName; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.checkInvalidAttribute; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.rejectNullInput; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.rejectNullOrEmptyInput; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.rejectNullValue; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.valToString; -import static software.amazon.awssdk.utils.BinaryUtils.copyAllBytesFrom; -import static software.amazon.awssdk.utils.BinaryUtils.copyBytesFrom; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import software.amazon.awssdk.core.util.json.JacksonUtils; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.document.internal.ItemValueConformer; -import software.amazon.awssdk.utils.BinaryUtils; - -/** - * An item in DynamoDB. An item is a collection of attributes. Each attribute - * has a name and a value. An attribute value can be one of the followings: - *
- * <ul>
- * <li>String</li>
- * <li>Set<String></li>
- * <li>Number (including any subtypes and primitive types)</li>
- * <li>Set<Number></li>
- * <li>byte[]</li>
- * <li>Set<byte[]></li>
- * <li>ByteBuffer</li>
- * <li>Set<ByteBuffer></li>
- * <li>Boolean or boolean</li>
- * <li>null</li>
- * <li>Map<String,T>, where T can be any type on this list but must not induce any circular reference</li>
- * <li>List<T>, where T can be any type on this list but must not induce any circular reference</li>
- * </ul>
    - * For an Item to be successfully persisted in DynamoDB, at a - * minimum the respective attributes for the primary key must be specified. - */ -public class Item { - private static final String DUPLICATE_VALUES_FOUND_IN_INPUT = "Duplicate values found in input"; - private static final ItemValueConformer VALUE_CONFORMER = new ItemValueConformer(); - private final Map attributes = new LinkedHashMap(); - - /** - * Convenient factory method - instantiates an Item from the - * given map. - * - * @param attributes - * simple Java types; not the DyanmoDB types - */ - public static Item fromMap(Map attributes) { - if (attributes == null) { - return null; - } - Item item = new Item(); - for (Map.Entry e : attributes.entrySet()) { - item.with(e.getKey(), e.getValue()); - } - return item; - } - - /** - * Convenient factory method - instantiates an Item from the - * given JSON string. - * - * @return an Item initialized from the given JSON document; - * or null if the input is null. - */ - public static Item fromJson(String json) { - if (json == null) { - return null; - } - @SuppressWarnings("unchecked") - Map map = (Map) - VALUE_CONFORMER.transform(JacksonUtils.fromJsonString(json, Map.class)); - return fromMap(map); - } - - /** - * Returns true if the specified attribute exists with a null value; false - * otherwise. - */ - public boolean isNull(String attrName) { - return attributes.containsKey(attrName) - && attributes.get(attrName) == null; - } - - /** - * Returns true if this item contains the specified attribute; false - * otherwise. - */ - public boolean isPresent(String attrName) { - return attributes.containsKey(attrName); - } - - /** - * Returns the value of the specified attribute in the current item as a - * string; or null if the attribute either doesn't exist or the attribute - * value is null. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public String getString(String attrName) { - Object val = attributes.get(attrName); - return valToString(val); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given string value. - */ - public Item withString(String attrName, String val) { - checkInvalidAttribute(attrName, val); - attributes.put(attrName, val); - return this; - } - - /** - * Returns the value of the specified attribute in the current item as a - * BigDecimal; or null if the attribute either doesn't exist or - * the attribute value is null. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - * - * @throws NumberFormatException if the attribute value is not a valid - * representation of a {@code BigDecimal}. - */ - public BigDecimal getNumber(String attrName) { - Object val = attributes.get(attrName); - return toBigDecimal(val); - } - - private BigDecimal toBigDecimal(Object val) { - if (val == null) { - return null; - } - return val instanceof BigDecimal - ? (BigDecimal) val - : new BigDecimal(val.toString()) - ; - } - - /** - * Returns the value of the specified attribute in the current item as an - * BigInteger; or null if the attribute doesn't exist. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. 
- * - * @throws NumberFormatException - * if the attribute value is null or not a valid representation - * of a {@code BigDecimal}. - */ - public BigInteger getBigInteger(String attrName) { - BigDecimal bd = getNumber(attrName); - return bd == null ? null : bd.toBigInteger(); - } - - /** - * Returns the value of the specified attribute in the current item as a - * short. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - * - * @throws NumberFormatException - * if the attribute value is null or not a valid representation - * of a {@code BigDecimal}. - */ - public short getShort(String attrName) { - BigDecimal bd = getNumber(attrName); - if (bd == null) { - throw new NumberFormatException("value of " + attrName + " is null"); - } - return bd.shortValue(); - } - - /** - * Returns the value of the specified attribute in the current item as an - * int. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - * - * @throws NumberFormatException - * if the attribute value is null or not a valid representation - * of a {@code BigDecimal}. - */ - public int getInt(String attrName) { - BigDecimal bd = getNumber(attrName); - if (bd == null) { - throw new NumberFormatException("value of " + attrName + " is null"); - } - return bd.intValue(); - } - - /** - * Returns the value of the specified attribute in the current item as an - * long. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - * - * @throws NumberFormatException - * if the attribute value is null or not a valid representation - * of a {@code BigDecimal}. - */ - public long getLong(String attrName) { - BigDecimal bd = getNumber(attrName); - if (bd == null) { - throw new NumberFormatException("value of " + attrName + " is null"); - } - return bd.longValue(); - } - - /** - * Returns the value of the specified attribute in the current item as a - * float. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - * - * @throws NumberFormatException - * if the attribute value is null or not a valid representation - * of a {@code BigDecimal}. - */ - public float getFloat(String attrName) { - BigDecimal bd = getNumber(attrName); - if (bd == null) { - throw new NumberFormatException("value of " + attrName + " is null"); - } - return bd.floatValue(); - } - - /** - * Returns the value of the specified attribute in the current item as a - * double. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - * - * @throws NumberFormatException - * if the attribute value is null or not a valid representation - * of a {@code BigDecimal}. - */ - public double getDouble(String attrName) { - BigDecimal bd = getNumber(attrName); - if (bd == null) { - throw new NumberFormatException("value of " + attrName + " is null"); - } - return bd.doubleValue(); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. 
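As a quick illustration of the numeric accessors above (the attribute names are made up): numbers are held internally as BigDecimal, so the typed getters simply narrow that stored value.

import java.math.BigDecimal;
import software.amazon.awssdk.services.dynamodb.document.Item;

public class ItemNumberSketch {
    public static void main(String[] args) {
        Item item = new Item()
                .withInt("quantity", 3)       // stored as BigDecimal("3")
                .withDouble("price", 9.99);   // stored as BigDecimal("9.99")
        int quantity = item.getInt("quantity");
        BigDecimal price = item.getNumber("price");
        System.out.println(quantity + " x " + price);
    }
}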
- */ - public Item withNumber(String attrName, BigDecimal val) { - checkInvalidAttribute(attrName, val); - attributes.put(attrName, val); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withNumber(String attrName, Number val) { - checkInvalidAttribute(attrName, val); - attributes.put(attrName, toBigDecimal(val)); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withInt(String attrName, int val) { - checkInvalidAttrName(attrName); - return withNumber(attrName, Integer.valueOf(val)); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withBigInteger(String attrName, BigInteger val) { - checkInvalidAttrName(attrName); - return withNumber(attrName, val); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withShort(String attrName, short val) { - checkInvalidAttrName(attrName); - return withNumber(attrName, Short.valueOf(val)); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withFloat(String attrName, float val) { - checkInvalidAttrName(attrName); - return withNumber(attrName, Float.valueOf(val)); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withDouble(String attrName, double val) { - checkInvalidAttrName(attrName); - return withNumber(attrName, Double.valueOf(val)); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withLong(String attrName, long val) { - checkInvalidAttrName(attrName); - return withNumber(attrName, Long.valueOf(val)); - } - - /** - * Returns the value of the specified attribute in the current item as a - * byte array; or null if the attribute either doesn't exist or the - * attribute value is null. - * - * @throws UnsupportedOperationException - * If the attribute value involves a byte buffer which is not - * backed by an accessible array - * - * @throws IncompatibleTypeException - * if the attribute value cannot be converted into a byte array - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public byte[] getBinary(String attrName) { - Object val = attributes.get(attrName); - return toByteArray(val); - } - - /** - * Returns the value of the specified attribute in the current item as a - * ByteBuffer; or null if the attribute either doesn't exist or - * the attribute value is null. - * - * @throws IncompatibleTypeException - * if the attribute value cannot be converted into a byte array - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public ByteBuffer getByteBuffer(String attrName) { - Object val = attributes.get(attrName); - return toByteBuffer(val); - } - - /** - * This method is assumed to be only called from a getter method, but NOT - * from a setter method. 
- */ - private byte[] toByteArray(Object val) { - if (val == null) { - return null; - } - if (val instanceof byte[]) { - return (byte[]) val; - } - if (val instanceof ByteBuffer) { - // Defensive code but execution should never get here. The internal - // representation of binary should always be - // byte[], not ByteBuffer. This allows Item to be converted into - // a JSON string via Jackson without causing trouble. - return copyAllBytesFrom((ByteBuffer) val); - } - throw new IncompatibleTypeException(val.getClass() - + " cannot be converted into a byte array"); - } - - private ByteBuffer toByteBuffer(Object val) { - if (val == null) { - return null; - } - if (val instanceof byte[]) { - return ByteBuffer.wrap((byte[]) val); - } - if (val instanceof ByteBuffer) { - // Defensive code but execution should never get here. The internal - // representation of binary should always be - // byte[], not ByteBuffer. This allows Item to be converted into - // a JSON string via Jackson without causing trouble. - return (ByteBuffer) val; - } - throw new IncompatibleTypeException(val.getClass() - + " cannot be converted into a ByteBuffer"); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withBinary(String attrName, byte[] val) { - checkInvalidAttribute(attrName, val); - attributes.put(attrName, val); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withBinary(String attrName, ByteBuffer val) { - checkInvalidAttribute(attrName, val); - // convert ByteBuffer to bytes to keep Jackson happy - attributes.put(attrName, copyBytesFrom(val)); - return this; - } - - /** - * Returns the value of the specified attribute in the current item as a set - * of strings; or null if the attribute either doesn't exist or the - * attribute value is null. - * - * @throws IncompatibleTypeException - * if the attribute value cannot be converted into a set of - * strings because of duplicate elements - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public Set getStringSet(String attrName) { - Object val = attributes.get(attrName); - if (val == null) { - return null; - } - Set stringSet = new LinkedHashSet(); - if (val instanceof Collection) { - Collection col = (Collection) val; - if (col.size() == 0) { - return stringSet; - } - for (Object element : col) { - String s = element == null ? null : valToString(element); - if (!stringSet.add(s)) { - throw new IncompatibleTypeException(val.getClass() + " cannot be converted into a set of strings because " + - "of duplicate elements"); - } - } - return stringSet; - } - stringSet.add(valToString(val)); - return stringSet; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withStringSet(String attrName, Set val) { - checkInvalidAttribute(attrName, val); - attributes.put(attrName, val); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withStringSet(String attrName, String... 
val) { - checkInvalidAttribute(attrName, val); - Set strSet = new LinkedHashSet(asList(val)); - if (strSet.size() != val.length) { - throw new IllegalArgumentException(DUPLICATE_VALUES_FOUND_IN_INPUT); - } - attributes.put(attrName, strSet); - return this; - } - - /** - * Returns the value of the specified attribute in the current item as a set - * of BigDecimal's; or null if the attribute either doesn't exist or the - * attribute value is null. - * - * @throws NumberFormatException - * if the attribute involves a value that is not a valid - * representation of a {@code BigDecimal}. - * - * @throws IncompatibleTypeException - * if the attribute value cannot be converted into a set of - * BigDecimal's because of duplicate elements - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public Set getNumberSet(String attrName) { - Object val = attributes.get(attrName); - if (val == null) { - return null; - } - Set numSet = new LinkedHashSet(); - if (val instanceof Collection) { - Collection col = (Collection) val; - if (col.size() == 0) { - return numSet; - } - for (Object element : col) { - BigDecimal bd = toBigDecimal(element); - if (!numSet.add(bd)) { - throw new IncompatibleTypeException(val.getClass() + " cannot be converted into a set of BigDecimal's " + - "because of duplicate elements"); - } - } - return numSet; - } else if (val instanceof BigDecimal) { - numSet.add((BigDecimal) val); - return numSet; - } else { - numSet.add(new BigDecimal(val.toString())); - return numSet; - } - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withBigDecimalSet(String attrName, Set val) { - checkInvalidAttribute(attrName, val); - attributes.put(attrName, val); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withBigDecimalSet(String attrName, BigDecimal... vals) { - checkInvalidAttribute(attrName, vals); - Set set = new LinkedHashSet(asList(vals)); - if (set.size() != vals.length) { - throw new IllegalArgumentException(DUPLICATE_VALUES_FOUND_IN_INPUT); - } - attributes.put(attrName, set); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withNumberSet(String attrName, Number... vals) { - checkInvalidAttribute(attrName, vals); - Set set = InternalUtils.toBigDecimalSet(vals); - if (set.size() != vals.length) { - throw new IllegalArgumentException(DUPLICATE_VALUES_FOUND_IN_INPUT); - } - return withBigDecimalSet(attrName, set); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withNumberSet(String attrName, Set vals) { - checkInvalidAttribute(attrName, vals); - Set set = InternalUtils.toBigDecimalSet(vals); - if (set.size() != vals.size()) { - throw new IllegalArgumentException(DUPLICATE_VALUES_FOUND_IN_INPUT); - } - return withBigDecimalSet(attrName, set); - } - - /** - * Returns the value of the specified attribute in the current item as a set - * of byte arrays; or null if the attribute either doesn't exist or the - * attribute value is null. - * - * @throws IncompatibleTypeException - * if the attribute value cannot be converted into a set of byte - * arrays - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. 
- * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public Set getBinarySet(String attrName) { - Object val = attributes.get(attrName); - if (val == null) { - return null; - } - Set binarySet = new LinkedHashSet(); - if (val instanceof Collection) { - Collection col = (Collection) val; - if (col.size() == 0) { - return binarySet; - } - for (Object element : col) { - byte[] ba = toByteArray(element); - if (!binarySet.add(ba)) { - throw new IncompatibleTypeException(val.getClass() + " cannot be converted into a set of byte arrays " + - "because of duplicate elements"); - } - } - return binarySet; - } else if (val instanceof byte[]) { - binarySet.add((byte[]) val); - return binarySet; - } else if (val instanceof ByteBuffer) { - // Defensive code but execution should never get here. The internal - // representation of binary should always be - // byte[], not ByteBuffer. This allows Item to be converted into - // a JSON string via Jackson without causing trouble. - ByteBuffer bb = (ByteBuffer) val; - binarySet.add(copyAllBytesFrom(bb)); - return binarySet; - } - throw new IncompatibleTypeException(val.getClass() - + " cannot be converted into a set of byte arrays"); - } - - /** - * Returns the value of the specified attribute in the current item as a set - * of ByteBuffer; or null if the attribute either doesn't exist - * or the attribute value is null. - * - * @throws IncompatibleTypeException - * if the attribute value cannot be converted into a set of - * ByteBuffer - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public Set getByteBufferSet(String attrName) { - Object val = attributes.get(attrName); - if (val == null) { - return null; - } - Set binarySet = new LinkedHashSet(); - if (val instanceof Collection) { - Collection col = (Collection) val; - if (col.size() == 0) { - return binarySet; - } - for (Object element : col) { - ByteBuffer ba = toByteBuffer(element); - if (!binarySet.add(ba)) { - throw new IncompatibleTypeException(val.getClass() + " cannot be converted into a set of ByteBuffer " + - "because of duplicate elements"); - } - } - return binarySet; - } else if (val instanceof ByteBuffer) { - // Defensive code but execution should never get here. The internal - // representation of binary should always be - // byte[], not ByteBuffer. This allows Item to be converted into - // a JSON string via Jackson without causing trouble. - binarySet.add((ByteBuffer) val); - return binarySet; - } else if (val instanceof byte[]) { - binarySet.add(ByteBuffer.wrap((byte[]) val)); - return binarySet; - } - throw new IncompatibleTypeException(val.getClass() - + " cannot be converted into a set of ByteBuffer"); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withBinarySet(String attrName, Set val) { - checkInvalidAttribute(attrName, val); - attributes.put(attrName, val); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. 
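A short sketch of the set-valued accessors above (attribute name and values are made up); note that duplicate input elements are rejected with an IllegalArgumentException.

import java.util.Set;
import software.amazon.awssdk.services.dynamodb.document.Item;

public class ItemSetSketch {
    public static void main(String[] args) {
        Item item = new Item().withStringSet("tags", "red", "green", "blue");
        Set<String> tags = item.getStringSet("tags");
        System.out.println(tags);                      // [red, green, blue]
        try {
            item.withStringSet("tags", "dup", "dup");  // duplicates are rejected
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage()); // "Duplicate values found in input"
        }
    }
}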
- */ - public Item withByteBufferSet(String attrName, Set val) { - checkInvalidAttribute(attrName, val); - // convert ByteBuffer to bytes to keep Jackson happy - Set set = new LinkedHashSet(val.size()); - for (ByteBuffer bb : val) { - set.add(copyBytesFrom(bb)); - } - attributes.put(attrName, set); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withBinarySet(String attrName, byte[]... vals) { - checkInvalidAttribute(attrName, vals); - Set set = new LinkedHashSet(asList(vals)); - if (set.size() != vals.length) { - throw new IllegalArgumentException(DUPLICATE_VALUES_FOUND_IN_INPUT); - } - attributes.put(attrName, set); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withBinarySet(String attrName, ByteBuffer... vals) { - checkInvalidAttribute(attrName, vals); - // convert ByteBuffer to bytes to keep Jackson happy - Set set = new LinkedHashSet(vals.length); - for (ByteBuffer bb : vals) { - set.add(copyBytesFrom(bb)); - } - if (set.size() != vals.length) { - throw new IllegalArgumentException(DUPLICATE_VALUES_FOUND_IN_INPUT); - } - attributes.put(attrName, set); - return this; - } - - /** - * Returns the value of the specified attribute in the current item as a set - * of T's.; or null if the attribute either doesn't exist or - * the attribute value is null. - * - * @throws ClassCastException - * if the attribute involves a value that cannot be casted to - * T - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public List getList(String attrName) { - Object val = attributes.get(attrName); - if (val == null) { - return null; - } - if (val instanceof List) { - @SuppressWarnings("unchecked") - List ret = (List) val; - return ret; - } - List list = new ArrayList(); - if (val instanceof Collection) { - Collection col = (Collection) val; - for (Object element : col) { - @SuppressWarnings("unchecked") - T t = (T) element; - list.add(t); - } - return list; - } - @SuppressWarnings("unchecked") - T t = (T) val; - list.add(t); - return list; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withList(String attrName, List val) { - checkInvalidAttribute(attrName, val); - attributes.put(attrName, VALUE_CONFORMER.transform(val)); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given values as a list. - */ - public Item withList(String attrName, Object... vals) { - checkInvalidAttribute(attrName, vals); - List listIn = asList(vals); - attributes.put(attrName, VALUE_CONFORMER.transform(listIn)); - return this; - } - - /** - * Returns the value of the specified attribute in the current item as a map - * of string-to-T's; or null if the attribute either doesn't - * exist or the attribute value is null. Note that any numeric type of a - * map is always canonicalized into BigDecimal, and therefore - * if T referred to a Number type, it would need - * to be BigDecimal to avoid a class cast exception. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. 
- * - * @throws ClassCastException - * if the attribute is not a map of string to T - */ - @SuppressWarnings("unchecked") - public Map getMap(String attrName) { - return (Map) attributes.get(attrName); - } - - /** - * Convenient method to return the specified attribute in the current item - * as a (copy of) map of string-to-T's where T must be a - * subclass of Number; or null if the attribute doesn't - * exist. - * - * @param attrName - * the attribute name - * @param valueType - * the specific number type of the value to be returned. - * Currently, only
- * <ul>
- * <li>Short</li>
- * <li>Integer</li>
- * <li>Long</li>
- * <li>Float</li>
- * <li>Double</li>
- * <li>Number</li>
- * <li>BigDecimal</li>
- * <li>BigInteger</li>
- * </ul>
    are supported. - * - * @throws UnsupportedOperationException - * if the value type is not supported - * @throws ClassCastException - * if the attribute is not a map of string to numbers - */ - @SuppressWarnings("unchecked") - public Map getMapOfNumbers(String attrName, - Class valueType) { - if (valueType == Short.class - || valueType == Integer.class - || valueType == Long.class - || valueType == Float.class - || valueType == Double.class - || valueType == Number.class - || valueType == BigDecimal.class - || valueType == BigInteger.class) { - final Map src = - (Map) attributes.get(attrName); - if (src == null) { - return null; - } - final Map dst = new LinkedHashMap(src.size()); - for (Map.Entry e : src.entrySet()) { - final String key = e.getKey(); - final BigDecimal val = e.getValue(); - if (val == null) { - dst.put(key, null); - } else if (valueType == Short.class) { - dst.put(key, (T) Short.valueOf(val.shortValue())); - } else if (valueType == Integer.class) { - dst.put(key, (T) Integer.valueOf(val.intValue())); - } else if (valueType == Long.class) { - dst.put(key, (T) Long.valueOf(val.longValue())); - } else if (valueType == Float.class) { - dst.put(key, (T) Float.valueOf(val.floatValue())); - } else if (valueType == Double.class) { - dst.put(key, (T) Double.valueOf(val.doubleValue())); - } else if (valueType == BigDecimal.class || valueType == Number.class) { - dst.put(key, (T) val); - } else if (valueType == BigInteger.class) { - dst.put(key, (T) val.toBigInteger()); - } - } - return dst; - } else { - throw new UnsupportedOperationException("Value type " + valueType - + " is not currently supported"); - } - } - - /** - * Convenient method to return the value of the specified attribute in the - * current item as a map of string-to-Object's; or null if the - * attribute either doesn't exist or the attribute value is null. Note that - * any numeric type of the map will be returned as BigDecimal. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - * - * @throws ClassCastException if the attribute is not a map - */ - @SuppressWarnings("unchecked") - public Map getRawMap(String attrName) { - return (Map) attributes.get(attrName); - } - - /** - * Sets the value of the specified attribute in the current item to the - * given value. - */ - public Item withMap(String attrName, Map val) { - checkInvalidAttribute(attrName, val); - attributes.put(attrName, VALUE_CONFORMER.transform(val)); - return this; - } - - /** - * Sets the value of the specified attribute in the current item to the - * given JSON document in the form of a string. - */ - public Item withJson(String attrName, String json) { - checkInvalidAttribute(attrName, json); - attributes.put(attrName, - VALUE_CONFORMER.transform(JacksonUtils.fromJsonString(json, Object.class))); - return this; - } - - /** - * Returns the value of the specified attribute in the current item as a - * JSON string; or null if the attribute either doesn't - * exist or the attribute value is null. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public String getJson(String attrName) { - checkInvalidAttrName(attrName); - Object val = attributes.get(attrName); - return val == null ? 
null : JacksonUtils.toJsonString(val); - } - - /** - * Returns the value of the specified attribute in the current item as a - * JSON string with pretty indentation; or null if the attribute either - * doesn't exist or the attribute value is null. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public String getJsonPretty(String attrName) { - checkInvalidAttrName(attrName); - Object val = attributes.get(attrName); - return val == null ? null : JacksonUtils.toJsonPrettyString(val); - } - - /** - * Returns the value of the specified attribute in the current item as a - * non-null Boolean. - * - * @throws IncompatibleTypeException - * if either the attribute doesn't exist or if the attribute - * value cannot be converted into a non-null Boolean value - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public Boolean getBool(String attrName) { - final Object val = attributes.get(attrName); - if (val instanceof Boolean) { - return (Boolean) val; - } - if (val instanceof String) { - if ("1".equals(val)) { - return true; - } - if ("0".equals(val)) { - return false; - } - return Boolean.valueOf((String) val); - } - throw new IncompatibleTypeException("Value of attribute " + attrName - + " of type " + getTypeOf(attrName) - + " cannot be converted into a boolean value"); - } - - /** - * Returns the value of the specified attribute in the current item as a - * primitive boolean. - * - * @throws IncompatibleTypeException - * if either the attribute doesn't exist or if the attribute - * value cannot be converted into a boolean value - */ - public boolean getBoolean(String attrName) { - final Boolean b = getBool(attrName); - return b.booleanValue(); - } - - /** - * Sets the value of the specified attribute in the current item to the - * boolean value. - */ - public Item withBoolean(String attrName, boolean val) { - checkInvalidAttrName(attrName); - attributes.put(attrName, Boolean.valueOf(val)); - return this; - } - - /** - * Sets the value of the specified attribute to null. - */ - public Item withNull(String attrName) { - checkInvalidAttrName(attrName); - attributes.put(attrName, null); - return this; - } - - /** - * Sets the value of the specified attribute to the given value. An - * attribute value can be a - *
- * <ul>
- * <li>Number</li>
- * <li>String</li>
- * <li>binary (ie byte array or byte buffer)</li>
- * <li>boolean</li>
- * <li>null</li>
- * <li>list (of any of the types on this list)</li>
- * <li>map (with string key to value of any of the types on this list)</li>
- * <li>set (of any of the types on this list)</li>
- * </ul>
    - */ - public Item with(String attrName, Object val) { - if (val == null) { - return withNull(attrName); - } - if (val instanceof String) { - return withString(attrName, (String) val); - } - if (val instanceof Number) { - return withNumber(attrName, (Number) val); - } - if (val instanceof byte[]) { - return withBinary(attrName, (byte[]) val); - } - if (val instanceof ByteBuffer) { - return withBinary(attrName, (ByteBuffer) val); - } - if (val instanceof Boolean) { - return withBoolean(attrName, (Boolean) val); - } - if (val instanceof List) { - return withList(attrName, (List) val); - } - if (val instanceof Map) { - @SuppressWarnings("unchecked") - Map map = (Map) val; - return withMap(attrName, map); - } - if (val instanceof Set) { - Set set = (Set) val; - // Treat an empty set as a set of String - if (set.size() == 0) { - @SuppressWarnings("unchecked") - Set ss = (Set) val; - return withStringSet(attrName, ss); - } - // Try to locate the first non-null element and use that as the - // representative type - Object representative = null; - for (Object o : set) { - if (o != null) { - representative = o; - } - } - // If all elements are null, treat the element type as String - if (representative == null || representative instanceof String) { - @SuppressWarnings("unchecked") - Set ss = (Set) val; - return withStringSet(attrName, ss); - } - if (representative instanceof Number) { - @SuppressWarnings("unchecked") - Set ns = (Set) val; - return withNumberSet(attrName, ns); - } - if (representative instanceof byte[]) { - @SuppressWarnings("unchecked") - Set bs = (Set) val; - return withBinarySet(attrName, bs); - } - if (representative instanceof ByteBuffer) { - @SuppressWarnings("unchecked") - Set bs = (Set) val; - return withByteBufferSet(attrName, bs); - } - throw new UnsupportedOperationException("Set of " - + representative.getClass() + " is not currently supported"); - } - throw new UnsupportedOperationException("Input type " - + val.getClass() + " is not currently supported"); - } - - /** - * Convenient methods - sets the attributes of this item from the given - * key attributes. - */ - public Item withPrimaryKey(PrimaryKey primaryKey) { - rejectNullValue(primaryKey); - if (primaryKey.getComponents().size() == 0) { - throw new IllegalArgumentException("primary key must not be empty"); - } - for (KeyAttribute ka : primaryKey.getComponents()) { - this.with(ka.name(), ka.value()); - } - return this; - } - - /** - * Convenient method to set the attributes of this item from the given - * hash-only primary key name and value. - */ - public Item withPrimaryKey(String hashKeyName, Object hashKeyValue) { - return withKeyComponent(hashKeyName, hashKeyValue); - } - - /** - * Convenient method to set the attributes of this item from the given - * hash and range primary key. - */ - public Item withPrimaryKey(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - return withKeyComponent(hashKeyName, hashKeyValue) - .withKeyComponent(rangeKeyName, rangeKeyValue); - } - - /** - * Convenient methods - sets the attributes of this item from the specified - * key components. - */ - public Item withKeyComponents(KeyAttribute... components) { - rejectNullOrEmptyInput(components); - for (KeyAttribute ka : components) { - rejectNullValue(ka); - this.with(ka.name(), ka.value()); - } - return this; - } - - /** - * Convenient methods - sets an attribute of this item for the specified - * key attribute name and value. 
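A brief sketch of the key helpers above (key names and values are hypothetical), combining a hash-and-range primary key with plain and JSON attributes.

import software.amazon.awssdk.services.dynamodb.document.Item;

public class ItemKeySketch {
    public static void main(String[] args) {
        Item order = new Item()
                .withPrimaryKey("customerId", "12345", "orderId", 7)  // hash + range key
                .withString("status", "PENDING")
                .withJson("details", "{\"lineItems\": 3}");
        System.out.println(order.toJson());
    }
}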
- */ - public Item withKeyComponent(String keyAttrName, Object keyAttrValue) { - return with(keyAttrName, keyAttrValue); - } - - /** - * Returns the value of the specified attribute in the current item as an - * object; or null if the attribute either doesn't exist or the attribute - * value is null. - *

- *
- * An attribute value can be a
- * <ul>
- * <li>Number</li>
- * <li>String</li>
- * <li>binary (ie byte array or byte buffer)</li>
- * <li>boolean</li>
- * <li>null</li>
- * <li>list (of any of the types on this list)</li>
- * <li>map (with string key to value of any of the types on this list)</li>
- * <li>set (of any of the types on this list)</li>
- * </ul>
    - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public Object get(String attrName) { - return attributes.get(attrName); - } - - /** - * Returns the type of the specified attribute in the current item; or null - * if the attribute either doesn't exist or the attribute value is null. - * - * @see #isNull(String) #isNull(String) to check if the attribute value is - * null. - * @see #isPresent(String) #isPresent(String) to check if the attribute - * value is present. - */ - public Class getTypeOf(String attrName) { - Object val = attributes.get(attrName); - return val == null ? null : val.getClass(); - } - - /** - * Removes the specified attribute from the current item. - */ - public Item removeAttribute(String attrName) { - checkInvalidAttrName(attrName); - attributes.remove(attrName); - return this; - } - - /** - * Returns all attributes of the current item. - */ - public Iterable> attributes() { - return new LinkedHashMap(attributes).entrySet(); - } - - /** - * Returns true if this item has the specified attribute; false otherwise. - */ - public boolean hasAttribute(String attrName) { - return attributes.containsKey(attrName); - } - - /** - * Returns all attributes of the current item as a map. - */ - public Map asMap() { - return new LinkedHashMap(attributes); - } - - /** - * Returns the number of attributes of this item. - */ - public int numberOfAttributes() { - return attributes.size(); - } - - /** - * Returns this item as a JSON string. Note all binary data will become - * base-64 encoded in the resultant string. - */ - public String toJson() { - return JacksonUtils.toJsonString(this.attributes); - } - - /** - * Utility method to decode the designated binary attributes from base-64 - * encoding; converting binary lists into binary sets. - * - * @param binaryAttrNames - * names of binary attributes or binary set attributes currently - * base-64 encoded (typically when converted from a JSON string.) - * - * @see #fromJson(String) - */ - public Item base64Decode(String... binaryAttrNames) { - rejectNullInput(binaryAttrNames); - // Verify all attributes are good - for (String attrName : binaryAttrNames) { - checkInvalidAttrName(attrName); - if (String.class == getTypeOf(attrName)) { - String b64 = getString(attrName); - BinaryUtils.fromBase64(b64); - } else { - Set b64s = getStringSet(attrName); - for (String b64 : b64s) { - BinaryUtils.fromBase64(b64); - } - } - } - // Decodes b64 into binary - for (String attrName : binaryAttrNames) { - if (String.class == getTypeOf(attrName)) { - String b64 = getString(attrName); - byte[] bytes = BinaryUtils.fromBase64(b64); - withBinary(attrName, bytes); - } else { - Set b64s = getStringSet(attrName); - Set binarySet = new LinkedHashSet(b64s.size()); - for (String b64 : b64s) { - binarySet.add(BinaryUtils.fromBase64(b64)); - } - withBinarySet(attrName, binarySet); - } - } - return this; - } - - /** - * Utility method to converts the designated attributes from - * List into Set, throwing - * IllegalArgumentException should there be duplicate elements. - * - * @param listAttrNames - * names of attributes to be converted. - * - * @see #fromJson(String) - */ - public Item convertListsToSets(String... 
listAttrNames) { - rejectNullInput(listAttrNames); - // Verify all attributes are good - for (String attrName : listAttrNames) { - checkInvalidAttrName(attrName); - if (List.class.isAssignableFrom(getTypeOf(attrName))) { - List list = getList(attrName); - if (list != null) { - for (Object e : list) { - if (e instanceof String) { - Set ss = getStringSet(attrName); - if (list.size() != ss.size()) { - throw new IllegalArgumentException("List cannot be converted to Set due to duplicate elements"); - } - } else if (e instanceof Number) { - Set ss = getNumberSet(attrName); - if (list.size() != ss.size()) { - throw new IllegalArgumentException("List cannot be converted to Set due to duplicate elements"); - } - } else if (e instanceof byte[]) { - Set ss = getBinarySet(attrName); - if (list.size() != ss.size()) { - throw new IllegalArgumentException("List cannot be converted to Set due to duplicate elements"); - } - } - } - } - } else { - throw new IllegalArgumentException("Attribute " + attrName + " is not a list"); - } - } - // Do the conversion - for (String attrName : listAttrNames) { - checkInvalidAttrName(attrName); - List list = getList(attrName); - if (list != null) { - boolean converted = false; - for (Object e : list) { - if (e instanceof String) { - Set set = getStringSet(attrName); - withStringSet(attrName, set); - converted = true; - break; - } else if (e instanceof Number) { - Set set = getNumberSet(attrName); - withBigDecimalSet(attrName, set); - converted = true; - break; - } else if (e instanceof byte[]) { - Set set = getBinarySet(attrName); - withBinarySet(attrName, set); - converted = true; - break; - } - } - if (!converted) { - // All elements are null. So treat it as a String set. - Set set = getStringSet(attrName); - withStringSet(attrName, set); - } - } - } - return this; - } - - /** - * Returns this item as a pretty JSON string. Note all binary data will - * become base-64 encoded in the resultant string. - */ - public String toJsonPretty() { - return JacksonUtils.toJsonPrettyString(this.attributes); - } - - @Override - public String toString() { - return "{ Item: " + attributes.toString() + " }"; - } - - @Override - public int hashCode() { - return attributes.hashCode(); - } - - @Override - public boolean equals(Object in) { - if (in instanceof Item) { - Item that = (Item) in; - return this.attributes.equals(that.attributes); - } else { - return false; - } - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ItemCollection.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ItemCollection.java deleted file mode 100644 index 1f655209c04b..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ItemCollection.java +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
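
For context on the Item helpers deleted above, here is a minimal sketch of how toJson()/fromJson(), base64Decode() and convertListsToSets() were typically combined. The class name and the attribute names ("id", "binaryA", "strSetA") are invented for illustration and are not taken from the SDK:

    import software.amazon.awssdk.services.dynamodb.document.Item;

    public class ItemJsonRoundTripSketch {
        public static Item roundTrip() {
            // Build an item with a binary attribute and a string set (attribute names are made up).
            Item original = new Item()
                    .withPrimaryKey("id", "item-1")
                    .withBinary("binaryA", new byte[] {1, 2, 3})
                    .withStringSet("strSetA", "a", "b");

            // toJson() base-64 encodes binary data and turns sets into JSON lists.
            String json = original.toJson();

            // fromJson() alone cannot recover binary or set types, so the caller restores them,
            // as the Javadoc of base64Decode() and convertListsToSets() describes.
            return Item.fromJson(json)
                    .base64Decode("binaryA")          // base-64 strings back into byte[]
                    .convertListsToSets("strSetA");   // lists back into sets (rejects duplicates)
        }
    }
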
- */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.document.internal.PageBasedCollection; -import software.amazon.awssdk.services.dynamodb.document.internal.PageIterable; -import software.amazon.awssdk.services.dynamodb.model.Capacity; -import software.amazon.awssdk.services.dynamodb.model.ConsumedCapacity; - -/** - * A collection of Item's. - * - * An ItemCollection object maintains a cursor pointing to its - * current pages of data. Initially the cursor is positioned before the first page. - * The next method moves the cursor to the next row, and because it returns - * false when there are no more rows in the ItemCollection object, - * it can be used in a while loop to iterate through the collection. - * - * Network calls can be triggered when the collection is iterated across page - * boundaries. - * - * @param low level result type - */ -public abstract class ItemCollection extends PageBasedCollection { - private int accumulatedItemCount; - private int accumulatedScannedCount; - private ConsumedCapacity accumulatedConsumedCapacity; - - protected final void accumulateStats(ConsumedCapacity consumedCapacity, - Integer count, Integer scannedCount) { - if (consumedCapacity != null) { - if (accumulatedConsumedCapacity == null) { - // Create a new consumed capacity by cloning the one passed in - ConsumedCapacity.Builder cloneBuilder = ConsumedCapacity.builder(); - - cloneBuilder.capacityUnits(consumedCapacity.capacityUnits()); - cloneBuilder.globalSecondaryIndexes( - clone(consumedCapacity.globalSecondaryIndexes())); - cloneBuilder.localSecondaryIndexes( - clone(consumedCapacity.localSecondaryIndexes())); - cloneBuilder.table(clone(consumedCapacity.table())); - cloneBuilder.tableName(consumedCapacity.tableName()); - - this.accumulatedConsumedCapacity = cloneBuilder.build(); - } else { - // Accumulate the capacity units - final Double capunit = accumulatedConsumedCapacity.capacityUnits(); - final Double delta = consumedCapacity.capacityUnits(); - if (capunit == null) { - accumulatedConsumedCapacity = accumulatedConsumedCapacity.toBuilder().capacityUnits(delta).build(); - } else { - accumulatedConsumedCapacity = accumulatedConsumedCapacity.toBuilder().capacityUnits(capunit.doubleValue() - + (delta == null ? 
0 : delta.doubleValue())).build(); - } - // Accumulate the GSI capacities - final Map gsi = accumulatedConsumedCapacity.globalSecondaryIndexes(); - if (gsi == null) { - accumulatedConsumedCapacity = accumulatedConsumedCapacity.toBuilder().globalSecondaryIndexes( - clone(consumedCapacity.globalSecondaryIndexes())).build(); - } else { - accumulatedConsumedCapacity = accumulatedConsumedCapacity.toBuilder().globalSecondaryIndexes(add( - consumedCapacity.globalSecondaryIndexes(), - clone(accumulatedConsumedCapacity.globalSecondaryIndexes()))).build(); - } - // Accumulate the LSI capacities - final Map lsi = accumulatedConsumedCapacity.localSecondaryIndexes(); - if (lsi == null) { - accumulatedConsumedCapacity = accumulatedConsumedCapacity.toBuilder().localSecondaryIndexes( - clone(consumedCapacity.localSecondaryIndexes())).build(); - } else { - accumulatedConsumedCapacity = accumulatedConsumedCapacity.toBuilder().localSecondaryIndexes(add( - consumedCapacity.localSecondaryIndexes(), - clone(accumulatedConsumedCapacity.localSecondaryIndexes()))).build(); - } - // Accumulate table capacity - final Capacity tableCapacity = accumulatedConsumedCapacity.table(); - if (tableCapacity == null) { - accumulatedConsumedCapacity = accumulatedConsumedCapacity.toBuilder() - .table(clone(consumedCapacity.table())) - .build(); - } else { - accumulatedConsumedCapacity = accumulatedConsumedCapacity.toBuilder() - .table(add(consumedCapacity.table(), - accumulatedConsumedCapacity.table())).build(); - } - } - } - if (count != null) { - this.accumulatedItemCount += count.intValue(); - } - if (scannedCount != null) { - this.accumulatedScannedCount += scannedCount.intValue(); - } - } - - private Map add(Map from, Map to) { - if (to == null) { - return clone(from); - } - if (from != null) { - for (Map.Entry entryFrom : from.entrySet()) { - final String key = entryFrom.getKey(); - final Capacity tocap = to.get(key); - final Capacity fromcap = entryFrom.getValue(); - if (tocap == null) { - to.put(key, clone(fromcap)); - } else { - to.put(key, Capacity.builder().capacityUnits( - doubleOf(tocap) + doubleOf(fromcap)).build()); - } - } - } - return to; - } - - private Capacity add(final Capacity from, final Capacity to) { - return Capacity.builder().capacityUnits(doubleOf(from) + doubleOf(to)).build(); - } - - private Map clone(Map capacityMap) { - if (capacityMap == null) { - return null; - } - Map clone = - new HashMap(capacityMap.size()); - for (Map.Entry e : capacityMap.entrySet()) { - clone.put(e.getKey(), clone(e.getValue())); - } - return clone; - } - - private Capacity clone(Capacity capacity) { - return capacity == null - ? null - : Capacity.builder().capacityUnits(capacity.capacityUnits()).build(); - } - - private double doubleOf(Capacity cap) { - if (cap == null) { - return 0.0; - } - Double val = cap.capacityUnits(); - return val == null ? 0.0 : val.doubleValue(); - } - - /** - * Returns the count of items accumulated so far. - * @deprecated This method returns the accumulated count and not the total count. - * Use {@link #getAccumulatedItemCount} instead. - */ - @Deprecated - public int getTotalCount() { - return getAccumulatedItemCount(); - } - - /** - * Returns the count of items accumulated so far. - */ - public int getAccumulatedItemCount() { - return accumulatedItemCount; - } - - /** - * Returns the scanned count accumulated so far. - * @deprecated This method returns the accumulated count and not the total count. - * Use {@link #getAccumulatedScannedCount} instead. 
- */ - @Deprecated - public int getTotalScannedCount() { - return getAccumulatedScannedCount(); - } - - /** - * Returns the scanned count accumulated so far. - */ - public int getAccumulatedScannedCount() { - return accumulatedScannedCount; - } - - /** - * Returns the consumed capacity accumulated so far. - * @deprecated This method returns the accumulated consumed capacity and not the total. - * Use {@link #getAccumulatedScannedCount} instead. - */ - @Deprecated - public ConsumedCapacity getTotalConsumedCapacity() { - return getAccumulatedConsumedCapacity(); - } - - /** - * Returns the consumed capacity accumulated so far. - */ - public ConsumedCapacity getAccumulatedConsumedCapacity() { - return accumulatedConsumedCapacity; - } - - // Overriding these just so javadocs will show up. - - /** - * Returns an {@code Iterable>} that iterates over pages of - * items from this collection. Each call to {@code Iterator.next} on an - * {@code Iterator} returned from this {@code Iterable} results in exactly - * one call to DynamoDB to retrieve a single page of results. - *

- *
- * ItemCollection<QueryResponse> collection = ...;
- * for (Page<Item> page : collection.pages()) {
- *     processItems(page);
- *
- *     ConsumedCapacity consumedCapacity =
- *         page.getLowLevelResult().getConsumedCapacity();
- *
- *     Thread.sleep(getBackoff(consumedCapacity.getCapacityUnits()));
- * }
- *

    - * The use of the internal/undocumented {@code PageIterable} class instead - * of {@code Iterable} in the public interface here is retained for - * backwards compatibility. It doesn't expose any methods beyond those - * of the {@code Iterable} interface. This method will be changed to return - * an {@code Iterable>} directly in a future release of the - * SDK. - * - * @see Page - */ - @Override - public PageIterable pages() { - return super.pages(); - } - - /** - * Returns the maximum number of resources to be retrieved in this - * collection; or null if there is no limit. - */ - @Override - public abstract Integer getMaxResultSize(); - - /** - * Returns the low-level result last retrieved (for the current page) from - * the server side; or null if there has yet no calls to the server. - */ - @Override - public R getLastLowLevelResult() { - return super.getLastLowLevelResult(); - } - - /** - * Used to register a listener for the event of receiving a low-level result - * from the server side. - * - * @param listener - * listener to be registered. If null, a "none" listener will be - * set. - * @return the previously registered listener. The return value is never - * null. - */ - @Override - public LowLevelResultListener registerLowLevelResultListener( - LowLevelResultListener listener) { - - return super.registerLowLevelResultListener(listener); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ItemTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ItemTest.java deleted file mode 100644 index 95c62194a295..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ItemTest.java +++ /dev/null @@ -1,921 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
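
A hypothetical sketch of the ItemCollection accessors removed above (registerLowLevelResultListener and the getAccumulated* getters). It assumes, as in the v1 document API that this test copy mirrors, that the collection can be iterated item by item; the class name, the drainAndReport helper and the printed messages are illustrative only:

    import software.amazon.awssdk.services.dynamodb.document.Item;
    import software.amazon.awssdk.services.dynamodb.document.ItemCollection;
    import software.amazon.awssdk.services.dynamodb.document.LowLevelResultListener;
    import software.amazon.awssdk.services.dynamodb.model.ConsumedCapacity;
    import software.amazon.awssdk.services.dynamodb.model.QueryResponse;

    public class ItemCollectionStatsSketch {
        // How the collection is obtained (e.g. from a query) is outside the scope of this diff.
        public static void drainAndReport(ItemCollection<QueryResponse> collection) {
            // Observe each low-level response as its page is fetched.
            collection.registerLowLevelResultListener(new LowLevelResultListener<QueryResponse>() {
                @Override
                public void onLowLevelResult(QueryResponse response) {
                    System.out.println("page consumed capacity: " + response.consumedCapacity());
                }
            });

            for (Item item : collection) {
                // Consume every item so that all pages (and their statistics) are retrieved.
            }

            // Totals accumulated across all pages seen so far; the consumed capacity may be null
            // unless the underlying requests asked the service to return it.
            int items = collection.getAccumulatedItemCount();
            int scanned = collection.getAccumulatedScannedCount();
            ConsumedCapacity capacity = collection.getAccumulatedConsumedCapacity();
            System.out.println(items + " items, " + scanned + " scanned, capacity=" + capacity);
        }
    }
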
- */ - -package software.amazon.awssdk.services.dynamodb.document; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.junit.Assert; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.document.utils.FluentArrayList; -import software.amazon.awssdk.services.dynamodb.document.utils.FluentHashSet; -import software.amazon.awssdk.services.dynamodb.document.utils.ValueMap; -import software.amazon.awssdk.utils.BinaryUtils; - -public class ItemTest { - - @Test - public void jsonDoubleMax() { - double[] values = { - Double.MAX_VALUE, Double.MIN_NORMAL, Double.MIN_VALUE, Double.MIN_NORMAL - }; - for (double val : values) { - doJsonDoubleTest(val); - } - } - - private void doJsonDoubleTest(double value) { - Item item1 = new Item().withDouble("double", value); - final String json = item1.toJsonPretty(); - System.out.println(json); - Item item2 = Item.fromJson(json); - assertEquals(json, item2.toJsonPretty()); - } - - @Test - public void isNull() { - Item item = new Item(); - assertFalse(item.isNull("test")); - - item.withNull("test"); - assertTrue(item.isNull("test")); - - item.removeAttribute("test"); - assertFalse(item.isNull("test")); - - item.withString("test", "foo"); - assertFalse(item.isNull("test")); - assertEquals("foo", item.getString("test")); - } - - @Test - public void is_null() { - Item item = new Item(); - assertFalse(item.isNull("test")); - - item.with("test", null); - assertTrue(item.isNull("test")); - - assertNull(item.get("test")); - - item.removeAttribute("test"); - assertFalse(item.isNull("test")); - - assertNull(item.get("test")); - } - - @Test - public void isPresent() { - Item item = new Item(); - assertFalse(item.isPresent("test")); - - item.withNull("test"); - assertTrue(item.isPresent("test")); - - item.removeAttribute("test"); - assertFalse(item.isPresent("test")); - - item.withString("test", "foo"); - assertTrue(item.isPresent("test")); - } - - @Test - public void toBigDecimal_Null() { - Item item = new Item(); - assertNull(item.getNumber("test")); - } - - @Test(expected = NumberFormatException.class) - public void getInt_Null() { - Item item = new Item(); - item.getInt("test"); - } - - @Test(expected = NumberFormatException.class) - public void getLong_Null() { - Item item = new Item(); - item.getLong("test"); - } - - @Test(expected = NumberFormatException.class) - public void getNumber_NonNumber() { - Item item = new Item(); - item.withString("test", "foo"); - item.getNumber("test"); - } - - @Test - public void withNumber() { - Item item = new Item(); - item.withNumber("test", BigDecimal.ONE); - assertSame(BigDecimal.ONE, item.getNumber("test")); - assertTrue(1 == item.getInt("test")); - assertTrue(1L == item.getLong("test")); - } - - @Test - public void withLong() { - Item item = new Item(); - item.withLong("test", 123L); - assertTrue(123L == item.getLong("test")); - } - - @Test - public void toByteArray() { - Item item = new Item(); - assertNull(item.getBinary("test")); - byte[] bytes = {1, 2, 3}; - item.withBinary("test", bytes); - assertTrue(Arrays.equals(bytes, item.getBinary("test"))); - ByteBuffer bb = ByteBuffer.wrap(bytes); 
- item.withBinary("test", bb); - assertTrue(byte[].class == item.getTypeOf("test")); - assertTrue(Arrays.equals(bytes, item.getBinary("test"))); - assertTrue(Arrays.equals(bytes, item.getByteBuffer("test").array())); - } - - @Test(expected = IncompatibleTypeException.class) - public void toByteArray_IncompatibleTypeException() { - Item item = new Item(); - item.withString("test", "foo"); - item.getBinary("test"); - } - - @Test - public void toByteBuffer() { - Item item = new Item(); - assertNull(item.getByteBuffer("test")); - byte[] bytes = {1, 2, 3}; - item.withBinary("test", ByteBuffer.wrap(bytes)); - ByteBuffer toByteBuffer = item.getByteBuffer("test"); - assertTrue(Arrays.equals(bytes, toByteBuffer.array())); - assertTrue(Arrays.equals(bytes, item.getBinary("test"))); - item.withBinary("test", bytes); - assertSame(byte[].class, item.getTypeOf("test")); - assertTrue(Arrays.equals(bytes, item.getByteBuffer("test").array())); - } - - @Test(expected = IncompatibleTypeException.class) - public void toByteBuffer_IncompatibleTypeException() { - Item item = new Item(); - item.withString("test", "foo"); - item.getByteBuffer("test"); - } - - @Test - public void valToString() { - Item item = new Item(); - item.withNumber("test", BigDecimal.ONE); - assertEquals("1", item.getString("test")); - assertNull(item.getString("foo")); - item.withBoolean("test", false); - assertEquals("false", item.getString("test")); - } - - @Test - public void getStringSet_fromList() { - Item item = new Item(); - item.withList("test", "a", "b", "c"); - Set ss = item.getStringSet("test"); - assertTrue(ss.size() == 3); - assertTrue(ss.contains("a")); - assertTrue(ss.contains("b")); - assertTrue(ss.contains("c")); - } - - @Test - public void getStringSet_fromNumbers() { - Item item = new Item(); - item.withNumberSet("test", 1, 2); - Set ss = item.getStringSet("test"); - assertTrue(ss.size() == 2); - assertTrue(ss.contains("1")); - assertTrue(ss.contains("2")); - } - - @Test - public void getStringSet_fromBooleans() { - Item item = new Item(); - item.withList("test", true, false); - Set ss = item.getStringSet("test"); - assertTrue(ss.size() == 2); - assertTrue(ss.contains("true")); - assertTrue(ss.contains("false")); - } - - @Test - public void getStringSet_fromBoolean() { - Item item = new Item(); - item.withBoolean("test", true); - Set ss = item.getStringSet("test"); - assertTrue(ss.size() == 1); - assertTrue(ss.contains("true")); - } - - @Test(expected = IncompatibleTypeException.class) - public void getStringSet_fromBinary() { - Item item = new Item(); - item.withBinary("test", new byte[] {1, 2}); - item.getStringSet("test"); - } - - @Test - public void getStringSet_empty() { - Item item = new Item(); - item.with("test", new FluentArrayList()); - Set ss = item.getStringSet("test"); - assertTrue(ss.size() == 0); - } - - @Test(expected = IncompatibleTypeException.class) - public void getStringSet_duplicateElements() { - Item item = new Item(); - item.withList("test", "a", "b", "a"); - item.getStringSet("test"); - } - - @Test - public void getStringSet_nullElement() { - Item item = new Item(); - item.withList("test", "a", null, "c"); - Set ss = item.getStringSet("test"); - assertTrue(ss.size() == 3); - assertTrue(ss.contains("a")); - assertTrue(ss.contains(null)); - assertTrue(ss.contains("c")); - } - - @Test - public void getNumberSet() { - Item item = new Item(); - assertNull(item.getNumberSet("test")); - item.withList("test", BigDecimal.ZERO, BigDecimal.ONE, BigDecimal.TEN); - Set ss = item.getNumberSet("test"); - 
assertTrue(ss.size() == 3); - assertTrue(ss.contains(BigDecimal.ZERO)); - assertTrue(ss.contains(BigDecimal.ONE)); - assertTrue(ss.contains(BigDecimal.TEN)); - } - - @Test - public void getNumberSet_number() { - Item item = new Item(); - item.withNumber("test", 123); - Set ss = item.getNumberSet("test"); - assertTrue(ss.size() == 1); - assertTrue(ss.contains(new BigDecimal("123"))); - } - - @Test - public void getNumberSet_string() { - Item item = new Item(); - item.withString("test", "123"); - Set ss = item.getNumberSet("test"); - assertTrue(ss.size() == 1); - assertTrue(ss.contains(new BigDecimal("123"))); - } - - @Test - public void getNumberSet_empty() { - Item item = new Item(); - item.with("test", new FluentArrayList()); - Set ss = item.getNumberSet("test"); - assertTrue(ss.size() == 0); - } - - @Test(expected = IncompatibleTypeException.class) - public void getNumberSet_duplicateElements() { - Item item = new Item(); - item.withList("test", BigDecimal.ZERO, BigDecimal.ONE, BigDecimal.ZERO); - item.getNumberSet("test"); - } - - @Test - public void getNumberSet_nullElement() { - Item item = new Item(); - item.withList("test", BigDecimal.ZERO, null, BigDecimal.TEN); - Set ss = item.getNumberSet("test"); - assertTrue(ss.size() == 3); - assertTrue(ss.contains(BigDecimal.ZERO)); - assertTrue(ss.contains(null)); - assertTrue(ss.contains(BigDecimal.TEN)); - } - - @Test - public void getBinarySet_bytes() { - Item item = new Item(); - assertNull(item.getBinarySet("test")); - item.withList("test", new byte[] {1, 2}, new byte[] {3, 4}); - Set bas = item.getBinarySet("test"); - assertTrue(bas.size() == 2); - boolean a = false; - boolean b = false; - for (byte[] ba : bas) { - if (Arrays.equals(ba, new byte[] {1, 2})) { - a = true; - } else if (Arrays.equals(ba, new byte[] {3, 4})) { - b = true; - } - } - assertTrue(a); - assertTrue(b); - } - - @Test - public void getBinarySet_singleByteArray() { - Item item = new Item(); - item.with("test", new byte[] {1, 2}); - Set bs = item.getBinarySet("test"); - assertTrue(bs.size() == 1); - boolean a = false; - for (byte[] ba : bs) { - if (Arrays.equals(ba, new byte[] {1, 2})) { - a = true; - } - } - assertTrue(a); - - Set bbs = item.getByteBufferSet("test"); - assertTrue(bbs.size() == 1); - a = false; - for (ByteBuffer ba : bbs) { - if (Arrays.equals(ba.array(), new byte[] {1, 2})) { - a = true; - } - } - assertTrue(a); - } - - @Test - public void getBinarySet_singleByteBuffer() { - Item item = new Item(); - item.with("test", ByteBuffer.wrap(new byte[] {1, 2})); - Set bbs = item.getByteBufferSet("test"); - assertTrue(bbs.size() == 1); - boolean a = false; - for (ByteBuffer ba : bbs) { - if (Arrays.equals(ba.array(), new byte[] {1, 2})) { - a = true; - } - } - assertTrue(a); - Set bs = item.getBinarySet("test"); - assertTrue(bs.size() == 1); - a = false; - for (byte[] ba : bs) { - if (Arrays.equals(ba, new byte[] {1, 2})) { - a = true; - } - } - assertTrue(a); - } - - @Test - public void getBinarySet_empty() { - Item item = new Item(); - item.with("test", new FluentHashSet()); - Set bs = item.getBinarySet("test"); - assertTrue(bs.size() == 0); - - Set bbs = item.getByteBufferSet("test"); - assertTrue(bbs.size() == 0); - } - - @Test(expected = IncompatibleTypeException.class) - public void getBinarySet_Incompatible() { - Item item = new Item(); - item.withString("test", "foo"); - item.getBinarySet("test"); - } - - @Test - public void getByteBufferSet_empty() { - Item item = new Item(); - assertNull(item.getByteBufferSet("test")); - item.with("test", new 
FluentHashSet()); - Set bs = item.getBinarySet("test"); - assertTrue(bs.size() == 0); - - Set bbs = item.getByteBufferSet("test"); - assertTrue(bbs.size() == 0); - } - - @Test(expected = IncompatibleTypeException.class) - public void getByteBufferSet_Incompatible() { - Item item = new Item(); - item.withString("test", "foo"); - item.getByteBufferSet("test"); - } - - @Test - public void getByteBufferSet() { - Item item = new Item(); - item.withList("test", new byte[] {1, 2, 3}, new byte[] {4, 5, 6}); - Set bs = item.getByteBufferSet("test"); - assertTrue(bs.size() == 2); - boolean a = false, b = false; - for (ByteBuffer bb : bs) { - if (Arrays.equals(bb.array(), new byte[] {1, 2, 3})) { - a = true; - } else if (Arrays.equals(bb.array(), new byte[] {4, 5, 6})) { - b = true; - } - } - assertTrue(a); - assertTrue(b); - } - - @Test - public void getList_null() { - Item item = new Item(); - assertNull(item.getList("test")); - } - - @Test - public void getList_list() { - Item item = new Item().withList("test", "abc", "def"); - List list = item.getList("test"); - assertTrue(list.size() == 2); - assertEquals("abc", list.get(0)); - assertEquals("def", list.get(1)); - } - - @Test - public void getList_string() { - Item item = new Item().withString("test", "foo"); - List list = item.getList("test"); - assertTrue(list.size() == 1); - assertEquals("foo", list.get(0)); - } - - @Test - public void toJSON_null() { - assertNull(new Item().getJson("test")); - assertNull(new Item().getJsonPretty("test")); - } - - @Test - public void fromJSON_null() { - assertNull(Item.fromJson(null)); - } - - @Test - public void fromJSON_array() { - Item item = new Item() - .withJson("arrayJson", "[\"foo\", \"bar\"]"); - List arrayJson = item.getList("arrayJson"); - String[] expectedArray = new String[] {"foo", "bar"}; - Assert.assertArrayEquals(expectedArray, arrayJson.toArray()); - } - - @Test - public void fromJSON_map() { - Item item = new Item() - .withJson("mapJson", "{\"foo\": \"bar\"}"); - Map mapJson = item.getMap("mapJson"); - Assert.assertEquals("bar", mapJson.get("foo")); - } - - @Test - public void toFromJSON() { - Item item = new Item() - .withString("stringA", "stringV") - .withFloat("floatA", 123.45f) - // Jackson will convert byte[] into Base64-encoded binary data - .withBinary("binaryA", new byte[] {1, 2, 3}) - .withBoolean("booleanA", true) - .withNull("nullA") - .withJson("jsonA", "{\"myjson\": 321}") - .withList("listA", "a", "b", "c") - .withMap("mapA", new ValueMap().with("map-a", "a").with("map-b", "b")) - .withStringSet("strSetA", "sa", "sb", "sc") - .withNumberSet("numSetA", BigDecimal.ONE, BigDecimal.ZERO) - .withBinarySet("binarySetA", new byte[] {00, 11}, new byte[] {22, 33}) - .withBinarySet("byteBufferSetA", - ByteBuffer.wrap(new byte[] {44, 55}), - ByteBuffer.wrap(new byte[] {66, 77})); - String json = item.toJsonPretty(); - System.out.println(json); - System.out.println("byte[]{1,2,3} => " + BinaryUtils.toBase64(new byte[] {1, 2, 3})); - System.out.println("byte[]{00,11} => " + BinaryUtils.toBase64(new byte[] {00, 11})); - System.out.println("byte[]{22,33} => " + BinaryUtils.toBase64(new byte[] {22, 33})); - System.out.println("byte[]{44,44} => " + BinaryUtils.toBase64(new byte[] {44, 55})); - System.out.println("byte[]{66,77} => " + BinaryUtils.toBase64(new byte[] {66, 77})); - Item itemTo = Item.fromJson(json); - System.out.println(itemTo); - assertTrue(List.class.isAssignableFrom(itemTo.getTypeOf("binarySetA"))); - assertTrue(List.class.isAssignableFrom(itemTo.getTypeOf("byteBufferSetA"))); 
- itemTo.base64Decode("binaryA", "binarySetA", "byteBufferSetA"); - assertTrue(Arrays.equals(itemTo.getBinary("binaryA"), item.getBinary("binaryA"))); - assertTrue(itemTo.getBinarySet("binarySetA").size() == 2); - { // verity the binary content of "binarySetA" - boolean a = false, b = false; - for (byte[] bytes : itemTo.getBinarySet("binarySetA")) { - if (Arrays.equals(bytes, new byte[] {00, 11})) { - a = true; - } else if (Arrays.equals(bytes, new byte[] {22, 33})) { - b = true; - } - } - assertTrue(a); - assertTrue(b); - assertTrue(Set.class.isAssignableFrom(itemTo.getTypeOf("binarySetA"))); - } - assertTrue(itemTo.getBinarySet("byteBufferSetA").size() == 2); - { // verity the binary content of "byteBufferSetA" - boolean a = false, b = false; - for (byte[] bytes : itemTo.getBinarySet("byteBufferSetA")) { - if (Arrays.equals(bytes, new byte[] {44, 55})) { - a = true; - } else if (Arrays.equals(bytes, new byte[] {66, 77})) { - b = true; - } - } - assertTrue(a); - assertTrue(b); - assertTrue(Set.class.isAssignableFrom(itemTo.getTypeOf("byteBufferSetA"))); - } - // JSON doesn't support Set, so all all sets now become lists - assertTrue(List.class.isAssignableFrom(itemTo.getTypeOf("strSetA"))); - assertTrue(List.class.isAssignableFrom(itemTo.getTypeOf("numSetA"))); - itemTo.convertListsToSets("strSetA", "numSetA"); - assertTrue(Set.class.isAssignableFrom(itemTo.getTypeOf("strSetA"))); - assertTrue(Set.class.isAssignableFrom(itemTo.getTypeOf("numSetA"))); - { - Set set = itemTo.getStringSet("strSetA"); - assertTrue(set.size() == item.getStringSet("strSetA").size()); - set.containsAll(item.getStringSet("strSetA")); - } - { - Set set = itemTo.getNumberSet("numSetA"); - assertTrue(set.size() == item.getStringSet("numSetA").size()); - set.containsAll(item.getNumberSet("numSetA")); - } - } - - @Test(expected = IllegalArgumentException.class) - public void withStringSet_duplicates() { - new Item().withStringSet("test", "a", "b", "a"); - } - - @Test(expected = IllegalArgumentException.class) - public void withBigDecimalSet_duplicates() { - new Item().withBigDecimalSet("test", new BigDecimal("1"), BigDecimal.ONE); - } - - @Test(expected = IllegalArgumentException.class) - public void withNumberSet_duplicates() { - new Item().withNumberSet("test", new BigDecimal("1"), new BigInteger("1")); - } - - @Test(expected = IllegalArgumentException.class) - public void withNumberSet_duplicates2() { - new Item().withNumberSet("test", new BigDecimal("1.0"), new Float("1")); - } - - @Test(expected = IllegalArgumentException.class) - public void withNumberSet_duplicates3() { - Set set = new FluentHashSet().withAll( - new BigDecimal("1.0"), new Float("1")); - assertTrue(set.size() == 2); - // Become duplicates when get converted into BigDecimal - new Item().withNumberSet("test", set); - } - - @Test(expected = IllegalArgumentException.class) - public void invalidNullInput() { - new Item().withNumber("test", null); - } - - @Test(expected = IllegalArgumentException.class) - public void nullAttrName() { - new Item().withNull(null); - } - - @Test(expected = IllegalArgumentException.class) - public void blankAttrName() { - new Item().withNull(" "); - } - - @Test(expected = IllegalArgumentException.class) - public void withKeyComponents_null() { - new Item().withKeyComponents(); - } - - @Test(expected = IllegalArgumentException.class) - public void withKeyComponents_nullComponent() { - new Item().withKeyComponents((KeyAttribute) null); - } - - @Test - public void withKeyComponents() { - Item item = new 
Item().withKeyComponents(new KeyAttribute("name", 123)); - Assert.assertTrue(123 == item.getInt("name")); - Assert.assertTrue(BigDecimal.class == item.getTypeOf("name")); - } - - @Test(expected = IncompatibleTypeException.class) - public void getBOOL_null() { - Item item = new Item(); - item.getBool("test"); - } - - @Test(expected = IllegalArgumentException.class) - public void withPrimaryKey_null() { - new Item().withPrimaryKey(null); - } - - @Test(expected = IllegalArgumentException.class) - public void withPrimaryKey_empty() { - new Item().withPrimaryKey(new PrimaryKey()); - } - - @Test(expected = IncompatibleTypeException.class) - public void getBOOL_invalidValue() { - Item item = new Item().withInt("test", 123); - item.getBool("test"); - } - - @Test - public void getBOOL_Boolean() { - Item item = new Item().withBoolean("test", Boolean.TRUE); - assertEquals(Boolean.TRUE, item.getBool("test")); - item.withBoolean("test", Boolean.FALSE); - assertEquals(Boolean.FALSE, item.getBool("test")); - } - - @Test - public void getBOOL_01() { - Item item = new Item().withString("test", "1"); - assertEquals(Boolean.TRUE, item.getBool("test")); - item.withString("test", "0"); - assertEquals(Boolean.FALSE, item.getBool("test")); - item.withString("test", "true"); - assertEquals(Boolean.TRUE, item.getBool("test")); - item.withString("test", "false"); - assertEquals(Boolean.FALSE, item.getBool("test")); - } - - @Test - public void withShort() { - assertTrue(1 == new Item().withShort("test", (short) 1).getInt("test")); - } - - @Test(expected = IllegalArgumentException.class) - public void withShort_emptyName() { - new Item().withShort(" ", (short) 1); - } - - @Test(expected = IllegalArgumentException.class) - public void withShort_nullName() { - new Item().withShort(null, (short) 1); - } - - @Test - public void withDouble() { - assertTrue(1 == new Item().withDouble("test", 1.0).getInt("test")); - } - - @Test(expected = IllegalArgumentException.class) - public void withDouble_emptyName() { - new Item().withDouble(" ", 1.0); - } - - @Test(expected = IllegalArgumentException.class) - public void withDouble_nullName() { - new Item().withDouble(null, 1.0); - } - - // https://github.com/aws/aws-sdk-java/issues/311#issuecomment-64474230 - @Test(expected = ClassCastException.class) - public void issues311() { - Map bigIntMap_input = new HashMap(); - bigIntMap_input.put("map_key", new BigInteger("123")); - - Item i = new Item().withMap("item_key", bigIntMap_input); - Map mapout = i.getMap("item_key"); - @SuppressWarnings("unused") - BigInteger b = mapout.get("map_key"); - } - - @Test - public void getRawMap() { - Map bigIntMap_input = new HashMap(); - bigIntMap_input.put("map_key", new BigInteger("123")); - - Item i = new Item().withMap("item_key", bigIntMap_input); - Map mapout = i.getRawMap("item_key"); - Object b = mapout.get("map_key"); - assertEquals("123", b.toString()); - } - - @Test - public void getMapOfNumbers_BigInteger() { - Map bigIntMap_input = new HashMap(); - bigIntMap_input.put("map_key", new BigInteger("123")); - - Item i = new Item().withMap("item_key", bigIntMap_input); - Map mapout = i.getMapOfNumbers("item_key", BigInteger.class); - BigInteger b = mapout.get("map_key"); - assertEquals("123", b.toString()); - } - - @Test - public void getMapOfNumbers_BigDecimal() { - Map bigIntMap_input = new HashMap(); - bigIntMap_input.put("map_key", new BigInteger("123")); - - Item i = new Item().withMap("item_key", bigIntMap_input); - Map mapout = i.getMapOfNumbers("item_key", BigDecimal.class); 
- BigDecimal b = mapout.get("map_key"); - assertEquals("123", b.toString()); - } - - @Test - public void getMapOfNumbers_Short() { - Map bigIntMap_input = new HashMap(); - bigIntMap_input.put("map_key", new BigInteger("123")); - - Item i = new Item().withMap("item_key", bigIntMap_input); - Map mapout = i.getMapOfNumbers("item_key", Short.class); - Short b = mapout.get("map_key"); - assertEquals("123", b.toString()); - } - - @Test - public void getMapOfNumbers_Integer() { - Map bigIntMap_input = new HashMap(); - bigIntMap_input.put("map_key", new BigInteger("123")); - - Item i = new Item().withMap("item_key", bigIntMap_input); - Map mapout = i.getMapOfNumbers("item_key", Integer.class); - Integer b = mapout.get("map_key"); - assertEquals("123", b.toString()); - } - - @Test - public void getMapOfNumbers_Long() { - Map bigIntMap_input = new HashMap(); - bigIntMap_input.put("map_key", new BigInteger("123")); - - Item i = new Item().withMap("item_key", bigIntMap_input); - Map mapout = i.getMapOfNumbers("item_key", Long.class); - Long b = mapout.get("map_key"); - assertEquals("123", b.toString()); - } - - @Test - public void getMapOfNumbers_Float() { - Map bigIntMap_input = new HashMap(); - bigIntMap_input.put("map_key", new BigInteger("123")); - - Item i = new Item().withMap("item_key", bigIntMap_input); - Map mapout = i.getMapOfNumbers("item_key", Float.class); - Float b = mapout.get("map_key"); - assertEquals(b.toString(), "123.0", b.toString()); - } - - @Test - public void getMapOfNumbers_Double() { - Map bigIntMap_input = new HashMap(); - bigIntMap_input.put("map_key", new BigInteger("123")); - - Item i = new Item().withMap("item_key", bigIntMap_input); - Map mapout = i.getMapOfNumbers("item_key", Double.class); - Double b = mapout.get("map_key"); - assertEquals(b.toString(), "123.0", b.toString()); - } - - @Test - public void getMapOfNumbers_Number() { - Map bigIntMap_input = new HashMap(); - bigIntMap_input.put("map_key", new BigInteger("123")); - - Item i = new Item().withMap("item_key", bigIntMap_input); - Map mapout = i.getMapOfNumbers("item_key", Number.class); - Number b = mapout.get("map_key"); - assertEquals("123", b.toString()); - } - - @Test - public void getMapOfNumbers_NotExist() { - Item i = new Item(); - assertNull(i.getMapOfNumbers("item_key", Short.class)); - } - - @Test - public void getBigInteger() { - Item i = new Item().withInt("item_key", 123); - BigInteger b = i.getBigInteger("item_key"); - assertEquals("123", b.toString()); - - assertNull(i.getBigInteger("foo")); - } - - @Test - public void getShort() { - Item i = new Item().withInt("item_key", 123); - short b = i.getShort("item_key"); - assertTrue(b == 123); - } - - @Test(expected = NumberFormatException.class) - public void getShortNotExist() { - Item i = new Item(); - i.getShort("item_key"); - } - - @Test - public void getFloat() { - Item i = new Item().withFloat("item_key", 123.45f); - float b = i.getFloat("item_key"); - assertTrue(b == 123.45f); - } - - @Test(expected = NumberFormatException.class) - public void getFloatNotExist() { - Item i = new Item(); - i.getFloat("item_key"); - } - - @Test - public void getDouble() { - Item i = new Item().withDouble("item_key", 123.45); - double b = i.getFloat("item_key"); - assertTrue(b + "", b > 123.44 && b <= 123.45); - } - - @Test(expected = NumberFormatException.class) - public void getDoubleNotExist() { - Item i = new Item(); - i.getDouble("item_key"); - } - - @Test(expected = IllegalArgumentException.class) - public void withNullBigInteger() { - new 
Item().withBigInteger("foo", null); - } - - @Test(expected = IllegalArgumentException.class) - public void withNullNumber() { - new Item().withNumber("foo", null); - } - - @Test - public void hasAttribute() { - assertFalse(new Item().hasAttribute("foo")); - assertTrue(new Item().with("foo", null).hasAttribute("foo")); - assertTrue(new Item().with("foo", "fooval").hasAttribute("foo")); - assertTrue(new Item().with("foo", "fooval").with("bar", "barval").hasAttribute("foo")); - assertTrue(new Item().with("foo", "fooval").with("bar", "barval").hasAttribute("bar")); - assertFalse(new Item().with("foo", "fooval").with("bar", "barval").hasAttribute("notExist")); - } - - @Test - public void testEquals() { - assertEquals(new Item().with("foo", "fooval").with("bar", "barval"), - new Item().with("foo", "fooval").with("bar", "barval")); - assertEquals(new Item().with("foo", "fooval").with("bar", "barval"), - new Item().withPrimaryKey(new PrimaryKey("foo", "fooval", "bar", "barval"))); - - assertFalse(new Item().equals(new Object())); - assertFalse(new Item().equals(null)); - - Set items = new HashSet(); - items.add(new Item().with("foo", "fooval").with("bar", "barval")); - items.add(new Item().with("foo", "fooval")); - assertTrue(items.size() == 2); - - assertTrue(items.contains(new Item().with("foo", "fooval"))); - assertTrue(items.contains(new Item().with("foo", "fooval").with("bar", "barval"))); - assertFalse(items.contains(new Item())); - - items.add(new Item()); - assertTrue(items.contains(new Item())); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ItemTestUtils.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ItemTestUtils.java deleted file mode 100644 index e0a3e73f3b71..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ItemTestUtils.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; - -public class ItemTestUtils { - /** - * Used for testing purposes. - */ - public static boolean equalsItem(Item itemFrom, Item itemTo) { - return equalsSimpleValue(itemFrom.asMap(), itemTo.asMap()); - } - - /** - * Used for testing purposes. 
- */ - @SuppressWarnings("unchecked") - public static boolean equalsSimpleValue(Object v0, Object v1) { - if (v0 == null || v1 == null) { - return v0 == null && v1 == null; - } - // Byte buffer or byte array - if (v0 instanceof ByteBuffer) { - return equalsByteBuffer((ByteBuffer) v0, v1); - } else if (v1 instanceof ByteBuffer) { - return equalsByteBuffer((ByteBuffer) v1, v0); - } else if (v0 instanceof byte[]) { - return equalsByteArray((byte[]) v0, v1); - } else if (v1 instanceof byte[]) { - return equalsByteArray((byte[]) v1, v0); - } - // Number - if (v0 instanceof Number && v1 instanceof Number) { - String s0 = InternalUtils.valToString(v0); - String s1 = InternalUtils.valToString(v1); - return s0.equals(s1); - } - // Map - if (v0 instanceof Map && v1 instanceof Map) { - Map map0 = (Map) v0; - Map map1 = (Map) v1; - - if (map0.size() != map1.size()) { - return false; - } - - for (Entry e : map0.entrySet()) { - if (!equalsSimpleValue( - e.getValue(), - map1.get(e.getKey()))) { - return false; - } - } - return true; - } - if (v0 instanceof List && v1 instanceof List) { - List map0 = (List) v0; - List map1 = (List) v1; - - if (map0.size() != map1.size()) { - return false; - } - for (int i = 0; i < map0.size(); i++) { - if (!equalsSimpleValue(map0.get(i), map1.get(i))) { - return false; - } - } - return true; - } - // Set - // Currently this works only if both set have the elements in the same - // iteration order. Can we do better ? - if (v0 instanceof Set && v1 instanceof Set) { - Set set0 = (Set) v0; - Set set1 = (Set) v1; - - if (set0.size() != set1.size()) { - return false; - } - - for (Object element0 : set0) { - boolean matchFound = false; - - for (Object element1 : set1) { - if (equalsSimpleValue(element0, element1)) { - matchFound = true; - break; - } - } - - if (!matchFound) { - return false; - } - } - - return true; - } - - return v0.equals(v1); - } - - /** - * Used for testing purposes. - */ - private static boolean equalsByteBuffer(ByteBuffer fromByteBuffer, Object o) { - byte[] from = fromByteBuffer.array(); - return equalsByteArray(from, o); - } - - /** - * Used for testing purposes. - */ - private static boolean equalsByteArray(byte[] from, Object o) { - if (o instanceof ByteBuffer) { - byte[] to = ((ByteBuffer) o).array(); - return Arrays.equals(from, to); - } else { - return (o instanceof byte[]) - && Arrays.equals(from, ((byte[]) o)); - } - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/KeyAttribute.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/KeyAttribute.java deleted file mode 100644 index b55993a6337d..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/KeyAttribute.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document; - -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; - -/** - * A key attribute which consists of an attribute name and value. - */ -public class KeyAttribute extends Attribute { - - /** - * A key attribute which consists of an attribute name and value. - */ - public KeyAttribute(String attrName, Object value) { - super(attrName, value); - InternalUtils.checkInvalidAttrName(attrName); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/KeyCondition.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/KeyCondition.java deleted file mode 100644 index e85bb813ba4d..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/KeyCondition.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; - -/** - * Simple key conditions. - */ -public enum KeyCondition { - EQ(ComparisonOperator.EQ), - LE(ComparisonOperator.LE), - LT(ComparisonOperator.LT), - GE(ComparisonOperator.GE), - GT(ComparisonOperator.GT), - BEGINS_WITH(ComparisonOperator.BEGINS_WITH), - BETWEEN(ComparisonOperator.BETWEEN),; - - private final ComparisonOperator comparisonOperator; - - KeyCondition(ComparisonOperator comparisonOperator) { - this.comparisonOperator = comparisonOperator; - } - - public ComparisonOperator toComparisonOperator() { - return comparisonOperator; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/LowLevelResultListener.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/LowLevelResultListener.java deleted file mode 100644 index d628b310ea4c..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/LowLevelResultListener.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -/** - * Can be used to listen to the event of receiving a low level result of type R - * from the server side. 
- * - * @param - * low level result type - */ -public abstract class LowLevelResultListener { - public static final LowLevelResultListener NONE = - new LowLevelResultListener() { - @Override - public void onLowLevelResult(Object result) { - } - }; - - @SuppressWarnings("unchecked") - public static LowLevelResultListener none() { - return (LowLevelResultListener) NONE; - } - - public abstract void onLowLevelResult(R lowLevelResult); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Page.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Page.java deleted file mode 100644 index 7c5d5d938503..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Page.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; -import software.amazon.awssdk.core.exception.SdkServiceException; - -/** - * A page contains a list of items; accessing methods on the list are - * guaranteed to be purely in-memory operations that will not block or throw - * exceptions because of transient network issues. A page also knows whether it - * has a "next page", and if so knows how to retrieve it (which - * will almost certainly involve a remote network call that may block or - * fail). - * - * @param item type - * @param low level result type - */ -public abstract class Page implements Iterable { - private final List content; - private final R lowLevelResult; - - /** - * @param content an unmodifiable list of content - * @param lowLevelResult the low level (response) result from AWS - */ - public Page(List content, R lowLevelResult) { - if (content == null || lowLevelResult == null) { - throw new IllegalArgumentException("both content and lowLevelResult must be specified"); - } - this.content = content; - this.lowLevelResult = lowLevelResult; - } - - /** - * Checks whether this page has a "next page." If this method returns - * true, the next page can be retrieved by calling {@code next}. If it - * returns false, any call to {@code next} will be guaranteed to throw an - * {@code IllegalStateException}. - * - * @return true if there is next page; false otherwise - */ - public abstract boolean hasNextPage(); - - /** - * Retrieves the next page. 
- * - * @return the next page - * @throws NoSuchElementException if there is no next page - * @throws SdkServiceException on error making the remote call - */ - public abstract Page nextPage(); - - public final int size() { - return content.size(); - } - - @Override - public final Iterator iterator() { - return content.iterator(); - } - - @Override - public String toString() { - return content.toString(); - } - - public final R lowLevelResult() { - return lowLevelResult; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PageTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PageTest.java deleted file mode 100644 index 517c7906b787..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PageTest.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.ArrayList; -import java.util.List; -import org.junit.Test; - -public class PageTest { - - @Test(expected = IllegalArgumentException.class) - public void testNull_content() { - new TestPage(null, new Object()); - } - - ; - - @Test(expected = IllegalArgumentException.class) - public void testNull_result() { - new TestPage(new ArrayList(), null); - } - - @Test - public void test_toString() { - System.out.println(new TestPage(new ArrayList(), new Object()) - .toString()); - } - - private static class TestPage extends Page { - TestPage(List content, Object result) { - super(content, result); - } - - ; - - @Override - public boolean hasNextPage() { - return false; - } - - @Override - public TestPage nextPage() { - return null; - } - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PrimaryKey.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PrimaryKey.java deleted file mode 100644 index ad4236ee5283..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PrimaryKey.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
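
A small illustrative sketch of the hasNextPage()/nextPage() contract described in the Page Javadoc above. The class name, the countItems helper and the Page<Item, QueryResponse> type arguments are assumptions made for this example:

    import software.amazon.awssdk.services.dynamodb.document.Item;
    import software.amazon.awssdk.services.dynamodb.document.Page;
    import software.amazon.awssdk.services.dynamodb.model.QueryResponse;

    public class PageWalkSketch {
        // Walks pages manually; how the first page is obtained (e.g. via ItemCollection.pages()) is not shown.
        public static int countItems(Page<Item, QueryResponse> page) {
            int total = 0;
            while (true) {
                total += page.size();       // purely in-memory, never triggers a network call
                if (!page.hasNextPage()) {
                    break;                  // calling nextPage() past this point would throw
                }
                page = page.nextPage();     // may make a remote call to DynamoDB
            }
            return total;
        }
    }
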
- */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.Collection; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; - -/** - * Used to represent a primary key that has one or multiple key components. - */ -public class PrimaryKey { - private final Map components = new LinkedHashMap(); - - public PrimaryKey() { - } - - /** - * Constructs with the specified key components. - */ - public PrimaryKey(KeyAttribute... components) { - addComponents(components); - } - - /** - * Constructs with a hash key. - */ - public PrimaryKey(String hashKeyName, Object hashKeyValue) { - addComponent(hashKeyName, hashKeyValue); - } - - /** - * Constructs with a hash key and a range key. - */ - public PrimaryKey(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - if (hashKeyName.equals(rangeKeyName)) { - throw new IllegalArgumentException("hashKeyName must not be the same as the rangeKeyName"); - } - addComponent(hashKeyName, hashKeyValue); - addComponent(rangeKeyName, rangeKeyValue); - } - - /** - * Returns all the key components of this primary key. - */ - public Collection getComponents() { - return components.values(); - } - - /** - * Returns all the key component names of this primary key as a set. - */ - public Set getComponentNameSet() { - return components.keySet(); - } - - /** - * Returns true if this primary key has the specified key attribute name; - * false otherwise. - */ - public boolean hasComponent(String attrName) { - return components.containsKey(attrName); - } - - /** - * Adds one or multiple key components to this primary key. - * - * Note adding a key component with the same name as that of an existing - * one would overwrite and become a single key component instead of two. - */ - public PrimaryKey addComponents(KeyAttribute... components) { - if (components != null) { - for (KeyAttribute ka : components) { - InternalUtils.rejectNullInput(ka); - this.components.put(ka.name(), ka); - } - } - return this; - } - - /** - * Adds a key component to this primary key. - * - * Note adding a key component with the same name as that of an existing - * one would overwrite and become a single key component instead of two. - */ - public PrimaryKey addComponent(String keyAttributeName, Object keyAttributeValue) { - components.put(keyAttributeName, - new KeyAttribute(keyAttributeName, keyAttributeValue)); - return this; - } - - @Override - public String toString() { - return String.valueOf(components); - } - - @Override - public int hashCode() { - return components.hashCode(); - } - - @Override - public boolean equals(Object in) { - if (in instanceof PrimaryKey) { - PrimaryKey that = (PrimaryKey) in; - return this.components.equals(that.components); - } else { - return false; - } - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PrimaryKeyTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PrimaryKeyTest.java deleted file mode 100644 index dea5cbce36c9..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PrimaryKeyTest.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License").
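A short sketch of the PrimaryKey API removed above; the attribute names and values are made up for illustration, and the equality and overwrite behaviour follows the component-map semantics shown in the class.

import software.amazon.awssdk.services.dynamodb.document.PrimaryKey;

public final class PrimaryKeySketch {
    public static void main(String[] args) {
        // Hash-and-range key in one call; the two component names must differ.
        PrimaryKey key = new PrimaryKey("customerId", "C-42", "orderId", 1001);

        // The same key built incrementally; re-adding a name overwrites the earlier component.
        PrimaryKey same = new PrimaryKey()
                .addComponent("customerId", "C-42")
                .addComponent("orderId", 1001);

        System.out.println(key.equals(same));             // true: equality is component-wise
        System.out.println(key.hasComponent("orderId"));  // true
        System.out.println(key.hasComponent("missing"));  // false
    }
}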
- * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.util.HashSet; -import java.util.Set; -import org.junit.Test; - -public class PrimaryKeyTest { - - @Test - public void ctor_KeyAttributes() { - new PrimaryKey(); - } - - @Test(expected = IllegalArgumentException.class) - public void ctor_nullKeyAttributes() { - new PrimaryKey((KeyAttribute) null); - } - - @Test - public void ctor_nullValue() { - new PrimaryKey("name", null); - } - - @Test(expected = IllegalArgumentException.class) - public void ctor_nullName() { - new PrimaryKey(null, "val"); - } - - @Test(expected = IllegalArgumentException.class) - public void ctor_emptyName() { - new PrimaryKey(" ", "val"); - } - - @Test(expected = IllegalArgumentException.class) - public void ctor_sameHashRangeKeyNames() { - new PrimaryKey("key", "val1", "key", "val2"); - } - - @Test(expected = IllegalArgumentException.class) - public void ctor_badHashKeyName() { - new PrimaryKey("", "val1", "key", "val2"); - } - - @Test(expected = IllegalArgumentException.class) - public void ctor_badRangeKeyName() { - new PrimaryKey("key1", "val1", "", "val2"); - } - - @Test(expected = IllegalArgumentException.class) - public void addComponents_nullElement() { - new PrimaryKey().addComponents((KeyAttribute) null); - } - - @Test(expected = IllegalArgumentException.class) - public void addComponent_nullName() { - new PrimaryKey().addComponent(null, "val"); - } - - @Test(expected = IllegalArgumentException.class) - public void addComponent_emptyName() { - new PrimaryKey().addComponent(" ", "val"); - } - - @Test - public void addComponent_nullVal() { - new PrimaryKey().addComponent("key", null); - } - - @Test - public void addComponents_null() { - new PrimaryKey().addComponents((KeyAttribute[]) null); - } - - @Test - public void ctor_nullHashRangeKeys() { - new PrimaryKey("hashkey", null, "rangekey", null); - } - - @Test - public void testEquals() { - assertEquals(new PrimaryKey("hashkey", null, "rangekey", null), - new PrimaryKey("hashkey", null, "rangekey", null)); - - assertEquals(new PrimaryKey("k1", "v1", "k2", "v2"), - new PrimaryKey("k1", "v1", "k2", "v2")); - assertFalse(new PrimaryKey("k1", "v1").equals(new Attribute("k1", "v1"))); - assertFalse(new PrimaryKey("k1", "v1").equals(null)); - - Set set = new HashSet(); - set.add(new PrimaryKey("k1", "v1", "k2", "v2")); - set.add(new PrimaryKey("k1", "v1", "k2", "v2")); - assertTrue(set.size() == 1); - - set.add(new PrimaryKey("k1", "v1")); - assertTrue(set.size() == 2); - } - - @Test - public void hasComponent() { - assertTrue(new PrimaryKey("hashkey", null, "rangekey", null).hasComponent("hashkey")); - assertTrue(new PrimaryKey("hashkey", null, "rangekey", null).hasComponent("rangekey")); - assertFalse(new PrimaryKey("hashkey", null, "rangekey", null).hasComponent("notExist")); - assertTrue(new PrimaryKey("k1", "v1", "k2", "v2").hasComponent("k1")); - assertTrue(new PrimaryKey("k1", "v1", "k2", "v2").hasComponent("k2")); - 
assertFalse(new PrimaryKey("k1", "v1", "k2", "v2").hasComponent("notExist")); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PutItemOutcome.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PutItemOutcome.java deleted file mode 100644 index e13067f7a945..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/PutItemOutcome.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.model.PutItemResponse; - -/** - * The outcome of putting an item to a DynamoDB table. - */ -public class PutItemOutcome { - private final PutItemResponse result; - - /** - * @param result the low-level result; must not be null - */ - public PutItemOutcome(PutItemResponse result) { - if (result == null) { - throw new IllegalArgumentException(); - } - this.result = result; - } - - /** - * Returns all the returned attributes as a (non-null) {@link Item}. - */ - public Item getItem() { - Map attributes = - InternalUtils.toSimpleMapValue(result.attributes()); - Item item = Item.fromMap(attributes); - return item; - } - - /** - * Returns a non-null low-level result returned from the server side. - */ - public PutItemResponse getPutItemResponse() { - return result; - } - - @Override - public String toString() { - return String.valueOf(result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/QueryFilter.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/QueryFilter.java deleted file mode 100644 index 920efe49eecc..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/QueryFilter.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import software.amazon.awssdk.services.dynamodb.document.internal.Filter; - -/** - * A query filter. - * - * Typical usages: - *
- * new QueryFilter("strAttr").eq("attrValue"); - * - * new QueryFilter("intAttr").gt(42); - * - * ... - * - */ -public class QueryFilter extends Filter { - - /** - * A query filter. - * - * Typical usages: - * - * new QueryFilter("strAttr").eq("attrValue"); - * - * new QueryFilter("intAttr").gt(42); - * - * ... - *
    - */ - public QueryFilter(String attr) { - super(attr); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/QueryOutcome.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/QueryOutcome.java deleted file mode 100644 index 7f0176996bf0..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/QueryOutcome.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.List; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.model.QueryResponse; - -/** - * The outcome of query on DynamoDB table. - */ -public class QueryOutcome { - private final QueryResponse result; - - /** - * @param result the low-level result; must not be null - */ - public QueryOutcome(QueryResponse result) { - if (result == null) { - throw new IllegalArgumentException(); - } - this.result = result; - } - - /** - * Returns a non-null list of the returned items; can be empty. - */ - public List getItems() { - return InternalUtils.toItemList(result.items()); - } - - /** - * Returns a non-null low-level result returned from the server side. - */ - public QueryResponse getQueryResponse() { - return result; - } - - @Override - public String toString() { - return String.valueOf(result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/RangeKeyCondition.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/RangeKeyCondition.java deleted file mode 100644 index 81c1208ef05d..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/RangeKeyCondition.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; - - -/** - * A condition for selecting items with a range key. Typical usages: - *
- * new RangeKeyCondition("strAttr").eq("attrValue"); - * - * new RangeKeyCondition("intAttr").gt(42); - * - * ... - * - */ -public class RangeKeyCondition { - private final String attrName; - private KeyCondition kcond; - private Object[] values; - - /** - * A condition for selecting items with a range key. Typical usages: - * - * new RangeKeyCondition("strAttr").eq("attrValue"); - * - * new RangeKeyCondition("intAttr").gt(42); - * - * ... - *
    - */ - public RangeKeyCondition(String attrName) { - InternalUtils.checkInvalidAttrName(attrName); - this.attrName = attrName; - } - - public String getAttrName() { - return attrName; - } - - public KeyCondition getKeyCondition() { - return kcond; - } - - public Object[] values() { - return values == null ? null : values.clone(); - } - - /** - * Creates and returns a condition of the range key being equal to the given - * value. - */ - public RangeKeyCondition eq(Object val) { - kcond = KeyCondition.EQ; - return values(val); - } - - /** - * Creates and returns a condition of the range key with a value that begins - * with the given value. - */ - public RangeKeyCondition beginsWith(String val) { - kcond = KeyCondition.BEGINS_WITH; - return values(val); - } - - /** - * Creates and returns a condition of the range key that has a value between - * the given values. - */ - public RangeKeyCondition between(Object low, Object hi) { - kcond = KeyCondition.BETWEEN; - return values(low, hi); - } - - /** - * Creates and returns a condition of the range key being greater than or - * equal to the given value. - */ - public RangeKeyCondition ge(Object val) { - kcond = KeyCondition.GE; - return values(val); - } - - /** - * Creates and returns a condition of the range key being greater than the - * given value. - */ - public RangeKeyCondition gt(Object val) { - kcond = KeyCondition.GT; - return values(val); - } - - /** - * Creates and returns a condition of the range key being less than or equal - * to the given value. - */ - public RangeKeyCondition le(Object val) { - kcond = KeyCondition.LE; - return values(val); - } - - /** - * Creates and returns a condition of the range key being less than the - * given value. - */ - public RangeKeyCondition lt(Object val) { - kcond = KeyCondition.LT; - return values(val); - } - - private RangeKeyCondition values(Object... values) { - this.values = values; - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ScanFilter.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ScanFilter.java deleted file mode 100644 index 99753f842374..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ScanFilter.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import software.amazon.awssdk.services.dynamodb.document.internal.Filter; - -/** - * A scan filter. - * - * Typical usages: - *
- * new ScanFilter("strAttr").eq("attrValue"); - * - * new ScanFilter("intAttr").gt(42); - * - * ... - * - */ -public class ScanFilter extends Filter { - - /** - * A scan filter. - * - * Typical usages: - * - * new ScanFilter("strAttr").eq("attrValue"); - * - * new ScanFilter("intAttr").gt(42); - * - * ... - *
    - */ - public ScanFilter(String attr) { - super(attr); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ScanOutcome.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ScanOutcome.java deleted file mode 100644 index 834e46a7a7f9..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/ScanOutcome.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.List; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.model.ScanResponse; - -/** - * The outcome of scanning the DynamoDB table. - */ -public class ScanOutcome { - private final ScanResponse result; - - /** - * @param result the low-level result; must not be null - */ - public ScanOutcome(ScanResponse result) { - if (result == null) { - throw new IllegalArgumentException(); - } - this.result = result; - } - - /** - * Returns a non-null list of the returned items; can be empty. - */ - public List getItems() { - return InternalUtils.toItemList(result.items()); - } - - /** - * Returns a non-null low-level result returned from the server side. - */ - public ScanResponse scanResult() { - return result; - } - - @Override - public String toString() { - return String.valueOf(result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Table.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Table.java deleted file mode 100644 index 7761cf4d0c89..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/Table.java +++ /dev/null @@ -1,776 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
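The filter, condition and outcome classes above are used through the Table API whose removal starts here. A sketch of one query and one scan, assuming a hypothetical Orders table, a default DynamoDbClient, and that ItemCollection iterates over Item as in the v1 document API:

import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.document.Item;
import software.amazon.awssdk.services.dynamodb.document.ItemCollection;
import software.amazon.awssdk.services.dynamodb.document.QueryFilter;
import software.amazon.awssdk.services.dynamodb.document.QueryOutcome;
import software.amazon.awssdk.services.dynamodb.document.RangeKeyCondition;
import software.amazon.awssdk.services.dynamodb.document.ScanFilter;
import software.amazon.awssdk.services.dynamodb.document.ScanOutcome;
import software.amazon.awssdk.services.dynamodb.document.Table;

public final class QueryAndScanSketch {
    public static void main(String[] args) {
        Table orders = new Table(DynamoDbClient.create(), "Orders");

        // Query: hash key equality, a range key condition, plus a non-key filter.
        ItemCollection<QueryOutcome> shipped = orders.query(
                "customerId", "C-42",
                new RangeKeyCondition("orderId").between(100, 200),
                new QueryFilter("status").eq("SHIPPED"));
        for (Item item : shipped) {   // crossing a page boundary can trigger another call
            System.out.println(item);
        }

        // Scan: filters are applied while the whole table is read.
        ItemCollection<ScanOutcome> heavy = orders.scan(new ScanFilter("weightKg").gt(42));
        for (Item item : heavy) {
            System.out.println(item);
        }
    }
}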
- */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.Collection; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.api.DeleteItemApi; -import software.amazon.awssdk.services.dynamodb.document.api.GetItemApi; -import software.amazon.awssdk.services.dynamodb.document.api.PutItemApi; -import software.amazon.awssdk.services.dynamodb.document.api.QueryApi; -import software.amazon.awssdk.services.dynamodb.document.api.ScanApi; -import software.amazon.awssdk.services.dynamodb.document.api.UpdateItemApi; -import software.amazon.awssdk.services.dynamodb.document.internal.DeleteItemImpl; -import software.amazon.awssdk.services.dynamodb.document.internal.GetItemImpl; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.document.internal.PutItemImpl; -import software.amazon.awssdk.services.dynamodb.document.internal.QueryImpl; -import software.amazon.awssdk.services.dynamodb.document.internal.ScanImpl; -import software.amazon.awssdk.services.dynamodb.document.internal.UpdateItemImpl; -import software.amazon.awssdk.services.dynamodb.document.spec.DeleteItemSpec; -import software.amazon.awssdk.services.dynamodb.document.spec.GetItemSpec; -import software.amazon.awssdk.services.dynamodb.document.spec.PutItemSpec; -import software.amazon.awssdk.services.dynamodb.document.spec.QuerySpec; -import software.amazon.awssdk.services.dynamodb.document.spec.ScanSpec; -import software.amazon.awssdk.services.dynamodb.document.spec.UpdateItemSpec; -import software.amazon.awssdk.services.dynamodb.document.spec.UpdateTableSpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateGlobalSecondaryIndexAction; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableResponse; -import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DescribeTableResponse; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndexDescription; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndexUpdate; -import software.amazon.awssdk.services.dynamodb.model.IndexStatus; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; -import software.amazon.awssdk.services.dynamodb.model.TableDescription; -import software.amazon.awssdk.services.dynamodb.model.TableStatus; -import software.amazon.awssdk.services.dynamodb.model.UpdateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.UpdateTableResponse; - -/** - * A DynamoDB table. Instance of this class is typically obtained via - * {@link DynamoDb#getTable(String)}. 
- */ -@ThreadSafe -public class Table implements PutItemApi, GetItemApi, QueryApi, ScanApi, - UpdateItemApi, DeleteItemApi { - private static final long SLEEP_TIME_MILLIS = 5000; - private final String tableName; - private final DynamoDbClient client; - private final PutItemImpl putItemDelegate; - private final GetItemImpl getItemDelegate; - private final UpdateItemImpl updateItemDelegate; - private final DeleteItemImpl deleteItemDelegate; - private final QueryImpl queryDelegate; - private final ScanImpl scanDelegate; - private volatile TableDescription tableDescription; - - public Table(DynamoDbClient client, String tableName) { - this(client, tableName, null); - } - - public Table(DynamoDbClient client, String tableName, - TableDescription tableDescription) { - if (client == null) { - throw new IllegalArgumentException("client must be specified"); - } - if (tableName == null || tableName.trim().length() == 0) { - throw new IllegalArgumentException("table name must not be null or empty"); - } - this.client = client; - this.tableName = tableName; - this.tableDescription = tableDescription; - - this.putItemDelegate = new PutItemImpl(client, this); - this.getItemDelegate = new GetItemImpl(client, this); - this.updateItemDelegate = new UpdateItemImpl(client, this); - this.deleteItemDelegate = new DeleteItemImpl(client, this); - - this.queryDelegate = new QueryImpl(client, this); - this.scanDelegate = new ScanImpl(client, this); - } - - public String getTableName() { - return tableName; - } - - /** - * Returns the table description; or null if the table description has not - * yet been described via {@link #describe()}. No network call. - */ - public TableDescription getDescription() { - return tableDescription; - } - - /** - * Retrieves the table description from DynamoDB. Involves network calls. - * Meant to be called as infrequently as possible to avoid throttling - * exception from the server side. - * - * @return a non-null table description - * - * @throws ResourceNotFoundException if the table doesn't exist - */ - public TableDescription describe() { - DescribeTableResponse result = client.describeTable( - InternalUtils.applyUserAgent(DescribeTableRequest.builder().tableName(tableName).build())); - TableDescription description = result.table(); - tableDescription = description; - return description; - } - - /** - * Gets a reference to the specified index. No network call. - */ - public Index getIndex(String indexName) { - return new Index(client, indexName, this); - } - - @Override - public PutItemOutcome putItem(Item item) { - return putItemDelegate.putItem(item); - } - - @Override - public PutItemOutcome putItem(Item item, Expected... expected) { - return putItemDelegate.putItem(item, expected); - } - - @Override - public PutItemOutcome putItem(Item item, String conditionExpression, - Map nameMap, Map valueMap) { - return putItemDelegate.putItem(item, conditionExpression, nameMap, - valueMap); - } - - @Override - public PutItemOutcome putItem(PutItemSpec spec) { - return putItemDelegate.putItem(spec); - } - - @Override - public GetItemOutcome getItemOutcome(KeyAttribute... 
primaryKeyComponents) { - return getItemDelegate.getItemOutcome(primaryKeyComponents); - } - - @Override - public GetItemOutcome getItemOutcome(PrimaryKey primaryKey) { - return getItemDelegate.getItemOutcome(primaryKey); - } - - @Override - public GetItemOutcome getItemOutcome(PrimaryKey primaryKey, - String projectionExpression, Map nameMap) { - return getItemDelegate.getItemOutcome(primaryKey, projectionExpression, - nameMap); - } - - @Override - public GetItemOutcome getItemOutcome(GetItemSpec params) { - return getItemDelegate.getItemOutcome(params); - } - - @Override - public UpdateItemOutcome updateItem(PrimaryKey primaryKey, - AttributeUpdate... attributeUpdates) { - return updateItemDelegate.updateItem(primaryKey, attributeUpdates); - } - - @Override - public UpdateItemOutcome updateItem(PrimaryKey primaryKey, - Collection expected, AttributeUpdate... attributeUpdates) { - return updateItemDelegate.updateItem(primaryKey, expected, - attributeUpdates); - - } - - @Override - public UpdateItemOutcome updateItem(PrimaryKey primaryKey, - String updateExpression, Map nameMap, - Map valueMap) { - return updateItemDelegate.updateItem(primaryKey, updateExpression, - nameMap, valueMap); - } - - @Override - public UpdateItemOutcome updateItem(PrimaryKey primaryKey, - String updateExpression, String conditionExpression, - Map nameMap, Map valueMap) { - return updateItemDelegate.updateItem(primaryKey, updateExpression, - conditionExpression, nameMap, valueMap); - } - - @Override - public UpdateItemOutcome updateItem(UpdateItemSpec updateItemSpec) { - return updateItemDelegate.updateItem(updateItemSpec); - } - - @Override - public ItemCollection query(String hashKeyName, Object hashKeyValue) { - return queryDelegate.query(hashKeyName, hashKeyValue); - } - - @Override - public ItemCollection query(KeyAttribute hashKey) { - return queryDelegate.query(hashKey); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition) { - return queryDelegate.query(hashKey, rangeKeyCondition); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, String filterExpression, - String projectionExpression, Map nameMap, - Map valueMap) { - return queryDelegate.query(hashKey, rangeKeyCondition, - filterExpression, projectionExpression, nameMap, valueMap); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, QueryFilter... queryFilters) { - return queryDelegate.query(hashKey, rangeKeyCondition, queryFilters); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, String filterExpression, - Map nameMap, Map valueMap) { - return queryDelegate.query(hashKey, rangeKeyCondition, - filterExpression, nameMap, valueMap); - } - - @Override - public ItemCollection query(QuerySpec spec) { - return queryDelegate.query(spec); - } - - @Override - public ItemCollection scan(ScanFilter... 
scanFilters) { - return scanDelegate.scan(scanFilters); - } - - @Override - public ItemCollection scan(String filterExpression, - Map nameMap, Map valueMap) { - return scanDelegate.scan(filterExpression, nameMap, valueMap); - } - - @Override - public ItemCollection scan(String filterExpression, - String projectionExpression, Map nameMap, - Map valueMap) { - return scanDelegate.scan(filterExpression, projectionExpression, nameMap, valueMap); - } - - @Override - public ItemCollection scan(ScanSpec params) { - return scanDelegate.scan(params); - } - - @Override - public DeleteItemOutcome deleteItem(KeyAttribute... primaryKeyComponents) { - return deleteItemDelegate.deleteItem(primaryKeyComponents); - } - - @Override - public DeleteItemOutcome deleteItem(PrimaryKey primaryKey) { - return deleteItemDelegate.deleteItem(primaryKey); - } - - @Override - public DeleteItemOutcome deleteItem(PrimaryKey primaryKey, - Expected... expected) { - return deleteItemDelegate.deleteItem(primaryKey, expected); - } - - @Override - public DeleteItemOutcome deleteItem(PrimaryKey primaryKey, - String conditionExpression, Map nameMap, - Map valueMap) { - return deleteItemDelegate.deleteItem(primaryKey, - conditionExpression, nameMap, valueMap); - } - - @Override - public DeleteItemOutcome deleteItem(DeleteItemSpec spec) { - return deleteItemDelegate.deleteItem(spec); - } - - /** - * Updates the provisioned throughput for this table. Setting the - * throughput for a table helps you manage performance and is part of the - * provisioned throughput feature of DynamoDB. - *

- * The provisioned throughput values can be upgraded or downgraded based - * on the maximums and minimums listed in the - * Limits - * section in the Amazon DynamoDB Developer Guide. - * - * This table must be in the ACTIVE state for this operation - * to succeed. UpdateTable is an asynchronous operation; while - * executing the operation, the table is in the UPDATING - * state. While the table is in the UPDATING state, the - * table still has the provisioned throughput from before the call. The - * new provisioned throughput setting is in effect only when the table - * returns to the ACTIVE state after the UpdateTable - * operation. - * - * You can create, update or delete indexes using UpdateTable. - *
    - * - * @param spec used to specify all the detailed parameters - * - * @return the updated table description returned from DynamoDB. - */ - public TableDescription updateTable(UpdateTableSpec spec) { - UpdateTableRequest.Builder reqBuilder = spec.getRequest().toBuilder(); - reqBuilder.tableName(getTableName()); - UpdateTableRequest updated = reqBuilder.build(); - UpdateTableResponse result = client.updateTable(updated); - TableDescription description = result.tableDescription(); - this.tableDescription = description; - - return description; - } - - /** - * Creates a global secondary index (GSI) with only a hash key on this - * table. Involves network calls. This table must be in the - * ACTIVE state for this operation to succeed. Creating a - * global secondary index is an asynchronous operation; while executing the - * operation, the index is in the CREATING state. Once created, - * the index will be in ACTIVE state. - * - * @param create - * used to specify the details of the index creation - * @param hashKeyDefinition - * used to specify the attribute for describing the key schema - * for the hash key of the GSI to be created for this table. - * - * @return the index being created - */ - public Index createGsi( - CreateGlobalSecondaryIndexAction create, - AttributeDefinition hashKeyDefinition) { - return doCreateGsi(create, hashKeyDefinition); - } - - /** - * Creates a global secondary index (GSI) with both a hash key and a range - * key on this table. Involves network calls. This table must be in the - * ACTIVE state for this operation to succeed. Creating a - * global secondary index is an asynchronous operation; while executing the - * operation, the index is in the CREATING state. Once created, - * the index will be in ACTIVE state. - * - * @param create - * used to specify the details of the index creation - * @param hashKeyDefinition - * used to specify the attribute for describing the key schema - * for the hash key of the GSI to be created for this table. - * @param rangeKeyDefinition - * used to specify the attribute for describing the key schema - * for the range key of the GSI to be created for this table. - * - * @return the index being created - */ - public Index createGsi( - CreateGlobalSecondaryIndexAction create, - AttributeDefinition hashKeyDefinition, - AttributeDefinition rangeKeyDefinition) { - return doCreateGsi(create, hashKeyDefinition, rangeKeyDefinition); - } - - private Index doCreateGsi( - CreateGlobalSecondaryIndexAction create, - AttributeDefinition... keyDefinitions) { - UpdateTableSpec spec = new UpdateTableSpec() - .withAttributeDefinitions(keyDefinitions) - .withGlobalSecondaryIndexUpdates(GlobalSecondaryIndexUpdate.builder() - .create(create).build()); - updateTable(spec); - return this.getIndex(create.indexName()); - } - - /** - * Updates the provisioned throughput for this table. Setting the - * throughput for a table helps you manage performance and is part of the - * provisioned throughput feature of DynamoDB. - *

- * The provisioned throughput values can be upgraded or downgraded based - * on the maximums and minimums listed in the - * Limits - * section in the Amazon DynamoDB Developer Guide. - * - * This table must be in the ACTIVE state for this operation - * to succeed. UpdateTable is an asynchronous operation; while - * executing the operation, the table is in the UPDATING - * state. While the table is in the UPDATING state, the - * table still has the provisioned throughput from before the call. The - * new provisioned throughput setting is in effect only when the table - * returns to the ACTIVE state after the UpdateTable - * operation. - * - * You can create, update or delete indexes using UpdateTable. - *
    - * - * @param provisionedThroughput target provisioned throughput - * - * @return the updated table description returned from DynamoDB. - */ - public TableDescription updateTable( - ProvisionedThroughput provisionedThroughput) { - return updateTable(new UpdateTableSpec() - .withProvisionedThroughput(provisionedThroughput)); - } - - /** - * A convenient blocking call that can be used, typically during table - * creation, to wait for the table to become active. This method uses - * {@link software.amazon.awssdk.services.dynamodb.waiters.AmazonDynamoDBWaiters} - * to poll the status of the table every 5 seconds. - * - * @return the table description when the table has become active - * - * @throws IllegalArgumentException if the table is being deleted - * @throws ResourceNotFoundException if the table doesn't exist - */ - public TableDescription waitForActive() throws InterruptedException { - throw new UnsupportedOperationException(); - } - - /** - * A convenient blocking call that can be used, typically during table - * deletion, to wait for the table to become deleted. This method uses - * {@link software.amazon.awssdk.services.dynamodb.waiters.AmazonDynamoDBWaiters} - * to poll the status of the table every 5 seconds. - */ - public void waitForDelete() throws InterruptedException { - throw new UnsupportedOperationException(); - } - - /** - * A convenient blocking call that can be used to wait on a table until it - * has either become active or deleted (ie no longer exists) by polling the - * table every 5 seconds. - * - * @return the table description if the table has become active; or null - * if the table has been deleted. - * - * @deprecated If this method is called immediately after - * {@link DynamoDbClient#createTable(CreateTableRequest)} or - * {@link DynamoDbClient#deleteTable(DeleteTableRequest)} operation, - * the result might be incorrect as all {@link DynamoDbClient} - * operations are eventually consistent and might have a few seconds delay before the status is changed. - */ - @Deprecated - public TableDescription waitForActiveOrDelete() throws InterruptedException { - try { - for (; ; ) { - TableDescription desc = describe(); - if (desc.tableStatus() == TableStatus.ACTIVE) { - return desc; - } else { - Thread.sleep(SLEEP_TIME_MILLIS); - } - } - } catch (ResourceNotFoundException deleted) { - // Ignored or expected. - } - return null; - } - - /** - * A convenient blocking call that can be used to wait on a table and all - * it's indexes until both the table and it's indexes have either become - * active or deleted (ie no longer exists) by polling the table every 5 - * seconds. - * - * @return the table description if the table and all it's indexes have - * become active; or null if the table has been deleted. - * - * @deprecated If this method is called immediately after - * {@link DynamoDbClient#createTable(CreateTableRequest)} or - * {@link DynamoDbClient#deleteTable(DeleteTableRequest)} operation, - * the result might be incorrect as all {@link DynamoDbClient} - * operations are eventually consistent and might have a few seconds delay before the status is changed. 
- */ - @Deprecated - public TableDescription waitForAllActiveOrDelete() throws InterruptedException { - try { - retry: - for (; ; ) { - TableDescription desc = describe(); - if (desc.tableStatus() == TableStatus.ACTIVE) { - List descriptions = - desc.globalSecondaryIndexes(); - if (descriptions != null) { - for (GlobalSecondaryIndexDescription d : descriptions) { - if (d.indexStatus() != IndexStatus.ACTIVE) { - // Some index is not active. Keep waiting. - Thread.sleep(SLEEP_TIME_MILLIS); - continue retry; - } - } - } - return desc; - } - Thread.sleep(SLEEP_TIME_MILLIS); - continue; - } - } catch (ResourceNotFoundException deleted) { - // Ignored or expected. - } - return null; - } - - /** - * Deletes the table from DynamoDB. Involves network calls. - */ - public DeleteTableResponse delete() { - return client.deleteTable(DeleteTableRequest.builder().tableName(tableName).build()); - } - - @Override - public Item getItem(KeyAttribute... primaryKeyComponents) { - return getItemDelegate.getItem(primaryKeyComponents); - } - - @Override - public Item getItem(PrimaryKey primaryKey) { - return getItemDelegate.getItem(primaryKey); - } - - @Override - public Item getItem(PrimaryKey primaryKey, String projectionExpression, - Map nameMap) { - return getItemDelegate.getItem(primaryKey, projectionExpression, nameMap); - } - - @Override - public Item getItem(GetItemSpec spec) { - return getItemDelegate.getItem(spec); - } - - @Override - public GetItemOutcome getItemOutcome(String hashKeyName, Object hashKeyValue) { - return getItemDelegate.getItemOutcome(hashKeyName, hashKeyValue); - } - - @Override - public GetItemOutcome getItemOutcome(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - return getItemDelegate.getItemOutcome(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue); - } - - @Override - public Item getItem(String hashKeyName, Object hashKeyValue) { - return getItemDelegate.getItem(hashKeyName, hashKeyValue); - } - - @Override - public Item getItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - return getItemDelegate.getItem(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition) { - return queryDelegate.query(hashKeyName, hashKeyValue, rangeKeyCondition); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition, - QueryFilter... queryFilters) { - return queryDelegate.query(hashKeyName, hashKeyValue, - rangeKeyCondition, queryFilters); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition, - String filterExpression, Map nameMap, - Map valueMap) { - return queryDelegate.query(hashKeyName, hashKeyValue, - rangeKeyCondition, filterExpression, nameMap, valueMap); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition, - String filterExpression, String projectionExpression, - Map nameMap, Map valueMap) { - return queryDelegate.query(hashKeyName, hashKeyValue, - rangeKeyCondition, filterExpression, projectionExpression, - nameMap, valueMap); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, - Object hashKeyValue, AttributeUpdate... 
attributeUpdates) { - return updateItemDelegate.updateItem(hashKeyName, hashKeyValue, - attributeUpdates); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, - Object hashKeyValue, String rangeKeyName, Object rangeKeyValue, - AttributeUpdate... attributeUpdates) { - return updateItemDelegate.updateItem(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue, attributeUpdates); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, - Object hashKeyValue, Collection expected, - AttributeUpdate... attributeUpdates) { - return updateItemDelegate.updateItem(hashKeyName, hashKeyValue, - expected, attributeUpdates); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, - Object hashKeyValue, String rangeKeyName, Object rangeKeyValue, - Collection expected, AttributeUpdate... attributeUpdates) { - return updateItemDelegate.updateItem(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue, - expected, attributeUpdates); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, - Object hashKeyValue, String updateExpression, - Map nameMap, Map valueMap) { - return updateItemDelegate.updateItem(hashKeyName, hashKeyValue, - updateExpression, nameMap, valueMap); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String updateExpression, - Map nameMap, - Map valueMap) { - return updateItemDelegate.updateItem(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue, - updateExpression, nameMap, valueMap); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String updateExpression, String conditionExpression, - Map nameMap, Map valueMap) { - return updateItemDelegate.updateItem(hashKeyName, hashKeyValue, - updateExpression, conditionExpression, - nameMap, valueMap); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String updateExpression, String conditionExpression, - Map nameMap, Map valueMap) { - return updateItemDelegate.updateItem(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue, - updateExpression, conditionExpression, - nameMap, valueMap); - } - - @Override - public GetItemOutcome getItemOutcome(String hashKeyName, - Object hashKeyValue, String projectionExpression, - Map nameMap) { - return getItemDelegate.getItemOutcome(hashKeyName, hashKeyValue, - projectionExpression, nameMap); - } - - @Override - public GetItemOutcome getItemOutcome(String hashKeyName, - Object hashKeyValue, String rangeKeyName, Object rangeKeyValue, - String projectionExpression, Map nameMap) { - return getItemDelegate.getItemOutcome(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue, projectionExpression, nameMap); - } - - @Override - public Item getItem(String hashKeyName, Object hashKeyValue, - String projectionExpression, Map nameMap) { - return getItemDelegate.getItem(hashKeyName, hashKeyValue, - projectionExpression, nameMap); - } - - @Override - public Item getItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String projectionExpression, Map nameMap) { - return getItemDelegate.getItem(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue, projectionExpression, nameMap); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, Object hashKeyValue) { - return deleteItemDelegate.deleteItem(hashKeyName, hashKeyValue); - } - - 
@Override - public DeleteItemOutcome deleteItem(String hashKeyName, - Object hashKeyValue, String rangeKeyName, Object rangeKeyValue) { - return deleteItemDelegate.deleteItem(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, - Object hashKeyValue, Expected... expected) { - return deleteItemDelegate.deleteItem(hashKeyName, hashKeyValue, - expected); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, - Object hashKeyValue, String rangeKeyName, Object rangeKeyValue, - Expected... expected) { - return deleteItemDelegate.deleteItem(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue, expected); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, - Object hashKeyValue, String conditionExpression, - Map nameMap, Map valueMap) { - return deleteItemDelegate.deleteItem(hashKeyName, hashKeyValue, - conditionExpression, nameMap, valueMap); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, - Object hashKeyValue, String rangeKeyName, Object rangeKeyValue, - String conditionExpression, Map nameMap, - Map valueMap) { - return deleteItemDelegate.deleteItem(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue, - conditionExpression, nameMap, valueMap); - } - - @Override - public String toString() { - return "{" + tableName + ": " + tableDescription + "}"; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableCollection.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableCollection.java deleted file mode 100644 index 7e7fe5ddd8c3..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableCollection.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import software.amazon.awssdk.services.dynamodb.document.internal.PageBasedCollection; -import software.amazon.awssdk.services.dynamodb.document.internal.PageIterable; - -/** - * A collection of Table's (initialized with the respective table - * names). - * - * An TableCollection object maintains a cursor pointing to its - * current pages of data. Initially the cursor is positioned before the first - * page. The next method moves the cursor to the next row, and because it - * returns false when there are no more rows in the TableCollection - * object, it can be used in a while loop to iterate through the collection. - * - * Network calls can be triggered when the collection is iterated across page - * boundaries. - * - * @param low level result type - */ -public abstract class TableCollection extends PageBasedCollection { - - // Overriding these just so javadocs will show up. - - /** - * Returns an {@code Iterable>} that iterates over pages of - * tables from this collection. 
Each call to {@code Iterator.next} on an - * {@code Iterator} returned from this {@code Iterable} results in exactly - * one call to DynamoDB to retrieve a single page of results. - * - * - * TableCollection<?> collection = ...; - * for (Page<Table> page : collection.pages()) { - * processTables(page); - * } - * - *
    - * The use of the internal/undocumented {@code PageIterable} class instead - * of {@code Iterable} in the public interface here is retained for - * backwards compatibility. It doesn't expose any methods beyond those - * of the {@code Iterable} interface. This method will be changed to return - * an {@code Iterable>} directly in a future release of the - * SDK. - * - * @see Page - */ - @Override - public PageIterable pages() { - return super.pages(); - } - - /** - * Returns the maximum number of resources to be retrieved in this - * collection; or null if there is no limit. - */ - @Override - public abstract Integer getMaxResultSize(); - - /** - * Returns the low-level result last retrieved (for the current page) from - * the server side; or null if there has yet no calls to the server. - */ - @Override - public R getLastLowLevelResult() { - return super.getLastLowLevelResult(); - } - - /** - * Used to register a listener for the event of receiving a low-level result - * from the server side. - * - * @param listener - * listener to be registered. If null, a "none" listener will be - * set. - * @return the previously registered listener. The return value is never - * null. - */ - @Override - public LowLevelResultListener registerLowLevelResultListener( - LowLevelResultListener listener) { - - return super.registerLowLevelResultListener(listener); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableKeysAndAttributes.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableKeysAndAttributes.java deleted file mode 100644 index d5fadcb866a0..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableKeysAndAttributes.java +++ /dev/null @@ -1,300 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * Used to specify one or multiple primary keys of a specific table, the - * attributes to be retrieved from that table, and the consistency of the read - * operation in a BatchGetItem request. - */ -public class TableKeysAndAttributes { - private final String tableName; - private List primaryKeys; - private Set attributeNames; - private boolean consistentRead; - - private String projectionExpression; - private Map nameMap; - - public TableKeysAndAttributes(String tableName) { - if (tableName == null || tableName.trim().length() == 0) { - throw new IllegalArgumentException("table name must not be null or empty"); - } - this.tableName = tableName; - } - - /** - * Return the list of primary keys (of the current table) to be included in - * a batch get-item operation. 
- */ - public List getPrimaryKeys() { - return primaryKeys; - } - - /** - * Used to specify multiple primary keys. A primary key could consist of - * either a hash-key or both a hash-key and a range-key depending on the - * schema of the table. - */ - public TableKeysAndAttributes withPrimaryKeys(PrimaryKey... primaryKeys) { - if (primaryKeys == null) { - this.primaryKeys = null; - } else { - Set pkNameSet = null; - for (PrimaryKey pk : primaryKeys) { - if (pkNameSet == null) { - pkNameSet = pk.getComponentNameSet(); - } else { - if (!pkNameSet.equals(pk.getComponentNameSet())) { - throw new IllegalArgumentException( - "primary key attribute names must be consistent for the specified primary keys"); - } - } - } - this.primaryKeys = new ArrayList(Arrays.asList(primaryKeys)); - } - return this; - } - - /** - * Used to specify multiple hash-only primary keys. - * @param hashKeyName hash-only key name - * @param hashKeyValues a list of hash key values - */ - public TableKeysAndAttributes withHashOnlyKeys(String hashKeyName, Object... hashKeyValues) { - if (hashKeyName == null) { - throw new IllegalArgumentException(); - } - PrimaryKey[] primaryKeys = new PrimaryKey[hashKeyValues.length]; - for (int i = 0; i < hashKeyValues.length; i++) { - primaryKeys[i] = new PrimaryKey(hashKeyName, hashKeyValues[i]); - } - return withPrimaryKeys(primaryKeys); - } - - /** - * Used to specify multiple hash-and-range primary keys. - * - * @param hashKeyName - * hash key name - * @param rangeKeyName - * range key name - * @param alternatingHashAndRangeKeyValues - * a list of alternating hash key value and range key value - */ - public TableKeysAndAttributes withHashAndRangeKeys( - String hashKeyName, String rangeKeyName, - Object... alternatingHashAndRangeKeyValues) { - if (hashKeyName == null) { - throw new IllegalArgumentException("hash key name must be specified"); - } - if (rangeKeyName == null) { - throw new IllegalArgumentException("range key name must be specified"); - } - if (alternatingHashAndRangeKeyValues.length % 2 != 0) { - throw new IllegalArgumentException("number of hash and range key values must be the same"); - } - final int len = alternatingHashAndRangeKeyValues.length / 2; - PrimaryKey[] primaryKeys = new PrimaryKey[len]; - for (int i = 0; i < alternatingHashAndRangeKeyValues.length; i += 2) { - primaryKeys[i >> 1] = new PrimaryKey( - hashKeyName, alternatingHashAndRangeKeyValues[i], - rangeKeyName, alternatingHashAndRangeKeyValues[i + 1]); - } - return withPrimaryKeys(primaryKeys); - } - - /** - * Adds a primary key to be included in the batch get-item operation. A - * primary key could consist of either a hash-key or both a - * hash-key and a range-key depending on the schema of the table. - */ - public TableKeysAndAttributes addPrimaryKey(PrimaryKey primaryKey) { - if (primaryKey != null) { - if (primaryKeys == null) { - primaryKeys = new ArrayList(); - } - checkConsistency(primaryKey); - this.primaryKeys.add(primaryKey); - } - return this; - } - - private void checkConsistency(PrimaryKey primaryKey) { - if (this.primaryKeys.size() > 0) { - // use the first one as the representative - final Set nameSet = primaryKeys.get(0).getComponentNameSet(); - if (!nameSet.equals(primaryKey.getComponentNameSet())) { - throw new IllegalArgumentException( - "primary key must be added with consistent key attribute name(s)"); - } - } - } - - /** - * Adds a hash-only primary key to be included in the batch get-item - * operation. 
- * - * @param hashKeyName name of the hash key attribute name - * @param hashKeyValue name of the hash key value - * @return the current instance for method chaining purposes - */ - public TableKeysAndAttributes addHashOnlyPrimaryKey( - String hashKeyName, Object hashKeyValue) { - this.addPrimaryKey(new PrimaryKey(hashKeyName, hashKeyValue)); - return this; - } - - /** - * Adds multiple hash-only primary keys to be included in the batch get-item - * operation. - * - * @param hashKeyName name of the hash key attribute name - * @param hashKeyValues multiple hash key values - * @return the current instance for method chaining purposes - */ - public TableKeysAndAttributes addHashOnlyPrimaryKeys(String hashKeyName, - Object... hashKeyValues) { - for (Object hashKeyValue : hashKeyValues) { - this.addPrimaryKey(new PrimaryKey(hashKeyName, hashKeyValue)); - } - return this; - } - - /** - * Adds multiple hash-and-range primary keys to be included in the batch - * get-item operation. - * - * @param hashKeyName - * name of the hash key attribute name - * @param rangeKeyName - * name of the range key attribute name - * @param alternatingHashRangeKeyValues - * used to specify multiple alternating hash key and range key - * values - * @return the current instance for method chaining purposes - */ - public TableKeysAndAttributes addHashAndRangePrimaryKeys( - String hashKeyName, String rangeKeyName, - Object... alternatingHashRangeKeyValues) { - if (alternatingHashRangeKeyValues.length % 2 != 0) { - throw new IllegalArgumentException( - "The multiple hash and range key values must alternate"); - } - for (int i = 0; i < alternatingHashRangeKeyValues.length; i += 2) { - Object hashKeyValue = alternatingHashRangeKeyValues[i]; - Object rangeKeyValue = alternatingHashRangeKeyValues[i + 1]; - this.addPrimaryKey( - new PrimaryKey() - .addComponent(hashKeyName, hashKeyValue) - .addComponent(rangeKeyName, rangeKeyValue)); - } - return this; - } - - /** - * Adds a primary key (that consists of a hash-key and a range-key) to be - * included in the batch get-item operation. - * - * @param hashKeyName hash key attribute name - * @param hashKeyValue hash key value - * @param rangeKeyName range key attribute name - * @param rangeKeyValue range key value - * @return the current instance for method chaining purposes - */ - public TableKeysAndAttributes addHashAndRangePrimaryKey( - String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - this.addPrimaryKey( - new PrimaryKey() - .addComponent(hashKeyName, hashKeyValue) - .addComponent(rangeKeyName, rangeKeyValue)); - return this; - } - - /** - * Used to specify the attributes to be retrieved in each item returned - * from the batch get-item operation. - * - * @param attributeNames names of the attributes to be retrieved in each - * item returned from the batch get-item operation. - * @return the current instance for method chaining purposes - */ - public TableKeysAndAttributes withAttributeNames(String... 
attributeNames) { - if (attributeNames == null) { - this.attributeNames = null; - } else { - this.attributeNames = Collections.unmodifiableSet( - new LinkedHashSet(Arrays.asList(attributeNames))); - } - return this; - } - - public TableKeysAndAttributes withAttributeNames(List attributeNames) { - if (attributeNames == null) { - this.attributeNames = null; - } else { - this.attributeNames = Collections.unmodifiableSet( - new LinkedHashSet(attributeNames)); - } - return this; - } - - public Set getAttributeNames() { - return attributeNames; - } - - public String getTableName() { - return tableName; - } - - public boolean isConsistentRead() { - return consistentRead; - } - - public TableKeysAndAttributes withConsistentRead(boolean consistentRead) { - this.consistentRead = consistentRead; - return this; - } - - public String getProjectionExpression() { - return projectionExpression; - } - - public TableKeysAndAttributes withProjectionExpression(String projectionExpression) { - this.projectionExpression = projectionExpression; - return this; - } - - public Map nameMap() { - return nameMap; - } - - public TableKeysAndAttributes withNameMap(Map nameMap) { - this.nameMap = nameMap == null - ? null : Collections.unmodifiableMap(new LinkedHashMap(nameMap)); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableKeysAndAttributesTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableKeysAndAttributesTest.java deleted file mode 100644 index 7c98d7f2985e..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableKeysAndAttributesTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.List; -import org.junit.Assert; -import org.junit.Test; - -public class TableKeysAndAttributesTest { - - @Test - public void testNameSetConsistency() { - TableKeysAndAttributes t = new TableKeysAndAttributes("myTable") - .withHashAndRangeKeys( - // specify the hash key name and range key name once - "foo", "bar", - // followed by multiple values - 123, 1, - 123, 2, - 456, 1, - 456, 2, - 456, 3); - List keys = t.getPrimaryKeys(); - Assert.assertTrue(5 == keys.size()); - for (PrimaryKey key : keys) { - Assert.assertTrue(key.getComponentNameSet().contains("foo")); - Assert.assertTrue(key.getComponentNameSet().contains("bar")); - System.out.println(key); - } - System.out.println(keys); - } - - @Test(expected = IllegalArgumentException.class) - public void testNameSetInConsistency() { - new TableKeysAndAttributes("myTable") - .withPrimaryKeys( - new PrimaryKey("foo", 123, "bar", 345), - new PrimaryKey("foo", 123, "ba", 345)); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableWriteItems.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableWriteItems.java deleted file mode 100644 index d0bcc2e6bf28..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/TableWriteItems.java +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Set; - -/** - * Used to specify items to be put and/or primary keys to be deleted from a - * specific table in a BatchWriteItem request. - */ -public class TableWriteItems { - private final String tableName; - private List primaryKeysToDelete; - private Collection itemsToPut; - - public TableWriteItems(String tableName) { - if (tableName == null || tableName.trim().length() == 0) { - throw new IllegalArgumentException("table name must not be null or empty"); - } - this.tableName = tableName; - } - - /** - * Return the list of primary keys (of the current table) to be deleted in - * a batch write operation. - */ - public List getPrimaryKeysToDelete() { - return primaryKeysToDelete; - } - - /** - * Used to specify multiple primary keys to be deleted from the current - * table. A primary key could consist of either a hash-key or both a - * hash-key and a range-key depending on the schema of the table. - */ - public TableWriteItems withPrimaryKeysToDelete( - PrimaryKey... 
primaryKeysToDelete) { - if (primaryKeysToDelete == null) { - this.primaryKeysToDelete = null; - } else { - Set pkNameSet = null; - for (PrimaryKey pk : primaryKeysToDelete) { - if (pkNameSet == null) { - pkNameSet = pk.getComponentNameSet(); - } else { - if (!pkNameSet.equals(pk.getComponentNameSet())) { - throw new IllegalArgumentException( - "primary key attribute names must be consistent for the specified primary keys"); - } - } - } - this.primaryKeysToDelete = new ArrayList( - Arrays.asList(primaryKeysToDelete)); - } - return this; - } - - /** - * Used to specify multiple hash-only primary keys to be deleted from the - * current table. - * - * @param hashKeyName - * hash-only key name - * @param hashKeyValues - * a list of hash key values - */ - public TableWriteItems withHashOnlyKeysToDelete(String hashKeyName, - Object... hashKeyValues) { - if (hashKeyName == null) { - throw new IllegalArgumentException(); - } - PrimaryKey[] primaryKeys = new PrimaryKey[hashKeyValues.length]; - for (int i = 0; i < hashKeyValues.length; i++) { - primaryKeys[i] = new PrimaryKey(hashKeyName, hashKeyValues[i]); - } - return withPrimaryKeysToDelete(primaryKeys); - } - - /** - * Used to specify multiple hash-and-range primary keys to be deleted - * from the current table. - * - * @param hashKeyName - * hash key name - * @param rangeKeyName - * range key name - * @param alternatingHashAndRangeKeyValues - * a list of alternating hash key value and range key value - */ - public TableWriteItems withHashAndRangeKeysToDelete( - String hashKeyName, String rangeKeyName, - Object... alternatingHashAndRangeKeyValues) { - if (hashKeyName == null) { - throw new IllegalArgumentException("hash key name must be specified"); - } - if (rangeKeyName == null) { - throw new IllegalArgumentException("range key name must be specified"); - } - if (alternatingHashAndRangeKeyValues.length % 2 != 0) { - throw new IllegalArgumentException("number of hash and range key values must be the same"); - } - final int len = alternatingHashAndRangeKeyValues.length / 2; - PrimaryKey[] primaryKeys = new PrimaryKey[len]; - for (int i = 0; i < alternatingHashAndRangeKeyValues.length; i += 2) { - primaryKeys[i >> 1] = new PrimaryKey( - hashKeyName, alternatingHashAndRangeKeyValues[i], - rangeKeyName, alternatingHashAndRangeKeyValues[i + 1]); - } - return withPrimaryKeysToDelete(primaryKeys); - } - - /** - * Adds a primary key to be deleted in a batch write-item operation. A - * primary key could consist of either a hash-key or both a - * hash-key and a range-key depending on the schema of the table. - */ - public TableWriteItems addPrimaryKeyToDelete(PrimaryKey primaryKey) { - if (primaryKey != null) { - if (primaryKeysToDelete == null) { - primaryKeysToDelete = new ArrayList(); - } - checkConsistency(primaryKey); - this.primaryKeysToDelete.add(primaryKey); - } - return this; - } - - private void checkConsistency(PrimaryKey primaryKey) { - if (this.primaryKeysToDelete.size() > 0) { - // use the first one as the representative - final Set nameSet = primaryKeysToDelete.get(0).getComponentNameSet(); - if (!nameSet.equals(primaryKey.getComponentNameSet())) { - throw new IllegalArgumentException( - "primary key must be added with consistent key attribute name(s)"); - } - } - } - - /** - * Adds a hash-only primary key to be deleted in a batch write - * operation. 
- * - * @param hashKeyName name of the hash key attribute name - * @param hashKeyValue name of the hash key value - * @return the current instance for method chaining purposes - */ - public TableWriteItems addHashOnlyPrimaryKeyToDelete( - String hashKeyName, Object hashKeyValue) { - this.addPrimaryKeyToDelete(new PrimaryKey(hashKeyName, hashKeyValue)); - return this; - } - - /** - * Adds multiple hash-only primary keys to be deleted in a batch write - * operation. - * - * @param hashKeyName name of the hash key attribute name - * @param hashKeyValues multiple hash key values - * @return the current instance for method chaining purposes - */ - public TableWriteItems addHashOnlyPrimaryKeysToDelete(String hashKeyName, - Object... hashKeyValues) { - for (Object hashKeyValue : hashKeyValues) { - this.addPrimaryKeyToDelete(new PrimaryKey(hashKeyName, hashKeyValue)); - } - return this; - } - - /** - * Adds multiple hash-and-range primary keys to be deleted in a batch - * write operation. - * - * @param hashKeyName - * name of the hash key attribute name - * @param rangeKeyName - * name of the range key attribute name - * @param alternatingHashRangeKeyValues - * used to specify multiple alternating hash key and range key - * values - * @return the current instance for method chaining purposes - */ - public TableWriteItems addHashAndRangePrimaryKeysToDelete( - String hashKeyName, String rangeKeyName, - Object... alternatingHashRangeKeyValues) { - if (alternatingHashRangeKeyValues.length % 2 != 0) { - throw new IllegalArgumentException( - "The multiple hash and range key values must alternate"); - } - for (int i = 0; i < alternatingHashRangeKeyValues.length; i += 2) { - Object hashKeyValue = alternatingHashRangeKeyValues[i]; - Object rangeKeyValue = alternatingHashRangeKeyValues[i + 1]; - this.addPrimaryKeyToDelete( - new PrimaryKey() - .addComponent(hashKeyName, hashKeyValue) - .addComponent(rangeKeyName, rangeKeyValue)); - } - return this; - } - - /** - * Adds a primary key (that consists of a hash-key and a range-key) to be - * deleted in a batch write operation. - * - * @param hashKeyName hash key attribute name - * @param hashKeyValue hash key value - * @param rangeKeyName range key attribute name - * @param rangeKeyValue range key value - * @return the current instance for method chaining purposes - */ - public TableWriteItems addHashAndRangePrimaryKeyToDelete( - String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - this.addPrimaryKeyToDelete( - new PrimaryKey() - .addComponent(hashKeyName, hashKeyValue) - .addComponent(rangeKeyName, rangeKeyValue)); - return this; - } - - /** - * Used to specify the items to be put in the current table in a batch write - * operation. - * - * @return the current instance for method chaining purposes - */ - public TableWriteItems withItemsToPut(Item... itemsToPut) { - if (itemsToPut == null) { - this.itemsToPut = null; - } else { - this.itemsToPut = new ArrayList(Arrays.asList(itemsToPut)); - } - return this; - } - - /** - * Used to specify the collection of items to be put in the current table in - * a batch write operation. - * - * @return the current instance for method chaining purposes - */ - public TableWriteItems withItemsToPut(Collection itemsToPut) { - if (itemsToPut == null) { - this.itemsToPut = null; - } else { - this.itemsToPut = new ArrayList(itemsToPut); - } - return this; - } - - /** - * Returns the collection of items to be put in the current table in - * a batch write operation. 
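// Illustrative sketch of assembling the write set for one table in a batch write;
// the table name, key names, and item attributes here are hypothetical.
Map<String, Object> newOrder = new LinkedHashMap<>();
newOrder.put("customerId", "A123");
newOrder.put("orderId", 9);
newOrder.put("status", "NEW");
TableWriteItems orderWrites = new TableWriteItems("Order")
        .withItemsToPut(Item.fromMap(newOrder))
        .withHashAndRangeKeysToDelete("customerId", "orderId",
                // alternating hash key and range key values of the items to delete
                "A123", 7,
                "B456", 3);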
- */ - public Collection getItemsToPut() { - return itemsToPut == null - ? null - : Collections.unmodifiableCollection(itemsToPut); - } - - public String getTableName() { - return tableName; - } - - /** - * Adds an item to be put to the current table in a batch write operation. - */ - public TableWriteItems addItemToPut(Item item) { - if (item != null) { - if (itemsToPut == null) { - itemsToPut = new ArrayList(); - } - this.itemsToPut.add(item); - } - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/UpdateItemOutcome.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/UpdateItemOutcome.java deleted file mode 100644 index 6169b8906d44..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/UpdateItemOutcome.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document; - -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; - -/** - * The outcome of updating an item in a DynamoDB table. - */ -public class UpdateItemOutcome { - private final UpdateItemResponse result; - - /** - * @param result the low-level result; must not be null - */ - public UpdateItemOutcome(UpdateItemResponse result) { - if (result == null) { - throw new IllegalArgumentException(); - } - this.result = result; - } - - /** - * Returns all the returned attributes as a (non-null) {@link Item}. - */ - public Item getItem() { - Map attributes = - InternalUtils.toSimpleMapValue(result.attributes()); - Item item = Item.fromMap(attributes); - return item; - } - - /** - * Returns a non-null low-level result returned from the server side. - */ - public UpdateItemResponse getUpdateItemResponse() { - return result; - } - - @Override - public String toString() { - return String.valueOf(result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/BatchGetItemApi.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/BatchGetItemApi.java deleted file mode 100644 index 599c4f35c7c5..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/BatchGetItemApi.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.api; - -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.document.BatchGetItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.TableKeysAndAttributes; -import software.amazon.awssdk.services.dynamodb.document.spec.BatchGetItemSpec; -import software.amazon.awssdk.services.dynamodb.model.KeysAndAttributes; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; - -/** - * DynamoDB BatchGetItem API that can be used to retrieve multiple items from - * multiple tables in one request/response by specifying one or multiple primary - * keys per table in the request. - */ -@ThreadSafe -public interface BatchGetItemApi { - /** - * Used to perform a batch get-item operation from DynamoDB. - * - * @param returnConsumedCapacity - * returned capacity to be returned - * @param tableKeyAndAttributes - * the tables, keys, and attributes specification to be used to - * retrieve the items. - */ - BatchGetItemOutcome batchGetItem( - ReturnConsumedCapacity returnConsumedCapacity, - TableKeysAndAttributes... tableKeyAndAttributes); - - /** - * Used to perform a batch get-item operation from DynamoDB. - * - * @param tableKeyAndAttributes - * the tables, keys, and attributes specification to be used to - * retrieve the items. - */ - BatchGetItemOutcome batchGetItem( - TableKeysAndAttributes... tableKeyAndAttributes); - - /** - * Used to perform a batch get-item operation from DynamoDB with full - * parameter specification. - */ - BatchGetItemOutcome batchGetItem(BatchGetItemSpec spec); - - /** - * Used to perform a batch get-item for the unprocessed keys returned from a - * previous batch get-item operation. - * - * @param returnConsumedCapacity - * returned capacity to be returned - * @param unprocessedKeys - * the unprocessed keys returned from the result of a previous - * batch-get-item operation. - * - * @see BatchGetItemOutcome#getUnprocessedKeys() - */ - BatchGetItemOutcome batchGetItemUnprocessed( - ReturnConsumedCapacity returnConsumedCapacity, - Map unprocessedKeys); - - /** - * Used to perform a batch get-item for the unprocessed keys returned from a - * previous batch get-item operation. - * - * @param unprocessedKeys - * the unprocessed keys returned from the result of a previous - * batch-get-item operation. - * - * @see BatchGetItemOutcome#getUnprocessedKeys() - */ - BatchGetItemOutcome batchGetItemUnprocessed( - Map unprocessedKeys); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/BatchWriteItemApi.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/BatchWriteItemApi.java deleted file mode 100644 index de51ec95b11c..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/BatchWriteItemApi.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.api; - -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.document.BatchWriteItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.TableWriteItems; -import software.amazon.awssdk.services.dynamodb.document.spec.BatchWriteItemSpec; -import software.amazon.awssdk.services.dynamodb.model.WriteRequest; - -/** - * DynamoDB BatchWriteItem API that can be used to put multiple items to and/or - * delete multiple items from multiple tables in a single request-response - * to/from DynamoDB. - */ -@ThreadSafe -public interface BatchWriteItemApi { - - /** - * Used to perform a batch write operation to DynamoDB. - * - * @param tableWriteItems - * the tables and the respective keys to delete from and/or the - * respective items to be put. - */ - BatchWriteItemOutcome batchWriteItem( - TableWriteItems... tableWriteItems); - - /** - * Used to perform a batch write operation to DynamoDB with full parameter - * specification. - */ - BatchWriteItemOutcome batchWriteItem(BatchWriteItemSpec spec); - - /** - * Used to perform a batch write operation for the unprocessed items - * returned from a previous batch write operation. - * - * @param unprocessedItems - * the unprocessed items returned from the result of a previous - * batch write operation - * - * @see BatchWriteItemOutcome#getUnprocessedItems() - */ - BatchWriteItemOutcome batchWriteItemUnprocessed( - Map> unprocessedItems); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/DeleteItemApi.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/DeleteItemApi.java deleted file mode 100644 index 292707b45103..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/DeleteItemApi.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.api; - -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.document.DeleteItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.spec.DeleteItemSpec; - -/** - * A Table-centric DeleteItem API. 
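// Illustrative sketch of a conditional delete through this interface, assuming
// "table" refers to an object implementing DeleteItemApi; all names are hypothetical.
DeleteItemOutcome outcome = table.deleteItem(
        "customerId", "A123",                                           // hash key
        "orderId", 7,                                                   // range key
        "#s = :cancelled",                                              // condition expression
        Collections.singletonMap("#s", "status"),                       // attribute-name placeholders
        Collections.singletonMap(":cancelled", (Object) "CANCELLED"));  // value placeholders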
- */ -@ThreadSafe -public interface DeleteItemApi { - /** Deletes an item by primary key. */ - DeleteItemOutcome deleteItem(KeyAttribute... primaryKeyComponents); - - /** Deletes an item by primary key. */ - DeleteItemOutcome deleteItem(PrimaryKey primaryKey); - - /** Deletes an item by hash-only primary key. */ - DeleteItemOutcome deleteItem(String hashKeyName, Object hashKeyValue); - - /** Deletes an item by hash key-and-range primary key. */ - DeleteItemOutcome deleteItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue); - - /** - * Conditional delete with the specified primary key and expected - * conditions. - */ - DeleteItemOutcome deleteItem(PrimaryKey primaryKey, - Expected... expected); - - /** - * Conditional delete with the specified hash-only primary key and expected - * conditions. - */ - DeleteItemOutcome deleteItem(String hashKeyName, Object hashKeyValue, - Expected... expected); - - /** - * Conditional delete with the specified hash-and-range primary key and - * expected conditions. - */ - DeleteItemOutcome deleteItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - Expected... expected); - - /** - * Conditional delete with the specified primary key and condition - * expression. - */ - DeleteItemOutcome deleteItem(PrimaryKey primaryKey, - String conditionExpression, - Map nameMap, - Map valueMap); - - /** - * Conditional delete with the specified hash-only primary key and condition - * expression. - */ - DeleteItemOutcome deleteItem(String hashKeyName, Object hashKeyValue, - String conditionExpression, - Map nameMap, - Map valueMap); - - /** - * Conditional delete with the specified hash-and-range primary key and - * condition expression. - */ - DeleteItemOutcome deleteItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String conditionExpression, - Map nameMap, - Map valueMap); - - /** Deletes an item by specifying all the details. */ - DeleteItemOutcome deleteItem(DeleteItemSpec spec); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/GetItemApi.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/GetItemApi.java deleted file mode 100644 index 262e5fcff6fc..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/GetItemApi.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.api; - -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.document.GetItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.spec.GetItemSpec; - -/** - * A Table-centric GetItem API. - *

    - * In general, all getter methods in this library incur no network. - * GetItemApi is the only exception due to the fact that the - * web service API is indistinguishable from a Java getter method. - */ -@ThreadSafe -public interface GetItemApi { - - /** - * Retrieves an item and the associated information by primary key. Incurs - * network access. - * - * @return the (non-null) result of item retrieval. - */ - GetItemOutcome getItemOutcome(PrimaryKey primaryKey); - - /** - * Retrieves an item and the associated information by primary key when the - * primary key is a hash-only key. Incurs network access. - * - * @return the (non-null) result of item retrieval. - */ - GetItemOutcome getItemOutcome(KeyAttribute... primaryKeyComponents); - - /** - * Retrieves an item and the associated information by primary key when the - * primary key is a hash-only key. Incurs network access. - * - * @return the (non-null) result of item retrieval. - */ - GetItemOutcome getItemOutcome(String hashKeyName, Object hashKeyValue); - - /** - * Retrieves an item and the associated information by primary key when the - * primary key consists of both a hash-key and a range-key. Incurs network - * access. - * - * @return the (non-null) result of item retrieval. - */ - GetItemOutcome getItemOutcome(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue); - - /** - * Retrieves an item and the associated information using projection - * expression. Incurs network access. - * - * @param projectionExpression - * projection expression, example: "a.b , c[0].e" - * - * @param nameMap - * actual values for the attribute-name place holders; can be - * null if there is no attribute-name placeholder. - * - * @return the (non-null) result of item retrieval. - */ - GetItemOutcome getItemOutcome(PrimaryKey primaryKey, - String projectionExpression, Map nameMap); - - /** - * Retrieves an item and the associated information via the specified hash - * key using projection expression. Incurs network access. - * - * @return the (non-null) result of item retrieval. - */ - GetItemOutcome getItemOutcome(String hashKeyName, Object hashKeyValue, - String projectionExpression, Map nameMap); - - /** - * Retrieves an item and the associated information via the specified hash - * key and range key using projection expression. Incurs network access. - * - * @return the (non-null) result of item retrieval. - */ - GetItemOutcome getItemOutcome(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String projectionExpression, Map nameMap); - - /** - * Retrieves an item via the specified hash key using projection expression. - * Incurs network access. - * - * @return the retrieved item; or null if the item doesn't exist. - */ - Item getItem(String hashKeyName, Object hashKeyValue, - String projectionExpression, Map nameMap); - - /** - * Retrieves an item via the specified hash key and range key using - * projection expression. Incurs network access. - * - * @return the retrieved item; or null if the item doesn't exist. - */ - Item getItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String projectionExpression, Map nameMap); - - /** - * Retrieves an item and the associated information by specifying all the - * details. Incurs network access. - * - * @return the (non-null) result of item retrieval. - */ - GetItemOutcome getItemOutcome(GetItemSpec spec); - - /** - * Retrieves an item by primary key; or null if the item doesn't exist. 
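// Illustrative sketch of a projection-expression read through this interface, assuming
// "table" refers to an object implementing GetItemApi; names here are hypothetical.
Item order = table.getItem(
        "customerId", "A123",                        // hash key
        "orderId", 7,                                // range key
        "#s, lines[0].price",                        // projection expression
        Collections.singletonMap("#s", "status"));   // attribute-name placeholders
if (order == null) {
    // the requested item does not exist
}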
- * Incurs network access. - * - * @return the retrieved item; or null if the item doesn't exist. - */ - Item getItem(PrimaryKey primaryKey); - - /** - * Retrieves an item by primary key. Incurs network access. - * - * @return the retrieved item; or null if the item doesn't exist. - */ - Item getItem(KeyAttribute... primaryKeyComponents); - - /** - * Retrieves an item by primary key when the primary key is a hash-only key. - * Incurs network access. - * - * @return the retrieved item; or null if the item doesn't exist. - */ - Item getItem(String hashKeyName, Object hashKey); - - /** - * Retrieves an item by primary key when the primary key consists of both a - * hash-key and a range-key. Incurs network access. - * - * @return the retrieved item; or null if the item doesn't exist. - */ - Item getItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue); - - /** - * Retrieves an item using projection expression. Incurs network access. - * - * @param projectionExpression - * projection expression, example: "a.b , c[0].e" - * - * @param nameMap - * actual values for the attribute-name place holders; can be - * null if there is no attribute-name placeholder. - * - * @return the retrieved item; or null if the item doesn't exist. - */ - Item getItem(PrimaryKey primaryKey, String projectionExpression, - Map nameMap); - - /** - * Retrieves an item by specifying all the details. Incurs network access. - * - * @return the retrieved item; or null if the item doesn't exist. - */ - Item getItem(GetItemSpec spec); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/ListTablesApi.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/ListTablesApi.java deleted file mode 100644 index 2c16e9792010..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/ListTablesApi.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.api; - -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.document.TableCollection; -import software.amazon.awssdk.services.dynamodb.document.spec.ListTablesSpec; -import software.amazon.awssdk.services.dynamodb.model.ListTablesResponse; - -/** - * DynamoDB ListTables API. - */ -@ThreadSafe -public interface ListTablesApi { - /** - * Returns a collection of all the tables (initialized with the respective - * table names) associated with the current account and endpoint. The - * underlying collection is paginated with a page size of 100. A network - * call is made whenever the collection is iterated across a page boundary. 
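// Illustrative sketch of iterating the paginated listing, assuming "dynamoDb" exposes
// this interface, that TableCollection is parameterized by the low-level
// ListTablesResponse, and that iterating it yields Table objects.
TableCollection<ListTablesResponse> tables = dynamoDb.listTables();
for (Table table : tables) {
    System.out.println(table.getTableName());   // assumes Table exposes getTableName()
}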
- */ - TableCollection listTables(); - - /** - * Returns a collection of tables (initialized with the respective table - * names) associated with the current account and endpoint, starting with a - * name after the specified exclusiveStartTableName . The - * underlying collection is paginated with a page size of 100. A network - * call is made whenever the collection is iterated across a page boundary. - * - * @param exclusiveStartTableName - * The first table name that this operation will evaluate, - * exclusive of the specified - * exclusiveStartTableName. Use the value that was - * returned for LastEvaluatedTableName in a previous - * operation, so that you can obtain the next page of results. - */ - TableCollection listTables(String exclusiveStartTableName); - - /** - * Returns a collection of tables (initialized with the respective table - * names) up to the specified maxResultSize associated with - * the current account and endpoint, starting with a name after the - * specified exclusiveStartTableName. The underlying collection - * is paginated with a page size of 100. A network call is made whenever the - * collection is iterated across a page boundary. - * - * @param exclusiveStartTableName - * The first table name that this operation will evaluate - * exclusive of the specified - * exclusiveStartTableName. Use the value that was - * returned for LastEvaluatedTableName in a previous - * operation, so that you can obtain the next page of results. - * @param maxResultSize - * A maximum number of table names to return. - */ - TableCollection listTables(String exclusiveStartTableName, int maxResultSize); - - /** - * Returns a collection of tables (initialized with the respective table - * names) up to the specified maxResultSize associated with - * the current account and endpoint. The underlying collection - * is paginated with a page size of 100. A network call is made whenever the - * collection is iterated across a page boundary. - * - * @param maxResultSize - * A maximum number of table names to return. - */ - TableCollection listTables(int maxResultSize); - - /** - * List tables by specifying all the details. The underlying collection is - * paginated with the specified page size (which defaults to 100). A network - * call is made whenever the collection is iterated across a page boundary. - * - * - * @param spec - * can be used to specify all the detailed parameters of listing - * tables. - * - * @return a collection of tables associated with the current account and - * endpoint. - */ - TableCollection listTables(ListTablesSpec spec); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/PutItemApi.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/PutItemApi.java deleted file mode 100644 index b7485c186848..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/PutItemApi.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.api; - -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.PutItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.PutItemSpec; - -/** - * A Table-centric PutItem API. - */ -@ThreadSafe -public interface PutItemApi { - /** - * Unconditional put. - */ - PutItemOutcome putItem(Item item); - - /** - * Conditional put. - */ - PutItemOutcome putItem(Item item, Expected... expected); - - /** - * Conditional put via condition expression. - */ - PutItemOutcome putItem(Item item, String conditionExpression, - Map nameMap, Map valueMap); - - /** Puts an item by specifying all the details. */ - PutItemOutcome putItem(PutItemSpec spec); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/QueryApi.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/QueryApi.java deleted file mode 100644 index 0d79bb211228..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/QueryApi.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.api; - -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.document.ItemCollection; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.QueryFilter; -import software.amazon.awssdk.services.dynamodb.document.QueryOutcome; -import software.amazon.awssdk.services.dynamodb.document.RangeKeyCondition; -import software.amazon.awssdk.services.dynamodb.document.spec.QuerySpec; - -/** - * A Table-centric Query API. - */ -@ThreadSafe -public interface QueryApi { - /** - * Retrieves items by the specified hash key. - */ - ItemCollection query(String hashKeyName, Object hashKeyValue); - - /** - * Retrieves items by the specified hash key. - */ - ItemCollection query(KeyAttribute hashKey); - - /** - * Retrieves items by the specified hash key and a range key condition. - */ - ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition); - - ItemCollection query(String hashKeyName, Object hashKeyValue, - RangeKeyCondition rangeKeyCondition); - - /** - * Retrieves items by the specified hash key, a range key condition - * and a list of query filters. - */ - ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, QueryFilter... 
queryFilters); - - ItemCollection query(String hashKeyName, Object hashKeyValue, - RangeKeyCondition rangeKeyCondition, QueryFilter... queryFilters); - - /** - * Retrieves items by the specified hash key, a range key condition, and - * a filter expression string. - * - * @param filterExpression filter expression - * example: "(#a > :a) AND (#c > :c OR #e < :e)" - * - * @param nameMap actual values for the attribute-name place holders; - * can be null if there is no attribute-name placeholder. - * @param valueMap actual values for the value place holders - * can be null if there is no attribute-value placeholder. - */ - ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, - String filterExpression, - Map nameMap, - Map valueMap); - - ItemCollection query(String hashKeyName, Object hashKeyValue, - RangeKeyCondition rangeKeyCondition, - String filterExpression, - Map nameMap, - Map valueMap); - - /** - * Retrieves items by the specified hash key, a range key condition, - * a filter expression and a projection expression. - * - * @param filterExpression filter expression - * example: "(#a > :a) AND (#c > :c OR #e < :e)" - * - * @param projectionExpression projection expression - * example: "a.b, c[0].e" - * - * @param nameMap actual values for the attribute-name place holders; - * can be null if there is no attribute-name placeholder. - * @param valueMap actual values for the value place holders - * can be null if there is no attribute-value placeholder. - */ - ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, - String filterExpression, - String projectionExpression, - Map nameMap, - Map valueMap); - - /** - * Retrieves items by the specified hash key, a range key condition, - * a filter expression and a projection expression. - * - * @param filterExpression filter expression - * example: "(#a > :a) AND (#c > :c OR #e < :e)" - * - * @param projectionExpression projection expression - * example: "a.b, c[0].e" - * - * @param nameMap actual values for the attribute-name place holders; - * can be null if there is no attribute-name placeholder. - * @param valueMap actual values for the value place holders - * can be null if there is no attribute-value placeholder. - */ - ItemCollection query(String hashKeyName, Object hashKeyValue, - RangeKeyCondition rangeKeyCondition, - String filterExpression, - String projectionExpression, - Map nameMap, - Map valueMap); - - /** - * Queries table by specifying all the details. - */ - ItemCollection query(QuerySpec spec); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/ScanApi.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/ScanApi.java deleted file mode 100644 index f423e927ec9c..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/ScanApi.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.api; - -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.document.ItemCollection; -import software.amazon.awssdk.services.dynamodb.document.ScanFilter; -import software.amazon.awssdk.services.dynamodb.document.ScanOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.ScanSpec; - -/** - * A Table-centric Scan API. - */ -@ThreadSafe -public interface ScanApi { - /** - * Retrieves items by the specified list of scan filters. - */ - ItemCollection scan(ScanFilter... scanFilters); - - /** - * Scans table using a Filter Expression. - * - * @param filterExpression - * condition expression example: - * "(#a > :a) AND (#c > :c OR #e < :e)" - * - * @param nameMap - * actual values for the attribute-name place holders; can be - * null if there is no attribute-name placeholder. - * @param valueMap - * actual values for the value place holders can be null if there - * is no attribute-value placeholder. - */ - ItemCollection scan(String filterExpression, - Map nameMap, - Map valueMap); - - /** - * Scans table using a Filter Expression and a Projection Expression. - * - * @param filterExpression - * condition expression example: - * "(#a > :a) AND (#c > :c OR #e < :e)" - * @param projectionExpression - * projection expression example: "a.b , c[0].e" - * - * @param nameMap actual values for the attribute-name place holders; - * can be null if there is no attribute-name placeholder. - * @param valueMap actual values for the value place holders - * can be null if there is no attribute-value placeholder. - */ - ItemCollection scan( - String filterExpression, - String projectionExpression, - Map nameMap, - Map valueMap); - - /** - * Scans table by specifying all the details. - */ - ItemCollection scan(ScanSpec params); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/UpdateItemApi.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/UpdateItemApi.java deleted file mode 100644 index 6a3bd4661bbb..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/api/UpdateItemApi.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.api; - -import java.util.Collection; -import java.util.Map; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.services.dynamodb.document.AttributeUpdate; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.UpdateItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.UpdateItemSpec; - -/** - * A Table-centric UpdateItem API. - */ -@ThreadSafe -public interface UpdateItemApi { - - /** - * Updates an item with the attributes specified. - * - * @param primaryKey - * primary key of the item to be updated - * @param attributeUpdates - * attributes to be updated - */ - UpdateItemOutcome updateItem(PrimaryKey primaryKey, - AttributeUpdate... attributeUpdates); - - UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - AttributeUpdate... attributeUpdates); - - UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - AttributeUpdate... attributeUpdates); - - /** - * Updates an item with the attributes specified. - * - * @param primaryKey - * primary key of the item to be updated - * @param expected - * the condition to match for the update to succeed. - * @param attributeUpdates - * attributes to be updated - */ - UpdateItemOutcome updateItem(PrimaryKey primaryKey, - Collection expected, AttributeUpdate... attributeUpdates); - - /** - * Updates an item with the specified hash-only key and attributes. - */ - UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - Collection expected, AttributeUpdate... attributeUpdates); - - /** - * Updates an item with the specified hash key, range key and attributes. - */ - UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - Collection expected, AttributeUpdate... attributeUpdates); - - /** - * Performs an update on an item in the table using the given update - * expression string. - * - * @param primaryKey - * primary key of the item to be updated - * @param updateExpression - * the update expression that specifies the attributes to be - * updated. - * @param nameMap - * the map containing the mapping between attribute names used in - * update expression and the actual name of the attributes - * @param valueMap - * the map containing the mapping between the attribute value - * used in update expression and the actual value of the - * attribute - */ - UpdateItemOutcome updateItem(PrimaryKey primaryKey, - String updateExpression, Map nameMap, - Map valueMap); - - UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String updateExpression, Map nameMap, - Map valueMap); - - UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String updateExpression, Map nameMap, - Map valueMap); - - /** - * Updates an item with the specified primary key using the given - * update expression provided the condition expression evaluates to true. - * - * @param primaryKey - * primary key of the item to be updated - * @param updateExpression - * the update expression that specifies the attributes to be - * updated. 
- * @param conditionExpression - * the condition expression that specifies the condition that - * needs to be evaluated to true - * @param nameMap - * the map containing the mapping between attribute names used in - * update and condition expression and the actual name of the - * attributes - * @param valueMap - * the map containing the mapping between the attribute value - * used in update and condition expression and the actual value - * of the attribute - */ - UpdateItemOutcome updateItem(PrimaryKey primaryKey, - String updateExpression, String conditionExpression, - Map nameMap, Map valueMap); - - /** - * Updates an item with the specified hash key using the given - * update expression provided the condition expression evaluates to true. - */ - UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String updateExpression, String conditionExpression, - Map nameMap, Map valueMap); - - /** - * Updates an item with the specified hash key and range key using the given - * update expression provided the condition expression evaluates to true. - */ - UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String updateExpression, String conditionExpression, - Map nameMap, Map valueMap); - - /** - * Performs an update on an item in the table by specifying all the details. - * - * @param updateItemSpec - * the update specification for the item to be updated. - */ - UpdateItemOutcome updateItem(UpdateItemSpec updateItemSpec); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/AbstractImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/AbstractImpl.java deleted file mode 100644 index d20545047122..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/AbstractImpl.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.Table; - -/** - * Internal common base class for API implementations. - */ -abstract class AbstractImpl { - private final Table table; - private final DynamoDbClient client; - - protected AbstractImpl(DynamoDbClient client, Table table) { - this.client = client; - this.table = table; - } - - /** - * Returns the owning table. 
- */ - public final Table getTable() { - return table; - } - - final DynamoDbClient getClient() { - return client; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/BatchGetItemImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/BatchGetItemImpl.java deleted file mode 100644 index cd6d81bf41cc..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/BatchGetItemImpl.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.BatchGetItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.TableKeysAndAttributes; -import software.amazon.awssdk.services.dynamodb.document.api.BatchGetItemApi; -import software.amazon.awssdk.services.dynamodb.document.spec.BatchGetItemSpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemResponse; -import software.amazon.awssdk.services.dynamodb.model.KeysAndAttributes; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; - -/** - * The implementation for BatchGetItemApi. - */ -public class BatchGetItemImpl implements BatchGetItemApi { - private final DynamoDbClient client; - - public BatchGetItemImpl(DynamoDbClient client) { - this.client = client; - } - - @Override - public BatchGetItemOutcome batchGetItem( - ReturnConsumedCapacity returnConsumedCapacity, - TableKeysAndAttributes... tableKeysAndAttributes) { - return doBatchGetItem(new BatchGetItemSpec() - .withReturnConsumedCapacity(returnConsumedCapacity) - .withTableKeyAndAttributes(tableKeysAndAttributes)); - } - - @Override - public BatchGetItemOutcome batchGetItem( - TableKeysAndAttributes... 
tableKeysAndAttributes) { - return doBatchGetItem(new BatchGetItemSpec() - .withTableKeyAndAttributes(tableKeysAndAttributes)); - } - - @Override - public BatchGetItemOutcome batchGetItem(BatchGetItemSpec spec) { - return doBatchGetItem(spec); - } - - private BatchGetItemOutcome doBatchGetItem(BatchGetItemSpec spec) { - final Collection tableKeysAndAttributesCol = - spec.getTableKeysAndAttributes(); - // Unprocessed keys take precedence - Map requestItems = spec.getUnprocessedKeys(); - if (requestItems == null || requestItems.size() == 0) { - // handle new requests only if there is no unprocessed keys - requestItems = new LinkedHashMap(); - } - if (tableKeysAndAttributesCol != null) { - for (TableKeysAndAttributes tableKeysAndAttributes : tableKeysAndAttributesCol) { - // attributes against one table - final Set attrNames = tableKeysAndAttributes.getAttributeNames(); - // primary keys against one table - final List pks = tableKeysAndAttributes.getPrimaryKeys(); - final List> keys = new ArrayList>(pks.size()); - for (PrimaryKey pk : pks) { - keys.add(InternalUtils.toAttributeValueMap(pk)); - } - final KeysAndAttributes keysAndAttrs = KeysAndAttributes.builder() - .attributesToGet(attrNames) - .consistentRead(tableKeysAndAttributes.isConsistentRead()) - .keys(keys) - .projectionExpression(tableKeysAndAttributes.getProjectionExpression()) - .expressionAttributeNames(tableKeysAndAttributes.nameMap()) - .build(); - requestItems.put(tableKeysAndAttributes.getTableName(), keysAndAttrs); - } - } - BatchGetItemRequest req = spec.getRequest() - .toBuilder() - .requestItems(requestItems) - .build(); - spec.setRequest(req); - BatchGetItemResponse result = client.batchGetItem(req); - return new BatchGetItemOutcome(result); - } - - @Override - public BatchGetItemOutcome batchGetItemUnprocessed( - ReturnConsumedCapacity returnConsumedCapacity, - Map unprocessedKeys) { - return doBatchGetItem(new BatchGetItemSpec() - .withReturnConsumedCapacity(returnConsumedCapacity) - .withUnprocessedKeys(unprocessedKeys)); - } - - @Override - public BatchGetItemOutcome batchGetItemUnprocessed( - Map unprocessedKeys) { - return doBatchGetItem(new BatchGetItemSpec() - .withUnprocessedKeys(unprocessedKeys)); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/BatchWriteItemImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/BatchWriteItemImpl.java deleted file mode 100644 index 787371d0452e..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/BatchWriteItemImpl.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.toAttributeValueMap; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.toAttributeValues; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.BatchWriteItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.TableWriteItems; -import software.amazon.awssdk.services.dynamodb.document.api.BatchWriteItemApi; -import software.amazon.awssdk.services.dynamodb.document.spec.BatchWriteItemSpec; -import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemRequest; -import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemResponse; -import software.amazon.awssdk.services.dynamodb.model.DeleteRequest; -import software.amazon.awssdk.services.dynamodb.model.PutRequest; -import software.amazon.awssdk.services.dynamodb.model.WriteRequest; - -/** - * The implementation for BatchWriteItemApi. - */ -public class BatchWriteItemImpl implements BatchWriteItemApi { - private final DynamoDbClient client; - - public BatchWriteItemImpl(DynamoDbClient client) { - this.client = client; - } - - @Override - public BatchWriteItemOutcome batchWriteItem( - TableWriteItems... tableWriteItems) { - return doBatchWriteItem(new BatchWriteItemSpec() - .withTableWriteItems(tableWriteItems)); - } - - @Override - public BatchWriteItemOutcome batchWriteItem(BatchWriteItemSpec spec) { - return doBatchWriteItem(spec); - } - - @Override - public BatchWriteItemOutcome batchWriteItemUnprocessed( - Map> unprocessedItems) { - return doBatchWriteItem(new BatchWriteItemSpec() - .withUnprocessedItems(unprocessedItems)); - } - - private BatchWriteItemOutcome doBatchWriteItem(BatchWriteItemSpec spec) { - final Collection tableWriteItemsCol = - spec.getTableWriteItems(); - // Unprocessed items take precedence - Map> requestItems = - spec.getUnprocessedItems(); - if (requestItems == null || requestItems.size() == 0) { - // handle new requests only if there is no unprocessed items - requestItems = new LinkedHashMap>(); - } - if (tableWriteItemsCol != null) { - for (TableWriteItems tableWriteItems : tableWriteItemsCol) { - // items to be put to a single table - Collection itemsToPut = tableWriteItems.getItemsToPut(); - // primary keys to deleted in a single table - final List pksToDelete = - tableWriteItems.getPrimaryKeysToDelete(); - // Merge them into a list of write requests to a single table - final int numPut = itemsToPut == null ? 0 : itemsToPut.size(); - final int numDel = pksToDelete == null ? 
0 : pksToDelete.size(); - final List writeRequests = - new ArrayList(numPut + numDel); - // Put requests for a single table - if (itemsToPut != null) { - for (Item item : itemsToPut) { - writeRequests.add(WriteRequest.builder() - .putRequest(PutRequest.builder() - .item(toAttributeValues(item)) - .build()) - .build()); - } - } - // Delete requests for a single table - if (pksToDelete != null) { - for (PrimaryKey pkToDelete : pksToDelete) { - writeRequests.add(WriteRequest.builder() - .deleteRequest(DeleteRequest.builder() - .key(toAttributeValueMap(pkToDelete)) - .build()) - .build()); - } - } - requestItems.put(tableWriteItems.getTableName(), writeRequests); - } - } - BatchWriteItemRequest req = spec.getRequest().toBuilder() - .requestItems(requestItems).build(); - BatchWriteItemResponse result = client.batchWriteItem(req); - return new BatchWriteItemOutcome(result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/DeleteItemImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/DeleteItemImpl.java deleted file mode 100644 index 7e70b43e436e..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/DeleteItemImpl.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.Collection; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.DeleteItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.Table; -import software.amazon.awssdk.services.dynamodb.document.api.DeleteItemApi; -import software.amazon.awssdk.services.dynamodb.document.spec.DeleteItemSpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteItemResponse; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; - -/** - * The implementation for DeleteItemApi. - */ -public class DeleteItemImpl extends AbstractImpl implements DeleteItemApi { - public DeleteItemImpl(DynamoDbClient client, Table table) { - super(client, table); - } - - @Override - public DeleteItemOutcome deleteItem(KeyAttribute... 
primaryKeyComponents) { - return doDeleteItem(new DeleteItemSpec() - .withPrimaryKey(primaryKeyComponents)); - } - - @Override - public DeleteItemOutcome deleteItem(PrimaryKey primaryKey) { - return doDeleteItem(new DeleteItemSpec() - .withPrimaryKey(primaryKey)); - } - - @Override - public DeleteItemOutcome deleteItem(PrimaryKey primaryKeys, - Expected... expected) { - return doDeleteItem(new DeleteItemSpec() - .withPrimaryKey(primaryKeys) - .withExpected(expected)); - } - - @Override - public DeleteItemOutcome deleteItem(PrimaryKey primaryKeys, - String conditionExpression, Map nameMap, - Map valueMap) { - return doDeleteItem(new DeleteItemSpec() - .withPrimaryKey(primaryKeys) - .withConditionExpression(conditionExpression) - .withNameMap(nameMap) - .valueMap(valueMap)) - ; - } - - @Override - public DeleteItemOutcome deleteItem(DeleteItemSpec spec) { - return doDeleteItem(spec); - } - - private DeleteItemOutcome doDeleteItem(DeleteItemSpec spec) { - // set the table name - final String tableName = getTable().getTableName(); - // set up the keys - DeleteItemRequest.Builder requestBuilder = spec.getRequest().toBuilder() - .tableName(tableName) - .key(InternalUtils.toAttributeValueMap(spec.getKeyComponents())); - // set up the expected attribute map, if any - final Collection expected = spec.getExpected(); - final Map expectedMap = - InternalUtils.toExpectedAttributeValueMap(expected); - // set up the value map, if any (when expression API is used) - final Map attrValMap = - InternalUtils.fromSimpleMap(spec.valueMap()); - // set up the request - requestBuilder.expected(expectedMap) - .expressionAttributeNames(spec.nameMap()) - .expressionAttributeValues(attrValMap); - DeleteItemResponse result = getClient().deleteItem(requestBuilder.build()); - return new DeleteItemOutcome(result); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, Object hashKeyValue) { - return deleteItem(new PrimaryKey(hashKeyName, hashKeyValue)); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, - Object hashKeyValue, String rangeKeyName, Object rangeKeyValue) { - return deleteItem( - new PrimaryKey(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue)); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, - Object hashKeyValue, Expected... expected) { - return deleteItem(new PrimaryKey(hashKeyName, hashKeyValue), expected); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, - Object hashKeyValue, String rangeKeyName, Object rangeKeyValue, - Expected... 
expected) { - return deleteItem( - new PrimaryKey(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue), - expected); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, - Object hashKeyValue, String conditionExpression, - Map nameMap, Map valueMap) { - return deleteItem(new PrimaryKey(hashKeyName, hashKeyValue), - conditionExpression, nameMap, valueMap); - } - - @Override - public DeleteItemOutcome deleteItem(String hashKeyName, - Object hashKeyValue, String rangeKeyName, Object rangeKeyValue, - String conditionExpression, Map nameMap, - Map valueMap) { - return deleteItem( - new PrimaryKey(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue), - conditionExpression, nameMap, valueMap); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/Filter.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/Filter.java deleted file mode 100644 index c0671c942dcc..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/Filter.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; - -/** - * Abstract base class for both query filters and scan filters. - */ -public abstract class Filter> { - private final String attribute; - private ComparisonOperator op; - private Object[] values; - - /** - * Create a filter for the specified top-level attribute. - * - * @param attrName - * attribute name - */ - protected Filter(String attrName) { - InternalUtils.checkInvalidAttrName(attrName); - this.attribute = attrName; - } - - /** Returns the attribute name. */ - public String getAttribute() { - return attribute; - } - - public ComparisonOperator getComparisonOperator() { - return op; - } - - public Object[] values() { - return values == null ? null : values.clone(); - } - - @SuppressWarnings("unchecked") - protected T values(Object... values) { - this.values = values.clone(); - return (T) this; - } - - @SuppressWarnings("unchecked") - private T withComparisonOperator(ComparisonOperator op) { - this.op = op; - return (T) this; - } - - /** - * Creates and returns a condition of the range key being equal to the given - * value. - */ - public T eq(Object val) { - return withComparisonOperator(ComparisonOperator.EQ).values(val); - } - - public T ne(Object val) { - return withComparisonOperator(ComparisonOperator.NE).values(val); - } - - /** - * Expects the attribute be an existing attribute. - */ - public T exists() { - return withComparisonOperator(ComparisonOperator.NOT_NULL); - } - - /** - * Expects the attribute be non-existing. 
- */ - public T notExist() { - return withComparisonOperator(ComparisonOperator.NULL); - } - - public T contains(Object val) { - return withComparisonOperator(ComparisonOperator.CONTAINS).values(val); - } - - public T notContains(Object val) { - return withComparisonOperator(ComparisonOperator.NOT_CONTAINS).values(val); - } - - /** - * Creates and returns a condition of the range key with a value that begins - * with the given value. - */ - public T beginsWith(String val) { - return withComparisonOperator(ComparisonOperator.BEGINS_WITH).values(val); - } - - public T in(Object... values) { - if (values == null || values.length == 0) { - throw new IllegalArgumentException("values must not be null or empty."); - } - - return withComparisonOperator(ComparisonOperator.IN).values(values); - } - - /** - * Creates and returns a condition of the range key that has a value between - * the given values. - */ - public T between(Object low, Object hi) { - return withComparisonOperator(ComparisonOperator.BETWEEN).values(low, hi); - } - - /** - * Creates and returns a condition of the range key being greater than or - * equal to the given value. - */ - public T ge(Object val) { - return withComparisonOperator(ComparisonOperator.GE).values(val); - } - - /** - * Creates and returns a condition of the range key being greater than the - * given value. - */ - public T gt(Object val) { - return withComparisonOperator(ComparisonOperator.GT).values(val); - } - - /** - * Creates and returns a condition of the range key being less than or equal - * to the given value. - */ - public T le(Object val) { - return withComparisonOperator(ComparisonOperator.LE).values(val); - } - - /** - * Creates and returns a condition of the range key being less than the - * given value. - */ - public T lt(Object val) { - return withComparisonOperator(ComparisonOperator.LT).values(val); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/GetItemImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/GetItemImpl.java deleted file mode 100644 index 3ea0bd0a9835..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/GetItemImpl.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.GetItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.Table; -import software.amazon.awssdk.services.dynamodb.document.api.GetItemApi; -import software.amazon.awssdk.services.dynamodb.document.spec.GetItemSpec; -import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; - -/** - * The implementation for GetItemApi. - */ -public class GetItemImpl extends AbstractImpl implements GetItemApi { - public GetItemImpl(DynamoDbClient client, Table table) { - super(client, table); - } - - @Override - public GetItemOutcome getItemOutcome(KeyAttribute... primaryKeyComponents) { - return doLoadItem(new GetItemSpec() - .withPrimaryKey(primaryKeyComponents)); - } - - @Override - public GetItemOutcome getItemOutcome(PrimaryKey primaryKey) { - return doLoadItem(new GetItemSpec() - .withPrimaryKey(primaryKey)); - } - - @Override - public GetItemOutcome getItemOutcome(PrimaryKey primaryKey, - String projectionExpression, Map nameMap) { - return doLoadItem(new GetItemSpec() - .withPrimaryKey(primaryKey) - .withProjectionExpression(projectionExpression) - .withNameMap(nameMap)); - } - - @Override - public GetItemOutcome getItemOutcome(GetItemSpec spec) { - return doLoadItem(spec); - } - - @Override - public Item getItem(GetItemSpec spec) { - return doLoadItem(spec).getItem(); - } - - private GetItemOutcome doLoadItem(GetItemSpec spec) { - String tableName = getTable().getTableName(); - // Set up the key attributes - GetItemRequest req = spec.getRequest().toBuilder() - .tableName(tableName) - .key(InternalUtils.toAttributeValueMap(spec.getKeyComponents())) - .expressionAttributeNames(spec.nameMap()) - .build(); - - GetItemResponse result = getClient().getItem(req); - return new GetItemOutcome(result); - } - - @Override - public Item getItem(KeyAttribute... 
primaryKey) { - return getItemOutcome(primaryKey).getItem(); - } - - @Override - public Item getItem(PrimaryKey primaryKey) { - return getItemOutcome(primaryKey).getItem(); - } - - @Override - public Item getItem(PrimaryKey primaryKey, String projectionExpression, - Map nameMap) { - return getItemOutcome(primaryKey, projectionExpression, nameMap).getItem(); - } - - @Override - public GetItemOutcome getItemOutcome(String hashKeyName, Object hashKeyValue) { - return getItemOutcome(new KeyAttribute(hashKeyName, hashKeyValue)); - } - - @Override - public GetItemOutcome getItemOutcome(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - return getItemOutcome(new KeyAttribute(hashKeyName, hashKeyValue), - new KeyAttribute(rangeKeyName, rangeKeyValue)); - } - - @Override - public Item getItem(String hashKeyName, Object hashKeyValue) { - return getItemOutcome(hashKeyName, hashKeyValue).getItem(); - } - - @Override - public Item getItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - return getItemOutcome(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue).getItem(); - } - - @Override - public GetItemOutcome getItemOutcome(String hashKeyName, Object hashKeyValue, - String projectionExpression, Map nameMap) { - return getItemOutcome(new PrimaryKey(hashKeyName, hashKeyValue), - projectionExpression, nameMap); - } - - @Override - public GetItemOutcome getItemOutcome(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String projectionExpression, Map nameMap) { - return getItemOutcome( - new PrimaryKey(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue), - projectionExpression, nameMap); - } - - @Override - public Item getItem(String hashKeyName, Object hashKeyValue, - String projectionExpression, Map nameMap) { - return getItemOutcome(new PrimaryKey(hashKeyName, hashKeyValue), - projectionExpression, nameMap).getItem(); - } - - @Override - public Item getItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String projectionExpression, Map nameMap) { - return getItemOutcome( - new PrimaryKey(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue), - projectionExpression, nameMap).getItem(); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/IndexQueryImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/IndexQueryImpl.java deleted file mode 100644 index 11313cc5e192..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/IndexQueryImpl.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.Index; -import software.amazon.awssdk.services.dynamodb.document.ItemCollection; -import software.amazon.awssdk.services.dynamodb.document.QueryOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.QuerySpec; - -/** - * The implementation for QueryApi for an index. - */ -public class IndexQueryImpl extends QueryImpl { - private final Index index; - - public IndexQueryImpl(DynamoDbClient client, Index index) { - super(client, index.getTable()); - this.index = index; - } - - @Override - protected ItemCollection doQuery(QuerySpec spec) { - spec.setRequest(spec.getRequest().toBuilder().indexName(index.getIndexName()).build()); - return super.doQuery(spec); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/IndexScanImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/IndexScanImpl.java deleted file mode 100644 index 225fc1d2766c..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/IndexScanImpl.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.Index; -import software.amazon.awssdk.services.dynamodb.document.ItemCollection; -import software.amazon.awssdk.services.dynamodb.document.ScanOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.ScanSpec; - -/** - * The implementation for ScanApi for an index. - */ -public class IndexScanImpl extends ScanImpl { - private final Index index; - - public IndexScanImpl(DynamoDbClient client, Index index) { - super(client, index.getTable()); - this.index = index; - } - - @Override - protected ItemCollection doScan(ScanSpec spec) { - spec.setRequest(spec.getRequest().toBuilder().indexName(index.getIndexName()).build()); - return super.doScan(spec); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/InternalUtils.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/InternalUtils.java deleted file mode 100644 index 05459b48eb14..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/InternalUtils.java +++ /dev/null @@ -1,662 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import static software.amazon.awssdk.utils.BinaryUtils.copyAllBytesFrom; - -import java.math.BigDecimal; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.awscore.AwsRequest; -import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.core.util.SdkAutoConstructList; -import software.amazon.awssdk.core.util.SdkAutoConstructMap; -import software.amazon.awssdk.core.util.VersionInfo; -import software.amazon.awssdk.services.dynamodb.document.AttributeUpdate; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.IncompatibleTypeException; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; - -/** - * Internal utilities. Not meant for general use. May change without notice. - */ -public final class InternalUtils { - - private InternalUtils() { - } - - /** - * Returns a non-null list of Item's given the low level - * list of item information. - */ - public static List toItemList(List> items) { - if (items == null) { - return Collections.emptyList(); - } - List result = new ArrayList(items.size()); - for (Map item : items) { - result.add(Item.fromMap(toSimpleMapValue(item))); - } - return result; - } - - /** - * Converts an Item into the low-level representation; - * or null if the input is null. - */ - public static Map toAttributeValues(Item item) { - if (item == null) { - return null; - } - // row with multiple attributes - Map result = new LinkedHashMap(); - for (Map.Entry entry : item.attributes()) { - result.put(entry.getKey(), toAttributeValue(entry.getValue())); - } - return result; - } - - /** - * Converts a map of string to simple objects into the low-level - * representation; or null if the input is null. - */ - public static Map fromSimpleMap( - Map map) { - if (map == null) { - return null; - } - // row with multiple attributes - Map result = new LinkedHashMap(); - for (Map.Entry entry : map.entrySet()) { - result.put(entry.getKey(), toAttributeValue(entry.getValue())); - } - return result; - } - - /** - * Converts a list of AttributeUpdate into the low-level - * representation; or null if the input is null. 
- */ - public static Map toAttributeValueUpdate( - List attributesToUpdate) { - if (attributesToUpdate == null) { - return null; - } - - Map result = new LinkedHashMap(); - - for (AttributeUpdate attribute : attributesToUpdate) { - AttributeValueUpdate.Builder attributeToUpdateBuilder = AttributeValueUpdate.builder() - .action(attribute.getAction()); - if (attribute.value() != null) { - attributeToUpdateBuilder.value(toAttributeValue(attribute.value())); - } else if (attribute.getAttributeValues() != null) { - attributeToUpdateBuilder.value(toAttributeValue(attribute - .getAttributeValues())); - } - result.put(attribute.getAttributeName(), attributeToUpdateBuilder.build()); - } - - return result; - } - - /** - * Converts a simple value into the low-level {@code } - * representation. - * - * @param value - * the given value which can be one of the followings: - *

- * <ul>
- * <li>String</li>
- * <li>Set<String></li>
- * <li>Number (including any subtypes and primitive types)</li>
- * <li>Set<Number></li>
- * <li>byte[]</li>
- * <li>Set<byte[]></li>
- * <li>ByteBuffer</li>
- * <li>Set<ByteBuffer></li>
- * <li>Boolean or boolean</li>
- * <li>null</li>
- * <li>Map<String,T>, where T can be any type on this list but must not induce any circular reference</li>
- * <li>List<T>, where T can be any type on this list but must not induce any circular reference</li>
- * </ul>
    - * @return a non-null low level representation of the input object value - * - * @throws UnsupportedOperationException - * if the input object type is not supported - */ - public static AttributeValue toAttributeValue(Object value) { - AttributeValue.Builder resultBuilder = AttributeValue.builder(); - if (value == null) { - return resultBuilder.nul(Boolean.TRUE).build(); - } else if (value instanceof Boolean) { - return resultBuilder.bool((Boolean) value).build(); - } else if (value instanceof String) { - return resultBuilder.s((String) value).build(); - } else if (value instanceof BigDecimal) { - BigDecimal bd = (BigDecimal) value; - return resultBuilder.n(bd.toPlainString()).build(); - } else if (value instanceof Number) { - return resultBuilder.n(value.toString()).build(); - } else if (value instanceof byte[]) { - return resultBuilder.b(SdkBytes.fromByteArray((byte[]) value)).build(); - } else if (value instanceof ByteBuffer) { - return resultBuilder.b(SdkBytes.fromByteBuffer((ByteBuffer) value)).build(); - } else if (value instanceof Set) { - // default to an empty string set if there is no element - @SuppressWarnings("unchecked") - Set set = (Set) value; - if (set.size() == 0) { - resultBuilder.ss(new ArrayList<>()); - return resultBuilder.build(); - } - Object element = set.iterator().next(); - if (element instanceof String) { - @SuppressWarnings("unchecked") - Set ss = (Set) value; - resultBuilder.ss(new ArrayList(ss)); - } else if (element instanceof Number) { - @SuppressWarnings("unchecked") - Set in = (Set) value; - List out = new ArrayList(set.size()); - for (Number n : in) { - BigDecimal bd = InternalUtils.toBigDecimal(n); - out.add(bd.toPlainString()); - } - resultBuilder.ns(out); - } else if (element instanceof byte[]) { - @SuppressWarnings("unchecked") - Set in = (Set) value; - List out = new ArrayList<>(set.size()); - for (byte[] buf : in) { - out.add(SdkBytes.fromByteArray(buf)); - } - resultBuilder.bs(out); - } else if (element instanceof ByteBuffer) { - @SuppressWarnings("unchecked") - Set in = (Set) value; - List out = new ArrayList<>(set.size()); - for (ByteBuffer buf : in) { - out.add(SdkBytes.fromByteBuffer(buf)); - } - resultBuilder.bs(out); - } else { - throw new UnsupportedOperationException("element type: " - + element.getClass()); - } - } else if (value instanceof List) { - @SuppressWarnings("unchecked") - List in = (List) value; - List out = new ArrayList(); - for (Object v : in) { - out.add(toAttributeValue(v)); - } - resultBuilder.l(out); - } else if (value instanceof Map) { - @SuppressWarnings("unchecked") - Map in = (Map) value; - Map attrs = new HashMap<>(); - for (Map.Entry e : in.entrySet()) { - attrs.put(e.getKey(), toAttributeValue(e.getValue())); - //resultBuilder.addMEntry(e.getKey(), toAttributeValue(e.getValue())); - } - resultBuilder.m(attrs); - } else { - throw new UnsupportedOperationException("value type: " - + value.getClass()); - } - return resultBuilder.build(); - } - - /** - * Converts a list of low-level AttributeValue into a list of - * simple values. Each value in the returned list can be one of the - * followings: - * - *
- * <ul>
- * <li>String</li>
- * <li>Set<String></li>
- * <li>Number (including any subtypes and primitive types)</li>
- * <li>Set<Number></li>
- * <li>byte[]</li>
- * <li>Set<byte[]></li>
- * <li>ByteBuffer</li>
- * <li>Set<ByteBuffer></li>
- * <li>Boolean or boolean</li>
- * <li>null</li>
- * <li>Map<String,T>, where T can be any type on this list but must not induce any circular reference</li>
- * <li>List<T>, where T can be any type on this list but must not induce any circular reference</li>
- * </ul>
    - */ - public static List toSimpleList(List attrValues) { - if (attrValues == null) { - return null; - } - List result = new ArrayList(attrValues.size()); - for (AttributeValue attrValue : attrValues) { - Object value = toSimpleValue(attrValue); - result.add(value); - } - return result; - } - - /** - * Convenient method to convert a list of low-level - * AttributeValue into a list of values of the same type T. - * Each value in the returned list can be one of the followings: - *
- * <ul>
- * <li>String</li>
- * <li>Set<String></li>
- * <li>Number (including any subtypes and primitive types)</li>
- * <li>Set<Number></li>
- * <li>byte[]</li>
- * <li>Set<byte[]></li>
- * <li>ByteBuffer</li>
- * <li>Set<ByteBuffer></li>
- * <li>Boolean or boolean</li>
- * <li>null</li>
- * <li>Map<String,T>, where T can be any type on this list but must not induce any circular reference</li>
- * <li>List<T>, where T can be any type on this list but must not induce any circular reference</li>
- * </ul>
    - */ - public static List toSimpleListValue(List values) { - if (values == null) { - return null; - } - - List result = new ArrayList(values.size()); - for (AttributeValue v : values) { - T t = toSimpleValue(v); - result.add(t); - } - return result; - } - - public static Map toSimpleMapValue( - Map values) { - if (values == null) { - return null; - } - - Map result = new LinkedHashMap(values.size()); - for (Map.Entry entry : values.entrySet()) { - T t = toSimpleValue(entry.getValue()); - result.put(entry.getKey(), t); - } - return result; - } - - /** - * Returns the string representation of the given value; or null if the - * value is null. For BigDecimal it will be the string - * representation without an exponent field. - */ - public static String valToString(Object val) { - if (val instanceof BigDecimal) { - BigDecimal bd = (BigDecimal) val; - return bd.toPlainString(); - } - if (val == null) { - return null; - } - if (val instanceof String - || val instanceof Boolean - || val instanceof Number) { - return val.toString(); - } - throw new IncompatibleTypeException("Cannot convert " + val.getClass() + " into a string"); - } - - /** - * Converts a low-level AttributeValue into a simple value, - * which can be one of the followings: - * - *
- * <ul>
- * <li>String</li>
- * <li>Set<String></li>
- * <li>Number (including any subtypes and primitive types)</li>
- * <li>Set<Number></li>
- * <li>byte[]</li>
- * <li>Set<byte[]></li>
- * <li>ByteBuffer</li>
- * <li>Set<ByteBuffer></li>
- * <li>Boolean or boolean</li>
- * <li>null</li>
- * <li>Map<String,T>, where T can be any type on this list but must not induce any circular reference</li>
- * <li>List<T>, where T can be any type on this list but must not induce any circular reference</li>
- * </ul>
    - * - * @throws IllegalArgumentException - * if an empty AttributeValue value is specified - */ - static T toSimpleValue(AttributeValue value) { - if (value == null) { - return null; - } - if (Boolean.TRUE.equals(value.nul())) { - return null; - } else if (Boolean.FALSE.equals(value.nul())) { - throw new UnsupportedOperationException("False-NULL is not supported in DynamoDB"); - } else if (value.bool() != null) { - @SuppressWarnings("unchecked") - T t = (T) value.bool(); - return t; - } else if (value.s() != null) { - @SuppressWarnings("unchecked") - T t = (T) value.s(); - return t; - } else if (value.n() != null) { - @SuppressWarnings("unchecked") - T t = (T) new BigDecimal(value.n()); - return t; - } else if (value.b() != null) { - @SuppressWarnings("unchecked") - T t = (T) value.b().asByteArray(); - return t; - } else if (value.ss() != null && !(value.ss() instanceof SdkAutoConstructList)) { - @SuppressWarnings("unchecked") - T t = (T) new LinkedHashSet(value.ss()); - return t; - } else if (value.ns() != null && !(value.ns() instanceof SdkAutoConstructList)) { - Set set = new LinkedHashSet(value.ns().size()); - for (String s : value.ns()) { - set.add(new BigDecimal(s)); - } - @SuppressWarnings("unchecked") - T t = (T) set; - return t; - } else if (value.bs() != null && !(value.bs() instanceof SdkAutoConstructList)) { - Set set = new LinkedHashSet(value.bs().size()); - for (SdkBytes bb : value.bs()) { - set.add(copyAllBytesFrom(bb.asByteBuffer())); - } - @SuppressWarnings("unchecked") - T t = (T) set; - return t; - } else if (value.l() != null && !(value.l() instanceof SdkAutoConstructList)) { - @SuppressWarnings("unchecked") - T t = (T) toSimpleList(value.l()); - return t; - } else if (value.m() != null && !(value.m() instanceof SdkAutoConstructMap)) { - @SuppressWarnings("unchecked") - T t = (T) toSimpleMapValue(value.m()); - return t; - } else { - throw new IllegalArgumentException( - "Attribute value must not be empty: " + value); - } - } - - /** - * Returns the minimum of the two input integers taking null into account. - * Returns null if both integers are null. Otherwise, a null Integer is - * treated as infinity. - */ - public static Integer minimum(Integer one, Integer two) { - if (one == null) { - return two; - } else if (two == null) { - return one; - } else if (one < two) { - return one; - } else { - return two; - } - } - - /** - * Returns the low level representation of a collection of Expected. 
- */ - public static Map toExpectedAttributeValueMap( - Collection expectedSet) { - if (expectedSet == null) { - return null; - } - Map expectedMap = - new LinkedHashMap(); - for (Expected expected : expectedSet) { - final String attr = expected.getAttribute(); - final Object[] values = expected.values(); - ExpectedAttributeValue.Builder eavBuilder = ExpectedAttributeValue.builder(); - if (values != null) { - if (values.length > 0) { - // convert from list of object values to list of AttributeValues - AttributeValue[] avs = InternalUtils.toAttributeValues(values); - eavBuilder.attributeValueList(avs); - } else { - throw new IllegalStateException("Bug!"); - } - } - ComparisonOperator op = expected.getComparisonOperator(); - if (op == null) { - throw new IllegalArgumentException( - "Comparison operator for attribute " + expected.getAttribute() - + " must be specified"); - } - eavBuilder.comparisonOperator(op); - expectedMap.put(attr, eavBuilder.build()); - } - if (expectedSet.size() != expectedMap.size()) { - throw new IllegalArgumentException("duplicates attribute names not allowed in input"); - } - return Collections.unmodifiableMap(expectedMap); - } - - /** - * Returns the low level representation of a collection of Filter. - */ - public static Map toAttributeConditionMap(Collection> filters) { - if (filters == null) { - return null; - } - Map conditionMap = new LinkedHashMap(); - for (Filter filter : filters) { - final String attr = filter.getAttribute(); - final Object[] values = filter.values(); - Condition.Builder conditionBuilder = Condition.builder(); - if (values != null) { - if (values.length > 0) { - // convert from list of object values to list of AttributeValues - AttributeValue[] avs = InternalUtils.toAttributeValues(values); - conditionBuilder.attributeValueList(avs); - } else { - throw new IllegalStateException("Bug!"); - } - } - ComparisonOperator op = filter.getComparisonOperator(); - if (op == null) { - throw new IllegalArgumentException( - "Comparison operator for attribute " + filter.getAttribute() - + " must be specified"); - } - conditionBuilder.comparisonOperator(op); - conditionMap.put(attr, conditionBuilder.build()); - } - if (filters.size() != conditionMap.size()) { - throw new IllegalArgumentException("duplicates attribute names not allowed in input"); - } - return Collections.unmodifiableMap(conditionMap); - } - - /** - * Converts the input array of values into an array of low level - * representation of those values. - * - * A value in the input array can be one of the followings: - * - *
- * <ul>
- * <li>String</li>
- * <li>Set<String></li>
- * <li>Number (including any subtypes and primitive types)</li>
- * <li>Set<Number></li>
- * <li>byte[]</li>
- * <li>Set<byte[]></li>
- * <li>ByteBuffer</li>
- * <li>Set<ByteBuffer></li>
- * <li>Boolean or boolean</li>
- * <li>null</li>
- * <li>Map<String,T>, where T can be any type on this list but must not induce any circular reference</li>
- * <li>List<T>, where T can be any type on this list but must not induce any circular reference</li>
- * </ul>
    - */ - public static AttributeValue[] toAttributeValues(Object[] values) { - AttributeValue[] attrValues = new AttributeValue[values.length]; - for (int i = 0; i < values.length; i++) { - attrValues[i] = InternalUtils.toAttributeValue(values[i]); - } - return attrValues; - } - - /** - * Converts the specified primary key into the low-level representation. - */ - public static Map toAttributeValueMap( - Collection primaryKey) { - if (primaryKey == null) { - return null; - } - Map keys = new LinkedHashMap(); - for (KeyAttribute keyAttr : primaryKey) { - keys.put(keyAttr.name(), - InternalUtils.toAttributeValue(keyAttr.value())); - } - return Collections.unmodifiableMap(keys); - } - - /** - * Converts the specified primary key into the low-level representation. - */ - public static Map toAttributeValueMap( - PrimaryKey primaryKey) { - if (primaryKey == null) { - return null; - } - return toAttributeValueMap(primaryKey.getComponents()); - } - - /** - * Converts the specified primary key into the low-level representation. - */ - public static Map toAttributeValueMap( - KeyAttribute... primaryKey) { - if (primaryKey == null) { - return null; - } - return toAttributeValueMap(Arrays.asList(primaryKey)); - } - - /** - * Converts a number into BigDecimal representation. - */ - public static BigDecimal toBigDecimal(Number n) { - if (n instanceof BigDecimal) { - return (BigDecimal) n; - } - return new BigDecimal(n.toString()); - } - - public static Set toBigDecimalSet(Number... val) { - Set set = new LinkedHashSet(val.length); - for (Number n : val) { - set.add(InternalUtils.toBigDecimal(n)); - } - return set; - } - - public static Set toBigDecimalSet(Set vals) { - Set set = new LinkedHashSet(vals.size()); - for (Number n : vals) { - set.add(InternalUtils.toBigDecimal(n)); - } - return set; - } - - /** - * Append the custom user-agent string. 
- */ - public static X applyUserAgent(X request) { - final AwsRequestOverrideConfiguration newCfg = request.overrideConfiguration() - .map(AwsRequestOverrideConfiguration::toBuilder) - .orElse(AwsRequestOverrideConfiguration.builder()) - .addApiName(apiName -> apiName.name("dynamodb-table-api").version(VersionInfo.SDK_VERSION)) - .build(); - - return (X) request.toBuilder() - .overrideConfiguration(newCfg) - .build(); - } - - public static void rejectNullValue(Object val) { - if (val == null) { - throw new IllegalArgumentException("Input value must not be null"); - } - } - - public static void rejectNullInput(Object input) { - if (input == null) { - throw new IllegalArgumentException("Input must not be null"); - } - } - - public static void rejectEmptyInput(Object[] input) { - if (input.length == 0) { - throw new IllegalArgumentException("At least one input must be specified"); - } - } - - public static void rejectNullOrEmptyInput(Object[] input) { - rejectNullInput(input); - rejectEmptyInput(input); - } - - public static void checkInvalidAttrName(String attrName) { - if (attrName == null || attrName.trim().length() == 0) { - throw new IllegalArgumentException("Attribute name must not be null or empty"); - } - } - - public static void checkInvalidAttribute(String attrName, Object val) { - checkInvalidAttrName(attrName); - rejectNullValue(val); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/InternalUtilsTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/InternalUtilsTest.java deleted file mode 100644 index 40917e2f6d98..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/InternalUtilsTest.java +++ /dev/null @@ -1,366 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.toAttributeValues; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.toItemList; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.toSimpleList; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.toSimpleValue; -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.valToString; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.junit.Ignore; -import org.junit.Test; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.core.util.SdkAutoConstructList; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.utils.FluentHashSet; -import software.amazon.awssdk.services.dynamodb.document.utils.ValueMap; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; - -public class InternalUtilsTest { - - @Test - public void nullInput() { - assertTrue(toItemList(null).size() == 0); - assertNull(toAttributeValues((Item) null)); - assertNull(toSimpleList(null)); - assertNull(toSimpleValue(null)); - assertNull(valToString(null)); - } - - @Test - @Ignore // Does not pass anymore because the builder will create a duplicate of the BB - public void toAttributeValue_ByteBuffer() { - ByteBuffer bbFrom = ByteBuffer.allocate(10); - AttributeValue av = InternalUtils.toAttributeValue(bbFrom); - ByteBuffer bbTo = av.b().asByteBuffer(); - assertSame(bbFrom, bbTo); - } - - @Test - public void toAttributeValue_byteArray() { - byte[] bytesFrom = {1, 2, 3, 4}; - AttributeValue av = InternalUtils.toAttributeValue(bytesFrom); - ByteBuffer bbTo = av.b().asByteBuffer(); - assertTrue(ByteBuffer.wrap(bytesFrom).compareTo(bbTo) == 0); - } - - @Test - public void toAttributeValue_Number() { - { - AttributeValue av = InternalUtils.toAttributeValue(123); - String num = av.n(); - assertEquals("123", num); - } - { // 17 decimal places - AttributeValue av = InternalUtils.toAttributeValue(0.99999999999999999); - String num = av.n(); - assertEquals("1.0", num); - } - { // 16 decimal places - AttributeValue av = InternalUtils.toAttributeValue(0.9999999999999999); - String num = av.n(); - assertEquals("0.9999999999999999", num); - } - { - String numFrom = "0.99999999999999999999999999999999999999"; - AttributeValue av = InternalUtils.toAttributeValue( - new BigDecimal(numFrom)); - String numTo = av.n(); - assertEquals(numFrom, numTo); - } - } - - @Test - public void toAttributeValue_emptySet() { - AttributeValue av = InternalUtils.toAttributeValue(new HashSet()); - List ss = av.ss(); - assertTrue(ss.size() == 0); - 
assertTrue(av.ns() instanceof SdkAutoConstructList); - } - - @Test - public void toAttributeValue_NumberSet() { - Set nsFrom = new FluentHashSet() - .with(123) - .with(123.45) - .with(Integer.valueOf(678)) - .with(new BigInteger("1234567890123456789012345678901234567890")) - .with(new BigDecimal("0.99999999999999999999999999999999999999")); - AttributeValue av = InternalUtils.toAttributeValue(nsFrom); - assertTrue(av.ss() instanceof SdkAutoConstructList); - List ns = av.ns(); - assertTrue(ns.size() == 5); - assertTrue(ns.contains("123")); - assertTrue(ns.contains("123.45")); - assertTrue(ns.contains("678")); - assertTrue(ns.contains("1234567890123456789012345678901234567890")); - assertTrue(ns.contains("0.99999999999999999999999999999999999999")); - } - - @Test - public void toAttributeValue_ByteArraySet() { - byte[] ba1From = new byte[] {1, 2, 3}; - byte[] ba2From = new byte[] {4, 5, 6}; - Set nsFrom = new FluentHashSet() - .with(ba1From) - .with(ba2From); - AttributeValue av = InternalUtils.toAttributeValue(nsFrom); - assertTrue(av.ss() instanceof SdkAutoConstructList); - List bs = av.bs(); - assertTrue(bs.size() == 2); - boolean bool1 = false; - boolean bool2 = false; - for (SdkBytes b : bs) { - if (ByteBuffer.wrap(ba1From).compareTo(b.asByteBuffer()) == 0) { - bool1 = true; - } else if (ByteBuffer.wrap(ba2From).compareTo(b.asByteBuffer()) == 0) { - bool2 = true; - } - } - assertTrue(bool1); - assertTrue(bool2); - } - - @Test - public void toAttributeValue_ByteBufferSet() { - byte[] ba1From = new byte[] {1, 2, 3}; - byte[] ba2From = new byte[] {4, 5, 6}; - Set nsFrom = new FluentHashSet() - .with(ByteBuffer.wrap(ba1From)) - .with(ByteBuffer.wrap(ba2From)); - AttributeValue av = InternalUtils.toAttributeValue(nsFrom); - assertTrue(av.ss() instanceof SdkAutoConstructList); - List bs = av.bs(); - assertTrue(bs.size() == 2); - boolean bool1 = false; - boolean bool2 = false; - for (SdkBytes b : bs) { - if (ByteBuffer.wrap(ba1From).compareTo(b.asByteBuffer()) == 0) { - bool1 = true; - } else if (ByteBuffer.wrap(ba2From).compareTo(b.asByteBuffer()) == 0) { - bool2 = true; - } - } - assertTrue(bool1); - assertTrue(bool2); - } - - @Test - public void toAttributeValue_null() { - AttributeValue av = InternalUtils.toAttributeValue(null); - assertEquals(Boolean.TRUE, av.nul()); - } - - @Test(expected = UnsupportedOperationException.class) - public void toAttributeValue_UnsupportedOperationException() { - InternalUtils.toAttributeValue(new Object()); - } - - @Test - public void toAttributeValue_emptyMap() { - AttributeValue av = InternalUtils.toAttributeValue(new HashMap()); - Map m = av.m(); - assertTrue(m.size() == 0); - } - - @Test - public void toAttributeValue_emptyList() { - AttributeValue av = InternalUtils.toAttributeValue(new ArrayList()); - List l = av.l(); - assertTrue(l.size() == 0); - } - - @Test - public void toAttributeValue_MapOfMap() { - AttributeValue av = InternalUtils.toAttributeValue(new ValueMap() - .with("emptyMap", new ValueMap())); - Map m = av.m(); - assertTrue(m.size() == 1); - AttributeValue emptyMap = m.get("emptyMap"); - Map mInner = emptyMap.m(); - assertTrue(0 == mInner.size()); - } - - @Test - public void toSimpleListValue_empty() { - List listFrom = new ArrayList(); - List listTo = toSimpleList(listFrom); - assertTrue(listTo.size() == 0); - } - - @Test - public void toSimpleListValue_null() { - assertNull(InternalUtils.toSimpleListValue(null)); - } - - @Test - public void toSimpleListValue() { - List listFrom = new ArrayList(); - 
listFrom.add(AttributeValue.builder().s("test").build()); - listFrom.add(AttributeValue.builder().n("123").build()); - List listTo = InternalUtils.toSimpleListValue(listFrom); - assertTrue(listTo.size() == 2); - assertEquals("test", listTo.get(0)); - assertEquals(new BigDecimal("123"), listTo.get(1)); - } - - @Test - public void toSimpleValue_null() { - assertNull(toSimpleValue(null)); - assertNull(toSimpleValue(AttributeValue.builder().nul(Boolean.TRUE).build())); - } - - @Test(expected = IllegalArgumentException.class) - public void toSimpleValue_empty() { - toSimpleValue(AttributeValue.builder().build()); - } - - @Test(expected = UnsupportedOperationException.class) - public void toSimpleValue_FalseNull() { - toSimpleValue(AttributeValue.builder().nul(Boolean.FALSE).build()); - } - - @Test - public void toSimpleValue_NS() { - Set numset = toSimpleValue( - AttributeValue.builder().ns("123", "456").build()); - assertTrue(numset.size() == 2); - assertTrue(numset.contains(new BigDecimal("123"))); - assertTrue(numset.contains(new BigDecimal("456"))); - } - - @Test - public void toSimpleValue_emptyNS() { - Set numset = toSimpleValue( - AttributeValue.builder().ns(new ArrayList()).build()); - assertTrue(numset.size() == 0); - } - - @Test - public void toSimpleValue_M() { - Map mapFrom = new HashMap(); - mapFrom.put("fooBOOL", AttributeValue.builder().bool(Boolean.TRUE).build()); - mapFrom.put("fooString", AttributeValue.builder().s("bar").build()); - Map mapTo = toSimpleValue( - AttributeValue.builder().m(mapFrom).build()); - assertTrue(mapTo.size() == 2); - assertEquals(Boolean.TRUE, mapTo.get("fooBOOL")); - assertEquals("bar", mapTo.get("fooString")); - } - - @Test - public void toSimpleValue_emptyM() { - Map mapFrom = new HashMap(); - Map mapTo = toSimpleValue( - AttributeValue.builder().m(mapFrom).build()); - assertTrue(mapTo.size() == 0); - } - - @Test - public void toSimpleValue_ByteArray() { - byte[] bytesFrom = new byte[] {1, 2, 3}; - ByteBuffer byteBufferTo = ByteBuffer.allocate(3).put(bytesFrom); - byteBufferTo.rewind(); - byte[] bytesTo = toSimpleValue( - AttributeValue.builder().b(SdkBytes.fromByteBuffer(byteBufferTo)).build()); - assertTrue(Arrays.equals(bytesTo, bytesFrom)); - } - - @Test - public void toSimpleValue_DirectByteBuffer() { - byte[] bytesFrom = new byte[] {1, 2, 3}; - ByteBuffer byteBufferTo = ByteBuffer.allocateDirect(3).put(bytesFrom); - byteBufferTo.rewind(); - byte[] bytesTo = toSimpleValue( - AttributeValue.builder().b(SdkBytes.fromByteBuffer(byteBufferTo)).build()); - assertTrue(Arrays.equals(bytesTo, bytesFrom)); - } - - @Test(expected = IllegalArgumentException.class) - public void toExpectedAttributeValueMap_missingComparisonOperator() { - InternalUtils.toExpectedAttributeValueMap(Arrays.asList(new Expected("attrName"))); - } - - @Test - public void toExpectedAttributeValueMap() { - Map to = - InternalUtils.toExpectedAttributeValueMap(Arrays.asList( - new Expected("attr1").exists(), - new Expected("attr2").exists() - )); - assertTrue(to.size() == 2); - } - - @Test(expected = IllegalArgumentException.class) - public void toExpectedAttributeValueMap_duplicateAttributeNames() { - InternalUtils.toExpectedAttributeValueMap(Arrays.asList( - new Expected("attr1").exists(), - new Expected("attr1").ge(1) - )); - } - - @Test - public void toAttributeValueMap_nullKeyAttributeCollection() { - assertNull(InternalUtils.toAttributeValueMap((Collection) null)); - } - - @Test - public void toAttributeValueMap_nullPrimaryKey() { - 
assertNull(InternalUtils.toAttributeValueMap((PrimaryKey) null)); - } - - @Test - public void toAttributeValueMap_nullKeyAttributes() { - assertNull(InternalUtils.toAttributeValueMap((KeyAttribute[]) null)); - } - - @Test - public void toAttributeValueMap_KeyAttributes() { - Map map = InternalUtils.toAttributeValueMap( - new KeyAttribute("hashname", "hashvalue"), - new KeyAttribute("rangekey", 123)); - AttributeValue av = map.get("hashname"); - assertEquals("hashvalue", av.s()); - av = map.get("rangekey"); - assertEquals("123", av.n()); - } - - @Test - public void valToString_int() { - String s = valToString(123.456); - assertEquals("123.456", s); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ItemValueConformer.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ItemValueConformer.java deleted file mode 100644 index 5d76d8c32fc7..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ItemValueConformer.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import static software.amazon.awssdk.utils.BinaryUtils.copyBytesFrom; - -import java.math.BigDecimal; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * Used to standardize a given Item's value into a standard - * internal representation (for purposes such as equality comparison.) - */ -public class ItemValueConformer extends ValueTransformer { - /** - * This method is assumed to be called for the purpose of a setter method - * invocation, but NOT a getter method invocation. 
- */ - @Override - public Object transform(Object value) { - if (value == null) { - return value; - } else if (value instanceof Boolean) { - return value; - } else if (value instanceof String) { - return value; - } else if (value instanceof Number) { - return InternalUtils.toBigDecimal((Number) value); - } else if (value instanceof byte[]) { - return value; - } else if (value instanceof ByteBuffer) { - return copyBytesFrom((ByteBuffer) value); - } else if (value instanceof Set) { - @SuppressWarnings("unchecked") - Set set = (Set) value; - if (set.size() == 0) { - return value; - } - Object element = set.iterator().next(); - if (element instanceof String) { - return value; - } else if (element instanceof BigDecimal) { - return value; - } else if (element instanceof Number) { - @SuppressWarnings("unchecked") - Set in = (Set) value; - Set out = new LinkedHashSet(set.size()); - for (Number n : in) { - out.add(InternalUtils.toBigDecimal(n)); - } - return out; - } else if (element instanceof byte[]) { - return value; - } else if (element instanceof ByteBuffer) { - @SuppressWarnings("unchecked") - Set bs = (Set) value; - Set out = new LinkedHashSet(bs.size()); - for (ByteBuffer bb : bs) { - out.add(copyBytesFrom(bb)); - } - return out; - } else { - throw new UnsupportedOperationException("element type: " - + element.getClass()); - } - } else if (value instanceof List) { - @SuppressWarnings("unchecked") - List in = (List) value; - if (in.size() == 0) { - return in; - } - List out = new ArrayList(); - for (Object v : in) { - out.add(transform(v)); - } - return out; - } else if (value instanceof Map) { - @SuppressWarnings("unchecked") - Map in = (Map) value; - if (in.size() == 0) { - return in; - } - Map out = new LinkedHashMap(in.size()); - for (Map.Entry e : in.entrySet()) { - out.put(e.getKey(), transform(e.getValue())); - } - return out; - } else { - throw new UnsupportedOperationException("value type: " - + value.getClass()); - } - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ItemValueConformerTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ItemValueConformerTest.java deleted file mode 100644 index 81bb2de283e1..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ItemValueConformerTest.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.math.BigDecimal; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.document.utils.FluentHashSet; - -public class ItemValueConformerTest { - - @Test - public void byteBuffer() { - byte[] bytes = {1, 2, 3}; - ByteBuffer bb = ByteBuffer.wrap(bytes); - byte[] bytesTo = (byte[]) new ItemValueConformer().transform(bb); - assertTrue(Arrays.equals(bytesTo, bytes)); - } - - @Test - public void emptySet() { - Set from = new HashSet(); - Set to = (Set) new ItemValueConformer().transform(from); - assertTrue(to.size() == 0); - } - - @Test - public void stringSet() { - Set from = new FluentHashSet("a", "b"); - Set to = (Set) new ItemValueConformer().transform(from); - assertTrue(to.size() == 2); - assertTrue(to.contains("a")); - assertTrue(to.contains("b")); - } - - @Test - public void bytesSet() { - byte[] bytes123 = {1, 2, 3}; - byte[] bytes456 = {4, 5, 6}; - Set from = new FluentHashSet(bytes123, bytes456); - @SuppressWarnings("unchecked") - Set to = (Set) new ItemValueConformer().transform(from); - assertTrue(to.size() == 2); - boolean a = false, b = false; - for (byte[] bytes : to) { - if (Arrays.equals(bytes123, bytes)) { - a = true; - } else if (Arrays.equals(bytes456, bytes)) { - b = true; - } - } - assertTrue(a); - assertTrue(b); - } - - @Test - public void byteBufferSet() { - byte[] bytes123 = {1, 2, 3}; - byte[] bytes456 = {4, 5, 6}; - Set from = new FluentHashSet(ByteBuffer.wrap(bytes123), ByteBuffer.wrap(bytes456)); - @SuppressWarnings("unchecked") - Set to = (Set) new ItemValueConformer().transform(from); - assertTrue(to.size() == 2); - boolean a = false, b = false; - for (byte[] bytes : to) { - if (Arrays.equals(bytes123, bytes)) { - a = true; - } else if (Arrays.equals(bytes456, bytes)) { - b = true; - } - } - assertTrue(a); - assertTrue(b); - } - - @Test - public void bigDecimalSet() { - Set from = new FluentHashSet(BigDecimal.ZERO, BigDecimal.TEN); - Set to = (Set) new ItemValueConformer().transform(from); - assertTrue(to.size() == 2); - assertTrue(to.contains(BigDecimal.ZERO)); - assertTrue(to.contains(BigDecimal.TEN)); - } - - @Test - public void bigNumberSet() { - Set from = new FluentHashSet(Integer.MAX_VALUE, Double.MAX_VALUE); - Set to = (Set) new ItemValueConformer().transform(from); - assertTrue(to.size() == 2); - - assertFalse(to.contains(Integer.MAX_VALUE)); - assertFalse(to.contains(Double.MAX_VALUE)); - - assertTrue(to.contains(new BigDecimal(String.valueOf(Integer.MAX_VALUE)))); - assertTrue(to.contains(new BigDecimal(String.valueOf(Double.MAX_VALUE)))); - } - - @Test - public void emptyMap() { - Map from = new HashMap(); - Map to = (Map) new ItemValueConformer().transform(from); - assertTrue(to.size() == 0); - } - - @Test(expected = UnsupportedOperationException.class) - public void uknownType() { - new ItemValueConformer().transform(new Object()); - } - - @Test(expected = UnsupportedOperationException.class) - public void uknownsetType() { - new ItemValueConformer().transform(new FluentHashSet(new Object())); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/IteratorSupport.java 
b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/IteratorSupport.java deleted file mode 100644 index b84bb37c67b1..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/IteratorSupport.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.Iterator; -import java.util.NoSuchElementException; -import software.amazon.awssdk.services.dynamodb.document.Page; - -/** - * An internal iterator implementation for {@link PageBasedCollection}. - *
<p>
    - * NOTE: this internal class is marked as public since it has been incorrectly - * exposed in the public method {@link PageBasedCollection#iterator()}, and it - * will be changed to be package private in the next major version. - * - * @param - * resource type - * @param - * low level result type - */ -public class IteratorSupport implements Iterator { - /** - * Used to iterate through the resource pages, dynamically making network - * calls as needed. - */ - final PageIterator resourcePageIterator; - /** - * Used to iterate through a list of resources already retrieved. - */ - private Iterator localResourceIterator; - private T resource; - - IteratorSupport(PageIterator resourcePageIterator) { - this.resourcePageIterator = resourcePageIterator; - } - - @Override - public boolean hasNext() { - if (resource != null) { - return true; - } - resource = nextResource(); - return (resource != null); - } - - @Override - public T next() { - T rval = resource; - if (rval == null) { - rval = nextResource(); - if (rval == null) { - throw new NoSuchElementException("No more elements"); - } - } else { - resource = null; - } - return rval; - } - - @Override - public void remove() { - throw new UnsupportedOperationException( - "Collection is read-only"); - } - - private T nextResource() { - while (true) { - if (localResourceIterator != null && localResourceIterator.hasNext()) { - return localResourceIterator.next(); - } - if (!resourcePageIterator.hasNext()) { - return null; - } - Page resourcePage = resourcePageIterator.next(); - localResourceIterator = resourcePage.iterator(); - } - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ListTablesCollection.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ListTablesCollection.java deleted file mode 100644 index 6f91c0312ac1..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ListTablesCollection.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.Page; -import software.amazon.awssdk.services.dynamodb.document.Table; -import software.amazon.awssdk.services.dynamodb.document.TableCollection; -import software.amazon.awssdk.services.dynamodb.document.spec.ListTablesSpec; -import software.amazon.awssdk.services.dynamodb.model.ListTablesRequest; -import software.amazon.awssdk.services.dynamodb.model.ListTablesResponse; - -class ListTablesCollection extends TableCollection { - - private final DynamoDbClient client; - private final ListTablesSpec spec; - private final String startKey; - - ListTablesCollection(DynamoDbClient client, ListTablesSpec spec) { - this.client = client; - this.spec = spec; - this.startKey = spec.getExclusiveStartTableName(); - } - - @Override - public Page firstPage() { - ListTablesRequest request = spec.getRequest() - .toBuilder() - .exclusiveStartTableName(startKey) - .limit(InternalUtils.minimum( - spec.maxResultSize(), - spec.maxPageSize())) - .build(); - spec.setRequest(request); - ListTablesResponse result = client.listTables(request); - setLastLowLevelResult(result); - return new ListTablesPage(client, spec, request, 0, result); - } - - @Override - public Integer getMaxResultSize() { - return spec.maxResultSize(); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ListTablesImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ListTablesImpl.java deleted file mode 100644 index 9c488233e6e3..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ListTablesImpl.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.TableCollection; -import software.amazon.awssdk.services.dynamodb.document.api.ListTablesApi; -import software.amazon.awssdk.services.dynamodb.document.spec.ListTablesSpec; -import software.amazon.awssdk.services.dynamodb.model.ListTablesResponse; - -/** - * The implementation for ListTablesApi. 
- */ -public class ListTablesImpl implements ListTablesApi { - private final DynamoDbClient client; - - public ListTablesImpl(DynamoDbClient client) { - this.client = client; - } - - @Override - public TableCollection listTables(ListTablesSpec spec) { - return doList(spec); - } - - @Override - public TableCollection listTables() { - return doList(new ListTablesSpec()); - } - - @Override - public TableCollection listTables(String exclusiveStartTableName) { - return doList(new ListTablesSpec() - .withExclusiveStartTableName(exclusiveStartTableName)); - } - - @Override - public TableCollection listTables(String exclusiveStartTableName, - int maxResultSize) { - return doList(new ListTablesSpec() - .withExclusiveStartTableName(exclusiveStartTableName) - .withMaxResultSize(maxResultSize)); - } - - @Override - public TableCollection listTables(int maxResultSize) { - return doList(new ListTablesSpec() - .withMaxResultSize(maxResultSize)); - } - - private TableCollection doList(ListTablesSpec spec) { - return new ListTablesCollection(client, spec); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ListTablesPage.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ListTablesPage.java deleted file mode 100644 index 0e08ecd951cf..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ListTablesPage.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.NoSuchElementException; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.Page; -import software.amazon.awssdk.services.dynamodb.document.Table; -import software.amazon.awssdk.services.dynamodb.document.spec.ListTablesSpec; -import software.amazon.awssdk.services.dynamodb.model.ListTablesRequest; -import software.amazon.awssdk.services.dynamodb.model.ListTablesResponse; - -class ListTablesPage extends Page { - private final DynamoDbClient client; - private final ListTablesSpec spec; - private ListTablesRequest request; - private final int index; - private final String lastEvaluatedKey; - - ListTablesPage( - DynamoDbClient client, - ListTablesSpec spec, - ListTablesRequest request, - int index, - ListTablesResponse result) { - super(Collections.unmodifiableList( - toTableList(client, result.tableNames())), - result); - this.client = client; - this.spec = spec; - this.request = request; - this.index = index; - Integer max = spec.maxResultSize(); - if (max != null && (index + result.tableNames().size()) > max) { - this.lastEvaluatedKey = null; - } else { - this.lastEvaluatedKey = result.lastEvaluatedTableName(); - } - } - - private static List toTableList(DynamoDbClient client, List tableNames) { - if (tableNames == null) { - return null; - } - List
<Table> result = new ArrayList<Table>
    (tableNames.size()); - for (String tableName : tableNames) { - result.add(new Table(client, tableName)); - } - return result; - } - - @Override - public boolean hasNextPage() { - if (lastEvaluatedKey == null) { - return false; - } - Integer max = spec.maxResultSize(); - if (max == null) { - return true; - } - return nextRequestLimit(max.intValue()) > 0; - } - - private int nextRequestLimit(int max) { - int nextIndex = index + this.size(); - return InternalUtils.minimum( - max - nextIndex, - spec.maxPageSize()); - } - - @Override - public Page nextPage() { - if (lastEvaluatedKey == null) { - throw new NoSuchElementException("No more pages"); - } - final Integer max = spec.maxResultSize(); - if (max != null) { - int nextLimit = nextRequestLimit(max.intValue()); - if (nextLimit == 0) { - throw new NoSuchElementException("No more pages"); - } - request = request.toBuilder().limit(nextLimit).build(); - } - request = request.toBuilder().exclusiveStartTableName(lastEvaluatedKey).build(); - ListTablesResponse result = client.listTables(request); - final int nextIndex = index + this.size(); - return new ListTablesPage(client, spec, request, nextIndex, result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PageBasedCollection.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PageBasedCollection.java deleted file mode 100644 index 96c2380056b8..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PageBasedCollection.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import software.amazon.awssdk.services.dynamodb.document.LowLevelResultListener; -import software.amazon.awssdk.services.dynamodb.document.Page; - -/** - * Abstract base class for all page-based collections. - * - * @param resource type - * @param low level outcome/result type - */ -public abstract class PageBasedCollection implements Iterable { - private volatile R lastLowLevelResult; - private volatile LowLevelResultListener listener = LowLevelResultListener.none(); - - @Override - public IteratorSupport iterator() { - PageIterable pageIterable = pages(); - final PageIterator pageIterator = pageIterable.iterator(); - return new IteratorSupport(pageIterator); - } - - public PageIterable pages() { - return new PageIterable(this); - } - - public abstract Page firstPage(); - - /** - * Returns the maximum number of resources to be retrieved in this - * collection; or null if there is no limit. - */ - public abstract Integer getMaxResultSize(); - - /** - * Returns the low-level result last retrieved (for the current page) from - * the server side; or null if there has yet no calls to the server. 
- */ - public R getLastLowLevelResult() { - return lastLowLevelResult; - } - - /** - * Internal method used by the implementation layer for setting - * the low level result received from the server side. - */ - protected void setLastLowLevelResult(R lowLevelResult) { - this.lastLowLevelResult = lowLevelResult; - // deliver the event of receiving a low level result from the server side - listener.onLowLevelResult(lowLevelResult); - } - - /** - * Used to register a listener for the event of receiving a low-level result - * from the server side. - * - * @param listener - * listener to be registered. If null, a "none" listener will be - * set. - * @return the previously registered listener. The return value is never - * null. - */ - public LowLevelResultListener registerLowLevelResultListener(LowLevelResultListener listener) { - LowLevelResultListener prev = this.listener; - if (listener == null) { - this.listener = LowLevelResultListener.none(); - } else { - this.listener = listener; - } - return prev; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PageIterable.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PageIterable.java deleted file mode 100644 index 82255f6fcb43..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PageIterable.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import software.amazon.awssdk.services.dynamodb.document.Page; - - -/** - * @param resource type - * @param low level result type - */ -public class PageIterable implements Iterable> { - private final PageBasedCollection col; - - PageIterable(PageBasedCollection col) { - this.col = col; - } - - @Override - public PageIterator iterator() { - return new PageIterator(col); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PageIterator.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PageIterator.java deleted file mode 100644 index 90815c52f4ba..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PageIterator.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.Iterator; -import software.amazon.awssdk.services.dynamodb.document.Page; - -/** - * @param resource type - * @param low level result type - */ -class PageIterator implements Iterator> { - - private final PageBasedCollection col; - private Page page; - - PageIterator(PageBasedCollection col) { - this.col = col; - } - - @Override - public boolean hasNext() { - Integer max = col.getMaxResultSize(); - if (max != null && max.intValue() <= 0) { - return false; - } - return page == null || page.hasNextPage(); - } - - @Override - public Page next() { - if (page == null) { - page = col.firstPage(); - } else { - page = page.nextPage(); - col.setLastLowLevelResult(page.lowLevelResult()); - } - return page; - } - - @Override - public void remove() { - throw new UnsupportedOperationException("Collection is read-only"); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PutItemImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PutItemImpl.java deleted file mode 100644 index 52c0e2a37905..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/PutItemImpl.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.PutItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.Table; -import software.amazon.awssdk.services.dynamodb.document.api.PutItemApi; -import software.amazon.awssdk.services.dynamodb.document.spec.PutItemSpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.model.PutItemResponse; - -/** - * The implementation for PutItemApi. - */ -public class PutItemImpl extends AbstractImpl implements PutItemApi { - public PutItemImpl(DynamoDbClient client, Table table) { - super(client, table); - } - - @Override - public PutItemOutcome putItem(Item item) { - return doPutItem(new PutItemSpec().withItem(item)); - } - - @Override - public PutItemOutcome putItem(Item item, Expected... 
expected) { - return doPutItem(new PutItemSpec() - .withItem(item) - .withExpected(expected)); - } - - @Override - public PutItemOutcome putItem(Item item, String conditionExpression, - Map nameMap, Map valueMap) { - return doPutItem(new PutItemSpec() - .withItem(item) - .withConditionExpression(conditionExpression) - .withNameMap(nameMap) - .valueMap(valueMap)); - } - - @Override - public PutItemOutcome putItem(PutItemSpec spec) { - return doPutItem(spec); - } - - private PutItemOutcome doPutItem(PutItemSpec spec) { - // set the table name - String tableName = getTable().getTableName(); - PutItemRequest.Builder requestBuilder = spec.getRequest().toBuilder().tableName(tableName); - // set up the item - Item item = spec.getItem(); - final Map attributes = InternalUtils.toAttributeValues(item); - // set up the expected attribute map, if any - final Map expectedMap = - InternalUtils.toExpectedAttributeValueMap(spec.getExpected()); - // set up the value map, if any (when expression API is used) - final Map attrValMap = - InternalUtils.fromSimpleMap(spec.valueMap()); - // set up the request - requestBuilder.item(attributes) - .expected(expectedMap) - .expressionAttributeNames(spec.nameMap()) - .expressionAttributeValues(attrValMap) - ; - PutItemResponse result = getClient().putItem(requestBuilder.build()); - return new PutItemOutcome(result); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryCollection.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryCollection.java deleted file mode 100644 index 0b00f0479ccb..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryCollection.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.LinkedHashMap; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.ItemCollection; -import software.amazon.awssdk.services.dynamodb.document.Page; -import software.amazon.awssdk.services.dynamodb.document.QueryOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.QuerySpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.QueryRequest; -import software.amazon.awssdk.services.dynamodb.model.QueryResponse; - -class QueryCollection extends ItemCollection { - private final DynamoDbClient client; - private final QuerySpec spec; - private final Map startKey; - - QueryCollection(DynamoDbClient client, QuerySpec spec) { - this.client = client; - this.spec = spec; - Map startKey = - spec.getRequest().exclusiveStartKey(); - this.startKey = startKey == null - ? 
null - : new LinkedHashMap(startKey); - } - - @Override - public Page firstPage() { - QueryRequest request = spec.getRequest().toBuilder() - .exclusiveStartKey(startKey) - .limit(InternalUtils.minimum( - spec.maxResultSize(), - spec.maxPageSize())) - .build(); - spec.setRequest(request); - QueryResponse result = client.query(request); - QueryOutcome outcome = new QueryOutcome(result); - setLastLowLevelResult(outcome); - return new QueryPage(client, spec, request, 0, outcome); - } - - @Override - public Integer getMaxResultSize() { - return spec.maxResultSize(); - } - - protected void setLastLowLevelResult(QueryOutcome lowLevelResult) { - super.setLastLowLevelResult(lowLevelResult); - QueryResponse result = lowLevelResult.getQueryResponse(); - accumulateStats(result.consumedCapacity(), result.count(), - result.scannedCount()); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryCollectionTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryCollectionTest.java deleted file mode 100644 index 7d7bfff6399d..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryCollectionTest.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.util.HashMap; -import java.util.Map; -import java.util.Random; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.document.QueryOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.QuerySpec; -import software.amazon.awssdk.services.dynamodb.model.Capacity; -import software.amazon.awssdk.services.dynamodb.model.ConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.QueryResponse; - -public class QueryCollectionTest { - private static final Random rand = new Random(); - - @Test - public void testEmptyResult() { - QueryCollection col = new QueryCollection(null, new QuerySpec()); - col.setLastLowLevelResult(new QueryOutcome(QueryResponse.builder().build())); - assertTrue(0 == col.getTotalCount()); - assertTrue(0 == col.getTotalScannedCount()); - assertNull(col.getTotalConsumedCapacity()); - } - - @Test - public void setLastLowLevelResult() { - QueryCollection col = new QueryCollection(null, new QuerySpec()); - QueryResponse result = QueryResponse.builder() - .count(rand.nextInt()) - .scannedCount(rand.nextInt()).build(); - - Map gsi = new HashMap(); - gsi.put("gsi1", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - gsi.put("gsi2", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - - Map lsi = new HashMap(); - lsi.put("lsi1", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - lsi.put("lsi2", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - - ConsumedCapacity consumedCapacity = ConsumedCapacity.builder() - .capacityUnits(rand.nextDouble()) - .table(Capacity.builder().capacityUnits(rand.nextDouble()).build()) - .tableName("tableName") - .globalSecondaryIndexes(gsi) - .localSecondaryIndexes(lsi) - .build(); - // Once - result = result.toBuilder().consumedCapacity(consumedCapacity).build(); - col.setLastLowLevelResult(new QueryOutcome(result)); - - assertTrue(result.count() == col.getTotalCount()); - assertTrue(result.scannedCount() == col.getTotalScannedCount()); - - ConsumedCapacity total = col.getTotalConsumedCapacity(); - assertNotSame(total, consumedCapacity); - assertEquals(total, consumedCapacity); - - assertNotSame(gsi, total.globalSecondaryIndexes()); - assertNotSame(lsi, total.localSecondaryIndexes()); - - // Twice - col.setLastLowLevelResult(new QueryOutcome(result)); - - assertTrue(result.count() * 2 == col.getTotalCount()); - assertTrue(result.scannedCount() * 2 == col.getTotalScannedCount()); - - total = col.getTotalConsumedCapacity(); - assertTrue(total.capacityUnits() == 2 * consumedCapacity.capacityUnits()); - - Map gsiTotal = total.globalSecondaryIndexes(); - Map lsiTotal = total.localSecondaryIndexes(); - assertTrue(2 == gsiTotal.size()); - assertTrue(2 == lsiTotal.size()); - - assertTrue(gsi.get("gsi1").capacityUnits() * 2 == gsiTotal.get("gsi1").capacityUnits()); - assertTrue(gsi.get("gsi2").capacityUnits() * 2 == gsiTotal.get("gsi2").capacityUnits()); - - assertTrue(lsi.get("lsi1").capacityUnits() * 2 == lsiTotal.get("lsi1").capacityUnits()); - assertTrue(lsi.get("lsi2").capacityUnits() * 2 == lsiTotal.get("lsi2").capacityUnits()); - - // A different one - QueryResponse result3 = QueryResponse.builder() - .count(rand.nextInt()) - .scannedCount(rand.nextInt()) - .build(); - - Map gsi3 = new 
HashMap(); - gsi3.put("gsi3", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - - Map lsi3 = new HashMap(); - lsi3.put("lsi3", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - - ConsumedCapacity consumedCapacity3 = ConsumedCapacity.builder() - .capacityUnits(rand.nextDouble()) - .table(Capacity.builder().capacityUnits(rand.nextDouble()).build()) - .tableName("tableName") - .globalSecondaryIndexes(gsi3) - .localSecondaryIndexes(lsi3) - .build(); - result3 = result3.toBuilder().consumedCapacity(consumedCapacity3).build(); - col.setLastLowLevelResult(new QueryOutcome(result3)); - - assertTrue(result.count() * 2 + result3.count() == col.getTotalCount()); - assertTrue(result.scannedCount() * 2 + result3.scannedCount() == col.getTotalScannedCount()); - - total = col.getTotalConsumedCapacity(); - assertTrue(total.capacityUnits() == - 2 * consumedCapacity.capacityUnits() - + consumedCapacity3.capacityUnits()); - - gsiTotal = total.globalSecondaryIndexes(); - lsiTotal = total.localSecondaryIndexes(); - assertTrue(3 == gsiTotal.size()); - assertTrue(3 == lsiTotal.size()); - - assertTrue(gsi.get("gsi1").capacityUnits() * 2 == gsiTotal.get("gsi1").capacityUnits()); - assertTrue(gsi.get("gsi2").capacityUnits() * 2 == gsiTotal.get("gsi2").capacityUnits()); - assertTrue(gsi3.get("gsi3").capacityUnits() == gsiTotal.get("gsi3").capacityUnits()); - - assertTrue(lsi.get("lsi1").capacityUnits() * 2 == lsiTotal.get("lsi1").capacityUnits()); - assertTrue(lsi.get("lsi2").capacityUnits() * 2 == lsiTotal.get("lsi2").capacityUnits()); - assertTrue(lsi3.get("lsi3").capacityUnits() == lsiTotal.get("lsi3").capacityUnits()); - - // An empty one - QueryResponse result4 = QueryResponse.builder().build(); - ConsumedCapacity consumedCapacity4 = ConsumedCapacity.builder().build(); - result4 = result4.toBuilder().consumedCapacity(consumedCapacity4).build(); - col.setLastLowLevelResult(new QueryOutcome(result4)); - - // all assertions are expected to be the same as the last set of assertions - assertTrue(result.count() * 2 + result3.count() == col.getTotalCount()); - assertTrue(result.scannedCount() * 2 + result3.scannedCount() == col.getTotalScannedCount()); - - total = col.getTotalConsumedCapacity(); - assertTrue(total.capacityUnits() == - 2 * consumedCapacity.capacityUnits() - + consumedCapacity3.capacityUnits()); - - gsiTotal = total.globalSecondaryIndexes(); - lsiTotal = total.localSecondaryIndexes(); - assertTrue(3 == gsiTotal.size()); - assertTrue(3 == lsiTotal.size()); - - assertTrue(gsi.get("gsi1").capacityUnits() * 2 == gsiTotal.get("gsi1").capacityUnits()); - assertTrue(gsi.get("gsi2").capacityUnits() * 2 == gsiTotal.get("gsi2").capacityUnits()); - assertTrue(gsi3.get("gsi3").capacityUnits() == gsiTotal.get("gsi3").capacityUnits()); - - assertTrue(lsi.get("lsi1").capacityUnits() * 2 == lsiTotal.get("lsi1").capacityUnits()); - assertTrue(lsi.get("lsi2").capacityUnits() * 2 == lsiTotal.get("lsi2").capacityUnits()); - assertTrue(lsi3.get("lsi3").capacityUnits() == lsiTotal.get("lsi3").capacityUnits()); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryImpl.java deleted file mode 100644 index 764b0fc27f32..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryImpl.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, 
Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.Collection; -import java.util.LinkedHashMap; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.ItemCollection; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.KeyCondition; -import software.amazon.awssdk.services.dynamodb.document.QueryFilter; -import software.amazon.awssdk.services.dynamodb.document.QueryOutcome; -import software.amazon.awssdk.services.dynamodb.document.RangeKeyCondition; -import software.amazon.awssdk.services.dynamodb.document.Table; -import software.amazon.awssdk.services.dynamodb.document.api.QueryApi; -import software.amazon.awssdk.services.dynamodb.document.spec.QuerySpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.QueryRequest; - -/** - * The implementation for QueryApi of a table. - */ -public class QueryImpl extends AbstractImpl implements QueryApi { - public QueryImpl(DynamoDbClient client, Table table) { - super(client, table); - } - - @Override - public ItemCollection query(String hashKeyName, Object hashKey) { - return doQuery(new QuerySpec() - .withHashKey(new KeyAttribute(hashKeyName, hashKey))); - } - - @Override - public ItemCollection query(KeyAttribute hashKey) { - return doQuery(new QuerySpec().withHashKey(hashKey)); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition) { - return doQuery(new QuerySpec().withHashKey(hashKey) - .withRangeKeyCondition(rangeKeyCondition)); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, QueryFilter... 
queryFilters) { - return doQuery(new QuerySpec().withHashKey(hashKey) - .withRangeKeyCondition(rangeKeyCondition) - .withQueryFilters(queryFilters)); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, String filterExpression, - Map nameMap, Map valueMap) { - return doQuery(new QuerySpec().withHashKey(hashKey) - .withRangeKeyCondition(rangeKeyCondition) - .withFilterExpression(filterExpression) - .withNameMap(nameMap) - .valueMap(valueMap)); - } - - @Override - public ItemCollection query(KeyAttribute hashKey, - RangeKeyCondition rangeKeyCondition, String filterExpression, - String projectionExpression, Map nameMap, - Map valueMap) { - return doQuery(new QuerySpec().withHashKey(hashKey) - .withRangeKeyCondition(rangeKeyCondition) - .withFilterExpression(filterExpression) - .withProjectionExpression(projectionExpression) - .withNameMap(nameMap) - .valueMap(valueMap)); - } - - @Override - public ItemCollection query(QuerySpec spec) { - return doQuery(spec); - } - - protected ItemCollection doQuery(QuerySpec spec) { - // set the table name - String tableName = getTable().getTableName(); - QueryRequest.Builder requestBuilder = spec.getRequest().toBuilder().tableName(tableName); - - Map conditions = new LinkedHashMap<>(); - - if (spec.getRequest().keyConditions() != null) { - conditions.putAll(spec.getRequest().keyConditions()); - } - - // hash key - final KeyAttribute hashKey = spec.getHashKey(); - if (hashKey != null) { - conditions.put(hashKey.name(), - Condition.builder() - .comparisonOperator(ComparisonOperator.EQ) - .attributeValueList(InternalUtils.toAttributeValue(hashKey.value())).build()); - } - // range key condition - RangeKeyCondition rangeKeyCond = spec.getRangeKeyCondition(); - if (rangeKeyCond != null) { - KeyCondition keyCond = rangeKeyCond.getKeyCondition(); - if (keyCond == null) { - throw new IllegalArgumentException("key condition not specified in range key condition"); - } - Object[] values = rangeKeyCond.values(); - if (values == null) { - throw new IllegalArgumentException("key condition values not specified in range key condition"); - } - conditions.put(rangeKeyCond.getAttrName(), - Condition.builder() - .comparisonOperator(keyCond.toComparisonOperator()) - .attributeValueList(InternalUtils.toAttributeValues(values)).build()); - } - - requestBuilder.keyConditions(conditions); - - // query filters; - Collection filters = spec.getQueryFilters(); - if (filters != null) { - requestBuilder.queryFilter(InternalUtils.toAttributeConditionMap(filters)); - } - - // set up the start key, if any - Collection startKey = spec.getExclusiveStartKey(); - if (startKey != null) { - requestBuilder.exclusiveStartKey(InternalUtils.toAttributeValueMap(startKey)); - } - - // set up the value map, if any (when expression API is used) - final Map attrValMap = InternalUtils.fromSimpleMap(spec.valueMap()); - // set up expressions, if any - requestBuilder.expressionAttributeNames(spec.nameMap()) - .expressionAttributeValues(attrValMap); - - spec.setRequest(requestBuilder.build()); - return new QueryCollection(getClient(), spec); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition) { - return query(new KeyAttribute(hashKeyName, hashKeyValue), rangeKeyCondition); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition, - QueryFilter... 
queryFilters) { - return query(new KeyAttribute(hashKeyName, hashKeyValue), - rangeKeyCondition, queryFilters); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition, - String filterExpression, Map nameMap, - Map valueMap) { - return query(new KeyAttribute(hashKeyName, hashKeyValue), - rangeKeyCondition, filterExpression, nameMap, valueMap); - } - - @Override - public ItemCollection query(String hashKeyName, - Object hashKeyValue, RangeKeyCondition rangeKeyCondition, - String filterExpression, String projectionExpression, - Map nameMap, Map valueMap) { - return query(new KeyAttribute(hashKeyName, hashKeyValue), - rangeKeyCondition, filterExpression, projectionExpression, - nameMap, valueMap); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryPage.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryPage.java deleted file mode 100644 index d27e56d89587..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/QueryPage.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.toItemList; - -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.Page; -import software.amazon.awssdk.services.dynamodb.document.QueryOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.QuerySpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.QueryRequest; -import software.amazon.awssdk.services.dynamodb.model.QueryResponse; - -class QueryPage extends Page { - private final DynamoDbClient client; - private final QuerySpec spec; - private QueryRequest request; - private final int index; - private final Map lastEvaluatedKey; - - QueryPage( - DynamoDbClient client, - QuerySpec spec, - QueryRequest request, - int index, - QueryOutcome outcome) { - super(Collections.unmodifiableList( - toItemList(outcome.getQueryResponse().items())), - outcome); - this.client = client; - this.spec = spec; - this.request = request; - this.index = index; - - final Integer max = spec.maxResultSize(); - final QueryResponse result = outcome.getQueryResponse(); - final List ilist = result.items(); - final int size = ilist == null ? 
0 : ilist.size(); - if (max != null && (index + size) > max) { - this.lastEvaluatedKey = null; - } else { - this.lastEvaluatedKey = result.lastEvaluatedKey(); - } - } - - @Override - public boolean hasNextPage() { - if (lastEvaluatedKey == null) { - return false; - } - Integer max = spec.maxResultSize(); - if (max == null) { - return true; - } - return nextRequestLimit(max.intValue()) > 0; - } - - private int nextRequestLimit(int max) { - int nextIndex = index + this.size(); - return InternalUtils.minimum( - max - nextIndex, - spec.maxPageSize()); - } - - @Override - public Page nextPage() { - if (lastEvaluatedKey == null) { - throw new NoSuchElementException("No more pages"); - } - final Integer max = spec.maxResultSize(); - if (max != null) { - int nextLimit = nextRequestLimit(max.intValue()); - if (nextLimit == 0) { - throw new NoSuchElementException("No more pages"); - } - request = request.toBuilder().limit(nextLimit).build(); - } - request = request.toBuilder().exclusiveStartKey(lastEvaluatedKey).build(); - QueryResponse result = client.query(request); - final int nextIndex = index + this.size(); - return new QueryPage(client, spec, request, nextIndex, - new QueryOutcome(result)); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanCollection.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanCollection.java deleted file mode 100644 index 3eba543f89e8..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanCollection.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.LinkedHashMap; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.ItemCollection; -import software.amazon.awssdk.services.dynamodb.document.Page; -import software.amazon.awssdk.services.dynamodb.document.ScanOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.ScanSpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ScanRequest; -import software.amazon.awssdk.services.dynamodb.model.ScanResponse; - -class ScanCollection extends ItemCollection { - - private final DynamoDbClient client; - private final ScanSpec spec; - private final Map startKey; - - ScanCollection(DynamoDbClient client, ScanSpec spec) { - this.client = client; - this.spec = spec; - Map startKey = spec.getRequest() - .exclusiveStartKey(); - this.startKey = startKey == null ? 
null : new LinkedHashMap(startKey); - } - - @Override - public Page firstPage() { - ScanRequest request = spec.getRequest(); - request = request.toBuilder() - .exclusiveStartKey(startKey) - .limit(InternalUtils.minimum( - spec.maxResultSize(), - spec.maxPageSize())) - .build(); - - spec.setRequest(request); - - ScanResponse result = client.scan(request); - ScanOutcome outcome = new ScanOutcome(result); - setLastLowLevelResult(outcome); - return new ScanPage(client, spec, request, 0, outcome); - } - - @Override - public Integer getMaxResultSize() { - return spec.maxResultSize(); - } - - protected void setLastLowLevelResult(ScanOutcome lowLevelResult) { - super.setLastLowLevelResult(lowLevelResult); - ScanResponse result = lowLevelResult.scanResult(); - accumulateStats(result.consumedCapacity(), result.count(), - result.scannedCount()); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanCollectionTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanCollectionTest.java deleted file mode 100644 index 6c0e87fad17e..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanCollectionTest.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.util.HashMap; -import java.util.Map; -import java.util.Random; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.document.ScanOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.ScanSpec; -import software.amazon.awssdk.services.dynamodb.model.Capacity; -import software.amazon.awssdk.services.dynamodb.model.ConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.ScanResponse; - -public class ScanCollectionTest { - private static final Random rand = new Random(); - - @Test - public void testEmptyResult() { - ScanCollection col = new ScanCollection(null, new ScanSpec()); - col.setLastLowLevelResult(new ScanOutcome(ScanResponse.builder().build())); - assertTrue(0 == col.getTotalCount()); - assertTrue(0 == col.getTotalScannedCount()); - assertNull(col.getTotalConsumedCapacity()); - } - - @Test - public void setLastLowLevelResult() { - ScanCollection col = new ScanCollection(null, new ScanSpec()); - ScanResponse result = ScanResponse.builder() - .count(rand.nextInt()) - .scannedCount(rand.nextInt()).build(); - - Map gsi = new HashMap(); - gsi.put("gsi1", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - gsi.put("gsi2", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - - Map lsi = new HashMap(); - lsi.put("lsi1", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - lsi.put("lsi2", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - - ConsumedCapacity consumedCapacity = ConsumedCapacity.builder() - .capacityUnits(rand.nextDouble()) - .table(Capacity.builder().capacityUnits(rand.nextDouble()).build()) - .tableName("tableName") - .globalSecondaryIndexes(gsi) - .localSecondaryIndexes(lsi).build(); - // Once - result = result.toBuilder().consumedCapacity(consumedCapacity).build(); - col.setLastLowLevelResult(new ScanOutcome(result)); - - assertTrue(result.count() == col.getTotalCount()); - assertTrue(result.scannedCount() == col.getTotalScannedCount()); - - ConsumedCapacity total = col.getTotalConsumedCapacity(); - assertNotSame(total, consumedCapacity); - assertEquals(total, consumedCapacity); - - assertNotSame(gsi, total.globalSecondaryIndexes()); - assertNotSame(lsi, total.localSecondaryIndexes()); - - // Twice - col.setLastLowLevelResult(new ScanOutcome(result)); - - assertTrue(result.count() * 2 == col.getTotalCount()); - assertTrue(result.scannedCount() * 2 == col.getTotalScannedCount()); - - total = col.getTotalConsumedCapacity(); - assertTrue(total.capacityUnits() == 2 * consumedCapacity.capacityUnits()); - - Map gsiTotal = total.globalSecondaryIndexes(); - Map lsiTotal = total.localSecondaryIndexes(); - assertTrue(2 == gsiTotal.size()); - assertTrue(2 == lsiTotal.size()); - - assertTrue(gsi.get("gsi1").capacityUnits() * 2 == gsiTotal.get("gsi1").capacityUnits()); - assertTrue(gsi.get("gsi2").capacityUnits() * 2 == gsiTotal.get("gsi2").capacityUnits()); - - assertTrue(lsi.get("lsi1").capacityUnits() * 2 == lsiTotal.get("lsi1").capacityUnits()); - assertTrue(lsi.get("lsi2").capacityUnits() * 2 == lsiTotal.get("lsi2").capacityUnits()); - - // A different one - ScanResponse result3 = ScanResponse.builder() - .count(rand.nextInt()) - .scannedCount(rand.nextInt()) - .build(); - - Map gsi3 = new HashMap(); - 
gsi3.put("gsi3", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - - Map lsi3 = new HashMap(); - lsi3.put("lsi3", Capacity.builder().capacityUnits(rand.nextDouble()).build()); - - ConsumedCapacity consumedCapacity3 = ConsumedCapacity.builder() - .capacityUnits(rand.nextDouble()) - .table(Capacity.builder().capacityUnits(rand.nextDouble()).build()) - .tableName("tableName") - .globalSecondaryIndexes(gsi3) - .localSecondaryIndexes(lsi3) - .build(); - result3 = result3.toBuilder().consumedCapacity(consumedCapacity3).build(); - col.setLastLowLevelResult(new ScanOutcome(result3)); - - assertTrue(result.count() * 2 + result3.count() == col.getTotalCount()); - assertTrue(result.scannedCount() * 2 + result3.scannedCount() == col.getTotalScannedCount()); - - total = col.getTotalConsumedCapacity(); - assertTrue(total.capacityUnits() == - 2 * consumedCapacity.capacityUnits() - + consumedCapacity3.capacityUnits()); - - gsiTotal = total.globalSecondaryIndexes(); - lsiTotal = total.localSecondaryIndexes(); - assertTrue(3 == gsiTotal.size()); - assertTrue(3 == lsiTotal.size()); - - assertTrue(gsi.get("gsi1").capacityUnits() * 2 == gsiTotal.get("gsi1").capacityUnits()); - assertTrue(gsi.get("gsi2").capacityUnits() * 2 == gsiTotal.get("gsi2").capacityUnits()); - assertTrue(gsi3.get("gsi3").capacityUnits() == gsiTotal.get("gsi3").capacityUnits()); - - assertTrue(lsi.get("lsi1").capacityUnits() * 2 == lsiTotal.get("lsi1").capacityUnits()); - assertTrue(lsi.get("lsi2").capacityUnits() * 2 == lsiTotal.get("lsi2").capacityUnits()); - assertTrue(lsi3.get("lsi3").capacityUnits() == lsiTotal.get("lsi3").capacityUnits()); - - // An empty one - ScanResponse result4 = ScanResponse.builder().build(); - ConsumedCapacity consumedCapacity4 = ConsumedCapacity.builder().build(); - result4 = result4.toBuilder().consumedCapacity(consumedCapacity4).build(); - col.setLastLowLevelResult(new ScanOutcome(result4)); - - // all assertions are expected to be the same as the last set of assertions - assertTrue(result.count() * 2 + result3.count() == col.getTotalCount()); - assertTrue(result.scannedCount() * 2 + result3.scannedCount() == col.getTotalScannedCount()); - - total = col.getTotalConsumedCapacity(); - assertTrue(total.capacityUnits() == - 2 * consumedCapacity.capacityUnits() - + consumedCapacity3.capacityUnits()); - - gsiTotal = total.globalSecondaryIndexes(); - lsiTotal = total.localSecondaryIndexes(); - assertTrue(3 == gsiTotal.size()); - assertTrue(3 == lsiTotal.size()); - - assertTrue(gsi.get("gsi1").capacityUnits() * 2 == gsiTotal.get("gsi1").capacityUnits()); - assertTrue(gsi.get("gsi2").capacityUnits() * 2 == gsiTotal.get("gsi2").capacityUnits()); - assertTrue(gsi3.get("gsi3").capacityUnits() == gsiTotal.get("gsi3").capacityUnits()); - - assertTrue(lsi.get("lsi1").capacityUnits() * 2 == lsiTotal.get("lsi1").capacityUnits()); - assertTrue(lsi.get("lsi2").capacityUnits() * 2 == lsiTotal.get("lsi2").capacityUnits()); - assertTrue(lsi3.get("lsi3").capacityUnits() == lsiTotal.get("lsi3").capacityUnits()); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanImpl.java deleted file mode 100644 index 9a84ae4c83c3..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanImpl.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.Collection; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.ItemCollection; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.ScanFilter; -import software.amazon.awssdk.services.dynamodb.document.ScanOutcome; -import software.amazon.awssdk.services.dynamodb.document.Table; -import software.amazon.awssdk.services.dynamodb.document.api.ScanApi; -import software.amazon.awssdk.services.dynamodb.document.spec.ScanSpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ScanRequest; - -/** - * The implementation for ScanApi. - */ -public class ScanImpl extends AbstractImpl implements ScanApi { - public ScanImpl(DynamoDbClient client, Table table) { - super(client, table); - } - - @Override - public ItemCollection scan(ScanFilter... scanFilters) { - return doScan(new ScanSpec() - .withScanFilters(scanFilters)); - } - - - @Override - public ItemCollection scan(String filterExpression, - Map nameMap, Map valueMap) { - return doScan(new ScanSpec() - .withFilterExpression(filterExpression) - .withNameMap(nameMap) - .valueMap(valueMap)); - } - - - @Override - public ItemCollection scan(String filterExpression, - String projectionExpression, Map nameMap, - Map valueMap) { - return doScan(new ScanSpec() - .withFilterExpression(filterExpression) - .withProjectionExpression(projectionExpression) - .withNameMap(nameMap) - .valueMap(valueMap)); - } - - @Override - public ItemCollection scan(ScanSpec spec) { - return doScan(spec); - } - - protected ItemCollection doScan(ScanSpec spec) { - // set the table name - String tableName = getTable().getTableName(); - ScanRequest.Builder requestBuilder = spec.getRequest().toBuilder().tableName(tableName); - - // set up the start key, if any - Collection startKey = spec.getExclusiveStartKey(); - if (startKey != null) { - requestBuilder.exclusiveStartKey(InternalUtils.toAttributeValueMap(startKey)); - } - - // scan filters; - Collection filters = spec.scanFilters(); - if (filters != null) { - requestBuilder.scanFilter(InternalUtils.toAttributeConditionMap(filters)); - } - - // set up the value map, if any (when expression API is used) - final Map attrValMap = InternalUtils.fromSimpleMap(spec.valueMap()); - // set up expressions, if any - requestBuilder.expressionAttributeNames(spec.nameMap()) - .expressionAttributeValues(attrValMap); - - spec.setRequest(requestBuilder.build()); - return new ScanCollection(getClient(), spec); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanPage.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanPage.java deleted file mode 
100644 index 2935f80a6713..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ScanPage.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import static software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils.toItemList; - -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.document.Page; -import software.amazon.awssdk.services.dynamodb.document.ScanOutcome; -import software.amazon.awssdk.services.dynamodb.document.spec.ScanSpec; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ScanRequest; -import software.amazon.awssdk.services.dynamodb.model.ScanResponse; - -class ScanPage extends Page { - - private final DynamoDbClient client; - private final ScanSpec spec; - private ScanRequest request; - private final int index; - private final Map lastEvaluatedKey; - - ScanPage( - DynamoDbClient client, - ScanSpec spec, - ScanRequest request, - int index, - ScanOutcome outcome) { - super(Collections.unmodifiableList( - toItemList(outcome.scanResult().items())), - outcome); - this.client = client; - this.spec = spec; - this.request = request; - this.index = index; - - final Integer max = spec.maxResultSize(); - final ScanResponse result = outcome.scanResult(); - final List ilist = result.items(); - final int size = ilist == null ? 
0 : ilist.size(); - if (max != null && (index + size) > max) { - this.lastEvaluatedKey = null; - } else { - this.lastEvaluatedKey = result.lastEvaluatedKey(); - } - } - - @Override - public boolean hasNextPage() { - if (lastEvaluatedKey == null) { - return false; - } - Integer max = spec.maxResultSize(); - if (max == null) { - return true; - } - return nextRequestLimit(max.intValue()) > 0; - } - - private int nextRequestLimit(int max) { - int nextIndex = index + this.size(); - return InternalUtils.minimum( - max - nextIndex, - spec.maxPageSize()); - } - - @Override - public Page nextPage() { - if (lastEvaluatedKey == null) { - throw new NoSuchElementException("No more pages"); - } - final Integer max = spec.maxResultSize(); - if (max != null) { - int nextLimit = nextRequestLimit(max.intValue()); - if (nextLimit == 0) { - throw new NoSuchElementException("No more pages"); - } - request = request.toBuilder().limit(nextLimit).build(); - } - request = request.toBuilder().exclusiveStartKey(lastEvaluatedKey).build(); - // fire off request to the server side - ScanResponse result = client.scan(request); - final int nextIndex = index + this.size(); - return new ScanPage(client, spec, request, nextIndex, - new ScanOutcome(result)); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/UpdateItemImpl.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/UpdateItemImpl.java deleted file mode 100644 index c6ecad296a81..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/UpdateItemImpl.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -import java.util.Collection; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.document.AttributeUpdate; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.Table; -import software.amazon.awssdk.services.dynamodb.document.UpdateItemOutcome; -import software.amazon.awssdk.services.dynamodb.document.api.UpdateItemApi; -import software.amazon.awssdk.services.dynamodb.document.spec.UpdateItemSpec; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; -import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; - -/** - * The implementation for UpdateItemApi. 
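A minimal usage sketch of the UpdateItemApi overloads implemented below. The api handle would typically be a Table (which implements UpdateItemApi); the table, key, attribute names, and values here are all hypothetical:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import software.amazon.awssdk.services.dynamodb.document.UpdateItemOutcome;
    import software.amazon.awssdk.services.dynamodb.document.api.UpdateItemApi;

    public class UpdateItemSketch {
        // updateItemApi would typically be a Table, which implements UpdateItemApi.
        static UpdateItemOutcome closeOrder(UpdateItemApi updateItemApi) {
            Map<String, String> names = new LinkedHashMap<>();
            names.put("#st", "OrderStatus");
            Map<String, Object> values = new LinkedHashMap<>();
            values.put(":open", "OPEN");
            values.put(":closed", "CLOSED");

            // Flip the status to CLOSED only while it is still OPEN.
            return updateItemApi.updateItem(
                    "CustomerId", "12345",     // hash-only primary key
                    "SET #st = :closed",       // update expression
                    "#st = :open",             // condition expression
                    names, values);
        }
    }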
- */ -public class UpdateItemImpl implements UpdateItemApi { - - private final Table table; - private final DynamoDbClient client; - - public UpdateItemImpl(DynamoDbClient client, Table table) { - this.client = client; - this.table = table; - } - - @Override - public UpdateItemOutcome updateItem(PrimaryKey primaryKey, - AttributeUpdate... attributeUpdates) { - return updateItem(new UpdateItemSpec() - .withPrimaryKey(primaryKey) - .withAttributeUpdate(attributeUpdates)); - } - - @Override - public UpdateItemOutcome updateItem(PrimaryKey primaryKey, - Collection expected, AttributeUpdate... attributeUpdates) { - return updateItem(new UpdateItemSpec() - .withPrimaryKey(primaryKey) - .withExpected(expected) - .withAttributeUpdate(attributeUpdates)); - } - - @Override - public UpdateItemOutcome updateItem(PrimaryKey primaryKey, - String updateExpression, Map nameMap, - Map valueMap) { - return updateItem(new UpdateItemSpec() - .withPrimaryKey(primaryKey) - .withUpdateExpression(updateExpression) - .withNameMap(nameMap) - .valueMap(valueMap)); - } - - @Override - public UpdateItemOutcome updateItem(PrimaryKey primaryKey, - String updateExpression, String conditionExpression, - Map nameMap, Map valueMap) { - - return updateItem(new UpdateItemSpec().withPrimaryKey(primaryKey) - .withUpdateExpression(updateExpression) - .withConditionExpression(conditionExpression) - .withNameMap(nameMap) - .valueMap(valueMap)); - } - - @Override - public UpdateItemOutcome updateItem(UpdateItemSpec spec) { - return doUpdateItem(spec); - } - - private UpdateItemOutcome doUpdateItem(UpdateItemSpec spec) { - final UpdateItemRequest.Builder requestBuilder = spec.getRequest().toBuilder(); - requestBuilder.key(InternalUtils.toAttributeValueMap(spec.getKeyComponents())); - requestBuilder.tableName(table.getTableName()); - final Collection expected = spec.getExpected(); - final Map expectedMap = - InternalUtils.toExpectedAttributeValueMap(expected); - requestBuilder.expected(expectedMap); - requestBuilder.attributeUpdates( - InternalUtils.toAttributeValueUpdate(spec.getAttributeUpdate())); - requestBuilder.expressionAttributeNames(spec.nameMap()); - requestBuilder.expressionAttributeValues( - InternalUtils.fromSimpleMap(spec.valueMap())); - return new UpdateItemOutcome(client.updateItem(requestBuilder.build())); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, - Object hashKeyValue, AttributeUpdate... attributeUpdates) { - return updateItem(new PrimaryKey(hashKeyName, hashKeyValue), - attributeUpdates); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, - Object hashKeyValue, String rangeKeyName, Object rangeKeyValue, - AttributeUpdate... attributeUpdates) { - return updateItem(new PrimaryKey(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue), attributeUpdates); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, - Object hashKeyValue, Collection expected, - AttributeUpdate... attributeUpdates) { - return updateItem(new PrimaryKey(hashKeyName, hashKeyValue), - expected, - attributeUpdates); - } - - @Override - public UpdateItemOutcome updateItem( - String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - Collection expected, - AttributeUpdate... 
attributeUpdates) { - return updateItem(new PrimaryKey(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue), - expected, - attributeUpdates); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, - Object hashKeyValue, String updateExpression, - Map nameMap, Map valueMap) { - return updateItem(new PrimaryKey(hashKeyName, hashKeyValue), - updateExpression, nameMap, valueMap); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String updateExpression, Map nameMap, - Map valueMap) { - return updateItem(new PrimaryKey(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue), - updateExpression, nameMap, valueMap); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, - Object hashKeyValue, String updateExpression, - String conditionExpression, Map nameMap, - Map valueMap) { - return updateItem(new PrimaryKey(hashKeyName, hashKeyValue), - updateExpression, conditionExpression, nameMap, valueMap); - } - - @Override - public UpdateItemOutcome updateItem(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue, - String updateExpression, String conditionExpression, - Map nameMap, Map valueMap) { - return updateItem(new PrimaryKey(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue), - updateExpression, conditionExpression, nameMap, valueMap); - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ValueTransformer.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ValueTransformer.java deleted file mode 100644 index 3cd5f1502a8e..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/internal/ValueTransformer.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.internal; - -/** - * Internal value transformer SPI. - */ -abstract class ValueTransformer { - abstract Object transform(Object value); -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/AbstractCollectionSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/AbstractCollectionSpec.java deleted file mode 100644 index fabb06929542..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/AbstractCollectionSpec.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - - -import software.amazon.awssdk.awscore.AwsRequest; - -/** - * Abstract implementation class for parameter specification that involves - * collection of results. - * - * @param request type - */ -abstract class AbstractCollectionSpec - extends AbstractSpec { - - private Integer maxPageSize; - /** - * The maximum number of resources to be retrieved; or null if there is no - * limit. - */ - private Integer maxResultSize; - - - AbstractCollectionSpec(T req) { - super(req); - } - - public AbstractCollectionSpec withMaxResultSize( - Integer maxResultSize) { - this.maxResultSize = maxResultSize; - return this; - } - - public AbstractCollectionSpec withMaxResultSize( - int maxResultSize) { - this.maxResultSize = maxResultSize; - return this; - } - - public AbstractCollectionSpec withMaxPageSize(Integer maxPageSize) { - this.maxPageSize = maxPageSize; - return this; - } - - public AbstractCollectionSpec withMaxPageSize(int maxPageSize) { - this.maxPageSize = maxPageSize; - return this; - } - - /** - * The maximum number of resources to be retrieved in this query, including - * all the resources in all pages to be retrieved. - */ - public Integer maxResultSize() { - return maxResultSize; - } - - public void setMaxResultSize(Integer maxResultSize) { - this.maxResultSize = maxResultSize; - } - - public void setMaxResultSize(int maxResultSize) { - this.maxResultSize = maxResultSize; - } - - /** - * The maximum number of resources to be retrieved in a single page; used - * for pagination purposes. - */ - public Integer maxPageSize() { - return maxPageSize; - } - - public void setMaxPageSize(Integer value) { - maxPageSize = value; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/AbstractSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/AbstractSpec.java deleted file mode 100644 index ffbd202afa6c..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/AbstractSpec.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import software.amazon.awssdk.awscore.AwsRequest; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; - -/** - * Abstract implementation base class for parameter specification. - * - * @param request type - */ -class AbstractSpec { - private T req; - - AbstractSpec(T req) { - setRequest(req); - } - - public void setRequest(T req) { - InternalUtils.applyUserAgent(req); - this.req = req; - } - - /** - * Internal method. 
Not meant to be called directly. May change without notice. - */ - public T getRequest() { - return req; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/AbstractSpecWithPrimaryKey.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/AbstractSpecWithPrimaryKey.java deleted file mode 100644 index 4e46f55727f8..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/AbstractSpecWithPrimaryKey.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import java.util.Arrays; -import java.util.Collection; -import software.amazon.awssdk.awscore.AwsRequest; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; - -/** - * Common base class for parameter specification that involves a primary key. - */ -public class AbstractSpecWithPrimaryKey - extends AbstractSpec { - private Collection keyComponents; - - protected AbstractSpecWithPrimaryKey(T request) { - super(request); - } - - /** - * Returns the primary key components that has been specified. - */ - public final Collection getKeyComponents() { - return keyComponents; - } - - /** - * Sets the primary key with the specified key components. - */ - public AbstractSpecWithPrimaryKey withPrimaryKey(KeyAttribute... components) { - if (components == null) { - this.keyComponents = null; - } else { - this.keyComponents = Arrays.asList(components); - } - return this; - } - - /** - * Sets the primary key. - */ - public AbstractSpecWithPrimaryKey withPrimaryKey(PrimaryKey primaryKey) { - if (primaryKey == null) { - this.keyComponents = null; - } else { - this.keyComponents = primaryKey.getComponents(); - } - return this; - } - - /** - * Sets the primary key with the specified hash-only key name and value. - */ - public AbstractSpecWithPrimaryKey withPrimaryKey(String hashKeyName, Object hashKeyValue) { - if (hashKeyName == null) { - throw new IllegalArgumentException(); - } - withPrimaryKey(new PrimaryKey(hashKeyName, hashKeyValue)); - return this; - } - - /** - * Sets the primary key with the specified hash key and range key. 
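A short sketch of the inherited withPrimaryKey overloads, using the concrete GetItemSpec defined further down; the key names and values are hypothetical:

    import software.amazon.awssdk.services.dynamodb.document.PrimaryKey;
    import software.amazon.awssdk.services.dynamodb.document.spec.GetItemSpec;

    public class PrimaryKeySketch {
        public static void main(String[] args) {
            // Hash-and-range key supplied directly as name/value pairs...
            GetItemSpec byNames = new GetItemSpec()
                    .withPrimaryKey("CustomerId", "12345", "OrderDate", "2019-11-07");

            // ...or wrapped in a PrimaryKey built elsewhere.
            PrimaryKey key = new PrimaryKey("CustomerId", "12345", "OrderDate", "2019-11-07");
            GetItemSpec byKey = new GetItemSpec()
                    .withPrimaryKey(key)
                    .withConsistentRead(true);
        }
    }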
- */ - public AbstractSpecWithPrimaryKey withPrimaryKey(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - if (hashKeyName == null) { - throw new IllegalArgumentException("Invalid hash key name"); - } - if (rangeKeyName == null) { - throw new IllegalArgumentException("Invalid range key name"); - } - if (hashKeyName.equals(rangeKeyName)) { - throw new IllegalArgumentException("Names of hash and range keys must not be the same"); - } - withPrimaryKey(new PrimaryKey(hashKeyName, hashKeyValue, - rangeKeyName, rangeKeyValue)); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/BatchGetItemSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/BatchGetItemSpec.java deleted file mode 100644 index 998df0aba0ac..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/BatchGetItemSpec.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.document.TableKeysAndAttributes; -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.KeysAndAttributes; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; - -/** - * Full parameter specification for the BatchGetItem API. - */ -public class BatchGetItemSpec extends AbstractSpec { - private Collection tableKeyAndAttributes; - private Map unprocessedKeys; - - public BatchGetItemSpec() { - super(BatchGetItemRequest.builder().build()); - } - - public Collection getTableKeysAndAttributes() { - return tableKeyAndAttributes; - } - - public BatchGetItemSpec withTableKeyAndAttributes( - TableKeysAndAttributes... 
tableKeyAndAttributes) { - if (tableKeyAndAttributes == null) { - this.tableKeyAndAttributes = null; - } else { - Set names = new LinkedHashSet(); - for (TableKeysAndAttributes e : tableKeyAndAttributes) { - names.add(e.getTableName()); - } - if (names.size() != tableKeyAndAttributes.length) { - throw new IllegalArgumentException( - "table names must not duplicate in the list of TableKeysAndAttributes"); - } - this.tableKeyAndAttributes = Arrays.asList(tableKeyAndAttributes); - } - return this; - } - - - public String getReturnConsumedCapacity() { - return getRequest().returnConsumedCapacityAsString(); - } - - - public BatchGetItemSpec withReturnConsumedCapacity(ReturnConsumedCapacity capacity) { - setRequest(getRequest().toBuilder().returnConsumedCapacity(capacity).build()); - return this; - } - - public Map getUnprocessedKeys() { - return unprocessedKeys; - } - - public BatchGetItemSpec withUnprocessedKeys( - Map unprocessedKeys) { - this.unprocessedKeys = Collections.unmodifiableMap( - new LinkedHashMap(unprocessedKeys)); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/BatchWriteItemSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/BatchWriteItemSpec.java deleted file mode 100644 index 5123f30b92ec..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/BatchWriteItemSpec.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.document.TableWriteItems; -import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemRequest; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.WriteRequest; - -/** - * Full parameter specification for the BatchWriteItem API. - */ -public class BatchWriteItemSpec extends AbstractSpec { - private Collection tableWriteItems; - private Map> unprocessedItems; - - public BatchWriteItemSpec() { - super(BatchWriteItemRequest.builder().build()); - } - - public Collection getTableWriteItems() { - return tableWriteItems; - } - - public BatchWriteItemSpec withTableWriteItems( - TableWriteItems... 
tableWriteItems) { - if (tableWriteItems == null) { - this.tableWriteItems = null; - } else { - Set names = new LinkedHashSet(); - for (TableWriteItems e : tableWriteItems) { - names.add(e.getTableName()); - } - if (names.size() != tableWriteItems.length) { - throw new IllegalArgumentException( - "table names must not duplicate in the list of TableWriteItems"); - } - this.tableWriteItems = Arrays.asList(tableWriteItems); - } - return this; - } - - - public String getReturnConsumedCapacity() { - return getRequest().returnConsumedCapacityAsString(); - } - - - public BatchWriteItemSpec withReturnConsumedCapacity(ReturnConsumedCapacity capacity) { - setRequest(getRequest().toBuilder().returnConsumedCapacity(capacity).build()); - return this; - } - - public Map> getUnprocessedItems() { - return unprocessedItems; - } - - public BatchWriteItemSpec withUnprocessedItems( - Map> unprocessedItems) { - this.unprocessedItems = Collections.unmodifiableMap( - new LinkedHashMap>(unprocessedItems)); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/DeleteItemSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/DeleteItemSpec.java deleted file mode 100644 index 4c450da62340..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/DeleteItemSpec.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.ReturnItemCollectionMetrics; -import software.amazon.awssdk.services.dynamodb.model.ReturnValue; - -/** - * Full parameter specification for the DeleteItem API. - */ -public class DeleteItemSpec extends AbstractSpecWithPrimaryKey { - private Collection expected; - - private Map nameMap; - private Map valueMap; - - public DeleteItemSpec() { - super(DeleteItemRequest.builder().build()); - } - - @Override - public DeleteItemSpec withPrimaryKey(KeyAttribute... 
components) { - super.withPrimaryKey(components); - return this; - } - - @Override - public DeleteItemSpec withPrimaryKey(PrimaryKey primaryKey) { - super.withPrimaryKey(primaryKey); - return this; - } - - @Override - public DeleteItemSpec withPrimaryKey(String hashKeyName, Object hashKeyValue) { - super.withPrimaryKey(hashKeyName, hashKeyValue); - return this; - } - - @Override - public DeleteItemSpec withPrimaryKey(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - super.withPrimaryKey(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue); - return this; - } - - public Collection getExpected() { - return expected; - } - - public DeleteItemSpec withExpected(Expected... expected) { - if (expected == null) { - this.expected = null; - return this; - } - return withExpected(Arrays.asList(expected)); - } - - public DeleteItemSpec withExpected(Collection expected) { - if (expected == null) { - this.expected = null; - return this; - } - Set names = new LinkedHashSet(); - for (Expected e : expected) { - names.add(e.getAttribute()); - } - if (names.size() != expected.size()) { - throw new IllegalArgumentException( - "attribute names must not duplicate in the list of expected"); - } - this.expected = Collections.unmodifiableCollection(expected); - return this; - } - - public String getConditionExpression() { - return getRequest().conditionExpression(); - } - - public DeleteItemSpec withConditionExpression(String conditionExpression) { - setRequest(getRequest().toBuilder().conditionExpression(conditionExpression).build()); - return this; - } - - public Map nameMap() { - return nameMap; - } - - /** - * Applicable only when an expression has been specified. - * Used to specify the actual values for the attribute-name placeholders, - * where the value in the map can either be string for simple attribute - * name, or a JSON path expression. - */ - public DeleteItemSpec withNameMap(Map nameMap) { - if (nameMap == null) { - this.nameMap = null; - } else { - this.nameMap = Collections.unmodifiableMap( - new LinkedHashMap(nameMap)); - } - return this; - } - - public Map valueMap() { - return valueMap; - } - - /** - * Applicable only when an expression has been specified. Used to - * specify the actual values for the attribute-value placeholders. 
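A sketch of a conditional delete built from the DeleteItemSpec methods in this class, pairing a condition expression with the name/value maps just described; the key, attribute names, and values are hypothetical:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import software.amazon.awssdk.services.dynamodb.document.spec.DeleteItemSpec;
    import software.amazon.awssdk.services.dynamodb.model.ReturnValue;

    public class DeleteItemSpecSketch {
        public static void main(String[] args) {
            Map<String, String> names = new LinkedHashMap<>();
            names.put("#st", "OrderStatus");
            Map<String, Object> values = new LinkedHashMap<>();
            values.put(":cancelled", "CANCELLED");

            // Delete only if the order is already cancelled, returning the old item.
            DeleteItemSpec spec = new DeleteItemSpec()
                    .withPrimaryKey("CustomerId", "12345", "OrderDate", "2019-11-07")
                    .withConditionExpression("#st = :cancelled")
                    .withNameMap(names)
                    .valueMap(values)
                    .withReturnValues(ReturnValue.ALL_OLD);
        }
    }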
- */ - public DeleteItemSpec valueMap(Map valueMap) { - if (valueMap == null) { - this.valueMap = null; - } else { - this.valueMap = Collections.unmodifiableMap( - new LinkedHashMap(valueMap)); - } - return this; - } - - public String getConditionalOperator() { - return getRequest().conditionalOperatorAsString(); - } - - public DeleteItemSpec withConditionalOperator(ConditionalOperator conditionalOperator) { - setRequest(getRequest().toBuilder().conditionalOperator(conditionalOperator).build()); - return this; - } - - public String getReturnConsumedCapacity() { - return getRequest().returnConsumedCapacityAsString(); - } - - public DeleteItemSpec withReturnConsumedCapacity( - ReturnConsumedCapacity returnConsumedCapacity) { - setRequest(getRequest().toBuilder().returnConsumedCapacity(returnConsumedCapacity).build()); - return this; - } - - public String getReturnItemCollectionMetrics() { - return getRequest().returnItemCollectionMetricsAsString(); - } - - public DeleteItemSpec withReturnItemCollectionMetrics( - ReturnItemCollectionMetrics returnItemCollectionMetrics) { - setRequest(getRequest().toBuilder().returnItemCollectionMetrics(returnItemCollectionMetrics).build()); - return this; - } - - public String getReturnValues() { - return getRequest().returnValuesAsString(); - } - - public DeleteItemSpec withReturnValues(ReturnValue returnValues) { - setRequest(getRequest().toBuilder().returnValues(returnValues).build()); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/GetItemSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/GetItemSpec.java deleted file mode 100644 index 14d621f424b0..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/GetItemSpec.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; - -/** - * Full parameter specification for the GetItem API. - */ -public class GetItemSpec extends AbstractSpecWithPrimaryKey { - private Map nameMap; - - public GetItemSpec() { - super(GetItemRequest.builder().build()); - } - - @Override - public GetItemSpec withPrimaryKey(KeyAttribute... 
components) { - super.withPrimaryKey(components); - return this; - } - - @Override - public GetItemSpec withPrimaryKey(PrimaryKey primaryKey) { - super.withPrimaryKey(primaryKey); - return this; - } - - @Override - public GetItemSpec withPrimaryKey(String hashKeyName, Object hashKeyValue) { - super.withPrimaryKey(hashKeyName, hashKeyValue); - return this; - } - - @Override - public GetItemSpec withPrimaryKey(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - super.withPrimaryKey(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue); - return this; - } - - public String getReturnConsumedCapacity() { - return getRequest().returnConsumedCapacityAsString(); - } - - public GetItemSpec withReturnConsumedCapacity(ReturnConsumedCapacity capacity) { - setRequest(getRequest().toBuilder().returnConsumedCapacity(capacity).build()); - return this; - } - - public List getAttributesToGet() { - return getRequest().attributesToGet(); - } - - public GetItemSpec withAttributesToGet(String... attrNames) { - if (attrNames == null) { - setRequest(getRequest().toBuilder().attributesToGet((String[]) null).build()); - } else { - setRequest(getRequest().toBuilder().attributesToGet(Arrays.asList(attrNames)).build()); - } - return this; - } - - public Boolean isConsistentRead() { - return getRequest().consistentRead(); - } - - public GetItemSpec withConsistentRead(boolean consistentRead) { - setRequest(getRequest().toBuilder().consistentRead(consistentRead).build()); - return this; - } - - public String getProjectionExpression() { - return getRequest().projectionExpression(); - } - - /** - * When a projection expression is specified, the corresponding name-map can - * optionally be specified via { {@link #withNameMap(Map)}. (Note - * attributes-to-get must not be specified if a projection expression has - * been specified.) - */ - public GetItemSpec withProjectionExpression(String projectionExpression) { - setRequest(getRequest().toBuilder().projectionExpression(projectionExpression).build()); - return this; - } - - public Map nameMap() { - return nameMap; - } - - /** - * Applicable only when an expression has been specified. - * Used to specify the actual values for the attribute-name placeholders, - * where the value in the map can either be string for simple attribute - * name, or a JSON path expression. - */ - public GetItemSpec withNameMap(Map nameMap) { - if (nameMap == null) { - this.nameMap = null; - } else { - this.nameMap = Collections.unmodifiableMap( - new LinkedHashMap(nameMap)); - } - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/ListTablesSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/ListTablesSpec.java deleted file mode 100644 index d14db7007fea..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/ListTablesSpec.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import software.amazon.awssdk.services.dynamodb.model.ListTablesRequest; - -/** - * Full parameter specification for the ListTables API. - */ -public class ListTablesSpec extends AbstractCollectionSpec { - - public ListTablesSpec() { - super(ListTablesRequest.builder().build()); - } - - public String getExclusiveStartTableName() { - return getRequest().exclusiveStartTableName(); - } - - public ListTablesSpec withExclusiveStartTableName(String exclusiveStartTableName) { - setRequest(getRequest().toBuilder().exclusiveStartTableName(exclusiveStartTableName).build()); - return this; - } - - @Override - public ListTablesSpec withMaxResultSize(Integer maxResultSize) { - setMaxResultSize(maxResultSize); - return this; - } - - @Override - public ListTablesSpec withMaxResultSize(int maxResultSize) { - setMaxResultSize(maxResultSize); - return this; - } - - @Override - public ListTablesSpec withMaxPageSize(Integer maxPageSize) { - setMaxPageSize(maxPageSize); - return this; - } - - @Override - public ListTablesSpec withMaxPageSize(int maxPageSize) { - setMaxPageSize(maxPageSize); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/PutItemSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/PutItemSpec.java deleted file mode 100644 index 86770b9d4ee0..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/PutItemSpec.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.Item; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.ReturnItemCollectionMetrics; -import software.amazon.awssdk.services.dynamodb.model.ReturnValue; - -/** - * Full parameter specification for the PutItem API. 
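A sketch of the PutItemSpec that follows. The Item builder calls (withPrimaryKey, withString) are assumed to behave as in the original v1 document API; the attribute names and values are hypothetical:

    import software.amazon.awssdk.services.dynamodb.document.Item;
    import software.amazon.awssdk.services.dynamodb.document.spec.PutItemSpec;
    import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity;

    public class PutItemSpecSketch {
        public static void main(String[] args) {
            // Item construction mirrors the v1 document API; all names are made up.
            Item order = new Item()
                    .withPrimaryKey("CustomerId", "12345", "OrderDate", "2019-11-07")
                    .withString("OrderStatus", "OPEN");

            // Put only if no item with this key exists yet.
            PutItemSpec spec = new PutItemSpec()
                    .withItem(order)
                    .withConditionExpression("attribute_not_exists(CustomerId)")
                    .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);
        }
    }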
- */ -public class PutItemSpec extends AbstractSpec { - private Item item; - private Collection expected; - private Map nameMap; - private Map valueMap; - - public PutItemSpec() { - super(PutItemRequest.builder().build()); - } - - public Item getItem() { - return item; - } - - public PutItemSpec withItem(Item item) { - this.item = item; - return this; - } - - public Collection getExpected() { - return expected; - } - - public PutItemSpec withExpected(Expected... expected) { - if (expected == null) { - this.expected = null; - return this; - } - return withExpected(Arrays.asList(expected)); - } - - public PutItemSpec withExpected(Collection expected) { - if (expected == null) { - this.expected = null; - return this; - } - Set names = new LinkedHashSet(); - for (Expected e : expected) { - names.add(e.getAttribute()); - } - if (names.size() != expected.size()) { - throw new IllegalArgumentException( - "attribute names must not duplicate in the list of expected"); - } - this.expected = Collections.unmodifiableCollection(expected); - return this; - } - - public String getConditionExpression() { - return getRequest().conditionExpression(); - } - - public PutItemSpec withConditionExpression(String conditionExpression) { - setRequest(getRequest().toBuilder().conditionExpression(conditionExpression).build()); - return this; - } - - public Map nameMap() { - return nameMap; - } - - /** - * Applicable only when an expression has been specified. - * Used to specify the actual values for the attribute-name placeholders, - * where the value in the map can either be string for simple attribute - * name, or a JSON path expression. - */ - public PutItemSpec withNameMap(Map nameMap) { - if (nameMap == null) { - this.nameMap = null; - } else { - this.nameMap = Collections.unmodifiableMap( - new LinkedHashMap(nameMap)); - } - return this; - } - - public Map valueMap() { - return valueMap; - } - - /** - * Applicable only when an expression has been specified. Used to - * specify the actual values for the attribute-value placeholders. 
- */ - public PutItemSpec valueMap(Map valueMap) { - if (valueMap == null) { - this.valueMap = null; - } else { - this.valueMap = Collections.unmodifiableMap( - new LinkedHashMap(valueMap)); - } - return this; - } - - public String getConditionalOperator() { - return getRequest().conditionalOperatorAsString(); - } - - public PutItemSpec withConditionalOperator( - ConditionalOperator conditionalOperator) { - setRequest(getRequest().toBuilder().conditionalOperator(conditionalOperator).build()); - return this; - } - - public String getReturnConsumedCapacity() { - return getRequest().returnConsumedCapacityAsString(); - } - - public PutItemSpec withReturnConsumedCapacity( - ReturnConsumedCapacity returnConsumedCapacity) { - setRequest(getRequest().toBuilder().returnConsumedCapacity(returnConsumedCapacity).build()); - return this; - } - - public String getReturnItemCollectionMetrics() { - return getRequest().returnItemCollectionMetricsAsString(); - } - - public PutItemSpec withReturnItemCollectionMetrics( - ReturnItemCollectionMetrics returnItemCollectionMetrics) { - setRequest(getRequest().toBuilder() - .returnItemCollectionMetrics(returnItemCollectionMetrics) - .build()); - return this; - } - - public String getReturnValues() { - return getRequest().returnValuesAsString(); - } - - public PutItemSpec withReturnValues(ReturnValue returnValues) { - setRequest(getRequest().toBuilder().returnValues(returnValues).build()); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/QuerySpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/QuerySpec.java deleted file mode 100644 index 2ddea07383bc..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/QuerySpec.java +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.QueryFilter; -import software.amazon.awssdk.services.dynamodb.document.RangeKeyCondition; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.QueryRequest; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.Select; - -/** - * Full parameter specification for the Query API. 
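A sketch of the QuerySpec that follows, combining a key condition expression with name/value maps and the paging limits inherited from AbstractCollectionSpec; the attribute names and values are hypothetical. Note that maxResultSize caps the total number of items returned across all pages, while maxPageSize limits how many items each underlying request fetches:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import software.amazon.awssdk.services.dynamodb.document.spec.QuerySpec;

    public class QuerySpecSketch {
        public static void main(String[] args) {
            Map<String, String> names = new LinkedHashMap<>();
            names.put("#id", "CustomerId");
            Map<String, Object> values = new LinkedHashMap<>();
            values.put(":v_id", "12345");

            // Key condition expression plus name/value maps, with paging limits.
            QuerySpec spec = new QuerySpec()
                    .withKeyConditionExpression("#id = :v_id")
                    .withNameMap(names)
                    .valueMap(values)
                    .withMaxResultSize(100)   // cap across all pages
                    .withMaxPageSize(25);     // items fetched per request
        }
    }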
- */ -public class QuerySpec extends AbstractCollectionSpec { - private KeyAttribute hashKey; - private RangeKeyCondition rangeKeyCondition; - private Collection queryFilters; - private Map nameMap; - private Map valueMap; - - private Collection exclusiveStartKey; - - public QuerySpec() { - super(QueryRequest.builder().build()); - } - - public KeyAttribute getHashKey() { - return hashKey; - } - - public QuerySpec withHashKey(KeyAttribute hashKey) { - this.hashKey = hashKey; - return this; - } - - public QuerySpec withHashKey(String hashKeyName, Object hashKeyValue) { - this.hashKey = new KeyAttribute(hashKeyName, hashKeyValue); - return this; - } - - public RangeKeyCondition getRangeKeyCondition() { - return rangeKeyCondition; - } - - public QuerySpec withRangeKeyCondition(RangeKeyCondition rangeKeyCondition) { - this.rangeKeyCondition = rangeKeyCondition; - return this; - } - - /** - * When a key condition expression is specified, the corresponding name-map - * and value-map can optionally be specified via {@link #withNameMap(Map)} - * and {@link #valueMap(Map)}. (Note the hash key and range key - * conditions must not be specified if a key condition expression has been - * specified.) - */ - public QuerySpec withKeyConditionExpression(String keyConditionExpression) { - setRequest(getRequest().toBuilder().keyConditionExpression(keyConditionExpression).build()); - return this; - } - - public String getKeyConditionExpression() { - return getRequest().keyConditionExpression(); - } - - public QuerySpec withAttributesToGet(String... attributes) { - setRequest(getRequest().toBuilder().attributesToGet(Arrays.asList(attributes)).build()); - return this; - } - - public List getAttributesToGet() { - return getRequest().attributesToGet(); - } - - public QuerySpec withConditionalOperator(ConditionalOperator op) { - setRequest(getRequest().toBuilder().conditionalOperator(op).build()); - return this; - } - - public String getConditionalOperator() { - return getRequest().conditionalOperatorAsString(); - } - - public QuerySpec withConsistentRead(boolean consistentRead) { - setRequest(getRequest().toBuilder().consistentRead(consistentRead).build()); - return this; - } - - public boolean isConsistentRead() { - return getRequest().consistentRead(); - } - - public QuerySpec withQueryFilters(QueryFilter... queryFilters) { - if (queryFilters == null) { - this.queryFilters = null; - } else { - Set names = new LinkedHashSet(); - for (QueryFilter e : queryFilters) { - names.add(e.getAttribute()); - } - if (names.size() != queryFilters.length) { - throw new IllegalArgumentException( - "attribute names must not duplicate in the list of query filters"); - } - this.queryFilters = Arrays.asList(queryFilters); - } - return this; - } - - public Collection getQueryFilters() { - return queryFilters; - } - - /** - * When a filter expression is specified, the corresponding name-map and - * value-map can optionally be specified via {@link #withNameMap(Map)} and - * {@link #valueMap(Map)}. (Note query filters must not be specified if - * a filter expression has been specified.) - */ - public QuerySpec withFilterExpression(String filterExpression) { - setRequest(getRequest().toBuilder().filterExpression(filterExpression).build()); - return this; - } - - public String getFilterExpression() { - return getRequest().filterExpression(); - } - - /** - * When a projection expression is specified, the corresponding name-map and - * value-map can optionally be specified via {@link #withNameMap(Map)} and - * {@link #valueMap(Map)}. 
(Note attributes-to-get must not be specified - * if a projection expression has been specified.) - */ - public QuerySpec withProjectionExpression(String projectionExpression) { - setRequest(getRequest().toBuilder().projectionExpression(projectionExpression).build()); - return this; - } - - public String getProjectionExpression() { - return getRequest().projectionExpression(); - } - - public Map nameMap() { - return nameMap; - } - - /** - * Applicable only when an expression has been specified. - * Used to specify the actual values for the attribute-name placeholders, - * where the value in the map can either be string for simple attribute - * name, or a JSON path expression. - */ - public QuerySpec withNameMap(Map nameMap) { - if (nameMap == null) { - this.nameMap = null; - } else { - this.nameMap = Collections.unmodifiableMap(new LinkedHashMap(nameMap)); - } - return this; - } - - public Map valueMap() { - return valueMap; - } - - /** - * Applicable only when an expression has been specified. Used to - * specify the actual values for the attribute-value placeholders. - */ - public QuerySpec valueMap(Map valueMap) { - if (valueMap == null) { - this.valueMap = null; - } else { - this.valueMap = Collections.unmodifiableMap(new LinkedHashMap(valueMap)); - } - return this; - } - - public String getReturnConsumedCapacity() { - return getRequest().returnConsumedCapacityAsString(); - } - - public QuerySpec withReturnConsumedCapacity( - ReturnConsumedCapacity returnConsumedCapacity) { - setRequest(getRequest().toBuilder().returnConsumedCapacity(returnConsumedCapacity).build()); - return this; - } - - public QuerySpec withScanIndexForward(boolean scanIndexForward) { - setRequest(getRequest().toBuilder().scanIndexForward(scanIndexForward).build()); - return this; - } - - public boolean isScanIndexForward() { - return getRequest().scanIndexForward(); - } - - public QuerySpec withSelect(Select select) { - setRequest(getRequest().toBuilder().select(select).build()); - return this; - } - - public String select() { - return getRequest().selectAsString(); - } - - // Exclusive start key - - public Collection getExclusiveStartKey() { - return exclusiveStartKey; - } - - public QuerySpec withExclusiveStartKey(KeyAttribute... 
exclusiveStartKey) { - if (exclusiveStartKey == null) { - this.exclusiveStartKey = null; - } else { - this.exclusiveStartKey = Arrays.asList(exclusiveStartKey); - } - return this; - } - - public QuerySpec withExclusiveStartKey(PrimaryKey exclusiveStartKey) { - if (exclusiveStartKey == null) { - this.exclusiveStartKey = null; - } else { - this.exclusiveStartKey = exclusiveStartKey.getComponents(); - } - return this; - } - - public QuerySpec withExclusiveStartKey( - String hashKeyName, Object hashKeyValue) { - return withExclusiveStartKey(new KeyAttribute(hashKeyName, hashKeyValue)); - } - - public QuerySpec withExclusiveStartKey( - String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - return withExclusiveStartKey( - new KeyAttribute(hashKeyName, hashKeyValue), - new KeyAttribute(rangeKeyName, rangeKeyValue)); - } - - // Max result size - - @Override - public QuerySpec withMaxResultSize(Integer maxResultSize) { - setMaxResultSize(maxResultSize); - return this; - } - - @Override - public QuerySpec withMaxResultSize(int maxResultSize) { - setMaxResultSize(maxResultSize); - return this; - } - - @Override - public QuerySpec withMaxPageSize(Integer maxPageSize) { - setMaxPageSize(maxPageSize); - return this; - } - - @Override - public QuerySpec withMaxPageSize(int maxPageSize) { - setMaxPageSize(maxPageSize); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/ScanSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/ScanSpec.java deleted file mode 100644 index e40b43c4a4c9..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/ScanSpec.java +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.document.ScanFilter; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.ScanRequest; -import software.amazon.awssdk.services.dynamodb.model.Select; - -/** - * API for fully specifying all the parameters of a Table-centric Scan API. 
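[Editor's note: a minimal, hedged usage sketch of the QuerySpec fluent API deleted above. The table attribute "customerId", the placeholder names, and the values are invented for illustration, and passing the spec to a document-API Table or Index query is outside this diff.]

    QuerySpec spec = new QuerySpec()
            .withKeyConditionExpression("#id = :id")   // hash/range key conditions must not also be set
            .withFilterExpression("price >= :min")
            .withNameMap(new NameMap().with("#id", "customerId"))
            .valueMap(new ValueMap().withString(":id", "A123").withNumber(":min", 25))
            .withConsistentRead(true)
            .withMaxPageSize(100);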
- */ -public class ScanSpec extends AbstractCollectionSpec { - private Collection scanFilters; - private Map nameMap; - private Map valueMap; - - private Collection exclusiveStartKey; - - public ScanSpec() { - super(ScanRequest.builder().build()); - } - - /** - * @see ScanRequest#scanFilter() - */ - public Collection scanFilters() { - return scanFilters; - } - - /** - * @see ScanRequest#withScanFilter(Map) - */ - public ScanSpec withScanFilters(ScanFilter... scanFilters) { - if (scanFilters == null) { - this.scanFilters = null; - } else { - Set names = new LinkedHashSet(); - for (ScanFilter e : scanFilters) { - names.add(e.getAttribute()); - } - if (names.size() != scanFilters.length) { - throw new IllegalArgumentException( - "attribute names must not duplicate in the list of scan filters"); - } - this.scanFilters = Arrays.asList(scanFilters); - } - return this; - } - - /** - * AND|OR that applies to all the conditions in the ScanFilters. - * - * @see ScanRequest#getConditionalOperator() - */ - public String getConditionalOperator() { - return getRequest().conditionalOperatorAsString(); - } - - /** - * @see ScanRequest#withConditionalOperator(ConditionalOperator) - */ - public ScanSpec withConditionalOperator(ConditionalOperator op) { - setRequest(getRequest().toBuilder().conditionalOperator(op).build()); - return this; - } - - /** - * @see ScanRequest#getAttributesToGet() - */ - public List getAttributesToGet() { - return getRequest().attributesToGet(); - } - - /** - * @see ScanRequest#withAttributesToGet(String...) - */ - public ScanSpec withAttributesToGet(String... attributes) { - if (attributes == null) { - setRequest(getRequest().toBuilder().attributesToGet((String []) null).build()); - } else { - setRequest(getRequest().toBuilder().attributesToGet(Arrays.asList(attributes)).build()); - } - return this; - } - - /** - * Any query filters will be ignored if a filter expression has been - * specified. When a filter expression is specified, the corresponding - * name-map and value-map can also be specified via - * {@link #withNameMap(Map)} and {@link #valueMap(Map)}. - * - * @see ScanRequest#getFilterExpression() - */ - public String getFilterExpression() { - return getRequest().filterExpression(); - } - - /** - * @see ScanRequest#withFilterExpression(String) - */ - public ScanSpec withFilterExpression(String filterExpression) { - setRequest(getRequest().toBuilder().filterExpression(filterExpression).build()); - return this; - } - - /** - * @see ScanRequest#getProjectionExpression() - */ - public String getProjectionExpression() { - return getRequest().projectionExpression(); - } - - /** - * @see ScanRequest#withProjectionExpression(String) - */ - public ScanSpec withProjectionExpression(String projectionExpression) { - setRequest(getRequest().toBuilder().projectionExpression(projectionExpression).build()); - return this; - } - - /** - * @see ScanRequest#getExpressionAttributeNames() - */ - public Map nameMap() { - return nameMap; - } - - /** - * Applicable only when an expression has been specified. - * Used to specify the actual values for the attribute-name placeholders, - * where the value in the map can either be string for simple attribute - * name, or a JSON path expression. 
- * - * @see ScanRequest#withExpressionAttributeNames(Map) - */ - public ScanSpec withNameMap(Map nameMap) { - if (nameMap == null) { - this.nameMap = null; - } else { - this.nameMap = Collections.unmodifiableMap(new LinkedHashMap(nameMap)); - } - return this; - } - - /** - * @see ScanRequest#getExpressionAttributeValues() - */ - public Map valueMap() { - return valueMap; - } - - /** - * Applicable only when an expression has been specified. Used to - * specify the actual values for the attribute-value placeholders. - * - * @see ScanRequest#withExpressionAttributeValues(Map) - */ - public ScanSpec valueMap(Map valueMap) { - if (valueMap == null) { - this.valueMap = null; - } else { - this.valueMap = Collections.unmodifiableMap(new LinkedHashMap(valueMap)); - } - return this; - } - - /** - * @see ScanRequest#getReturnConsumedCapacity() - */ - public String getReturnConsumedCapacity() { - return getRequest().returnConsumedCapacityAsString(); - } - - /** - * @see ScanRequest#withReturnConsumedCapacity(ReturnConsumedCapacity) - */ - public ScanSpec withReturnConsumedCapacity(ReturnConsumedCapacity capacity) { - setRequest(getRequest().toBuilder().returnConsumedCapacity(capacity).build()); - return this; - } - - /** - * Specifies the attributes to be returned. - * - * @see ScanRequest#select() - */ - // ALL_ATTRIBUTES | ALL_PROJECTED_ATTRIBUTES | SPECIFIC_ATTRIBUTES | COUNT - public String select() { - return getRequest().selectAsString(); - } - - /** - * @see ScanRequest#withSelect(Select) - */ - public ScanSpec withSelect(Select select) { - setRequest(getRequest().toBuilder().select(select).build()); - return this; - } - - /** - * @see ScanRequest#segment() - */ - public Integer segment() { - return getRequest().segment(); - } - - /** - * @see ScanRequest#withSegment(Integer) - */ - public ScanSpec withSegment(Integer segment) { - setRequest(getRequest().toBuilder().segment(segment).build()); - return this; - } - - /** - * @see ScanRequest#getTotalSegments() - */ - public Integer getTotalSegments() { - return getRequest().totalSegments(); - } - - /** - * @see ScanRequest#withTotalSegments(Integer) - */ - public ScanSpec withTotalSegments(Integer totalSegments) { - setRequest(getRequest().toBuilder().totalSegments(totalSegments).build()); - return this; - } - - /** - * @see ScanRequest#isConsistentRead() - */ - public Boolean isConsistentRead() { - return getRequest().consistentRead(); - } - - /** - * @see ScanRequest#withConsistentRead(Boolean) - */ - public ScanSpec withConsistentRead(Boolean consistentRead) { - setRequest(getRequest().toBuilder().consistentRead(consistentRead).build()); - return this; - } - - // Exclusive start key - - /** - * @see ScanRequest#getExclusiveStartKey() - */ - public Collection getExclusiveStartKey() { - return exclusiveStartKey; - } - - /** - * @see ScanRequest#withExclusiveStartKey(Map) - */ - public ScanSpec withExclusiveStartKey(KeyAttribute... 
exclusiveStartKey) { - if (exclusiveStartKey == null) { - this.exclusiveStartKey = null; - } else { - this.exclusiveStartKey = Arrays.asList(exclusiveStartKey); - } - return this; - } - - /** - * @see ScanRequest#withExclusiveStartKey(Map) - */ - public ScanSpec withExclusiveStartKey(PrimaryKey exclusiveStartKey) { - if (exclusiveStartKey == null) { - this.exclusiveStartKey = null; - } else { - this.exclusiveStartKey = exclusiveStartKey.getComponents(); - } - return this; - } - - /** - * @see ScanRequest#withExclusiveStartKey(Map) - */ - public ScanSpec withExclusiveStartKey( - String hashKeyName, Object hashKeyValue) { - return withExclusiveStartKey(new KeyAttribute(hashKeyName, hashKeyValue)); - } - - /** - * @see ScanRequest#withExclusiveStartKey(Map) - */ - public ScanSpec withExclusiveStartKey( - String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - return withExclusiveStartKey( - new KeyAttribute(hashKeyName, hashKeyValue), - new KeyAttribute(rangeKeyName, rangeKeyValue)); - } - - // Max result size - - @Override - public ScanSpec withMaxResultSize(Integer maxResultSize) { - setMaxResultSize(maxResultSize); - return this; - } - - @Override - public ScanSpec withMaxResultSize(int maxResultSize) { - setMaxResultSize(maxResultSize); - return this; - } - - @Override - public ScanSpec withMaxPageSize(Integer maxPageSize) { - setMaxPageSize(maxPageSize); - return this; - } - - @Override - public ScanSpec withMaxPageSize(int maxPageSize) { - setMaxPageSize(maxPageSize); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/UpdateItemSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/UpdateItemSpec.java deleted file mode 100644 index b2beecab7e77..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/UpdateItemSpec.java +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.document.AttributeUpdate; -import software.amazon.awssdk.services.dynamodb.document.Expected; -import software.amazon.awssdk.services.dynamodb.document.KeyAttribute; -import software.amazon.awssdk.services.dynamodb.document.PrimaryKey; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.ReturnItemCollectionMetrics; -import software.amazon.awssdk.services.dynamodb.model.ReturnValue; -import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; - -/** - * Full parameter specification for the UpdateItem API. - */ -public class UpdateItemSpec extends AbstractSpecWithPrimaryKey { - private List attributes; - private Collection expected; - - private Map nameMap; - private Map valueMap; - - public UpdateItemSpec() { - super(UpdateItemRequest.builder().build()); - } - - @Override - public UpdateItemSpec withPrimaryKey(KeyAttribute... components) { - super.withPrimaryKey(components); - return this; - } - - @Override - public UpdateItemSpec withPrimaryKey(PrimaryKey primaryKey) { - super.withPrimaryKey(primaryKey); - return this; - } - - @Override - public UpdateItemSpec withPrimaryKey(String hashKeyName, Object hashKeyValue) { - super.withPrimaryKey(hashKeyName, hashKeyValue); - return this; - } - - @Override - public UpdateItemSpec withPrimaryKey(String hashKeyName, Object hashKeyValue, - String rangeKeyName, Object rangeKeyValue) { - super.withPrimaryKey(hashKeyName, hashKeyValue, rangeKeyName, rangeKeyValue); - return this; - } - - public List getAttributeUpdate() { - return attributes; - } - - public UpdateItemSpec withAttributeUpdate( - List attributeUpdates) { - this.attributes = attributeUpdates; - return this; - } - - public UpdateItemSpec withAttributeUpdate( - AttributeUpdate... attributeUpdates) { - this.attributes = new ArrayList(Arrays.asList(attributeUpdates)); - return this; - } - - public UpdateItemSpec addAttributeUpdate(AttributeUpdate attributeUpdate) { - if (null == this.attributes) { - this.attributes = new ArrayList(); - } - this.attributes.add(attributeUpdate); - return this; - } - - public UpdateItemSpec clearAttributeUpdate() { - this.attributes = null; - return this; - } - - public Collection getExpected() { - return expected; - } - - public UpdateItemSpec withExpected(Expected... 
expected) { - if (expected == null) { - this.expected = null; - return this; - } - return withExpected(Arrays.asList(expected)); - } - - public UpdateItemSpec withExpected(Collection expected) { - if (expected == null) { - this.expected = null; - return this; - } - Set names = new LinkedHashSet(); - for (Expected e : expected) { - names.add(e.getAttribute()); - } - if (names.size() != expected.size()) { - throw new IllegalArgumentException( - "attribute names must not duplicate in the list of expected"); - } - this.expected = Collections.unmodifiableCollection(expected); - return this; - } - - public String getUpdateExpression() { - return getRequest().updateExpression(); - } - - public UpdateItemSpec withUpdateExpression(String updateExpression) { - setRequest(getRequest().toBuilder().updateExpression(updateExpression).build()); - return this; - } - - public String getConditionExpression() { - return getRequest().conditionExpression(); - } - - public UpdateItemSpec withConditionExpression(String conditionExpression) { - setRequest(getRequest().toBuilder().conditionExpression(conditionExpression).build()); - return this; - } - - public Map nameMap() { - return nameMap; - } - - /** - * Applicable only when an expression has been specified. - * Used to specify the actual values for the attribute-name placeholders, - * where the value in the map can either be string for simple attribute - * name, or a JSON path expression. - */ - public UpdateItemSpec withNameMap(Map nameMap) { - if (nameMap == null) { - this.nameMap = null; - } else { - this.nameMap = Collections.unmodifiableMap( - new LinkedHashMap(nameMap)); - } - return this; - } - - public Map valueMap() { - return valueMap; - } - - /** - * Applicable only when an expression has been specified. Used to - * specify the actual values for the attribute-value placeholders. 
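[Editor's note: a hedged sketch of the expression-based update that the UpdateItemSpec javadoc above describes. The key, attribute names, increment amount, and placeholders are made up for illustration.]

    UpdateItemSpec spec = new UpdateItemSpec()
            .withPrimaryKey("customerId", "A123")
            .withUpdateExpression("SET #c = #c + :inc")
            .withConditionExpression("attribute_exists(#c)")
            .withNameMap(new NameMap().with("#c", "orderCount"))
            .valueMap(new ValueMap().withNumber(":inc", 1))
            .withReturnValues(ReturnValue.UPDATED_NEW);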
- */ - public UpdateItemSpec valueMap(Map valueMap) { - if (valueMap == null) { - this.valueMap = null; - } else { - this.valueMap = Collections.unmodifiableMap( - new LinkedHashMap(valueMap)); - } - return this; - } - - public String getConditionalOperator() { - return getRequest().conditionalOperatorAsString(); - } - - public String getReturnConsumedCapacity() { - return getRequest().returnConsumedCapacityAsString(); - } - - public UpdateItemSpec withReturnConsumedCapacity( - String returnConsumedCapacity) { - setRequest(getRequest().toBuilder().returnConsumedCapacity(returnConsumedCapacity).build()); - return this; - } - - public UpdateItemSpec withReturnConsumedCapacity( - ReturnConsumedCapacity returnConsumedCapacity) { - setRequest(getRequest().toBuilder().returnConsumedCapacity(returnConsumedCapacity).build()); - return this; - } - - public String getReturnItemCollectionMetrics() { - return getRequest().returnItemCollectionMetricsAsString(); - } - - public UpdateItemSpec withReturnItemCollectionMetrics( - ReturnItemCollectionMetrics returnItemCollectionMetrics) { - setRequest(getRequest().toBuilder().returnItemCollectionMetrics(returnItemCollectionMetrics).build()); - return this; - } - - public UpdateItemSpec withReturnItemCollectionMetrics( - String returnItemCollectionMetrics) { - setRequest(getRequest().toBuilder().returnItemCollectionMetrics(returnItemCollectionMetrics).build()); - return this; - } - - public String getReturnValues() { - return getRequest().returnValuesAsString(); - } - - public UpdateItemSpec withReturnValues(ReturnValue returnValues) { - setRequest(getRequest().toBuilder().returnValues(returnValues).build()); - return this; - } - - public UpdateItemSpec withReturnValues(String returnValues) { - setRequest(getRequest().toBuilder().returnValues(returnValues).build()); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/UpdateTableSpec.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/UpdateTableSpec.java deleted file mode 100644 index daba71deb2b4..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/spec/UpdateTableSpec.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.spec; - -import java.util.Collection; -import java.util.List; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndexUpdate; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.UpdateTableRequest; - -/** - * Full parameter specification for the UpdateTable API. 
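[Editor's note: a minimal sketch of how the UpdateTableSpec described above might be populated; the capacity values are arbitrary.]

    UpdateTableSpec spec = new UpdateTableSpec()
            .withProvisionedThroughput(ProvisionedThroughput.builder()
                    .readCapacityUnits(10L)
                    .writeCapacityUnits(5L)
                    .build());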
- */ -public class UpdateTableSpec extends AbstractSpec { - public UpdateTableSpec() { - super(UpdateTableRequest.builder().build()); - } - - public ProvisionedThroughput getProvisionedThroughput() { - return getRequest().provisionedThroughput(); - } - - public UpdateTableSpec withProvisionedThroughput( - ProvisionedThroughput provisionedThroughput) { - setRequest(getRequest().toBuilder().provisionedThroughput(provisionedThroughput).build()); - return this; - } - - public List getAttributeDefinitions() { - return getRequest().attributeDefinitions(); - } - - public UpdateTableSpec withAttributeDefinitions( - AttributeDefinition... attributeDefinitions) { - setRequest(getRequest().toBuilder().attributeDefinitions(attributeDefinitions).build()); - return this; - } - - public UpdateTableSpec withAttributeDefinitions( - Collection attributeDefinitions) { - setRequest(getRequest().toBuilder().attributeDefinitions(attributeDefinitions).build()); - return this; - } - - public UpdateTableSpec withGlobalSecondaryIndexUpdates( - GlobalSecondaryIndexUpdate... globalSecondaryIndexUpdates) { - setRequest(getRequest().toBuilder().globalSecondaryIndexUpdates( - globalSecondaryIndexUpdates).build()); - return this; - } - - public UpdateTableSpec withGlobalSecondaryIndexUpdates( - Collection globalSecondaryIndexUpdates) { - setRequest(getRequest().toBuilder().globalSecondaryIndexUpdates( - globalSecondaryIndexUpdates) - .build()); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/FluentArrayList.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/FluentArrayList.java deleted file mode 100644 index 3e3ea920d218..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/FluentArrayList.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.utils; - -import java.util.ArrayList; -import java.util.Collection; - -/** - * Utility subclass of {@link ArrayList} that supports fluent API. - */ -public class FluentArrayList extends ArrayList { - private static final long serialVersionUID = -8269850815375778149L; - - public FluentArrayList(int initialCapacity) { - super(initialCapacity); - } - - public FluentArrayList() { - super(); - } - - // @SafeVarargs - public FluentArrayList(E... elements) { - appendAll(elements); - } - - public FluentArrayList(Collection c) { - super(c); - } - - /** - * Fluent method to add the specified element to this list. - */ - public FluentArrayList append(E e) { - super.add(e); - return this; - } - - /** - * Fluent method to remove the specified element from this list. - */ - public FluentArrayList delete(Object o) { - super.remove(o); - return this; - } - - /** - * Fluent method to add the elements from the specified collection to this - * list. 
- */ - public FluentArrayList appendAll(Collection c) { - super.addAll(c); - return this; - } - - /** - * Fluent method to add the elements to this list. - */ - // @SuppressWarnings("unchecked") - public FluentArrayList appendAll(E... elements) { - if (elements != null) { - for (E e : elements) { - add(e); - } - } - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/FluentHashMap.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/FluentHashMap.java deleted file mode 100644 index 8574d7c7e106..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/FluentHashMap.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.utils; - -import java.util.LinkedHashMap; - -/** - * Utility subclass of {@link LinkedHashMap} that supports fluent API. - */ -public class FluentHashMap extends LinkedHashMap { - private static final long serialVersionUID = 4857340227048063855L; - - /** - * Fluent method to remove the specified key from this map. - */ - public FluentHashMap delete(Object key) { - remove(key); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/FluentHashSet.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/FluentHashSet.java deleted file mode 100644 index 17b6a1bc4fac..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/FluentHashSet.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.utils; - -import java.util.Collection; -import java.util.LinkedHashSet; - -/** - * Utility subclass of {@link LinkedHashSet} that supports fluent API. - */ -public class FluentHashSet extends LinkedHashSet { - private static final long serialVersionUID = -549868294257559427L; - - public FluentHashSet() { - super(); - } - - public FluentHashSet(Collection c) { - super(c); - } - - // @SafeVarargs - public FluentHashSet(E... 
elements) { - withAll(elements); - } - - public FluentHashSet(int initialCapacity, float loadFactor) { - super(initialCapacity, loadFactor); - } - - public FluentHashSet(int initialCapacity) { - super(initialCapacity); - } - - /** - * Fluent method to add the specified element to this set. - */ - public FluentHashSet with(E e) { - super.add(e); - return this; - } - - /** - * Fluent method to add the elements from the specified collection to this - * set. - */ - public FluentHashSet withAll(Collection c) { - super.addAll(c); - return this; - } - - /** - * Fluent method to add the elements to this set. - */ - // @SuppressWarnings("unchecked") - public FluentHashSet withAll(E... elements) { - if (elements != null) { - for (E e : elements) { - add(e); - } - } - return this; - } - - /** - * Fluent method to remove the specified element from this set. - */ - public FluentHashSet delete(Object o) { - super.remove(o); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/NameMap.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/NameMap.java deleted file mode 100644 index a9be53a62b5e..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/NameMap.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.utils; - - -/** - * Utility class for name maps. - */ -public class NameMap extends FluentHashMap { - private static final long serialVersionUID = 1L; - - /** - * Fluent method to sets the given key (attribute name place holder) to the - * specified value (the actual attribute name.) - */ - public NameMap with(String key, String value) { - super.put(key, value); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/ValueList.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/ValueList.java deleted file mode 100644 index c5fe0cb78bdb..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/ValueList.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document.utils; - -import java.math.BigDecimal; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; - -/** - * Utility value list. - */ -public class ValueList extends FluentArrayList { - private static final long serialVersionUID = 1L; - - public ValueList(int initialCapacity) { - super(initialCapacity); - } - - public ValueList(Object... elements) { - super(elements); - } - - public ValueList() { - super(); - } - - public ValueList(Collection c) { - super(c); - } - - public ValueList appendAll(Object... elements) { - super.appendAll(elements); - return this; - } - - /** - * Appends the given value to this list. - */ - public ValueList appendString(String val) { - super.append(val); - return this; - } - - /** - * Appends the given value to this list. - */ - public ValueList appendNumber(BigDecimal val) { - super.append(val); - return this; - } - - /** - * Appends the given value to this list. - */ - public ValueList appendNumber(Number val) { - super.append(InternalUtils.toBigDecimal(val)); - return this; - } - - /** - * Appends the given value to this list. - */ - public ValueList appendInt(int val) { - return appendNumber(Integer.valueOf(val)); - } - - /** - * Appends the given value to this list. - */ - public ValueList appendLong(long val) { - return appendNumber(Long.valueOf(val)); - } - - /** - * Appends the given value to this list. - */ - public ValueList appendBinary(byte[] val) { - super.append(val); - return this; - } - - /** - * Appends the given value to this list. - */ - public ValueList appendStringSet(Set val) { - super.append(val); - return this; - } - - /** - * Appends the given values to this list as a string set. - */ - public ValueList appendStringSet(String... val) { - super.append(new LinkedHashSet(Arrays.asList(val))); - return this; - } - - /** - * Appends the given value to this list. - */ - public ValueList appendNumberSet(Set val) { - super.append(val); - return this; - } - - /** - * Appends the given value to this list as a set of BigDecimals. - */ - public ValueList appendNumberSet(BigDecimal... val) { - super.append(new LinkedHashSet(Arrays.asList(val))); - return this; - } - - /** - * Appends the given values to this list as a number set. - */ - public ValueList appendNumberSet(Number... val) { - super.append(InternalUtils.toBigDecimalSet(val)); - return this; - } - - /** - * Appends the given value to this list. - */ - public ValueList appendBinarySet(Set val) { - super.append(val); - return this; - } - - /** - * Appends the given values to this list as a set of byte arrays. - */ - public ValueList appendBinarySet(byte[]... val) { - super.append(new LinkedHashSet(Arrays.asList(val))); - return this; - } - - /** - * Appends the given value to this list. - */ - public ValueList appendList(List val) { - super.append(new ArrayList(val)); - return this; - } - - /** - * Appends the given values to this list as a list. - */ - public ValueList appendList(Object... vals) { - super.append(new ArrayList(Arrays.asList(vals))); - return this; - } - - /** - * Appends the given value to this list. - */ - public ValueList appendMap(Map val) { - super.append(val); - return this; - } - - /** - * Appends the given value to this list. 
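[Editor's note: a small, hedged sketch of the fluent ValueList contract shown in this class; the values are arbitrary.]

    ValueList values = new ValueList()
            .appendString("green")
            .appendInt(42)
            .appendBoolean(true)
            .appendNull()
            .appendStringSet("red", "blue");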
- */ - public ValueList appendBoolean(boolean val) { - super.append(Boolean.valueOf(val)); - return this; - } - - /** - * Appends a null value to this list. - */ - public ValueList appendNull() { - super.append(null); - return this; - } - - /** - * Appends the given value to this list. A value can be a - *
- *     • Number
- *     • String
- *     • binary (ie byte array or byte buffer)
- *     • boolean
- *     • null
- *     • list (of any of the types on this list)
- *     • map (with string key to value of any of the types on this list)
- *     • set (of any of the types on this list)
    - */ - public ValueList append(Object val) { - if (val == this) { - throw new IllegalArgumentException("Self reference is not allowed"); - } - // TODO: fail fast if val is not a supported type - super.append(val); - return this; - } - -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/ValueMap.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/ValueMap.java deleted file mode 100644 index c11ee95d5de8..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/ValueMap.java +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.utils; - -import java.math.BigDecimal; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.core.util.json.JacksonUtils; -import software.amazon.awssdk.services.dynamodb.document.internal.InternalUtils; -import software.amazon.awssdk.services.dynamodb.document.internal.ItemValueConformer; - - -/** - * Utility class for value maps. - */ -public class ValueMap extends FluentHashMap { - private static final long serialVersionUID = 1L; - private static final ItemValueConformer VALUE_CONFORMER = new ItemValueConformer(); - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withString(String key, String val) { - super.put(key, val); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withNumber(String key, BigDecimal val) { - super.put(key, val); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withNumber(String key, Number val) { - super.put(key, InternalUtils.toBigDecimal(val)); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withInt(String key, int val) { - return withNumber(key, Integer.valueOf(val)); - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withLong(String key, long val) { - return withNumber(key, Long.valueOf(val)); - } - - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withBinary(String key, byte[] val) { - super.put(key, val); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withStringSet(String key, Set val) { - super.put(key, val); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. 
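[Editor's note: a hedged sketch of how this ValueMap builder is typically populated; the keys and the JSON document are invented for illustration, and withJson appears just below.]

    ValueMap item = new ValueMap()
            .withString(":name", "widget")
            .withInt(":qty", 3)
            .withBoolean(":active", true)
            .withJson(":details", "{ \"color\" : \"green\", \"size\" : \"L\" }");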
- */ - public ValueMap withStringSet(String key, String... val) { - super.put(key, new LinkedHashSet(Arrays.asList(val))); - return this; - } - - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withNumberSet(String key, Set val) { - super.put(key, val); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withNumberSet(String key, BigDecimal... val) { - super.put(key, new LinkedHashSet(Arrays.asList(val))); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withNumberSet(String key, Number... val) { - super.put(key, InternalUtils.toBigDecimalSet(val)); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withBinarySet(String key, Set val) { - super.put(key, val); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withBinarySet(String key, byte[]... val) { - super.put(key, new LinkedHashSet(Arrays.asList(val))); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withList(String key, List val) { - super.put(key, val == null ? null : new ArrayList(val)); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given values as a list. - */ - public ValueMap withList(String key, Object... vals) { - super.put(key, - vals == null ? null : new ArrayList(Arrays.asList(vals))); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * given value. - */ - public ValueMap withMap(String key, Map val) { - super.put(key, val); - return this; - } - - /** - * Sets the value of the specified key in the current ValueMap to the - * boolean value. - */ - public ValueMap withBoolean(String key, boolean val) { - super.put(key, Boolean.valueOf(val)); - return this; - } - - /** - * Sets the value of the specified key to null. - */ - public ValueMap withNull(String key) { - super.put(key, null); - return this; - } - - /** - * Sets the value of the specified key to an object represented by the JSON - * structure passed. - */ - public ValueMap withJson(String key, String jsonValue) { - super.put(key, VALUE_CONFORMER.transform(JacksonUtils.fromJsonString(jsonValue, Object.class))); - return this; - } - - /** - * Sets the value of the specified key to the given value. A - * value can be a - *
- *     • Number
- *     • String
- *     • binary (ie byte array or byte buffer)
- *     • boolean
- *     • null
- *     • list (of any of the types on this list)
- *     • map (with string key to value of any of the types on this list)
- *     • set (of any of the types on this list)
    - */ - public ValueMap with(String key, Object val) { - if (val == this) { - throw new IllegalArgumentException("Self reference is not allowed"); - } - // TODO: fail fast if val is not a supported type - super.put(key, val); - return this; - } -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/ValueMapAndWithJsonSupportTest.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/ValueMapAndWithJsonSupportTest.java deleted file mode 100644 index 022ae23ec5b1..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/document/utils/ValueMapAndWithJsonSupportTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.document.utils; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.core.IsCollectionContaining.hasItems; -import static org.junit.Assert.assertThat; - -import java.util.List; -import java.util.Map; -import org.junit.Test; -import software.amazon.awssdk.core.exception.SdkClientException; - - -public class ValueMapAndWithJsonSupportTest { - - private static final String NO_JSON_STRING = "nojson"; - private static final String KEY = "somekey"; - - @Test(expected = SdkClientException.class) - public void valueMapCreationshouldFailIfNoJsonstringIsUsedAsValue() { - new ValueMap().withJson("a", NO_JSON_STRING); - } - - @Test - @SuppressWarnings("unchecked") - public void valueMapShouldReturnAProperDeserializedJsonMap() { - String json = "{ \"fruit\" : \"pear\" , \"color\" : \"green\" }"; - - ValueMap valueMap = new ValueMap().withJson(KEY, json); - Map actual = (Map) valueMap.get(KEY); - - assertThat(actual.size(), is(2)); - assertThat((String) actual.get("fruit"), is("pear")); - assertThat((String) actual.get("color"), is("green")); - } - - @Test - @SuppressWarnings("unchecked") - public void valueMapShouldReturnAProperDeserializedJsonList() { - String json = "[\"red\",\"green\",\"blue\"]"; - - ValueMap valueMap = new ValueMap().withJson(KEY, json); - List actual = (List) valueMap.get(KEY); - - assertThat(actual.size(), is(3)); - assertThat(actual, hasItems("red", "green", "blue")); - } - -} diff --git a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/util/TableUtils.java b/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/util/TableUtils.java deleted file mode 100644 index c85b577d77b0..000000000000 --- a/test/dynamodbdocument-v1/src/test/java/software/amazon/awssdk/services/dynamodb/util/TableUtils.java +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.util; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; -import software.amazon.awssdk.services.dynamodb.model.ResourceInUseException; -import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; -import software.amazon.awssdk.services.dynamodb.model.TableDescription; -import software.amazon.awssdk.services.dynamodb.model.TableStatus; - -/** - * Utility methods for working with DynamoDB tables. - * - *
    - * // ... create DynamoDB table ...
    - * try {
    - *     waitUntilActive(dynamoDB, myTableName());
    - * } catch (SdkClientException e) {
    - *     // table didn't become active
    - * }
    - * // ... start making calls to table ...
    - * 
    - */ -public class TableUtils { - - private static final int DEFAULT_WAIT_TIMEOUT = 10 * 60 * 1000; - private static final int DEFAULT_WAIT_INTERVAL = 20 * 1000; - /** - * The logging utility. - */ - private static final Logger log = LoggerFactory.getLogger(TableUtils.class); - - /** - * Waits up to 10 minutes for a specified DynamoDB table to resolve, - * indicating that it exists. If the table doesn't return a result after - * this time, a SdkClientException is thrown. - * - * @param dynamo - * The DynamoDB client to use to make requests. - * @param tableName - * The name of the table being resolved. - * - * @throws SdkClientException - * If the specified table does not resolve before this method - * times out and stops polling. - * @throws InterruptedException - * If the thread is interrupted while waiting for the table to - * resolve. - */ - public static void waitUntilExists(final DynamoDbClient dynamo, final String tableName) - throws InterruptedException { - waitUntilExists(dynamo, tableName, DEFAULT_WAIT_TIMEOUT, DEFAULT_WAIT_INTERVAL); - } - - /** - * Waits up to a specified amount of time for a specified DynamoDB table to - * resolve, indicating that it exists. If the table doesn't return a result - * after this time, a SdkClientException is thrown. - * - * @param dynamo - * The DynamoDB client to use to make requests. - * @param tableName - * The name of the table being resolved. - * @param timeout - * The maximum number of milliseconds to wait. - * @param interval - * The poll interval in milliseconds. - * - * @throws SdkClientException - * If the specified table does not resolve before this method - * times out and stops polling. - * @throws InterruptedException - * If the thread is interrupted while waiting for the table to - * resolve. - */ - public static void waitUntilExists(final DynamoDbClient dynamo, final String tableName, final int timeout, - final int interval) throws InterruptedException { - TableDescription table = waitForTableDescription(dynamo, tableName, null, timeout, interval); - - if (table == null) { - throw SdkClientException.builder().message("Table " + tableName + " never returned a result").build(); - } - } - - /** - * Waits up to 10 minutes for a specified DynamoDB table to move into the - * ACTIVE state. If the table does not exist or does not - * transition to the ACTIVE state after this time, then - * SdkClientException is thrown. - * - * @param dynamo - * The DynamoDB client to use to make requests. - * @param tableName - * The name of the table whose status is being checked. - * - * @throws TableNeverTransitionedToStateException - * If the specified table does not exist or does not transition - * into the ACTIVE state before this method times - * out and stops polling. - * @throws InterruptedException - * If the thread is interrupted while waiting for the table to - * transition into the ACTIVE state. - */ - public static void waitUntilActive(final DynamoDbClient dynamo, final String tableName) - throws InterruptedException, TableNeverTransitionedToStateException { - waitUntilActive(dynamo, tableName, DEFAULT_WAIT_TIMEOUT, DEFAULT_WAIT_INTERVAL); - } - - /** - * Waits up to a specified amount of time for a specified DynamoDB table to - * move into the ACTIVE state. If the table does not exist or - * does not transition to the ACTIVE state after this time, - * then a SdkClientException is thrown. - * - * @param dynamo - * The DynamoDB client to use to make requests. - * @param tableName - * The name of the table whose status is being checked. 
- * @param timeout - * The maximum number of milliseconds to wait. - * @param interval - * The poll interval in milliseconds. - * - * @throws TableNeverTransitionedToStateException - * If the specified table does not exist or does not transition - * into the ACTIVE state before this method times - * out and stops polling. - * @throws InterruptedException - * If the thread is interrupted while waiting for the table to - * transition into the ACTIVE state. - */ - public static void waitUntilActive(final DynamoDbClient dynamo, final String tableName, final int timeout, - final int interval) throws InterruptedException, TableNeverTransitionedToStateException { - TableDescription table = waitForTableDescription(dynamo, tableName, TableStatus.ACTIVE, timeout, interval); - - if (table == null || !table.tableStatus().equals(TableStatus.ACTIVE)) { - throw new TableNeverTransitionedToStateException(tableName, TableStatus.ACTIVE); - } - } - - /** - * Wait for the table to reach the desired status and returns the table - * description - * - * @param dynamo - * Dynamo client to use - * @param tableName - * Table name to poll status of - * @param desiredStatus - * Desired {@link TableStatus} to wait for. If null this method - * simply waits until DescribeTable returns something non-null - * (i.e. any status) - * @param timeout - * Timeout in milliseconds to continue to poll for desired status - * @param interval - * Time to wait in milliseconds between poll attempts - * @return Null if DescribeTables never returns a result, otherwise the - * result of the last poll attempt (which may or may not have the - * desired state) - * @throws {@link - * IllegalArgumentException} If timeout or interval is invalid - */ - private static TableDescription waitForTableDescription(final DynamoDbClient dynamo, final String tableName, - TableStatus desiredStatus, final int timeout, final int interval) - throws InterruptedException, IllegalArgumentException { - if (timeout < 0) { - throw new IllegalArgumentException("Timeout must be >= 0"); - } - if (interval <= 0 || interval >= timeout) { - throw new IllegalArgumentException("Interval must be > 0 and < timeout"); - } - long startTime = System.currentTimeMillis(); - long endTime = startTime + timeout; - - TableDescription table = null; - while (System.currentTimeMillis() < endTime) { - try { - table = dynamo.describeTable(DescribeTableRequest.builder().tableName(tableName).build()).table(); - if (desiredStatus == null || table.tableStatus().equals(desiredStatus)) { - return table; - - } - } catch (ResourceNotFoundException rnfe) { - // ResourceNotFound means the table doesn't exist yet, - // so ignore this error and just keep polling. - } - - Thread.sleep(interval); - } - return table; - } - - /** - * Creates the table and ignores any errors if it already exists. - * @param dynamo The Dynamo client to use. - * @param createTableRequest The create table request. - * @return True if created, false otherwise. - */ - public static boolean createTableIfNotExists(final DynamoDbClient dynamo, final CreateTableRequest createTableRequest) { - try { - dynamo.createTable(createTableRequest); - return true; - } catch (final ResourceInUseException e) { - if (log.isTraceEnabled()) { - log.trace("Table " + createTableRequest.tableName() + " already exists", e); - } - } - return false; - } - - /** - * Deletes the table and ignores any errors if it doesn't exist. - * @param dynamo The Dynamo client to use. - * @param deleteTableRequest The delete table request. 
- * @return True if deleted, false otherwise. - */ - public static boolean deleteTableIfExists(final DynamoDbClient dynamo, final DeleteTableRequest deleteTableRequest) { - try { - dynamo.deleteTable(deleteTableRequest); - return true; - } catch (final ResourceNotFoundException e) { - if (log.isTraceEnabled()) { - log.trace("Table " + deleteTableRequest.tableName() + " does not exist", e); - } - } - return false; - } - - /** - * Thrown by {@link TableUtils} when a table never reaches a desired state - */ - public static class TableNeverTransitionedToStateException extends SdkClientException { - - private static final long serialVersionUID = 8920567021104846647L; - - public TableNeverTransitionedToStateException(String tableName, TableStatus desiredStatus) { - super(SdkClientException.builder() - .message("Table " + tableName + " never transitioned to desired state of " + - desiredStatus.toString())); - } - - } - -} diff --git a/test/dynamodbmapper-v1/pom.xml b/test/dynamodbmapper-v1/pom.xml deleted file mode 100644 index d7f0f900f77c..000000000000 --- a/test/dynamodbmapper-v1/pom.xml +++ /dev/null @@ -1,125 +0,0 @@ - - - - - 4.0.0 - - aws-sdk-java-pom - software.amazon.awssdk - 2.7.16-SNAPSHOT - ../../pom.xml - - dynamodbmapper-v1 - AWS Java SDK :: Test :: Amazon DynamoDB Mapper v1 - DynamoDB Mapper largely unchanged from v1. The v1 Mapper is kept for testing purposes only. All classes are in the test directories to prevent use in application code. - https://aws.amazon.com/sdkforjava - - - ../.. - - - - - - software.amazon.awssdk - bom-internal - ${project.version} - pom - import - - - - - - - software.amazon.awssdk - auth - ${awsjavasdk.version} - test - - - software.amazon.awssdk - regions - ${awsjavasdk.version} - test - - - software.amazon.awssdk - annotations - ${awsjavasdk.version} - test - - - software.amazon.awssdk - utils - ${awsjavasdk.version} - test - - - software.amazon.awssdk - sdk-core - ${awsjavasdk.version} - test - - - software.amazon.awssdk - aws-core - ${awsjavasdk.version} - test - - - test-utils - software.amazon.awssdk - ${awsjavasdk.version} - test - - - org.assertj - assertj-core - test - - - dynamodb - software.amazon.awssdk - ${awsjavasdk.version} - test - - - s3 - software.amazon.awssdk - ${awsjavasdk.version} - test - - - service-test-utils - software.amazon.awssdk - ${awsjavasdk.version} - test - - - junit - junit - test - - - mockito-core - org.mockito - test - - - diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/DynamoDBMapperIntegrationTestBase.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/DynamoDBMapperIntegrationTestBase.java deleted file mode 100644 index 2e47fbc9e3f5..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/DynamoDBMapperIntegrationTestBase.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb; - -import java.nio.ByteBuffer; -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.pojos.BinaryAttributeByteBufferClass; -import utils.test.util.DynamoDBIntegrationTestBase; -import utils.test.util.DynamoDBTestBase; - -public class DynamoDBMapperIntegrationTestBase extends DynamoDBIntegrationTestBase { - - public static void setUpMapperTestBase() { - DynamoDBTestBase.setUpTestBase(); - } - - /* - * Utility methods - */ - protected static BinaryAttributeByteBufferClass getUniqueByteBufferObject(int contentLength) { - BinaryAttributeByteBufferClass obj = new BinaryAttributeByteBufferClass(); - obj.setKey(String.valueOf(startKey++)); - obj.setBinaryAttribute(ByteBuffer.wrap(generateByteArray(contentLength))); - Set byteBufferSet = new HashSet(); - byteBufferSet.add(ByteBuffer.wrap(generateByteArray(contentLength))); - obj.setBinarySetAttribute(byteBufferSet); - return obj; - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/GsiAlwaysUpdateIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/GsiAlwaysUpdateIntegrationTest.java deleted file mode 100644 index 99836b9b2813..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/GsiAlwaysUpdateIntegrationTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb; - -import static org.junit.Assert.assertNotEquals; - -import java.util.UUID; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTableMapper; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; -import software.amazon.awssdk.services.dynamodb.model.TableStatus; -import software.amazon.awssdk.services.dynamodb.pojos.GsiWithAlwaysUpdateTimestamp; -import software.amazon.awssdk.testutils.Waiter; - -public class GsiAlwaysUpdateIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String TABLE_NAME = - GsiAlwaysUpdateIntegrationTest.class.getSimpleName() + "-" + System.currentTimeMillis(); - - private DynamoDbClient ddb; - private DynamoDbTableMapper mapper; - - @Before - public void setup() { - ddb = DynamoDbClient.builder() - .region(Region.US_WEST_2) - .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .build(); - mapper = new DynamoDbMapper(ddb, DynamoDbMapperConfig.builder() - .withTableNameOverride(new DynamoDbMapperConfig.TableNameOverride(TABLE_NAME)) - .build()).newTableMapper(GsiWithAlwaysUpdateTimestamp.class); - mapper.createTable(ProvisionedThroughput.builder().readCapacityUnits(5L).writeCapacityUnits(5L).build()); - Waiter.run(() -> ddb.describeTable(r -> r.tableName(TABLE_NAME))) - .ignoringException(ResourceNotFoundException.class) - .until(r -> r.table().tableStatus() == TableStatus.ACTIVE) - .orFail(); - } - - @After - public void tearDown() { - mapper.deleteTableIfExists(); - Waiter.run(() -> ddb.describeTable(r -> r.tableName(TABLE_NAME))) - .untilException(ResourceNotFoundException.class) - .orFail(); - } - - @Test - public void pojoWithAlwaysGenerateGsi_SavesCorrectly() throws InterruptedException { - final String hashKey = UUID.randomUUID().toString(); - final String rangeKey = UUID.randomUUID().toString(); - - mapper.save(new GsiWithAlwaysUpdateTimestamp() - .setHashKey(hashKey) - .setRangeKey(rangeKey)); - final GsiWithAlwaysUpdateTimestamp created = mapper.load(hashKey, rangeKey); - // Have to store it since the mapper will auto update any generated values in the saved object. - Long createdDate = created.getLastModifiedDate(); - // Need to wait a bit for the timestamps to actually be different - Thread.sleep(1000); - mapper.save(created); - final GsiWithAlwaysUpdateTimestamp updated = mapper.load(hashKey, rangeKey); - assertNotEquals(createdDate, updated.getLastModifiedDate()); - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/TableUtils.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/TableUtils.java deleted file mode 100644 index 87919310b889..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/TableUtils.java +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; -import software.amazon.awssdk.services.dynamodb.model.ResourceInUseException; -import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; -import software.amazon.awssdk.services.dynamodb.model.TableDescription; -import software.amazon.awssdk.services.dynamodb.model.TableStatus; - -/** - * Utility methods for working with DynamoDB tables. - * - *
    - * // ... create DynamoDB table ...
    - * try {
    - *     waitUntilActive(dynamoDB, myTableName());
    - * } catch (SdkClientException | InterruptedException e) {
    - *     // table didn't become active
    - * }
    - * // ... start making calls to table ...
    - * 
    - */ -public class TableUtils { - - private static final int DEFAULT_WAIT_TIMEOUT = 10 * 60 * 1000; - private static final int DEFAULT_WAIT_INTERVAL = 20 * 1000; - /** - * The logging utility. - */ - private static final Logger log = LoggerFactory.getLogger(TableUtils.class); - - /** - * Waits up to 10 minutes for a specified DynamoDB table to resolve, - * indicating that it exists. If the table doesn't return a result after - * this time, a SdkClientException is thrown. - * - * @param dynamo - * The DynamoDB client to use to make requests. - * @param tableName - * The name of the table being resolved. - * - * @throws SdkClientException - * If the specified table does not resolve before this method - * times out and stops polling. - * @throws InterruptedException - * If the thread is interrupted while waiting for the table to - * resolve. - */ - public static void waitUntilExists(final DynamoDbClient dynamo, final String tableName) - throws InterruptedException { - waitUntilExists(dynamo, tableName, DEFAULT_WAIT_TIMEOUT, DEFAULT_WAIT_INTERVAL); - } - - /** - * Waits up to a specified amount of time for a specified DynamoDB table to - * resolve, indicating that it exists. If the table doesn't return a result - * after this time, a SdkClientException is thrown. - * - * @param dynamo - * The DynamoDB client to use to make requests. - * @param tableName - * The name of the table being resolved. - * @param timeout - * The maximum number of milliseconds to wait. - * @param interval - * The poll interval in milliseconds. - * - * @throws SdkClientException - * If the specified table does not resolve before this method - * times out and stops polling. - * @throws InterruptedException - * If the thread is interrupted while waiting for the table to - * resolve. - */ - public static void waitUntilExists(final DynamoDbClient dynamo, final String tableName, final int timeout, - final int interval) throws InterruptedException { - TableDescription table = waitForTableDescription(dynamo, tableName, null, timeout, interval); - - if (table == null) { - throw SdkClientException.builder().message("Table " + tableName + " never returned a result").build(); - } - } - - /** - * Waits up to 10 minutes for a specified DynamoDB table to move into the - * ACTIVE state. If the table does not exist or does not - * transition to the ACTIVE state after this time, then - * SdkClientException is thrown. - * - * @param dynamo - * The DynamoDB client to use to make requests. - * @param tableName - * The name of the table whose status is being checked. - * - * @throws TableNeverTransitionedToStateException - * If the specified table does not exist or does not transition - * into the ACTIVE state before this method times - * out and stops polling. - * @throws InterruptedException - * If the thread is interrupted while waiting for the table to - * transition into the ACTIVE state. - */ - public static void waitUntilActive(final DynamoDbClient dynamo, final String tableName) - throws InterruptedException, TableNeverTransitionedToStateException { - waitUntilActive(dynamo, tableName, DEFAULT_WAIT_TIMEOUT, DEFAULT_WAIT_INTERVAL); - } - - /** - * Waits up to a specified amount of time for a specified DynamoDB table to - * move into the ACTIVE state. If the table does not exist or - * does not transition to the ACTIVE state after this time, - * then a SdkClientException is thrown. - * - * @param dynamo - * The DynamoDB client to use to make requests. - * @param tableName - * The name of the table whose status is being checked. 
- * @param timeout - * The maximum number of milliseconds to wait. - * @param interval - * The poll interval in milliseconds. - * - * @throws TableNeverTransitionedToStateException - * If the specified table does not exist or does not transition - * into the ACTIVE state before this method times - * out and stops polling. - * @throws InterruptedException - * If the thread is interrupted while waiting for the table to - * transition into the ACTIVE state. - */ - public static void waitUntilActive(final DynamoDbClient dynamo, final String tableName, final int timeout, - final int interval) throws InterruptedException, TableNeverTransitionedToStateException { - TableDescription table = waitForTableDescription(dynamo, tableName, TableStatus.ACTIVE, timeout, interval); - - if (table == null || !table.tableStatus().equals(TableStatus.ACTIVE)) { - throw new TableNeverTransitionedToStateException(tableName, TableStatus.ACTIVE); - } - } - - /** - * Wait for the table to reach the desired status and returns the table - * description - * - * @param dynamo - * Dynamo client to use - * @param tableName - * Table name to poll status of - * @param desiredStatus - * Desired {@link TableStatus} to wait for. If null this method - * simply waits until DescribeTable returns something non-null - * (i.e. any status) - * @param timeout - * Timeout in milliseconds to continue to poll for desired status - * @param interval - * Time to wait in milliseconds between poll attempts - * @return Null if DescribeTables never returns a result, otherwise the - * result of the last poll attempt (which may or may not have the - * desired state) - * @throws {@link - * IllegalArgumentException} If timeout or interval is invalid - */ - private static TableDescription waitForTableDescription(final DynamoDbClient dynamo, final String tableName, - TableStatus desiredStatus, final int timeout, final int interval) - throws InterruptedException, IllegalArgumentException { - if (timeout < 0) { - throw new IllegalArgumentException("Timeout must be >= 0"); - } - if (interval <= 0 || interval >= timeout) { - throw new IllegalArgumentException("Interval must be > 0 and < timeout"); - } - long startTime = System.currentTimeMillis(); - long endTime = startTime + timeout; - - TableDescription table = null; - while (System.currentTimeMillis() < endTime) { - try { - table = dynamo.describeTable(DescribeTableRequest.builder().tableName(tableName).build()).table(); - if (desiredStatus == null || table.tableStatus().equals(desiredStatus)) { - return table; - - } - } catch (ResourceNotFoundException rnfe) { - // ResourceNotFound means the table doesn't exist yet, - // so ignore this error and just keep polling. - } - - Thread.sleep(interval); - } - return table; - } - - /** - * Creates the table and ignores any errors if it already exists. - * @param dynamo The Dynamo client to use. - * @param createTableRequest The create table request. - * @return True if created, false otherwise. - */ - public static boolean createTableIfNotExists(final DynamoDbClient dynamo, final CreateTableRequest createTableRequest) { - try { - dynamo.createTable(createTableRequest); - return true; - } catch (final ResourceInUseException e) { - if (log.isTraceEnabled()) { - log.trace("Table " + createTableRequest.tableName() + " already exists", e); - } - } - return false; - } - - /** - * Deletes the table and ignores any errors if it doesn't exist. - * @param dynamo The Dynamo client to use. - * @param deleteTableRequest The delete table request. 
- * @return True if deleted, false otherwise. - */ - public static boolean deleteTableIfExists(final DynamoDbClient dynamo, final DeleteTableRequest deleteTableRequest) { - try { - dynamo.deleteTable(deleteTableRequest); - return true; - } catch (final ResourceNotFoundException e) { - if (log.isTraceEnabled()) { - log.trace("Table " + deleteTableRequest.tableName() + " does not exist", e); - } - } - return false; - } - - /** - * Thrown by {@link TableUtils} when a table never reaches a desired state - */ - public static class TableNeverTransitionedToStateException extends SdkClientException { - - private static final long serialVersionUID = 8920567021104846647L; - - public TableNeverTransitionedToStateException(String tableName, TableStatus desiredStatus) { - super(SdkClientException.builder() - .message("Table " + tableName + " never transitioned to desired state of " + - desiredStatus.toString())); - } - - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchLoadIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchLoadIntegrationTest.java deleted file mode 100644 index 166ab59766ef..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchLoadIntegrationTest.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
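The TableUtils helpers above are typically combined into a create / wait / delete lifecycle around a test run. A minimal sketch of that flow, assuming the test-only TableUtils class above is on the classpath and default credentials are configured; the table name and key schema here are illustrative:

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
    import software.amazon.awssdk.services.dynamodb.TableUtils;
    import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition;
    import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest;
    import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest;
    import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement;
    import software.amazon.awssdk.services.dynamodb.model.KeyType;
    import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput;
    import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType;

    public class TableLifecycleSketch {
        public static void main(String[] args) throws InterruptedException {
            DynamoDbClient dynamo = DynamoDbClient.builder().region(Region.US_WEST_2).build();
            String tableName = "example-table"; // illustrative name

            CreateTableRequest create = CreateTableRequest.builder()
                    .tableName(tableName)
                    .keySchema(KeySchemaElement.builder().attributeName("id").keyType(KeyType.HASH).build())
                    .attributeDefinitions(AttributeDefinition.builder()
                            .attributeName("id").attributeType(ScalarAttributeType.S).build())
                    .provisionedThroughput(ProvisionedThroughput.builder()
                            .readCapacityUnits(5L).writeCapacityUnits(5L).build())
                    .build();

            // Returns false (and logs at TRACE) if the table already exists.
            TableUtils.createTableIfNotExists(dynamo, create);

            // Polls DescribeTable every 20 seconds for up to 10 minutes by default.
            TableUtils.waitUntilActive(dynamo, tableName);

            // ... exercise the table ...

            // Returns false (and logs at TRACE) if the table is already gone.
            TableUtils.deleteTableIfExists(dynamo, DeleteTableRequest.builder().tableName(tableName).build());
        }
    }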
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Collections; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.ConsistentRead; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.SaveBehavior; -import software.amazon.awssdk.services.dynamodb.mapper.NumberSetAttributeClass; -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemResponse; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.pojos.RangeKeyClass; - -public class BatchLoadIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - // We don't start with the current system millis like other tests because - // it's out of the range of some data types - private static int start = 1; - private static int byteStart = 1; - private static int startKeyDebug = 1; - DynamoDbMapper mapper = new DynamoDbMapper(dynamo, DynamoDbMapperConfig.builder() - .withSaveBehavior(SaveBehavior.UPDATE) - .withConsistentReads(ConsistentRead.CONSISTENT) - .build()); - - @BeforeClass - public static void setUp() throws Exception { - setUpTableWithRangeAttribute(); - } - - @AfterClass - public static void tearDown() { - try { - dynamo.deleteTable(DeleteTableRequest.builder().tableName(TABLE_WITH_RANGE_ATTRIBUTE).build()); - } catch (Exception e) { - // Ignore. 
- } - waitForTableToBecomeDeleted(TABLE_WITH_RANGE_ATTRIBUTE); - } - - @Test - public void testBatchLoad() throws InterruptedException { - // To see whether batchGet can handle more than 100 items per request - final int numItems = 200; - List objs = new ArrayList(); - List keyPairs = new LinkedList(); - Class clazz = null; - for (int i = 0; i < numItems; i++) { - NumberSetAttributeClass obj = getUniqueNumericObject(); - objs.add(obj); - clazz = obj.getClass(); - keyPairs.add(new KeyPair().withHashKey(obj.getKey())); - } - - mapper.batchSave(objs); - - Map, List> itemsToGet = new HashMap, List>(); - Map> response = null; - itemsToGet.put(clazz, keyPairs); - response = mapper.batchLoad(itemsToGet); - List items = response.get(TABLE_NAME); - assertEquals(numItems, items.size()); - - for (Object item : items) { - assertTrue(objs.contains(item)); - } - } - - @Test - public void testMultipleTables() { - final int numItems = 55; - Map, List> itemsToGet = new HashMap, List>(); - Class clazz = null; - List keyPairs = new LinkedList(); - List objs = new ArrayList(); - for (int i = 0; i < numItems * 2; i++) { - NumberSetAttributeClass obj = getUniqueNumericObject(); - clazz = obj.getClass(); - keyPairs.add(new KeyPair().withHashKey(obj.getKey())); - objs.add(obj); - } - itemsToGet.put(clazz, keyPairs); - keyPairs = new LinkedList(); - for (int i = 0; i < numItems; i++) { - RangeKeyClass obj = getUniqueRangeKeyObject(); - clazz = obj.getClass(); - keyPairs.add(new KeyPair().withHashKey(obj.getKey()).withRangeKey(obj.getRangeKey())); - objs.add(obj); - } - itemsToGet.put(clazz, keyPairs); - Collections.shuffle(objs); - - mapper.batchSave(objs); - - Map> response = null; - itemsToGet.put(clazz, keyPairs); - response = mapper.batchLoad(itemsToGet); - - List itemsFromTableOne = response.get(TABLE_NAME); - List itemsFromTableTwo = response.get(TABLE_WITH_RANGE_ATTRIBUTE); - - assertEquals(numItems * 2, itemsFromTableOne.size()); - assertEquals(numItems, itemsFromTableTwo.size()); - - for (Object item : itemsFromTableOne) { - assertTrue(objs.contains(item)); - } - - for (Object item : itemsFromTableTwo) { - assertTrue(objs.contains(item)); - } - } - - @Test - public void testBoudaryCases() { - // The request is an empty Map. - Map, List> itemsToGet = new HashMap, List>(); - Map> response = null; - response = mapper.batchLoad(itemsToGet); - assertTrue(response.isEmpty()); - - // The request only contains invalid key pairs - List keyPairs = new LinkedList(); - Class clazz = getUniqueNumericObject().getClass(); - keyPairs.add(new KeyPair().withHashKey("non-existent-key")); - itemsToGet.clear(); - itemsToGet.put(clazz, keyPairs); - response = mapper.batchLoad(itemsToGet); - assertNotNull(response); - List items = response.get(TABLE_NAME); - assertNotNull(items); - assertEquals(0, items.size()); - - // The request does not contain any key pairs. 
- itemsToGet.put(clazz, new LinkedList()); - response = mapper.batchLoad(itemsToGet); - assertTrue(response.isEmpty()); - } - - @Test - public void testExponentialBackOffForBatchGetInMapper() - throws NoSuchFieldException, SecurityException, - IllegalArgumentException, IllegalAccessException { - long startTime = System.currentTimeMillis(); - long maxBackOffTimePerRetry = DynamoDbMapper.MAX_BACKOFF_IN_MILLISECONDS; - int NoOfRetries = DynamoDbMapper.BATCH_GET_MAX_RETRY_COUNT_ALL_KEYS; - - List objs = new ArrayList(); - NumberSetAttributeClass obj = getUniqueNumericObject(); - objs.add(obj); - DynamoDbClient mockClient = mock(DynamoDbClient.class); - when(mockClient.batchGetItem(any(BatchGetItemRequest.class))).thenAnswer(new Answer() { - @Override - public BatchGetItemResponse answer(InvocationOnMock invocation) throws Throwable { - Thread.sleep(3000); - BatchGetItemResponse result = BatchGetItemResponse.builder() - .responses(new HashMap<>()) - .unprocessedKeys(((BatchGetItemRequest) invocation.getArguments()[0]).requestItems()) - .build(); - return result; - } - }); - DynamoDbMapper mapper = new DynamoDbMapper(mockClient); - try { - mapper.batchLoad(objs); - fail("Expecting an exception due to exceed of number of retries."); - } catch (Exception e) { - e.printStackTrace(); - long endTime = System.currentTimeMillis(); - assertTrue(((endTime - startTime)) > (maxBackOffTimePerRetry - * NoOfRetries)); - } - } - - private NumberSetAttributeClass getUniqueNumericObject() { - NumberSetAttributeClass obj = new NumberSetAttributeClass(); - obj.setKey(String.valueOf(startKeyDebug++)); - obj.setBigDecimalAttribute(toSet(new BigDecimal(startKey++), new BigDecimal(startKey++), new BigDecimal(startKey++))); - obj.setBigIntegerAttribute( - toSet(new BigInteger("" + startKey++), new BigInteger("" + startKey++), new BigInteger("" + startKey++))); - obj.setByteObjectAttribute(toSet(new Byte(nextByte()), new Byte(nextByte()), new Byte(nextByte()))); - obj.setDoubleObjectAttribute(toSet(new Double("" + start++), new Double("" + start++), new Double("" + start++))); - obj.setFloatObjectAttribute(toSet(new Float("" + start++), new Float("" + start++), new Float("" + start++))); - obj.setIntegerAttribute(toSet(new Integer("" + start++), new Integer("" + start++), new Integer("" + start++))); - obj.setLongObjectAttribute(toSet(new Long("" + start++), new Long("" + start++), new Long("" + start++))); - obj.setBooleanAttribute(toSet(true, false)); - obj.setDateAttribute(toSet(new Date(startKey++), new Date(startKey++), new Date(startKey++))); - Set cals = new HashSet(); - for (Date d : obj.getDateAttribute()) { - Calendar cal = GregorianCalendar.getInstance(); - cal.setTime(d); - cals.add(cal); - } - obj.setCalendarAttribute(toSet(cals)); - return obj; - } - - private RangeKeyClass getUniqueRangeKeyObject() { - RangeKeyClass obj = new RangeKeyClass(); - obj.setKey(startKey++); - obj.setIntegerAttribute(toSet(start++, start++, start++)); - obj.setBigDecimalAttribute(new BigDecimal(startKey++)); - obj.setRangeKey(start++); - obj.setStringAttribute("" + startKey++); - obj.setStringSetAttribute(toSet("" + startKey++, "" + startKey++, "" + startKey++)); - return obj; - } - - private String nextByte() { - return "" + byteStart++ % Byte.MAX_VALUE; - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDBS3IntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDBS3IntegrationTest.java deleted file 
mode 100644 index c9e448162bf2..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDBS3IntegrationTest.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; - -import java.io.ByteArrayOutputStream; -import java.util.UUID; -import org.junit.Ignore; -import org.junit.Test; -import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.pojos.S3LinksTestClass; -import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.testutils.RandomTempFile; - -@Ignore -// Revisit S3 -public class DynamoDBS3IntegrationTest extends DynamoDBS3IntegrationTestBase { - - private static final long OBJECT_SIZE = 123; - - @Test - public void testCredentialContext() throws Exception { - tryCreateItem(new DynamoDbMapper(dynamo, CREDENTIALS_PROVIDER_CHAIN)); - } - - @Test - public void testManuallyFilledContext() throws Exception { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo, CREDENTIALS_PROVIDER_CHAIN); - S3ClientCache s3cc = mapper.s3ClientCache(); - s3cc.useClient(s3East, Region.US_EAST_1); - s3cc.useClient(s3West, Region.US_WEST_2); - tryCreateItem(mapper); - } - - public void tryCreateItem(DynamoDbMapper mapper) throws Exception { - String westKey = UUID.randomUUID().toString(); - String eastKey = UUID.randomUUID().toString(); - - S3LinksTestClass obj = new S3LinksTestClass(); - obj.setKey("" + ++startKey); - S3Link linkWest = mapper.createS3Link(Region.US_WEST_2, DynamoDBS3IntegrationTestBase.WEST_BUCKET, westKey); - obj.setS3LinkWest(linkWest); - mapper.save(obj); - obj = mapper.load(S3LinksTestClass.class, obj.getKey()); - - assertObjectDoesntExist(s3West, obj.s3LinkWest().bucketName(), westKey); - - linkWest.getAmazonS3Client().putObject(PutObjectRequest.builder() - .bucket(linkWest.bucketName()) - .key(linkWest.getKey()) - .build(), - RequestBody.fromFile(new RandomTempFile(westKey, OBJECT_SIZE))); - - assertObjectExists(s3West, obj.s3LinkWest().bucketName(), westKey); - - S3Link linkEast = mapper.createS3Link(Region.US_EAST_1, DynamoDBS3IntegrationTestBase.EAST_BUCKET, eastKey); - obj.setS3LinkEast(linkEast); - assertObjectDoesntExist(s3East, obj.s3LinkEast().bucketName(), eastKey); - - linkEast.getAmazonS3Client().putObject(PutObjectRequest.builder() - .bucket(linkEast.bucketName()) - .key(linkEast.getKey()) - .build(), - RequestBody.fromFile(new RandomTempFile(westKey, OBJECT_SIZE))); - mapper.save(obj); - - assertObjectExists(s3West, obj.s3LinkWest().bucketName(), westKey); - assertObjectExists(s3East, obj.s3LinkEast().bucketName(), eastKey); - - obj = mapper.load(S3LinksTestClass.class, obj.getKey()); - - assertEquals(westKey, obj.s3LinkWest().getKey()); - assertEquals(eastKey, obj.s3LinkEast().getKey()); - 
System.err.println(obj.s3LinkWest().toJson()); - System.err.println(obj.s3LinkEast().toJson()); - mapper.delete(obj); - - // Test the convenience methods on S3Link - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - linkEast.downloadTo(baos); - assertEquals(OBJECT_SIZE, baos.toByteArray().length); - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDBS3IntegrationTestBase.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDBS3IntegrationTestBase.java deleted file mode 100644 index c344783974dd..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDBS3IntegrationTestBase.java +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -/* - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://aws.amazon.com/apache2.0 - * - * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES - * OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and - * limitations under the License. 
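The S3Link round trip exercised above reduces to a handful of calls. A compact sketch, assuming the test's imports are in scope and reusing its S3LinksTestClass POJO and DynamoDbMapper; the bucket, key and payload below are illustrative, not taken from the test:

    S3LinksTestClass item = new S3LinksTestClass();
    item.setKey("example-item");                       // illustrative hash key
    S3Link link = mapper.createS3Link(Region.US_WEST_2, "example-bucket", "example-object");
    item.setS3LinkWest(link);
    mapper.save(item);                                 // the link is persisted as a DynamoDB attribute

    // Upload through the S3 client cached for the link's region, then read the object back.
    link.getAmazonS3Client().putObject(
            PutObjectRequest.builder().bucket(link.bucketName()).key(link.getKey()).build(),
            RequestBody.fromString("example payload"));
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    link.downloadTo(out);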
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.fail; -import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; - -import java.util.Iterator; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import software.amazon.awssdk.core.exception.SdkServiceException; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.S3Client; -import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration; -import software.amazon.awssdk.services.s3.model.CreateBucketRequest; -import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; -import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; -import software.amazon.awssdk.services.s3.model.HeadObjectRequest; -import software.amazon.awssdk.services.s3.model.ListObjectsRequest; -import software.amazon.awssdk.services.s3.model.ListObjectsResponse; -import software.amazon.awssdk.services.s3.model.S3Object; -import utils.test.util.DynamoDBIntegrationTestBase; - -public class DynamoDBS3IntegrationTestBase extends DynamoDBIntegrationTestBase { - public static final String WEST_BUCKET = temporaryBucketName("java-dynamo-s3-integ-test-west"); - public static final String EAST_BUCKET = temporaryBucketName("java-dynamo-s3-integ-test-east"); - - protected static S3Client s3East; - protected static S3Client s3West; - - @BeforeClass - public static void setUp() throws Exception { - DynamoDBIntegrationTestBase.setUp(); - s3East = S3Client.builder() - .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .region(Region.US_EAST_1) - .build(); - - s3West = S3Client.builder() - .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .region(Region.US_WEST_2) - .build(); - - createBucket(s3East, EAST_BUCKET, null); - createBucket(s3West, WEST_BUCKET, Region.US_WEST_2.id()); - } - - @AfterClass - public static void tearDown() { - deleteBucketAndAllContents(s3East, EAST_BUCKET); - deleteBucketAndAllContents(s3West, WEST_BUCKET); - } - - /** - * Deletes all objects in the specified bucket, and then deletes the bucket. - * - * @param s3 The AmazonS3 client to use. - * @param bucketName The bucket to empty and delete. - */ - protected static void deleteBucketAndAllContents(S3Client s3, String bucketName) { - ListObjectsResponse response = s3.listObjects(ListObjectsRequest.builder() - .bucket(bucketName) - .build()); - - while (true) { - for (Iterator iterator = response.contents().iterator(); iterator.hasNext(); ) { - S3Object objectSummary = (S3Object) iterator.next(); - s3.deleteObject(DeleteObjectRequest.builder() - .bucket(bucketName) - .key(objectSummary.key()) - .build()); - } - - if (response.isTruncated()) { - response = s3.listObjects(ListObjectsRequest.builder() - .marker(response.nextMarker()) - .bucket(bucketName) - .build()); - } else { - break; - } - } - ; - - s3.deleteBucket(DeleteBucketRequest.builder().bucket(bucketName).build()); - } - - /** - * Creates a bucket and waits for it to exist. - * - * @param s3 The AmazonS# client to use. - * @param bucketName The name of the bucket to create. 
- */ - protected static void createBucket(S3Client s3, String bucketName, String region) throws InterruptedException { - s3.createBucket(CreateBucketRequest.builder() - .bucket(bucketName) - .createBucketConfiguration(CreateBucketConfiguration.builder() - .locationConstraint(region) - .build()) - .build()); - - Thread.sleep(1000); - } - - protected static void maxPollTimeExceeded() { - throw new RuntimeException("Max poll time exceeded"); - } - - /** - * Asserts that the object stored in the specified bucket and key doesn't - * exist If it does exist, this method will fail the current test. - * - * @param s3 The AmazonS3 client to use. - * @param bucketName The name of the bucket containing the object to test. - * @param key The key under which the object is stored in the specified - * bucket. - */ - protected void assertObjectDoesntExist(S3Client s3, String bucketName, String key) throws Exception { - long timeoutTime = System.currentTimeMillis() + 10000; - - while (true) { - try { - s3.headObject(HeadObjectRequest.builder().bucket(bucketName).key(key).build()); - Thread.sleep(1000); - if (System.currentTimeMillis() > timeoutTime) { - fail("object " + bucketName + "/" + key + " still exists"); - } - } catch (SdkServiceException exception) { - /* - * We expect a 404 indicating that the object version we requested - * doesn't exist. If we get anything other than that, then we want - * to let the exception keep going up the chain. - */ - if (exception.statusCode() != 404) { - throw exception; - } - return; // doesn't exist! - } - } - } - - /** - * Asserts that the object stored in the specified bucket and key exists. If - * it doesn't exist, this method will fail the current test. - * - * @param s3 The AmazonS3 client to use. - * @param bucketName The name of the bucket containing the object to test. - * @param key The key under which the object is stored in the specified - * bucket. - */ - protected void assertObjectExists(S3Client s3, String bucketName, String key) throws Exception { - long timeoutTime = System.currentTimeMillis() + 10000; - - while (true) { - try { - s3.headObject(HeadObjectRequest.builder().bucket(bucketName).key(key).build()); - return; // exists! - } catch (SdkServiceException exception) { - /* - * We expect a 404 indicating that the object version we requested - * doesn't exist. If we get anything other than that, then we want - * to let the exception keep going up the chain. - */ - if (exception.statusCode() != 404) { - throw exception; - } - Thread.sleep(1000); - if (System.currentTimeMillis() > timeoutTime) { - fail("object " + bucketName + "/" + key + " doesn't exist"); - } - } - } - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperExpressionsIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperExpressionsIntegrationTest.java deleted file mode 100644 index 465ddf6a0894..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperExpressionsIntegrationTest.java +++ /dev/null @@ -1,368 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertTrue; - -import java.io.FileNotFoundException; -import java.io.IOException; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.utils.ImmutableMap; -import software.amazon.awssdk.utils.ImmutableMap.Builder; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.model.ResourceInUseException; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.services.dynamodb.model.TableStatus; -import software.amazon.awssdk.testutils.Waiter; -import software.amazon.awssdk.testutils.service.AwsTestBase; - -public class DynamoDbMapperExpressionsIntegrationTest extends AwsTestBase { - - /** - * Reference to the mapper used for this testing - */ - protected static DynamoDbMapper mapper; - - /** - * Reference to the client being used by the mapper. - */ - protected static DynamoDbClient client; - - /** - * Table name to be used for this testing - */ - static final String TABLENAME = "java-sdk-mapper-customer"; - - /** - * Attribute name of the hash key - */ - private static final String HASH_KEY = "customerId"; - - /** - * Attribute name of the range key - */ - private static final String RANGE_KEY = "addressType"; - - /** - * Status of the table - */ - private static final String TABLE_STATUS_ACTIVE = "ACTIVE"; - - /** - * Sleep time in milli seconds for the table to become active. - */ - private static final long SLEEP_TIME_IN_MILLIS = 5000; - - /** - * Provisioned Throughput read capacity for the table. - */ - private static final long READ_CAPACITY = 10; - - /** - * Provisioned Throughput write capacity for the table. 
- */ - private static final long WRITE_CAPACITY = 10; - - private static final String FIRST_CUSTOMER_ID = "1000"; - private static final String ADDRESS_TYPE_HOME = "home"; - private static final String ADDRESS_TYPE_WORK = "work"; - - @BeforeClass - public static void setUp() throws FileNotFoundException, IOException, - InterruptedException { - setUpCredentials(); - client = DynamoDbClient.builder().credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); - mapper = new DynamoDbMapper(client); - try { - client.createTable(CreateTableRequest.builder() - .tableName(TABLENAME) - .keySchema(KeySchemaElement.builder().attributeName(HASH_KEY).keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName(RANGE_KEY).keyType(KeyType.RANGE).build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName(HASH_KEY).attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName(RANGE_KEY).attributeType(ScalarAttributeType.S).build()) - .provisionedThroughput(ProvisionedThroughput.builder() - .readCapacityUnits(READ_CAPACITY) - .writeCapacityUnits(WRITE_CAPACITY) - .build()) - .build()); - } catch (ResourceInUseException ex) { - ex.printStackTrace(); - } - waitForTableCreation(); - fillInData(); - } - - public static void fillInData() { - final Builder record1 = ImmutableMap - .builder(); - record1.put(HASH_KEY, AttributeValue.builder().n(FIRST_CUSTOMER_ID).build()) - .put(RANGE_KEY, AttributeValue.builder().s(ADDRESS_TYPE_WORK).build()) - .put("AddressLine1", - AttributeValue.builder().s("1918 8th Aven").build()) - .put("city", AttributeValue.builder().s("seattle").build()) - .put("state", AttributeValue.builder().s("WA").build()) - .put("zipcode", AttributeValue.builder().n("98104").build()); - final Builder record2 = ImmutableMap - .builder(); - record2.put(HASH_KEY, AttributeValue.builder().n(FIRST_CUSTOMER_ID).build()) - .put(RANGE_KEY, AttributeValue.builder().s(ADDRESS_TYPE_HOME).build()) - .put("AddressLine1", - AttributeValue.builder().s("15606 NE 40th ST").build()) - .put("city", AttributeValue.builder().s("redmond").build()) - .put("state", AttributeValue.builder().s("WA").build()) - .put("zipcode", AttributeValue.builder().n("98052").build()); - - client.putItem(PutItemRequest.builder().tableName(TABLENAME).item(record1.build()).build()); - client.putItem(PutItemRequest.builder().tableName(TABLENAME).item(record2.build()).build()); - } - - public static void waitForTableCreation() throws InterruptedException { - Waiter.run(() -> client.describeTable(r -> r.tableName(TABLENAME))) - .until(r -> r.table().tableStatus() == TableStatus.ACTIVE) - .orFail(); - } - - @AfterClass - public static void tearDown() throws Exception { - try { - if (client != null) { - client.deleteTable(DeleteTableRequest.builder().tableName(TABLENAME).build()); - } - } catch (Exception e) { - // Ignored or expected. - } finally { - if (client != null) { - client.close(); - } - } - } - - /** - * Queries for a record based on hash and range key. Provider a filter - * expression that filters results. 
- */ - @Test - public void testQueryFilterExpression() { - Customer customer = new Customer(); - customer.setCustomerId(Long.valueOf(FIRST_CUSTOMER_ID)); - - DynamoDbQueryExpression queryExpression = - new DynamoDbQueryExpression() - .withHashKeyValues(customer) - .withRangeKeyCondition(RANGE_KEY, Condition.builder() - .comparisonOperator(ComparisonOperator.EQ) - .attributeValueList(AttributeValue.builder().s(ADDRESS_TYPE_HOME).build()) - .build()); - PaginatedQueryList results = mapper.query(Customer.class, - queryExpression); - assertTrue(results.size() == 1); - - final Builder builder = ImmutableMap - .builder(); - builder.put(":zipcode", AttributeValue.builder().n("98109").build()); - - queryExpression = queryExpression - .withFilterExpression("zipcode = :zipcode") - .withExpressionAttributeValues(builder.build()); - results = mapper.query(Customer.class, queryExpression); - assertTrue(results.size() == 0); - } - - /** - * Queries using key condition expression. - */ - @Test - public void testKeyConditionExpression() { - Customer customer = new Customer(); - customer.setCustomerId(Long.valueOf(FIRST_CUSTOMER_ID)); - - DynamoDbQueryExpression query = - new DynamoDbQueryExpression() - .withKeyConditionExpression( - "customerId = :customerId AND addressType = :addressType"); - final Builder builder = - ImmutableMap.builder(); - builder.put(":customerId", AttributeValue.builder().n(FIRST_CUSTOMER_ID).build()) - .put(":addressType", AttributeValue.builder().s(ADDRESS_TYPE_HOME).build()) - ; - query.withExpressionAttributeValues(builder.build()); - - PaginatedQueryList results = mapper.query(Customer.class, query); - assertTrue(results.size() == 1); - - builder.put(":zipcode", AttributeValue.builder().n("98109").build()); - query.withFilterExpression("zipcode = :zipcode") - .withExpressionAttributeValues(builder.build()); - - results = mapper.query(Customer.class, query); - assertTrue(results.size() == 0); - } - - /** - * Scan the table and filters the results based on the filter expression - * provided. - */ - @Test - public void testScanFilterExpression() { - Customer customer = new Customer(); - customer.setCustomerId(Long.valueOf(FIRST_CUSTOMER_ID)); - - DynamoDbScanExpression scanExpression = new DynamoDbScanExpression(); - - PaginatedScanList results = mapper.scan(Customer.class, - scanExpression); - assertTrue(results.size() == 2); - - final Builder attributeValueMapBuilder = ImmutableMap - .builder(); - attributeValueMapBuilder - .put(":state", AttributeValue.builder().s("WA").build()); - - final Builder attributeNameMapBuilder = ImmutableMap - .builder(); - attributeNameMapBuilder.put("#statename", "state"); - - scanExpression = scanExpression - .withFilterExpression("#statename = :state") - .withExpressionAttributeValues(attributeValueMapBuilder.build()) - .withExpressionAttributeNames(attributeNameMapBuilder.build()); - results = mapper.scan(Customer.class, scanExpression); - assertTrue(results.size() == 2); - } - - /** - * Performs delete operation with a condition expression specified. Delete - * should fail as the condition in the conditional expression evaluates to - * false. 
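The query-expression tests above follow one pattern: bind expression attribute values, attach a key condition (and optionally a filter expression), and hand the expression to the mapper. A compact sketch of the key-condition query, assuming the test's imports are in scope and using the Customer class, mapper and seeded data defined in this test class:

    Map<String, AttributeValue> values = new HashMap<>();
    values.put(":customerId", AttributeValue.builder().n("1000").build());   // FIRST_CUSTOMER_ID
    values.put(":addressType", AttributeValue.builder().s("home").build());  // ADDRESS_TYPE_HOME

    DynamoDbQueryExpression<Customer> query = new DynamoDbQueryExpression<Customer>()
            .withKeyConditionExpression("customerId = :customerId AND addressType = :addressType")
            .withExpressionAttributeValues(values);

    PaginatedQueryList<Customer> results = mapper.query(Customer.class, query);
    // Expects exactly one match: the "home" address seeded for customer 1000.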
- */ - @Test - public void testDeleteConditionalExpression() { - Customer customer = new Customer(); - customer.setCustomerId(Long.valueOf(FIRST_CUSTOMER_ID)); - customer.setAddressType(ADDRESS_TYPE_WORK); - - Builder expectedMapBuilder = ImmutableMap - .builder(); - expectedMapBuilder.put("zipcode", ExpectedAttributeValue.builder() - .attributeValueList(AttributeValue.builder().n("98052").build()) - .comparisonOperator(ComparisonOperator.EQ).build()); - - DynamoDbDeleteExpression deleteExpression = new DynamoDbDeleteExpression(); - deleteExpression.setConditionExpression("zipcode = :zipcode"); - - final Builder attributeValueMapBuilder = ImmutableMap - .builder(); - attributeValueMapBuilder.put(":zipcode", - AttributeValue.builder().n("98052").build()); - deleteExpression.setExpressionAttributeValues(attributeValueMapBuilder - .build()); - try { - mapper.delete(customer, deleteExpression); - } catch (Exception e) { - assertTrue(e instanceof ConditionalCheckFailedException); - } - } - - // Note don't move Customer to top level, or else it would break the release - // pipeline, as the integration test will not be copied over causing - // compilation failure - @DynamoDbTable(tableName = DynamoDbMapperExpressionsIntegrationTest.TABLENAME) - public static class Customer { - - private long customerId; - - private String addressType; - - private String addressLine1; - - private String city; - - private String state; - - private int zipcode; - - @DynamoDbAttribute(attributeName = "customerId") - @DynamoDbHashKey(attributeName = "customerId") - public long getCustomerId() { - return customerId; - } - - public void setCustomerId(long customerId) { - this.customerId = customerId; - } - - @DynamoDbAttribute(attributeName = "addressType") - @DynamoDbRangeKey(attributeName = "addressType") - public String getAddressType() { - return addressType; - } - - public void setAddressType(String addressType) { - this.addressType = addressType; - } - - @DynamoDbAttribute(attributeName = "AddressLine1") - public String getAddressLine1() { - return addressLine1; - } - - public void setAddressLine1(String addressLine1) { - this.addressLine1 = addressLine1; - } - - @DynamoDbAttribute(attributeName = "city") - public String getCity() { - return city; - } - - public void setCity(String city) { - this.city = city; - } - - @DynamoDbAttribute(attributeName = "state") - public String state() { - return state; - } - - public void setState(String state) { - this.state = state; - } - - @DynamoDbAttribute(attributeName = "zipcode") - public int getZipcode() { - return zipcode; - } - - public void setZipcode(int zipcode) { - this.zipcode = zipcode; - } - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/EnumMarshallerIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/EnumMarshallerIntegrationTest.java deleted file mode 100644 index 4a27f4184592..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/EnumMarshallerIntegrationTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; - -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; - -/** - * Status tests for {@code EnumMarshaller}. - */ -public class EnumMarshallerIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - @Test - public void testNullEnumValue() { - final DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - - final TestObject object1 = new TestObject(); - - assertNull(object1.getStatus()); - - mapper.save(object1); - - final TestObject object2 = mapper.load(TestObject.class, object1.getKey()); - - assertNull(object2.getStatus()); - } - - @Test - public void testMarshalling() { - final DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - - final TestObject object1 = new TestObject(); - - object1.setStatus(TestObject.Status.Y); - - mapper.save(object1); - - assertNotNull(object1.getKey()); - assertNotNull(object1.getStatus()); - - final TestObject object2 = mapper.load(TestObject.class, object1.getKey()); - - assertEquals(object1.getKey(), object2.getKey()); - assertEquals(object1.getStatus(), object2.getStatus()); - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class TestObject { - private String key; - - - private Status status; - - - - @DynamoDbHashKey - @DynamoDbAutoGeneratedKey - public String getKey() { - return this.key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbMarshalling(marshallerClass = StatusEnumMarshaller.class) - public Status getStatus() { - return this.status; - } - - public void setStatus(Status status) { - this.status = status; - } - - public static enum Status { - X, - Y, - Z - } - - public static class StatusEnumMarshaller extends AbstractEnumMarshaller { - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/JsonIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/JsonIntegrationTest.java deleted file mode 100644 index 2f9b1db9f33f..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/JsonIntegrationTest.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.TableNameOverride; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; -import software.amazon.awssdk.services.dynamodb.model.TableStatus; -import software.amazon.awssdk.testutils.Waiter; -import software.amazon.awssdk.testutils.service.AwsTestBase; - -public class JsonIntegrationTest extends AwsTestBase { - - private static final String TABLE_NAME = "test-table-" - + UUID.randomUUID().toString(); - - private static DynamoDbClient client; - private static DynamoDbMapper mapper; - - @BeforeClass - public static void setup() throws Exception { - setUpCredentials(); - client = DynamoDbClient.builder().credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).region(Region.US_WEST_2).build(); - - mapper = new DynamoDbMapper( - client, - new DynamoDbMapperConfig.Builder() - .withConversionSchema(ConversionSchemas.V2) - .withTableNameOverride(TableNameOverride - .withTableNameReplacement(TABLE_NAME)) - .withConsistentReads(DynamoDbMapperConfig.ConsistentRead.CONSISTENT) - .build()); - - CreateTableRequest request = mapper - .generateCreateTableRequest(TestClass.class).toBuilder() - .provisionedThroughput(ProvisionedThroughput.builder().readCapacityUnits(1L).writeCapacityUnits(1L).build()) - .build(); - - client.createTable(request); - - Waiter.run(() -> client.describeTable(r -> r.tableName(TABLE_NAME))) - .until(r -> r.table().tableStatus() == TableStatus.ACTIVE) - .orFail(); - } - - @AfterClass - public static void cleanup() { - if (client == null) { - return; - } - - try { - client.deleteTable(DeleteTableRequest.builder().tableName(TABLE_NAME).build()); - } catch (ResourceNotFoundException e) { - // Ignored or expected. 
- } - } - - private static boolean eq(T one, T two) { - if (one == null) { - return (two == null); - } else { - return one.equals(two); - } - } - - @Test - @Ignore - public void testIt() { - final ChildClass child1 = new ChildClass(); - child1.setBool(true); - - final ChildClass child2 = new ChildClass(); - child2.setBool(true); - - final ChildClass parent = new ChildClass(); - parent.setFirstChild(child1); - parent.setOtherChildren(Arrays.asList(child1, child2)); - parent.setNamedChildren(new HashMap() {{ - put("one", child1); - put("two", child2); - }}); - - TestClass test = new TestClass(); - test.setId("test"); - test.setListOfMaps(Arrays.>asList( - new HashMap() {{ - put("parent", parent); - }}, - new HashMap() {{ - put("parent", parent); - }}, - null - )); - test.setMapOfLists(new HashMap>() {{ - put("parent", Arrays.asList(child1, child2)); - put("child2", Collections.emptyList()); - put("child1", null); - }}); - - mapper.save(test); - - TestClass result = mapper.load(TestClass.class, "test"); - - Assert.assertEquals(test, result); - } - - @DynamoDbTable(tableName = "") - public static class TestClass { - - private String id; - private List> listOfMaps; - private Map> mapOfLists; - - @DynamoDbHashKey - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public List> getListOfMaps() { - return listOfMaps; - } - - public void setListOfMaps(List> listOfMaps) { - this.listOfMaps = listOfMaps; - } - - public Map> getMapOfLists() { - return mapOfLists; - } - - public void setMapOfLists(Map> mapOfLists) { - this.mapOfLists = mapOfLists; - } - - @Override - public boolean equals(Object obj) { - TestClass other = (TestClass) obj; - - return (eq(id, other.id) - && eq(listOfMaps, other.listOfMaps) - && eq(mapOfLists, other.mapOfLists)); - } - - @Override - public String toString() { - return "{id=" + id + ", listOfMaps=" + listOfMaps + ", mapOfLists=" - + mapOfLists + "}"; - } - } - - @DynamoDbDocument - public static class ChildClass { - - private boolean bool; - - private ChildClass firstChild; - private List otherChildren; - private Map namedChildren; - - public boolean isBool() { - return bool; - } - - public void setBool(boolean bool) { - this.bool = bool; - } - - public ChildClass getFirstChild() { - return firstChild; - } - - public void setFirstChild(ChildClass firstChild) { - this.firstChild = firstChild; - } - - public List getOtherChildren() { - return otherChildren; - } - - public void setOtherChildren(List otherChildren) { - this.otherChildren = otherChildren; - } - - public Map getNamedChildren() { - return namedChildren; - } - - public void setNamedChildren(Map namedChildren) { - this.namedChildren = namedChildren; - } - - @Override - public boolean equals(Object obj) { - ChildClass other = (ChildClass) obj; - - return (eq(bool, other.bool) - && eq(firstChild, other.firstChild) - && eq(otherChildren, other.otherChildren) - && eq(namedChildren, other.namedChildren)); - } - - @Override - public String toString() { - return "{bool=" + bool + ", firstChild=" + firstChild - + ", otherChildren=" + otherChildren + ", namedChildren=" - + namedChildren + "}"; - } - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/JsonMarshallerIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/JsonMarshallerIntegrationTest.java deleted file mode 100644 index 29863e4130a6..000000000000 --- 
a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/JsonMarshallerIntegrationTest.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.UUID; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; - -/** - * Status tests for {@code JsonMarshaller}. - */ -public class JsonMarshallerIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - @Test - public void testMarshalling() { - final DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - - final TestObject object1 = new TestObject(); - object1.setOneItem(new TestObject.OneItem()); - object1.addOneItem(new TestObject.OneItem(UUID.randomUUID().toString(), 1)); - object1.addOneItem(new TestObject.OneItem(UUID.randomUUID().toString(), 2)); - object1.addOneItem(new TestObject.OneItem(UUID.randomUUID().toString(), 3)); - object1.setTwoItem(new TestObject.TwoItem()); - object1.addTwoItem(new TestObject.TwoItem(UUID.randomUUID().toString(), new Date())); - - mapper.save(object1); - - assertNotNull(object1.getKey()); - - assertNotNull(object1.getOneItem()); - assertNotNull(object1.getOneItems()); - assertEquals(3, object1.getOneItems().size()); - - assertNotNull(object1.getTwoItem()); - assertNotNull(object1.getTwoItems()); - assertEquals(1, object1.getTwoItems().size()); - - final TestObject object2 = mapper.load(TestObject.class, object1.getKey()); - - assertEquals(object1.getKey(), object2.getKey()); - - assertEquals(object1.getOneItem().getId(), object2.getOneItem().getId()); - assertEquals(object1.getOneItem().getQuantity(), object2.getOneItem().getQuantity()); - assertEquals(object1.getOneItems().size(), object2.getOneItems().size()); - - for (int i = 0, its = object1.getOneItems().size(); i < its; i++) { - assertEquals(object1.getOneItems().get(i).getId(), object2.getOneItems().get(i).getId()); - assertEquals(object1.getOneItems().get(i).getQuantity(), object2.getOneItems().get(i).getQuantity()); - } - - assertEquals(object1.getTwoItem().getId(), object2.getTwoItem().getId()); - assertEquals(object1.getTwoItem().getDate(), object2.getTwoItem().getDate()); - assertEquals(object1.getTwoItems().size(), object2.getTwoItems().size()); - - for (int i = 0, its = object1.getTwoItems().size(); i < its; i++) { - assertEquals(object1.getTwoItems().get(i).getId(), object2.getTwoItems().get(i).getId()); - assertEquals(object1.getTwoItems().get(i).getDate(), object2.getTwoItems().get(i).getDate()); - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class TestObject { - private String key; - - - private OneItem aitem; - - - private List oneItems; - - - private TwoItem bitem; - - - private List twoItems; - - 
@DynamoDbHashKey - @DynamoDbAutoGeneratedKey - public String getKey() { - return this.key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbMarshalling(marshallerClass = OneItemJsonMarshaller.class) - public OneItem getOneItem() { - return this.aitem; - } - - public void setOneItem(OneItem aitem) { - this.aitem = aitem; - } - - @DynamoDbMarshalling(marshallerClass = OneListJsonMarshaller.class) - public List getOneItems() { - return this.oneItems; - } - - public void setOneItems(List oneItems) { - this.oneItems = oneItems; - } - - public void addOneItem(OneItem aitem) { - if (this.oneItems == null) { - this.oneItems = new ArrayList(); - } - this.oneItems.add(aitem); - } - - @DynamoDbMarshalling(marshallerClass = TwoItemJsonMarshaller.class) - public TwoItem getTwoItem() { - return this.bitem; - } - - public void setTwoItem(TwoItem bitem) { - this.bitem = bitem; - } - - @DynamoDbMarshalling(marshallerClass = TwoListJsonMarshaller.class) - public List getTwoItems() { - return this.twoItems; - } - - public void setTwoItems(List twoItems) { - this.twoItems = twoItems; - } - - public void addTwoItem(TwoItem bitem) { - if (this.twoItems == null) { - this.twoItems = new ArrayList(); - } - this.twoItems.add(bitem); - } - - public static class OneItemJsonMarshaller extends JsonMarshaller { - } - - public static class OneListJsonMarshaller extends JsonMarshaller { - public OneListJsonMarshaller() { - super(Type.class); - } - - ; - - public static final class Type extends ArrayList { - } - } - - public static class TwoItemJsonMarshaller extends JsonMarshaller { - } - - public static class TwoListJsonMarshaller extends JsonMarshaller { - public TwoListJsonMarshaller() { - super(Type.class); - } - - ; - - public static final class Type extends ArrayList { - } - } - - public static class OneItem { - private String id; - private Integer quantity; - - public OneItem(String id, Integer quantity) { - this.id = id; - this.quantity = quantity; - } - - public OneItem() { - this(null, null); - } - - public String getId() { - return this.id; - } - - public void setId(String id) { - this.id = id; - } - - public Integer getQuantity() { - return this.quantity; - } - - public void setQuantity(Integer quantity) { - this.quantity = quantity; - } - } - - public static class TwoItem { - private String id; - private Date date; - - public TwoItem(String id, Date date) { - this.id = id; - this.date = date; - } - - public TwoItem() { - this(null, null); - } - - public String getId() { - return this.id; - } - - public void setId(String id) { - this.id = id; - } - - public Date getDate() { - return this.date; - } - - public void setDate(Date date) { - this.date = date; - } - } - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3ClientCacheIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3ClientCacheIntegrationTest.java deleted file mode 100644 index d20d9cd134fb..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3ClientCacheIntegrationTest.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.net.URI; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.S3Client; - -@Ignore -// FIXME: Depends on S3 properly parsing region information from the endpoint (see AmazonS3#getRegionName()) -public class S3ClientCacheIntegrationTest { - private AwsBasicCredentials credentials; - - @Before - public void setUp() { - credentials = AwsBasicCredentials.create("mock", "mock"); - } - - @Test - public void testBadClientCache() throws Exception { - S3ClientCache s3cc = new S3ClientCache(credentials); - S3Client notAnAWSEndpoint = S3Client.builder() - .credentialsProvider(StaticCredentialsProvider.create(credentials)) - .endpointOverride(new URI("i.am.an.invalid.aws.endpoint.com")) - .build(); - - try { - s3cc.useClient(notAnAWSEndpoint, Region.US_EAST_2); - } catch (IllegalStateException e) { - assertTrue(e.getMessage().contains("No valid region has been specified. Unable to return region name")); - return; - } - - fail("Expected exception to be thrown"); - } - - @Test - public void testNonExistantRegion() throws Exception { - S3ClientCache s3cc = new S3ClientCache(credentials); - S3Client notAnAWSEndpoint = S3Client.builder() - .credentialsProvider(StaticCredentialsProvider.create(credentials)) - .endpointOverride(new URI("s3.mordor.amazonaws.com")) - .build(); - - try { - s3cc.useClient(notAnAWSEndpoint, Region.US_EAST_2); - } catch (IllegalStateException e) { - assertEquals("No valid region has been specified. Unable to return region name", e.getMessage()); - return; - } - - fail("Expected IllegalStateException to be thrown"); - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/document/QueryIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/document/QueryIntegrationTest.java deleted file mode 100644 index def8a2fcdd40..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/document/QueryIntegrationTest.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.document; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.math.BigDecimal; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Random; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbQueryExpression; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.pojos.RangeKeyClass; - -/** - * Integration tests for the query operation on DynamoDBMapper. - */ -public class QueryIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final long HASH_KEY = System.currentTimeMillis(); - private static final int TEST_ITEM_NUMBER = 500; - private static RangeKeyClass hashKeyObject; - private static DynamoDbMapper mapper; - - @BeforeClass - public static void setUp() throws Exception { - setUpTableWithRangeAttribute(); - - DynamoDbMapperConfig mapperConfig = new DynamoDbMapperConfig(DynamoDbMapperConfig.ConsistentRead.CONSISTENT); - mapper = new DynamoDbMapper(dynamo, mapperConfig); - - putTestData(mapper, TEST_ITEM_NUMBER); - - hashKeyObject = new RangeKeyClass(); - hashKeyObject.setKey(HASH_KEY); - } - - /** - * Use BatchSave to put some test data into the tested table. Each item is - * hash-keyed by the same value, and range-keyed by numbers starting from 0. 
- */ - private static void putTestData(DynamoDbMapper mapper, int itemNumber) { - List<RangeKeyClass> objs = new ArrayList<RangeKeyClass>(); - for (int i = 0; i < itemNumber; i++) { - RangeKeyClass obj = new RangeKeyClass(); - obj.setKey(HASH_KEY); - obj.setRangeKey(i); - obj.setBigDecimalAttribute(new BigDecimal(i)); - objs.add(obj); - } - mapper.batchSave(objs); - } - - @Test - public void testQueryWithPrimaryRangeKey() throws Exception { - DynamoDbQueryExpression<RangeKeyClass> queryExpression = - new DynamoDbQueryExpression<RangeKeyClass>() - .withHashKeyValues(hashKeyObject) - .withRangeKeyCondition( - "rangeKey", - Condition.builder() - .comparisonOperator(ComparisonOperator.GT) - .attributeValueList(AttributeValue.builder().n("1.0").build()) - .build()) - .withLimit(11); - List<RangeKeyClass> list = mapper.query(RangeKeyClass.class, queryExpression); - - int count = 0; - Iterator<RangeKeyClass> iterator = list.iterator(); - while (iterator.hasNext()) { - count++; - RangeKeyClass next = iterator.next(); - assertTrue(next.getRangeKey() > 1.00); - } - - int numMatchingObjects = TEST_ITEM_NUMBER - 2; - assertEquals(count, numMatchingObjects); - assertEquals(numMatchingObjects, list.size()); - - assertNotNull(list.get(list.size() / 2)); - assertTrue(list.contains(list.get(list.size() / 2))); - assertEquals(numMatchingObjects, list.toArray().length); - - Thread.sleep(250); - int totalCount = mapper.count(RangeKeyClass.class, queryExpression); - assertEquals(numMatchingObjects, totalCount); - - /** - * Tests query with only hash key - */ - queryExpression = new DynamoDbQueryExpression<RangeKeyClass>().withHashKeyValues(hashKeyObject); - list = mapper.query(RangeKeyClass.class, queryExpression); - assertEquals(TEST_ITEM_NUMBER, list.size()); - } - - /** - * Tests making queries using query filter on non-key attributes. - */ - @Test - public void testQueryFilter() { - // A random filter condition to be applied to the query. - Random random = new Random(); - int randomFilterValue = random.nextInt(TEST_ITEM_NUMBER); - Condition filterCondition = Condition.builder() - .comparisonOperator(ComparisonOperator.LT) - .attributeValueList( - AttributeValue.builder().n(Integer.toString(randomFilterValue)).build()).build(); - - /* - * (1) Apply the filter on the range key, in form of key condition - */ - DynamoDbQueryExpression<RangeKeyClass> queryWithRangeKeyCondition = - new DynamoDbQueryExpression<RangeKeyClass>() - .withHashKeyValues(hashKeyObject) - .withRangeKeyCondition("rangeKey", filterCondition); - List<RangeKeyClass> rangeKeyConditionResult = mapper.query(RangeKeyClass.class, queryWithRangeKeyCondition); - - /* - * (2) Apply the filter on the bigDecimalAttribute, in form of query filter - */ - DynamoDbQueryExpression<RangeKeyClass> queryWithQueryFilterCondition = - new DynamoDbQueryExpression<RangeKeyClass>() - .withHashKeyValues(hashKeyObject) - .withQueryFilter(Collections.singletonMap("bigDecimalAttribute", filterCondition)); - List<RangeKeyClass> queryFilterResult = mapper.query(RangeKeyClass.class, queryWithQueryFilterCondition); - - assertEquals(rangeKeyConditionResult.size(), queryFilterResult.size()); - for (int i = 0; i < rangeKeyConditionResult.size(); i++) { - assertEquals(rangeKeyConditionResult.get(i), queryFilterResult.get(i)); - } - } - - /** - * Tests that exception should be raised when user provides an index name - * when making query with the primary range key.
- */ - @Test - public void testUnnecessaryIndexNameException() { - try { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - long hashKey = System.currentTimeMillis(); - RangeKeyClass keyObject = new RangeKeyClass(); - keyObject.setKey(hashKey); - DynamoDbQueryExpression queryExpression = new DynamoDbQueryExpression() - .withHashKeyValues(keyObject); - queryExpression.withRangeKeyCondition("rangeKey", - Condition.builder().comparisonOperator(ComparisonOperator.GT.toString()) - .attributeValueList( - AttributeValue.builder().n("1.0").build()).build()).withLimit(11) - .withIndexName("some_index"); - mapper.query(RangeKeyClass.class, queryExpression); - fail("User should not provide index name when making query with the primary range key"); - } catch (IllegalArgumentException expected) { - System.out.println(expected.getMessage()); - } catch (Exception e) { - fail("Should trigger SdkClientException."); - } - - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/AbstractKeyAndValIntegrationTestCase.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/AbstractKeyAndValIntegrationTestCase.java deleted file mode 100644 index 3b9f495f3b73..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/AbstractKeyAndValIntegrationTestCase.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; - -import java.util.ArrayList; -import java.util.List; -import org.junit.After; -import org.junit.Before; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.pojos.KeyAndVal; - -/** - * Tests updating component attribute fields correctly. - */ -public abstract class AbstractKeyAndValIntegrationTestCase extends DynamoDBMapperIntegrationTestBase { - - /** - * The DynamoDBMapper instance. - */ - protected DynamoDbMapper util; - - /** - * Sets up the test case. - */ - protected final void setUpTest(final DynamoDbMapperConfig.SaveBehavior saveBehavior) { - this.util = new DynamoDbMapper(dynamo, new DynamoDbMapperConfig.Builder().withSaveBehavior(saveBehavior).build()); - } - - /** - * Sets up the test case. - */ - @Before - public void setUpTest() { - setUpTest(DynamoDbMapperConfig.DEFAULT.saveBehavior()); - } - - /** - * Tears down the test case. - */ - @After - public void tearDownTest() { - this.util = null; - } - - /** - * Assert that the object updated appropriately. - * - * @param changeExpected True if a change is expected. - * @param objects The objects. 
- */ - protected final <K, V> void assertBeforeAndAfterChange(final boolean changeExpected, - final List<? extends KeyAndVal<K, V>> objects) { - final List<V> befores = new ArrayList<V>(objects.size()); - for (final KeyAndVal<K, V> object : objects) { - befores.add(object.getVal()); - } - this.util.batchSave(objects); - for (int i = 0, its = objects.size(); i < its; i++) { - assertBeforeAndAfterChange(changeExpected, befores.get(i), objects.get(i).getVal()); - } - } - - /** - * Assert that the object updated appropriately. - * - * @param changeExpected True if a change is expected. - * @param object The object. - * @return The value if more assertions are required. - */ - protected final <K, V> V assertBeforeAndAfterChange(final Boolean changeExpected, final KeyAndVal<K, V> object) { - final V before = object.getVal(); - this.util.save(object); - final V after = object.getVal(); - if (changeExpected != null) { - assertBeforeAndAfterChange(changeExpected, before, after); - } - final KeyAndVal<K, V> reload = this.util.load(object.getClass(), object.getKey()); - assertNotNull(reload); - if (changeExpected != null) { - assertBeforeAndAfterChange(false, after, reload.getVal()); - assertBeforeAndAfterChange(changeExpected, before, reload.getVal()); - } - return reload.getVal(); - } - - /** - * Assert that the object updated appropriately. - * - * @param changeExpected True if a change is expected. - * @param before The before value. - * @param after The after value. - */ - protected final <V> void assertBeforeAndAfterChange(final boolean changeExpected, final V before, final V after) { - if (!changeExpected) { - assertEquals(String.format("Expected before[%s] and after[%s] to be equal", before, after), before, after); - } else if (before == null) { - assertNotNull(String.format("Expected after[%s] to not be null", after), after); - } else { - assertFalse(String.format("Expected before[%s] and after[%s] to not be equal", before, after), before.equals(after)); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/AutoGeneratedKeysIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/AutoGeneratedKeysIntegrationTest.java deleted file mode 100644 index f582c9d1d3f3..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/AutoGeneratedKeysIntegrationTest.java +++ /dev/null @@ -1,1112 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License.
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.fail; - -import java.util.Collections; -import java.util.UUID; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.utils.ImmutableMap; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.TableUtils; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbIndexHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbIndexRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbSaveExpression; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.Projection; -import software.amazon.awssdk.services.dynamodb.model.ProjectionType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; - -/** - * Tests using auto-generated keys for range keys, hash keys, or both. 
- */ -public class AutoGeneratedKeysIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String TABLE_NAME = "aws-java-sdk-string-range"; - - private static final String GSI_NAME = "gsi-with-autogenerated-keys"; - private static final String GSI_HASH_KEY = "gis-hash-key"; - private static final String GSI_RANGE_KEY = "gis-range-key"; - - @BeforeClass - public static void setUp() throws Exception { - DynamoDBMapperIntegrationTestBase.setUp(); - - String keyName = DynamoDBMapperIntegrationTestBase.KEY_NAME; - String rangeKeyAttributeName = "rangeKey"; - - CreateTableRequest createTableRequest = CreateTableRequest.builder() - .tableName(TABLE_NAME) - .keySchema( - KeySchemaElement.builder().attributeName(keyName).keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName(rangeKeyAttributeName).keyType(KeyType.RANGE).build()) - .globalSecondaryIndexes(GlobalSecondaryIndex.builder() - .indexName(GSI_NAME) - .keySchema( - KeySchemaElement.builder().attributeName(GSI_HASH_KEY).keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName(GSI_RANGE_KEY).keyType(KeyType.RANGE).build()) - .projection(Projection.builder().projectionType(ProjectionType.ALL).build()) - .provisionedThroughput(ProvisionedThroughput.builder().readCapacityUnits(3L).writeCapacityUnits(3L).build()).build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName(keyName).attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName(rangeKeyAttributeName).attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName(GSI_HASH_KEY).attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName(GSI_RANGE_KEY).attributeType(ScalarAttributeType.S).build()) - .provisionedThroughput(ProvisionedThroughput.builder().readCapacityUnits(10L).writeCapacityUnits(5L).build()).build(); - - if (TableUtils.createTableIfNotExists(dynamo, createTableRequest)) { - TableUtils.waitUntilActive(dynamo, TABLE_NAME); - } - } - - @Test - public void testHashKeyRangeKeyBothAutogenerated() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - HashKeyRangeKeyBothAutoGenerated obj = new HashKeyRangeKeyBothAutoGenerated(); - obj.setOtherAttribute("blah"); - - assertNull(obj.getKey()); - assertNull(obj.getRangeKey()); - mapper.save(obj); - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - - HashKeyRangeKeyBothAutoGenerated other = mapper.load(HashKeyRangeKeyBothAutoGenerated.class, obj.getKey(), - obj.getRangeKey()); - assertEquals(other, obj); - } - - @Test - public void testHashKeyRangeKeyBothAutogeneratedBatchWrite() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - HashKeyRangeKeyBothAutoGenerated obj = new HashKeyRangeKeyBothAutoGenerated(); - obj.setOtherAttribute("blah"); - HashKeyRangeKeyBothAutoGenerated obj2 = new HashKeyRangeKeyBothAutoGenerated(); - obj2.setOtherAttribute("blah"); - - assertNull(obj.getKey()); - assertNull(obj.getRangeKey()); - assertNull(obj2.getKey()); - assertNull(obj2.getRangeKey()); - mapper.batchSave(obj, obj2); - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - assertNotNull(obj2.getKey()); - assertNotNull(obj2.getRangeKey()); - - assertEquals(mapper.load(HashKeyRangeKeyBothAutoGenerated.class, obj.getKey(), - obj.getRangeKey()), obj); - assertEquals(mapper.load(HashKeyRangeKeyBothAutoGenerated.class, obj2.getKey(), - obj2.getRangeKey()), obj2); - } - - /** - * Tests providing additional expected conditions when 
saving item with - * auto-generated keys. - */ - @Test - public void testAutogeneratedKeyWithUserProvidedExpectedConditions() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - HashKeyRangeKeyBothAutoGenerated obj = new HashKeyRangeKeyBothAutoGenerated(); - obj.setOtherAttribute("blah"); - - assertNull(obj.getKey()); - assertNull(obj.getRangeKey()); - - // Add additional expected conditions via DynamoDBSaveExpression. - // Expected conditions joined by AND are compatible with the conditions - // for auto-generated keys. - DynamoDbSaveExpression saveExpression = new DynamoDbSaveExpression(); - saveExpression - .withExpected(Collections.singletonMap( - "otherAttribute", ExpectedAttributeValue.builder().exists(false).build())) - .withConditionalOperator(ConditionalOperator.AND); - // The save should succeed since the user provided conditions are joined by AND. - mapper.save(obj, saveExpression); - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - - HashKeyRangeKeyBothAutoGenerated other = mapper.load(HashKeyRangeKeyBothAutoGenerated.class, obj.getKey(), - obj.getRangeKey()); - assertEquals(other, obj); - - // Change the conditional operator to OR. - // IllegalArgumentException is expected since the additional expected - // conditions cannot be joined with the conditions for auto-generated - // keys. - saveExpression.setConditionalOperator(ConditionalOperator.OR); - try { - mapper.save(new HashKeyRangeKeyBothAutoGenerated(), saveExpression); - } catch (IllegalArgumentException expected) { - // Expected. - } - - // User-provided OR conditions should work if they completely override the generated conditions. - saveExpression - .withExpected(ImmutableMap.of( - "otherAttribute", ExpectedAttributeValue.builder().exists(false).build(), - "key", ExpectedAttributeValue.builder().exists(false).build(), - "rangeKey", ExpectedAttributeValue.builder().exists(false).build())) - .withConditionalOperator(ConditionalOperator.OR); - mapper.save(new HashKeyRangeKeyBothAutoGenerated(), saveExpression); - - saveExpression - .withExpected(ImmutableMap.of( - "otherAttribute", ExpectedAttributeValue.builder().value(AttributeValue.builder().s("non-existent-value").build()).build(), - "key", ExpectedAttributeValue.builder().value(AttributeValue.builder().s("non-existent-value").build()).build(), - "rangeKey", ExpectedAttributeValue.builder().value(AttributeValue.builder().s("non-existent-value").build()).build())) - .withConditionalOperator(ConditionalOperator.OR); - try { - mapper.save(new HashKeyRangeKeyBothAutoGenerated(), saveExpression); - } catch (ConditionalCheckFailedException expected) { - // Expected. 
- } - } - - @Test - public void testHashKeyAutogenerated() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - HashKeyAutoGenerated obj = new HashKeyAutoGenerated(); - obj.setOtherAttribute("blah"); - obj.setRangeKey("" + System.currentTimeMillis()); - - assertNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - mapper.save(obj); - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - - HashKeyAutoGenerated other = mapper.load(HashKeyAutoGenerated.class, obj.getKey(), obj.getRangeKey()); - assertEquals(other, obj); - } - - @Test - public void testRangeKeyAutogenerated() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - RangeKeyAutoGenerated obj = new RangeKeyAutoGenerated(); - obj.setOtherAttribute("blah"); - obj.setKey("" + System.currentTimeMillis()); - - assertNotNull(obj.getKey()); - assertNull(obj.getRangeKey()); - mapper.save(obj); - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - - RangeKeyAutoGenerated other = mapper.load(RangeKeyAutoGenerated.class, obj.getKey(), obj.getRangeKey()); - assertEquals(other, obj); - } - - @Test - public void testNothingAutogenerated() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - NothingAutoGenerated obj = new NothingAutoGenerated(); - obj.setOtherAttribute("blah"); - obj.setKey("" + System.currentTimeMillis()); - obj.setRangeKey("" + System.currentTimeMillis()); - - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - mapper.save(obj); - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - - NothingAutoGenerated other = mapper.load(NothingAutoGenerated.class, obj.getKey(), obj.getRangeKey()); - assertEquals(other, obj); - } - - @Test - public void testNothingAutogeneratedErrors() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - NothingAutoGenerated obj = new NothingAutoGenerated(); - - try { - mapper.save(obj); - fail("Expected a mapping exception"); - } catch (DynamoDbMappingException expected) { - // Expected. - } - - obj.setKey("" + System.currentTimeMillis()); - try { - mapper.save(obj); - fail("Expected a mapping exception"); - } catch (DynamoDbMappingException expected) { - // Expected. - } - - obj.setRangeKey("" + System.currentTimeMillis()); - obj.setKey(null); - try { - mapper.save(obj); - fail("Expected a mapping exception"); - } catch (DynamoDbMappingException expected) { - // Expected. - } - - obj.setRangeKey(""); - obj.setKey("" + System.currentTimeMillis()); - try { - mapper.save(obj); - fail("Expected a mapping exception"); - } catch (DynamoDbMappingException expected) { - // Expected. 
- } - - obj.setRangeKey("" + System.currentTimeMillis()); - mapper.save(obj); - } - - @Test - public void testHashKeyRangeKeyBothAutogeneratedKeyOnly() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - HashKeyRangeKeyBothAutoGeneratedKeyOnly obj = new HashKeyRangeKeyBothAutoGeneratedKeyOnly(); - - assertNull(obj.getKey()); - assertNull(obj.getRangeKey()); - mapper.save(obj); - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - - HashKeyRangeKeyBothAutoGeneratedKeyOnly other = mapper.load(HashKeyRangeKeyBothAutoGeneratedKeyOnly.class, obj.getKey(), - obj.getRangeKey()); - assertEquals(other, obj); - } - - @Test - public void testHashKeyAutogeneratedKeyOnly() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - HashKeyAutoGeneratedKeyOnly obj = new HashKeyAutoGeneratedKeyOnly(); - obj.setRangeKey("" + System.currentTimeMillis()); - - assertNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - mapper.save(obj); - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - - HashKeyAutoGeneratedKeyOnly other = mapper.load(HashKeyAutoGeneratedKeyOnly.class, obj.getKey(), obj.getRangeKey()); - assertEquals(other, obj); - } - - @Test - public void testRangeKeyAutogeneratedKeyOnly() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - RangeKeyAutoGeneratedKeyOnly obj = new RangeKeyAutoGeneratedKeyOnly(); - obj.setKey("" + System.currentTimeMillis()); - - assertNotNull(obj.getKey()); - assertNull(obj.getRangeKey()); - mapper.save(obj); - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - - RangeKeyAutoGeneratedKeyOnly other = mapper.load(RangeKeyAutoGeneratedKeyOnly.class, obj.getKey(), obj.getRangeKey()); - assertEquals(other, obj); - } - - @Test - public void testNothingAutogeneratedKeyOnly() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - NothingAutoGeneratedKeyOnly obj = new NothingAutoGeneratedKeyOnly(); - obj.setKey("" + System.currentTimeMillis()); - obj.setRangeKey("" + System.currentTimeMillis()); - - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - mapper.save(obj); - assertNotNull(obj.getKey()); - assertNotNull(obj.getRangeKey()); - - NothingAutoGeneratedKeyOnly other = mapper.load(NothingAutoGeneratedKeyOnly.class, obj.getKey(), obj.getRangeKey()); - assertEquals(other, obj); - } - - @Test - public void testNothingAutogeneratedKeyOnlyErrors() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - NothingAutoGeneratedKeyOnly obj = new NothingAutoGeneratedKeyOnly(); - - try { - mapper.save(obj); - fail("Expected a mapping exception"); - } catch (DynamoDbMappingException expected) { - // Expected. - } - - obj.setKey("" + System.currentTimeMillis()); - try { - mapper.save(obj); - fail("Expected a mapping exception"); - } catch (DynamoDbMappingException expected) { - // Expected. - } - - obj.setRangeKey("" + System.currentTimeMillis()); - obj.setKey(null); - try { - mapper.save(obj); - fail("Expected a mapping exception"); - } catch (DynamoDbMappingException expected) { - // Expected. - } - - obj.setRangeKey(""); - obj.setKey("" + System.currentTimeMillis()); - try { - mapper.save(obj); - fail("Expected a mapping exception"); - } catch (DynamoDbMappingException expected) { - // Expected. 
- } - - obj.setRangeKey("" + System.currentTimeMillis()); - mapper.save(obj); - } - - @Test - public void testIndexKeyWithAutogeneratedAnnotation_StillRequirePrimaryKeyValue() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - PrimaryKeysNotAutogeneratedIndexKeysAutogenerated obj = new PrimaryKeysNotAutogeneratedIndexKeysAutogenerated(); - - try { - mapper.save(obj); - fail("DynamoDBMappingException is expected."); - } catch (DynamoDbMappingException expected) { - // Expected. - } - - obj.setGsiHashKey("foo"); - obj.setGsiRangeKey("foo"); - try { - mapper.save(obj); - fail("DynamoDBMappingException is expected."); - } catch (DynamoDbMappingException expected) { - // Expected. - } - } - - @Test - public void testIndexKeyWithAutogeneratedAnnotation_AutogenerateIndexKeyValueIfNull() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - PrimaryKeysNotAutogeneratedIndexKeysAutogenerated obj = new PrimaryKeysNotAutogeneratedIndexKeysAutogenerated(); - - String randomPrimaryKeyValue = UUID.randomUUID().toString(); - obj.setKey(randomPrimaryKeyValue); - obj.setRangeKey(randomPrimaryKeyValue); - - assertNull(obj.getGsiHashKey()); - assertNull(obj.getGsiRangeKey()); - mapper.save(obj); - - // check in-memory value - assertNotNull(obj.getGsiHashKey()); - assertNotNull(obj.getGsiRangeKey()); - - PrimaryKeysNotAutogeneratedIndexKeysAutogenerated retrieved = mapper.load(obj); - assertEquals(obj, retrieved); - } - - @Test - public void testIndexKeyWithAutogeneratedAnnotation_DoNotAutogenerateIndexKeyValueIfAlreadySpecified() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - PrimaryKeysNotAutogeneratedIndexKeysAutogenerated obj = new PrimaryKeysNotAutogeneratedIndexKeysAutogenerated(); - - String randomValue = UUID.randomUUID().toString(); - obj.setKey(randomValue); - obj.setRangeKey(randomValue); - obj.setGsiHashKey(randomValue); - obj.setGsiRangeKey(randomValue); - mapper.save(obj); - - // check in-memory value - assertEquals(randomValue, obj.getGsiHashKey()); - assertEquals(randomValue, obj.getGsiRangeKey()); - - PrimaryKeysNotAutogeneratedIndexKeysAutogenerated retrieved = mapper.load(obj); - assertEquals(obj, retrieved); - } - - @DynamoDbTable(tableName = TABLE_NAME) - public static class HashKeyRangeKeyBothAutoGenerated { - - private String key; - private String rangeKey; - private String otherAttribute; - - @DynamoDbAutoGeneratedKey - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAutoGeneratedKey - @DynamoDbRangeKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - public String getOtherAttribute() { - return otherAttribute; - } - - public void setOtherAttribute(String otherAttribute) { - this.otherAttribute = otherAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((otherAttribute == null) ? 0 : otherAttribute.hashCode()); - result = prime * result + ((rangeKey == null) ? 
0 : rangeKey.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - HashKeyRangeKeyBothAutoGenerated other = (HashKeyRangeKeyBothAutoGenerated) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (otherAttribute == null) { - if (other.otherAttribute != null) { - return false; - } - } else if (!otherAttribute.equals(other.otherAttribute)) { - return false; - } - if (rangeKey == null) { - if (other.rangeKey != null) { - return false; - } - } else if (!rangeKey.equals(other.rangeKey)) { - return false; - } - return true; - } - } - - @DynamoDbTable(tableName = TABLE_NAME) - public static class HashKeyAutoGenerated { - - private String key; - private String rangeKey; - private String otherAttribute; - - @DynamoDbAutoGeneratedKey - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbRangeKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - public String getOtherAttribute() { - return otherAttribute; - } - - public void setOtherAttribute(String otherAttribute) { - this.otherAttribute = otherAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((otherAttribute == null) ? 0 : otherAttribute.hashCode()); - result = prime * result + ((rangeKey == null) ? 0 : rangeKey.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - HashKeyAutoGenerated other = (HashKeyAutoGenerated) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (otherAttribute == null) { - if (other.otherAttribute != null) { - return false; - } - } else if (!otherAttribute.equals(other.otherAttribute)) { - return false; - } - if (rangeKey == null) { - if (other.rangeKey != null) { - return false; - } - } else if (!rangeKey.equals(other.rangeKey)) { - return false; - } - return true; - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-string-range") - public static class RangeKeyAutoGenerated { - - private String key; - private String rangeKey; - private String otherAttribute; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAutoGeneratedKey - @DynamoDbRangeKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - public String getOtherAttribute() { - return otherAttribute; - } - - public void setOtherAttribute(String otherAttribute) { - this.otherAttribute = otherAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((otherAttribute == null) ? 0 : otherAttribute.hashCode()); - result = prime * result + ((rangeKey == null) ? 
0 : rangeKey.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - RangeKeyAutoGenerated other = (RangeKeyAutoGenerated) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (otherAttribute == null) { - if (other.otherAttribute != null) { - return false; - } - } else if (!otherAttribute.equals(other.otherAttribute)) { - return false; - } - if (rangeKey == null) { - if (other.rangeKey != null) { - return false; - } - } else if (!rangeKey.equals(other.rangeKey)) { - return false; - } - return true; - } - } - - @DynamoDbTable(tableName = TABLE_NAME) - public static class NothingAutoGenerated { - - private String key; - private String rangeKey; - private String otherAttribute; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbRangeKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - public String getOtherAttribute() { - return otherAttribute; - } - - public void setOtherAttribute(String otherAttribute) { - this.otherAttribute = otherAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((otherAttribute == null) ? 0 : otherAttribute.hashCode()); - result = prime * result + ((rangeKey == null) ? 0 : rangeKey.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - NothingAutoGenerated other = (NothingAutoGenerated) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (otherAttribute == null) { - if (other.otherAttribute != null) { - return false; - } - } else if (!otherAttribute.equals(other.otherAttribute)) { - return false; - } - if (rangeKey == null) { - if (other.rangeKey != null) { - return false; - } - } else if (!rangeKey.equals(other.rangeKey)) { - return false; - } - return true; - } - } - - @DynamoDbTable(tableName = TABLE_NAME) - public static class HashKeyRangeKeyBothAutoGeneratedKeyOnly { - - private String key; - private String rangeKey; - - @DynamoDbAutoGeneratedKey - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAutoGeneratedKey - @DynamoDbRangeKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((rangeKey == null) ? 
0 : rangeKey.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - HashKeyRangeKeyBothAutoGeneratedKeyOnly other = (HashKeyRangeKeyBothAutoGeneratedKeyOnly) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (rangeKey == null) { - if (other.rangeKey != null) { - return false; - } - } else if (!rangeKey.equals(other.rangeKey)) { - return false; - } - return true; - } - } - - @DynamoDbTable(tableName = TABLE_NAME) - public static class HashKeyAutoGeneratedKeyOnly { - - private String key; - private String rangeKey; - - @DynamoDbAutoGeneratedKey - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbRangeKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((rangeKey == null) ? 0 : rangeKey.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - HashKeyAutoGeneratedKeyOnly other = (HashKeyAutoGeneratedKeyOnly) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (rangeKey == null) { - if (other.rangeKey != null) { - return false; - } - } else if (!rangeKey.equals(other.rangeKey)) { - return false; - } - return true; - } - - } - - @DynamoDbTable(tableName = TABLE_NAME) - public static class RangeKeyAutoGeneratedKeyOnly { - - private String key; - private String rangeKey; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAutoGeneratedKey - @DynamoDbRangeKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((rangeKey == null) ? 
0 : rangeKey.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - RangeKeyAutoGeneratedKeyOnly other = (RangeKeyAutoGeneratedKeyOnly) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (rangeKey == null) { - if (other.rangeKey != null) { - return false; - } - } else if (!rangeKey.equals(other.rangeKey)) { - return false; - } - return true; - } - - } - - @DynamoDbTable(tableName = TABLE_NAME) - public static class NothingAutoGeneratedKeyOnly { - - private String key; - private String rangeKey; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbRangeKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((rangeKey == null) ? 0 : rangeKey.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - NothingAutoGeneratedKeyOnly other = (NothingAutoGeneratedKeyOnly) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (rangeKey == null) { - if (other.rangeKey != null) { - return false; - } - } else if (!rangeKey.equals(other.rangeKey)) { - return false; - } - return true; - } - } - - @DynamoDbTable(tableName = TABLE_NAME) - public static class PrimaryKeysNotAutogeneratedIndexKeysAutogenerated { - - private String key; - private String rangeKey; - private String gsiHashKey; - private String gsiRangeKey; - - private static boolean isEqual(Object a, Object b) { - if (a == null || b == null) { - return a == null && b == null; - } - return a.equals(b); - } - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbRangeKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - @DynamoDbIndexHashKey(globalSecondaryIndexName = GSI_NAME, attributeName = GSI_HASH_KEY) - @DynamoDbAutoGeneratedKey - public String getGsiHashKey() { - return gsiHashKey; - } - - public void setGsiHashKey(String gsiHashKey) { - this.gsiHashKey = gsiHashKey; - } - - @DynamoDbIndexRangeKey(globalSecondaryIndexName = GSI_NAME, attributeName = GSI_RANGE_KEY) - @DynamoDbAutoGeneratedKey - public String getGsiRangeKey() { - return gsiRangeKey; - } - - public void setGsiRangeKey(String gsiRangeKey) { - this.gsiRangeKey = gsiRangeKey; - } - - @Override - public boolean equals(Object object) { - if (!(object instanceof PrimaryKeysNotAutogeneratedIndexKeysAutogenerated)) { - return false; - } - PrimaryKeysNotAutogeneratedIndexKeysAutogenerated other = (PrimaryKeysNotAutogeneratedIndexKeysAutogenerated) object; - - return isEqual(this.getKey(), other.getKey()) - && isEqual(this.getRangeKey(), other.getRangeKey()) - && isEqual(this.getGsiHashKey(), other.getGsiHashKey()) - && isEqual(this.getGsiRangeKey(), other.getGsiRangeKey()); - } - - } 
-} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/AutoGeneratedTimestampIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/AutoGeneratedTimestampIntegrationTest.java deleted file mode 100644 index 03048dd2d003..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/AutoGeneratedTimestampIntegrationTest.java +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.List; -import java.util.UUID; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGenerateStrategy; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedTimestamp; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.SaveBehavior; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.pojos.AutoKeyAndVal; -import software.amazon.awssdk.services.dynamodb.pojos.KeyAndVal; - -/** - * Tests updating component attribute fields correctly. - */ -public class AutoGeneratedTimestampIntegrationTest extends AbstractKeyAndValIntegrationTestCase { - - /** - * Test using {@code Calendar}. - */ - @Test - public void testCalendarType() { - final KeyAndCalendarTimestamp object = new KeyAndCalendarTimestamp(); - assertBeforeAndAfterChange(true, object); - } - - /** - * Test using {@code Date}. - */ - @Test - public void testDateType() { - final KeyAndDateTimestamp object = new KeyAndDateTimestamp(); - - assertBeforeAndAfterChange(true, object); - } - - /** - * Test using a {@code Long}. - */ - @Test - public void testLongType() { - final KeyAndLongTimestamp object = new KeyAndLongTimestamp(); - - assertBeforeAndAfterChange(true, object); - } - - /** - * Test {@code DynamoDBAutoGenerateStrategy} of {@code ALWAYS}. - */ - @Test - public void testAlwaysStrategy() { - final KeyAndDateTimestamp object = new KeyAndDateTimestamp(); - - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(true, object); - } - - /** - * Test {@code DynamoDBAutoGenerateStrategy} of {@code ALWAYS}. - */ - @Test - public void testAlwaysStrategyUpdateSkipNullAttribute() { - setUpTest(SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES); - - final KeyAndDateTimestamp object = new KeyAndDateTimestamp(); - - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(true, object); - } - - /** - * Test {@code DynamoDBAutoGenerateStrategy} of {@code CREATE}. 
- */ - @Test - public void testCreateStrategy() { - final KeyAndOnCreateDateTimestamp object = new KeyAndOnCreateDateTimestamp(); - - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(false, object); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test {@code DynamoDBAutoGenerateStrategy} of {@code CREATE}. - */ - @Test - public void testCreateStrategyUpdateSkipNullAttributes() { - setUpTest(SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES); - - final KeyAndOnCreateDateTimestamp object = new KeyAndOnCreateDateTimestamp(); - - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(false, object); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test {@code DynamoDBAutoGenerateStrategy} of {@code ALWAYS}. - */ - @Test - public void testAlwaysNoKey() { - final NoKeyAndOnAlwaysDateTimestamp object = new NoKeyAndOnAlwaysDateTimestamp(); - object.setKey(UUID.randomUUID().toString()); - - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(true, object); - } - - /** - * Test {@code DynamoDBAutoGenerateStrategy} of {@code ALWAYS}. - */ - @Test - public void testAlwaysNoKeyUpdateSkipNullAttributes() { - setUpTest(SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES); - - final NoKeyAndOnAlwaysDateTimestamp object = new NoKeyAndOnAlwaysDateTimestamp(); - object.setKey(UUID.randomUUID().toString()); - - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(true, object); - } - - /** - * Test {@code DynamoDBAutoGenerateStrategy} of {@code CREATE}. - */ - @Test - public void testCreateNoKey() { - final NoKeyAndOnCreateDateTimestamp object = new NoKeyAndOnCreateDateTimestamp(); - object.setKey(UUID.randomUUID().toString()); - - assertBeforeAndAfterChange(true, object); - assertBeforeAndAfterChange(false, object); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test {@code DynamoDBAutoGenerateStrategy} of {@code CREATE}. - */ - @Test - public void testCreateNoKeyUpdateSkipNullAttributes() { - setUpTest(SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES); - - final NoKeyAndOnCreateDateTimestamp object = new NoKeyAndOnCreateDateTimestamp(); - object.setKey(UUID.randomUUID().toString()); - - assertBeforeAndAfterChange(false, object); - assertBeforeAndAfterChange(false, object); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test the batch save. - */ - @Test - public void testAlwaysOnBatchSave() { - final List objects = new ArrayList(); - for (int i = 0; i < 10; i++) { - objects.add(new KeyAndDateTimestamp()); - } - - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(true, objects); - } - - /** - * Test the batch save. - */ - @Test - public void testAlwaysOnBatchSaveUpdateSkipNullAttributes() { - setUpTest(SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES); - - final List objects = new ArrayList(); - for (int i = 0; i < 10; i++) { - objects.add(new KeyAndDateTimestamp()); - } - - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(true, objects); - } - - /** - * Test the batch save. 
- */ - @Test - public void testCreateOnBatchSave() { - final List objects = new ArrayList(); - for (int i = 0; i < 10; i++) { - objects.add(new KeyAndOnCreateDateTimestamp()); - } - - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(false, objects); - assertBeforeAndAfterChange(false, objects); - } - - /** - * Test the batch save. - */ - @Test - public void testCreateOnBatchSaveUpdateSkipNullAttributes() { - setUpTest(SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES); - - final List objects = new ArrayList(); - for (int i = 0; i < 10; i++) { - objects.add(new KeyAndOnCreateDateTimestamp()); - } - - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(false, objects); - assertBeforeAndAfterChange(false, objects); - } - - /** - * Test the batch save. - */ - @Test - public void testAlwaysNoKeyOnBatchSave() { - final List objects = new ArrayList(); - for (int i = 0; i < 10; i++) { - final NoKeyAndOnAlwaysDateTimestamp object = new NoKeyAndOnAlwaysDateTimestamp(); - object.setKey(UUID.randomUUID().toString()); - objects.add(object); - } - - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(true, objects); - } - - /** - * Test the batch save. - */ - @Test - public void testAlwaysNoKeyOnBatchSaveUpdateSkipNullAttributes() { - setUpTest(SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES); - - final List objects = new ArrayList(); - for (int i = 0; i < 10; i++) { - final NoKeyAndOnAlwaysDateTimestamp object = new NoKeyAndOnAlwaysDateTimestamp(); - object.setKey(UUID.randomUUID().toString()); - objects.add(object); - } - - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(true, objects); - } - - /** - * Test the batch save. - */ - @Test - public void testCreateNoKeyOnBatchSave() { - final List objects = new ArrayList(); - for (int i = 0; i < 10; i++) { - final NoKeyAndOnCreateDateTimestamp object = new NoKeyAndOnCreateDateTimestamp(); - object.setKey(UUID.randomUUID().toString()); - objects.add(object); - } - - assertBeforeAndAfterChange(true, objects); - assertBeforeAndAfterChange(false, objects); - assertBeforeAndAfterChange(false, objects); - } - - /** - * Test the batch save. - */ - @Test - public void testCreateNoKeyOnBatchSaveUpdateSkipNullAttributes() { - setUpTest(SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES); - - final List objects = new ArrayList(); - for (int i = 0; i < 10; i++) { - final NoKeyAndOnCreateDateTimestamp object = new NoKeyAndOnCreateDateTimestamp(); - object.setKey(UUID.randomUUID().toString()); - objects.add(object); - } - - assertBeforeAndAfterChange(false, objects); - assertBeforeAndAfterChange(false, objects); - assertBeforeAndAfterChange(false, objects); - } - - /** - * An object with {@code Calendar}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndCalendarTimestamp extends AutoKeyAndVal { - @DynamoDbAutoGeneratedTimestamp - public Calendar getVal() { - return super.getVal(); - } - - public void setVal(final Calendar val) { - super.setVal(val); - } - } - - /** - * An object with {@code Date}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDateTimestamp extends AutoKeyAndVal { - @DynamoDbAutoGeneratedTimestamp - public Date getVal() { - return super.getVal(); - } - - public void setVal(final Date val) { - super.setVal(val); - } - } - - /** - * An object with {@code Long}. 
- */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndLongTimestamp extends AutoKeyAndVal { - @DynamoDbAutoGeneratedTimestamp - public Long getVal() { - return super.getVal(); - } - - public void setVal(final Long val) { - super.setVal(val); - } - } - - /** - * An object with {@code Date} only on {@code CREATE}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndOnCreateDateTimestamp extends AutoKeyAndVal { - @DynamoDbAutoGeneratedTimestamp(strategy = DynamoDbAutoGenerateStrategy.CREATE) - public Date getVal() { - return super.getVal(); - } - - public void setVal(final Date val) { - super.setVal(val); - } - } - - /** - * An object with {@code Date} not auto-generted key. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class NoKeyAndOnAlwaysDateTimestamp extends KeyAndVal { - @DynamoDbHashKey - public String getKey() { - return super.getKey(); - } - - public void setKey(final String key) { - super.setKey(key); - } - - @DynamoDbAutoGeneratedTimestamp - public Date getVal() { - return super.getVal(); - } - - public void setVal(final Date val) { - super.setVal(val); - } - } - - /** - * An object with {@code Date} not auto-generted key. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class NoKeyAndOnCreateDateTimestamp extends KeyAndVal { - @DynamoDbHashKey - public String getKey() { - return super.getKey(); - } - - public void setKey(final String key) { - super.setKey(key); - } - - @DynamoDbAutoGeneratedTimestamp(strategy = DynamoDbAutoGenerateStrategy.CREATE) - public Date getVal() { - return super.getVal(); - } - - public void setVal(final Date val) { - super.setVal(val); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/BatchWriteIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/BatchWriteIntegrationTest.java deleted file mode 100644 index d17b90385d4f..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/BatchWriteIntegrationTest.java +++ /dev/null @@ -1,438 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.fail; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Calendar; -import java.util.Collections; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper.FailedBatch; -import software.amazon.awssdk.services.dynamodb.pojos.BinaryAttributeByteBufferClass; -import software.amazon.awssdk.services.dynamodb.pojos.RangeKeyClass; - -/** - * Tests batch write calls - */ -public class BatchWriteIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - // We don't start with the current system millis like other tests because - // it's out of the range of some data types - private static int start = 1; - private static int byteStart = 1; - private static int startKeyDebug = 1; - - @BeforeClass - public static void setUp() throws Exception { - setUpTableWithRangeAttribute(); - } - - @Test - public void testBatchSave() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 40; i++) { - NumberSetAttributeClass obj = getUniqueNumericObject(); - objs.add(obj); - } - - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - List failedBatches = mapper.batchSave(objs); - - assertEquals(0, failedBatches.size()); - - for (NumberSetAttributeClass obj : objs) { - NumberSetAttributeClass loaded = mapper.load(NumberSetAttributeClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - } - - @Test - public void testBatchSaveAsArray() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 40; i++) { - NumberSetAttributeClass obj = getUniqueNumericObject(); - objs.add(obj); - } - - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - NumberSetAttributeClass[] objsArray = objs.toArray(new NumberSetAttributeClass[objs.size()]); - mapper.batchSave((Object[]) objsArray); - - for (NumberSetAttributeClass obj : objs) { - NumberSetAttributeClass loaded = mapper.load(NumberSetAttributeClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - } - - @Test - public void testBatchSaveAsListFromArray() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 40; i++) { - NumberSetAttributeClass obj = getUniqueNumericObject(); - objs.add(obj); - } - - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - NumberSetAttributeClass[] objsArray = objs.toArray(new NumberSetAttributeClass[objs.size()]); - mapper.batchSave(Arrays.asList(objsArray)); - - for (NumberSetAttributeClass obj : objs) { - NumberSetAttributeClass loaded = mapper.load(NumberSetAttributeClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - } - - @Test - public void testBatchDelete() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 40; i++) { - NumberSetAttributeClass obj = getUniqueNumericObject(); - objs.add(obj); - } - - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - mapper.batchSave(objs); - - for (NumberSetAttributeClass obj : objs) { - NumberSetAttributeClass loaded = 
mapper.load(NumberSetAttributeClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - - // Delete the odd ones - int i = 0; - List toDelete = new LinkedList(); - for (NumberSetAttributeClass obj : objs) { - if (i++ % 2 == 0) { - toDelete.add(obj); - } - } - - mapper.batchDelete(toDelete); - - i = 0; - for (NumberSetAttributeClass obj : objs) { - NumberSetAttributeClass loaded = mapper.load(NumberSetAttributeClass.class, obj.getKey()); - if (i++ % 2 == 0) { - assertNull(loaded); - } else { - assertEquals(obj, loaded); - } - } - } - - @Test - public void testBatchSaveAndDelete() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 40; i++) { - NumberSetAttributeClass obj = getUniqueNumericObject(); - objs.add(obj); - } - - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - mapper.batchSave(objs); - - for (NumberSetAttributeClass obj : objs) { - NumberSetAttributeClass loaded = mapper.load(NumberSetAttributeClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - - // Delete the odd ones - int i = 0; - List toDelete = new LinkedList(); - for (NumberSetAttributeClass obj : objs) { - if (i++ % 2 == 0) { - toDelete.add(obj); - } - } - - // And add a bunch of new ones - List toSave = new LinkedList(); - for (i = 0; i < 50; i++) { - NumberSetAttributeClass obj = getUniqueNumericObject(); - toSave.add(obj); - } - - mapper.batchWrite(toSave, toDelete); - - i = 0; - for (NumberSetAttributeClass obj : objs) { - NumberSetAttributeClass loaded = mapper.load(NumberSetAttributeClass.class, obj.getKey()); - if (i++ % 2 == 0) { - assertNull(loaded); - } else { - assertEquals(obj, loaded); - } - } - - for (NumberSetAttributeClass obj : toSave) { - NumberSetAttributeClass loaded = mapper.load(NumberSetAttributeClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - } - - @Test - public void testMultipleTables() throws Exception { - - List objs = new ArrayList(); - int numItems = 10; - for (int i = 0; i < numItems; i++) { - NumberSetAttributeClass obj = getUniqueNumericObject(); - objs.add(obj); - } - for (int i = 0; i < numItems; i++) { - RangeKeyClass obj = getUniqueRangeKeyObject(); - objs.add(obj); - } - Collections.shuffle(objs); - - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - List failedBatches = mapper.batchSave(objs); - assertEquals(failedBatches.size(), 0); - - for (Object obj : objs) { - Object loaded = null; - if (obj instanceof NumberSetAttributeClass) { - loaded = mapper.load(NumberSetAttributeClass.class, ((NumberSetAttributeClass) obj).getKey()); - } else if (obj instanceof RangeKeyClass) { - loaded = mapper.load(RangeKeyClass.class, ((RangeKeyClass) obj).getKey(), - ((RangeKeyClass) obj).getRangeKey()); - } else { - fail(); - } - assertEquals(obj, loaded); - } - - // Delete the odd ones - int i = 0; - List toDelete = new LinkedList(); - for (Object obj : objs) { - if (i++ % 2 == 0) { - toDelete.add(obj); - } - } - - // And add a bunch of new ones - List toSave = new LinkedList(); - for (i = 0; i < numItems; i++) { - if (i % 2 == 0) { - toSave.add(getUniqueNumericObject()); - } else { - toSave.add(getUniqueRangeKeyObject()); - } - } - - failedBatches = mapper.batchWrite(toSave, toDelete); - assertEquals(0, failedBatches.size()); - - i = 0; - for (Object obj : objs) { - Object loaded = null; - if (obj instanceof NumberSetAttributeClass) { - loaded = mapper.load(NumberSetAttributeClass.class, ((NumberSetAttributeClass) obj).getKey()); - } else if (obj instanceof RangeKeyClass) { - loaded = mapper.load(RangeKeyClass.class, ((RangeKeyClass) 
obj).getKey(), - ((RangeKeyClass) obj).getRangeKey()); - } else { - fail(); - } - - if (i++ % 2 == 0) { - assertNull(loaded); - } else { - assertEquals(obj, loaded); - } - } - - for (Object obj : toSave) { - Object loaded = null; - if (obj instanceof NumberSetAttributeClass) { - loaded = mapper.load(NumberSetAttributeClass.class, ((NumberSetAttributeClass) obj).getKey()); - } else if (obj instanceof RangeKeyClass) { - loaded = mapper.load(RangeKeyClass.class, ((RangeKeyClass) obj).getKey(), - ((RangeKeyClass) obj).getRangeKey()); - } else { - fail(); - } - assertEquals(obj, loaded); - } - } - - /** - * Test whether it finish processing all the items even if the first batch is failed. - */ - @Test - public void testErrorHandling() { - - List objs = new ArrayList(); - int numItems = 25; - - for (int i = 0; i < numItems; i++) { - NoSuchTableClass obj = getuniqueBadObject(); - objs.add(obj); - } - - for (int i = 0; i < numItems; i++) { - RangeKeyClass obj = getUniqueRangeKeyObject(); - objs.add(obj); - } - - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - - // The failed batch - List failedBatches = mapper.batchSave(objs); - assertEquals(1, failedBatches.size()); - assertEquals(numItems, failedBatches.get(0).getUnprocessedItems().get("tableNotExist").size()); - - // The second batch succeeds, get them back - for (Object obj : objs.subList(25, 50)) { - RangeKeyClass loaded = mapper - .load(RangeKeyClass.class, ((RangeKeyClass) obj).getKey(), ((RangeKeyClass) obj).getRangeKey()); - assertEquals(obj, loaded); - } - } - - /** - * Test whether we can split large batch request into small pieces. - */ - // DynamoDB changed their error for requests that are too large from a - // 413 (RequestEntityTooLarge) to a generic 400 (ValidationException), so - // the mapper's batch-splitting logic is broken. Not sure there's a good - // fix client-side without the service changing back to 413 so we can - // distinguish this case from other ValidationExceptions. - // @Test - public void testLargeRequestEntity() { - - // The total batch size is beyond 1M, test whether our client can split - // the batch correctly - List objs = new ArrayList(); - - int numItems = 25; - final int CONTENT_LENGTH = 1024 * 25; - - for (int i = 0; i < numItems; i++) { - BinaryAttributeByteBufferClass obj = getUniqueByteBufferObject(CONTENT_LENGTH); - objs.add(obj); - } - - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - List failedBatches = mapper.batchSave(objs); - assertEquals(0, failedBatches.size()); - - // Get these objects back - for (BinaryAttributeByteBufferClass obj : objs) { - BinaryAttributeByteBufferClass loaded = mapper.load(BinaryAttributeByteBufferClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - - // There are three super large item together with some small ones, test - // whether we can successfully - // save these small items. 
- objs.clear(); - numItems = 10; - List largeObjs = new ArrayList(); - - // Put three super large item(beyond 64k) - largeObjs.add(getUniqueByteBufferObject(CONTENT_LENGTH * 30)); - largeObjs.add(getUniqueByteBufferObject(CONTENT_LENGTH * 30)); - largeObjs.add(getUniqueByteBufferObject(CONTENT_LENGTH * 30)); - for (int i = 0; i < numItems - 3; i++) { - BinaryAttributeByteBufferClass obj = getUniqueByteBufferObject(CONTENT_LENGTH / 25); - objs.add(obj); - } - - objs.addAll(largeObjs); - - failedBatches = mapper.batchSave(objs); - assertEquals(3, failedBatches.size()); - objs.removeAll(largeObjs); - - // Get these small objects back - for (BinaryAttributeByteBufferClass obj : objs) { - BinaryAttributeByteBufferClass loaded = mapper.load(BinaryAttributeByteBufferClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - - // The whole batch is super large objects, none of them will be - // processed - largeObjs.clear(); - for (int i = 0; i < 5; i++) { - BinaryAttributeByteBufferClass obj = getUniqueByteBufferObject(CONTENT_LENGTH * 30); - largeObjs.add(obj); - } - failedBatches = mapper.batchSave(largeObjs); - assertEquals(5, failedBatches.size()); - } - - - private NoSuchTableClass getuniqueBadObject() { - NoSuchTableClass obj = new NoSuchTableClass(); - obj.setKey(String.valueOf(startKeyDebug++)); - return obj; - } - - private NumberSetAttributeClass getUniqueNumericObject() { - NumberSetAttributeClass obj = new NumberSetAttributeClass(); - obj.setKey(String.valueOf(startKeyDebug++)); - obj.setBigDecimalAttribute(toSet(new BigDecimal(startKey++), new BigDecimal(startKey++), new BigDecimal(startKey++))); - obj.setBigIntegerAttribute( - toSet(new BigInteger("" + startKey++), new BigInteger("" + startKey++), new BigInteger("" + startKey++))); - obj.setByteObjectAttribute(toSet(new Byte(nextByte()), new Byte(nextByte()), new Byte(nextByte()))); - obj.setDoubleObjectAttribute(toSet(new Double("" + start++), new Double("" + start++), new Double("" + start++))); - obj.setFloatObjectAttribute(toSet(new Float("" + start++), new Float("" + start++), new Float("" + start++))); - obj.setIntegerAttribute(toSet(new Integer("" + start++), new Integer("" + start++), new Integer("" + start++))); - obj.setLongObjectAttribute(toSet(new Long("" + start++), new Long("" + start++), new Long("" + start++))); - obj.setBooleanAttribute(toSet(true, false)); - obj.setDateAttribute(toSet(new Date(startKey++), new Date(startKey++), new Date(startKey++))); - Set cals = new HashSet(); - for (Date d : obj.getDateAttribute()) { - Calendar cal = GregorianCalendar.getInstance(); - cal.setTime(d); - cals.add(cal); - } - obj.setCalendarAttribute(toSet(cals)); - return obj; - } - - private RangeKeyClass getUniqueRangeKeyObject() { - RangeKeyClass obj = new RangeKeyClass(); - obj.setKey(startKey++); - obj.setIntegerAttribute(toSet(start++, start++, start++)); - obj.setBigDecimalAttribute(new BigDecimal(startKey++)); - obj.setRangeKey(start++); - obj.setStringAttribute("" + startKey++); - obj.setStringSetAttribute(toSet("" + startKey++, "" + startKey++, "" + startKey++)); - return obj; - } - - private String nextByte() { - return "" + byteStart++ % Byte.MAX_VALUE; - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/BinaryAttributesIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/BinaryAttributesIntegrationTest.java deleted file mode 100644 index 32a95998d2be..000000000000 --- 
a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/BinaryAttributesIntegrationTest.java +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.pojos.BinaryAttributeByteArrayClass; -import software.amazon.awssdk.services.dynamodb.pojos.BinaryAttributeByteBufferClass; - -/** - * Tests simple string attributes - */ -public class BinaryAttributesIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String BINARY_ATTRIBUTE = "binaryAttribute"; - private static final String BINARY_SET_ATTRIBUTE = "binarySetAttribute"; - private static final List> ATTRIBUTES = new LinkedList>(); - private static final int CONTENT_LENGTH = 512; - - // Test data - static { - Map attr = new HashMap(); - attr.put(KEY_NAME, AttributeValue.builder().s("" + startKey++).build()); - attr.put(BINARY_ATTRIBUTE, AttributeValue.builder().b(SdkBytes.fromByteArray(generateByteArray(CONTENT_LENGTH))).build()); - attr.put(BINARY_SET_ATTRIBUTE, AttributeValue.builder().bs(SdkBytes.fromByteArray(generateByteArray(CONTENT_LENGTH)), - SdkBytes.fromByteArray(generateByteArray(CONTENT_LENGTH + 1))).build()); - ATTRIBUTES.add(attr); - - } - - ; - - @BeforeClass - public static void setUp() throws Exception { - DynamoDBMapperIntegrationTestBase.setUp(); - - // Insert the data - for (Map attr : ATTRIBUTES) { - dynamo.putItem(PutItemRequest.builder().tableName(TABLE_NAME).item(attr).build()); - } - } - - @Test - public void testLoad() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - for (Map attr : ATTRIBUTES) { - // test BinaryAttributeClass - BinaryAttributeByteBufferClass x = util.load(BinaryAttributeByteBufferClass.class, attr.get(KEY_NAME).s()); - assertEquals(x.getKey(), attr.get(KEY_NAME).s()); - assertEquals(x.getBinaryAttribute(), ByteBuffer.wrap(generateByteArray(CONTENT_LENGTH))); - assertTrue(x.getBinarySetAttribute().contains(ByteBuffer.wrap(generateByteArray(CONTENT_LENGTH)))); - 
assertTrue(x.getBinarySetAttribute().contains(ByteBuffer.wrap(generateByteArray(CONTENT_LENGTH + 1)))); - - // test BinaryAttributeByteArrayClass - BinaryAttributeByteArrayClass y = util.load(BinaryAttributeByteArrayClass.class, attr.get(KEY_NAME).s()); - assertEquals(y.getKey(), attr.get(KEY_NAME).s()); - assertTrue(Arrays.equals(y.getBinaryAttribute(), (generateByteArray(CONTENT_LENGTH)))); - assertEquals(2, y.getBinarySetAttribute().size()); - assertTrue(setContainsBytes(y.getBinarySetAttribute(), generateByteArray(CONTENT_LENGTH))); - assertTrue(setContainsBytes(y.getBinarySetAttribute(), generateByteArray(CONTENT_LENGTH + 1))); - } - - } - - @Test - public void testSave() { - // test BinaryAttributeClass - List byteBufferObjs = new ArrayList(); - for (int i = 0; i < 5; i++) { - BinaryAttributeByteBufferClass obj = getUniqueByteBufferObject(CONTENT_LENGTH); - byteBufferObjs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (BinaryAttributeByteBufferClass obj : byteBufferObjs) { - util.save(obj); - } - - for (BinaryAttributeByteBufferClass obj : byteBufferObjs) { - BinaryAttributeByteBufferClass loaded = util.load(BinaryAttributeByteBufferClass.class, obj.getKey()); - assertEquals(loaded.getKey(), obj.getKey()); - assertEquals(loaded.getBinaryAttribute(), ByteBuffer.wrap(generateByteArray(CONTENT_LENGTH))); - assertTrue(loaded.getBinarySetAttribute().contains(ByteBuffer.wrap(generateByteArray(CONTENT_LENGTH)))); - } - - // test BinaryAttributeByteArrayClass - List bytesObjs = new ArrayList(); - for (int i = 0; i < 5; i++) { - BinaryAttributeByteArrayClass obj = getUniqueBytesObject(CONTENT_LENGTH); - bytesObjs.add(obj); - } - - for (BinaryAttributeByteArrayClass obj : bytesObjs) { - util.save(obj); - } - - for (BinaryAttributeByteArrayClass obj : bytesObjs) { - BinaryAttributeByteArrayClass loaded = util.load(BinaryAttributeByteArrayClass.class, obj.getKey()); - assertEquals(loaded.getKey(), obj.getKey()); - assertTrue(Arrays.equals(loaded.getBinaryAttribute(), (generateByteArray(CONTENT_LENGTH)))); - assertEquals(1, loaded.getBinarySetAttribute().size()); - assertTrue(setContainsBytes(loaded.getBinarySetAttribute(), generateByteArray(CONTENT_LENGTH))); - } - } - - /** - * Tests saving an incomplete object into DynamoDB - */ - @Test - public void testIncompleteObject() { - // test BinaryAttributeClass - BinaryAttributeByteBufferClass byteBufferObj = getUniqueByteBufferObject(CONTENT_LENGTH); - byteBufferObj.setBinarySetAttribute(null); - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.save(byteBufferObj); - - BinaryAttributeByteBufferClass loadedX = util.load(BinaryAttributeByteBufferClass.class, byteBufferObj.getKey()); - assertEquals(loadedX.getKey(), byteBufferObj.getKey()); - assertEquals(loadedX.getBinaryAttribute(), ByteBuffer.wrap(generateByteArray(CONTENT_LENGTH))); - assertEquals(loadedX.getBinarySetAttribute(), null); - - - // test removing an attribute - assertNotNull(byteBufferObj.getBinaryAttribute()); - byteBufferObj.setBinaryAttribute(null); - util.save(byteBufferObj); - - loadedX = util.load(BinaryAttributeByteBufferClass.class, byteBufferObj.getKey()); - assertEquals(loadedX.getKey(), byteBufferObj.getKey()); - assertEquals(loadedX.getBinaryAttribute(), null); - assertEquals(loadedX.getBinarySetAttribute(), null); - - // test BinaryAttributeByteArrayClass - BinaryAttributeByteArrayClass bytesObj = getUniqueBytesObject(CONTENT_LENGTH); - bytesObj.setBinarySetAttribute(null); - util.save(bytesObj); - - BinaryAttributeByteArrayClass loadedY 
= util.load(BinaryAttributeByteArrayClass.class, bytesObj.getKey()); - assertEquals(loadedY.getKey(), bytesObj.getKey()); - assertTrue(Arrays.equals(loadedY.getBinaryAttribute(), generateByteArray(CONTENT_LENGTH))); - assertEquals(loadedY.getBinarySetAttribute(), null); - - - // test removing an attribute - assertNotNull(bytesObj.getBinaryAttribute()); - bytesObj.setBinaryAttribute(null); - util.save(bytesObj); - - loadedY = util.load(BinaryAttributeByteArrayClass.class, bytesObj.getKey()); - assertEquals(loadedY.getKey(), bytesObj.getKey()); - assertEquals(loadedY.getBinaryAttribute(), null); - assertEquals(loadedY.getBinarySetAttribute(), null); - } - - @Test - public void testUpdate() { - // test BinaryAttributeClass - List byteBufferObjs = new ArrayList(); - for (int i = 0; i < 5; i++) { - BinaryAttributeByteBufferClass obj = getUniqueByteBufferObject(CONTENT_LENGTH); - byteBufferObjs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (BinaryAttributeByteBufferClass obj : byteBufferObjs) { - util.save(obj); - } - - for (BinaryAttributeByteBufferClass obj : byteBufferObjs) { - BinaryAttributeByteBufferClass replacement = getUniqueByteBufferObject(CONTENT_LENGTH - 1); - replacement.setKey(obj.getKey()); - util.save(replacement); - - BinaryAttributeByteBufferClass loaded = util.load(BinaryAttributeByteBufferClass.class, obj.getKey()); - assertEquals(loaded.getKey(), obj.getKey()); - assertEquals(loaded.getBinaryAttribute(), ByteBuffer.wrap(generateByteArray(CONTENT_LENGTH - 1))); - assertTrue(loaded.getBinarySetAttribute().contains(ByteBuffer.wrap(generateByteArray(CONTENT_LENGTH - 1)))); - - } - - // test BinaryAttributeByteArrayClass - List bytesObj = new ArrayList(); - for (int i = 0; i < 5; i++) { - BinaryAttributeByteArrayClass obj = getUniqueBytesObject(CONTENT_LENGTH); - bytesObj.add(obj); - } - - for (BinaryAttributeByteArrayClass obj : bytesObj) { - util.save(obj); - } - - for (BinaryAttributeByteArrayClass obj : bytesObj) { - BinaryAttributeByteArrayClass replacement = getUniqueBytesObject(CONTENT_LENGTH - 1); - replacement.setKey(obj.getKey()); - util.save(replacement); - - BinaryAttributeByteArrayClass loaded = util.load(BinaryAttributeByteArrayClass.class, obj.getKey()); - assertEquals(loaded.getKey(), obj.getKey()); - assertTrue(Arrays.equals(loaded.getBinaryAttribute(), (generateByteArray(CONTENT_LENGTH - 1)))); - assertEquals(1, loaded.getBinarySetAttribute().size()); - assertTrue(setContainsBytes(loaded.getBinarySetAttribute(), generateByteArray(CONTENT_LENGTH - 1))); - - } - } - - @Test - @Ignore // FIXME: Mapper needs to be be updated to be aware of AutoConstructMap - public void testDelete() throws Exception { - // test BinaryAttributeClass - BinaryAttributeByteBufferClass byteBufferObj = getUniqueByteBufferObject(CONTENT_LENGTH); - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.save(byteBufferObj); - - util.delete(byteBufferObj); - assertNull(util.load(BinaryAttributeByteBufferClass.class, byteBufferObj.getKey())); - - // test BinaryAttributeByteArrayClass - BinaryAttributeByteArrayClass bytesObj = getUniqueBytesObject(CONTENT_LENGTH); - util.save(bytesObj); - - util.delete(bytesObj); - assertNull(util.load(BinaryAttributeByteArrayClass.class, bytesObj.getKey())); - - } - - private BinaryAttributeByteArrayClass getUniqueBytesObject(int contentLength) { - BinaryAttributeByteArrayClass obj = new BinaryAttributeByteArrayClass(); - obj.setKey(String.valueOf(startKey++)); - obj.setBinaryAttribute(generateByteArray(contentLength)); - Set 
byteArray = new HashSet(); - byteArray.add(generateByteArray(contentLength)); - obj.setBinarySetAttribute(byteArray); - return obj; - } - - private boolean setContainsBytes(Set set, byte[] bytes) { - Iterator iter = set.iterator(); - while (iter.hasNext()) { - if (Arrays.equals(iter.next(), bytes)) { - return true; - } - } - return false; - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ComplexTypeIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ComplexTypeIntegrationTest.java deleted file mode 100644 index 91cc92f72c57..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ComplexTypeIntegrationTest.java +++ /dev/null @@ -1,384 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; - -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.MappingJsonFactory; -import java.io.StringReader; -import java.io.StringWriter; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.List; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConverted; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConvertedJson; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConverter; - -/** - * Tests of the configuration object - */ -public class ComplexTypeIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - // We don't start with the current system millis like other tests because - // it's out of the range of some data types - private static int start = 1; - private static int byteStart = -127; - - @Test - public void testComplexTypes() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - ComplexClass obj = getUniqueObject(); - util.save(obj); - ComplexClass loaded = util.load(ComplexClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - - private ComplexClass getUniqueObject() { - ComplexClass obj = new ComplexClass(); - obj.setKey(String.valueOf(startKey++)); - obj.setBigDecimalAttribute(new BigDecimal(startKey++)); - obj.setBigIntegerAttribute(new BigInteger("" + startKey++)); - obj.setByteAttribute((byte) byteStart++); - 
obj.setByteObjectAttribute(new Byte("" + byteStart++)); - obj.setDoubleAttribute(new Double("" + start++)); - obj.setDoubleObjectAttribute(new Double("" + start++)); - obj.setFloatAttribute(new Float("" + start++)); - obj.setFloatObjectAttribute(new Float("" + start++)); - obj.setIntAttribute(new Integer("" + start++)); - obj.setIntegerAttribute(new Integer("" + start++)); - obj.setLongAttribute(new Long("" + start++)); - obj.setLongObjectAttribute(new Long("" + start++)); - obj.setDateAttribute(new Date(startKey++)); - obj.setBooleanAttribute(start++ % 2 == 0); - obj.setBooleanObjectAttribute(start++ % 2 == 0); - obj.setExtraField("" + startKey++); - Calendar cal = GregorianCalendar.getInstance(); - cal.setTime(new Date(startKey++)); - obj.setCalendarAttribute(cal); - obj.setComplexNestedType(new ComplexNestedType("" + start++, start++, new ComplexNestedType("" + start++, - start++, null))); - List complexTypes = new ArrayList(); - complexTypes.add(new ComplexNestedType("" + start++, start++, - new ComplexNestedType("" + start++, start++, null))); - complexTypes.add(new ComplexNestedType("" + start++, start++, new ComplexNestedType("" + start++, start++, null))); - complexTypes.add(new ComplexNestedType("" + start++, start++, new ComplexNestedType("" + start++, start++, null))); - obj.setComplexNestedTypeList(complexTypes); - return obj; - } - - /** - * Tests using a complex type for a (string) key - */ - @Test - public void testComplexKey() throws Exception { - ComplexKey obj = new ComplexKey(); - ComplexNestedType key = new ComplexNestedType(); - key.setIntValue(start++); - key.setStringValue("" + start++); - obj.setKey(key); - obj.setOtherAttribute("" + start++); - - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - - mapper.save(obj); - ComplexKey loaded = mapper.load(ComplexKey.class, obj.getKey()); - assertEquals(obj, loaded); - } - - public static final class ComplexNestedListTypeMarshaller implements DynamoDbTypeConverter> { - @Override - public String convert(final List object) { - try { - StringWriter writer = new StringWriter(); - JsonFactory jsonFactory = new MappingJsonFactory(); - JsonGenerator jsonGenerator = jsonFactory.createJsonGenerator(writer); - jsonGenerator.writeObject(object); - return writer.toString(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - @Override - public List unconvert(String obj) { - try { - JsonFactory jsonFactory = new MappingJsonFactory(); - JsonParser jsonParser = jsonFactory.createJsonParser(new StringReader(obj)); - return jsonParser.readValueAs(new TypeReference>() { - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static final class ComplexClass extends NumberAttributeClass { - - private String extraField; - private ComplexNestedType complexNestedType; - private List complexNestedTypeList; - - @DynamoDbTypeConvertedJson - public ComplexNestedType getComplexNestedType() { - return complexNestedType; - } - - public void setComplexNestedType(ComplexNestedType complexNestedType) { - this.complexNestedType = complexNestedType; - } - - @DynamoDbTypeConverted(converter = ComplexNestedListTypeMarshaller.class) - public List getComplexNestedTypeList() { - return complexNestedTypeList; - } - - public void setComplexNestedTypeList(List complexNestedTypeList) { - this.complexNestedTypeList = complexNestedTypeList; - } - - public String getExtraField() { - return extraField; - } - - public void setExtraField(String extraField) { - 
this.extraField = extraField; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((complexNestedType == null) ? 0 : complexNestedType.hashCode()); - result = prime * result + ((complexNestedTypeList == null) ? 0 : complexNestedTypeList.hashCode()); - result = prime * result + ((extraField == null) ? 0 : extraField.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - ComplexClass other = (ComplexClass) obj; - if (complexNestedType == null) { - if (other.complexNestedType != null) { - return false; - } - } else if (!complexNestedType.equals(other.complexNestedType)) { - return false; - } - if (complexNestedTypeList == null) { - if (other.complexNestedTypeList != null) { - return false; - } - } else if (!complexNestedTypeList.equals(other.complexNestedTypeList)) { - return false; - } - if (extraField == null) { - if (other.extraField != null) { - return false; - } - } else if (!extraField.equals(other.extraField)) { - return false; - } - return true; - } - - } - - public static final class ComplexNestedType { - - private String stringValue; - private Integer intValue; - private ComplexNestedType nestedType; - - public ComplexNestedType() { - } - - public ComplexNestedType(String stringValue, Integer intValue, ComplexNestedType nestedType) { - super(); - this.stringValue = stringValue; - this.intValue = intValue; - this.nestedType = nestedType; - } - - public String getStringValue() { - return stringValue; - } - - public void setStringValue(String stringValue) { - this.stringValue = stringValue; - } - - public Integer getIntValue() { - return intValue; - } - - public void setIntValue(Integer intValue) { - this.intValue = intValue; - } - - public ComplexNestedType getNestedType() { - return nestedType; - } - - public void setNestedType(ComplexNestedType nestedType) { - this.nestedType = nestedType; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((intValue == null) ? 0 : intValue.hashCode()); - result = prime * result + ((nestedType == null) ? 0 : nestedType.hashCode()); - result = prime * result + ((stringValue == null) ? 
0 : stringValue.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - ComplexNestedType other = (ComplexNestedType) obj; - if (intValue == null) { - if (other.intValue != null) { - return false; - } - } else if (!intValue.equals(other.intValue)) { - return false; - } - if (nestedType == null) { - if (other.nestedType != null) { - return false; - } - } else if (!nestedType.equals(other.nestedType)) { - return false; - } - if (stringValue == null) { - if (other.stringValue != null) { - return false; - } - } else if (!stringValue.equals(other.stringValue)) { - return false; - } - return true; - } - - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static final class ComplexKey { - - private ComplexNestedType key; - private String otherAttribute; - - @DynamoDbHashKey - @DynamoDbTypeConvertedJson - public ComplexNestedType getKey() { - return key; - } - - public void setKey(ComplexNestedType key) { - this.key = key; - } - - public String getOtherAttribute() { - return otherAttribute; - } - - public void setOtherAttribute(String otherAttribute) { - this.otherAttribute = otherAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((otherAttribute == null) ? 0 : otherAttribute.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - ComplexKey other = (ComplexKey) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (otherAttribute == null) { - if (other.otherAttribute != null) { - return false; - } - } else if (!otherAttribute.equals(other.otherAttribute)) { - return false; - } - return true; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ConfigurationIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ConfigurationIntegrationTest.java deleted file mode 100644 index ebb4eb1f7a40..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ConfigurationIntegrationTest.java +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.fail; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.Calendar; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.UUID; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.ObjectTableNameResolver; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.SaveBehavior; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.TableNameOverride; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.TableNameResolver; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -/** - * Tests of the configuration object - */ -public class ConfigurationIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - // We don't start with the current system millis like other tests because - // it's out of the range of some data types - private static int start = 1; - private static int byteStart = -127; - - @Test - public void testClobber() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo, new DynamoDbMapperConfig(SaveBehavior.CLOBBER)); - - NumberAttributeClassExtended obj = getUniqueObject(); - util.save(obj); - assertEquals(obj, util.load(obj.getClass(), obj.getKey())); - - NumberAttributeClass copy = copy(obj); - util.save(copy); - assertEquals(copy, util.load(copy.getClass(), obj.getKey())); - - // We should have lost the extra field because of the clobber behavior - assertNull(util.load(NumberAttributeClassExtended.class, obj.getKey()).getExtraField()); - - // Now test overriding the clobber behavior on a per-save basis - obj = getUniqueObject(); - util.save(obj); - assertEquals(obj, util.load(obj.getClass(), obj.getKey())); - - copy = copy(obj); - util.save(copy, new DynamoDbMapperConfig(SaveBehavior.UPDATE)); - assertEquals(copy, util.load(copy.getClass(), obj.getKey())); - - // We shouldn't have lost any extra info - assertNotNull(util.load(NumberAttributeClassExtended.class, obj.getKey()).getExtraField()); - } - - @Test - public void testTableOverride() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - TableOverrideTestClass obj = new TableOverrideTestClass(); - obj.setOtherField(UUID.randomUUID().toString()); - - try { - util.save(obj); - fail("Expected an exception"); - } catch (Exception e) { - // Ignored or expected. - } - - util.save(obj, new DynamoDbMapperConfig(new TableNameOverride("aws-java-sdk-util"))); - - try { - util.load(TableOverrideTestClass.class, obj.getKey()); - fail("Expected an exception"); - } catch (Exception e) { - // Ignored or expected. 
- } - - Object loaded = util.load(TableOverrideTestClass.class, obj.getKey(), - new DynamoDbMapperConfig(TableNameOverride.withTableNamePrefix("aws-"))); - assertEquals(loaded, obj); - - try { - util.delete(obj); - fail("Expected an exception"); - } catch (Exception e) { - // Ignored or expected. - } - - util.delete(obj, new DynamoDbMapperConfig(TableNameOverride.withTableNamePrefix("aws-"))); - } - - @Test - public void testTableNameResolver() { - final String REAL_TABLE_NAME = "aws-java-sdk-util"; - - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - - final TableOverrideTestClass obj = new TableOverrideTestClass(); - obj.setOtherField(UUID.randomUUID().toString()); - - try { - mapper.save(obj); - fail("Expected an exception, because the POJO is annotated with a non-existent table."); - } catch (Exception e) { - // Ignored or expected. - } - - // Use TableNameResolver to save to the real table - mapper.save(obj, new DynamoDbMapperConfig(new TableNameResolver() { - @Override - public String getTableName(Class clazz, DynamoDbMapperConfig config) { - if (clazz.equals(TableOverrideTestClass.class)) { - return REAL_TABLE_NAME; - } - throw new RuntimeException("Unexpected data object type."); - } - })); - - try { - mapper.load(TableOverrideTestClass.class, obj.getKey()); - fail("Expected an exception, because the POJO is annotated with a non-existent table."); - } catch (Exception e) { - // Ignored or expected. - } - - // Use ObjectTableNameResolver to load from the real table - Object loaded = mapper.load(obj, - new DynamoDbMapperConfig(new ObjectTableNameResolver() { - @Override - public String getTableName(Object objectToLoad, DynamoDbMapperConfig config) { - if (objectToLoad == obj) { - return REAL_TABLE_NAME; - } - throw new RuntimeException("Unexpected data object."); - } - })); - assertEquals(loaded, obj); - - try { - mapper.delete(obj); - fail("Expected an exception, because the POJO is annotated with a non-existent table."); - } catch (Exception e) { - // Ignored or expected. 
- } - - // When used at the same time, ObjectTableNameResolver should have the highest priority - - final String NON_EXISTENT_TABLE_NAME = UUID.randomUUID().toString(); - mapper.delete(obj, new DynamoDbMapperConfig.Builder() - .withTableNameOverride(new TableNameOverride(NON_EXISTENT_TABLE_NAME)) - .withTableNameResolver(new TableNameResolver() { - @Override - public String getTableName(Class clazz, DynamoDbMapperConfig config) { - return NON_EXISTENT_TABLE_NAME; - } - }) - .withObjectTableNameResolver(new ObjectTableNameResolver() { - @Override - public String getTableName(Object object, DynamoDbMapperConfig config) { - return REAL_TABLE_NAME; - } - }).build()); - } - - private NumberAttributeClassExtended getUniqueObject() { - NumberAttributeClassExtended obj = new NumberAttributeClassExtended(); - obj.setKey(String.valueOf(startKey++)); - obj.setBigDecimalAttribute(new BigDecimal(startKey++)); - obj.setBigIntegerAttribute(new BigInteger("" + startKey++)); - obj.setByteAttribute((byte) byteStart++); - obj.setByteObjectAttribute(new Byte("" + byteStart++)); - obj.setDoubleAttribute(new Double("" + start++)); - obj.setDoubleObjectAttribute(new Double("" + start++)); - obj.setFloatAttribute(new Float("" + start++)); - obj.setFloatObjectAttribute(new Float("" + start++)); - obj.setIntAttribute(new Integer("" + start++)); - obj.setIntegerAttribute(new Integer("" + start++)); - obj.setLongAttribute(new Long("" + start++)); - obj.setLongObjectAttribute(new Long("" + start++)); - obj.setDateAttribute(new Date(startKey++)); - obj.setBooleanAttribute(start++ % 2 == 0); - obj.setBooleanObjectAttribute(start++ % 2 == 0); - obj.setExtraField("" + startKey++); - Calendar cal = GregorianCalendar.getInstance(); - cal.setTime(new Date(startKey++)); - obj.setCalendarAttribute(cal); - return obj; - } - - private NumberAttributeClass copy(NumberAttributeClassExtended obj) { - NumberAttributeClass copy = new NumberAttributeClass(); - copy.setKey(obj.getKey()); - copy.setBigDecimalAttribute(obj.getBigDecimalAttribute()); - copy.setBigIntegerAttribute(obj.getBigIntegerAttribute()); - copy.setByteAttribute(obj.getByteAttribute()); - copy.setByteObjectAttribute(obj.getByteObjectAttribute()); - copy.setDoubleAttribute(obj.getDoubleAttribute()); - copy.setDoubleObjectAttribute(obj.getDoubleObjectAttribute()); - copy.setFloatAttribute(obj.getFloatAttribute()); - copy.setFloatObjectAttribute(obj.getFloatObjectAttribute()); - copy.setIntAttribute(obj.getIntAttribute()); - copy.setIntegerAttribute(obj.getIntegerAttribute()); - copy.setLongAttribute(obj.getLongAttribute()); - copy.setLongObjectAttribute(obj.getLongObjectAttribute()); - copy.setDateAttribute(obj.getDateAttribute()); - copy.setBooleanAttribute(obj.isBooleanAttribute()); - copy.setBooleanObjectAttribute(obj.getBooleanObjectAttribute()); - return copy; - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static final class NumberAttributeClassExtended extends NumberAttributeClass { - - private String extraField; - - public String getExtraField() { - return extraField; - } - - public void setExtraField(String extraField) { - this.extraField = extraField; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((extraField == null) ? 
0 : extraField.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - NumberAttributeClassExtended other = (NumberAttributeClassExtended) obj; - if (extraField == null) { - if (other.extraField != null) { - return false; - } - } else if (!extraField.equals(other.extraField)) { - return false; - } - return true; - } - } - - @DynamoDbTable(tableName = "java-sdk-util") // doesn't exist - public static final class TableOverrideTestClass { - - private String key; - private String otherField; - - @DynamoDbAutoGeneratedKey - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getOtherField() { - return otherField; - } - - public void setOtherField(String otherField) { - this.otherField = otherField; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((otherField == null) ? 0 : otherField.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TableOverrideTestClass other = (TableOverrideTestClass) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (otherField == null) { - if (other.otherField != null) { - return false; - } - } else if (!otherField.equals(other.otherField)) { - return false; - } - return true; - } - - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/CrossSdkIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/CrossSdkIntegrationTest.java deleted file mode 100644 index b6830c4acb60..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/CrossSdkIntegrationTest.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.Calendar; -import java.util.Date; -import java.util.HashSet; -import java.util.Set; -import java.util.UUID; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.TableUtils; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.services.dynamodb.pojos.CrossSdkVerificationClass; - - -/** - * Cross-SDK acceptance test. More of a smoke test, verifies that the formats - * used by each program's ORM can be read by the others. - */ -public class CrossSdkIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String TABLE_NAME = "aws-xsdk"; - - private static final String HASH_KEY = "3530a51a-0760-47d2-bfcb-158320d6188a"; - private static final String RANGE_KEY = "61cdf81e-792f-4dd8-a812-a16185bfbf60"; - - private static int start = 1; - - // @BeforeClass - public static void setUp() throws Exception { - setUpCredentials(); - dynamo = DynamoDbClient.builder().credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); - - // Create a table - String keyName = DynamoDBMapperIntegrationTestBase.KEY_NAME; - String rangeKey = "rangeKey"; - CreateTableRequest createTableRequest = CreateTableRequest.builder() - .tableName(TABLE_NAME) - .keySchema(KeySchemaElement.builder().attributeName(keyName).keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName(rangeKey).keyType(KeyType.RANGE).build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName(keyName).attributeType( - ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName(rangeKey).attributeType( - ScalarAttributeType.S).build()) - .provisionedThroughput(ProvisionedThroughput.builder().readCapacityUnits(10L) - .writeCapacityUnits(10L).build()) - .build(); - - if (TableUtils.createTableIfNotExists(dynamo, createTableRequest)) { - TableUtils.waitUntilActive(dynamo, TABLE_NAME); - } - } - - @Test - public void disabled() { - } - - // This record written by the .NET mapper no longer exists, so this test - // NPEs. If we want to add back something similar we should generate some - // items using the .NET mapper and check a serialized form of them into - // this package so this can be run as a unit test. 
- // @Test - public void testLoad() throws Exception { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - - CrossSdkVerificationClass obj = mapper.load(CrossSdkVerificationClass.class, HASH_KEY, RANGE_KEY); - - Long originalVersion = obj.getVersion(); - - assertNotNull(obj); - assertNotNull(obj.getKey()); - assertEquals(obj.getKey(), HASH_KEY); - assertNotNull(obj.getRangeKey()); - assertEquals(obj.getRangeKey(), RANGE_KEY); - assertNotNull(originalVersion); - assertNotNull(obj.bigDecimalAttribute()); - assertNotNull(obj.bigDecimalSetAttribute()); - assertEquals(3, obj.bigDecimalSetAttribute().size()); - assertNotNull(obj.bigIntegerAttribute()); - assertNotNull(obj.bigIntegerSetAttribute()); - assertEquals(3, obj.bigIntegerSetAttribute().size()); - assertNotNull(obj.booleanAttribute()); - assertNotNull(obj.booleanSetAttribute()); - assertEquals(2, obj.booleanSetAttribute().size()); - assertNotNull(obj.byteAttribute()); - assertNotNull(obj.byteSetAttribute()); - assertEquals(3, obj.byteSetAttribute().size()); - assertNotNull(obj.getCalendarAttribute()); - assertNotNull(obj.getCalendarSetAttribute()); - assertEquals(3, obj.getCalendarSetAttribute().size()); - assertNotNull(obj.getDateAttribute()); - assertNotNull(obj.getDateSetAttribute()); - assertEquals(3, obj.getDateSetAttribute().size()); - assertNotNull(obj.getDoubleAttribute()); - assertNotNull(obj.getDoubleSetAttribute()); - assertEquals(3, obj.getDoubleSetAttribute().size()); - assertNotNull(obj.getFloatAttribute()); - assertNotNull(obj.getFloatSetAttribute()); - assertEquals(3, obj.getFloatSetAttribute().size()); - assertNotNull(obj.getIntegerAttribute()); - assertNotNull(obj.getIntegerSetAttribute()); - assertEquals(3, obj.getIntegerSetAttribute().size()); - assertNotNull(obj.longAttribute()); - assertNotNull(obj.longSetAttribute()); - assertEquals(3, obj.longSetAttribute().size()); - assertNotNull(obj.stringSetAttribute()); - assertEquals(3, obj.stringSetAttribute().size()); - - updateObjectValues(obj); - - mapper.save(obj); - assertFalse(originalVersion.equals(obj.getVersion())); - - CrossSdkVerificationClass loaded = mapper.load(CrossSdkVerificationClass.class, HASH_KEY, RANGE_KEY); - assertEquals(loaded, obj); - } - - /** - * Updates all values in the object (except for the keys and version) - */ - private void updateObjectValues(CrossSdkVerificationClass obj) { - obj.setBigDecimalAttribute(obj.bigDecimalAttribute().add(BigDecimal.ONE)); - Set bigDecimals = new HashSet(); - for (BigDecimal d : obj.bigDecimalSetAttribute()) { - bigDecimals.add(d.add(BigDecimal.ONE)); - } - obj.setBigDecimalSetAttribute(bigDecimals); - - obj.setBigIntegerAttribute(obj.bigIntegerAttribute().add(BigInteger.ONE)); - Set bigInts = new HashSet(); - for (BigInteger d : obj.bigIntegerSetAttribute()) { - bigInts.add(d.add(BigInteger.ONE)); - } - obj.setBigIntegerSetAttribute(bigInts); - - obj.setBooleanAttribute(!obj.booleanAttribute()); - - obj.setByteAttribute((byte) ((obj.byteAttribute() + 1) % Byte.MAX_VALUE)); - Set bytes = new HashSet(); - for (Byte b : obj.byteSetAttribute()) { - bytes.add((byte) ((b + 1) % Byte.MAX_VALUE)); - } - - obj.getCalendarAttribute().setTime(new Date(obj.getCalendarAttribute().getTimeInMillis() + 1000)); - for (Calendar c : obj.getCalendarSetAttribute()) { - c.setTime(new Date(c.getTimeInMillis() + 1000)); - } - - obj.getDateAttribute().setTime(obj.getDateAttribute().getTime() + 1000); - for (Date d : obj.getDateSetAttribute()) { - d.setTime(d.getTime() + 1000); - } - - obj.setDoubleAttribute(obj.getDoubleAttribute() 
+ 1.0); - Set doubleSet = new HashSet(); - for (Double d : obj.getDoubleSetAttribute()) { - doubleSet.add(d + 1.0); - } - obj.setDoubleSetAttribute(doubleSet); - - obj.setFloatAttribute((float) (obj.getFloatAttribute() + 1.0)); - Set floatSet = new HashSet(); - for (Float f : obj.getFloatSetAttribute()) { - floatSet.add(f + 1.0f); - } - obj.setFloatSetAttribute(floatSet); - - obj.setIntegerAttribute(obj.getIntegerAttribute() + 1); - Set intSet = new HashSet(); - for (Integer i : obj.getIntegerSetAttribute()) { - intSet.add(i + 1); - } - obj.setIntegerSetAttribute(intSet); - - obj.setLastUpdater("java-sdk"); - - obj.setLongAttribute(obj.longAttribute() + 1); - Set longSet = new HashSet(); - for (Long l : obj.longSetAttribute()) { - longSet.add(l + 1); - } - obj.setLongSetAttribute(longSet); - - obj.setStringSetAttribute( - toSet(UUID.randomUUID().toString(), UUID.randomUUID().toString(), UUID.randomUUID().toString())); - } - - /** - * Used to set up the original object, no longer used. - */ - @SuppressWarnings("unused") - private CrossSdkVerificationClass getUniqueObject() { - CrossSdkVerificationClass obj = new CrossSdkVerificationClass(); - obj.setKey(HASH_KEY); - obj.setRangeKey(RANGE_KEY); - obj.setBigDecimalAttribute(new BigDecimal(start++)); - obj.setBigDecimalSetAttribute(toSet(new BigDecimal(start++), new BigDecimal(start++), new BigDecimal(start++))); - obj.setBigIntegerAttribute(new BigInteger("" + start++)); - obj.setBigIntegerSetAttribute( - toSet(new BigInteger("" + start++), new BigInteger("" + start++), new BigInteger("" + start++))); - obj.setBooleanAttribute(start++ % 2 == 0); - obj.setBooleanSetAttribute(toSet(true, false)); - obj.setByteAttribute((byte) start++); - obj.setByteSetAttribute(toSet((byte) start++, (byte) start++, (byte) start++)); - obj.setCalendarAttribute(getUniqueCalendar()); - obj.setCalendarSetAttribute(toSet(getUniqueCalendar(), getUniqueCalendar(), getUniqueCalendar())); - obj.setDateAttribute(new Date(start++)); - obj.setDateSetAttribute(toSet(new Date(start++), new Date(start++), new Date(start++))); - obj.setDoubleAttribute((double) start++); - obj.setDoubleSetAttribute(toSet((double) start++, (double) start++, (double) start++)); - obj.setFloatAttribute((float) start++); - obj.setFloatSetAttribute(toSet((float) start++, (float) start++, (float) start++)); - obj.setIntegerAttribute(start++); - obj.setIntegerSetAttribute(toSet(start++, start++, start++)); - obj.setLongAttribute((long) start++); - obj.setLongSetAttribute(toSet((long) start++, (long) start++, (long) start++)); - obj.setStringSetAttribute(toSet("" + start++, "" + start++, "" + start++)); - return obj; - } - - private Calendar getUniqueCalendar() { - Calendar cal = Calendar.getInstance(); - cal.setTime(new Date(start++)); - return cal; - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/DelimitedIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/DelimitedIntegrationTest.java deleted file mode 100644 index 2aff58fdef8e..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/DelimitedIntegrationTest.java +++ /dev/null @@ -1,486 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; - -import java.util.Collections; -import java.util.Date; -import java.util.Set; -import java.util.UUID; -import org.junit.Ignore; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedTimestamp; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbDelimited; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConvertedTimestamp; -import software.amazon.awssdk.services.dynamodb.pojos.AutoKeyAndVal; -import software.amazon.awssdk.services.dynamodb.pojos.Currency; -import software.amazon.awssdk.services.dynamodb.pojos.DateRange; -import software.amazon.awssdk.services.dynamodb.pojos.KeyAndVal; -import software.amazon.awssdk.services.dynamodb.pojos.PhoneNumber; - -/** - * Tests updating component attribute fields correctly. - */ -public class DelimitedIntegrationTest extends AbstractKeyAndValIntegrationTestCase { - - /** - * Test using {@code PhoneNumber}. - */ - @Test - public void testPhoneNumber() { - final KeyAndPhoneNumber object = new KeyAndPhoneNumber(); - object.setVal(new PhoneNumber("206", "266", "1000")); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code PhoneNumber}. - */ - @Test - public void testPhoneNumberAreaCodeNull() { - final KeyAndPhoneNumber object = new KeyAndPhoneNumber(); - object.setVal(new PhoneNumber(null, "266", "1000")); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code PhoneNumber}. - */ - @Test - public void testPhoneNumberAreaCodeEmpty() { - final KeyAndPhoneNumber object = new KeyAndPhoneNumber(); - object.setVal(new PhoneNumber("", "266", "1000")); - - final PhoneNumber after = assertBeforeAndAfterChange(null, object); - assertNotNull(after); - assertNull(after.getAreaCode()); - assertEquals(object.getVal().getExchange(), after.getExchange()); - assertEquals(object.getVal().getSubscriber(), after.getSubscriber()); - } - - /** - * Test using {@code PhoneNumber}. - */ - @Test - public void testPhoneNumberNull() { - final KeyAndPhoneNumber object = new KeyAndPhoneNumber(); - object.setVal(null); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code PhoneNumber}. - */ - @Test - public void testPhoneNumberAllNull() { - final KeyAndPhoneNumber object = new KeyAndPhoneNumber(); - object.setVal(new PhoneNumber(null, null, null)); - - final PhoneNumber after = assertBeforeAndAfterChange(null, object); - assertNull(after); //<- and empty object produces a null val - } - - /** - * Test using {@code PhoneNumber}. 
- */ - @Test - public void testKeyAndSpecialCharacterDelimitedierPhoneNumber() { - final KeyAndSpecialCharacterDelimitedierPhoneNumber object = new KeyAndSpecialCharacterDelimitedierPhoneNumber(); - object.setVal(new PhoneNumber("206", "867", "5309")); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code PhoneNumber}. - */ - @Test - public void testKeyAndDefaultDelimitederPhoneNumber() { - final KeyAndDefaultDelimitederPhoneNumber object = new KeyAndDefaultDelimitederPhoneNumber(); - object.setVal(new PhoneNumber("206", "867", "5309")); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code PhoneNumber}. - */ - @Test - public void testPhoneNumberDelimitedOnType() { - final KeyAndPhoneNumberDelimitedOnType object = new KeyAndPhoneNumberDelimitedOnType(); - object.setVal(new KeyAndPhoneNumberDelimitedOnType.Value()); - object.getVal().setAreaCode("206"); - object.getVal().setExchange("266"); - object.getVal().setSubscriber("1000"); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code PhoneNumber}. - */ - @Test(expected = DynamoDbMappingException.class) - public void testKeyAndNoAttributeNamesPhoneNumber() { - final KeyAndNoAttributeNamesPhoneNumber object = new KeyAndNoAttributeNamesPhoneNumber(); - object.setVal(new PhoneNumber("206", "867", "5309")); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code PhoneNumber}. - */ - @Test(expected = DynamoDbMappingException.class) - public void testExceptionOnPhoneNumberSet() { - final KeyAndPhoneNumberSet object = new KeyAndPhoneNumberSet(); - object.setVal(Collections.singleton(new PhoneNumber("206", "266", "1000"))); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code DateRange}. - */ - @Test - public void testFormatDateRange() throws Exception { - final KeyAndFormatDateRange object = new KeyAndFormatDateRange(); - object.setVal(new KeyAndFormatDateRange.FormatDateRange()); - object.getVal().setStart(new Date(System.currentTimeMillis() - 6000L)); - object.getVal().setEnd(new Date(System.currentTimeMillis() + 6000L)); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code DateRange}. - */ - @Test - public void testFormatDateRangeStartNull() throws Exception { - final KeyAndFormatDateRange object = new KeyAndFormatDateRange(); - object.setVal(new KeyAndFormatDateRange.FormatDateRange()); - object.getVal().setStart(null); - object.getVal().setEnd(new Date()); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code DateRange}. - */ - @Test - public void testFormatDateRangeEndNull() throws Exception { - final KeyAndFormatDateRange object = new KeyAndFormatDateRange(); - object.setVal(new KeyAndFormatDateRange.FormatDateRange()); - object.getVal().setStart(new Date()); - object.getVal().setEnd(null); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code DateRange}. - */ - @Test - public void testFormatDateRangeStartAndEndNull() { - final KeyAndFormatDateRange object = new KeyAndFormatDateRange(); - object.setVal(new KeyAndFormatDateRange.FormatDateRange()); - object.getVal().setStart(null); - object.getVal().setEnd(null); - - final KeyAndFormatDateRange.FormatDateRange after = assertBeforeAndAfterChange(null, object); - assertNull(after); //<- and empty object produces a null val - } - - /** - * Test using {@code DateRange}. 
- */ - @Test - @Ignore("Behavior is different with java.time; cannot parse 'yyyMMdd' formatted date to ZonedDateTime.") - public void testDelimitedKeyAndDate() { - final DelimitedKeyAndDate object = new DelimitedKeyAndDate(); - object.setKey(new DelimitedKeyAndDate.Key()); - object.getKey().setKey(UUID.randomUUID().toString()); - object.getKey().setVal(new Date()); - assertBeforeAndAfterChange(true, object); - } - - /** - * Test marshalling. - */ - @Test - public void testKeyAndCurrency() { - final KeyAndCurrency object = new KeyAndCurrency(); - object.setVal(new Currency(12.95D, "USD")); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test marshalling. - */ - @Test(expected = DynamoDbMappingException.class) - public void testKeyAndNestedCurrency() { - final KeyAndNestedCurrency object = new KeyAndNestedCurrency(); - object.setVal(new KeyAndNestedCurrency.Value()); - object.getVal().setKey(UUID.randomUUID().toString()); - object.getVal().setVal(new Currency(12.95D, "USD")); - assertBeforeAndAfterChange(false, object); - } - - /** - * An object with {@code PhoneNumber}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndSpecialCharacterDelimitedierPhoneNumber extends AutoKeyAndVal { - @DynamoDbDelimited(attributeNames = {"areaCode", "exchange", "subscriber"}, delimiter = '\\') - public PhoneNumber getVal() { - return super.getVal(); - } - - @Override - public void setVal(final PhoneNumber val) { - super.setVal(val); - } - } - - /** - * An object with {@code PhoneNumber}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDefaultDelimitederPhoneNumber extends AutoKeyAndVal { - @DynamoDbDelimited(attributeNames = {"areaCode", "exchange", "subscriber"}) - public PhoneNumber getVal() { - return super.getVal(); - } - - @Override - public void setVal(final PhoneNumber val) { - super.setVal(val); - } - } - - /** - * An object with {@code PhoneNumber}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndPhoneNumber extends AutoKeyAndVal { - @DynamoDbDelimited(attributeNames = {"areaCode", "exchange", "subscriber"}, delimiter = '-') - public PhoneNumber getVal() { - return super.getVal(); - } - - @Override - public void setVal(final PhoneNumber val) { - super.setVal(val); - } - } - - /** - * An object with {@code PhoneNumber}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndPhoneNumberDelimitedOnType extends AutoKeyAndVal { - public Value getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Value val) { - super.setVal(val); - } - - @DynamoDbDelimited(attributeNames = {"areaCode", "exchange", "subscriber"}, delimiter = '.') - public static class Value extends PhoneNumber { - } - } - - /** - * An object with {@code PhoneNumber}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndNoAttributeNamesPhoneNumber extends AutoKeyAndVal { - @DynamoDbDelimited(attributeNames = {}) - public PhoneNumber getVal() { - return super.getVal(); - } - - @Override - public void setVal(final PhoneNumber val) { - super.setVal(val); - } - } - - /** - * An object with {@code PhoneNumber}. 
- */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndPhoneNumberSet extends AutoKeyAndVal> { - @DynamoDbDelimited(attributeNames = {"areaCode", "exchange", "subscriber"}, delimiter = '-') - public Set getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Set val) { - super.setVal(val); - } - } - - /** - * An object with {@code DateRange}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDateRange extends AutoKeyAndVal { - @DynamoDbDelimited(attributeNames = {"start", "end"}) - public DateRange getVal() { - return super.getVal(); - } - - @Override - public void setVal(final DateRange val) { - super.setVal(val); - } - } - - /** - * An object with {@code DateRange}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndFormatDateRange extends AutoKeyAndVal { - @DynamoDbDelimited(attributeNames = {"start", "end"}) - public FormatDateRange getVal() { - return super.getVal(); - } - - @Override - public void setVal(final FormatDateRange val) { - super.setVal(val); - } - - public static class FormatDateRange extends DateRange { - @DynamoDbTypeConvertedTimestamp(pattern = "yyyy MMddHHmmssSSS") - public Date getStart() { - return super.getStart(); - } - - @DynamoDbTypeConvertedTimestamp(pattern = "yyyy MMddHHmmssSSS") - public Date getEnd() { - return super.getEnd(); - } - } - } - - /** - * An object with {@code DateRange}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class DelimitedKeyAndDate extends KeyAndVal { - @DynamoDbHashKey - @DynamoDbDelimited(attributeNames = {"key", "val"}) - public DelimitedKeyAndDate.Key getKey() { - return super.getKey(); - } - - @Override - public void setKey(final DelimitedKeyAndDate.Key key) { - super.setKey(key); - } - - @DynamoDbAutoGeneratedTimestamp - public Date getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Date val) { - super.setVal(val); - } - - public static class Key extends KeyAndVal { - @Override - public String getKey() { - return super.getKey(); - } - - @Override - public void setKey(final String key) { - super.setKey(key); - } - - @DynamoDbTypeConvertedTimestamp(pattern = "yyyyMMdd") - public Date getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Date val) { - super.setVal(val); - } - } - } - - /** - * An object with a complex type. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndCurrency extends AutoKeyAndVal { - @DynamoDbDelimited(attributeNames = {"amount", "unit"}, delimiter = '$') - public Currency getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Currency val) { - super.setVal(val); - } - } - - /** - * An object with a complex type. 
- */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndNestedCurrency extends AutoKeyAndVal { - @DynamoDbDelimited(attributeNames = {"key", "val"}, delimiter = '#') - public KeyAndNestedCurrency.Value getVal() { - return super.getVal(); - } - - @Override - public void setVal(final KeyAndNestedCurrency.Value val) { - super.setVal(val); - } - - public static class Value extends KeyAndVal { - @Override - public String getKey() { - return super.getKey(); - } - - @Override - public void setKey(final String key) { - super.setKey(key); - } - - @Override - public Currency getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Currency val) { - super.setVal(val); - } - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ExceptionHandlingIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ExceptionHandlingIntegrationTest.java deleted file mode 100644 index 08548ff85f98..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ExceptionHandlingIntegrationTest.java +++ /dev/null @@ -1,564 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.ConversionSchemas; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbVersionAttribute; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; - -/** - * Tests of exception handling - */ -public class ExceptionHandlingIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - @Test(expected = DynamoDbMappingException.class) - public void testNoTableAnnotation() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.save(new NoTableAnnotation()); - } - - @Test(expected = DynamoDbMappingException.class) - public void testNoTableAnnotationLoad() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.load(NoTableAnnotation.class, "abc"); - } - - @Test(expected = DynamoDbMappingException.class) - public void testNoDefaultConstructor() { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - NoDefaultConstructor obj = new NoDefaultConstructor("" + startKey++, "abc"); - util.save(obj); - util.load(NoDefaultConstructor.class, obj.getKey()); - } - - @Test(expected = DynamoDbMappingException.class) - public void testNoHashKeyGetter() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.save(new NoKeyGetterDefined()); - } - - @Test(expected = DynamoDbMappingException.class) - public void testNoHashKeyGetterLoad() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.load(NoKeyGetterDefined.class, "abc"); - } - - @Test(expected = DynamoDbMappingException.class) - public void testPrivateKeyGetter() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.save(new PrivateKeyGetter()); - } - - @Test(expected = DynamoDbMappingException.class) - public void testPrivateKeyGetterLoad() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.load(PrivateKeyGetter.class, "abc"); - } - - @Test(expected = DynamoDbMappingException.class) - public void testPrivateKeySetter() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.save(new PrivateKeySetter()); - } - - /* - * To trigger this error, we need for a service object to be present, so - * we'll insert one manually. 
- */ - @Test(expected = DynamoDbMappingException.class) - public void testPrivateKeySetterLoad() throws Exception { - Map attr = new HashMap(); - attr.put(KEY_NAME, AttributeValue.builder().s("abc").build()); - dynamo.putItem(PutItemRequest.builder().tableName("aws-java-sdk-util").item(attr).build()); - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.load(PrivateKeySetter.class, "abc"); - } - - @Test(expected = DynamoDbMappingException.class) - public void testPrivateSetterLoad() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - PrivateSetter object = new PrivateSetter(); - object.setStringProperty("value"); - util.save(object); - util.load(PrivateSetter.class, object.getKey()); - } - - @Test(expected = DynamoDbMappingException.class) - public void testOverloadedSetter() { - OverloadedSetter obj = new OverloadedSetter(); - obj.setKey("" + startKey++); - obj.setAttribute("abc", "123"); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - mapper.save(obj); - - mapper.load(OverloadedSetter.class, obj.getKey()); - } - - @Test(expected = DynamoDbMappingException.class) - public void testWrongTypeForSetter() { - WrongTypeForSetter obj = new WrongTypeForSetter(); - obj.setKey("" + startKey++); - obj.setAttribute(123); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - mapper.save(obj); - - mapper.load(WrongTypeForSetter.class, obj.getKey()); - } - - @Test(expected = DynamoDbMappingException.class) - public void testWrongDataType() { - Map attr = new HashMap(); - attr.put("integerProperty", AttributeValue.builder().s("abc").build()); - attr.put(KEY_NAME, AttributeValue.builder().s("" + startKey++).build()); - dynamo.putItem(PutItemRequest.builder().tableName("aws-java-sdk-util").item(attr).build()); - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.load(NumericFields.class, attr.get(KEY_NAME).s()); - } - - @Test(expected = DynamoDbMappingException.class) - public void testWrongDataType2() { - Map attr = new HashMap(); - attr.put("integerProperty", AttributeValue.builder().ns("1", "2", "3").build()); - attr.put(KEY_NAME, AttributeValue.builder().s("" + startKey++).build()); - dynamo.putItem(PutItemRequest.builder().tableName("aws-java-sdk-util").item(attr).build()); - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.load(NumericFields.class, attr.get(KEY_NAME).s()); - } - - // Complex types are not supported by the V1 conversion schema - @Test(expected = DynamoDbMappingException.class) - public void testComplexTypeFailure() { - DynamoDbMapperConfig config = new DynamoDbMapperConfig(ConversionSchemas.V1); - DynamoDbMapper util = new DynamoDbMapper(dynamo, config); - - ComplexType complexType = new ComplexType("" + startKey++, new ComplexType("" + startKey++, null)); - util.save(complexType); - } - - @Test(expected = DynamoDbMappingException.class) - public void testUnsupportedHashKeyType() { - ComplexType complexType = new ComplexType("" + startKey++, new ComplexType("" + startKey++, null)); - ComplexHashKeyType obj = new ComplexHashKeyType(); - obj.setKey(complexType); - obj.setAttribute("abc"); - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.save(obj); - } - - // Lists are not supported by the V1 conversion schema. 
- @Test(expected = DynamoDbMappingException.class) - public void testNonsetCollection() { - DynamoDbMapperConfig config = new DynamoDbMapperConfig(ConversionSchemas.V1); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo, config); - - NonSetCollectionType obj = new NonSetCollectionType(); - obj.setKey("" + startKey++); - obj.setBadlyMapped(new ArrayList()); - obj.getBadlyMapped().add("abc"); - mapper.save(obj); - } - - @Test(expected = DynamoDbMappingException.class) - public void testFractionalVersionAttribute() { - FractionalVersionAttribute obj = new FractionalVersionAttribute(); - obj.setKey("" + startKey++); - obj.setVersion(0d); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - mapper.save(obj); - } - - @Test(expected = DynamoDbMappingException.class) - public void testAutoGeneratedIntegerHashKey() { - AutoGeneratedIntegerKey obj = new AutoGeneratedIntegerKey(); - obj.setValue("fdgfdsgf"); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - mapper.save(obj); - } - - @Test(expected = DynamoDbMappingException.class) - public void testAutoGeneratedIntegerRangeKey() { - AutoGeneratedIntegerRangeKey obj = new AutoGeneratedIntegerRangeKey(); - obj.setKey("Bldadsfa"); - obj.setValue("fdgfdsgf"); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - mapper.save(obj); - } - - public static class NoTableAnnotation { - - private String key; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class NoDefaultConstructor { - - private String key; - private String attribute; - - public NoDefaultConstructor(String key, String attribute) { - super(); - this.key = key; - this.attribute = attribute; - } - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getAttribute() { - return attribute; - } - - public void setAttribute(String attribute) { - this.attribute = attribute; - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class NoKeyGetterDefined { - - @SuppressWarnings("unused") - private String key; - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class PrivateKeyGetter { - - private String key; - - @SuppressWarnings("unused") - @DynamoDbHashKey - private String getKey() { - return key; - } - - @SuppressWarnings("unused") - private void setKey(String key) { - this.key = key; - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class PrivateKeySetter { - - private String key; - - @DynamoDbHashKey - @DynamoDbAutoGeneratedKey - public String getKey() { - return key; - } - - @SuppressWarnings("unused") - private void setKey(String key) { - this.key = key; - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class PrivateSetter { - - private String key; - private String stringProperty; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String stringProperty() { - return stringProperty; - } - - private void setStringProperty(String stringProperty) { - this.stringProperty = stringProperty; - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class OverloadedSetter { - - private String key; - private String attribute; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String 
getAttribute() { - return attribute; - } - - public void setAttribute(String attribute, String unused) { - this.attribute = attribute; - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class WrongTypeForSetter { - - private String key; - private String attribute; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getAttribute() { - return attribute; - } - - public void setAttribute(Integer attribute) { - this.attribute = String.valueOf(attribute); - } - - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class NumericFields { - - private String key; - private Integer integerProperty; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public Integer getIntegerProperty() { - return integerProperty; - } - - public void setIntegerProperty(Integer integerProperty) { - this.integerProperty = integerProperty; - } - - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class ComplexType { - - public String key; - public ComplexType type; - - public ComplexType(String key, ComplexType type) { - super(); - this.key = key; - this.type = type; - } - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public ComplexType getType() { - return type; - } - - public void setType(ComplexType type) { - this.type = type; - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class ComplexHashKeyType { - - private ComplexType key; - private String attribute; - - @DynamoDbHashKey - public ComplexType getKey() { - return key; - } - - public void setKey(ComplexType key) { - this.key = key; - } - - public String getAttribute() { - return attribute; - } - - public void setAttribute(String attribute) { - this.attribute = attribute; - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class NonSetCollectionType { - - private String key; - private List badlyMapped; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public List getBadlyMapped() { - return badlyMapped; - } - - public void setBadlyMapped(List badlyMapped) { - this.badlyMapped = badlyMapped; - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class FractionalVersionAttribute { - - private String key; - private Double version; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbVersionAttribute - public Double getVersion() { - return version; - } - - public void setVersion(Double version) { - this.version = version; - } - - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class AutoGeneratedIntegerKey { - - private Integer key; - private String value; - - @DynamoDbHashKey - @DynamoDbAutoGeneratedKey - public Integer getKey() { - return key; - } - - public void setKey(Integer key) { - this.key = key; - } - - public String value() { - return value; - } - - public void setValue(String value) { - this.value = value; - } - - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class AutoGeneratedIntegerRangeKey { - - private String key; - private Integer rangekey; - private String value; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - 
this.key = key; - } - - @DynamoDbAutoGeneratedKey - @DynamoDbRangeKey - public Integer getRangekey() { - return rangekey; - } - - public void setRangekey(Integer rangekey) { - this.rangekey = rangekey; - } - - public String value() { - return value; - } - - public void setValue(String value) { - this.value = value; - } - - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/FlattenedIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/FlattenedIntegrationTest.java deleted file mode 100644 index 3779b1358886..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/FlattenedIntegrationTest.java +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; - -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.Set; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGenerateStrategy; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedTimestamp; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbFlattened; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.pojos.AuditRecord; -import software.amazon.awssdk.services.dynamodb.pojos.AutoKeyAndVal; -import software.amazon.awssdk.services.dynamodb.pojos.Currency; -import software.amazon.awssdk.services.dynamodb.pojos.DateRange; - -/** - * Tests updating component attribute fields correctly. - */ -public class FlattenedIntegrationTest extends AbstractKeyAndValIntegrationTestCase { - - /** - * Test the mappings. - */ - @Test - public void testAuditRecord() { - final KeyAndAuditRecord object = new KeyAndAuditRecord(); - final AuditRecord auditRecord = assertBeforeAndAfterChange(true, object); - assertNotNull(auditRecord.getCreatedDate()); - assertNotNull(auditRecord.getLastModifiedDate()); - assertEquals(Long.valueOf(1L), auditRecord.getVersionNumber()); - } - - /** - * Test using {@code DateRange}. - */ - @Test - public void testDateRange() throws Exception { - final KeyAndDateRange object = new KeyAndDateRange(); - object.setVal(new DateRange(new Date(), -60000L, 60000L)); - - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code DateRange}. - */ - @Test - public void testDateRangeIsNull() { - final KeyAndDateRange object = new KeyAndDateRange(); - - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code DateRange}. 
- */ - @Test - public void testDateRangeStartAndEndIsNull() { - final KeyAndDateRange object = new KeyAndDateRange(); - object.setVal(new DateRange()); - - final DateRange after = assertBeforeAndAfterChange(null, object); - assertNull(after); //<- an empty date range produces null object - } - - /** - * Test using {@code DateRange}. - */ - @Test - public void testDateRangeStartIsNull() throws Exception { - final KeyAndDateRange object = new KeyAndDateRange(); - object.setVal(new DateRange(null, new Date())); - - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code DateRange}. - */ - @Test - public void testDateRangeEndIsNull() throws Exception { - final KeyAndDateRange object = new KeyAndDateRange(); - object.setVal(new DateRange(new Date(), null)); - - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code DateRange}. - */ - @Test - public void testAutoDateRange() throws Exception { - final KeyAndAutoDateRange object = new KeyAndAutoDateRange(); - - assertBeforeAndAfterChange(true, object); - } - - /** - * Test using {@code DateRange}. - */ - @Test(expected = DynamoDbMappingException.class) - public void testKeyAndUnknownAttribute() { - final KeyAndUnknownAttribute object = new KeyAndUnknownAttribute(); - object.setVal(new DateRange(new Date(), -60000L, 60000L)); - - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code DateRange}. - */ - @Test(expected = DynamoDbMappingException.class) - public void testDateRangeSet() { - final KeyAndDateRangeSet object = new KeyAndDateRangeSet(); - object.setVal(Collections.singleton(new DateRange(new Date(), -60000L, 60000L))); - - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code DateRange}. - */ - @Test(expected = DynamoDbMappingException.class) - public void testDateRangeList() { - final KeyAndDateRangeList object = new KeyAndDateRangeList(); - object.setVal(Collections.singletonList(new DateRange(new Date(), -60000L, 60000L))); - - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code Currency}. - */ - @Test - public void testKeyAndCurrencies() { - final KeyAndCurrencies object = new KeyAndCurrencies(); - object.setVal(new Currency(1000000D, "CAD")); - object.setOther(new Currency(99.99D, "USD")); - - assertBeforeAndAfterChange(false, object); - - final KeyAndCurrencies reload = util.load(object.getClass(), object.getKey()); - assertEquals(object.getVal(), reload.getVal()); - assertEquals(object.getOther(), reload.getOther()); - } - - /** - * test object. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndAuditRecord extends AutoKeyAndVal { - public AuditRecord getVal() { - return super.getVal(); - } - - @Override - public void setVal(final AuditRecord val) { - super.setVal(val); - } - } - - /** - * An object with {@code DateRange}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDateRange extends AutoKeyAndVal { - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "start", attributeName = "DateRangeStart"), - @DynamoDbAttribute(mappedBy = "end", attributeName = "DateRangeEnd")}) - public DateRange getVal() { - return super.getVal(); - } - - @Override - public void setVal(final DateRange val) { - super.setVal(val); - } - } - - /** - * An object with {@code DateRange}. 
- */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndAutoDateRange extends AutoKeyAndVal { - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "start", attributeName = "CreatedDate"), - @DynamoDbAttribute(mappedBy = "end", attributeName = "LastModifiedDate")}) - public AutoDateRange getVal() { - return super.getVal(); - } - - @Override - public void setVal(final AutoDateRange val) { - super.setVal(val); - } - - public static class AutoDateRange extends DateRange { - @DynamoDbAutoGeneratedTimestamp(strategy = DynamoDbAutoGenerateStrategy.CREATE) - public Date start() { - return super.getStart(); - } - - @DynamoDbAutoGeneratedTimestamp(strategy = DynamoDbAutoGenerateStrategy.ALWAYS) - public Date getEnd() { - return super.getEnd(); - } - } - } - - /** - * An object with {@code DateRange}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndUnknownAttribute extends AutoKeyAndVal { - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "xstart", attributeName = "DateRangeStart"), - @DynamoDbAttribute(mappedBy = "end", attributeName = "DateRangeEnd")}) - public DateRange getVal() { - return super.getVal(); - } - - @Override - public void setVal(final DateRange val) { - super.setVal(val); - } - } - - /** - * An object with {@code DateRange}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDateRangeSet extends AutoKeyAndVal> { - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "start", attributeName = "starts"), - @DynamoDbAttribute(mappedBy = "end", attributeName = "ends")}) - public Set getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Set val) { - super.setVal(val); - } - } - - /** - * An object with {@code DateRange}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDateRangeList extends AutoKeyAndVal> { - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "start", attributeName = "starts"), - @DynamoDbAttribute(mappedBy = "end", attributeName = "ends")}) - public List getVal() { - return super.getVal(); - } - - @Override - public void setVal(final List val) { - super.setVal(val); - } - } - - /** - * An object with {@code Currency}. 
- */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndCurrencies extends AutoKeyAndVal { - private Currency other; - - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "amount", attributeName = "firstAmount"), - @DynamoDbAttribute(mappedBy = "unit", attributeName = "firstUnit")}) - public Currency getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Currency val) { - super.setVal(val); - } - - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "amount", attributeName = "secondAmount"), - @DynamoDbAttribute(mappedBy = "unit", attributeName = "secondUnit")}) - public Currency getOther() { - return this.other; - } - - public void setOther(final Currency other) { - this.other = other; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/GenerateCreateTableRequestIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/GenerateCreateTableRequestIntegrationTest.java deleted file mode 100644 index 85c3589763ff..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/GenerateCreateTableRequestIntegrationTest.java +++ /dev/null @@ -1,260 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.util.Arrays; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.ImmutableObjectUtils; -import software.amazon.awssdk.services.dynamodb.TableUtils; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndexDescription; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndexDescription; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.services.dynamodb.model.TableDescription; -import software.amazon.awssdk.testutils.UnorderedCollectionComparator; -import utils.test.util.DynamoDBTestBase; - -/** - * Tests that the CreateTableRequest generated by DynamoDBMapper.generateCreateTableRequest - * correctly creates the expected table. 
- */ -public class GenerateCreateTableRequestIntegrationTest extends DynamoDBTestBase { - - private static final ProvisionedThroughput DEFAULT_CAPACITY = ProvisionedThroughput.builder().readCapacityUnits(5L).writeCapacityUnits(5L).build(); - private static DynamoDbMapper mapper; - private static Set testedTableName = new HashSet<>(); - - @BeforeClass - public static void setUp() throws Exception { - DynamoDBTestBase.setUpTestBase(); - mapper = new DynamoDbMapper(dynamo); - } - - @AfterClass - public static void tearDown() { - for (String tableName : testedTableName) { - dynamo.deleteTable(DeleteTableRequest.builder().tableName(tableName).build()); - } - } - - private static void setProvisionedThroughput(CreateTableRequest request, ProvisionedThroughput throughput) { - ImmutableObjectUtils.setObjectMember(request, "provisionedThroughput", throughput); - //request.setProvisionedThroughput(throughput); - if (request.globalSecondaryIndexes() != null) { - for (GlobalSecondaryIndex gsi : request.globalSecondaryIndexes()) { - ImmutableObjectUtils.setObjectMember(gsi, "provisionedThroughput", throughput); - //gsi.setProvisionedThroughput(throughput); - } - } - } - - private static boolean equalLsi(Collection a, Collection b) { - return UnorderedCollectionComparator.equalUnorderedCollections(a, b, new LocalSecondaryIndexDefinitionComparator()); - } - - private static boolean equalGsi(Collection a, Collection b) { - return UnorderedCollectionComparator.equalUnorderedCollections(a, b, new GlobalSecondaryIndexDefinitionComparator()); - } - - private static String appendCurrentTimeToTableName(CreateTableRequest request) { - String appendedName = String.format("%s-%d", request.tableName(), System.currentTimeMillis()); - ImmutableObjectUtils.setObjectMember(request, "tableName", appendedName); - /// /request.setTableName(appendedName); - return appendedName; - } - - @Test - public void testParseIndexRangeKeyClass() throws Exception { - CreateTableRequest request = mapper.generateCreateTableRequest(IndexRangeKeyClass.class); - String createdTableName = appendCurrentTimeToTableName(request); - testedTableName.add(createdTableName); - setProvisionedThroughput(request, DEFAULT_CAPACITY); - - TableDescription createdTableDescription = dynamo.createTable(request).tableDescription(); - - assertEquals(createdTableName, createdTableDescription.tableName()); - List expectedKeyElements = Arrays.asList( - KeySchemaElement.builder().attributeName("key").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("rangeKey").keyType(KeyType.RANGE).build() - ); - assertEquals(expectedKeyElements, createdTableDescription.keySchema()); - - List expectedAttrDefinitions = Arrays.asList( - AttributeDefinition.builder().attributeName("key").attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName("rangeKey").attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName("indexFooRangeKey").attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName("indexBarRangeKey").attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName("multipleIndexRangeKey").attributeType(ScalarAttributeType.N).build() - ); - assertTrue(UnorderedCollectionComparator.equalUnorderedCollections( - expectedAttrDefinitions, - createdTableDescription.attributeDefinitions())); - - List expectedLsi = Arrays.asList( - LocalSecondaryIndex.builder() - .indexName("index_foo") - .keySchema( - 
KeySchemaElement.builder().attributeName("key").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexFooRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("index_bar") - .keySchema( - KeySchemaElement.builder().attributeName("key").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexBarRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("index_foo_copy") - .keySchema( - KeySchemaElement.builder().attributeName("key").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("multipleIndexRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("index_bar_copy") - .keySchema( - KeySchemaElement.builder().attributeName("key").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("multipleIndexRangeKey").keyType(KeyType.RANGE).build()).build()); - assertTrue(equalLsi(expectedLsi, createdTableDescription.localSecondaryIndexes())); - - assertNull(request.globalSecondaryIndexes()); - assertEquals(DEFAULT_CAPACITY, request.provisionedThroughput()); - - // Only one table with indexes can be created simultaneously - TableUtils.waitUntilActive(dynamo, createdTableName); - } - - @Test - public void testComplexIndexedHashRangeClass() throws Exception { - CreateTableRequest request = mapper.generateCreateTableRequest(MapperQueryExpressionTest.HashRangeClass.class); - String createdTableName = appendCurrentTimeToTableName(request); - testedTableName.add(createdTableName); - setProvisionedThroughput(request, DEFAULT_CAPACITY); - - TableDescription createdTableDescription = dynamo.createTable(request).tableDescription(); - - assertEquals(createdTableName, createdTableDescription.tableName()); - List expectedKeyElements = Arrays.asList( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("primaryRangeKey").keyType(KeyType.RANGE).build() - ); - assertEquals(expectedKeyElements, createdTableDescription.keySchema()); - - List expectedAttrDefinitions = Arrays.asList( - AttributeDefinition.builder().attributeName("primaryHashKey").attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName("indexHashKey").attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName("primaryRangeKey").attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName("indexRangeKey").attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName("anotherIndexRangeKey").attributeType(ScalarAttributeType.S).build() - ); - assertTrue(UnorderedCollectionComparator.equalUnorderedCollections( - expectedAttrDefinitions, - createdTableDescription.attributeDefinitions())); - - List expectedLsi = Arrays.asList( - LocalSecondaryIndex.builder() - .indexName("LSI-primary-range") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("primaryRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("LSI-index-range-1") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("LSI-index-range-2") - 
.keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("LSI-index-range-3") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("anotherIndexRangeKey").keyType(KeyType.RANGE).build()).build()); - assertTrue(equalLsi(expectedLsi, createdTableDescription.localSecondaryIndexes())); - - List expectedGsi = Arrays.asList( - GlobalSecondaryIndex.builder() - .indexName("GSI-primary-hash-index-range-1") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexRangeKey").keyType(KeyType.RANGE).build()).build(), - GlobalSecondaryIndex.builder() - .indexName("GSI-primary-hash-index-range-2") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("anotherIndexRangeKey").keyType(KeyType.RANGE).build()).build(), - GlobalSecondaryIndex.builder() - .indexName("GSI-index-hash-primary-range") - .keySchema( - KeySchemaElement.builder().attributeName("indexHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("primaryRangeKey").keyType(KeyType.RANGE).build()).build(), - GlobalSecondaryIndex.builder() - .indexName("GSI-index-hash-index-range-1") - .keySchema( - KeySchemaElement.builder().attributeName("indexHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexRangeKey").keyType(KeyType.RANGE).build()).build(), - GlobalSecondaryIndex.builder() - .indexName("GSI-index-hash-index-range-2") - .keySchema( - KeySchemaElement.builder().attributeName("indexHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexRangeKey").keyType(KeyType.RANGE).build()).build()); - assertTrue(equalGsi(expectedGsi, createdTableDescription.globalSecondaryIndexes())); - - assertEquals(DEFAULT_CAPACITY, request.provisionedThroughput()); - - // Only one table with indexes can be created simultaneously - TableUtils.waitUntilActive(dynamo, createdTableName); - } - - private static class LocalSecondaryIndexDefinitionComparator - implements - UnorderedCollectionComparator.CrossTypeComparator { - - @Override - public boolean equals(LocalSecondaryIndex a, LocalSecondaryIndexDescription b) { - return a.indexName().equals(b.indexName()) - && a.keySchema().equals(b.keySchema()); - } - - } - - private static class GlobalSecondaryIndexDefinitionComparator - implements - UnorderedCollectionComparator.CrossTypeComparator { - - @Override - public boolean equals(GlobalSecondaryIndex a, GlobalSecondaryIndexDescription b) { - return a.indexName().equals(b.indexName()) - && a.keySchema().equals(b.keySchema()); - } - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/HashKeyOnlyTableWithGSIIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/HashKeyOnlyTableWithGSIIntegrationTest.java deleted file mode 100644 index 07c67da56270..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/HashKeyOnlyTableWithGSIIntegrationTest.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; - -import java.util.ArrayList; -import java.util.List; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.TableUtils; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbIndexHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbIndexRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbQueryExpression; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.PaginatedQueryList; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.Projection; -import software.amazon.awssdk.services.dynamodb.model.ProjectionType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import utils.test.util.DynamoDBTestBase; - -/** - * Integration test for GSI support with a table that has no primary range key (only a primary hash key). 
- */ -public class HashKeyOnlyTableWithGSIIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - public static final String HASH_KEY_ONLY_TABLE_NAME = "no-primary-range-key-gsi-test"; - - - @BeforeClass - public static void setUp() throws Exception { - DynamoDBTestBase.setUpTestBase(); - List keySchema = new ArrayList(); - keySchema.add(KeySchemaElement.builder().attributeName("id").keyType(KeyType.HASH).build()); - - CreateTableRequest req = CreateTableRequest.builder() - .tableName(HASH_KEY_ONLY_TABLE_NAME) - .keySchema(keySchema) - .provisionedThroughput(ProvisionedThroughput.builder().readCapacityUnits(10L).writeCapacityUnits(10L).build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName("id").attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName("status").attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName("ts").attributeType(ScalarAttributeType.S).build()) - .globalSecondaryIndexes( - GlobalSecondaryIndex.builder() - .provisionedThroughput(ProvisionedThroughput.builder().readCapacityUnits(10L).writeCapacityUnits(10L).build()) - .indexName("statusAndCreation") - .keySchema( - KeySchemaElement.builder().attributeName("status").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("ts").keyType(KeyType.RANGE).build()) - .projection( - Projection.builder().projectionType(ProjectionType.ALL).build()).build()).build(); - - TableUtils.createTableIfNotExists(dynamo, req); - TableUtils.waitUntilActive(dynamo, HASH_KEY_ONLY_TABLE_NAME); - } - - @AfterClass - public static void tearDown() throws Exception { - dynamo.deleteTable(DeleteTableRequest.builder().tableName(HASH_KEY_ONLY_TABLE_NAME).build()); - } - - /** - * Tests that we can query using the hash/range GSI on our hash-key only table. 
- */ - @Test - public void testGSIQuery() throws Exception { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - String status = "foo-status"; - - User user = new User(); - user.setId("123"); - user.setStatus(status); - user.setTs("321"); - mapper.save(user); - - DynamoDbQueryExpression expr = new DynamoDbQueryExpression() - .withIndexName("statusAndCreation") - .withLimit(100) - .withConsistentRead(false) - .withHashKeyValues(user) - .withRangeKeyCondition("ts", - Condition.builder() - .comparisonOperator(ComparisonOperator.GT) - .attributeValueList(AttributeValue.builder().s("100").build()).build()); - - PaginatedQueryList query = mapper.query(User.class, expr); - assertEquals(1, query.size()); - assertEquals(status, query.get(0).getStatus()); - } - - @DynamoDbTable(tableName = HASH_KEY_ONLY_TABLE_NAME) - public static class User { - private String id; - private String status; - private String ts; - - @DynamoDbHashKey - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - @DynamoDbIndexHashKey(globalSecondaryIndexName = "statusAndCreation") - public String getStatus() { - return status; - } - - public void setStatus(String status) { - this.status = status; - } - - @DynamoDbIndexRangeKey(globalSecondaryIndexName = "statusAndCreation") - public String getTs() { - return ts; - } - - public void setTs(String ts) { - this.ts = ts; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/IndexRangeKeyAttributesIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/IndexRangeKeyAttributesIntegrationTest.java deleted file mode 100644 index 7733f9d575bd..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/IndexRangeKeyAttributesIntegrationTest.java +++ /dev/null @@ -1,439 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -/* - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://aws.amazon.com/apache2.0 - * - * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES - * OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and - * limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.math.BigDecimal; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.ConsistentRead; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbQueryExpression; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; - -/** - * Tests that index range keys are properly handled as common attribute - * when items are loaded, saved/updated by using primary key. - * Also tests using index range keys for queries. - */ -public class IndexRangeKeyAttributesIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String RANGE_KEY = "rangeKey"; - private static final String INDEX_FOO_RANGE_KEY = "indexFooRangeKey"; - private static final String INDEX_BAR_RANGE_KEY = "indexBarRangeKey"; - private static final String MULTIPLE_INDEX_RANGE_KEY = "multipleIndexRangeKey"; - private static final String FOO_ATTRIBUTE = "fooAttribute"; - private static final String BAR_ATTRIBUTE = "barAttribute"; - private static final String VERSION_ATTRIBUTE = "version"; - private static final List> attrs = new LinkedList>(); - private static final List hashKeyValues = new LinkedList(); - private static final int totalHash = 5; - private static final int rangePerHash = 64; - private static final int indexFooRangeStep = 2; - private static final int indexBarRangeStep = 4; - private static final int multipleIndexRangeStep = 8; - private static DynamoDbMapper mapper; - // We don't start with the current system millis like other tests because - // it's out of the range of some data types - private static int start = 1; - - // Test data - static { - for (int i = 0; i < totalHash; i++) { - long hashKeyValue = startKey++; - hashKeyValues.add(hashKeyValue); - for (int j = 0; j < rangePerHash; j++) { - Map attr = new HashMap(); - attr.put(KEY_NAME, AttributeValue.builder().n("" + hashKeyValue).build()); - attr.put(RANGE_KEY, AttributeValue.builder().n("" + j).build()); - if (j % indexFooRangeStep == 0) { - attr.put(INDEX_FOO_RANGE_KEY, AttributeValue.builder().n("" + j).build()); - } - if (j % indexBarRangeStep == 0) { - attr.put(INDEX_BAR_RANGE_KEY, AttributeValue.builder().n("" + j).build()); - } - if (j % multipleIndexRangeStep == 0) { - attr.put(MULTIPLE_INDEX_RANGE_KEY, AttributeValue.builder().n("" + j).build()); - } - attr.put(FOO_ATTRIBUTE, AttributeValue.builder().s(UUID.randomUUID().toString()).build()); - attr.put(BAR_ATTRIBUTE, AttributeValue.builder().s(UUID.randomUUID().toString()).build()); - 
attr.put(VERSION_ATTRIBUTE, AttributeValue.builder().n("1").build()); - - attrs.add(attr); - } - } - } - - ; - - @BeforeClass - public static void setUp() throws Exception { - boolean recreateTable = false; - setUpTableWithIndexRangeAttribute(recreateTable); - - // Insert the data - for (Map attr : attrs) { - dynamo.putItem(PutItemRequest.builder().tableName(TABLE_WITH_INDEX_RANGE_ATTRIBUTE).item(attr).build()); - } - - mapper = new DynamoDbMapper(dynamo, - new DynamoDbMapperConfig(ConsistentRead.CONSISTENT)); - } - - /** - * Tests that attribute annotated with @DynamoDBIndexRangeKey is properly set in the loaded object. - */ - @Test - public void testLoad() throws Exception { - for (Map attr : attrs) { - IndexRangeKeyClass x = mapper.load(newIndexRangeKey(Long.parseLong(attr.get(KEY_NAME).n()), - Double.parseDouble(attr.get(RANGE_KEY).n()))); - - // Convert all numbers to the most inclusive type for easy - // comparison - assertEquals(new BigDecimal(x.getKey()), new BigDecimal(attr.get(KEY_NAME).n())); - assertEquals(new BigDecimal(x.getRangeKey()), new BigDecimal(attr.get(RANGE_KEY).n())); - if (null == attr.get(INDEX_FOO_RANGE_KEY)) { - assertNull(x.getIndexFooRangeKeyWithFakeName()); - } else { - assertEquals(new BigDecimal(x.getIndexFooRangeKeyWithFakeName()), - new BigDecimal(attr.get(INDEX_FOO_RANGE_KEY).n())); - } - if (null == attr.get(INDEX_BAR_RANGE_KEY)) { - assertNull(x.getIndexBarRangeKey()); - } else { - assertEquals(new BigDecimal(x.getIndexBarRangeKey()), new BigDecimal(attr.get(INDEX_BAR_RANGE_KEY).n())); - } - assertEquals(new BigDecimal(x.getVersion()), new BigDecimal(attr.get(VERSION_ATTRIBUTE).n())); - assertEquals(x.getFooAttribute(), attr.get(FOO_ATTRIBUTE).s()); - assertEquals(x.getBarAttribute(), attr.get(BAR_ATTRIBUTE).s()); - - } - } - - private IndexRangeKeyClass newIndexRangeKey(long hashKey, double rangeKey) { - IndexRangeKeyClass obj = new IndexRangeKeyClass(); - obj.setKey(hashKey); - obj.setRangeKey(rangeKey); - return obj; - } - - /** - * Tests that attribute annotated with @DynamoDBIndexRangeKey is properly saved. - */ - @Test - public void testSave() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - IndexRangeKeyClass obj = getUniqueObject(); - objs.add(obj); - } - - for (IndexRangeKeyClass obj : objs) { - mapper.save(obj); - } - - for (IndexRangeKeyClass obj : objs) { - IndexRangeKeyClass loaded = mapper.load(IndexRangeKeyClass.class, obj.getKey(), obj.getRangeKey()); - assertEquals(obj, loaded); - } - } - - /** - * Tests that version attribute is still working as expected. - */ - @Test - public void testUpdate() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - IndexRangeKeyClass obj = getUniqueObject(); - objs.add(obj); - } - - for (IndexRangeKeyClass obj : objs) { - mapper.save(obj); - } - - for (IndexRangeKeyClass obj : objs) { - IndexRangeKeyClass replacement = getUniqueObject(); - replacement.setKey(obj.getKey()); - replacement.setRangeKey(obj.getRangeKey()); - replacement.setVersion(obj.getVersion()); - mapper.save(replacement); - - IndexRangeKeyClass loadedObject = mapper.load(IndexRangeKeyClass.class, obj.getKey(), obj.getRangeKey()); - assertEquals(replacement, loadedObject); - - // If we try to update the old version, we should get an error - replacement.setVersion(replacement.getVersion() - 1); - try { - mapper.save(replacement); - fail("Should have thrown an exception"); - } catch (Exception expected) { - // Ignored or expected. 
- } - } - } - - /** - * Tests making queries on local secondary index - */ - @Test - public void testQueryWithIndexRangekey() { - int indexFooRangePerHash = rangePerHash / indexFooRangeStep; - int indexBarRangePerHash = rangePerHash / indexBarRangeStep; - for (long hashKeyValue : hashKeyValues) { - IndexRangeKeyClass hashKeyItem = new IndexRangeKeyClass(); - hashKeyItem.setKey(hashKeyValue); - - /** - * Query items by primary range key - */ - List result = mapper.query(IndexRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyItem) - .withRangeKeyCondition(RANGE_KEY, - Condition.builder() - .attributeValueList( - AttributeValue.builder() - .n("0").build()) - .comparisonOperator( - ComparisonOperator.GE - .toString()).build())); - assertTrue(rangePerHash == result.size()); - // check that all attributes are retrieved - for (IndexRangeKeyClass itemInFooIndex : result) { - assertNotNull(itemInFooIndex.getFooAttribute()); - assertNotNull(itemInFooIndex.getBarAttribute()); - } - - /** - * Query items on index_foo - */ - result = mapper.query(IndexRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyItem) - .withRangeKeyCondition(INDEX_FOO_RANGE_KEY, - Condition.builder() - .attributeValueList(AttributeValue.builder().n("0").build()) - .comparisonOperator( - ComparisonOperator.GE.toString()).build())); - assertTrue(indexFooRangePerHash == result.size()); - // check that only the projected attributes are retrieved - for (IndexRangeKeyClass itemInFooIndex : result) { - assertNotNull(itemInFooIndex.getFooAttribute()); - assertNull(itemInFooIndex.getBarAttribute()); - } - - /** - * Query items on index_bar - */ - result = mapper.query(IndexRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyItem) - .withRangeKeyCondition(INDEX_BAR_RANGE_KEY, - Condition.builder() - .attributeValueList(AttributeValue.builder().n("0").build()) - .comparisonOperator( - ComparisonOperator.GE.toString()).build())); - assertTrue(indexBarRangePerHash == result.size()); - // check that only the projected attributes are retrieved - for (IndexRangeKeyClass itemInBarIndex : result) { - assertNull(itemInBarIndex.getFooAttribute()); - assertNotNull(itemInBarIndex.getBarAttribute()); - } - } - } - - /** - * Tests the exception when user specifies an invalid range key name in the query. - */ - @Test - public void testInvalidRangeKeyNameException() { - IndexRangeKeyClass hashKeyItem = new IndexRangeKeyClass(); - hashKeyItem.setKey(0); - try { - mapper.query(IndexRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyItem) - .withRangeKeyCondition("some_range_key", - Condition.builder() - .attributeValueList(AttributeValue.builder().n("0").build()) - .comparisonOperator(ComparisonOperator.GE.toString()).build())); - fail("some_range_key is not a valid range key name."); - } catch (DynamoDbMappingException e) { - System.out.println(e.getMessage()); - } catch (Exception e) { - fail("Should trigger an DynamoDBMappingException."); - } - } - - /** - * Tests the exception when user specifies an invalid index name in the query. 
- */ - @Test - public void testInvalidIndexNameException() { - IndexRangeKeyClass hashKeyItem = new IndexRangeKeyClass(); - hashKeyItem.setKey(0); - try { - mapper.query(IndexRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyItem) - .withRangeKeyCondition(INDEX_BAR_RANGE_KEY, - Condition.builder() - .attributeValueList(AttributeValue.builder().n("0").build()) - .comparisonOperator(ComparisonOperator.GE.toString()).build()) - .withIndexName("some_index")); - fail("some_index is not a valid index name."); - } catch (IllegalArgumentException iae) { - System.out.println(iae.getMessage()); - } catch (Exception e) { - fail("Should trigger an IllegalArgumentException."); - } - } - - /** - * Tests making queries by using range key that is shared by multiple indexes. - */ - @Test - public void testQueryWithRangeKeyForMultipleIndexes() { - int multipleIndexRangePerHash = rangePerHash / multipleIndexRangeStep; - for (long hashKeyValue : hashKeyValues) { - IndexRangeKeyClass hashKeyItem = new IndexRangeKeyClass(); - hashKeyItem.setKey(hashKeyValue); - - /** - * Query items by a range key that is shared by multiple indexes - */ - List result = mapper.query(IndexRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyItem) - .withRangeKeyCondition(MULTIPLE_INDEX_RANGE_KEY, - Condition.builder() - .attributeValueList( - AttributeValue.builder() - .n("0").build()) - .comparisonOperator( - ComparisonOperator.GE - .toString()).build()) - .withIndexName("index_foo_copy")); - assertTrue(multipleIndexRangePerHash == result.size()); - // check that only the projected attributes are retrieved - for (IndexRangeKeyClass itemInFooIndex : result) { - assertNotNull(itemInFooIndex.getFooAttribute()); - assertNull(itemInFooIndex.getBarAttribute()); - } - result = mapper.query(IndexRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyItem) - .withRangeKeyCondition(MULTIPLE_INDEX_RANGE_KEY, - Condition.builder() - .attributeValueList(AttributeValue.builder().n("0").build()) - .comparisonOperator( - ComparisonOperator.GE.toString()).build()) - .withIndexName("index_bar_copy")); - assertTrue(multipleIndexRangePerHash == result.size()); - // check that only the projected attributes are retrieved - for (IndexRangeKeyClass itemInFooIndex : result) { - assertNull(itemInFooIndex.getFooAttribute()); - assertNotNull(itemInFooIndex.getBarAttribute()); - } - - /** - * Exception when user doesn't specify which index to use - */ - try { - mapper.query(IndexRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyItem) - .withRangeKeyCondition(MULTIPLE_INDEX_RANGE_KEY, - Condition.builder() - .attributeValueList(AttributeValue.builder().n("0").build()) - .comparisonOperator(ComparisonOperator.GE.toString()).build())); - fail("No index name is specified when query with a range key shared by multiple indexes"); - } catch (IllegalArgumentException iae) { - System.out.println(iae.getMessage()); - } catch (Exception e) { - fail("Should trigger an IllegalArgumentException."); - } - - /** - * Exception when user uses an invalid index name - */ - try { - mapper.query(IndexRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyItem) - .withRangeKeyCondition(MULTIPLE_INDEX_RANGE_KEY, - Condition.builder() - .attributeValueList(AttributeValue.builder().n("0").build()) - .comparisonOperator(ComparisonOperator.GE.toString()).build()) - .withIndexName("index_foo")); - fail("index_foo is not annotated as part of the 
localSecondaryIndexNames in " + - "the @DynamoDBIndexRangeKey annotation of multipleIndexRangeKey"); - } catch (IllegalArgumentException iae) { - System.out.println(iae.getMessage()); - } catch (Exception e) { - fail("Should trigger an IllegalArgumentException."); - } - } - - } - - - private IndexRangeKeyClass getUniqueObject() { - IndexRangeKeyClass obj = new IndexRangeKeyClass(); - obj.setKey(startKey++); - obj.setRangeKey((double) start++); - obj.setIndexFooRangeKeyWithFakeName((double) start++); - obj.setIndexBarRangeKey((double) start++); - obj.setFooAttribute("" + startKey++); - obj.setBarAttribute("" + startKey++); - return obj; - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/InheritanceIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/InheritanceIntegrationTest.java deleted file mode 100644 index ec8097b12cf0..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/InheritanceIntegrationTest.java +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; - -import java.util.ArrayList; -import java.util.List; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -/** - * Tests inheritance behavior in DynamoDB mapper. 
- */ -public class InheritanceIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - @Test - public void testSubClass() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - SubClass obj = getUniqueObject(new SubClass()); - obj.setSubField("" + startKey++); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (Object obj : objs) { - util.save(obj); - assertEquals(util.load(SubClass.class, ((SubClass) obj).getKey()), obj); - } - } - - @Test - public void testSubsubClass() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - SubsubClass obj = getUniqueObject(new SubsubClass()); - obj.setSubField("" + startKey++); - obj.setSubsubField("" + startKey++); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (SubsubClass obj : objs) { - util.save(obj); - assertEquals(util.load(SubsubClass.class, obj.getKey()), obj); - } - } - - @Test(expected = DynamoDbMappingException.class) - public void testImplementation() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - Implementation obj = new Implementation(); - obj.setKey("" + startKey++); - obj.setAttribute("" + startKey++); - objs.add(obj); - } - - // Saving new objects with a null version field should populate it - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (Interface obj : objs) { - util.save(obj); - assertEquals(util.load(Implementation.class, obj.getKey()), obj); - } - } - - private T getUniqueObject(T obj) { - obj.setKey("" + startKey++); - obj.setNormalStringAttribute("" + startKey++); - return obj; - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static interface Interface { - - @DynamoDbHashKey - public String getKey(); - - public void setKey(String key); - - @DynamoDbAttribute - public String getAttribute(); - - public void setAttribute(String attribute); - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class BaseClass { - - protected String key; - protected String normalStringAttribute; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAttribute - public String getNormalStringAttribute() { - return normalStringAttribute; - } - - public void setNormalStringAttribute(String normalStringAttribute) { - this.normalStringAttribute = normalStringAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((normalStringAttribute == null) ? 
0 : normalStringAttribute.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - BaseClass other = (BaseClass) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (normalStringAttribute == null) { - if (other.normalStringAttribute != null) { - return false; - } - } else if (!normalStringAttribute.equals(other.normalStringAttribute)) { - return false; - } - return true; - } - } - - public static class SubClass extends BaseClass { - - private String subField; - - public String getSubField() { - return subField; - } - - public void setSubField(String subField) { - this.subField = subField; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((subField == null) ? 0 : subField.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - SubClass other = (SubClass) obj; - if (subField == null) { - if (other.subField != null) { - return false; - } - } else if (!subField.equals(other.subField)) { - return false; - } - return true; - } - - } - - public static class SubsubClass extends SubClass { - - private String subsubField; - - public String getSubsubField() { - return subsubField; - } - - public void setSubsubField(String subsubField) { - this.subsubField = subsubField; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((subsubField == null) ? 0 : subsubField.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - SubsubClass other = (SubsubClass) obj; - if (subsubField == null) { - if (other.subsubField != null) { - return false; - } - } else if (!subsubField.equals(other.subsubField)) { - return false; - } - return true; - } - } - - public static class Implementation implements Interface { - - private String key; - private String attribute; - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getAttribute() { - return attribute; - } - - public void setAttribute(String attribute) { - this.attribute = attribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((attribute == null) ? 0 : attribute.hashCode()); - result = prime * result + ((key == null) ? 
0 : key.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - Implementation other = (Implementation) obj; - if (attribute == null) { - if (other.attribute != null) { - return false; - } - } else if (!attribute.equals(other.attribute)) { - return false; - } - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - return true; - } - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/KeyOnlyPutIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/KeyOnlyPutIntegrationTest.java deleted file mode 100644 index 256670bfaaa3..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/KeyOnlyPutIntegrationTest.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.fail; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbSaveExpression; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; -import utils.test.util.DynamoDBIntegrationTestBase; - -public class KeyOnlyPutIntegrationTest extends DynamoDBIntegrationTestBase { - @Test - public void testKeyOnlyPut() throws Exception { - /* - * Testing this scenario - * (1) An empty table with the schema: - * - * "key" (HASH) - * - * (2) A POJO class: - * "key" (HASH), "attribute" (NON-KEY) - * - * (3) Save operation by some user: - * - item : {"key" : "some value"} - * - user-specified expected values : {"attribute" : {Exist : true}} - * - SaveBehavior : UPDATE (default) - * - * (4) Expected behavior - * ConditionalCheckFailedException, and the table should remain empty. 
- */ - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - HashAndAttribute obj = getUniqueObject(new HashAndAttribute()); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (HashAndAttribute obj : objs) { - try { - DynamoDbSaveExpression saveExpression = new DynamoDbSaveExpression(); - Map expected = new HashMap(); - ExpectedAttributeValue expectedVersion = ExpectedAttributeValue.builder() - .value(AttributeValue.builder() - .s("SomeNonExistantValue").build()) - .exists(true).build(); - expected.put("normalStringAttribute", expectedVersion); - saveExpression.setExpected(expected); - - util.save(obj, saveExpression); - fail("This should fail, expected clause should block an insert."); - } catch (ConditionalCheckFailedException e) { - // Ignored or expected. - } - assertNull(util.load(HashAndAttribute.class, obj.getKey())); - - //this should succeed without the expected clause - obj.setNormalStringAttribute("to-be-deleted"); - util.save(obj); - obj.setNormalStringAttribute(null); - util.save(obj); - Object loaded = util.load(HashAndAttribute.class, obj.getKey()); - assertEquals("Expected " + obj.toString() + ", but was " + loaded.toString(), obj, loaded); - } - } - - private T getUniqueObject(T obj) { - obj.setKey("" + startKey++); - return obj; - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class HashAndAttribute { - - protected String key; - protected String normalStringAttribute; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAttribute - public String normalStringAttribute() { - return normalStringAttribute; - } - - public void setNormalStringAttribute(String normalStringAttribute) { - this.normalStringAttribute = normalStringAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((normalStringAttribute == null) ? 0 : normalStringAttribute.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - HashAndAttribute other = (HashAndAttribute) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (normalStringAttribute == null) { - if (other.normalStringAttribute != null) { - return false; - } - } else if (!normalStringAttribute.equals(other.normalStringAttribute)) { - return false; - } - return true; - } - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/MapperLoadingStrategyConfigIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/MapperLoadingStrategyConfigIntegrationTest.java deleted file mode 100644 index d466637c1dce..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/MapperLoadingStrategyConfigIntegrationTest.java +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.lang.reflect.Field; -import java.util.ArrayList; -import java.util.List; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.ConsistentRead; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.PaginationLoadingStrategy; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbQueryExpression; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbScanExpression; -import software.amazon.awssdk.services.dynamodb.datamodeling.PaginatedList; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.pojos.RangeKeyClass; - -/** - * Integration tests for PaginationLoadingStrategy configuration - */ -public class MapperLoadingStrategyConfigIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static long hashKey = System.currentTimeMillis(); - private static int PAGE_SIZE = 5; - private static int PARALLEL_SEGMENT = 3; - private static int OBJECTS_NUM = 50; - private static int RESULTS_NUM = OBJECTS_NUM - 2; // condition: rangeKey > 1.0 - - @BeforeClass - public static void setUp() throws Exception { - setUpTableWithRangeAttribute(); - createTestData(); - } - - private static void createTestData() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - - List objs = new ArrayList(); - for (int i = 0; i < OBJECTS_NUM; i++) { - RangeKeyClass obj = new RangeKeyClass(); - obj.setKey(hashKey); - obj.setRangeKey(i); - objs.add(obj); - } - - mapper.batchSave(objs); - } - - private static PaginatedList getTestPaginatedQueryList(PaginationLoadingStrategy paginationLoadingStrategy) { - DynamoDbMapperConfig mapperConfig = new DynamoDbMapperConfig(ConsistentRead.CONSISTENT); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo, mapperConfig); - - // Construct the query expression for the tested hash-key value and any range-key value greater that 1.0 - RangeKeyClass keyObject = new RangeKeyClass(); - keyObject.setKey(hashKey); - DynamoDbQueryExpression queryExpression = new DynamoDbQueryExpression() - .withHashKeyValues(keyObject); - queryExpression.withRangeKeyCondition("rangeKey", - Condition.builder().comparisonOperator(ComparisonOperator.GT.toString()) - .attributeValueList( - AttributeValue.builder().n("1.0").build()).build()).withLimit(PAGE_SIZE); - - return mapper.query(RangeKeyClass.class, queryExpression, new 
DynamoDbMapperConfig(paginationLoadingStrategy)); - } - - private static PaginatedList getTestPaginatedScanList(PaginationLoadingStrategy paginationLoadingStrategy) { - DynamoDbMapperConfig mapperConfig = new DynamoDbMapperConfig(ConsistentRead.CONSISTENT); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo, mapperConfig); - - // Construct the scan expression with the exact same conditions - DynamoDbScanExpression scanExpression = new DynamoDbScanExpression(); - scanExpression.addFilterCondition("key", - Condition.builder().comparisonOperator(ComparisonOperator.EQ).attributeValueList( - AttributeValue.builder().n(Long.toString(hashKey)).build()).build()); - scanExpression.addFilterCondition("rangeKey", - Condition.builder().comparisonOperator(ComparisonOperator.GT).attributeValueList( - AttributeValue.builder().n("1.0").build()).build()); - scanExpression.setLimit(PAGE_SIZE); - - return mapper.scan(RangeKeyClass.class, scanExpression, new DynamoDbMapperConfig(paginationLoadingStrategy)); - } - - private static PaginatedList getTestPaginatedParallelScanList( - PaginationLoadingStrategy paginationLoadingStrategy) { - DynamoDbMapperConfig mapperConfig = new DynamoDbMapperConfig(ConsistentRead.CONSISTENT); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo, mapperConfig); - - // Construct the scan expression with the exact same conditions - DynamoDbScanExpression scanExpression = new DynamoDbScanExpression(); - scanExpression.addFilterCondition("key", - Condition.builder().comparisonOperator(ComparisonOperator.EQ).attributeValueList( - AttributeValue.builder().n(Long.toString(hashKey)).build()).build()); - scanExpression.addFilterCondition("rangeKey", - Condition.builder().comparisonOperator(ComparisonOperator.GT).attributeValueList( - AttributeValue.builder().n("1.0").build()).build()); - scanExpression.setLimit(PAGE_SIZE); - - return mapper.parallelScan(RangeKeyClass.class, scanExpression, PARALLEL_SEGMENT, - new DynamoDbMapperConfig(paginationLoadingStrategy)); - } - - private static void testAllPaginatedListOperations(PaginatedList list) { - - // (1) isEmpty() - assertFalse(list.isEmpty()); - - // (2) get(int n) - assertNotNull(list.get(RESULTS_NUM / 2)); - - // (3) contains(Object org0) - RangeKeyClass obj = new RangeKeyClass(); - obj.setKey(hashKey); - obj.setRangeKey(0); - assertFalse(list.contains(obj)); - obj.setRangeKey(2); - assertTrue(list.contains(obj)); - - // (4) subList(int org0, int arg1) - List subList = list.subList(0, RESULTS_NUM); - assertEquals(RESULTS_NUM, subList.size()); - try { - list.subList(0, RESULTS_NUM + 1); - fail("IndexOutOfBoundsException is IndexOutOfBoundsException but not thrown"); - } catch (IndexOutOfBoundsException e) { - // Ignored or expected. 
- } - - // (5) indexOf(Object org0) - assertTrue(list.indexOf(obj) < RESULTS_NUM); - - // (6) loadAllResults() - list.loadAllResults(); - - // (7) size() - assertEquals(RESULTS_NUM, list.size()); - - } - - private static void testPaginatedListIterator(PaginatedList list) { - for (RangeKeyClass item : list) { - assertEquals(hashKey, item.getKey()); - assertTrue(item.getRangeKey() < OBJECTS_NUM); - } - - // make sure the list could be iterated again - for (RangeKeyClass item : list) { - assertEquals(hashKey, item.getKey()); - assertTrue(item.getRangeKey() < OBJECTS_NUM); - } - } - - private static void testIterationOnlyPaginatedListOperations(PaginatedList list) { - - // Unsupported operations - - // (1) isEmpty() - try { - list.isEmpty(); - fail("UnsupportedOperationException expected but is not thrown"); - } catch (UnsupportedOperationException e) { - // Ignored or expected. - } - - // (2) get(int n) - try { - list.get(RESULTS_NUM / 2); - fail("UnsupportedOperationException expected but is not thrown"); - } catch (UnsupportedOperationException e) { - // Ignored or expected. - } - - // (3) contains(Object org0) - try { - list.contains(new RangeKeyClass()); - fail("UnsupportedOperationException expected but is not thrown"); - } catch (UnsupportedOperationException e) { - // Ignored or expected. - } - - // (4) subList(int org0, int arg1) - try { - list.subList(0, RESULTS_NUM); - fail("UnsupportedOperationException expected but is not thrown"); - } catch (UnsupportedOperationException e) { - // Ignored or expected. - } - - // (5) indexOf(Object org0) - try { - list.indexOf(new RangeKeyClass()); - fail("UnsupportedOperationException expected but is not thrown"); - } catch (UnsupportedOperationException e) { - // Ignored or expected. - } - - // (6) loadAllResults() - try { - list.loadAllResults(); - fail("UnsupportedOperationException expected but is not thrown"); - } catch (UnsupportedOperationException e) { - // Ignored or expected. - } - - // (7) size() - try { - list.size(); - fail("UnsupportedOperationException expected but is not thrown"); - } catch (UnsupportedOperationException e) { - // Ignored or expected. - } - ; - - // Could be iterated once - for (RangeKeyClass item : list) { - assertEquals(hashKey, item.getKey()); - assertTrue(item.getRangeKey() < OBJECTS_NUM); - // At most one page of results in memeory - assertTrue(loadedResultsNumber(list) <= PAGE_SIZE); - } - - // not twice - try { - for (@SuppressWarnings("unused") RangeKeyClass item : list) { - fail("UnsupportedOperationException expected but is not thrown"); - } - } catch (UnsupportedOperationException e) { - // Ignored or expected. 
- } - - } - - /** - * Use reflection to get the size of the private allResults field - **/ - @SuppressWarnings("unchecked") - private static int loadedResultsNumber(PaginatedList list) { - Field privateAllResults = null; - try { - privateAllResults = list.getClass().getSuperclass().getDeclaredField("allResults"); - } catch (SecurityException e) { - fail(e.getMessage()); - } catch (NoSuchFieldException e) { - fail(e.getMessage()); - } - privateAllResults.setAccessible(true); - List allResults = null; - try { - allResults = (List) privateAllResults.get(list); - } catch (IllegalArgumentException e) { - fail(e.getMessage()); - } catch (IllegalAccessException e) { - fail(e.getMessage()); - } - return allResults.size(); - } - - @Test - public void testLazyLoading() { - // Get all the paginated lists using the tested loading strategy - PaginatedList queryList = getTestPaginatedQueryList(PaginationLoadingStrategy.LAZY_LOADING); - PaginatedList scanList = getTestPaginatedScanList(PaginationLoadingStrategy.LAZY_LOADING); - PaginatedList parallelScanList = getTestPaginatedParallelScanList(PaginationLoadingStrategy.LAZY_LOADING); - - // check that only at most one page of results are loaded up to this point - assertTrue(loadedResultsNumber(queryList) <= PAGE_SIZE); - assertTrue(loadedResultsNumber(scanList) <= PAGE_SIZE); - assertTrue(loadedResultsNumber(parallelScanList) <= PAGE_SIZE * PARALLEL_SEGMENT); - - testAllPaginatedListOperations(queryList); - testAllPaginatedListOperations(scanList); - testAllPaginatedListOperations(parallelScanList); - - // Re-construct the paginated lists and test the iterator behavior - queryList = getTestPaginatedQueryList(PaginationLoadingStrategy.LAZY_LOADING); - scanList = getTestPaginatedScanList(PaginationLoadingStrategy.LAZY_LOADING); - parallelScanList = getTestPaginatedParallelScanList(PaginationLoadingStrategy.LAZY_LOADING); - - testPaginatedListIterator(queryList); - testPaginatedListIterator(scanList); - testPaginatedListIterator(parallelScanList); - - } - - @Test - public void testEagerLoading() { - // Get all the paginated lists using the tested loading strategy - PaginatedList queryList = getTestPaginatedQueryList(PaginationLoadingStrategy.EAGER_LOADING); - PaginatedList scanList = getTestPaginatedScanList(PaginationLoadingStrategy.EAGER_LOADING); - PaginatedList parallelScanList = getTestPaginatedParallelScanList(PaginationLoadingStrategy.EAGER_LOADING); - - // check that all results have been loaded - assertEquals(RESULTS_NUM, loadedResultsNumber(queryList)); - assertEquals(RESULTS_NUM, loadedResultsNumber(scanList)); - assertEquals(RESULTS_NUM, loadedResultsNumber(parallelScanList)); - - testAllPaginatedListOperations(queryList); - testAllPaginatedListOperations(scanList); - testAllPaginatedListOperations(parallelScanList); - - // Re-construct the paginated lists and test the iterator behavior - queryList = getTestPaginatedQueryList(PaginationLoadingStrategy.LAZY_LOADING); - scanList = getTestPaginatedScanList(PaginationLoadingStrategy.LAZY_LOADING); - parallelScanList = getTestPaginatedParallelScanList(PaginationLoadingStrategy.LAZY_LOADING); - - testPaginatedListIterator(queryList); - testPaginatedListIterator(scanList); - testPaginatedListIterator(parallelScanList); - } - - @Test - public void testIterationOnly() { - // Get all the paginated lists using the tested loading strategy - PaginatedList queryList = getTestPaginatedQueryList(PaginationLoadingStrategy.ITERATION_ONLY); - PaginatedList scanList = 
getTestPaginatedScanList(PaginationLoadingStrategy.ITERATION_ONLY); - PaginatedList parallelScanList = getTestPaginatedParallelScanList( - PaginationLoadingStrategy.ITERATION_ONLY); - - // check that only at most one page of results are loaded up to this point - assertTrue(loadedResultsNumber(queryList) <= PAGE_SIZE); - assertTrue(loadedResultsNumber(scanList) <= PAGE_SIZE); - assertTrue(loadedResultsNumber(parallelScanList) <= PAGE_SIZE * PARALLEL_SEGMENT); - - testIterationOnlyPaginatedListOperations(queryList); - testIterationOnlyPaginatedListOperations(scanList); - testIterationOnlyPaginatedListOperations(parallelScanList); - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/MapperSaveConfigIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/MapperSaveConfigIntegrationTest.java deleted file mode 100644 index cf4feaa0f0fe..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/MapperSaveConfigIntegrationTest.java +++ /dev/null @@ -1,537 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import org.junit.Test; -import software.amazon.awssdk.awscore.exception.AwsServiceException; -import software.amazon.awssdk.core.exception.SdkServiceException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; - -/** - * Tests the behavior of save method of DynamoDBMapper under different - * SaveBehavior configurations. - */ -public class MapperSaveConfigIntegrationTest extends MapperSaveConfigTestBase { - - /********************************************* - ** UPDATE (default) ** - *********************************************/ - - private static TestItem putRandomUniqueItem(String nonKeyAttributeValue, Set stringSetAttributeValue) { - String hashKeyValue = UUID.randomUUID().toString(); - Long rangeKeyValue = System.currentTimeMillis(); - Map item = new HashMap(); - item.put(hashKeyName, AttributeValue.builder().s(hashKeyValue).build()); - item.put(rangeKeyName, AttributeValue.builder().n(rangeKeyValue.toString()).build()); - if (null != nonKeyAttributeValue) { - item.put(nonKeyAttributeName, AttributeValue.builder().s(nonKeyAttributeValue).build()); - } - if (null != stringSetAttributeValue) { - item.put(stringSetAttributeName, AttributeValue.builder().ss(stringSetAttributeValue).build()); - } - dynamo.putItem(PutItemRequest.builder().tableName(tableName).item(item).build()); - - /* Returns the item as a modeled object. 
*/ - TestItem testItem = new TestItem(); - testItem.setHashKey(hashKeyValue); - testItem.setRangeKey(rangeKeyValue); - testItem.setNonKeyAttribute(nonKeyAttributeValue); - testItem.setStringSetAttribute(stringSetAttributeValue); - return testItem; - } - - private static Set generateRandomStringSet(int size) { - Set result = new HashSet(); - for (int i = 0; i < size; i++) { - result.add(UUID.randomUUID().toString()); - } - return result; - } - - private static boolean assertSetEquals(Set expected, Set actual) { - if (expected == null || actual == null) { - return (expected == null && actual == null); - } - if (expected.size() != actual.size()) { - return false; - } - for (Object item : expected) { - if (!actual.contains(item)) { - return false; - } - } - return true; - } - - /** - * Tests that a key-only object could be saved with - * UPDATE configuration, even when the key has already existed in the table. - */ - @Test - public void testDefaultWithOnlyKeyAttributesSpecifiedRecordInTable() - throws Exception { - - /* First put a new item (with non-key attribute)*/ - TestItem testItem = putRandomUniqueItem("foo", null); - - /* Put an key-only object with the same key. */ - testItem.setNonKeyAttribute(null); - - dynamoMapper.save(testItem, defaultConfig); - - /* The non-key attribute should be nulled out. */ - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertNull(returnedObject.getNonKeyAttribute()); - } - - /********************************************* - ** UPDATE_SKIP_NULL_ATTRIBUTES ** - *********************************************/ - - /** - * Tests an edge case that we have fixed according a forum bug report. If - * the object is only specified with key attributes, and such key is not - * present in the table, we should add this object by a key-only put - * request even if it is using UPDATE configuration. - */ - @Test - public void testDefaultWithOnlyKeyAttributesSpecifiedRecordNotInTable() - throws Exception { - TestItem testItem = new TestItem(); - testItem.setHashKey(UUID.randomUUID().toString()); - testItem.setRangeKey(System.currentTimeMillis()); - - dynamoMapper.save(testItem, defaultConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertNull(returnedObject.getNonKeyAttribute()); - } - - /** - * Update an existing item in the table. - */ - @Test - public void testDefaultWithKeyAndNonKeyAttributesSpecifiedRecordInTable() - throws Exception { - - /* First put a new item (without non-key attribute)*/ - TestItem testItem = putRandomUniqueItem(null, null); - String hashKeyValue = testItem.getHashKey(); - Long rangeKeyValue = testItem.getRangeKey(); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(hashKeyValue, returnedObject.getHashKey()); - assertEquals(rangeKeyValue, returnedObject.getRangeKey()); - assertNull(returnedObject.getNonKeyAttribute()); - - /* Put an updated object with the same key and an additional non-key attribute. 
*/ - testItem.setHashKey(hashKeyValue); - testItem.setRangeKey(rangeKeyValue); - testItem.setNonKeyAttribute("update"); - - dynamoMapper.save(testItem, defaultConfig); - returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals(testItem.getNonKeyAttribute(), returnedObject.getNonKeyAttribute()); - } - - /** - * Use UPDATE to put a new item in the table. - */ - @Test - public void testDefaultWithKeyAndNonKeyAttributesSpecifiedRecordNotInTable() - throws Exception { - TestItem testItem = new TestItem(); - testItem.setHashKey(UUID.randomUUID().toString()); - testItem.setRangeKey(System.currentTimeMillis()); - testItem.setNonKeyAttribute("new item"); - - dynamoMapper.save(testItem, defaultConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals(testItem.getNonKeyAttribute(), returnedObject.getNonKeyAttribute()); - } - - /** - * When using UPDATE_SKIP_NULL_ATTRIBUTES, key-only update on existing item - * should not affect the item at all, since all the null-valued non-key - * attributes are ignored. - */ - @Test - public void testUpdateSkipNullWithOnlyKeyAttributesSpecifiedRecordInTable() - throws Exception { - - /* First put a new item (with non-key attribute)*/ - TestItem testItem = putRandomUniqueItem("foo", null); - - /* Put an key-only object with the same key. */ - testItem.setNonKeyAttribute(null); - - dynamoMapper.save(testItem, updateSkipNullConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - /* The non-key attribute should not be removed. */ - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals("foo", returnedObject.getNonKeyAttribute()); - } - - /********************************************* - ** APPEND_SET ** - *********************************************/ - - /** - * The behavior should be the same as UPDATE. - */ - @Test - public void testUpdateSkipNullWithOnlyKeyAttributesSpecifiedRecordNotInTable() - throws Exception { - TestItem testItem = new TestItem(); - testItem.setHashKey(UUID.randomUUID().toString()); - testItem.setRangeKey(System.currentTimeMillis()); - - dynamoMapper.save(testItem, updateSkipNullConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertNull(returnedObject.getNonKeyAttribute()); - } - - /** - * Use UPDATE_SKIP_NULL_ATTRIBUTES to update an existing item in the table. 
- */ - @Test - public void testUpdateSkipNullWithKeyAndNonKeyAttributesSpecifiedRecordInTable() - throws Exception { - - /* First put a new item (without non-key attribute)*/ - TestItem testItem = putRandomUniqueItem(null, null); - String hashKeyValue = testItem.getHashKey(); - Long rangeKeyValue = testItem.getRangeKey(); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(hashKeyValue, returnedObject.getHashKey()); - assertEquals(rangeKeyValue, returnedObject.getRangeKey()); - assertNull(returnedObject.getNonKeyAttribute()); - - /* Put an updated object with the same key and an additional non-key attribute. */ - String nonKeyAttributeValue = "update"; - testItem.setHashKey(hashKeyValue); - testItem.setRangeKey(rangeKeyValue); - testItem.setNonKeyAttribute(nonKeyAttributeValue); - - dynamoMapper.save(testItem, updateSkipNullConfig); - returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals(testItem.getNonKeyAttribute(), returnedObject.getNonKeyAttribute()); - - /* Finally, save the object again, but with the non-key attribute set to null. - * This should not change the existing item. - */ - testItem.setNonKeyAttribute(null); - dynamoMapper.save(testItem, updateSkipNullConfig); - returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals(nonKeyAttributeValue, returnedObject.getNonKeyAttribute()); - } - - /** - * Use UPDATE_SKIP_NULL_ATTRIBUTES to put a new item in the table. - */ - @Test - public void testUpdateSkipNullWithKeyAndNonKeyAttributesSpecifiedRecordNotInTable() - throws Exception { - TestItem testItem = new TestItem(); - testItem.setHashKey(UUID.randomUUID().toString()); - testItem.setRangeKey(System.currentTimeMillis()); - testItem.setNonKeyAttribute("new item"); - - dynamoMapper.save(testItem, updateSkipNullConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals(testItem.getNonKeyAttribute(), returnedObject.getNonKeyAttribute()); - } - - /** - * The behavior should be the same as UPDATE_SKIP_NULL_ATTRIBUTES. - */ - @Test - public void testAppendSetWithOnlyKeyAttributesSpecifiedRecordInTable() - throws Exception { - - /* First put a new item (with non-key attributes)*/ - Set randomSet = generateRandomStringSet(3); - TestItem testItem = putRandomUniqueItem("foo", randomSet); - - /* Put a key-only object with the same key. */ - testItem.setNonKeyAttribute(null); - testItem.setStringSetAttribute(null); - - dynamoMapper.save(testItem, appendSetConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - /* The non-key attribute should not be removed. 
*/ - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals("foo", returnedObject.getNonKeyAttribute()); - assertTrue(assertSetEquals(randomSet, returnedObject.getStringSetAttribute())); - } - - /********************************************* - ** CLOBBER ** - *********************************************/ - - /** - * The behavior should be the same as UPDATE and UPDATE_SKIP_NULL_ATTRIBUTES. - */ - @Test - public void testAppendSetWithOnlyKeyAttributesSpecifiedRecordNotInTable() - throws Exception { - TestItem testItem = new TestItem(); - testItem.setHashKey(UUID.randomUUID().toString()); - testItem.setRangeKey(System.currentTimeMillis()); - - dynamoMapper.save(testItem, appendSetConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertNull(returnedObject.getNonKeyAttribute()); - assertNull(returnedObject.getStringSetAttribute()); - } - - /** - * Use APPEND_SET to update an existing item in the table. - */ - @Test - public void testAppendSetWithKeyAndNonKeyAttributesSpecifiedRecordInTable() - throws Exception { - - /* First put a new item (without non-key attribute)*/ - TestItem testItem = putRandomUniqueItem(null, null); - String hashKeyValue = testItem.getHashKey(); - Long rangeKeyValue = testItem.getRangeKey(); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(hashKeyValue, returnedObject.getHashKey()); - assertEquals(rangeKeyValue, returnedObject.getRangeKey()); - assertNull(returnedObject.getNonKeyAttribute()); - assertNull(returnedObject.getStringSetAttribute()); - - /* Put an updated object with the same key and an additional non-key attribute. */ - String nonKeyAttributeValue = "update"; - Set stringSetAttributeValue = generateRandomStringSet(3); - testItem.setHashKey(hashKeyValue); - testItem.setRangeKey(rangeKeyValue); - testItem.setNonKeyAttribute(nonKeyAttributeValue); - testItem.setStringSetAttribute(stringSetAttributeValue); - - dynamoMapper.save(testItem, appendSetConfig); - returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals(testItem.getNonKeyAttribute(), returnedObject.getNonKeyAttribute()); - assertTrue(assertSetEquals(testItem.getStringSetAttribute(), returnedObject.getStringSetAttribute())); - - /* Override nonKeyAttribute and append stringSetAttribute. 
*/ - testItem.setNonKeyAttribute("blabla"); - Set appendSetAttribute = generateRandomStringSet(3); - testItem.setStringSetAttribute(appendSetAttribute); - dynamoMapper.save(testItem, appendSetConfig); - returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals("blabla", returnedObject.getNonKeyAttribute()); - // expected set after the append - stringSetAttributeValue.addAll(appendSetAttribute); - assertTrue(assertSetEquals(stringSetAttributeValue, returnedObject.getStringSetAttribute())); - - /* Append on an existing scalar attribute would result in an exception. */ - TestAppendToScalarItem testAppendToScalarItem = new TestAppendToScalarItem(); - testAppendToScalarItem.setHashKey(testItem.getHashKey()); - testAppendToScalarItem.setRangeKey(testItem.getRangeKey()); - // this fake set attribute actually points to a scalar attribute - testAppendToScalarItem.setFakeStringSetAttribute(generateRandomStringSet(1)); - try { - dynamoMapper.save(testAppendToScalarItem, appendSetConfig); - fail("Should have thrown a 'Type mismatch' service exception."); - } catch (AwsServiceException exception) { - assertEquals("ValidationException", exception.awsErrorDetails().errorCode()); - } - } - - /** - * Use APPEND_SET to put a new item in the table. - */ - @Test - public void testAppendSetWithKeyAndNonKeyAttributesSpecifiedRecordNotInTable() - throws Exception { - TestItem testItem = new TestItem(); - testItem.setHashKey(UUID.randomUUID().toString()); - testItem.setRangeKey(System.currentTimeMillis()); - testItem.setNonKeyAttribute("new item"); - testItem.setStringSetAttribute(generateRandomStringSet(3)); - - dynamoMapper.save(testItem, appendSetConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals(testItem.getNonKeyAttribute(), returnedObject.getNonKeyAttribute()); - assertEquals(testItem.getStringSetAttribute(), returnedObject.getStringSetAttribute()); - - } - - /** - * Use CLOBBER to override the existing item by saving a key-only object. - */ - @Test - public void testClobberWithOnlyKeyAttributesSpecifiedRecordInTable() - throws Exception { - /* Put the item with non-key attribute. */ - TestItem testItem = putRandomUniqueItem("foo", null); - - /* Override the item by saving a key-only object. */ - testItem.setNonKeyAttribute(null); - dynamoMapper.save(testItem, clobberConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertNull(returnedObject.getNonKeyAttribute()); - } - - /** - * Use CLOBBER to put a new item with only key attributes. 
- */ - @Test - public void testClobberWithOnlyKeyAttributesSpecifiedRecordNotInTable() - throws Exception { - TestItem testItem = new TestItem(); - testItem.setHashKey(UUID.randomUUID().toString()); - testItem.setRangeKey(System.currentTimeMillis()); - - dynamoMapper.save(testItem, clobberConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertNull(returnedObject.getNonKeyAttribute()); - } - - /** - * Use CLOBBER to override the existing item. - */ - @Test - public void testClobberWithKeyAndNonKeyAttributesSpecifiedRecordInTable() - throws Exception { - /* Put the item with non-key attribute. */ - TestItem testItem = putRandomUniqueItem("foo", null); - - /* Override the item. */ - testItem.setNonKeyAttribute("not foo"); - dynamoMapper.save(testItem, clobberConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals(testItem.getNonKeyAttribute(), returnedObject.getNonKeyAttribute()); - } - - /** - * Use CLOBBER to put a new item. - */ - @Test - public void testClobberWithKeyAndNonKeyAttributesSpecifiedRecordNotInTable() - throws Exception { - TestItem testItem = new TestItem(); - testItem.setHashKey(UUID.randomUUID().toString()); - testItem.setRangeKey(System.currentTimeMillis()); - testItem.setNonKeyAttribute("new item"); - - dynamoMapper.save(testItem, clobberConfig); - - TestItem returnedObject = (TestItem) dynamoMapper.load(testItem); - - assertNotNull(returnedObject); - assertEquals(testItem.getHashKey(), returnedObject.getHashKey()); - assertEquals(testItem.getRangeKey(), returnedObject.getRangeKey()); - assertEquals(testItem.getNonKeyAttribute(), returnedObject.getNonKeyAttribute()); - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/MapperSaveConfigTestBase.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/MapperSaveConfigTestBase.java deleted file mode 100644 index 723dfd941c4a..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/MapperSaveConfigTestBase.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -import java.util.Set; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.TableUtils; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.SaveBehavior; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.services.dynamodb.model.TableDescription; -import utils.test.util.DynamoDBIntegrationTestBase; - -public class MapperSaveConfigTestBase extends DynamoDBIntegrationTestBase { - - protected static final DynamoDbMapperConfig defaultConfig = new DynamoDbMapperConfig( - SaveBehavior.UPDATE); - protected static final DynamoDbMapperConfig updateSkipNullConfig = new DynamoDbMapperConfig( - SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES); - protected static final DynamoDbMapperConfig appendSetConfig = new DynamoDbMapperConfig( - SaveBehavior.APPEND_SET); - protected static final DynamoDbMapperConfig clobberConfig = new DynamoDbMapperConfig( - SaveBehavior.CLOBBER); - protected static final String tableName = "aws-java-sdk-dynamodb-mapper-save-config-test"; - protected static final String hashKeyName = "hashKey"; - protected static final String rangeKeyName = "rangeKey"; - protected static final String nonKeyAttributeName = "nonKeyAttribute"; - protected static final String stringSetAttributeName = "stringSetAttribute"; - /** - * Read capacity for the test table being created in Amazon DynamoDB. - */ - protected static final Long READ_CAPACITY = 10L; - /** - * Write capacity for the test table being created in Amazon DynamoDB. 
- */ - protected static final Long WRITE_CAPACITY = 5L; - /** - * Provisioned Throughput for the test table created in Amazon DynamoDB - */ - protected static final ProvisionedThroughput DEFAULT_PROVISIONED_THROUGHPUT = ProvisionedThroughput.builder() - .readCapacityUnits(READ_CAPACITY).writeCapacityUnits( - WRITE_CAPACITY).build(); - protected static DynamoDbMapper dynamoMapper; - - @BeforeClass - public static void setUp() throws Exception { - setUpCredentials(); - dynamo = DynamoDbClient.builder().credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); - dynamoMapper = new DynamoDbMapper(dynamo); - - createTestTable(DEFAULT_PROVISIONED_THROUGHPUT); - TableUtils.waitUntilActive(dynamo, tableName); - } - - @AfterClass - public static void tearDown() { - dynamo.deleteTable(DeleteTableRequest.builder().tableName(tableName).build()); - } - - /** - * Helper method to create a table in Amazon DynamoDB - */ - protected static void createTestTable( - ProvisionedThroughput provisionedThroughput) { - CreateTableRequest createTableRequest = CreateTableRequest.builder() - .tableName(tableName) - .keySchema( - KeySchemaElement.builder().attributeName( - hashKeyName).keyType( - KeyType.HASH).build(), - KeySchemaElement.builder().attributeName( - rangeKeyName).keyType( - KeyType.RANGE).build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName( - hashKeyName).attributeType( - ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName( - rangeKeyName).attributeType( - ScalarAttributeType.N).build()) - .provisionedThroughput(provisionedThroughput) - .build(); - - TableDescription createdTableDescription = dynamo.createTable( - createTableRequest).tableDescription(); - System.out.println("Created Table: " + createdTableDescription); - assertEquals(tableName, createdTableDescription.tableName()); - assertNotNull(createdTableDescription.tableStatus()); - assertEquals(hashKeyName, createdTableDescription - .keySchema().get(0).attributeName()); - assertEquals(KeyType.HASH, createdTableDescription - .keySchema().get(0).keyType()); - assertEquals(rangeKeyName, createdTableDescription - .keySchema().get(1).attributeName()); - assertEquals(KeyType.RANGE, createdTableDescription - .keySchema().get(1).keyType()); - } - - @DynamoDbTable(tableName = tableName) - public static class TestItem { - - private String hashKey; - private Long rangeKey; - private String nonKeyAttribute; - private Set stringSetAttribute; - - @DynamoDbHashKey(attributeName = hashKeyName) - public String getHashKey() { - return hashKey; - } - - public void setHashKey(String hashKey) { - this.hashKey = hashKey; - } - - @DynamoDbRangeKey(attributeName = rangeKeyName) - public Long getRangeKey() { - return rangeKey; - } - - public void setRangeKey(Long rangeKey) { - this.rangeKey = rangeKey; - } - - @DynamoDbAttribute(attributeName = nonKeyAttributeName) - public String getNonKeyAttribute() { - return nonKeyAttribute; - } - - public void setNonKeyAttribute(String nonKeyAttribute) { - this.nonKeyAttribute = nonKeyAttribute; - } - - @DynamoDbAttribute(attributeName = stringSetAttributeName) - public Set getStringSetAttribute() { - return stringSetAttribute; - } - - public void setStringSetAttribute(Set stringSetAttribute) { - this.stringSetAttribute = stringSetAttribute; - } - - } - - @DynamoDbTable(tableName = tableName) - public static class TestAppendToScalarItem { - - private String hashKey; - private Long rangeKey; - private Set fakeStringSetAttribute; - - @DynamoDbHashKey(attributeName = hashKeyName) - 
public String getHashKey() { - return hashKey; - } - - public void setHashKey(String hashKey) { - this.hashKey = hashKey; - } - - @DynamoDbRangeKey(attributeName = rangeKeyName) - public Long getRangeKey() { - return rangeKey; - } - - public void setRangeKey(Long rangeKey) { - this.rangeKey = rangeKey; - } - - @DynamoDbAttribute(attributeName = nonKeyAttributeName) - public Set getFakeStringSetAttribute() { - return fakeStringSetAttribute; - } - - public void setFakeStringSetAttribute(Set stringSetAttribute) { - this.fakeStringSetAttribute = stringSetAttribute; - } - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NoSuchTableClass.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NoSuchTableClass.java deleted file mode 100644 index d3d91bfb4f80..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NoSuchTableClass.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -@DynamoDbTable(tableName = "tableNotExist") -public class NoSuchTableClass { - - private String key; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NumberAttributeClass.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NumberAttributeClass.java deleted file mode 100644 index 858c6692f634..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NumberAttributeClass.java +++ /dev/null @@ -1,375 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.Calendar; -import java.util.Date; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbIgnore; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -/** - * Simple domain class with numeric attributes - */ -@DynamoDbTable(tableName = "aws-java-sdk-util") -public class NumberAttributeClass { - - private String key; - private int intAttribute; - private Integer integerAttribute; - private double doubleAttribute; - private Double doubleObjectAttribute; - private float floatAttribute; - private Float floatObjectAttribute; - private BigDecimal bigDecimalAttribute; - private BigInteger bigIntegerAttribute; - private long longAttribute; - private Long longObjectAttribute; - private short shortAttribute; - private Short shortObjectAttribute; - private byte byteAttribute; - private Byte byteObjectAttribute; - private Date dateAttribute; - private Calendar calendarAttribute; - private Boolean booleanObjectAttribute; - private boolean booleanAttribute; - private String ignored = "notSent"; - - @DynamoDbAutoGeneratedKey - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public int getIntAttribute() { - return intAttribute; - } - - public void setIntAttribute(int intAttribute) { - this.intAttribute = intAttribute; - } - - public Integer getIntegerAttribute() { - return integerAttribute; - } - - public void setIntegerAttribute(Integer integerAttribute) { - this.integerAttribute = integerAttribute; - } - - public double getDoubleAttribute() { - return doubleAttribute; - } - - public void setDoubleAttribute(double doubleAttribute) { - this.doubleAttribute = doubleAttribute; - } - - public Double getDoubleObjectAttribute() { - return doubleObjectAttribute; - } - - public void setDoubleObjectAttribute(Double doubleObjectAttribute) { - this.doubleObjectAttribute = doubleObjectAttribute; - } - - @DynamoDbAttribute - public float getFloatAttribute() { - return floatAttribute; - } - - public void setFloatAttribute(float floatAttribute) { - this.floatAttribute = floatAttribute; - } - - public Float getFloatObjectAttribute() { - return floatObjectAttribute; - } - - public void setFloatObjectAttribute(Float floatObjectAttribute) { - this.floatObjectAttribute = floatObjectAttribute; - } - - public BigDecimal getBigDecimalAttribute() { - return bigDecimalAttribute; - } - - public void setBigDecimalAttribute(BigDecimal bigDecimalAttribute) { - this.bigDecimalAttribute = bigDecimalAttribute; - } - - public BigInteger getBigIntegerAttribute() { - return bigIntegerAttribute; - } - - public void setBigIntegerAttribute(BigInteger bigIntegerAttribute) { - this.bigIntegerAttribute = bigIntegerAttribute; - } - - public long getLongAttribute() { - return longAttribute; - } - - public void setLongAttribute(long longAttribute) { - this.longAttribute = longAttribute; - } - - public Long getLongObjectAttribute() { - return longObjectAttribute; - } - - public void setLongObjectAttribute(Long longObjectAttribute) { - this.longObjectAttribute = longObjectAttribute; - } - - public byte getByteAttribute() { - return byteAttribute; - 
} - - public void setByteAttribute(byte byteAttribute) { - this.byteAttribute = byteAttribute; - } - - public Byte getByteObjectAttribute() { - return byteObjectAttribute; - } - - public void setByteObjectAttribute(Byte byteObjectAttribute) { - this.byteObjectAttribute = byteObjectAttribute; - } - - public Date getDateAttribute() { - return dateAttribute; - } - - public void setDateAttribute(Date dateAttribute) { - this.dateAttribute = dateAttribute; - } - - public Calendar getCalendarAttribute() { - return calendarAttribute; - } - - public void setCalendarAttribute(Calendar calendarAttribute) { - this.calendarAttribute = calendarAttribute; - } - - public Boolean getBooleanObjectAttribute() { - return booleanObjectAttribute; - } - - public void setBooleanObjectAttribute(Boolean booleanObjectAttribute) { - this.booleanObjectAttribute = booleanObjectAttribute; - } - - public boolean isBooleanAttribute() { - return booleanAttribute; - } - - public void setBooleanAttribute(boolean booleanAttribute) { - this.booleanAttribute = booleanAttribute; - } - - @DynamoDbIgnore - public String getIgnored() { - return ignored; - } - - public void setIgnored(String ignored) { - this.ignored = ignored; - } - - public short getShortAttribute() { - return shortAttribute; - } - - public void setShortAttribute(short shortAttribute) { - this.shortAttribute = shortAttribute; - } - - public Short getShortObjectAttribute() { - return shortObjectAttribute; - } - - public void setShortObjectAttribute(Short shortObjectAttribute) { - this.shortObjectAttribute = shortObjectAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((bigDecimalAttribute == null) ? 0 : bigDecimalAttribute.hashCode()); - result = prime * result + ((bigIntegerAttribute == null) ? 0 : bigIntegerAttribute.hashCode()); - result = prime * result + (booleanAttribute ? 1231 : 1237); - result = prime * result + ((booleanObjectAttribute == null) ? 0 : booleanObjectAttribute.hashCode()); - result = prime * result + byteAttribute; - result = prime * result + ((byteObjectAttribute == null) ? 0 : byteObjectAttribute.hashCode()); - result = prime * result + ((calendarAttribute == null) ? 0 : calendarAttribute.hashCode()); - result = prime * result + ((dateAttribute == null) ? 0 : dateAttribute.hashCode()); - long temp; - temp = Double.doubleToLongBits(doubleAttribute); - result = prime * result + (int) (temp ^ (temp >>> 32)); - result = prime * result + ((doubleObjectAttribute == null) ? 0 : doubleObjectAttribute.hashCode()); - result = prime * result + Float.floatToIntBits(floatAttribute); - result = prime * result + ((floatObjectAttribute == null) ? 0 : floatObjectAttribute.hashCode()); - result = prime * result + ((ignored == null) ? 0 : ignored.hashCode()); - result = prime * result + intAttribute; - result = prime * result + ((integerAttribute == null) ? 0 : integerAttribute.hashCode()); - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + (int) (longAttribute ^ (longAttribute >>> 32)); - result = prime * result + ((longObjectAttribute == null) ? 0 : longObjectAttribute.hashCode()); - result = prime * result + shortAttribute; - result = prime * result + ((shortObjectAttribute == null) ? 
0 : shortObjectAttribute.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - NumberAttributeClass other = (NumberAttributeClass) obj; - if (bigDecimalAttribute == null) { - if (other.bigDecimalAttribute != null) { - return false; - } - } else if (!bigDecimalAttribute.equals(other.bigDecimalAttribute)) { - return false; - } - if (bigIntegerAttribute == null) { - if (other.bigIntegerAttribute != null) { - return false; - } - } else if (!bigIntegerAttribute.equals(other.bigIntegerAttribute)) { - return false; - } - if (booleanAttribute != other.booleanAttribute) { - return false; - } - if (booleanObjectAttribute == null) { - if (other.booleanObjectAttribute != null) { - return false; - } - } else if (!booleanObjectAttribute.equals(other.booleanObjectAttribute)) { - return false; - } - if (byteAttribute != other.byteAttribute) { - return false; - } - if (byteObjectAttribute == null) { - if (other.byteObjectAttribute != null) { - return false; - } - } else if (!byteObjectAttribute.equals(other.byteObjectAttribute)) { - return false; - } - if (calendarAttribute == null) { - if (other.calendarAttribute != null) { - return false; - } - } else if (!calendarAttribute.equals(other.calendarAttribute)) { - return false; - } - if (dateAttribute == null) { - if (other.dateAttribute != null) { - return false; - } - } else if (!dateAttribute.equals(other.dateAttribute)) { - return false; - } - if (Double.doubleToLongBits(doubleAttribute) != Double.doubleToLongBits(other.doubleAttribute)) { - return false; - } - if (doubleObjectAttribute == null) { - if (other.doubleObjectAttribute != null) { - return false; - } - } else if (!doubleObjectAttribute.equals(other.doubleObjectAttribute)) { - return false; - } - if (Float.floatToIntBits(floatAttribute) != Float.floatToIntBits(other.floatAttribute)) { - return false; - } - if (floatObjectAttribute == null) { - if (other.floatObjectAttribute != null) { - return false; - } - } else if (!floatObjectAttribute.equals(other.floatObjectAttribute)) { - return false; - } - if (ignored == null) { - if (other.ignored != null) { - return false; - } - } else if (!ignored.equals(other.ignored)) { - return false; - } - if (intAttribute != other.intAttribute) { - return false; - } - if (integerAttribute == null) { - if (other.integerAttribute != null) { - return false; - } - } else if (!integerAttribute.equals(other.integerAttribute)) { - return false; - } - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (longAttribute != other.longAttribute) { - return false; - } - if (longObjectAttribute == null) { - if (other.longObjectAttribute != null) { - return false; - } - } else if (!longObjectAttribute.equals(other.longObjectAttribute)) { - return false; - } - if (shortAttribute != other.shortAttribute) { - return false; - } - if (shortObjectAttribute == null) { - if (other.shortObjectAttribute != null) { - return false; - } - } else if (!shortObjectAttribute.equals(other.shortObjectAttribute)) { - return false; - } - return true; - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NumberSetAttributeClass.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NumberSetAttributeClass.java deleted file mode 100644 index a1fbfb4ac27f..000000000000 
--- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NumberSetAttributeClass.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.Calendar; -import java.util.Date; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -/** - * Simple domain class with numeric attributes - */ -@DynamoDbTable(tableName = "aws-java-sdk-util") -public class NumberSetAttributeClass { - - private String key; - private Set integerAttribute; - private Set doubleObjectAttribute; - private Set floatObjectAttribute; - private Set bigDecimalAttribute; - private Set bigIntegerAttribute; - private Set longObjectAttribute; - private Set byteObjectAttribute; - private Set dateAttribute; - private Set calendarAttribute; - private Set booleanAttribute; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAttribute - public Set getIntegerAttribute() { - return integerAttribute; - } - - public void setIntegerAttribute(Set integerAttribute) { - this.integerAttribute = integerAttribute; - } - - @DynamoDbAttribute - public Set getDoubleObjectAttribute() { - return doubleObjectAttribute; - } - - public void setDoubleObjectAttribute(Set doubleObjectAttribute) { - this.doubleObjectAttribute = doubleObjectAttribute; - } - - @DynamoDbAttribute - public Set getFloatObjectAttribute() { - return floatObjectAttribute; - } - - public void setFloatObjectAttribute(Set floatObjectAttribute) { - this.floatObjectAttribute = floatObjectAttribute; - } - - @DynamoDbAttribute - public Set getBigDecimalAttribute() { - return bigDecimalAttribute; - } - - public void setBigDecimalAttribute(Set bigDecimalAttribute) { - this.bigDecimalAttribute = bigDecimalAttribute; - } - - @DynamoDbAttribute - public Set getBigIntegerAttribute() { - return bigIntegerAttribute; - } - - public void setBigIntegerAttribute(Set bigIntegerAttribute) { - this.bigIntegerAttribute = bigIntegerAttribute; - } - - @DynamoDbAttribute - public Set getLongObjectAttribute() { - return longObjectAttribute; - } - - public void setLongObjectAttribute(Set longObjectAttribute) { - this.longObjectAttribute = longObjectAttribute; - } - - @DynamoDbAttribute - public Set getByteObjectAttribute() { - return byteObjectAttribute; - } - - public void setByteObjectAttribute(Set byteObjectAttribute) { - this.byteObjectAttribute = byteObjectAttribute; - } - - @DynamoDbAttribute - public Set getDateAttribute() { - return dateAttribute; - } - - public void setDateAttribute(Set dateAttribute) { - this.dateAttribute = dateAttribute; - } - - @DynamoDbAttribute - public Set 
getCalendarAttribute() { - return calendarAttribute; - } - - public void setCalendarAttribute(Set calendarAttribute) { - this.calendarAttribute = calendarAttribute; - } - - @DynamoDbAttribute - public Set getBooleanAttribute() { - return booleanAttribute; - } - - public void setBooleanAttribute(Set booleanAttribute) { - this.booleanAttribute = booleanAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((bigDecimalAttribute == null) ? 0 : bigDecimalAttribute.hashCode()); - result = prime * result + ((bigIntegerAttribute == null) ? 0 : bigIntegerAttribute.hashCode()); - result = prime * result + ((booleanAttribute == null) ? 0 : booleanAttribute.hashCode()); - result = prime * result + ((byteObjectAttribute == null) ? 0 : byteObjectAttribute.hashCode()); - result = prime * result + ((calendarAttribute == null) ? 0 : calendarAttribute.hashCode()); - result = prime * result + ((dateAttribute == null) ? 0 : dateAttribute.hashCode()); - result = prime * result + ((doubleObjectAttribute == null) ? 0 : doubleObjectAttribute.hashCode()); - result = prime * result + ((floatObjectAttribute == null) ? 0 : floatObjectAttribute.hashCode()); - result = prime * result + ((integerAttribute == null) ? 0 : integerAttribute.hashCode()); - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((longObjectAttribute == null) ? 0 : longObjectAttribute.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - NumberSetAttributeClass other = (NumberSetAttributeClass) obj; - if (bigDecimalAttribute == null) { - if (other.bigDecimalAttribute != null) { - return false; - } - } else if (!bigDecimalAttribute.equals(other.bigDecimalAttribute)) { - return false; - } - if (bigIntegerAttribute == null) { - if (other.bigIntegerAttribute != null) { - return false; - } - } else if (!bigIntegerAttribute.equals(other.bigIntegerAttribute)) { - return false; - } - if (booleanAttribute == null) { - if (other.booleanAttribute != null) { - return false; - } - } else if (!booleanAttribute.equals(other.booleanAttribute)) { - return false; - } - if (byteObjectAttribute == null) { - if (other.byteObjectAttribute != null) { - return false; - } - } else if (!byteObjectAttribute.equals(other.byteObjectAttribute)) { - return false; - } - if (calendarAttribute == null) { - if (other.calendarAttribute != null) { - return false; - } - } else if (!calendarAttribute.equals(other.calendarAttribute)) { - return false; - } - if (dateAttribute == null) { - if (other.dateAttribute != null) { - return false; - } - } else if (!dateAttribute.equals(other.dateAttribute)) { - return false; - } - if (doubleObjectAttribute == null) { - if (other.doubleObjectAttribute != null) { - return false; - } - } else if (!doubleObjectAttribute.equals(other.doubleObjectAttribute)) { - return false; - } - if (floatObjectAttribute == null) { - if (other.floatObjectAttribute != null) { - return false; - } - } else if (!floatObjectAttribute.equals(other.floatObjectAttribute)) { - return false; - } - if (integerAttribute == null) { - if (other.integerAttribute != null) { - return false; - } - } else if (!integerAttribute.equals(other.integerAttribute)) { - return false; - } - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - 
if (longObjectAttribute == null) { - if (other.longObjectAttribute != null) { - return false; - } - } else if (!longObjectAttribute.equals(other.longObjectAttribute)) { - return false; - } - return true; - } - - /* (non-Javadoc) - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - return "NumberSetAttributeClass [key=" + key; - // + ", integerAttribute=" + integerAttribute - // + ", doubleObjectAttribute=" + doubleObjectAttribute + ", floatObjectAttribute=" + floatObjectAttribute - // + ", bigDecimalAttribute=" + bigDecimalAttribute + ", bigIntegerAttribute=" + bigIntegerAttribute - // + ", longObjectAttribute=" + longObjectAttribute + ", byteObjectAttribute=" + byteObjectAttribute - // + ", dateAttribute=" + dateAttribute + ", calendarAttribute=" + calendarAttribute - // + ", booleanAttribute=" + booleanAttribute + "]"; - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NumericSetAttributesIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NumericSetAttributesIntegrationTest.java deleted file mode 100644 index 8b7138d72058..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/NumericSetAttributesIntegrationTest.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; - -/** - * Tests string set attributes - */ -public class NumericSetAttributesIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String INTEGER_ATTRIBUTE = "integerAttribute"; - private static final String FLOAT_OBJECT_ATTRIBUTE = "floatObjectAttribute"; - private static final String DOUBLE_OBJECT_ATTRIBUTE = "doubleObjectAttribute"; - private static final String BIG_INTEGER_ATTRIBUTE = "bigIntegerAttribute"; - private static final String BIG_DECIMAL_ATTRIBUTE = "bigDecimalAttribute"; - private static final String LONG_OBJECT_ATTRIBUTE = "longObjectAttribute"; - private static final String BYTE_OBJECT_ATTRIBUTE = "byteObjectAttribute"; - private static final String BOOLEAN_ATTRIBUTE = "booleanAttribute"; - private static final List> attrs = new LinkedList>(); - // We don't start with the current system millis like other tests because - // it's out of the range of some data types - private static int start = 1; - private static int byteStart = 1; - - // Test data - static { - for (int i = 0; i < 5; i++) { - Map attr = new HashMap(); - attr.put(KEY_NAME, AttributeValue.builder().s("" + start++).build()); - attr.put(INTEGER_ATTRIBUTE, AttributeValue.builder().ns("" + start++, "" + start++, "" + start++).build()); - attr.put(FLOAT_OBJECT_ATTRIBUTE, AttributeValue.builder().ns("" + start++, "" + start++, "" + start++).build()); - attr.put(DOUBLE_OBJECT_ATTRIBUTE, AttributeValue.builder().ns("" + start++, "" + start++, "" + start++).build()); - attr.put(BIG_INTEGER_ATTRIBUTE, AttributeValue.builder().ns("" + start++, "" + start++, "" + start++).build()); - attr.put(BIG_DECIMAL_ATTRIBUTE, AttributeValue.builder().ns("" + start++, "" + start++, "" + start++).build()); - attr.put(LONG_OBJECT_ATTRIBUTE, AttributeValue.builder().ns("" + start++, "" + start++, "" + start++).build()); - attr.put(BYTE_OBJECT_ATTRIBUTE, AttributeValue.builder().ns("" + byteStart++, "" + byteStart++, "" + byteStart++).build()); - attr.put(BOOLEAN_ATTRIBUTE, AttributeValue.builder().ns("0", "1").build()); - attrs.add(attr); - } - } - - ; - - @BeforeClass - public static void setUp() throws Exception { - DynamoDBMapperIntegrationTestBase.setUp(); - - // Insert the data - for (Map attr : attrs) { - dynamo.putItem(PutItemRequest.builder().tableName(TABLE_NAME).item(attr).build()); - } - } - - @Test - public void testLoad() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - for (Map attr : attrs) { - NumberSetAttributeClass x = util.load(NumberSetAttributeClass.class, attr.get(KEY_NAME).s()); - assertEquals(x.getKey(), attr.get(KEY_NAME).s()); - - // Convert all numbers to the most inclusive type for easy comparison - assertNumericSetsEquals(x.getBigDecimalAttribute(), 
attr.get(BIG_DECIMAL_ATTRIBUTE).ns()); - assertNumericSetsEquals(x.getBigIntegerAttribute(), attr.get(BIG_INTEGER_ATTRIBUTE).ns()); - assertNumericSetsEquals(x.getFloatObjectAttribute(), attr.get(FLOAT_OBJECT_ATTRIBUTE).ns()); - assertNumericSetsEquals(x.getDoubleObjectAttribute(), attr.get(DOUBLE_OBJECT_ATTRIBUTE).ns()); - assertNumericSetsEquals(x.getIntegerAttribute(), attr.get(INTEGER_ATTRIBUTE).ns()); - assertNumericSetsEquals(x.getLongObjectAttribute(), attr.get(LONG_OBJECT_ATTRIBUTE).ns()); - assertNumericSetsEquals(x.getByteObjectAttribute(), attr.get(BYTE_OBJECT_ATTRIBUTE).ns()); - assertSetsEqual(toSet("0", "1"), attr.get(BOOLEAN_ATTRIBUTE).ns()); - } - } - - @Test - public void testSave() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - NumberSetAttributeClass obj = getUniqueObject(); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (NumberSetAttributeClass obj : objs) { - util.save(obj); - } - - for (NumberSetAttributeClass obj : objs) { - NumberSetAttributeClass loaded = util.load(NumberSetAttributeClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - } - - @Test - public void testUpdate() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - NumberSetAttributeClass obj = getUniqueObject(); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (NumberSetAttributeClass obj : objs) { - util.save(obj); - } - - for (NumberSetAttributeClass obj : objs) { - NumberSetAttributeClass replacement = getUniqueObject(); - replacement.setKey(obj.getKey()); - util.save(replacement); - assertEquals(replacement, util.load(NumberSetAttributeClass.class, obj.getKey())); - } - } - - private NumberSetAttributeClass getUniqueObject() { - NumberSetAttributeClass obj = new NumberSetAttributeClass(); - obj.setKey(String.valueOf(startKey++)); - obj.setBigDecimalAttribute(toSet(new BigDecimal(startKey++), new BigDecimal(startKey++), new BigDecimal(startKey++))); - obj.setBigIntegerAttribute( - toSet(new BigInteger("" + startKey++), new BigInteger("" + startKey++), new BigInteger("" + startKey++))); - obj.setByteObjectAttribute(toSet(new Byte("" + byteStart++), new Byte("" + byteStart++), new Byte("" + byteStart++))); - obj.setDoubleObjectAttribute(toSet(new Double("" + start++), new Double("" + start++), new Double("" + start++))); - obj.setFloatObjectAttribute(toSet(new Float("" + start++), new Float("" + start++), new Float("" + start++))); - obj.setIntegerAttribute(toSet(new Integer("" + start++), new Integer("" + start++), new Integer("" + start++))); - obj.setLongObjectAttribute(toSet(new Long("" + start++), new Long("" + start++), new Long("" + start++))); - obj.setBooleanAttribute(toSet(true, false)); - obj.setDateAttribute(toSet(new Date(startKey++), new Date(startKey++), new Date(startKey++))); - Set cals = new HashSet(); - for (Date d : obj.getDateAttribute()) { - Calendar cal = GregorianCalendar.getInstance(); - cal.setTime(d); - cals.add(cal); - } - obj.setCalendarAttribute(toSet(cals)); - return obj; - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/QueryIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/QueryIntegrationTest.java deleted file mode 100644 index fc5838014dca..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/QueryIntegrationTest.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright 
2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.math.BigDecimal; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Random; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbQueryExpression; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.pojos.RangeKeyClass; - -/** - * Integration tests for the query operation on DynamoDBMapper. - */ -public class QueryIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final long HASH_KEY = System.currentTimeMillis(); - private static final int TEST_ITEM_NUMBER = 500; - private static RangeKeyClass hashKeyObject; - private static DynamoDbMapper mapper; - - @BeforeClass - public static void setUp() throws Exception { - setUpTableWithRangeAttribute(); - - DynamoDbMapperConfig mapperConfig = new DynamoDbMapperConfig(DynamoDbMapperConfig.ConsistentRead.CONSISTENT); - mapper = new DynamoDbMapper(dynamo, mapperConfig); - - putTestData(mapper, TEST_ITEM_NUMBER); - - hashKeyObject = new RangeKeyClass(); - hashKeyObject.setKey(HASH_KEY); - } - - /** - * Use BatchSave to put some test data into the tested table. Each item is - * hash-keyed by the same value, and range-keyed by numbers starting from 0. 
- */ - private static void putTestData(DynamoDbMapper mapper, int itemNumber) { - List objs = new ArrayList(); - for (int i = 0; i < itemNumber; i++) { - RangeKeyClass obj = new RangeKeyClass(); - obj.setKey(HASH_KEY); - obj.setRangeKey(i); - obj.setBigDecimalAttribute(new BigDecimal(i)); - objs.add(obj); - } - mapper.batchSave(objs); - } - - @Test - public void testQueryWithPrimaryRangeKey() throws Exception { - DynamoDbQueryExpression queryExpression = - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyObject) - .withRangeKeyCondition( - "rangeKey", - Condition.builder() - .comparisonOperator(ComparisonOperator.GT) - .attributeValueList(AttributeValue.builder().n("1.0").build()).build()) - .withLimit(11); - List list = mapper.query(RangeKeyClass.class, queryExpression); - - int count = 0; - Iterator iterator = list.iterator(); - while (iterator.hasNext()) { - count++; - RangeKeyClass next = iterator.next(); - assertTrue(next.getRangeKey() > 1.00); - } - - int numMatchingObjects = TEST_ITEM_NUMBER - 2; - assertEquals(count, numMatchingObjects); - assertEquals(numMatchingObjects, list.size()); - - assertNotNull(list.get(list.size() / 2)); - assertTrue(list.contains(list.get(list.size() / 2))); - assertEquals(numMatchingObjects, list.toArray().length); - - Thread.sleep(250); - int totalCount = mapper.count(RangeKeyClass.class, queryExpression); - assertEquals(numMatchingObjects, totalCount); - - /** - * Tests query with only hash key - */ - queryExpression = new DynamoDbQueryExpression().withHashKeyValues(hashKeyObject); - list = mapper.query(RangeKeyClass.class, queryExpression); - assertEquals(TEST_ITEM_NUMBER, list.size()); - } - - /** - * Tests making queries using query filter on non-key attributes. - */ - @Test - public void testQueryFilter() { - // A random filter condition to be applied to the query. - Random random = new Random(); - int randomFilterValue = random.nextInt(TEST_ITEM_NUMBER); - Condition filterCondition = Condition.builder() - .comparisonOperator(ComparisonOperator.LT) - .attributeValueList( - AttributeValue.builder().n(Integer.toString(randomFilterValue)).build()).build(); - - /* - * (1) Apply the filter on the range key, in form of key condition - */ - DynamoDbQueryExpression queryWithRangeKeyCondition = - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyObject) - .withRangeKeyCondition("rangeKey", filterCondition); - List rangeKeyConditionResult = mapper.query(RangeKeyClass.class, queryWithRangeKeyCondition); - - /* - * (2) Apply the filter on the bigDecimalAttribute, in form of query filter - */ - DynamoDbQueryExpression queryWithQueryFilterCondition = - new DynamoDbQueryExpression() - .withHashKeyValues(hashKeyObject) - .withQueryFilter(Collections.singletonMap("bigDecimalAttribute", filterCondition)); - List queryFilterResult = mapper.query(RangeKeyClass.class, queryWithQueryFilterCondition); - - assertEquals(rangeKeyConditionResult.size(), queryFilterResult.size()); - for (int i = 0; i < rangeKeyConditionResult.size(); i++) { - assertEquals(rangeKeyConditionResult.get(i), queryFilterResult.get(i)); - } - } - - /** - * Tests that exception should be raised when user provides an index name - * when making query with the primary range key. 
- */ - @Test - public void testUnnecessaryIndexNameException() { - try { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - long hashKey = System.currentTimeMillis(); - RangeKeyClass keyObject = new RangeKeyClass(); - keyObject.setKey(hashKey); - DynamoDbQueryExpression queryExpression = new DynamoDbQueryExpression() - .withHashKeyValues(keyObject); - queryExpression.withRangeKeyCondition("rangeKey", - Condition.builder().comparisonOperator(ComparisonOperator.GT.toString()) - .attributeValueList( - AttributeValue.builder().n("1.0").build()).build()).withLimit(11) - .withIndexName("some_index"); - mapper.query(RangeKeyClass.class, queryExpression); - fail("User should not provide index name when making query with the primary range key"); - } catch (IllegalArgumentException expected) { - System.out.println(expected.getMessage()); - } catch (Exception e) { - fail("Should trigger SdkClientException."); - } - - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/RangeKeyAttributesIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/RangeKeyAttributesIntegrationTest.java deleted file mode 100644 index 32f1f6f642f0..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/RangeKeyAttributesIntegrationTest.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -import java.math.BigDecimal; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.pojos.RangeKeyClass; - -/** - * Tests range and hash key combination - */ -public class RangeKeyAttributesIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String RANGE_KEY = "rangeKey"; - private static final String INTEGER_ATTRIBUTE = "integerSetAttribute"; - private static final String BIG_DECIMAL_ATTRIBUTE = "bigDecimalAttribute"; - private static final String STRING_SET_ATTRIBUTE = "stringSetAttribute"; - private static final String STRING_ATTRIBUTE = "stringAttribute"; - private static final String VERSION_ATTRIBUTE = "version"; - private static final List> attrs = new LinkedList>(); - // We don't start with the current system millis like other tests because - // it's out of the range of some data types - private static int start = 1; - - // Test data - static { - for (int i = 0; i < 5; i++) { - Map attr = new HashMap(); - attr.put(KEY_NAME, AttributeValue.builder().n("" + startKey++).build()); - attr.put(RANGE_KEY, AttributeValue.builder().n("" + start++).build()); - attr.put(INTEGER_ATTRIBUTE, AttributeValue.builder().ns("" + start++, "" + start++, "" + start++).build()); - attr.put(BIG_DECIMAL_ATTRIBUTE, AttributeValue.builder().n("" + start++).build()); - attr.put(STRING_ATTRIBUTE, AttributeValue.builder().s("" + start++).build()); - attr.put(STRING_SET_ATTRIBUTE, AttributeValue.builder().ss("" + start++, "" + start++, "" + start++).build()); - attr.put(VERSION_ATTRIBUTE, AttributeValue.builder().n("1").build()); - - attrs.add(attr); - } - } - - ; - - @BeforeClass - public static void setUp() throws Exception { - setUpTableWithRangeAttribute(); - - // Insert the data - for (Map attr : attrs) { - dynamo.putItem(PutItemRequest.builder().tableName(TABLE_WITH_RANGE_ATTRIBUTE).item(attr).build()); - } - } - - @Test - public void testLoad() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - for (Map attr : attrs) { - RangeKeyClass x = util.load(newRangeKey(Long.parseLong(attr.get(KEY_NAME).n()), - Double.parseDouble(attr.get(RANGE_KEY).n()))); - - // Convert all numbers to the most inclusive type for easy - // comparison - assertEquals(new BigDecimal(x.getKey()), new BigDecimal(attr.get(KEY_NAME).n())); - assertEquals(new BigDecimal(x.getRangeKey()), new BigDecimal(attr.get(RANGE_KEY).n())); - assertEquals(new BigDecimal(x.getVersion()), new BigDecimal(attr.get(VERSION_ATTRIBUTE).n())); - assertEquals(x.getBigDecimalAttribute(), new BigDecimal(attr.get(BIG_DECIMAL_ATTRIBUTE).n())); - assertNumericSetsEquals(x.getIntegerAttribute(), attr.get(INTEGER_ATTRIBUTE).ns()); - assertEquals(x.getStringAttribute(), attr.get(STRING_ATTRIBUTE).s()); - assertSetsEqual(x.getStringSetAttribute(), toSet(attr.get(STRING_SET_ATTRIBUTE).ss())); - } - } - - private RangeKeyClass newRangeKey(long hashKey, double rangeKey) { - RangeKeyClass 
obj = new RangeKeyClass(); - obj.setKey(hashKey); - obj.setRangeKey(rangeKey); - return obj; - } - - @Test - public void testSave() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - RangeKeyClass obj = getUniqueObject(); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (RangeKeyClass obj : objs) { - util.save(obj); - } - - for (RangeKeyClass obj : objs) { - RangeKeyClass loaded = util.load(RangeKeyClass.class, obj.getKey(), obj.getRangeKey()); - assertEquals(obj, loaded); - } - } - - @Test - public void testUpdate() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - RangeKeyClass obj = getUniqueObject(); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (RangeKeyClass obj : objs) { - util.save(obj); - } - - for (RangeKeyClass obj : objs) { - RangeKeyClass replacement = getUniqueObject(); - replacement.setKey(obj.getKey()); - replacement.setRangeKey(obj.getRangeKey()); - replacement.setVersion(obj.getVersion()); - util.save(replacement); - - RangeKeyClass loadedObject = util.load(RangeKeyClass.class, obj.getKey(), obj.getRangeKey()); - assertEquals(replacement, loadedObject); - - // If we try to update the old version, we should get an error - replacement.setVersion(replacement.getVersion() - 1); - try { - util.save(replacement); - fail("Should have thrown an exception"); - } catch (Exception expected) { - // Ignored or expected. - } - } - } - - private RangeKeyClass getUniqueObject() { - RangeKeyClass obj = new RangeKeyClass(); - obj.setKey(startKey++); - obj.setIntegerAttribute(toSet(start++, start++, start++)); - obj.setBigDecimalAttribute(new BigDecimal(startKey++)); - obj.setRangeKey(start++); - obj.setStringAttribute("" + startKey++); - obj.setStringSetAttribute(toSet("" + startKey++, "" + startKey++, "" + startKey++)); - return obj; - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ScalarAttributeIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ScalarAttributeIntegrationTest.java deleted file mode 100644 index 840daaf8a03b..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ScalarAttributeIntegrationTest.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import java.util.UUID; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbScalarAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.services.dynamodb.pojos.AutoKeyAndVal; - -/** - * Status tests for {@code ScalarAttribute}. 
- */ -public class ScalarAttributeIntegrationTest extends AbstractKeyAndValIntegrationTestCase { - - /** - * Test with a non-null enum val. - */ - @Test - public void testMarshalling() { - final KeyAndBinaryUuid object = new KeyAndBinaryUuid(); - object.setVal(UUID.randomUUID()); - assertBeforeAndAfterChange(false, object); - } - - /** - * An object with an enumeration. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndBinaryUuid extends AutoKeyAndVal { - @DynamoDbScalarAttribute(type = ScalarAttributeType.B) - public UUID getVal() { - return super.getVal(); - } - - @Override - public void setVal(final UUID val) { - super.setVal(val); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ScanIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ScanIntegrationTest.java deleted file mode 100644 index 2c1a5a8515b6..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/ScanIntegrationTest.java +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.awscore.exception.AwsServiceException; -import software.amazon.awssdk.utils.ImmutableMap; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.TableUtils; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbScanExpression; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.PaginatedParallelScanList; -import software.amazon.awssdk.services.dynamodb.datamodeling.PaginatedScanList; -import software.amazon.awssdk.services.dynamodb.datamodeling.ScanResultPage; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; 
-import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; - -/** - * Integration tests for the scan operation on DynamoDBMapper. - */ -public class ScanIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String TABLE_NAME = "aws-java-sdk-util-scan"; - /** - * We set a small limit in order to test the behavior of PaginatedList - * when it could not load all the scan result in one batch. - */ - private static final int SCAN_LIMIT = 10; - private static final int PARALLEL_SCAN_SEGMENTS = 5; - - private static void createTestData() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (int i = 0; i < 500; i++) { - util.save(new SimpleClass(Integer.toString(i), Integer.toString(i))); - } - } - - @BeforeClass - public static void setUpTestData() throws Exception { - String keyName = "id"; - CreateTableRequest createTableRequest = CreateTableRequest.builder() - .tableName(TABLE_NAME) - .keySchema(KeySchemaElement.builder().attributeName(keyName).keyType(KeyType.HASH).build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName(keyName).attributeType( - ScalarAttributeType.S).build()) - .provisionedThroughput(ProvisionedThroughput.builder() - .readCapacityUnits(10L) - .writeCapacityUnits(5L).build()) - .build(); - - TableUtils.createTableIfNotExists(dynamo, createTableRequest); - TableUtils.waitUntilActive(dynamo, TABLE_NAME); - - createTestData(); - } - - - @Test - public void testScan() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - DynamoDbScanExpression scanExpression = new DynamoDbScanExpression().withLimit(SCAN_LIMIT); - scanExpression - .addFilterCondition("value", Condition.builder().comparisonOperator(ComparisonOperator.NOT_NULL.toString()).build()); - scanExpression - .addFilterCondition("extraData", Condition.builder().comparisonOperator(ComparisonOperator.NOT_NULL.toString()).build()); - List list = util.scan(SimpleClass.class, scanExpression); - - int count = 0; - Iterator iterator = list.iterator(); - while (iterator.hasNext()) { - count++; - SimpleClass next = iterator.next(); - assertNotNull(next.getExtraData()); - assertNotNull(next.getValue()); - } - - int totalCount = util.count(SimpleClass.class, scanExpression); - - assertNotNull(list.get(totalCount / 2)); - assertEquals(totalCount, count); - assertEquals(totalCount, list.size()); - - assertTrue(list.contains(list.get(list.size() / 2))); - assertEquals(count, list.toArray().length); - } - - /** - * Tests scanning the table with AND/OR logic operator. 
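Editor's note: the removed ScanIntegrationTest above drives DynamoDbMapper.scan() through a DynamoDbScanExpression filter. The snippet below is an illustrative sketch of that usage only, not part of the deleted file; it assumes the DynamoDbClient constructor argument and the SimpleClass POJO defined later in this same test.

import java.util.List;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbScanExpression;
import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator;
import software.amazon.awssdk.services.dynamodb.model.Condition;

public class ScanUsageSketch {
    /** Scans for items whose "value" attribute is present, mirroring the deleted test's pattern. */
    public static List<ScanIntegrationTest.SimpleClass> scanNonNullValues(DynamoDbClient dynamo) {
        DynamoDbMapper mapper = new DynamoDbMapper(dynamo);

        DynamoDbScanExpression expression = new DynamoDbScanExpression().withLimit(10);
        expression.addFilterCondition("value", Condition.builder()
                .comparisonOperator(ComparisonOperator.NOT_NULL.toString())
                .build());

        // scan() returns a lazily loaded PaginatedScanList; iterating it pages through the table.
        return mapper.scan(ScanIntegrationTest.SimpleClass.class, expression);
    }
}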
- */ - @Test - public void testScanWithConditionalOperator() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - - DynamoDbScanExpression scanExpression = new DynamoDbScanExpression() - .withLimit(SCAN_LIMIT) - .withScanFilter(ImmutableMap.of( - "value", Condition.builder().comparisonOperator(ComparisonOperator.NOT_NULL).build(), - "non-existent-field", Condition.builder().comparisonOperator(ComparisonOperator.NOT_NULL).build() - )) - .withConditionalOperator(ConditionalOperator.AND); - - List andConditionResult = mapper.scan(SimpleClass.class, scanExpression); - assertTrue(andConditionResult.isEmpty()); - - List orConditionResult = mapper.scan(SimpleClass.class, - scanExpression.withConditionalOperator(ConditionalOperator.OR)); - assertFalse(orConditionResult.isEmpty()); - } - - @Test - public void testParallelScan() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - DynamoDbScanExpression scanExpression = new DynamoDbScanExpression().withLimit(SCAN_LIMIT); - scanExpression - .addFilterCondition("value", Condition.builder().comparisonOperator(ComparisonOperator.NOT_NULL.toString()).build()); - scanExpression - .addFilterCondition("extraData", Condition.builder().comparisonOperator(ComparisonOperator.NOT_NULL.toString()).build()); - - PaginatedParallelScanList parallelScanList = util - .parallelScan(SimpleClass.class, scanExpression, PARALLEL_SCAN_SEGMENTS); - int count = 0; - Iterator iterator = parallelScanList.iterator(); - HashMap allDataAppearance = new HashMap(); - for (int i = 0; i < 500; i++) { - allDataAppearance.put("" + i, false); - } - while (iterator.hasNext()) { - count++; - SimpleClass next = iterator.next(); - assertNotNull(next.getExtraData()); - assertNotNull(next.getValue()); - allDataAppearance.put(next.getId(), true); - } - assertFalse(allDataAppearance.values().contains(false)); - - int totalCount = util.count(SimpleClass.class, scanExpression); - - assertNotNull(parallelScanList.get(totalCount / 2)); - assertEquals(totalCount, count); - assertEquals(totalCount, parallelScanList.size()); - - assertTrue(parallelScanList.contains(parallelScanList.get(parallelScanList.size() / 2))); - assertEquals(count, parallelScanList.toArray().length); - - } - - @Test - public void testParallelScanPerformance() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - DynamoDbScanExpression scanExpression = new DynamoDbScanExpression().withLimit(SCAN_LIMIT); - scanExpression - .addFilterCondition("value", Condition.builder().comparisonOperator(ComparisonOperator.NOT_NULL.toString()).build()); - scanExpression - .addFilterCondition("extraData", Condition.builder().comparisonOperator(ComparisonOperator.NOT_NULL.toString()).build()); - - long startTime = System.currentTimeMillis(); - PaginatedScanList scanList = util.scan(SimpleClass.class, scanExpression); - scanList.loadAllResults(); - long fullTableScanTime = System.currentTimeMillis() - startTime; - startTime = System.currentTimeMillis(); - PaginatedParallelScanList parallelScanList = util - .parallelScan(SimpleClass.class, scanExpression, PARALLEL_SCAN_SEGMENTS); - parallelScanList.loadAllResults(); - long parallelScanTime = System.currentTimeMillis() - startTime; - assertEquals(scanList.size(), parallelScanList.size()); - assertTrue(fullTableScanTime > parallelScanTime); - System.out.println("fullTableScanTime : " + fullTableScanTime + "(ms), parallelScanTime : " + parallelScanTime + "(ms)."); - } - - @Test - public void testParallelScanExceptionHandling() { - DynamoDbMapper util = 
new DynamoDbMapper(dynamo); - int INVALID_LIMIT = 0; - DynamoDbScanExpression scanExpression = new DynamoDbScanExpression().withLimit(INVALID_LIMIT); - try { - PaginatedParallelScanList parallelScanList = util - .parallelScan(SimpleClass.class, scanExpression, PARALLEL_SCAN_SEGMENTS); - fail("Should have seen the SdkServiceException"); - } catch (AwsServiceException exception) { - assertNotNull(exception.awsErrorDetails().errorCode()); - assertNotNull(exception.getMessage()); - } catch (Exception e) { - fail("Should have seen the SdkServiceException"); - } - - } - - @Test - public void testScanPage() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - DynamoDbScanExpression scanExpression = new DynamoDbScanExpression(); - scanExpression.addFilterCondition("value", - Condition.builder().comparisonOperator(ComparisonOperator.NOT_NULL.toString()).build()); - scanExpression.addFilterCondition("extraData", - Condition.builder().comparisonOperator(ComparisonOperator.NOT_NULL.toString()).build()); - int limit = 3; - scanExpression.setLimit(limit); - ScanResultPage result = util.scanPage(SimpleClass.class, scanExpression); - - int count = 0; - Iterator iterator = result.getResults().iterator(); - Set seen = new HashSet(); - while (iterator.hasNext()) { - count++; - SimpleClass next = iterator.next(); - assertNotNull(next.getExtraData()); - assertNotNull(next.getValue()); - assertTrue(seen.add(next)); - } - - assertEquals(limit, count); - assertEquals(count, result.getResults().toArray().length); - - scanExpression.setExclusiveStartKey(result.lastEvaluatedKey()); - result = util.scanPage(SimpleClass.class, scanExpression); - - iterator = result.getResults().iterator(); - count = 0; - while (iterator.hasNext()) { - count++; - SimpleClass next = iterator.next(); - assertNotNull(next.getExtraData()); - assertNotNull(next.getValue()); - assertTrue(seen.add(next)); - } - - assertEquals(limit, count); - assertEquals(count, result.getResults().toArray().length); - - } - - @DynamoDbTable(tableName = "aws-java-sdk-util-scan") - public static final class SimpleClass { - private String id; - private String value; - private String extraData; - - - public SimpleClass() { - } - - public SimpleClass(String id, String value) { - this.id = id; - this.value = value; - this.extraData = UUID.randomUUID().toString(); - } - - @DynamoDbHashKey - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getValue() { - return value; - } - - public void setValue(String value) { - this.value = value; - } - - public String getExtraData() { - return extraData; - } - - public void setExtraData(String extraData) { - this.extraData = extraData; - } - } -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/SimpleNumericAttributesIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/SimpleNumericAttributesIntegrationTest.java deleted file mode 100644 index 8a2f023bcde6..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/SimpleNumericAttributesIntegrationTest.java +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; - -/** - * Tests numeric attributes - */ -public class SimpleNumericAttributesIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String INT_ATTRIBUTE = "intAttribute"; - private static final String INTEGER_ATTRIBUTE = "integerAttribute"; - private static final String FLOAT_ATTRIBUTE = "floatAttribute"; - private static final String FLOAT_OBJECT_ATTRIBUTE = "floatObjectAttribute"; - private static final String DOUBLE_ATTRIBUTE = "doubleAttribute"; - private static final String DOUBLE_OBJECT_ATTRIBUTE = "doubleObjectAttribute"; - private static final String BIG_INTEGER_ATTRIBUTE = "bigIntegerAttribute"; - private static final String BIG_DECIMAL_ATTRIBUTE = "bigDecimalAttribute"; - private static final String LONG_ATTRIBUTE = "longAttribute"; - private static final String LONG_OBJECT_ATTRIBUTE = "longObjectAttribute"; - private static final String BYTE_ATTRIBUTE = "byteAttribute"; - private static final String BYTE_OBJECT_ATTRIBUTE = "byteObjectAttribute"; - private static final String BOOLEAN_ATTRIBUTE = "booleanAttribute"; - private static final String BOOLEAN_OBJECT_ATTRIBUTE = "booleanObjectAttribute"; - private static final String SHORT_ATTRIBUTE = "shortAttribute"; - private static final String SHORT_OBJECT_ATTRIBUTE = "shortObjectAttribute"; - private static final List> attrs = new LinkedList>(); - // We don't start with the current system millis like other tests because - // it's out of the range of some data types - private static int start = 1; - private static int byteStart = -127; - - // Test data - static { - for (int i = 0; i < 5; i++) { - Map attr = new HashMap(); - attr.put(KEY_NAME, AttributeValue.builder().s("" + start++).build()); - attr.put(INT_ATTRIBUTE, AttributeValue.builder().n("" + start++).build()); - attr.put(INTEGER_ATTRIBUTE, AttributeValue.builder().n("" + start++).build()); - attr.put(FLOAT_ATTRIBUTE, AttributeValue.builder().n("" + start++).build()); - attr.put(FLOAT_OBJECT_ATTRIBUTE, AttributeValue.builder().n("" + start++).build()); - attr.put(DOUBLE_ATTRIBUTE, AttributeValue.builder().n("" + start++).build()); - attr.put(DOUBLE_OBJECT_ATTRIBUTE, 
AttributeValue.builder().n("" + start++).build()); - attr.put(BIG_INTEGER_ATTRIBUTE, AttributeValue.builder().n("" + start++).build()); - attr.put(BIG_DECIMAL_ATTRIBUTE, AttributeValue.builder().n("" + start++).build()); - attr.put(LONG_ATTRIBUTE, AttributeValue.builder().n("" + start++).build()); - attr.put(LONG_OBJECT_ATTRIBUTE, AttributeValue.builder().n("" + start++).build()); - attr.put(BYTE_ATTRIBUTE, AttributeValue.builder().n("" + byteStart++).build()); - attr.put(BYTE_OBJECT_ATTRIBUTE, AttributeValue.builder().n("" + byteStart++).build()); - attr.put(BOOLEAN_ATTRIBUTE, AttributeValue.builder().n(start++ % 2 == 0 ? "1" : "0").build()); - attr.put(BOOLEAN_OBJECT_ATTRIBUTE, AttributeValue.builder().n(start++ % 2 == 0 ? "1" : "0").build()); - attr.put(SHORT_ATTRIBUTE, AttributeValue.builder().n("" + byteStart++).build()); - attr.put(SHORT_OBJECT_ATTRIBUTE, AttributeValue.builder().n("" + byteStart++).build()); - attrs.add(attr); - } - } - - ; - - @BeforeClass - public static void setUp() throws Exception { - DynamoDBMapperIntegrationTestBase.setUp(); - - // Insert the data - for (Map attr : attrs) { - dynamo.putItem(PutItemRequest.builder().tableName(TABLE_NAME).item(attr).build()); - } - } - - private NumberAttributeClass getKeyObject(String key) { - NumberAttributeClass obj = new NumberAttributeClass(); - obj.setKey(key); - return obj; - } - - @Test - public void testLoad() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - for (Map attr : attrs) { - NumberAttributeClass x = util.load(getKeyObject(attr.get(KEY_NAME).s())); - assertEquals(x.getKey(), attr.get(KEY_NAME).s()); - - // Convert all numbers to the most inclusive type for easy comparison - assertEquals(x.getBigDecimalAttribute(), new BigDecimal(attr.get(BIG_DECIMAL_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getBigIntegerAttribute()), new BigDecimal(attr.get(BIG_INTEGER_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getFloatAttribute()), new BigDecimal(attr.get(FLOAT_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getFloatObjectAttribute()), new BigDecimal(attr.get(FLOAT_OBJECT_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getDoubleAttribute()), new BigDecimal(attr.get(DOUBLE_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getDoubleObjectAttribute()), new BigDecimal(attr.get(DOUBLE_OBJECT_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getIntAttribute()), new BigDecimal(attr.get(INT_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getIntegerAttribute()), new BigDecimal(attr.get(INTEGER_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getLongAttribute()), new BigDecimal(attr.get(LONG_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getLongObjectAttribute()), new BigDecimal(attr.get(LONG_OBJECT_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getByteAttribute()), new BigDecimal(attr.get(BYTE_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getByteObjectAttribute()), new BigDecimal(attr.get(BYTE_OBJECT_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getShortAttribute()), new BigDecimal(attr.get(SHORT_ATTRIBUTE).n())); - assertEquals(new BigDecimal(x.getShortObjectAttribute()), new BigDecimal(attr.get(SHORT_OBJECT_ATTRIBUTE).n())); - assertEquals(x.isBooleanAttribute(), attr.get(BOOLEAN_ATTRIBUTE).n().equals("1")); - assertEquals(x.getBooleanObjectAttribute(), attr.get(BOOLEAN_OBJECT_ATTRIBUTE).n().equals("1")); - } - - // Test loading an object that doesn't exist - assertNull(util.load(getKeyObject("does not exist"))); - } - - @Test - public void testSave() throws Exception { - 
List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - NumberAttributeClass obj = getUniqueObject(); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (NumberAttributeClass obj : objs) { - util.save(obj); - } - - for (NumberAttributeClass obj : objs) { - NumberAttributeClass loaded = util.load(obj); - loaded.setIgnored(obj.getIgnored()); - assertEquals(obj, loaded); - } - } - - @Test - public void testUpdate() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - NumberAttributeClass obj = getUniqueObject(); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (NumberAttributeClass obj : objs) { - util.save(obj); - } - - for (NumberAttributeClass obj : objs) { - NumberAttributeClass replacement = getUniqueObject(); - replacement.setKey(obj.getKey()); - util.save(replacement); - - NumberAttributeClass loadedObject = util.load(obj); - - // The ignored attribute isn't handled by big bird, so we have to - // set it manually here before doing the comparison. - assertFalse(replacement.getIgnored().equals(loadedObject.getIgnored())); - loadedObject.setIgnored(replacement.getIgnored()); - assertEquals(replacement, loadedObject); - } - } - - /** - * Tests automatically setting a hash key upon saving. - */ - @Test - public void testSetHashKey() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - NumberAttributeClass obj = getUniqueObject(); - obj.setKey(null); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (NumberAttributeClass obj : objs) { - assertNull(obj.getKey()); - util.save(obj); - assertNotNull(obj.getKey()); - NumberAttributeClass loadedObject = util.load(obj); - - // The ignored attribute isn't handled by big bird, so we have to - // set it manually here before doing the comparison. 
- assertFalse(obj.getIgnored().equals(loadedObject.getIgnored())); - loadedObject.setIgnored(obj.getIgnored()); - assertEquals(obj, loadedObject); - } - } - - @Test - public void testDelete() throws Exception { - NumberAttributeClass obj = getUniqueObject(); - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.save(obj); - - NumberAttributeClass loaded = util.load(NumberAttributeClass.class, obj.getKey()); - loaded.setIgnored(obj.getIgnored()); - assertEquals(obj, loaded); - - util.delete(obj); - assertNull(util.load(NumberAttributeClass.class, obj.getKey())); - } - - @Test - public void performanceTest() throws Exception { - NumberAttributeClass obj = getUniqueObject(); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - mapper.save(obj); - - GetItemResponse item = dynamo.getItem(GetItemRequest.builder().tableName("aws-java-sdk-util").key( - mapKey(KEY_NAME, AttributeValue.builder().s(obj.getKey()).build())).build()); - - long start = System.currentTimeMillis(); - for (int i = 0; i < 10000; i++) { - mapper.marshallIntoObject(NumberAttributeClass.class, item.item()); - } - - long end = System.currentTimeMillis(); - - System.err.println("time: " + (end - start)); - } - - private NumberAttributeClass getUniqueObject() { - NumberAttributeClass obj = new NumberAttributeClass(); - obj.setKey(String.valueOf(startKey++)); - obj.setBigDecimalAttribute(new BigDecimal(startKey++)); - obj.setBigIntegerAttribute(new BigInteger("" + startKey++)); - obj.setByteAttribute((byte) byteStart++); - obj.setByteObjectAttribute(new Byte("" + byteStart++)); - obj.setDoubleAttribute(new Double("" + start++)); - obj.setDoubleObjectAttribute(new Double("" + start++)); - obj.setFloatAttribute(new Float("" + start++)); - obj.setFloatObjectAttribute(new Float("" + start++)); - obj.setIntAttribute(new Integer("" + start++)); - obj.setIntegerAttribute(new Integer("" + start++)); - obj.setLongAttribute(new Long("" + start++)); - obj.setLongObjectAttribute(new Long("" + start++)); - obj.setShortAttribute(new Short("" + start++)); - obj.setShortObjectAttribute(new Short("" + start++)); - obj.setDateAttribute(new Date(startKey++)); - obj.setBooleanAttribute(start++ % 2 == 0); - obj.setBooleanObjectAttribute(start++ % 2 == 0); - obj.setIgnored("" + start++); - Calendar cal = GregorianCalendar.getInstance(); - cal.setTime(new Date(startKey++)); - obj.setCalendarAttribute(cal); - return obj; - } - - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/SimpleStringAttributesIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/SimpleStringAttributesIntegrationTest.java deleted file mode 100644 index f65fff6ffd7a..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/SimpleStringAttributesIntegrationTest.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.ConsistentRead; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.SaveBehavior; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.pojos.StringAttributeClass; - -/** - * Tests simple string attributes - */ -public class SimpleStringAttributesIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String ORIGINAL_NAME_ATTRIBUTE = "originalName"; - private static final String STRING_ATTRIBUTE = "stringAttribute"; - private static final List> attrs = new LinkedList>(); - - // Test data - static { - for (int i = 0; i < 5; i++) { - Map attr = new HashMap(); - attr.put(KEY_NAME, AttributeValue.builder().s("" + startKey++).build()); - attr.put(STRING_ATTRIBUTE, AttributeValue.builder().s("" + startKey++).build()); - attr.put(ORIGINAL_NAME_ATTRIBUTE, AttributeValue.builder().s("" + startKey++).build()); - attrs.add(attr); - } - } - - ; - - @BeforeClass - public static void setUp() throws Exception { - DynamoDBMapperIntegrationTestBase.setUp(); - - // Insert the data - for (Map attr : attrs) { - dynamo.putItem(PutItemRequest.builder().tableName(TABLE_NAME).item(attr).build()); - } - } - - @Test - public void testLoad() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - for (Map attr : attrs) { - StringAttributeClass x = util.load(StringAttributeClass.class, attr.get(KEY_NAME).s()); - assertEquals(x.getKey(), attr.get(KEY_NAME).s()); - assertEquals(x.getStringAttribute(), attr.get(STRING_ATTRIBUTE).s()); - assertEquals(x.getRenamedAttribute(), attr.get(ORIGINAL_NAME_ATTRIBUTE).s()); - } - - } - - @Test - public void testSave() { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - StringAttributeClass obj = getUniqueObject(); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (StringAttributeClass obj : objs) { - util.save(obj); - } - - for (StringAttributeClass obj : objs) { - StringAttributeClass loaded = util.load(StringAttributeClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - } - - /** - * Tests saving an incomplete object into DynamoDB - */ - @Test - public void testIncompleteObject() { - StringAttributeClass obj = getUniqueObject(); - obj.setStringAttribute(null); - DynamoDbMapper util = new DynamoDbMapper(dynamo); - util.save(obj); - - assertEquals(obj, util.load(StringAttributeClass.class, obj.getKey())); - - // test removing an attribute - assertNotNull(obj.getRenamedAttribute()); - obj.setRenamedAttribute(null); - util.save(obj); - assertEquals(obj, util.load(StringAttributeClass.class, 
obj.getKey())); - } - - @Test - public void testUpdate() { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - StringAttributeClass obj = getUniqueObject(); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (StringAttributeClass obj : objs) { - util.save(obj); - } - - for (StringAttributeClass obj : objs) { - StringAttributeClass replacement = getUniqueObject(); - replacement.setKey(obj.getKey()); - util.save(replacement); - - assertEquals(replacement, util.load(StringAttributeClass.class, obj.getKey())); - } - } - - @Test - public void testSaveOnlyKey() { - KeyOnly obj = new KeyOnly(); - obj.setKey("" + startKey++); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - mapper.save(obj); - - KeyOnly loaded = mapper.load(KeyOnly.class, obj.getKey(), new DynamoDbMapperConfig( - DynamoDbMapperConfig.ConsistentRead.CONSISTENT)); - assertEquals(obj, loaded); - - // saving again shouldn't be an error - mapper.save(obj); - } - - @Test - public void testSaveOnlyKeyClobber() { - KeyOnly obj = new KeyOnly(); - obj.setKey("" + startKey++); - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - mapper.save(obj, new DynamoDbMapperConfig(SaveBehavior.CLOBBER)); - - KeyOnly loaded = mapper.load(KeyOnly.class, obj.getKey(), new DynamoDbMapperConfig(ConsistentRead.CONSISTENT)); - assertEquals(obj, loaded); - - // saving again shouldn't be an error - mapper.save(obj, new DynamoDbMapperConfig(SaveBehavior.CLOBBER)); - } - - private StringAttributeClass getUniqueObject() { - StringAttributeClass obj = new StringAttributeClass(); - obj.setKey(String.valueOf(startKey++)); - obj.setRenamedAttribute(String.valueOf(startKey++)); - obj.setStringAttribute(String.valueOf(startKey++)); - return obj; - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static final class KeyOnly { - private String key; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - KeyOnly other = (KeyOnly) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - return true; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/StringSetAttributesIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/StringSetAttributesIntegrationTest.java deleted file mode 100644 index 0033006f1c81..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/StringSetAttributesIntegrationTest.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.pojos.StringSetAttributeClass; - - -/** - * Tests string set attributes - */ -public class StringSetAttributesIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - private static final String ORIGINAL_NAME_ATTRIBUTE = "originalName"; - private static final String STRING_SET_ATTRIBUTE = "stringSetAttribute"; - private static final String EXTRA_ATTRIBUTE = "extra"; - private static final List> attrs = new LinkedList>(); - - // Test data - static { - for (int i = 0; i < 5; i++) { - Map attr = new HashMap(); - attr.put(KEY_NAME, AttributeValue.builder().s("" + startKey++).build()); - attr.put(STRING_SET_ATTRIBUTE, AttributeValue.builder().ss("" + ++startKey, "" + ++startKey, "" + ++startKey).build()); - attr.put(ORIGINAL_NAME_ATTRIBUTE, AttributeValue.builder().ss("" + ++startKey, "" + ++startKey, "" + ++startKey).build()); - attr.put(EXTRA_ATTRIBUTE, AttributeValue.builder().ss("" + ++startKey, "" + ++startKey, "" + ++startKey).build()); - attrs.add(attr); - } - } - - ; - - @BeforeClass - public static void setUp() throws Exception { - DynamoDBMapperIntegrationTestBase.setUp(); - - // Insert the data - for (Map attr : attrs) { - dynamo.putItem(PutItemRequest.builder().tableName(TABLE_NAME).item(attr).build()); - } - } - - @Test - public void testLoad() throws Exception { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - for (Map attr : attrs) { - StringSetAttributeClass x = util.load(StringSetAttributeClass.class, attr.get(KEY_NAME).s()); - assertEquals(x.getKey(), attr.get(KEY_NAME).s()); - assertSetsEqual(x.getStringSetAttribute(), toSet(attr.get(STRING_SET_ATTRIBUTE).ss())); - assertSetsEqual(x.getStringSetAttributeRenamed(), toSet(attr.get(ORIGINAL_NAME_ATTRIBUTE).ss())); - } - } - - /** - * Tests saving only some attributes of an object. 
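Editor's note: as a minimal sketch of the save/load round trip exercised by the removed StringSetAttributesIntegrationTest (illustrative only; the DynamoDbClient parameter type is assumed, and StringSetAttributeClass is the pojos-package class imported by the deleted test):

import java.util.Set;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper;
import software.amazon.awssdk.services.dynamodb.pojos.StringSetAttributeClass;

public class StringSetSaveLoadSketch {
    /** Round-trips a string-set attribute using the mapper, as the deleted test does. */
    public static StringSetAttributeClass roundTrip(DynamoDbClient dynamo, String key, Set<String> values) {
        DynamoDbMapper mapper = new DynamoDbMapper(dynamo);

        StringSetAttributeClass pojo = new StringSetAttributeClass();
        pojo.setKey(key);
        pojo.setStringSetAttribute(values);
        mapper.save(pojo);

        // The deleted testIncompleteObject also saves the same object with a null
        // attribute and expects load() to return an equal object afterwards.
        return mapper.load(StringSetAttributeClass.class, key);
    }
}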
- */ - @Test - public void testIncompleteObject() { - DynamoDbMapper util = new DynamoDbMapper(dynamo); - - StringSetAttributeClass obj = getUniqueObject(); - obj.setStringSetAttribute(null); - util.save(obj); - - assertEquals(obj, util.load(StringSetAttributeClass.class, obj.getKey())); - - obj.setStringSetAttributeRenamed(null); - util.save(obj); - assertEquals(obj, util.load(StringSetAttributeClass.class, obj.getKey())); - } - - @Test - public void testSave() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - StringSetAttributeClass obj = getUniqueObject(); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (StringSetAttributeClass obj : objs) { - util.save(obj); - } - - for (StringSetAttributeClass obj : objs) { - StringSetAttributeClass loaded = util.load(StringSetAttributeClass.class, obj.getKey()); - assertEquals(obj, loaded); - } - } - - @Test - public void testUpdate() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - StringSetAttributeClass obj = getUniqueObject(); - objs.add(obj); - } - - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (StringSetAttributeClass obj : objs) { - util.save(obj); - } - - for (StringSetAttributeClass obj : objs) { - StringSetAttributeClass replacement = getUniqueObject(); - replacement.setKey(obj.getKey()); - util.save(replacement); - - assertEquals(replacement, util.load(StringSetAttributeClass.class, obj.getKey())); - } - } - - private StringSetAttributeClass getUniqueObject() { - StringSetAttributeClass obj = new StringSetAttributeClass(); - obj.setKey(String.valueOf(startKey++)); - obj.setStringSetAttribute(toSet(String.valueOf(startKey++), String.valueOf(startKey++), String.valueOf(startKey++))); - obj.setStringSetAttributeRenamed( - toSet(String.valueOf(startKey++), String.valueOf(startKey++), String.valueOf(startKey++))); - return obj; - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TableMapperIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TableMapperIntegrationTest.java deleted file mode 100644 index da07c518f590..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TableMapperIntegrationTest.java +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; - -import java.util.Arrays; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.UUID; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedTimestamp; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbQueryExpression; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTableMapper; -import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; -import software.amazon.awssdk.services.dynamodb.pojos.AutoKeyAndVal; - -/** - * Tests updating component attribute fields correctly. - */ -public class TableMapperIntegrationTest extends AbstractKeyAndValIntegrationTestCase { - - /** - * Test using {@code Date}. - */ - @Test - public void testSaveIfNotExists() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - - mapper.saveIfNotExists(object); - } - - /** - * Test using {@code Date}. - */ - @Test(expected = ConditionalCheckFailedException.class) - public void testSaveIfNotExistsWhenExists() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - - mapper.saveIfNotExists(object); - mapper.saveIfNotExists(object); - } - - /** - * Test using {@code Date}. - */ - @Test - public void testSaveWhenExists() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - - mapper.saveIfNotExists(object); - mapper.save(object); - } - - /** - * Test using {@code Date}. - */ - @Test(expected = ConditionalCheckFailedException.class) - public void testSaveIfExistsWhenNotExists() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setKey(UUID.randomUUID().toString()); - - mapper.saveIfExists(object); - } - - /** - * Test using {@code Date}. - */ - @Test - public void testDeleteIfExistsWhenExists() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - - mapper.saveIfNotExists(object); - mapper.deleteIfExists(object); - } - - /** - * Test using {@code Date}. - */ - @Test(expected = ConditionalCheckFailedException.class) - public void testDeleteIfExistsWhenNotExists() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setKey(UUID.randomUUID().toString()); - - mapper.deleteIfExists(object); - } - - /** - * Test batch load with no results. 
- */ - @Test - public void testBatchLoadItemList() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object1 = new KeyAndDateValue(); - final KeyAndDateValue object2 = new KeyAndDateValue(); - - assertEquals(0, mapper.batchSave(Arrays.asList(object1, object2)).size()); - assertEquals(2, mapper.batchLoad(Arrays.asList(object1, object2)).size()); - assertEquals(0, mapper.batchDelete(Arrays.asList(object1, object2)).size()); - assertEquals(0, mapper.batchLoad(Arrays.asList(object1, object2)).size()); - } - - /** - * Test batch load with no results. - */ - @Test - public void testBatchLoadItemListOnNull() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - assertEquals(0, mapper.batchLoad((List) null).size()); - } - - /** - * Test batch load with no results. - */ - @Test - public void testBatchLoadItemListOnEmpty() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - assertEquals(0, mapper.batchLoad(Collections.emptyList()).size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryCount() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.count(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true))); - } - - /** - * Test a query. - */ - @Test - public void testQueryBeginsWith() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", mapper.field("queryDate") - .beginsWith(object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryBetween() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", mapper.field("queryDate") - .between(object.getQueryDate(), object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryGreaterThanOrEqualTo() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", - mapper.field("queryDate").ge(object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. 
- */ - @Test - public void testQueryGreaterThan() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(0, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", - mapper.field("queryDate").gt(object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryEqualTo() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", - mapper.field("queryDate").eq(object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryIn() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", - mapper.field("queryDate").in(object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryIsNull() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(0, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", mapper.field("queryDate").isNull()) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryLessThanOrEqualTo() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", - mapper.field("queryDate").le(object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryLessThan() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(0, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", - mapper.field("queryDate").lt(object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. 
- */ - @Test - public void testQueryNotEqualTo() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(0, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", - mapper.field("queryDate").ne(object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryNotNull() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", mapper.field("queryDate").notNull()) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryAnyBetween() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", mapper.field("queryDate") - .betweenAny(object.getQueryDate(), object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryAnyBetweenLoNull() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", mapper.field("queryDate") - .betweenAny(null, object.getQueryDate())) - ).getResults().size()); - } - - /** - * Test a query. - */ - @Test - public void testQueryAnyBetweenHiNull() { - final DynamoDbTableMapper mapper = util.newTableMapper(KeyAndDateValue.class); - - final KeyAndDateValue object = new KeyAndDateValue(); - object.setQueryDate(new Date()); - - mapper.saveIfNotExists(object); - - assertEquals(1, mapper.queryPage(new DynamoDbQueryExpression() - .withHashKeyValues(object).withConsistentRead(true) - .withQueryFilterEntry("queryDate", mapper.field("queryDate") - .betweenAny(object.getQueryDate(), null)) - ).getResults().size()); - } - - /** - * An object with {@code Date}. 
- */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDateValue extends AutoKeyAndVal { - private Date queryDate; - - @DynamoDbAutoGeneratedTimestamp - public Date getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Date val) { - super.setVal(val); - } - - @DynamoDbAttribute(attributeName = "queryDate") - public Date getQueryDate() { - return this.queryDate; - } - - public void setQueryDate(final Date queryDate) { - this.queryDate = queryDate; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypeConvertedJsonIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypeConvertedJsonIntegrationTest.java deleted file mode 100644 index 55b483691315..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypeConvertedJsonIntegrationTest.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertNotNull; - -import java.util.ArrayList; -import java.util.List; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConvertedJson; -import software.amazon.awssdk.services.dynamodb.pojos.AutoKeyAndVal; -import software.amazon.awssdk.services.dynamodb.pojos.Currency; - -/** - * Integration tests for {@code DynamoDBTypeConvertedJson}. - */ -public class TypeConvertedJsonIntegrationTest extends AbstractKeyAndValIntegrationTestCase { - - /** - * Test marshalling. - */ - @Test - public void testMarshalling() { - final KeyAndCurrency object = new KeyAndCurrency(); - object.setVal(new Currency(12.95D, "USD")); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test marshalling a list. - */ - @Test - public void testListMarshalling() { - final KeyAndCurrencyList object = new KeyAndCurrencyList(); - object.setVal(new ArrayList()); - object.getVal().add(new Currency(1.99D, "CAD")); - object.getVal().add(new Currency(2.99D, "CAD")); - - final List after = assertBeforeAndAfterChange(false, object); - for (final Currency currency : after) { - assertNotNull(currency.getAmount()); - assertNotNull(currency.getUnit()); - } - } - - /** - * An object with a complex type. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndCurrency extends AutoKeyAndVal { - @DynamoDbTypeConvertedJson - public Currency getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Currency val) { - super.setVal(val); - } - } - - /** - * An object with a complex type. 
- */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndCurrencyList extends AutoKeyAndVal> { - @DynamoDbTypeConvertedJson(targetType = CurrencyListType.class) - public List getVal() { - return super.getVal(); - } - - @Override - public void setVal(final List val) { - super.setVal(val); - } - - public static final class CurrencyListType extends ArrayList { - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypeConvertedTimestampIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypeConvertedTimestampIntegrationTest.java deleted file mode 100644 index 4a1109a992ba..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypeConvertedTimestampIntegrationTest.java +++ /dev/null @@ -1,312 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import java.time.format.DateTimeParseException; -import java.util.Calendar; -import java.util.Date; -import org.junit.Ignore; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConvertedTimestamp; -import software.amazon.awssdk.services.dynamodb.pojos.AutoKeyAndVal; - -/** - * Tests updating component attribute fields correctly. - */ -public class TypeConvertedTimestampIntegrationTest extends AbstractKeyAndValIntegrationTestCase { - - /** - * Test timestamp formatting. - */ - @Test - public void testCalendarTimestamp() throws Exception { - final KeyAndCalendarTimestamp object = new KeyAndCalendarTimestamp(); - object.setVal(Calendar.getInstance()); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. - */ - @Test - public void testCalendarTimestampNull() { - final KeyAndCalendarTimestamp object = new KeyAndCalendarTimestamp(); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. - */ - @Test - public void testDateTimestamp() throws Exception { - final KeyAndDateTimestamp object = new KeyAndDateTimestamp(); - object.setVal(Calendar.getInstance().getTime()); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. - */ - @Test - public void testDateTimestampNull() { - final KeyAndDateTimestamp object = new KeyAndDateTimestamp(); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. - */ - @Test - public void testLongTimestamp() throws Exception { - final KeyAndLongTimestamp object = new KeyAndLongTimestamp(); - object.setVal(Calendar.getInstance().getTime().getTime()); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. 
- */ - @Test - public void testLongTimestampNull() { - final KeyAndLongTimestamp object = new KeyAndLongTimestamp(); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. - */ - @Test - public void testEstCalendarTimestamp() throws Exception { - final KeyAndEstCalendarTimestamp object = new KeyAndEstCalendarTimestamp(); - object.setVal(Calendar.getInstance()); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. - */ - @Test - public void testEstDateTimestamp() { - final KeyAndEstDateTimestamp object = new KeyAndEstDateTimestamp(); - object.setVal(Calendar.getInstance().getTime()); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. - */ - @Test - public void testEstLongTimestamp() { - final KeyAndEstLongTimestamp object = new KeyAndEstLongTimestamp(); - object.setVal(Calendar.getInstance().getTime().getTime()); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. - */ - @Test(expected = DateTimeParseException.class) - public void testStringNotTimestamp() { - final KeyAndStringTimestamp object = new KeyAndStringTimestamp(); - object.setVal("NotTimestamp"); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. - */ - @Test(expected = DynamoDbMappingException.class) - @Ignore("This behavior is different with the java.time classes because you can construct a formatter using an empty " - + "string as a pattern.") - public void testEmptyPattern() throws Exception { - final KeyAndEmptyPattern object = new KeyAndEmptyPattern(); - object.setVal(Calendar.getInstance().getTime()); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test timestamp formatting. - */ - @Test(expected = DynamoDbMappingException.class) - public void testInvalidPattern() throws Exception { - final KeyAndInvalidPattern object = new KeyAndInvalidPattern(); - object.setVal(Calendar.getInstance().getTime()); - assertBeforeAndAfterChange(false, object); - } - - /** - * An object with {@code Calendar}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndCalendarTimestamp extends AutoKeyAndVal { - @DynamoDbTypeConvertedTimestamp(pattern = "yyyy MMddHHmmssSSSz") - public Calendar getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Calendar val) { - super.setVal(val); - } - } - - /** - * An object with {@code Date}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDateTimestamp extends AutoKeyAndVal { - @DynamoDbTypeConvertedTimestamp(pattern = "yyyy MMddHHmmssSSSz") - public Date getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Date val) { - super.setVal(val); - } - } - - /** - * An object with {@code Long}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndLongTimestamp extends AutoKeyAndVal { - @DynamoDbTypeConvertedTimestamp(pattern = "yyyy MMddHHmmssSSSz") - public Long getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Long val) { - super.setVal(val); - } - } - - /** - * An object with {@code Calendar}. 
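The @Ignore note on testEmptyPattern above carries the actual reasoning: the converter now sits on java.time, and DateTimeFormatter accepts an empty pattern, so the old expectation of a mapping failure no longer holds, while a pattern with an unknown letter still fails fast (which is why testInvalidPattern keeps its expected exception). A minimal standalone sketch of that difference — not part of the deleted file, and independent of the mapper — is:

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class TimestampPatternSketch {
    public static void main(String[] args) {
        // An empty pattern is a valid (if useless) DateTimeFormatter: formatting yields "".
        DateTimeFormatter empty = DateTimeFormatter.ofPattern("");
        System.out.println("[" + empty.format(LocalDateTime.now()) + "]");   // prints []

        // An unknown pattern letter is rejected immediately, mirroring the
        // mapping exception the ignored/invalid-pattern tests expect.
        try {
            DateTimeFormatter.ofPattern("invalid");
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}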
- */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndEstCalendarTimestamp extends AutoKeyAndVal { - @DynamoDbTypeConvertedTimestamp(pattern = "yyyy MMddHHmmssSSSz", timeZone = "America/New_York") - public Calendar getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Calendar val) { - super.setVal(val); - } - } - - /** - * An object with {@code Date}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndEstDateTimestamp extends AutoKeyAndVal { - @DynamoDbTypeConvertedTimestamp(pattern = "yyyy MMddHHmmssSSSz", timeZone = "America/New_York") - public Date getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Date val) { - super.setVal(val); - } - } - - /** - * An object with {@code Long}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndEstLongTimestamp extends AutoKeyAndVal { - @DynamoDbTypeConvertedTimestamp(pattern = "yyyy MMddHHmmssSSSz", timeZone = "America/New_York") - public Long getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Long val) { - super.setVal(val); - } - } - - /** - * An object with {@code String}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndStringTimestamp extends AutoKeyAndVal { - @DynamoDbTypeConvertedTimestamp(pattern = "yyyy MMddHHmmssSSSz") - public String getVal() { - return super.getVal(); - } - - @Override - public void setVal(final String val) { - super.setVal(val); - } - } - - /** - * An object with {@code Date}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndEmptyPattern extends KeyAndDateTimestamp { - @DynamoDbTypeConvertedTimestamp(pattern = "") - public Date getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Date val) { - super.setVal(val); - } - } - - /** - * An object with {@code Date}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndEmptyTimeZone extends KeyAndDateTimestamp { - @DynamoDbTypeConvertedTimestamp(pattern = "yyyy MMddHHmmssSSSz", timeZone = "") - public Date getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Date val) { - super.setVal(val); - } - } - - /** - * An object with {@code Date}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndInvalidPattern extends KeyAndDateTimestamp { - @DynamoDbTypeConvertedTimestamp(pattern = "invalid") - public Date getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Date val) { - super.setVal(val); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypeConverterIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypeConverterIntegrationTest.java deleted file mode 100644 index e11f8a17dcdd..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypeConverterIntegrationTest.java +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConverted; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConverter; -import software.amazon.awssdk.services.dynamodb.pojos.AutoKeyAndVal; -import software.amazon.awssdk.services.dynamodb.pojos.Currency; - -/** - * Tests updating component attribute fields correctly. - */ -public class TypeConverterIntegrationTest extends AbstractKeyAndValIntegrationTestCase { - - /** - * Test using {@code Currency}. - */ - @Test - public void testStringCurrency() { - final KeyAndStringCurrency object = new KeyAndStringCurrency(); - object.setVal(new Currency(79.99D, "CAD")); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code Currency}. - */ - @Test - public void testStringCurrencyNull() { - final KeyAndStringCurrency object = new KeyAndStringCurrency(); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code Currency}. - */ - @Test(expected = DynamoDbMappingException.class) //<- does not yet support lists/maps - public void testCurrency() { - final KeyAndCurrency object = new KeyAndCurrency(); - object.setVal(new Currency(69.99D, "CAD")); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code Currency}. - */ - @Test - public void testStringSetCurrency() { - final KeyAndStringSetCurrency object = new KeyAndStringSetCurrency(); - object.setVal(new HashSet(Arrays.asList(new Currency(4.99D, "USD"), new Currency(5.99D, "USD")))); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code Currency}. - */ - @Test - public void testStringSetCurrencyNull() { - final KeyAndStringSetCurrency object = new KeyAndStringSetCurrency(); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code Currency}. - */ - @Test - public void testDoubleCurrency() { - final KeyAndDoubleCurrency object = new KeyAndDoubleCurrency(); - object.setVal(new Currency(99.99D, "CAD")); - - final Currency currency = assertBeforeAndAfterChange(null, object); - assertEquals(object.getVal().getAmount(), currency.getAmount()); - assertEquals("USD", currency.getUnit()); - } - - /** - * Test using {@code Currency}. - */ - @Test - public void testDoubleCurrencyNull() { - final KeyAndDoubleCurrency object = new KeyAndDoubleCurrency(); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code Currency}. - */ - @Test - public void testDoubleSetCurrency() { - final KeyAndDoubleSetCurrency object = new KeyAndDoubleSetCurrency(); - object.setVal(new HashSet(Arrays.asList(new Currency(28.99D, "USD"), new Currency(29.99D, "USD")))); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test using {@code Currency}. 
- */ - @Test - public void testDoubleSetCurrencyNull() { - final KeyAndDoubleSetCurrency object = new KeyAndDoubleSetCurrency(); - assertBeforeAndAfterChange(false, object); - } - - /** - * An object with {@code Currency}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndStringCurrency extends AutoKeyAndVal { - @CurrencyFormat(separator = "-") - public Currency getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Currency val) { - super.setVal(val); - } - - @DynamoDbTypeConverted(converter = StringCurrencyConverter.class) - @Retention(RetentionPolicy.RUNTIME) - @Target({ElementType.METHOD, ElementType.TYPE}) - public static @interface CurrencyFormat { - String separator() default " "; - } - - public static final class StringCurrencyConverter implements DynamoDbTypeConverter { - private final CurrencyFormat f; - - public StringCurrencyConverter(final Class targetType, final CurrencyFormat f) { - this.f = f; - } - - @Override - public String convert(final Currency object) { - return new StringBuilder().append(object.getAmount()).append(f.separator()).append(object.getUnit()).toString(); - } - - @Override - public Currency unconvert(final String object) { - final String[] splits = object.split(f.separator()); - return new Currency(Double.valueOf(splits[0]), splits[1]); - } - } - } - - /** - * An object with {@code Currency}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndCurrency extends AutoKeyAndVal { - @DynamoDbTypeConverted(converter = NoConvertCurrencyConverter.class) - public Currency getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Currency val) { - super.setVal(val); - } - - public static final class NoConvertCurrencyConverter implements DynamoDbTypeConverter { - @Override - public Currency convert(final Currency object) { - return object; - } - - @Override - public Currency unconvert(final Currency object) { - return object; - } - } - } - - /** - * An object with {@code Currency}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndStringSetCurrency extends AutoKeyAndVal> { - @CurrencyFormat(separator = "-") - public Set getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Set val) { - super.setVal(val); - } - - @DynamoDbTypeConverted(converter = StringSetCurrencyConverter.class) - @Retention(RetentionPolicy.RUNTIME) - @Target({ElementType.METHOD, ElementType.TYPE}) - public static @interface CurrencyFormat { - String separator() default " "; - } - - public static final class StringSetCurrencyConverter implements DynamoDbTypeConverter, Set> { - private final CurrencyFormat f; - - public StringSetCurrencyConverter(final Class targetType, final CurrencyFormat f) { - this.f = f; - } - - @Override - public Set convert(final Set object) { - final Set objects = new HashSet(); - for (final Currency o : object) { - objects.add(new StringBuilder().append(o.getAmount()).append(f.separator()).append(o.getUnit()).toString()); - } - return objects; - } - - @Override - public Set unconvert(final Set object) { - final Set objects = new HashSet(); - for (final String o : object) { - final String[] splits = o.split(f.separator()); - objects.add(new Currency(Double.valueOf(splits[0]), splits[1])); - } - return objects; - } - } - } - - /** - * An object with {@code Currency}. 
- */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDoubleCurrency extends AutoKeyAndVal { - @CurrencyFormat - public Currency getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Currency val) { - super.setVal(val); - } - - @DynamoDbTypeConverted(converter = DoubleCurrencyConverter.class) - @Retention(RetentionPolicy.RUNTIME) - @Target({ElementType.METHOD, ElementType.TYPE}) - public static @interface CurrencyFormat { - String separator() default " "; - - String unit() default "USD"; - } - - public static final class DoubleCurrencyConverter implements DynamoDbTypeConverter { - private final CurrencyFormat f; - - public DoubleCurrencyConverter(final Class targetType, final CurrencyFormat f) { - this.f = f; - } - - @Override - public Double convert(final Currency object) { - return object.getAmount(); - } - - @Override - public Currency unconvert(final Double object) { - return new Currency(object, f.unit()); - } - } - } - - /** - * An object with {@code Currency}. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDoubleSetCurrency extends AutoKeyAndVal> { - @CurrencyFormat - public Set getVal() { - return super.getVal(); - } - - @Override - public void setVal(final Set val) { - super.setVal(val); - } - - @DynamoDbTypeConverted(converter = DoubleSetCurrencyConverter.class) - @Retention(RetentionPolicy.RUNTIME) - @Target({ElementType.METHOD, ElementType.TYPE}) - public static @interface CurrencyFormat { - String separator() default " "; - - String unit() default "USD"; - } - - public static final class DoubleSetCurrencyConverter implements DynamoDbTypeConverter, Set> { - private final CurrencyFormat f; - - public DoubleSetCurrencyConverter(final Class targetType, final CurrencyFormat f) { - this.f = f; - } - - @Override - public Set convert(final Set object) { - final Set objects = new HashSet(); - for (final Currency o : object) { - objects.add(o.getAmount()); - } - return objects; - } - - @Override - public Set unconvert(final Set object) { - final Set objects = new HashSet(); - for (final Double o : object) { - objects.add(new Currency(o, f.unit())); - } - return objects; - } - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypedIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypedIntegrationTest.java deleted file mode 100644 index 926e84d39075..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/TypedIntegrationTest.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; - -import java.util.HashMap; -import java.util.Map; -import java.util.UUID; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedDefault; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperFieldModel.DynamoDbAttributeType; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTyped; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.pojos.AutoKeyAndVal; - -/** - * Status tests for {@code Typed}. - */ -public class TypedIntegrationTest extends AbstractKeyAndValIntegrationTestCase { - - /** - * Test the mappings. - */ - @Test - public void testMarshalling() { - final KeyAndBinaryUuid object = new KeyAndBinaryUuid(); - object.setVal(UUID.randomUUID()); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test with a null enum val. - */ - @Test - public void testNullEnumValue() { - final KeyAndStatus object = new KeyAndStatus(); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test with a non-null enum val. - */ - @Test - public void testEnumMarshalling() { - final KeyAndStatus object = new KeyAndStatus(); - object.setVal(KeyAndStatus.Status.Y); - assertBeforeAndAfterChange(false, object); - } - - /** - * Test with a null enum val. - */ - @Test - public void testDefaultEnumValue() { - final KeyAndDefaultStatus object = new KeyAndDefaultStatus(); - final KeyAndStatus.Status value = assertBeforeAndAfterChange(true, object); - assertEquals(KeyAndStatus.Status.Z, value); - } - - /** - * Test the mappings. - */ - @Test - public void testNativeMap() { - final Map map = new HashMap(); - map.put("A", AttributeValue.builder().n("123").build()); - - final KeyAndNativeValue object = new KeyAndNativeValue(); - object.setVal(AttributeValue.builder().m(map).build()); - assertBeforeAndAfterChange(false, object); - } - - /** - * test object. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndBinaryUuid extends AutoKeyAndVal { - @DynamoDbTyped(DynamoDbAttributeType.B) - public UUID getVal() { - return super.getVal(); - } - - @Override - public void setVal(final UUID val) { - super.setVal(val); - } - } - - /** - * An object with an enumeration. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndStatus extends AutoKeyAndVal { - @DynamoDbTyped(DynamoDbAttributeType.S) - public Status getVal() { - return super.getVal(); - } - - ; - - @Override - public void setVal(final Status val) { - super.setVal(val); - } - - public static enum Status { - X, - Y, - Z - } - } - - /** - * An object with an enumeration. - */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndDefaultStatus extends KeyAndStatus { - @DynamoDbAutoGeneratedDefault("Z") - @DynamoDbTyped(DynamoDbAttributeType.S) - public Status getVal() { - return super.getVal(); - } - } - - /** - * test object. 
- */ - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class KeyAndNativeValue extends AutoKeyAndVal { - @DynamoDbTyped(DynamoDbAttributeType.M) - public AttributeValue getVal() { - return super.getVal(); - } - - @Override - public void setVal(final AttributeValue val) { - super.setVal(val); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/VersionAttributeUpdateIntegrationTest.java b/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/VersionAttributeUpdateIntegrationTest.java deleted file mode 100644 index a41864745597..000000000000 --- a/test/dynamodbmapper-v1/src/it/java/software/amazon/awssdk/services/dynamodb/mapper/VersionAttributeUpdateIntegrationTest.java +++ /dev/null @@ -1,664 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.fail; - -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import org.junit.Test; -import software.amazon.awssdk.utils.ImmutableMap; -import software.amazon.awssdk.services.dynamodb.DynamoDBMapperIntegrationTestBase; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbDeleteExpression; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.SaveBehavior; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbSaveExpression; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbVersionAttribute; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; - -/** - * Tests updating version fields correctly - */ -public class VersionAttributeUpdateIntegrationTest extends DynamoDBMapperIntegrationTestBase { - - @Test(expected = DynamoDbMappingException.class) - public void testStringVersion() throws Exception { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - StringVersionField obj = getUniqueObject(new 
StringVersionField()); - objs.add(obj); - } - - // Saving new objects with a null version field should populate it - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (StringVersionField obj : objs) { - assertNull(obj.getVersion()); - util.save(obj); - assertNotNull(obj.getVersion()); - assertEquals(obj, util.load(StringVersionField.class, obj.getKey())); - } - } - - @Test - public void testBigIntegerVersion() { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - BigIntegerVersionField obj = getUniqueObject(new BigIntegerVersionField()); - objs.add(obj); - } - - // Saving new objects with a null version field should populate it - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (BigIntegerVersionField obj : objs) { - assertNull(obj.getVersion()); - util.save(obj); - assertNotNull(obj.getVersion()); - - assertEquals(obj, util.load(BigIntegerVersionField.class, obj.getKey())); - } - - for (BigIntegerVersionField obj : objs) { - BigIntegerVersionField replacement = getUniqueObject(new BigIntegerVersionField()); - replacement.setKey(obj.getKey()); - replacement.setVersion(obj.getVersion()); - - util.save(replacement); - // The version field should have changed in memory - assertFalse(obj.getVersion().equals(replacement.getVersion())); - - BigIntegerVersionField loadedObject = util.load(BigIntegerVersionField.class, obj.getKey()); - assertEquals(replacement, loadedObject); - - // Trying to update the object again should trigger a concurrency - // exception - try { - util.save(obj); - fail("Should have thrown an exception"); - } catch (Exception expected) { - // Ignored or expected. - } - - // Now try again overlaying the correct version number by using a saveExpression - // this should not throw the conditional check failed exception - try { - DynamoDbSaveExpression saveExpression = new DynamoDbSaveExpression(); - Map expected = new HashMap(); - ExpectedAttributeValue expectedVersion = ExpectedAttributeValue.builder() - .value(AttributeValue.builder() - .n(obj.getVersion().add(BigInteger.valueOf(1)).toString()).build()).build(); - expected.put("version", expectedVersion); - saveExpression.setExpected(expected); - util.save(obj, saveExpression); - } catch (Exception expected) { - fail("This should succeed, version was updated."); - } - } - } - - @Test - public void testIntegerVersion() { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - IntegerVersionField obj = getUniqueObject(new IntegerVersionField()); - objs.add(obj); - } - - // Saving new objects with a null version field should populate it - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (IntegerVersionField obj : objs) { - assertNull(obj.getNotCalledVersion()); - util.save(obj); - assertNotNull(obj.getNotCalledVersion()); - - assertEquals(obj, util.load(IntegerVersionField.class, obj.getKey())); - } - - for (IntegerVersionField obj : objs) { - IntegerVersionField replacement = getUniqueObject(new IntegerVersionField()); - replacement.setKey(obj.getKey()); - replacement.setNotCalledVersion(obj.getNotCalledVersion()); - - util.save(replacement); - // The version field should have changed in memory - assertFalse(obj.getNotCalledVersion().equals(replacement.getNotCalledVersion())); - - IntegerVersionField loadedObject = util.load(IntegerVersionField.class, obj.getKey()); - assertEquals(replacement, loadedObject); - - // Trying to update the object again should trigger a concurrency - // exception - try { - util.save(obj); - fail("Should have thrown an exception"); - } catch (Exception 
expected) { - // Ignored or expected. - } - - // Trying to delete the object should also fail - try { - util.delete(obj); - fail("Should have thrown an exception"); - } catch (Exception expected) { - // Ignored or expected. - } - - // But specifying CLOBBER will allow deletion - util.save(obj, new DynamoDbMapperConfig(SaveBehavior.CLOBBER)); - - // Trying to delete with the wrong version should fail - try { - //version is now 2 in db, set object version to 3. - obj.setNotCalledVersion(3); - util.delete(obj); - fail("Should have thrown an exception"); - } catch (Exception expected) { - // Ignored or expected. - } - - // Now try deleting again overlaying the correct version number by using a deleteExpression - // this should not throw the conditional check failed exception - try { - DynamoDbDeleteExpression deleteExpression = new DynamoDbDeleteExpression(); - Map expected = new HashMap(); - ExpectedAttributeValue expectedVersion = ExpectedAttributeValue.builder() - .value(AttributeValue.builder() - .n("2").build()).build(); //version is still 2 in db - expected.put("version", expectedVersion); - deleteExpression.setExpected(expected); - util.delete(obj, deleteExpression); - } catch (Exception expected) { - fail("This should succeed, version was updated."); - } - } - } - - /** - * Tests providing additional expected conditions when saving and deleting - * item with versioned fields. - */ - @Test - public void testVersionedAttributeWithUserProvidedExpectedConditions() { - DynamoDbMapper mapper = new DynamoDbMapper(dynamo); - IntegerVersionField versionedObject = getUniqueObject(new IntegerVersionField()); - assertNull(versionedObject.getNotCalledVersion()); - - // Add additional expected conditions via DynamoDBSaveExpression. - // Expected conditions joined by AND are compatible with the conditions - // for auto-generated keys. - DynamoDbSaveExpression saveExpression = new DynamoDbSaveExpression() - .withExpected(Collections.singletonMap( - "otherAttribute", ExpectedAttributeValue.builder().exists(false).build())) - .withConditionalOperator(ConditionalOperator.AND); - // The save should succeed since the user provided conditions are joined by AND. - mapper.save(versionedObject, saveExpression); - // The version field should be populated - assertNotNull(versionedObject.getNotCalledVersion()); - IntegerVersionField other = mapper.load(IntegerVersionField.class, versionedObject.getKey()); - assertEquals(other, versionedObject); - - // delete should also work - DynamoDbDeleteExpression deleteExpression = new DynamoDbDeleteExpression() - .withExpected(Collections.singletonMap( - "otherAttribute", ExpectedAttributeValue.builder().exists(false).build())) - .withConditionalOperator(ConditionalOperator.AND); - mapper.delete(versionedObject, deleteExpression); - - // Change the conditional operator to OR. - // IllegalArgumentException is expected since the additional expected - // conditions cannot be joined with the conditions for auto-generated - // keys. - saveExpression.setConditionalOperator(ConditionalOperator.OR); - deleteExpression.setConditionalOperator(ConditionalOperator.OR); - try { - mapper.save(getUniqueObject(new IntegerVersionField()), saveExpression); - } catch (IllegalArgumentException expected) { - // Ignored or expected. - } - try { - mapper.delete(getUniqueObject(new IntegerVersionField()), deleteExpression); - } catch (IllegalArgumentException expected) { - // Ignored or expected. 
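The version-attribute tests above and below all exercise the same optimistic-locking contract: the first save of a bean whose @DynamoDbVersionAttribute field is null populates it, a save that carries a stale version is rejected as a conditional write failure, and SaveBehavior.CLOBBER or an explicit expected-value map can override that check. A condensed sketch of that flow outside the test harness follows; it is illustrative only, the Document bean, the key value, and the DynamoDbClient.create() setup are assumptions (the table name is the one the tests use), and, like the tests, it only asserts that some exception surfaces on the stale write.

import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbVersionAttribute;

public class VersionedSaveSketch {

    @DynamoDbTable(tableName = "aws-java-sdk-util")   // same table the integration tests target (assumption for illustration)
    public static class Document {
        private String key;
        private Long version;

        @DynamoDbHashKey
        public String getKey() { return key; }
        public void setKey(String key) { this.key = key; }

        @DynamoDbVersionAttribute
        public Long getVersion() { return version; }
        public void setVersion(Long version) { this.version = version; }
    }

    public static void main(String[] args) {
        DynamoDbMapper mapper = new DynamoDbMapper(DynamoDbClient.create());

        Document doc = new Document();
        doc.setKey("sketch-1");
        mapper.save(doc);                                   // null version -> populated on first save

        Document stale = mapper.load(Document.class, "sketch-1");
        mapper.save(doc);                                   // bumps the stored version again

        try {
            mapper.save(stale);                             // carries the old version -> conditional write fails
        } catch (Exception expectedConcurrencyFailure) {    // the deleted tests likewise only assert some failure here
            System.out.println("stale write rejected: " + expectedConcurrencyFailure);
        }
    }
}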
- } - - // User-provided OR conditions should work if they completely override - // the generated conditions for the version field. - Map goodConditions = - ImmutableMap.of( - "otherAttribute", ExpectedAttributeValue.builder().exists(false).build(), - "version", ExpectedAttributeValue.builder().exists(false).build() - ); - Map badConditions = - ImmutableMap.of( - "otherAttribute", ExpectedAttributeValue.builder().value(AttributeValue.builder().s("non-existent-value").build()).build(), - "version", ExpectedAttributeValue.builder().value(AttributeValue.builder().n("-1").build()).build() - ); - - IntegerVersionField newObj = getUniqueObject(new IntegerVersionField()); - saveExpression.setExpected(badConditions); - try { - mapper.save(newObj, saveExpression); - } catch (ConditionalCheckFailedException expected) { - // Ignored or expected. - } - - saveExpression.setExpected(goodConditions); - mapper.save(newObj, saveExpression); - - deleteExpression.setExpected(badConditions); - try { - mapper.delete(newObj, deleteExpression); - } catch (ConditionalCheckFailedException expected) { - // Ignored or expected. - } - - deleteExpression.setExpected(goodConditions); - mapper.delete(newObj, deleteExpression); - } - - @Test - public void testByteVersion() { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - ByteVersionField obj = getUniqueObject(new ByteVersionField()); - objs.add(obj); - } - - // Saving new objects with a null version field should populate it - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (ByteVersionField obj : objs) { - assertNull(obj.getVersion()); - util.save(obj); - assertNotNull(obj.getVersion()); - - assertEquals(obj, util.load(ByteVersionField.class, obj.getKey())); - } - - for (ByteVersionField obj : objs) { - ByteVersionField replacement = getUniqueObject(new ByteVersionField()); - replacement.setKey(obj.getKey()); - replacement.setVersion(obj.getVersion()); - - util.save(replacement); - // The version field should have changed in memory - assertFalse(obj.getVersion().equals(replacement.getVersion())); - - ByteVersionField loadedObject = util.load(ByteVersionField.class, obj.getKey()); - assertEquals(replacement, loadedObject); - - // Trying to update the object again should trigger a concurrency - // exception - try { - util.save(obj); - fail("Should have thrown an exception"); - } catch (Exception expected) { - // Ignored or expected. 
- } - } - } - - @Test - public void testLongVersion() { - List objs = new ArrayList(); - for (int i = 0; i < 5; i++) { - LongVersionField obj = getUniqueObject(new LongVersionField()); - objs.add(obj); - } - - // Saving new objects with a null version field should populate it - DynamoDbMapper util = new DynamoDbMapper(dynamo); - for (LongVersionField obj : objs) { - assertNull(obj.getVersion()); - util.save(obj); - assertNotNull(obj.getVersion()); - - assertEquals(obj, util.load(LongVersionField.class, obj.getKey())); - } - - for (LongVersionField obj : objs) { - LongVersionField replacement = getUniqueObject(new LongVersionField()); - replacement.setKey(obj.getKey()); - replacement.setVersion(obj.getVersion()); - - util.save(replacement); - // The version field should have changed in memory - assertFalse(obj.getVersion().equals(replacement.getVersion())); - - LongVersionField loadedObject = util.load(LongVersionField.class, obj.getKey()); - assertEquals(replacement, loadedObject); - - // Trying to update the object again should trigger a concurrency - // exception - try { - util.save(obj); - fail("Should have thrown an exception"); - } catch (Exception expected) { - // Ignored or expected. - } - } - } - - private T getUniqueObject(T obj) { - obj.setKey("" + startKey++); - obj.setNormalStringAttribute("" + startKey++); - return obj; - } - - @DynamoDbTable(tableName = "aws-java-sdk-util") - public static class VersionFieldBaseClass { - - protected String key; - protected String normalStringAttribute; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAttribute - public String getNormalStringAttribute() { - return normalStringAttribute; - } - - public void setNormalStringAttribute(String normalStringAttribute) { - this.normalStringAttribute = normalStringAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((normalStringAttribute == null) ? 0 : normalStringAttribute.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - VersionFieldBaseClass other = (VersionFieldBaseClass) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (normalStringAttribute == null) { - if (other.normalStringAttribute != null) { - return false; - } - } else if (!normalStringAttribute.equals(other.normalStringAttribute)) { - return false; - } - return true; - } - } - - public static class StringVersionField extends VersionFieldBaseClass { - - private String version; - - @DynamoDbVersionAttribute - public String getVersion() { - return version; - } - - public void setVersion(String version) { - this.version = version; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((version == null) ? 
0 : version.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - StringVersionField other = (StringVersionField) obj; - if (version == null) { - if (other.version != null) { - return false; - } - } else if (!version.equals(other.version)) { - return false; - } - return true; - } - } - - public static class BigIntegerVersionField extends VersionFieldBaseClass { - - private BigInteger version; - - @DynamoDbVersionAttribute - public BigInteger getVersion() { - return version; - } - - public void setVersion(BigInteger version) { - this.version = version; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((version == null) ? 0 : version.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - BigIntegerVersionField other = (BigIntegerVersionField) obj; - if (version == null) { - if (other.version != null) { - return false; - } - } else if (!version.equals(other.version)) { - return false; - } - return true; - } - - @Override - public String toString() { - return "BigIntegerVersionField [version=" + version + ", key=" + key + ", normalStringAttribute=" - + normalStringAttribute + "]"; - } - } - - public static final class IntegerVersionField extends VersionFieldBaseClass { - - private Integer notCalledVersion; - - // Making sure that we can substitute attribute names as necessary - @DynamoDbVersionAttribute(attributeName = "version") - public Integer getNotCalledVersion() { - return notCalledVersion; - } - - public void setNotCalledVersion(Integer getNotCalledVersion) { - this.notCalledVersion = getNotCalledVersion; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((notCalledVersion == null) ? 0 : notCalledVersion.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - IntegerVersionField other = (IntegerVersionField) obj; - if (notCalledVersion == null) { - if (other.notCalledVersion != null) { - return false; - } - } else if (!notCalledVersion.equals(other.notCalledVersion)) { - return false; - } - return true; - } - } - - public static final class ByteVersionField extends VersionFieldBaseClass { - - private Byte version; - - @DynamoDbVersionAttribute - public Byte getVersion() { - return version; - } - - public void setVersion(Byte version) { - this.version = version; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((version == null) ? 
0 : version.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - ByteVersionField other = (ByteVersionField) obj; - if (version == null) { - if (other.version != null) { - return false; - } - } else if (!version.equals(other.version)) { - return false; - } - return true; - } - } - - public static final class LongVersionField extends VersionFieldBaseClass { - - private Long version; - - @DynamoDbVersionAttribute - public Long getVersion() { - return version; - } - - public void setVersion(Long version) { - this.version = version; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((version == null) ? 0 : version.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - LongVersionField other = (LongVersionField) obj; - if (version == null) { - if (other.version != null) { - return false; - } - } else if (!version.equals(other.version)) { - return false; - } - return true; - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/ConvenientMapSetterTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/ConvenientMapSetterTest.java deleted file mode 100644 index c10c5a41bcbd..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/ConvenientMapSetterTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb; - -import org.junit.Test; - -/** - * Tests on using convenient map setters. - */ -public class ConvenientMapSetterTest { - - /** Test on using map entry adder method. */ - @Test - public void testMapEntryAdderMethod() { -// NOTE(dongie): Convenience setters are not generated -// PutItemRequest putItemRequest = new PutItemRequest() -// .addItemEntry("hash-key", AttributeValue.builder().withS("1")) -// .addItemEntry("range-key", AttributeValue.builder().withS("2")) -// .addItemEntry("attribute", AttributeValue.builder().withS("3")); -// -// Map item = putItemRequest.getItem(); -// assertEquals(3, item.size()); -// assertEquals("1", item.get("hash-key").s()); -// assertEquals("2", item.get("range-key").s()); -// assertEquals("3", item.get("attribute").s()); -// -// putItemRequest.clearItemEntries(); -// assertNull(putItemRequest.getItem()); -// } -// -// /** Test on using predefined map entry setter to provide map parameter. 
*/ -// @Test -// public void testPredefinedMapEntryMethod() { -// ScanRequest scanRequest = new ScanRequest().withExclusiveStartKey( -// new AbstractMap.SimpleEntry("hash-key", AttributeValue.builder().withS("1")), -// new AbstractMap.SimpleEntry("range-key", AttributeValue.builder().withS("2"))); -// -// Map item = scanRequest.getExclusiveStartKey(); -// assertEquals(2, item.size()); -// assertEquals("1", item.get("hash-key").s()); -// assertEquals("2", item.get("range-key").s()); -// } -// -// /** Test on IllegalArgumentException when providing duplicated keys. */ -// @Test(expected = IllegalArgumentException.class) -// public void testDuplicatedKeysException() { -// new PutItemRequest() -// .addItemEntry("hash-key", AttributeValue.builder().withS("1")) -// .addItemEntry("hash-key", AttributeValue.builder().withS("2")); -// } -// -// /** Test on handling null entry objects. */ -// @Test -// public void testNullEntryException() { -// // hashKey is set as not nullable, and rangeKey is nullable -// // so this call should be fine. -// ScanRequest scanRequest = new ScanRequest().withExclusiveStartKey( -// new AbstractMap.SimpleEntry("hash-key", AttributeValue.builder().withS("1")), -// null); -// -// // but this call should throw IllegalArgumentException. -// try { -// scanRequest.withExclusiveStartKey( -// null, -// new AbstractMap.SimpleEntry("hash-key", AttributeValue.builder().withS("1"))); -// fail("Should throw IllegalArgumentException."); -// } catch (IllegalArgumentException iae) { -// // Ignored or expected. -// } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/ImmutableObjectUtils.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/ImmutableObjectUtils.java deleted file mode 100644 index 4139283ae24d..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/ImmutableObjectUtils.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb; - -import java.util.Arrays; -import software.amazon.awssdk.annotations.SdkProtectedApi; - -@SdkProtectedApi -public final class ImmutableObjectUtils { - - private ImmutableObjectUtils() { - } - - public static void setObjectMember(Object o, String memberName, T value) { - Arrays.stream(o.getClass().getDeclaredFields()) - .filter(f -> f.getName().equals(memberName)) - .findFirst() - .ifPresent(f -> { - f.setAccessible(true); - try { - f.set(o, value); - } catch (IllegalAccessException e) { - throw new RuntimeException("Unable to reflectively set member " + memberName); - } - }); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/TypeConvertedJsonTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/TypeConvertedJsonTest.java deleted file mode 100644 index f612e462af29..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/TypeConvertedJsonTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; -import software.amazon.awssdk.utils.ImmutableMap; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConvertedJson; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; - -@RunWith(MockitoJUnitRunner.class) -public class TypeConvertedJsonTest { - - private static final String HASH_KEY = "1234"; - - @Mock - private DynamoDbClient ddb; - - @Test - public void responseWithUnmappedField_IgnoresUnknownFieldAndUnmarshallsCorrectly() { - final DynamoDbMapper mapper = new DynamoDbMapper(ddb); - when(ddb.getItem(any(GetItemRequest.class))) - .thenReturn(GetItemResponse.builder().item( - ImmutableMap.of("hashKey", AttributeValue.builder().s(HASH_KEY).build(), - "jsonMappedPojo", AttributeValue.builder().s( - "{\"knownField\": \"knownValue\", \"unknownField\": \"unknownValue\"}").build() - )).build()); - - final TopLevelPojo pojo = mapper.load(new TopLevelPojo().setHashKey(HASH_KEY)); - assertEquals("knownValue", pojo.getJsonMappedPojo().getKnownField()); - } - - @DynamoDbTable(tableName = "TestTable") - public static class TopLevelPojo { - - @DynamoDbHashKey - private String hashKey; - - @DynamoDbTypeConvertedJson - private 
JsonMappedPojo jsonMappedPojo; - - public String getHashKey() { - return hashKey; - } - - public TopLevelPojo setHashKey(String hashKey) { - this.hashKey = hashKey; - return this; - } - - public JsonMappedPojo getJsonMappedPojo() { - return jsonMappedPojo; - } - - public void setJsonMappedPojo(JsonMappedPojo jsonMappedPojo) { - this.jsonMappedPojo = jsonMappedPojo; - } - } - - public static class JsonMappedPojo { - - private String knownField; - - public String getKnownField() { - return knownField; - } - - public void setKnownField(String knownField) { - this.knownField = knownField; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AbstractDynamoDbMapper.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AbstractDynamoDbMapper.java deleted file mode 100644 index 0956648cab9f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AbstractDynamoDbMapper.java +++ /dev/null @@ -1,336 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper.FailedBatch; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; - -/** - * Abstract implementation of {@code IDynamoDBMapper}. Convenient method forms pass through to the - * corresponding overload that takes a request object, which throws an - * {@code UnsupportedOperationException}. 
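The class javadoc above describes the shape of the stub that follows: every convenience overload forwards to the most fully-specified overload, and that overload throws UnsupportedOperationException, so a test subclass only overrides the methods it actually needs. The same pattern, reduced to a made-up two-method interface (purely illustrative, not part of the deleted file), looks like this:

public class PassThroughStubSketch {

    /** A made-up interface standing in for IDynamoDbMapper. */
    interface Store {
        String load(String key);
        String load(String key, String config);
    }

    /** Convenience forms delegate; the full form is the single override point. */
    static class AbstractStore implements Store {
        private final String defaults = "defaults";

        @Override
        public String load(String key) {
            return load(key, defaults);                 // pass through to the full overload
        }

        @Override
        public String load(String key, String config) {
            throw new UnsupportedOperationException("operation not supported in " + getClass());
        }
    }

    public static void main(String[] args) {
        Store partial = new AbstractStore() {
            @Override
            public String load(String key, String config) {
                return key + " loaded with " + config;  // subclass overrides only the full form
            }
        };
        System.out.println(partial.load("item-1"));     // convenience form now works via the override
    }
}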
- */ -public class AbstractDynamoDbMapper implements IDynamoDbMapper { - - private final DynamoDbMapperConfig config; - - protected AbstractDynamoDbMapper(final DynamoDbMapperConfig defaults) { - this.config = DynamoDbMapperConfig.DEFAULT.merge(defaults); - } - - protected AbstractDynamoDbMapper() { - this(DynamoDbMapperConfig.DEFAULT); - } - - protected final String getTableName(Class clazz, Object object, DynamoDbMapperConfig config) { - if (object != null && config.getObjectTableNameResolver() != null) { - return config.getObjectTableNameResolver().getTableName(object, config); - } - return getTableName(clazz, config); - } - - protected final String getTableName(Class clazz, DynamoDbMapperConfig config) { - if (config.getTableNameResolver() == null) { - return DynamoDbMapperConfig.DefaultTableNameResolver.INSTANCE.getTableName(clazz, config); - } - return config.getTableNameResolver().getTableName(clazz, config); - } - - protected final DynamoDbMapperConfig mergeConfig(DynamoDbMapperConfig overrides) { - return this.config.merge(overrides); - } - - @Override - public DynamoDbMapperTableModel getTableModel(Class clazz) { - return getTableModel(clazz, config); - } - - @Override - public DynamoDbMapperTableModel getTableModel(Class clazz, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public T load(Class clazz, Object hashKey, DynamoDbMapperConfig config) { - return load(clazz, hashKey, (Object) null, config); - } - - @Override - public T load(Class clazz, Object hashKey) { - return load(clazz, hashKey, (Object) null, config); - } - - @Override - public T load(Class clazz, Object hashKey, Object rangeKey) { - return load(clazz, hashKey, rangeKey, config); - } - - @Override - public T load(Class clazz, Object hashKey, Object rangeKey, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public T load(T keyObject) { - return load(keyObject, config); - } - - @Override - public T load(T keyObject, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public T marshallIntoObject(Class clazz, Map itemAttributes) { - return marshallIntoObject(clazz, itemAttributes, config); - } - - public T marshallIntoObject(Class clazz, Map itemAttributes, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public List marshallIntoObjects(Class clazz, List> itemAttributes) { - return marshallIntoObjects(clazz, itemAttributes, config); - } - - public List marshallIntoObjects(Class clazz, List> itemAttributes, - DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public void save(T object) { - save(object, (DynamoDbSaveExpression) null, config); - } - - @Override - public void save(T object, DynamoDbSaveExpression saveExpression) { - save(object, saveExpression, config); - } - - @Override - public void save(T object, DynamoDbMapperConfig config) { - save(object, (DynamoDbSaveExpression) null, config); - } - - @Override - public void save(T object, DynamoDbSaveExpression saveExpression, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public void delete(Object object) { - delete(object, 
(DynamoDbDeleteExpression) null, config); - } - - @Override - public void delete(Object object, DynamoDbDeleteExpression deleteExpression) { - delete(object, deleteExpression, config); - } - - @Override - public void delete(Object object, DynamoDbMapperConfig config) { - delete(object, (DynamoDbDeleteExpression) null, config); - } - - @Override - public void delete(T object, DynamoDbDeleteExpression deleteExpression, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public List batchDelete(Iterable objectsToDelete) { - return batchWrite(Collections.emptyList(), objectsToDelete, config); - } - - @Override - public List batchDelete(Object... objectsToDelete) { - return batchWrite(Collections.emptyList(), Arrays.asList(objectsToDelete), config); - } - - @Override - public List batchSave(Iterable objectsToSave) { - return batchWrite(objectsToSave, Collections.emptyList(), config); - } - - @Override - public List batchSave(Object... objectsToSave) { - return batchWrite(Arrays.asList(objectsToSave), Collections.emptyList(), config); - } - - @Override - public List batchWrite(Iterable objectsToWrite, - Iterable objectsToDelete) { - return batchWrite(objectsToWrite, objectsToDelete, config); - } - - @Override - public List batchWrite(Iterable objectsToWrite, - Iterable objectsToDelete, - DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public Map> batchLoad(Iterable itemsToGet) { - return batchLoad(itemsToGet, config); - } - - @Override - public Map> batchLoad(Iterable itemsToGet, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public Map> batchLoad(Map, List> itemsToGet) { - return batchLoad(itemsToGet, config); - } - - @Override - public Map> batchLoad(Map, List> itemsToGet, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public PaginatedScanList scan(Class clazz, DynamoDbScanExpression scanExpression) { - return scan(clazz, scanExpression, config); - } - - @Override - public PaginatedScanList scan(Class clazz, - DynamoDbScanExpression scanExpression, - DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public PaginatedParallelScanList parallelScan(Class clazz, - DynamoDbScanExpression scanExpression, - int totalSegments) { - return parallelScan(clazz, scanExpression, totalSegments, config); - } - - @Override - public PaginatedParallelScanList parallelScan(Class clazz, - DynamoDbScanExpression scanExpression, - int totalSegments, - DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public ScanResultPage scanPage(Class clazz, DynamoDbScanExpression scanExpression) { - return scanPage(clazz, scanExpression, config); - } - - @Override - public ScanResultPage scanPage(Class clazz, - DynamoDbScanExpression scanExpression, - DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public int count(Class clazz, DynamoDbScanExpression scanExpression) { - return count(clazz, scanExpression, config); - } - - @Override - public int count(Class clazz, DynamoDbScanExpression scanExpression, 
DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public int count(Class clazz, DynamoDbQueryExpression queryExpression) { - return count(clazz, queryExpression, config); - } - - @Override - public int count(Class clazz, DynamoDbQueryExpression queryExpression, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public PaginatedQueryList query(Class clazz, DynamoDbQueryExpression queryExpression) { - return query(clazz, queryExpression, config); - } - - @Override - public PaginatedQueryList query(Class clazz, - DynamoDbQueryExpression queryExpression, - DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public QueryResultPage queryPage(Class clazz, DynamoDbQueryExpression queryExpression) { - return queryPage(clazz, queryExpression, config); - } - - @Override - public QueryResultPage queryPage(Class clazz, - DynamoDbQueryExpression queryExpression, - DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public S3ClientCache s3ClientCache() { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public S3Link createS3Link(String bucketName, String key) { - return createS3Link((Region) null, bucketName, key); - } - - @Override - public S3Link createS3Link(Region s3region, String bucketName, String key) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public S3Link createS3Link(String s3region, String bucketName, String key) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public CreateTableRequest generateCreateTableRequest(Class clazz) { - return generateCreateTableRequest(clazz, config); - } - - public CreateTableRequest generateCreateTableRequest(Class clazz, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - - @Override - public DeleteTableRequest generateDeleteTableRequest(Class clazz) { - return generateDeleteTableRequest(clazz, config); - } - - public DeleteTableRequest generateDeleteTableRequest(Class clazz, DynamoDbMapperConfig config) { - throw new UnsupportedOperationException("operation not supported in " + getClass()); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AbstractEnumMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AbstractEnumMarshaller.java deleted file mode 100644 index 0bca5de2c7b3..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AbstractEnumMarshaller.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static software.amazon.awssdk.core.internal.util.ThrowableUtils.failure; - -/** - * Generic marshaller for enumerations. - * - * Please note, there are some risks in distributed systems when using - * enumerations as attributes instead of simply using a String. - * When adding new values to the enumeration, the enum-only changes must - * be deployed before the enumeration value can be persisted. This will - * ensure that all systems have the correct code to map it from the item - * record in DynamoDB to your objects. - * - * @see DynamoDbMarshaller - * - * @deprecated Replaced by {@link DynamoDbTypeConvertedEnum} - */ -@Deprecated -public abstract class AbstractEnumMarshaller> implements DynamoDbMarshaller { - - @Override - public String marshall(final T obj) { - try { - return obj.name(); - } catch (final RuntimeException e) { - throw failure(e, "Unable to marshall the instance of " + obj.getClass() + " into a string"); - } - } - - @Override - public T unmarshall(final Class clazz, final String obj) { - try { - return Enum.valueOf(clazz, obj); - } catch (final RuntimeException e) { - throw failure(e, "Unable to unmarshall the string " + obj + " into " + clazz); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ArgumentMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ArgumentMarshaller.java deleted file mode 100644 index 250b043ce8ac..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ArgumentMarshaller.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * Interface to make it possible to cache the expensive type determination - * behavior. - */ -public interface ArgumentMarshaller { - - /** - * Marshalls the object given into an AttributeValue.
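For illustration only, and not part of the deleted sources: a minimal sketch of a concrete marshaller for this interface that persists an enum constant by name. The class name EnumNameMarshaller is hypothetical; it uses the StringAttributeMarshaller marker interface declared just below.

import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

// Illustrative sketch; EnumNameMarshaller is a hypothetical name, not an SDK class.
class EnumNameMarshaller implements ArgumentMarshaller.StringAttributeMarshaller {
    @Override
    public AttributeValue marshall(Object obj) {
        // Store the enum constant's name as a DynamoDB string attribute.
        return AttributeValue.builder().s(((Enum<?>) obj).name()).build();
    }
}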
- */ - AttributeValue marshall(Object obj); - - interface BooleanAttributeMarshaller extends ArgumentMarshaller { - } - - interface StringAttributeMarshaller extends ArgumentMarshaller { - } - - interface NumberAttributeMarshaller extends ArgumentMarshaller { - } - - interface BinaryAttributeMarshaller extends ArgumentMarshaller { - } - - interface StringSetAttributeMarshaller extends ArgumentMarshaller { - } - - interface NumberSetAttributeMarshaller extends ArgumentMarshaller { - } - - interface BinarySetAttributeMarshaller extends ArgumentMarshaller { - } - - interface ListAttributeMarshaller extends ArgumentMarshaller { - } - - interface MapAttributeMarshaller extends ArgumentMarshaller { - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ArgumentUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ArgumentUnmarshaller.java deleted file mode 100644 index 85f74ed3c169..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ArgumentUnmarshaller.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import java.text.ParseException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * Unmarshaller interface to make it possible to cache the expensive - * type-determination behavior necessary when turning a service result back - * into an object. - */ -public interface ArgumentUnmarshaller { - - /** - * Asserts that the value given can be processed using the setter given. - */ - void typeCheck(AttributeValue value, Method setter); - - /** - * Unmarshalls the {@link AttributeValue} given into an instance of the - * appropriate type, as determined by {@link DynamoDbMapper} - * - * @throws ParseException when unable to parse a date string - */ - Object unmarshall(AttributeValue value) throws ParseException; -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AttributeTransformer.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AttributeTransformer.java deleted file mode 100644 index 0a97c4ed7b20..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AttributeTransformer.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A hook allowing a custom transform/untransform of the raw attribute - * values immediately before writing them into DynamoDB and immediately - * after reading them out of DynamoDB, but with extra context about - * the model class not available at the raw DynamoDbClient level. - *
<p>
    - * This interface contains both a {@code transform} method and a corresponding - * {@code untransform} method. These methods SHOULD be inverses, such that - * untransform(transform(value)) == value. - */ -public interface AttributeTransformer { - /** - * Transforms the input set of attribute values derived from the model - * object before writing them into DynamoDB. - * - * @param parameters transformation parameters - * @return the transformed attribute value map - */ - Map transform(Parameters parameters); - - /** - * Untransform the input set of attribute values read from DynamoDB before - * creating a model object from them. - * - * @param parameters transformation parameters - * @return the untransformed attribute value map - */ - Map untransform(Parameters parameters); - - /** - * Parameters for the {@code transform} and {@code untransform} methods, - * so we don't have to break the interface in order to add additional - * parameters. - *
<p>
    - * Consuming code should NOT implement this interface. - */ - interface Parameters { - /** - * Returns the raw attribute values to be transformed or untransformed. - * The returned map is not modifiable. - * - * @return the raw attribute values to transform or untransform - */ - Map getAttributeValues(); - - /** - * Returns true if this transformation is being called as part of a - * partial update operation. If true, the attributes returned by - * {@link #getAttributeValues()} do not represent the entire new - * item, but only a snapshot of the attributes which are getting - * new values. - *
<p>
    - * Implementations which do not support transforming a partial - * view of an item (for example, because they need to calculate a - * signature based on all of the item's attributes that won't be valid - * if only a subset of the attributes are taken into consideration) - * should check this flag and throw an exception rather than - * corrupting the data in DynamoDB. - *
<p>
    - * This method always returns {@code false} for instances passed to - * {@link AttributeTransformer#untransform(Parameters)}. - * - * @return true if this operation is a partial update, false otherwise - */ - boolean isPartialUpdate(); - - /** - * @return the type of the model class we're transforming to or from - */ - Class modelClass(); - - /** - * @return the mapper config for this operation - */ - DynamoDbMapperConfig mapperConfig(); - - /** - * @return the name of the DynamoDB table the attributes were read - * from or will be written to - */ - String getTableName(); - - /** - * @return the name of the hash key for the table - */ - String getHashKeyName(); - - /** - * @return the name of the range key for the table, if it has one, - * otherwise {@code null} - */ - String getRangeKeyName(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AttributeTransformerChain.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AttributeTransformerChain.java deleted file mode 100644 index e6ae9badba01..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AttributeTransformerChain.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A virtual {@code AttributeTransformer} that transforms and untransforms - * attributes by running them through a cascading series of child - * {@code AttributeTransformer} instances. - */ -public class AttributeTransformerChain implements AttributeTransformer { - - private final List transformers; - - /** - * Creates a new transformer chain from the given array of transformers. - * When transforming attributes, these transformers are invoked from first - * to last; when untransforming they are invoked in the opposite order. - * - * @param transformers the chain of transformers. - */ - public AttributeTransformerChain( - final AttributeTransformer... transformers) { - - this(Arrays.asList(transformers)); - } - - /** - * Creates a new transformer chain from the given list of transformers. - * When transforming attributes, these transformers are invoked from first - * to last; when untransforming they are invoked in the opposite order. - * - * @param transformers the chain of transformers. 
- */ - public AttributeTransformerChain( - final List transformers) { - - this.transformers = Collections.unmodifiableList( - new ArrayList(transformers)); - } - - /** - * @return the transformers in this chain - */ - public List getTransformers() { - return transformers; - } - - @Override - public Map transform( - final Parameters parameters) { - - ProxyParameters proxy = new ProxyParameters(parameters); - - for (int i = 0; i < transformers.size(); ++i) { - proxy.setAttributeValues(transformers.get(i).transform(proxy)); - } - - return proxy.getAttributeValues(); - } - - @Override - public Map untransform( - final Parameters parameters) { - - ProxyParameters proxy = new ProxyParameters(parameters); - - for (int i = transformers.size() - 1; i >= 0; --i) { - proxy.setAttributeValues(transformers.get(i).untransform(proxy)); - } - - return proxy.getAttributeValues(); - } - - @Override - public String toString() { - return transformers.toString(); - } - - /** - * A {@code Parameters} proxy that intercepts calls to - * {@code getAttributeValues} and overrides the return value. - */ - private static class ProxyParameters implements Parameters { - - private final Parameters delegate; - private Map values; - - /** - * Create a new proxy wrapping the given {@code Parameters} object. - * - * @param delegate the parameters object to wrap - */ - ProxyParameters(final Parameters delegate) { - this.delegate = delegate; - this.values = delegate.getAttributeValues(); - } - - @Override - public Map getAttributeValues() { - return values; - } - - /** - * Changes the attribute values for this instance. - * - * @param values the new values - */ - public void setAttributeValues( - final Map values) { - this.values = Collections.unmodifiableMap(values); - } - - @Override - public boolean isPartialUpdate() { - return delegate.isPartialUpdate(); - } - - @Override - public Class modelClass() { - return delegate.modelClass(); - } - - @Override - public DynamoDbMapperConfig mapperConfig() { - return delegate.mapperConfig(); - } - - @Override - public String getTableName() { - return delegate.getTableName(); - } - - @Override - public String getHashKeyName() { - return delegate.getHashKeyName(); - } - - @Override - public String getRangeKeyName() { - return delegate.getRangeKeyName(); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AttributeTransformerChainTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AttributeTransformerChainTest.java deleted file mode 100644 index d5be9c5680bb..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/AttributeTransformerChainTest.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
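For illustration only, and not part of the deleted sources: a sketch of a transformer whose transform and untransform methods are inverses, in the same spirit as the TestTransformer used by the test that follows. The SuffixTransformer name is hypothetical, and the generic signature of AttributeTransformer.Parameters is assumed to match the v1 interface (the angle-bracket content was lost in the rendering above).

import java.util.HashMap;
import java.util.Map;
import software.amazon.awssdk.services.dynamodb.datamodeling.AttributeTransformer;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

// Illustrative sketch; SuffixTransformer is a hypothetical name and the
// Parameters<?> signature is assumed from the v1 interface. For simplicity it
// assumes every attribute is a string value.
class SuffixTransformer implements AttributeTransformer {

    private final String suffix;

    SuffixTransformer(String suffix) {
        this.suffix = suffix;
    }

    @Override
    public Map<String, AttributeValue> transform(AttributeTransformer.Parameters<?> parameters) {
        Map<String, AttributeValue> out = new HashMap<>();
        parameters.getAttributeValues().forEach(
                (name, value) -> out.put(name, AttributeValue.builder().s(value.s() + suffix).build()));
        return out;
    }

    @Override
    public Map<String, AttributeValue> untransform(AttributeTransformer.Parameters<?> parameters) {
        // Assumes the values were produced by transform(), i.e. they end with the suffix.
        Map<String, AttributeValue> out = new HashMap<>();
        parameters.getAttributeValues().forEach(
                (name, value) -> out.put(name, AttributeValue.builder()
                        .s(value.s().substring(0, value.s().length() - suffix.length())).build()));
        return out;
    }
}

A chain built as new AttributeTransformerChain(new SuffixTransformer(".one"), new SuffixTransformer(".two")) appends ".one" then ".two" on transform and strips them in the reverse order on untransform, so the round trip returns the original attribute values.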
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import org.junit.Assert; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.AttributeTransformer.Parameters; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class AttributeTransformerChainTest { - @Test - public void testTransformEmptyChain() { - - AttributeTransformer transformer = new AttributeTransformerChain( - Collections.emptyList() - ); - - Map values = - new HashMap(); - - Parameters params = new TestParameters(values); - - Map result = transformer.transform(params); - Assert.assertSame(values, result); - Assert.assertTrue(values.isEmpty()); - } - - @Test - public void testUntransformEmptyChain() { - - AttributeTransformer transformer = new AttributeTransformerChain( - Collections.emptyList() - ); - - Map values = - new HashMap(); - - Parameters params = new TestParameters(values); - - Map result = transformer.untransform(params); - Assert.assertSame(values, result); - Assert.assertTrue(values.isEmpty()); - } - - @Test - public void testTransform() { - - AttributeTransformer transformer1 = new TestTransformer(".one"); - AttributeTransformer transformer2 = new TestTransformer(".two"); - - AttributeTransformer chain = - new AttributeTransformerChain(transformer1, transformer2); - - Map values = - new HashMap(); - - values.put("test1", AttributeValue.builder().s("foo").build()); - values.put("test2", AttributeValue.builder().s("bar").build()); - - Parameters params = new TestParameters(values); - - Map result = chain.transform(params); - - Assert.assertNotNull(result); - Assert.assertEquals(2, result.size()); - - Assert.assertEquals("foo.one.two", result.get("test1").s()); - Assert.assertEquals("bar.one.two", result.get("test2").s()); - } - - @Test - public void testUntransform() { - - AttributeTransformer transformer1 = new TestTransformer(".one"); - AttributeTransformer transformer2 = new TestTransformer(".two"); - - AttributeTransformer chain = - new AttributeTransformerChain(transformer1, transformer2); - - Map values = - new HashMap(); - - values.put("test1", AttributeValue.builder().s("foo.one.two").build()); - values.put("test2", AttributeValue.builder().s("bar.one.two").build()); - - Parameters params = new TestParameters(values); - - Map result = chain.untransform(params); - - Assert.assertNotNull(result); - Assert.assertEquals(2, result.size()); - - Assert.assertEquals("foo", result.get("test1").s()); - Assert.assertEquals("bar", result.get("test2").s()); - } - - @Test - public void testRoundTrip() { - - AttributeTransformer transformer1 = new TestTransformer(".one"); - AttributeTransformer transformer2 = new TestTransformer(".two"); - - AttributeTransformer chain = - new AttributeTransformerChain(transformer1, transformer2); - - Map values = - new HashMap(); - - values.put("test1", AttributeValue.builder().s("foo").build()); - values.put("test2", AttributeValue.builder().s("bar").build()); - - Parameters params = new TestParameters(values); - - Map result = chain.transform(params); - - params = new TestParameters(result); - - result = chain.untransform(params); - - Assert.assertEquals(values, result); - } - - private static class TestTransformer implements AttributeTransformer { - - private final String appendMe; - - public TestTransformer(final String appendMe) { - this.appendMe = appendMe; - } - - @Override - public Map transform( - final Parameters 
parameters) { - - Map rval = - new HashMap(); - - for (Map.Entry entry - : parameters.getAttributeValues().entrySet()) { - - rval.put(entry.getKey(), transform(entry.getValue())); - } - - return rval; - } - - @Override - public Map untransform( - final Parameters parameters) { - - Map rval = - new HashMap(); - - for (Map.Entry entry - : parameters.getAttributeValues().entrySet()) { - - rval.put(entry.getKey(), untransform(entry.getValue())); - } - - return rval; - } - - private AttributeValue transform(AttributeValue value) { - return AttributeValue.builder().s(value.s() + appendMe).build(); - } - - private AttributeValue untransform(AttributeValue value) { - String s = value.s(); - if (s.endsWith(appendMe)) { - return AttributeValue.builder().s( - s.substring(0, s.length() - appendMe.length())).build(); - } else { - return value; - } - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchLoadContext.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchLoadContext.java deleted file mode 100644 index 8a9375d401a3..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchLoadContext.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemResponse; -import software.amazon.awssdk.utils.Validate; - - -/** - * Container object that has information about the batch load request made to DynamoDB. - * - * @author avinam - */ -public class BatchLoadContext { - /** - * The BatchGetItemRequest. - */ - private BatchGetItemRequest batchGetItemRequest; - /** - * The BatchGetItemResponse returned by the DynamoDB client. - */ - private BatchGetItemResponse batchGetItemResponse; - /** - * The number of times the request has been retried. - */ - private int retriesAttempted; - - /** - * Instantiates a new BatchLoadContext. - * @param batchGetItemRequest see {@link BatchGetItemRequest}. 
- * */ - public BatchLoadContext(BatchGetItemRequest batchGetItemRequest) { - this.batchGetItemRequest = Validate.paramNotNull(batchGetItemRequest, "batchGetItemRequest"); - this.batchGetItemResponse = null; - this.retriesAttempted = 0; - } - - public BatchGetItemRequest getBatchGetItemRequest() { - return batchGetItemRequest; - } - - public void setBatchGetItemRequest(BatchGetItemRequest batchGetItemRequest) { - this.batchGetItemRequest = batchGetItemRequest; - } - - /** - * @return the BatchGetItemResponse - */ - public BatchGetItemResponse batchGetItemResponse() { - return batchGetItemResponse; - } - - /** - * @return the BatchGetItemResponse - */ - public void setBatchGetItemResponse(BatchGetItemResponse batchGetItemResponse) { - this.batchGetItemResponse = batchGetItemResponse; - } - - - /** - * @return the BatchGetItemRequest. - */ - public BatchGetItemRequest batchGetItemRequest() { - return batchGetItemRequest; - } - - /** - * Gets the retriesAttempted. - * - * @return the retriesAttempted - */ - public int getRetriesAttempted() { - return retriesAttempted; - } - - /** - * Sets retriesAttempted. - * - * @param retriesAttempted the number of retries attempted - */ - public void setRetriesAttempted(int retriesAttempted) { - this.retriesAttempted = retriesAttempted; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchLoadRetryStrategyTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchLoadRetryStrategyTest.java deleted file mode 100644 index 43f9aab165f0..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchLoadRetryStrategyTest.java +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
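For illustration only, and not part of the deleted sources: a sketch of a retry strategy that consults this context object to bound retries of unprocessed keys. The ThreeRetriesNoDelayStrategy name is hypothetical, and BatchLoadRetryStrategy is assumed to be the nested interface on DynamoDbMapperConfig, as in the v1 mapper.

import software.amazon.awssdk.services.dynamodb.datamodeling.BatchLoadContext;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.BatchLoadRetryStrategy;

// Illustrative sketch; the class name is hypothetical and the interface location
// (DynamoDbMapperConfig.BatchLoadRetryStrategy) is assumed from the v1 mapper.
class ThreeRetriesNoDelayStrategy implements BatchLoadRetryStrategy {

    @Override
    public boolean shouldRetry(BatchLoadContext context) {
        // Give up after the mapper has already retried three times.
        return context.getRetriesAttempted() < 3;
    }

    @Override
    public long getDelayBeforeNextRetry(BatchLoadContext context) {
        return 0; // retry immediately
    }
}

Such a strategy would be installed with new DynamoDbMapperConfig.Builder().withBatchLoadRetryStrategy(new ThreeRetriesNoDelayStrategy()).build(), mirroring the commented-out test that follows.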
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; - -// Commenting out the test class as its broken and we don't support mapper yet -@RunWith(MockitoJUnitRunner.class) -public class BatchLoadRetryStrategyTest { - - @Test - public void dummyTest() { - - } - -// private static final String TABLE_NAME = "tableName"; -// private static final String TABLE_NAME2 = "tableName2"; -// private static final String TABLE_NAME3 = "tableName3"; -// private static final String HASH_ATTR = "hash"; -// -// // private static BatchGetItemResponse batchGetItemResponse; -// private static List itemsToGet; -// -// static { -// -// itemsToGet = new ArrayList(); -// itemsToGet.add(new Item3("Bruce Wayne")); -// itemsToGet.add(new Item2("Is")); -// itemsToGet.add(new Item("Batman")); -// } -// -// @Rule -// public final ExpectedException thrown = ExpectedException.none(); -// -// @Mock -// private DynamoDbClient ddbMock; -// @Mock -// private BatchGetItemRequest mockItemRequest; -// @Mock -// private BatchGetItemResponse mockItemResult; -// -// @Test -// public void testBatchReadCallFailure_NoRetry() { -// when(ddbMock.batchGetItem(any(DynamoDbRequest.class))) -// .thenReturn(buildDefaultGetItemResponse().toBuilder().unprocessedKeys(buildUnprocessedKeysMap(1)).build()); -// DynamoDbMapperConfig config = -// getConfigWithCustomBatchLoadRetryStrategy(new DynamoDbMapperConfig.NoRetryBatchLoadRetryStrategy()); -// DynamoDbMapper mapper = new DynamoDbMapper(ddbMock, config); -// -// thrown.expect(BatchGetItemException.class); -// mapper.batchLoad(itemsToGet); -// verify(ddbMock, times(1)).batchGetItem(any(BatchGetItemRequest.class)); -// } -// -// @Test -// public void testBatchReadCallFailure_Retry() { -// when(ddbMock.batchGetItem(any(BatchGetItemRequest.class))) -// .thenReturn(buildDefaultGetItemResponse().toBuilder().unprocessedKeys(buildUnprocessedKeysMap(1)).build()); -// -// DynamoDbMapper mapper = new DynamoDbMapper(ddbMock, getConfigWithCustomBatchLoadRetryStrategy(new BatchLoadRetryStrategyWithNoDelay(3))); -// -// -// thrown.expect(BatchGetItemException.class); -// mapper.batchLoad(itemsToGet); -// verify(ddbMock, times(4)).batchGetItem(any(BatchGetItemRequest.class)); -// } -// -// @Test -// public void testBatchReadCallSuccess_Retry() { -// when(ddbMock.batchGetItem(any(BatchGetItemRequest.class))) -// .thenReturn(buildDefaultGetItemResponse().toBuilder().unprocessedKeys(new HashMap<>(1)).build()); -// -// DynamoDbMapperConfig config = -// getConfigWithCustomBatchLoadRetryStrategy(new DynamoDbMapperConfig.DefaultBatchLoadRetryStrategy()); -// DynamoDbMapper mapper = new DynamoDbMapper(ddbMock, config); -// -// mapper.batchLoad(itemsToGet); -// verify(ddbMock, times(1)).batchGetItem(any(BatchGetItemRequest.class)); -// } -// -// @Test -// public void testBatchReadCallFailure_Retry_RetryOnCompleteFailure() { -// when(ddbMock.batchGetItem(any(BatchGetItemRequest.class))) -// .thenReturn(buildDefaultGetItemResponse().toBuilder().unprocessedKeys(buildUnprocessedKeysMap(3)).build()); -// DynamoDbMapperConfig config = -// getConfigWithCustomBatchLoadRetryStrategy(new DynamoDbMapperConfig.DefaultBatchLoadRetryStrategy()); -// DynamoDbMapper mapper = new DynamoDbMapper(ddbMock, config); -// -// thrown.expect(BatchGetItemException.class); -// mapper.batchLoad(itemsToGet); -// verify(ddbMock, times(6)).batchGetItem(any(BatchGetItemRequest.class)); -// } -// -// @Test -// public void 
testBatchReadCallFailure_NoRetry_RetryOnCompleteFailure() { -// when(ddbMock.batchGetItem(any(BatchGetItemRequest.class))) -// .thenReturn(buildDefaultGetItemResponse().toBuilder().unprocessedKeys(buildUnprocessedKeysMap(3)).build()); -// DynamoDbMapperConfig config = -// getConfigWithCustomBatchLoadRetryStrategy(new DynamoDbMapperConfig.NoRetryBatchLoadRetryStrategy()); -// DynamoDbMapper mapper = new DynamoDbMapper(ddbMock, config); -// -// thrown.expect(BatchGetItemException.class); -// mapper.batchLoad(itemsToGet); -// verify(ddbMock, times(1)).batchGetItem(any(BatchGetItemRequest.class)); -// } -// -// @Test -// public void testNoDelayOnPartialFailure_DefaultRetry() { -// BatchLoadRetryStrategy defaultRetryStrategy = new DynamoDbMapperConfig.DefaultBatchLoadRetryStrategy(); -// when(mockItemResult.unprocessedKeys()).thenReturn(buildUnprocessedKeysMap(2)); -// when(mockItemRequest.requestItems()).thenReturn(buildUnprocessedKeysMap(3)); -// BatchLoadContext context = new BatchLoadContext(mockItemRequest); -// context.setBatchGetItemResponse(mockItemResult); -// context.setRetriesAttempted(2); -// assertEquals(0, defaultRetryStrategy.getDelayBeforeNextRetry(context)); -// } -// -// @Test -// public void testDelayOnPartialFailure_DefaultRetry() { -// BatchLoadRetryStrategy defaultRetryStrategy = new DynamoDbMapperConfig.DefaultBatchLoadRetryStrategy(); -// when(mockItemResult.unprocessedKeys()).thenReturn(buildUnprocessedKeysMap(3)); -// when(mockItemRequest.requestItems()).thenReturn(buildUnprocessedKeysMap(3)); -// -// BatchLoadContext context = new BatchLoadContext(mockItemRequest); -// context.setBatchGetItemResponse(mockItemResult); -// context.setRetriesAttempted(2); -// assertTrue(defaultRetryStrategy.getDelayBeforeNextRetry(context) > 0); -// } -// -// private DynamoDbMapperConfig getConfigWithCustomBatchLoadRetryStrategy(final BatchLoadRetryStrategy batchReadRetryStrategy) { -// return new DynamoDbMapperConfig.Builder().withBatchLoadRetryStrategy(batchReadRetryStrategy).build(); -// } -// -// private Map buildUnprocessedKeysMap(final int size) { -// final Map unproccessedKeys = new HashMap(size); -// for (int i = 0; i < size; i++) { -// unproccessedKeys.put("test" + i, KeysAndAttributes.builder().build()); -// } -// -// return unproccessedKeys; -// } -// -// private BatchGetItemResponse buildDefaultGetItemResponse() { -// -// final Map>> map = new HashMap>>(); -// return BatchGetItemResponse.builder().responses(map).build(); -// -// } -// -// static class BatchLoadRetryStrategyWithNoDelay implements BatchLoadRetryStrategy { -// -// private final int maxRetry; -// -// public BatchLoadRetryStrategyWithNoDelay(final int maxRetry) { -// this.maxRetry = maxRetry; -// } -// -// /** -// * @see BatchLoadRetryStrategy#maxRetryOnUnprocessedKeys(java.util.Map, java.util.Map) -// */ -// @Override -// public boolean shouldRetry(final BatchLoadContext batchLoadContext) { -// return batchLoadContext.getRetriesAttempted() < maxRetry; -// } -// -// /** -// * @see BatchLoadRetryStrategy#getDelayBeforeNextRetry(java.util.Map, int) -// */ -// @Override -// public long getDelayBeforeNextRetry(final BatchLoadContext batchLoadContext) { -// return 0; -// } -// -// -// } -// -// @DynamoDbTable(tableName = TABLE_NAME) -// public static class Item { -// -// private String hash; -// -// public Item(final String hash) { -// this.hash = hash; -// } -// -// @DynamoDbAttribute(attributeName = HASH_ATTR) -// @DynamoDbHashKey -// public String getHash() { -// return hash; -// } -// -// public void setHash(final 
String hash) { -// this.hash = hash; -// } -// -// public WriteRequest toPutSaveRequest() { -// return WriteRequest.builder() -// .putRequest(PutRequest.builder() -// .item(Collections.singletonMap(HASH_ATTR, AttributeValue.builder().s(hash).build())) -// .build()) -// .build(); -// } -// } -// -// @DynamoDbTable(tableName = TABLE_NAME2) -// public static class Item2 { -// -// private String hash; -// -// public Item2(final String hash) { -// this.hash = hash; -// } -// -// @DynamoDbAttribute(attributeName = HASH_ATTR) -// @DynamoDbHashKey -// public String getHash() { -// return hash; -// } -// -// public void setHash(final String hash) { -// this.hash = hash; -// } -// -// public WriteRequest toPutSaveRequest() { -// return WriteRequest.builder() -// .putRequest(PutRequest.builder() -// .item(Collections.singletonMap(HASH_ATTR, AttributeValue.builder().s(hash) -// .build())) -// .build()) -// .build(); -// } -// } -// -// @DynamoDbTable(tableName = TABLE_NAME3) -// public static class Item3 { -// -// private String hash; -// -// public Item3(final String hash) { -// this.hash = hash; -// } -// -// @DynamoDbAttribute(attributeName = HASH_ATTR) -// @DynamoDbHashKey -// public String getHash() { -// return hash; -// } -// -// public void setHash(final String hash) { -// this.hash = hash; -// } -// -// public WriteRequest toPutSaveRequest() { -// return WriteRequest.builder() -// .putRequest(PutRequest.builder() -// .item(Collections.singletonMap(HASH_ATTR, AttributeValue.builder() -// .s(hash) -// .build())) -// .build()) -// .build(); -// } -// } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchWriteRetryStrategyTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchWriteRetryStrategyTest.java deleted file mode 100644 index 5d69001b2412..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/BatchWriteRetryStrategyTest.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import junit.framework.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper.FailedBatch; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.BatchWriteRetryStrategy; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemRequest; -import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemResponse; -import software.amazon.awssdk.services.dynamodb.model.PutRequest; -import software.amazon.awssdk.services.dynamodb.model.WriteRequest; - -@RunWith(MockitoJUnitRunner.class) -public class BatchWriteRetryStrategyTest { - - private static final int MAX_RETRY = 10; - private static final String TABLE_NAME = "tableName"; - private static final String HASH_ATTR = "hash"; - - private static Map> unprocessedItems; - - static { - WriteRequest writeReq = WriteRequest.builder() - .putRequest(PutRequest.builder() - .item(Collections.singletonMap( - HASH_ATTR, - AttributeValue.builder().s("foo").build())) - .build()) - .build(); - - unprocessedItems = Collections.singletonMap(TABLE_NAME, - Arrays.asList(writeReq)); - } - - @Mock - private DynamoDbClient ddbMock; - - private DynamoDbMapper mapper; - - @Before - public void setup() { - mapper = new DynamoDbMapper( - ddbMock, - getConfigWithCustomBatchWriteRetryStrategy( - new BatchWriteRetryStrategyWithNoDelay(MAX_RETRY))); - } - - @Test - public void testBatchWriteItemCallSuccess_NoRetry() { - when(ddbMock.batchWriteItem(any(BatchWriteItemRequest.class))) - .thenReturn(BatchWriteItemResponse.builder().unprocessedItems(Collections.>emptyMap()).build()); - - List failedBatches = mapper.batchSave(new Item("foo")); - - verify(ddbMock, times(1)).batchWriteItem(any(BatchWriteItemRequest.class)); - Assert.assertEquals(0, failedBatches.size()); - } - - @Test - public void testUnprocessedItemReturned_BatchWriteItemCallNotExceedMaxRetry() { - when(ddbMock.batchWriteItem(any(BatchWriteItemRequest.class))) - .thenReturn(BatchWriteItemResponse.builder().unprocessedItems(unprocessedItems).build()); - - List failedBatches = mapper.batchSave(new Item("foo")); - verify(ddbMock, times(MAX_RETRY + 1)).batchWriteItem(any(BatchWriteItemRequest.class)); - - Assert.assertEquals(1, failedBatches.size()); - FailedBatch failedBatch = failedBatches.get(0); - - Assert.assertEquals( - "Failed batch should contain the same UnprocessedItems returned in the BatchWriteItem response.", - unprocessedItems, - failedBatch.getUnprocessedItems()); - Assert.assertNull( - "No exception should be set if the batch failed after max retry", - failedBatch.getException()); - } - - @Test - public void testExceptionThrown_NoRetry() { - - RuntimeException exception = new RuntimeException("BOOM"); - - when(ddbMock.batchWriteItem(any(BatchWriteItemRequest.class))).thenThrow(exception); - - // put a random item - Item item = new Item(UUID.randomUUID().toString()); - List 
failedBatches = mapper.batchSave(item); - - Assert.assertEquals(1, failedBatches.size()); - FailedBatch failedBatch = failedBatches.get(0); - - Assert.assertEquals( - "Failed batch should contain all the input items for batchWrite", - Collections.singletonMap(TABLE_NAME, Arrays.asList(item.toPutSaveRequest())), - failedBatch.getUnprocessedItems()); - Assert.assertSame( - "The exception should be the same as one thrown by BatchWriteItem", - exception, - failedBatch.getException()); - } - - private DynamoDbMapperConfig getConfigWithCustomBatchWriteRetryStrategy( - BatchWriteRetryStrategy batchWriteRetryStrategy) { - return new DynamoDbMapperConfig.Builder() - .withBatchWriteRetryStrategy(batchWriteRetryStrategy) - .build(); - } - - private static class BatchWriteRetryStrategyWithNoDelay implements - BatchWriteRetryStrategy { - - private final int maxRetry; - - public BatchWriteRetryStrategyWithNoDelay(int maxRety) { - this.maxRetry = maxRety; - } - - @Override - public int maxRetryOnUnprocessedItems( - Map> batchWriteItemInput) { - return maxRetry; - } - - @Override - public long getDelayBeforeRetryUnprocessedItems( - Map> unprocessedItems, - int retriesAttempted) { - return 0; - } - - } - - @DynamoDbTable(tableName = TABLE_NAME) - public static class Item { - - private String hash; - - public Item(String hash) { - this.hash = hash; - } - - @DynamoDbHashKey - @DynamoDbAttribute(attributeName = HASH_ATTR) - public String getHash() { - return hash; - } - - public void setHash(String hash) { - this.hash = hash; - } - - public WriteRequest toPutSaveRequest() { - return WriteRequest.builder() - .putRequest(PutRequest.builder() - .item(Collections.singletonMap(HASH_ATTR, AttributeValue.builder().s(hash).build())) - .build()) - .build(); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/CachingMarshallerSetTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/CachingMarshallerSetTest.java deleted file mode 100644 index 621d59254c8b..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/CachingMarshallerSetTest.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
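For illustration only, and not part of the deleted sources: the test above wires a custom BatchWriteRetryStrategy into the mapper config; below is a sketch of a strategy with a fixed retry budget and a flat delay. The FlatDelayWriteRetryStrategy name is hypothetical, and the Map/List type arguments are assumed from the v1 interface (the angle-bracket content was lost in the rendering above).

import java.util.List;
import java.util.Map;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.BatchWriteRetryStrategy;
import software.amazon.awssdk.services.dynamodb.model.WriteRequest;

// Illustrative sketch; the class name is hypothetical and the generic signatures
// are assumed from the v1 BatchWriteRetryStrategy interface.
class FlatDelayWriteRetryStrategy implements BatchWriteRetryStrategy {

    @Override
    public int maxRetryOnUnprocessedItems(Map<String, List<WriteRequest>> batchWriteItemInput) {
        return 5; // retry unprocessed items at most five times
    }

    @Override
    public long getDelayBeforeRetryUnprocessedItems(Map<String, List<WriteRequest>> unprocessedItems,
                                                    int retriesAttempted) {
        return 100; // flat delay between attempts (the v1 contract documents this as milliseconds)
    }
}

It would be installed with new DynamoDbMapperConfig.Builder().withBatchWriteRetryStrategy(new FlatDelayWriteRetryStrategy()).build(), exactly as the test's BatchWriteRetryStrategyWithNoDelay is.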
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import java.lang.reflect.Type; -import java.util.ArrayDeque; -import java.util.Deque; -import org.junit.Assert; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.ConversionSchemas.CachingMarshallerSet; -import software.amazon.awssdk.services.dynamodb.datamodeling.ConversionSchemas.MarshallerSet; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.pojos.TestClass; - -public class CachingMarshallerSetTest { - - private static final TestMarshallerSet MOCK = new TestMarshallerSet(); - private static final MarshallerSet SUT = new CachingMarshallerSet(MOCK); - - @Test - public void testIt() throws Exception { - ArgumentMarshaller marshaller = new ArgumentMarshaller() { - @Override - public AttributeValue marshall(Object value) { - return null; - } - }; - - MOCK.queue.add(marshaller); - - ArgumentMarshaller result = SUT.marshaller( - TestClass.class.getMethod("getString")); - - Assert.assertSame(marshaller, result); - - result = SUT.marshaller(TestClass.class.getMethod("getString")); - - Assert.assertSame(marshaller, result); - - ArgumentMarshaller marshaller2 = new ArgumentMarshaller() { - @Override - public AttributeValue marshall(Object value) { - return null; - } - }; - - MOCK.queue.add(marshaller2); - - result = SUT.marshaller(TestClass.class.getMethod("getInt")); - - Assert.assertSame(marshaller2, result); - } - - private static class TestMarshallerSet implements MarshallerSet { - - private final Deque queue = - new ArrayDeque(); - - private final Deque memberQueue = - new ArrayDeque(); - - @Override - public ArgumentMarshaller marshaller(Method getter) { - return queue.remove(); - } - - @Override - public ArgumentMarshaller memberMarshaller(Type memberType) { - return memberQueue.remove(); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/CachingUnmarshallerSetTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/CachingUnmarshallerSetTest.java deleted file mode 100644 index cb1afce82b2e..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/CachingUnmarshallerSetTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import java.lang.reflect.Type; -import java.util.ArrayDeque; -import java.util.Deque; -import org.junit.Assert; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.ConversionSchemas.CachingUnmarshallerSet; -import software.amazon.awssdk.services.dynamodb.datamodeling.ConversionSchemas.UnmarshallerSet; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.pojos.TestClass; - -public class CachingUnmarshallerSetTest { - - private static final TestUnmarshallerSet MOCK = new TestUnmarshallerSet(); - private static final UnmarshallerSet SUT = new CachingUnmarshallerSet(MOCK); - - @Test - public void testIt() throws Exception { - ArgumentUnmarshaller unmarshaller = new ArgumentUnmarshaller() { - @Override - public void typeCheck(AttributeValue value, Method setter) { - } - - @Override - public Object unmarshall(AttributeValue value) { - return null; - } - }; - - MOCK.queue.add(unmarshaller); - - ArgumentUnmarshaller result = SUT.getUnmarshaller( - TestClass.class.getMethod("getString"), - TestClass.class.getMethod("setString", String.class)); - - Assert.assertSame(unmarshaller, result); - - result = SUT.getUnmarshaller( - TestClass.class.getMethod("getString"), - TestClass.class.getMethod("setString", String.class)); - - Assert.assertSame(unmarshaller, result); - - ArgumentUnmarshaller unmarshaller2 = new ArgumentUnmarshaller() { - @Override - public void typeCheck(AttributeValue value, Method setter) { - } - - @Override - public Object unmarshall(AttributeValue value) { - return null; - } - }; - - MOCK.queue.add(unmarshaller2); - - result = SUT.getUnmarshaller( - TestClass.class.getMethod("getInt"), - TestClass.class.getMethod("setInt", int.class)); - - Assert.assertSame(unmarshaller2, result); - } - - private static class TestUnmarshallerSet implements UnmarshallerSet { - - private final Deque queue = - new ArrayDeque(); - - private final Deque memberQueue = - new ArrayDeque(); - - @Override - public ArgumentUnmarshaller getUnmarshaller(Method getter, Method setter) { - return queue.remove(); - } - - @Override - public ArgumentUnmarshaller memberUnmarshaller(Type type) { - return memberQueue.remove(); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConfigureS3LinksTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConfigureS3LinksTest.java deleted file mode 100644 index 34e9f14eb9ef..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConfigureS3LinksTest.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; - -import org.junit.Before; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.regions.Region; - -public class ConfigureS3LinksTest { - - private S3ClientCache s3cc; - - @Before - public void setUp() throws Exception { - s3cc = new S3ClientCache(AwsBasicCredentials.create("mock", "mock")); - } - - @Test - public void testS3LinkWithStringRegion() { - CorrectTestClass obj = new CorrectTestClass(); - S3Link s3 = new S3Link(s3cc, "ap-southeast-1", "nonexisting-test-bucketname2", "key"); - obj.setS3(s3); - - assertNotNull(obj.s3()); - assertEquals("nonexisting-test-bucketname2", obj.s3().bucketName()); - assertSame(Region.AP_SOUTHEAST_1.id(), obj.s3().s3Region().id()); - assertSame("ap-southeast-1", obj.s3().getRegion()); - } - - @Test - public void testManyS3LinksClass() { - ManyS3LinksTestClass obj = new ManyS3LinksTestClass(); - assertNull(obj.s31()); - } - - @DynamoDbTable(tableName = "nonexisting-test-tablename") - public static class CorrectTestClass { - - private String hk; - private S3Link s3; - - public CorrectTestClass() { - } - - @DynamoDbHashKey - public String getHk() { - return hk; - } - - public void setHk(String hk) { - this.hk = hk; - } - - public S3Link s3() { - return s3; - } - - public void setS3(S3Link s3) { - this.s3 = s3; - } - } - - @DynamoDbTable(tableName = "nonexisting-test-tablename") - public static class ManyS3LinksTestClass { - - private String hk; - private S3Link s31; - private S3Link s32; - private S3Link s33; - private S3Link s34; - private S3Link s35; - private S3Link s36; - - public ManyS3LinksTestClass() { - } - - @DynamoDbHashKey - public String getHk() { - return hk; - } - - public void setHk(String hk) { - this.hk = hk; - } - - public S3Link s31() { - return s31; - } - - public void setS31(S3Link s31) { - this.s31 = s31; - } - - public S3Link s32() { - return s32; - } - - public void setS32(S3Link s32) { - this.s32 = s32; - } - - public S3Link s33() { - return s33; - } - - public void setS33(S3Link s33) { - this.s33 = s33; - } - - public S3Link s34() { - return s34; - } - - public void setS34(S3Link s34) { - this.s34 = s34; - } - - public S3Link s35() { - return s35; - } - - public void setS35(S3Link s35) { - this.s35 = s35; - } - - public S3Link s36() { - return s36; - } - - public void setS36(S3Link s36) { - this.s36 = s36; - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConversionSchema.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConversionSchema.java deleted file mode 100644 index 5aa31c40fbeb..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConversionSchema.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.HashMap; -import java.util.Map; - -/** - * A strategy for mapping between Java types and DynamoDB types. Serves as a - * factory for {@code ItemConverter} instances that implement this mapping. - * Standard implementations are available in the {@link ConversionSchemas} - * class. - */ -public interface ConversionSchema { - - /** - * Creates an {@code ItemConverter}, injecting dependencies from the - * {@code DynamoDBMapper} that needs it. - * - * @param dependencies the dependencies to inject - * @return a new ItemConverter - */ - ItemConverter getConverter(Dependencies dependencies); - - /** - * Dependency injection for the {@code ItemConverter}s that this - * {@code ConversionSchema} generates. - */ - class Dependencies { - - private final Map, Object> values; - - public Dependencies() { - values = new HashMap, Object>(); - } - - @SuppressWarnings("unchecked") - public T get(Class clazz) { - return (T) values.get(clazz); - } - - public Dependencies with(Class clazz, T value) { - values.put(clazz, value); - return this; - } - - @Override - public String toString() { - return values.toString(); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConversionSchemas.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConversionSchemas.java deleted file mode 100644 index 4249ecce85e6..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConversionSchemas.java +++ /dev/null @@ -1,1493 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.BinaryAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.BinarySetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.BooleanAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.ListAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.MapAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.NumberAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.NumberSetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringSetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperFieldModel.DynamoDbAttributeType; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardBeanProperties.Bean; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardModelFactories.Rule; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardModelFactories.RuleFactory; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.BooleanSetToNumberSetMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.BooleanToBooleanMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.BooleanToNumberMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.ByteArraySetToBinarySetMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.ByteArrayToBinaryMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.ByteBufferSetToBinarySetMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.ByteBufferToBinaryMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.CalendarSetToStringSetMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.CalendarToStringMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.CollectionToListMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.CustomMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.DateSetToStringSetMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.DateToStringMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.MapToMapMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.NumberSetToNumberSetMarshaller; -import 
software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.NumberToNumberMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.ObjectSetToStringSetMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.ObjectToMapMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.ObjectToStringMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.S3LinkToStringMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.StringSetToStringSetMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.StringToStringMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.UuidSetToStringSetMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.BigDecimalSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.BigDecimalUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.BigIntegerSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.BigIntegerUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.BooleanSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.BooleanUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ByteArraySetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ByteArrayUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ByteBufferSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ByteBufferUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ByteSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ByteUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.CalendarSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.CalendarUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.CustomUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.DateSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.DateUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.DoubleSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.DoubleUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.FloatSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.FloatUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.IntegerSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.IntegerUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ListUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.LongSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.LongUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.MapUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.NullableUnmarshaller; 
-import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ObjectSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ObjectUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.S3LinkUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ShortSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.ShortUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.StringSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.StringUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.UuidSetUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.UuidUnmarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * Pre-defined strategies for mapping between Java types and DynamoDB types. - */ -public final class ConversionSchemas { - - /** - * The V1 schema mapping, which retains strict backwards compatibility with - * the original DynamoDB data model. In particular, it marshals Java - * Booleans as DynamoDB Numbers rather than the newer Boolean type, and does - * not support marshaling Lists or Maps. It can unmarshal - * values written in newer formats to ease migration. - *
    - * Use me if you have other code still using an old version of the SDK that - * does not understand the new List and Map types and want to ensure that - * you don't accidentally start writing values using these types. - */ - public static final ConversionSchema V1 = v1Builder("V1ConversionSchema").build(); - /** - * A V2 conversion schema which retains backwards compatibility with the - * V1 conversion schema for existing DynamoDB types, but adds the ability - * to marshall recursive structures using the new List and Map types. This - * is currently the default conversion schema. - */ - public static final ConversionSchema V2_COMPATIBLE = v2CompatibleBuilder( - "V2CompatibleConversionSchema").build(); - /** - * The native V2 conversion schema. This schema breaks compatibility with - * older versions of the mapper that only support the V1 schema by - * storing booleans as native DynamoDB Booleans rather than as a 1 or 0 - * in a DynamoDB Number. Switching to the V2 schema will prevent older - * versions of the mapper from reading items you write that contain - * booleans. - */ - public static final ConversionSchema V2 = v2Builder("V2ConversionSchema").build(); - static final ConversionSchema DEFAULT = V2_COMPATIBLE; - private static final Logger log = - LoggerFactory.getLogger(ConversionSchemas.class); - - ConversionSchemas() { - throw new UnsupportedOperationException(); - } - - /** - * A ConversionSchema builder that defaults to building {@link #V1}. - */ - public static Builder v1Builder(String name) { - return new Builder(name, V1MarshallerSet.marshallers(), V1MarshallerSet.setMarshallers(), - StandardUnmarshallerSet.unmarshallers(), - StandardUnmarshallerSet.setUnmarshallers()); - } - - /** - * A ConversionSchema builder that defaults to building {@link #V2_COMPATIBLE}. - */ - public static Builder v2CompatibleBuilder(String name) { - return new Builder(name, V2CompatibleMarshallerSet.marshallers(), - V2CompatibleMarshallerSet.setMarshallers(), - StandardUnmarshallerSet.unmarshallers(), - StandardUnmarshallerSet.setUnmarshallers()); - } - - /** - * A ConversionSchema builder that defaults to building {@link #V2}. 
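For orientation, the three schema constants documented above are normally selected through DynamoDbMapperConfig, as the ConversionToAttributeValuesTest later in this diff does. A hedged sketch, assuming only the builder methods shown in this file:

    import software.amazon.awssdk.services.dynamodb.datamodeling.ConversionSchemas;
    import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig;

    class ConversionSchemaSelectionExample {
        // Opt out of the V2_COMPATIBLE default and write native DynamoDB BOOL values.
        static DynamoDbMapperConfig v2Config() {
            return new DynamoDbMapperConfig.Builder()
                    .withConversionSchema(ConversionSchemas.V2)
                    .build();
        }
    }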
- */ - public static Builder v2Builder(String name) { - return new Builder(name, V2MarshallerSet.marshallers(), V2MarshallerSet.setMarshallers(), - StandardUnmarshallerSet.unmarshallers(), - StandardUnmarshallerSet.setUnmarshallers()); - } - - private static void addStandardDateMarshallers( - List> list) { - - list.add(Pair.of(Date.class, - DateToStringMarshaller.instance())); - list.add(Pair.of(Calendar.class, - CalendarToStringMarshaller.instance())); - } - - private static void addV1BooleanMarshallers( - List> list) { - - list.add(Pair.of(Boolean.class, - BooleanToNumberMarshaller.instance())); - list.add(Pair.of(boolean.class, - BooleanToNumberMarshaller.instance())); - } - - private static void addV2BooleanMarshallers( - List> list) { - - list.add(Pair.of(Boolean.class, - BooleanToBooleanMarshaller.instance())); - list.add(Pair.of(boolean.class, - BooleanToBooleanMarshaller.instance())); - } - - private static void addStandardNumberMarshallers( - List> list) { - - list.add(Pair.of(Number.class, - NumberToNumberMarshaller.instance())); - list.add(Pair.of(byte.class, - NumberToNumberMarshaller.instance())); - list.add(Pair.of(short.class, - NumberToNumberMarshaller.instance())); - list.add(Pair.of(int.class, - NumberToNumberMarshaller.instance())); - list.add(Pair.of(long.class, - NumberToNumberMarshaller.instance())); - list.add(Pair.of(float.class, - NumberToNumberMarshaller.instance())); - list.add(Pair.of(double.class, - NumberToNumberMarshaller.instance())); - } - - private static void addStandardStringMarshallers( - List> list) { - - list.add(Pair.of(String.class, - StringToStringMarshaller.instance())); - - list.add(Pair.of(UUID.class, - ObjectToStringMarshaller.instance())); - } - - private static void addStandardBinaryMarshallers( - List> list) { - - list.add(Pair.of(ByteBuffer.class, - ByteBufferToBinaryMarshaller.instance())); - list.add(Pair.of(byte[].class, - ByteArrayToBinaryMarshaller.instance())); - } - - private static void addStandardS3LinkMarshallers( - List> list) { - - list.add(Pair.of(S3Link.class, - S3LinkToStringMarshaller.instance())); - } - - private static void addStandardDateSetMarshallers( - List> list) { - - list.add(Pair.of(Date.class, - DateSetToStringSetMarshaller.instance())); - list.add(Pair.of(Calendar.class, - CalendarSetToStringSetMarshaller.instance())); - } - - private static void addStandardNumberSetMarshallers( - List> list) { - - list.add(Pair.of(Number.class, - NumberSetToNumberSetMarshaller.instance())); - list.add(Pair.of(byte.class, - NumberSetToNumberSetMarshaller.instance())); - list.add(Pair.of(short.class, - NumberSetToNumberSetMarshaller.instance())); - list.add(Pair.of(int.class, - NumberSetToNumberSetMarshaller.instance())); - list.add(Pair.of(long.class, - NumberSetToNumberSetMarshaller.instance())); - list.add(Pair.of(float.class, - NumberSetToNumberSetMarshaller.instance())); - list.add(Pair.of(double.class, - NumberSetToNumberSetMarshaller.instance())); - } - - private static void addStandardStringSetMarshallers( - List> list) { - - list.add(Pair.of(String.class, - StringSetToStringSetMarshaller.instance())); - - list.add(Pair.of(UUID.class, - UuidSetToStringSetMarshaller.instance())); - } - - private static void addStandardBinarySetMarshallers( - List> list) { - - list.add(Pair.of(ByteBuffer.class, - ByteBufferSetToBinarySetMarshaller.instance())); - list.add(Pair.of(byte[].class, - ByteArraySetToBinarySetMarshaller.instance())); - } - - private static void addV1BooleanSetMarshallers( - List> list) { - - 
list.add(Pair.of(Boolean.class, - BooleanSetToNumberSetMarshaller.instance())); - list.add(Pair.of(boolean.class, - BooleanSetToNumberSetMarshaller.instance())); - } - - private static Class unwrapGenericSetParam(Type setType) { - if (!(setType instanceof ParameterizedType)) { - log.warn("Set type {} is not a ParameterizedType, using default marshaller and unmarshaller", setType); - return Object.class; - } - - ParameterizedType ptype = (ParameterizedType) setType; - Type[] arguments = ptype.getActualTypeArguments(); - - if (arguments.length != 1) { - log.warn("Set type {} does not have exactly one type argument, using default marshaller and unmarshaller", setType); - return Object.class; - } - - if (arguments[0].toString().equals("byte[]")) { - return byte[].class; - } else { - return (Class) arguments[0]; - } - } - - private static Class resolveClass(Type type) { - Type localType = type; - if (localType instanceof ParameterizedType) { - localType = ((ParameterizedType) type).getRawType(); - } - if (!(localType instanceof Class)) { - throw new DynamoDbMappingException("Cannot resolve class for type " - + type); - } - return (Class) localType; - } - - private static T find(Class needle, List> haystack) { - for (Pair pair : haystack) { - if (pair.key.isAssignableFrom(needle)) { - return pair.value; - } - } - return null; - } - - interface MarshallerSet { - ArgumentMarshaller marshaller(Method getter); - - ArgumentMarshaller memberMarshaller(Type memberType); - } - - interface UnmarshallerSet { - ArgumentUnmarshaller getUnmarshaller(Method getter, Method setter); - - ArgumentUnmarshaller memberUnmarshaller(Type memberType); - } - - public static class Builder { - - private final String name; - private final List> marshallers; - private final List> setMarshallers; - private final List> unmarshallers; - private final List> setUnmarshallers; - - Builder(String name, List> marshallers, - List> setMarshallers, - List> unmarshallers, - List> setUnmarshallers) { - this.name = name; - this.marshallers = marshallers; - this.setMarshallers = setMarshallers; - this.unmarshallers = unmarshallers; - this.setUnmarshallers = setUnmarshallers; - } - - /** - * Adds marshaling of a type to the schema. Types are in LIFO order, so the last type added - * will be the first matched. - */ - public Builder addFirstType(Class clazz, ArgumentMarshaller marshaller, - ArgumentUnmarshaller unmarshaller) { - this.marshallers.add(0, Pair.of(clazz, marshaller)); - this.unmarshallers.add(0, Pair.of(clazz, unmarshaller)); - return this; - } - - /** - * Adds marshaling of a Set of a type to the schema. Types are in LIFO order, so the last - * type added will be the first matched. 
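Because addFirstType and addFirstSetType prepend to the lookup lists, the most recently added pair is matched first. A small, hypothetical sketch of that registration order, reusing the Date (un)marshallers imported in this file (the Date pair is already registered by default, so this only illustrates precedence):

    import java.util.Date;
    import software.amazon.awssdk.services.dynamodb.datamodeling.ConversionSchema;
    import software.amazon.awssdk.services.dynamodb.datamodeling.ConversionSchemas;
    import software.amazon.awssdk.services.dynamodb.datamodeling.marshallers.DateToStringMarshaller;
    import software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers.DateUnmarshaller;

    class CustomSchemaExample {
        static ConversionSchema dateFirstSchema() {
            // This pair is consulted before the standard marshallers because it is added first.
            return ConversionSchemas.v2CompatibleBuilder("DateFirstSchema")
                    .addFirstType(Date.class,
                                  DateToStringMarshaller.instance(),
                                  DateUnmarshaller.instance())
                    .build();
        }
    }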
- */ - public Builder addFirstSetType(Class clazz, ArgumentMarshaller marshaller, - ArgumentUnmarshaller unmarshaller) { - this.setMarshallers.add(0, Pair.of(clazz, marshaller)); - this.setUnmarshallers.add(0, Pair.of(clazz, unmarshaller)); - return this; - } - - public ConversionSchema build() { - return new StandardConversionSchema(name, new AbstractMarshallerSet(marshallers, - setMarshallers), - new StandardUnmarshallerSet(unmarshallers, - setUnmarshallers)); - } - } - - static class StandardConversionSchema implements ConversionSchema { - - private final String name; - private final MarshallerSet marshallers; - private final UnmarshallerSet unmarshallers; - - StandardConversionSchema( - String name, - MarshallerSet marshallers, - UnmarshallerSet unmarshallers) { - - this.name = name; - this.marshallers = new CachingMarshallerSet( - new AnnotationAwareMarshallerSet(marshallers)); - - this.unmarshallers = new CachingUnmarshallerSet( - new AnnotationAwareUnmarshallerSet(unmarshallers)); - } - - @Override - public ItemConverter getConverter(Dependencies dependencies) { - - S3ClientCache s3cc = dependencies.get(S3ClientCache.class); - - return new StandardItemConverter( - marshallers, - unmarshallers, - s3cc); - } - - @Override - public String toString() { - return name; - } - } - - static class StandardItemConverter implements ItemConverter { - - private final MarshallerSet marshallerSet; - private final UnmarshallerSet unmarshallerSet; - private final S3ClientCache s3cc; - - StandardItemConverter( - MarshallerSet marshallerSet, - UnmarshallerSet unmarshallerSet, - S3ClientCache s3cc) { - - this.marshallerSet = marshallerSet; - this.unmarshallerSet = unmarshallerSet; - this.s3cc = s3cc; - } - - private static Object unmarshall( - ArgumentUnmarshaller unmarshaller, - Method setter, - AttributeValue value) { - - unmarshaller.typeCheck(value, setter); - - try { - - return unmarshaller.unmarshall(value); - - } catch (IllegalArgumentException e) { - throw new DynamoDbMappingException( - "Couldn't unmarshall value " + value + " for " + setter, - e); - - } catch (ParseException e) { - throw new DynamoDbMappingException( - "Error attempting to parse date string " + value + " for " - + setter, - e); - } - } - - private static T createObject(Class clazz) { - try { - - return clazz.newInstance(); - - } catch (InstantiationException e) { - throw new DynamoDbMappingException( - "Failed to instantiate new instance of class", e); - - } catch (IllegalAccessException e) { - throw new DynamoDbMappingException( - "Failed to instantiate new instance of class", e); - } - } - - @Override - public DynamoDbMapperFieldModel getFieldModel(Method getter) { - final ArgumentMarshaller marshaller = marshaller(getter); - - final DynamoDbAttributeType attributeType; - if (marshaller instanceof StringAttributeMarshaller) { - attributeType = DynamoDbAttributeType.S; - } else if (marshaller instanceof NumberAttributeMarshaller) { - attributeType = DynamoDbAttributeType.N; - } else if (marshaller instanceof BinaryAttributeMarshaller) { - attributeType = DynamoDbAttributeType.B; - } else if (marshaller instanceof StringSetAttributeMarshaller) { - attributeType = DynamoDbAttributeType.SS; - } else if (marshaller instanceof NumberSetAttributeMarshaller) { - attributeType = DynamoDbAttributeType.NS; - } else if (marshaller instanceof BinarySetAttributeMarshaller) { - attributeType = DynamoDbAttributeType.BS; - } else if (marshaller instanceof BooleanAttributeMarshaller) { - attributeType = DynamoDbAttributeType.BOOL; - } else if 
(marshaller instanceof ListAttributeMarshaller) { - attributeType = DynamoDbAttributeType.L; - } else if (marshaller instanceof MapAttributeMarshaller) { - attributeType = DynamoDbAttributeType.M; - } else { - throw new DynamoDbMappingException( - "Unrecognized marshaller type for " + getter + ": " - + marshaller); - } - - // Note, generating the attribute name using this method is not - // actually correct for @DynamoDBFlattened attributes, however, - // its the best that can be done given only the method. The - // proper way to get this information is using the model factory. - final StandardAnnotationMaps.FieldMap annotations = StandardAnnotationMaps.of(getter, null); - final DynamoDbMapperFieldModel.Builder builder = new DynamoDbMapperFieldModel.Builder(void.class, annotations); - builder.with(attributeType); - return builder.build(); - } - - @Override - public AttributeValue convert(Method getter, Object object) { - if (object == null) { - return null; - } - - ArgumentMarshaller marshaller = marshaller(getter); - return marshaller.marshall(object); - } - - @Override - public Map convert(Object object) { - if (object == null) { - return null; - } - - Class clazz = (Class) object.getClass(); - Map result = - new HashMap(); - - for (final Bean bean : StandardBeanProperties.of(clazz).map().values()) { - Object getterResult = bean.reflect().get(object); - if (getterResult != null) { - AttributeValue value = convert(bean.type().getter(), getterResult); - if (value != null) { - result.put(bean.properties().attributeName(), value); - } - } - } - - return result; - } - - private ArgumentMarshaller marshaller(Method getter) { - ArgumentMarshaller marshaller = - marshallerSet.marshaller(getter); - - marshaller = augment(getter.getGenericReturnType(), marshaller); - - return marshaller; - } - - private ArgumentMarshaller memberMarshaller(Type type) { - ArgumentMarshaller marshaller = - marshallerSet.memberMarshaller(type); - - marshaller = augment(type, marshaller); - - return marshaller; - } - - private ArgumentMarshaller augment( - Type type, - ArgumentMarshaller marshaller) { - - if (marshaller instanceof CollectionToListMarshaller) { - return getCollectionToListMarshaller(type); - } - - if (marshaller instanceof MapToMapMarshaller) { - return mapToMapMarshaller(type); - } - if (marshaller instanceof ObjectToMapMarshaller) { - return getObjectToMapMarshaller(type); - } - - return marshaller; - } - - private ArgumentMarshaller getCollectionToListMarshaller(Type type) { - if (!(type instanceof ParameterizedType)) { - throw new DynamoDbMappingException( - "Cannot tell what type of objects belong in the " - + "Collection type " + type + ", which is not " - + "parameterized."); - } - - ParameterizedType ptype = (ParameterizedType) type; - Type[] args = ptype.getActualTypeArguments(); - - if (args == null || args.length != 1) { - throw new DynamoDbMappingException( - "Cannot tell what type of objects belong in the " - + "Collection type " + type + "; unexpected number of " - + "type arguments."); - } - - ArgumentMarshaller memberMarshaller = - memberMarshaller(args[0]); - - return new CollectionToListMarshaller(memberMarshaller); - } - - private ArgumentMarshaller mapToMapMarshaller(Type type) { - if (!(type instanceof ParameterizedType)) { - throw new DynamoDbMappingException( - "Cannot tell what type of objects belong in the Map " - + "type " + type + ", which is not parameterized."); - } - - ParameterizedType ptype = (ParameterizedType) type; - Type[] args = ptype.getActualTypeArguments(); - - if 
(args == null || args.length != 2) { - throw new DynamoDbMappingException( - "Cannot tell what type of objects belong in the Map " - + "type " + type + "; unexpected number of type " - + "arguments."); - } - - if (args[0] != String.class) { - throw new DynamoDbMappingException( - "Only Map is supported."); - } - - ArgumentMarshaller memberMarshaller = - memberMarshaller(args[1]); - - return new MapToMapMarshaller(memberMarshaller); - } - - private ArgumentMarshaller getObjectToMapMarshaller(Type type) { - Type localType = type; - if (localType instanceof ParameterizedType) { - localType = ((ParameterizedType) localType).getRawType(); - } - - if (!(localType instanceof Class)) { - throw new DynamoDbMappingException( - "Cannot convert " + type + " to a class"); - } - - Class clazz = (Class) localType; - if (StandardAnnotationMaps.of(clazz).attributeType() != DynamoDbAttributeType.M) { - throw new DynamoDbMappingException( - "Cannot marshall type " + type - + " without a custom marshaler or @DynamoDBDocument " - + "annotation."); - } - - return new ObjectToMapMarshaller(this); - } - - @Override - public Object unconvert( - Method getter, - Method setter, - AttributeValue value) { - - ArgumentUnmarshaller unmarshaller = getUnmarshaller(getter, setter); - return unmarshall(unmarshaller, setter, value); - } - - @Override - public T unconvert( - Class clazz, - Map value) { - - T result = createObject(clazz); - if (value == null || value.isEmpty()) { - return result; - } - - for (final Bean bean : StandardBeanProperties.of(clazz).map().values()) { - AttributeValue av = value.get(bean.properties().attributeName()); - if (av != null) { - ArgumentUnmarshaller unmarshaller = getUnmarshaller(bean.type().getter(), bean.type().setter()); - Object unmarshalled = unmarshall(unmarshaller, bean.type().setter(), av); - bean.reflect().set(result, unmarshalled); - } - } - - return result; - } - - private ArgumentUnmarshaller getUnmarshaller( - Method getter, - Method setter) { - - ArgumentUnmarshaller unmarshaller = - unmarshallerSet.getUnmarshaller(getter, setter); - - unmarshaller = augment( - setter.getGenericParameterTypes()[0], unmarshaller); - - return new NullableUnmarshaller(unmarshaller); - } - - private ArgumentUnmarshaller memberUnmarshaller(Type type) { - ArgumentUnmarshaller unmarshaller = - unmarshallerSet.memberUnmarshaller(type); - - unmarshaller = augment(type, unmarshaller); - - return new NullableUnmarshaller(unmarshaller); - } - - private ArgumentUnmarshaller augment( - Type type, - ArgumentUnmarshaller unmarshaller) { - - // Inject our s3 client cache if it's an S3LinkUnmarshaller. - if (unmarshaller instanceof S3LinkUnmarshaller) { - return new S3LinkUnmarshaller(s3cc); - } - - // Inject an appropriate member-type unmarshaller if it's a list, - // object-set, or map unmarshaller. - if (unmarshaller instanceof ObjectSetUnmarshaller) { - return getObjectSetUnmarshaller(type); - } - - if (unmarshaller instanceof ListUnmarshaller) { - return listUnmarshaller(type); - } - - if (unmarshaller instanceof MapUnmarshaller) { - return mapUnmarshaller(type); - } - - // Inject ourselves to recursively unmarshall things if it's an - // ObjectUnmarshaller. 
- if (unmarshaller instanceof ObjectUnmarshaller) { - return getObjectUnmarshaller(type); - } - - return unmarshaller; - } - - private ArgumentUnmarshaller getObjectSetUnmarshaller(Type type) { - if (!(type instanceof ParameterizedType)) { - throw new DynamoDbMappingException( - "Cannot tell what type of objects belong in the Set " - + "type " + type + ", which is not parameterized."); - } - - ParameterizedType ptype = (ParameterizedType) type; - Type[] args = ptype.getActualTypeArguments(); - - if (args == null || args.length != 1) { - throw new DynamoDbMappingException( - "Cannot tell what type of objects belong in the Set " - + "type " + type + "; unexpected number of type " - + "arguments."); - } - - ArgumentUnmarshaller memberUnmarshaller = - memberUnmarshaller(args[0]); - - return new ObjectSetUnmarshaller(memberUnmarshaller); - } - - private ArgumentUnmarshaller listUnmarshaller(Type type) { - if (!(type instanceof ParameterizedType)) { - throw new DynamoDbMappingException( - "Cannot tell what type of objects belong in the List " - + "type " + type + ", which is not parameterized."); - } - - ParameterizedType ptype = (ParameterizedType) type; - Type[] args = ptype.getActualTypeArguments(); - - if (args == null || args.length != 1) { - throw new DynamoDbMappingException( - "Cannot tell what type of objects belong in the List " - + "type " + type + "; unexpected number of type " - + "arguments."); - } - - ArgumentUnmarshaller memberUnmarshaller = - memberUnmarshaller(args[0]); - - return new ListUnmarshaller(memberUnmarshaller); - } - - private ArgumentUnmarshaller mapUnmarshaller(Type type) { - if (!(type instanceof ParameterizedType)) { - throw new DynamoDbMappingException( - "Cannot tell what type of objects belong in the Map " - + "type " + type + ", which is not parameterized."); - } - - ParameterizedType ptype = (ParameterizedType) type; - Type[] args = ptype.getActualTypeArguments(); - - if (args == null || args.length != 2) { - throw new DynamoDbMappingException( - "Cannot tell what type of objects belong in the Map " - + "type " + type + "; unexpected number of type " - + "arguments."); - } - - if (args[0] != String.class) { - throw new DynamoDbMappingException( - "Only Map is supported."); - } - - ArgumentUnmarshaller memberUnmarshaller = - memberUnmarshaller(args[1]); - - return new MapUnmarshaller(memberUnmarshaller); - } - - private ArgumentUnmarshaller getObjectUnmarshaller(Type type) { - Type localType = type; - if (localType instanceof ParameterizedType) { - localType = ((ParameterizedType) type).getRawType(); - } - - if (!(localType instanceof Class)) { - throw new DynamoDbMappingException( - "Cannot convert " + type + " to a class"); - } - - Class clazz = (Class) localType; - if (StandardAnnotationMaps.of(clazz).attributeType() != DynamoDbAttributeType.M) { - throw new DynamoDbMappingException( - "Cannot unmarshall to type " + type - + " without a custom marshaler or @DynamoDBDocument " - + "annotation."); - } - - return new ObjectUnmarshaller(this, clazz); - } - - } - - static final class V2MarshallerSet { - - private static List> marshallers() { - List> list = - new ArrayList>(); - - // Use the new V2 boolean marshallers. - addStandardDateMarshallers(list); - addV2BooleanMarshallers(list); - addStandardNumberMarshallers(list); - addStandardStringMarshallers(list); - addStandardBinaryMarshallers(list); - addStandardS3LinkMarshallers(list); - - // Add marshallers for the new list and map types. 
- list.add(Pair.of(List.class, CollectionToListMarshaller.instance())); - list.add(Pair.of(Map.class, MapToMapMarshaller.instance())); - - // Make sure I'm last since I'll catch anything. - list.add(Pair.of(Object.class, ObjectToMapMarshaller.instance())); - - return list; - } - - private static List> setMarshallers() { - List> list = - new ArrayList>(); - - // No more Set -> NS or Set -> SS marshallers - addStandardDateSetMarshallers(list); - addStandardNumberSetMarshallers(list); - addStandardStringSetMarshallers(list); - addStandardBinarySetMarshallers(list); - - // Make sure I'm last since I'll catch anything. - list.add(Pair.of( - Object.class, - CollectionToListMarshaller.instance())); - - return list; - } - } - - static final class V2CompatibleMarshallerSet { - - private static List> marshallers() { - List> list = - new ArrayList>(); - - // Keep the old v1 boolean marshallers for compatibility. - addStandardDateMarshallers(list); - addV1BooleanMarshallers(list); - addStandardNumberMarshallers(list); - addStandardStringMarshallers(list); - addStandardBinaryMarshallers(list); - addStandardS3LinkMarshallers(list); - - // Add marshallers for the new list and map types. - list.add(Pair.of(List.class, CollectionToListMarshaller.instance())); - list.add(Pair.of(Map.class, MapToMapMarshaller.instance())); - - // Make sure I'm last since I'll catch anything. - list.add(Pair.of(Object.class, ObjectToMapMarshaller.instance())); - - return list; - } - - private static List> setMarshallers() { - List> list = - new ArrayList>(); - - addStandardDateSetMarshallers(list); - addV1BooleanSetMarshallers(list); - addStandardNumberSetMarshallers(list); - addStandardStringSetMarshallers(list); - addStandardBinarySetMarshallers(list); - - // If all else fails, fall back to this default marshaler to - // retain backwards-compatible behavior. - list.add(Pair.of(Object.class, ObjectSetToStringSetMarshaller.instance())); - - return list; - } - } - - static final class V1MarshallerSet { - - private static List> marshallers() { - List> list = - new ArrayList>(); - - addStandardDateMarshallers(list); - addV1BooleanMarshallers(list); - addStandardNumberMarshallers(list); - addStandardStringMarshallers(list); - addStandardBinaryMarshallers(list); - addStandardS3LinkMarshallers(list); - - return list; - } - - private static List> setMarshallers() { - List> list = - new ArrayList>(); - - addStandardDateSetMarshallers(list); - addV1BooleanSetMarshallers(list); - addStandardNumberSetMarshallers(list); - addStandardStringSetMarshallers(list); - addStandardBinarySetMarshallers(list); - - // If all else fails, fall back to this default marshaler to - // retain backwards-compatible behavior. 
- list.add(Pair.of(Object.class, - ObjectSetToStringSetMarshaller.instance())); - - return list; - } - } - - private static class AbstractMarshallerSet implements MarshallerSet { - - private final List> marshallers; - private final List> setMarshallers; - - AbstractMarshallerSet( - List> marshallers, - List> setMarshallers) { - - this.marshallers = marshallers; - this.setMarshallers = setMarshallers; - } - - @Override - public ArgumentMarshaller marshaller(Method getter) { - Class returnType = getter.getReturnType(); - - if (Set.class.isAssignableFrom(returnType)) { - Class memberType = - unwrapGenericSetParam(getter.getGenericReturnType()); - - return set(getter, memberType); - } else { - return scalar(getter, returnType); - } - } - - @Override - public ArgumentMarshaller memberMarshaller(Type memberType) { - Class clazz = resolveClass(memberType); - if (Set.class.isAssignableFrom(clazz)) { - Class setMemberType = unwrapGenericSetParam(memberType); - return set(null, setMemberType); - } else { - return scalar(null, clazz); - } - } - - private ArgumentMarshaller scalar(Method getter, Class type) { - ArgumentMarshaller marshaller = find(type, marshallers); - if (marshaller == null) { - - String className = "?"; - String methodName = "?"; - if (getter != null) { - className = getter.getDeclaringClass().toString(); - methodName = getter.getName(); - } - - throw new DynamoDbMappingException( - "Cannot marshall return type " + type - + " of method " + className + "." + methodName - + " without a custom marshaler."); - } - - return marshaller; - } - - private ArgumentMarshaller set(Method getter, Class memberType) { - ArgumentMarshaller marshaller = find(memberType, setMarshallers); - if (marshaller == null) { - - String className = "?"; - String methodName = "?"; - if (getter != null) { - className = getter.getDeclaringClass().toString(); - methodName = getter.getName(); - } - - throw new DynamoDbMappingException( - "Cannot marshall return type Set<" + memberType - + "> of method " + className + "." 
+ methodName - + " without a custom marshaller."); - } - - return marshaller; - } - } - - static class StandardUnmarshallerSet implements UnmarshallerSet { - - private final List> unmarshallers; - private final List> setUnmarshallers; - - StandardUnmarshallerSet() { - this(unmarshallers(), setUnmarshallers()); - } - - StandardUnmarshallerSet( - List> unmarshallers, - List> setUnmarshallers) { - - this.unmarshallers = unmarshallers; - this.setUnmarshallers = setUnmarshallers; - } - - private static List> unmarshallers() { - List> list = - new ArrayList>(); - - list.add(Pair.of(double.class, DoubleUnmarshaller.instance())); - list.add(Pair.of(Double.class, DoubleUnmarshaller.instance())); - - list.add(Pair.of(BigDecimal.class, - BigDecimalUnmarshaller.instance())); - list.add(Pair.of(BigInteger.class, - BigIntegerUnmarshaller.instance())); - - list.add(Pair.of(int.class, IntegerUnmarshaller.instance())); - list.add(Pair.of(Integer.class, IntegerUnmarshaller.instance())); - - list.add(Pair.of(float.class, FloatUnmarshaller.instance())); - list.add(Pair.of(Float.class, FloatUnmarshaller.instance())); - - list.add(Pair.of(byte.class, ByteUnmarshaller.instance())); - list.add(Pair.of(Byte.class, ByteUnmarshaller.instance())); - - list.add(Pair.of(long.class, LongUnmarshaller.instance())); - list.add(Pair.of(Long.class, LongUnmarshaller.instance())); - - list.add(Pair.of(short.class, ShortUnmarshaller.instance())); - list.add(Pair.of(Short.class, ShortUnmarshaller.instance())); - - list.add(Pair.of(boolean.class, BooleanUnmarshaller.instance())); - list.add(Pair.of(Boolean.class, BooleanUnmarshaller.instance())); - - list.add(Pair.of(Date.class, DateUnmarshaller.instance())); - list.add(Pair.of(Calendar.class, CalendarUnmarshaller.instance())); - - list.add(Pair.of(ByteBuffer.class, - ByteBufferUnmarshaller.instance())); - list.add(Pair.of(byte[].class, - ByteArrayUnmarshaller.instance())); - - list.add(Pair.of(S3Link.class, S3LinkUnmarshaller.instance())); - list.add(Pair.of(UUID.class, UuidUnmarshaller.instance())); - list.add(Pair.of(String.class, StringUnmarshaller.instance())); - - list.add(Pair.of(List.class, ListUnmarshaller.instance())); - list.add(Pair.of(Map.class, MapUnmarshaller.instance())); - - // Make sure I'm last since I'll catch all other types. 
- list.add(Pair.of(Object.class, ObjectUnmarshaller.instance())); - - return list; - } - - private static List> setUnmarshallers() { - List> list = - new ArrayList>(); - - list.add(Pair.of(double.class, DoubleSetUnmarshaller.instance())); - list.add(Pair.of(Double.class, DoubleSetUnmarshaller.instance())); - - list.add(Pair.of(BigDecimal.class, - BigDecimalSetUnmarshaller.instance())); - list.add(Pair.of(BigInteger.class, - BigIntegerSetUnmarshaller.instance())); - - list.add(Pair.of(int.class, IntegerSetUnmarshaller.instance())); - list.add(Pair.of(Integer.class, IntegerSetUnmarshaller.instance())); - - list.add(Pair.of(float.class, FloatSetUnmarshaller.instance())); - list.add(Pair.of(Float.class, FloatSetUnmarshaller.instance())); - - list.add(Pair.of(byte.class, ByteSetUnmarshaller.instance())); - list.add(Pair.of(Byte.class, ByteSetUnmarshaller.instance())); - - list.add(Pair.of(long.class, LongSetUnmarshaller.instance())); - list.add(Pair.of(Long.class, LongSetUnmarshaller.instance())); - - list.add(Pair.of(short.class, ShortSetUnmarshaller.instance())); - list.add(Pair.of(Short.class, ShortSetUnmarshaller.instance())); - - list.add(Pair.of(boolean.class, BooleanSetUnmarshaller.instance())); - list.add(Pair.of(Boolean.class, BooleanSetUnmarshaller.instance())); - - list.add(Pair.of(Date.class, DateSetUnmarshaller.instance())); - list.add(Pair.of(Calendar.class, - CalendarSetUnmarshaller.instance())); - - list.add(Pair.of(ByteBuffer.class, - ByteBufferSetUnmarshaller.instance())); - list.add(Pair.of(byte[].class, - ByteArraySetUnmarshaller.instance())); - - list.add(Pair.of(UUID.class, UuidSetUnmarshaller.instance())); - list.add(Pair.of(String.class, StringSetUnmarshaller.instance())); - - // Make sure I'm last since I'll catch all other types. - list.add(Pair.of(Object.class, ObjectSetUnmarshaller.instance())); - - return list; - } - - @Override - public ArgumentUnmarshaller getUnmarshaller( - Method getter, - Method setter) { - - if (setter.getParameterTypes().length != 1) { - throw new DynamoDbMappingException( - "Expected exactly one agument to " + setter); - } - Class paramType = setter.getParameterTypes()[0]; - - if (Set.class.isAssignableFrom(paramType)) { - - paramType = unwrapGenericSetParam( - setter.getGenericParameterTypes()[0]); - - return set(setter, paramType); - - } else { - return scalar(setter, paramType); - } - } - - @Override - public ArgumentUnmarshaller memberUnmarshaller(Type memberType) { - Class clazz = resolveClass(memberType); - if (Set.class.isAssignableFrom(clazz)) { - Class setMemberType = unwrapGenericSetParam(memberType); - return set(null, setMemberType); - } else { - return scalar(null, clazz); - } - } - - private ArgumentUnmarshaller set(Method setter, Class paramType) { - ArgumentUnmarshaller unmarshaller = - find(paramType, setUnmarshallers); - - String className = "?"; - String methodName = "?"; - if (setter != null) { - className = setter.getDeclaringClass().toString(); - methodName = setter.getName(); - } - - if (unmarshaller == null) { - throw new DynamoDbMappingException( - "Cannot unmarshall to parameter type Set<" - + paramType + "> of method " - + className + "." 
+ methodName + " without a custom " - + "unmarshaler."); - } - - return unmarshaller; - } - - private ArgumentUnmarshaller scalar(Method setter, Class type) { - ArgumentUnmarshaller unmarshaller = find(type, unmarshallers); - - String className = "?"; - String methodName = "?"; - if (setter != null) { - className = setter.getDeclaringClass().toString(); - methodName = setter.getName(); - } - - if (unmarshaller == null) { - throw new DynamoDbMappingException( - "Cannot unmarshall to parameter type " + type - + "of method " + className + "." + methodName - + " without a custom unmarshaler."); - } - - return unmarshaller; - } - } - - private static class Pair { - - public final Class key; - public final T value; - - private Pair(Class key, T value) { - this.key = key; - this.value = value; - } - - public static Pair of( - Class key, - ArgumentMarshaller value) { - - return new Pair(key, value); - } - - public static Pair of( - Class key, - ArgumentUnmarshaller value) { - - return new Pair(key, value); - } - } - - static class AnnotationAwareMarshallerSet - implements MarshallerSet { - - private final MarshallerSet wrapped; - - AnnotationAwareMarshallerSet(MarshallerSet wrapped) { - this.wrapped = wrapped; - } - - @Override - public ArgumentMarshaller marshaller(Method getter) { - final StandardAnnotationMaps.FieldMap annotations = StandardAnnotationMaps.of(getter, null); - final DynamoDbMarshalling marshalling = annotations.actualOf(DynamoDbMarshalling.class); - if (marshalling != null) { - return new CustomMarshaller(marshalling.marshallerClass()); - } else if (annotations.actualOf(DynamoDbNativeBoolean.class) != null) { - return BooleanToBooleanMarshaller.instance(); - } - return wrapped.marshaller(getter); - } - - @Override - public ArgumentMarshaller memberMarshaller(Type memberType) { - return wrapped.memberMarshaller(memberType); - } - } - - static class AnnotationAwareUnmarshallerSet - implements UnmarshallerSet { - - private final UnmarshallerSet wrapped; - - AnnotationAwareUnmarshallerSet(UnmarshallerSet wrapped) { - this.wrapped = wrapped; - } - - @Override - public ArgumentUnmarshaller getUnmarshaller( - Method getter, - Method setter) { - final StandardAnnotationMaps.FieldMap annotations = StandardAnnotationMaps.of(getter, null); - final DynamoDbMarshalling marshalling = annotations.actualOf(DynamoDbMarshalling.class); - if (marshalling != null) { - return new CustomUnmarshaller(getter.getReturnType(), marshalling.marshallerClass()); - } - return wrapped.getUnmarshaller(getter, setter); - } - - @Override - public ArgumentUnmarshaller memberUnmarshaller(Type c) { - return wrapped.memberUnmarshaller(c); - } - } - - static class CachingMarshallerSet implements MarshallerSet { - - private final Map cache = - new HashMap(); - - private final Map memberCache = - new HashMap(); - - private final MarshallerSet wrapped; - - CachingMarshallerSet(MarshallerSet wrapped) { - this.wrapped = wrapped; - } - - @Override - public ArgumentMarshaller marshaller(Method getter) { - synchronized (cache) { - ArgumentMarshaller marshaler = cache.get(getter); - if (marshaler != null) { - return marshaler; - } - - marshaler = wrapped.marshaller(getter); - cache.put(getter, marshaler); - return marshaler; - } - } - - @Override - public ArgumentMarshaller memberMarshaller(Type memberType) { - synchronized (memberCache) { - ArgumentMarshaller marshaller = memberCache.get(memberType); - if (marshaller != null) { - return marshaller; - } - - marshaller = wrapped.memberMarshaller(memberType); - 
memberCache.put(memberType, marshaller); - return marshaller; - } - } - } - - static class CachingUnmarshallerSet implements UnmarshallerSet { - - private final Map cache = - new HashMap(); - - private final Map memberCache = - new HashMap(); - - private final UnmarshallerSet wrapped; - - CachingUnmarshallerSet(UnmarshallerSet wrapped) { - this.wrapped = wrapped; - } - - @Override - public ArgumentUnmarshaller getUnmarshaller( - Method getter, - Method setter) { - - synchronized (cache) { - ArgumentUnmarshaller unmarshaler = cache.get(getter); - if (unmarshaler != null) { - return unmarshaler; - } - - unmarshaler = wrapped.getUnmarshaller(getter, setter); - cache.put(getter, unmarshaler); - return unmarshaler; - } - } - - @Override - public ArgumentUnmarshaller memberUnmarshaller(Type memberType) { - synchronized (memberCache) { - ArgumentUnmarshaller unmarshaller = memberCache.get(memberType); - if (unmarshaller != null) { - return unmarshaller; - } - - unmarshaller = wrapped.memberUnmarshaller(memberType); - memberCache.put(memberType, unmarshaller); - return unmarshaller; - } - } - } - - /** - * {@link AttributeValue} converter with {@link ItemConverter} - */ - static class ItemConverterRuleFactory implements RuleFactory { - private final RuleFactory typeConverters; - private final ItemConverter converter; - private final boolean customSchema; - - ItemConverterRuleFactory(DynamoDbMapperConfig config, S3Link.Factory s3Links, RuleFactory typeConverters) { - final ConversionSchema.Dependencies depends = - new ConversionSchema.Dependencies().with(S3ClientCache.class, s3Links.s3ClientCache()); - final ConversionSchema schema = config.getConversionSchema(); - - this.customSchema = (schema != V1 && schema != V2_COMPATIBLE && schema != V2); - this.converter = schema.getConverter(depends); - this.typeConverters = typeConverters; - } - - @Override - public Rule getRule(ConvertibleType type) { - if (customSchema && type.typeConverter() == null) { - return new ItemConverterRule(type); - } else { - return typeConverters.getRule(type); - } - } - - private final class ItemConverterRule implements Rule, DynamoDbTypeConverter { - private final ConvertibleType type; - - private ItemConverterRule(final ConvertibleType type) { - this.type = type; - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return true; - } - - @Override - public DynamoDbTypeConverter newConverter(ConvertibleType type) { - return this; - } - - @Override - public DynamoDbAttributeType getAttributeType() { - try { - return converter.getFieldModel(type.getter()).attributeType(); - } catch (final DynamoDbMappingException no) { - // Ignored or expected. - } - return DynamoDbAttributeType.NULL; - } - - @Override - public AttributeValue convert(final V object) { - return converter.convert(type.getter(), object); - } - - @Override - public V unconvert(final AttributeValue object) { - return (V) converter.unconvert(type.getter(), type.setter(), object); - } - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConversionToAttributeValuesTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConversionToAttributeValuesTest.java deleted file mode 100644 index c17b4012a848..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConversionToAttributeValuesTest.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; - -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class ConversionToAttributeValuesTest { - - private DynamoDbMapperModelFactory models; - private DynamoDbMapperConfig finalConfig; - - public static boolean equals(Object o1, Object o2) { - if (o1 == o2) { - return true; - } - if (o1 != null) { - return o1.equals(o2); - } - return false; - } - - public static int hash(Object... objs) { - int hash = 7; - for (int i = 0; i < objs.length; ++i) { - hash = hash * 31 + objs[i].hashCode(); - } - return hash; - } - - @Before - public void setUp() throws Exception { - finalConfig = new DynamoDbMapperConfig.Builder() - .withTypeConverterFactory(DynamoDbMapperConfig.DEFAULT.getTypeConverterFactory()) - .withConversionSchema(ConversionSchemas.V2) - .build(); - this.models = StandardModelFactories.of(S3Link.Factory.of(null)); - } - - @Test - public void converterFailsForSubProperty() throws Exception { - DynamoDbMapperTableModel tableModel = getTable(ConverterData.class); - Map withSubData = tableModel.convert(new ConverterData()); - assertEquals("bar", tableModel.unconvert(withSubData).subDocument().getaData().value()); - } - - private DynamoDbMapperTableModel getTable(Class clazz) { - return this.models.getTableFactory(finalConfig).getTable(clazz); - } - - @DynamoDbTable(tableName = "test") - public static class ConverterData { - - @DynamoDbTypeConverted(converter = CustomDataConverter.class) - CustomData customConverted; - @DynamoDbHashKey - private String key; - private ConverterSubDocument subDocument; - - public ConverterData() { - customConverted = new CustomData("foo"); - subDocument = new ConverterSubDocument(); - subDocument.setaData(new CustomData("bar")); - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public ConverterSubDocument subDocument() { - return subDocument; - } - - public void setSubDocument(ConverterSubDocument subProperty) { - this.subDocument = subProperty; - } - - public CustomData getCustomConverted() { - return customConverted; - } - - public void setCustomConverted(CustomData customConverted) { - this.customConverted = customConverted; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ConverterData that = (ConverterData) o; - return ConversionToAttributeValuesTest.equals(subDocument, that.subDocument); - } - - @Override - public int hashCode() { - return ConversionToAttributeValuesTest.hash(subDocument); - } - - } - - @DynamoDbDocument - public static class ConverterSubDocument { - - @DynamoDbTypeConverted(converter = CustomDataConverter.class) - private CustomData aData; - - public CustomData getaData() { - return aData; - } - - public void 
setaData(CustomData aData) { - this.aData = aData; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ConverterSubDocument that = (ConverterSubDocument) o; - return ConversionToAttributeValuesTest.equals(aData, that.aData); - } - - @Override - public int hashCode() { - return ConversionToAttributeValuesTest.hash(aData); - } - } - - public static class CustomData { - - private final String value; - - public CustomData(String value) { - this.value = value; - } - - public String value() { - return value; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - CustomData that = (CustomData) o; - return ConversionToAttributeValuesTest.equals(value, that.value); - } - - @Override - public int hashCode() { - return ConversionToAttributeValuesTest.hash(value); - } - } - - public static class CustomDataConverter implements DynamoDbTypeConverter { - - public String convert(CustomData object) { - return object.value(); - } - - public CustomData unconvert(String object) { - return new CustomData(object); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConvertibleType.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConvertibleType.java deleted file mode 100644 index 8fdc4bfd8f39..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ConvertibleType.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperFieldModel.DynamoDbAttributeType; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardAnnotationMaps.TypedMap; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Scalar; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Vector; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; - -/** - * Generic type helper. - */ -@SdkInternalApi -final class ConvertibleType { - - private final DynamoDbTypeConverter typeConverter; - private final DynamoDbAttributeType attributeType; - private final ConvertibleType[] params; - private final Class targetType; - - @Deprecated - private final Method getter; - - @Deprecated - private final Method setter; - - /** - * Constructs a new parameter type. 
- */ - @SuppressWarnings("unchecked") - private ConvertibleType(Type genericType, TypedMap annotations, Method getter) { - this.typeConverter = annotations.typeConverter(); - this.attributeType = annotations.attributeType(); - - if (typeConverter != null) { - final ConvertibleType target = ConvertibleType.of(typeConverter); - this.targetType = target.targetType; - this.params = target.params; - } else if (genericType instanceof ParameterizedType) { - final Type[] paramTypes = ((ParameterizedType) genericType).getActualTypeArguments(); - this.targetType = annotations.targetType(); - this.params = new ConvertibleType[paramTypes.length]; - for (int i = 0; i < paramTypes.length; i++) { - this.params[i] = ConvertibleType.of(paramTypes[i]); - } - } else { - this.targetType = annotations.targetType(); - this.params = new ConvertibleType[0]; - } - - this.setter = getter == null ? null : StandardBeanProperties.MethodReflect.setterOf(getter); - this.getter = getter; - } - - /** - * Returns the conversion type for the method and annotations. - */ - static ConvertibleType of(Method getter, TypedMap annotations) { - return new ConvertibleType(getter.getGenericReturnType(), annotations, getter); - } - - /** - * Returns the conversion type for the converter. - */ - private static ConvertibleType of(final DynamoDbTypeConverter converter) { - final Class clazz = converter.getClass(); - if (!clazz.isInterface()) { - for (Class c = clazz; Object.class != c; c = c.getSuperclass()) { - for (final Type genericType : c.getGenericInterfaces()) { - final ConvertibleType type = ConvertibleType.of(genericType); - if (type.is(DynamoDbTypeConverter.class)) { - if (type.params.length == 2 && type.param(0).targetType() != Object.class) { - return type.param(0); - } - } - } - } - final ConvertibleType type = ConvertibleType.of(clazz.getGenericSuperclass()); - if (type.is(DynamoDbTypeConverter.class)) { - if (type.params.length > 0 && type.param(0).targetType() != Object.class) { - return type.param(0); - } - } - } - throw new DynamoDbMappingException("could not resolve type of " + clazz); - } - - /** - * Returns the conversion type for the generic type. - */ - private static ConvertibleType of(Type genericType) { - final Class targetType; - if (genericType instanceof Class) { - targetType = (Class) genericType; - } else if (genericType instanceof ParameterizedType) { - targetType = (Class) ((ParameterizedType) genericType).getRawType(); - } else if (genericType.toString().equals("byte[]")) { - targetType = (Class) byte[].class; - } else { - targetType = (Class) Object.class; - } - final TypedMap annotations = StandardAnnotationMaps.of(targetType); - return new ConvertibleType(genericType, annotations, null); - } - - /** - * Gets the target custom type-converter. - */ - DynamoDbTypeConverter typeConverter() { - return (DynamoDbTypeConverter) this.typeConverter; - } - - /** - * Gets the overriding attribute type. - */ - DynamoDbAttributeType attributeType() { - return this.attributeType; - } - - /** - * Gets the getter method. - */ - @Deprecated - Method getter() { - return this.getter; - } - - /** - * Gets the setter method. - */ - @Deprecated - Method setter() { - return this.setter; - } - - /** - * Gets the scalar parameter types. - */ - ConvertibleType param(final int index) { - return this.params.length > index ? (ConvertibleType) this.params[index] : null; - } - - /** - * Returns true if the types match. 
- */ - boolean is(ScalarAttributeType scalarAttributeType, Vector vector) { - return param(0) != null && param(0).is(scalarAttributeType) && is(vector); - } - - /** - * Returns true if the types match. - */ - boolean is(ScalarAttributeType scalarAttributeType) { - return Scalar.of(targetType()).is(scalarAttributeType); - } - - /** - * Returns true if the types match. - */ - boolean is(Scalar scalar) { - return scalar.is(targetType()); - } - - /** - * Returns true if the types match. - */ - boolean is(Vector vector) { - return vector.is(targetType()); - } - - /** - * Returns true if the types match. - */ - boolean is(Class type) { - return type.isAssignableFrom(targetType()); - } - - /** - * Gets the raw scalar type. - */ - Class targetType() { - return this.targetType; - } - - /** - * {@inheritDoc} - */ - @Override - public String toString() { - final StringBuilder builder = new StringBuilder(targetType().getSimpleName()); - if (this.params.length > 0) { - builder.append("<"); - for (int i = 0; i < this.params.length; i++) { - builder.append(i == 0 ? "" : ",").append(this.params[i]); - } - builder.append(">"); - } - return builder.toString(); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDb.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDb.java deleted file mode 100644 index 752f78a75661..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDb.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation to mark other annotations as being part of DynamoDB. - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.ANNOTATION_TYPE) -public @interface DynamoDb { -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAttribute.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAttribute.java deleted file mode 100644 index 2204f11207e9..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAttribute.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Interface for marking a class property as an attribute in a DynamoDB table. - * Applied to the getter method or the class field for a modeled property. If - * the annotation is applied directly to the class field, the corresponding - * getter and setter must be declared in the same class. - *

    - * This annotation is optional when the name of the DynamoDB attribute matches - * the name of the property declared in the class. When they differ, use this - * annotation with the attributeName() parameter to specify which DynamoDB - * attribute this property corresponds to. Furthermore, the - * {@link DynamoDbMapper} class assumes Java naming conventions, and will - * lower-case the first character of a getter method's property name to - * determine the name of the property. E.g., a method value() will map to the - * DynamoDB attribute "value". Similarly, a method isValid() maps to the - * DynamoDB attribute "valid". - *
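For illustration of the attributeName() override described above, a minimal sketch; the Customer class and the "tier" attribute name are assumptions for the example, not taken from this file:

@DynamoDbTable(tableName = "Customer")
public class Customer {
    private String id;
    private String loyaltyTier;

    @DynamoDbHashKey
    public String getId() { return id; }
    public void setId(String id) { this.id = id; }

    // Persisted under the DynamoDB attribute "tier" rather than the default "loyaltyTier".
    @DynamoDbAttribute(attributeName = "tier")
    public String getLoyaltyTier() { return loyaltyTier; }
    public void setLoyaltyTier(String loyaltyTier) { this.loyaltyTier = loyaltyTier; }
}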

    - * Even getter method not marked with this annotation are assumed to be modeled - * properties, unless marked with {@link DynamoDbIgnore}. - */ -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbAttribute { - - /** - * Optional parameter when the name of the attribute as stored in DynamoDB - * should differ from the name used by the getter / setter. - */ - String attributeName() default ""; - - /** - * Optional parameter when using {@link DynamoDbFlattened}; identifies - * the field/property name on the target class to map as the attribute. - * @see DynamoDbFlattened - */ - String mappedBy() default ""; -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGenerateStrategy.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGenerateStrategy.java deleted file mode 100644 index 085c350c7890..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGenerateStrategy.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -/** - * Enumeration of possible auto-generation strategies. - * @see DynamoDbAutoGeneratedTimestamp - */ -public enum DynamoDbAutoGenerateStrategy { - - /** - * Instructs to always generate both on create and update. - */ - ALWAYS, - - /** - * Instructs to generate on create only. - */ - CREATE; - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGenerated.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGenerated.java deleted file mode 100644 index fb4638496d31..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGenerated.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation to mark a property as using a custom auto-generator. - * - *

    May be annotated on a user-defined annotation to pass additional - * properties to the {@link DynamoDbAutoGenerator}.

    - * - *
    - * @DynamoDBHashKey
    - * @CustomGeneratedKey(prefix="test-") //<- user-defined annotation
    - * public String getKey()
    - * 
    - * - *

    Where,

    - *
    - * @DynamoDBAutoGenerated(generator=CustomGeneratedKey.Generator.class)
    - * @Retention(RetentionPolicy.RUNTIME)
    - * @Target({ElementType.METHOD})
    - * public @interface CustomGeneratedKey {
    - *     String prefix() default "";
    - *
    - *     public static class Generator implements DynamoDBAutoGenerator<String> {
    - *         private final String prefix;
    - *         public Generator(final Class<String> targetType, final CustomGeneratedKey annotation) {
    - *             this.prefix = annotation.prefix();
    - *         }
    - *         public Generator() { //<- required if annotating directly
    - *             this.prefix = "";
    - *         }
    - *         @Override
    - *         public DynamoDBAutoGenerateStrategy getGenerateStrategy() {
    - *             return DynamoDBAutoGenerateStrategy.CREATE;
    - *         }
    - *         @Override
    - *         public final String generate(final String currentValue) {
    - *             return prefix + UUID.randomUUID().toString();
    - *         }
    - *     }
    - * }
    - * 
    - * - *

    Alternately, the property/field may be annotated directly (which requires - * the generator to provide a default constructor),

    - *
    - * @DynamoDBAutoGenerated(generator=CustomGeneratedKey.Generator.class)
    - * public String getKey()
    - * 
    - * - *

    May be used as a meta-annotation.

    - */ -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) -public @interface DynamoDbAutoGenerated { - - /** - * The auto-generator class for this property. - */ - @SuppressWarnings("rawtypes") - Class generator(); - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGeneratedDefault.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGeneratedDefault.java deleted file mode 100644 index 6d67662b062d..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGeneratedDefault.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation to assign a default value on creation if value is null. - * - *
    - * @DynamoDBAutoGeneratedDefault("OPEN")
    - * public String status()
    - * 
    - * - *

    Only compatible with standard string types.

    - * - */ -@DynamoDb -@DynamoDbAutoGenerated(generator = DynamoDbAutoGeneratedDefault.Generator.class) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbAutoGeneratedDefault { - - /** - * The default value. - */ - String value(); - - - /** - * Default generator. - */ - final class Generator extends DynamoDbAutoGenerator.AbstractGenerator { - private final DynamoDbTypeConverter converter; - private final String defaultValue; - - Generator(Class targetType, DynamoDbAutoGeneratedDefault annotation) { - super(DynamoDbAutoGenerateStrategy.CREATE); - this.converter = StandardTypeConverters.factory().getConverter(targetType, String.class); - this.defaultValue = annotation.value(); - } - - @Override - public T generate(T currentValue) { - return converter.convert(defaultValue); - } - } - - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGeneratedKey.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGeneratedKey.java deleted file mode 100644 index 64d4d0bc4e5b..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGeneratedKey.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation for marking a hash key or range key property in a class to - * auto-generate this key. Only String typed keys can be auto generated, and are - * given a random UUID. The annotation can be applied to either the getter - * method or the class field for the auto-generated key property. If the - * annotation is applied directly to the class field, the corresponding getter - * and setter must be declared in the same class. This annotation can be applied - * to both primary and index keys. - * - * @see DynamoDbGeneratedUuid - * @see java.util.UUID - */ -@DynamoDbGeneratedUuid(DynamoDbAutoGenerateStrategy.CREATE) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbAutoGeneratedKey { - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGeneratedTimestamp.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGeneratedTimestamp.java deleted file mode 100644 index 553573def7b8..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGeneratedTimestamp.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.Date; - -/** - * Annotation for auto-generating a date/timestamp. - * - *
    - * @DynamoDBAutoGeneratedTimestamp(strategy=DynamoDBAutoGenerateStrategy.CREATE)
    - * public Date getCreatedDate() { return createdDate; }
    - * public void setCreatedDate(Date createdDate) { this.createdDate = createdDate; }
    - *
    - * @DynamoDBAutoGeneratedTimestamp(strategy=DynamoDBAutoGenerateStrategy.ALWAYS)
    - * public Date lastUpdatedDate() { return lastUpdatedDate; }
    - * public void setLastUpdatedDate(Date lastUpdatedDate) { this.lastUpdatedDate = lastUpdatedDate; }
    - * 
    - * - *

    Supports the standard {@link Date} type-conversions, such as - * {@link java.util.Calendar} and {@link Long}.

    - * - *

    Primitives such as {@code long} are not supported since the unset - * (or null) state can't be detected.

    - * - *

    Compatible with {@link DynamoDbTypeConvertedTimestamp}

    - */ -@DynamoDbAutoGenerated(generator = DynamoDbAutoGeneratedTimestamp.Generator.class) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbAutoGeneratedTimestamp { - - /** - * The auto-generation strategy; default is {@code ALWAYS}. - * @see DynamoDbAutoGenerateStrategy - */ - DynamoDbAutoGenerateStrategy strategy() default DynamoDbAutoGenerateStrategy.ALWAYS; - - /** - * Default generator. - */ - final class Generator extends DynamoDbAutoGenerator.AbstractGenerator { - private final DynamoDbTypeConverter converter; - - Generator(Class targetType, DynamoDbAutoGeneratedTimestamp annotation) { - super(annotation.strategy()); - this.converter = StandardTypeConverters.factory().getConverter(targetType, Date.class); - } - - @Override - public T generate(T currentValue) { - return converter.convert(new Date()); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGenerator.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGenerator.java deleted file mode 100644 index 56c1dd17b6ac..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbAutoGenerator.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import software.amazon.awssdk.annotations.SdkInternalApi; - -/** - * Generator interface for auto-generating attribute values. - * - *

    Auto-generation may be controlled by {@link DynamoDbAutoGenerateStrategy}, - - *

    {@link DynamoDbAutoGenerateStrategy#CREATE} instructs the mapper to generate a value when - * creating the item. The mapper determines that an item is new, rather than overwriting, - * if its current value is {@code null}. There is a limitation when performing - * partial updates using either - * {@link DynamoDbMapperConfig.SaveBehavior#UPDATE_SKIP_NULL_ATTRIBUTES} or - * {@link DynamoDbMapperConfig.SaveBehavior#APPEND_SET}: a new value will only - * be generated if the mapper is also generating the key.

    - * - *

    {@link DynamoDbAutoGenerateStrategy#ALWAYS}, instructs to always generate - * a new value, applied on any save or batch write operation. - * - *

    May be used in combination with {@link DynamoDbAutoGenerated}.
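As a sketch of the contract described above, a custom generator might extend the AbstractGenerator base; the EpochSecondsGenerator name and the Long value type are assumptions for illustration, and the generic parameters follow the standard mapper signature:

public final class EpochSecondsGenerator extends DynamoDbAutoGenerator.AbstractGenerator<Long> {

    public EpochSecondsGenerator() {
        // ALWAYS: regenerate the value on every save or batch write.
        super(DynamoDbAutoGenerateStrategy.ALWAYS);
    }

    @Override
    public Long generate(Long currentValue) {
        // Ignore the current value and stamp the current epoch seconds.
        return System.currentTimeMillis() / 1000L;
    }
}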

    - * - * @param The object's field/property value type. - * - * @see DynamoDbAutoGenerated - */ -public interface DynamoDbAutoGenerator { - - /** - * Gets the auto-generate strategy. - */ - DynamoDbAutoGenerateStrategy getGenerateStrategy(); - - /** - * Generates a new value given the current value (or null) if applicable. - */ - T generate(T currentValue); - - /** - * A generator which holds the {@link DynamoDbAutoGenerateStrategy}. - */ - @SdkInternalApi - abstract class AbstractGenerator implements DynamoDbAutoGenerator { - private final DynamoDbAutoGenerateStrategy strategy; - - protected AbstractGenerator(DynamoDbAutoGenerateStrategy strategy) { - this.strategy = strategy; - } - - @Override - public DynamoDbAutoGenerateStrategy getGenerateStrategy() { - return this.strategy; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbConvertedBool.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbConvertedBool.java deleted file mode 100644 index 85020102d6a0..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbConvertedBool.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation to convert a {@link Boolean} to the DynamoDB {@code S} type. - * - *
    - * @DynamoDBConvertedBool(DynamoDBConvertedBool.Format.Y_N)
    - * public boolean isTesting()
    - * 
    - * - *

    The standard V1 and V2 compatible conversion schemas will, by default, - * serialize booleans using the DynamoDB {@code N} type, with a value of '1' - * representing 'true' and a value of '0' representing 'false'. To force the - * {@code N} conversion in other schemas, - *

    - * @DynamoDBTyped(DynamoDBAttributeType.N)
    - * public boolean isTesting()
    - * 
    - * - *

    The standard V2 conversion schema will by default serialize booleans - * natively using the DynamoDB {@code BOOL} type. To force the native - * {@code BOOL} conversion in other schemas, - *

    - * @DynamoDBTyped(DynamoDBAttributeType.BOOL)
    - * public boolean isTesting()
    - * 
    - * - *

    May be used as a meta-annotation.
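A sketch of the meta-annotation usage noted above: a user-defined annotation (the @YesNoBoolean name is an assumption) carries the conversion so it can be reused across models:

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Reusable marker: any boolean property annotated with @YesNoBoolean is stored as "Y"/"N".
@DynamoDbConvertedBool(DynamoDbConvertedBool.Format.Y_N)
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD, ElementType.METHOD})
public @interface YesNoBoolean {
}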

    - */ -@DynamoDb -@DynamoDbTypeConverted(converter = DynamoDbConvertedBool.Converter.class) -@DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.S) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) -public @interface DynamoDbConvertedBool { - - /** - * The format type for converting to and from {@link String}. - */ - Format value(); - - - - /** - * Enumeration of the supported format options. - */ - enum Format { - true_false, T_F, Y_N - } - - /** - * Boolean type converter. - */ - final class Converter implements DynamoDbTypeConverter { - private final String valueTrue; - private final String valueFalse; - - Converter(Class targetType, DynamoDbConvertedBool annotation) { - this.valueTrue = annotation.value().name().split("_")[0]; - this.valueFalse = annotation.value().name().split("_")[1]; - } - - @Override - public String convert(final Boolean object) { - return Boolean.TRUE.equals(object) ? valueTrue : valueFalse; - } - - @Override - public Boolean unconvert(final String object) { - return valueTrue.equals(object) ? Boolean.TRUE : Boolean.FALSE; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbDeleteExpression.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbDeleteExpression.java deleted file mode 100644 index 63669c53b280..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbDeleteExpression.java +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; - -/** - * Enables adding options to a delete operation. - * For example, you may want to delete only if an attribute has a particular value. - * @see DynamoDbMapper#delete(Object, DynamoDbDeleteExpression) - */ -public class DynamoDbDeleteExpression { - - /** Optional expected attributes. */ - private Map expectedAttributes; - - /** The logical operator on the expected attribute conditions. */ - private String conditionalOperator; - - /** - * A condition that must be satisfied in order for a conditional - * DeleteItem to succeed. - */ - private String conditionExpression; - - /** - * One or more substitution variables for simplifying complex - * expressions. - */ - private java.util.Map expressionAttributeNames; - - /** - * One or more values that can be substituted in an expression. 
- */ - private java.util.Map expressionAttributeValues; - - /** - * Gets the map of attribute names to expected attribute values to check on delete. - * - * @return The map of attribute names to expected attribute value conditions to check on delete - */ - public Map getExpected() { - return expectedAttributes; - } - - /** - * Sets the expected condition to the map of attribute names to expected attribute values given. - * - * @param expectedAttributes - * The map of attribute names to expected attribute value conditions to check on delete - */ - public void setExpected(Map expectedAttributes) { - this.expectedAttributes = expectedAttributes; - } - - /** - * Sets the expected condition to the map of attribute names to expected - * attribute values given and returns a pointer to this object for - * method-chaining. - * - * @param expectedAttributes - * The map of attribute names to expected attribute value - * conditions to check on delete - */ - public DynamoDbDeleteExpression withExpected(Map expectedAttributes) { - setExpected(expectedAttributes); - return this; - } - - /** - * Adds one entry to the expected conditions and returns a pointer to this - * object for method-chaining. - * - * @param attributeName - * The name of the attribute. - * @param expected - * The expected attribute value. - */ - public DynamoDbDeleteExpression withExpectedEntry(String attributeName, ExpectedAttributeValue expected) { - if (expectedAttributes == null) { - expectedAttributes = new HashMap(); - } - expectedAttributes.put(attributeName, expected); - return this; - } - - /** - * Returns the logical operator on the expected attribute conditions of this - * delete operation. - */ - public String getConditionalOperator() { - return conditionalOperator; - } - - /** - * Sets the logical operator on the expected attribute conditions of this - * delete operation. - */ - public void setConditionalOperator(String conditionalOperator) { - this.conditionalOperator = conditionalOperator; - } - - /** - * Sets the logical operator on the expected attribute conditions of this - * delete operation. - */ - public void setConditionalOperator(ConditionalOperator conditionalOperator) { - setConditionalOperator(conditionalOperator.toString()); - } - - /** - * Sets the logical operator on the expected attribute conditions of this - * delete operation and returns a pointer to this object for - * method-chaining. - */ - public DynamoDbDeleteExpression withConditionalOperator(String conditionalOperator) { - setConditionalOperator(conditionalOperator); - return this; - } - - /** - * Sets the logical operator on the expected attribute conditions of this - * delete operation and returns a pointer to this object for - * method-chaining. - */ - public DynamoDbDeleteExpression withConditionalOperator(ConditionalOperator conditionalOperator) { - return withConditionalOperator(conditionalOperator.toString()); - } - - /** - * A condition that must be satisfied in order for a conditional DeleteItem - * to succeed. - * - * @see DeleteItemRequest#getConditionExpression() - */ - public String getConditionExpression() { - return conditionExpression; - } - - /** - * A condition that must be satisfied in order for a conditional DeleteItem - * to succeed. - * - * @see DeleteItemRequest#setConditionExpression() - */ - public void setConditionExpression(String conditionExpression) { - this.conditionExpression = conditionExpression; - } - - /** - * A condition that must be satisfied in order for a conditional DeleteItem - * to succeed. 
- * - * @return A reference to this updated object so that method calls can be - * chained together. - * - * @see DeleteItemRequest#withConditionExpression(String) - */ - public DynamoDbDeleteExpression withConditionExpression( - String conditionExpression) { - this.conditionExpression = conditionExpression; - return this; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * - * @return One or more substitution variables for simplifying complex - * expressions. - * @see DeleteItemRequest#getExpressionAttributeNames() - */ - public java.util.Map getExpressionAttributeNames() { - - return expressionAttributeNames; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * - * @param expressionAttributeNames - * One or more substitution variables for simplifying complex - * expressions. - * @see DeleteItemRequest#setExpressionAttributeNames(Map) - */ - public void setExpressionAttributeNames( - java.util.Map expressionAttributeNames) { - this.expressionAttributeNames = expressionAttributeNames; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * - * @param expressionAttributeNames - * One or more substitution variables for simplifying complex - * expressions. - * - * @return A reference to this updated object so that method calls can be - * chained together. - * @see DeleteItemRequest#withExpressionAttributeNames(Map) - */ - public DynamoDbDeleteExpression withExpressionAttributeNames( - java.util.Map expressionAttributeNames) { - setExpressionAttributeNames(expressionAttributeNames); - return this; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * The method adds a new key-value pair into ExpressionAttributeNames - * parameter, and returns a reference to this object so that method calls - * can be chained together. - * - * @param key - * The key of the entry to be added into - * ExpressionAttributeNames. - * @param value - * The corresponding value of the entry to be added into - * ExpressionAttributeNames. - * - * @see DeleteItemRequest#addExpressionAttributeNamesEntry(String, String) - */ - public DynamoDbDeleteExpression addExpressionAttributeNamesEntry( - String key, String value) { - if (null == this.expressionAttributeNames) { - this.expressionAttributeNames = new java.util.HashMap(); - } - if (this.expressionAttributeNames.containsKey(key)) { - throw new IllegalArgumentException("Duplicated keys (" + key + ") are provided."); - } - this.expressionAttributeNames.put(key, value); - return this; - } - - /** - * Removes all the entries added into ExpressionAttributeNames. - *

    - * Returns a reference to this object so that method calls can be chained - * together. - */ - public DynamoDbDeleteExpression clearExpressionAttributeNamesEntries() { - this.expressionAttributeNames = null; - return this; - } - - /** - * One or more values that can be substituted in an expression. - * - * @return One or more values that can be substituted in an expression. - * @see DeleteItemRequest#getExpressionAttributeValues() - */ - public java.util.Map getExpressionAttributeValues() { - return expressionAttributeValues; - } - - /** - * One or more values that can be substituted in an expression. - * - * @param expressionAttributeValues - * One or more values that can be substituted in an expression. - * - * @see DeleteItemRequest#setExpressionAttributeValues(Map) - */ - public void setExpressionAttributeValues( - java.util.Map expressionAttributeValues) { - this.expressionAttributeValues = expressionAttributeValues; - } - - /** - * One or more values that can be substituted in an expression. - * - * @param expressionAttributeValues - * One or more values that can be substituted in an expression. - * - * @return A reference to this updated object so that method calls can be - * chained together. - * @see DeleteItemRequest#withExpressionAttributeValues(Map) - */ - public DynamoDbDeleteExpression withExpressionAttributeValues( - java.util.Map expressionAttributeValues) { - setExpressionAttributeValues(expressionAttributeValues); - return this; - } - - /** - * One or more values that can be substituted in an expression. The method - * adds a new key-value pair into ExpressionAttributeValues parameter, and - * returns a reference to this object so that method calls can be chained - * together. - * - * @param key - * The key of the entry to be added into - * ExpressionAttributeValues. - * @param value - * The corresponding value of the entry to be added into - * ExpressionAttributeValues. - * - * @see DeleteItemRequest#addExpressionAttributeValuesEntry(String, - * AttributeValue) - */ - public DynamoDbDeleteExpression addExpressionAttributeValuesEntry( - String key, AttributeValue value) { - if (null == this.expressionAttributeValues) { - this.expressionAttributeValues = new java.util.HashMap(); - } - if (this.expressionAttributeValues.containsKey(key)) { - throw new IllegalArgumentException("Duplicated keys (" + key + ") are provided."); - } - this.expressionAttributeValues.put(key, value); - return this; - } - - /** - * Removes all the entries added into ExpressionAttributeValues. - *

    - * Returns a reference to this object so that method calls can be chained - * together. - */ - public DynamoDbDeleteExpression clearExpressionAttributeValuesEntries() { - this.expressionAttributeValues = null; - return this; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbDelimited.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbDelimited.java deleted file mode 100644 index fe7f91664570..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbDelimited.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.regex.Pattern; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardBeanProperties.Bean; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardBeanProperties.BeanMap; - -/** - * Annotation to convert an object into a single delimited {@link String} - * attribute. - * - *

    - * @DynamoDBDelimited(
    - *     attributeNames={"areaCode","exchange","subscriber"},
    - *     delimiter='-'
    - * )
    - * public PhoneNumber getPhoneNumber()
    - * 
    - * - *

    Where,

    - *
    - * public class PhoneNumber {
    - *     private String areaCode;
    - *     private String exchange;
    - *     private String subscriber;
    - *
    - *     public String getAreaCode() { return areaCode; }
    - *     public void setAreaCode(String areaCode) { this.areaCode = areaCode; }
    - *
    - *     public String getExchange() { return exchange; }
    - *     public void setExchange(String exchange) { this.exchange = exchange; }
    - *
    - *     public String subscriber() { return subscriber; }
    - *     public void setSubscriber(String subscriber) { this.subscriber = subscriber; }
    - * }
    - * 
    - * - *

    Would write,

    - *
      - *
    • PhoneNumber("206","266","1000") = "206-266-1000"
    • - *
    • PhoneNumber("206",null,"1000") = "206--1000"
    • - *
    • PhoneNumber("206",null,null) = "206--"
    • - *
    • PhoneNumber(null,"266","1000") = "-266-1000"
    • - *
    • PhoneNumber(null,"266",null) = "-266-"
    • - *
    • PhoneNumber(null,null,"1000") = "--1000"
    • - *
    • PhoneNumber(null,null,null) = null
    • - *
    • null = null
    • - *
    - * - * Conversely, reading not fully formatted values from DynamoDB given, - *
      - *
    • "" = empty string not allowed by DDB but would produce empty object
    • - *
    • "--" = PhoneNumber(null,null,null)
    • - *
    • "-----" = PhoneNumber(null,null,null)
    • - *
    • "206" = PhoneNumber("206",null,null)
    • - *
    • "206-266" = PhoneNumber("206","266",null)
    • - *
    • "206-266-1000-1234-5678" = PhoneNumber("206","266","1000")
    • - *
    - * - *

    The converter does not protect against values which may also contain the - * delimiter. If more advanced conversion is required, consider implementing - * a custom {@link DynamoDbTypeConverter}.
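For instance, a custom converter along the lines suggested above could escape embedded delimiters before joining; this sketch reuses the PhoneNumber example and assumes a backslash-escaping scheme:

public final class EscapedPhoneNumberConverter implements DynamoDbTypeConverter<String, PhoneNumber> {

    private static final String DELIMITER = "-";

    @Override
    public String convert(PhoneNumber number) {
        // Escape any embedded delimiter in each part so unconvert() can split safely.
        return escape(number.getAreaCode()) + DELIMITER
               + escape(number.getExchange()) + DELIMITER
               + escape(number.subscriber());
    }

    @Override
    public PhoneNumber unconvert(String value) {
        // Split on delimiters that are not preceded by the escape character.
        String[] parts = value.split("(?<!\\\\)" + DELIMITER, -1);
        PhoneNumber number = new PhoneNumber();
        number.setAreaCode(unescape(parts.length > 0 ? parts[0] : ""));
        number.setExchange(unescape(parts.length > 1 ? parts[1] : ""));
        number.setSubscriber(unescape(parts.length > 2 ? parts[2] : ""));
        return number;
    }

    private static String escape(String part) {
        return part == null ? "" : part.replace(DELIMITER, "\\" + DELIMITER);
    }

    private static String unescape(String part) {
        return part.isEmpty() ? null : part.replace("\\" + DELIMITER, DELIMITER);
    }
}

Such a converter would then be attached with @DynamoDbTypeConverted(converter = EscapedPhoneNumberConverter.class) in place of the delimited annotation.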

    - * - *

    New delimited values may always be appended to the string; however, there - * are some risks in distributed systems where, if one system has updated - * delimiting instructions and begins to persist new values, other systems - * which also persist that same data would effectively truncate it back to the - * original format.

    - * - *

    Auto-generated annotations are not supported on field/property.

    - * - *

    Type-converted annotations, annotated by {@link DynamoDbTypeConverted}, - * where the output type is {@link String}, are supported. - * - *

    May be used as a meta-annotation.

    - */ -@DynamoDb -@DynamoDbTypeConverted(converter = DynamoDbDelimited.Converter.class) -@DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.S) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) -public @interface DynamoDbDelimited { - - /** - * The delimiter for separating attribute values; default is |. - */ - char delimiter() default '|'; - - /** - * The ordered list of attribute/field names. - */ - String[] attributeNames(); - - /** - * Type converter for string delimited attributes. - */ - final class Converter implements DynamoDbTypeConverter { - private final Field[] fields; - private final Class targetType; - private final String delimiter; - - Converter(Class targetType, DynamoDbDelimited annotation) { - final BeanMap beans = new BeanMap(targetType, true); - - final String[] names = annotation.attributeNames(); - if (names.length <= 1) { - throw new DynamoDbMappingException(targetType + - " missing attributeNames in @DynamoDBDelimited; must specify two or " + - "more attribute names"); - } - - this.delimiter = String.valueOf(annotation.delimiter()); - this.fields = new Field[names.length]; - this.targetType = targetType; - - for (int i = 0; i < fields.length; i++) { - if (beans.containsKey(names[i]) == false) { - throw new DynamoDbMappingException(targetType + " does not map %s on model " + names[i]); - } - this.fields[i] = new Field(targetType, beans.get(names[i])); - } - } - - @Override - public String convert(final T object) { - final StringBuilder string = new StringBuilder(); - for (int i = 0; i < fields.length; i++) { - if (i > 0) { - string.append(delimiter); - } - final String value = fields[i].get(object); - if (value != null) { - if (value.contains(delimiter)) { - throw new DynamoDbMappingException(String.format( - "%s[%s] field value \"%s\" must not contain delimiter %s", - targetType, fields[i].bean.properties().attributeName(), value, delimiter - )); - } - string.append(value); - } - } - return string.length() < fields.length ? 
null : string.toString(); - } - - @Override - public T unconvert(final String string) { - final T object = StandardBeanProperties.DeclaringReflect.newInstance(targetType); - final String[] values = string.split(Pattern.quote(delimiter)); - for (int i = 0, its = Math.min(fields.length, values.length); i < its; i++) { - fields[i].set(object, values[i]); - } - return object; - } - - private static final class Field { - private final DynamoDbTypeConverter converter; - private final Bean bean; - - private Field(final Class type, final Bean bean) { - if (bean.type().typeConverter() == null) { - this.converter = StandardTypeConverters.factory().getConverter(String.class, bean.type().targetType()); - } else { - this.converter = bean.type().typeConverter(); - } - this.bean = bean; - } - - private String get(final T object) { - final V value = bean.reflect().get(object); - if (value == null) { - return null; - } - return converter.convert(value); - } - - private void set(final T object, final String string) { - if (!string.isEmpty()) { - final V value = converter.unconvert(string); - if (value != null) { - bean.reflect().set(object, value); - } - } - } - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbDocument.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbDocument.java deleted file mode 100644 index 3ba251997c4b..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbDocument.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Inherited; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * An annotation that marks a class which can be serialized to a DynamoDB - * document or sub-document. Behaves exactly the same as {@link DynamoDbTable}, - * but without requiring you to specify a {@code tableName}. - */ -@DynamoDb -@DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.M) -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.TYPE) -@Inherited -public @interface DynamoDbDocument { - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbFlattened.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbFlattened.java deleted file mode 100644 index b8fec9e0febb..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbFlattened.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation for flattening a complex type. - * - *
    - * @DynamoDBFlattened(attributes={
    - *     @DynamoDBAttribute(mappedBy="start", attributeName="effectiveStartDate"),
    - *     @DynamoDBAttribute(mappedBy="end", attributeName="effectiveEndDate")})
    - * public DateRange getEffectiveRange() { return effectiveRange; }
    - * public void setEffectiveRange(DateRange effectiveRange) { this.effectiveRange = effectiveRange; }
    - *
    - * @DynamoDBFlattened(attributes={
    - *     @DynamoDBAttribute(mappedBy="start", attributeName="extensionStartDate"),
    - *     @DynamoDBAttribute(mappedBy="end", attributeName="extensionEndDate")})
    - * public DateRange getExtensionRange() { return extensionRange; }
    - * public void setExtensionRange(DateRange extensionRange) { this.extensionRange = extensionRange; }
    - * 
    - * - *

    Where,

    - *
    - * public class DateRange {
    - *     private Date start;
    - *     private Date end;
    - *
    - *     public Date start() { return start; }
    - *     public void setStart(Date start) { this.start = start; }
    - *
    - *     public Date getEnd() { return end; }
    - *     public void setEnd(Date end) { this.end = end; }
    - * }
    - * 
    - * - *

    Attributes defined within the complex type may also be annotated.

    - * - *

    May be used as a meta-annotation.

    - */ -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) -public @interface DynamoDbFlattened { - - /** - * Indicates the attributes that should be flattened. - */ - DynamoDbAttribute[] attributes() default {}; - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbGeneratedUuid.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbGeneratedUuid.java deleted file mode 100644 index d3dd5dec6ddc..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbGeneratedUuid.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.UUID; - -/** - * Annotation for auto-generating a {@link UUID}. - * - *
    - * @DynamoDBGeneratedUuid(DynamoDBAutoGenerateStrategy.CREATE)
    - * public UUID getKey()
    - * 
    - * - *

    When applied to a key field, only the strategy - * {@link DynamoDbAutoGenerateStrategy#CREATE} is supported.

    - * - *

    The short-form {@link DynamoDbAutoGeneratedKey} may also be used for - * create-only generation.

    - * - *

    May be used as a meta-annotation.

    - * - * @see java.util.UUID - */ -@DynamoDb -@DynamoDbAutoGenerated(generator = DynamoDbGeneratedUuid.Generator.class) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) -public @interface DynamoDbGeneratedUuid { - - /** - * The auto-generation strategy. - */ - DynamoDbAutoGenerateStrategy value(); - - /** - * Default generator. - */ - final class Generator extends DynamoDbAutoGenerator.AbstractGenerator { - private final DynamoDbTypeConverter converter; - - Generator(Class targetType, DynamoDbGeneratedUuid annotation) { - super(annotation.value()); - this.converter = StandardTypeConverters.factory().getConverter(targetType, UUID.class); - } - - @Override - public T generate(final T currentValue) { - return converter.convert(UUID.randomUUID()); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbHashKey.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbHashKey.java deleted file mode 100644 index fcb5473b6684..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbHashKey.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation for marking a property as the hash key for a modeled class. - * Applied to the getter method or the class field for a hash key property. If - * the annotation is applied directly to the class field, the corresponding - * getter and setter must be declared in the same class. - *

    - * This annotation is required. - */ -@DynamoDb -@DynamoDbKeyed(software.amazon.awssdk.services.dynamodb.model.KeyType.HASH) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbHashKey { - - /** - * Optional parameter when the name of the attribute as stored in DynamoDB - * should differ from the name used by the getter / setter. - */ - String attributeName() default ""; -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbIgnore.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbIgnore.java deleted file mode 100644 index 3768eafb0645..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbIgnore.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation for marking a class property as non-modeled. Applied to the getter - * method or the class field for a non-modeled property. If the annotation is - * applied directly to the class field, the corresponding getter and setter must - * be declared in the same class. - *

    - * All getter methods not marked with this annotation are assumed to be modeled - * properties and included in any save() requests. - */ -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbIgnore { - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbIndexHashKey.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbIndexHashKey.java deleted file mode 100644 index 820dabe58360..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbIndexHashKey.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation for marking a property in a class as the attribute to be used as - * the hash key for one or more global secondary indexes on a DynamoDB table. - * Applied to the getter method or the class field for the index hash key - * property. If the annotation is applied directly to the class field, the - * corresponding getter and setter must be declared in the same class. - *
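For illustration, a sketch of marking a property as the hash key of a global secondary index with DynamoDbIndexHashKey; the User class, property names, and the "email-index" index name are hypothetical:

    public class User {
        private String userId;
        private String email;

        @DynamoDbHashKey
        public String getUserId() { return userId; }
        public void setUserId(String userId) { this.userId = userId; }

        // email is the hash key of the hypothetical "email-index" global secondary index.
        @DynamoDbIndexHashKey(globalSecondaryIndexName = "email-index")
        public String getEmail() { return email; }
        public void setEmail(String email) { this.email = email; }
    }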

    - * This annotation is required if this attribute will be used as index key for - * item queries. - */ -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbIndexHashKey { - - /** - * Optional parameter when the name of the attribute as stored in DynamoDB - * should differ from the name used by the getter / setter. - */ - String attributeName() default ""; - - /** - * Parameter for the name of the global secondary index. - *

    - * This is required if this attribute is the index key for only one global secondary - * index. - */ - String globalSecondaryIndexName() default ""; - - /** - * Parameter for the names of the global secondary indexes. - * This is required if this attribute is the index key for multiple global secondary - * indexes. - */ - String[] globalSecondaryIndexNames() default {}; - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbIndexRangeKey.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbIndexRangeKey.java deleted file mode 100644 index f107796dbb63..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbIndexRangeKey.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation for marking a property in a class as the attribute to be used as - * range key for one or more local secondary indexes on a DynamoDB table. - * Applied to the getter method or the class field for the indexed range key - * property. If the annotation is applied directly to the class field, the - * corresponding getter and setter must be declared in the same class. - *
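Correspondingly, a sketch of marking a property as the range key of a local secondary index with DynamoDbIndexRangeKey; the Order class, property names, and the "orderDate-index" index name are hypothetical:

    public class Order {
        private String customerId;
        private String orderDate;

        @DynamoDbHashKey
        public String getCustomerId() { return customerId; }
        public void setCustomerId(String customerId) { this.customerId = customerId; }

        // orderDate is the range key of the hypothetical "orderDate-index" local secondary index.
        @DynamoDbIndexRangeKey(localSecondaryIndexName = "orderDate-index")
        public String getOrderDate() { return orderDate; }
        public void setOrderDate(String orderDate) { this.orderDate = orderDate; }
    }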

    - * This annotation is required if this attribute will be used as index key for - * item queries. - */ -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbIndexRangeKey { - - /** - * Optional parameter when the name of the attribute as stored in DynamoDB - * should differ from the name used by the getter / setter. - */ - String attributeName() default ""; - - /** - * Parameter for the name of the local secondary index. - *

    - * This is required if this attribute is the index key for only one local secondary - * index. - */ - String localSecondaryIndexName() default ""; - - /** - * Parameter for the names of the local secondary indexes. - *

    - * This is required if this attribute is the index key for multiple local secondary - * indexes. - */ - String[] localSecondaryIndexNames() default {}; - - /** - * Parameter for the name of the global secondary index. - *

    - * This is required if this attribute is the index key for only one global secondary - * index. - */ - String globalSecondaryIndexName() default ""; - - /** - * Parameter for the names of the global secondary indexes. - *

    - * This is required if this attribute is the index key for multiple global secondary - * indexes. - */ - String[] globalSecondaryIndexNames() default {}; - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbKeyed.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbKeyed.java deleted file mode 100644 index f22b8a490de1..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbKeyed.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import software.amazon.awssdk.services.dynamodb.model.KeyType; - -/** - * Annotation for marking a property a key for a modeled class. - * - *

    - * @DynamoDbKeyed(KeyType.HASH)
    - * public UUID getKey()
    - * 
    - * - *

    Alternatively, the short-form {@link DynamoDbHashKey} and - * {@link DynamoDbRangeKey} annotations may be used directly on the field/getter.

    - * - *

    May be used as a meta-annotation.
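For example, a custom key annotation can be declared by meta-annotating it with DynamoDbKeyed, mirroring how DynamoDbHashKey itself is declared in this package; the MyHashKey name is hypothetical and the imports are the same as in the surrounding file:

    @DynamoDb
    @DynamoDbKeyed(KeyType.HASH)
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.FIELD, ElementType.METHOD})
    public @interface MyHashKey {
    }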

    - */ -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) -public @interface DynamoDbKeyed { - - /** - * The primary key type; either {@code HASH} or {@code RANGE}. - */ - KeyType value(); - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapper.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapper.java deleted file mode 100644 index cf0ba352910e..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapper.java +++ /dev/null @@ -1,2367 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static java.util.stream.Collectors.toMap; -import static software.amazon.awssdk.services.dynamodb.model.KeyType.HASH; -import static software.amazon.awssdk.services.dynamodb.model.KeyType.RANGE; - -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.awscore.AwsRequest; -import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.exception.SdkServiceException; -import software.amazon.awssdk.core.retry.RetryUtils; -import software.amazon.awssdk.core.util.SdkAutoConstructMap; -import software.amazon.awssdk.core.util.VersionInfo; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.BatchLoadRetryStrategy; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.BatchWriteRetryStrategy; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.ConsistentRead; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.SaveBehavior; -import software.amazon.awssdk.services.dynamodb.model.AttributeAction; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate; -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemResponse; -import 
software.amazon.awssdk.services.dynamodb.model.BatchWriteItemRequest; -import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemResponse; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; -import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeysAndAttributes; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.model.PutItemResponse; -import software.amazon.awssdk.services.dynamodb.model.PutRequest; -import software.amazon.awssdk.services.dynamodb.model.QueryRequest; -import software.amazon.awssdk.services.dynamodb.model.QueryResponse; -import software.amazon.awssdk.services.dynamodb.model.ReturnValue; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.services.dynamodb.model.ScanRequest; -import software.amazon.awssdk.services.dynamodb.model.ScanResponse; -import software.amazon.awssdk.services.dynamodb.model.Select; -import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; -import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; -import software.amazon.awssdk.services.dynamodb.model.WriteRequest; - -/** - * Object mapper for domain-object interaction with DynamoDB. - *

    - * To use, define a domain class that represents an item in a DynamoDB table and - * annotate it with the annotations found in the - * software.amazon.awssdk.services.dynamodb.datamodeling package. To allow the - * mapper to persist the data correctly, each modeled property in the domain - * class should be accessible via getter and setter methods, and each property - * annotation should be applied either to the getter method or to the class field. - * A minimal example using getter annotations: - * - *

    - * @DynamoDbTable(tableName = "TestTable")
    - * public class TestClass {
    - *
    - *     private Long key;
    - *     private double rangeKey;
    - *     private Long version;
    - *
    - *     private Set<Integer> integerSetAttribute;
    - *
    - *     @DynamoDbHashKey
    - *     public Long getKey() {
    - *         return key;
    - *     }
    - *
    - *     public void setKey(Long key) {
    - *         this.key = key;
    - *     }
    - *
    - *     @DynamoDbRangeKey
    - *     public double getRangeKey() {
    - *         return rangeKey;
    - *     }
    - *
    - *     public void setRangeKey(double rangeKey) {
    - *         this.rangeKey = rangeKey;
    - *     }
    - *
    - *     @DynamoDbAttribute(attributeName = "integerSetAttribute")
    - *     public Set<Integer> getIntegerAttribute() {
    - *         return integerSetAttribute;
    - *     }
    - *
    - *     public void setIntegerAttribute(Set<Integer> integerAttribute) {
    - *         this.integerSetAttribute = integerAttribute;
    - *     }
    - *
    - *     @DynamoDbVersionAttribute
    - *     public Long getVersion() {
    - *         return version;
    - *     }
    - *
    - *     public void setVersion(Long version) {
    - *         this.version = version;
    - *     }
    - * }
    - * 
    - *

    - * Save instances of annotated classes to DynamoDB, retrieve them, and delete - * them using the {@link DynamoDbMapper} class, as in the following example. - * - *

    - * DynamoDbMapper mapper = new DynamoDbMapper(dynamoDBClient);
    - * Long hashKey = 105L;
    - * double rangeKey = 1.0d;
    - * TestClass obj = mapper.load(TestClass.class, hashKey, rangeKey);
    - * obj.getIntegerAttribute().add(42);
    - * mapper.save(obj);
    - * mapper.delete(obj);
    - * 
    - *

    - * If you don't have your DynamoDB table set up yet, you can use - * {@link DynamoDbMapper#generateCreateTableRequest(Class)} to construct the - * {@link CreateTableRequest} for the table represented by your annotated class. - * - *

    - * DynamoDbClient dynamoDBClient = DynamoDbClient.create();
    - * DynamoDbMapper mapper = new DynamoDbMapper(dynamoDBClient);
    - * CreateTableRequest req = mapper.generateCreateTableRequest(TestClass.class);
    - * // Table provisioned throughput is still required since it cannot be specified in your POJO
    - * req = req.toBuilder().provisionedThroughput(ProvisionedThroughput.builder().readCapacityUnits(5L).writeCapacityUnits(5L).build()).build();
    - * // Fire off the CreateTableRequest using the low-level client
    - * dynamoDBClient.createTable(req);
    - * 
    - *

    - * When using the save, load, and delete methods, {@link DynamoDbMapper} will - * throw {@link DynamoDbMappingException}s to indicate that domain classes are - * incorrectly annotated or otherwise incompatible with this class. Service - * exceptions will always be propagated as {@link SdkClientException}, and - * DynamoDB-specific subclasses such as {@link ConditionalCheckFailedException} - * will be used when possible. - *
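A minimal sketch of handling these exceptions around a save call, under the assumption that a mapper and a modeled customer object already exist (both names are hypothetical):

    try {
        mapper.save(customer);
    } catch (ConditionalCheckFailedException e) {
        // A versioned attribute or expected-value condition did not match on the service side.
    } catch (DynamoDbMappingException e) {
        // The domain class is incorrectly annotated or otherwise incompatible with the mapper.
    }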

    - * This class is thread-safe and can be shared between threads. It's also very - * lightweight, so it doesn't need to be. - * - * @see DynamoDbTable - * @see DynamoDbHashKey - * @see DynamoDbRangeKey - * @see DynamoDbAutoGeneratedKey - * @see DynamoDbAttribute - * @see DynamoDbVersionAttribute - * @see DynamoDbIgnore - * @see DynamoDbMarshalling - * @see DynamoDbMapperConfig - */ -public class DynamoDbMapper extends AbstractDynamoDbMapper { - - /** - * The max back off time for batch get. The configuration for batch write - * has been moved to DynamoDBMapperConfig - */ - protected static final long MAX_BACKOFF_IN_MILLISECONDS = 1000 * 3L; - /** The max number of items allowed in a BatchWrite request. */ - protected static final int MAX_ITEMS_PER_BATCH = 25; - /** - * This retry count is applicable only when every batch get item request - * results in no data retrieved from server and the un processed keys is - * same as request items - */ - protected static final int BATCH_GET_MAX_RETRY_COUNT_ALL_KEYS = 5; - /** - * User agent for requests made using the {@link DynamoDbMapper}. - */ - private static final String USER_AGENT_NAME = - DynamoDbMapper.class.getName(); - private static final String USER_AGENT_BATCH_OPERATION_NAME = - DynamoDbMapper.class.getName() + "_batch_operation"; - private static final Logger log = LoggerFactory.getLogger(DynamoDbMapper.class); - private final DynamoDbClient db; - private final DynamoDbMapperModelFactory models; - private final S3Link.Factory s3Links; - private final AttributeTransformer transformer; - - /** - * Constructs a new mapper with the service object given, using the default - * configuration. - * - * @param dynamoDb - * The service object to use for all service calls. - * @see DynamoDbMapperConfig#DEFAULT - */ - public DynamoDbMapper(final DynamoDbClient dynamoDb) { - this(dynamoDb, DynamoDbMapperConfig.DEFAULT, null, null); - } - - - /** - * Constructs a new mapper with the service object and configuration given. - * - * @param dynamoDb - * The service object to use for all service calls. - * @param config - * The default configuration to use for all service calls. It can - * be overridden on a per-operation basis. - */ - public DynamoDbMapper( - final DynamoDbClient dynamoDb, - final DynamoDbMapperConfig config) { - - this(dynamoDb, config, null, null); - } - - /** - * Constructs a new mapper with the service object and S3 client cache - * given, using the default configuration. - * - * @param ddb - * The service object to use for all service calls. - * @param s3CredentialProvider - * The credentials provider for accessing S3. - * Relevant only if {@link S3Link} is involved. - * @see DynamoDbMapperConfig#DEFAULT - */ - public DynamoDbMapper( - final DynamoDbClient ddb, - final AwsCredentialsProvider s3CredentialProvider) { - - this(ddb, DynamoDbMapperConfig.DEFAULT, s3CredentialProvider); - } - - /** - * Constructs a new mapper with the given service object, configuration, - * and transform hook. - * - * @param dynamoDb - * the service object to use for all service calls - * @param config - * the default configuration to use for all service calls. It - * can be overridden on a per-operation basis - * @param transformer - * The custom attribute transformer to invoke when serializing or - * deserializing an object. 
- */ - public DynamoDbMapper( - final DynamoDbClient dynamoDb, - final DynamoDbMapperConfig config, - final AttributeTransformer transformer) { - - this(dynamoDb, config, transformer, null); - } - - /** - * Constructs a new mapper with the service object, configuration, and S3 - * client cache given. - * - * @param dynamoDb - * The service object to use for all service calls. - * @param config - * The default configuration to use for all service calls. It can - * be overridden on a per-operation basis. - * @param s3CredentialProvider - * The credentials provider for accessing S3. - * Relevant only if {@link S3Link} is involved. - */ - public DynamoDbMapper( - final DynamoDbClient dynamoDb, - final DynamoDbMapperConfig config, - final AwsCredentialsProvider s3CredentialProvider) { - - this(dynamoDb, config, null, validate(s3CredentialProvider)); - } - - /** - * Constructor with all parameters. - * - * @param dynamoDb - * The service object to use for all service calls. - * @param config - * The default configuration to use for all service calls. It can - * be overridden on a per-operation basis. - * @param transformer - * The custom attribute transformer to invoke when serializing or - * deserializing an object. - * @param s3CredentialsProvider - * The credentials provider for accessing S3. - * Relevant only if {@link S3Link} is involved. - */ - public DynamoDbMapper( - final DynamoDbClient dynamoDb, - final DynamoDbMapperConfig config, - final AttributeTransformer transformer, - final AwsCredentialsProvider s3CredentialsProvider) { - super(config); - - failFastOnIncompatibleSubclass(getClass()); - - this.db = dynamoDb; - this.transformer = transformer; - - this.s3Links = S3Link.Factory.of(s3CredentialsProvider); - - this.models = StandardModelFactories.of(this.s3Links); - } - - /** - * Fail fast when trying to create a subclass of the DynamoDBMapper that - * attempts to override one of the old {@code transformAttributes} methods. - */ - private static void failFastOnIncompatibleSubclass(Class clazz) { - while (clazz != DynamoDbMapper.class) { - Class[] classOverride = new Class[] { - Class.class, - Map.class - }; - Class[] nameOverride = new Class[] { - String.class, - String.class, - Map.class - }; - - for (Method method : clazz.getDeclaredMethods()) { - if (method.getName().equals("transformAttributes")) { - Class[] params = method.getParameterTypes(); - if (Arrays.equals(params, classOverride) - || Arrays.equals(params, nameOverride)) { - - throw new IllegalStateException( - "The deprecated transformAttributes method is " - + "no longer supported as of 1.9.0. Use an " - + "AttributeTransformer to inject custom " - + "attribute transformation logic."); - } - } - } - - clazz = clazz.getSuperclass(); - } - } - - /** - * Throws an exception if the given credentials provider is {@code null}. - */ - private static AwsCredentialsProvider validate( - final AwsCredentialsProvider provider) { - if (provider == null) { - throw new IllegalArgumentException( - "s3 credentials provider must not be null"); - } - return provider; - } - - private static boolean isNullOrEmpty(Map map) { - return map == null || map.isEmpty(); - } - - /** - * Determnes if any of the primary keys require auto-generation. 
- */ - private static boolean anyKeyGeneratable( - final DynamoDbMapperTableModel model, - final T object, - final SaveBehavior saveBehavior) { - for (final DynamoDbMapperFieldModel field : model.keys()) { - if (canGenerate(model, object, saveBehavior, field)) { - return true; - } - } - return false; - } - - /** - * Determines if the mapping value can be auto-generated. - */ - private static boolean canGenerate( - final DynamoDbMapperTableModel model, - final T object, - final SaveBehavior saveBehavior, - final DynamoDbMapperFieldModel field) { - if (field.getGenerateStrategy() == null) { - return false; - } else if (field.getGenerateStrategy() == DynamoDbAutoGenerateStrategy.ALWAYS) { - return true; - } else if (field.get(object) != null) { - return false; - } else if (field.keyType() != null || field.indexed()) { - return true; - } else if (saveBehavior == SaveBehavior.CLOBBER) { - return true; - } else if (saveBehavior == SaveBehavior.UPDATE) { - return true; - } else if (anyKeyGeneratable(model, object, saveBehavior)) { - return true; - } - return false; - } - - /** - * Utility method for checking the validity of both hash and range key - * conditions. It also tries to infer the correct index name from the POJO - * annotation, if such information is not directly specified by the user. - * - * @param clazz - * The domain class of the queried items. - * @param queryRequest - * The QueryRequest object to be sent to service. - * @param hashKeyConditions - * All the hash key EQ conditions extracted from the POJO object. - * The mapper will choose one of them that could be applied together with - * the user-specified (if any) index name and range key conditions. Or it - * throws error if more than one conditions are applicable for the query. - * @param rangeKeyConditions - * The range conditions specified by the user. We currently only - * allow at most one range key condition. - */ - private static QueryRequest processKeyConditions( - QueryRequest queryRequest, - final DynamoDbQueryExpression expression, - final DynamoDbMapperTableModel model) { - // Hash key (primary or index) condition - final Map hashKeyConditions = new LinkedHashMap(); - if (expression.getHashKeyValues() != null) { - for (final DynamoDbMapperFieldModel field : model.fields()) { - if (field.keyType() == HASH || !field.globalSecondaryIndexNames(HASH).isEmpty()) { - final Object value = field.get(expression.getHashKeyValues()); - if (value != null) { - hashKeyConditions.put(field.name(), field.eq(value)); - } - } - } - } - - // Range key (primary or index) conditions - final Map rangeKeyConditions = expression.getRangeKeyConditions(); - - // There should be least one hash key condition. - final String keyCondExpression = queryRequest.keyConditionExpression(); - if (keyCondExpression == null) { - if (isNullOrEmpty(hashKeyConditions)) { - throw new IllegalArgumentException("Illegal query expression: No hash key condition is found in the query"); - } - } else { - if (!isNullOrEmpty(hashKeyConditions)) { - throw new IllegalArgumentException("Illegal query expression: Either the hash key conditions or the key " + - "condition expression must be specified but not both."); - } - if (!isNullOrEmpty(rangeKeyConditions)) { - throw new IllegalArgumentException("Illegal query expression: The range key conditions can only be specified " + - "when the key condition expression is not specified."); - } - // key condition expression is in use - return queryRequest; - } - // We don't allow multiple range key conditions. 
- if (rangeKeyConditions != null && rangeKeyConditions.size() > 1) { - throw new IllegalArgumentException( - "Illegal query expression: Conditions on multiple range keys (" - + rangeKeyConditions.keySet().toString() - + ") are found in the query. DynamoDB service only accepts up to ONE range key condition."); - } - final boolean hasRangeKeyCondition = (rangeKeyConditions != null) - && (!rangeKeyConditions.isEmpty()); - final String userProvidedIndexName = queryRequest.indexName(); - final String primaryHashKeyName = model.hashKey().name(); - - // First collect the names of all the global/local secondary indexes that could be applied to this query. - // If the user explicitly specified an index name, we also need to - // 1) check the index is applicable for both hash and range key conditions - // 2) choose one hash key condition if there are more than one of them - boolean hasPrimaryHashKeyCondition = false; - final Map> annotatedGsisOnHashKeys = new HashMap>(); - String hashKeyNameForThisQuery = null; - - boolean hasPrimaryRangeKeyCondition = false; - final Set annotatedLsisOnRangeKey = new HashSet(); - final Set annotatedGsisOnRangeKey = new HashSet(); - - // Range key condition - String rangeKeyNameForThisQuery = null; - if (hasRangeKeyCondition) { - for (String rangeKeyName : rangeKeyConditions.keySet()) { - rangeKeyNameForThisQuery = rangeKeyName; - - final DynamoDbMapperFieldModel rk = model.field(rangeKeyName); - - if (rk.keyType() == RANGE) { - hasPrimaryRangeKeyCondition = true; - } - - annotatedLsisOnRangeKey.addAll(rk.localSecondaryIndexNames()); - annotatedGsisOnRangeKey.addAll(rk.globalSecondaryIndexNames(RANGE)); - } - - if (!hasPrimaryRangeKeyCondition - && annotatedLsisOnRangeKey.isEmpty() - && annotatedGsisOnRangeKey.isEmpty()) { - throw new DynamoDbMappingException( - "The query contains a condition on a range key (" + - rangeKeyNameForThisQuery + ") " + - "that is not annotated with either @DynamoDBRangeKey or @DynamoDBIndexRangeKey."); - } - } - - final boolean userProvidedLsiWithRangeKeyCondition = (userProvidedIndexName != null) - && (annotatedLsisOnRangeKey.contains(userProvidedIndexName)); - final boolean hashOnlyLsiQuery = (userProvidedIndexName != null) - && (!hasRangeKeyCondition) - && model.localSecondaryIndex(userProvidedIndexName) != null; - final boolean userProvidedLsi = userProvidedLsiWithRangeKeyCondition || hashOnlyLsiQuery; - - final boolean userProvidedGsiWithRangeKeyCondition = (userProvidedIndexName != null) - && (annotatedGsisOnRangeKey.contains(userProvidedIndexName)); - final boolean hashOnlyGsiQuery = (userProvidedIndexName != null) - && (!hasRangeKeyCondition) - && model.globalSecondaryIndex(userProvidedIndexName) != null; - final boolean userProvidedGsi = userProvidedGsiWithRangeKeyCondition || hashOnlyGsiQuery; - - if (userProvidedLsi && userProvidedGsi) { - throw new DynamoDbMappingException( - "Invalid query: " + - "Index \"" + userProvidedIndexName + "\" " + - "is annotateded as both a LSI and a GSI for attribute."); - } - - // Hash key conditions - for (String hashKeyName : hashKeyConditions.keySet()) { - if (hashKeyName.equals(primaryHashKeyName)) { - hasPrimaryHashKeyCondition = true; - } - - final DynamoDbMapperFieldModel hk = model.field(hashKeyName); - - Collection annotatedGsiNames = hk.globalSecondaryIndexNames(HASH); - annotatedGsisOnHashKeys.put(hashKeyName, - annotatedGsiNames == null ? new HashSet<>() : new HashSet<>(annotatedGsiNames)); - - // Additional validation if the user provided an index name. 
- if (userProvidedIndexName != null) { - boolean foundHashKeyConditionValidWithUserProvidedIndex = false; - if (userProvidedLsi && hashKeyName.equals(primaryHashKeyName)) { - // found an applicable hash key condition (primary hash + LSI range) - foundHashKeyConditionValidWithUserProvidedIndex = true; - } else if (userProvidedGsi && - annotatedGsiNames != null && annotatedGsiNames.contains(userProvidedIndexName)) { - // found an applicable hash key condition (GSI hash + range) - foundHashKeyConditionValidWithUserProvidedIndex = true; - } - if (foundHashKeyConditionValidWithUserProvidedIndex) { - if (hashKeyNameForThisQuery != null) { - throw new IllegalArgumentException( - "Ambiguous query expression: More than one hash key EQ conditions (" + - hashKeyNameForThisQuery + ", " + hashKeyName + - ") are applicable to the specified index (" - + userProvidedIndexName + "). " + - "Please provide only one of them in the query expression."); - } else { - // found an applicable hash key condition - hashKeyNameForThisQuery = hashKeyName; - } - } - } - } - - // Collate all the key conditions - Map keyConditions = new HashMap(); - - // With user-provided index name - if (userProvidedIndexName != null) { - if (hasRangeKeyCondition - && (!userProvidedLsi) - && (!userProvidedGsi)) { - throw new IllegalArgumentException( - "Illegal query expression: No range key condition is applicable to the specified index (" - + userProvidedIndexName + "). "); - } - if (hashKeyNameForThisQuery == null) { - throw new IllegalArgumentException( - "Illegal query expression: No hash key condition is applicable to the specified index (" - + userProvidedIndexName + "). "); - } - - keyConditions.put(hashKeyNameForThisQuery, hashKeyConditions.get(hashKeyNameForThisQuery)); - if (hasRangeKeyCondition) { - keyConditions.putAll(rangeKeyConditions); - } - } else { - // Infer the index name by finding the index shared by both hash and range key annotations. - if (hasRangeKeyCondition) { - String inferredIndexName = null; - hashKeyNameForThisQuery = null; - if (hasPrimaryHashKeyCondition && hasPrimaryRangeKeyCondition) { - // Found valid query: primary hash + range key conditions - hashKeyNameForThisQuery = primaryHashKeyName; - } else { - // Intersect the set of all the indexes applicable to the range key - // with the set of indexes applicable to each hash key condition. - for (Map.Entry> indexedHashKeys : annotatedGsisOnHashKeys.entrySet()) { - String hashKeyName = indexedHashKeys.getKey(); - Set annotatedGsisOnHashKey = indexedHashKeys.getValue(); - - boolean foundValidQueryExpressionWithInferredIndex = false; - String indexNameInferredByThisHashKey = null; - if (hashKeyName.equals(primaryHashKeyName)) { - if (annotatedLsisOnRangeKey.size() == 1) { - // Found valid query (Primary hash + LSI range conditions) - foundValidQueryExpressionWithInferredIndex = true; - indexNameInferredByThisHashKey = annotatedLsisOnRangeKey.iterator().next(); - } - } - - // We don't need the data in annotatedGSIsOnHashKeys afterwards, - // so it's safe to do the intersection in-place. 
- annotatedGsisOnHashKey.retainAll(annotatedGsisOnRangeKey); - if (annotatedGsisOnHashKey.size() == 1) { - // Found valid query (Hash + range conditions on a GSI) - if (foundValidQueryExpressionWithInferredIndex) { - hashKeyNameForThisQuery = hashKeyName; - inferredIndexName = indexNameInferredByThisHashKey; - } - - foundValidQueryExpressionWithInferredIndex = true; - indexNameInferredByThisHashKey = annotatedGsisOnHashKey.iterator().next(); - } - - if (foundValidQueryExpressionWithInferredIndex) { - if (hashKeyNameForThisQuery != null) { - throw new IllegalArgumentException( - "Ambiguous query expression: Found multiple valid queries: " + - "(Hash: \"" + hashKeyNameForThisQuery + "\", Range: \"" + rangeKeyNameForThisQuery + - "\", Index: \"" + inferredIndexName + "\") and " + - "(Hash: \"" + hashKeyName + "\", Range: \"" + rangeKeyNameForThisQuery + - "\", Index: \"" + indexNameInferredByThisHashKey + "\")."); - } else { - hashKeyNameForThisQuery = hashKeyName; - inferredIndexName = indexNameInferredByThisHashKey; - } - } - } - } - - if (hashKeyNameForThisQuery != null) { - keyConditions.put(hashKeyNameForThisQuery, hashKeyConditions.get(hashKeyNameForThisQuery)); - keyConditions.putAll(rangeKeyConditions); - queryRequest = queryRequest.toBuilder().indexName(inferredIndexName).build(); - } else { - throw new IllegalArgumentException( - "Illegal query expression: Cannot infer the index name from the query expression."); - } - - } else { - // No range key condition is specified. - if (hashKeyConditions.size() > 1) { - if (hasPrimaryHashKeyCondition) { - keyConditions.put(primaryHashKeyName, hashKeyConditions.get(primaryHashKeyName)); - } else { - throw new IllegalArgumentException( - "Ambiguous query expression: More than one index hash key EQ conditions (" + - hashKeyConditions.keySet() + ") are applicable to the query. Please provide only one of them " + - "in the query expression, or specify the appropriate index name."); - } - - } else { - // Only one hash key condition - Entry> entry = annotatedGsisOnHashKeys.entrySet().iterator().next(); - String hashKeyName = entry.getKey(); - Set annotatedGsisOnHashkey = entry.getValue(); - if (!hasPrimaryHashKeyCondition) { - if (annotatedGsisOnHashkey.size() == 1) { - // Set the index if the index hash key is only annotated with one GSI. - queryRequest = queryRequest.toBuilder().indexName(annotatedGsisOnHashkey.iterator().next()).build(); - } else if (annotatedGsisOnHashkey.size() > 1) { - throw new IllegalArgumentException( - "Ambiguous query expression: More than one GSIs (" + - annotatedGsisOnHashkey + - ") are applicable to the query. " + - "Please specify one of them in your query expression."); - } else { - throw new IllegalArgumentException( - "Illegal query expression: No GSI is found in the @DynamoDBIndexHashKey annotation for " + - "attribute \"" + hashKeyName + "\"."); - } - } - keyConditions.putAll(hashKeyConditions); - } - - } - } - - return queryRequest.toBuilder().keyConditions(keyConditions).build(); - } - - /** - * Returns a new map object that merges the two sets of expected value - * conditions (user-specified or imposed by the internal implementation of - * DynamoDBMapper). Internal assertion on an attribute will be overridden by - * any user-specified condition on the same attribute. - *

    - * Exception is thrown if the two sets of conditions cannot be combined - * together. - */ - private static Map mergeExpectedAttributeValueConditions( - Map internalAssertions, - Map userProvidedConditions, - String userProvidedConditionOperator) { - // If any of the condition map is null, simply return a copy of the other one. - if ((internalAssertions == null || internalAssertions.isEmpty()) - && (userProvidedConditions == null || userProvidedConditions.isEmpty())) { - return null; - } else if (internalAssertions == null) { - return new HashMap<>(userProvidedConditions); - } else if (userProvidedConditions == null) { - return new HashMap<>(internalAssertions); - } - - // Start from a copy of the internal conditions - Map mergedExpectedValues = - new HashMap(internalAssertions); - - // Remove internal conditions that are going to be overlaid by user-provided ones. - for (String attrName : userProvidedConditions.keySet()) { - mergedExpectedValues.remove(attrName); - } - - // All the generated internal conditions must be joined by AND. - // Throw an exception if the user specifies an OR operator, and that the - // internal conditions are not totally overlaid by the user-provided - // ones. - if (ConditionalOperator.OR.toString().equals(userProvidedConditionOperator) - && !mergedExpectedValues.isEmpty()) { - throw new IllegalArgumentException("Unable to assert the value of the fields " + mergedExpectedValues.keySet() + - ", since the expected value conditions cannot be combined with user-specified " + - "conditions joined by \"OR\". You can use SaveBehavior.CLOBBER to " + - "skip the assertion on these fields."); - } - - mergedExpectedValues.putAll(userProvidedConditions); - - return mergedExpectedValues; - } - - static X applyUserAgent(X request) { - final AwsRequestOverrideConfiguration newCfg = request.overrideConfiguration() - .map(c -> c.toBuilder()) - .orElse(AwsRequestOverrideConfiguration.builder()) - .addApiName(apiName -> apiName.name(USER_AGENT_NAME).version(VersionInfo.SDK_VERSION)) - .build(); - - return (X) request.toBuilder() - .overrideConfiguration(newCfg) - .build(); - } - - static X applyBatchOperationUserAgent(X request) { - final AwsRequestOverrideConfiguration newCfg = request.overrideConfiguration() - .map(c -> c.toBuilder()) - .orElse(AwsRequestOverrideConfiguration.builder()) - .addApiName(apiName -> apiName.name(USER_AGENT_BATCH_OPERATION_NAME).version(VersionInfo.SDK_VERSION)) - .build(); - - return (X) request.toBuilder() - .overrideConfiguration(newCfg) - .build(); - } - - /** - * Batch pause. 
- */ - private static void pause(long delay) { - if (delay <= 0) { - return; - } - try { - Thread.sleep(delay); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw SdkClientException.builder().message(e.getMessage()).cause(e).build(); - } - } - - @Override - public DynamoDbMapperTableModel getTableModel(Class clazz, DynamoDbMapperConfig config) { - return this.models.getTableFactory(config).getTable(clazz); - } - - @Override - public T load(T keyObject, DynamoDbMapperConfig config) { - @SuppressWarnings("unchecked") - Class clazz = (Class) keyObject.getClass(); - - config = mergeConfig(config); - final DynamoDbMapperTableModel model = getTableModel(clazz, config); - - String tableName = getTableName(clazz, keyObject, config); - - GetItemRequest.Builder rqBuilder = GetItemRequest.builder(); - - Map key = model.convertKey(keyObject); - - rqBuilder.key(key); - rqBuilder.tableName(tableName); - rqBuilder.consistentRead(config.getConsistentRead() == ConsistentRead.CONSISTENT); - - GetItemRequest rq = rqBuilder.build(); - - GetItemResponse item = db.getItem(applyUserAgent(rq)); - Map itemAttributes = item.item(); - if (itemAttributes == null) { - return null; - } - - T object = privateMarshallIntoObject( - toParameters(itemAttributes, clazz, tableName, config)); - - return object; - } - - @Override - public T load(Class clazz, Object hashKey, Object rangeKey, DynamoDbMapperConfig config) { - config = mergeConfig(config); - final DynamoDbMapperTableModel model = getTableModel(clazz, config); - T keyObject = model.createKey(hashKey, rangeKey); - return load(keyObject, config); - } - - @Override - public T marshallIntoObject(Class clazz, Map itemAttributes, DynamoDbMapperConfig config) { - config = mergeConfig(config); - - String tableName = getTableName(clazz, config); - - return privateMarshallIntoObject( - toParameters(itemAttributes, clazz, tableName, config)); - } - - /** - * The one true implementation of marshallIntoObject. - */ - private T privateMarshallIntoObject( - AttributeTransformer.Parameters parameters) { - - Class clazz = parameters.modelClass(); - Map values = untransformAttributes(parameters); - - final DynamoDbMapperTableModel model = getTableModel(clazz, parameters.mapperConfig()); - return model.unconvert(values); - } - - @Override - public List marshallIntoObjects(Class clazz, List> itemAttributes, - DynamoDbMapperConfig config) { - // If config is used in the future, be sure to mergeConfig. - // config = mergeConfig(config); - - List result = new ArrayList(itemAttributes.size()); - for (Map item : itemAttributes) { - result.add(marshallIntoObject(clazz, item)); - } - return result; - } - - /** - * A replacement for {@link #marshallIntoObjects(Class, List)} that takes - * an extra set of parameters to be tunneled through to - * {@code privateMarshalIntoObject} (if nothing along the way is - * overridden). It's package-private because some of the Paginated*List - * classes call back into it, but final because no one, even in this - * package, should ever override it. 
- */ - final List marshallIntoObjects( - final List> parameters) { - List result = new ArrayList(parameters.size()); - - for (AttributeTransformer.Parameters entry : parameters) { - result.add(privateMarshallIntoObject(entry)); - } - - return result; - } - - @Override - public void save(T object, - DynamoDbSaveExpression saveExpression, - final DynamoDbMapperConfig config) { - final DynamoDbMapperConfig finalConfig = mergeConfig(config); - - @SuppressWarnings("unchecked") - Class clazz = (Class) object.getClass(); - String tableName = getTableName(clazz, object, finalConfig); - - final DynamoDbMapperTableModel model = getTableModel(clazz, finalConfig); - - /* - * We force a putItem request instead of updateItem request either when - * CLOBBER is configured, or part of the primary key of the object needs - * to be auto-generated. - */ - boolean forcePut = (finalConfig.saveBehavior() == SaveBehavior.CLOBBER) - || anyKeyGeneratable(model, object, finalConfig.saveBehavior()); - - SaveObjectHandler saveObjectHandler; - - if (forcePut) { - saveObjectHandler = this.new SaveObjectHandler(clazz, object, - tableName, finalConfig, saveExpression) { - - @Override - protected void onPrimaryKeyAttributeValue(String attributeName, - AttributeValue keyAttributeValue) { - /* Treat key values as common attribute value updates. */ - getAttributeValueUpdates().put(attributeName, - AttributeValueUpdate.builder() - .value(keyAttributeValue) - .action("PUT").build()); - } - - /* Use default implementation of onNonKeyAttribute(...) */ - - @Override - protected void onNullNonKeyAttribute(String attributeName) { - /* When doing a force put, we can safely ignore the null-valued attributes. */ - return; - } - - @Override - protected void executeLowLevelRequest() { - /* Send a putItem request. */ - doPutItem(); - } - }; - } else { - saveObjectHandler = this.new SaveObjectHandler(clazz, object, - tableName, finalConfig, saveExpression) { - - @Override - protected void onPrimaryKeyAttributeValue(String attributeName, - AttributeValue keyAttributeValue) { - /* Put it in the key collection which is later used in the updateItem request. */ - getPrimaryKeyAttributeValues().put(attributeName, keyAttributeValue); - } - - - @Override - protected void onNonKeyAttribute(String attributeName, - AttributeValue currentValue) { - /* If it's a set attribute and the mapper is configured with APPEND_SET, - * we do an "ADD" update instead of the default "PUT". - */ - if (localSaveBehavior() == SaveBehavior.APPEND_SET) { - if (currentValue.bs() != null - || currentValue.ns() != null - || currentValue.ss() != null) { - getAttributeValueUpdates().put( - attributeName, - AttributeValueUpdate.builder().value( - currentValue).action("ADD").build()); - return; - } - } - /* Otherwise, we do the default "PUT" update. */ - super.onNonKeyAttribute(attributeName, currentValue); - } - - @Override - protected void onNullNonKeyAttribute(String attributeName) { - /* - * If UPDATE_SKIP_NULL_ATTRIBUTES or APPEND_SET is - * configured, we don't delete null value attributes. - */ - if (localSaveBehavior() == SaveBehavior.UPDATE_SKIP_NULL_ATTRIBUTES - || localSaveBehavior() == SaveBehavior.APPEND_SET) { - return; - } else { - /* Delete attributes that are set as null in the object. 
*/ - getAttributeValueUpdates() - .put(attributeName, - AttributeValueUpdate.builder() - .action("DELETE") - .build()); - } - } - - @Override - protected void executeLowLevelRequest() { - UpdateItemResponse updateItemResult = doUpdateItem(); - - // The UpdateItem request is specified to return ALL_NEW - // attributes of the affected item. So if the returned - // UpdateItemResponse does not include any ReturnedAttributes, - // it indicates the UpdateItem failed silently (e.g. the - // key-only-put nightmare - - // https://forums.aws.amazon.com/thread.jspa?threadID=86798&tstart=25), - // in which case we should re-send a PutItem - // request instead. - if (updateItemResult.attributes() == null - || updateItemResult.attributes().isEmpty()) { - // Before we proceed with PutItem, we need to put all - // the key attributes (prepared for the - // UpdateItemRequest) into the AttributeValueUpdates - // collection. - for (String keyAttributeName : getPrimaryKeyAttributeValues().keySet()) { - AttributeValueUpdate value = AttributeValueUpdate.builder() - .value(getPrimaryKeyAttributeValues().get(keyAttributeName)) - .action("PUT").build(); - getAttributeValueUpdates().put(keyAttributeName, value); - } - - doPutItem(); - } - } - }; - } - - saveObjectHandler.execute(); - } - - @Override - public void delete(T object, DynamoDbDeleteExpression deleteExpression, DynamoDbMapperConfig config) { - config = mergeConfig(config); - - @SuppressWarnings("unchecked") - Class clazz = (Class) object.getClass(); - final DynamoDbMapperTableModel model = getTableModel(clazz, config); - - String tableName = getTableName(clazz, object, config); - - Map key = model.convertKey(object); - - /* - * If there is a version field, make sure we assert its value. If the - * version field is null (only should happen in unusual circumstances), - * pretend it doesn't have a version field after all. 
- */ - Map internalAssertions = new HashMap(); - if (config.saveBehavior() != SaveBehavior.CLOBBER && model.versioned()) { - for (final DynamoDbMapperFieldModel field : model.versions()) { - final AttributeValue current = field.getAndConvert(object); - if (current == null) { - internalAssertions.put(field.name(), ExpectedAttributeValue.builder().exists(false).build()); - } else { - internalAssertions.put(field.name(), ExpectedAttributeValue.builder().exists(true).value(current).build()); - } - break; - } - } - - DeleteItemRequest req = DeleteItemRequest.builder() - .key(key) - .tableName(tableName) - .expected(internalAssertions) - .build(); - - if (deleteExpression != null) { - String conditionalExpression = deleteExpression.getConditionExpression(); - - if (conditionalExpression != null) { - if (!internalAssertions.isEmpty()) { - throw SdkClientException.builder() - .message("Condition Expressions cannot be used if a versioned attribute is present") - .build(); - } - - req = req.toBuilder() - .conditionExpression(conditionalExpression) - .expressionAttributeNames( - deleteExpression.getExpressionAttributeNames()) - .expressionAttributeValues( - deleteExpression.getExpressionAttributeValues()) - .build(); - } - - req = req.toBuilder() - .expected( - mergeExpectedAttributeValueConditions(internalAssertions, - deleteExpression.getExpected(), - deleteExpression.getConditionalOperator())) - .conditionalOperator( - deleteExpression.getConditionalOperator()) - .build(); - - } - db.deleteItem(applyUserAgent(req)); - } - - @Override - public List batchWrite(Iterable objectsToWrite, - Iterable objectsToDelete, - DynamoDbMapperConfig config) { - config = mergeConfig(config); - - List totalFailedBatches = new LinkedList(); - - StringListMap requestItems = new StringListMap(); - - List inMemoryUpdates = new LinkedList(); - for (Object toWrite : objectsToWrite) { - Class clazz = (Class) toWrite.getClass(); - String tableName = getTableName(clazz, toWrite, config); - - Map attributeValues = new HashMap(); - - // Look at every getter and construct a value object for it - final DynamoDbMapperTableModel model = getTableModel(clazz, config); - for (final DynamoDbMapperFieldModel field : model.fields()) { - AttributeValue currentValue; - if (canGenerate(model, toWrite, config.saveBehavior(), field) && !field.versioned()) { - currentValue = field.convert(field.generate(field.get(toWrite))); - inMemoryUpdates.add(new ValueUpdate(field, currentValue, toWrite)); - } else { - currentValue = field.convert(field.get(toWrite)); - } - if (currentValue != null) { - attributeValues.put(field.name(), currentValue); - } - } - - if (!requestItems.containsKey(tableName)) { - requestItems.put(tableName, new LinkedList()); - } - - AttributeTransformer.Parameters parameters = - toParameters(attributeValues, clazz, tableName, config); - - requestItems.add(tableName, - WriteRequest.builder() - .putRequest(PutRequest.builder() - .item(transformAttributes(parameters)) - .build()) - .build()); - } - - for (Object toDelete : objectsToDelete) { - Class clazz = (Class) toDelete.getClass(); - - String tableName = getTableName(clazz, toDelete, config); - final DynamoDbMapperTableModel model = getTableModel(clazz, config); - - Map key = model.convertKey(toDelete); - - requestItems.add(tableName, WriteRequest.builder() - .deleteRequest(DeleteRequest.builder() - .key(key) - .build()) - .build()); - } - - // Break into chunks of 25 items and make service requests to DynamoDB - for (final StringListMap batch : 
requestItems.subMaps(MAX_ITEMS_PER_BATCH, true)) { - List failedBatches = writeOneBatch(batch, config.batchWriteRetryStrategy()); - totalFailedBatches.addAll(failedBatches); - - // If contains throttling exception, we do a backoff - if (containsThrottlingException(failedBatches)) { - pause(config.batchWriteRetryStrategy().getDelayBeforeRetryUnprocessedItems( - Collections.unmodifiableMap(batch), 0)); - } - } - - // Once the entire batch is processed, update assigned keys in memory - for (ValueUpdate update : inMemoryUpdates) { - update.apply(); - } - - return totalFailedBatches; - } - - /** - * Process one batch of requests(max 25). It will divide the batch if - * receives request too large exception(the total size of the request is beyond 1M). - */ - private List writeOneBatch( - StringListMap batch, - BatchWriteRetryStrategy batchWriteRetryStrategy) { - - List failedBatches = new LinkedList(); - FailedBatch failedBatch = doBatchWriteItemWithRetry(batch, batchWriteRetryStrategy); - - if (failedBatch != null) { - // If the exception is request entity too large, we divide the batch - // into smaller parts. - - if (failedBatch.isRequestEntityTooLarge()) { - - // If only one item left, the item size must beyond 64k, which - // exceedes the limit. - - if (failedBatch.size() == 1) { - failedBatches.add(failedBatch); - } else { - for (final StringListMap subBatch : batch.subMaps(2, false)) { - failedBatches.addAll(writeOneBatch(subBatch, batchWriteRetryStrategy)); - } - } - - } else { - failedBatches.add(failedBatch); - } - - } - return failedBatches; - } - - /** - * Check whether there are throttling exception in the failed batches. - */ - private boolean containsThrottlingException(List failedBatches) { - for (FailedBatch failedBatch : failedBatches) { - if (failedBatch.isThrottling()) { - return true; - } - } - return false; - } - - /** - * Continue trying to process the batch and retry on UnproccessedItems as - * according to the specified BatchWriteRetryStrategy - */ - private FailedBatch doBatchWriteItemWithRetry( - Map> batch, - BatchWriteRetryStrategy batchWriteRetryStrategy) { - - BatchWriteItemResponse result = null; - int retries = 0; - int maxRetries = batchWriteRetryStrategy - .maxRetryOnUnprocessedItems(Collections - .unmodifiableMap(batch)); - - FailedBatch failedBatch = null; - Map> pendingItems = batch; - - while (true) { - try { - result = db.batchWriteItem(applyBatchOperationUserAgent( - BatchWriteItemRequest.builder().requestItems(pendingItems).build())); - } catch (Exception e) { - failedBatch = new FailedBatch(); - failedBatch.setUnprocessedItems(pendingItems); - failedBatch.setException(e); - return failedBatch; - } - pendingItems = result.unprocessedItems(); - - if (pendingItems.size() > 0) { - - // return pendingItems as a FailedBatch if we have exceeded max retry - if (maxRetries >= 0 && retries >= maxRetries) { - failedBatch = new FailedBatch(); - failedBatch.setUnprocessedItems(pendingItems); - failedBatch.setException(null); - return failedBatch; - } - - pause(batchWriteRetryStrategy.getDelayBeforeRetryUnprocessedItems( - Collections.unmodifiableMap(pendingItems), retries)); - retries++; - } else { - break; - } - } - return failedBatch; - } - - @Override - public Map> batchLoad(Iterable itemsToGet, DynamoDbMapperConfig config) { - config = mergeConfig(config); - boolean consistentReads = (config.getConsistentRead() == ConsistentRead.CONSISTENT); - - if (itemsToGet == null) { - return new HashMap<>(); - } - - Map>> requestItemLists = new HashMap<>(); - Map> 
classesByTableName = new HashMap>(); - Map> resultSet = new HashMap>(); - int count = 0; - - for (Object keyObject : itemsToGet) { - Class clazz = (Class) keyObject.getClass(); - final DynamoDbMapperTableModel model = getTableModel(clazz, config); - - String tableName = getTableName(clazz, keyObject, config); - classesByTableName.put(tableName, clazz); - - requestItemLists.computeIfAbsent(tableName, ignored -> new LinkedList<>()).add(model.convertKey(keyObject)); - - // Reach the maximum number which can be handled in a single batchGet - if (++count == 100) { - Map requestItems = batchRequestItems(consistentReads, requestItemLists); - processBatchGetRequest(classesByTableName, requestItems, resultSet, config); - requestItemLists.clear(); - count = 0; - } - } - - if (count > 0) { - Map requestItems = batchRequestItems(consistentReads, requestItemLists); - processBatchGetRequest(classesByTableName, requestItems, resultSet, config); - } - - return resultSet; - } - - private Map batchRequestItems( - boolean consistentReads, - Map>> requestItemLists) { - return requestItemLists.entrySet().stream() - .collect(toMap(Entry::getKey, e -> KeysAndAttributes.builder() - .consistentRead(consistentReads) - .keys(e.getValue()) - .build())); - } - - @Override - public Map> batchLoad(Map, List> itemsToGet, DynamoDbMapperConfig config) { - config = mergeConfig(config); - List keys = new ArrayList(); - if (itemsToGet != null) { - for (Map.Entry, List> item : itemsToGet.entrySet()) { - Class clazz = item.getKey(); - List value = item.getValue(); - if (value != null) { - final DynamoDbMapperTableModel model = getTableModel(clazz, config); - for (KeyPair keyPair : value) { - keys.add(model.createKey(keyPair.getHashKey(), keyPair.getRangeKey())); - } - } - } - } - return batchLoad(keys, config); - } - - /** - * @param config never null - */ - private void processBatchGetRequest( - final Map> classesByTableName, - final Map requestItems, - final Map> resultSet, - final DynamoDbMapperConfig config) { - - BatchGetItemResponse batchGetItemResponse = null; - BatchGetItemRequest batchGetItemRequest = BatchGetItemRequest.builder() - .requestItems(requestItems) - .build(); - - BatchLoadRetryStrategy batchLoadStrategy = config.batchLoadRetryStrategy(); - - BatchLoadContext batchLoadContext = new BatchLoadContext(batchGetItemRequest); - - int retries = 0; - - do { - if (batchGetItemResponse != null) { - retries++; - batchLoadContext.setRetriesAttempted(retries); - if (!isNullOrEmpty(batchGetItemResponse.unprocessedKeys())) { - pause(batchLoadStrategy.getDelayBeforeNextRetry(batchLoadContext)); - batchGetItemRequest = batchGetItemRequest.toBuilder() - .requestItems(batchGetItemResponse.unprocessedKeys()) - .build(); - batchLoadContext.setBatchGetItemRequest(batchGetItemRequest); - } - } - - batchGetItemResponse = db.batchGetItem(applyBatchOperationUserAgent(batchGetItemRequest)); - - Map>> responses = batchGetItemResponse.responses(); - for (Map.Entry>> entries : responses.entrySet()) { - String tableName = entries.getKey(); - List> items = entries.getValue(); - - List objects = resultSet.getOrDefault(tableName, new LinkedList<>()); - Class clazz = classesByTableName.get(tableName); - - for (Map item : items) { - AttributeTransformer.Parameters parameters = toParameters(item, clazz, tableName, config); - objects.add(privateMarshallIntoObject(parameters)); - } - - resultSet.put(tableName, objects); - } - - batchLoadContext.setBatchGetItemResponse(batchGetItemResponse); - - // the number of unprocessed keys and Batch Load 
Strategy will drive the number of retries - } while (batchLoadStrategy.shouldRetry(batchLoadContext)); - - if (!isNullOrEmpty(batchGetItemResponse.unprocessedKeys())) { - throw new BatchGetItemException("The BatchGetItemResponse has unprocessed keys after max retry attempts. Catch the " + - "BatchGetItemException to get the list of unprocessed keys.", - batchGetItemResponse.unprocessedKeys(), resultSet); - } - } - - @Override - public PaginatedScanList scan(Class clazz, - DynamoDbScanExpression scanExpression, - DynamoDbMapperConfig config) { - config = mergeConfig(config); - - ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); - - ScanResponse scanResult = db.scan(applyUserAgent(scanRequest)); - return new PaginatedScanList<>(this, clazz, db, scanRequest, scanResult, config.getPaginationLoadingStrategy(), config); - } - - @Override - public PaginatedParallelScanList parallelScan(Class clazz, - DynamoDbScanExpression scanExpression, - int totalSegments, - DynamoDbMapperConfig config) { - config = mergeConfig(config); - - // Create hard copies of the original scan request with difference segment number. - List parallelScanRequests = createParallelScanRequestsFromExpression(clazz, scanExpression, - totalSegments, config); - ParallelScanTask parallelScanTask = new ParallelScanTask(db, parallelScanRequests); - - return new PaginatedParallelScanList(this, clazz, db, parallelScanTask, config.getPaginationLoadingStrategy(), config); - } - - @Override - public ScanResultPage scanPage(Class clazz, - DynamoDbScanExpression scanExpression, - DynamoDbMapperConfig config) { - config = mergeConfig(config); - - ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); - - ScanResponse scanResult = db.scan(applyUserAgent(scanRequest)); - ScanResultPage result = new ScanResultPage(); - List> parameters = - toParameters(scanResult.items(), clazz, scanRequest.tableName(), config); - - result.setResults(marshallIntoObjects(parameters)); - result.setLastEvaluatedKey(scanResult.lastEvaluatedKey()); - result.setCount(scanResult.count()); - result.setScannedCount(scanResult.scannedCount()); - result.setConsumedCapacity(scanResult.consumedCapacity()); - - return result; - } - - @Override - public PaginatedQueryList query(Class clazz, - DynamoDbQueryExpression queryExpression, - DynamoDbMapperConfig config) { - config = mergeConfig(config); - - QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config); - - QueryResponse queryResult = db.query(applyUserAgent(queryRequest)); - return new PaginatedQueryList(this, clazz, db, queryRequest, queryResult, - config.getPaginationLoadingStrategy(), config); - } - - @Override - public QueryResultPage queryPage(Class clazz, - DynamoDbQueryExpression queryExpression, - DynamoDbMapperConfig config) { - config = mergeConfig(config); - - QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config); - - QueryResponse queryResult = db.query(applyUserAgent(queryRequest)); - QueryResultPage result = new QueryResultPage(); - List> parameters = - toParameters(queryResult.items(), clazz, queryRequest.tableName(), config); - - result.setResults(marshallIntoObjects(parameters)); - result.setLastEvaluatedKey(queryResult.lastEvaluatedKey()); - result.setCount(queryResult.count()); - result.setScannedCount(queryResult.scannedCount()); - result.setConsumedCapacity(queryResult.consumedCapacity()); - - return result; - } - - @Override - public int count(Class 
clazz, DynamoDbScanExpression scanExpression, DynamoDbMapperConfig config) { - config = mergeConfig(config); - - ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config); - scanRequest = scanRequest.toBuilder().select(Select.COUNT).build(); - - // Count scans can also be truncated for large datasets - int count = 0; - ScanResponse scanResult; - do { - scanResult = db.scan(applyUserAgent(scanRequest)); - count += scanResult.count(); - scanRequest = scanRequest.toBuilder().exclusiveStartKey(scanResult.lastEvaluatedKey()).build(); - } while (!(scanResult.lastEvaluatedKey() instanceof SdkAutoConstructMap)); - - return count; - } - - @Override - public int count(Class clazz, DynamoDbQueryExpression queryExpression, DynamoDbMapperConfig config) { - config = mergeConfig(config); - - QueryRequest queryRequest = createQueryRequestFromExpression(clazz, queryExpression, config); - queryRequest = queryRequest.toBuilder().select(Select.COUNT).build(); - - // Count queries can also be truncated for large datasets - int count = 0; - QueryResponse queryResult; - do { - queryResult = db.query(applyUserAgent(queryRequest)); - count += queryResult.count(); - queryRequest = queryRequest.toBuilder().exclusiveStartKey(queryResult.lastEvaluatedKey()).build(); - } while (!(queryResult.lastEvaluatedKey() instanceof SdkAutoConstructMap)); - - return count; - } - - /** - * @param config never null - */ - private ScanRequest createScanRequestFromExpression(Class clazz, DynamoDbScanExpression scanExpression, - DynamoDbMapperConfig config) { - ScanRequest scanRequest = ScanRequest.builder() - .tableName(getTableName(clazz, config)) - .indexName(scanExpression.getIndexName()) - .scanFilter(scanExpression.scanFilter()) - .limit(scanExpression.limit()) - .exclusiveStartKey(scanExpression.getExclusiveStartKey()) - .totalSegments(scanExpression.getTotalSegments()) - .segment(scanExpression.segment()) - .conditionalOperator(scanExpression.getConditionalOperator()) - .filterExpression(scanExpression.getFilterExpression()) - .expressionAttributeNames(scanExpression.getExpressionAttributeNames()) - .expressionAttributeValues(scanExpression.getExpressionAttributeValues()) - .select(scanExpression.select()) - .projectionExpression(scanExpression.getProjectionExpression()) - .returnConsumedCapacity(scanExpression.getReturnConsumedCapacity()) - .consistentRead(scanExpression.isConsistentRead()) - .build(); - - return applyUserAgent(scanRequest); - } - - /** - * @param config never null - */ - private List createParallelScanRequestsFromExpression(Class clazz, DynamoDbScanExpression scanExpression, - int totalSegments, DynamoDbMapperConfig config) { - if (totalSegments < 1) { - throw new IllegalArgumentException("Parallel scan should have at least one scan segment."); - } - if (scanExpression.getExclusiveStartKey() != null) { - log.info("The ExclusiveStartKey parameter specified in the DynamoDBScanExpression is ignored," - + " since the individual parallel scan request on each segment is applied on a separate key scope."); - } - if (scanExpression.segment() != null || scanExpression.getTotalSegments() != null) { - log.info("The Segment and TotalSegments parameters specified in the DynamoDBScanExpression are ignored."); - } - - List parallelScanRequests = new LinkedList(); - for (int segment = 0; segment < totalSegments; segment++) { - ScanRequest scanRequest = createScanRequestFromExpression(clazz, scanExpression, config) - .toBuilder() - .segment(segment) - .totalSegments(totalSegments) - 
.exclusiveStartKey(null) - .build(); - parallelScanRequests.add(scanRequest); - } - return parallelScanRequests; - } - - private QueryRequest createQueryRequestFromExpression(Class clazz, - DynamoDbQueryExpression xpress, DynamoDbMapperConfig config) { - - final DynamoDbMapperTableModel model = getTableModel(clazz, config); - - QueryRequest request = QueryRequest.builder() - .consistentRead(xpress.isConsistentRead()) - .tableName(getTableName(clazz, xpress.getHashKeyValues(), config)) - .indexName(xpress.getIndexName()) - .keyConditionExpression(xpress.getKeyConditionExpression()) - .build(); - - request = processKeyConditions(request, xpress, model); - - request = request.toBuilder() - .scanIndexForward(xpress.isScanIndexForward()) - .limit(xpress.limit()) - .exclusiveStartKey(xpress.getExclusiveStartKey()) - .queryFilter(xpress.getQueryFilter()) - .conditionalOperator(xpress.getConditionalOperator()) - .select(xpress.select()) - .projectionExpression(xpress.getProjectionExpression()) - .filterExpression(xpress.getFilterExpression()) - .expressionAttributeNames(xpress.getExpressionAttributeNames()) - .expressionAttributeValues(xpress.getExpressionAttributeValues()) - .returnConsumedCapacity(xpress.getReturnConsumedCapacity()) - .build(); - - return applyUserAgent(request); - } - - private AttributeTransformer.Parameters toParameters( - final Map attributeValues, - final Class modelClass, - final String tableName, - final DynamoDbMapperConfig mapperConfig) { - - return toParameters(attributeValues, false, modelClass, tableName, mapperConfig); - } - - private AttributeTransformer.Parameters toParameters( - final Map attributeValues, - final boolean partialUpdate, - final Class modelClass, - final String tableName, - final DynamoDbMapperConfig mapperConfig) { - - return new TransformerParameters( - getTableModel(modelClass, mapperConfig), - attributeValues, - partialUpdate, - modelClass, - mapperConfig, - tableName); - } - - final List> toParameters( - final List> attributeValues, - final Class modelClass, - final String tableName, - final DynamoDbMapperConfig mapperConfig) { - List> rval = - new ArrayList>( - attributeValues.size()); - - for (Map item : attributeValues) { - rval.add(toParameters(item, modelClass, tableName, mapperConfig)); - } - - return rval; - } - - private Map untransformAttributes( - final AttributeTransformer.Parameters parameters) { - if (transformer != null) { - return transformer.untransform(parameters); - } else { - return parameters.getAttributeValues(); - } - } - - private Map transformAttributes( - final AttributeTransformer.Parameters parameters) { - - if (transformer != null) { - return transformer.transform(parameters); - } else { - return parameters.getAttributeValues(); - } - } - - @Override - public S3ClientCache s3ClientCache() { - return s3Links.s3ClientCache(); - } - - @Override - public S3Link createS3Link(Region s3region, String bucketName, String key) { - return s3Links.createS3Link(s3region, bucketName, key); - } - - @Override - public S3Link createS3Link(String s3region, String bucketName, String key) { - return s3Links.createS3Link(s3region, bucketName, key); - } - - @Override - public CreateTableRequest generateCreateTableRequest(Class clazz, DynamoDbMapperConfig config) { - config = mergeConfig(config); - final DynamoDbMapperTableModel model = getTableModel(clazz, config); - - List keySchemas = new ArrayList<>(); - keySchemas.add(KeySchemaElement.builder().attributeName(model.hashKey().name()).keyType(HASH).build()); - - final 
CreateTableRequest.Builder requestBuilder = CreateTableRequest.builder() - .tableName(getTableName(clazz, config)); - - if (model.rangeKeyIfExists() != null) { - keySchemas.add(KeySchemaElement.builder() - .attributeName(model.rangeKey().name()) - .keyType(RANGE) - .build()); - } - requestBuilder.globalSecondaryIndexes(model.globalSecondaryIndexes()) - .localSecondaryIndexes(model.localSecondaryIndexes()); - - List attributeDefinitions = new ArrayList<>(); - for (final DynamoDbMapperFieldModel field : model.fields()) { - if (field.keyType() != null || field.indexed()) { - AttributeDefinition attributeDefinition = AttributeDefinition.builder() - .attributeType(ScalarAttributeType.valueOf(field.attributeType().name())) - .attributeName(field.name()) - .build(); - - attributeDefinitions.add(attributeDefinition); - } - } - - requestBuilder.keySchema(keySchemas); - requestBuilder.attributeDefinitions(attributeDefinitions); - return requestBuilder.build(); - } - - @Override - public DeleteTableRequest generateDeleteTableRequest(Class clazz, DynamoDbMapperConfig config) { - config = mergeConfig(config); - DeleteTableRequest deleteTableRequest = DeleteTableRequest.builder() - .tableName(getTableName(clazz, config)) - .build(); - return deleteTableRequest; - } - - /** - * Creates a new table mapper using this mapper to perform operations. - * @param The object type which this mapper operates. - * @param The hash key value type. - * @param The range key value type; use ? if no range key. - * @param clazz The object class. - * @return The table mapper. - */ - public DynamoDbTableMapper newTableMapper(Class clazz) { - DynamoDbMapperConfig config = mergeConfig(null); - return new DynamoDbTableMapper(this.db, this, config, getTableModel(clazz, config)); - } - - /** - * The one true implementation of AttributeTransformer.Parameters. - */ - private static class TransformerParameters - implements AttributeTransformer.Parameters { - - private final DynamoDbMapperTableModel model; - private final Map attributeValues; - private final boolean partialUpdate; - private final Class modelClass; - private final DynamoDbMapperConfig mapperConfig; - private final String tableName; - - TransformerParameters( - final DynamoDbMapperTableModel model, - final Map attributeValues, - final boolean partialUpdate, - final Class modelClass, - final DynamoDbMapperConfig mapperConfig, - final String tableName) { - - this.model = model; - this.attributeValues = - Collections.unmodifiableMap(attributeValues); - this.partialUpdate = partialUpdate; - this.modelClass = modelClass; - this.mapperConfig = mapperConfig; - this.tableName = tableName; - } - - @Override - public Map getAttributeValues() { - return attributeValues; - } - - @Override - public boolean isPartialUpdate() { - return partialUpdate; - } - - @Override - public Class modelClass() { - return modelClass; - } - - @Override - public DynamoDbMapperConfig mapperConfig() { - return mapperConfig; - } - - @Override - public String getTableName() { - return tableName; - } - - @Override - public String getHashKeyName() { - return model.hashKey().name(); - } - - @Override - public String getRangeKeyName() { - return model.rangeKeyIfExists() == null ? null : model.rangeKey().name(); - } - } - - /** - * The return type of batchWrite, batchDelete and batchSave. - * - * It contains the information about the unprocessed items and the - * exception causing the failure. 
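For context, callers typically consume these FailedBatch results after a batchSave or batchWrite call. A minimal sketch, assuming FailedBatch is exposed from the mapper's public API as in the v1-style mapper; the helper class, the list of model objects, and the reporting logic are hypothetical and not part of this file:

    import java.util.List;
    import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper;

    final class BatchSaveFailureReporter {
        // Sketch: save a batch and report anything the mapper could not write.
        static void saveAndReport(DynamoDbMapper mapper, List<Object> itemsToSave) {
            List<DynamoDbMapper.FailedBatch> failures = mapper.batchSave(itemsToSave);
            for (DynamoDbMapper.FailedBatch failed : failures) {
                if (failed.getException() != null) {
                    failed.getException().printStackTrace();
                }
                failed.getUnprocessedItems().forEach((table, writes) ->
                        System.err.println(writes.size() + " unprocessed write(s) for table " + table));
            }
        }
    }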
- */ - public static class FailedBatch { - private Map> unprocessedItems; - private Exception exception; - - public Map> getUnprocessedItems() { - return unprocessedItems; - } - - public void setUnprocessedItems(Map> unprocessedItems) { - this.unprocessedItems = unprocessedItems; - } - - public Exception getException() { - return exception; - } - - public void setException(Exception excetpion) { - this.exception = excetpion; - } - - private boolean isRequestEntityTooLarge() { - return exception instanceof SdkServiceException && - RetryUtils.isRequestEntityTooLargeException((SdkServiceException) exception); - } - - private boolean isThrottling() { - return exception instanceof SdkServiceException && ((SdkServiceException) exception).isThrottlingException(); - } - - private int size() { - int size = 0; - for (final List values : unprocessedItems.values()) { - size += values.size(); - } - return size; - } - } - - /** - * Used for batch operations where request data is grouped by table name. - */ - static final class StringListMap extends LinkedHashMap> { - private static final long serialVersionUID = -1L; - - public List getPutIfNotExists(final String key) { - List list = get(key); - if (list == null) { - list = new LinkedList<>(); - put(key, list); - } - return list; - } - - public boolean add(final String key, final T value) { - return getPutIfNotExists(key).add(value); - } - - public List> subMaps(final int size, boolean perMap) { - final LinkedList> maps = new LinkedList>(); - int index = 0; - int count = 0; - for (final Entry> entry : entrySet()) { - for (final T value : entry.getValue()) { - if (index == maps.size()) { - maps.add(new StringListMap()); - } - maps.get(index).add(entry.getKey(), value); - index = perMap ? (++count / size) : (++index % size); - } - } - return maps; - } - } - - public static final class BatchGetItemException extends SdkClientException { - private transient Map unprocessedKeys; - private transient Map> responses; - - public BatchGetItemException(String message, Map unprocessedKeys, - Map> responses) { - super(SdkClientException.builder().message(message)); - this.unprocessedKeys = unprocessedKeys; - this.responses = responses; - } - - /** - * Returns a map of tables and their respective keys that were not processed during the operation.. - */ - public Map getUnprocessedKeys() { - return unprocessedKeys; - } - - /** - * Returns a map of the loaded objects. Each key in the map is the name of a DynamoDB table. - * Each value in the map is a list of objects that have been loaded from that table. All - * objects for each table can be cast to the associated user defined type that is - * annotated as mapping that table. - */ - public Map> getResponses() { - return responses; - } - } - - /** - * The handler for saving object using DynamoDBMapper. Caller should - * implement the abstract methods to provide the expected behavior on each - * scenario, and this handler will take care of all the other basic workflow - * and common operations. - */ - protected abstract class SaveObjectHandler { - - protected final Object object; - protected final Class clazz; - /** - * Additional expected value conditions specified by the user. - */ - protected final Map userProvidedExpectedValueConditions; - /** - * Condition operator on the additional expected value conditions - * specified by the user. 
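The user-provided expected-value conditions referenced above usually originate from a DynamoDbSaveExpression passed to save(). A hedged sketch, assuming the expression API mirrors the v1 mapper's withExpectedEntry and withConditionalOperator methods; the "status" attribute is a made-up example and model-package imports are elided:

    // Hypothetical conditional save: only overwrite when the stored status is "ACTIVE".
    static void saveIfActive(DynamoDbMapper mapper, Object item) {
        DynamoDbSaveExpression ifActive = new DynamoDbSaveExpression()
                .withExpectedEntry("status", ExpectedAttributeValue.builder()
                        .value(AttributeValue.builder().s("ACTIVE").build())
                        .build())
                .withConditionalOperator(ConditionalOperator.AND);
        mapper.save(item, ifActive);
    }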
- */ - protected final String userProvidedConditionOperator; - private final String tableName; - private final DynamoDbMapperConfig saveConfig; - private final Map primaryKeys; - private final Map updateValues; - /** - * Any expected value conditions specified by the implementation of - * DynamoDBMapper, e.g. value assertions on versioned attributes. - */ - private final Map internalExpectedValueAssertions; - private final List inMemoryUpdates; - - /** - * Constructs a handler for saving the specified model object. - * - * @param object The model object to be saved. - * @param clazz The domain class of the object. - * @param tableName The table name. - * @param saveConfig The mapper configuration used for this save. - * @param saveExpression The save expression, including the user-provided conditions and an optional logic operator. - */ - public SaveObjectHandler( - Class clazz, - Object object, - String tableName, - DynamoDbMapperConfig saveConfig, - DynamoDbSaveExpression saveExpression) { - - this.clazz = clazz; - this.object = object; - this.tableName = tableName; - this.saveConfig = saveConfig; - - if (saveExpression != null) { - userProvidedExpectedValueConditions = saveExpression - .getExpected(); - userProvidedConditionOperator = saveExpression - .getConditionalOperator(); - } else { - userProvidedExpectedValueConditions = null; - userProvidedConditionOperator = null; - } - - updateValues = new HashMap<>(); - internalExpectedValueAssertions = new HashMap<>(); - inMemoryUpdates = new LinkedList<>(); - primaryKeys = new HashMap<>(); - } - - /** - * The general workflow of a save operation. - */ - public void execute() { - final DynamoDbMapperTableModel model = getTableModel((Class) clazz, saveConfig); - for (final DynamoDbMapperFieldModel field : model.fields()) { - if (canGenerate(model, object, localSaveBehavior(), field)) { - if (field.keyType() != null || field.indexed()) { - onAutoGenerateAssignableKey(field); - } else if (field.versioned()) { - onVersionAttribute(field); - } else { - onAutoGenerate(field); - } - } else if (field.keyType() != null) { - AttributeValue newAttributeValue = field.convert(field.get(object)); - if (newAttributeValue == null) { - throw new DynamoDbMappingException( - clazz.getSimpleName() + "[" + field.name() + "]; null or empty value for primary key" - ); - } - onPrimaryKeyAttributeValue(field.name(), newAttributeValue); - } else { - AttributeValue currentValue = field.convert(field.get(object)); - if (currentValue != null) { - onNonKeyAttribute(field.name(), currentValue); - } else { - onNullNonKeyAttribute(field.name()); - } - } - } - - /* - * Execute the implementation of the low level request. - */ - executeLowLevelRequest(); - - /* - * Finally, after the service call has succeeded, update the - * in-memory object with new field values as appropriate. This - * currently takes into account of auto-generated keys and versioned - * attributes. - */ - for (ValueUpdate update : inMemoryUpdates) { - update.apply(); - } - } - - /** - * Implement this method to do the necessary operations when a primary key - * attribute is set with some value. - * - * @param attributeName - * The name of the primary key attribute. - * @param keyAttributeValue - * The AttributeValue of the primary key attribute as specified in - * the object. 
- */ - protected abstract void onPrimaryKeyAttributeValue(String attributeName, AttributeValue keyAttributeValue); - - /** - * Implement this method for necessary operations when a non-key - * attribute is set a non-null value in the object. - * The default implementation simply adds a "PUT" update for the given attribute. - * - * @param attributeName - * The name of the non-key attribute. - * @param currentValue - * The updated value of the given attribute. - */ - protected void onNonKeyAttribute(String attributeName, AttributeValue currentValue) { - updateValues.put(attributeName, AttributeValueUpdate.builder() - .value(currentValue) - .action("PUT") - .build()); - } - - /** - * Implement this method for necessary operations when a non-key - * attribute is set null in the object. - * - * @param attributeName - * The name of the non-key attribute. - */ - protected abstract void onNullNonKeyAttribute(String attributeName); - - /** - * Implement this method to send the low-level request that is necessary - * to complete the save operation. - */ - protected abstract void executeLowLevelRequest(); - - /** Get the SaveBehavior used locally for this save operation. **/ - protected SaveBehavior localSaveBehavior() { - return saveConfig.saveBehavior(); - } - - /** Get the table name **/ - protected String getTableName() { - return tableName; - } - - /** Get the map of all the specified primamry keys of the saved object. **/ - protected Map getPrimaryKeyAttributeValues() { - return primaryKeys; - } - - /** Get the map of AttributeValueUpdate on each modeled attribute. **/ - protected Map getAttributeValueUpdates() { - return updateValues; - } - - /** - * Merge and return all the expected value conditions (either - * user-specified or imposed by the internal implementation of - * DynamoDBMapper) for this save operation. - */ - protected Map mergeExpectedAttributeValueConditions() { - return DynamoDbMapper.mergeExpectedAttributeValueConditions( - internalExpectedValueAssertions, - userProvidedExpectedValueConditions, - userProvidedConditionOperator); - } - - /** Get the list of all the necessary in-memory update on the object. **/ - protected List getInMemoryUpdates() { - return inMemoryUpdates; - } - - /** - * Save the item using a UpdateItem request. The handler will call this - * method if - *
<ul>
- *   <li>CLOBBER configuration is not being used;
- *   <li>AND the item does not contain auto-generated key value;
- * </ul>
- * <p>
    - * The ReturnedValues parameter for the UpdateItem request is set as - * ALL_NEW, which means the service should return all of the attributes - * of the new version of the item after the update. The handler will use - * the returned attributes to detect silent failure on the server-side. - */ - protected UpdateItemResponse doUpdateItem() { - UpdateItemRequest req = UpdateItemRequest.builder() - .tableName(getTableName()) - .key(getPrimaryKeyAttributeValues()) - .attributeUpdates( - transformAttributeUpdates( - this.clazz, - getTableName(), - getPrimaryKeyAttributeValues(), - getAttributeValueUpdates(), - saveConfig)) - .expected(mergeExpectedAttributeValueConditions()) - .conditionalOperator(userProvidedConditionOperator) - .returnValues(ReturnValue.ALL_NEW) - .build(); - - return db.updateItem(applyUserAgent(req)); - } - - /** - * Save the item using a PutItem request. The handler will call this - * method if - *

- * <ul>
- *   <li>CLOBBER configuration is being used;
- *   <li>OR the item contains auto-generated key value;
- *   <li>OR an UpdateItem request has silently failed (200 response with
- *       no affected attribute), which indicates the key-only-put scenario
- *       that we used to handle by the keyOnlyPut(...) hack.
- * </ul>
    - */ - protected PutItemResponse doPutItem() { - Map attributeValues = convertToItem(getAttributeValueUpdates()); - - attributeValues = transformAttributes( - toParameters(attributeValues, - this.clazz, - getTableName(), - saveConfig)); - PutItemRequest req = PutItemRequest.builder() - .tableName(getTableName()) - .item(attributeValues) - .expected(mergeExpectedAttributeValueConditions()) - .conditionalOperator(userProvidedConditionOperator) - .build(); - - return db.putItem(applyUserAgent(req)); - } - - /** - * Auto-generates the attribute value. - * @param field The mapping details. - */ - private void onAutoGenerate(DynamoDbMapperFieldModel field) { - AttributeValue value = field.convert(field.generate(field.get(object))); - updateValues.put(field.name(), AttributeValueUpdate.builder().action("PUT").value(value).build()); - inMemoryUpdates.add(new ValueUpdate(field, value, object)); - } - - /** - * Auto-generates the key. - */ - private void onAutoGenerateAssignableKey(DynamoDbMapperFieldModel field) { - // Generate the new key value first, then ensure it doesn't exist. - onAutoGenerate(field); - - if (localSaveBehavior() != SaveBehavior.CLOBBER - && !internalExpectedValueAssertions.containsKey(field.name()) - && field.getGenerateStrategy() != DynamoDbAutoGenerateStrategy.ALWAYS) { - // Add an expect clause to make sure that the item - // doesn't already exist, since it's supposed to be new - internalExpectedValueAssertions.put(field.name(), - ExpectedAttributeValue.builder() - .exists(false) - .build()); - } - } - - /** - * Auto-generates the version. - * @param field The mapping details. - */ - private void onVersionAttribute(DynamoDbMapperFieldModel field) { - if (localSaveBehavior() != SaveBehavior.CLOBBER - && !internalExpectedValueAssertions.containsKey(field.name())) { - // First establish the expected (current) value for the - // update call - // For new objects, insist that the value doesn't exist. - // For existing ones, insist it has the old value. - final Object current = field.get(object); - if (current == null) { - internalExpectedValueAssertions.put(field.name(), - ExpectedAttributeValue.builder() - .exists(false) - .build()); - } else { - internalExpectedValueAssertions.put(field.name(), - ExpectedAttributeValue.builder() - .exists(true) - .value(field.convert(current)) - .build()); - } - } - - // Generate the new version value - onAutoGenerate(field); - } - - /** - * Converts the {@link AttributeValueUpdate} map given to an equivalent - * {@link AttributeValue} map. - */ - private Map convertToItem(Map putValues) { - Map map = new HashMap(); - for (Entry entry : putValues.entrySet()) { - String attributeName = entry.getKey(); - AttributeValue attributeValue = entry.getValue().value(); - String attributeAction = entry.getValue().actionAsString(); - - /* - * AttributeValueUpdate allows nulls for its values, since they are - * semantically meaningful. AttributeValues never have null values. 
- */ - if (attributeValue != null - && !AttributeAction.DELETE.toString().equals(attributeAction)) { - map.put(attributeName, attributeValue); - } - } - return map; - } - - private Map transformAttributeUpdates( - final Class clazz, - final String tableName, - final Map keys, - final Map updateValues, - final DynamoDbMapperConfig config) { - Map item = convertToItem(updateValues); - - HashSet keysAdded = new HashSet(); - for (Map.Entry e : keys.entrySet()) { - if (!item.containsKey(e.getKey())) { - keysAdded.add(e.getKey()); - item.put(e.getKey(), e.getValue()); - } - } - - AttributeTransformer.Parameters parameters = - toParameters(item, true, clazz, tableName, config); - - String hashKey = parameters.getHashKeyName(); - - if (!item.containsKey(hashKey)) { - item.put(hashKey, keys.get(hashKey)); - } - - item = transformAttributes(parameters); - - for (Map.Entry entry : item.entrySet()) { - if (keysAdded.contains(entry.getKey())) { - // This was added in for context before calling - // transformAttributes, but isn't actually being changed. - continue; - } - - AttributeValueUpdate update = updateValues.get(entry.getKey()); - if (update != null) { - AttributeValue value = update.value().toBuilder() - .b(entry.getValue().b()) - .bs(entry.getValue().bs()) - .n(entry.getValue().n()) - .ns(entry.getValue().ns()) - .s(entry.getValue().s()) - .ss(entry.getValue().ss()) - .m(entry.getValue().m()) - .l(entry.getValue().l()) - .nul(entry.getValue().nul()) - .bool(entry.getValue().bool()).build(); - - update = update.toBuilder().value(value).build(); - updateValues.put(entry.getKey(), update); - } else { - updateValues.put(entry.getKey(), AttributeValueUpdate.builder() - .value(entry.getValue()) - .action("PUT") - .build()); - } - } - - return updateValues; - } - } - - private static final class ValueUpdate { - private final DynamoDbMapperFieldModel field; - private final AttributeValue newValue; - private final Object target; - - ValueUpdate( - DynamoDbMapperFieldModel field, - AttributeValue newValue, - Object target) { - - this.field = field; - this.newValue = newValue; - this.target = target; - } - - public void apply() { - field.set(target, field.unconvert(newValue)); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperConfig.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperConfig.java deleted file mode 100644 index 783d51e814e5..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperConfig.java +++ /dev/null @@ -1,1134 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.security.SecureRandom; -import java.util.List; -import java.util.Map; -import java.util.Random; -import software.amazon.awssdk.services.dynamodb.model.KeysAndAttributes; -import software.amazon.awssdk.services.dynamodb.model.WriteRequest; - -/** - * Immutable configuration object for service call behavior. An instance of this - * configuration is supplied to every {@link DynamoDbMapper} at construction; if - * not provided explicitly, {@link DynamoDbMapperConfig#DEFAULT} is used. New - * instances can be given to the mapper object on individual save, load, and - * delete operations to override the defaults. For example: - * - *
    - * DynamoDBMapper mapper = new DynamoDBMapper(dynamoDBClient);
    - * // Force this read to be consistent
    - * DomainClass obj = mapper.load(DomainClass.class, key, ConsistentRead.CONSISTENT.config());
    - * // Force this save operation to use putItem rather than updateItem
    - * mapper.save(obj, SaveBehavior.CLOBBER.config());
    - * // Save the object into a different table
    - * mapper.save(obj, new TableNameOverride("AnotherTable").config());
    - * // Delete the object even if the version field is out of date
    - * mapper.delete(obj, SaveBehavior.CLOBBER.config());
    - * 
    - */ -public class DynamoDbMapperConfig { - - /** - * Default configuration; these defaults are also applied by the mapper - * when only partial configurations are specified. - * - * @see SaveBehavior#UPDATE - * @see ConsistentRead#EVENTUAL - * @see PaginationLoadingStrategy#LAZY_LOADING - * @see DefaultTableNameResolver#INSTANCE - * @see DefaultBatchWriteRetryStrategy#INSTANCE - * @see DefaultBatchLoadRetryStrategy#INSTANCE - * @see DynamoDbTypeConverterFactory#standard - * @see ConversionSchemas#DEFAULT - */ - public static final DynamoDbMapperConfig DEFAULT = builder() - .withSaveBehavior(SaveBehavior.UPDATE) - .withConsistentReads(ConsistentRead.EVENTUAL) - .withPaginationLoadingStrategy(PaginationLoadingStrategy.LAZY_LOADING) - .withTableNameResolver(DefaultTableNameResolver.INSTANCE) - .withBatchWriteRetryStrategy(DefaultBatchWriteRetryStrategy.INSTANCE) - .withBatchLoadRetryStrategy(DefaultBatchLoadRetryStrategy.INSTANCE) - .withTypeConverterFactory(DynamoDbTypeConverterFactory.standard()) - .withConversionSchema(ConversionSchemas.DEFAULT) - .build(); - private final SaveBehavior saveBehavior; - private final ConsistentRead consistentRead; - private final TableNameOverride tableNameOverride; - private final TableNameResolver tableNameResolver; - private final ObjectTableNameResolver objectTableNameResolver; - private final PaginationLoadingStrategy paginationLoadingStrategy; - private final ConversionSchema conversionschema; - private final BatchWriteRetryStrategy batchWriteRetryStrategy; - private final BatchLoadRetryStrategy batchLoadRetryStrategy; - private final DynamoDbTypeConverterFactory typeConverterFactory; - - /** - * Internal constructor; builds from the builder. - */ - private DynamoDbMapperConfig(final DynamoDbMapperConfig.Builder builder) { - this.saveBehavior = builder.saveBehavior; - this.consistentRead = builder.consistentRead; - this.tableNameOverride = builder.tableNameOverride; - this.tableNameResolver = builder.tableNameResolver; - this.objectTableNameResolver = builder.objectTableNameResolver; - this.paginationLoadingStrategy = builder.paginationLoadingStrategy; - this.conversionschema = builder.conversionschema; - this.batchWriteRetryStrategy = builder.batchWriteRetryStrategy; - this.batchLoadRetryStrategy = builder.batchLoadRetryStrategy; - this.typeConverterFactory = builder.typeConverterFactory; - } - - private DynamoDbMapperConfig( - SaveBehavior saveBehavior, - ConsistentRead consistentRead, - TableNameOverride tableNameOverride, - TableNameResolver tableNameResolver, - ObjectTableNameResolver objectTableNameResolver, - PaginationLoadingStrategy paginationLoadingStrategy, - ConversionSchema conversionschema, - BatchWriteRetryStrategy batchWriteRetryStrategy, - BatchLoadRetryStrategy batchLoadRetryStrategy) { - - this.saveBehavior = saveBehavior; - this.consistentRead = consistentRead; - this.tableNameOverride = tableNameOverride; - this.tableNameResolver = tableNameResolver; - this.objectTableNameResolver = objectTableNameResolver; - this.paginationLoadingStrategy = paginationLoadingStrategy; - this.conversionschema = conversionschema; - this.batchWriteRetryStrategy = batchWriteRetryStrategy; - this.batchLoadRetryStrategy = batchLoadRetryStrategy; - this.typeConverterFactory = null; - } - - /** - * Constructs a new configuration object with the save behavior given. 
- * @see SaveBehavior#config - */ - @Deprecated - public DynamoDbMapperConfig(SaveBehavior saveBehavior) { - this(saveBehavior, null, null, null, null, null, - DEFAULT.getConversionSchema(), DEFAULT.batchWriteRetryStrategy(), DEFAULT.batchLoadRetryStrategy()); - } - - /** - * Constructs a new configuration object with the consistent read behavior - * given. - * @see ConsistentRead#config - */ - @Deprecated - public DynamoDbMapperConfig(ConsistentRead consistentRead) { - this(null, consistentRead, null, null, null, null, - DEFAULT.getConversionSchema(), DEFAULT.batchWriteRetryStrategy(), DEFAULT.batchLoadRetryStrategy()); - } - - /** - * Constructs a new configuration object with the table name override given. - * @see TableNameOverride#config - */ - @Deprecated - public DynamoDbMapperConfig(TableNameOverride tableNameOverride) { - this(null, null, tableNameOverride, null, null, null, - DEFAULT.getConversionSchema(), DEFAULT.batchWriteRetryStrategy(), DEFAULT.batchLoadRetryStrategy()); - } - - /** - * Constructs a new configuration object with the table name resolver strategy given. - * @see DynamoDBConfig#builder - */ - @Deprecated - public DynamoDbMapperConfig(TableNameResolver tableNameResolver) { - this(null, null, null, tableNameResolver, null, null, - DEFAULT.getConversionSchema(), DEFAULT.batchWriteRetryStrategy(), DEFAULT.batchLoadRetryStrategy()); - } - - /** - * Constructs a new configuration object with the object table name resolver strategy given. - * @see DynamoDBConfig#builder - */ - @Deprecated - public DynamoDbMapperConfig(ObjectTableNameResolver objectTableNameResolver) { - this(null, null, null, null, objectTableNameResolver, null, - DEFAULT.getConversionSchema(), DEFAULT.batchWriteRetryStrategy(), DEFAULT.batchLoadRetryStrategy()); - } - - /** - * Constructs a new configuration object with the table name resolver strategies given. - * @see DynamoDBConfig#builder - */ - @Deprecated - public DynamoDbMapperConfig(TableNameResolver tableNameResolver, ObjectTableNameResolver objectTableNameResolver) { - this(null, null, null, tableNameResolver, objectTableNameResolver, null, - DEFAULT.getConversionSchema(), DEFAULT.batchWriteRetryStrategy(), DEFAULT.batchLoadRetryStrategy()); - } - - /** - * Constructs a new configuration object with the pagination loading - * strategy given. - * @see PaginationLoadingStrategy#config - */ - @Deprecated - public DynamoDbMapperConfig( - PaginationLoadingStrategy paginationLoadingStrategy) { - - this(null, null, null, null, null, paginationLoadingStrategy, - DEFAULT.getConversionSchema(), DEFAULT.batchWriteRetryStrategy(), DEFAULT.batchLoadRetryStrategy()); - } - - /** - * Constructs a new configuration object with the conversion schema given. - * @see DynamoDBConfig#builder - */ - @Deprecated - public DynamoDbMapperConfig(ConversionSchema conversionschema) { - this(null, null, null, null, null, null, - conversionschema, DEFAULT.batchWriteRetryStrategy(), DEFAULT.batchLoadRetryStrategy()); - } - - /** - * Constructs a new configuration object from two others: a set of defaults - * and a set of overrides. Any non-null overrides will be applied to the - * defaults. - *

    - * Used internally to merge the {@link DynamoDbMapperConfig} provided at - * construction with an overriding object for a particular operation. - * - * @param defaults - * The default mapper configuration values. - * @param overrides - * The overridden mapper configuration values. Any non-null - * config settings will be applied to the returned object. - * @see DynamoDBConfig#builder - */ - @Deprecated - public DynamoDbMapperConfig( - DynamoDbMapperConfig defaults, - DynamoDbMapperConfig overrides) { - this(builder().merge(defaults).merge(overrides)); - } - - /** - * Creates a new empty builder. - */ - public static Builder builder() { - return new Builder(false); - } - - /** - * Merges these configuration values with the specified overrides; may - * simply return this instance if overrides are the same or null. - * @param overrides The overrides to merge. - * @return This if the overrides are same or null, or a new merged config. - */ - final DynamoDbMapperConfig merge(final DynamoDbMapperConfig overrides) { - return overrides == null || this.equals(overrides) ? this : builder().merge(this).merge(overrides).build(); - } - - public BatchLoadRetryStrategy batchLoadRetryStrategy() { - return batchLoadRetryStrategy; - } - - /** - * Returns the save behavior for this configuration. - */ - public SaveBehavior saveBehavior() { - return saveBehavior; - } - - /** - * Returns the consistent read behavior for this configuration. - */ - public ConsistentRead getConsistentRead() { - return consistentRead; - } - - /** - * Returns the table name override for this configuration. This value will - * override the table name specified in a {@link DynamoDbTable} annotation, - * either by replacing the table name entirely or else by pre-pending a - * string to each table name. This is useful for partitioning data in - * multiple tables at runtime. - * - * @see TableNameOverride#withTableNamePrefix(String) - * @see TableNameOverride#withTableNameReplacement(String) - */ - public TableNameOverride getTableNameOverride() { - return tableNameOverride; - } - - /** - * Returns the table name resolver for this configuration. This value will - * be used to determine the table name for classes. It can be - * used for more powerful customization of table name than is possible using - * only {@link TableNameOverride}. - * - * @see TableNameResolver#getTableName(Class, DynamoDbMapperConfig) - */ - public TableNameResolver getTableNameResolver() { - return tableNameResolver; - } - - /** - * Returns the object table name resolver for this configuration. This value will - * be used to determine the table name for objects. It can be - * used for more powerful customization of table name than is possible using - * only {@link TableNameOverride}. - * - * @see ObjectTableNameResolver#getTableName(Object, DynamoDbMapperConfig) - */ - public ObjectTableNameResolver getObjectTableNameResolver() { - return objectTableNameResolver; - } - - /** - * Returns the pagination loading strategy for this configuration. 
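To make the table-name override described above concrete, a minimal sketch; the "test-" prefix, the MyItem model class, and the helper method are made-up examples:

    // Sketch: route this load to a prefixed copy of the table ("test-MyItem" instead of "MyItem").
    static MyItem loadFromTestTable(DynamoDbMapper mapper, String hashKey) {
        DynamoDbMapperConfig prefixed = DynamoDbMapperConfig.builder()
                .withTableNameOverride(DynamoDbMapperConfig.TableNameOverride.withTableNamePrefix("test-"))
                .build();
        return mapper.load(MyItem.class, hashKey, prefixed);
    }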
- */ - public PaginationLoadingStrategy getPaginationLoadingStrategy() { - return paginationLoadingStrategy; - } - - /** - * @return the conversion schema for this config object - */ - public ConversionSchema getConversionSchema() { - return conversionschema; - } - - /** - * @return the BatchWriteRetryStrategy for this config object - */ - public BatchWriteRetryStrategy batchWriteRetryStrategy() { - return batchWriteRetryStrategy; - } - - /** - * @return the current type-converter factory - */ - public final DynamoDbTypeConverterFactory getTypeConverterFactory() { - return typeConverterFactory; - } - - /** - * Enumeration of behaviors for the save operation. - */ - public enum SaveBehavior { - /** - * UPDATE will not affect unmodeled attributes on a save operation and a - * null value for the modeled attribute will remove it from that item in - * DynamoDB. - *

    - * Because of the limitation of updateItem request, the implementation - * of UPDATE will send a putItem request when a key-only object is being - * saved, and it will send another updateItem request if the given - * key(s) already exists in the table. - *

    - * By default, the mapper uses UPDATE. - */ - UPDATE, - - /** - * UPDATE_SKIP_NULL_ATTRIBUTES is similar to UPDATE, except that it - * ignores any null value attribute(s) and will NOT remove them from - * that item in DynamoDB. It also guarantees to send only one single - * updateItem request, no matter the object is key-only or not. - */ - UPDATE_SKIP_NULL_ATTRIBUTES, - - /** - * CLOBBER will clear and replace all attributes, included unmodeled - * ones, (delete and recreate) on save. Versioned field constraints will - * also be disregarded. - */ - CLOBBER, - - /** - * APPEND_SET treats scalar attributes (String, Number, Binary) the same - * as UPDATE_SKIP_NULL_ATTRIBUTES does. However, for set attributes, it - * will append to the existing attribute value, instead of overriding - * it. Caller needs to make sure that the modeled attribute type matches - * the existing set type, otherwise it would result in a service - * exception. - */ - APPEND_SET; - - private final DynamoDbMapperConfig config = builder().withSaveBehavior(this).build(); - - public final DynamoDbMapperConfig config() { - return this.config; - } - } - - /** - * Enumeration of consistent read behavior. - *

    - * CONSISTENT uses consistent reads, EVENTUAL does not. Consistent reads - * have implications for performance and billing; see the service - * documentation for details. - *

    - * By default, the mapper uses eventual consistency. - */ - public enum ConsistentRead { - CONSISTENT, - EVENTUAL; - - private final DynamoDbMapperConfig config = builder().withConsistentReads(this).build(); - - public final DynamoDbMapperConfig config() { - return this.config; - } - } - - /** - * Enumeration of pagination loading strategy. - */ - public enum PaginationLoadingStrategy { - /** - * Paginated list is lazily loaded when possible, and all loaded results - * are kept in the memory. - *

    - * By default, the mapper uses LAZY_LOADING. - */ - LAZY_LOADING, - - /** - * Only supports using iterator to read from the paginated list. All - * other list operations will return UnsupportedOperationException - * immediately. During the iteration, the list will clear all the - * previous results before loading the next page, so that the list will - * keep at most one page of the loaded results in memory. This also - * means the list could only be iterated once. - *

    - * Use this configuration to reduce the memory overhead when handling - * large DynamoDB items. - */ - ITERATION_ONLY, - - /** - * Paginated list will eagerly load all the paginated results from - * DynamoDB as soon as the list is initialized. - */ - EAGER_LOADING; - - private final DynamoDbMapperConfig config = builder().withPaginationLoadingStrategy(this).build(); - - public final DynamoDbMapperConfig config() { - return this.config; - } - } - - /** - * Interface for a strategy used to determine the table name of an object based on it's class. - * This resolver is used when an object isn't available such as in - * {@link DynamoDbMapper#query(Class, DynamoDbQueryExpression)} - * - * @see ObjectTableNameResolver - * @author Raniz - */ - public interface TableNameResolver { - - /** - * Get the table name for a class. This method is used when an object is not available - * such as when creating requests for scan or query operations. - * - * @param clazz The class to get the table name for - * @param config The {@link DynamoDbMapperConfig} - * @return The table name to use for instances of clazz - */ - String getTableName(Class clazz, DynamoDbMapperConfig config); - } - - /** - * Interface for a strategy used to determine the table name of an object based on it's class. - * This resolver is used when an object is available such as in - * {@link DynamoDbMapper#316 - * (java.util.List)}. - * - * If no table name resolver for objects is set, {@link DynamoDbMapper} reverts to using the - * {@link TableNameResolver} on each object's class. - * - * @see TableNameResolver - * @author Raniz - */ - public interface ObjectTableNameResolver { - - /** - * Get the table name for an object. - * - * @param object The object to get the table name for - * @param config The {@link DynamoDbMapperConfig} - * @return The table name to use for object - */ - String getTableName(Object object, DynamoDbMapperConfig config); - - } - - /** - * DynamoDBMapper#batchWrite takes arbitrary number of save/delete requests - * and breaks them into smaller chunks that can be accepted by the service - * API. Each chunk will be sent to DynamoDB via the BatchWriteItem API, and - * if it fails because the table's provisioned throughput is exceeded or an - * internal processing failure occurs, the failed requests are returned in - * the UnprocessedItems response parameter. This interface allows you to - * control the retry strategy when such scenario occurs. - * - * @see DynamoDbMapper#batchWrite(List, List, DynamoDbMapperConfig) - * @see DynamoDB service API reference -- BatchWriteItem - */ - public interface BatchWriteRetryStrategy { - - /** - * Returns the max number of retries to be performed if the service - * returns UnprocessedItems in the response. - * - * @param batchWriteItemInput - * the one batch of write requests that is being sent to the - * BatchWriteItem API. - * @return max number of retries to be performed if the service returns - * UnprocessedItems in the response, or a negative value if you - * want it to keep retrying until all the UnprocessedItems are - * fulfilled. - */ - int maxRetryOnUnprocessedItems( - Map> batchWriteItemInput); - - /** - * Returns the delay (in milliseconds) before retrying on - * UnprocessedItems. - * - * @param unprocessedItems - * the UnprocessedItems returned by the service in the last - * BatchWriteItem call - * @param retriesAttempted - * The number of times we have attempted to resend - * UnprocessedItems. 
- * @return the delay (in milliseconds) before resending - * UnprocessedItems. - */ - long getDelayBeforeRetryUnprocessedItems( - Map> unprocessedItems, - int retriesAttempted); - } - - /** - * {@link DynamoDbMapper#batchLoad(List)} breaks the requested items in batches of maximum size 100. - * When calling the Dyanmo Db client, there is a chance that due to throttling, some unprocessed keys will be returned. - * This interfaces controls whether we need to retry these unprocessed keys and it also controls the strategy as to how - * retries should be handled. - */ - public interface BatchLoadRetryStrategy { - /** - * Checks if the batch load request should be retried. - * @param batchLoadContext see {@link BatchLoadContext} - * - * @return a boolean true or false value. - */ - boolean shouldRetry(BatchLoadContext batchLoadContext); - - /** - * Returns delay(in milliseconds) before retrying Unprocessed keys - * - * @param batchLoadContext see {@link BatchLoadContext} - * @return delay(in milliseconds) before attempting to read unprocessed keys - */ - long getDelayBeforeNextRetry(BatchLoadContext batchLoadContext); - } - - /** - * A fluent builder for DynamoDBMapperConfig objects. - */ - public static class Builder { - - private SaveBehavior saveBehavior; - private ConsistentRead consistentRead; - private TableNameOverride tableNameOverride; - private TableNameResolver tableNameResolver; - private ObjectTableNameResolver objectTableNameResolver; - private PaginationLoadingStrategy paginationLoadingStrategy; - private ConversionSchema conversionschema; - private BatchWriteRetryStrategy batchWriteRetryStrategy; - private BatchLoadRetryStrategy batchLoadRetryStrategy; - private DynamoDbTypeConverterFactory typeConverterFactory; - - /** - * Creates a new builder initialized with the {@link #DEFAULT} values. - */ - public Builder() { - this(true); - } - - /** - * Creates a new builder, optionally initialized with the defaults. - */ - private Builder(final boolean defaults) { - if (defaults == true) { - saveBehavior = DEFAULT.saveBehavior(); - consistentRead = DEFAULT.getConsistentRead(); - paginationLoadingStrategy = DEFAULT.getPaginationLoadingStrategy(); - conversionschema = DEFAULT.getConversionSchema(); - batchWriteRetryStrategy = DEFAULT.batchWriteRetryStrategy(); - batchLoadRetryStrategy = DEFAULT.batchLoadRetryStrategy(); - } - } - - /** - * Merges any non-null configuration values for the specified overrides. 
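As a concrete reading of the BatchWriteRetryStrategy contract documented above, a hedged sketch of a capped exponential-backoff implementation; the class name, retry count, and delay constants are arbitrary choices, not part of this file:

    import java.util.List;
    import java.util.Map;
    import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig;
    import software.amazon.awssdk.services.dynamodb.model.WriteRequest;

    // Retries unprocessed items up to 5 times, doubling the delay each attempt (capped at 20 seconds).
    public class BackoffBatchWriteRetryStrategy implements DynamoDbMapperConfig.BatchWriteRetryStrategy {
        @Override
        public int maxRetryOnUnprocessedItems(Map<String, List<WriteRequest>> batchWriteItemInput) {
            return 5;
        }

        @Override
        public long getDelayBeforeRetryUnprocessedItems(Map<String, List<WriteRequest>> unprocessedItems,
                                                        int retriesAttempted) {
            return Math.min(1000L * (1L << retriesAttempted), 20_000L);
        }
    }

It would then be supplied through DynamoDbMapperConfig.builder().withBatchWriteRetryStrategy(new BackoffBatchWriteRetryStrategy()).build().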
- */ - private Builder merge(final DynamoDbMapperConfig o) { - if (o == null) { - return this; - } - if (o.saveBehavior != null) { - saveBehavior = o.saveBehavior; - } - if (o.consistentRead != null) { - consistentRead = o.consistentRead; - } - if (o.tableNameOverride != null) { - tableNameOverride = o.tableNameOverride; - } - if (o.tableNameResolver != null) { - tableNameResolver = o.tableNameResolver; - } - if (o.objectTableNameResolver != null) { - objectTableNameResolver = o.objectTableNameResolver; - } - if (o.paginationLoadingStrategy != null) { - paginationLoadingStrategy = o.paginationLoadingStrategy; - } - if (o.conversionschema != null) { - conversionschema = o.conversionschema; - } - if (o.batchWriteRetryStrategy != null) { - batchWriteRetryStrategy = o.batchWriteRetryStrategy; - } - if (o.batchLoadRetryStrategy != null) { - batchLoadRetryStrategy = o.batchLoadRetryStrategy; - } - if (o.typeConverterFactory != null) { - typeConverterFactory = o.typeConverterFactory; - } - return this; - } - - /** - * @return the currently-configured save behavior - */ - public SaveBehavior saveBehavior() { - return saveBehavior; - } - - /** - * @param value the new save behavior - */ - public void setSaveBehavior(SaveBehavior value) { - saveBehavior = value; - } - - /** - * @param value the new save behavior - * @return this builder - */ - public Builder withSaveBehavior(SaveBehavior value) { - setSaveBehavior(value); - return this; - } - - - /** - * Returns the consistent read behavior. Currently - * this value is applied only in load and batch load operations of the - * DynamoDBMapper. - * @return the currently-configured consistent read behavior. - */ - public ConsistentRead getConsistentRead() { - return consistentRead; - } - - /** - * Sets the consistent read behavior. Currently - * this value is applied only in load and batch load operations of the - * DynamoDBMapper. - * @param value the new consistent read behavior. - */ - public void setConsistentRead(ConsistentRead value) { - consistentRead = value; - } - - /** - * Sets the consistent read behavior. Currently - * this value is applied only in load and batch load operations of the - * DynamoDBMapper. - * @param value the new consistent read behavior - * @return this builder. 
- * - */ - public Builder withConsistentReads(ConsistentRead value) { - setConsistentRead(value); - return this; - } - - - /** - * @return the current table name override - */ - public TableNameOverride getTableNameOverride() { - return tableNameOverride; - } - - /** - * @param value the new table name override - */ - public void setTableNameOverride(TableNameOverride value) { - tableNameOverride = value; - } - - /** - * @param value the new table name override - * @return this builder - */ - public Builder withTableNameOverride(TableNameOverride value) { - setTableNameOverride(value); - return this; - } - - - /** - * @return the current table name resolver - */ - public TableNameResolver getTableNameResolver() { - return tableNameResolver; - } - - /** - * @param value the new table name resolver - */ - public void setTableNameResolver(TableNameResolver value) { - tableNameResolver = value; - } - - /** - * @param value the new table name resolver - * @return this builder - */ - public Builder withTableNameResolver(TableNameResolver value) { - setTableNameResolver(value); - return this; - } - - - /** - * @return the current object table name resolver - */ - public ObjectTableNameResolver getObjectTableNameResolver() { - return objectTableNameResolver; - } - - /** - * @param value the new object table name resolver - */ - public void setObjectTableNameResolver(ObjectTableNameResolver value) { - objectTableNameResolver = value; - } - - /** - * @param value the new object table name resolver - * @return this builder - */ - public Builder withObjectTableNameResolver(ObjectTableNameResolver value) { - setObjectTableNameResolver(value); - return this; - } - - /** - * @return the currently-configured pagination loading strategy - */ - public PaginationLoadingStrategy getPaginationLoadingStrategy() { - return paginationLoadingStrategy; - } - - /** - * @param value the new pagination loading strategy - */ - public void setPaginationLoadingStrategy( - PaginationLoadingStrategy value) { - - paginationLoadingStrategy = value; - } - - /** - * @param value the new pagination loading strategy - * @return this builder - */ - public Builder withPaginationLoadingStrategy( - PaginationLoadingStrategy value) { - - setPaginationLoadingStrategy(value); - return this; - } - - /** - * @return the current conversion schema - */ - public ConversionSchema getConversionSchema() { - return conversionschema; - } - - /** - * @param value the new conversion schema - */ - public void setConversionSchema(ConversionSchema value) { - conversionschema = value; - } - - /** - * @param value the new conversion schema - * @return this builder - */ - public Builder withConversionSchema(ConversionSchema value) { - setConversionSchema(value); - return this; - } - - /** - * @return the current BatchWriteRetryStrategy - */ - public BatchWriteRetryStrategy batchWriteRetryStrategy() { - return batchWriteRetryStrategy; - } - - /** - * @param value the new BatchWriteRetryStrategy - */ - public void setBatchWriteRetryStrategy( - BatchWriteRetryStrategy value) { - this.batchWriteRetryStrategy = value; - } - - /** - * @param value the new BatchWriteRetryStrategy - * @return this builder - */ - public Builder withBatchWriteRetryStrategy( - BatchWriteRetryStrategy value) { - setBatchWriteRetryStrategy(value); - return this; - } - - public BatchLoadRetryStrategy batchLoadRetryStrategy() { - return batchLoadRetryStrategy; - } - - /** - * @param value the new BatchLoadRetryStrategy - */ - public void setBatchLoadRetryStrategy( - 
BatchLoadRetryStrategy value) { - this.batchLoadRetryStrategy = value; - } - - /** - * @param value the new BatchLoadRetryStrategy - * @return this builder - */ - public Builder withBatchLoadRetryStrategy( - BatchLoadRetryStrategy value) { - //set the no retry strategy if the user overrides the default with null - if (value == null) { - value = NoRetryBatchLoadRetryStrategy.INSTANCE; - } - setBatchLoadRetryStrategy(value); - return this; - } - - /** - * @return the current type-converter factory - */ - public final DynamoDbTypeConverterFactory getTypeConverterFactory() { - return typeConverterFactory; - } - - /** - * @param value the new type-converter factory - */ - public final void setTypeConverterFactory(DynamoDbTypeConverterFactory value) { - this.typeConverterFactory = value; - } - - /** - * The type-converter factory for scalar conversions. - *

-         * To override standard type-conversions,
-         *
-         * DynamoDBMapperConfig config = DynamoDBMapperConfig.builder()
-         *     .withTypeConverterFactory(DynamoDBTypeConverterFactory.standard().override()
-         *         .with(String.class, MyObject.class, new StringToMyObjectConverter())
-         *         .build())
-         *     .build();
-         *
-         * Then, on the property, specify the attribute binding,
-         *
-         * @DynamoDBTyped(DynamoDBAttributeType.S)
-         * public MyObject myObject()
-         *
    - * @param value the new type-converter factory - * @return this builder - */ - public final Builder withTypeConverterFactory(DynamoDbTypeConverterFactory value) { - setTypeConverterFactory(value); - return this; - } - - /** - * Builds a new {@code DynamoDBMapperConfig} object. - * - * @return the new, immutable config object - */ - public DynamoDbMapperConfig build() { - return new DynamoDbMapperConfig(this); - } - } - - /** - * Allows overriding the table name declared on a domain class by the - * {@link DynamoDbTable} annotation. - */ - public static final class TableNameOverride { - - private final String tableNameOverride; - private final String tableNamePrefix; - private final DynamoDbMapperConfig config = builder().withTableNameOverride(this).build(); - - private TableNameOverride(String tableNameOverride, String tableNamePrefix) { - this.tableNameOverride = tableNameOverride; - this.tableNamePrefix = tableNamePrefix; - } - - /** - * @see TableNameOverride#withTableNameReplacement(String) - */ - public TableNameOverride(String tableNameOverride) { - this(tableNameOverride, null); - } - - /** - * Returns a new {@link TableNameOverride} object that will prepend the - * given string to every table name. - */ - public static TableNameOverride withTableNamePrefix( - String tableNamePrefix) { - - return new TableNameOverride(null, tableNamePrefix); - } - - /** - * Returns a new {@link TableNameOverride} object that will replace - * every table name in requests with the given string. - */ - public static TableNameOverride withTableNameReplacement( - String tableNameReplacement) { - - return new TableNameOverride(tableNameReplacement, null); - } - - /** - * Returns the table name to use for all requests. Exclusive with - * {@link TableNameOverride#getTableNamePrefix()} - * - * @see DynamoDbMapperConfig#getTableNameOverride() - */ - public String getTableName() { - return tableNameOverride; - } - - /** - * Returns the table name prefix to prepend the table name for all - * requests. Exclusive with {@link TableNameOverride#getTableName()} - * - * @see DynamoDbMapperConfig#getTableNameOverride() - */ - public String getTableNamePrefix() { - return tableNamePrefix; - } - - public DynamoDbMapperConfig config() { - return this.config; - } - } - - /** - * Default implementation of {@link TableNameResolver} that mimics the behavior - * of DynamoDBMapper before the addition of {@link TableNameResolver}. - * - * @author Raniz - */ - public static class DefaultTableNameResolver implements TableNameResolver { - public static final DefaultTableNameResolver INSTANCE = new DefaultTableNameResolver(); - private final DynamoDbMapperConfig config = builder().withTableNameResolver(this).build(); - - @Override - public String getTableName(Class clazz, DynamoDbMapperConfig config) { - final TableNameOverride override = config.getTableNameOverride(); - - if (override != null) { - final String tableName = override.getTableName(); - if (tableName != null) { - return tableName; - } - } - - final StandardBeanProperties.Beans beans = StandardBeanProperties.of(clazz); - if (beans.properties().tableName() == null) { - throw new DynamoDbMappingException(clazz + " not annotated with @DynamoDBTable"); - } - - final String prefix = override == null ? null : override.getTableNamePrefix(); - return prefix == null ? 
beans.properties().tableName() : prefix + beans.properties().tableName(); - } - - public final DynamoDbMapperConfig config() { - return this.config; - } - } - - /** - * This strategy, like name suggests will not attempt any retries on Unprocessed keys - * - * @author smihir - * - */ - public static class NoRetryBatchLoadRetryStrategy implements BatchLoadRetryStrategy { - public static final NoRetryBatchLoadRetryStrategy INSTANCE = new NoRetryBatchLoadRetryStrategy(); - private final DynamoDbMapperConfig config = builder().withBatchLoadRetryStrategy(this).build(); - - /* (non-Javadoc) - * @see BatchLoadRetryStrategy#maxRetryOnUnprocessedKeys(java.util.Map, java.util.Map) - */ - @Override - public boolean shouldRetry(final BatchLoadContext batchLoadContext) { - return false; - } - - /* (non-Javadoc) - * @see BatchLoadRetryStrategy#getDelayBeforeNextRetry(java.util.Map, int) - */ - @Override - public long getDelayBeforeNextRetry(final BatchLoadContext batchLoadContext) { - return -1; - } - - public final DynamoDbMapperConfig config() { - return this.config; - } - } - - /** - * This is the default strategy. - * If unprocessed keys is equal to requested keys, the request will retried 5 times with a back off strategy - * with maximum back off of 3 seconds - * If few of the keys have been processed, the retries happen without a delay. - * - * @author smihir - * - */ - public static class DefaultBatchLoadRetryStrategy implements BatchLoadRetryStrategy { - public static final DefaultBatchLoadRetryStrategy INSTANCE = new DefaultBatchLoadRetryStrategy(); - - private static final int MAX_RETRIES = 5; - private static final long MAX_BACKOFF_IN_MILLISECONDS = 1000 * 3L; - private final DynamoDbMapperConfig config = builder().withBatchLoadRetryStrategy(this).build(); - - @Override - public long getDelayBeforeNextRetry(final BatchLoadContext batchLoadContext) { - Map requestedKeys = batchLoadContext.batchGetItemRequest().requestItems(); - Map unprocessedKeys = batchLoadContext.batchGetItemResponse() - .unprocessedKeys(); - - long delay = 0; - //Exponential backoff only when all keys are unprocessed - if (unprocessedKeys != null && requestedKeys != null && unprocessedKeys.size() == requestedKeys.size()) { - Random random = new SecureRandom(); - long scaleFactor = 500L + random.nextInt(100); - int retriesAttempted = batchLoadContext.getRetriesAttempted(); - delay = (long) (Math.pow(2, retriesAttempted) * scaleFactor); - delay = Math.min(delay, MAX_BACKOFF_IN_MILLISECONDS); - } - return delay; - } - - @Override - public boolean shouldRetry(BatchLoadContext batchLoadContext) { - Map unprocessedKeys = batchLoadContext.batchGetItemResponse().unprocessedKeys(); - return unprocessedKeys != null && unprocessedKeys.size() > 0 && batchLoadContext.getRetriesAttempted() < MAX_RETRIES; - } - - public final DynamoDbMapperConfig config() { - return this.config; - } - } - - /** - * The default BatchWriteRetryStrategy which always retries on - * UnprocessedItem up to a maximum number of times and use exponential - * backoff with random scale factor. 
- */ - public static class DefaultBatchWriteRetryStrategy implements BatchWriteRetryStrategy { - public static final DefaultBatchWriteRetryStrategy INSTANCE = new DefaultBatchWriteRetryStrategy(); - - private static final long MAX_BACKOFF_IN_MILLISECONDS = 1_000 * 3L; - private static final int DEFAULT_MAX_RETRY = -1; - - private final int maxRetry; - private final DynamoDbMapperConfig config = builder().withBatchWriteRetryStrategy(this).build(); - - /** - * Keep retrying until success, with default backoff. - */ - public DefaultBatchWriteRetryStrategy() { - this(DEFAULT_MAX_RETRY); - } - - public DefaultBatchWriteRetryStrategy(int maxRetry) { - this.maxRetry = maxRetry; - } - - @Override - public int maxRetryOnUnprocessedItems( - Map> batchWriteItemInput) { - return maxRetry; - } - - @Override - public long getDelayBeforeRetryUnprocessedItems( - Map> unprocessedItems, - int retriesAttempted) { - - if (retriesAttempted < 0) { - return 0; - } - - Random random = new SecureRandom(); - long scaleFactor = 1_000L + random.nextInt(200); - long delay = (long) (Math.pow(2, retriesAttempted) * scaleFactor); - return Math.min(delay, MAX_BACKOFF_IN_MILLISECONDS); - } - - public final DynamoDbMapperConfig config() { - return this.config; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperFieldModel.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperFieldModel.java deleted file mode 100644 index 991f2d32a03d..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperFieldModel.java +++ /dev/null @@ -1,514 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGenerateStrategy.ALWAYS; -import static software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Vector.LIST; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.BEGINS_WITH; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.BETWEEN; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.CONTAINS; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.EQ; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.GE; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.GT; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.IN; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.LE; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.LT; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.NE; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.NOT_CONTAINS; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.NOT_NULL; -import static software.amazon.awssdk.services.dynamodb.model.ComparisonOperator.NULL; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.KeyType; - -/** - * Field model. - * - * @param The object type. - * @param The field model type. - */ -public class DynamoDbMapperFieldModel implements DynamoDbAutoGenerator, DynamoDbTypeConverter { - - private final DynamoDbMapperFieldModel.Properties properties; - - - private final DynamoDbTypeConverter converter; - private final DynamoDbAttributeType attributeType; - private final DynamoDbMapperFieldModel.Reflect reflect; - - /** - * Creates a new field model instance. - * @param builder The builder. - */ - private DynamoDbMapperFieldModel(final DynamoDbMapperFieldModel.Builder builder) { - this.properties = builder.properties; - this.converter = builder.converter; - this.attributeType = builder.attributeType; - this.reflect = builder.reflect; - } - - /** - * @deprecated replaced by {@link DynamoDbMapperFieldModel#name} - */ - @Deprecated - public String getDynamoDbAttributeName() { - return properties.attributeName(); - } - - /** - * @deprecated replaced by {@link DynamoDbMapperFieldModel#attributeType} - */ - @Deprecated - public DynamoDbAttributeType getDynamoDbAttributeType() { - return attributeType; - } - - /** - * Gets the attribute name. - * @return The attribute name. - */ - public final String name() { - return properties.attributeName(); - } - - /** - * Gets the value from the object instance. - * @param object The object instance. - * @return The value. - */ - public final V get(final T object) { - return reflect.get(object); - } - - /** - * Sets the value on the object instance. - * @param object The object instance. - * @param value The value. 
- */ - public final void set(final T object, final V value) { - reflect.set(object, value); - } - - /** - * {@inheritDoc} - */ - @Override - public final DynamoDbAutoGenerateStrategy getGenerateStrategy() { - if (properties.autoGenerator() != null) { - return properties.autoGenerator().getGenerateStrategy(); - } - return null; - } - - /** - * {@inheritDoc} - */ - @Override - public final V generate(final V currentValue) { - return properties.autoGenerator().generate(currentValue); - } - - /** - * {@inheritDoc} - */ - @Override - public final AttributeValue convert(final V object) { - AttributeValue v = converter.convert(object); - return v; - } - - /** - * {@inheritDoc} - */ - @Override - public final V unconvert(final AttributeValue object) { - return converter.unconvert(object); - } - - /** - * Get the current value from the object and convert it. - * @param object The object instance. - * @return The converted value. - */ - public final AttributeValue getAndConvert(final T object) { - return convert(get(object)); - } - - /** - * Unconverts the value and sets it on the object. - * @param object The object instance. - * @param value The attribute value. - */ - public final void unconvertAndSet(final T object, final AttributeValue value) { - set(object, unconvert(value)); - } - - /** - * Gets the DynamoDB attribute type. - * @return The DynamoDB attribute type. - */ - public final DynamoDbAttributeType attributeType() { - return attributeType; - } - - /** - * Gets the key type. - * @return The key type if a key field, null otherwise. - */ - public final KeyType keyType() { - return properties.keyType(); - } - - /** - * Indicates if this attribute is a version attribute. - * @return True if it is, false otherwise. - */ - public final boolean versioned() { - return properties.versioned(); - } - - /** - * Gets the global secondary indexes. - * @param keyType The key type. - * @return The list of global secondary indexes. - */ - public final List globalSecondaryIndexNames(final KeyType keyType) { - if (properties.globalSecondaryIndexNames().containsKey(keyType)) { - return properties.globalSecondaryIndexNames().get(keyType); - } - return Collections.emptyList(); - } - - /** - * Gets the local secondary indexes. - * @return The list of local secondary indexes. - */ - public final List localSecondaryIndexNames() { - return properties.localSecondaryIndexNames(); - } - - /** - * Returns true if the field has any indexes. - * @return True if the propery matches. - */ - public final boolean indexed() { - return !properties.globalSecondaryIndexNames().isEmpty() || !properties.localSecondaryIndexNames().isEmpty(); - } - - /** - * Creates a condition which filters on the specified value. - * @param value The value. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#BEGINS_WITH - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition beginsWith(final V value) { - return Condition.builder().comparisonOperator(BEGINS_WITH).attributeValueList(convert(value)).build(); - } - - /** - * Creates a condition which filters on the specified values. - * @param lo The start of the range (inclusive). - * @param hi The end of the range (inclusive). - * @return The condition. 
- * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#BETWEEN - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition between(final V lo, final V hi) { - return Condition.builder().comparisonOperator(BETWEEN).attributeValueList(convert(lo), convert(hi)).build(); - } - - /** - * Creates a condition which filters on the specified value. - * @param value The value. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#CONTAINS - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition contains(final V value) { - return Condition.builder().comparisonOperator(CONTAINS).attributeValueList(convert(value)).build(); - } - - /** - * Creates a condition which filters on the specified value. - * @param value The value. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#EQ - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition eq(final V value) { - return Condition.builder().comparisonOperator(EQ).attributeValueList(convert(value)).build(); - } - - /** - * Creates a condition which filters on the specified value. - * @param value The value. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#GE - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition ge(final V value) { - return Condition.builder().comparisonOperator(GE).attributeValueList(convert(value)).build(); - } - - /** - * Creates a condition which filters on the specified value. - * @param value The value. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#GT - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition gt(final V value) { - return Condition.builder().comparisonOperator(GT).attributeValueList(convert(value)).build(); - } - - /** - * Creates a condition which filters on the specified values. - * @param values The values. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#IN - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition in(final Collection values) { - return Condition.builder().comparisonOperator(IN).attributeValueList(LIST.convert(values, this)).build(); - } - - /** - * Creates a condition which filters on the specified values. - * @param values The values. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#IN - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition in(final V... values) { - return in(Arrays.asList(values)); - } - - /** - * Creates a condition which filters on the specified value. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#NULL - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition isNull() { - return Condition.builder().comparisonOperator(NULL).build(); - } - - /** - * Creates a condition which filters on the specified value. - * @param value The value. - * @return The condition. 
- * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#LE - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition le(final V value) { - return Condition.builder().comparisonOperator(LE).attributeValueList(convert(value)).build(); - } - - /** - * Creates a condition which filters on the specified value. - * @param value The value. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#LT - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition lt(final V value) { - return Condition.builder().comparisonOperator(LT).attributeValueList(convert(value)).build(); - } - - /** - * Creates a condition which filters on the specified value. - * @param value The value. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#NE - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition ne(final V value) { - return Condition.builder().comparisonOperator(NE).attributeValueList(convert(value)).build(); - } - - /** - * Creates a condition which filters on the specified value. - * @param value The value. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#NOT_CONTAINS - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition notContains(final V value) { - return Condition.builder().comparisonOperator(NOT_CONTAINS).attributeValueList(convert(value)).build(); - } - - /** - * Creates a condition which filters on the specified value. - * @return The condition. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#NOT_NULL - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition notNull() { - return Condition.builder().comparisonOperator(NOT_NULL).build(); - } - - /** - * Creates a condition which filters on any non-null argument; if {@code lo} - * is null a {@code LE} condition is applied on {@code hi}, if {@code hi} - * is null a {@code GE} condition is applied on {@code lo}. - * @param lo The start of the range (inclusive). - * @param hi The end of the range (inclusive). - * @return The condition or null if both arguments are null. - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#BETWEEN - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#EQ - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#GE - * @see software.amazon.awssdk.services.dynamodb.model.ComparisonOperator#LE - * @see software.amazon.awssdk.services.dynamodb.model.Condition - */ - public final Condition betweenAny(final V lo, final V hi) { - return lo == null ? (hi == null ? null : le(hi)) : (hi == null ? ge(lo) : (lo.equals(hi) ? eq(lo) : between(lo, hi))); - } - - public enum DynamoDbAttributeType { - B, N, S, BS, NS, SS, BOOL, NULL, L, M - } - - /** - * The field model properties. 
- */ - interface Properties { - String attributeName(); - - KeyType keyType(); - - boolean versioned(); - - Map> globalSecondaryIndexNames(); - - List localSecondaryIndexNames(); - - DynamoDbAutoGenerator autoGenerator(); - - final class Immutable implements Properties { - private final String attributeName; - private final KeyType keyType; - private final boolean versioned; - private final Map> globalSecondaryIndexNames; - private final List localSecondaryIndexNames; - private final DynamoDbAutoGenerator autoGenerator; - - Immutable(final Properties properties) { - this.attributeName = properties.attributeName(); - this.keyType = properties.keyType(); - this.versioned = properties.versioned(); - this.globalSecondaryIndexNames = properties.globalSecondaryIndexNames(); - this.localSecondaryIndexNames = properties.localSecondaryIndexNames(); - this.autoGenerator = properties.autoGenerator(); - } - - @Override - public String attributeName() { - return this.attributeName; - } - - @Override - public KeyType keyType() { - return this.keyType; - } - - @Override - public boolean versioned() { - return this.versioned; - } - - @Override - public Map> globalSecondaryIndexNames() { - return this.globalSecondaryIndexNames; - } - - @Override - public List localSecondaryIndexNames() { - return this.localSecondaryIndexNames; - } - - @Override - public DynamoDbAutoGenerator autoGenerator() { - return this.autoGenerator; - } - } - } - - /** - * Get/set reflection operations. - * @param The object type. - * @param The value type. - */ - interface Reflect { - V get(T object); - - void set(T object, V value); - } - - /** - * {@link DynamoDbMapperFieldModel} builder. - */ - static class Builder { - private final DynamoDbMapperFieldModel.Properties properties; - private DynamoDbTypeConverter converter; - private DynamoDbMapperFieldModel.Reflect reflect; - private DynamoDbAttributeType attributeType; - private Class targetType; - - Builder(Class targetType, DynamoDbMapperFieldModel.Properties properties) { - this.properties = properties; - this.targetType = targetType; - } - - public final Builder with(DynamoDbTypeConverter converter) { - this.converter = converter; - return this; - } - - public final Builder with(DynamoDbAttributeType attributeType) { - this.attributeType = attributeType; - return this; - } - - public final Builder with(DynamoDbMapperFieldModel.Reflect reflect) { - this.reflect = reflect; - return this; - } - - public final DynamoDbMapperFieldModel build() { - final DynamoDbMapperFieldModel result = new DynamoDbMapperFieldModel(this); - if ((result.keyType() != null || result.indexed()) && !result.attributeType().name().matches("[BNS]")) { - throw new DynamoDbMappingException(String.format( - "%s[%s]; only scalar (B, N, or S) type allowed for key", - targetType.getSimpleName(), result.name() - )); - } else if (result.keyType() != null && result.getGenerateStrategy() == ALWAYS) { - throw new DynamoDbMappingException(String.format( - "%s[%s]; auto-generated key and ALWAYS not allowed", - targetType.getSimpleName(), result.name() - )); - } - return result; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperModelFactory.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperModelFactory.java deleted file mode 100644 index d8a2b337bcb8..000000000000 --- 
a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperModelFactory.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import software.amazon.awssdk.annotations.SdkInternalApi; - -/** - * {@link DynamoDbMapper} table model factory. - */ -@SdkInternalApi -public interface DynamoDbMapperModelFactory { - - /** - * Gets/creates the mapper's model factory. - */ - TableFactory getTableFactory(DynamoDbMapperConfig config); - - /** - * {@link DynamoDbMapperModelFactory} factory. - */ - interface TableFactory { - /** - * Gets the table model for the given type and configuration. - */ - DynamoDbMapperTableModel getTable(Class clazz); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperTableModel.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperTableModel.java deleted file mode 100644 index cddf08f518f5..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMapperTableModel.java +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static software.amazon.awssdk.services.dynamodb.model.KeyType.HASH; -import static software.amazon.awssdk.services.dynamodb.model.KeyType.RANGE; -import static software.amazon.awssdk.services.dynamodb.model.ProjectionType.KEYS_ONLY; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.Projection; - -/** - * Table model. - * - * @param The object type. 
- */ -public final class DynamoDbMapperTableModel implements DynamoDbTypeConverter, T> { - - private final Map globalSecondaryIndexes; - private final Map localSecondaryIndexes; - private final Map> versions; - private final Map> fields; - private final Map> keys; - private final Class targetType; - - /** - * Constructs a new table model for the specified class. - * @param builder The builder. - */ - private DynamoDbMapperTableModel(final DynamoDbMapperTableModel.Builder builder) { - this.globalSecondaryIndexes = builder.globalSecondaryIndexes(); - this.localSecondaryIndexes = builder.localSecondaryIndexes(); - this.versions = builder.versions(); - this.fields = builder.fields(); - this.keys = builder.keys(); - this.targetType = builder.targetType; - } - - /** - * Gets the object type. - * @return The object type. - */ - public Class targetType() { - return this.targetType; - } - - /** - * Gets all the field models for the given class. - * @return The field models. - */ - public Collection> fields() { - return fields.values(); - } - - /** - * Gets the field model for a given attribute. - * @param The field model's value type. - * @param attributeName The attribute name. - * @return The field model. - */ - @SuppressWarnings("unchecked") - public DynamoDbMapperFieldModel field(final String attributeName) { - final DynamoDbMapperFieldModel field = (DynamoDbMapperFieldModel) fields.get(attributeName); - if (field == null) { - throw new DynamoDbMappingException( - targetType.getSimpleName() + "[" + attributeName + "]; no mapping for attribute by name" - ); - } - return field; - } - - /** - * Gets all the key field models for the given class. - * @return The field models. - */ - public Collection> keys() { - return keys.values(); - } - - /** - * Gets the hash key field model for the specified type. - * @param The hash key type. - * @return The hash key field model. - * @throws DynamoDbMappingException If the hash key is not present. - */ - @SuppressWarnings("unchecked") - public DynamoDbMapperFieldModel hashKey() { - final DynamoDbMapperFieldModel field = (DynamoDbMapperFieldModel) keys.get(HASH); - if (field == null) { - throw new DynamoDbMappingException( - targetType.getSimpleName() + "; no mapping for HASH key" - ); - } - return field; - } - - /** - * Gets the range key field model for the specified type. - * @param The range key type. - * @return The range key field model. - * @throws DynamoDbMappingException If the range key is not present. - */ - @SuppressWarnings("unchecked") - public DynamoDbMapperFieldModel rangeKey() { - final DynamoDbMapperFieldModel field = (DynamoDbMapperFieldModel) keys.get(RANGE); - if (field == null) { - throw new DynamoDbMappingException( - targetType.getSimpleName() + "; no mapping for RANGE key" - ); - } - return field; - } - - /** - * Gets the range key field model for the specified type. - * @param The range key type. - * @return The range key field model, or null if not present. - */ - @SuppressWarnings("unchecked") - public DynamoDbMapperFieldModel rangeKeyIfExists() { - return (DynamoDbMapperFieldModel) keys.get(RANGE); - } - - /** - * Gets all the version fields for the given class. - * @return The field models. - */ - public Collection> versions() { - return versions.values(); - } - - /** - * Indicates if this table has any versioned attributes. - * @return True if any versioned attributes, false otherwise. - */ - public boolean versioned() { - return !versions.isEmpty(); - } - - /** - * Gets the global secondary indexes for the given class. 
- * @return The map of index name to GlobalSecondaryIndexes. - */ - public Collection globalSecondaryIndexes() { - if (globalSecondaryIndexes.isEmpty()) { - return null; - } - final Collection copies = new ArrayList(globalSecondaryIndexes.size()); - for (final String indexName : globalSecondaryIndexes.keySet()) { - copies.add(globalSecondaryIndex(indexName)); - } - return copies; - } - - /** - * Gets the global secondary index. - * @param indexName The index name. - * @return The global secondary index or null. - */ - public GlobalSecondaryIndex globalSecondaryIndex(final String indexName) { - if (!globalSecondaryIndexes.containsKey(indexName)) { - return null; - } - final GlobalSecondaryIndex gsi = globalSecondaryIndexes.get(indexName); - final GlobalSecondaryIndex.Builder copyBuilder = GlobalSecondaryIndex.builder() - .indexName(gsi.indexName()) - .projection(Projection.builder() - .projectionType(gsi.projection().projectionType()) - .build()); - List keySchemas = new ArrayList<>(); - for (final KeySchemaElement key : gsi.keySchema()) { - keySchemas.add(KeySchemaElement.builder().attributeName(key.attributeName()).keyType(key.keyType()).build()); - } - copyBuilder.keySchema(keySchemas); - return copyBuilder.build(); - } - - /** - * Gets the local secondary indexes for the given class. - * @return The map of index name to LocalSecondaryIndexes. - */ - public Collection localSecondaryIndexes() { - if (localSecondaryIndexes.isEmpty()) { - return null; - } - final Collection copies = new ArrayList(localSecondaryIndexes.size()); - for (final String indexName : localSecondaryIndexes.keySet()) { - copies.add(localSecondaryIndex(indexName)); - } - return copies; - } - - /** - * Gets the local secondary index by name. - * @param indexName The index name. - * @return The local secondary index, or null. 
- */ - public LocalSecondaryIndex localSecondaryIndex(final String indexName) { - if (!localSecondaryIndexes.containsKey(indexName)) { - return null; - } - final LocalSecondaryIndex lsi = localSecondaryIndexes.get(indexName); - final LocalSecondaryIndex.Builder copyBuilder = LocalSecondaryIndex.builder() - .indexName(lsi.indexName()) - .projection(Projection.builder() - .projectionType(lsi.projection().projectionType()) - .build()); - - List keySchemas = new ArrayList<>(); - for (final KeySchemaElement key : lsi.keySchema()) { - keySchemas.add(KeySchemaElement.builder() - .attributeName(key.attributeName()) - .keyType(key.keyType()) - .build()); - } - copyBuilder.keySchema(keySchemas); - return copyBuilder.build(); - } - - /** - * {@inheritDoc} - */ - @Override - public Map convert(final T object) { - final Map map = new LinkedHashMap(); - for (final DynamoDbMapperFieldModel field : fields()) { - try { - final AttributeValue value = field.getAndConvert(object); - if (value != null) { - map.put(field.name(), value); - } - } catch (final RuntimeException e) { - throw new DynamoDbMappingException( - targetType.getSimpleName() + "[" + field.name() + "]; could not convert attribute", e - ); - } - } - return map; - } - - /** - * {@inheritDoc} - */ - @Override - public T unconvert(final Map object) { - final T result = StandardBeanProperties.DeclaringReflect.newInstance(targetType); - if (!object.isEmpty()) { - for (final DynamoDbMapperFieldModel field : fields()) { - try { - final AttributeValue value = object.get(field.name()); - if (value != null) { - field.unconvertAndSet(result, value); - } - } catch (final RuntimeException e) { - throw new DynamoDbMappingException( - targetType.getSimpleName() + "[" + field.name() + "]; could not unconvert attribute", e - ); - } - } - } - return result; - } - - /** - * Creates a new object instance with the keys populated. - * @param The hash key type. - * @param The range key type. - * @param hashKey The hash key. - * @param rangeKey The range key (optional if not present on table). - * @return The new instance. - */ - public T createKey(final H hashKey, final R rangeKey) { - final T key = StandardBeanProperties.DeclaringReflect.newInstance(targetType); - if (hashKey != null) { - final DynamoDbMapperFieldModel hk = hashKey(); - hk.set(key, hashKey); - } - if (rangeKey != null) { - final DynamoDbMapperFieldModel rk = rangeKey(); - rk.set(key, rangeKey); - } - return key; - } - - /** - * Creates a new key map from the specified object. - * @param The hash key type. - * @param The range key type. - * @return The key map. - */ - public Map convertKey(final T key) { - final DynamoDbMapperFieldModel hk = this.hashKey(); - final DynamoDbMapperFieldModel rk = this.rangeKeyIfExists(); - return this.convertKey(hk.get(key), (rk == null ? (R) null : rk.get(key))); - } - - /** - * Creates a new key map from the specified hash and range key. - * @param The hash key type. - * @param The range key type. - * @param hashKey The hash key. - * @param rangeKey The range key (optional if not present on table). - * @return The key map. - */ - public Map convertKey(final H hashKey, final R rangeKey) { - final Map key = new LinkedHashMap(4); - final DynamoDbMapperFieldModel hk = this.hashKey(); - final AttributeValue hkValue = hashKey == null ? 
null : hk.convert(hashKey); - if (hkValue != null) { - key.put(hk.name(), hkValue); - } else { - throw new DynamoDbMappingException( - targetType.getSimpleName() + "[" + hk.name() + "]; no HASH key value present" - ); - } - final DynamoDbMapperFieldModel rk = this.rangeKeyIfExists(); - final AttributeValue rkValue = rangeKey == null ? null : rk.convert(rangeKey); - if (rkValue != null) { - key.put(rk.name(), rkValue); - } else if (rk != null) { - throw new DynamoDbMappingException( - targetType.getSimpleName() + "[" + rk.name() + "]; no RANGE key value present" - ); - } - return key; - } - - /** - * The table model properties. - */ - interface Properties { - String tableName(); - - final class Immutable implements Properties { - private final String tableName; - - Immutable(final Properties properties) { - this.tableName = properties.tableName(); - } - - @Override - public String tableName() { - return this.tableName; - } - } - } - - /** - * {@link DynamoDbMapperTableModel} builder. - */ - static class Builder { - private final Map> versions; - private final Map> fields; - private final Map> keys; - private final Properties properties; - private final Class targetType; - - Builder(Class targetType, Properties properties) { - this.versions = new LinkedHashMap>(4); - this.fields = new LinkedHashMap>(); - this.keys = new EnumMap>(KeyType.class); - this.properties = properties; - this.targetType = targetType; - } - - public Builder with(final DynamoDbMapperFieldModel field) { - fields.put(field.name(), field); - if (field.keyType() != null) { - keys.put(field.keyType(), field); - } - if (field.versioned()) { - versions.put(field.name(), field); - } - return this; - } - - public Map globalSecondaryIndexes() { - final Map map = new LinkedHashMap(); - for (final DynamoDbMapperFieldModel field : fields.values()) { - for (final String indexName : field.globalSecondaryIndexNames(HASH)) { - final GlobalSecondaryIndex.Builder gsiBuilder = GlobalSecondaryIndex.builder() - .indexName(indexName) - .projection(Projection.builder() - .projectionType(KEYS_ONLY) - .build()) - .keySchema(KeySchemaElement.builder() - .attributeName(field.name()) - .keyType(HASH).build()); - if (map.put(indexName, gsiBuilder.build()) != null) { - throw new DynamoDbMappingException( - targetType.getSimpleName() + "[" + field.name() + "]; must not duplicate GSI " + indexName - ); - } - } - } - for (final DynamoDbMapperFieldModel field : fields.values()) { - for (final String indexName : field.globalSecondaryIndexNames(RANGE)) { - final GlobalSecondaryIndex gsi = map.get(indexName); - if (gsi == null) { - throw new DynamoDbMappingException( - targetType.getSimpleName() + "[" + field.name() + "]; no HASH key for GSI " + indexName - ); - } - - List keySchemas = new ArrayList<>(); - keySchemas.addAll(gsi.keySchema()); - - keySchemas.add(KeySchemaElement.builder() - .attributeName(field.name()) - .keyType(RANGE).build()); - - map.put(indexName, - gsi.toBuilder() - .keySchema(keySchemas).build()); - } - } - if (map.isEmpty()) { - return Collections.emptyMap(); - } - return Collections.unmodifiableMap(map); - } - - public Map localSecondaryIndexes() { - final Map map = new LinkedHashMap(); - for (final DynamoDbMapperFieldModel field : fields.values()) { - for (final String indexName : field.localSecondaryIndexNames()) { - final LocalSecondaryIndex.Builder lsiBuilder = LocalSecondaryIndex.builder() - .indexName(indexName) - .projection(Projection.builder() - .projectionType(KEYS_ONLY) - .build()) - .keySchema(KeySchemaElement.builder() - 
.attributeName(keys.get(HASH).name()) - .keyType(HASH).build(), - KeySchemaElement.builder() - .attributeName(field.name()) - .keyType(RANGE).build()); - if (map.put(indexName, lsiBuilder.build()) != null) { - throw new DynamoDbMappingException( - targetType.getSimpleName() + "[" + field.name() + "]; must not duplicate LSI " + indexName - ); - } - } - } - if (map.isEmpty()) { - return Collections.emptyMap(); - } - return Collections.unmodifiableMap(map); - } - - private Map> versions() { - if (versions.isEmpty()) { - return Collections.>emptyMap(); - } - return Collections.unmodifiableMap(versions); - } - - public Map> fields() { - if (fields.isEmpty()) { - return Collections.>emptyMap(); - } - return Collections.unmodifiableMap(fields); - } - - public Map> keys() { - if (keys.isEmpty()) { - return Collections.>emptyMap(); - } - return Collections.unmodifiableMap(keys); - } - - public DynamoDbMapperTableModel build() { - final DynamoDbMapperTableModel result = new DynamoDbMapperTableModel(this); - if (properties.tableName() != null) { - result.hashKey(); //<- make sure the hash key is present - } - return result; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMappingException.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMappingException.java deleted file mode 100644 index 054e93a5a54c..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMappingException.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -/** - * Generic exception for problems occurring when mapping DynamoDB items to Java - * objects or vice versa. Excludes service exceptions. - */ -public class DynamoDbMappingException extends RuntimeException { - - private static final long serialVersionUID = -4883173289978517967L; - - public DynamoDbMappingException() { - super(); - } - - public DynamoDbMappingException(String message, Throwable cause) { - super(message, cause); - } - - public DynamoDbMappingException(String message) { - super(message); - } - - public DynamoDbMappingException(Throwable cause) { - super(cause); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMappingsRegistry.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMappingsRegistry.java deleted file mode 100644 index bdf8b3b31c55..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMappingsRegistry.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardBeanProperties.Bean; - -/** - * Reflection assistant for {@link DynamoDbMapper} - * - * @deprecated Replaced by {@link StandardBeanProperties}/{@link StandardModelFactories} - */ -@Deprecated -@SdkInternalApi -final class DynamoDbMappingsRegistry { - - /** - * The default instance. - */ - private static final DynamoDbMappingsRegistry INSTANCE = new DynamoDbMappingsRegistry(); - /** - * The cache of class to mapping definition. - */ - private final ConcurrentMap, Mappings> mappings = new ConcurrentHashMap, Mappings>(); - - /** - * Gets the default instance. - * @return The default instance. - */ - static DynamoDbMappingsRegistry instance() { - return INSTANCE; - } - - /** - * Gets the mapping definition for a given class. - * @param clazz The class. - * @return The mapping definition. - */ - Mappings mappingsOf(final Class clazz) { - if (!mappings.containsKey(clazz)) { - mappings.putIfAbsent(clazz, new Mappings(clazz)); - } - return mappings.get(clazz); - } - - /** - * Holds the properties for mapping an object. - */ - static final class Mappings { - private final Map byNames = new HashMap(); - - private Mappings(final Class clazz) { - for (final Map.Entry> bean : - StandardBeanProperties.of((Class) clazz).map().entrySet()) { - final Mapping mapping = new Mapping(bean.getValue()); - byNames.put(mapping.getAttributeName(), mapping); - } - } - - Collection mappings() { - return byNames.values(); - } - } - - /** - * Holds the properties for mapping an object attribute. - */ - static final class Mapping { - private final Bean bean; - - private Mapping(final Bean bean) { - this.bean = bean; - } - - Method getter() { - return bean.type().getter(); - } - - boolean isPrimaryKey() { - return bean.properties().keyType() != null; - } - - boolean isVersion() { - return bean.properties().versioned(); - } - - String getAttributeName() { - return bean.properties().attributeName(); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMarshaller.java deleted file mode 100644 index 20251661db39..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMarshaller.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -/** - * Marshaller interface for storing complex types in DynamoDB as Strings. - * Implementors provide methods to transform instances of a class to and from - * Strings. - * - * @deprecated Replaced by {@link DynamoDbTypeConverter} - * - *

- * A {@link DynamoDbTypeConverted} with {@link String} as source would
- * perform the same conversion. Please consider whether your marshaller is
- * thread safe before replacing it. In the new implementation, a single
- * instance of {@link DynamoDbTypeConverted} is created per field/attribute.
- * In the old implementation, a new instance of the marshaller was created for
- * each call to {@code marshall} and {@code unmarshall}. If your
- * marshaller/converter is not thread safe, it is recommended to specify a
- * converter which will instantiate a new marshaller per call.
- *
- * public class CustomConverter<T> implements DynamoDBTypeConverter<String,T> {
- *     @Override
- *     public final String convert(final T object) {
- *         return ...
- *     }
- *     @Override
- *     public final T unconvert(final String object) {
- *         return ...
- *     }
- * }
    - */ -@Deprecated -public interface DynamoDbMarshaller { - - /** - * Turns an object of type T into its String representation. - */ - String marshall(T getterReturnResult); - - /** - * Turns a String representation of an object of type T into an object. - */ - T unmarshall(Class clazz, String obj); -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMarshalling.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMarshalling.java deleted file mode 100644 index 7480280a6218..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbMarshalling.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.Set; - -/** - * Annotation to mark a property as using a custom marshaller. This is required - * when storing anything other than {@link String}s, {@link Number}s, and - * {@link Set}s of the same to DynamoDB. Any object that can be converted into a - * String representation and vice versa can be saved in this manner. This - * annotation can be applied to either the getter method or the class field for - * the specific property. If the annotation is applied directly to the class - * field, the corresponding getter and setter must be declared in the same - * class. - * - * @see DynamoDbMarshaller - * @see JsonMarshaller - * - * @deprecated Replaced by {@link DynamoDbTypeConverted} - * - *

- * A {@link DynamoDbTypeConverted} with {@link String} as source would
- * perform the same conversion. Please consider whether your marshaller is
- * thread safe before replacing it. In the new implementation, a single
- * instance of {@link DynamoDbTypeConverted} is created per field/attribute.
- * In the old implementation, a new instance of the marshaller was created for
- * each call to {@code marshall} and {@code unmarshall}. If your
- * marshaller/converter is not thread safe, it is recommended to specify a
- * converter which will instantiate a new marshaller per call.
- *
- * public class CustomConverter<T> implements DynamoDBTypeConverter<String,T> {
- *     @Override
- *     public final String convert(final T object) {
- *         return ...
- *     }
- *     @Override
- *     public final T unconvert(final String object) {
- *         return ...
- *     }
- * }
    - */ -@Deprecated -@DynamoDb -@DynamoDbTypeConverted(converter = DynamoDbMarshalling.Converter.class) -@DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.S) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbMarshalling { - - /** - * The class of the Marshaller that converts this property to and from a - * String. - */ - Class> marshallerClass(); - - /** - * Marshalling type converter. - */ - final class Converter implements DynamoDbTypeConverter { - private final Class> marshallerClass; - private final Class targetType; - - Converter(final Class targetType, final DynamoDbMarshalling annotation) { - this.marshallerClass = (Class>) annotation.marshallerClass(); - this.targetType = targetType; - } - - @Override - public String convert(final T object) { - return marshaller().marshall(object); - } - - @Override - public T unconvert(final String object) { - return marshaller().unmarshall(targetType, object); - } - - private DynamoDbMarshaller marshaller() { - try { - return marshallerClass.newInstance(); - } catch (final Exception e) { - throw new DynamoDbMappingException("Unable to instantiate marshaller " + marshallerClass, e); - } - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbNamed.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbNamed.java deleted file mode 100644 index 31ad11254c47..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbNamed.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation for overriding a property's DynamoDB attribute name. - * - *
    - * @DynamoDBNamed("InternalStatus")
    - * public String status()
    - * 
    - * - *

    This annotation has the lowest precedence among other property/field - * annotations where {@code attributeName} may be specified.

    - * - *

    May be used as a meta-annotation.

    - */ -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) -public @interface DynamoDbNamed { - - /** - * Use when the name of the attribute as stored in DynamoDB should differ - * from the name used by the getter / setter. - */ - String value(); - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbNativeBoolean.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbNativeBoolean.java deleted file mode 100644 index 3a3aaa330354..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbNativeBoolean.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * An annotation that marks a {@code boolean} or {@code Boolean} attribute - * of a modeled class which should be serialized as a DynamoDB BOOL. For - * backwards compatibility with old versions of the {@code DynamoDBMapper}, - * by default booleans are serialized using the DynamoDB N type, with a value - * of '1' representing 'true' and a value of '0' representing 'false'. - *

    - * Using this annotation on the field definition or getter method definition - * for the attribute will cause it to be serialized as DynamoDB-native BOOL - * type. Old versions of the {@code DynamoDBMapper} which do not know about the - * BOOL type will be unable to read items containing BOOLs, so don't use me - * unless all readers of your table are using an updated version of the mapper. - * - * @deprecated - Replaced by {@link DynamoDbTyped} - */ -@Deprecated -@DynamoDb -@DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.BOOL) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbNativeBoolean { -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbQueryExpression.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbQueryExpression.java deleted file mode 100644 index d67fa1f1c71a..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbQueryExpression.java +++ /dev/null @@ -1,1325 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.QueryRequest; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.Select; - -/** - * A query expression. - */ -public class DynamoDbQueryExpression { - - private boolean consistentRead = true; - private boolean scanIndexForward = true; - private T hashKeyValues; - private Map rangeKeyConditions; - private Map exclusiveStartKey; - private Integer limit; - private String indexName; - private Map queryFilter; - private String conditionalOperator; - - /** - * Evaluates the query results and returns only the desired values. - *

    - * The condition you specify is applied to the items queried; any items that - * do not match the expression are not returned. - */ - private String filterExpression; - - /** - * The condition that specifies the key value(s) for items to be retrieved - * by the Query action. - */ - private String keyConditionExpression; - - /** - * One or more substitution variables for simplifying complex expressions. - */ - private Map expressionAttributeNames; - - /** - * One or more values that can be substituted in an expression. - */ - private Map expressionAttributeValues; - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index.

    • ALL_ATTRIBUTES - Returns all of - * the item attributes from the specified table or index. If you query a - * local secondary index, then for each matching item in the index - * DynamoDB will fetch the entire item from the parent table. If the - * index is configured to project all item attributes, then all of the - * data can be obtained from the local secondary index, and no fetching - * is required.

    • ALL_PROJECTED_ATTRIBUTES - - * Allowed only when querying an index. Retrieves all attributes that - * have been projected into the index. If the index is configured to - * project all attributes, this return value is equivalent to specifying - * ALL_ATTRIBUTES.

    • COUNT - - * Returns the number of matching items, rather than the matching items - * themselves.

    • SPECIFIC_ATTRIBUTES - Returns - * only the attributes listed in AttributesToGet. This return - * value is equivalent to specifying AttributesToGet without - * specifying any value for Select.

      If you query a local - * secondary index and request only attributes that are projected into - * that index, the operation will read only the index and not the table. - * If any of the requested attributes are not projected into the local - * secondary index, DynamoDB will fetch each of these attributes from the - * parent table. This extra fetching incurs additional throughput cost - * and latency.

      If you query a global secondary index, you can only - * request attributes that are projected into the index. Global secondary - * index queries cannot fetch attributes from the parent table.

    • - *

    If neither Select nor AttributesToGet are - * specified, DynamoDB defaults to ALL_ATTRIBUTES when - * accessing a table, and ALL_PROJECTED_ATTRIBUTES when - * accessing an index. You cannot use both Select and - * AttributesToGet together in a single request, unless the value - * for Select is SPECIFIC_ATTRIBUTES. (This usage is - * equivalent to specifying AttributesToGet without any value for - * Select.) - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - */ - private String select; - - /** - * A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - */ - private String projectionExpression; - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the query. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.queryPage}, not {@code DynamoDBMapper.query}. - */ - private String returnConsumedCapacity; - - /** - * Returns whether this query uses consistent reads. - */ - public boolean isConsistentRead() { - return consistentRead; - } - - /** - * Sets whether this query uses consistent reads. - */ - public void setConsistentRead(boolean consistentRead) { - this.consistentRead = consistentRead; - } - - /** - * Sets whether this query uses consistent reads and returns a pointer to - * this object for method-chaining. - */ - public DynamoDbQueryExpression withConsistentRead(boolean consistentRead) { - this.consistentRead = consistentRead; - return this; - } - - /** - * Returns whether this query scans forward. - */ - public boolean isScanIndexForward() { - return scanIndexForward; - } - - /** - * Sets whether this query scans forward. - */ - public void setScanIndexForward(boolean scanIndexForward) { - this.scanIndexForward = scanIndexForward; - } - - /** - * Sets whether this query scans forward and returns a pointer to this - * object for method-chaining. - */ - public DynamoDbQueryExpression withScanIndexForward(boolean scanIndexForward) { - this.scanIndexForward = scanIndexForward; - return this; - } - - /** - * Returns the exclusive start key for this query. - */ - public Map getExclusiveStartKey() { - return exclusiveStartKey; - } - - /** - * Sets the exclusive start key for this query. - */ - public void setExclusiveStartKey(Map exclusiveStartKey) { - this.exclusiveStartKey = exclusiveStartKey; - } - - /** - * Sets the exclusive start key for this query and returns a pointer to this - * object for method-chaining. - */ - public DynamoDbQueryExpression withExclusiveStartKey(Map exclusiveStartKey) { - this.exclusiveStartKey = exclusiveStartKey; - return this; - } - - /** - * Returns the maximum number of items to retrieve in each service request - * to DynamoDB. - *

    - * Note that when calling {@code DynamoDBMapper.query}, multiple requests - * are made to DynamoDB if needed to retrieve the entire result set. Setting - * this will limit the number of items retrieved by each request, NOT - * the total number of results that will be retrieved. Use - * {@code DynamoDBMapper.queryPage} to retrieve a single page of items from - * DynamoDB. - */ - public Integer limit() { - return limit; - } - - /** - * Sets the maximum number of items to retrieve in each service request to - * DynamoDB. - *

    - * Note that when calling {@code DynamoDBMapper.query}, multiple requests - * are made to DynamoDB if needed to retrieve the entire result set. Setting - * this will limit the number of items retrieved by each request, NOT - * the total number of results that will be retrieved. Use - * {@code DynamoDBMapper.queryPage} to retrieve a single page of items from - * DynamoDB. - */ - public void setLimit(Integer limit) { - this.limit = limit; - } - - /** - * Sets the maximum number of items to retrieve in each service request to - * DynamoDB and returns a pointer to this object for method-chaining. - *

    - * Note that when calling {@code DynamoDBMapper.query}, multiple requests - * are made to DynamoDB if needed to retrieve the entire result set. Setting - * this will limit the number of items retrieved by each request, NOT - * the total number of results that will be retrieved. Use - * {@code DynamoDBMapper.queryPage} to retrieve a single page of items from - * DynamoDB. - */ - public DynamoDbQueryExpression withLimit(Integer limit) { - this.limit = limit; - return this; - } - - /** - * Gets the hash key value(s) for this query. All hash key attributes for - * the table must be specified with this key object. - */ - public T getHashKeyValues() { - return hashKeyValues; - } - - - /** - * Sets the hash key value(s) for this query. All hash key attributes for - * the table must be specified with this key object. - * - * Note 1: Currently the DynamoDBMapper supports only one value per hash key. - * Note 2: Currently the Amazon DynamoDB supports only one hash key per - * table/index. - */ - public void setHashKeyValues(T hashKeyValues) { - this.hashKeyValues = hashKeyValues; - } - - /** - * Sets the hash key value(s) for this query. All hash key attributes for - * the table must be specified with this key object. - */ - public DynamoDbQueryExpression withHashKeyValues(T hashKObject) { - setHashKeyValues(hashKObject); - return this; - } - - /** - * Gets the range key condition for this query. All range key attributes for - * the table must be specified by attribute name in the map. - */ - public Map getRangeKeyConditions() { - return rangeKeyConditions; - } - - /** - * Sets the range key condition for this query. All range key attributes for - * the table must be specified by attribute name in the map. - * - * @param rangeKeyConditions a map from key name to condition - * NOTE: The current DynamoDB service only allows up to one - * range key condition per query. Providing more than one - * range key condition will result in a SdkClientException. - */ - public void setRangeKeyConditions(Map rangeKeyConditions) { - this.rangeKeyConditions = rangeKeyConditions; - } - - /** - * Sets the range key condition for this query. All range key attributes for - * the table must be specified by attribute name in the map. - * - * @param rangeKeyConditions a map from key name to condition - * NOTE: The current DynamoDB service only allows up to one range - * key condition per query. Providing more than one range key - * condition will result in a SdkClientException. - */ - public DynamoDbQueryExpression withRangeKeyConditions(Map rangeKeyConditions) { - setRangeKeyConditions(rangeKeyConditions); - return this; - } - - /** - * Sets one range key condition for this query, using the attribute name of - * the range key. All range key attributes for the table must be specified - * by using {@link DynamoDbRangeKey} or {@link DynamoDbIndexRangeKey} annotations - * before executing the query. - *
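As a usage illustration, a minimal sketch of a query built from the hash-key, limit and read settings described above. It assumes a hypothetical Catalog class mapped for the mapper and a DynamoDbMapper instance named mapper; the attribute names and values are illustrative only, not part of this file.

    Catalog hashKey = new Catalog();              // hypothetical mapped class
    hashKey.setId("item-123");                    // hash key value to query on

    DynamoDbQueryExpression<Catalog> query = new DynamoDbQueryExpression<Catalog>()
            .withHashKeyValues(hashKey)           // all hash key attributes come from this object
            .withConsistentRead(false)            // eventually consistent read
            .withScanIndexForward(false)          // newest items first
            .withLimit(25);                       // page size per service request, not total results

    List<Catalog> results = mapper.query(Catalog.class, query);  // query pages through the full result set

Because query transparently pages through the whole result set, the limit only caps each underlying request; queryPage would return a single page instead.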

    - *
    If the attribute is the primary range key
    - *
    users should NOT set any index name for this query.
    - *
    If the attribute is an index range key
    - *
    - * {@link DynamoDbMapper} will automatically set the index name if the - * range key is annotated as only used by one local secondary index, - * otherwise users must set the index name manually by either - * {@link DynamoDbQueryExpression#setIndexName(String)} or - * {@link DynamoDbQueryExpression#withIndexName(String)}. - *
    - *
    - * - * @param rangeKeyAttributeName - * This can be either the primary range key of the table or an - * index range key. - * - * @param rangeKeyCondition - * Condition specified on the given range key for this query. - */ - public DynamoDbQueryExpression withRangeKeyCondition(String rangeKeyAttributeName, Condition rangeKeyCondition) { - if (rangeKeyConditions == null) { - rangeKeyConditions = new HashMap(); - } - rangeKeyConditions.put(rangeKeyAttributeName, rangeKeyCondition); - return this; - } - - /** - * Returns the name of the index to be used by this query. - */ - public String getIndexName() { - return indexName; - } - - /** - * Sets the name of the index to be used by this query. The hash key - * and/or range key of the index must be specified by adding - * {@link DynamoDbIndexHashKey} or {@code DynamoDBIndexRangeKey} - * annotations to the appropriate getter methods of the mapped - * object. - */ - public void setIndexName(String indexName) { - this.indexName = indexName; - } - - /** - * Sets the name of the index to be used by this query. The hash key - * and/or range key of the index must be specified by adding - * {@link DynamoDbIndexHashKey} or {@code DynamoDBIndexRangeKey} - * annotations to the appropriate getter methods of the mapped - * object. - *
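For illustration, a hedged sketch of a range key condition on a local secondary index; the attribute name createdDate, the index name createdDate-index and the hashKey object are assumptions carried over from the sketch above.

    Condition afterDate = Condition.builder()
            .comparisonOperator(ComparisonOperator.GT)
            .attributeValueList(AttributeValue.builder().n("20190101").build())
            .build();

    DynamoDbQueryExpression<Catalog> query = new DynamoDbQueryExpression<Catalog>()
            .withHashKeyValues(hashKey)                          // hash key object as in the earlier sketch
            .withRangeKeyCondition("createdDate", afterDate)     // condition on the range key attribute
            .withIndexName("createdDate-index");                 // only needed when the mapper cannot infer the index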

    - * Returns a pointer to this object for method-chaining. - */ - public DynamoDbQueryExpression withIndexName(String indexName) { - setIndexName(indexName); - return this; - } - - /** - * Returns the query filter applied on this query. - */ - public Map getQueryFilter() { - return queryFilter; - } - - /** - * Sets the query filter applied on this query. - */ - public void setQueryFilter(Map queryFilter) { - this.queryFilter = queryFilter; - } - - /** - * Sets the query filter applied on this query. - *

Returns a pointer to this object for method-chaining. - */ - public DynamoDbQueryExpression withQueryFilter(Map queryFilter) { - setQueryFilter(queryFilter); - return this; - } - - /** - * Adds a new condition to the query filter. - *

    Returns a pointer to this object for method-chaining. - * - * @param attributeName - * The name of the attribute on which the specified condition - * operates. - * @param condition - * The filter condition applied on the attribute. - */ - public DynamoDbQueryExpression withQueryFilterEntry(String attributeName, Condition condition) { - if (queryFilter == null) { - queryFilter = new HashMap(); - } - queryFilter.put(attributeName, condition); - return this; - } - - /** - * Returns the logical operator on the query filter conditions. - */ - public String getConditionalOperator() { - return conditionalOperator; - } - - /** - * Sets the logical operator on the query filter conditions. - */ - public void setConditionalOperator(String conditionalOperator) { - this.conditionalOperator = conditionalOperator; - } - - /** - * Sets the logical operator on the query filter conditions. - */ - public void setConditionalOperator(ConditionalOperator conditionalOperator) { - this.conditionalOperator = conditionalOperator.toString(); - } - - /** - * Sets the logical operator on the query filter conditions. - *

    Returns a pointer to this object for method-chaining. - */ - public DynamoDbQueryExpression withConditionalOperator(String conditionalOperator) { - setConditionalOperator(conditionalOperator); - return this; - } - - /** - * Sets the logical operator on the query filter conditions. - *

    Returns a pointer to this object for method-chaining. - */ - public DynamoDbQueryExpression withConditionalOperator(ConditionalOperator conditionalOperator) { - setConditionalOperator(conditionalOperator); - return this; - } - - /** - * Evaluates the query results and returns only the desired values. - *
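A hedged sketch of the query filter and conditional operator described above; the attribute names status and views are illustrative.

    DynamoDbQueryExpression<Catalog> query = new DynamoDbQueryExpression<Catalog>()
            .withHashKeyValues(hashKey)
            .withQueryFilterEntry("status", Condition.builder()
                    .comparisonOperator(ComparisonOperator.EQ)
                    .attributeValueList(AttributeValue.builder().s("ACTIVE").build())
                    .build())
            .withQueryFilterEntry("views", Condition.builder()
                    .comparisonOperator(ComparisonOperator.GT)
                    .attributeValueList(AttributeValue.builder().n("100").build())
                    .build())
            .withConditionalOperator(ConditionalOperator.AND);   // both filter conditions must hold

The filter is applied after the items are read, so it narrows the returned results but not the read capacity consumed.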

    - * The condition you specify is applied to the items queried; any items that - * do not match the expression are not returned. - * - * @return Evaluates the query results and returns only the desired values. - *

    - * The condition you specify is applied to the items queried; any - * items that do not match the expression are not returned. - * @see QueryRequest#getFilterExpression() - */ - public String getFilterExpression() { - return filterExpression; - } - - /** - * Evaluates the query results and returns only the desired values. - *

    - * The condition you specify is applied to the items queried; any items that - * do not match the expression are not returned. - * - * @param filterExpression - * Evaluates the query results and returns only the desired - * values. - *

    - * The condition you specify is applied to the items queried; any - * items that do not match the expression are not returned. - * @see QueryRequest#setFilterExpression(String) - */ - public void setFilterExpression(String filterExpression) { - this.filterExpression = filterExpression; - } - - /** - * Evaluates the query results and returns only the desired values. - *

    - * The condition you specify is applied to the items queried; any items that - * do not match the expression are not returned. - *

    - * Returns a reference to this object so that method calls can be chained - * together. - * - * @param filterExpression - * Evaluates the query results and returns only the desired - * values. - *

    - * The condition you specify is applied to the items queried; any - * items that do not match the expression are not returned. - * - * @return A reference to this updated object so that method calls can be - * chained together. - * @see QueryRequest#withFilterExpression(String) - */ - public DynamoDbQueryExpression withFilterExpression( - String filterExpression) { - this.filterExpression = filterExpression; - return this; - } - - /** - * Returns the condition that specifies the key value(s) for items to be - * retrieved by the Query action. - *
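The same kind of filtering expressed with the expression-based API, as a hedged sketch; the placeholder and attribute names are illustrative.

    Map<String, AttributeValue> values = new HashMap<String, AttributeValue>();
    values.put(":minViews", AttributeValue.builder().n("100").build());

    DynamoDbQueryExpression<Catalog> query = new DynamoDbQueryExpression<Catalog>()
            .withHashKeyValues(hashKey)
            .withFilterExpression("views > :minViews")        // items not matching are dropped from the results
            .withExpressionAttributeValues(values);           // binds :minViews at runtime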

    - * The condition must perform an equality test on a single hash key value. - * The condition can also test for one or more range key values. A - * Query can use KeyConditionExpression to retrieve a single - * item with a given hash and range key value, or several items that have - * the same hash key value but different range key values. - *

    - * The hash key equality test is required, and must be specified in the - * following format: - *

    - * hashAttributeName = :hashval - *

    - * If you also want to provide a range key condition, it must be combined - * using AND with the hash key condition. Following is an example, - * using the = comparison operator for the range key: - *

    - * hashAttributeName = :hashval AND - * rangeAttributeName = :rangeval - *

    - * Valid comparisons for the range key condition are as follows: - *

      - *
    • - *

      - * rangeAttributeName = :rangeval - true if - * the range key is equal to :rangeval.

    • - *
    • - *

      - * rangeAttributeName < :rangeval - true if - * the range key is less than :rangeval.

    • - *
    • - *

      - * rangeAttributeName <= :rangeval - true - * if the range key is less than or equal to :rangeval.

    • - *
    • - *

      - * rangeAttributeName > :rangeval - true if - * the range key is greater than :rangeval.

    • - *
    • - *

- * rangeAttributeName >= :rangeval - true - * if the range key is greater than or equal to :rangeval.

    • - *
    • - *

- * rangeAttributeName BETWEEN :rangeval1 - * AND :rangeval2 - true if the range key is greater than - * or equal to :rangeval1, and less than or equal to - * :rangeval2.

    • - *
    • - *

      - * begins_with (rangeAttributeName, - * :rangeval) - true if the range key begins with a - * particular operand. Note that the function name begins_with - * is case-sensitive.

    • - *
    - *

    - * Use ExpressionAttributeValues (via {@link #withExpressionAttributeValues(Map)}) to - * replace tokens such as :hashval and :rangeval - * with actual values at runtime. - *

    - * You can optionally use ExpressionAttributeNames (via - * {@link #withExpressionAttributeNames(Map)}) to replace the names of the hash and range - * attributes with placeholder tokens. This might be necessary if an - * attribute name conflicts with a DynamoDB reserved word. For example, the - * following KeyConditionExpression causes an error because - * Size is a reserved word: - *

      - *
    • Size = :myval
    • - *
    - *

- * To work around this, define a placeholder (such as #myval) to - * represent the attribute name Size. KeyConditionExpression - * then is as follows: - *

      - *
    • #S = - * :myval
    • - *
    - *

    - * For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide. - *

    - * For more information on ExpressionAttributeNames and - * ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the Amazon - * DynamoDB Developer Guide. - *

    - * KeyConditionExpression replaces the legacy KeyConditions - * parameter. - * - * @return The condition that specifies the key value(s) for items to be - * retrieved by the Query action. - *

    - * The condition must perform an equality test on a single hash key - * value. The condition can also test for one or more range key - * values. A Query can use KeyConditionExpression to - * retrieve a single item with a given hash and range key value, or - * several items that have the same hash key value but different - * range key values. - *

    - * The hash key equality test is required, and must be specified in - * the following format: - *

    - * hashAttributeName = :hashval - *

    - * If you also want to provide a range key condition, it must be - * combined using AND with the hash key condition. Following - * is an example, using the = comparison operator for the - * range key: - *

    - * hashAttributeName = :hashval - * AND rangeAttributeName = - * :rangeval - *

    - * Valid comparisons for the range key condition are as follows: - *

      - *
    • - *

      - * rangeAttributeName = :rangeval - - * true if the range key is equal to :rangeval.

    • - *
    • - *

      - * rangeAttributeName < :rangeval - - * true if the range key is less than :rangeval.

    • - *
    • - *

      - * rangeAttributeName <= :rangeval - * - true if the range key is less than or equal to - * :rangeval.

    • - *
    • - *

      - * rangeAttributeName > :rangeval - - * true if the range key is greater than :rangeval.

    • - *
    • - *

      - * rangeAttributeName >= :rangeval - * - true if the range key is greater than or equal to - * :rangeval.

    • - *
    • - *

- * rangeAttributeName BETWEEN - * :rangeval1 AND :rangeval2 - true - * if the range key is greater than or equal to - * :rangeval1, and less than or equal to - * :rangeval2.

    • - *
    • - *

      - * begins_with (rangeAttributeName, - * :rangeval) - true if the range key begins - * with a particular operand. Note that the function name - * begins_with is case-sensitive.

    • - *
    - *

    - * Use ExpressionAttributeValues (via - * {@link #withExpressionAttributeValues(Map)}) to replace tokens such as - * :hashval and :rangeval with actual - * values at runtime. - *

    - * You can optionally use ExpressionAttributeNames (via - * {@link #withExpressionAttributeNames(Map)}) to replace the names of the hash and - * range attributes with placeholder tokens. This might be necessary - * if an attribute name conflicts with a DynamoDB reserved word. For - * example, the following KeyConditionExpression causes an - * error because Size is a reserved word: - *

      - *
    • Size = :myval
    • - *
    - *

- * To work around this, define a placeholder (such as - * #myval) to represent the attribute name Size. - * KeyConditionExpression then is as follows: - *

      - *
    • #S = - * :myval
    • - *
    - *

    - * For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer - * Guide. - *

    - * For more information on ExpressionAttributeNames and - * ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the - * Amazon DynamoDB Developer Guide. - *

    - * KeyConditionExpression replaces the legacy - * KeyConditions parameter. - */ - public String getKeyConditionExpression() { - return keyConditionExpression; - } - - /** - * Sets the condition that specifies the key value(s) for items to be - * retrieved by the Query action. - *

    - * The condition must perform an equality test on a single hash key value. - * The condition can also test for one or more range key values. A - * Query can use KeyConditionExpression to retrieve a single - * item with a given hash and range key value, or several items that have - * the same hash key value but different range key values. - *

    - * The hash key equality test is required, and must be specified in the - * following format: - *

    - * hashAttributeName = :hashval - *

    - * If you also want to provide a range key condition, it must be combined - * using AND with the hash key condition. Following is an example, - * using the = comparison operator for the range key: - *

    - * hashAttributeName = :hashval AND - * rangeAttributeName = :rangeval - *

    - * Valid comparisons for the range key condition are as follows: - *

      - *
    • - *

      - * rangeAttributeName = :rangeval - true if - * the range key is equal to :rangeval.

    • - *
    • - *

      - * rangeAttributeName < :rangeval - true if - * the range key is less than :rangeval.

    • - *
    • - *

      - * rangeAttributeName <= :rangeval - true - * if the range key is less than or equal to :rangeval.

    • - *
    • - *

      - * rangeAttributeName > :rangeval - true if - * the range key is greater than :rangeval.

    • - *
    • - *

      - * rangeAttributeName >= :rangeval - true - * if the range key is greater than or equal to :rangeval.

    • - *
    • - *

- * rangeAttributeName BETWEEN :rangeval1 - * AND :rangeval2 - true if the range key is greater than - * or equal to :rangeval1, and less than or equal to - * :rangeval2.

    • - *
    • - *

      - * begins_with (rangeAttributeName, - * :rangeval) - true if the range key begins with a - * particular operand. Note that the function name begins_with - * is case-sensitive.

    • - *
    - *

    - * Use ExpressionAttributeValues (via {@link #withExpressionAttributeValues(Map)}) to - * replace tokens such as :hashval and :rangeval - * with actual values at runtime. - *

- * You can optionally use ExpressionAttributeNames (via - * {@link #withExpressionAttributeNames(Map)}) to replace the names of the hash and range - * attributes with placeholder tokens. This might be necessary if an - * attribute name conflicts with a DynamoDB reserved word. For example, the - * following KeyConditionExpression causes an error because - * Size is a reserved word: - *

      - *
    • Size = :myval
    • - *
    - *

- * To work around this, define a placeholder (such as #myval) to - * represent the attribute name Size. KeyConditionExpression - * then is as follows: - *

      - *
    • #S = - * :myval
    • - *
    - *

    - * For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide. - *

    - * For more information on ExpressionAttributeNames and - * ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the Amazon - * DynamoDB Developer Guide. - *

    - * KeyConditionExpression replaces the legacy KeyConditions - * parameter. - *

    - * When a key expression is specified, the corresponding name-map and - * value-map can optionally be specified via {@link #withExpressionAttributeNames(Map)} and - * {@link #withExpressionAttributeValues(Map)}. - */ - public void setKeyConditionExpression(String keyConditionExpression) { - this.keyConditionExpression = keyConditionExpression; - } - - public DynamoDbQueryExpression withKeyConditionExpression( - String keyConditionExpression) { - this.keyConditionExpression = keyConditionExpression; - return this; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * - * @return One or more substitution variables for simplifying complex - * expressions. - * @see QueryRequest#getExpressionAttributeNames() - */ - public java.util.Map getExpressionAttributeNames() { - - return expressionAttributeNames; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * - * @param expressionAttributeNames - * One or more substitution variables for simplifying complex - * expressions. - * @see QueryRequest#setExpressionAttributeNames(Map) - */ - public void setExpressionAttributeNames( - java.util.Map expressionAttributeNames) { - this.expressionAttributeNames = expressionAttributeNames; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * - * @param expressionAttributeNames - * One or more substitution variables for simplifying complex - * expressions. - * - * @return A reference to this updated object so that method calls can be - * chained together. - * @see QueryRequest#withExpressionAttributeNames(Map) - */ - public DynamoDbQueryExpression withExpressionAttributeNames( - java.util.Map expressionAttributeNames) { - setExpressionAttributeNames(expressionAttributeNames); - return this; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * The method adds a new key-value pair into ExpressionAttributeNames - * parameter, and returns a reference to this object so that method calls - * can be chained together. - * - * @param key - * The key of the entry to be added into - * ExpressionAttributeNames. - * @param value - * The corresponding value of the entry to be added into - * ExpressionAttributeNames. - * - * @see QueryRequest#addExpressionAttributeNamesEntry(String, String) - */ - public DynamoDbQueryExpression addExpressionAttributeNamesEntry( - String key, String value) { - if (null == this.expressionAttributeNames) { - this.expressionAttributeNames = new java.util.HashMap(); - } - if (this.expressionAttributeNames.containsKey(key)) { - throw new IllegalArgumentException("Duplicated keys (" + key + ") are provided."); - } - this.expressionAttributeNames.put(key, value); - return this; - } - - /** - * Removes all the entries added into ExpressionAttributeNames. - *
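Tying the pieces above together, a hedged sketch of a key condition expression that works around the reserved word Size with a name placeholder; the hash key attribute name id and the values are assumptions.

    DynamoDbQueryExpression<Catalog> query = new DynamoDbQueryExpression<Catalog>()
            .withKeyConditionExpression("id = :hashval AND #S = :myval")
            .addExpressionAttributeNamesEntry("#S", "Size")               // placeholder for the reserved word
            .addExpressionAttributeValuesEntry(":hashval", AttributeValue.builder().s("item-123").build())
            .addExpressionAttributeValuesEntry(":myval", AttributeValue.builder().n("42").build());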

    - * Returns a reference to this object so that method calls can be chained - * together. - */ - public DynamoDbQueryExpression clearExpressionAttributeNamesEntries() { - this.expressionAttributeNames = null; - return this; - } - - /** - * One or more values that can be substituted in an expression. - * - * @return One or more values that can be substituted in an expression. - * - * @see QueryRequest#getExpressionAttributeValues() - */ - public java.util.Map getExpressionAttributeValues() { - - return expressionAttributeValues; - } - - /** - * One or more values that can be substituted in an expression. - * - * @param expressionAttributeValues - * One or more values that can be substituted in an expression. - * - * @see QueryRequest#setExpressionAttributeValues(Map) - */ - public void setExpressionAttributeValues( - java.util.Map expressionAttributeValues) { - this.expressionAttributeValues = expressionAttributeValues; - } - - /** - * One or more values that can be substituted in an expression. - * - * @param expressionAttributeValues - * One or more values that can be substituted in an expression. - * - * @return A reference to this updated object so that method calls can be - * chained together. - * @see QueryRequest#withExpressionAttributeValues(Map) - */ - public DynamoDbQueryExpression withExpressionAttributeValues( - java.util.Map expressionAttributeValues) { - setExpressionAttributeValues(expressionAttributeValues); - return this; - } - - /** - * One or more values that can be substituted in an expression. The method - * adds a new key-value pair into ExpressionAttributeValues parameter, and - * returns a reference to this object so that method calls can be chained - * together. - * - * @param key - * The key of the entry to be added into - * ExpressionAttributeValues. - * @param value - * The corresponding value of the entry to be added into - * ExpressionAttributeValues. - * - * @see QueryRequest#addExpressionAttributeValuesEntry(String, - * AttributeValue) - */ - public DynamoDbQueryExpression addExpressionAttributeValuesEntry( - String key, AttributeValue value) { - if (null == this.expressionAttributeValues) { - this.expressionAttributeValues = new java.util.HashMap(); - } - if (this.expressionAttributeValues.containsKey(key)) { - throw new IllegalArgumentException("Duplicated keys (" + key + ") are provided."); - } - this.expressionAttributeValues.put(key, value); - return this; - } - - /** - * Removes all the entries added into ExpressionAttributeValues. - *

    - * Returns a reference to this object so that method calls can be chained - * together. - */ - public DynamoDbQueryExpression clearExpressionAttributeValuesEntries() { - this.expressionAttributeValues = null; - return this; - } - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - * - * @return The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - * - * @see software.amazon.awssdk.services.dynamodb.model.Select - */ - public String select() { - return select; - } - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - * - * @param select The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - * - * @see software.amazon.awssdk.services.dynamodb.model.Select - */ - public void setSelect(String select) { - this.select = select; - } - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - * - * @param select The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - * - * @see software.amazon.awssdk.services.dynamodb.model.Select - */ - public void setSelect(Select select) { - this.select = select.toString(); - } - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - *

    - * Returns a reference to this object so that method calls can be chained together. - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - * - * @param select The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - * - * @return A reference to this updated object so that method calls can be chained - * together. - * - * @see software.amazon.awssdk.services.dynamodb.model.Select - */ - public DynamoDbQueryExpression withSelect(String select) { - this.select = select; - return this; - } - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - *

    - * Returns a reference to this object so that method calls can be chained together. - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - * - * @param select The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - * - * @return A reference to this updated object so that method calls can be chained - * together. - * - * @see software.amazon.awssdk.services.dynamodb.model.Select - */ - public DynamoDbQueryExpression withSelect(Select select) { - this.select = select.toString(); - return this; - } - - /** - * A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - * - * @return A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - */ - public String getProjectionExpression() { - return projectionExpression; - } - - /** - * A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - * - * @param projectionExpression A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - */ - public void setProjectionExpression(String projectionExpression) { - this.projectionExpression = projectionExpression; - } - - /** - * A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - *

    - * Returns a reference to this object so that method calls can be chained together. - * - * @param projectionExpression A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - * - * @return A reference to this updated object so that method calls can be chained - * together. - */ - public DynamoDbQueryExpression withProjectionExpression(String projectionExpression) { - this.projectionExpression = projectionExpression; - return this; - } - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *
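A hedged sketch combining Select, ProjectionExpression and ReturnConsumedCapacity; attribute names are illustrative, and as noted above the consumed capacity is only surfaced when calling queryPage.

    DynamoDbQueryExpression<Catalog> query = new DynamoDbQueryExpression<Catalog>()
            .withHashKeyValues(hashKey)
            .withSelect(Select.SPECIFIC_ATTRIBUTES)                    // return only the requested attributes
            .withProjectionExpression("id, createdDate, #S")           // comma-separated attribute list
            .addExpressionAttributeNamesEntry("#S", "Size")            // placeholder for a reserved word
            .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL); // capacity reported via queryPage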

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the query. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.queryPage}, not {@code DynamoDBMapper.query}. - * - * @return A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - * - * @see software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity - */ - public String getReturnConsumedCapacity() { - return returnConsumedCapacity; - } - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the query. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.queryPage}, not {@code DynamoDBMapper.query}. - * - * @param returnConsumedCapacity A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - * - * @see software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity - */ - public void setReturnConsumedCapacity(String returnConsumedCapacity) { - this.returnConsumedCapacity = returnConsumedCapacity; - } - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the query. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.queryPage}, not {@code DynamoDBMapper.query}. - * - * @param returnConsumedCapacity A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - * - * @see software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity - */ - public void setReturnConsumedCapacity(ReturnConsumedCapacity returnConsumedCapacity) { - this.returnConsumedCapacity = returnConsumedCapacity.toString(); - } - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *

    - * Returns a reference to this object so that method calls can be chained together. - *

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the query. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.queryPage}, not {@code DynamoDBMapper.query}. - * - * @param returnConsumedCapacity A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - * - * @return A reference to this updated object so that method calls can be chained - * together. - * - * @see software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity - */ - public DynamoDbQueryExpression withReturnConsumedCapacity(String returnConsumedCapacity) { - this.returnConsumedCapacity = returnConsumedCapacity; - return this; - } - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *

    - * Returns a reference to this object so that method calls can be chained together. - *

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the query. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.queryPage}, not {@code DynamoDBMapper.query}. - * - * @param returnConsumedCapacity A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - * - * @return A reference to this updated object so that method calls can be chained - * together. - * - * @see software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity - */ - public DynamoDbQueryExpression withReturnConsumedCapacity(ReturnConsumedCapacity returnConsumedCapacity) { - this.returnConsumedCapacity = returnConsumedCapacity.toString(); - return this; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbRangeKey.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbRangeKey.java deleted file mode 100644 index c2912a8017a8..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbRangeKey.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation for marking a property in a class as the range key for a DynamoDB - * table. Applied to the getter method or the class field for the range key - * property. If the annotation is applied directly to the class field, the - * corresponding getter and setter must be declared in the same class. - *

    - * This annotation is required for tables that use a range key. - */ -@DynamoDb -@DynamoDbKeyed(software.amazon.awssdk.services.dynamodb.model.KeyType.RANGE) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbRangeKey { - - /** - * Optional parameter when the name of the attribute as stored in DynamoDB - * should differ from the name used by the getter / setter. - */ - String attributeName() default ""; - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbSaveExpression.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbSaveExpression.java deleted file mode 100644 index 1b4bd98cb763..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbSaveExpression.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; - -/** - * Enables adding options to a save operation. - * For example, you may want to save only if an attribute has a particular value. - * @see DynamoDbMapper#save(Object, DynamoDbSaveExpression) - */ -public class DynamoDbSaveExpression { - - /** Optional expected attributes. */ - private Map expectedAttributes; - - /** - * The logical operator on the expected value conditions of this save - * operation. - */ - private String conditionalOperator; - - /** - * Gets the map of attribute names to expected attribute values to check on save. - * - * @return The map of attribute names to expected attribute value conditions to check on save - */ - public Map getExpected() { - return expectedAttributes; - } - - /** - * Sets the expected condition to the map of attribute names to expected attribute values given. - * - * @param expectedAttributes - * The map of attribute names to expected attribute value conditions to check on save - */ - public void setExpected(Map expectedAttributes) { - this.expectedAttributes = expectedAttributes; - } - - /** - * Sets the expected condition to the map of attribute names to expected - * attribute values given and returns a pointer to this object for - * method-chaining. - * - * @param expectedAttributes - * The map of attribute names to expected attribute value - * conditions to check on save - */ - public DynamoDbSaveExpression withExpected(Map expectedAttributes) { - setExpected(expectedAttributes); - return this; - } - - /** - * Adds one entry to the expected conditions and returns a pointer to this - * object for method-chaining. - * - * @param attributeName - * The name of the attribute. - * @param expected - * The expected attribute value. 
- */ - public DynamoDbSaveExpression withExpectedEntry(String attributeName, ExpectedAttributeValue expected) { - if (expectedAttributes == null) { - expectedAttributes = new HashMap(); - } - expectedAttributes.put(attributeName, expected); - return this; - } - - /** - * Returns the logical operator on the expected value conditions of this save - * operation. - */ - public String getConditionalOperator() { - return conditionalOperator; - } - - /** - * Sets the logical operator on the expected value conditions of this save - * operation. - */ - public void setConditionalOperator(String conditionalOperator) { - this.conditionalOperator = conditionalOperator; - } - - /** - * Sets the logical operator on the expected value conditions of this save - * operation. - */ - public void setConditionalOperator(ConditionalOperator conditionalOperator) { - setConditionalOperator(conditionalOperator.toString()); - } - - /** - * Sets the logical operator on the expected value conditions of this save - * operation and returns a pointer to this object for method-chaining. - */ - public DynamoDbSaveExpression withConditionalOperator(String conditionalOperator) { - setConditionalOperator(conditionalOperator); - return this; - } - - /** - * Sets the logical operator on the expected value conditions of this save - * operation and returns a pointer to this object for method-chaining. - */ - public DynamoDbSaveExpression withConditionalOperator(ConditionalOperator conditionalOperator) { - return withConditionalOperator(conditionalOperator.toString()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbScalarAttribute.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbScalarAttribute.java deleted file mode 100644 index 7844543ca3b9..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbScalarAttribute.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; - -/** - * @Deprecated - Replaced by {@link DynamoDbTyped} - */ -@Deprecated -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbScalarAttribute { - - /** - * Optional parameter when the name of the attribute as stored in DynamoDB - * should differ from the name used by the getter / setter. - */ - String attributeName() default ""; - - /** - * The scalar attribute type. 
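As an illustration of the conditional save options in DynamoDbSaveExpression above, a minimal sketch; the mapper, the item being saved and the attribute names are assumptions, not part of this file.

    DynamoDbSaveExpression saveExpression = new DynamoDbSaveExpression()
            .withExpectedEntry("status", ExpectedAttributeValue.builder()
                    .value(AttributeValue.builder().s("ACTIVE").build())
                    .build())
            .withExpectedEntry("version", ExpectedAttributeValue.builder()
                    .value(AttributeValue.builder().n("3").build())
                    .build())
            .withConditionalOperator(ConditionalOperator.AND);   // save only if both expected values match

    mapper.save(item, saveExpression);                           // the save fails if the expected conditions do not hold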
- * @see software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType - */ - ScalarAttributeType type(); - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbScanExpression.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbScanExpression.java deleted file mode 100644 index fe1facd5c22f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbScanExpression.java +++ /dev/null @@ -1,1017 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.ConditionalOperator; -import software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity; -import software.amazon.awssdk.services.dynamodb.model.ScanRequest; -import software.amazon.awssdk.services.dynamodb.model.Select; - -/** - * Options for filtering results from a scan operation. For example, callers can - * specify filter conditions so that only objects whose attributes match - * different conditions are returned (see {@link ComparisonOperator} for more - * information on the available comparison types). - * - * @see DynamoDbMapper#scan(Class, DynamoDbScanExpression) - */ -public class DynamoDbScanExpression { - - /** Optional filter to limit the results of the scan. */ - private Map scanFilter; - - /** The exclusive start key for this scan. */ - private Map exclusiveStartKey; - - /** The limit of items to scan during this scan. */ - private Integer limit; - - /** - * The total number of segments into which the scan will be divided. - * Only required for parallel scan operation. - */ - private Integer totalSegments; - - /** - * The ID (zero-based) of the segment to be scanned. - * Only required for parallel scan operation. - */ - private Integer segment; - - /** - * The logical operator on the filter conditions of this scan. - */ - private String conditionalOperator; - - /** - * Evaluates the scan results and returns only the desired values.

    The - * condition you specify is applied to the items scanned; any items that - * do not match the expression are not returned. - */ - private String filterExpression; - - /** - * One or more substitution variables for simplifying complex - * expressions. The following are some use cases for an - * ExpressionAttributeName:

    • Shorten an attribute name that - * is very long or unwieldy in an expression.

    • Create a - * placeholder for repeating occurrences of an attribute name in an - * expression.

    • Prevent special characters in an attribute - * name from being misinterpreted in an expression.

    Use - * the # character in an expression to dereference an attribute - * name. For example, consider the following expression: - *

    • order.customerInfo.LastName = "Smith" OR - * order.customerInfo.LastName = "Jones"

    Now suppose - * that you specified the following for ExpressionAttributeNames: - *

    • {"n":"order.customerInfo.LastName"}

    - *

    The expression can now be simplified as follows: - *

    • #n = "Smith" OR #n = "Jones"

    - */ - private java.util.Map expressionAttributeNames; - - /** - * One or more values that can be substituted in an expression.

    Use - * the : character in an expression to dereference an attribute - * value. For example, consider the following expression: - *

    • ProductStatus IN - * ("Available","Backordered","Discontinued")

    Now - * suppose that you specified the following for - * ExpressionAttributeValues:

    • { - * "a":{"S":"Available"}, "b":{"S":"Backordered"}, - * "d":{"S":"Discontinued"} }

    The expression can now - * be simplified as follows:

    • ProductStatus IN - * (:a,:b,:c)

    - */ - private java.util.Map expressionAttributeValues; - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index.
    • ALL_ATTRIBUTES - Returns all of - * the item attributes from the specified table or index. If you query a - * local secondary index, then for each matching item in the index - * DynamoDB will fetch the entire item from the parent table. If the - * index is configured to project all item attributes, then all of the - * data can be obtained from the local secondary index, and no fetching - * is required.

    • ALL_PROJECTED_ATTRIBUTES - - * Allowed only when querying an index. Retrieves all attributes that - * have been projected into the index. If the index is configured to - * project all attributes, this return value is equivalent to specifying - * ALL_ATTRIBUTES.

    • COUNT - - * Returns the number of matching items, rather than the matching items - * themselves.

    • SPECIFIC_ATTRIBUTES - Returns - * only the attributes listed in AttributesToGet. This return - * value is equivalent to specifying AttributesToGet without - * specifying any value for Select.

      If you query a local - * secondary index and request only attributes that are projected into - * that index, the operation will read only the index and not the table. - * If any of the requested attributes are not projected into the local - * secondary index, DynamoDB will fetch each of these attributes from the - * parent table. This extra fetching incurs additional throughput cost - * and latency.

      If you query a global secondary index, you can only - * request attributes that are projected into the index. Global secondary - * index queries cannot fetch attributes from the parent table.

    • - *

    If neither Select nor AttributesToGet are - * specified, DynamoDB defaults to ALL_ATTRIBUTES when - * accessing a table, and ALL_PROJECTED_ATTRIBUTES when - * accessing an index. You cannot use both Select and - * AttributesToGet together in a single request, unless the value - * for Select is SPECIFIC_ATTRIBUTES. (This usage is - * equivalent to specifying AttributesToGet without any value for - * Select.) - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - */ - private String select; - - /** - * A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - */ - private String projectionExpression; - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the scan. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.scanPage}, not {@code DynamoDBMapper.scan}. - */ - private String returnConsumedCapacity; - - /** - * Optional index name that can be specified for the scan operation. - */ - private String indexName; - - private Boolean consistentRead; - - /** - * Returns the name of the index to be used by this scan; or null if there - * is none. - */ - public String getIndexName() { - return indexName; - } - - /** - * Sets the name of the index to be used by this scan. - */ - public void setIndexName(String indexName) { - this.indexName = indexName; - } - - /** - * Sets the name of the index to be used by this scan. - *

    - * Returns a pointer to this object for method-chaining. - */ - public DynamoDbScanExpression withIndexName(String indexName) { - setIndexName(indexName); - return this; - } - - /** - * Returns the scan filter as a map of attribute names to conditions. - * - * @return The scan filter as a map of attribute names to conditions. - */ - public Map scanFilter() { - return scanFilter; - } - - /** - * Sets the scan filter to the map of attribute names to conditions given. - * - * @param scanFilter - * The map of attribute names to conditions to use when filtering - * scan results. - */ - public void setScanFilter(Map scanFilter) { - this.scanFilter = scanFilter; - } - - /** - * Sets the scan filter to the map of attribute names to conditions given - * and returns a pointer to this object for method-chaining. - * - * @param scanFilter - * The map of attribute names to conditions to use when filtering - * scan results. - */ - public DynamoDbScanExpression withScanFilter(Map scanFilter) { - setScanFilter(scanFilter); - return this; - } - - /** - * Adds a new filter condition to the current scan filter. - * - * @param attributeName - * The name of the attribute on which the specified condition - * operates. - * @param condition - * The condition which describes how the specified attribute is - * compared and if a row of data is included in the results - * returned by the scan operation. - */ - public void addFilterCondition(String attributeName, Condition condition) { - if (scanFilter == null) { - scanFilter = new HashMap(); - } - - scanFilter.put(attributeName, condition); - } - - /** - * Adds a new filter condition to the current scan filter and returns a - * pointer to this object for method-chaining. - * - * @param attributeName - * The name of the attribute on which the specified condition - * operates. - * @param condition - * The condition which describes how the specified attribute is - * compared and if a row of data is included in the results - * returned by the scan operation. - */ - public DynamoDbScanExpression withFilterConditionEntry(String attributeName, Condition condition) { - if (scanFilter == null) { - scanFilter = new HashMap(); - } - - scanFilter.put(attributeName, condition); - return this; - } - - - /** - * Returns the exclusive start key for this scan. - */ - public Map getExclusiveStartKey() { - return exclusiveStartKey; - } - - /** - * Sets the exclusive start key for this scan. - */ - public void setExclusiveStartKey(Map exclusiveStartKey) { - this.exclusiveStartKey = exclusiveStartKey; - } - - /** - * Sets the exclusive start key for this scan and returns a pointer to this - * object for method-chaining. - */ - public DynamoDbScanExpression withExclusiveStartKey(Map exclusiveStartKey) { - this.exclusiveStartKey = exclusiveStartKey; - return this; - } - - /** - * Returns the limit of items to scan during this scan. - *
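To make the filter-condition and index settings above concrete, here is a minimal sketch; the attribute name, index name, and threshold are hypothetical, and the Condition, ComparisonOperator, and AttributeValue classes come from software.amazon.awssdk.services.dynamodb.model.

    Condition amountAbove = Condition.builder()
            .comparisonOperator(ComparisonOperator.GT)
            .attributeValueList(AttributeValue.builder().n("100").build())
            .build();

    DynamoDbScanExpression legacyFilter = new DynamoDbScanExpression()
            .withFilterConditionEntry("amount", amountAbove)  // legacy ScanFilter-style condition
            .withIndexName("amount-index")                    // hypothetical index name
            .withLimit(25);                                   // items scanned per request, not items returned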

    - * Use with caution. Please note that this is not the same as the - * number of items to return from the scan operation -- the operation will - * cease and return as soon as this many items are scanned, even if no - * matching results are found. Furthermore, {@link PaginatedScanList} will - * execute as many scan operations as necessary until it either reaches the - * end of the result set as indicated by DynamoDB or enough elements are - * available to fulfill the list operation (e.g. iteration). Therefore, - * except when scanning without a scan filter, it's usually bad practice to - * set a low limit, since doing so will often generate the same amount of - * traffic to DynamoDB but with a greater number of round trips and - * therefore more overall latency. - */ - public Integer limit() { - return limit; - } - - /** - * Sets the limit of items to scan during this scan. Please note that this - * is not the same as the number of items to return from the scan - * operation -- the operation will cease and return as soon as this many - * items are scanned, even if no matching results are found. - * - * @see DynamoDbScanExpression#limit() - */ - public void setLimit(Integer limit) { - this.limit = limit; - } - - /** - * Sets the limit of items to scan and returns a pointer to this object for - * method-chaining. Please note that this is not the same as the - * number of items to return from the scan operation -- the operation will - * cease and return as soon as this many items are scanned, even if no - * matching results are found. - * - * @see DynamoDbScanExpression#limit() - */ - public DynamoDbScanExpression withLimit(Integer limit) { - this.limit = limit; - return this; - } - - /** - * Returns the total number of segments into which the scan will be divided. - */ - public Integer getTotalSegments() { - return totalSegments; - } - - /** - * Sets the total number of segments into which the scan will be divided. - */ - public void setTotalSegments(Integer totalSegments) { - this.totalSegments = totalSegments; - } - - /** - * Sets the total number of segments into which the scan will be divided and - * returns a pointer to this object for method-chaining. - */ - public DynamoDbScanExpression withTotalSegments(Integer totalSegments) { - setTotalSegments(totalSegments); - return this; - } - - /** - * Returns the ID of the segment to be scanned. - */ - public Integer segment() { - return segment; - } - - /** - * Sets the ID of the segment to be scanned. - */ - public void setSegment(Integer segment) { - this.segment = segment; - } - - /** - * Sets the ID of the segment to be scanned and returns a pointer to this - * object for method-chaining. - */ - public DynamoDbScanExpression withSegment(Integer segment) { - setSegment(segment); - return this; - } - - /** - * Returns the logical operator on the filter conditions of this scan. - */ - public String getConditionalOperator() { - return conditionalOperator; - } - - /** - * Sets the logical operator on the filter conditions of this scan. - */ - public void setConditionalOperator(String conditionalOperator) { - this.conditionalOperator = conditionalOperator; - } - - /** - * Sets the logical operator on the filter conditions of this scan. - */ - public void setConditionalOperator(ConditionalOperator conditionalOperator) { - setConditionalOperator(conditionalOperator.toString()); - } - - /** - * Sets the logical operator on the filter conditions of this scan and - * returns a pointer to this object for method-chaining. 
- */ - public DynamoDbScanExpression withConditionalOperator(String conditionalOperator) { - setConditionalOperator(conditionalOperator); - return this; - } - - /** - * Sets the logical operator on the filter conditions of this scan and - * returns a pointer to this object for method-chaining. - */ - public DynamoDbScanExpression withConditionalOperator(ConditionalOperator conditionalOperator) { - return withConditionalOperator(conditionalOperator.toString()); - } - - /** - * Evaluates the query results and returns only the desired values. - *
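The segment and totalSegments settings shown earlier are the building blocks of a parallel scan; a minimal sketch, assuming a hypothetical Product class and leaving the per-segment threading to the caller, might look like this:

    int totalSegments = 4;
    for (int segment = 0; segment < totalSegments; segment++) {
        DynamoDbScanExpression segmentScan = new DynamoDbScanExpression()
                .withTotalSegments(totalSegments)
                .withSegment(segment);
        // In practice each segment would be scanned from its own worker thread;
        // DynamoDbMapper.parallelScan automates this pattern.
        PaginatedScanList<Product> part = mapper.scan(Product.class, segmentScan);
    }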

    - * The condition you specify is applied to the items queried; any items that - * do not match the expression are not returned. - * - * @return Evaluates the query results and returns only the desired values. - *

    - * The condition you specify is applied to the items queried; any - * items that do not match the expression are not returned. - * @see ScanRequest#getFilterExpression() - */ - public String getFilterExpression() { - return filterExpression; - } - - /** - * Evaluates the query results and returns only the desired values. - *

    - * The condition you specify is applied to the items queried; any items that - * do not match the expression are not returned. - * - * @param filterExpression - * Evaluates the query results and returns only the desired - * values. - *

    - * The condition you specify is applied to the items queried; any - * items that do not match the expression are not returned. - * @see ScanRequest#setFilterExpression(String) - */ - public void setFilterExpression(String filterExpression) { - this.filterExpression = filterExpression; - } - - /** - * Evaluates the query results and returns only the desired values. - *

    - * The condition you specify is applied to the items queried; any items that - * do not match the expression are not returned. - *

    - * Returns a reference to this object so that method calls can be chained - * together. - * - * @param filterExpression - * Evaluates the query results and returns only the desired - * values. - *

    - * The condition you specify is applied to the items queried; any - * items that do not match the expression are not returned. - * - * @return A reference to this updated object so that method calls can be - * chained together. - * @see ScanRequest#withFilterExpression(String) - */ - public DynamoDbScanExpression withFilterExpression(String filterExpression) { - this.filterExpression = filterExpression; - return this; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * - * @return One or more substitution variables for simplifying complex - * expressions. - * @see scanRequest#getExpressionAttributeNames() - */ - public java.util.Map getExpressionAttributeNames() { - - return expressionAttributeNames; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * - * @param expressionAttributeNames - * One or more substitution variables for simplifying complex - * expressions. - * @see ScanRequest#setExpressionAttributeNames(Map) - */ - public void setExpressionAttributeNames( - java.util.Map expressionAttributeNames) { - this.expressionAttributeNames = expressionAttributeNames; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * - * @param expressionAttributeNames - * One or more substitution variables for simplifying complex - * expressions. - * - * @return A reference to this updated object so that method calls can be - * chained together. - * @see ScanRequest#withExpressionAttributeNames(Map) - */ - public DynamoDbScanExpression withExpressionAttributeNames( - java.util.Map expressionAttributeNames) { - setExpressionAttributeNames(expressionAttributeNames); - return this; - } - - /** - * One or more substitution variables for simplifying complex expressions. - * The method adds a new key-value pair into ExpressionAttributeNames - * parameter, and returns a reference to this object so that method calls - * can be chained together. - * - * @param key - * The key of the entry to be added into - * ExpressionAttributeNames. - * @param value - * The corresponding value of the entry to be added into - * ExpressionAttributeNames. - * - * @see ScanRequest#addExpressionAttributeNamesEntry(String, String) - */ - public DynamoDbScanExpression addExpressionAttributeNamesEntry(String key, - String value) { - if (null == this.expressionAttributeNames) { - this.expressionAttributeNames = new java.util.HashMap(); - } - if (this.expressionAttributeNames.containsKey(key)) { - throw new IllegalArgumentException("Duplicated keys (" + key + ") are provided."); - } - this.expressionAttributeNames.put(key, value); - return this; - } - - /** - * Removes all the entries added into ExpressionAttributeNames. - *

    - * Returns a reference to this object so that method calls can be chained - * together. - */ - public DynamoDbScanExpression clearExpressionAttributeNamesEntries() { - this.expressionAttributeNames = null; - return this; - } - - /** - * One or more values that can be substituted in an expression. - * - * @return One or more values that can be substituted in an expression. - * - * @see ScanRequest#getExpressionAttributeValues() - */ - public java.util.Map getExpressionAttributeValues() { - - return expressionAttributeValues; - } - - /** - * One or more values that can be substituted in an expression. - * - * @param expressionAttributeValues - * One or more values that can be substituted in an expression. - * - * @see ScanRequest#setExpressionAttributeValues(Map) - */ - public void setExpressionAttributeValues( - java.util.Map expressionAttributeValues) { - this.expressionAttributeValues = expressionAttributeValues; - } - - /** - * One or more values that can be substituted in an expression. - * - * @param expressionAttributeValues - * One or more values that can be substituted in an expression. - * - * @return A reference to this updated object so that method calls can be - * chained together. - * @see ScanRequest#withExpressionAttributeValues(Map) - */ - public DynamoDbScanExpression withExpressionAttributeValues( - java.util.Map expressionAttributeValues) { - setExpressionAttributeValues(expressionAttributeValues); - return this; - } - - /** - * One or more values that can be substituted in an expression. The method - * adds a new key-value pair into ExpressionAttributeValues parameter, and - * returns a reference to this object so that method calls can be chained - * together. - * - * @param key - * The key of the entry to be added into - * ExpressionAttributeValues. - * @param value - * The corresponding value of the entry to be added into - * ExpressionAttributeValues. - * - * @see ScanRequest#addExpressionAttributeValuesEntry(String, - * AttributeValue) - */ - public DynamoDbScanExpression addExpressionAttributeValuesEntry(String key, - AttributeValue value) { - if (null == this.expressionAttributeValues) { - this.expressionAttributeValues = new java.util.HashMap(); - } - if (this.expressionAttributeValues.containsKey(key)) { - throw new IllegalArgumentException("Duplicated keys (" + key + ") are provided."); - } - this.expressionAttributeValues.put(key, value); - return this; - } - - /** - * Removes all the entries added into ExpressionAttributeValues. - *

    - * Returns a reference to this object so that method calls can be chained - * together. - */ - public DynamoDbScanExpression clearExpressionAttributeValuesEntries() { - this.expressionAttributeValues = null; - return this; - } - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - *
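Putting the expression-attribute substitution described earlier together with a filter expression, a scan could be configured as in this sketch; the Product class and the attribute names are assumptions.

    DynamoDbScanExpression filtered = new DynamoDbScanExpression()
            .withFilterExpression("#st IN (:avail, :back)")
            .addExpressionAttributeNamesEntry("#st", "ProductStatus")
            .addExpressionAttributeValuesEntry(":avail", AttributeValue.builder().s("Available").build())
            .addExpressionAttributeValuesEntry(":back", AttributeValue.builder().s("Backordered").build());

    PaginatedScanList<Product> matches = mapper.scan(Product.class, filtered);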

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - * - * @return The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - * - * @see software.amazon.awssdk.services.dynamodb.model.Select - */ - public String select() { - return select; - } - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - * - * @param select The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - * - * @see software.amazon.awssdk.services.dynamodb.model.Select - */ - public void setSelect(String select) { - this.select = select; - } - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - * - * @param select The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - * - * @see software.amazon.awssdk.services.dynamodb.model.Select - */ - public void setSelect(Select select) { - this.select = select.toString(); - } - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - *

    - * Returns a reference to this object so that method calls can be chained together. - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - * - * @param select The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - * - * @return A reference to this updated object so that method calls can be chained - * together. - * - * @see software.amazon.awssdk.services.dynamodb.model.Select - */ - public DynamoDbScanExpression withSelect(String select) { - this.select = select; - return this; - } - - /** - * The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - *

    - * Returns a reference to this object so that method calls can be chained together. - *

    - * Constraints:
    - * Allowed Values: ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT - * - * @param select The attributes to be returned in the result. You can retrieve all item - * attributes, specific item attributes, the count of matching items, or - * in the case of an index, some or all of the attributes projected into - * the index. - * - * @return A reference to this updated object so that method calls can be chained - * together. - * - * @see software.amazon.awssdk.services.dynamodb.model.Select - */ - public DynamoDbScanExpression withSelect(Select select) { - this.select = select.toString(); - return this; - } - - /** - * A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - * - * @return A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - */ - public String getProjectionExpression() { - return projectionExpression; - } - - /** - * A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - * - * @param projectionExpression A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - */ - public void setProjectionExpression(String projectionExpression) { - this.projectionExpression = projectionExpression; - } - - /** - * A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - *

    - * Returns a reference to this object so that method calls can be chained together. - * - * @param projectionExpression A string that identifies one or more attributes to retrieve from the - * table. These attributes can include scalars, sets, or elements of a - * JSON document. The attributes in the expression must be separated by - * commas.

    If no attribute names are specified, then all attributes - * will be returned. If any of the requested attributes are not found, - * they will not appear in the result.

    For more information, go to Accessing - * Item Attributes in the Amazon DynamoDB Developer Guide. - * - * @return A reference to this updated object so that method calls can be chained - * together. - */ - public DynamoDbScanExpression withProjectionExpression(String projectionExpression) { - this.projectionExpression = projectionExpression; - return this; - } - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *
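A short sketch of a projection-only scan using the method above; the attribute names and the Product class are assumptions.

    DynamoDbScanExpression projected = new DynamoDbScanExpression()
            .withProjectionExpression("ProductId, ProductStatus, #am")
            .addExpressionAttributeNamesEntry("#am", "amount")   // #am dereferences the "amount" attribute
            .withConsistentRead(true);

    PaginatedScanList<Product> slimItems = mapper.scan(Product.class, projected);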

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the scan. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.scanPage}, not {@code DynamoDBMapper.scan}. - * - * @return A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - * - * @see software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity - */ - public String getReturnConsumedCapacity() { - return returnConsumedCapacity; - } - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the scan. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.scanPage}, not {@code DynamoDBMapper.scan}. - * - * @param returnConsumedCapacity A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - * - * @see software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity - */ - public void setReturnConsumedCapacity(String returnConsumedCapacity) { - this.returnConsumedCapacity = returnConsumedCapacity; - } - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the scan. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.scanPage}, not {@code DynamoDBMapper.scan}. - * - * @param returnConsumedCapacity A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - * - * @see software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity - */ - public void setReturnConsumedCapacity(ReturnConsumedCapacity returnConsumedCapacity) { - this.returnConsumedCapacity = returnConsumedCapacity.toString(); - } - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *
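Because the consumed-capacity figure is only surfaced on the page-level call, a sketch of requesting it pairs ReturnConsumedCapacity.TOTAL with scanPage; the Product class is an assumption.

    DynamoDbScanExpression metered = new DynamoDbScanExpression()
            .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

    // scanPage issues a single Scan request, and the ConsumedCapacity reported by
    // DynamoDB for that request travels back on the returned page object.
    ScanResultPage<Product> firstPage = mapper.scanPage(Product.class, metered);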

    - * Returns a reference to this object so that method calls can be chained together. - *

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the scan. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.scanPage}, not {@code DynamoDBMapper.scan}. - * - * @param returnConsumedCapacity A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - * - * @return A reference to this updated object so that method calls can be chained - * together. - * - * @see software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity - */ - public DynamoDbScanExpression withReturnConsumedCapacity(String returnConsumedCapacity) { - this.returnConsumedCapacity = returnConsumedCapacity; - return this; - } - - /** - * A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - *

    - * Returns a reference to this object so that method calls can be chained together. - *

    - * Constraints:
    - * Allowed Values: INDEXES, TOTAL, NONE - *

    - * If enabled, the underlying request to DynamoDB will include the - * configured parameter value and the low-level response from DynamoDB will - * include the amount of capacity consumed by the scan. Currently, the - * consumed capacity is only exposed through the DynamoDBMapper when you - * call {@code DynamoDBMapper.scanPage}, not {@code DynamoDBMapper.scan}. - * - * @param returnConsumedCapacity A value that if set to TOTAL, the response includes - * ConsumedCapacity data for tables and indexes. If set to - * INDEXES, the response includes ConsumedCapacity - * for indexes. If set to NONE (the default), - * ConsumedCapacity is not included in the response. - * - * @return A reference to this updated object so that method calls can be chained - * together. - * - * @see software.amazon.awssdk.services.dynamodb.model.ReturnConsumedCapacity - */ - public DynamoDbScanExpression withReturnConsumedCapacity(ReturnConsumedCapacity returnConsumedCapacity) { - this.returnConsumedCapacity = returnConsumedCapacity.toString(); - return this; - } - - /** - * Returns whether this scan uses consistent reads. - * - * @see ScanRequest#isConsistentRead() - */ - public Boolean isConsistentRead() { - return consistentRead; - } - - /** - * Sets whether this scan uses consistent reads. - */ - public void setConsistentRead(Boolean consistentRead) { - this.consistentRead = consistentRead; - } - - /** - * Sets whether this scan uses consistent reads and returns a reference - * to this object for method chaining. - */ - public DynamoDbScanExpression withConsistentRead(Boolean consistentRead) { - this.consistentRead = consistentRead; - return this; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTable.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTable.java deleted file mode 100644 index 56dfb28eb356..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTable.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Inherited; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.TableNameOverride; - - -/** - * Annotation to mark a class as a DynamoDB table. - *

    - * This annotation is inherited by subclasses, and can be overridden by them as - * well. - * - * @see TableNameOverride - */ -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.TYPE) -@Inherited -public @interface DynamoDbTable { - - /** - * The name of the table to use for this class. - */ - String tableName(); - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTableMapper.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTableMapper.java deleted file mode 100644 index cac526178e23..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTableMapper.java +++ /dev/null @@ -1,518 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ResourceInUseException; -import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; -import software.amazon.awssdk.services.dynamodb.model.TableDescription; - -/** - * A wrapper for {@code DynamoDBMapper} which operates only on a specified - * class/table. All calls are forwarded to the underlying - * {@code DynamoDBMapper} which was used to create this table mapper. - * - * A minimal example using get annotations, - *

    - * @DynamoDBTable(tableName="TestTable")
    - * public class TestClass {
    - *     private Long key;
    - *     private String rangeKey;
    - *     private Double amount;
    - *     private Long version;
    - *
    - *     @DynamoDBHashKey
    - *     public Long getKey() { return key; }
    - *     public void setKey(Long key) { this.key = key; }
    - *
    - *     @DynamoDBRangeKey
    - *     public String getRangeKey() { return rangeKey; }
    - *     public void setRangeKey(String rangeKey) { this.rangeKey = rangeKey; }
    - *
    - *     @DynamoDBAttribute(attributeName="amount")
    - *     public Double getAmount() { return amount; }
    - *     public void setAmount(Double amount) { this.amount = amount; }
    - *
    - *     @DynamoDBVersionAttribute
    - *     public Long getVersion() { return version; }
    - *     public void setVersion(Long version) { this.version = version; }
    - * }
    - * 
    - * - * Initialize the DynamoDB mapper, - *
    - * DynamoDbClient dbClient = new AmazonDynamoDbClient();
    - * DynamoDBMapper dbMapper = new DynamoDBMapper(dbClient);
    - * 
    - * - * Then, create a new table mapper with hash and range key, - *
    - * DynamoDBTableMapper<TestClass,Long,String> mapper = dbMapper.newTableMapper(TestClass.class);
    - * 
    - * - * Or, if the table does not have a range key, - *
    - * DynamoDBTableMapper<TestClass,Long,?> table = dbMapper.newTableMapper(TestClass.class);
    - * 
    - * - * If you don't have your DynamoDB table set up yet, you can use, - *
    - * table.createTableIfNotExists(new ProvisionedThroughput(25L, 25L));
    - * 
    - * - * Save instances of annotated classes and retrieve them, - *
    - * TestClass object = new TestClass();
    - * object.setKey(1234L);
    - * object.setRangeKey("ABCD");
    - * object.setAmount(101D);
    - *
    - * try {
    - *     table.saveIfNotExists(object);
    - * } catch (ConditionalCheckFailedException e) {
    - *     // handle already existing
    - * }
    - * 
    - * - * Execute a query operation, - *
    - * int limit = 10;
    - * List<TestClass> objects = new ArrayList<TestClass>(limit);
    - *
    - * DynamoDBQueryExpression<TestClass> query = new DynamoDBQueryExpression()
    - *     .withRangeKeyCondition(table.rangeKey().name(), table.rangeKey().ge("ABAA"))
    - *     .withQueryFilterEntry("amount", table.field("amount").gt(100D))
    - *     .withHashKeyValues(1234L)
    - *     .withConsistentReads(true);
    - *
    - * QueryResponsePage<TestClass> results = new QueryResponsePage<TestClass>();
    - *
    - * do {
    - *     if (results.lastEvaluatedKey() != null) {
    - *         query.setExclusiveStartKey(results.lastEvaluatedKey());
    - *     }
    - *     query.setLimit(limit - objects.size());
    - *     results = mapper.query(query);
    - *     for (TestClass object : results.getResults()) {
    - *         objects.add(object);
    - *     }
    - * } while (results.lastEvaluatedKey() != null && objects.size() < limit)
    - * 
    - * - * @param The object type which this mapper operates. - * @param The hash key value type. - * @param The range key value type; use ? if no range key. - * - * @see DynamoDbMapper - * @see DynamoDbClient - */ -public final class DynamoDbTableMapper { - - private static final Logger log = LoggerFactory.getLogger(DynamoDbTableMapper.class); - - private final DynamoDbMapperTableModel model; - private final DynamoDbMapperFieldModel hk; - private final DynamoDbMapperFieldModel rk; - private final DynamoDbMapperConfig config; - private final DynamoDbMapper mapper; - private final DynamoDbClient db; - - /** - * Constructs a new table mapper for the given class. - * @param model The field model factory. - * @param mapper The DynamoDB mapper. - * @param db The service object to use for all service calls. - */ - protected DynamoDbTableMapper(DynamoDbClient db, DynamoDbMapper mapper, final DynamoDbMapperConfig config, - final DynamoDbMapperTableModel model) { - this.rk = model.rangeKeyIfExists(); - this.hk = model.hashKey(); - this.model = model; - this.config = config; - this.mapper = mapper; - this.db = db; - } - - /** - * Gets the field model for a given attribute. - * @param The field model's value type. - * @param attributeName The attribute name. - * @return The field model. - */ - public DynamoDbMapperFieldModel field(String attributeName) { - return this.model.field(attributeName); - } - - /** - * Gets the hash key field model for the specified type. - * @param The hash key type. - * @return The hash key field model. - * @throws DynamoDbMappingException If the hash key is not present. - */ - public DynamoDbMapperFieldModel hashKey() { - return this.model.hashKey(); - } - - /** - * Gets the range key field model for the specified type. - * @param The range key type. - * @return The range key field model. - * @throws DynamoDbMappingException If the range key is not present. - */ - public DynamoDbMapperFieldModel rangeKey() { - return this.model.rangeKey(); - } - - /** - * Retrieves multiple items from the table using their primary keys. - * @param itemsToGet The items to get. - * @return The list of objects. - * @see DynamoDbMapper#batchLoad - */ - public List batchLoad(Iterable itemsToGet) { - final Map> results = mapper.batchLoad(itemsToGet); - if (results.isEmpty()) { - return Collections.emptyList(); - } - return (List) results.get(mapper.getTableName(model.targetType(), config)); - } - - /** - * Saves the objects given using one or more calls to the batchWriteItem API. - * @param objectsToSave The objects to save. - * @return The list of failed batches. - * @see DynamoDbMapper#batchSave - */ - public List batchSave(Iterable objectsToSave) { - return mapper.batchWrite(objectsToSave, (Iterable) Collections.emptyList()); - } - - /** - * Deletes the objects given using one or more calls to the batchWtiteItem API. - * @param objectsToDelete The objects to delete. - * @return The list of failed batches. - * @see DynamoDbMapper#batchDelete - */ - public List batchDelete(Iterable objectsToDelete) { - return mapper.batchWrite((Iterable) Collections.emptyList(), objectsToDelete); - } - - /** - * Saves and deletes the objects given using one or more calls to the - * batchWriteItem API. - * @param objectsToWrite The objects to write. - * @param objectsToDelete The objects to delete. - * @return The list of failed batches. 
- * @see DynamoDbMapper#batchWrite - */ - public List batchWrite(Iterable objectsToWrite, Iterable objectsToDelete) { - return mapper.batchWrite(objectsToWrite, objectsToDelete); - } - - /** - * Loads an object with the hash key given. - * @param hashKey The hash key value. - * @return The object. - * @see DynamoDbMapper#load - */ - public T load(H hashKey) { - return mapper.load(model.targetType(), hashKey); - } - - /** - * Loads an object with the hash and range key. - * @param hashKey The hash key value. - * @param rangeKey The range key value. - * @return The object. - * @see DynamoDbMapper#load - */ - public T load(H hashKey, R rangeKey) { - return mapper.load(model.targetType(), hashKey, rangeKey); - } - - /** - * Saves the object given into DynamoDB. - * @param object The object to save. - * @see DynamoDbMapper#save - */ - public void save(T object) { - mapper.save(object); - } - - /** - * Saves the object given into DynamoDB using the specified saveExpression. - * @param object The object to save. - * @param saveExpression The save expression. - * @see DynamoDbMapper#save - */ - public void save(T object, DynamoDbSaveExpression saveExpression) { - mapper.save(object, saveExpression); - } - - /** - * Saves the object given into DynamoDB with the condition that the hash - * and if applicable, the range key, does not already exist. - * @param object The object to create. - * @throws ConditionalCheckFailedException If the object exists. - * @see DynamoDbMapper#save - * @see DynamoDbSaveExpression - * @see software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue - */ - public void saveIfNotExists(T object) throws ConditionalCheckFailedException { - final DynamoDbSaveExpression saveExpression = new DynamoDbSaveExpression(); - for (final DynamoDbMapperFieldModel key : model.keys()) { - saveExpression.withExpectedEntry(key.name(), ExpectedAttributeValue.builder() - .exists(false).build()); - } - mapper.save(object, saveExpression); - } - - /** - * Saves the object given into DynamoDB with the condition that the hash - * and, if applicable, the range key, already exist. - * @param object The object to update. - * @throws ConditionalCheckFailedException If the object does not exist. - * @see DynamoDbMapper#save - * @see DynamoDbSaveExpression - * @see software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue - */ - public void saveIfExists(T object) throws ConditionalCheckFailedException { - final DynamoDbSaveExpression saveExpression = new DynamoDbSaveExpression(); - for (final DynamoDbMapperFieldModel key : model.keys()) { - saveExpression.withExpectedEntry(key.name(), ExpectedAttributeValue.builder() - .exists(true).value(key.convert(key.get(object))).build()); - } - mapper.save(object, saveExpression); - } - - /** - * Deletes the given object from its DynamoDB table. - * @param object The object to delete. - * @see DynamoDbMapper#delete - */ - public void delete(final T object) { - mapper.delete(object); - } - - /** - * Deletes the given object from its DynamoDB table using the specified - * deleteExpression. - * @param object The object to delete. - * @param deleteExpression The delete expression. - * @see DynamoDbMapper#delete - */ - public void delete(final T object, final DynamoDbDeleteExpression deleteExpression) { - mapper.delete(object, deleteExpression); - } - - /** - * Deletes the given object from its DynamoDB table with the condition that - * the hash and, if applicable, the range key, already exist. - * @param object The object to delete. 
- * @throws ConditionalCheckFailedException If the object does not exist. - * @see DynamoDbMapper#delete - * @see DynamoDbDeleteExpression - * @see software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue - */ - public void deleteIfExists(T object) throws ConditionalCheckFailedException { - final DynamoDbDeleteExpression deleteExpression = new DynamoDbDeleteExpression(); - for (final DynamoDbMapperFieldModel key : model.keys()) { - deleteExpression.withExpectedEntry(key.name(), ExpectedAttributeValue.builder() - .exists(true).value(key.convert(key.get(object))).build()); - } - mapper.delete(object, deleteExpression); - } - - /** - * Evaluates the specified query expression and returns the count of matching - * items, without returning any of the actual item data - * @param queryExpression The query expression. - * @return The count. - * @see DynamoDbMapper#count - */ - public int count(DynamoDbQueryExpression queryExpression) { - return mapper.count(model.targetType(), queryExpression); - } - - /** - * Queries an Amazon DynamoDB table and returns the matching results as an - * unmodifiable list of instantiated objects. - * @param queryExpression The query expression. - * @return The query results. - * @see DynamoDbMapper#query - */ - public PaginatedQueryList query(DynamoDbQueryExpression queryExpression) { - return mapper.query(model.targetType(), queryExpression); - } - - /** - * Queries an Amazon DynamoDB table and returns a single page of matching - * results. - * @param queryExpression The query expression. - * @return The query results. - * @see DynamoDbMapper#query - */ - public QueryResultPage queryPage(DynamoDbQueryExpression queryExpression) { - return mapper.queryPage(model.targetType(), queryExpression); - } - - /** - * Evaluates the specified scan expression and returns the count of matching - * items, without returning any of the actual item data. - * @param scanExpression The scan expression. - * @return The count. - * @see DynamoDbMapper#count - */ - public int count(DynamoDbScanExpression scanExpression) { - return mapper.count(model.targetType(), scanExpression); - } - - /** - * Scans through an Amazon DynamoDB table and returns the matching results - * as an unmodifiable list of instantiated objects. - * @param scanExpression The scan expression. - * @return The scan results. - * @see DynamoDbMapper#scan - */ - public PaginatedScanList scan(DynamoDbScanExpression scanExpression) { - return mapper.scan(model.targetType(), scanExpression); - } - - /** - * Scans through an Amazon DynamoDB table and returns a single page of - * matching results. - * @param scanExpression The scan expression. - * @return The scan results. - * @see DynamoDbMapper#scanPage - */ - public ScanResultPage scanPage(DynamoDbScanExpression scanExpression) { - return mapper.scanPage(model.targetType(), scanExpression); - } - - /** - * Scans through an Amazon DynamoDB table on logically partitioned segments - * in parallel and returns the matching results in one unmodifiable list of - * instantiated objects. - * @param scanExpression The scan expression. - * @param totalSegments The total segments. - * @return The scan results. 
- * @see DynamoDbMapper#parallelScan - */ - public PaginatedParallelScanList parallelScan(DynamoDbScanExpression scanExpression, int totalSegments) { - return mapper.parallelScan(model.targetType(), scanExpression, totalSegments); - } - - /** - * Returns information about the table, including the current status of the - * table, when it was created, the primary key schema, and any indexes on - * the table. - * @return The describe table results. - * @see DynamoDbClient#describeTable - */ - public TableDescription describeTable() { - return db.describeTable(DescribeTableRequest.builder() - .tableName(mapper.getTableName(model.targetType(), config)) - .build()) - .table(); - } - - /** - * Creates the table with the specified throughput; also populates the same - * throughput for all global secondary indexes. - * @param throughput The provisioned throughput. - * @return The table decription. - * @see DynamoDbClient#createTable - * @see software.amazon.awssdk.services.dynamodb.model.CreateTableRequest - */ - public TableDescription createTable(ProvisionedThroughput throughput) { - CreateTableRequest request = mapper.generateCreateTableRequest(model.targetType()); - CreateTableRequest.Builder modified = request.toBuilder() - .provisionedThroughput(throughput); - if (request.globalSecondaryIndexes() != null) { - modified.globalSecondaryIndexes((Collection) null); - for (GlobalSecondaryIndex gsi : request.globalSecondaryIndexes()) { - gsi = gsi.toBuilder().provisionedThroughput(throughput).build(); - modified.globalSecondaryIndexes(gsi); - } - } - request = modified.build(); - return db.createTable(request).tableDescription(); - } - - /** - * Creates the table and ignores the {@code ResourceInUseException} if it - * ialready exists. - * @param throughput The provisioned throughput. - * @return True if created, or false if the table already existed. - * @see DynamoDbClient#createTable - * @see software.amazon.awssdk.services.dynamodb.model.CreateTableRequest - */ - public boolean createTableIfNotExists(ProvisionedThroughput throughput) { - try { - createTable(throughput); - } catch (final ResourceInUseException e) { - if (log.isTraceEnabled()) { - log.trace("Table already exists, no need to create", e); - } - return false; - } - return true; - } - - /** - * Deletes the table. - * @return The table decription. - * @see DynamoDbClient#deleteTable - * @see software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest - */ - public TableDescription deleteTable() { - return db.deleteTable( - mapper.generateDeleteTableRequest(model.targetType()) - ).tableDescription(); - } - - /** - * Deletes the table and ignores the {@code ResourceNotFoundException} if - * it does not already exist. - * @return True if the table was deleted, or false if the table did not exist. 
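A condensed sketch of the table-mapper lifecycle covered above, reusing the hypothetical TestClass from the class-level example; the throughput values are illustrative only.

    DynamoDbTableMapper<TestClass, Long, String> table = dbMapper.newTableMapper(TestClass.class);

    table.createTableIfNotExists(ProvisionedThroughput.builder()
            .readCapacityUnits(25L)
            .writeCapacityUnits(25L)
            .build());

    TestClass row = new TestClass();
    row.setKey(1234L);
    row.setRangeKey("ABCD");
    try {
        table.saveIfNotExists(row);            // conditional put on the key attributes
    } catch (ConditionalCheckFailedException e) {
        // a row with this hash/range key already exists
    }

    table.deleteTableIfExists();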
- * @see DynamoDbClient#deleteTable - * @see software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest - */ - public boolean deleteTableIfExists() { - try { - deleteTable(); - } catch (final ResourceNotFoundException e) { - if (log.isTraceEnabled()) { - log.trace("Table does not exist, no need to delete", e); - } - return false; - } - return true; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConverted.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConverted.java deleted file mode 100644 index 01b5e0758408..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConverted.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation to mark a property as using a custom type-converter. - * - *

    May be annotated on a user-defined annotation to pass additional - * properties to the {@link DynamoDbTypeConverter}.

    - * - *
    - * @CurrencyFormat(separator=" ") //<- user-defined annotation
    - * public Currency getCurrency()
    - * 
    - * - *

    Where,

    - *
    - * public class Currency {
    - *     private Double amount;
    - *     private String unit;
    - *
    - *     public Double getAmount() { return amount; }
    - *     public void setAmount(Double amount) { this.amount = amount; }
    - *
    - *     public String getUnit() { return unit; }
    - *     public void setUnit(String unit) { this.unit = unit; }
    - * }
    - * 
    - * - *

    And user-defined annotation,

    - *
    - * @Target({ElementType.METHOD})
    - * @Retention(RetentionPolicy.RUNTIME)
    - * @DynamoDBTypeConverted(converter=CurrencyFormat.Converter.class)
    - * public @interface CurrencyFormat {
    - *
    - *     String separator() default " ";
    - *
    - *     public static class Converter implements DynamoDBTypeConverter<String,Currency> {
    - *         private final String separator;
    - *         public Converter(final Class<Currency> targetType, final CurrencyFormat annotation) {
    - *             this.separator = annotation.separator();
    - *         }
    - *         public Converter() {
    - *             this.separator = "|";
    - *         }
    - *         @Override
    - *         public String convert(final Currency o) {
    - *             return String.valueOf(o.getAmount()) + separator + o.getUnit();
    - *         }
    - *         @Override
    - *         public Currency unconvert(final String o) {
    - *             final String[] strings = o.split(separator);
    - *             final Currency currency = new Currency();
    - *             currency.setAmount(Double.valueOf(strings[0]));
    - *             currency.setUnit(strings[1]);
    - *             return currency;
    - *         }
    - *     }
    - * }
    - * 
    - * - *

    Alternately, the property/field may be annotated directly (which - * requires the converter to provide a default constructor or a constructor - * with only the {@code targetType}),

    - *
    - * @DynamoDBTypeConverted(converter=CurrencyFormat.Converter.class)
    - * public Currency getCurrency() { return currency; }
    - * 
    - * - *

    All converters are null-safe; a {@code null} value will never be passed - * to {@link DynamoDbTypeConverter#convert} - * or {@link DynamoDbTypeConverter#unconvert}.

    - * - *

    Precedence for selecting a type-converter first goes to getter annotations, - * then field, then finally type.

    - * - *

    May be used in combination with {@link DynamoDbTyped} to specify the - * attribute type binding.

    - *

    Compatible with {@link DynamoDbAutoGeneratedTimestamp}

    - * - *

    May be used as a meta-annotation.

    - */ -@DynamoDb -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) -public @interface DynamoDbTypeConverted { - - /** - * The class of the converter for this property. - */ - @SuppressWarnings("rawtypes") - Class converter(); - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConvertedEnum.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConvertedEnum.java deleted file mode 100644 index a05e63d1bb4d..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConvertedEnum.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation to convert the enumeration value to a string. - * - *

    Alternately, the {@link DynamoDbTyped} annotation may be used,

    - *
    - * public static enum Status { OPEN, PENDING, CLOSED }
    - *
    - * @DynamoDBTyped(DynamoDBAttributeType.S)
    - * public Status status()
    - * 
    - * - *

    Please note, there are some risks in distributed systems when using - * enumerations as attributes instead of simply using a String. - * When adding new values to the enumeration, the enum-only changes must - * be deployed before the enumeration value can be persisted. This will - * ensure that all systems have the correct code to map it from the item - * record in DynamoDB to your objects.
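    For illustration only, a minimal sketch of the annotation in use (the Ticket bean and its Status enum are hypothetical stand-ins, and imports from this datamodeling package are assumed); the enum constant's name is written to DynamoDB as a plain string attribute:

    @DynamoDbTable(tableName = "Ticket")
    public class Ticket {
        public enum Status { OPEN, PENDING, CLOSED }

        private String id;
        private Status status;

        @DynamoDbHashKey
        public String getId() { return id; }
        public void setId(String id) { this.id = id; }

        // Persisted as the S attribute "status", e.g. "OPEN".
        @DynamoDbTypeConvertedEnum
        public Status getStatus() { return status; }
        public void setStatus(Status status) { this.status = status; }
    }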

    - * - * @see DynamoDbTypeConverted - */ -@DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.S) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbTypeConvertedEnum { - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConvertedJson.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConvertedJson.java deleted file mode 100644 index 721ccd7badcd..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConvertedJson.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import com.fasterxml.jackson.databind.DeserializationFeature; -import com.fasterxml.jackson.databind.ObjectMapper; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * A simple JSON converter that uses the Jackson JSON processor. - * - *

    It shares all limitations of that library. For more information about - * Jackson, see: http://wiki.fasterxml.com/JacksonHome

    - * - *
    - * @DynamoDBTypeConvertedJson
    - * public Currency getCurrency()
    - * 
    - * - *

    Where,

    - *
    - * public class Currency {
    - *     private Double amount;
    - *     private String unit;
    - *
    - *     public Double getAmount() { return amount; }
    - *     public void setAmount(Double amount) { this.amount = amount; }
    - *
    - *     public String getUnit() { return unit; }
    - *     public void setUnit(String unit) { this.unit = unit; }
    - * }
    - * 
    - * - *

    Would write the following value to DynamoDB given,

    - *
      - *
    • Currency(79.99,"USD") = "{\"amount\":79.99,\"unit\":\"USD\"}"
    • - *
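    As a further, hedged sketch (PaymentDetails and CardPayment are illustrative names only, not types from this module), {@code targetType} can name the concrete class Jackson should deserialize into when the getter is declared with a broader type:

    // Serialized with Jackson into a JSON string attribute; read back as CardPayment.
    @DynamoDbTypeConvertedJson(targetType = CardPayment.class)
    public PaymentDetails getPayment() { return payment; }
    public void setPayment(PaymentDetails payment) { this.payment = payment; }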
    - * - * @see DynamoDbTypeConverted - */ -@DynamoDbTypeConverted(converter = DynamoDbTypeConvertedJson.Converter.class) -@DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.S) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbTypeConvertedJson { - - /** - * The value type to use when calling the JSON mapper's {@code readValue}; - * a value of {@code Void.class} indicates to use the getter's type. - */ - Class targetType() default void.class; - - /** - * JSON type converter. - */ - final class Converter implements DynamoDbTypeConverter { - private static final ObjectMapper MAPPER = new ObjectMapper().disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); - private final Class targetType; - - Converter(Class targetType, DynamoDbTypeConvertedJson annotation) { - this.targetType = annotation.targetType() == void.class ? targetType : (Class) annotation.targetType(); - } - - @Override - public String convert(final T object) { - try { - return MAPPER.writeValueAsString(object); - } catch (final Exception e) { - throw new DynamoDbMappingException("Unable to write object to JSON", e); - } - } - - @Override - public T unconvert(final String object) { - try { - return MAPPER.readValue(object, targetType); - } catch (final Exception e) { - throw new DynamoDbMappingException("Unable to read JSON string", e); - } - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConvertedTimestamp.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConvertedTimestamp.java deleted file mode 100644 index b6a6429a94d6..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConvertedTimestamp.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Scalar.TIME_ZONE; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.time.format.DateTimeFormatterBuilder; -import java.util.Date; -import java.util.TimeZone; - -/** - * Annotation to format a timestamp object using Java's standard date and time - * patterns. - * - *
    - * @DynamoDBTypeConvertedTimestamp(pattern="yyyyMMddHHmmssSSS", timeZone="UTC")
    - * public Date getCreatedDate()
    - * 
    - * - *

    Supports the standard {@link Date} type-conversions, such as - * {@link java.util.Calendar} and {@link Long}.

    - * - *

    Primitives such as {@code long} are not supported since the unset - * (or null) state can't be detected.

    - * - *

    Compatible with {@link DynamoDbAutoGeneratedTimestamp}
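    A small usage sketch under the default settings (the getter and the surrounding entity are hypothetical); with no elements specified, the ISO-8601 pattern and UTC time zone described above apply:

    // Stored as an S attribute such as "2019-10-24T12:34:56.789Z".
    @DynamoDbTypeConvertedTimestamp
    public Date getCreatedDate() { return createdDate; }
    public void setCreatedDate(Date createdDate) { this.createdDate = createdDate; }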

    - * - * @see DynamoDbAutoGeneratedTimestamp - * @see DynamoDbTypeConverted - * @see java.text.SimpleDateFormat - * @see java.util.TimeZone - */ -@DynamoDbTypeConverted(converter = DynamoDbTypeConvertedTimestamp.Converter.class) -@DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.S) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbTypeConvertedTimestamp { - - /** - * The pattern format; default is ISO8601. - * @see java.text.SimpleDateFormat - */ - String pattern() default "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"; - - /** - * The time zone; default is {@code UTC}. - * @see java.util.TimeZone - */ - String timeZone() default "UTC"; - - /** - * Timestamp format converter. - */ - final class Converter implements DynamoDbTypeConverter { - private final DynamoDbTypeConverter converter; - private final DateTimeFormatter formatter; - - Converter(Class targetType, DynamoDbTypeConvertedTimestamp annotation) { - this.formatter = new DateTimeFormatterBuilder() - .appendPattern(annotation.pattern()).toFormatter() - .withZone(TIME_ZONE.convert(annotation.timeZone()).toZoneId()); - this.converter = StandardTypeConverters.factory().getConverter(ZonedDateTime.class, targetType); - } - - @Override - public String convert(final T object) { - return formatter.format(converter.convert(object)); - } - - @Override - public T unconvert(final String object) { - return converter.unconvert(ZonedDateTime.parse(object, formatter)); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConverter.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConverter.java deleted file mode 100644 index 747f5e2d51f9..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConverter.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import software.amazon.awssdk.annotations.SdkInternalApi; - -/** - * Interface for converting types. - * - * @param The DynamoDB standard type. - * @param The object's field/property type. - */ -public interface DynamoDbTypeConverter { - - /** - * Turns an object of type T into an object of type S. - */ - S convert(T object); - - /** - * Turns an object of type S into an object of type T. - */ - T unconvert(S object); - - /** - * An abstract converter with additional general purpose functions. 
- */ - @SdkInternalApi - abstract class AbstractConverter implements DynamoDbTypeConverter { - public static ExtendedConverter join(DynamoDbTypeConverter source, - DynamoDbTypeConverter target) { - return new ExtendedConverter(source, target); - } - - public static NullSafeConverter nullSafe(DynamoDbTypeConverter converter) { - return new NullSafeConverter(converter); - } - - public DynamoDbTypeConverter joinAll(DynamoDbTypeConverter... targets) { - AbstractConverter converter = (AbstractConverter) nullSafe(); - for (DynamoDbTypeConverter target : targets) { - if (target != null) { - converter = converter.join((DynamoDbTypeConverter) nullSafe(target)); - } - } - return converter; - } - - public ExtendedConverter join(DynamoDbTypeConverter target) { - return AbstractConverter.join(this, target); - } - - public NullSafeConverter nullSafe() { - return AbstractConverter.nullSafe(this); - } - } - - /** - * A converter which wraps a source and target converter. - */ - class ExtendedConverter extends AbstractConverter { - private final DynamoDbTypeConverter source; - private final DynamoDbTypeConverter target; - - public ExtendedConverter(DynamoDbTypeConverter source, DynamoDbTypeConverter target) { - this.source = source; - this.target = target; - } - - @Override - public S convert(final T o) { - U o1 = target.convert(o); - S o2 = source.convert(o1); - return o2; - //return source.convert(target.convert(o)); - } - - @Override - public T unconvert(final S o) { - return target.unconvert(source.unconvert(o)); - } - } - - /** - * A general purpose delegating converter. - */ - class DelegateConverter extends AbstractConverter { - private final DynamoDbTypeConverter delegate; - - public DelegateConverter(DynamoDbTypeConverter delegate) { - this.delegate = delegate; - } - - @Override - public S convert(final T object) { - return delegate.convert(object); - } - - @Override - public T unconvert(final S object) { - return delegate.unconvert(object); - } - } - - /** - * A converter which evaluates nullability before convert/unconvert. - */ - class NullSafeConverter extends DelegateConverter { - public NullSafeConverter(DynamoDbTypeConverter delegate) { - super(delegate); - } - - @Override - public S convert(final T object) { - return object == null ? null : super.convert(object); - } - - @Override - public T unconvert(final S object) { - return object == null ? null : super.unconvert(object); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConverterFactory.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConverterFactory.java deleted file mode 100644 index 604b94406e28..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTypeConverterFactory.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.AbstractMap.SimpleImmutableEntry; -import java.util.LinkedHashMap; -import java.util.Map.Entry; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Vector; - -/** - * {@link DynamoDbTypeConverter} factory and supporting classes. - * - *

    To override standard type-conversions,

    - *
    - * DynamoDBMapperConfig config = DynamoDBMapperConfig.builder()
    - *     .withTypeConverterFactory(DynamoDBTypeConverterFactory.standard().override()
    - *         .with(String.class, MyObject.class, new StringToMyObjectConverter())
    - *         .build())
    - *     .build();
    - * 
    - *

    Then, on the property, specify the attribute binding,

    - *
    - * @DynamoDBTyped(DynamoDBAttributeType.S)
    - * public MyObject myObject()
    - * 
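    The {@code StringToMyObjectConverter} referenced in the snippet above is not shown in this Javadoc; a minimal sketch of what such a converter might look like (MyObject and its single {@code name} property are stand-ins):

    // Maps between the DynamoDB string representation (S) and the MyObject property type.
    // MyObject is assumed to expose getName() and a String-based constructor.
    public class StringToMyObjectConverter implements DynamoDbTypeConverter<String, MyObject> {
        @Override
        public String convert(MyObject object) {
            return object.getName();
        }

        @Override
        public MyObject unconvert(String value) {
            return new MyObject(value);
        }
    }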
    - * - * @see DynamoDbMapperConfig - */ -public abstract class DynamoDbTypeConverterFactory { - - /** - * Returns the standard type-converter factory. To override, the factory, - * @see DynamoDbTypeConverterFactory#override - */ - public static DynamoDbTypeConverterFactory standard() { - return StandardTypeConverters.factory(); - } - - /** - * Gets the type-converter matching the target conversion type. - * @param The DynamoDB standard type. - * @param The object's field/property type. - * @param sourceType The source conversion type. - * @param targetType The target conversion type. - * @return The type-converter, or null if no match. - */ - public abstract DynamoDbTypeConverter getConverter(Class sourceType, Class targetType); - - /** - * Creates a type-converter factory builder using this factory as defaults. - */ - public final Builder override() { - return new Builder(this); - } - - /** - * Builder for overriding type-converters. - */ - public static final class Builder { - private final ConverterMap overrides = new ConverterMap(); - private final DynamoDbTypeConverterFactory defaults; - - private Builder(DynamoDbTypeConverterFactory defaults) { - this.defaults = defaults; - } - - public Builder with(Class sourceType, Class targetType, - DynamoDbTypeConverter converter) { - if (Vector.SET.is(sourceType) || Vector.LIST.is(sourceType) || Vector.MAP.is(sourceType)) { - throw new DynamoDbMappingException("type [" + sourceType + "] is not supported" + - "; type-converter factory only supports scalar conversions"); - } - overrides.put(sourceType, targetType, converter); - return this; - } - - public DynamoDbTypeConverterFactory build() { - return new OverrideFactory(defaults, overrides); - } - } - - /** - * A delegating {@link DynamoDbTypeConverterFactory}. - */ - public static class DelegateFactory extends DynamoDbTypeConverterFactory { - private final DynamoDbTypeConverterFactory delegate; - - public DelegateFactory(DynamoDbTypeConverterFactory delegate) { - this.delegate = delegate; - } - - @Override - public DynamoDbTypeConverter getConverter(Class sourceType, Class targetType) { - return delegate.getConverter(sourceType, targetType); - } - } - - /** - * Delegate factory to allow selected types to be overridden. - */ - private static class OverrideFactory extends DelegateFactory { - private final ConverterMap overrides; - - OverrideFactory(DynamoDbTypeConverterFactory defaults, ConverterMap overrides) { - super(defaults); - this.overrides = overrides; - } - - @Override - public DynamoDbTypeConverter getConverter(Class sourceType, Class targetType) { - DynamoDbTypeConverter converter = overrides.get(sourceType, targetType); - if (converter == null) { - converter = super.getConverter(sourceType, targetType); - } - return converter; - } - } - - /** - * Map of source and target pairs to the converter. - */ - private static final class ConverterMap extends LinkedHashMap, DynamoDbTypeConverter> { - private static final long serialVersionUID = -1L; - - public void put(Class sourceType, Class targetType, - DynamoDbTypeConverter converter) { - put(Key.of(sourceType, targetType), converter); - } - - @SuppressWarnings("unchecked") - public DynamoDbTypeConverter get(Class sourceType, Class targetType) { - for (final Entry, DynamoDbTypeConverter> entry : entrySet()) { - if (entry.getKey().isAssignableFrom(sourceType, targetType)) { - return (DynamoDbTypeConverter) entry.getValue(); - } - } - return null; - } - } - - /** - * Source and target conversion type pair. 
- */ - private static final class Key extends SimpleImmutableEntry, Class> { - private static final long serialVersionUID = -1L; - - private Key(Class sourceType, Class targetType) { - super(sourceType, targetType); - } - - public static Key of(Class sourceType, Class targetType) { - return new Key(sourceType, targetType); - } - - public boolean isAssignableFrom(Class sourceType, Class targetType) { - return getKey().isAssignableFrom(sourceType) && getValue().isAssignableFrom(targetType); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTyped.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTyped.java deleted file mode 100644 index 0deb4cedc976..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbTyped.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Inherited; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperFieldModel.DynamoDbAttributeType; - -/** - * Annotation to override the standard attribute type binding. - * - *
    - * @DynamoDBTyped(DynamoDBAttributeType.S)
    - * public MyObject myObject()
    - * 
    - - *

    Standard Types

    - *

    Standard types do not require the annotation if applying the default - * attribute binding for that type.

    - *

    String/{@code S} types,

    - *
      - *
    • {@link java.lang.Character}/{@code char}
    • - *
    • {@link java.lang.String}
    • - *
    • {@link java.net.URL}
    • - *
    • {@link java.net.URI}
    • - *
    • {@link java.util.Calendar}
    • - *
    • {@link java.util.Currency}
    • - *
    • {@link java.util.Date}
    • - *
    • {@link java.util.Locale}
    • - *
    • {@link java.util.TimeZone}
    • - *
    • {@link java.util.UUID}
    • - *
    • {@link S3Link}
    • - *
    - *

    Number/{@code N} types,

    - *
      - *
    • {@link java.math.BigDecimal}
    • - *
    • {@link java.math.BigInteger}
    • - *
    • {@link java.lang.Boolean}/{@code boolean}
    • - *
    • {@link java.lang.Byte}/{@code byte}
    • - *
    • {@link java.lang.Double}/{@code double}
    • - *
    • {@link java.lang.Float}/{@code float}
    • - *
    • {@link java.lang.Integer}/{@code int}
    • - *
    • {@link java.lang.Long}/{@code long}
    • - *
    • {@link java.lang.Short}/{@code short}
    • - *
    - *

    Binary/{@code B} types,

    - *
      - *
    • {@link java.nio.ByteBuffer}
    • - *
    • {@code byte[]}
    • - *
    - * - *

    {@link DynamoDbTypeConverter}

    - *

    A custom type-converter may be applied to any attribute, either by - * annotation or by overriding the standard type-converter factory.

    - *
    - * DynamoDBMapperConfig config = DynamoDBMapperConfig.builder()
    - *     .withTypeConverterFactory(DynamoDBTypeConverterFactory.standard().override()
    - *         .with(String.class, MyObject.class, new StringToMyObjectConverter())
    - *         .build())
    - *     .build();
    - * 
    - *

    If the converter being applied is already a supported data type and - * the conversion is of the same attribute type, for instance, - * {@link java.util.Date} to {@link String} to {@code S}, - * the annotation may be omitted. The annotation is required for all non-standard - * types or if the attribute type binding is being overridden.

    - * - *

    {@link software.amazon.awssdk.services.dynamodb.model.AttributeValue}

    - *

    Direct native conversion is supported by default in all schemas. - * If the attribute is a primary or index key, it must specify either - * {@code B}, {@code N}, or {@code S}; otherwise, it may be omitted.

    - * - *

    {@link Boolean} to {@code BOOL}

    - *

    The standard V2 conversion schema will by default serialize booleans - * natively using the DynamoDB {@code BOOL} type.

    - *
    - * @DynamoDBTyped(DynamoDBAttributeType.BOOL)
    - * public boolean isTesting()
    - * 
    - * - *

    {@link Boolean} to {@code N}

    - *

    The standard V1 and V2 compatible conversion schemas will by default - * serialize booleans using the DynamoDB {@code N} type, with a value of '1' - * representing 'true' and a value of '0' representing 'false'.

    - *
    - * @DynamoDBTyped(DynamoDBAttributeType.N)
    - * public boolean isTesting()
    - * 
    - * - *

    {@link Enum} to {@code S}

    - *

    The {@code enum} type is only supported by override or custom converter. - * There are some risks in distributed systems when using enumerations as - * attributes instead of simply using a String. When adding new values to the - * enumeration, the enum-only changes must be deployed before the enumeration - * value can be persisted. This will ensure that all systems have the correct - * code to map it from the item record in DynamoDB to your objects.

    - *
    - * public enum Status { OPEN, PENDING, CLOSED };
    - *
    - * @DynamoDBTyped(DynamoDBAttributeType.S)
    - * public Status status()
    - * 
    - * - *

    {@link UUID} to {@code B}

    - *

    The {@code UUID} type will serialize to {@link String}/{@code S} by - * default in all conversion schemas. The schemas do support serializing to - * {@link ByteBuffer}/{@code B} by override.

    - *
    - * @DynamoDBTyped(DynamoDBAttributeType.B)
    - * public UUID getKey()
    - * 
    - * - *

    {@link Set} to {@code L}

    - *

    The standard V1 and V2 compatible conversion schemas do not by default - * support non-scalar {@code Set} types. They are supported in V2. In - * non-supported schemas, the {@link List}/{@code L} override may be applied - * to any {@code Set} type.

    - *
    - * @DynamoDBTyped(DynamoDBAttributeType.L)
    - * public Set<MyObject> myObjects()
    - * 
    - * - *

    {@link Object} to {@code M}

    - *

    Also supported as {@link DynamoDbDocument}.

    - *
    - * @DynamoDBTyped(DynamoDBAttributeType.M)
    - * public MyObject myObject()
    - * 
    - * - *

    May be combined with {@link DynamoDbTypeConverted}.

    - * - *

    May be used as a meta-annotation.
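    Pulling a few of the overrides above into one hypothetical bean sketch (MyObject is a stand-in type; imports from java.util and this datamodeling package are assumed):

    @DynamoDbTable(tableName = "Example")
    public class Example {
        private String id;
        private UUID externalId;
        private boolean testing;
        private Set<MyObject> myObjects;

        @DynamoDbHashKey
        public String getId() { return id; }
        public void setId(String id) { this.id = id; }

        // UUID serializes to S by default; override to a binary (B) attribute.
        @DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.B)
        public UUID getExternalId() { return externalId; }
        public void setExternalId(UUID externalId) { this.externalId = externalId; }

        // Native BOOL rather than the legacy N encoding.
        @DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.BOOL)
        public boolean isTesting() { return testing; }
        public void setTesting(boolean testing) { this.testing = testing; }

        // Non-scalar Set stored as an L attribute.
        @DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.L)
        public Set<MyObject> getMyObjects() { return myObjects; }
        public void setMyObjects(Set<MyObject> myObjects) { this.myObjects = myObjects; }
    }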

    - * - * @see DynamoDbTypeConverted - * @see DynamoDbTypeConverterFactory - */ -@DynamoDb -@Inherited -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) -public @interface DynamoDbTyped { - - /** - * Use when the type of the attribute as stored in DynamoDB should differ - * from the standard type assigned by DynamoDBMapper. - */ - DynamoDbAttributeType value() default DynamoDbAttributeType.NULL; - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbVersionAttribute.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbVersionAttribute.java deleted file mode 100644 index bc1edd0ae667..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbVersionAttribute.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; - -/** - * Annotation for marking a property as an optimistic locking version attribute. - * - *

    Applied to the getter method or the class field for the class's version - * property. If the annotation is applied directly to the class field, the - * corresponding getter and setter must be declared in the same class. - * - *

    Alternately, the meta-annotation {@link DynamoDbVersioned} may be used - * to annotate a custom annotation, or applied directly to the field/getter.

    - * - *

    Only nullable, integral numeric types (e.g. Integer, Long) can be used as - * version properties. On a save() operation, the {@link DynamoDbMapper} will - * attempt to increment the version property and assert that the service's value - * matches the client's. New objects will be assigned a version of 1 when saved. - *

    - * Note that for batchWrite, and by extension batchSave and batchDelete, no - * version checks are performed, as required by the - * {@link DynamoDbClient#batchWriteItem(BatchWriteItemRequest)} - * API. - * - * @see DynamoDbVersioned - */ -@DynamoDb -@DynamoDbVersioned -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD}) -public @interface DynamoDbVersionAttribute { - - /** - * Optional parameter when the name of the attribute as stored in DynamoDB - * should differ from the name used by the getter / setter. - */ - String attributeName() default ""; - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbVersioned.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbVersioned.java deleted file mode 100644 index 5a7fd63b1503..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/DynamoDbVersioned.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.math.BigInteger; -import java.util.Arrays; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Scalar; - -/** - * Annotation for marking a property as an optimistic locking version attribute. - * - *

    - * @DynamoDBVersioned
    - * public Long getRecordVersionNumber()
    - * 
    - * - *

    Alternately, the convenience annotation {@link DynamoDbVersionAttribute} - * may be used when combining with an attribute name on a field/getter.

    - * - *

    Only nullable, integral numeric types (e.g. Integer, Long) can be used as - * version properties. On a save() operation, the {@link DynamoDbMapper} will - * attempt to increment the version property and assert that the service's value - * matches the client's.

    - * - *

    New objects will be assigned a version of 1 when saved.

    - * - *

    Note that for batchWrite, and by extension batchSave and batchDelete, - * no version checks are performed, as required by the - * {@link DynamoDbClient#batchWriteItem(BatchWriteItemRequest)} - * API.

    - * - *

    May be used as a meta-annotation.
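    A compact sketch of a versioned entity (Account is a hypothetical stand-in); the mapper assigns a version of 1 on the first save and increments and checks the value on every subsequent save:

    @DynamoDbTable(tableName = "Account")
    public class Account {
        private String id;
        private Long recordVersionNumber;

        @DynamoDbHashKey
        public String getId() { return id; }
        public void setId(String id) { this.id = id; }

        // Optimistic locking: a stale value causes the save's conditional check to fail.
        @DynamoDbVersioned
        public Long getRecordVersionNumber() { return recordVersionNumber; }
        public void setRecordVersionNumber(Long recordVersionNumber) { this.recordVersionNumber = recordVersionNumber; }
    }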

    - * - * @see DynamoDbVersionAttribute - */ -@DynamoDb -@DynamoDbAutoGenerated(generator = DynamoDbVersioned.Generator.class) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) -public @interface DynamoDbVersioned { - - /** - * Version auto-generator. - */ - final class Generator extends DynamoDbAutoGenerator.AbstractGenerator { - private final Sequence sequence; - - Generator(Class targetType, DynamoDbVersioned annotation) { - super(DynamoDbAutoGenerateStrategy.ALWAYS); - this.sequence = Sequences.of(targetType); - } - - @Override - public T generate(final T currentValue) { - return currentValue == null ? sequence.init() : sequence.next(currentValue); - } - - private enum Sequences { - BIG_INTEGER(Scalar.BIG_INTEGER, new Sequence() { - @Override - public BigInteger init() { - return BigInteger.ONE; - } - - @Override - public BigInteger next(final BigInteger o) { - return o.add(BigInteger.ONE); - } - }), - - BYTE(Scalar.BYTE, new Sequence() { - @Override - public Byte init() { - return Byte.valueOf((byte) 1); - } - - @Override - public Byte next(final Byte o) { - return (byte) ((o + 1) % Byte.MAX_VALUE); - } - }), - - INTEGER(Scalar.INTEGER, new Sequence() { - @Override - public Integer init() { - return Integer.valueOf(1); - } - - @Override - public Integer next(final Integer o) { - return o + 1; - } - }), - - LONG(Scalar.LONG, new Sequence() { - @Override - public Long init() { - return Long.valueOf(1L); - } - - @Override - public Long next(final Long o) { - return o + 1L; - } - }), - - SHORT(Scalar.SHORT, new Sequence() { - @Override - public Short init() { - return Short.valueOf((short) 1); - } - - @Override - public Short next(final Short o) { - return (short) (o + 1); - } - }); - - private final Sequence sequence; - private final Scalar scalar; - - Sequences(final Scalar scalar, final Sequence sequence) { - this.sequence = sequence; - this.scalar = scalar; - } - - private static Sequence of(final Class targetType) { - for (final Sequences s : Sequences.values()) { - if (s.scalar.is(targetType)) { - return (Sequence) s.sequence; - } - } - throw new DynamoDbMappingException( - "type [" + targetType + "] is not supported; allowed only " + Arrays.toString(Sequences.values()) - ); - } - } - - interface Sequence { - T init(); - - T next(T o); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/GenerateDeleteTableRequestTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/GenerateDeleteTableRequestTest.java deleted file mode 100644 index 03688421a463..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/GenerateDeleteTableRequestTest.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; - -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; - -/** - * Unit tests for {@link DynamoDbMapper#generateDeleteTableRequest(Class)}. - */ -public class GenerateDeleteTableRequestTest { - - private static final String TABLE_PREFIX = "DEV-"; - private static final String TABLE_NAME = "OBJECTORMEXAMPLE"; - - @Test - public void tableNameNotOverriden_UsesTableNameAttributeInAnnotation() { - DynamoDbMapper dynamoDBMapper = new DynamoDbMapper(null); - DeleteTableRequest deleteTableRequest = dynamoDBMapper.generateDeleteTableRequest(ObjectORMExample.class); - assertEquals(deleteTableRequest.tableName(), TABLE_NAME); - } - - @Test - public void tableNameOverriddenInConfig_UsesPrefixedOverrideTableName() { - DynamoDbMapperConfig.TableNameOverride tableNameOverride = DynamoDbMapperConfig.TableNameOverride - .withTableNamePrefix(TABLE_PREFIX); - DynamoDbMapperConfig config = new DynamoDbMapperConfig(tableNameOverride); - DynamoDbMapper dynamoDBMapper = new DynamoDbMapper(null, config); - - DeleteTableRequest deleteTableRequest = dynamoDBMapper.generateDeleteTableRequest(ObjectORMExample.class); - assertEquals(deleteTableRequest.tableName(), TABLE_PREFIX.concat(TABLE_NAME)); - } - - @DynamoDbTable(tableName = TABLE_NAME) - private static class ObjectORMExample { - private String id; - - @DynamoDbHashKey - public final String getId() { - return this.id; - } - - public final void setId(String id) { - this.id = id; - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/IDynamoDbMapper.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/IDynamoDbMapper.java deleted file mode 100644 index 090615becfdd..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/IDynamoDbMapper.java +++ /dev/null @@ -1,690 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper.FailedBatch; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.PaginationLoadingStrategy; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.SaveBehavior; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.BatchGetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemRequest; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; - -/** - * Interface for DynamoDBMapper. - * - *

    - * Note: Do not implement this interface; extend from {@link AbstractDynamoDbMapper} instead. - *

    - * - * @see DynamoDbMapper - * @see AbstractDynamoDbMapper - */ -public interface IDynamoDbMapper { - /** - * Get the table model for the class, using the default configuration. - * - * @see DynamoDbMapper#getTableModel(Class, DynamoDbMapperConfig) - */ - DynamoDbMapperTableModel getTableModel(Class clazz); - - /** - * Get the table model for the class using the provided configuration override. - */ - DynamoDbMapperTableModel getTableModel(Class clazz, DynamoDbMapperConfig config); - - /** - * Loads an object with the hash key given and a configuration override. This configuration - * overrides the default provided at object construction. - * - * @see DynamoDbMapper#load(Class, Object, Object, DynamoDbMapperConfig) - */ - T load(Class clazz, Object hashKey, DynamoDbMapperConfig config); - - /** - * Loads an object with the hash key given, using the default configuration. - * - * @see DynamoDbMapper#load(Class, Object, Object, DynamoDbMapperConfig) - */ - T load(Class clazz, Object hashKey); - - /** - * Loads an object with a hash and range key, using the default configuration. - * - * @see DynamoDbMapper#load(Class, Object, Object, DynamoDbMapperConfig) - */ - T load(Class clazz, Object hashKey, Object rangeKey); - - /** - * Returns an object whose keys match those of the prototype key object given, or null if no - * such item exists. - * - * @param keyObject - * An object of the class to load with the keys values to match. - * @see DynamoDbMapper#load(Object, DynamoDbMapperConfig) - */ - T load(T keyObject); - - /** - * Returns an object whose keys match those of the prototype key object given, or null if no - * such item exists. - * - * @param keyObject - * An object of the class to load with the keys values to match. - * @param config - * Configuration for the service call to retrieve the object from DynamoDB. This - * configuration overrides the default given at construction. - */ - T load(T keyObject, DynamoDbMapperConfig config); - - /** - * Returns an object with the given hash key, or null if no such object exists. - * - * @param clazz - * The class to load, corresponding to a DynamoDB table. - * @param hashKey - * The key of the object. - * @param rangeKey - * The range key of the object, or null for tables without a range key. - * @param config - * Configuration for the service call to retrieve the object from DynamoDB. This - * configuration overrides the default given at construction. - */ - T load(Class clazz, Object hashKey, Object rangeKey, DynamoDbMapperConfig config); - - /** - * Creates and fills in the attributes on an instance of the class given with the attributes - * given. - *

    - * This is accomplished by looking for getter methods annotated with an appropriate annotation, - * then looking for matching attribute names in the item attribute map. - *

    - * This method is no longer called by load/scan/query methods. If you are overriding this - * method, please switch to using an AttributeTransformer - * - * @param clazz - * The class to instantiate and hydrate - * @param itemAttributes - * The set of item attributes, keyed by attribute name. - */ - T marshallIntoObject(Class clazz, Map itemAttributes); - - /** - * Unmarshalls the list of item attributes into objects of type clazz. - *

    - * This method is no longer called by load/scan/query methods. If you are overriding this - * method, please switch to using an AttributeTransformer - * - * @see DynamoDbMapper#marshallIntoObject(Class, Map) - */ - List marshallIntoObjects(Class clazz, List> itemAttributes); - - /** - * Saves the object given into DynamoDB, using the default configuration. - * - * @see DynamoDbMapper#save(Object, DynamoDbSaveExpression, DynamoDbMapperConfig) - */ - void save(T object); - - /** - * Saves the object given into DynamoDB, using the default configuration and the specified - * saveExpression. - * - * @see DynamoDbMapper#save(Object, DynamoDbSaveExpression, DynamoDbMapperConfig) - */ - void save(T object, DynamoDbSaveExpression saveExpression); - - /** - * Saves the object given into DynamoDB, using the specified configuration. - * - * @see DynamoDbMapper#save(Object, DynamoDbSaveExpression, DynamoDbMapperConfig) - */ - void save(T object, DynamoDbMapperConfig config); - - /** - * Saves an item in DynamoDB. The service method used is determined by the - * {@link DynamoDbMapperConfig#saveBehavior()} value, to use either - * {@link DynamoDbClient#putItem(PutItemRequest)} or - * {@link DynamoDbClient#updateItem(UpdateItemRequest)}: - *

      - *
    • UPDATE (default) : UPDATE will not affect unmodeled attributes on a save operation - * and a null value for the modeled attribute will remove it from that item in DynamoDB. Because - * of the limitations of the updateItem request, the implementation of UPDATE will send a putItem - * request when a key-only object is being saved, and it will send another updateItem request if - * the given key(s) already exist in the table.
    • - *
    • UPDATE_SKIP_NULL_ATTRIBUTES : Similar to UPDATE except that it ignores any null - * value attribute(s) and will NOT remove them from that item in DynamoDB. It also guarantees to - * send only a single updateItem request, whether or not the object is key-only.
    • - *
    • CLOBBER : CLOBBER will clear and replace all attributes, including unmodeled ones, - * (delete and recreate) on save. Versioned field constraints will also be disregarded.
    • - *
    - * Any options specified in the saveExpression parameter will be overlaid on any constraints due - * to versioned attributes. - * - * @param object - * The object to save into DynamoDB - * @param saveExpression - * The options to apply to this save request - * @param config - * The configuration to use, which overrides the default provided at object - * construction. - * @see DynamoDbMapperConfig.SaveBehavior - */ - void save(T object, DynamoDbSaveExpression saveExpression, DynamoDbMapperConfig config); - - /** - * Deletes the given object from its DynamoDB table using the default configuration. - */ - void delete(Object object); - - /** - * Deletes the given object from its DynamoDB table using the specified deleteExpression and - * default configuration. - */ - void delete(Object object, DynamoDbDeleteExpression deleteExpression); - - /** - * Deletes the given object from its DynamoDB table using the specified configuration. - */ - void delete(Object object, DynamoDbMapperConfig config); - - /** - * Deletes the given object from its DynamoDB table using the provided deleteExpression and - * provided configuration. Any options specified in the deleteExpression parameter will be - * overlaid on any constraints due to versioned attributes. - * - * @param deleteExpression - * The options to apply to this delete request - * @param config - * Config override object. If {@link SaveBehavior#CLOBBER} is supplied, version - * fields will not be considered when deleting the object. - */ - void delete(T object, DynamoDbDeleteExpression deleteExpression, DynamoDbMapperConfig config); - - /** - * Deletes the objects given using one or more calls to the - * {@link DynamoDbClient#batchWriteItem(BatchWriteItemRequest)} API. No version checks are - * performed, as required by the API. - * - * @see DynamoDbMapper#batchWrite(Iterable, Iterable) - */ - List batchDelete(Iterable objectsToDelete); - - /** - * Deletes the objects given using one or more calls to the - * {@link DynamoDbClient#batchWriteItem(BatchWriteItemRequest)} API. No version checks are - * performed, as required by the API. - * - * @see DynamoDbMapper#batchWrite(Iterable, Iterable) - */ - List batchDelete(Object... objectsToDelete); - - /** - * Saves the objects given using one or more calls to the - * {@link DynamoDbClient#batchWriteItem(BatchWriteItemRequest)} API. No version checks are - * performed, as required by the API. - *

    - * This method ignores any SaveBehavior set on the mapper, and always behaves as if - * SaveBehavior.CLOBBER was specified, as the DynamoDbClient.batchWriteItem() request does not - * support updating existing items. - *

    - * This method fails to save the batch if the size of an individual object in the batch exceeds - * 400 KB. For more information on batch restrictions, see - * http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html - *

    - * - * @see DynamoDbMapper#batchWrite(Iterable, Iterable) - */ - List batchSave(Iterable objectsToSave); - - /** - * Saves the objects given using one or more calls to the - * {@link DynamoDbClient#batchWriteItem(BatchWriteItemRequest)} API. No version checks are - * performed, as required by the API. - *

    - * This method ignores any SaveBehavior set on the mapper, and always behaves as if - * SaveBehavior.CLOBBER was specified, as the DynamoDbClient.batchWriteItem() request does not - * support updating existing items.

    - * This method fails to save the batch if the size of an individual object in the batch exceeds - * 400 KB. For more information on batch restrictions, see - * http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html - *

    - * - * @see DynamoDbMapper#batchWrite(Iterable, Iterable) - */ - List batchSave(Object... objectsToSave); - - /** - * Saves and deletes the objects given using one or more calls to the - * {@link DynamoDbClient#batchWriteItem(BatchWriteItemRequest)} API. No version checks are - * performed, as required by the API. - *

    - * This method ignores any SaveBehavior set on the mapper, and always behaves as if - * SaveBehavior.CLOBBER was specified, as the DynamoDbClient.batchWriteItem() request does not - * support updating existing items. - *

    - * This method fails to save the batch if the size of an individual object in the batch exceeds - * 400 KB. For more information on batch restrictions, see - * http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html - *

    - *

    - * If one of the write requests is for a table that is not present, this method does not throw a - * ResourceNotFoundException but returns a FailedBatch which includes this exception and the - * unprocessed items. - *

    - * - * @see DynamoDbMapper#batchWrite(Iterable, Iterable) - */ - List batchWrite(Iterable objectsToWrite, Iterable objectsToDelete); - - /** - * Saves and deletes the objects given using one or more calls to the - * {@link DynamoDbClient#batchWriteItem(BatchWriteItemRequest)} API. Use mapper config to - * control the retry strategy when UnprocessedItems are returned by the BatchWriteItem API - *

    - * This method fails to save the batch if the size of an individual object in the batch exceeds - * 400 KB. For more information on batch restrictions, see - * http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html - *

    - *

    - * If one of the write requests is for a table that is not present, this method does not throw a - * ResourceNotFoundException but returns a FailedBatch which includes this exception and the - * unprocessed items. - *

    - * - * @param objectsToWrite - * A list of objects to save to DynamoDB. No version checks are performed, as - * required by the {@link DynamoDbClient#batchWriteItem(BatchWriteItemRequest)} API. - * @param objectsToDelete - * A list of objects to delete from DynamoDB. No version checks are performed, - * as required by the {@link DynamoDbClient#batchWriteItem(BatchWriteItemRequest)} - * API. - * @param config - * Only {@link DynamoDbMapperConfig#getTableNameOverride()} and - * {@link DynamoDbMapperConfig#batchWriteRetryStrategy()} are considered. If - * TableNameOverride is specified, all objects in the two parameter lists will be - * considered to belong to the given table override. In particular, this method - * always acts as if SaveBehavior.CLOBBER was specified regardless of the - * value of the config parameter. - * @return A list of failed batches which includes the unprocessed items and the exceptions - * causing the failure. - * @see DynamoDbMapperConfig#getTableNameOverride() - * @see DynamoDbMapperConfig#batchWriteRetryStrategy() - */ - List batchWrite(Iterable objectsToWrite, - Iterable objectsToDelete, - DynamoDbMapperConfig config); - - /** - * Retrieves multiple items from multiple tables using their primary keys. - * - * @see DynamoDbMapper#batchLoad(List, DynamoDbMapperConfig) - * @return A map of the loaded objects. Each key in the map is the name of a DynamoDB table. - * Each value in the map is a list of objects that have been loaded from that table. All - * objects for each table can be cast to the associated user defined type that is - * annotated as mapping that table. - * @throws DynamoDbMapper.BatchGetItemException if all the requested items are not processed - * within the maximum number of retries. - */ - Map> batchLoad(Iterable itemsToGet); - - /** - * Retrieves multiple items from multiple tables using their primary keys. - * - * @param itemsToGet - * Key objects, corresponding to the class to fetch, with their primary key values - * set. - * @param config - * Only {@link DynamoDbMapperConfig#getTableNameOverride()} and - * {@link DynamoDbMapperConfig#getConsistentRead()} are considered. - * @return A map of the loaded objects. Each key in the map is the name of a DynamoDB table. - * Each value in the map is a list of objects that have been loaded from that table. All - * objects for each table can be cast to the associated user defined type that is - * annotated as mapping that table. - * @throws DynamoDbMapper.BatchGetItemException if all the requested items are not processed - * within the maximum number of retries. - */ - Map> batchLoad(Iterable itemsToGet, DynamoDbMapperConfig config); - - /** - * Retrieves the attributes for multiple items from multiple tables using their primary keys. - * {@link DynamoDbClient#batchGetItem(BatchGetItemRequest)} API. - * - * @return A map of the loaded objects. Each key in the map is the name of a DynamoDB table. - * Each value in the map is a list of objects that have been loaded from that table. All - * objects for each table can be cast to the associated user defined type that is - * annotated as mapping that table. - * @throws DynamoDbMapper.BatchGetItemException if all the requested items are not processed - * within the maximum number of retries. - * @see #batchLoad(List, DynamoDbMapperConfig) - * @see #batchLoad(Map, DynamoDbMapperConfig) - */ - Map> batchLoad(Map, List> itemsToGet); - - /** - * Retrieves multiple items from multiple tables using their primary keys. 
Valid only for tables - * with a single hash key, or a single hash and range key. For other schemas, use - * {@link DynamoDbMapper#batchLoad(List, DynamoDbMapperConfig)} - * - * @param itemsToGet - * Map from class to load to list of primary key attributes. - * @param config - * Only {@link DynamoDbMapperConfig#getTableNameOverride()} and - * {@link DynamoDbMapperConfig#getConsistentRead()} are considered. - * @return A map of the loaded objects. Each key in the map is the name of a DynamoDB table. - * Each value in the map is a list of objects that have been loaded from that table. All - * objects for each table can be cast to the associated user defined type that is - * annotated as mapping that table. - * @throws DynamoDbMapper.BatchGetItemException if all the requested items are not processed - * within the maximum number of retries. - */ - Map> batchLoad(Map, List> itemsToGet, DynamoDbMapperConfig config); - - /** - * Scans through an Amazon DynamoDB table and returns the matching results as an unmodifiable - * list of instantiated objects, using the default configuration. - * - * @see DynamoDbMapper#scan(Class, DynamoDbScanExpression, DynamoDbMapperConfig) - */ - PaginatedScanList scan(Class clazz, DynamoDbScanExpression scanExpression); - - /** - * Scans through an Amazon DynamoDB table and returns the matching results as an unmodifiable - * list of instantiated objects. The table to scan is determined by looking at the annotations - * on the specified class, which declares where to store the object data in Amazon DynamoDB, and - * the scan expression parameter allows the caller to filter results and control how the scan is - * executed. - *
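To make the removed batch-operation Javadoc above easier to follow, here is a minimal usage sketch. It assumes a hypothetical `Customer` POJO annotated for this mapper port (`@DynamoDbTable`, `@DynamoDbHashKey`) and v1-style names (`DynamoDbMapper(DynamoDbClient)`, `DynamoDbMapperConfig.DEFAULT`, the `DynamoDbMapper.FailedBatch` inner class); it illustrates the contract described in the Javadoc rather than a verified API.

```java
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable;

public class BatchSketch {

    // Hypothetical mapped class; annotation names are assumed from the v1 mapper port.
    @DynamoDbTable(tableName = "Customer")
    public static class Customer {
        private String id;

        public Customer() { }
        public Customer(String id) { this.id = id; }

        @DynamoDbHashKey
        public String getId() { return id; }
        public void setId(String id) { this.id = id; }
    }

    public static void main(String[] args) {
        DynamoDbClient dynamo = DynamoDbClient.create();
        DynamoDbMapper mapper = new DynamoDbMapper(dynamo);

        // Save two objects and delete one in a single batch. No version checks are performed,
        // and the call behaves as if SaveBehavior.CLOBBER were configured.
        List<DynamoDbMapper.FailedBatch> failures = mapper.batchWrite(
                Arrays.asList(new Customer("id-1"), new Customer("id-2")),
                Arrays.asList(new Customer("id-3")),
                DynamoDbMapperConfig.DEFAULT);

        // Batch-load by handing the mapper key objects with only their primary keys populated.
        // The result is keyed by table name, per the Javadoc above.
        Map<String, List<Object>> loaded = mapper.batchLoad(
                Arrays.asList(new Customer("id-1"), new Customer("id-2")));

        System.out.println("failed batches: " + failures.size()
                + ", tables loaded: " + loaded.keySet());
    }
}
```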

    - * Callers should be aware that the returned list is unmodifiable, and any attempts to modify - * the list will result in an UnsupportedOperationException. - *

    - * You can specify the pagination loading strategy for this scan operation. By default, the list - * returned is lazily loaded when possible. - * - * @param - * The type of the objects being returned. - * @param clazz - * The class annotated with DynamoDB annotations describing how to store the object - * data in Amazon DynamoDB. - * @param scanExpression - * Details on how to run the scan, including any filters to apply to limit results. - * @param config - * The configuration to use for this scan, which overrides the default provided at - * object construction. - * @return An unmodifiable list of the objects constructed from the results of the scan - * operation. - * @see PaginatedScanList - * @see PaginationLoadingStrategy - */ - PaginatedScanList scan(Class clazz, DynamoDbScanExpression scanExpression, DynamoDbMapperConfig config); - - /** - * Scans through an Amazon DynamoDB table on logically partitioned segments in parallel and - * returns the matching results in one unmodifiable list of instantiated objects, using the - * default configuration. - * - * @see DynamoDbMapper#parallelScan(Class, DynamoDbScanExpression, int, DynamoDbMapperConfig) - */ - PaginatedParallelScanList parallelScan(Class clazz, - DynamoDbScanExpression scanExpression, - int totalSegments); - - /** - * Scans through an Amazon DynamoDB table on logically partitioned segments in parallel. This - * method will create a thread pool of the specified size, and each thread will issue scan - * requests for its assigned segment, following the returned continuation token, until the end - * of its segment. Callers should be responsible for setting the appropriate number of total - * segments. More scan segments would result in better performance but more consumed capacity of - * the table. The results are returned in one unmodifiable list of instantiated objects. The - * table to scan is determined by looking at the annotations on the specified class, which - * declares where to store the object data in Amazon DynamoDB, and the scan expression parameter - * allows the caller to filter results and control how the scan is executed. - *

    - * Callers should be aware that the returned list is unmodifiable, and any attempts to modify - * the list will result in an UnsupportedOperationException. - *

    - * You can specify the pagination loading strategy for this parallel scan operation. By default, - * the list returned is lazily loaded when possible. - * - * @param - * The type of the objects being returned. - * @param clazz - * The class annotated with DynamoDB annotations describing how to store the object - * data in Amazon DynamoDB. - * @param scanExpression - * Details on how to run the scan, including any filters to apply to limit results. - * @param totalSegments - * Number of total parallel scan segments. Range: 1 - 4096 - * @param config - * The configuration to use for this scan, which overrides the default provided at - * object construction. - * @return An unmodifiable list of the objects constructed from the results of the scan - * operation. - * @see PaginatedParallelScanList - * @see PaginationLoadingStrategy - */ - PaginatedParallelScanList parallelScan(Class clazz, - DynamoDbScanExpression scanExpression, - int totalSegments, - DynamoDbMapperConfig config); - - /** - * Scans through an Amazon DynamoDB table and returns a single page of matching results. The - * table to scan is determined by looking at the annotations on the specified class, which - * declares where to store the object data in AWS DynamoDB, and the scan expression parameter - * allows the caller to filter results and control how the scan is executed. - * - * @param - * The type of the objects being returned. - * @param clazz - * The class annotated with DynamoDB annotations describing how to store the object - * data in Amazon DynamoDB. - * @param scanExpression - * Details on how to run the scan, including any filters to apply to limit results. - * @param config - * The configuration to use for this scan, which overrides the default provided at - * object construction. - */ - ScanResultPage scanPage(Class clazz, DynamoDbScanExpression scanExpression, DynamoDbMapperConfig config); - - /** - * Scans through an Amazon DynamoDB table and returns a single page of matching results. - * - * @see DynamoDbMapper#scanPage(Class, DynamoDbScanExpression, DynamoDbMapperConfig) - */ - ScanResultPage scanPage(Class clazz, DynamoDbScanExpression scanExpression); - - /** - * Queries an Amazon DynamoDB table and returns the matching results as an unmodifiable list of - * instantiated objects, using the default configuration. - * - * @see DynamoDbMapper#query(Class, DynamoDbQueryExpression, DynamoDbMapperConfig) - */ - PaginatedQueryList query(Class clazz, DynamoDbQueryExpression queryExpression); - - /** - * Queries an Amazon DynamoDB table and returns the matching results as an unmodifiable list of - * instantiated objects. The table to query is determined by looking at the annotations on the - * specified class, which declares where to store the object data in Amazon DynamoDB, and the - * query expression parameter allows the caller to filter results and control how the query is - * executed. - *
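A short fragment showing the scan entry points described above, reusing the hypothetical `Customer` class and `mapper` from the batch sketch earlier; `withFilterExpression`/`withExpressionAttributeValues` on `DynamoDbScanExpression` are assumed from the v1 mapper and imports are omitted for brevity.

```java
// Lazily loaded scan: pages are fetched from DynamoDB as the iteration advances.
DynamoDbScanExpression expression = new DynamoDbScanExpression()
        .withFilterExpression("begins_with(id, :prefix)")
        .withExpressionAttributeValues(
                Collections.singletonMap(":prefix", AttributeValue.builder().s("id-").build()));

PaginatedScanList<Customer> matches = mapper.scan(Customer.class, expression);
for (Customer c : matches) {
    System.out.println(c.getId());
}

// Parallel scan over 4 segments; each segment follows its own continuation token.
PaginatedParallelScanList<Customer> all =
        mapper.parallelScan(Customer.class, new DynamoDbScanExpression(), 4);
System.out.println("total items: " + all.size()); // size() forces the remaining pages to load
```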

    - * When the query is on any local/global secondary index, callers should be aware that the - * returned object(s) will only contain item attributes that are projected into the index. All - * the other unprojected attributes will be saved as type default values. - *

    - * Callers should also be aware that the returned list is unmodifiable, and any attempts to - * modify the list will result in an UnsupportedOperationException. - *

    - * You can specify the pagination loading strategy for this query operation. By default, the - * list returned is lazily loaded when possible. - * - * @param - * The type of the objects being returned. - * @param clazz - * The class annotated with DynamoDB annotations describing how to store the object - * data in Amazon DynamoDB. - * @param queryExpression - * Details on how to run the query, including any conditions on the key values - * @param config - * The configuration to use for this query, which overrides the default provided at - * object construction. - * @return An unmodifiable list of the objects constructed from the results of the query - * operation. - * @see PaginatedQueryList - * @see PaginationLoadingStrategy - */ - PaginatedQueryList query(Class clazz, - DynamoDbQueryExpression queryExpression, - DynamoDbMapperConfig config); - - /** - * Queries an Amazon DynamoDB table and returns a single page of matching results. The table to - * query is determined by looking at the annotations on the specified class, which declares - * where to store the object data in Amazon DynamoDB, and the query expression parameter allows - * the caller to filter results and control how the query is executed. - * - * @see DynamoDbMapper#queryPage(Class, DynamoDbQueryExpression, DynamoDbMapperConfig) - */ - QueryResultPage queryPage(Class clazz, DynamoDbQueryExpression queryExpression); - - /** - * Queries an Amazon DynamoDB table and returns a single page of matching results. The table to - * query is determined by looking at the annotations on the specified class, which declares - * where to store the object data in Amazon DynamoDB, and the query expression parameter allows - * the caller to filter results and control how the query is executed. - * - * @param - * The type of the objects being returned. - * @param clazz - * The class annotated with DynamoDB annotations describing how to store the object - * data in AWS DynamoDB. - * @param queryExpression - * Details on how to run the query, including any conditions on the key values - * @param config - * The configuration to use for this query, which overrides the default provided at - * object construction. - */ - QueryResultPage queryPage(Class clazz, - DynamoDbQueryExpression queryExpression, - DynamoDbMapperConfig config); - - /** - * Evaluates the specified scan expression and returns the count of matching items, without - * returning any of the actual item data, using the default configuration. - * - * @see DynamoDbMapper#count(Class, DynamoDbScanExpression, DynamoDbMapperConfig) - */ - int count(Class clazz, DynamoDbScanExpression scanExpression); - - /** - * Evaluates the specified scan expression and returns the count of matching items, without - * returning any of the actual item data. - *
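A fragment showing the query entry points just described (same assumptions as the earlier sketches; `withHashKeyValues`/`withConsistentRead` and the `getResults()` accessor are assumed from the v1 mapper and may differ in this port).

```java
// Query by hash key; results come back as a lazily loaded, unmodifiable PaginatedQueryList.
Customer hashKey = new Customer("id-1");
DynamoDbQueryExpression<Customer> queryExpression = new DynamoDbQueryExpression<Customer>()
        .withHashKeyValues(hashKey)
        .withConsistentRead(false);

PaginatedQueryList<Customer> customers = mapper.query(Customer.class, queryExpression);

// Or fetch exactly one page and drive pagination yourself.
QueryResultPage<Customer> page = mapper.queryPage(Customer.class, queryExpression);
System.out.println("first page size: " + page.getResults().size()); // accessor name assumed from v1
```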

    - * This operation will scan your entire table, and can therefore be very expensive. Use with - * caution. - * - * @param clazz - * The class mapped to a DynamoDB table. - * @param scanExpression - * The parameters for running the scan. - * @param config - * The configuration to use for this scan, which overrides the default provided at - * object construction. - * @return The count of matching items, without returning any of the actual item data. - */ - int count(Class clazz, DynamoDbScanExpression scanExpression, DynamoDbMapperConfig config); - - /** - * Evaluates the specified query expression and returns the count of matching items, without - * returning any of the actual item data, using the default configuration. - * - * @see DynamoDbMapper#count(Class, DynamoDbQueryExpression, DynamoDbMapperConfig) - */ - int count(Class clazz, DynamoDbQueryExpression queryExpression); - - /** - * Evaluates the specified query expression and returns the count of matching items, without - * returning any of the actual item data. - * - * @param clazz - * The class mapped to a DynamoDB table. - * @param queryExpression - * The parameters for running the scan. - * @param config - * The mapper configuration to use for the query, which overrides the default - * provided at object construction. - * @return The count of matching items, without returning any of the actual item data. - */ - int count(Class clazz, DynamoDbQueryExpression queryExpression, DynamoDbMapperConfig config); - - /** - * Returns the underlying {@link S3ClientCache} for accessing S3. - */ - S3ClientCache s3ClientCache(); - - /** - * Creates an S3Link with the specified bucket name and key using the default S3 region. This - * method requires the mapper to have been initialized with the necessary credentials for - * accessing S3. - * - * @throws IllegalStateException - * if the mapper has not been constructed with the necessary S3 AWS credentials. - */ - S3Link createS3Link(String bucketName, String key); - - /** - * Creates an S3Link with the specified region, bucket name and key. This method requires the - * mapper to have been initialized with the necessary credentials for accessing S3. - * - * @throws IllegalStateException - * if the mapper has not been constructed with the necessary S3 AWS credentials. - */ - S3Link createS3Link(Region s3region, String bucketName, String key); - - /** - * Creates an S3Link with the specified region, bucket name and key. This method requires the - * mapper to have been initialized with the necessary credentials for accessing S3. - * - * @throws IllegalStateException - * if the mapper has not been constructed with the necessary S3 AWS credentials. - */ - S3Link createS3Link(String s3region, String bucketName, String key); - - /** - * Parse the given POJO class and return the CreateTableRequest for the DynamoDB table it - * represents. Note that the returned request does not include the required - * ProvisionedThroughput parameters for the primary table and the GSIs, and that all secondary - * indexes are initialized with the default projection type - KEY_ONLY. - */ - CreateTableRequest generateCreateTableRequest(Class clazz); - - - /** - * Parse the given POJO class and return the DeleteTableRequest for the DynamoDB table it - * represents. 
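A fragment tying together the count and table-generation helpers above (same assumptions as the earlier sketches); `CreateTableRequest` and `ProvisionedThroughput` are standard SDK v2 model classes.

```java
// Count matching items without materializing them. Scan-based counts read the whole table.
int matching = mapper.count(Customer.class, new DynamoDbScanExpression());
System.out.println("matching items: " + matching);

// Derive a CreateTableRequest from the class annotations. Throughput is not populated,
// so it must be added before sending the request; secondary indexes get the default
// KEYS_ONLY projection, per the Javadoc above.
CreateTableRequest createRequest = mapper.generateCreateTableRequest(Customer.class)
        .toBuilder()
        .provisionedThroughput(ProvisionedThroughput.builder()
                .readCapacityUnits(5L)
                .writeCapacityUnits(5L)
                .build())
        .build();
dynamo.createTable(createRequest);
```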
- */ - DeleteTableRequest generateDeleteTableRequest(Class clazz); - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/IncompatibleSubclassTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/IncompatibleSubclassTest.java deleted file mode 100644 index f60ef765494e..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/IncompatibleSubclassTest.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.Map; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * Verify that we fail fast in case of incompatible subclasses that try to - * override the (now-removed) transformAttributes method. - */ -public class IncompatibleSubclassTest { - - @Test - public void testCompatibleSubclass() { - // Doesn't try to override one of the deprecated/removed - // transformAttributes methods; should be fine. - new CompatibleDynamoDbMapper(); - } - - @Test(expected = IllegalStateException.class) - public void testIncompatibleSubclass1() { - // "Overrides" transformAttributes(Class, Map); should fail fast. - new IncompatibleDynamoDbMapper1(); - } - - @Test(expected = IllegalStateException.class) - public void testIncompatibleSubclass2() { - // "Overrides" transformAttributes(String, String, Map); should fail - // fast. - new IncompatibleDynamoDbMapper2(); - } - - private static class CompatibleDynamoDbMapper extends DynamoDbMapper { - - public CompatibleDynamoDbMapper() { - super(null); - } - - protected void transformAttributes(boolean innocuous) { - } - } - - private static class IncompatibleDynamoDbMapper1 extends DynamoDbMapper { - - public IncompatibleDynamoDbMapper1() { - super(null); - } - - protected Map transformAttributes( - Class clazz, - Map attributeValues) { - - return null; - } - } - - private static class IncompatibleDynamoDbMapper2 extends DynamoDbMapper { - - public IncompatibleDynamoDbMapper2() { - super(null); - } - - protected Map transformAttributes( - String hashKey, - String rangeKey, - Map attributeValues) { - - return null; - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ItemConverter.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ItemConverter.java deleted file mode 100644 index d866365ec657..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ItemConverter.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * The concrete realization of a strategy for converting between Java objects - * and DynamoDB AttributeValues. Typically created by a - * {@link ConversionSchema}. - */ -public interface ItemConverter { - /** - * Returns the metadata (e.g. name, type) of the DynamoDB attribute that the - * return value of the given getter will be converted to. - * - * @param getter the getter method to inspect - * @return the metadata of the DynamoDB attribute that the result of the - * getter will be converted to - */ - DynamoDbMapperFieldModel getFieldModel(Method getter); - - /** - * Converts a Java object into a DynamoDB AttributeValue. Potentially able - * to handle both scalar and complex types. - * - * @param getter the getter that returned the value to be converted - * @param value the value to be converted - * @return the converted AttributeValue - */ - AttributeValue convert(Method getter, Object value); - - /** - * Converts an appropriately-annotated POJO into a Map of AttributeValues. - * - * @param value the POJO to convert - * @return the resulting map of attribute values - */ - Map convert(Object value); - - /** - * Reverses the {@link #convert(Method, Object)} method, turning a - * DynamoDB AttributeValue back into a Java object suitable for passing - * to the given setter. - * - * @param getter the getter for the value to be unconverted - * @param setter the setter for the value to be unconverted - * @param value the attribute value to be unconverted - * @return the unconverted Java object - */ - Object unconvert(Method getter, Method setter, AttributeValue value); - - /** - * Reverses the {@link #convert(Object)} method, turning a map of attribute - * values back into a POJO of the given class. - * - * @param the compile-time type of the object to create - * @param clazz the runtime type of the object to create - * @param values the the map of attribute values to unconvert - * @return the unconverted POJO - */ - T unconvert(Class clazz, Map values); -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/JsonMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/JsonMarshaller.java deleted file mode 100644 index dd9da70b6f4d..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/JsonMarshaller.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static software.amazon.awssdk.core.internal.util.ThrowableUtils.failure; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectWriter; - -/** - * A simple JSON marshaller that uses the Jackson JSON processor. It shares all limitations of that - * library. For more information about Jackson, see: http://wiki.fasterxml.com/JacksonHome - * - * @deprecated Replaced by {@link DynamoDbTypeConvertedJson} - */ -@Deprecated -public class JsonMarshaller implements DynamoDbMarshaller { - - private static final ObjectMapper MAPPER = new ObjectMapper(); - private static final ObjectWriter WRITER = MAPPER.writer(); - - /** - * The value type. - */ - private final Class valueType; - - /** - * Constructs the JSON marshaller instance. - * @param valueType The value type (for generic type erasure). - */ - public JsonMarshaller(final Class valueType) { - this.valueType = valueType; - } - - /** - * Constructs the JSON marshaller instance. - */ - public JsonMarshaller() { - this(null); - } - - /** - * Gets the value type. - * @return The value type. - */ - protected final Class valueType() { - return this.valueType; - } - - @Override - public String marshall(T obj) { - - try { - return WRITER.writeValueAsString(obj); - } catch (JsonProcessingException e) { - throw failure(e, - "Unable to marshall the instance of " + obj.getClass() - + "into a string"); - } - } - - @Override - public T unmarshall(Class clazz, String json) { - try { - return MAPPER.readValue(json, (valueType() == null ? clazz : valueType())); - } catch (Exception e) { - throw failure(e, "Unable to unmarshall the string " + json - + "into " + clazz); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/KeyPair.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/KeyPair.java deleted file mode 100644 index b1afbfee7de3..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/KeyPair.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
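Since the removed `JsonMarshaller` is self-contained, a quick round-trip sketch may help readers of this diff. `Preferences` is a hypothetical Jackson-serializable POJO, and the marshaller is generic (`JsonMarshaller<T>`) even though generics are not visible in the collapsed diff above; new code should prefer `DynamoDbTypeConvertedJson`, per the deprecation note.

```java
import software.amazon.awssdk.services.dynamodb.datamodeling.JsonMarshaller;

public class JsonMarshallerSketch {

    // Hypothetical POJO stored as a single JSON string attribute.
    public static class Preferences {
        private String theme = "dark";
        public String getTheme() { return theme; }
        public void setTheme(String theme) { this.theme = theme; }
    }

    public static void main(String[] args) {
        JsonMarshaller<Preferences> marshaller = new JsonMarshaller<>(Preferences.class);

        String json = marshaller.marshall(new Preferences());            // {"theme":"dark"}
        Preferences restored = marshaller.unmarshall(Preferences.class, json);

        System.out.println(json + " -> " + restored.getTheme());
    }
}
```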
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -public class KeyPair { - private Object hashKey; - private Object rangeKey; - - public KeyPair withHashKey(Object hashkey) { - this.hashKey = hashkey; - return this; - } - - public KeyPair withRangeKey(Object rangeKey) { - this.rangeKey = rangeKey; - return this; - } - - public Object getHashKey() { - return this.hashKey; - } - - public void setHashKey(Object hashKey) { - this.hashKey = hashKey; - } - - public Object getRangeKey() { - return this.rangeKey; - } - - public void setRangeKey(Object rangeKey) { - this.rangeKey = rangeKey; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedList.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedList.java deleted file mode 100644 index 249cf1e68a33..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedList.java +++ /dev/null @@ -1,528 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.ListIterator; -import java.util.NoSuchElementException; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.PaginationLoadingStrategy; - -/** - * Unmodifiable list supporting paginated result sets from Amazon DynamoDB. - *

    - * Pages of results are fetched lazily from DynamoDB as they are needed. Some - * methods, such as {@link PaginatedList#size()} and - * {@link PaginatedList#toArray()}, require fetching the entire result set - * eagerly. See the javadoc of individual methods for details on which are lazy. - * - * @param - * The domain object type stored in this list. - */ -public abstract class PaginatedList implements List { - - private static final String UNMODIFIABLE_MESSAGE = "This is an unmodifiable list"; - - private static final String ITERATION_ONLY_UNSUPPORTED_OPERATION_MESSAGE = - " is not supported when using ITERATION_ONLY configuration."; - - /** - * Reference to the DynamoDB mapper for marshalling DynamoDB attributes back - * into objects - */ - protected final DynamoDbMapper mapper; - - /** - * The class annotated with DynamoDB tags declaring how to load/store - * objects into DynamoDB - */ - protected final Class clazz; - - /** The client for working with DynamoDB. */ - protected final DynamoDbClient dynamo; - - /** Tracks if all results have been loaded yet or not. */ - protected boolean allResultsLoaded = false; - - /** - * All currently loaded results for this list. - * - * In ITERATION_ONLY mode, this list will at most keep one page of the - * loaded results, and all previous results will be cleared from the memory. - */ - protected final List allResults; - /** Lazily loaded next results waiting to be added into allResults. */ - protected final List nextResults = new LinkedList(); - /** The pagination loading strategy for this paginated list **/ - private final PaginationLoadingStrategy paginationLoadingStrategy; - /** - * Keeps track on whether an iterator of the list has been retrieved. - * Only updated and checked when the list is in ITERATION_ONLY mode. - */ - private boolean iterationstarted = false; - - /** - * Constructs a PaginatedList instance using the default PaginationLoadingStrategy - */ - public PaginatedList(DynamoDbMapper mapper, Class clazz, DynamoDbClient dynamo) { - this(mapper, clazz, dynamo, null); - } - - /** - * Constructs a PaginatedList instance. - * - * @param mapper - * The mapper for marshalling DynamoDB attributes into objects. - * @param clazz - * The class of the annotated model. - * @param dynamo - * The DynamoDB client for making low-level request calls. - * @param paginationLoadingStrategy - * The strategy used for loading paginated results. Caller has to - * explicitly set this parameter, since the DynamoDBMapperConfig - * set in the mapper is not accessible here. If null value is - * provided, LAZY_LOADING will be set by default. - */ - public PaginatedList(DynamoDbMapper mapper, Class clazz, DynamoDbClient dynamo, - PaginationLoadingStrategy paginationLoadingStrategy) { - this.mapper = mapper; - this.clazz = clazz; - this.dynamo = dynamo; - this.paginationLoadingStrategy = paginationLoadingStrategy == null ? - PaginationLoadingStrategy.LAZY_LOADING : paginationLoadingStrategy; - - this.allResults = new ArrayList(); - - // Ideally, we should eagerly load all results here as soon as EAGER_LOADING is configured. - // But the implementation of loadAllResults() relies on a fully initialized sub-class object. - // So we have to do this in each sub-class constructor. - } - - /** - * Eagerly loads all results for this list. - *

    - * Not supported in ITERATION_ONLY mode. - *

    - */ - public synchronized void loadAllResults() { - checkUnsupportedOperationForIterationOnlyMode("loadAllResults()"); - - if (allResultsLoaded) { - return; - } - - while (nextResultsAvailable()) { - // Keep all loaded results - moveNextResults(false); - } - - allResultsLoaded = true; - } - - /** - * Returns whether there are more results available not yet included in the - * allResults member field. These could already have been fetched and are - * sitting in the nextResults buffer, or they could be fetched from the - * service opportunistically at the time this method is called. A return - * value of true guarantees that nextResults is non-empty. - */ - private boolean nextResultsAvailable() { - return !nextResults.isEmpty() || loadNextResults(); - } - - /** - * Attempts to load the next batch of results, if there are any, into the - * nextResults buffer. Returns whether there were any results to load. A - * return value of true guarantees that nextResults had items added to it. - */ - private synchronized boolean loadNextResults() { - if (atEndOfResults()) { - return false; - } - - do { - nextResults.addAll(fetchNextPage()); - } while (!atEndOfResults() && nextResults.isEmpty()); - - return !nextResults.isEmpty(); - } - - /** - * Moves the contents of the nextResults buffer into allResults and resets - * the buffer. - * - * @param clearPreviousResults - * Whether it should clear previous results in allResults field. - */ - private void moveNextResults(boolean clearPreviousResults) { - if (clearPreviousResults) { - allResults.clear(); - } - allResults.addAll(nextResults); - nextResults.clear(); - } - - /** - * Fetches the next page of results (which may be empty) and returns any - * items found. - */ - protected abstract List fetchNextPage(); - - /** - * Returns whether we have reached the end of the result set. - */ - protected abstract boolean atEndOfResults(); - - /** - * Returns an iterator over this list that lazily initializes results as - * necessary. - *

    - * If the list is configured with ITERATION_ONLY mode, the iterator - * can only be retrieved once, and any previously loaded results will be - * cleared from memory during the iteration. - *

    - */ - @Override - public Iterator iterator() { - return new PaginatedListIterator(paginationLoadingStrategy == PaginationLoadingStrategy.ITERATION_ONLY); - } - - /** - * Returns whether the collection is empty. At most one (non-empty) page of - * results is loaded to make the check. - *
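The ITERATION_ONLY restrictions called out above look roughly like this in practice (fragment; assumes the `mapper` and `Customer` class from the batch sketch, and a v1-style `DynamoDbMapperConfig(PaginationLoadingStrategy)` constructor that may differ in this port).

```java
DynamoDbMapperConfig iterationOnly =
        new DynamoDbMapperConfig(DynamoDbMapperConfig.PaginationLoadingStrategy.ITERATION_ONLY);

PaginatedScanList<Customer> once =
        mapper.scan(Customer.class, new DynamoDbScanExpression(), iterationOnly);

// Single pass: earlier pages are released from memory as the iteration advances.
for (Customer c : once) {
    System.out.println(c.getId());
}

once.iterator(); // UnsupportedOperationException: the list can only be iterated once
once.size();     // UnsupportedOperationException: size() needs loadAllResults(), unsupported here
```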

    - * Not supported in ITERATION_ONLY mode. - *

    - */ - @Override - public boolean isEmpty() { - checkUnsupportedOperationForIterationOnlyMode("isEmpty()"); - - return !iterator().hasNext(); - } - - /** - * Returns the Nth element of the list. Results are loaded until N elements - * are present, if necessary. - *

    - * Not supported in ITERATION_ONLY mode. - *

    - */ - @Override - public T get(int n) { - checkUnsupportedOperationForIterationOnlyMode("get(int n)"); - - while (allResults.size() <= n && nextResultsAvailable()) { - moveNextResults(false); - } - - return allResults.get(n); - } - - /** - * Returns whether the collection contains the given element. Results are - * loaded and checked incrementally until a match is found or the end of the - * result set is reached. - *

    - * Not supported in ITERATION_ONLY mode. - *

    - */ - @Override - public boolean contains(Object arg0) { - checkUnsupportedOperationForIterationOnlyMode("contains(Object arg0)"); - - if (allResults.contains(arg0)) { - return true; - } - - while (nextResultsAvailable()) { - boolean found = nextResults.contains(arg0); - moveNextResults(false); - if (found) { - return true; - } - } - - return false; - } - - /** - * Returns a sub-list in the range specified, loading more results as - * necessary. - *

    - * Not supported in ITERATION_ONLY mode. - *

    - */ - @Override - public List subList(int arg0, int arg1) { - checkUnsupportedOperationForIterationOnlyMode("subList(int arg0, int arg1)"); - - while (allResults.size() < arg1 && nextResultsAvailable()) { - moveNextResults(false); - } - - return Collections.unmodifiableList(allResults.subList(arg0, arg1)); - } - - /** - * Returns the first index of the object given in the list. Additional - * results are loaded incrementally as necessary. - *

    - * Not supported in ITERATION_ONLY mode. - *

    - */ - @Override - public int indexOf(Object arg0) { - checkUnsupportedOperationForIterationOnlyMode("indexOf(Object org0)"); - - int indexOf = allResults.indexOf(arg0); - if (indexOf >= 0) { - return indexOf; - } - - while (nextResultsAvailable()) { - indexOf = nextResults.indexOf(arg0); - int size = allResults.size(); - moveNextResults(false); - if (indexOf >= 0) { - return indexOf + size; - } - } - - return -1; - } - - @Override - public int size() { - loadAllResults(); - return allResults.size(); - } - - // Operations requiring the entire result set - - @Override - public boolean containsAll(Collection arg0) { - loadAllResults(); - return allResults.containsAll(arg0); - } - - @Override - public int lastIndexOf(Object arg0) { - loadAllResults(); - return allResults.lastIndexOf(arg0); - } - - @Override - public Object[] toArray() { - loadAllResults(); - return allResults.toArray(); - } - - @Override - public X[] toArray(X[] a) { - loadAllResults(); - return allResults.toArray(a); - } - - @Override - public ListIterator listIterator() { - throw new UnsupportedOperationException("ListIterators are not supported for this list"); - } - - // Unsupported Operations - - @Override - public ListIterator listIterator(int arg0) { - throw new UnsupportedOperationException("ListIterators are not supported for this list"); - } - - @Override - public boolean remove(Object arg0) { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - @Override - public T remove(int arg0) { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - @Override - public boolean removeAll(Collection arg0) { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - @Override - public boolean retainAll(Collection arg0) { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - @Override - public T set(int arg0, T arg1) { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - @Override - public boolean add(T arg0) { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - @Override - public void add(int arg0, T arg1) { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - @Override - public boolean addAll(Collection arg0) { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - @Override - public boolean addAll(int arg0, Collection arg1) { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - @Override - public void clear() { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - private void checkUnsupportedOperationForIterationOnlyMode(String methodSignature) { - if (this.paginationLoadingStrategy == PaginationLoadingStrategy.ITERATION_ONLY) { - throw new UnsupportedOperationException(methodSignature + ITERATION_ONLY_UNSUPPORTED_OPERATION_MESSAGE); - } - } - - private class PaginatedListIterator implements Iterator { - /** - * Whether this iterator is constructed by a PaginatedList in - * ITERATION_ONLY mode. - */ - private final boolean iterationOnly; - - /** - * A hard copy of the allResults list to prevent - * ConcurrentModificationExceptions. - * Only needed when the list is not in ITERNATION_ONLY mode. 
- */ - private final List allResultsCopy; - - private Iterator innerIterator; - - private int pos = 0; - - PaginatedListIterator(boolean iterationOnly) { - this.iterationOnly = iterationOnly; - - if (iterationOnly) { - synchronized (PaginatedList.this) { - if (iterationstarted) { - throw new UnsupportedOperationException("The list could only be iterated once in ITERATION_ONLY mode."); - } - iterationstarted = true; - } - - allResultsCopy = null; // not needed for ITERATION_ONLY mode - innerIterator = allResults.iterator(); - } else { - /* - * We make a copy of the allResults list to iterate over in order to - * avoid ConcurrentModificationExceptions caused by other methods - * loading more results into the list while someone iterates over it. - * This is a more severe problem than it might seem, because even - * innocuous-seeming operations such as contains() can modify the - * underlying result set. - */ - allResultsCopy = new ArrayList(); - allResultsCopy.addAll(allResults); - innerIterator = allResultsCopy.iterator(); - } - } - - @Override - public boolean hasNext() { - return innerIterator.hasNext() || shouldSyncWithAllResultsList() || - nextResultsAvailable(); - } - - /** - * If we aren't in ITERATION_ONLY mode then allResults is the authoriative source of - * results. If it's size has increased since this iterator was last synched with it then we - * have more results to process and need to re-sync allResultsCopy with allResults. - * - * @return True if more results are available in allResults then what we have currently - * snapshoted in the iterator, false otherwise. - */ - private boolean shouldSyncWithAllResultsList() { - return !iterationOnly && allResults.size() > allResultsCopy.size(); - } - - @Override - public T next() { - if (!innerIterator.hasNext()) { - /* - * We need to immediately fetch more results from the service, - * if - * -- it's in ITERATION_ONLY mode (which means innerIterator - * is always pointing at the "real" list of loaded results) - * OR it's not in ITERATION_ONLY and our private copy of the - * result list is already up to date with the full one. - */ - if (iterationOnly - || allResults.size() == allResultsCopy.size()) { - if (!nextResultsAvailable()) { - throw new NoSuchElementException(); - } - /* Clear previous results if it's in ITERATION_ONLY mode. */ - boolean clearPreviousResults = iterationOnly; - moveNextResults(clearPreviousResults); - } - - if (iterationOnly) { - /* - * allResults has been replaced with the latest page of results. 
- */ - innerIterator = allResults.iterator(); - } else { - /* - * Update our private results copy, and then update the inner iterator - */ - if (allResults.size() > allResultsCopy.size()) { - allResultsCopy.addAll(allResults.subList(allResultsCopy.size(), - allResults.size())); - } - - innerIterator = allResultsCopy.listIterator(pos); - } - } - - pos++; - return innerIterator.next(); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(UNMODIFIABLE_MESSAGE); - } - - } - - ; -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedParallelScanList.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedParallelScanList.java deleted file mode 100644 index 41638d43a9d3..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedParallelScanList.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.LinkedList; -import java.util.List; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.PaginationLoadingStrategy; -import software.amazon.awssdk.services.dynamodb.model.ScanResponse; - -/** - * Implementation of the List interface that represents the results from a parallel scan - * in AWS DynamoDB. Paginated results are loaded on demand when the user - * executes an operation that requires them. Some operations, such as size(), - * must fetch the entire list, but results are lazily fetched page by page when - * possible. - *

    - * This is an unmodifiable list, so callers should not invoke any operations - * that modify this list, otherwise they will throw an - * UnsupportedOperationException. - * - * @param - * The type of objects held in this list. - * @see PaginatedList - */ -public class PaginatedParallelScanList extends PaginatedList { - - /** The current parallel scan task which contains all the information about the scan request. */ - private final ParallelScanTask parallelScanTask; - - private final DynamoDbMapperConfig config; - - public PaginatedParallelScanList( - DynamoDbMapper mapper, - Class clazz, - DynamoDbClient dynamo, - ParallelScanTask parallelScanTask, - PaginationLoadingStrategy paginationLoadingStrategy, - DynamoDbMapperConfig config) { - super(mapper, clazz, dynamo, paginationLoadingStrategy); - - this.parallelScanTask = parallelScanTask; - this.config = config; - - // Marshall the first batch of results in all Results - allResults.addAll(marshalParallelScanResponsesIntoObjects(parallelScanTask.nextBatchOfScanResponses())); - - // If the results should be eagerly loaded at once - if (paginationLoadingStrategy == PaginationLoadingStrategy.EAGER_LOADING) { - loadAllResults(); - } - } - - @Override - protected boolean atEndOfResults() { - return parallelScanTask.isAllSegmentScanFinished(); - } - - @Override - protected List fetchNextPage() { - return marshalParallelScanResponsesIntoObjects(parallelScanTask.nextBatchOfScanResponses()); - } - - private List marshalParallelScanResponsesIntoObjects(List scanResults) { - List allItems = new LinkedList(); - for (ScanResponse scanResult : scanResults) { - if (null != scanResult) { - allItems.addAll(mapper.marshallIntoObjects( - mapper.toParameters( - scanResult.items(), - clazz, - parallelScanTask.getTableName(), - config))); - } - } - return allItems; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedQueryList.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedQueryList.java deleted file mode 100644 index 9b2cf6a9e25d..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedQueryList.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.List; - -import software.amazon.awssdk.core.util.SdkAutoConstructMap; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.PaginationLoadingStrategy; -import software.amazon.awssdk.services.dynamodb.model.QueryRequest; -import software.amazon.awssdk.services.dynamodb.model.QueryResponse; - -/** - * Implementation of the List interface that represents the results from a query - * in AWS DynamoDB. 
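For contrast with the lazy default, an eager parallel scan (fragment, same assumptions as the earlier sketches, including the v1-style config constructor) loads every page as soon as the list is constructed, which is what the EAGER_LOADING branch in the constructor above does.

```java
DynamoDbMapperConfig eager =
        new DynamoDbMapperConfig(DynamoDbMapperConfig.PaginationLoadingStrategy.EAGER_LOADING);

// All 8 segments are scanned to completion before this call returns.
PaginatedParallelScanList<Customer> everything =
        mapper.parallelScan(Customer.class, new DynamoDbScanExpression(), 8, eager);

// Random access no longer triggers service calls.
Customer last = everything.get(everything.size() - 1);
System.out.println(last.getId());
```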
Paginated results are loaded on demand when the user - * executes an operation that requires them. Some operations, such as size(), - * must fetch the entire list, but results are lazily fetched page by page when - * possible. - *

    - * This is an unmodifiable list, so callers should not invoke any operations - * that modify this list, otherwise they will throw an - * UnsupportedOperationException. - * - * @param - * The type of objects held in this list. - * @see PaginatedList - */ -public class PaginatedQueryList extends PaginatedList { - - /** The current query request. */ - private QueryRequest queryRequest; - - private final DynamoDbMapperConfig config; - - /** The current results for the last executed query operation. */ - private QueryResponse queryResult; - - public PaginatedQueryList( - DynamoDbMapper mapper, - Class clazz, - DynamoDbClient dynamo, - QueryRequest queryRequest, - QueryResponse queryResult, - PaginationLoadingStrategy paginationLoadingStrategy, - DynamoDbMapperConfig config) { - super(mapper, clazz, dynamo, paginationLoadingStrategy); - - this.queryRequest = queryRequest; - this.queryResult = queryResult; - this.config = config; - - - allResults.addAll(mapper.marshallIntoObjects( - mapper.toParameters( - queryResult.items(), - clazz, - queryRequest.tableName(), - config))); - - // If the results should be eagerly loaded at once - if (paginationLoadingStrategy == PaginationLoadingStrategy.EAGER_LOADING) { - loadAllResults(); - } - } - - @Override - protected synchronized boolean atEndOfResults() { - return queryResult.lastEvaluatedKey() instanceof SdkAutoConstructMap; - } - - @Override - protected synchronized List fetchNextPage() { - queryRequest = queryRequest.toBuilder().exclusiveStartKey(queryResult.lastEvaluatedKey()).build(); - queryResult = dynamo.query(DynamoDbMapper.applyUserAgent(queryRequest)); - return mapper.marshallIntoObjects(mapper.toParameters( - queryResult.items(), - clazz, - queryRequest.tableName(), - config)); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedScanList.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedScanList.java deleted file mode 100644 index 24d6a615389a..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedScanList.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.List; - -import software.amazon.awssdk.core.util.SdkAutoConstructMap; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig.PaginationLoadingStrategy; -import software.amazon.awssdk.services.dynamodb.model.ScanRequest; -import software.amazon.awssdk.services.dynamodb.model.ScanResponse; - -/** - * Implementation of the List interface that represents the results from a scan - * in AWS DynamoDB. Paginated results are loaded on demand when the user - * executes an operation that requires them. 
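The `fetchNextPage` implementations in these lists all follow the same low-level continuation pattern; a plain SDK v2 sketch of it is shown below (hypothetical table and key names). Internally the lists check whether `lastEvaluatedKey()` is an `SdkAutoConstructMap` to detect the end of the results; with the raw client, `hasLastEvaluatedKey()` serves the same purpose.

```java
import java.util.Collections;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.QueryRequest;
import software.amazon.awssdk.services.dynamodb.model.QueryResponse;

public class ContinuationSketch {
    public static void main(String[] args) {
        DynamoDbClient dynamo = DynamoDbClient.create();

        QueryRequest request = QueryRequest.builder()
                .tableName("Customer")                       // hypothetical table
                .keyConditionExpression("id = :id")
                .expressionAttributeValues(Collections.singletonMap(
                        ":id", AttributeValue.builder().s("id-1").build()))
                .build();

        QueryResponse response;
        do {
            response = dynamo.query(request);
            response.items().forEach(System.out::println);
            // Feed the continuation token back in as the next starting point.
            request = request.toBuilder()
                             .exclusiveStartKey(response.lastEvaluatedKey())
                             .build();
        } while (response.hasLastEvaluatedKey());
    }
}
```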
Some operations, such as size(), - * must fetch the entire list, but results are lazily fetched page by page when - * possible. - *

    - * This is an unmodifiable list, so callers should not invoke any operations - * that modify this list, otherwise they will throw an - * UnsupportedOperationException. - * - * @param - * The type of objects held in this list. - * @see PaginatedList - */ -public class PaginatedScanList extends PaginatedList { - - /** The current scan request. */ - private ScanRequest scanRequest; - - private final DynamoDbMapperConfig config; - - /** The current results for the last executed scan operation. */ - private ScanResponse scanResult; - - public PaginatedScanList( - DynamoDbMapper mapper, - Class clazz, - DynamoDbClient dynamo, - ScanRequest scanRequest, - ScanResponse scanResult, - PaginationLoadingStrategy paginationLoadingStrategy, - DynamoDbMapperConfig config) { - super(mapper, clazz, dynamo, paginationLoadingStrategy); - - this.scanRequest = scanRequest; - this.scanResult = scanResult; - this.config = config; - - allResults.addAll(mapper.marshallIntoObjects( - mapper.toParameters( - scanResult.items(), - clazz, - scanRequest.tableName(), - config))); - - // If the results should be eagerly loaded at once - if (paginationLoadingStrategy == PaginationLoadingStrategy.EAGER_LOADING) { - loadAllResults(); - } - } - - @Override - protected synchronized boolean atEndOfResults() { - return scanResult.lastEvaluatedKey() instanceof SdkAutoConstructMap; - } - - @Override - protected synchronized List fetchNextPage() { - scanRequest = scanRequest.toBuilder().exclusiveStartKey(scanResult.lastEvaluatedKey()).build(); - scanResult = dynamo.scan(DynamoDbMapper.applyUserAgent(scanRequest)); - return mapper.marshallIntoObjects(mapper.toParameters( - scanResult.items(), - clazz, - scanRequest.tableName(), - config)); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedScanTaskTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedScanTaskTest.java deleted file mode 100644 index 6740d6b59b8e..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/PaginatedScanTaskTest.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.argThat; -import static org.mockito.Mockito.when; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentMatcher; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughputExceededException; -import software.amazon.awssdk.services.dynamodb.model.ScanRequest; -import software.amazon.awssdk.services.dynamodb.model.ScanResponse; - -@RunWith(MockitoJUnitRunner.class) -public class PaginatedScanTaskTest { - - private static final String TABLE_NAME = "FooTable"; - - private static final int TOTAL_SEGMENTS = 5; - - private ParallelScanTask parallelScanTask; - - private ExecutorService executorService; - - @Mock - private DynamoDbClient dynamoDB; - - /** - * Custom matcher to match argument based on it's segment number - * - * @param segmentNumber Segment number to match for this stub. - * @return Stubbed argument matcher - */ - private static ScanRequest isSegmentNumber(int segmentNumber) { - return argThat(new SegmentArgumentMatcher(segmentNumber)); - } - - @Before - public void setup() { - executorService = Executors.newSingleThreadExecutor(); - parallelScanTask = new ParallelScanTask(dynamoDB, createScanRequests(), executorService); - } - - /** - * A failed segment makes the scan task unusable and will always rethrow the same exception. In - * this case it makes sense to shutdown the executor so that applications can shutdown faster. A - * future enhancement could be to either retry failed segments, explicitly resume a failed scan, - * or include metadata in the thrown exception about the state of the scan at the time it was - * aborted. See PR #624 and Issue #624 for more details. - */ - @Test - public void segmentFailsToScan_ExecutorServiceIsShutdown() throws InterruptedException { - stubsuccessfulScan(0); - stubsuccessfulScan(1); - when(dynamoDB.scan(isSegmentNumber(2))) - .thenThrow(ProvisionedThroughputExceededException.builder().message("Slow Down!").build()); - stubsuccessfulScan(3); - stubsuccessfulScan(4); - - try { - parallelScanTask.nextBatchOfScanResponses(); - fail("Expected ProvisionedThroughputExceededException"); - } catch (ProvisionedThroughputExceededException expected) { - // Ignored or expected. - } - - executorService.awaitTermination(10, TimeUnit.SECONDS); - assertTrue(executorService.isShutdown()); - } - - /** - * Stub a successful scan of a segment with a precanned item to return. - * - * @param segmentNumber Segment to stub. 
- */ - private void stubsuccessfulScan(int segmentNumber) { - when(dynamoDB.scan(isSegmentNumber(segmentNumber))) - .thenReturn(ScanResponse.builder().items(generateItems()).build()); - } - - private Map generateItems() { - final int numItems = 10; - Map items = new HashMap(numItems); - for (int i = 0; i < numItems; i++) { - items.put(UUID.randomUUID().toString(), AttributeValue.builder().s("foo").build()); - } - return items; - } - - private List createScanRequests() { - final List scanRequests = new ArrayList(TOTAL_SEGMENTS); - for (int i = 0; i < TOTAL_SEGMENTS; i++) { - scanRequests.add(createScanRequest(i)); - } - return scanRequests; - } - - private ScanRequest createScanRequest(int segmentNumber) { - return ScanRequest.builder() - .tableName(TABLE_NAME) - .segment(segmentNumber) - .totalSegments(TOTAL_SEGMENTS) - .build(); - } - - /** - * Custom argument matcher to match a {@link ScanRequest} on the segment number. - */ - private static class SegmentArgumentMatcher extends ArgumentMatcher { - - private final int matchingSegmentNumber; - - private SegmentArgumentMatcher(int matchingSegmentNumber) { - this.matchingSegmentNumber = matchingSegmentNumber; - } - - @Override - public boolean matches(Object argument) { - if (!(argument instanceof ScanRequest)) { - return false; - } - return matchingSegmentNumber == ((ScanRequest) argument).segment(); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ParallelScanTask.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ParallelScanTask.java deleted file mode 100644 index 27ca9b46af00..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ParallelScanTask.java +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import software.amazon.awssdk.annotations.SdkTestInternalApi; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.exception.SdkServiceException; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.ScanRequest; -import software.amazon.awssdk.services.dynamodb.model.ScanResponse; - -public class ParallelScanTask { - - /** - * The list of hard copies of ScanRequest with different segment number. - */ - private final List parallelScanRequests; - - private final int totalSegments; - - /** - * Cache all the future tasks, so that we can extract the exception when - * we see failed segment scan. 
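To show how the pieces of this class fit together, here is a fragment that drives it the way the mapper's parallel scan does (assumes the `mapper` and `dynamo` from the earlier sketches and uses the deprecated public constructor visible in this diff). Failed or already-finished segments surface as `null` entries in a batch, which is why callers null-check each response.

```java
int totalSegments = 4;
List<ScanRequest> segmentRequests = new ArrayList<>();
for (int segment = 0; segment < totalSegments; segment++) {
    segmentRequests.add(ScanRequest.builder()
            .tableName("Customer")            // hypothetical table
            .segment(segment)
            .totalSegments(totalSegments)
            .build());
}

ParallelScanTask task = new ParallelScanTask(mapper, dynamo, segmentRequests);
while (!task.isAllSegmentScanFinished()) {
    for (ScanResponse response : task.nextBatchOfScanResponses()) {
        if (response != null) {               // completed/failed segments are reported as null
            response.items().forEach(System.out::println);
        }
    }
}
```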
- */ - private final List> segmentScanFutureTasks; - - /** - * Cache all the most recent ScanResponse on each segment. - */ - private final List segmentScanResponses; - - /** - * The current state of the scan on each segment. - * Used as the monitor for synchronization. - */ - private final List segmentScanstates; - private final DynamoDbClient dynamo; - private ExecutorService executorService; - - @Deprecated - public ParallelScanTask(DynamoDbMapper mapper, DynamoDbClient dynamo, List parallelScanRequests) { - this(dynamo, parallelScanRequests); - } - - ParallelScanTask(DynamoDbClient dynamo, List parallelScanRequests) { - this(dynamo, parallelScanRequests, Executors.newCachedThreadPool()); - } - - @SdkTestInternalApi - ParallelScanTask(DynamoDbClient dynamo, List parallelScanRequests, - ExecutorService executorService) { - this.dynamo = dynamo; - this.parallelScanRequests = parallelScanRequests; - this.totalSegments = parallelScanRequests.size(); - this.executorService = executorService; - - // Create synchronized views of the list to guarantee any changes are visible across all threads. - segmentScanFutureTasks = Collections - .synchronizedList(new ArrayList>(totalSegments)); - segmentScanResponses = Collections.synchronizedList(new ArrayList(totalSegments)); - segmentScanstates = Collections - .synchronizedList(new ArrayList(totalSegments)); - - initSegmentScanstates(); - } - - String getTableName() { - return parallelScanRequests.get(0).tableName(); - } - - public boolean isAllSegmentScanFinished() { - synchronized (segmentScanstates) { - for (int segment = 0; segment < totalSegments; segment++) { - if (segmentScanstates.get(segment) != SegmentScanstate.SegmentScanCompleted) { - return false; - } - } - // Shut down if all data have been scanned and loaded. - executorService.shutdown(); - return true; - } - } - - public List nextBatchOfScanResponses() throws SdkClientException { - /** - * Kick-off all the parallel scan tasks. - */ - startScanNextPages(); - /** - * Wait till all the tasks have finished. - */ - synchronized (segmentScanstates) { - while (segmentScanstates.contains(SegmentScanstate.Waiting) - || segmentScanstates.contains(SegmentScanstate.Scanning)) { - try { - segmentScanstates.wait(); - } catch (InterruptedException ie) { - throw SdkClientException.builder() - .message("Parallel scan interrupted by other thread.") - .cause(ie) - .build(); - } - } - /** - * Keep the lock on segmentScanstates until all the cached results are marshaled and returned. - */ - return marshalParallelScanResponses(); - } - - } - - private void startScanNextPages() { - for (int segment = 0; segment < totalSegments; segment++) { - final int currentSegment = segment; - final SegmentScanstate currentSegmentState = segmentScanstates.get(currentSegment); - /** - * Assert: Should never see any task in state of "Scanning" when starting a new batch. - */ - if (currentSegmentState == SegmentScanstate.Scanning) { - - throw SdkClientException.builder() - .message("Should never see a 'Scanning' state when starting parallel scans.") - .build(); - - } else if (currentSegmentState == SegmentScanstate.Failed || - currentSegmentState == SegmentScanstate.SegmentScanCompleted) { - /** - * Skip any failed or completed segment, and clear the corresponding cached result. - */ - segmentScanResponses.set(currentSegment, null); - continue; - } else { - /** - * Otherwise, submit a new future task and save it in segmentScanFutureTasks. - */ - // Update the state to "Scanning" and notify any waiting thread. 
- synchronized (segmentScanstates) { - segmentScanstates.set(currentSegment, SegmentScanstate.Scanning); - segmentScanstates.notifyAll(); - } - Future futureTask = executorService.submit(() -> { - try { - if (currentSegmentState == SegmentScanstate.HasNextPage) { - return scanNextPageOfSegment(currentSegment, true); - } else if (currentSegmentState == SegmentScanstate.Waiting) { - return scanNextPageOfSegment(currentSegment, false); - } else { - throw SdkClientException.builder().message("Should not start a new future task").build(); - } - } catch (Exception e) { - synchronized (segmentScanstates) { - segmentScanstates.set(currentSegment, SegmentScanstate.Failed); - segmentScanstates.notifyAll(); - executorService.shutdown(); - } - throw e; - } - }); - // Cache the future task (for getting the Exceptions in the working thread). - segmentScanFutureTasks.set(currentSegment, futureTask); - } - } - } - - private List marshalParallelScanResponses() { - List scanResults = new LinkedList(); - for (int segment = 0; segment < totalSegments; segment++) { - SegmentScanstate currentSegmentState = segmentScanstates.get(segment); - /** - * Rethrow the exception from any failed segment scan. - */ - if (currentSegmentState == SegmentScanstate.Failed) { - try { - segmentScanFutureTasks.get(segment).get(); - throw SdkClientException.builder().message("No Exception found in the failed scan task.").build(); - } catch (ExecutionException ee) { - Throwable cause = ee.getCause(); - if (cause instanceof SdkServiceException) { - throw (SdkServiceException) cause; - } else { - throw SdkClientException.builder() - .message("Internal error during the scan on segment #" + segment + ".") - .build(); - } - } catch (Exception e) { - throw SdkClientException.builder() - .message("Error during the scan on segment #" + segment + ".") - .cause(e) - .build(); - } - } else if (currentSegmentState == SegmentScanstate.HasNextPage || - currentSegmentState == SegmentScanstate.SegmentScanCompleted) { - /** - * Get the ScanResponse from cache if the segment scan has finished. - */ - ScanResponse scanResult = segmentScanResponses.get(segment); - scanResults.add(scanResult); - } else if (currentSegmentState == SegmentScanstate.Waiting - || currentSegmentState == SegmentScanstate.Scanning) { - throw SdkClientException.builder() - .message("Should never see a 'Scanning' or 'Waiting' state when marshalling parallel " + - "scan results.") - .build(); - } - } - return scanResults; - } - - private ScanResponse scanNextPageOfSegment(int currentSegment, boolean checkLastEvaluatedKey) { - ScanRequest segmentScanRequest = parallelScanRequests.get(currentSegment); - if (checkLastEvaluatedKey) { - ScanResponse lastScanResult = segmentScanResponses.get(currentSegment); - segmentScanRequest = segmentScanRequest.toBuilder().exclusiveStartKey(lastScanResult.lastEvaluatedKey()).build(); - } else { - segmentScanRequest = segmentScanRequest.toBuilder().exclusiveStartKey(null).build(); - } - ScanResponse scanResult = dynamo.scan(DynamoDbMapper.applyUserAgent(segmentScanRequest)); - - /** - * Cache the scan result in segmentScanResponses. - * We should never try to get these scan results by calling get() on the cached future tasks. - */ - segmentScanResponses.set(currentSegment, scanResult); - - /** - * Update the state and notify any waiting thread. 
- */ - synchronized (segmentScanstates) { - if (null == scanResult.lastEvaluatedKey()) { - segmentScanstates.set(currentSegment, SegmentScanstate.SegmentScanCompleted); - } else { - segmentScanstates.set(currentSegment, SegmentScanstate.HasNextPage); - } - segmentScanstates.notifyAll(); - } - return scanResult; - } - - private void initSegmentScanstates() { - for (int segment = 0; segment < totalSegments; segment++) { - segmentScanFutureTasks.add(null); - segmentScanResponses.add(null); - segmentScanstates.add(SegmentScanstate.Waiting); - } - } - - /** - * Enumeration of the possible states of the scan on a segment. - */ - private enum SegmentScanstate { - /** The scan on the segment is waiting for resources to execute and has not started yet. */ - Waiting, - - /** The scan is in process, and hasn't finished yet. */ - Scanning, - - /** The scan has already failed. */ - Failed, - - /** The scan on the current page has finished, but there are more pages in the segment to be scanned. */ - HasNextPage, - - /** The scan on the whole segment has completed. */ - SegmentScanCompleted, - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/QueryResultPage.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/QueryResultPage.java deleted file mode 100644 index 569de37430a1..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/QueryResultPage.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ConsumedCapacity; - -/** - * Container for a page of query results - */ -public class QueryResultPage { - - private List results; - private Map lastEvaluatedKey; - - private Integer count; - private Integer scannedCount; - private ConsumedCapacity consumedCapacity; - - /** - * Returns all matching items for this page of query results. - */ - public List getResults() { - return results; - } - - public void setResults(List results) { - this.results = results; - } - - /** - * Returns the last evaluated key, which can be used as the - * exclusiveStartKey to fetch the next page of results. Returns null if this - * is the last page of results. - * - * @return The key-value pairs which map from the attribute name of each component - * of the primary key to its value. - */ - public Map lastEvaluatedKey() { - return lastEvaluatedKey; - } - - public void setLastEvaluatedKey(Map lastEvaluatedKey) { - this.lastEvaluatedKey = lastEvaluatedKey; - } - - /** - * The number of items in the response.

- * If you used a QueryFilter in the request, then Count is the number of - * items returned after the filter was applied, and ScannedCount - * is the number of matching items before the filter was applied.
- * If - * you did not use a filter in the request, then Count and - * ScannedCount are the same. - * - * @return The number of items in the response.
- * If you used a QueryFilter in the request, then Count is the number of - * items returned after the filter was applied, and ScannedCount - * is the number of matching items before the filter was applied.
    If - * you did not use a filter in the request, then Count and - * ScannedCount are the same. - */ - public Integer getCount() { - return count; - } - - public void setCount(Integer count) { - this.count = count; - } - - /** - * The number of items evaluated, before any QueryFilter is - * applied. A high ScannedCount value with few, or no, - * Count results indicates an inefficient Query operation. - * For more information, see Count - * and ScannedCount in the Amazon DynamoDB Developer Guide. - *

- * If you did not use a filter in the request, then - * ScannedCount is the same as Count. - * - * @return The number of items evaluated, before any QueryFilter is - * applied. A high ScannedCount value with few, or no, - * Count results indicates an inefficient Query operation. - * For more information, see Count - * and ScannedCount in the Amazon DynamoDB Developer Guide. - *
    If you did not use a filter in the request, then - * ScannedCount is the same as Count. - */ - public Integer scannedCount() { - return scannedCount; - } - - public void setScannedCount(Integer scannedCount) { - this.scannedCount = scannedCount; - } - - /** - * The capacity units consumed by an operation. The data returned - * includes the total provisioned throughput consumed, along with - * statistics for the table and any indexes involved in the operation. - * ConsumedCapacity is only returned if the request asked for it. - * For more information, see Provisioned - * Throughput in the Amazon DynamoDB Developer Guide. - * - * @return The capacity units consumed by an operation. The data returned - * includes the total provisioned throughput consumed, along with - * statistics for the table and any indexes involved in the operation. - * ConsumedCapacity is only returned if the request asked for it. - * For more information, see Provisioned - * Throughput in the Amazon DynamoDB Developer Guide. - */ - public ConsumedCapacity getConsumedCapacity() { - return consumedCapacity; - } - - public void setConsumedCapacity(ConsumedCapacity consumedCapacity) { - this.consumedCapacity = consumedCapacity; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/RandomUuidMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/RandomUuidMarshaller.java deleted file mode 100644 index 4a66bc8ec5aa..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/RandomUuidMarshaller.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.UUID; - -public class RandomUuidMarshaller implements DynamoDbMarshaller, DynamoDbTypeConverter { - - public static final String randomUUID = UUID.randomUUID().toString(); - - @Override - public String marshall(Object getterReturnResult) { - return randomUUID; - } - - @Override - public Object unmarshall(Class clazz, String obj) { - return null; - } - - @Override - public String convert(final Object object) { - return randomUUID; - } - - @Override - public Object unconvert(final String object) { - return null; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ReflectionUtils.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ReflectionUtils.java deleted file mode 100644 index 9727ed0a6afd..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ReflectionUtils.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.annotation.Annotation; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import software.amazon.awssdk.utils.StringUtils; - -/** - * @deprecated Replaced by {@link StandardBeanProperties} - */ -@Deprecated -class ReflectionUtils { - - /** - * Returns the field name that corresponds to the given getter method, - * according to the Java naming convention. - * - * @param getter - * The getter method. - * @param forceCamelCase - * True if the returned field name should be in camel-case, i.e. - * the first letter is lower-cased. - */ - static String getFieldNameByGetter(Method getter, boolean forceCamelCase) { - String getterName = getter.getName(); - - String fieldNameWithUpperCamelCase = ""; - if (getterName.startsWith("get")) { - fieldNameWithUpperCamelCase = getterName.substring("get".length()); - } else if (getterName.startsWith("is")) { - fieldNameWithUpperCamelCase = getterName.substring("is".length()); - } - - if (fieldNameWithUpperCamelCase.length() == 0) { - throw new DynamoDbMappingException( - "Getter must begin with 'get' or 'is', and the field name must contain at least one character."); - } - - if (forceCamelCase) { - // Lowercase the first letter of the name - return StringUtils.lowerCase(fieldNameWithUpperCamelCase.substring(0, 1)) + fieldNameWithUpperCamelCase.substring(1); - } else { - return fieldNameWithUpperCamelCase; - } - - } - - /** - * Returns the Field object for the specified field name declared in the - * specified class. Returns null if no such field can be found. - * - * @param clazz - * The declaring class where the field will be reflected. This - * method will NOT attempt to reflect its superclass if such - * field is not found in this class. - * @param fieldName - * The case-sensitive name of the field to be searched. - */ - static Field getClassFieldByName(Class clazz, String fieldName) { - try { - return clazz.getDeclaredField(fieldName); - } catch (SecurityException e) { - throw new DynamoDbMappingException( - "Denied access to the [" + fieldName + "] field in class [" + clazz + "].", e); - } catch (NoSuchFieldException e) { - return null; - } - } - - /** - * This method searches for a specific type of annotation that is applied to - * either the specified getter method or its corresponding class field. - * Returns the annotation if it is found, else null. - */ - static T getAnnotationFromGetterOrField( - Method getter, Class annotationClass) { - // Check annotation on the getter method - T onGetter = getter.getAnnotation(annotationClass); - if (onGetter != null) { - return onGetter; - } - - // Check annotation on the corresponding field - String fieldName = getFieldNameByGetter(getter, true); - // Only consider the field declared in the same class where getter is defined. 
- Field field = getClassFieldByName(getter.getDeclaringClass(), fieldName); - T onField = null; - if (field != null) { - onField = field.getAnnotation(annotationClass); - } - return onField; - } - - /** - * Returns true if an annotation for the specified type is found on the - * getter method or its corresponding class field. - */ - static boolean getterOrFieldHasAnnotation( - Method getter, Class annotationClass) { - return getAnnotationFromGetterOrField(getter, annotationClass) != null; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3ClientCache.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3ClientCache.java deleted file mode 100644 index 9d2d1da59649..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3ClientCache.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.S3Client; -import software.amazon.awssdk.services.s3.S3Configuration; - -/** - * A smart Map for {@link S3Client} objects. {@link S3ClientCache} keeps the - * clients organized by region, and if provided {@link AwsCredentials} will - * create clients on the fly. Otherwise it just return clients given to it with - * {@link #useClient(S3Client, Region)}. - */ -public class S3ClientCache { - private final ConcurrentMap clientsByRegion = new ConcurrentHashMap(); - - private final AwsCredentialsProvider awscredentialsProvider; - - @Deprecated - S3ClientCache(AwsCredentials credentials) { - this(StaticCredentialsProvider.create(credentials)); - } - - /** - * Create a client cache using the given AWSCredentialsProvider. If - * {@link #getClient(Region)} is called and a client has not been - * provided for the region, the cache will instantiate one from the - * provided {@link AwsCredentialsProvider}. - * - * @param awsCredentialsProvider - * The credentials provider to use when creating new - * {@link S3Client}. - */ - S3ClientCache(AwsCredentialsProvider awsCredentialsProvider) { - this.awscredentialsProvider = awsCredentialsProvider; - } - - - /** - * Force the client cache to provide a certain client for the region which - * that client is configured. This can be useful to provide clients with - * different {@link S3Configuration}. - * - * @param client - * An {@link S3Client} to use in the cache. Its region will - * be detected automatically. 
- */ - public void useClient(S3Client client, Region region) { - clientsByRegion.put(region.id(), client); - } - - /** - * Returns a client for the requested region, or throws an exception when - * unable. - * - * @param region - * The region the returned {@link S3Client} will be - * configured to use. - * @return A client for the given region from the cache, either instantiated - * automatically from the provided {@link AwsCredentials} or - * provided with {@link #useClient(S3Client, Region)}. - * @throws IllegalArgumentException - * When a region is requested that has not been provided to the - * cache with {@link #useClient(S3Client, Region)}, and the cache - * has no {@link AwsCredentials} with which a client may be - * instantiated. - */ - public S3Client getClient(Region region) { - if (region == null) { - throw new IllegalArgumentException("S3 region must be specified"); - } - return getClient(region.id()); - } - - /** - * Returns a client for the requested region, or throws an exception when - * unable. - * - * @param region - * The region the returned {@link S3Client} will be - * configured to use. - * @return A client for the given region from the cache, either instantiated - * automatically from the provided {@link AwsCredentials} or - * provided with {@link #useClient(S3Client, Region)}. - * @throws IllegalArgumentException - * When a region is requested that has not been provided to the - * cache with {@link #useClient(S3Client, Region)}, and the cache - * has no {@link AwsCredentials} with which a client may be - * instantiated. - */ - public S3Client getClient(String region) { - if (region == null) { - throw new IllegalArgumentException("S3 region must be specified"); - } - S3Client client = clientsByRegion.get(region); - return client != null ? client : cacheClient(region); - } - - /** - * Returns a new client with region configured to - * region. - * Also updates the clientsByRegion map by associating the - * new client with region. - * - * @param region - * The region the returned {@link S3Client} will be - * configured to use. - * @return A new {@link S3Client} client with region set to region. - */ - private S3Client cacheClient(String region) { - if (awscredentialsProvider == null) { - throw new IllegalArgumentException("No credentials provider found to connect to S3"); - } - S3Client client = S3Client.builder().credentialsProvider(awscredentialsProvider).region(Region.of(region)).build(); - clientsByRegion.put(region, client); - return client; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3Link.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3Link.java deleted file mode 100644 index a316bb0d44d0..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3Link.java +++ /dev/null @@ -1,449 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.io.File; -import java.io.OutputStream; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.core.sync.ResponseTransformer; -import software.amazon.awssdk.core.util.json.JacksonUtils; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.S3Client; -import software.amazon.awssdk.services.s3.model.AccessControlPolicy; -import software.amazon.awssdk.services.s3.model.GetObjectRequest; -import software.amazon.awssdk.services.s3.model.GetObjectResponse; -import software.amazon.awssdk.services.s3.model.ObjectCannedACL; -import software.amazon.awssdk.services.s3.model.PutObjectAclRequest; -import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.PutObjectResponse; - -/** - * An S3 Link that works with {@link DynamoDbMapper}. - * An S3 link is persisted as a JSON string in DynamoDB. - * This link object can be used directly to upload/download files to S3. - * Alternatively, the underlying - * {@link S3Client} can be retrieved to - * provide full access API to S3. - *

    - * For example: - *

    - * AWSCredentialsProvider s3CredentialProvider = ...;
    - * DynamoDBMapper mapper = new DynamoDBMapper(..., s3CredentialProvider);
    - * String username = "jamestkirk";
    - *
    - * User user = new User();
    - * user.setUsername(username);
    - *
    - * // S3 region can be specified, but is optional
    - * S3Link s3link = mapper.createS3Link("my-company-user-avatars", username + ".jpg");
    - * user.setAvatar(s3link);
    - *
    - * // All meta information of the S3 resource is persisted in DynamoDB, including
    - * // region, bucket, and key
    - * mapper.save(user);
    - *
    - * // Upload file to S3 with the link saved in DynamoDB
    - * s3link.uploadFrom(new File("/path/to/all/those/user/avatars/" + username + ".jpg"));
    - * // Download file from S3 via an S3Link
    - * s3link.downloadTo(new File("/path/to/downloads/" + username + ".jpg"));
    - *
    - * // Full S3 API is available via the canonical AmazonS3Client and TransferManager API.
    - * // For example:
    - * AmazonS3Client s3 = s3link.getAmazonS3Client();
    - * TransferManager s3m = s3link.getTransferManager();
    - * // etc.
    - * 
    - * The User pojo class used above:
    - * @DynamoDBTable(tableName = "user-table")
    - * public class User {
    - *     private String username;
    - *     private S3Link avatar;
    - *
    - *     @DynamoDBHashKey
    - *     public String getUsername() {
    - *         return username;
    - *     }
    - *
    - *     public void setUsername(String username) {
    - *         this.username = username;
    - *     }
    - *
    - *     public S3Link getAvatar() {
    - *         return avatar;
    - *     }
    - *
    - *     public void setAvatar(S3Link avatar) {
    - *         this.avatar = avatar;
    - *     }
    - * }
    - * 
    - */ -public class S3Link { - private final S3ClientCache s3cc; - private final Id id; - - S3Link(S3ClientCache s3cc, String bucketName, String key) { - this(s3cc, new Id(bucketName, key)); - } - - S3Link(S3ClientCache s3cc, String region, String bucketName, String key) { - this(s3cc, new Id(region, bucketName, key)); - } - - private S3Link(S3ClientCache s3cc, Id id) { - this.s3cc = s3cc; - this.id = id; - - if (s3cc == null) { - throw new IllegalArgumentException("S3ClientCache must be configured for use with S3Link"); - } - if (id == null || id.bucket() == null || id.getKey() == null) { - throw new IllegalArgumentException("Bucket and key must be specified for S3Link"); - } - } - - /** - * Deserializes from a JSON string. - */ - public static S3Link fromJson(S3ClientCache s3cc, String json) { - Id id = JacksonUtils.fromJsonString(json, Id.class); - return new S3Link(s3cc, id); - } - - private static String convertRegionToString(Region region) { - return region.id(); - } - - public String getKey() { - return id.getKey(); - } - - public String bucketName() { - return id.bucket(); - } - - /** - * Returns the S3 region in {@link Region} format. - *

    - * Do not use this method if {@link S3Link} is created with a region not in {@link Region} enum. - * Use {@link #getRegion()} instead. - *

    - * - * @return S3 region. - */ - public Region s3Region() { - return Region.of(getRegion()); - } - - /** - * Returns the S3 region as string. - * - * @return region provided when creating the S3Link object. - * If no region is provided during S3Link creation, returns us-east-1. - */ - public String getRegion() { - return id.getRegionId() == null ? "us-east-1" : id.getRegionId(); - } - - /** - * Serializes into a JSON string. - * - * @return The string representation of the link to the S3 resource. - */ - public String toJson() { - return id.toJson(); - } - - public S3Client getAmazonS3Client() { - return s3cc.getClient(getRegion()); - } - - /** - * Convenience method to synchronously upload from the given file to the - * Amazon S3 object represented by this S3Link. - * - * @param source - * source file to upload from - * - * @return A {@link PutObjectResponse} object containing the information - * returned by Amazon S3 for the newly created object. - */ - public PutObjectResponse uploadFrom(final File source) { - return getAmazonS3Client().putObject(PutObjectRequest.builder() - .bucket(bucketName()) - .key(getKey()) - .build(), RequestBody.fromFile(source)); - } - - /** - * Convenience method to synchronously upload from the given buffer to the - * Amazon S3 object represented by this S3Link. - * - * @param buffer - * The buffer containing the data to upload. - * - * @return A {@link PutObjectResponse} object containing the information - * returned by Amazon S3 for the newly created object. - */ - public PutObjectResponse uploadFrom(final byte[] buffer) { - return getAmazonS3Client().putObject(PutObjectRequest.builder() - .bucket(bucketName()) - .key(getKey()) - .contentLength((long) buffer.length) - .build(), RequestBody.fromBytes(buffer)); - } - - /** - * Sets the access control list for the object represented by this S3Link. - * - * Note: Executing this method requires that the object already exists in - * Amazon S3. - * - * @param acl - * The access control list describing the new permissions for the - * object represented by this S3Link. - */ - public void setAcl(ObjectCannedACL acl) { - getAmazonS3Client().putObjectAcl(PutObjectAclRequest.builder().bucket(bucketName()).key(getKey()).acl(acl).build()); - } - - public void setAcl(AccessControlPolicy acl) { - getAmazonS3Client().putObjectAcl(PutObjectAclRequest.builder() - .accessControlPolicy(acl) - .bucket(bucketName()) - .key(getKey()) - .build()); - } - - /** - * Convenient method to synchronously download to the specified file from - * the S3 object represented by this S3Link. - * - * @param destination destination file to download to - * - * @return All S3 object metadata for the specified object. - * Returns null if constraints were specified but not met. - */ - public GetObjectResponse downloadTo(final File destination) { - return getAmazonS3Client().getObject(GetObjectRequest.builder() - .bucket(bucketName()) - .key(getKey()) - .build(), - ResponseTransformer.toFile(destination.toPath())); - } - - /** - * Downloads the data from the object represented by this S3Link to the - * specified output stream. - * - * @param output - * The output stream to write the object's data to. - * - * @return The object's metadata. 
- */ - public GetObjectResponse downloadTo(final OutputStream output) { - return getAmazonS3Client().getObject(GetObjectRequest.builder() - .bucket(bucketName()) - .key(getKey()) - .build(), - ResponseTransformer.toOutputStream(output)); - } - - /** - * JSON wrapper of an {@link S3Link} identifier, - * which consists of the S3 region id, bucket name and key. - * Sample JSON serialized form: - *
    -     * {"s3":{"bucket":"mybucket","key":"mykey","region":"us-west-2"}}
    -     * {"s3":{"bucket":"mybucket","key":"mykey","region":null}}
    -     * 
    - * Note for S3 a null region means US standard. - *
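A rough, self-contained sketch of the JSON shape documented above, using plain Jackson rather than the SDK's internal JacksonUtils; the Id and S3 classes below are simplified stand-ins for the private types in this file, not the actual implementation:

    import com.fasterxml.jackson.annotation.JsonProperty;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public final class S3LinkJsonSketch {
        // Simplified stand-ins for the private Id/S3 types; the property names
        // mirror the @JsonProperty names used in this file ("s3", "bucket", "key", "region").
        static final class Id { @JsonProperty("s3") public S3 s3; }
        static final class S3 {
            @JsonProperty("bucket") public String bucket;
            @JsonProperty("key") public String key;
            @JsonProperty("region") public String region;
        }

        public static void main(String[] args) throws Exception {
            S3 s3 = new S3();
            s3.bucket = "mybucket";
            s3.key = "mykey";
            s3.region = "us-west-2";
            Id id = new Id();
            id.s3 = s3;

            ObjectMapper om = new ObjectMapper();
            // Produces {"s3":{"bucket":"mybucket","key":"mykey","region":"us-west-2"}}
            String json = om.writeValueAsString(id);
            Id roundTripped = om.readValue(json, Id.class);
            System.out.println(json + " -> " + roundTripped.s3.region);
        }
    }

Serializing and then reading the value back reproduces the nested document shown in the sample form above.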

    - * @see Region - */ - static class Id { - @JsonProperty("s3") - private S3 s3; - - Id() { - } // used by Jackson to unmarshall - - Id(String bucketName, String key) { - this.s3 = new S3(bucketName, key); - } - - Id(String region, String bucketName, String key) { - this.s3 = new S3(region, bucketName, key); - } - - Id(S3 s3) { - this.s3 = s3; - } - - @JsonProperty("s3") - public S3 s3() { - return s3; - } - - @JsonIgnore - public String getRegionId() { - return s3.getRegionId(); - } - - @JsonIgnore - public String bucket() { - return s3.bucket(); - } - - @JsonIgnore - public String getKey() { - return s3.getKey(); - } - - String toJson() { - return JacksonUtils.toJsonString(this); - } - } - - /** - * Internal class for JSON serialization purposes. - *

    - * @see Id - */ - private static class S3 { - - /** - * The name of the S3 bucket containing the object to retrieve. - */ - @JsonProperty("bucket") - private String bucket; - - /** - * The key under which the desired S3 object is stored. - */ - @JsonProperty("key") - private String key; - - /** - * The region id of {@link Region} where the S3 object is stored. - */ - @JsonProperty("region") - private String regionId; - - @SuppressWarnings("unused") - S3() { - } // used by Jackson to unmarshall - - /** - * Constructs a new {@link S3} with all the required parameters. - * - * @param bucket - * The name of the bucket containing the desired object. - * @param key - * The key in the specified bucket under which the object is - * stored. - */ - S3(String bucket, String key) { - this(null, bucket, key); - } - - /** - * Constructs a new {@link S3} with all the required parameters. - * - * @param region - * The region where the S3 object is stored. - * @param bucket - * The name of the bucket containing the desired object. - * @param key - * The key in the specified bucket under which the object is - * stored. - */ - S3(String region, String bucket, String key) { - this.regionId = region; - this.bucket = bucket; - this.key = key; - } - - /** - * Gets the name of the bucket containing the object to be downloaded. - * - * @return The name of the bucket containing the object to be downloaded. - */ - @JsonProperty("bucket") - public String bucket() { - return bucket; - } - - /** - * Gets the key under which the object to be downloaded is stored. - * - * @return The key under which the object to be downloaded is stored. - */ - @JsonProperty("key") - public String getKey() { - return key; - } - - @JsonProperty("region") - public String getRegionId() { - return regionId; - } - } - - /** - * {@link S3Link} factory. - */ - public static final class Factory implements DynamoDbTypeConverter { - static final Factory DEFAULT = new Factory((S3ClientCache) null); - private final S3ClientCache s3cc; - - public Factory(final S3ClientCache s3cc) { - this.s3cc = s3cc; - } - - public static Factory of(final AwsCredentialsProvider provider) { - return provider == null ? DEFAULT : new Factory(new S3ClientCache(provider)); - } - - public S3Link createS3Link(Region s3region, String bucketName, String key) { - return createS3Link(convertRegionToString(s3region), bucketName, key); - } - - public S3Link createS3Link(String s3region, String bucketName, String key) { - if (s3ClientCache() == null) { - throw new IllegalStateException("Mapper must be constructed with S3 AWS Credentials to create S3Link"); - } - return new S3Link(s3ClientCache(), s3region, bucketName, key); - } - - public S3ClientCache s3ClientCache() { - return this.s3cc; - } - - @Override - public String convert(final S3Link o) { - return o.bucketName() == null || o.getKey() == null ? null : o.toJson(); - } - - @Override - public S3Link unconvert(final String o) { - return S3Link.fromJson(s3ClientCache(), o); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3LinkIdTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3LinkIdTest.java deleted file mode 100644 index fd8b4d42ddc3..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3LinkIdTest.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; - -import org.junit.Test; -import software.amazon.awssdk.core.util.json.JacksonUtils; - -public class S3LinkIdTest { - - @Test - public void testToFromJson() { - String region = "ap-northeast-1"; - S3Link.Id id = new S3Link.Id(region, "bucket", "key"); - String json = id.toJson(); - S3Link.Id twin = JacksonUtils.fromJsonString(json, S3Link.Id.class); - assertEquals("bucket", twin.bucket()); - assertEquals("key", twin.getKey()); - assertEquals(region, twin.getRegionId()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3LinkTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3LinkTest.java deleted file mode 100644 index 8b5789868846..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/S3LinkTest.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; - -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; - -@Ignore // FIXME: Setup fails with "region cannot be null" -public class S3LinkTest { - private DynamoDbMapper mapper; - - @Before - public void setUp() { - AwsCredentials credentials = AwsBasicCredentials.create("mock", "mock"); - DynamoDbClient db = DynamoDbClient.builder() - .credentialsProvider(StaticCredentialsProvider.create(credentials)) - .region(Region.US_WEST_2) - .build(); - mapper = new DynamoDbMapper(db, StaticCredentialsProvider.create(credentials)); - } - - @Test(expected = IllegalArgumentException.class) - public void nullKey() { - mapper.createS3Link("bucket", null); - } - - @Test(expected = IllegalArgumentException.class) - public void nullBucketName() { - mapper.createS3Link(null, "key"); - } - - @Test - public void testToJson() { - S3Link testLink = mapper.createS3Link("bucket", "key"); - String json = testLink.toJson(); - - assertEquals(json, - "{\"s3\":{\"bucket\":\"bucket\",\"key\":\"key\",\"region\":\"us-east-1\"}}", - json); - testLink = mapper.createS3Link("bucket", "testKey"); - json = testLink.toJson(); - assertEquals(json, - "{\"s3\":{\"bucket\":\"bucket\",\"key\":\"testKey\",\"region\":\"us-east-1\"}}", - json); - - testLink = mapper.createS3Link(Region.AP_SOUTHEAST_2, "bucket", "testKey"); - json = testLink.toJson(); - assertEquals(json, - "{\"s3\":{\"bucket\":\"bucket\",\"key\":\"testKey\",\"region\":\"ap-southeast-2\"}}", - json); - - testLink = mapper.createS3Link(Region.AP_SOUTHEAST_2, "test-bucket", "testKey"); - json = testLink.toJson(); - assertEquals(json, - "{\"s3\":{\"bucket\":\"test-bucket\",\"key\":\"testKey\",\"region\":\"ap-southeast-2\"}}", - json); - - testLink = mapper.createS3Link(Region.AP_SOUTHEAST_2, "test-bucket", "test/key/with/slashes"); - json = testLink.toJson(); - assertEquals(json, - "{\"s3\":{\"bucket\":\"test-bucket\",\"key\":\"test/key/with/slashes\",\"region\":\"ap-southeast-2\"}}", - json); - - testLink = mapper.createS3Link("test-bucket", "test/key/with/slashes"); - json = testLink.toJson(); - assertEquals(json, - "{\"s3\":{\"bucket\":\"test-bucket\",\"key\":\"test/key/with/slashes\",\"region\":\"us-east-1\"}}", - json); - testLink = mapper.createS3Link(Region.AP_SOUTHEAST_2, "test-bucket", "test/key/with/slashes"); - json = testLink.toJson(); - assertEquals(json, - "{\"s3\":{\"bucket\":\"test-bucket\",\"key\":\"test/key/with/slashes\",\"region\":\"ap-southeast-2\"}}", - json); - } - - @Test - public void testFromJson() { - String json = "{\"s3\":{\"region\":\"ap-southeast-2\",\"bucket\":\"test-bucket\",\"key\":\"testKey\"}}"; - S3Link s3link = S3Link.fromJson(mapper.s3ClientCache(), json); - assertEquals("test-bucket", s3link.bucketName()); - assertEquals("ap-southeast-2", s3link.getRegion()); - assertEquals("testKey", s3link.getKey()); - } - - @Test - public void testDefaultRegion() { - S3Link testLink1 = mapper.createS3Link("bucket", "key"); - String json = testLink1.toJson(); - // Default to US_STANDARD if not specified - assertEquals(json, - 
"{\"s3\":{\"bucket\":\"bucket\",\"key\":\"key\",\"region\":\"us-east-1\"}}", - json); - // Default region changed to GovCloud - testLink1 = mapper.createS3Link(Region.US_GOV_WEST_1, "bucket", "key"); - json = testLink1.toJson(); - assertEquals(json, - "{\"s3\":{\"bucket\":\"bucket\",\"key\":\"key\",\"region\":\"us-gov-west-1\"}}", - json); - } - - @Test - public void testGetRegion_ReturnsUsEast1_Whens3LinkCreated_WithNullRegion() { - S3Link s3Link = mapper.createS3Link("bucket", "key"); - - assertEquals("us-east-1", s3Link.s3Region().id()); - assertEquals("us-east-1", s3Link.getRegion()); - } - - @Test - public void testGetRegion_ReturnsUsEast1_WhenS3LinkCreated_WithUsStandardRegion() { - S3Link s3Link = mapper.createS3Link(Region.US_EAST_1, "bucket", "key"); - - assertEquals("us-east-1", s3Link.s3Region().id()); - assertEquals("us-east-1", s3Link.getRegion()); - } - - @Test - public void testGetRegion_ReturnsUsEast1_Whens3LinkCreated_WithUsEast1Region() { - S3Link s3Link = mapper.createS3Link("us-east-1", "bucket", "key"); - - assertEquals("us-east-1", s3Link.s3Region().id()); - assertEquals("us-east-1", s3Link.getRegion()); - } - - @Test - public void testGetRegion_WithNonUsStandardRegion() { - S3Link s3Link = mapper.createS3Link(Region.EU_WEST_2, "bucket", "key"); - - assertEquals(Region.EU_WEST_2, s3Link.s3Region()); - assertEquals(Region.EU_WEST_2.id(), s3Link.getRegion()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ScanResultPage.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ScanResultPage.java deleted file mode 100644 index d3a8b28f722b..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/ScanResultPage.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ConsumedCapacity; - -/** - * Container for a page of scan results. - */ -public class ScanResultPage { - - private List results; - private Map lastEvaluatedKey; - - private Integer count; - private Integer scannedCount; - private ConsumedCapacity consumedCapacity; - - /** - * Returns all matching items for this page of scan results, which may be - * empty. - */ - public List getResults() { - return results; - } - - public void setResults(List results) { - this.results = results; - } - - /** - * Returns the last evaluated key, which can be used as the - * exclusiveStartKey to fetch the next page of results. Returns null if this - * is the last page of results. - * - * @return The key-value pairs which map from the attribute name of each component - * of the primary key to its value. 
- */ - public Map lastEvaluatedKey() { - return lastEvaluatedKey; - } - - public void setLastEvaluatedKey(Map lastEvaluatedKey) { - this.lastEvaluatedKey = lastEvaluatedKey; - } - - /** - * The number of items in the response.

- * If you set ScanFilter - * in the request, then Count is the number of items returned - * after the filter was applied, and ScannedCount is the number of - * matching items before the filter was applied.
- * If you did not use a - * filter in the request, then Count is the same as - * ScannedCount. - * - * @return The number of items in the response.
- * If you set ScanFilter - * in the request, then Count is the number of items returned - * after the filter was applied, and ScannedCount is the number of - * matching items before the filter was applied.
    If you did not use a - * filter in the request, then Count is the same as - * ScannedCount. - */ - public Integer getCount() { - return count; - } - - public void setCount(Integer count) { - this.count = count; - } - - /** - * The number of items evaluated, before any ScanFilter is - * applied. A high ScannedCount value with few, or no, - * Count results indicates an inefficient Scan operation. - * For more information, see Count - * and ScannedCount in the Amazon DynamoDB Developer Guide. - *

- * If you did not use a filter in the request, then - * ScannedCount is the same as Count. - * - * @return The number of items evaluated, before any ScanFilter is - * applied. A high ScannedCount value with few, or no, - * Count results indicates an inefficient Scan operation. - * For more information, see Count - * and ScannedCount in the Amazon DynamoDB Developer Guide. - *
    If you did not use a filter in the request, then - * ScannedCount is the same as Count. - */ - public Integer scannedCount() { - return scannedCount; - } - - public void setScannedCount(Integer scannedCount) { - this.scannedCount = scannedCount; - } - - /** - * The capacity units consumed by an operation. The data returned - * includes the total provisioned throughput consumed, along with - * statistics for the table and any indexes involved in the operation. - * ConsumedCapacity is only returned if the request asked for it. - * For more information, see Provisioned - * Throughput in the Amazon DynamoDB Developer Guide. - * - * @return The capacity units consumed by an operation. The data returned - * includes the total provisioned throughput consumed, along with - * statistics for the table and any indexes involved in the operation. - * ConsumedCapacity is only returned if the request asked for it. - * For more information, see Provisioned - * Throughput in the Amazon DynamoDB Developer Guide. - */ - public ConsumedCapacity getConsumedCapacity() { - return consumedCapacity; - } - - public void setConsumedCapacity(ConsumedCapacity consumedCapacity) { - this.consumedCapacity = consumedCapacity; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardAnnotationMaps.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardAnnotationMaps.java deleted file mode 100644 index 392753341a10..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardAnnotationMaps.java +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGenerateStrategy.CREATE; -import static software.amazon.awssdk.services.dynamodb.model.KeyType.HASH; -import static software.amazon.awssdk.services.dynamodb.model.KeyType.RANGE; - -import java.lang.annotation.Annotation; -import java.lang.reflect.AnnotatedElement; -import java.lang.reflect.Constructor; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.Arrays; -import java.util.Collections; -import java.util.EnumMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperFieldModel.DynamoDbAttributeType; -import software.amazon.awssdk.services.dynamodb.model.KeyType; - -/** - * Map of DynamoDB annotations. - */ -@SdkInternalApi -final class StandardAnnotationMaps { - - /** - * Gets all the DynamoDB annotations for a given class. 
- */ - static TableMap of(Class clazz) { - final TableMap annotations = new TableMap(clazz); - annotations.putAll(clazz); - return annotations; - } - - /** - * Gets all the DynamoDB annotations; method annotations override field - * level annotations which override class/type level annotations. - */ - static FieldMap of(Method getter, String defaultName) { - final Class targetType = (Class) getter.getReturnType(); - final String fieldName = StandardBeanProperties.fieldNameOf(getter); - - Field declaredField = null; - try { - declaredField = getter.getDeclaringClass().getDeclaredField(fieldName); - } catch (final NoSuchFieldException no) { - // Ignored or expected. - } catch (final SecurityException e) { - throw new DynamoDbMappingException("no access to field for " + getter, e); - } - - if (defaultName == null) { - defaultName = fieldName; - } - - final FieldMap annotations = new FieldMap(targetType, defaultName); - annotations.putAll(targetType); - annotations.putAll(declaredField); - annotations.putAll(getter); - return annotations; - } - - /** - * Creates a new instance of the clazz with the target type and annotation - * as parameters if available. - */ - private static T overrideOf(Class clazz, Class targetType, Annotation annotation) { - try { - if (annotation != null) { - try { - Constructor c = clazz.getDeclaredConstructor(Class.class, annotation.annotationType()); - return c.newInstance(targetType, annotation); - } catch (final NoSuchMethodException no) { - // Ignored or expected. - } - } - try { - return clazz.getDeclaredConstructor(Class.class).newInstance(targetType); - } catch (final NoSuchMethodException no) { - // Ignored or expected. - } - return clazz.newInstance(); - } catch (final IllegalAccessException | InstantiationException | InvocationTargetException | RuntimeException e) { - throw new DynamoDbMappingException("could not instantiate " + clazz, e); - } - } - - /** - * Common type-conversions properties. - */ - private abstract static class AbstractAnnotationMap { - private final Annotations map = new Annotations(); - - /** - * Gets the actual annotation by type; if the type is not directly - * mapped then the meta-annotation is returned. - */ - final A actualOf(final Class annotationType) { - final Annotation annotation = this.map.get(annotationType); - if (annotation == null || annotation.annotationType() == annotationType) { - return (A) annotation; - } else if (annotation.annotationType().isAnnotationPresent(annotationType)) { - return annotation.annotationType().getAnnotation(annotationType); - } - throw new DynamoDbMappingException( - "could not resolve annotation by type" + - "; @" + annotationType.getSimpleName() + " not present on " + annotation - ); - } - - /** - * Puts all DynamoDB annotations into the map. - */ - final void putAll(AnnotatedElement annotated) { - if (annotated != null) { - this.map.putAll(new Annotations().putAll(annotated.getAnnotations())); - } - } - } - - /** - * Common type-conversions properties. - */ - abstract static class TypedMap extends AbstractAnnotationMap { - private final Class targetType; - - private TypedMap(final Class targetType) { - this.targetType = targetType; - } - - /** - * Gets the target type. - */ - final Class targetType() { - return this.targetType; - } - - /** - * Gets the attribute type from the {@link DynamoDbTyped} annotation - * if present. 
- */ - public DynamoDbAttributeType attributeType() { - final DynamoDbTyped annotation = actualOf(DynamoDbTyped.class); - if (annotation != null) { - return annotation.value(); - } - return null; - } - - /** - * Creates a new type-converter form the {@link DynamoDbTypeConverted} - * annotation if present. - */ - public DynamoDbTypeConverter typeConverter() { - Annotation annotation = super.map.get(DynamoDbTypeConverted.class); - if (annotation != null) { - final DynamoDbTypeConverted converted = actualOf(DynamoDbTypeConverted.class); - annotation = (converted == annotation ? null : annotation); - return overrideOf(converted.converter(), targetType, annotation); - } - return null; - } - - /** - * Creates a new auto-generator from the {@link DynamoDbAutoGenerated} - * annotation if present. - */ - public DynamoDbAutoGenerator autoGenerator() { - Annotation annotation = super.map.get(DynamoDbAutoGenerated.class); - if (annotation != null) { - final DynamoDbAutoGenerated generated = actualOf(DynamoDbAutoGenerated.class); - annotation = (generated == annotation ? null : annotation); - DynamoDbAutoGenerator generator = overrideOf(generated.generator(), targetType, annotation); - if (generator.getGenerateStrategy() == CREATE && targetType.isPrimitive()) { - throw new DynamoDbMappingException( - "type [" + targetType + "] is not supported for auto-generation" + - "; primitives are not allowed when auto-generate strategy is CREATE" - ); - } - return generator; - } - return null; - } - - /** - * Maps the attributes from the {@link DynamoDbFlattened} annotation. - */ - public Map attributes() { - final Map attributes = new LinkedHashMap(); - for (final DynamoDbAttribute a : actualOf(DynamoDbFlattened.class).attributes()) { - if (a.mappedBy().isEmpty() || a.attributeName().isEmpty()) { - throw new DynamoDbMappingException("@DynamoDBFlattened must specify mappedBy and attributeName"); - } else if (attributes.put(a.mappedBy(), a.attributeName()) != null) { - throw new DynamoDbMappingException("@DynamoDBFlattened must not duplicate mappedBy=" + a.mappedBy()); - } - } - if (attributes.isEmpty()) { - throw new DynamoDbMappingException("@DynamoDBFlattened must specify one or more attributes"); - } - return attributes; - } - - /** - * Returns true if the {@link DynamoDbFlattened} annotation is present. - */ - public boolean flattened() { - return actualOf(DynamoDbFlattened.class) != null; - } - } - - /** - * {@link DynamoDbMapperTableModel} annotations. - */ - static final class TableMap extends TypedMap implements DynamoDbMapperTableModel.Properties { - private TableMap(final Class targetType) { - super(targetType); - } - - /** - * {@inheritDoc} - */ - @Override - public DynamoDbAttributeType attributeType() { - DynamoDbAttributeType attributeType = super.attributeType(); - if (attributeType == null && actualOf(DynamoDbTable.class) != null) { - attributeType = DynamoDbAttributeType.M; - } - return attributeType; - } - - /** - * {@inheritDoc} - */ - @Override - public String tableName() { - final DynamoDbTable annotation = actualOf(DynamoDbTable.class); - if (annotation != null && !annotation.tableName().isEmpty()) { - return annotation.tableName(); - } - return null; - } - } - - /** - * {@link DynamoDbMapperFieldModel} annotations. 
- */ - static final class FieldMap extends TypedMap implements DynamoDbMapperFieldModel.Properties { - private final String defaultName; - - private FieldMap(Class targetType, String defaultName) { - super(targetType); - this.defaultName = defaultName; - } - - /** - * Returns true if the {@link DynamoDbIgnore} annotation is present. - */ - public boolean ignored() { - return actualOf(DynamoDbIgnore.class) != null; - } - - /** - * {@inheritDoc} - */ - @Override - public DynamoDbAttributeType attributeType() { - final DynamoDbScalarAttribute annotation = actualOf(DynamoDbScalarAttribute.class); - if (annotation != null) { - if (Set.class.isAssignableFrom(targetType())) { - return DynamoDbAttributeType.valueOf(annotation.type().name() + "S"); - } else { - return DynamoDbAttributeType.valueOf(annotation.type().name()); - } - } - return super.attributeType(); - } - - /** - * {@inheritDoc} - */ - @Override - public String attributeName() { - final DynamoDbHashKey hashKey = actualOf(DynamoDbHashKey.class); - if (hashKey != null && !hashKey.attributeName().isEmpty()) { - return hashKey.attributeName(); - } - final DynamoDbIndexHashKey indexHashKey = actualOf(DynamoDbIndexHashKey.class); - if (indexHashKey != null && !indexHashKey.attributeName().isEmpty()) { - return indexHashKey.attributeName(); - } - final DynamoDbRangeKey rangeKey = actualOf(DynamoDbRangeKey.class); - if (rangeKey != null && !rangeKey.attributeName().isEmpty()) { - return rangeKey.attributeName(); - } - final DynamoDbIndexRangeKey indexRangeKey = actualOf(DynamoDbIndexRangeKey.class); - if (indexRangeKey != null && !indexRangeKey.attributeName().isEmpty()) { - return indexRangeKey.attributeName(); - } - final DynamoDbAttribute attribute = actualOf(DynamoDbAttribute.class); - if (attribute != null && !attribute.attributeName().isEmpty()) { - return attribute.attributeName(); - } - final DynamoDbVersionAttribute versionAttribute = actualOf(DynamoDbVersionAttribute.class); - if (versionAttribute != null && !versionAttribute.attributeName().isEmpty()) { - return versionAttribute.attributeName(); - } - final DynamoDbScalarAttribute scalarAttribute = actualOf(DynamoDbScalarAttribute.class); - if (scalarAttribute != null && !scalarAttribute.attributeName().isEmpty()) { - return scalarAttribute.attributeName(); - } - final DynamoDbNamed annotation = actualOf(DynamoDbNamed.class); - if (annotation != null && !annotation.value().isEmpty()) { - return annotation.value(); - } - return this.defaultName; - } - - /** - * {@inheritDoc} - */ - @Override - public KeyType keyType() { - final DynamoDbKeyed annotation = actualOf(DynamoDbKeyed.class); - if (annotation != null) { - return annotation.value(); - } - return null; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean versioned() { - return actualOf(DynamoDbVersioned.class) != null; - } - - /** - * {@inheritDoc} - */ - @Override - public Map> globalSecondaryIndexNames() { - final Map> gsis = new EnumMap>(KeyType.class); - final DynamoDbIndexHashKey indexHashKey = actualOf(DynamoDbIndexHashKey.class); - if (indexHashKey != null) { - if (!indexHashKey.globalSecondaryIndexName().isEmpty()) { - if (indexHashKey.globalSecondaryIndexNames().length > 0) { - throw new DynamoDbMappingException("@DynamoDBIndexHashKey must not specify both HASH GSI name/names"); - } - gsis.put(HASH, Collections.singletonList(indexHashKey.globalSecondaryIndexName())); - } else if (indexHashKey.globalSecondaryIndexNames().length > 0) { - gsis.put(HASH, 
Collections.unmodifiableList(Arrays.asList(indexHashKey.globalSecondaryIndexNames()))); - } else { - throw new DynamoDbMappingException("@DynamoDBIndexHashKey must specify one of HASH GSI name/names"); - } - } - final DynamoDbIndexRangeKey indexRangeKey = actualOf(DynamoDbIndexRangeKey.class); - if (indexRangeKey != null) { - if (!indexRangeKey.globalSecondaryIndexName().isEmpty()) { - if (indexRangeKey.globalSecondaryIndexNames().length > 0) { - throw new DynamoDbMappingException("@DynamoDBIndexRangeKey must not specify both RANGE GSI name/names"); - } - gsis.put(RANGE, Collections.singletonList(indexRangeKey.globalSecondaryIndexName())); - } else if (indexRangeKey.globalSecondaryIndexNames().length > 0) { - gsis.put(RANGE, Collections.unmodifiableList(Arrays.asList(indexRangeKey.globalSecondaryIndexNames()))); - } else if (localSecondaryIndexNames().isEmpty()) { - throw new DynamoDbMappingException("@DynamoDBIndexRangeKey must specify RANGE GSI and/or LSI name/names"); - } - } - if (!gsis.isEmpty()) { - return Collections.unmodifiableMap(gsis); - } - return Collections.>emptyMap(); - } - - /** - * {@inheritDoc} - */ - @Override - public List localSecondaryIndexNames() { - final DynamoDbIndexRangeKey annotation = actualOf(DynamoDbIndexRangeKey.class); - if (annotation != null) { - if (!annotation.localSecondaryIndexName().isEmpty()) { - if (annotation.localSecondaryIndexNames().length > 0) { - throw new DynamoDbMappingException("@DynamoDBIndexRangeKey must not specify both LSI name/names"); - } - return Collections.singletonList(annotation.localSecondaryIndexName()); - } else if (annotation.localSecondaryIndexNames().length > 0) { - return Collections.unmodifiableList(Arrays.asList(annotation.localSecondaryIndexNames())); - } - } - return Collections.emptyList(); - } - } - - /** - * A map of annotation type to annotation. It will map any first level - * custom annotations to any DynamoDB annotation types that are present. - * It will support up to two levels of compounded DynamoDB annotations. - */ - private static final class Annotations extends LinkedHashMap, Annotation> { - private static final long serialVersionUID = -1L; - - /** - * Puts the annotation if it's DynamoDB; ensures there are no conflicts. - */ - public boolean putIfAnnotated(Class annotationType, Annotation annotation) { - if (!annotationType.isAnnotationPresent(DynamoDb.class)) { - return false; - } else { - annotation = put(annotationType, annotation); - if (annotation == null) { - return true; - } - } - throw new DynamoDbMappingException( - "conflicting annotations " + annotation + " and " + get(annotationType) + - "; allowed only one of @" + annotationType.getSimpleName() - ); - } - - /** - * Puts all DynamoDB annotations and meta-annotations in the map. - */ - public Annotations putAll(Annotation... 
annotations) { - for (final Annotation a1 : annotations) { - putIfAnnotated(a1.annotationType(), a1); - for (final Annotation a2 : a1.annotationType().getAnnotations()) { - if (putIfAnnotated(a2.annotationType(), a1)) { - for (final Annotation a3 : a2.annotationType().getAnnotations()) { - putIfAnnotated(a3.annotationType(), a2); - } - } - } - } - return this; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardBeanProperties.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardBeanProperties.java deleted file mode 100644 index 2283cddfddcf..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardBeanProperties.java +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperFieldModel.Reflect; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardAnnotationMaps.FieldMap; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardAnnotationMaps.TableMap; -import software.amazon.awssdk.utils.StringUtils; - -/** - * Reflection assistant for {@link DynamoDbMapper} - */ -@SdkInternalApi -final class StandardBeanProperties { - - /** - * Returns the bean mappings for a given class (caches the results). - */ - @SuppressWarnings("unchecked") - static Beans of(Class clazz) { - return ((CachedBeans) CachedBeans.CACHE).beans(clazz); - } - - /** - * Gets the field name given the getter method. - */ - static String fieldNameOf(Method getter) { - final String name = getter.getName().replaceFirst("^(get|is)", ""); - return StringUtils.lowerCase(name.substring(0, 1)) + name.substring(1); - } - - /** - * Cache of {@link Beans} by class type. - */ - private static final class CachedBeans { - private static final CachedBeans CACHE = new CachedBeans(); - private final ConcurrentMap, Beans> cache = new ConcurrentHashMap, Beans>(); - - private Beans beans(Class clazz) { - if (!cache.containsKey(clazz)) { - final TableMap annotations = StandardAnnotationMaps.of(clazz); - final BeanMap map = new BeanMap(clazz, false); - cache.putIfAbsent(clazz, new Beans(annotations, map)); - } - return cache.get(clazz); - } - } - - /** - * Cache of {@link Bean} mappings by class type. 
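The getter-to-attribute naming convention implemented by fieldNameOf above (strip a leading get/is prefix, then lower-case the first character) can be sketched standalone. The class name and sample getter names below are illustrative only; the real implementation delegates the lower-casing to the SDK's StringUtils helper:

    import java.util.Locale;

    // Standalone approximation of StandardBeanProperties.fieldNameOf.
    public class FieldNameSketch {
        static String fieldNameOf(String getterName) {
            String name = getterName.replaceFirst("^(get|is)", "");
            return name.substring(0, 1).toLowerCase(Locale.ROOT) + name.substring(1);
        }

        public static void main(String[] args) {
            System.out.println(fieldNameOf("getVal"));      // prints "val"
            System.out.println(fieldNameOf("isVersioned")); // prints "versioned"
        }
    }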
- */ - static final class Beans { - private final DynamoDbMapperTableModel.Properties properties; - private final Map> map; - - private Beans(TableMap annotations, Map> map) { - this.properties = new DynamoDbMapperTableModel.Properties.Immutable(annotations); - this.map = Collections.unmodifiableMap(map); - } - - DynamoDbMapperTableModel.Properties properties() { - return this.properties; - } - - Map> map() { - return this.map; - } - } - - /** - * Holds the reflection bean properties for a given property. - */ - static final class Bean { - private final DynamoDbMapperFieldModel.Properties properties; - private final ConvertibleType type; - private final Reflect reflect; - - private Bean(FieldMap annotations, Reflect reflect, Method getter) { - this.properties = new DynamoDbMapperFieldModel.Properties.Immutable(annotations); - this.type = ConvertibleType.of(getter, annotations); - this.reflect = reflect; - } - - DynamoDbMapperFieldModel.Properties properties() { - return this.properties; - } - - ConvertibleType type() { - return this.type; - } - - Reflect reflect() { - return this.reflect; - } - } - - /** - * Get/set reflection operations. - */ - static final class MethodReflect implements Reflect { - private final Method getter; - private final Method setter; - - private MethodReflect(Method getter) { - this.setter = setterOf(getter); - this.getter = getter; - } - - static Method setterOf(Method getter) { - try { - final String name = "set" + getter.getName().replaceFirst("^(get|is)", ""); - return getter.getDeclaringClass().getMethod(name, getter.getReturnType()); - } catch (NoSuchMethodException | RuntimeException no) { - // Ignored or expected. - } - return null; - } - - @Override - public V get(T object) { - try { - return (V) getter.invoke(object); - } catch (final Exception e) { - throw new DynamoDbMappingException("could not invoke " + getter + " on " + object.getClass(), e); - } - } - - @Override - public void set(T object, V value) { - try { - setter.invoke(object, value); - } catch (final Exception e) { - throw new DynamoDbMappingException("could not invoke " + setter + " on " + object.getClass() + - " with value " + value + " of type " + - (value == null ? null : value.getClass()), e); - } - } - } - - /** - * Get/set reflection operations with a declaring property. 
- */ - static final class DeclaringReflect implements Reflect { - private final Reflect reflect; - private final Reflect declaring; - private final Class targetType; - - private DeclaringReflect(Method getter, Reflect declaring, Class targetType) { - this.reflect = new MethodReflect(getter); - this.declaring = declaring; - this.targetType = targetType; - } - - static T newInstance(Class targetType) { - try { - return targetType.newInstance(); - } catch (final Exception e) { - throw new DynamoDbMappingException("could not instantiate " + targetType, e); - } - } - - @Override - public V get(T object) { - final T declaringObject = declaring.get(object); - if (declaringObject == null) { - return null; - } - return reflect.get(declaringObject); - } - - @Override - public void set(T object, V value) { - T declaringObject = declaring.get(object); - if (declaringObject == null) { - declaringObject = newInstance(targetType); - declaring.set(object, declaringObject); - } - reflect.set(declaringObject, value); - } - } - - /** - * {@link Map} of {@link Bean} - */ - static final class BeanMap extends LinkedHashMap> { - public static final long serialVersionUID = 1L; - - private final Class clazz; - - BeanMap(Class clazz, boolean inherited) { - this.clazz = clazz; - putAll(clazz, inherited); - } - - private void putAll(Class clazz, boolean inherited) { - for (final Method method : clazz.getMethods()) { - if (canMap(method, inherited)) { - final FieldMap annotations = StandardAnnotationMaps.of(method, null); - if (!annotations.ignored()) { - final Reflect reflect = new MethodReflect(method); - putOrFlatten(annotations, reflect, method); - } - } - } - } - - private void putOrFlatten(FieldMap annotations, Reflect reflect, Method getter) { - if (annotations.flattened()) { - flatten((Class) annotations.targetType(), annotations.attributes(), (Reflect) reflect); - } else { - final Bean bean = new Bean(annotations, reflect, getter); - if (put(bean.properties().attributeName(), bean) != null) { - throw new DynamoDbMappingException("duplicate attribute name"); - } - } - } - - private void flatten(Class targetType, Map attributes, Reflect declaring) { - for (final Method method : targetType.getMethods()) { - if (canMap(method, true)) { - String name = fieldNameOf(method); - name = attributes.remove(name); - if (name == null) { - continue; - } - final FieldMap annotations = StandardAnnotationMaps.of(method, name); - if (!annotations.ignored()) { - final Reflect reflect = new DeclaringReflect(method, declaring, targetType); - putOrFlatten(annotations, reflect, method); - } - } - } - if (!attributes.isEmpty()) { //<- this should be empty by now - throw new DynamoDbMappingException("contains unknown flattened attribute(s): " + attributes); - } - } - - private boolean canMap(Method method, boolean inherited) { - if (method.getName().matches("^(get|is).+") == false) { - return false; - } else if (method.getParameterTypes().length != 0) { - return false; - } else if (method.isBridge() || method.isSynthetic()) { - return false; - } else if (method.getDeclaringClass() == Object.class) { - return false; - } else if (!inherited && method.getDeclaringClass() != this.clazz && - StandardAnnotationMaps.of(method.getDeclaringClass()).attributeType() == null) { - return false; - } else { - return true; - } - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactories.java 
b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactories.java deleted file mode 100644 index bc5d9e8d08a1..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactories.java +++ /dev/null @@ -1,801 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static java.util.stream.Collectors.toList; -import static software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Scalar.BOOLEAN; -import static software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Scalar.DEFAULT; -import static software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Scalar.STRING; -import static software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Vector.LIST; -import static software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Vector.MAP; -import static software.amazon.awssdk.services.dynamodb.datamodeling.StandardTypeConverters.Vector.SET; -import static software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType.B; -import static software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType.N; -import static software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType.S; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.ImmutableObjectUtils; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperFieldModel.DynamoDbAttributeType; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperFieldModel.Reflect; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperModelFactory.TableFactory; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConverter.AbstractConverter; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTypeConverter.DelegateConverter; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardBeanProperties.Bean; -import software.amazon.awssdk.services.dynamodb.datamodeling.StandardBeanProperties.Beans; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * Pre-defined strategies for mapping between Java types and DynamoDB types. 
- */ -@SdkInternalApi -final class StandardModelFactories { - - private static final Logger log = LoggerFactory.getLogger(StandardModelFactories.class); - - /** - * Creates the standard {@link DynamoDbMapperModelFactory} factory. - */ - static DynamoDbMapperModelFactory of(S3Link.Factory s3Links) { - return new StandardModelFactory(s3Links); - } - - /** - * Creates a new set of conversion rules based on the configuration. - */ - private static RuleFactory rulesOf(DynamoDbMapperConfig config, S3Link.Factory s3Links, - DynamoDbMapperModelFactory models) { - final boolean ver1 = (config.getConversionSchema() == ConversionSchemas.V1); - final boolean ver2 = (config.getConversionSchema() == ConversionSchemas.V2); - final boolean v2Compatible = (config.getConversionSchema() == ConversionSchemas.V2_COMPATIBLE); - - final DynamoDbTypeConverterFactory.Builder scalars = config.getTypeConverterFactory().override(); - scalars.with(String.class, S3Link.class, s3Links); - - final Rules factory = new Rules(scalars.build()); - factory.add(factory.new NativeType(!ver1)); - factory.add(factory.new V2CompatibleBool(v2Compatible)); - factory.add(factory.new NativeBool(ver2)); - factory.add(factory.new StringScalar(true)); - factory.add(factory.new NumberScalar(true)); - factory.add(factory.new BinaryScalar(true)); - factory.add(factory.new NativeBoolSet(ver2)); - factory.add(factory.new StringScalarSet(true)); - factory.add(factory.new NumberScalarSet(true)); - factory.add(factory.new BinaryScalarSet(true)); - factory.add(factory.new ObjectSet(ver2)); - factory.add(factory.new ObjectStringSet(!ver2)); - factory.add(factory.new ObjectList(!ver1)); - factory.add(factory.new ObjectMap(!ver1)); - factory.add(factory.new ObjectDocumentMap(!ver1, models, config)); - return factory; - } - - /** - * Attribute value conversion. - */ - interface Rule { - boolean isAssignableFrom(ConvertibleType type); - - DynamoDbTypeConverter newConverter(ConvertibleType type); - - DynamoDbAttributeType getAttributeType(); - } - - /** - * Attribute value conversion factory. - */ - interface RuleFactory { - Rule getRule(ConvertibleType type); - } - - /** - * {@link TableFactory} mapped by {@link ConversionSchema}. - */ - private static final class StandardModelFactory implements DynamoDbMapperModelFactory { - private final ConcurrentMap cache; - private final S3Link.Factory s3Links; - - private StandardModelFactory(S3Link.Factory s3Links) { - this.cache = new ConcurrentHashMap(); - this.s3Links = s3Links; - } - - @Override - public TableFactory getTableFactory(DynamoDbMapperConfig config) { - final ConversionSchema schema = config.getConversionSchema(); - if (!cache.containsKey(schema)) { - RuleFactory rules = rulesOf(config, s3Links, this); - rules = new ConversionSchemas.ItemConverterRuleFactory(config, s3Links, rules); - cache.putIfAbsent(schema, new StandardTableFactory(rules)); - } - return cache.get(schema); - } - } - - /** - * {@link DynamoDbMapperTableModel} mapped by the clazz. 
- */ - private static final class StandardTableFactory implements TableFactory { - private final ConcurrentMap, DynamoDbMapperTableModel> cache; - private final RuleFactory rules; - - private StandardTableFactory(RuleFactory rules) { - this.cache = new ConcurrentHashMap, DynamoDbMapperTableModel>(); - this.rules = rules; - } - - @Override - @SuppressWarnings("unchecked") - public DynamoDbMapperTableModel getTable(Class clazz) { - if (!this.cache.containsKey(clazz)) { - this.cache.putIfAbsent(clazz, new TableBuilder(clazz, rules).build()); - } - return (DynamoDbMapperTableModel) this.cache.get(clazz); - } - } - - /** - * {@link DynamoDbMapperTableModel} builder. - */ - private static final class TableBuilder extends DynamoDbMapperTableModel.Builder { - private TableBuilder(Class clazz, Beans beans, RuleFactory rules) { - super(clazz, beans.properties()); - for (final Bean bean : beans.map().values()) { - try { - with(new FieldBuilder(clazz, bean, rules.getRule(bean.type())).build()); - } catch (final RuntimeException e) { - throw new DynamoDbMappingException(String.format( - "%s[%s] could not be mapped for type %s", - clazz.getSimpleName(), bean.properties().attributeName(), bean.type() - ), e); - } - } - } - - private TableBuilder(Class clazz, RuleFactory rules) { - this(clazz, StandardBeanProperties.of(clazz), rules); - } - } - - /** - * {@link DynamoDbMapperFieldModel} builder. - */ - private static final class FieldBuilder extends DynamoDbMapperFieldModel.Builder { - private FieldBuilder(Class clazz, Bean bean, Rule rule) { - super(clazz, bean.properties()); - if (bean.type().attributeType() != null) { - with(bean.type().attributeType()); - } else { - with(rule.getAttributeType()); - } - with(rule.newConverter(bean.type())); - with(bean.reflect()); - } - } - - /** - * Groups the conversion rules to be evaluated. - */ - private static final class Rules implements RuleFactory { - private final Set> rules = new LinkedHashSet>(); - private final DynamoDbTypeConverterFactory scalars; - - private Rules(DynamoDbTypeConverterFactory scalars) { - this.scalars = scalars; - } - - @SuppressWarnings("unchecked") - private void add(Rule rule) { - this.rules.add((Rule) rule); - } - - @Override - public Rule getRule(ConvertibleType type) { - for (final Rule rule : rules) { - if (rule.isAssignableFrom(type)) { - return rule; - } - } - return new NotSupported(); - } - - /** - * Gets the scalar converter for the given source and target types. - */ - private DynamoDbTypeConverter getConverter(Class sourceType, ConvertibleType type) { - return scalars.getConverter(sourceType, type.targetType()); - } - - /** - * Gets the nested converter for the given conversion type. - * Also wraps the resulting converter with a nullable converter. - */ - private DynamoDbTypeConverter getConverter(ConvertibleType type) { - return new DelegateConverter(getRule(type).newConverter(type)) { - public AttributeValue convert(T o) { - return o == null ? AttributeValue.builder().nul(true).build() : super.convert(o); - } - }; - } - - /** - * Native {@link AttributeValue} conversion. 
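As a small aside on the null handling in Rules.getConverter above, a null nested value is converted to an attribute with NUL set rather than being passed to the delegate rule. A hedged standalone sketch of that behaviour using the v2 AttributeValue builder; the class and method names here are invented for the example:

    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    public class NulWrappingSketch {
        // Mirrors the DelegateConverter override above: null inputs become the
        // DynamoDB NUL attribute value, non-null inputs are converted normally
        // (shown here for a plain String).
        static AttributeValue convertOrNul(String value) {
            return value == null
                   ? AttributeValue.builder().nul(true).build()
                   : AttributeValue.builder().s(value).build();
        }

        public static void main(String[] args) {
            System.out.println(convertOrNul(null).nul());  // true
            System.out.println(convertOrNul("hello").s()); // hello
        }
    }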
- */ - private class NativeType extends AbstractRule { - private NativeType(boolean supported) { - super(DynamoDbAttributeType.NULL, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.supported && type.is(AttributeValue.class); - } - - @Override - public DynamoDbTypeConverter newConverter(ConvertibleType type) { - return joinAll(type.typeConverter()); - } - - @Override - public AttributeValue get(AttributeValue o) { - return o; - } - - @Override - public void set(AttributeValue value, AttributeValue o) { - ImmutableObjectUtils.setObjectMember(value, "s", o.s()); - ImmutableObjectUtils.setObjectMember(value, "n", o.n()); - ImmutableObjectUtils.setObjectMember(value, "b", o.b()); - ImmutableObjectUtils.setObjectMember(value, "ss", o.ss()); - ImmutableObjectUtils.setObjectMember(value, "ns", o.ns()); - ImmutableObjectUtils.setObjectMember(value, "bs", o.bs()); - ImmutableObjectUtils.setObjectMember(value, "bool", o.bool()); - ImmutableObjectUtils.setObjectMember(value, "l", o.l()); - ImmutableObjectUtils.setObjectMember(value, "m", o.m()); - ImmutableObjectUtils.setObjectMember(value, "nul", o.nul()); - } - } - - /** - * {@code S} conversion - */ - private class StringScalar extends AbstractRule { - private StringScalar(boolean supported) { - super(DynamoDbAttributeType.S, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && (type.attributeType() != null || type.is(S)); - } - - @Override - public DynamoDbTypeConverter newConverter(ConvertibleType type) { - return joinAll(getConverter(String.class, type), type.typeConverter()); - } - - @Override - public String get(AttributeValue value) { - return value.s(); - } - - @Override - public void set(AttributeValue value, String o) { - ImmutableObjectUtils.setObjectMember(value, "s", o); - } - - @Override - public AttributeValue convert(String o) { - return o.length() == 0 ? null : super.convert(o); - } - } - - /** - * {@code N} conversion - */ - private class NumberScalar extends AbstractRule { - private NumberScalar(boolean supported) { - super(DynamoDbAttributeType.N, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && (type.attributeType() != null || type.is(N)); - } - - @Override - public DynamoDbTypeConverter newConverter(ConvertibleType type) { - return joinAll(getConverter(String.class, type), type.typeConverter()); - } - - @Override - public String get(AttributeValue value) { - return value.n(); - } - - @Override - public void set(AttributeValue value, String o) { - ImmutableObjectUtils.setObjectMember(value, "n", o); - //value.setN(o); - } - } - - /** - * {@code B} conversion - */ - private class BinaryScalar extends AbstractRule { - private BinaryScalar(boolean supported) { - super(DynamoDbAttributeType.B, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && (type.attributeType() != null || type.is(B)); - } - - @Override - public DynamoDbTypeConverter newConverter(ConvertibleType type) { - return joinAll(getConverter(ByteBuffer.class, type), type.typeConverter()); - } - - @Override - public ByteBuffer get(AttributeValue value) { - return value.b() == null ? 
null : value.b().asByteBuffer(); - } - - @Override - public void set(AttributeValue value, ByteBuffer o) { - ImmutableObjectUtils.setObjectMember(value, "b", SdkBytes.fromByteBuffer(o)); - //value.setB(o); - } - } - - /** - * {@code SS} conversion - */ - private class StringScalarSet extends AbstractRule, Collection> { - private StringScalarSet(boolean supported) { - super(DynamoDbAttributeType.SS, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && (type.attributeType() != null || type.is(S, SET)); - } - - @Override - public DynamoDbTypeConverter> newConverter(ConvertibleType> type) { - return joinAll(SET.join(getConverter(String.class, type.param(0))), type.>typeConverter()); - } - - @Override - public List get(AttributeValue value) { - return value.ss(); - } - - @Override - public void set(AttributeValue value, List o) { - ImmutableObjectUtils.setObjectMember(value, "ss", o); - //value.setSS(o); - } - } - - /** - * {@code NS} conversion - */ - private class NumberScalarSet extends AbstractRule, Collection> { - private NumberScalarSet(boolean supported) { - super(DynamoDbAttributeType.NS, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && (type.attributeType() != null || type.is(N, SET)); - } - - @Override - public DynamoDbTypeConverter> newConverter(ConvertibleType> type) { - return joinAll(SET.join(getConverter(String.class, type.param(0))), type.>typeConverter()); - } - - @Override - public List get(AttributeValue value) { - return value.ns(); - } - - @Override - public void set(AttributeValue value, List o) { - ImmutableObjectUtils.setObjectMember(value, "ns", o); - //value.setNS(o); - } - } - - /** - * {@code BS} conversion - */ - private class BinaryScalarSet extends AbstractRule, Collection> { - private BinaryScalarSet(boolean supported) { - super(DynamoDbAttributeType.BS, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && (type.attributeType() != null || type.is(B, SET)); - } - - @Override - public DynamoDbTypeConverter> newConverter(ConvertibleType> type) { - return joinAll(SET.join(getConverter(ByteBuffer.class, type.param(0))), type.typeConverter()); - } - - @Override - public List get(AttributeValue value) { - return Optional.ofNullable(value.bs()) - .map(bs -> bs.stream() - .map(SdkBytes::asByteBuffer) - .collect(toList())) - .orElse(null); - } - - @Override - public void set(AttributeValue value, List o) { - ImmutableObjectUtils.setObjectMember(value, "bs", o.stream().map(SdkBytes::fromByteBuffer).collect(toList())); - //value.setBS(o); - } - } - - /** - * {@code SS} conversion - */ - private class ObjectStringSet extends StringScalarSet { - private ObjectStringSet(boolean supported) { - super(supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return type.attributeType() == null && super.supported && type.is(SET); - } - - @Override - public DynamoDbTypeConverter> newConverter(ConvertibleType> type) { - log.warn("Marshaling a set of non-String objects to a DynamoDB " - + "StringSet. You won't be able to read these objects back " - + "out of DynamoDB unless you REALLY know what you're doing: " - + "it's probably a bug. 
If you DO know what you're doing feel" - + "free to ignore this warning, but consider using a custom " - + "marshaler for this instead."); - return joinAll(SET.join(scalars.getConverter(String.class, DEFAULT.type())), type.typeConverter()); - } - } - - /** - * Native boolean conversion. - */ - private class NativeBool extends AbstractRule { - private NativeBool(boolean supported) { - super(DynamoDbAttributeType.BOOL, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && type.is(BOOLEAN); - } - - @Override - public DynamoDbTypeConverter newConverter(ConvertibleType type) { - return joinAll(getConverter(Boolean.class, type), type.typeConverter()); - } - - @Override - public Boolean get(AttributeValue o) { - return o.bool(); - } - - @Override - public void set(AttributeValue o, Boolean value) { - ImmutableObjectUtils.setObjectMember(o, "bool", value); - //o.setBOOL(value); - } - - @Override - public Boolean unconvert(AttributeValue o) { - if (o.bool() == null && o.n() != null) { - return BOOLEAN.convert(o.n()); - } - return super.unconvert(o); - } - } - - /** - * Native boolean conversion. - */ - private class V2CompatibleBool extends AbstractRule { - private V2CompatibleBool(boolean supported) { - super(DynamoDbAttributeType.N, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && type.is(BOOLEAN); - } - - @Override - public DynamoDbTypeConverter newConverter(ConvertibleType type) { - return joinAll(getConverter(String.class, type), type.typeConverter()); - } - - /** - * For V2 Compatible schema we support loading booleans from a numeric attribute value (0/1) or the native boolean - * type. - */ - @Override - public String get(AttributeValue o) { - if (o.bool() != null) { - // Handle native bools, transform to expected numeric representation. - return o.bool() ? "1" : "0"; - } - return o.n(); - } - - /** - * For the V2 compatible schema we save as a numeric attribute value unless overridden by {@link - * DynamoDbNativeBoolean} or {@link DynamoDbTyped}. - */ - @Override - public void set(AttributeValue o, String value) { - ImmutableObjectUtils.setObjectMember(o, "n", value); - //o.setN(value); - } - } - - /** - * Any {@link Set} conversions. - */ - private class ObjectSet extends AbstractRule, Collection> { - private ObjectSet(boolean supported) { - super(DynamoDbAttributeType.L, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && type.param(0) != null && type.is(SET); - } - - @Override - public DynamoDbTypeConverter> newConverter(ConvertibleType> type) { - return joinAll(SET.join(getConverter(type.param(0))), type.>typeConverter()); - } - - @Override - public List get(AttributeValue value) { - return value.l(); - } - - @Override - public void set(AttributeValue value, List o) { - ImmutableObjectUtils.setObjectMember(value, "l", o); - //value.setL(o); - } - } - - /** - * Native bool {@link Set} conversions. 
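The V2-compatible boolean handling above writes booleans as the numeric strings "1"/"0" and reads back either that numeric form or a native BOOL value. A short hedged sketch of that round trip; the helper names are invented for illustration:

    import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

    public class V2CompatibleBoolSketch {
        // Writes the boolean the way the V2-compatible schema does: as "1"/"0" in N.
        static AttributeValue toAttribute(boolean value) {
            return AttributeValue.builder().n(value ? "1" : "0").build();
        }

        // Reads back either a native BOOL or the numeric form, mirroring
        // V2CompatibleBool.get above.
        static boolean fromAttribute(AttributeValue value) {
            if (value.bool() != null) {
                return value.bool();
            }
            return "1".equals(value.n());
        }

        public static void main(String[] args) {
            AttributeValue stored = toAttribute(true);
            System.out.println(stored.n());            // 1
            System.out.println(fromAttribute(stored)); // true
            System.out.println(fromAttribute(AttributeValue.builder().bool(false).build())); // false
        }
    }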
- */ - private class NativeBoolSet extends ObjectSet { - private NativeBoolSet(boolean supported) { - super(supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && type.param(0).is(BOOLEAN); - } - - @Override - public List unconvert(AttributeValue o) { - if (o.l() == null && o.ns() != null) { - return LIST.convert(o.ns(), new NativeBool(true).join(scalars.getConverter(Boolean.class, String.class))); - } - return super.unconvert(o); - } - } - - /** - * Any {@link List} conversions. - */ - private class ObjectList extends AbstractRule, List> { - private ObjectList(boolean supported) { - super(DynamoDbAttributeType.L, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && type.param(0) != null && type.is(LIST); - } - - @Override - public DynamoDbTypeConverter> newConverter(ConvertibleType> type) { - return joinAll(LIST.join(getConverter(type.param(0))), type.>typeConverter()); - } - - @Override - public List get(AttributeValue value) { - return value.l(); - } - - @Override - public void set(AttributeValue value, List o) { - ImmutableObjectUtils.setObjectMember(value, "l", o); - //value.setL(o); - } - } - - /** - * Any {@link Map} conversions. - */ - private class ObjectMap extends AbstractRule, Map> { - private ObjectMap(boolean supported) { - super(DynamoDbAttributeType.M, supported); - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return super.isAssignableFrom(type) && type.param(1) != null && type.is(MAP) && type.param(0).is(STRING); - } - - @Override - public DynamoDbTypeConverter> newConverter(ConvertibleType> type) { - return joinAll( - MAP.join(getConverter(type.param(1))), - type.>typeConverter() - ); - } - - @Override - public Map get(AttributeValue value) { - return value.m(); - } - - @Override - public void set(AttributeValue value, Map o) { - ImmutableObjectUtils.setObjectMember(value, "m", o); - //value.setM(o); - } - } - - /** - * All object conversions. - */ - private class ObjectDocumentMap extends AbstractRule, T> { - private final DynamoDbMapperModelFactory models; - private final DynamoDbMapperConfig config; - - private ObjectDocumentMap(boolean supported, DynamoDbMapperModelFactory models, DynamoDbMapperConfig config) { - super(DynamoDbAttributeType.M, supported); - this.models = models; - this.config = config; - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return type.attributeType() == getAttributeType() && super.supported && !type.is(MAP); - } - - @Override - public DynamoDbTypeConverter newConverter(final ConvertibleType type) { - return joinAll(new DynamoDbTypeConverter, T>() { - public Map convert(final T o) { - return models.getTableFactory(config).getTable(type.targetType()).convert(o); - } - - public T unconvert(final Map o) { - return models.getTableFactory(config).getTable(type.targetType()).unconvert(o); - } - }, type.>typeConverter()); - } - - @Override - public Map get(AttributeValue value) { - return value.m(); - } - - @Override - public void set(AttributeValue value, Map o) { - ImmutableObjectUtils.setObjectMember(value, "m", o); - //value.setM(o); - } - } - - /** - * Default conversion when no match could be determined. 
- */ - private class NotSupported extends AbstractRule { - private NotSupported() { - super(DynamoDbAttributeType.NULL, false); - } - - @Override - public DynamoDbTypeConverter newConverter(ConvertibleType type) { - return this; - } - - @Override - public T get(AttributeValue value) { - throw new DynamoDbMappingException("not supported; requires @DynamoDBTyped or @DynamoDBTypeConverted"); - } - - @Override - public void set(AttributeValue value, T o) { - throw new DynamoDbMappingException("not supported; requires @DynamoDBTyped or @DynamoDBTypeConverted"); - } - } - } - - /** - * Basic attribute value conversion functions. - */ - private abstract static class AbstractRule extends AbstractConverter - implements Reflect, Rule { - protected final DynamoDbAttributeType attributeType; - protected final boolean supported; - - protected AbstractRule(DynamoDbAttributeType attributeType, boolean supported) { - this.attributeType = attributeType; - this.supported = supported; - } - - @Override - public boolean isAssignableFrom(ConvertibleType type) { - return type.attributeType() == null ? supported : type.attributeType() == attributeType; - } - - @Override - public DynamoDbAttributeType getAttributeType() { - return this.attributeType; - } - - @Override - public AttributeValue convert(final S o) { - final AttributeValue value = AttributeValue.builder().build(); - set(value, o); - return value; - } - - @Override - public S unconvert(final AttributeValue o) { - final S value = get(o); - if (value == null && o.nul() == null) { - throw new DynamoDbMappingException("expected " + attributeType + " in value " + o); - } - return value; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesOverrideTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesOverrideTest.java deleted file mode 100644 index 8358e9404815..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesOverrideTest.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class StandardModelFactoriesOverrideTest extends StandardModelFactoriesV2Test { - - private final DynamoDbMapperConfig config = new DynamoDbMapperConfig.Builder() - .withTypeConverterFactory(DynamoDbMapperConfig.DEFAULT.getTypeConverterFactory()) - .withConversionSchema(ConversionSchemas.v2Builder("V2Override").build()) - .build(); - - private final DynamoDbMapperModelFactory factory = StandardModelFactories.of(S3Link.Factory.of(null)); - private final DynamoDbMapperModelFactory.TableFactory models = factory.getTableFactory(config); - - @Override - protected AttributeValue convert(Class clazz, Method getter, Object value) { - final StandardAnnotationMaps.FieldMap map = StandardAnnotationMaps.of(getter, null); - return models.getTable(clazz).field(map.attributeName()).convert(value); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesTest.java deleted file mode 100644 index 88da788e305e..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesTest.java +++ /dev/null @@ -1,2053 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Calendar; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.TimeZone; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperFieldModel.DynamoDbAttributeType; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.services.dynamodb.pojos.AutoKeyAndVal; -import software.amazon.awssdk.services.dynamodb.pojos.Currency; -import software.amazon.awssdk.services.dynamodb.pojos.DateRange; -import software.amazon.awssdk.services.dynamodb.pojos.KeyAndVal; - -/** - * Unit tests for {@link DynamoDbMapperModelFactory.TableFactory}. 
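The tests below build table models from beans whose annotated getters define the keys, typically via anonymous AutoKeyAndVal subclasses. For readers who prefer a plain bean, a hedged equivalent of the hash/range key shape exercised first; the class name is invented, while "hk" and "rk" are the stored attribute names used in the test:

    import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey;
    import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbRangeKey;

    // Hypothetical bean using the same key annotations the tests apply to
    // anonymous AutoKeyAndVal subclasses.
    public class KeyedItem {
        private String key;
        private String val;

        @DynamoDbHashKey(attributeName = "hk")
        public String getKey() { return key; }
        public void setKey(String key) { this.key = key; }

        @DynamoDbRangeKey(attributeName = "rk")
        public String getVal() { return val; }
        public void setVal(String val) { this.val = val; }
    }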
- */ -public class StandardModelFactoriesTest { - - private static final DynamoDbMapperModelFactory factory = StandardModelFactories.of(S3Link.Factory.of(null)); - private static final DynamoDbMapperModelFactory.TableFactory models = factory.getTableFactory(DynamoDbMapperConfig.DEFAULT); - - @SuppressWarnings("unchecked") - private static DynamoDbMapperTableModel getTable(T object) { - return models.getTable((Class) object.getClass()); - } - - /** - * Assert that the field key properties are correct. - */ - private static void assertFieldKeyType(KeyType keyType, DynamoDbMapperFieldModel field, - DynamoDbMapperTableModel model) { - assertEquals(keyType, field.keyType()); - if (keyType != null) { - if (keyType == KeyType.HASH) { - assertEquals(field, model.hashKey()); - } else if (keyType == KeyType.RANGE) { - assertEquals(field, model.rangeKeyIfExists()); - assertEquals(field, model.rangeKey()); - } - } - } - - /** - * Assert that the field contains the LSIs. - */ - private static void assertFieldGsiNames(List names, KeyType keyType, DynamoDbMapperFieldModel field, - DynamoDbMapperTableModel model) { - assertEquals(names == null ? 0 : names.size(), field.globalSecondaryIndexNames(keyType).size()); - assertEquals(true, field.indexed()); - if (names != null) { - for (final String name : names) { - assertEquals(true, field.globalSecondaryIndexNames(keyType).contains(name)); - assertEquals(true, model.globalSecondaryIndex(name) != null); - assertEquals(true, !model.globalSecondaryIndexes().isEmpty()); - } - } - } - - /** - * Assert that the field contains the LSIs. - */ - private static void assertFieldLsiNames(List names, DynamoDbMapperFieldModel field, - DynamoDbMapperTableModel model) { - assertEquals(names == null ? 0 : names.size(), field.localSecondaryIndexNames().size()); - assertEquals(true, field.indexed()); - if (names != null) { - for (final String name : names) { - assertEquals(true, field.localSecondaryIndexNames().contains(name)); - assertEquals(true, model.localSecondaryIndex(name) != null); - assertEquals(true, !model.localSecondaryIndexes().isEmpty()); - } - } - } - - /** - * Test mappings. - */ - @Test - public void testHashAndRangeKey() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbHashKey(attributeName = "hk") - public String getKey() { - return super.getKey(); - } - - @DynamoDbRangeKey(attributeName = "rk") - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - assertFieldKeyType(KeyType.HASH, model.field("hk"), model); - assertFieldKeyType(KeyType.RANGE, model.field("rk"), model); - } - - /** - * Test mappings. - */ - @Test(expected = DynamoDbMappingException.class) - public void testHashAndRangeKeyConflict() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbHashKey - @DynamoDbRangeKey - public String getKey() { - return super.getKey(); - } - }; - getTable(obj); - } - - /** - * Test mappings. - */ - @Test - public void testNamed() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbNamed("value") - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - assertEquals(2, model.fields().size()); - assertNotNull(model.field("key")); - assertNotNull(model.field("value")); - } - - /** - * Test mappings. 
- */ - @Test - public void testAttributeTypeAsNumber() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbTyped(DynamoDbAttributeType.N) - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAttributeType.N, val.attributeType()); - } - - @Test - public void testAttributeTypeAsAttributeValueNumber() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbTyped(DynamoDbAttributeType.N) - public AttributeValue getVal() { - return super.getVal(); - } - - public void setVal(final AttributeValue val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAttributeType.N, val.attributeType()); - assertEquals("123", val.convert(AttributeValue.builder().n("123").build()).n()); - assertEquals("123", val.unconvert(AttributeValue.builder().n("123").build()).n()); - } - - @Test - public void testAttributeTypeAsAttributeValueMap() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbTyped(DynamoDbAttributeType.M) - public AttributeValue getVal() { - return super.getVal(); - } - - public void setVal(final AttributeValue val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAttributeType.M, val.attributeType()); - - Map map = new HashMap(); - map.put("A", AttributeValue.builder().n("123").build()); - map = Collections.unmodifiableMap(map); - - assertEquals("123", val.convert(AttributeValue.builder().m(map).build()).m().get("A").n()); - assertEquals("123", val.unconvert(AttributeValue.builder().m(map).build()).m().get("A").n()); - } - - /** - * Test mappings. - */ - @Test - public void testScalarAttributeStringTimeZone() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbHashKey - public String getKey() { - return super.getKey(); - } - - @DynamoDbScalarAttribute(type = ScalarAttributeType.S) - public TimeZone getVal() { - return super.getVal(); - } - - public void setVal(final TimeZone val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAttributeType.S, val.attributeType()); - assertEquals("America/New_York", val.convert(TimeZone.getTimeZone("America/New_York")).s()); - assertEquals("America/New_York", val.unconvert(AttributeValue.builder().s("America/New_York").build()).getID()); - } - - /** - * Test mappings. - */ - @Test - public void testScalarAttributeStringLocale() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbHashKey - public String getKey() { - return super.getKey(); - } - - @DynamoDbScalarAttribute(type = ScalarAttributeType.S) - public Locale getVal() { - return super.getVal(); - } - - public void setVal(final Locale val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAttributeType.S, val.attributeType()); - assertEquals("en-CA", val.convert(new Locale("en", "CA")).s()); - assertEquals("en-CA", val.unconvert(AttributeValue.builder().s("en-CA").build()).toString().replaceAll("_", "-")); - } - - /** - * Test mappings. 
- */ - @Test - public void testScalarAttributeBinaryUuid() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbHashKey - public String getKey() { - return super.getKey(); - } - - @DynamoDbScalarAttribute(type = ScalarAttributeType.B) - public UUID getVal() { - return super.getVal(); - } - - public void setVal(final UUID val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - assertEquals(DynamoDbAttributeType.B, model.field("val").attributeType()); - final UUID val = UUID.randomUUID(); - final AttributeValue converted = model.field("val").convert(val); - assertNotNull(converted.b()); - assertEquals(val, model.field("val").unconvert(converted)); - } - - @Test - public void testScalarAttributeAttributeName() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbHashKey - public String getKey() { - return super.getKey(); - } - - @DynamoDbScalarAttribute(attributeName = "value", type = ScalarAttributeType.S) - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = models.getTable((Class) obj.getClass()); - final DynamoDbMapperFieldModel val = model.field("value"); - assertEquals(DynamoDbAttributeType.S, val.attributeType()); - } - - /** - * Test mappings. - */ - @Test - public void testIgnore() { - final Object obj = new AutoKeyAndVal() { - private String ignore; - - @DynamoDbAttribute(attributeName = "value") - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - - @DynamoDbIgnore - @DynamoDbAttribute(attributeName = "ignore") - public String getIgnore() { - return this.ignore; - } - - public void setIgnore(final String ignore) { - this.ignore = ignore; - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - assertEquals(2, model.fields().size()); - assertNotNull(model.field("key")); - assertNotNull(model.field("value")); - } - - /** - * Test mappings. - */ - @Test - public void testConvertedBool() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbConvertedBool(DynamoDbConvertedBool.Format.Y_N) - public Boolean getVal() { - return super.getVal(); - } - - public void setVal(final Boolean val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAttributeType.S, val.attributeType()); - assertEquals("Y", val.convert(Boolean.TRUE).s()); - assertEquals(Boolean.TRUE, val.unconvert(AttributeValue.builder().s("Y").build())); - assertEquals("N", val.convert(Boolean.FALSE).s()); - assertEquals(Boolean.FALSE, val.unconvert(AttributeValue.builder().s("N").build())); - assertEquals(null, val.convert(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedHashKeyString() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAttribute - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel key = model.field("key"); - assertFieldKeyType(KeyType.HASH, key, model); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, key.getGenerateStrategy()); - assertNotNull(key.generate(null)); - assertNotNull(key.generate(UUID.randomUUID().toString())); - } - - /** - * Test mappings. 
- */ - @Test - public void testAutoGeneratedRangeKeyUuid() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbRangeKey - @DynamoDbAutoGeneratedKey - public UUID getVal() { - return super.getVal(); - } - - public void setVal(final UUID val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertFieldKeyType(KeyType.RANGE, val, model); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertNotNull(val.generate(null)); - assertNotNull(val.generate(UUID.randomUUID())); - } - - /** - * Test mappings. - */ - @Test(expected = DynamoDbMappingException.class) - public void testAutoGeneratedConflict() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbHashKey - @DynamoDbAutoGeneratedKey - @DynamoDbVersionAttribute - public String getKey() { - return super.getKey(); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - } - - /** - * Test mappings. - */ - @Test(expected = DynamoDbMappingException.class) - public void testAutoGeneratedVersionUuid() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbVersionAttribute - public UUID getVal() { - return super.getVal(); - } - - public void setVal(final UUID val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - val.generate(null); //<- should fail - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedVersionBigInteger() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbVersionAttribute - public BigInteger getVal() { - return super.getVal(); - } - - public void setVal(final BigInteger val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(true, val.versioned()); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, val.getGenerateStrategy()); - assertEquals(BigInteger.ONE, val.generate(null)); - assertEquals(BigInteger.valueOf((int) 2), val.generate(BigInteger.ONE)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedVersionByte() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbVersionAttribute - public Byte getVal() { - return super.getVal(); - } - - public void setVal(final Byte val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(true, val.versioned()); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, val.getGenerateStrategy()); - assertEquals(Byte.valueOf((byte) 1), val.generate(null)); - assertEquals(Byte.valueOf((byte) 2), val.generate(Byte.valueOf((byte) 1))); - } - - /** - * Test mappings. 
- */ - @Test - public void testAutoGeneratedVersionBytePrimitive() { - final Object obj = new AutoKeyAndVal() { - private byte rvn; - - @DynamoDbAttribute - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - - @DynamoDbVersionAttribute - public byte getRvn() { - return this.rvn; - } - - public void setRvn(final byte rvn) { - this.rvn = rvn; - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel rvn = model.field("rvn"); - assertEquals(true, rvn.versioned()); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, rvn.getGenerateStrategy()); - assertEquals(Byte.valueOf((byte) 1), rvn.generate(null)); - assertEquals(Byte.valueOf((byte) 2), rvn.generate(Byte.valueOf((byte) 1))); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedVersionInteger() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbVersionAttribute - public Integer getVal() { - return super.getVal(); - } - - public void setVal(final Integer val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(true, val.versioned()); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, val.getGenerateStrategy()); - assertEquals(Integer.valueOf((int) 1), val.generate(null)); - assertEquals(Integer.valueOf((int) 2), val.generate(Integer.valueOf((int) 1))); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedVersionIntegerPrimitive() { - final Object obj = new AutoKeyAndVal() { - private int rvn; - - @DynamoDbAttribute - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - - @DynamoDbVersionAttribute - public int getRvn() { - return this.rvn; - } - - public void setRvn(final int rvn) { - this.rvn = rvn; - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel rvn = model.field("rvn"); - assertEquals(true, rvn.versioned()); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, rvn.getGenerateStrategy()); - assertEquals(Integer.valueOf((int) 1), rvn.generate(null)); - assertEquals(Integer.valueOf((int) 2), rvn.generate(Integer.valueOf((int) 1))); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedVersionLong() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbVersionAttribute - public Long getVal() { - return super.getVal(); - } - - public void setVal(final Long val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(true, val.versioned()); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, val.getGenerateStrategy()); - assertEquals(Long.valueOf((long) 1), val.generate(null)); - assertEquals(Long.valueOf((long) 2), val.generate(Long.valueOf((long) 1))); - } - - /** - * Test mappings. 
- */ - @Test - public void testAutoGeneratedVersionLongPrimitive() { - final Object obj = new AutoKeyAndVal() { - private long rvn; - - @DynamoDbAttribute - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - - @DynamoDbVersionAttribute - public long getRvn() { - return this.rvn; - } - - public void setRvn(final long rvn) { - this.rvn = rvn; - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel rvn = model.field("rvn"); - assertEquals(true, rvn.versioned()); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, rvn.getGenerateStrategy()); - assertEquals(Long.valueOf((long) 1), rvn.generate(null)); - assertEquals(Long.valueOf((long) 2), rvn.generate(Long.valueOf((long) 1))); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedVersionShort() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbVersionAttribute - public Short getVal() { - return super.getVal(); - } - - public void setVal(final Short val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(true, val.versioned()); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, val.getGenerateStrategy()); - assertEquals(Short.valueOf((short) 1), val.generate(null)); - assertEquals(Short.valueOf((short) 2), val.generate(Short.valueOf((short) 1))); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedVersionShortPrimitive() { - final Object obj = new AutoKeyAndVal() { - private short rvn; - - @DynamoDbAttribute - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - - @DynamoDbVersionAttribute - public short getRvn() { - return this.rvn; - } - - public void setRvn(final short rvn) { - this.rvn = rvn; - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel rvn = model.field("rvn"); - assertEquals(true, rvn.versioned()); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, rvn.getGenerateStrategy()); - assertEquals(Short.valueOf((short) 1), rvn.generate(null)); - assertEquals(Short.valueOf((short) 2), rvn.generate(Short.valueOf((short) 1))); - } - - /** - * Test mappings. - */ - @Test(expected = DynamoDbMappingException.class) - public void testAutoGeneratedTimestampUuid() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedTimestamp - public UUID getVal() { - return super.getVal(); - } - - public void setVal(final UUID val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedTimestampCalendar() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedTimestamp - public Calendar getVal() { - return super.getVal(); - } - - public void setVal(final Calendar val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, val.getGenerateStrategy()); - assertNotNull(val.generate(null)); - assertNotNull(val.generate(Calendar.getInstance())); - } - - /** - * Test mappings.
- */ - @Test - public void testAutoGeneratedTimestampDateKey() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbRangeKey - @DynamoDbAutoGeneratedTimestamp(strategy = DynamoDbAutoGenerateStrategy.CREATE) - public Date getVal() { - return super.getVal(); - } - - public void setVal(final Date val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertFieldKeyType(KeyType.RANGE, val, model); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertNotNull(val.generate(null)); - assertNotNull(val.generate(new Date())); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedTimestampDateVal() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedTimestamp - public Date getVal() { - return super.getVal(); - } - - public void setVal(final Date val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, val.getGenerateStrategy()); - assertNotNull(val.generate(null)); - assertNotNull(val.generate(new Date())); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedTimestampLong() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedTimestamp - public Long getVal() { - return super.getVal(); - } - - public void setVal(final Long val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.ALWAYS, val.getGenerateStrategy()); - assertNotNull(val.generate(null)); - assertNotNull(val.generate(System.currentTimeMillis())); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultByteBuffer() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("default-val") - public ByteBuffer getVal() { - return super.getVal(); - } - - public void setVal(final ByteBuffer val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertNotNull(val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultBigDecimal() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("1234.5") - public BigDecimal getVal() { - return super.getVal(); - } - - public void setVal(final BigDecimal val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(BigDecimal.valueOf(1234.5D), val.generate(null)); - } - - /** - * Test mappings. 
- */ - @Test - public void testAutoGeneratedDefaultBigInteger() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("1234") - public BigInteger getVal() { - return super.getVal(); - } - - public void setVal(final BigInteger val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(BigInteger.valueOf(1234), val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultBoolean_true() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("true") - public Boolean getVal() { - return super.getVal(); - } - - public void setVal(final Boolean val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Boolean.TRUE, val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultBoolean_0() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("0") - public Boolean getVal() { - return super.getVal(); - } - - public void setVal(final Boolean val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Boolean.FALSE, val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultBoolean_1() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("1") - public Boolean getVal() { - return super.getVal(); - } - - public void setVal(final Boolean val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Boolean.TRUE, val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultBoolean_y() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("y") - public Boolean getVal() { - return super.getVal(); - } - - public void setVal(final Boolean val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Boolean.TRUE, val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultBoolean_Y() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("Y") - public Boolean getVal() { - return super.getVal(); - } - - public void setVal(final Boolean val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Boolean.TRUE, val.generate(null)); - } - - /** - * Test mappings. 
- */ - @Test - public void testAutoGeneratedDefaultByte() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("1") - public Byte getVal() { - return super.getVal(); - } - - public void setVal(final Byte val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Byte.valueOf((byte) 1), val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultCharacter() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("A") - public Character getVal() { - return super.getVal(); - } - - public void setVal(final Character val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Character.valueOf('A'), val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultCurrency() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("CAD") - public java.util.Currency getVal() { - return super.getVal(); - } - - public void setVal(final java.util.Currency val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(java.util.Currency.getInstance("CAD"), val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultDouble() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("1234.5") - public Double getVal() { - return super.getVal(); - } - - public void setVal(final Double val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Double.valueOf(1234.5D), val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultEnum() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbTypeConvertedEnum - @DynamoDbAutoGeneratedDefault("SECONDS") - public TimeUnit getVal() { - return super.getVal(); - } - - public void setVal(final TimeUnit val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(TimeUnit.SECONDS, val.generate(null)); - assertEquals(TimeUnit.SECONDS, val.generate(TimeUnit.MILLISECONDS)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultFloat() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("1234.5") - public Float getVal() { - return super.getVal(); - } - - public void setVal(final Float val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Float.valueOf(1234.5F), val.generate(null)); - } - - /** - * Test mappings. 
- */ - @Test - public void testAutoGeneratedDefaultInteger() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("1234") - public Integer getVal() { - return super.getVal(); - } - - public void setVal(final Integer val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Integer.valueOf((int) 1234), val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultLong() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("1234") - public Long getVal() { - return super.getVal(); - } - - public void setVal(final Long val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Long.valueOf((long) 1234), val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultShort() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("1234") - public Short getVal() { - return super.getVal(); - } - - public void setVal(final Short val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(Short.valueOf((short) 1234), val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultString() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("default-val") - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals("default-val", val.generate(null)); - assertEquals("default-val", val.generate("not-default")); - } - - /** - * Test mappings. - */ - @Test - public void testAutoGeneratedDefaultTimeZone() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("America/New_York") - public TimeZone getVal() { - return super.getVal(); - } - - public void setVal(final TimeZone val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(TimeZone.getTimeZone("America/New_York"), val.generate(null)); - assertEquals(TimeZone.getTimeZone("America/New_York"), val.generate(TimeZone.getTimeZone("America/Los_Angeles"))); - } - - /** - * Test mappings. 
- */ - @Test - public void testAutoGeneratedDefaultUuid() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbAutoGeneratedDefault("12345678-1234-1234-1234-123456789012") - public UUID getVal() { - return super.getVal(); - } - - public void setVal(final UUID val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel val = model.field("val"); - assertEquals(DynamoDbAutoGenerateStrategy.CREATE, val.getGenerateStrategy()); - assertEquals(UUID.fromString("12345678-1234-1234-1234-123456789012"), val.generate(null)); - } - - /** - * Test mappings. - */ - @Test - public void testIndexHashKeyGlobalSecondaryIndexName() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbIndexHashKey(attributeName = "gsi_hk", globalSecondaryIndexName = "gsi") - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel gsi_hk = model.field("gsi_hk"); - assertFieldGsiNames(Arrays.asList("gsi"), KeyType.HASH, gsi_hk, model); - assertFieldGsiNames(null, KeyType.RANGE, gsi_hk, model); - assertFieldLsiNames(null, gsi_hk, model); - } - - /** - * Test mappings. - */ - @Test - public void testIndexHashKeyGlobalSecondaryIndexNames() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbIndexHashKey(attributeName = "gsi_hk", globalSecondaryIndexNames = "gsi") - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel gsi_hk = model.field("gsi_hk"); - assertFieldGsiNames(Arrays.asList("gsi"), KeyType.HASH, gsi_hk, model); - assertFieldGsiNames(null, KeyType.RANGE, gsi_hk, model); - assertFieldLsiNames(null, gsi_hk, model); - } - - /** - * Test mappings. - */ - @Test - public void testIndexRangeKeyGlobalSecondaryIndexName() { - final Object obj = new AutoKeyAndVal() { - private String gsi; - - @DynamoDbIndexHashKey(attributeName = "gsi_hk", globalSecondaryIndexName = "gsi") - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - - @DynamoDbIndexRangeKey(attributeName = "gsi_rk", globalSecondaryIndexName = "gsi") - public String getGsi() { - return this.gsi; - } - - public void setGsi(final String gsi) { - this.gsi = gsi; - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel gsi_hk = model.field("gsi_hk"); - assertFieldGsiNames(Arrays.asList("gsi"), KeyType.HASH, gsi_hk, model); - assertFieldGsiNames(null, KeyType.RANGE, gsi_hk, model); - assertFieldLsiNames(null, gsi_hk, model); - final DynamoDbMapperFieldModel gsi_rk = model.field("gsi_rk"); - assertFieldGsiNames(null, KeyType.HASH, gsi_rk, model); - assertFieldGsiNames(Arrays.asList("gsi"), KeyType.RANGE, gsi_rk, model); - assertFieldLsiNames(null, gsi_rk, model); - } - - /** - * Test mappings. 
- */ - @Test - public void testIndexRangeKeyGlobalSecondaryIndexNames() { - final Object obj = new AutoKeyAndVal() { - private String gsi; - - @DynamoDbIndexHashKey(attributeName = "gsi_hk", globalSecondaryIndexName = "gsi") - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - - @DynamoDbIndexRangeKey(attributeName = "gsi_rk", globalSecondaryIndexNames = "gsi") - public String getGsi() { - return this.gsi; - } - - public void setGsi(final String gsi) { - this.gsi = gsi; - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel gsi_hk = model.field("gsi_hk"); - assertFieldGsiNames(Arrays.asList("gsi"), KeyType.HASH, gsi_hk, model); - assertFieldGsiNames(null, KeyType.RANGE, gsi_hk, model); - assertFieldLsiNames(null, gsi_hk, model); - final DynamoDbMapperFieldModel gsi_rk = model.field("gsi_rk"); - assertFieldGsiNames(null, KeyType.HASH, gsi_rk, model); - assertFieldGsiNames(Arrays.asList("gsi"), KeyType.RANGE, gsi_rk, model); - assertFieldLsiNames(null, gsi_rk, model); - } - - /** - * Test mappings. - */ - @Test - public void testIndexRangeKeyLocalSecondaryIndexName() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbIndexRangeKey(attributeName = "lsi_rk", localSecondaryIndexName = "lsi") - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel lsi_rk = model.field("lsi_rk"); - assertFieldLsiNames(Arrays.asList("lsi"), lsi_rk, model); - } - - /** - * Test mappings. - */ - @Test - public void testIndexRangeKeyLocalSecondaryIndexNames() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbIndexRangeKey(attributeName = "lsi_rk", localSecondaryIndexNames = "lsi") - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - final DynamoDbMapperFieldModel lsi_rk = model.field("lsi_rk"); - assertFieldLsiNames(Arrays.asList("lsi"), lsi_rk, model); - } - - @Test - public void testFlattened() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "start", attributeName = "DateRangeStart"), - @DynamoDbAttribute(mappedBy = "end", attributeName = "DateRangeEnd")}) - public DateRange getVal() { - return super.getVal(); - } - - public void setVal(final DateRange val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - assertEquals(3, model.fields().size()); - assertEquals("DateRangeStart", model.field("DateRangeStart").name()); - assertEquals("DateRangeEnd", model.field("DateRangeEnd").name()); - } - - /** - * Test mappings. - */ - @Test - public void testFlattenedNotAllSpecified() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "start", attributeName = "DateRangeStart")}) - public DateRange getVal() { - return super.getVal(); - } - - public void setVal(final DateRange val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - assertEquals(2, model.fields().size()); - assertEquals("DateRangeStart", model.field("DateRangeStart").name()); - } - - /** - * Test mappings.
- */ - @Test(expected = DynamoDbMappingException.class) - public void testFlattenedInvalidMappedBy() { - final Object obj = new AutoKeyAndVal() { - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "xstart", attributeName = "DateRangeStart"), - @DynamoDbAttribute(mappedBy = "xend", attributeName = "DateRangeEnd")}) - public DateRange getVal() { - return super.getVal(); - } - - public void setVal(final DateRange val) { - super.setVal(val); - } - }; - getTable(obj); - } - - /** - * Test mappings. - */ - @Test - public void testFlattenedMultipleSameType() { - final Object obj = new AutoKeyAndVal() { - private Currency other; - - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "amount", attributeName = "firstAmount"), - @DynamoDbAttribute(mappedBy = "unit", attributeName = "firstUnit")}) - public Currency getVal() { - return super.getVal(); - } - - public void setVal(final Currency val) { - super.setVal(val); - } - - @DynamoDbFlattened(attributes = { - @DynamoDbAttribute(mappedBy = "amount", attributeName = "secondAmount"), - @DynamoDbAttribute(mappedBy = "unit", attributeName = "secondUnit")}) - public Currency getOther() { - return this.other; - } - - public void setOther(final Currency other) { - this.other = other; - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - assertEquals(5, model.fields().size()); - assertEquals("firstAmount", model.field("firstAmount").name()); - assertEquals("firstUnit", model.field("firstUnit").name()); - assertEquals("secondAmount", model.field("secondAmount").name()); - assertEquals("secondUnit", model.field("secondUnit").name()); - } - - /** - * Test mappings. - */ - @Test - public void testTableAndDocument() { - models.getTable(TableAndDocument.class); - } - - /** - * Test mappings. - */ - @Test - public void testInheritedWithNoTable() { - final Object obj = new KeyAndVal() { - @DynamoDbHashKey(attributeName = "hk") - public String getKey() { - return super.getKey(); - } - - public void setKey(String key) { - super.setKey(key); - } - - @DynamoDbAttribute(attributeName = "value") - public String getVal() { - return super.getVal(); - } - - public void setVal(String val) { - super.setVal(val); - } - }; - final DynamoDbMapperTableModel model = getTable(obj); - - final DynamoDbMapperFieldModel key = model.field("hk"); - assertNotNull(key); - assertEquals(KeyType.HASH, key.keyType()); - assertEquals(DynamoDbAttributeType.S, key.attributeType()); - - final DynamoDbMapperFieldModel val = model.field("value"); - assertNotNull(val); - assertEquals(DynamoDbAttributeType.S, val.attributeType()); - } - - /** - * Test mappings to make sure the bridge method is ruled out. - */ - @Test - public void testFindRelevantGettersWithBridgeMethod() { - final DynamoDbMapperTableModel model = models.getTable(SubClass.class); - assertEquals("only two getter should be returned", 2, model.fields().size()); - assertEquals("return type should be Integer rather than Object", DynamoDbAttributeType.N, model.field("t").attributeType()); - } - - /** - * Test mappings. - */ - @Test - public void testNonMappedInheritedProperties() { - final DynamoDbMapperTableModel model = models.getTable(NonMappedInheritedProperties.class); - assertEquals(2, model.fields().size()); - assertNotNull(model.field("doUse")); - } - - /** - * Test mappings. 
- */ - @Test - public void testInheritedProperties() { - final DynamoDbMapperTableModel model1 = models.getTable(BaseTablePojo.class); - assertEquals(3, model1.fields().size()); - assertNotNull(model1.field("hashKeyOnField")); - assertNotNull(model1.field("rangeKeyOnGetter")); - final DynamoDbMapperTableModel model2 = models.getTable(TablePojoSubclass.class); - assertEquals(4, model2.fields().size()); - assertNotNull(model2.field("hashKeyOnField")); - assertNotNull(model2.field("rangeKeyOnGetter")); - } - - /** - * Test mappings. - */ - @Test - public void testPojoWithGetterAnnotations() { - PojoAsserts.assertAll(models.getTable(PojoWithGetterAnnotations.class)); - } - - /** - * Test mappings. - */ - @Test - public void testPojoWithFieldAnnotations() { - PojoAsserts.assertAll(models.getTable(PojoWithFieldAnnotations.class)); - } - - /** - * Test mappings. - */ - @Test - public void testPojoWithMixedAnnotations() { - PojoAsserts.assertAll(models.getTable(PojoWithMixedAnnotations.class)); - } - - /** - * Pojo field assertions. - */ - private static enum PojoAsserts { - hashKey(KeyType.HASH, null), - rangeKey(KeyType.RANGE, DynamoDbAutoGenerateStrategy.CREATE), - indexHashKey(null, null), - indexRangeKey(null, null), - actualAttrName(null, null), - versionedAttr(null, DynamoDbAutoGenerateStrategy.ALWAYS), - marshallingAttr(null, null); - private final DynamoDbAutoGenerateStrategy generateStrategy; - private final KeyType keyType; - - private PojoAsserts(final KeyType keyType, final DynamoDbAutoGenerateStrategy generateStrategy) { - this.generateStrategy = generateStrategy; - this.keyType = keyType; - } - - public static void assertAll(final DynamoDbMapperTableModel model) { - for (final PojoAsserts asserts : PojoAsserts.values()) { - final DynamoDbMapperFieldModel field = model.field(asserts.name()); - assertNotNull(field); - assertFieldKeyType(asserts.keyType, field, model); - assertEquals(asserts.generateStrategy, field.getGenerateStrategy()); - assertEquals(0, field.localSecondaryIndexNames().size()); - } - assertEquals(PojoAsserts.values().length, model.fields().size()); - } - } - - @DynamoDbDocument - @DynamoDbTable(tableName = "") - public static class TableAndDocument extends AutoKeyAndVal<String> { - public String getVal() { - return super.getVal(); - } - - public void setVal(final String val) { - super.setVal(val); - } - } - - @DynamoDbTable(tableName = "") - private abstract static class SuperGenericClass<T> { - private String id; - - @DynamoDbHashKey - public final String getId() { - return this.id; - } - - public final void setId(String id) { - this.id = id; - } - - public abstract T getT(); - - public abstract void setT(T t); - } - - @DynamoDbTable(tableName = "GenericString") - private static class SubClass extends SuperGenericClass<Integer> { - private Integer t; - - @Override - public Integer getT() { - return t; - } - - @Override - public void setT(Integer t) { - this.t = t; - } - } - - @DynamoDbTable(tableName = "table") - private static class BaseTablePojo { - @DynamoDbHashKey - private String hashKeyOnField; - private String rangeKeyOnGetter; - private String attrNoAnnotation; - @DynamoDbIgnore - private String ignoredAttr; - - public String getHashKeyOnField() { - return hashKeyOnField; - } - - public void setHashKeyOnField(String hashKeyOnField) { - this.hashKeyOnField = hashKeyOnField; - } - - @DynamoDbRangeKey - public String getRangeKeyOnGetter() { - return rangeKeyOnGetter; - } - - public void setRangeKeyOnGetter(String rangeKeyOnGetter) { - this.rangeKeyOnGetter = rangeKeyOnGetter; - }
- - public String getAttrNoAnnotation() { - return attrNoAnnotation; - } - - public void setAttrNoAnnotation(String attrNoAnnotation) { - this.attrNoAnnotation = attrNoAnnotation; - } - - public String getIgnoredAttr() { - return ignoredAttr; - } - - public void setIgnoredAttr(String ignoredAttr) { - this.ignoredAttr = ignoredAttr; - } - } - - @DynamoDbTable(tableName = "table") - private static class TablePojoSubclass extends BaseTablePojo { - private String ignoredAttr; - - @Override - public String getIgnoredAttr() { - return ignoredAttr; - } - - @Override - public void setIgnoredAttr(String ignoredAttr) { - this.ignoredAttr = ignoredAttr; - } - } - - /** - * A POJO model that uses getter annotations. - */ - @DynamoDbTable(tableName = "table") - private static class PojoWithGetterAnnotations { - private String hashKey; - private String rangeKey; - private String indexHashKey; - private String indexRangeKey; - private String annotatedAttr; - private Long versionedAttr; - private String marshallingAttr; - private String ignoredAttr; - - @DynamoDbHashKey - public String getHashKey() { - return hashKey; - } - - public void setHashKey(String hashKey) { - this.hashKey = hashKey; - } - - @DynamoDbRangeKey - @DynamoDbAutoGeneratedKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - @DynamoDbIndexHashKey(globalSecondaryIndexName = "index") - public String getIndexHashKey() { - return indexHashKey; - } - - public void setIndexHashKey(String indexHashKey) { - this.indexHashKey = indexHashKey; - } - - @DynamoDbIndexRangeKey(globalSecondaryIndexName = "index") - public String getIndexRangeKey() { - return indexRangeKey; - } - - public void setIndexRangeKey(String indexRangeKey) { - this.indexRangeKey = indexRangeKey; - } - - @DynamoDbAttribute(attributeName = "actualAttrName") - public String getAnnotatedAttr() { - return annotatedAttr; - } - - public void setAnnotatedAttr(String annotatedAttr) { - this.annotatedAttr = annotatedAttr; - } - - @DynamoDbVersionAttribute - public Long getVersionedAttr() { - return versionedAttr; - } - - public void setVersionedAttr(Long versionedAttr) { - this.versionedAttr = versionedAttr; - } - - @DynamoDbTypeConverted(converter = RandomUuidMarshaller.class) - public String getMarshallingAttr() { - return marshallingAttr; - } - - public void setMarshallingAttr(String marshallingAttr) { - this.marshallingAttr = marshallingAttr; - } - - @DynamoDbIgnore - public String getIgnoredAttr() { - return ignoredAttr; - } - - public void setIgnoredAttr(String ignoredAttr) { - this.ignoredAttr = ignoredAttr; - } - } - - /** - * The same model as defined in PojoWithGetterAnnotations, but uses field - * annotations instead. 
- */ - @DynamoDbTable(tableName = "table") - private static class PojoWithFieldAnnotations { - @DynamoDbHashKey - private String hashKey; - @DynamoDbRangeKey - @DynamoDbAutoGeneratedKey - private String rangeKey; - @DynamoDbIndexHashKey(globalSecondaryIndexName = "index") - private String indexHashKey; - @DynamoDbIndexRangeKey(globalSecondaryIndexName = "index") - private String indexRangeKey; - @DynamoDbAttribute(attributeName = "actualAttrName") - private String annotatedAttr; - @DynamoDbVersionAttribute - private Long versionedAttr; - @DynamoDbTypeConverted(converter = RandomUuidMarshaller.class) - private String marshallingAttr; - @DynamoDbIgnore - private String ignoredAttr; - - public String getHashKey() { - return hashKey; - } - - public void setHashKey(String hashKey) { - this.hashKey = hashKey; - } - - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - public String getIndexHashKey() { - return indexHashKey; - } - - public void setIndexHashKey(String indexHashKey) { - this.indexHashKey = indexHashKey; - } - - public String getIndexRangeKey() { - return indexRangeKey; - } - - public void setIndexRangeKey(String indexRangeKey) { - this.indexRangeKey = indexRangeKey; - } - - public String getAnnotatedAttr() { - return annotatedAttr; - } - - public void setAnnotatedAttr(String annotatedAttr) { - this.annotatedAttr = annotatedAttr; - } - - public Long getVersionedAttr() { - return versionedAttr; - } - - public void setVersionedAttr(Long versionedAttr) { - this.versionedAttr = versionedAttr; - } - - public String getMarshallingAttr() { - return marshallingAttr; - } - - public void setMarshallingAttr(String marshallingAttr) { - this.marshallingAttr = marshallingAttr; - } - - public String getIgnoredAttr() { - return ignoredAttr; - } - - public void setIgnoredAttr(String ignoredAttr) { - this.ignoredAttr = ignoredAttr; - } - } - - /** - * The same model as defined in PojoWithGetterAnnotations, but uses both getter and field - * annotations. 
- */ - @DynamoDbTable(tableName = "table") - private static class PojoWithMixedAnnotations { - @DynamoDbHashKey - private String hashKey; - private String rangeKey; - @DynamoDbIndexHashKey(globalSecondaryIndexName = "index") - private String indexHashKey; - private String indexRangeKey; - @DynamoDbAttribute(attributeName = "actualAttrName") - private String annotatedAttr; - private Long versionedAttr; - @DynamoDbTypeConverted(converter = RandomUuidMarshaller.class) - private String marshallingAttr; - private String ignoredAttr; - - public String getHashKey() { - return hashKey; - } - - public void setHashKey(String hashKey) { - this.hashKey = hashKey; - } - - @DynamoDbRangeKey - @DynamoDbAutoGeneratedKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - public String getIndexHashKey() { - return indexHashKey; - } - - public void setIndexHashKey(String indexHashKey) { - this.indexHashKey = indexHashKey; - } - - @DynamoDbIndexRangeKey(globalSecondaryIndexName = "index") - public String getIndexRangeKey() { - return indexRangeKey; - } - - public void setIndexRangeKey(String indexRangeKey) { - this.indexRangeKey = indexRangeKey; - } - - public String getAnnotatedAttr() { - return annotatedAttr; - } - - public void setAnnotatedAttr(String annotatedAttr) { - this.annotatedAttr = annotatedAttr; - } - - @DynamoDbVersionAttribute - public Long getVersionedAttr() { - return versionedAttr; - } - - public void setVersionedAttr(Long versionedAttr) { - this.versionedAttr = versionedAttr; - } - - public String getMarshallingAttr() { - return marshallingAttr; - } - - public void setMarshallingAttr(String marshallingAttr) { - this.marshallingAttr = marshallingAttr; - } - - @DynamoDbIgnore - public String getIgnoredAttr() { - return ignoredAttr; - } - - public void setIgnoredAttr(String ignoredAttr) { - this.ignoredAttr = ignoredAttr; - } - } - - public abstract class AbstractNonMappedInheritedProperties { - private String doNotUse; - - public String getDoNotUse() { - return this.doNotUse; - } - - public void setDoNotUse(final String doNotUse) { - this.doNotUse = doNotUse; - } - } - - @DynamoDbTable(tableName = "aws-java-sdk-test") - public class NonMappedInheritedProperties extends AbstractNonMappedInheritedProperties { - private String id; - private String doUse; - - @DynamoDbHashKey - public final String getId() { - return this.id; - } - - public final void setId(String id) { - this.id = id; - } - - public String getDoUse() { - return this.doUse; - } - - public void setDoUse(final String doUse) { - this.doUse = doUse; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV1Test.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV1Test.java deleted file mode 100644 index 94b53ba96582..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV1Test.java +++ /dev/null @@ -1,332 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; - -import java.lang.reflect.Method; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Calendar; -import java.util.Collections; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.LinkedHashSet; -import java.util.TreeSet; -import java.util.UUID; -import org.junit.Assert; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.pojos.SubClass; -import software.amazon.awssdk.services.dynamodb.pojos.TestClass; -import software.amazon.awssdk.services.dynamodb.pojos.UnannotatedSubClass; - -public class StandardModelFactoriesV1Test { - - protected static final DynamoDbMapperConfig CONFIG = new DynamoDbMapperConfig.Builder() - .withTypeConverterFactory(DynamoDbMapperConfig.DEFAULT.getTypeConverterFactory()) - .withConversionSchema(ConversionSchemas.V1) - .build(); - - private static final DynamoDbMapperModelFactory factory = StandardModelFactories.of(S3Link.Factory.of(null)); - private static final DynamoDbMapperModelFactory.TableFactory models = factory.getTableFactory(CONFIG); - - protected AttributeValue convert(Class clazz, Method getter, Object value) { - final StandardAnnotationMaps.FieldMap map = StandardAnnotationMaps.of(getter, null); - return models.getTable(clazz).field(map.attributeName()).convert(value); - } - - @Test - public void testBoolean() { - assertEquals("1", convert("getBoolean", true).n()); - assertEquals("0", convert("getBoolean", false).n()); - assertEquals("1", convert("getBoxedBoolean", true).n()); - assertEquals("0", convert("getBoxedBoolean", false).n()); - - assertEquals(true, convert("getNativeBoolean", true).bool()); - assertEquals(false, convert("getNativeBoolean", false).bool()); - } - - @Test - public void testString() { - assertEquals("abc", convert("getString", "abc").s()); - - assertEquals(RandomUuidMarshaller.randomUUID, - convert("getCustomString", "abc").s()); - } - - @Test - public void testUuid() { - UUID uuid = UUID.randomUUID(); - assertEquals(uuid.toString(), convert("getUuid", uuid).s()); - } - - @Test - public void testDate() { - assertEquals("1970-01-01T00:00:00.001Z", - convert("getDate", new Date(1)).s()); - - Calendar c = GregorianCalendar.getInstance(); - c.setTimeInMillis(1); - - assertEquals("1970-01-01T00:00:00.001Z", - convert("getCalendar", c).s()); - } - - @Test - public void testNumbers() { - assertEquals("0", convert("getByte", (byte) 0).n()); - assertEquals("1", convert("getByte", (byte) 1).n()); - assertEquals("0", convert("getBoxedByte", (byte) 0).n()); - assertEquals("1", convert("getBoxedByte", (byte) 1).n()); - - assertEquals("0", convert("getShort", (short) 0).n()); - assertEquals("1", convert("getShort", (short) 1).n()); - assertEquals("0", convert("getBoxedShort", (short) 0).n()); - assertEquals("1", convert("getBoxedShort", (short) 1).n()); - - assertEquals("0", convert("getInt", 0).n()); - assertEquals("1", convert("getInt", 1).n()); - assertEquals("0", convert("getBoxedInt", 
0).n()); - assertEquals("1", convert("getBoxedInt", 1).n()); - - assertEquals("0", convert("getLong", 0l).n()); - assertEquals("1", convert("getLong", 1l).n()); - assertEquals("0", convert("getBoxedLong", 0l).n()); - assertEquals("1", convert("getBoxedLong", 1l).n()); - - assertEquals("0", convert("getBigInt", BigInteger.ZERO).n()); - assertEquals("1", convert("getBigInt", BigInteger.ONE).n()); - - assertEquals("0.0", convert("getFloat", 0f).n()); - assertEquals("1.0", convert("getFloat", 1f).n()); - assertEquals("0.0", convert("getBoxedFloat", 0f).n()); - assertEquals("1.0", convert("getBoxedFloat", 1f).n()); - - assertEquals("0.0", convert("getDouble", 0d).n()); - assertEquals("1.0", convert("getDouble", 1d).n()); - assertEquals("0.0", convert("getBoxedDouble", 0d).n()); - assertEquals("1.0", convert("getBoxedDouble", 1d).n()); - - assertEquals("0", convert("getBigDecimal", BigDecimal.ZERO).n()); - assertEquals("1", convert("getBigDecimal", BigDecimal.ONE).n()); - } - - @Test - public void testBinary() { - SdkBytes value = SdkBytes.fromUtf8String("value"); - assertEquals(value, convert("getByteArray", value.asByteArray()).b()); - assertEquals(value, convert("getByteBuffer", value.asByteBuffer()).b()); - } - - @Test - public void testBooleanSet() { - assertEquals(Collections.singletonList("1"), - convert("getBooleanSet", Collections.singleton(true)).ns()); - - assertEquals(Collections.singletonList("0"), - convert("getBooleanSet", Collections.singleton(false)).ns()); - - assertEquals(Arrays.asList("0", "1"), - convert("getBooleanSet", new TreeSet() {{ - add(true); - add(false); - }}).ns()); - } - - @Test - public void testStringSet() { - assertEquals(Collections.singletonList("a"), - convert("getStringSet", Collections.singleton("a")).ss()); - assertEquals(Collections.singletonList("b"), - convert("getStringSet", Collections.singleton("b")).ss()); - - assertEquals(Arrays.asList("a", "b", "c"), - convert("getStringSet", new TreeSet() {{ - add("a"); - add("b"); - add("c"); - }}).ss()); - } - - @Test - public void testUuidSet() { - final UUID one = UUID.randomUUID(); - final UUID two = UUID.randomUUID(); - final UUID three = UUID.randomUUID(); - - assertEquals(Collections.singletonList(one.toString()), - convert("getUuidSet", Collections.singleton(one)).ss()); - - assertEquals(Collections.singletonList(two.toString()), - convert("getUuidSet", Collections.singleton(two)).ss()); - - assertEquals( - Arrays.asList( - one.toString(), - two.toString(), - three.toString()), - convert("getUuidSet", new LinkedHashSet() {{ - add(one); - add(two); - add(three); - }}).ss()); - } - - @Test - public void testDateSet() { - assertEquals(Collections.singletonList("1970-01-01T00:00:00.001Z"), - convert("getDateSet", Collections.singleton(new Date(1))) - .ss()); - - Calendar c = GregorianCalendar.getInstance(); - c.setTimeInMillis(1); - - assertEquals(Collections.singletonList("1970-01-01T00:00:00.001Z"), - convert("getCalendarSet", Collections.singleton(c)) - .ss()); - } - - @Test - public void testNumberSet() { - assertEquals(Collections.singletonList("0"), - convert("getByteSet", Collections.singleton((byte) 0)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getShortSet", Collections.singleton((short) 0)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getIntSet", Collections.singleton(0)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getLongSet", Collections.singleton(0l)).ns()); - assertEquals(Collections.singletonList("0"), - 
convert("getBigIntegerSet", Collections.singleton(BigInteger.ZERO)) - .ns()); - assertEquals(Collections.singletonList("0.0"), - convert("getFloatSet", Collections.singleton(0f)).ns()); - assertEquals(Collections.singletonList("0.0"), - convert("getDoubleSet", Collections.singleton(0d)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getBigDecimalSet", Collections.singleton(BigDecimal.ZERO)) - .ns()); - - assertEquals(Arrays.asList("0", "1", "2"), - convert("getLongSet", new TreeSet() {{ - add(0); - add(1); - add(2); - }}).ns()); - } - - @Test - public void testBinarySet() { - SdkBytes test = SdkBytes.fromUtf8String("test"); - SdkBytes test2 = SdkBytes.fromUtf8String("test2"); - - assertEquals(Collections.singletonList(test), - convert("getByteArraySet", Collections.singleton(test.asByteArray())).bs()); - - assertEquals(Collections.singletonList(test), - convert("getByteBufferSet", Collections.singleton(test.asByteBuffer())).bs()); - - assertEquals(Arrays.asList(test, test2), - convert("getByteBufferSet", new TreeSet() {{ - add(test.asByteBuffer()); - add(test2.asByteBuffer()); - }}).bs()); - } - - @Test - public void testObjectSet() { - Object o = new Object() { - @Override - public String toString() { - return "hello"; - } - }; - - assertEquals(Collections.singletonList("hello"), - convert("getObjectSet", Collections.singleton(o)).ss()); - } - - @Test - public void testList() { - try { - convert("getList", Arrays.asList("a", "b", "c")); - Assert.fail("Expected DynamoDBMappingException"); - } catch (DynamoDbMappingException e) { - // Ignored or expected. - } - } - - @Test - public void testMap() { - try { - convert("getMap", Collections.singletonMap("a", "b")); - Assert.fail("Expected DynamoDBMappingException"); - } catch (DynamoDbMappingException e) { - // Ignored or expected. - } - } - - @Test - public void testObject() { - try { - convert("getObject", new SubClass()); - Assert.fail("Expected DynamoDBMappingException"); - } catch (DynamoDbMappingException e) { - // Ignored or expected. - } - } - - @Test - public void testUnannotatedObject() throws Exception { - try { - convert(UnannotatedSubClass.class, UnannotatedSubClass.class.getMethod("getChild"), - new UnannotatedSubClass()); - - Assert.fail("Expected DynamoDBMappingException"); - } catch (DynamoDbMappingException e) { - // Ignored or expected. - } - } - - @Test - public void testS3Link() { - S3ClientCache cache = new S3ClientCache((AwsCredentialsProvider) null); - S3Link link = new S3Link(cache, "bucket", "key"); - - assertEquals("{\"s3\":{" - + "\"bucket\":\"bucket\"," - + "\"key\":\"key\"," - + "\"region\":null}}", - convert("getS3Link", link).s()); - } - - private AttributeValue convert(String getter, Object value) { - try { - return convert(TestClass.class, TestClass.class.getMethod(getter), value); - - } catch (RuntimeException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException(e); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV2CompatibleTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV2CompatibleTest.java deleted file mode 100644 index b967a91f0e31..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV2CompatibleTest.java +++ /dev/null @@ -1,379 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; - -import java.lang.reflect.Method; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Calendar; -import java.util.Collections; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.HashMap; -import java.util.LinkedHashSet; -import java.util.Set; -import java.util.TimeZone; -import java.util.TreeSet; -import java.util.UUID; -import org.junit.Assert; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.pojos.SubClass; -import software.amazon.awssdk.services.dynamodb.pojos.TestClass; -import software.amazon.awssdk.services.dynamodb.pojos.UnannotatedSubClass; - -public class StandardModelFactoriesV2CompatibleTest { - - protected static final DynamoDbMapperConfig CONFIG = new DynamoDbMapperConfig.Builder() - .withTypeConverterFactory(DynamoDbMapperConfig.DEFAULT.getTypeConverterFactory()) - .withConversionSchema(ConversionSchemas.V2_COMPATIBLE) - .build(); - - private static final DynamoDbMapperModelFactory factory = StandardModelFactories.of(S3Link.Factory.of(null)); - private static final DynamoDbMapperModelFactory.TableFactory models = factory.getTableFactory(CONFIG); - - protected AttributeValue convert(Class clazz, Method getter, Object value) { - final StandardAnnotationMaps.FieldMap map = StandardAnnotationMaps.of(getter, null); - return models.getTable(clazz).field(map.attributeName()).convert(value); - } - - @Test - public void testBoolean() { - assertEquals("1", convert("getBoolean", true).n()); - assertEquals("0", convert("getBoolean", false).n()); - assertEquals("1", convert("getBoxedBoolean", true).n()); - assertEquals("0", convert("getBoxedBoolean", false).n()); - - assertEquals(true, convert("getNativeBoolean", true).bool()); - assertEquals(false, convert("getNativeBoolean", false).bool()); - } - - @Test - public void testString() { - assertEquals("abc", convert("getString", "abc").s()); - - assertEquals(RandomUuidMarshaller.randomUUID, - convert("getCustomString", "abc").s()); - } - - @Test - public void testUuid() { - UUID uuid = UUID.randomUUID(); - assertEquals(uuid.toString(), convert("getUuid", uuid).s()); - } - - @Test - public void testDate() { - assertEquals("1970-01-01T00:00:00.001Z", - convert("getDate", new Date(1)).s()); - } - - @Test - public void testCalendar() { - Calendar c = GregorianCalendar.getInstance(); - c.setTimeInMillis(0); - c.setTimeZone(TimeZone.getTimeZone("Z")); - - assertEquals("1970-01-01T00:00:00Z", - convert("getCalendar", c).s()); - } - - @Test - public void testNumbers() { - assertEquals("0", convert("getByte", (byte) 0).n()); - assertEquals("1", convert("getByte", (byte) 1).n()); - assertEquals("0", 
convert("getBoxedByte", (byte) 0).n()); - assertEquals("1", convert("getBoxedByte", (byte) 1).n()); - - assertEquals("0", convert("getShort", (short) 0).n()); - assertEquals("1", convert("getShort", (short) 1).n()); - assertEquals("0", convert("getBoxedShort", (short) 0).n()); - assertEquals("1", convert("getBoxedShort", (short) 1).n()); - - assertEquals("0", convert("getInt", 0).n()); - assertEquals("1", convert("getInt", 1).n()); - assertEquals("0", convert("getBoxedInt", 0).n()); - assertEquals("1", convert("getBoxedInt", 1).n()); - - assertEquals("0", convert("getLong", 0l).n()); - assertEquals("1", convert("getLong", 1l).n()); - assertEquals("0", convert("getBoxedLong", 0l).n()); - assertEquals("1", convert("getBoxedLong", 1l).n()); - - assertEquals("0", convert("getBigInt", BigInteger.ZERO).n()); - assertEquals("1", convert("getBigInt", BigInteger.ONE).n()); - - assertEquals("0.0", convert("getFloat", 0f).n()); - assertEquals("1.0", convert("getFloat", 1f).n()); - assertEquals("0.0", convert("getBoxedFloat", 0f).n()); - assertEquals("1.0", convert("getBoxedFloat", 1f).n()); - - assertEquals("0.0", convert("getDouble", 0d).n()); - assertEquals("1.0", convert("getDouble", 1d).n()); - assertEquals("0.0", convert("getBoxedDouble", 0d).n()); - assertEquals("1.0", convert("getBoxedDouble", 1d).n()); - - assertEquals("0", convert("getBigDecimal", BigDecimal.ZERO).n()); - assertEquals("1", convert("getBigDecimal", BigDecimal.ONE).n()); - } - - @Test - public void testBinary() { - SdkBytes value = SdkBytes.fromUtf8String("value"); - assertEquals(value, convert("getByteArray", value.asByteArray()).b()); - assertEquals(value, convert("getByteBuffer", value.asByteBuffer()).b()); - } - - @Test - public void testBooleanSet() { - assertEquals(Collections.singletonList("1"), - convert("getBooleanSet", Collections.singleton(true)).ns()); - - assertEquals(Collections.singletonList("0"), - convert("getBooleanSet", Collections.singleton(false)).ns()); - - assertEquals(Arrays.asList("0", "1"), - convert("getBooleanSet", new TreeSet() {{ - add(true); - add(false); - }}).ns()); - } - - @Test - public void testStringSet() { - assertEquals(Collections.singletonList("a"), - convert("getStringSet", Collections.singleton("a")).ss()); - assertEquals(Collections.singletonList("b"), - convert("getStringSet", Collections.singleton("b")).ss()); - - assertEquals(Arrays.asList("a", "b", "c"), - convert("getStringSet", new TreeSet() {{ - add("a"); - add("b"); - add("c"); - }}).ss()); - } - - @Test - public void testUuidSet() { - final UUID one = UUID.randomUUID(); - final UUID two = UUID.randomUUID(); - final UUID three = UUID.randomUUID(); - - assertEquals(Collections.singletonList(one.toString()), - convert("getUuidSet", Collections.singleton(one)).ss()); - - assertEquals(Collections.singletonList(two.toString()), - convert("getUuidSet", Collections.singleton(two)).ss()); - - assertEquals( - Arrays.asList( - one.toString(), - two.toString(), - three.toString()), - convert("getUuidSet", new LinkedHashSet() {{ - add(one); - add(two); - add(three); - }}).ss()); - } - - @Test - public void testDateSet() { - assertEquals(Collections.singletonList("1970-01-01T00:00:00.001Z"), - convert("getDateSet", Collections.singleton(new Date(1))) - .ss()); - - Calendar c = GregorianCalendar.getInstance(); - c.setTimeInMillis(1); - - assertEquals(Collections.singletonList("1970-01-01T00:00:00.001Z"), - convert("getCalendarSet", Collections.singleton(c)) - .ss()); - } - - @Test - public void testNumberSet() { - 
assertEquals(Collections.singletonList("0"), - convert("getByteSet", Collections.singleton((byte) 0)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getShortSet", Collections.singleton((short) 0)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getIntSet", Collections.singleton(0)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getLongSet", Collections.singleton(0l)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getBigIntegerSet", Collections.singleton(BigInteger.ZERO)) - .ns()); - assertEquals(Collections.singletonList("0.0"), - convert("getFloatSet", Collections.singleton(0f)).ns()); - assertEquals(Collections.singletonList("0.0"), - convert("getDoubleSet", Collections.singleton(0d)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getBigDecimalSet", Collections.singleton(BigDecimal.ZERO)) - .ns()); - - assertEquals(Arrays.asList("0", "1", "2"), - convert("getLongSet", new TreeSet() {{ - add(0); - add(1); - add(2); - }}).ns()); - } - - @Test - public void testBinarySet() { - SdkBytes test = SdkBytes.fromUtf8String("test"); - SdkBytes test2 = SdkBytes.fromUtf8String("test2"); - - assertEquals(Collections.singletonList(test), - convert("getByteArraySet", Collections.singleton(test.asByteArray())).bs()); - - assertEquals(Collections.singletonList(test), - convert("getByteBufferSet", Collections.singleton(test.asByteBuffer())).bs()); - - assertEquals(Arrays.asList(test, test2), - convert("getByteBufferSet", new TreeSet() {{ - add(test.asByteBuffer()); - add(test2.asByteBuffer()); - }}).bs()); - } - - @Test - public void testObjectSet() { - Object o = new Object() { - @Override - public String toString() { - return "hello"; - } - }; - - assertEquals(Collections.singletonList("hello"), - convert("getObjectSet", Collections.singleton(o)).ss()); - } - - @Test - public void testList() { - assertEquals(Arrays.asList( - AttributeValue.builder().s("a").build(), - AttributeValue.builder().s("b").build(), - AttributeValue.builder().s("c").build()), - convert("getList", Arrays.asList("a", "b", "c")).l()); - - assertEquals(Arrays.asList(AttributeValue.builder().nul(true).build()), - convert("getList", Collections.singletonList(null)).l()); - } - - @Test - public void testSetList() { - assertEquals(Arrays.asList( - AttributeValue.builder().ss("a").build(), - AttributeValue.builder().ss("b").build(), - AttributeValue.builder().ss("c").build()), - convert("getSetList", Arrays.asList( - Collections.singleton("a"), - Collections.singleton("b"), - Collections.singleton("c"))).l()); - } - - @Test - public void testMap() { - assertEquals(new HashMap() {{ - put("a", AttributeValue.builder().s("b").build()); - put("c", AttributeValue.builder().s("d").build()); - put("e", AttributeValue.builder().s("f").build()); - }}, - convert("getMap", new HashMap() {{ - put("a", "b"); - put("c", "d"); - put("e", "f"); - }}).m()); - - assertEquals(Collections.singletonMap("a", AttributeValue.builder().nul(true).build()), - convert("getMap", Collections.singletonMap("a", null)).m()); - } - - @Test - public void testSetMap() { - assertEquals(new HashMap() {{ - put("a", AttributeValue.builder().ss("a", "b").build()); - }}, - convert("getSetMap", new HashMap>() {{ - put("a", new TreeSet(Arrays.asList("a", "b"))); - }}).m()); - - assertEquals(new HashMap() {{ - put("a", AttributeValue.builder().ss("a").build()); - put("b", AttributeValue.builder().nul(true).build()); - }}, - convert("getSetMap", new HashMap>() {{ - put("a", new 
TreeSet(Arrays.asList("a"))); - put("b", null); - }}).m()); - } - - @Test - public void testObject() { - assertEquals(new HashMap() {{ - put("name", AttributeValue.builder().s("name").build()); - put("value", AttributeValue.builder().n("123").build()); - }}, - convert("getObject", new SubClass()).m()); - } - - @Test - public void testUnannotatedObject() throws Exception { - try { - convert(UnannotatedSubClass.class, UnannotatedSubClass.class.getMethod("getChild"), - new UnannotatedSubClass()); - - Assert.fail("Expected DynamoDBMappingException"); - } catch (DynamoDbMappingException e) { - // Ignored or expected. - } - } - - @Test - public void testS3Link() { - S3ClientCache cache = new S3ClientCache((AwsCredentialsProvider) null); - S3Link link = new S3Link(cache, "bucket", "key"); - - assertEquals("{\"s3\":{" - + "\"bucket\":\"bucket\"," - + "\"key\":\"key\"," - + "\"region\":null}}", - convert("getS3Link", link).s()); - } - - private AttributeValue convert(String getter, Object value) { - try { - - return convert(TestClass.class, TestClass.class.getMethod(getter), value); - - } catch (RuntimeException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException(e); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV2Test.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV2Test.java deleted file mode 100644 index e0d60beb151f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV2Test.java +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; - -import java.lang.reflect.Method; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Calendar; -import java.util.Collections; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.HashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import org.junit.Assert; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.pojos.SubClass; -import software.amazon.awssdk.services.dynamodb.pojos.TestClass; -import software.amazon.awssdk.services.dynamodb.pojos.UnannotatedSubClass; - -public class StandardModelFactoriesV2Test { - - protected static final DynamoDbMapperConfig CONFIG = new DynamoDbMapperConfig.Builder() - .withTypeConverterFactory(DynamoDbMapperConfig.DEFAULT.getTypeConverterFactory()) - .withConversionSchema(ConversionSchemas.V2) - .build(); - - private static final DynamoDbMapperModelFactory factory = StandardModelFactories.of(S3Link.Factory.of(null)); - private static final DynamoDbMapperModelFactory.TableFactory models = factory.getTableFactory(CONFIG); - - protected AttributeValue convert(Class clazz, Method getter, Object value) { - final StandardAnnotationMaps.FieldMap map = StandardAnnotationMaps.of(getter, null); - return models.getTable(clazz).field(map.attributeName()).convert(value); - } - - @Test - public void testBoolean() { - // These are all native booleans by default in the v2 schema - assertEquals(true, convert("getBoolean", true).bool()); - assertEquals(false, convert("getBoolean", false).bool()); - assertEquals(true, convert("getBoxedBoolean", true).bool()); - assertEquals(false, convert("getBoxedBoolean", false).bool()); - assertEquals(true, convert("getNativeBoolean", true).bool()); - assertEquals(false, convert("getNativeBoolean", false).bool()); - } - - @Test - public void testString() { - assertEquals("abc", convert("getString", "abc").s()); - - assertEquals(RandomUuidMarshaller.randomUUID, - convert("getCustomString", "abc").s()); - } - - @Test - public void testUuid() { - UUID uuid = UUID.randomUUID(); - assertEquals(uuid.toString(), convert("getUuid", uuid).s()); - } - - @Test - public void testDate() { - assertEquals("1970-01-01T00:00:00Z", - convert("getDate", new Date(0)).s()); - - Calendar c = GregorianCalendar.getInstance(); - c.setTimeInMillis(0); - - assertEquals("1970-01-01T00:00:00Z", - convert("getCalendar", c).s()); - } - - @Test - public void testNumbers() { - assertEquals("0", convert("getByte", (byte) 0).n()); - assertEquals("1", convert("getByte", (byte) 1).n()); - assertEquals("0", convert("getBoxedByte", (byte) 0).n()); - assertEquals("1", convert("getBoxedByte", (byte) 1).n()); - - assertEquals("0", convert("getShort", (short) 0).n()); - assertEquals("1", convert("getShort", (short) 1).n()); - assertEquals("0", convert("getBoxedShort", (short) 0).n()); - assertEquals("1", convert("getBoxedShort", (short) 1).n()); - - assertEquals("0", convert("getInt", 0).n()); - assertEquals("1", convert("getInt", 1).n()); - assertEquals("0", convert("getBoxedInt", 0).n()); - assertEquals("1", convert("getBoxedInt", 
1).n()); - - assertEquals("0", convert("getLong", 0l).n()); - assertEquals("1", convert("getLong", 1l).n()); - assertEquals("0", convert("getBoxedLong", 0l).n()); - assertEquals("1", convert("getBoxedLong", 1l).n()); - - assertEquals("0", convert("getBigInt", BigInteger.ZERO).n()); - assertEquals("1", convert("getBigInt", BigInteger.ONE).n()); - - assertEquals("0.0", convert("getFloat", 0f).n()); - assertEquals("1.0", convert("getFloat", 1f).n()); - assertEquals("0.0", convert("getBoxedFloat", 0f).n()); - assertEquals("1.0", convert("getBoxedFloat", 1f).n()); - - assertEquals("0.0", convert("getDouble", 0d).n()); - assertEquals("1.0", convert("getDouble", 1d).n()); - assertEquals("0.0", convert("getBoxedDouble", 0d).n()); - assertEquals("1.0", convert("getBoxedDouble", 1d).n()); - - assertEquals("0", convert("getBigDecimal", BigDecimal.ZERO).n()); - assertEquals("1", convert("getBigDecimal", BigDecimal.ONE).n()); - } - - @Test - public void testBinary() { - SdkBytes value = SdkBytes.fromUtf8String("value"); - assertEquals(value, convert("getByteArray", value.asByteArray()).b()); - assertEquals(value, convert("getByteBuffer", value.asByteBuffer()).b()); - } - - @Test - public void testBooleanSet() { - // Set (which is silly but technically valid) gets mapped to - // a List of Booleans now via the ObjectSetToListMarshaller. - AttributeValue value = - convert("getBooleanSet", Collections.singleton(true)); - - Assert.assertEquals(1, value.l().size()); - Assert.assertEquals(true, value.l().get(0).bool()); - } - - @Test - public void testStringSet() { - assertEquals(Collections.singletonList("a"), - convert("getStringSet", Collections.singleton("a")).ss()); - assertEquals(Collections.singletonList("b"), - convert("getStringSet", Collections.singleton("b")).ss()); - - assertEquals(Arrays.asList("a", "b", "c"), - convert("getStringSet", new TreeSet() {{ - add("a"); - add("b"); - add("c"); - }}).ss()); - } - - @Test - public void testUuidSet() { - final UUID one = UUID.randomUUID(); - final UUID two = UUID.randomUUID(); - final UUID three = UUID.randomUUID(); - - assertEquals(Collections.singletonList(one.toString()), - convert("getUuidSet", Collections.singleton(one)).ss()); - - assertEquals(Collections.singletonList(two.toString()), - convert("getUuidSet", Collections.singleton(two)).ss()); - - assertEquals( - Arrays.asList( - one.toString(), - two.toString(), - three.toString()), - convert("getUuidSet", new LinkedHashSet() {{ - add(one); - add(two); - add(three); - }}).ss()); - } - - @Test - public void testDateSet() { - assertEquals(Collections.singletonList("1970-01-01T00:00:00Z"), - convert("getDateSet", Collections.singleton(new Date(0))) - .ss()); - - Calendar c = GregorianCalendar.getInstance(); - c.setTimeInMillis(0); - - assertEquals(Collections.singletonList("1970-01-01T00:00:00Z"), - convert("getCalendarSet", Collections.singleton(c)) - .ss()); - } - - @Test - public void testNumberSet() { - assertEquals(Collections.singletonList("0"), - convert("getByteSet", Collections.singleton((byte) 0)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getShortSet", Collections.singleton((short) 0)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getIntSet", Collections.singleton(0)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getLongSet", Collections.singleton(0l)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getBigIntegerSet", Collections.singleton(BigInteger.ZERO)) - .ns()); - assertEquals(Collections.singletonList("0.0"), 
- convert("getFloatSet", Collections.singleton(0f)).ns()); - assertEquals(Collections.singletonList("0.0"), - convert("getDoubleSet", Collections.singleton(0d)).ns()); - assertEquals(Collections.singletonList("0"), - convert("getBigDecimalSet", Collections.singleton(BigDecimal.ZERO)) - .ns()); - - assertEquals(Arrays.asList("0", "1", "2"), - convert("getLongSet", new TreeSet() {{ - add(0); - add(1); - add(2); - }}).ns()); - } - - @Test - public void testBinarySet() { - SdkBytes test = SdkBytes.fromUtf8String("test"); - SdkBytes test2 = SdkBytes.fromUtf8String("test2"); - - assertEquals(Collections.singletonList(test), - convert("getByteArraySet", Collections.singleton(test.asByteArray())).bs()); - - assertEquals(Collections.singletonList(test), - convert("getByteBufferSet", Collections.singleton(test.asByteBuffer())).bs()); - - assertEquals(Arrays.asList(test, test2), - convert("getByteBufferSet", new TreeSet() {{ - add(test.asByteBuffer()); - add(test2.asByteBuffer()); - }}).bs()); - } - - @Test - public void testObjectSet() { - AttributeValue value = - convert("getObjectSet", Collections.singleton(new SubClass())); - - assertEquals(1, value.l().size()); - assertEquals(new HashMap() {{ - put("name", AttributeValue.builder().s("name").build()); - put("value", AttributeValue.builder().n("123").build()); - }}, - value.l().get(0).m()); - - assertEquals(Arrays.asList(AttributeValue.builder().nul(true).build()), - convert("getObjectSet", Collections.singleton(null)).l()); - } - - @Test - public void testList() { - assertEquals(Arrays.asList( - AttributeValue.builder().s("a").build(), - AttributeValue.builder().s("b").build(), - AttributeValue.builder().s("c").build()), - convert("getList", Arrays.asList("a", "b", "c")).l()); - - assertEquals(Arrays.asList(AttributeValue.builder().nul(true).build()), - convert("getList", Collections.singletonList(null)).l()); - } - - @Test - public void testObjectList() { - AttributeValue value = convert( - "getObjectList", - Collections.singletonList(new SubClass())); - - assertEquals(1, value.l().size()); - assertEquals(new HashMap() {{ - put("name", AttributeValue.builder().s("name").build()); - put("value", AttributeValue.builder().n("123").build()); - }}, - value.l().get(0).m()); - } - - @Test - public void testSetList() { - assertEquals( - Arrays.asList(AttributeValue.builder().ss("a").build()), - convert("getSetList", Arrays.asList( - Collections.singleton("a"))).l()); - - List> list = new ArrayList>(); - list.add(null); - - assertEquals( - Arrays.asList(AttributeValue.builder().nul(true).build()), - convert("getSetList", list).l()); - } - - @Test - public void testMap() { - assertEquals(new HashMap() {{ - put("a", AttributeValue.builder().s("b").build()); - put("c", AttributeValue.builder().s("d").build()); - put("e", AttributeValue.builder().s("f").build()); - }}, - convert("getMap", new HashMap() {{ - put("a", "b"); - put("c", "d"); - put("e", "f"); - }}).m()); - - assertEquals(Collections.singletonMap("a", AttributeValue.builder().nul(true).build()), - convert("getMap", Collections.singletonMap("a", null)).m()); - } - - @Test - public void testSetMap() { - assertEquals(new HashMap() {{ - put("a", AttributeValue.builder().ss("a", "b").build()); - }}, - convert("getSetMap", new HashMap>() {{ - put("a", new TreeSet(Arrays.asList("a", "b"))); - }}).m()); - - assertEquals(new HashMap() {{ - put("a", AttributeValue.builder().ss("a").build()); - put("b", AttributeValue.builder().nul(true).build()); - }}, - convert("getSetMap", new HashMap>() {{ - put("a", 
new TreeSet(Arrays.asList("a"))); - put("b", null); - }}).m()); - } - - @Test - public void testObject() { - assertEquals(new HashMap() {{ - put("name", AttributeValue.builder().s("name").build()); - put("value", AttributeValue.builder().n("123").build()); - }}, - convert("getObject", new SubClass()).m()); - } - - @Test - public void testUnannotatedObject() throws Exception { - try { - convert(UnannotatedSubClass.class, UnannotatedSubClass.class.getMethod("getChild"), - new UnannotatedSubClass()); - - Assert.fail("Expected DynamoDBMappingException"); - } catch (DynamoDbMappingException e) { - // Ignored or expected. - } - } - - @Test - public void testS3Link() { - S3ClientCache cache = new S3ClientCache((AwsCredentialsProvider) null); - S3Link link = new S3Link(cache, "bucket", "key"); - - assertEquals("{\"s3\":{" - + "\"bucket\":\"bucket\"," - + "\"key\":\"key\"," - + "\"region\":null}}", - convert("getS3Link", link).s()); - } - - private AttributeValue convert(String getter, Object value) { - try { - - return convert(TestClass.class, TestClass.class.getMethod(getter), value); - - } catch (RuntimeException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException(e); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV2UnconvertTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV2UnconvertTest.java deleted file mode 100644 index 9e73b4583dc0..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardModelFactoriesV2UnconvertTest.java +++ /dev/null @@ -1,545 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; - -import java.lang.reflect.Method; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.util.Arrays; -import java.util.Calendar; -import java.util.Collections; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import org.junit.Assert; -import org.junit.Ignore; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.pojos.SubClass; -import software.amazon.awssdk.services.dynamodb.pojos.TestClass; -import software.amazon.awssdk.services.dynamodb.pojos.UnannotatedSubClass; - -public class StandardModelFactoriesV2UnconvertTest { - - protected static final DynamoDbMapperConfig CONFIG = new DynamoDbMapperConfig.Builder() - .withTypeConverterFactory(DynamoDbMapperConfig.DEFAULT.getTypeConverterFactory()) - .withConversionSchema(ConversionSchemas.V2) - .build(); - - private static final DynamoDbMapperModelFactory factory = StandardModelFactories.of(new S3Link.Factory(new S3ClientCache((AwsCredentialsProvider) null))); - private static final DynamoDbMapperModelFactory.TableFactory models = factory.getTableFactory(CONFIG); - - protected Object unconvert(Class clazz, Method getter, Method setter, AttributeValue value) { - final StandardAnnotationMaps.FieldMap map = StandardAnnotationMaps.of(getter, null); - return models.getTable(clazz).field(map.attributeName()).unconvert(value); - } - - @Test - public void testBoolean() { - assertEquals(false, unconvert("getBoolean", "setBoolean", - AttributeValue.builder().n("0").build())); - - assertEquals(true, unconvert("getBoolean", "setBoolean", - AttributeValue.builder().n("1").build())); - - assertEquals(false, unconvert("getBoolean", "setBoolean", - AttributeValue.builder().bool(false).build())); - - assertEquals(true, unconvert("getBoolean", "setBoolean", - AttributeValue.builder().bool(true).build())); - - assertEquals(false, unconvert("getBoxedBoolean", "setBoxedBoolean", - AttributeValue.builder().n("0").build())); - - assertEquals(true, unconvert("getBoxedBoolean", "setBoxedBoolean", - AttributeValue.builder().n("1").build())); - - assertEquals(false, unconvert("getBoxedBoolean", "setBoxedBoolean", - AttributeValue.builder().bool(false).build())); - - assertEquals(true, unconvert("getBoxedBoolean", "setBoxedBoolean", - AttributeValue.builder().bool(true).build())); - } - - @Test - public void testString() { - assertEquals("test", unconvert("getString", "setString", - AttributeValue.builder().s("test").build())); - - Assert.assertNull(unconvert("getCustomString", "setCustomString", - AttributeValue.builder().s("ignoreme").build())); - } - - @Test - public void testUuid() { - UUID uuid = UUID.randomUUID(); - assertEquals(uuid, unconvert("getUuid", "setUuid", - AttributeValue.builder().s(uuid.toString()).build())); - } - - @Test - public void testDate() { - Date date = Date.from(Instant.ofEpochMilli(0)); - assertEquals(date, unconvert("getDate", "setDate", - AttributeValue.builder().s("1970-01-01T00:00:00Z").build())); - } - - 
@Test - public void testCalendar() { - Calendar c = GregorianCalendar.getInstance(); - c.setTimeInMillis(1); - - assertEquals(c, unconvert("getCalendar", "setCalendar", - AttributeValue.builder().s("1970-01-01T00:00:00.001Z").build())); - } - - @Test - public void testNumbers() { - assertEquals((byte) 1, unconvert("getByte", "setByte", - AttributeValue.builder().n("1").build())); - assertEquals((byte) 1, unconvert("getBoxedByte", "setBoxedByte", - AttributeValue.builder().n("1").build())); - - assertEquals((short) 1, unconvert("getShort", "setShort", - AttributeValue.builder().n("1").build())); - assertEquals((short) 1, unconvert("getBoxedShort", "setBoxedShort", - AttributeValue.builder().n("1").build())); - - assertEquals(1, unconvert("getInt", "setInt", - AttributeValue.builder().n("1").build())); - assertEquals(1, unconvert("getBoxedInt", "setBoxedInt", - AttributeValue.builder().n("1").build())); - - assertEquals(1l, unconvert("getLong", "setLong", - AttributeValue.builder().n("1").build())); - assertEquals(1l, unconvert("getBoxedLong", "setBoxedLong", - AttributeValue.builder().n("1").build())); - - assertEquals(BigInteger.ONE, unconvert("getBigInt", "setBigInt", - AttributeValue.builder().n("1").build())); - - assertEquals(1.5f, unconvert("getFloat", "setFloat", - AttributeValue.builder().n("1.5").build())); - assertEquals(1.5f, unconvert("getBoxedFloat", "setBoxedFloat", - AttributeValue.builder().n("1.5").build())); - - assertEquals(1.5d, unconvert("getDouble", "setDouble", - AttributeValue.builder().n("1.5").build())); - assertEquals(1.5d, unconvert("getBoxedDouble", "setBoxedDouble", - AttributeValue.builder().n("1.5").build())); - - assertEquals(BigDecimal.ONE, unconvert("getBigDecimal", "setBigDecimal", - AttributeValue.builder().n("1").build())); - } - - @Test - public void testBinary() { - SdkBytes test = SdkBytes.fromUtf8String("test"); - assertArrayEquals(test.asByteArray(), - (byte[]) unconvert("getByteArray", "setByteArray", AttributeValue.builder().b(test).build())); - - assertEquals(test.asByteBuffer(), - unconvert("getByteBuffer", "setByteBuffer", AttributeValue.builder().b(test).build())); - } - - @Test - @Ignore // No longer works because mapper is not aware of auto construct lists - public void testBooleanSet() { - assertEquals(new HashSet() {{ - add(true); - }}, - unconvert("getBooleanSet", "setBooleanSet", - AttributeValue.builder().ns("1").build())); - - assertEquals(new HashSet() {{ - add(false); - }}, - unconvert("getBooleanSet", "setBooleanSet", - AttributeValue.builder().ns("0").build())); - - assertEquals(new HashSet() {{ - add(true); - add(false); - }}, - unconvert("getBooleanSet", "setBooleanSet", - AttributeValue.builder().ns("0", "1").build())); - - assertEquals(new HashSet() {{ - add(true); - }}, - unconvert("getBooleanSet", "setBooleanSet", - AttributeValue.builder().l( - AttributeValue.builder().bool(true).build()).build())); - - assertEquals(new HashSet() {{ - add(false); - }}, - unconvert("getBooleanSet", "setBooleanSet", - AttributeValue.builder().l( - AttributeValue.builder().bool(false).build()).build())); - - assertEquals(new HashSet() {{ - add(false); - add(true); - }}, - unconvert("getBooleanSet", "setBooleanSet", - AttributeValue.builder().l( - AttributeValue.builder().bool(false).build(), - AttributeValue.builder().bool(true).build()).build())); - - assertEquals(new HashSet() {{ - add(null); - }}, - unconvert("getBooleanSet", "setBooleanSet", - AttributeValue.builder().l( - AttributeValue.builder().nul(true).build()).build())); - } - - 
@Test - @Ignore // No longer works because mapper is not aware of auto construct lists - public void testStringSet() { - Assert.assertNull(unconvert("getStringSet", "setStringSet", - AttributeValue.builder().nul(true).build())); - - assertEquals(new HashSet() {{ - add("a"); - add("b"); - }}, - unconvert("getStringSet", "setStringSet", - AttributeValue.builder().ss("a", "b").build())); - } - - @Test - @Ignore // No longer works because mapper is not aware of auto construct lists - public void testUuidSet() { - Assert.assertNull(unconvert("getUuidSet", "setUuidSet", - AttributeValue.builder().nul(true).build())); - - final UUID one = UUID.randomUUID(); - final UUID two = UUID.randomUUID(); - - assertEquals(new HashSet() {{ - add(one); - add(two); - }}, - unconvert("getUuidSet", "setUuidSet", - AttributeValue.builder().ss( - one.toString(), - two.toString()).build())); - } - - @Test - public void testDateSet() { - assertEquals(Collections.singleton(new Date(0)), - unconvert("getDateSet", "setDateSet", AttributeValue.builder() - .ss("1970-01-01T00:00:00.000Z").build())); - - Calendar c = GregorianCalendar.getInstance(); - c.setTimeInMillis(0); - - assertEquals(Collections.singleton(c), - unconvert("getCalendarSet", "setCalendarSet", - AttributeValue.builder() - .ss("1970-01-01T00:00:00.000Z").build())); - } - - @Test - @Ignore // No longer works because mapper is not aware of auto construct lists - public void testNumberSet() { - Assert.assertNull(unconvert("getByteSet", "setByteSet", - AttributeValue.builder().nul(true).build())); - Assert.assertNull(unconvert("getShortSet", "setShortSet", - AttributeValue.builder().nul(true).build())); - Assert.assertNull(unconvert("getIntSet", "setIntSet", - AttributeValue.builder().nul(true).build())); - Assert.assertNull(unconvert("getLongSet", "setLongSet", - AttributeValue.builder().nul(true).build())); - Assert.assertNull(unconvert("getBigIntegerSet", "setBigIntegerSet", - AttributeValue.builder().nul(true).build())); - Assert.assertNull(unconvert("getFloatSet", "setFloatSet", - AttributeValue.builder().nul(true).build())); - Assert.assertNull(unconvert("getDoubleSet", "setDoubleSet", - AttributeValue.builder().nul(true).build())); - Assert.assertNull(unconvert("getBigDecimalSet", "setBigDecimalSet", - AttributeValue.builder().nul(true).build())); - - - assertEquals(new HashSet() {{ - add((byte) 1); - }}, - unconvert("getByteSet", "setByteSet", - AttributeValue.builder().ns("1").build())); - - assertEquals(new HashSet() {{ - add((short) 1); - }}, - unconvert("getShortSet", "setShortSet", - AttributeValue.builder().ns("1").build())); - - assertEquals(new HashSet() {{ - add(1); - }}, - unconvert("getIntSet", "setIntSet", - AttributeValue.builder().ns("1").build())); - - assertEquals(new HashSet() {{ - add(1l); - }}, - unconvert("getLongSet", "setLongSet", - AttributeValue.builder().ns("1").build())); - - assertEquals(new HashSet() {{ - add(BigInteger.ONE); - }}, - unconvert("getBigIntegerSet", "setBigIntegerSet", - AttributeValue.builder().ns("1").build())); - - assertEquals(new HashSet() {{ - add(1.5f); - }}, - unconvert("getFloatSet", "setFloatSet", - AttributeValue.builder().ns("1.5").build())); - - assertEquals(new HashSet() {{ - add(1.5d); - }}, - unconvert("getDoubleSet", "setDoubleSet", - AttributeValue.builder().ns("1.5").build())); - - assertEquals(new HashSet() {{ - add(BigDecimal.ONE); - }}, - unconvert("getBigDecimalSet", "setBigDecimalSet", - AttributeValue.builder().ns("1").build())); - } - - @Test - @Ignore // No longer works because 
mapper is not aware of auto construct lists - public void testBinarySet() { - Assert.assertNull(unconvert("getByteArraySet", "setByteArraySet", - AttributeValue.builder().nul(true).build())); - Assert.assertNull(unconvert("getByteBufferSet", "setByteBufferSet", - AttributeValue.builder().nul(true).build())); - - ByteBuffer test = ByteBuffer.wrap("test".getBytes()); - - Set result = (Set) unconvert( - "getByteArraySet", "setByteArraySet", - AttributeValue.builder().bs(SdkBytes.fromByteBuffer(test.slice())).build()); - - assertEquals(1, result.size()); - Assert.assertTrue(Arrays.equals( - "test".getBytes(), - result.iterator().next())); - - Assert.assertEquals(Collections.singleton(test.slice()), - unconvert("getByteBufferSet", "setByteBufferSet", - AttributeValue.builder().bs(SdkBytes.fromByteBuffer(test.slice())).build())); - } - - @Test - @Ignore // No longer works because the converters aren't aware of auto construct maps - public void testObjectSet() { - Object result = unconvert("getObjectSet", "setObjectSet", - AttributeValue.builder().l(AttributeValue.builder().m( - new HashMap() {{ - put("name", AttributeValue.builder().s("name").build()); - put("value", AttributeValue.builder().n("123").build()); - put("null", AttributeValue.builder().nul(true).build()); - }} - ) - .build()) - .build()); - - assertEquals(Collections.singleton(new SubClass()), result); - - result = unconvert("getObjectSet", "setObjectSet", - AttributeValue.builder().l( - AttributeValue.builder() - .nul(true) - .build()) - .build()); - - assertEquals(Collections.singleton(null), result); - } - - @Test - @Ignore // No longer works because the converters aren't aware of auto construct maps - public void testList() { - Assert.assertNull(unconvert("getList", "setList", - AttributeValue.builder().nul(true).build())); - - assertEquals(Arrays.asList("a", "b", "c"), - unconvert("getList", "setList", AttributeValue.builder().l( - AttributeValue.builder().s("a").build(), - AttributeValue.builder().s("b").build(), - AttributeValue.builder().s("c").build()) - .build())); - - assertEquals(Arrays.asList("a", null), - unconvert("getList", "setList", AttributeValue.builder().l( - AttributeValue.builder().s("a").build(), - AttributeValue.builder().nul(true).build()).build())); - } - - @Test - @Ignore // No longer works because the converters aren't aware of auto construct maps - public void testObjectList() { - Assert.assertNull(unconvert("getObjectList", "setObjectList", - AttributeValue.builder().nul(true).build())); - - assertEquals(Arrays.asList(new SubClass(), null), - unconvert("getObjectList", "setObjectList", - AttributeValue.builder().l( - AttributeValue.builder().m(new HashMap() {{ - put("name", AttributeValue.builder().s("name").build()); - put("value", AttributeValue.builder().n("123").build()); - put("null", AttributeValue.builder().nul(true).build()); - }}).build(), - AttributeValue.builder().nul(true).build()).build())); - } - - @Test - @Ignore // No longer works because mapper is not aware of auto construct lists - public void testSetList() { - Assert.assertNull(unconvert("getSetList", "setSetList", - AttributeValue.builder().nul(true).build())); - - assertEquals(Arrays.asList(new Set[] {null}), - unconvert("getSetList", "setSetList", AttributeValue.builder().l( - AttributeValue.builder().nul(true).build()).build())); - - assertEquals(Arrays.asList(Collections.singleton("a")), - unconvert("getSetList", "setSetList", AttributeValue.builder().l( - AttributeValue.builder().ss("a").build()).build())); - } - - @Test - 
@Ignore // No longer works because the converters aren't aware of auto construct maps - public void testMap() { - Assert.assertNull(unconvert("getMap", "setMap", - AttributeValue.builder().nul(true).build())); - - assertEquals(new HashMap() {{ - put("a", "b"); - put("c", "d"); - }}, - unconvert("getMap", "setMap", AttributeValue.builder().m( - new HashMap() {{ - put("a", AttributeValue.builder().s("b").build()); - put("c", AttributeValue.builder().s("d").build()); - }}).build())); - - assertEquals(new HashMap() {{ - put("a", null); - }}, - unconvert("getMap", "setMap", AttributeValue.builder().m( - new HashMap() {{ - put("a", AttributeValue.builder().nul(true).build()); - }}).build())); - } - - @Test - @Ignore // No longer works because the converters aren't aware of auto construct maps - public void testSetMap() { - Assert.assertNull(unconvert("getSetMap", "setSetMap", - AttributeValue.builder().nul(true).build())); - - assertEquals(new HashMap>() {{ - put("a", null); - put("b", new TreeSet(Arrays.asList("a", "b"))); - }}, - unconvert("getSetMap", "setSetMap", AttributeValue.builder().m( - new HashMap() {{ - put("a", AttributeValue.builder().nul(true).build()); - put("b", AttributeValue.builder().ss("a", "b").build()); - }}).build())); - } - - @Test - @Ignore // No longer works because the converters aren't aware of auto construct maps - public void testObject() { - Assert.assertNull(unconvert("getObject", "setObject", - AttributeValue.builder().nul(true).build())); - - assertEquals(new SubClass(), unconvert("getObject", "setObject", - AttributeValue.builder().m(new HashMap() {{ - put("name", AttributeValue.builder().s("name").build()); - put("value", AttributeValue.builder().n("123").build()); - }}).build())); - - assertEquals(new SubClass(), unconvert("getObject", "setObject", - AttributeValue.builder().m(new HashMap() {{ - put("name", AttributeValue.builder().s("name").build()); - put("value", AttributeValue.builder().n("123").build()); - put("null", AttributeValue.builder().nul(true).build()); - }}).build())); - } - - @Test - public void testUnannotatedObject() throws Exception { - Method getter = UnannotatedSubClass.class.getMethod("getChild"); - Method setter = UnannotatedSubClass.class - .getMethod("setChild", UnannotatedSubClass.class); - - try { - unconvert(UnannotatedSubClass.class, getter, setter, AttributeValue.builder().s("").build()); - Assert.fail("Expected DynamoDBMappingException"); - } catch (DynamoDbMappingException e) { - // Ignored or expected. 
- } - } - - @Test - public void testS3Link() { - S3Link link = (S3Link) unconvert("getS3Link", "setS3Link", - AttributeValue.builder().s("{\"s3\":{" - + "\"bucket\":\"bucket\"," - + "\"key\":\"key\"," - + "\"region\":null}}").build()); - - assertEquals("bucket", link.bucketName()); - assertEquals("key", link.getKey()); - assertEquals(Region.US_EAST_1, link.s3Region()); - } - - public Object unconvert(String getter, String setter, AttributeValue value) { - try { - - Method gm = TestClass.class.getMethod(getter); - Method sm = TestClass.class.getMethod(setter, gm.getReturnType()); - return unconvert(TestClass.class, gm, sm, value); - - } catch (RuntimeException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException("BOOM", e); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardTypeConverters.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardTypeConverters.java deleted file mode 100644 index 43b9cbb3e0ce..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StandardTypeConverters.java +++ /dev/null @@ -1,1137 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static java.time.format.DateTimeFormatter.ISO_DATE_TIME; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.nio.charset.Charset; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Collection; -import java.util.Currency; -import java.util.Date; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; -import java.util.TimeZone; -import java.util.regex.Pattern; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.utils.DateUtils; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; - -/** - * Type conversions. - * - * @see DynamoDbTypeConverter - */ -@SdkInternalApi -final class StandardTypeConverters extends DynamoDbTypeConverterFactory { - - /** - * Standard scalar type-converter factory. 
- */ - private static final DynamoDbTypeConverterFactory FACTORY = new StandardTypeConverters(); - - static DynamoDbTypeConverterFactory factory() { - return StandardTypeConverters.FACTORY; - } - - /** - * {@inheritDoc} - */ - @Override - public DynamoDbTypeConverter getConverter(Class sourceType, Class targetType) { - final Scalar source = Scalar.of(sourceType); - final Scalar target = Scalar.of(targetType); - final Converter toSource = source.getConverter(sourceType, target.type()); - final Converter toTarget = target.getConverter(targetType, source.type()); - return new StandardDynamoDbTypeConverter<>(toSource, toTarget); - } - - /** - * Standard scalar types. - */ - enum Scalar { - /** - * {@link BigDecimal} - */ - BIG_DECIMAL(ScalarAttributeType.N, new ConverterMap(BigDecimal.class, null) - .with(Number.class, ToBigDecimal.FROM_STRING.join(ToString.FROM_NUMBER)) - .with(String.class, ToBigDecimal.FROM_STRING) - ), - - /** - * {@link BigInteger} - */ - BIG_INTEGER(ScalarAttributeType.N, new ConverterMap(BigInteger.class, null) - .with(Number.class, ToBigInteger.FROM_STRING.join(ToString.FROM_NUMBER)) - .with(String.class, ToBigInteger.FROM_STRING) - ), - - /** - * {@link Boolean} - */ - BOOLEAN(ScalarAttributeType.N, new ConverterMap(Boolean.class, Boolean.TYPE) - .with(Number.class, ToBoolean.FROM_STRING.join(ToString.FROM_NUMBER)) - .with(String.class, ToBoolean.FROM_STRING) - ), - - /** - * {@link Byte} - */ - BYTE(ScalarAttributeType.N, new ConverterMap(Byte.class, Byte.TYPE) - .with(Number.class, ToByte.FROM_NUMBER) - .with(String.class, ToByte.FROM_STRING) - ), - - /** - * {@link Byte} array - */ - BYTE_ARRAY(ScalarAttributeType.B, new ConverterMap(byte[].class, null) - .with(ByteBuffer.class, ToByteArray.FROM_BYTE_BUFFER) - .with(String.class, ToByteArray.FROM_STRING) - ), - - /** - * {@link ByteBuffer} - */ - BYTE_BUFFER(ScalarAttributeType.B, new ConverterMap(ByteBuffer.class, null) - .with(byte[].class, ToByteBuffer.FROM_BYTE_ARRAY) - .with(String.class, ToByteBuffer.FROM_BYTE_ARRAY.join(ToByteArray.FROM_STRING)) - .with(java.util.UUID.class, ToByteBuffer.FROM_UUID) - ), - - /** - * {@link Calendar} - */ - CALENDAR(ScalarAttributeType.S, new ConverterMap(Calendar.class, null) - .with(Date.class, ToCalendar.FROM_DATE) - .with(Instant.class, ToCalendar.FROM_INSTANT) - .with(ZonedDateTime.class, ToCalendar.FROM_INSTANT.join(ToInstant.FROM_ZONEDDATETIME)) - .with(Long.class, ToCalendar.FROM_INSTANT.join(ToInstant.FROM_LONG)) - .with(String.class, ToCalendar.FROM_INSTANT.join(ToInstant.FROM_STRING)) - ), - - /** - * {@link Character} - */ - CHARACTER(ScalarAttributeType.S, new ConverterMap(Character.class, Character.TYPE) - .with(String.class, ToCharacter.FROM_STRING) - ), - - /** - * {@link Currency} - */ - CURRENCY(ScalarAttributeType.S, new ConverterMap(Currency.class, null) - .with(String.class, ToCurrency.FROM_STRING) - ), - - /** - * {@link Instant} - */ - INSTANT(ScalarAttributeType.S, new ConverterMap(Instant.class, null) - .with(Calendar.class, ToInstant.FROM_CALENDAR) - .with(ZonedDateTime.class, ToInstant.FROM_ZONEDDATETIME) - .with(Long.class, ToInstant.FROM_LONG) - .with(String.class, ToInstant.FROM_STRING) - ), - /** - * {@link Date} - */ - DATE(ScalarAttributeType.S, new ConverterMap(Date.class, null) - .with(Instant.class, ToDate.FROM_INSTANT) - .with(Calendar.class, ToDate.FROM_CALENDAR) - .with(ZonedDateTime.class, ToDate.FROM_INSTANT.join(ToInstant.FROM_ZONEDDATETIME)) - .with(Long.class, ToDate.FROM_INSTANT.join(ToInstant.FROM_LONG)) - .with(String.class, 
ToDate.FROM_STRING) - ), - - /** - * {@link ZonedDateTime} - */ - ZONED_DATE_TIME(/*ScalarAttributeType.S*/null, new ConverterMap(ZonedDateTime.class, null) - .with(Calendar.class, ToDateTime.FROM_CALENDAR) - .with(Date.class, ToDateTime.FROM_DATE) - .with(Long.class, ToDateTime.FROM_INSTANT_AT_UTC.join(ToInstant.FROM_LONG)) - .with(String.class, ToDateTime.FROM_INSTANT_AT_UTC.join(ToInstant.FROM_STRING)) - ), - - /** - * {@link Double} - */ - DOUBLE(ScalarAttributeType.N, new ConverterMap(Double.class, Double.TYPE) - .with(Number.class, ToDouble.FROM_NUMBER) - .with(String.class, ToDouble.FROM_STRING) - ), - - /** - * {@link Float} - */ - FLOAT(ScalarAttributeType.N, new ConverterMap(Float.class, Float.TYPE) - .with(Number.class, ToFloat.FROM_NUMBER) - .with(String.class, ToFloat.FROM_STRING) - ), - - /** - * {@link Integer} - */ - INTEGER(ScalarAttributeType.N, new ConverterMap(Integer.class, Integer.TYPE) - .with(Number.class, ToInteger.FROM_NUMBER) - .with(String.class, ToInteger.FROM_STRING) - ), - - /** - * {@link Locale} - */ - LOCALE(ScalarAttributeType.S, new ConverterMap(Locale.class, null) - .with(String.class, ToLocale.FROM_STRING) - ), - - /** - * {@link Long} - */ - LONG(ScalarAttributeType.N, new ConverterMap(Long.class, Long.TYPE) - .with(Date.class, ToLong.FROM_DATE) - .with(ZonedDateTime.class, ToLong.FROM_TEMPORAL_ACCESSOR.join(ToInstant.FROM_ZONEDDATETIME)) - .with(Number.class, ToLong.FROM_NUMBER) - .with(String.class, ToLong.FROM_STRING) - ), - - /** - * {@link S3Link} - */ - S3_LINK(ScalarAttributeType.S, new ConverterMap(S3Link.class, null)), - - /** - * {@link Short} - */ - SHORT(ScalarAttributeType.N, new ConverterMap(Short.class, Short.TYPE) - .with(Number.class, ToShort.FROM_NUMBER) - .with(String.class, ToShort.FROM_STRING) - ), - - /** - * {@link String} - */ - STRING(ScalarAttributeType.S, new ConverterMap(String.class, null) - .with(Boolean.class, ToString.FROM_BOOLEAN) - .with(byte[].class, ToString.FROM_BYTE_ARRAY) - .with(ByteBuffer.class, ToString.FROM_BYTE_ARRAY.join(ToByteArray.FROM_BYTE_BUFFER)) - .with(Calendar.class, ToString.FROM_DATE.join(ToDate.FROM_CALENDAR)) - .with(Date.class, ToString.FROM_DATE) - .with(ZonedDateTime.class, ToString.FROM_DATE_TIME) - .with(Enum.class, ToString.FROM_ENUM) - .with(Locale.class, ToString.FROM_LOCALE) - .with(TimeZone.class, ToString.FROM_TIME_ZONE) - .with(ZoneId.class, ToString.FROM_ZONE_ID) - .with(Object.class, ToString.FROM_OBJECT) - ), - - /** - * {@link TimeZone} - */ - TIME_ZONE(ScalarAttributeType.S, new ConverterMap(TimeZone.class, null) - .with(String.class, ToTimeZone.FROM_STRING) - ), - /** - * {@link ZoneId} - */ - ZONE_ID(ScalarAttributeType.S, new ConverterMap(ZoneId.class, null) - .with(String.class, ToZoneId.FROM_STRING) - ), - - /** - * {@link java.net.URL} - */ - URL(ScalarAttributeType.S, new ConverterMap(java.net.URL.class, null) - .with(String.class, ToUrl.FROM_STRING) - ), - - /** - * {@link java.net.URI} - */ - URI(ScalarAttributeType.S, new ConverterMap(java.net.URI.class, null) - .with(String.class, ToUri.FROM_STRING) - ), - - /** - * {@link java.util.UUID} - */ - UUID(ScalarAttributeType.S, new ConverterMap(java.util.UUID.class, null) - .with(ByteBuffer.class, ToUuid.FROM_BYTE_BUFFER) - .with(String.class, ToUuid.FROM_STRING) - ), - - /** - * {@link Object}; default must be last - */ - DEFAULT(null, new ConverterMap(Object.class, null)) { - @Override - Converter getConverter(Class sourceType, Class targetType) { - if (sourceType.isEnum() && STRING.map.isAssignableFrom(targetType)) { - 
return (Converter) new ToEnum.FromString(sourceType); - } - return super.getConverter(sourceType, targetType); - } - }; - - /** - * The scalar attribute type. - */ - private final ScalarAttributeType scalarAttributeType; - - /** - * The mapping of conversion functions for this scalar. - */ - private final ConverterMap map; - - /** - * Constructs a new scalar with the specified conversion mappings. - */ - Scalar(ScalarAttributeType scalarAttributeType, ConverterMap map) { - this.scalarAttributeType = scalarAttributeType; - this.map = map; - } - - /** - * Returns the first matching scalar, which may be the same as or a - * supertype of the specified target class. - */ - static Scalar of(Class type) { - for (final Scalar scalar : Scalar.values()) { - if (scalar.is(type)) { - return scalar; - } - } - return DEFAULT; - } - - /** - * Returns the function to convert from the specified target class to - * this scalar type. - */ - Converter getConverter(Class sourceType, Class targetType) { - return map.getConverter(targetType); - } - - /** - * Converts the target instance using the standard type-conversions. - */ - @SuppressWarnings("unchecked") - S convert(Object o) { - return getConverter(this.type(), (Class) o.getClass()).convert(o); - } - - /** - * Determines if the scalar is of the specified scalar attribute type. - */ - boolean is(final ScalarAttributeType scalarAttributeType) { - return this.scalarAttributeType == scalarAttributeType; - } - - /** - * Determines if the class represented by this scalar is either the - * same as or a supertype of the specified target type. - */ - boolean is(final Class type) { - return this.map.isAssignableFrom(type); - } - - /** - * Returns the primary reference type. - */ - @SuppressWarnings("unchecked") - Class type() { - return (Class) this.map.referenceType; - } - } - - /** - * Standard vector types. - */ - abstract static class Vector { - /** - * {@link List} - */ - static final ToList LIST = new ToList(); - /** - * {@link Map} - */ - static final ToMap MAP = new ToMap(); - /** - * {@link Set} - */ - static final ToSet SET = new ToSet(); - - /** - * Determines if the class represented by this vector is either the - * same as or a supertype of the specified target type. 
-         */
-        abstract boolean is(Class<?> type);
-
-        static final class ToList extends Vector {
-            <S, T> DynamoDbTypeConverter<List<S>, List<T>> join(final DynamoDbTypeConverter<S, T> scalar) {
-                return new ListTypeConverter<>(scalar);
-            }
-
-            <S, T> List<S> convert(Collection<T> o, DynamoDbTypeConverter<S, T> scalar) {
-                final List<S> vector = new ArrayList<S>(o.size());
-                for (final T t : o) {
-                    vector.add(scalar.convert(t));
-                }
-                return vector;
-            }
-
-            <S, T> List<T> unconvert(Collection<S> o, DynamoDbTypeConverter<S, T> scalar) {
-                final List<T> vector = new ArrayList<T>(o.size());
-                for (final S s : o) {
-                    vector.add(scalar.unconvert(s));
-                }
-                return vector;
-            }
-
-            @Override
-            boolean is(final Class<?> type) {
-                return List.class.isAssignableFrom(type);
-            }
-
-            private static class ListTypeConverter<S, T> implements DynamoDbTypeConverter<List<S>, List<T>> {
-                private final DynamoDbTypeConverter<S, T> scalar;
-
-                ListTypeConverter(DynamoDbTypeConverter<S, T> scalar) {
-                    this.scalar = scalar;
-                }
-
-                @Override
-                public final List<S> convert(final List<T> o) {
-                    return LIST.convert(o, scalar);
-                }
-
-                @Override
-                public final List<T> unconvert(final List<S> o) {
-                    return LIST.unconvert(o, scalar);
-                }
-            }
-        }
-
-        static final class ToMap extends Vector {
-            <K, S, T> DynamoDbTypeConverter<Map<K, S>, Map<K, T>> join(final DynamoDbTypeConverter<S, T> scalar) {
-                return new MapTypeConverter<>(scalar);
-            }
-
-            <K, S, T> Map<K, S> convert(Map<K, T> o, DynamoDbTypeConverter<S, T> scalar) {
-                final Map<K, S> vector = new LinkedHashMap<K, S>();
-                for (final Map.Entry<K, T> t : o.entrySet()) {
-                    vector.put(t.getKey(), scalar.convert(t.getValue()));
-                }
-                return vector;
-            }
-
-            <K, S, T> Map<K, T> unconvert(Map<K, S> o, DynamoDbTypeConverter<S, T> scalar) {
-                final Map<K, T> vector = new LinkedHashMap<K, T>();
-                for (final Map.Entry<K, S> s : o.entrySet()) {
-                    vector.put(s.getKey(), scalar.unconvert(s.getValue()));
-                }
-                return vector;
-            }
-
-            boolean is(final Class<?> type) {
-                return Map.class.isAssignableFrom(type);
-            }
-
-            private static class MapTypeConverter<K, S, T> implements DynamoDbTypeConverter<Map<K, S>, Map<K, T>> {
-                private final DynamoDbTypeConverter<S, T> scalar;
-
-                MapTypeConverter(DynamoDbTypeConverter<S, T> scalar) {
-                    this.scalar = scalar;
-                }
-
-                @Override
-                public final Map<K, S> convert(final Map<K, T> o) {
-                    return MAP.convert(o, scalar);
-                }
-
-                @Override
-                public final Map<K, T> unconvert(final Map<K, S> o) {
-                    return MAP.unconvert(o, scalar);
-                }
-            }
-        }
-
-        static final class ToSet extends Vector {
-            <S, T> DynamoDbTypeConverter<List<S>, Collection<T>> join(final DynamoDbTypeConverter<S, T> target) {
-                return new SetTypeConverter<>(target);
-            }
-
-            <S, T> Set<T> unconvert(Collection<S> o, DynamoDbTypeConverter<S, T> scalar) {
-                final Set<T> vector = new LinkedHashSet<T>();
-                for (final S s : o) {
-                    if (vector.add(scalar.unconvert(s)) == false) {
-                        throw new DynamoDbMappingException("duplicate value (" + s + ")");
-                    }
-                }
-                return vector;
-            }
-
-            boolean is(final Class<?> type) {
-                return Set.class.isAssignableFrom(type);
-            }
-
-            private static class SetTypeConverter<S, T> implements DynamoDbTypeConverter<List<S>, Collection<T>> {
-                private final DynamoDbTypeConverter<S, T> target;
-
-                SetTypeConverter(DynamoDbTypeConverter<S, T> target) {
-                    this.target = target;
-                }
-
-                @Override
-                public List<S> convert(final Collection<T> o) {
-                    return LIST.convert(o, target);
-                }
-
-                @Override
-                public Collection<T> unconvert(final List<S> o) {
-                    return SET.unconvert(o, target);
-                }
-            }
-        }
-    }
-
-    /**
-     * Converter map.
- */ - private static class ConverterMap extends LinkedHashMap, Converter> { - private static final long serialVersionUID = -1L; - private final Class referenceType; - private final Class primitiveType; - - private ConverterMap(Class referenceType, Class primitiveType) { - this.referenceType = referenceType; - this.primitiveType = primitiveType; - } - - private ConverterMap with(Class targetType, Converter converter) { - put(targetType, converter); - return this; - } - - private boolean isAssignableFrom(Class type) { - return type.isPrimitive() ? primitiveType == type : referenceType.isAssignableFrom(type); - } - - @SuppressWarnings("unchecked") - private Converter getConverter(Class targetType) { - for (final Map.Entry, Converter> entry : entrySet()) { - if (entry.getKey().isAssignableFrom(targetType)) { - return (Converter) entry.getValue(); - } - } - if (isAssignableFrom(targetType)) { - return (Converter) ToObject.FROM_OBJECT; - } - throw new DynamoDbMappingException( - "type [" + targetType + "] is not supported; no conversion from " + referenceType - ); - } - } - - /** - * {@link BigDecimal} conversion functions. - */ - private abstract static class ToBigDecimal extends Converter { - private static final ToBigDecimal FROM_STRING = new ToBigDecimal() { - @Override - public BigDecimal convert(final String o) { - return new BigDecimal(o); - } - }; - } - - /** - * {@link BigInteger} conversion functions. - */ - private abstract static class ToBigInteger extends Converter { - private static final ToBigInteger FROM_STRING = new ToBigInteger() { - @Override - public BigInteger convert(final String o) { - return new BigInteger(o); - } - }; - } - - /** - * {@link Boolean} conversion functions. - */ - private abstract static class ToBoolean extends Converter { - private static final ToBoolean FROM_STRING = new ToBoolean() { - private final Pattern n0 = Pattern.compile("(?i)[N0]"); - private final Pattern y1 = Pattern.compile("(?i)[Y1]"); - - @Override - public Boolean convert(final String o) { - return n0.matcher(o).matches() ? Boolean.FALSE : y1.matcher(o).matches() ? Boolean.TRUE : Boolean.valueOf(o); - } - }; - } - - /** - * {@link Byte} conversion functions. - */ - private abstract static class ToByte extends Converter { - private static final ToByte FROM_NUMBER = new ToByte() { - @Override - public Byte convert(final Number o) { - return o.byteValue(); - } - }; - - private static final ToByte FROM_STRING = new ToByte() { - @Override - public Byte convert(final String o) { - return Byte.valueOf(o); - } - }; - } - - /** - * {@link byte} array conversion functions. - */ - private abstract static class ToByteArray extends Converter { - private static final ToByteArray FROM_BYTE_BUFFER = new ToByteArray() { - @Override - public byte[] convert(final ByteBuffer o) { - if (o.hasArray()) { - return o.array(); - } - final byte[] value = new byte[o.remaining()]; - o.get(value); - return value; - } - }; - - private static final ToByteArray FROM_STRING = new ToByteArray() { - @Override - public byte[] convert(final String o) { - return o.getBytes(Charset.forName("UTF-8")); - } - }; - } - - /** - * {@link ByteBuffer} conversion functions. 
- */ - private abstract static class ToByteBuffer extends Converter { - private static final ToByteBuffer FROM_BYTE_ARRAY = new ToByteBuffer() { - @Override - public ByteBuffer convert(final byte[] o) { - return ByteBuffer.wrap(o); - } - }; - - private static final ToByteBuffer FROM_UUID = new ToByteBuffer() { - @Override - public ByteBuffer convert(final java.util.UUID o) { - final ByteBuffer value = ByteBuffer.allocate(16); - value.putLong(o.getMostSignificantBits()).putLong(o.getLeastSignificantBits()); - value.position(0); - return value; - } - }; - } - - /** - * {@link Calendar} conversion functions. - */ - private abstract static class ToCalendar extends Converter { - private static final ToCalendar FROM_DATE = new ToCalendar() { - @Override - public Calendar convert(final Date o) { - final Calendar value = Calendar.getInstance(); - value.setTime(o); - return value; - } - }; - private static final ToCalendar FROM_INSTANT = new ToCalendar() { - @Override - public Calendar convert(Instant o) { - Calendar cal = Calendar.getInstance(); - cal.setTimeInMillis(o.toEpochMilli()); - return cal; - } - }; - } - - /** - * {@link Character} conversion functions. - */ - private abstract static class ToCharacter extends Converter { - private static final ToCharacter FROM_STRING = new ToCharacter() { - @Override - public Character convert(final String o) { - return Character.valueOf(o.charAt(0)); - } - }; - } - - /** - * {@link Currency} conversion functions. - */ - private abstract static class ToCurrency extends Converter { - private static final ToCurrency FROM_STRING = new ToCurrency() { - @Override - public Currency convert(final String o) { - return Currency.getInstance(o); - } - }; - } - - private abstract static class ToDate extends Converter { - private static final ToDate FROM_INSTANT = new ToDate() { - @Override - public Date convert(Instant o) { - return Date.from(o); - } - }; - private static final ToDate FROM_CALENDAR = new ToDate() { - @Override - public Date convert(Calendar o) { - return o.getTime(); - } - }; - private static final ToDate FROM_STRING = new ToDate() { - @Override - public Date convert(String s) { - return Date.from(DateUtils.parseIso8601Date(s)); - } - }; - } - - /** - * {@link Instant} conversion functions. - */ - private abstract static class ToInstant extends Converter { - private static final ToInstant FROM_CALENDAR = new ToInstant() { - @Override - public Instant convert(final Calendar o) { - return o.toInstant(); - } - }; - - private static final ToInstant FROM_ZONEDDATETIME = new ToInstant() { - @Override - public Instant convert(final ZonedDateTime o) { - return o.toInstant(); - } - }; - - private static final ToInstant FROM_LONG = new ToInstant() { - @Override - public Instant convert(final Long o) { - return Instant.ofEpochMilli(o); - } - }; - - private static final ToInstant FROM_STRING = new ToInstant() { - @Override - public Instant convert(final String o) { - return DateUtils.parseIso8601Date(o); - } - }; - } - - /** - * {@link java.time.ZonedDateTime} conversion functions. 
- */ - private abstract static class ToDateTime extends Converter { - private static final ToDateTime FROM_DATE = new ToDateTime() { - public ZonedDateTime convert(final Date o) { - return ZonedDateTime.ofInstant(o.toInstant(), ZoneOffset.UTC); - } - }; - private static final ToDateTime FROM_INSTANT_AT_UTC = new ToDateTime() { - public ZonedDateTime convert(final Instant o) { - return ZonedDateTime.ofInstant(o, ZoneOffset.UTC); - } - }; - - private static final ToDateTime FROM_CALENDAR = new ToDateTime() { - @Override - public ZonedDateTime convert(Calendar o) { - return ZonedDateTime.ofInstant(Instant.ofEpochMilli(o.getTimeInMillis()), o.getTimeZone().toZoneId()); - } - }; - } - - /** - * {@link Double} conversion functions. - */ - private abstract static class ToDouble extends Converter { - private static final ToDouble FROM_NUMBER = new ToDouble() { - @Override - public Double convert(final Number o) { - return o.doubleValue(); - } - }; - - private static final ToDouble FROM_STRING = new ToDouble() { - @Override - public Double convert(final String o) { - return Double.valueOf(o); - } - }; - } - - /** - * {@link Enum} from {@link String} - */ - private abstract static class ToEnum, T> extends Converter { - private static final class FromString> extends ToEnum { - private final Class sourceType; - - private FromString(final Class sourceType) { - this.sourceType = sourceType; - } - - @Override - public S convert(final String o) { - return Enum.valueOf(sourceType, o); - } - } - } - - /** - * {@link Float} conversion functions. - */ - private abstract static class ToFloat extends Converter { - private static final ToFloat FROM_NUMBER = new ToFloat() { - @Override - public Float convert(final Number o) { - return o.floatValue(); - } - }; - - private static final ToFloat FROM_STRING = new ToFloat() { - @Override - public Float convert(final String o) { - return Float.valueOf(o); - } - }; - } - - /** - * {@link Integer} conversion functions. - */ - private abstract static class ToInteger extends Converter { - private static final ToInteger FROM_NUMBER = new ToInteger() { - @Override - public Integer convert(final Number o) { - return o.intValue(); - } - }; - - private static final ToInteger FROM_STRING = new ToInteger() { - @Override - public Integer convert(final String o) { - return Integer.valueOf(o); - } - }; - } - - /** - * {@link Locale} conversion functions. - */ - private abstract static class ToLocale extends Converter { - private static final ToLocale FROM_STRING = new ToLocale() { - @Override - public Locale convert(final String o) { - final String[] value = o.split("-", 3); - if (value.length == 3) { - return new Locale(value[0], value[1], value[2]); - } - if (value.length == 2) { - return new Locale(value[0], value[1]); - } - return new Locale(value[0]); //JDK7+: return Locale.forLanguageTag(o); - } - }; - } - - /** - * {@link Long} conversion functions. 
- */ - private abstract static class ToLong extends Converter { - private static final ToLong FROM_DATE = new ToLong() { - @Override - public Long convert(final Date o) { - return o.getTime(); - } - }; - private static final ToLong FROM_TEMPORAL_ACCESSOR = new ToLong() { - @Override - public Long convert(final Instant o) { - return o.toEpochMilli(); - } - }; - - private static final ToLong FROM_NUMBER = new ToLong() { - @Override - public Long convert(final Number o) { - return o.longValue(); - } - }; - - private static final ToLong FROM_STRING = new ToLong() { - @Override - public Long convert(final String o) { - return Long.valueOf(o); - } - }; - } - - /** - * {@link Short} conversion functions. - */ - private abstract static class ToShort extends Converter { - private static final ToShort FROM_NUMBER = new ToShort() { - @Override - public Short convert(final Number o) { - return o.shortValue(); - } - }; - - private static final ToShort FROM_STRING = new ToShort() { - @Override - public Short convert(final String o) { - return Short.valueOf(o); - } - }; - } - - /** - * {@link String} conversion functions. - */ - private abstract static class ToString extends Converter { - private static final ToString FROM_BOOLEAN = new ToString() { - @Override - public String convert(final Boolean o) { - return Boolean.TRUE.equals(o) ? "1" : "0"; - } - }; - - private static final ToString FROM_BYTE_ARRAY = new ToString() { - @Override - public String convert(final byte[] o) { - return new String(o, Charset.forName("UTF-8")); - } - }; - - private static final ToString FROM_DATE = new ToString() { - @Override - public String convert(final Date o) { - return DateUtils.formatIso8601Date(o.toInstant()); - } - }; - - private static final ToString FROM_ENUM = new ToString() { - @Override - public String convert(final Enum o) { - return o.name(); - } - }; - - private static final ToString FROM_LOCALE = new ToString() { - @Override - public String convert(final Locale o) { - final StringBuilder value = new StringBuilder(o.getLanguage()); - if (!o.getCountry().isEmpty() || !o.getVariant().isEmpty()) { - value.append("-").append(o.getCountry()); - } - if (!o.getVariant().isEmpty()) { - value.append("-").append(o.getVariant()); - } - return value.toString(); //JDK7+: return o.toLanguageTag(); - } - }; - - private static final ToString FROM_NUMBER = new ToString() { - @Override - public String convert(final Number o) { - return o.toString(); - } - }; - - private static final ToString FROM_TIME_ZONE = new ToString() { - @Override - public String convert(final TimeZone o) { - return o.getID(); - } - }; - private static final ToString FROM_ZONE_ID = new ToString() { - @Override - public String convert(final ZoneId o) { - return o.getId(); - } - }; - - private static final ToString FROM_OBJECT = new ToString() { - @Override - public String convert(final Object o) { - return o.toString(); - } - }; - - private static final ToString FROM_DATE_TIME = new ToString() { - @Override - public String convert(ZonedDateTime o) { - return ISO_DATE_TIME.format(o); - } - }; - } - - /** - * {@link TimeZone} conversion functions. - */ - private abstract static class ToTimeZone extends Converter { - private static final ToTimeZone FROM_STRING = new ToTimeZone() { - @Override - public TimeZone convert(final String o) { - return TimeZone.getTimeZone(o); - } - }; - } - - /** - * {@link ZoneId} conversion functions. 
- */ - private abstract static class ToZoneId extends Converter { - private static final ToZoneId FROM_STRING = new ToZoneId() { - @Override - public ZoneId convert(final String o) { - return ZoneId.of(o); - } - }; - } - - /** - * {@link java.net.URL} conversion functions. - */ - private abstract static class ToUrl extends Converter { - private static final ToUrl FROM_STRING = new ToUrl() { - @Override - public java.net.URL convert(final String o) { - try { - return new java.net.URL(o); - } catch (final java.net.MalformedURLException e) { - throw new IllegalArgumentException("malformed URL", e); - } - } - }; - } - - /** - * {@link java.net.URI} conversion functions. - */ - private abstract static class ToUri extends Converter { - private static final ToUri FROM_STRING = new ToUri() { - @Override - public java.net.URI convert(final String o) { - try { - return new java.net.URI(o); - } catch (final java.net.URISyntaxException e) { - throw new IllegalArgumentException("malformed URI", e); - } - } - }; - } - - /** - * {@link java.util.UUID} conversion functions. - */ - private abstract static class ToUuid extends Converter { - private static final ToUuid FROM_BYTE_BUFFER = new ToUuid() { - @Override - public java.util.UUID convert(final ByteBuffer o) { - return new java.util.UUID(o.getLong(), o.getLong()); - } - }; - - private static final ToUuid FROM_STRING = new ToUuid() { - @Override - public java.util.UUID convert(final String o) { - return java.util.UUID.fromString(o); - } - }; - } - - /** - * {@link Object} conversion functions. - */ - private abstract static class ToObject extends Converter { - private static final ToObject FROM_OBJECT = new ToObject() { - @Override - public Object convert(final Object o) { - return o; - } - }; - } - - /** - * One-way type-converter. - */ - abstract static class Converter { - final Converter join(final Converter target) { - final Converter source = this; - return new Converter() { - @Override - public S convert(final U o) { - return source.convert(target.convert(o)); - } - }; - } - - public abstract S convert(T o); - } - - private static class StandardDynamoDbTypeConverter implements DynamoDbTypeConverter { - private final Converter toSource; - private final Converter toTarget; - - StandardDynamoDbTypeConverter(Converter toSource, Converter toTarget) { - this.toSource = toSource; - this.toTarget = toTarget; - } - - @Override - public final S convert(final T o) { - return toSource.convert(o); - } - - @Override - public final T unconvert(final S o) { - return toTarget.convert(o); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StringListMapTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StringListMapTest.java deleted file mode 100644 index 69d478e13a2e..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/StringListMapTest.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import static org.junit.Assert.assertEquals; - -import java.util.List; -import org.junit.Test; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper.StringListMap; - -/** - * Unit tests for {@link StringListMap}. - */ -public class StringListMapTest { - - private StringListMap map() { - final StringListMap map = new StringListMap(); - for (int i = 1, its = 1; i <= its; i++) { - map.add("A", Integer.valueOf(i)); - } - for (int i = 1, its = 25; i <= its; i++) { - map.add("B", Integer.valueOf(i)); - } - for (int i = 1, its = 10; i <= its; i++) { - map.add("C", Integer.valueOf(i)); - } - for (int i = 1, its = 5; i <= its; i++) { - map.add("D", Integer.valueOf(i)); - } - for (int i = 1, its = 10; i <= its; i++) { - map.add("E", Integer.valueOf(i)); - } - for (int i = 1, its = 25; i <= its; i++) { - map.add("F", Integer.valueOf(i)); - } - for (int i = 1, its = 1; i <= its; i++) { - map.add("G", Integer.valueOf(i)); - } - assertEquals(7, map.size()); - return map; - } - - @Test - public void testSubMaps20PerMap() { - assertSizes(map().subMaps(20, true), new int[][] {{1, 19}, {6, 10, 4}, {1, 10, 9}, {16, 1}}); - } - - @Test - public void testSubMaps11PerMap() { - assertSizes(map().subMaps(11, true), new int[][] {{1, 10}, {11}, {4, 7}, {3, 5, 3}, {7, 4}, {11}, {10, 1}}); - } - - @Test - public void testSubMapsInto4() { - assertSizes(map().subMaps(4, false), new int[][] {{1, 6, 2, 2, 2, 6, 1}, {7, 2, 1, 3, 6}, {6, 3, 1, 3, 6}, {6, 3, 1, 2, 7}}); - } - - private void assertSizes(List> subMaps, int[][] sizes) { - try { - assertEquals(sizes.length, subMaps.size()); - - for (int mapIndex = 0, keyIndex = 0; mapIndex < sizes.length; mapIndex++, keyIndex = 0) { - final StringListMap subMap = subMaps.get(mapIndex); - assertEquals("subMaps[" + mapIndex + "]", sizes[mapIndex].length, subMap.size()); - - for (final List values : subMap.values()) { - assertEquals("subMaps[" + mapIndex + "][" + keyIndex + "]", sizes[mapIndex][keyIndex], values.size()); - keyIndex++; - } - } - } catch (final Error e) { - System.out.println("subMaps=" + subMaps); - throw e; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/TestParameters.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/TestParameters.java deleted file mode 100644 index 0ba70d9924d7..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/TestParameters.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class TestParameters - implements AttributeTransformer.Parameters { - - private Map attributeValues; - - public TestParameters() { - this(null); - } - - public TestParameters(Map attributeValues) { - this.attributeValues = attributeValues; - } - - public Map getAttributeValues() { - return attributeValues; - } - - public boolean isPartialUpdate() { - return false; - } - - public Class modelClass() { - return null; - } - - public DynamoDbMapperConfig mapperConfig() { - return null; - } - - public String getTableName() { - return null; - } - - public String getHashKeyName() { - return null; - } - - public String getRangeKeyName() { - return null; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/UnmarshallerTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/UnmarshallerTest.java deleted file mode 100644 index 3fed28fc67e2..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/UnmarshallerTest.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class UnmarshallerTest extends StandardModelFactoriesV2UnconvertTest { - - private static final ItemConverter CONVERTER = CONFIG.getConversionSchema().getConverter( - new ConversionSchema.Dependencies().with(S3ClientCache.class, new S3ClientCache((AwsCredentialsProvider) null))); - - @Override - protected Object unconvert(Class clazz, Method getter, Method setter, AttributeValue value) { - return CONVERTER.unconvert(getter, setter, value); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/V1MarshallerTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/V1MarshallerTest.java deleted file mode 100644 index f3166984e6fd..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/V1MarshallerTest.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class V1MarshallerTest extends StandardModelFactoriesV1Test { - - private static final ItemConverter CONVERTER = CONFIG.getConversionSchema().getConverter( - new ConversionSchema.Dependencies()); - - @Override - protected AttributeValue convert(Class clazz, Method getter, Object value) { - return CONVERTER.convert(getter, value); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/V2CompatMarshallerTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/V2CompatMarshallerTest.java deleted file mode 100644 index 78ded601a426..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/V2CompatMarshallerTest.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class V2CompatMarshallerTest extends StandardModelFactoriesV2CompatibleTest { - - private static final ItemConverter CONVERTER = CONFIG.getConversionSchema().getConverter( - new ConversionSchema.Dependencies()); - - @Override - protected AttributeValue convert(Class clazz, Method getter, Object value) { - return CONVERTER.convert(getter, value); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/V2MarshallerTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/V2MarshallerTest.java deleted file mode 100644 index cf879edceb8c..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/V2MarshallerTest.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class V2MarshallerTest extends StandardModelFactoriesV2Test { - - private static final ItemConverter CONVERTER = CONFIG.getConversionSchema().getConverter( - new ConversionSchema.Dependencies()); - - @Override - protected AttributeValue convert(Class clazz, Method getter, Object value) { - return CONVERTER.convert(getter, value); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/BooleanSetToNumberSetMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/BooleanSetToNumberSetMarshaller.java deleted file mode 100644 index beff5cf7f943..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/BooleanSetToNumberSetMarshaller.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.NumberSetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A legacy marshaller that marshals sets of Java {@code Booleans} into DynamoDB - * NumberSets, representing {@code true} as '1' and {@code false} as '0'. - * Retained for backwards compatibility with older versions of the mapper which - * don't know about the DynamoDB BOOL type. 
- */ -public class BooleanSetToNumberSetMarshaller - implements NumberSetAttributeMarshaller { - - private static final BooleanSetToNumberSetMarshaller INSTANCE = - new BooleanSetToNumberSetMarshaller(); - - private BooleanSetToNumberSetMarshaller() { - } - - public static BooleanSetToNumberSetMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - @SuppressWarnings("unchecked") - Set booleans = (Set) obj; - List booleanAttributes = new ArrayList(booleans.size()); - - for (Boolean b : booleans) { - if (b == null || b == false) { - booleanAttributes.add("0"); - } else { - booleanAttributes.add("1"); - } - } - - return AttributeValue.builder().ns(booleanAttributes).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/BooleanToBooleanMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/BooleanToBooleanMarshaller.java deleted file mode 100644 index 0045ee31fa23..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/BooleanToBooleanMarshaller.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.BooleanAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals Java {@code Boolean} objects to Dynamodb-native - * {@code BOOL} attribute values. - */ -public class BooleanToBooleanMarshaller implements BooleanAttributeMarshaller { - - private static final BooleanToBooleanMarshaller INSTANCE = - new BooleanToBooleanMarshaller(); - - private BooleanToBooleanMarshaller() { - } - - public static BooleanToBooleanMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - return AttributeValue.builder().bool((Boolean) obj).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/BooleanToNumberMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/BooleanToNumberMarshaller.java deleted file mode 100644 index c757e3d79a40..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/BooleanToNumberMarshaller.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.NumberAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A legacy marshaller that marshals Java {@code Booleans} into DynamoDB - * Numbers, representing {@code true} as '1' and {@code false} as '0'. Retained - * for backwards compatibility with older versions of the mapper which don't - * know about the DynamoDB BOOL type. - */ -public class BooleanToNumberMarshaller implements NumberAttributeMarshaller { - - private static final BooleanToNumberMarshaller INSTANCE = - new BooleanToNumberMarshaller(); - - private BooleanToNumberMarshaller() { - } - - public static BooleanToNumberMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - Boolean bool = (Boolean) obj; - if (bool == null || bool == false) { - return AttributeValue.builder().n("0").build(); - } else { - return AttributeValue.builder().n("1").build(); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteArraySetToBinarySetMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteArraySetToBinarySetMarshaller.java deleted file mode 100644 index 4b4db1f7fd23..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteArraySetToBinarySetMarshaller.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.BinarySetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals sets of Java {@code byte[]}s into DynamoDB - * BinarySet attributes. 
- */ -public class ByteArraySetToBinarySetMarshaller - implements BinarySetAttributeMarshaller { - - private static final ByteArraySetToBinarySetMarshaller INSTANCE = - new ByteArraySetToBinarySetMarshaller(); - - private ByteArraySetToBinarySetMarshaller() { - } - - public static ByteArraySetToBinarySetMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - @SuppressWarnings("unchecked") - Set buffers = (Set) obj; - List attributes = new ArrayList<>(buffers.size()); - - for (byte[] b : buffers) { - attributes.add(SdkBytes.fromByteArray(b)); - } - - return AttributeValue.builder().bs(attributes).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteArrayToBinaryMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteArrayToBinaryMarshaller.java deleted file mode 100644 index f188d8a486af..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteArrayToBinaryMarshaller.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.BinaryAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals Java {@code byte[]}s into DynamoDB Binary - * attributes. - */ -public class ByteArrayToBinaryMarshaller implements BinaryAttributeMarshaller { - - private static final ByteArrayToBinaryMarshaller INSTANCE = - new ByteArrayToBinaryMarshaller(); - - private ByteArrayToBinaryMarshaller() { - } - - public static ByteArrayToBinaryMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - return AttributeValue.builder().b(SdkBytes.fromByteArray((byte[]) obj)).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteBufferSetToBinarySetMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteBufferSetToBinarySetMarshaller.java deleted file mode 100644 index f305ac86a507..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteBufferSetToBinarySetMarshaller.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.BinarySetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals sets of Java {@code ByteBuffer}s into DynamoDB - * BinarySet attributes. - */ -public class ByteBufferSetToBinarySetMarshaller - implements BinarySetAttributeMarshaller { - - private static final ByteBufferSetToBinarySetMarshaller INSTANCE = - new ByteBufferSetToBinarySetMarshaller(); - - private ByteBufferSetToBinarySetMarshaller() { - } - - public static ByteBufferSetToBinarySetMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - @SuppressWarnings("unchecked") - Set buffers = (Set) obj; - List attributes = new ArrayList(buffers.size()); - - for (ByteBuffer b : buffers) { - attributes.add(SdkBytes.fromByteBuffer(b)); - } - - return AttributeValue.builder().bs(attributes).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteBufferToBinaryMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteBufferToBinaryMarshaller.java deleted file mode 100644 index 2f9d7fcd3aa6..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ByteBufferToBinaryMarshaller.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.nio.ByteBuffer; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.BinaryAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals Java {@code ByteBuffer}s into DynamoDB Binary - * attributes. 
- */ -public class ByteBufferToBinaryMarshaller implements BinaryAttributeMarshaller { - - private static final ByteBufferToBinaryMarshaller INSTANCE = - new ByteBufferToBinaryMarshaller(); - - private ByteBufferToBinaryMarshaller() { - } - - public static ByteBufferToBinaryMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - return AttributeValue.builder().b(SdkBytes.fromByteBuffer((ByteBuffer) obj)).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CalendarSetToStringSetMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CalendarSetToStringSetMarshaller.java deleted file mode 100644 index ad5b1fa63330..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CalendarSetToStringSetMarshaller.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import static software.amazon.awssdk.utils.DateUtils.formatIso8601Date; - -import java.util.ArrayList; -import java.util.Calendar; -import java.util.List; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringSetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals sets of Java {@code Calendar} objects into - * DynamoDB StringSets (in ISO 8601 format, ie {"2014-01-01T00:00:00Z", ...}). - */ -public class CalendarSetToStringSetMarshaller - implements StringSetAttributeMarshaller { - - private static final CalendarSetToStringSetMarshaller INSTANCE = - new CalendarSetToStringSetMarshaller(); - - private CalendarSetToStringSetMarshaller() { - } - - public static CalendarSetToStringSetMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - @SuppressWarnings("unchecked") - Set dates = (Set) obj; - - List timestamps = new ArrayList(dates.size()); - for (Calendar calendar : dates) { - timestamps.add(formatIso8601Date(calendar.toInstant())); - } - - return AttributeValue.builder().ss(timestamps).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CalendarToStringMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CalendarToStringMarshaller.java deleted file mode 100644 index 1c734f99b29e..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CalendarToStringMarshaller.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import static software.amazon.awssdk.utils.DateUtils.formatIso8601Date; - -import java.util.Calendar; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals Java {@code Calendar} objects into DynamoDB - * Strings (in ISO 8601 format, ie "2014-01-01T00:00:00Z"). - */ -public class CalendarToStringMarshaller implements StringAttributeMarshaller { - - private static final CalendarToStringMarshaller INSTANCE = - new CalendarToStringMarshaller(); - - private CalendarToStringMarshaller() { - } - - public static CalendarToStringMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - Calendar calendar = (Calendar) obj; - return AttributeValue.builder().s( - formatIso8601Date(calendar.toInstant())).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CollectionToListMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CollectionToListMarshaller.java deleted file mode 100644 index 7135d06ef751..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CollectionToListMarshaller.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.ListAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class CollectionToListMarshaller implements ListAttributeMarshaller { - - private static final CollectionToListMarshaller INSTANCE = - new CollectionToListMarshaller(); - private final ArgumentMarshaller memberMarshaller; - - - private CollectionToListMarshaller() { - this(null); - } - - public CollectionToListMarshaller(ArgumentMarshaller memberMarshaller) { - this.memberMarshaller = memberMarshaller; - } - - public static CollectionToListMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - if (memberMarshaller == null) { - throw new IllegalStateException( - "No member marshaller configured!"); - } - - Collection objects = (Collection) obj; - List values = - new ArrayList(objects.size()); - - for (Object o : objects) { - AttributeValue value; - if (o == null) { - value = AttributeValue.builder().nul(true).build(); - } else { - value = memberMarshaller.marshall(o); - } - - values.add(value); - } - - AttributeValue result = AttributeValue.builder().l(values).build(); - return result; - } - - public ArgumentMarshaller memberMarshaller() { - return memberMarshaller; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CustomMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CustomMarshaller.java deleted file mode 100644 index 61cf9f93bb45..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/CustomMarshaller.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that delegates to an instance of a - * {@code DynamoDBMarshalling}-derived custom marshaler. 
- */ -public class CustomMarshaller implements StringAttributeMarshaller { - - private final Class> marshallerClass; - - public CustomMarshaller( - Class> marshallerClass) { - - this.marshallerClass = marshallerClass; - } - - @SuppressWarnings("unchecked") - private static DynamoDbMarshaller createMarshaller(Class clazz) { - try { - - return (DynamoDbMarshaller) clazz.newInstance(); - - } catch (InstantiationException e) { - throw new DynamoDbMappingException( - "Failed to instantiate custom marshaler for class " + clazz, - e); - - } catch (IllegalAccessException e) { - throw new DynamoDbMappingException( - "Failed to instantiate custom marshaler for class " + clazz, - e); - } - } - - @Override - public AttributeValue marshall(Object obj) { - - // TODO: Would be nice to cache this object, but not sure if we can - // do that now without a breaking change; user's marshalers might - // not all be thread-safe. - - DynamoDbMarshaller marshaler = - createMarshaller(marshallerClass); - - String stringValue = marshaler.marshall(obj); - - if (stringValue == null) { - return null; - } else { - return AttributeValue.builder().s(stringValue).build(); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/DateSetToStringSetMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/DateSetToStringSetMarshaller.java deleted file mode 100644 index 51e942c16bb9..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/DateSetToStringSetMarshaller.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.Set; -import software.amazon.awssdk.utils.DateUtils; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringSetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals sets of Java {@code Date} objects into DynamoDB - * StringSets (in ISO 8601 format, ie {"2014-01-01T00:00:00Z", ...}). 
- */ -public class DateSetToStringSetMarshaller - implements StringSetAttributeMarshaller { - - private static final DateSetToStringSetMarshaller INSTANCE = - new DateSetToStringSetMarshaller(); - - private DateSetToStringSetMarshaller() { - } - - public static DateSetToStringSetMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - @SuppressWarnings("unchecked") - Set dates = (Set) obj; - - List timestamps = new ArrayList(dates.size()); - for (Date date : dates) { - timestamps.add(DateUtils.formatIso8601Date(date.toInstant())); - } - - return AttributeValue.builder().ss(timestamps).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/DateToStringMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/DateToStringMarshaller.java deleted file mode 100644 index 304bd6e606e6..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/DateToStringMarshaller.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import static software.amazon.awssdk.utils.DateUtils.formatIso8601Date; - -import java.util.Date; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals Java {@code Date} objects into DynamoDB Strings - * (in ISO 8601 format, ie "2014-01-01T00:00:00Z"). - */ -public class DateToStringMarshaller implements StringAttributeMarshaller { - - private static final DateToStringMarshaller INSTANCE = - new DateToStringMarshaller(); - - private DateToStringMarshaller() { - } - - public static DateToStringMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - return AttributeValue.builder().s( - formatIso8601Date(Date.class.cast(obj).toInstant())) - .build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/MapToMapMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/MapToMapMarshaller.java deleted file mode 100644 index 4a35b2a9c15d..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/MapToMapMarshaller.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.MapAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class MapToMapMarshaller implements MapAttributeMarshaller { - - private static final MapToMapMarshaller INSTANCE = - new MapToMapMarshaller(); - private final ArgumentMarshaller memberMarshaller; - - - private MapToMapMarshaller() { - memberMarshaller = null; - } - - public MapToMapMarshaller(ArgumentMarshaller memberMarshaller) { - if (memberMarshaller == null) { - throw new NullPointerException("memberMarshaller"); - } - this.memberMarshaller = memberMarshaller; - } - - public static MapToMapMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - if (memberMarshaller == null) { - throw new IllegalStateException( - "No member marshaller configured!"); - } - - @SuppressWarnings("unchecked") - Map map = (Map) obj; - Map values = - new HashMap(); - - for (Map.Entry entry : map.entrySet()) { - AttributeValue value; - if (entry.getValue() == null) { - value = AttributeValue.builder().nul(true).build(); - } else { - value = memberMarshaller.marshall(entry.getValue()); - } - - values.put(entry.getKey(), value); - } - - AttributeValue result = AttributeValue.builder().m(values).build(); - //result.setM(values); - return result; - } - - public ArgumentMarshaller memberMarshaller() { - return memberMarshaller; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/NumberSetToNumberSetMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/NumberSetToNumberSetMarshaller.java deleted file mode 100644 index ccf4b4ced273..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/NumberSetToNumberSetMarshaller.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.NumberSetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals sets of Java {@code Number}s into DynamoDB - * NumberSets. - */ -public class NumberSetToNumberSetMarshaller - implements NumberSetAttributeMarshaller { - - private static final NumberSetToNumberSetMarshaller INSTANCE = - new NumberSetToNumberSetMarshaller(); - - private NumberSetToNumberSetMarshaller() { - } - - public static NumberSetToNumberSetMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - @SuppressWarnings("unchecked") - Set numbers = (Set) obj; - List numberAttributes = new ArrayList(numbers.size()); - - for (Number n : numbers) { - numberAttributes.add(n.toString()); - } - - return AttributeValue.builder().ns(numberAttributes).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/NumberToNumberMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/NumberToNumberMarshaller.java deleted file mode 100644 index c23322942a57..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/NumberToNumberMarshaller.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.NumberAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals any Java {@code Number} to a DynamoDB number. 
- */ -public class NumberToNumberMarshaller implements NumberAttributeMarshaller { - - private static final NumberToNumberMarshaller INSTANCE = - new NumberToNumberMarshaller(); - - private NumberToNumberMarshaller() { - } - - public static NumberToNumberMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - Number number = (Number) obj; - return AttributeValue.builder().n(number.toString()).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ObjectSetToStringSetMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ObjectSetToStringSetMarshaller.java deleted file mode 100644 index 9d028316dced..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ObjectSetToStringSetMarshaller.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringSetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A legacy marshaller that marshals sets of arbitrary Java objects into - * DynamoDB StringSets by using {@link String#valueOf(Object)}. Retained for - * backwards compatibility in case someone is relying on this, but logs a - * warning if ever used since we only know how to unmarshal back to Java - * Strings. - */ -public class ObjectSetToStringSetMarshaller - implements StringSetAttributeMarshaller { - - private static final Logger log = - LoggerFactory.getLogger(ObjectSetToStringSetMarshaller.class); - - private static final ObjectSetToStringSetMarshaller INSTANCE = - new ObjectSetToStringSetMarshaller(); - - private ObjectSetToStringSetMarshaller() { - } - - public static ObjectSetToStringSetMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - Set set = (Set) obj; - - log.warn("Marshaling a set of non-String objects to a DynamoDB " - + "StringSet. You won't be able to read these objects back " - + "out of DynamoDB unless you REALLY know what you're doing: " - + "it's probably a bug. 
If you DO know what you're doing feel" - + "free to ignore this warning, but consider using a custom " - + "marshaler for this instead."); - - List strings = new ArrayList(set.size()); - for (Object o : set) { - strings.add(String.valueOf(o)); - } - - return AttributeValue.builder().ss(strings).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ObjectToMapMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ObjectToMapMarshaller.java deleted file mode 100644 index 2075a16d00d7..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ObjectToMapMarshaller.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.MapAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.ItemConverter; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class ObjectToMapMarshaller implements MapAttributeMarshaller { - - private static final ObjectToMapMarshaller INSTANCE = - new ObjectToMapMarshaller(); - private final ItemConverter converter; - - private ObjectToMapMarshaller() { - converter = null; - } - - public ObjectToMapMarshaller(ItemConverter converter) { - if (converter == null) { - throw new NullPointerException("converter"); - } - this.converter = converter; - } - - public static ObjectToMapMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - Map values = converter.convert(obj); - return AttributeValue.builder().m(values).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ObjectToStringMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ObjectToStringMarshaller.java deleted file mode 100644 index 16869104f830..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/ObjectToStringMarshaller.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals Java {@code Object} objects into DynamoDB - * Strings. - * - * @author Sergei Egorov - */ -public class ObjectToStringMarshaller implements StringAttributeMarshaller { - - private static final ObjectToStringMarshaller INSTANCE = - new ObjectToStringMarshaller(); - - private ObjectToStringMarshaller() { - } - - public static ObjectToStringMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - return AttributeValue.builder().s(obj.toString()).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/S3LinkToStringMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/S3LinkToStringMarshaller.java deleted file mode 100644 index e91d640e9042..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/S3LinkToStringMarshaller.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.S3Link; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals {@code S3Link} objects to DynamoDB Strings, - * using a JSON encoding. For example: {"s3":{"region":"us-west-2", - * "bucket":"my-bucket-name", "key": "foo/bar/baz.txt"}}. 
- */ -public class S3LinkToStringMarshaller implements StringAttributeMarshaller { - - private static final S3LinkToStringMarshaller INSTANCE = - new S3LinkToStringMarshaller(); - - private S3LinkToStringMarshaller() { - } - - public static S3LinkToStringMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - S3Link s3link = (S3Link) obj; - - if (s3link.bucketName() == null || s3link.getKey() == null) { - // insufficient S3 resource specification - return null; - } - - return AttributeValue.builder().s(s3link.toJson()).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/StringSetToStringSetMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/StringSetToStringSetMarshaller.java deleted file mode 100644 index aae6155c4e6f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/StringSetToStringSetMarshaller.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringSetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals sets of Java {@code String}s to DynamoDB - * StringSets. - */ -public class StringSetToStringSetMarshaller - implements StringSetAttributeMarshaller { - - private static final StringSetToStringSetMarshaller INSTANCE = - new StringSetToStringSetMarshaller(); - - private StringSetToStringSetMarshaller() { - } - - public static StringSetToStringSetMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - @SuppressWarnings("unchecked") - Set set = (Set) obj; - - List strings = new ArrayList(set.size()); - for (String s : set) { - strings.add(s); - } - - return AttributeValue.builder().ss(strings).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/StringToStringMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/StringToStringMarshaller.java deleted file mode 100644 index abd03f07182f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/StringToStringMarshaller.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals Java {@code String} objects to DynamoDB Strings. - */ -public class StringToStringMarshaller implements StringAttributeMarshaller { - - private static final StringToStringMarshaller INSTANCE = - new StringToStringMarshaller(); - - private StringToStringMarshaller() { - } - - public static StringToStringMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - String string = (String) obj; - if (string.length() == 0) { - // Sticking with the legacy behavior for now. - return null; - } - - return AttributeValue.builder().s(string).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/UuidSetToStringSetMarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/UuidSetToStringSetMarshaller.java deleted file mode 100644 index cae6047ff8ac..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/UuidSetToStringSetMarshaller.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentMarshaller.StringSetAttributeMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A marshaller that marshals sets of Java {@code Object} objects into - * DynamoDB StringSets. 
- */ -public class UuidSetToStringSetMarshaller - implements StringSetAttributeMarshaller { - - private static final UuidSetToStringSetMarshaller INSTANCE = - new UuidSetToStringSetMarshaller(); - - private UuidSetToStringSetMarshaller() { - } - - public static UuidSetToStringSetMarshaller instance() { - return INSTANCE; - } - - @Override - public AttributeValue marshall(Object obj) { - @SuppressWarnings("unchecked") - Set<UUID> uuids = (Set<UUID>) obj; - - List<String> strings = new ArrayList<String>(uuids.size()); - for (UUID uuid : uuids) { - strings.add(uuid.toString()); - } - - return AttributeValue.builder().ss(strings).build(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/package-info.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/package-info.java deleted file mode 100644 index be3929bcef17..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/marshallers/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -/** - * Standard implementations of {@code ArgumentMarshaller}. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.marshallers; diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BUnmarshaller.java deleted file mode 100644 index 535403cb2dc5..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BUnmarshaller.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License.
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -abstract class BUnmarshaller implements ArgumentUnmarshaller { - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.b() == null) { - throw new DynamoDbMappingException("Expected B in value " + value + " when invoking " + setter); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigDecimalSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigDecimalSetUnmarshaller.java deleted file mode 100644 index a4300cfe3dbe..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigDecimalSetUnmarshaller.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.math.BigDecimal; -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB NumberSets into sets of Java - * {@code BigDecimal}s. - */ -public class BigDecimalSetUnmarshaller extends NsUnmarshaller { - - private static final BigDecimalSetUnmarshaller INSTANCE = - new BigDecimalSetUnmarshaller(); - - private BigDecimalSetUnmarshaller() { - } - - public static BigDecimalSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - for (String s : value.ns()) { - result.add(new BigDecimal(s)); - } - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigDecimalUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigDecimalUnmarshaller.java deleted file mode 100644 index 2ff631e45575..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigDecimalUnmarshaller.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.math.BigDecimal; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB Numbers into Java - * {@code BigDecimal}s. - */ -public class BigDecimalUnmarshaller extends NUnmarshaller { - - private static final BigDecimalUnmarshaller INSTANCE = - new BigDecimalUnmarshaller(); - - private BigDecimalUnmarshaller() { - } - - public static BigDecimalUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return new BigDecimal(value.n()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigIntegerSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigIntegerSetUnmarshaller.java deleted file mode 100644 index d008cbb327de..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigIntegerSetUnmarshaller.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.math.BigInteger; -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB NumberSets into sets of Java - * {@code BigInteger}s. - */ -public class BigIntegerSetUnmarshaller extends NsUnmarshaller { - - private static final BigIntegerSetUnmarshaller INSTANCE = - new BigIntegerSetUnmarshaller(); - - private BigIntegerSetUnmarshaller() { - } - - public static BigIntegerSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - for (String s : value.ns()) { - result.add(new BigInteger(s)); - } - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigIntegerUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigIntegerUnmarshaller.java deleted file mode 100644 index 3ec56b6cc1b0..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BigIntegerUnmarshaller.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.math.BigInteger; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB Numbers into Java - * {@code BigInteger}s. - */ -public class BigIntegerUnmarshaller extends NUnmarshaller { - - private static final BigIntegerUnmarshaller INSTANCE = - new BigIntegerUnmarshaller(); - - private BigIntegerUnmarshaller() { - } - - public static BigIntegerUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return new BigInteger(value.n()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BooleanSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BooleanSetUnmarshaller.java deleted file mode 100644 index 8a0577601bb0..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BooleanSetUnmarshaller.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A special unmarshaller for Set<Boolean>, which the V1 schema stores as - * an NS using 0/1 for true/false. In the V2 schema these fall through to - * the {@code ObjectSetToListMarshaller} which stores them as an L or BOOLs. 
- */ -public class BooleanSetUnmarshaller implements ArgumentUnmarshaller { - - private static final BooleanSetUnmarshaller INSTANCE = - new BooleanSetUnmarshaller(); - - private BooleanSetUnmarshaller() { - } - - public static BooleanSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.ns() == null && value.l() == null) { - throw new DynamoDbMappingException( - "Expected either L or NS in value " + value - + " when invoking " + setter); - } - } - - @Override - public Object unmarshall(AttributeValue value) { - if (value.l() != null) { - return unmarshallList(value.l()); - } else { - return unmarshallNs(value.ns()); - } - } - - private Set unmarshallList(List values) { - Set result = new HashSet(); - - for (AttributeValue value : values) { - Boolean bool; - if (Boolean.TRUE.equals(value.nul())) { - bool = null; - } else { - bool = value.bool(); - if (bool == null) { - throw new DynamoDbMappingException( - value + " is not a boolean"); - } - } - - if (!result.add(bool)) { - throw new DynamoDbMappingException( - "Duplicate value (" + bool + ") found in " - + values); - } - } - - return result; - } - - private Set unmarshallNs(List values) { - Set result = new HashSet(); - - for (String s : values) { - if ("1".equals(s)) { - result.add(Boolean.TRUE); - } else if ("0".equals(s)) { - result.add(Boolean.FALSE); - } else { - throw new IllegalArgumentException( - "Expected '1' or '0' for boolean value, was " + s); - } - } - - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BooleanUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BooleanUnmarshaller.java deleted file mode 100644 index 35a0cc49b702..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BooleanUnmarshaller.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB Bools (or Numbers) into Java - * {@code Boolean}s. Numbers are handled for backwards compatibility with - * versions of the mapper written before the DynamoDB native Boolean type - * was added, which stored Java {@code Boolean}s as either the Number 0 (false) - * or 1 (true). 
- */ -public class BooleanUnmarshaller implements ArgumentUnmarshaller { - - private static final BooleanUnmarshaller INSTANCE = - new BooleanUnmarshaller(); - - private BooleanUnmarshaller() { - } - - public static BooleanUnmarshaller instance() { - return INSTANCE; - } - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.n() == null && value.bool() == null) { - throw new DynamoDbMappingException( - "Expected either N or BOOL in value " + value - + " when invoking " + setter); - } - } - - @Override - public Object unmarshall(AttributeValue value) { - if (value.bool() != null) { - return value.bool(); - } - if ("1".equals(value.n())) { - return Boolean.TRUE; - } - if ("0".equals(value.n())) { - return Boolean.FALSE; - } - - throw new IllegalArgumentException( - "Expected '1', '0', or BOOL value for boolean value, was " - + value); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BsUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BsUnmarshaller.java deleted file mode 100644 index 9529d8857679..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/BsUnmarshaller.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -abstract class BsUnmarshaller implements ArgumentUnmarshaller { - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.bs() == null) { - throw new DynamoDbMappingException("Expected BS in value " + value + " when invoking " + setter); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteArraySetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteArraySetUnmarshaller.java deleted file mode 100644 index 129136773ed9..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteArraySetUnmarshaller.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.nio.ByteBuffer; -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals BinarySet values as sets of Java - * {@code byte[]}s. - */ -public class ByteArraySetUnmarshaller extends BsUnmarshaller { - - private static final ByteArraySetUnmarshaller INSTANCE = - new ByteArraySetUnmarshaller(); - - private ByteArraySetUnmarshaller() { - } - - public static ByteArraySetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - - for (SdkBytes bytesBuffer : value.bs()) { - ByteBuffer buffer = bytesBuffer.asByteBuffer(); - if (buffer.hasArray()) { - result.add(buffer.array()); - } else { - byte[] array = new byte[buffer.remaining()]; - buffer.get(array); - result.add(array); - } - } - - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteArrayUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteArrayUnmarshaller.java deleted file mode 100644 index 14454a43beee..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteArrayUnmarshaller.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.nio.ByteBuffer; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals Binary values as Java {@code byte[]}s. 
- */ -public class ByteArrayUnmarshaller extends BUnmarshaller { - - private static final ByteArrayUnmarshaller INSTANCE = - new ByteArrayUnmarshaller(); - - private ByteArrayUnmarshaller() { - } - - public static ByteArrayUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return value.b().asByteArray(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteBufferSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteBufferSetUnmarshaller.java deleted file mode 100644 index 0b85b70edde2..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteBufferSetUnmarshaller.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.stream.Collectors; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals BinarySet values as sets of Java - * {@code ByteBuffer}s. - */ -public class ByteBufferSetUnmarshaller extends BsUnmarshaller { - - private static final ByteBufferSetUnmarshaller INSTANCE = - new ByteBufferSetUnmarshaller(); - - private ByteBufferSetUnmarshaller() { - } - - public static ByteBufferSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return value.bs().stream() - .map(SdkBytes::asByteBuffer) - .collect(Collectors.toSet()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteBufferUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteBufferUnmarshaller.java deleted file mode 100644 index cc5b986226ed..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteBufferUnmarshaller.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals Binary values as Java {@code ByteBuffer}s. - */ -public class ByteBufferUnmarshaller extends BUnmarshaller { - - private static final ByteBufferUnmarshaller INSTANCE = - new ByteBufferUnmarshaller(); - - private ByteBufferUnmarshaller() { - } - - public static ByteBufferUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return value.b().asByteBuffer(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteSetUnmarshaller.java deleted file mode 100644 index 1e29eaeaac0f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteSetUnmarshaller.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB NumberSets into sets of Java - * {@code Byte}s. - */ -public class ByteSetUnmarshaller extends NsUnmarshaller { - - private static final ByteSetUnmarshaller INSTANCE = - new ByteSetUnmarshaller(); - - private ByteSetUnmarshaller() { - } - - public static ByteSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - for (String s : value.ns()) { - result.add(Byte.valueOf(s)); - } - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteUnmarshaller.java deleted file mode 100644 index 55773a6459f4..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ByteUnmarshaller.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB Numbers into Java - * {@code Byte}s. - */ -public class ByteUnmarshaller extends NUnmarshaller { - - private static final ByteUnmarshaller INSTANCE = - new ByteUnmarshaller(); - - private ByteUnmarshaller() { - } - - public static ByteUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return Byte.valueOf(value.n()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/CalendarSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/CalendarSetUnmarshaller.java deleted file mode 100644 index b1c9da0a10ec..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/CalendarSetUnmarshaller.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.sql.Date; -import java.util.Calendar; -import java.util.GregorianCalendar; -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.utils.DateUtils; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals sets of ISO-8601-formatted dates as sets of - * Java {@code Calendar} objects. - */ -public class CalendarSetUnmarshaller extends SsUnmarshaller { - - private static final CalendarSetUnmarshaller INSTANCE = - new CalendarSetUnmarshaller(); - - private CalendarSetUnmarshaller() { - } - - public static CalendarSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - - for (String s : value.ss()) { - Calendar cal = GregorianCalendar.getInstance(); - cal.setTime(Date.from(DateUtils.parseIso8601Date(s))); - result.add(cal); - } - - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/CalendarUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/CalendarUnmarshaller.java deleted file mode 100644 index 95d3bbe211a2..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/CalendarUnmarshaller.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.Calendar; -import java.util.Date; -import java.util.GregorianCalendar; -import software.amazon.awssdk.utils.DateUtils; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals ISO-8601-formatted dates as Java - * {@code Calendar} objects. - */ -public class CalendarUnmarshaller extends SUnmarshaller { - - private static final CalendarUnmarshaller INSTANCE = - new CalendarUnmarshaller(); - - private CalendarUnmarshaller() { - } - - public static CalendarUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Calendar cal = GregorianCalendar.getInstance(); - cal.setTime(Date.from(DateUtils.parseIso8601Date(value.s()))); - return cal; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/CustomUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/CustomUnmarshaller.java deleted file mode 100644 index 6ed62268bacc..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/CustomUnmarshaller.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that delegates to an instance of a - * {@code DynamoDBMarshaller}-derived custom marshaler. 
- */ -public class CustomUnmarshaller extends SUnmarshaller { - - private final Class<?> targetClass; - private final Class<? extends DynamoDbMarshaller<?>> unmarshallerClass; - - public CustomUnmarshaller( - Class<?> targetClass, - Class<? extends DynamoDbMarshaller<?>> unmarshallerClass) { - - this.targetClass = targetClass; - this.unmarshallerClass = unmarshallerClass; - } - - @SuppressWarnings({"rawtypes"}) - private static DynamoDbMarshaller createUnmarshaller(Class<?> clazz) { - try { - - return (DynamoDbMarshaller) clazz.newInstance(); - - } catch (InstantiationException e) { - throw new DynamoDbMappingException( - "Failed to instantiate custom marshaler for class " + clazz, - e); - - } catch (IllegalAccessException e) { - throw new DynamoDbMappingException( - "Failed to instantiate custom marshaler for class " + clazz, - e); - } - } - - @Override - @SuppressWarnings({"rawtypes", "unchecked"}) - public Object unmarshall(AttributeValue value) { - - // TODO: Would be nice to cache this object, but not sure if we can - // do that now without a breaking change; user's unmarshallers - // might not all be thread-safe. - - DynamoDbMarshaller unmarshaller = - createUnmarshaller(unmarshallerClass); - - return unmarshaller.unmarshall(targetClass, value.s()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DateSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DateSetUnmarshaller.java deleted file mode 100644 index defa461a1444..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DateSetUnmarshaller.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.Date; -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.utils.DateUtils; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals sets of ISO-8601-formatted dates as sets of - * Java {@code Date} objects.
- */ -public class DateSetUnmarshaller extends SsUnmarshaller { - - private static final DateSetUnmarshaller INSTANCE = - new DateSetUnmarshaller(); - - private DateSetUnmarshaller() { - } - - public static DateSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - - for (String s : value.ss()) { - result.add(Date.from(DateUtils.parseIso8601Date(s))); - } - - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DateUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DateUnmarshaller.java deleted file mode 100644 index da650b7a5906..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DateUnmarshaller.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.Date; -import software.amazon.awssdk.utils.DateUtils; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals ISO-8601-formatted dates as Java - * {@code Date} objects. - */ -public class DateUnmarshaller extends SUnmarshaller { - - private static final DateUnmarshaller INSTANCE = - new DateUnmarshaller(); - - private DateUnmarshaller() { - } - - public static DateUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return Date.from(DateUtils.parseIso8601Date(value.s())); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DoubleSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DoubleSetUnmarshaller.java deleted file mode 100644 index 77397c9db8de..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DoubleSetUnmarshaller.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB NumberSets into sets of Java - * {@code Double}s. - */ -public class DoubleSetUnmarshaller extends NsUnmarshaller { - - private static final DoubleSetUnmarshaller INSTANCE = - new DoubleSetUnmarshaller(); - - private DoubleSetUnmarshaller() { - } - - public static DoubleSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - for (String s : value.ns()) { - result.add(Double.valueOf(s)); - } - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DoubleUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DoubleUnmarshaller.java deleted file mode 100644 index 9527458c812c..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/DoubleUnmarshaller.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB Numbers into Java {@code Double}s. - */ -public class DoubleUnmarshaller extends NUnmarshaller { - - private static final DoubleUnmarshaller INSTANCE = - new DoubleUnmarshaller(); - - private DoubleUnmarshaller() { - } - - public static DoubleUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return Double.valueOf(value.n()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/FloatSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/FloatSetUnmarshaller.java deleted file mode 100644 index 65a21f6d502b..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/FloatSetUnmarshaller.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB NumberSets into sets of Java - * {@code Float}s. - */ -public class FloatSetUnmarshaller extends NsUnmarshaller { - - private static final FloatSetUnmarshaller INSTANCE = - new FloatSetUnmarshaller(); - - private FloatSetUnmarshaller() { - } - - public static FloatSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - for (String s : value.ns()) { - result.add(Float.valueOf(s)); - } - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/FloatUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/FloatUnmarshaller.java deleted file mode 100644 index f38466798a2f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/FloatUnmarshaller.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB Numbers into Java - * {@code Float}s. - */ -public class FloatUnmarshaller extends NUnmarshaller { - - private static final FloatUnmarshaller INSTANCE = - new FloatUnmarshaller(); - - private FloatUnmarshaller() { - } - - public static FloatUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return Float.valueOf(value.n()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/IntegerSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/IntegerSetUnmarshaller.java deleted file mode 100644 index 5a6a25e70109..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/IntegerSetUnmarshaller.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB NumberSets into sets of Java - * {@code Integer}s. - */ -public class IntegerSetUnmarshaller extends NsUnmarshaller { - - private static final IntegerSetUnmarshaller INSTANCE = - new IntegerSetUnmarshaller(); - - private IntegerSetUnmarshaller() { - } - - public static IntegerSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - for (String s : value.ns()) { - result.add(Integer.valueOf(s)); - } - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/IntegerUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/IntegerUnmarshaller.java deleted file mode 100644 index 1a873aefdc3f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/IntegerUnmarshaller.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB Numbers into Java - * {@code Integer}s. - */ -public class IntegerUnmarshaller extends NUnmarshaller { - - private static final IntegerUnmarshaller INSTANCE = - new IntegerUnmarshaller(); - - private IntegerUnmarshaller() { - } - - public static IntegerUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return Integer.valueOf(value.n()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/LUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/LUnmarshaller.java deleted file mode 100644 index 302610c8db3f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/LUnmarshaller.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -abstract class LUnmarshaller implements ArgumentUnmarshaller { - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.l() == null) { - throw new DynamoDbMappingException("Expected L in value " + value + " when invoking " + setter); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ListUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ListUnmarshaller.java deleted file mode 100644 index 49d490061b5a..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ListUnmarshaller.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.text.ParseException; -import java.util.ArrayList; -import java.util.List; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals Lists into Java {@code Lists}. 
- */ -public class ListUnmarshaller extends LUnmarshaller { - - private static final ListUnmarshaller INSTANCE = new ListUnmarshaller(); - private final ArgumentUnmarshaller memberUnmarshaller; - - private ListUnmarshaller() { - memberUnmarshaller = null; - } - - public ListUnmarshaller(ArgumentUnmarshaller memberUnmarshaller) { - if (memberUnmarshaller == null) { - throw new NullPointerException("memberUnmarshaller"); - } - this.memberUnmarshaller = memberUnmarshaller; - } - - public static ListUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) throws ParseException { - List values = value.l(); - List objects = new ArrayList(values.size()); - - for (AttributeValue v : values) { - memberUnmarshaller.typeCheck(v, null); - objects.add(memberUnmarshaller.unmarshall(v)); - } - - return objects; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/LongSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/LongSetUnmarshaller.java deleted file mode 100644 index fd3baa590c56..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/LongSetUnmarshaller.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB NumberSets into sets of Java - * {@code Long}s. - */ -public class LongSetUnmarshaller extends NsUnmarshaller { - - private static final LongSetUnmarshaller INSTANCE = - new LongSetUnmarshaller(); - - private LongSetUnmarshaller() { - } - - public static LongSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - for (String s : value.ns()) { - result.add(Long.valueOf(s)); - } - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/LongUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/LongUnmarshaller.java deleted file mode 100644 index 4c0a6923e5f2..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/LongUnmarshaller.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB Numbers into Java - * {@code Long}s. - */ -public class LongUnmarshaller extends NUnmarshaller { - - private static final LongUnmarshaller INSTANCE = - new LongUnmarshaller(); - - private LongUnmarshaller() { - } - - public static LongUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return Long.valueOf(value.n()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/MUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/MUnmarshaller.java deleted file mode 100644 index a6901461a0c5..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/MUnmarshaller.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -abstract class MUnmarshaller implements ArgumentUnmarshaller { - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.m() == null) { - throw new DynamoDbMappingException("Expected M in value " + value + " when invoking " + setter); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/MapUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/MapUnmarshaller.java deleted file mode 100644 index 7aaddf400e4a..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/MapUnmarshaller.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.text.ParseException; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class MapUnmarshaller extends MUnmarshaller { - - private static final MapUnmarshaller INSTANCE = new MapUnmarshaller(); - private final ArgumentUnmarshaller memberUnmarshaller; - - private MapUnmarshaller() { - memberUnmarshaller = null; - } - - public MapUnmarshaller(ArgumentUnmarshaller memberUnmarshaller) { - if (memberUnmarshaller == null) { - throw new NullPointerException("memberUnmarshaller"); - } - this.memberUnmarshaller = memberUnmarshaller; - } - - public static MapUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) throws ParseException { - Map map = value.m(); - Map result = new HashMap(); - - for (Map.Entry entry : map.entrySet()) { - memberUnmarshaller.typeCheck(entry.getValue(), null); - result.put(entry.getKey(), - memberUnmarshaller.unmarshall(entry.getValue())); - } - - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/NUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/NUnmarshaller.java deleted file mode 100644 index f4a624922ac3..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/NUnmarshaller.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -abstract class NUnmarshaller implements ArgumentUnmarshaller { - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.n() == null) { - throw new DynamoDbMappingException("Expected N in value " + value + " when invoking " + setter); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/NsUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/NsUnmarshaller.java deleted file mode 100644 index 5940f8eb3670..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/NsUnmarshaller.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -abstract class NsUnmarshaller implements ArgumentUnmarshaller { - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.ns() == null) { - throw new DynamoDbMappingException("Expected NS in value " + value + " when invoking " + setter); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/NullableUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/NullableUnmarshaller.java deleted file mode 100644 index 897680ab882f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/NullableUnmarshaller.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import java.text.ParseException; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class NullableUnmarshaller implements ArgumentUnmarshaller { - - private final ArgumentUnmarshaller wrapped; - - public NullableUnmarshaller(ArgumentUnmarshaller wrapped) { - if (wrapped == null) { - throw new NullPointerException("wrapped"); - } - this.wrapped = wrapped; - } - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.nul() == null) { - wrapped.typeCheck(value, setter); - } - } - - @Override - public Object unmarshall(AttributeValue value) throws ParseException { - if (value.nul() != null) { - return null; - } - return wrapped.unmarshall(value); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ObjectSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ObjectSetUnmarshaller.java deleted file mode 100644 index 91627e6979ce..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ObjectSetUnmarshaller.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.text.ParseException; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class ObjectSetUnmarshaller extends LUnmarshaller { - - private static final ObjectSetUnmarshaller INSTANCE = - new ObjectSetUnmarshaller(); - private final ArgumentUnmarshaller memberUnmarshaller; - - private ObjectSetUnmarshaller() { - memberUnmarshaller = null; - } - - public ObjectSetUnmarshaller(ArgumentUnmarshaller memberUnmarshaller) { - if (memberUnmarshaller == null) { - throw new NullPointerException("memberUnmarshaller"); - } - this.memberUnmarshaller = memberUnmarshaller; - } - - public static ObjectSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) throws ParseException { - List values = value.l(); - - // As in the LinkedHashSet(Collection) constructor. 
- int size = Math.max(values.size() * 2, 11); - Set&lt;Object&gt; objects = new LinkedHashSet&lt;&gt;(size); - - for (AttributeValue v : values) { - memberUnmarshaller.typeCheck(v, null); - Object o = memberUnmarshaller.unmarshall(v); - if (!objects.add(o)) { - throw new DynamoDbMappingException( - "Duplicate value (" + o + ") found in " + values); - } - } - - return objects; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ObjectUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ObjectUnmarshaller.java deleted file mode 100644 index 9c2415285747..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ObjectUnmarshaller.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.text.ParseException; -import java.util.Map; -import software.amazon.awssdk.services.dynamodb.datamodeling.ItemConverter; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class ObjectUnmarshaller extends MUnmarshaller { - - private static final ObjectUnmarshaller INSTANCE = new ObjectUnmarshaller(); - private final ItemConverter converter; - private final Class&lt;?&gt; clazz; - - private ObjectUnmarshaller() { - converter = null; - clazz = null; - } - - public ObjectUnmarshaller(ItemConverter converter, Class&lt;?&gt; clazz) { - if (converter == null) { - throw new NullPointerException("converter"); - } - if (clazz == null) { - throw new NullPointerException("clazz"); - } - - this.converter = converter; - this.clazz = clazz; - } - - public static ObjectUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) throws ParseException { - Map&lt;String, AttributeValue&gt; map = value.m(); - return converter.unconvert(clazz, map); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/S3LinkUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/S3LinkUnmarshaller.java deleted file mode 100644 index 488777f8ee7b..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/S3LinkUnmarshaller.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import software.amazon.awssdk.services.dynamodb.datamodeling.S3ClientCache; -import software.amazon.awssdk.services.dynamodb.datamodeling.S3Link; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -public class S3LinkUnmarshaller extends SUnmarshaller { - - private static final S3LinkUnmarshaller INSTANCE = new S3LinkUnmarshaller(); - private final S3ClientCache clientCache; - - - private S3LinkUnmarshaller() { - this(null); - } - - public S3LinkUnmarshaller(S3ClientCache clientCache) { - this.clientCache = clientCache; - } - - public static S3LinkUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - if (clientCache == null) { - throw new IllegalStateException( - "Mapper must be constructed with S3 AWS Credentials to " - + "load S3Link"); - } - - return S3Link.fromJson(clientCache, value.s()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/SUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/SUnmarshaller.java deleted file mode 100644 index 385172193459..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/SUnmarshaller.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -abstract class SUnmarshaller implements ArgumentUnmarshaller { - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.s() == null) { - throw new DynamoDbMappingException("Expected S in value " + value + " when invoking " + setter); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ShortSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ShortSetUnmarshaller.java deleted file mode 100644 index 889eb5a1588c..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ShortSetUnmarshaller.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.HashSet; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB NumberSets into sets of Java - * {@code Short}s. - */ -public class ShortSetUnmarshaller extends NsUnmarshaller { - - private static final ShortSetUnmarshaller INSTANCE = - new ShortSetUnmarshaller(); - - private ShortSetUnmarshaller() { - } - - public static ShortSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - Set result = new HashSet(); - for (String s : value.ns()) { - result.add(Short.valueOf(s)); - } - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ShortUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ShortUnmarshaller.java deleted file mode 100644 index d0068a4cccd3..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/ShortUnmarshaller.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB Numbers into Java - * {@code Short}s. - */ -public class ShortUnmarshaller extends NUnmarshaller { - - private static final ShortUnmarshaller INSTANCE = - new ShortUnmarshaller(); - - private ShortUnmarshaller() { - } - - public static ShortUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return Short.valueOf(value.n()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/SsUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/SsUnmarshaller.java deleted file mode 100644 index b8244c1e82a5..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/SsUnmarshaller.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.lang.reflect.Method; -import software.amazon.awssdk.services.dynamodb.datamodeling.ArgumentUnmarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -abstract class SsUnmarshaller implements ArgumentUnmarshaller { - - @Override - public void typeCheck(AttributeValue value, Method setter) { - if (value.ss() == null) { - throw new DynamoDbMappingException("Expected SS in value " + value + " when invoking " + setter); - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/StringSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/StringSetUnmarshaller.java deleted file mode 100644 index e53f02a160d1..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/StringSetUnmarshaller.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.HashSet; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB StringSets as sets of Java - * {@code String} objects. - */ -public class StringSetUnmarshaller extends SsUnmarshaller { - - private static final StringSetUnmarshaller INSTANCE = - new StringSetUnmarshaller(); - - private StringSetUnmarshaller() { - } - - public static StringSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return new HashSet(value.ss()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/StringUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/StringUnmarshaller.java deleted file mode 100644 index cf7195b86fcc..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/StringUnmarshaller.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals DynamoDB Strings as Java {@code String} - * objects. - */ -public class StringUnmarshaller extends SUnmarshaller { - - private static final StringUnmarshaller INSTANCE = - new StringUnmarshaller(); - - private StringUnmarshaller() { - } - - public static StringUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Object unmarshall(AttributeValue value) { - return value.s(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/UuidSetUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/UuidSetUnmarshaller.java deleted file mode 100644 index ceda8d596532..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/UuidSetUnmarshaller.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.HashSet; -import java.util.Set; -import java.util.UUID; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals sets of UUIDs as sets of - * Java {@code UUID} objects. - */ -public class UuidSetUnmarshaller extends SsUnmarshaller { - - private static final UuidSetUnmarshaller INSTANCE = - new UuidSetUnmarshaller(); - - private UuidSetUnmarshaller() { - } - - public static UuidSetUnmarshaller instance() { - return INSTANCE; - } - - @Override - public Set unmarshall(AttributeValue value) { - Set result = new HashSet(); - - for (String s : value.ss()) { - result.add(UUID.fromString(s)); - } - - return result; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/UuidUnmarshaller.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/UuidUnmarshaller.java deleted file mode 100644 index 32b8f55dd0cb..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/UuidUnmarshaller.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; - -import java.util.UUID; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * An unmarshaller that unmarshals UUIDs as Java - * {@code UUID} objects. - */ -public class UuidUnmarshaller extends SUnmarshaller { - - private static final UuidUnmarshaller INSTANCE = - new UuidUnmarshaller(); - - private UuidUnmarshaller() { - } - - public static UuidUnmarshaller instance() { - return INSTANCE; - } - - @Override - public UUID unmarshall(AttributeValue value) { - return UUID.fromString(value.s()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/package-info.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/package-info.java deleted file mode 100644 index ae581f3569b1..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/datamodeling/unmarshallers/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -/** - * Standard implementations of {@code ArgumentUnmarshaller}. - */ - -package software.amazon.awssdk.services.dynamodb.datamodeling.unmarshallers; diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/GenerateCreateTableRequestTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/GenerateCreateTableRequestTest.java deleted file mode 100644 index c30986e5d931..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/GenerateCreateTableRequestTest.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.awssdk.testutils.UnorderedCollectionComparator; -import utils.test.util.DynamoDBTestBase; - -/** - * Tests on the DynamoDBMapper.generateCreateTableRequest method. - */ -public class GenerateCreateTableRequestTest extends DynamoDBTestBase { - - private static DynamoDbMapper mapper; - - @BeforeClass - public static void setUp() { - dynamo = DynamoDbClient.builder() - .credentialsProvider(AnonymousCredentialsProvider.create()) - .region(Region.US_WEST_2) - .build(); - mapper = new DynamoDbMapper(dynamo); - } - - private static boolean equalLsi(Collection a, Collection b) { - return UnorderedCollectionComparator.equalUnorderedCollections(a, b, new LocalSecondaryIndexDefinitionComparator()); - } - - private static boolean equalGsi(Collection a, Collection b) { - return UnorderedCollectionComparator.equalUnorderedCollections(a, b, new GlobalSecondaryIndexDefinitionComparator()); - } - - @Test - @Ignore // No longer works because mapper is not aware of auto construct lists - public void testParseIndexRangeKeyClass() { - CreateTableRequest request = mapper.generateCreateTableRequest(IndexRangeKeyClass.class); - - assertEquals("aws-java-sdk-index-range-test", request.tableName()); - List expectedKeyElements = Arrays.asList( - KeySchemaElement.builder().attributeName("key").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("rangeKey").keyType(KeyType.RANGE).build() - ); - assertEquals(expectedKeyElements, request.keySchema()); - - List expectedAttrDefinitions = Arrays.asList( - AttributeDefinition.builder().attributeName("key").attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName("rangeKey").attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName("indexFooRangeKey").attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName("indexBarRangeKey").attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName("multipleIndexRangeKey").attributeType(ScalarAttributeType.N).build() - ); - assertTrue(UnorderedCollectionComparator.equalUnorderedCollections( - expectedAttrDefinitions, - request.attributeDefinitions())); - - List expectedLsi = Arrays.asList( - LocalSecondaryIndex.builder() - .indexName("index_foo") - .keySchema( - 
KeySchemaElement.builder().attributeName("key").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexFooRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("index_bar") - .keySchema( - KeySchemaElement.builder().attributeName("key").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexBarRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("index_foo_copy") - .keySchema( - KeySchemaElement.builder().attributeName("key").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("multipleIndexRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("index_bar_copy") - .keySchema( - KeySchemaElement.builder().attributeName("key").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("multipleIndexRangeKey").keyType(KeyType.RANGE).build()).build()); - assertTrue(equalLsi(expectedLsi, request.localSecondaryIndexes())); - - assertNull(request.globalSecondaryIndexes()); - assertNull(request.provisionedThroughput()); - } - - @Test - public void testComplexIndexedHashRangeClass() { - CreateTableRequest request = mapper.generateCreateTableRequest(MapperQueryExpressionTest.HashRangeClass.class); - - assertEquals("table_name", request.tableName()); - List expectedKeyElements = Arrays.asList( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("primaryRangeKey").keyType(KeyType.RANGE).build() - ); - assertEquals(expectedKeyElements, request.keySchema()); - - List expectedAttrDefinitions = Arrays.asList( - AttributeDefinition.builder().attributeName("primaryHashKey").attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName("indexHashKey").attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName("primaryRangeKey").attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName("indexRangeKey").attributeType(ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName("anotherIndexRangeKey").attributeType(ScalarAttributeType.S).build() - ); - assertTrue(UnorderedCollectionComparator.equalUnorderedCollections( - expectedAttrDefinitions, - request.attributeDefinitions())); - - List expectedLsi = Arrays.asList( - LocalSecondaryIndex.builder() - .indexName("LSI-primary-range") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("primaryRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("LSI-index-range-1") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("LSI-index-range-2") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexRangeKey").keyType(KeyType.RANGE).build()).build(), - LocalSecondaryIndex.builder() - .indexName("LSI-index-range-3") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("anotherIndexRangeKey").keyType(KeyType.RANGE).build()).build()); - 
assertTrue(equalLsi(expectedLsi, request.localSecondaryIndexes())); - - List expectedGsi = Arrays.asList( - GlobalSecondaryIndex.builder() - .indexName("GSI-primary-hash-index-range-1") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexRangeKey").keyType(KeyType.RANGE).build()).build(), - GlobalSecondaryIndex.builder() - .indexName("GSI-primary-hash-index-range-2") - .keySchema( - KeySchemaElement.builder().attributeName("primaryHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("anotherIndexRangeKey").keyType(KeyType.RANGE).build()).build(), - GlobalSecondaryIndex.builder() - .indexName("GSI-index-hash-primary-range") - .keySchema( - KeySchemaElement.builder().attributeName("indexHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("primaryRangeKey").keyType(KeyType.RANGE).build()).build(), - GlobalSecondaryIndex.builder() - .indexName("GSI-index-hash-index-range-1") - .keySchema( - KeySchemaElement.builder().attributeName("indexHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexRangeKey").keyType(KeyType.RANGE).build()).build(), - GlobalSecondaryIndex.builder() - .indexName("GSI-index-hash-index-range-2") - .keySchema( - KeySchemaElement.builder().attributeName("indexHashKey").keyType(KeyType.HASH).build(), - KeySchemaElement.builder().attributeName("indexRangeKey").keyType(KeyType.RANGE).build()).build()); - assertTrue(equalGsi(expectedGsi, request.globalSecondaryIndexes())); - - assertNull(request.provisionedThroughput()); - } - - private static class LocalSecondaryIndexDefinitionComparator - implements - UnorderedCollectionComparator.CrossTypeComparator { - - @Override - public boolean equals(LocalSecondaryIndex a, LocalSecondaryIndex b) { - return a.indexName().equals(b.indexName()) - && a.keySchema().equals(b.keySchema()); - } - - } - - private static class GlobalSecondaryIndexDefinitionComparator - implements - UnorderedCollectionComparator.CrossTypeComparator { - - @Override - public boolean equals(GlobalSecondaryIndex a, GlobalSecondaryIndex b) { - return a.indexName().equals(b.indexName()) - && a.keySchema().equals(b.keySchema()); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/IndexRangeKeyClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/IndexRangeKeyClass.java deleted file mode 100644 index 9364b6134d09..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/IndexRangeKeyClass.java +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - - -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbIndexRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbVersionAttribute; - -/** - * Comprehensive domain class - */ -@DynamoDbTable(tableName = "aws-java-sdk-index-range-test") -public class IndexRangeKeyClass { - - private long key; - private double rangeKey; - private Double indexFooRangeKey; - private Double indexBarRangeKey; - private Double multipleIndexRangeKey; - private Long version; - - private String fooAttribute; - private String barAttribute; - - @DynamoDbHashKey - public long getKey() { - return key; - } - - public void setKey(long key) { - this.key = key; - } - - @DynamoDbRangeKey - public double getRangeKey() { - return rangeKey; - } - - public void setRangeKey(double rangeKey) { - this.rangeKey = rangeKey; - } - - @DynamoDbIndexRangeKey( - localSecondaryIndexName = "index_foo", - attributeName = "indexFooRangeKey" - ) - public Double getIndexFooRangeKeyWithFakeName() { - return indexFooRangeKey; - } - - public void setIndexFooRangeKeyWithFakeName(Double indexFooRangeKey) { - this.indexFooRangeKey = indexFooRangeKey; - } - - @DynamoDbIndexRangeKey( - localSecondaryIndexName = "index_bar" - ) - public Double getIndexBarRangeKey() { - return indexBarRangeKey; - } - - public void setIndexBarRangeKey(Double indexBarRangeKey) { - this.indexBarRangeKey = indexBarRangeKey; - } - - @DynamoDbIndexRangeKey( - localSecondaryIndexNames = {"index_foo_copy", "index_bar_copy"} - ) - public Double getMultipleIndexRangeKey() { - return multipleIndexRangeKey; - } - - public void setMultipleIndexRangeKey(Double multipleIndexRangeKey) { - this.multipleIndexRangeKey = multipleIndexRangeKey; - } - - @DynamoDbAttribute - public String getFooAttribute() { - return fooAttribute; - } - - public void setFooAttribute(String fooAttribute) { - this.fooAttribute = fooAttribute; - } - - @DynamoDbAttribute - public String getBarAttribute() { - return barAttribute; - } - - public void setBarAttribute(String barAttribute) { - this.barAttribute = barAttribute; - } - - @DynamoDbVersionAttribute - public Long getVersion() { - return version; - } - - public void setVersion(Long version) { - this.version = version; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((fooAttribute == null) ? 0 : fooAttribute.hashCode()); - result = prime * result + ((barAttribute == null) ? 0 : barAttribute.hashCode()); - result = prime * result + (int) (key ^ (key >>> 32)); - long temp; - temp = Double.doubleToLongBits(rangeKey); - result = prime * result + (int) (temp ^ (temp >>> 32)); - temp = Double.doubleToLongBits(indexFooRangeKey); - result = prime * result + (int) (temp ^ (temp >>> 32)); - temp = Double.doubleToLongBits(indexBarRangeKey); - result = prime * result + (int) (temp ^ (temp >>> 32)); - result = prime * result + ((version == null) ? 
0 : version.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - IndexRangeKeyClass other = (IndexRangeKeyClass) obj; - if (fooAttribute == null) { - if (other.fooAttribute != null) { - return false; - } - } else if (!fooAttribute.equals(other.fooAttribute)) { - return false; - } - if (barAttribute == null) { - if (other.barAttribute != null) { - return false; - } - } else if (!barAttribute.equals(other.barAttribute)) { - return false; - } - if (key != other.key) { - return false; - } - if (Double.doubleToLongBits(rangeKey) != Double.doubleToLongBits(other.rangeKey)) { - return false; - } - if (Double.doubleToLongBits(indexFooRangeKey) != Double.doubleToLongBits(other.indexFooRangeKey)) { - return false; - } - if (Double.doubleToLongBits(indexBarRangeKey) != Double.doubleToLongBits(other.indexBarRangeKey)) { - return false; - } - if (version == null) { - if (other.version != null) { - return false; - } - } else if (!version.equals(other.version)) { - return false; - } - return true; - } - - @Override - public String toString() { - return "IndexRangeKeyClass [key=" + key + ", rangeKey=" + rangeKey + ", version=" + version - + ", indexFooRangeKey=" + indexFooRangeKey + ", indexBarRangeKey=" + indexBarRangeKey - + ", fooAttribute=" + fooAttribute + ", barAttribute=" + barAttribute + "]"; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/MapperQueryExpressionTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/MapperQueryExpressionTest.java deleted file mode 100644 index e122d3377146..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/MapperQueryExpressionTest.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.atLeastOnce; - -import java.util.ArrayList; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; -import software.amazon.awssdk.utils.ImmutableMap; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbIndexHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbIndexRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbQueryExpression; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ComparisonOperator; -import software.amazon.awssdk.services.dynamodb.model.Condition; -import software.amazon.awssdk.services.dynamodb.model.QueryRequest; -import software.amazon.awssdk.services.dynamodb.model.QueryResponse; - - -/** - * Unit test for the private method DynamoDBMapper#createQueryRequestFromExpression - */ -public class MapperQueryExpressionTest { - - private static final String TABLE_NAME = "table_name"; - private static final Condition RANGE_KEY_CONDITION = Condition.builder() - .attributeValueList(AttributeValue.builder().s("some value").build()) - .comparisonOperator(ComparisonOperator.EQ).build(); - - private static DynamoDbClient mockClient; - private static DynamoDbMapper mapper; - - @Before - public void setUp() throws SecurityException, NoSuchMethodException { - mockClient = Mockito.mock(DynamoDbClient.class); - mapper = new DynamoDbMapper(mockClient); - } - - private static QueryRequest testCreateQueryRequestFromExpression( - Class clazz, DynamoDbQueryExpression queryExpression) { - return testCreateQueryRequestFromExpression(clazz, queryExpression, null); - } - - private static QueryRequest testCreateQueryRequestFromExpression( - Class clazz, DynamoDbQueryExpression queryExpression, - String expectedErrorMessage) { - try { - Mockito.when(mockClient.query(any(QueryRequest.class))).thenReturn(QueryResponse.builder().items(new ArrayList<>()).build()); - - mapper.queryPage(clazz, queryExpression, DynamoDbMapperConfig.DEFAULT); - if (expectedErrorMessage != null) { - fail("Exception containing messsage (" - + expectedErrorMessage + ") is expected."); - } - - ArgumentCaptor request = ArgumentCaptor.forClass(QueryRequest.class); - Mockito.verify(mockClient, atLeastOnce()).query(request.capture()); - return request.getValue(); - } catch (RuntimeException e) { - if (expectedErrorMessage != null && e.getMessage() != null) { - assertTrue("Exception message [" + e.getMessage() + "] does not contain " + - "the expected message [" + expectedErrorMessage + "].", - e.getMessage().contains(expectedErrorMessage)); - } else { - e.printStackTrace(); - fail("Internal error when calling createQueryRequestFromExpressio method"); - } - } catch 
(Exception e) { - fail(e.getMessage()); - } - return null; - } - - /** - * Tests different scenarios of hash-only query - **/ - @Test - public void testHashConditionOnly() { - // Primary hash only - QueryRequest queryRequest = testCreateQueryRequestFromExpression( - HashOnlyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashOnlyClass("foo", null, null))); - assertTrue(queryRequest.keyConditions().size() == 1); - assertEquals("primaryHashKey", queryRequest.keyConditions().keySet().iterator().next()); - assertEquals( - Condition.builder().attributeValueList(AttributeValue.builder().s("foo").build()) - .comparisonOperator(ComparisonOperator.EQ).build(), - queryRequest.keyConditions().get("primaryHashKey")); - assertNull(queryRequest.indexName()); - - // Primary hash used for a GSI - queryRequest = testCreateQueryRequestFromExpression( - HashOnlyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashOnlyClass("foo", null, null)) - .withIndexName("GSI-primary-hash")); - assertTrue(queryRequest.keyConditions().size() == 1); - assertEquals("primaryHashKey", queryRequest.keyConditions().keySet().iterator().next()); - assertEquals( - Condition.builder().attributeValueList(AttributeValue.builder().s("foo").build()) - .comparisonOperator(ComparisonOperator.EQ).build(), - queryRequest.keyConditions().get("primaryHashKey")); - assertEquals("GSI-primary-hash", queryRequest.indexName()); - - // Primary hash query takes higher priority then index hash query - queryRequest = testCreateQueryRequestFromExpression( - HashOnlyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashOnlyClass("foo", "bar", null))); - assertTrue(queryRequest.keyConditions().size() == 1); - assertEquals("primaryHashKey", queryRequest.keyConditions().keySet().iterator().next()); - assertEquals( - Condition.builder().attributeValueList(AttributeValue.builder().s("foo").build()) - .comparisonOperator(ComparisonOperator.EQ).build(), - queryRequest.keyConditions().get("primaryHashKey")); - assertNull(queryRequest.indexName()); - - // Ambiguous query on multiple index hash keys - queryRequest = testCreateQueryRequestFromExpression( - HashOnlyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashOnlyClass(null, "bar", "charlie")), - "Ambiguous query expression: More than one index hash key EQ conditions"); - - // Ambiguous query when not specifying index name - queryRequest = testCreateQueryRequestFromExpression( - HashOnlyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashOnlyClass(null, "bar", null)), - "Ambiguous query expression: More than one GSIs"); - - // Explicitly specify a GSI. 
- queryRequest = testCreateQueryRequestFromExpression( - HashOnlyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashOnlyClass("foo", "bar", null)) - .withIndexName("GSI-index-hash-1")); - assertTrue(queryRequest.keyConditions().size() == 1); - assertEquals("indexHashKey", queryRequest.keyConditions().keySet().iterator().next()); - assertEquals( - Condition.builder().attributeValueList(AttributeValue.builder().s("bar").build()) - .comparisonOperator(ComparisonOperator.EQ).build(), - queryRequest.keyConditions().get("indexHashKey")); - assertEquals("GSI-index-hash-1", queryRequest.indexName()); - - // Non-existent GSI - queryRequest = testCreateQueryRequestFromExpression( - HashOnlyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashOnlyClass("foo", "bar", null)) - .withIndexName("some fake gsi"), - "No hash key condition is applicable to the specified index"); - - // No hash key condition specified - queryRequest = testCreateQueryRequestFromExpression( - HashOnlyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashOnlyClass(null, null, null)), - "Illegal query expression: No hash key condition is found in the query"); - } - - /** - * Tests hash + range query - **/ - @Test - public void testHashAndRangeCondition() { - // Primary hash + primary range - QueryRequest queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass("foo", null)) - .withRangeKeyCondition("primaryRangeKey", RANGE_KEY_CONDITION)); - assertTrue(queryRequest.keyConditions().size() == 2); - assertTrue(queryRequest.keyConditions().containsKey("primaryHashKey")); - assertEquals( - Condition.builder().attributeValueList(AttributeValue.builder().s("foo").build()) - .comparisonOperator(ComparisonOperator.EQ).build(), - queryRequest.keyConditions().get("primaryHashKey")); - assertTrue(queryRequest.keyConditions().containsKey("primaryRangeKey")); - assertEquals(RANGE_KEY_CONDITION, queryRequest.keyConditions().get("primaryRangeKey")); - assertNull(queryRequest.indexName()); - - // Primary hash + primary range on a LSI - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass("foo", null)) - .withRangeKeyCondition("primaryRangeKey", RANGE_KEY_CONDITION) - .withIndexName("LSI-primary-range")); - assertTrue(queryRequest.keyConditions().size() == 2); - assertTrue(queryRequest.keyConditions().containsKey("primaryHashKey")); - assertEquals( - Condition.builder().attributeValueList(AttributeValue.builder().s("foo").build()) - .comparisonOperator(ComparisonOperator.EQ).build(), - queryRequest.keyConditions().get("primaryHashKey")); - assertTrue(queryRequest.keyConditions().containsKey("primaryRangeKey")); - assertEquals(RANGE_KEY_CONDITION, queryRequest.keyConditions().get("primaryRangeKey")); - assertEquals("LSI-primary-range", queryRequest.indexName()); - - // Primary hash + index range used by multiple LSI. 
But also a GSI hash + range - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass("foo", null)) - .withRangeKeyCondition("indexRangeKey", RANGE_KEY_CONDITION)); - assertTrue(queryRequest.keyConditions().size() == 2); - assertTrue(queryRequest.keyConditions().containsKey("primaryHashKey")); - assertEquals( - Condition.builder().attributeValueList(AttributeValue.builder().s("foo").build()) - .comparisonOperator(ComparisonOperator.EQ).build(), - queryRequest.keyConditions().get("primaryHashKey")); - assertTrue(queryRequest.keyConditions().containsKey("indexRangeKey")); - assertEquals(RANGE_KEY_CONDITION, queryRequest.keyConditions().get("indexRangeKey")); - assertEquals("GSI-primary-hash-index-range-1", queryRequest.indexName()); - - - // Primary hash + index range on a LSI - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass("foo", null)) - .withRangeKeyCondition("indexRangeKey", RANGE_KEY_CONDITION) - .withIndexName("LSI-index-range-1")); - assertTrue(queryRequest.keyConditions().size() == 2); - assertTrue(queryRequest.keyConditions().containsKey("primaryHashKey")); - assertEquals( - Condition.builder().attributeValueList(AttributeValue.builder().s("foo").build()) - .comparisonOperator(ComparisonOperator.EQ).build(), - queryRequest.keyConditions().get("primaryHashKey")); - assertTrue(queryRequest.keyConditions().containsKey("indexRangeKey")); - assertEquals(RANGE_KEY_CONDITION, queryRequest.keyConditions().get("indexRangeKey")); - assertEquals("LSI-index-range-1", queryRequest.indexName()); - - // Non-existent LSI - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass("foo", null)) - .withRangeKeyCondition("indexRangeKey", RANGE_KEY_CONDITION) - .withIndexName("some fake lsi"), - "No range key condition is applicable to the specified index"); - - // Illegal query: Primary hash + primary range on a GSI - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass("foo", null)) - .withRangeKeyCondition("indexRangeKey", RANGE_KEY_CONDITION) - .withIndexName("GSI-index-hash-index-range-1"), - "Illegal query expression: No hash key condition is applicable to the specified index"); - - // GSI hash + GSI range - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass(null, "foo")) - .withRangeKeyCondition("primaryRangeKey", RANGE_KEY_CONDITION)); - assertTrue(queryRequest.keyConditions().size() == 2); - assertTrue(queryRequest.keyConditions().containsKey("indexHashKey")); - assertEquals( - Condition.builder().attributeValueList(AttributeValue.builder().s("foo").build()) - .comparisonOperator(ComparisonOperator.EQ).build(), - queryRequest.keyConditions().get("indexHashKey")); - assertTrue(queryRequest.keyConditions().containsKey("primaryRangeKey")); - assertEquals(RANGE_KEY_CONDITION, queryRequest.keyConditions().get("primaryRangeKey")); - assertEquals("GSI-index-hash-primary-range", queryRequest.indexName()); - - // Ambiguous query: GSI hash + index range used by multiple GSIs - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass(null, 
"foo")) - .withRangeKeyCondition("indexRangeKey", RANGE_KEY_CONDITION), - "Illegal query expression: Cannot infer the index name from the query expression."); - - // Explicitly specify the GSI name - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass(null, "foo")) - .withRangeKeyCondition("indexRangeKey", RANGE_KEY_CONDITION) - .withIndexName("GSI-index-hash-index-range-2")); - assertTrue(queryRequest.keyConditions().size() == 2); - assertTrue(queryRequest.keyConditions().containsKey("indexHashKey")); - assertEquals( - Condition.builder().attributeValueList(AttributeValue.builder().s("foo").build()) - .comparisonOperator(ComparisonOperator.EQ).build(), - queryRequest.keyConditions().get("indexHashKey")); - assertTrue(queryRequest.keyConditions().containsKey("indexRangeKey")); - assertEquals(RANGE_KEY_CONDITION, queryRequest.keyConditions().get("indexRangeKey")); - assertEquals("GSI-index-hash-index-range-2", queryRequest.indexName()); - - // Ambiguous query: (1) primary hash + LSI range OR (2) GSI hash + range - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass("foo", null)) - .withRangeKeyCondition("anotherIndexRangeKey", RANGE_KEY_CONDITION), - "Ambiguous query expression: Found multiple valid queries:"); - - // Multiple range key conditions specified - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass("foo", null)) - .withRangeKeyConditions( - ImmutableMap.of( - "primaryRangeKey", RANGE_KEY_CONDITION, - "indexRangeKey", RANGE_KEY_CONDITION)), - "Illegal query expression: Conditions on multiple range keys"); - - // Using an un-annotated range key - queryRequest = testCreateQueryRequestFromExpression( - HashRangeClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new HashRangeClass("foo", null)) - .withRangeKeyCondition("indexHashKey", RANGE_KEY_CONDITION), - "not annotated with either @DynamoDBRangeKey or @DynamoDBIndexRangeKey."); - } - - @Test - public void testHashOnlyQueryOnHashRangeTable() { - // Primary hash only query on a Hash+Range table - QueryRequest queryRequest = testCreateQueryRequestFromExpression( - LSIRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new LSIRangeKeyClass("foo", null))); - assertTrue(queryRequest.keyConditions().size() == 1); - assertTrue(queryRequest.keyConditions().containsKey("primaryHashKey")); - assertNull(queryRequest.indexName()); - - // Hash+Range query on a LSI - queryRequest = testCreateQueryRequestFromExpression( - LSIRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new LSIRangeKeyClass("foo", null)) - .withRangeKeyCondition("lsiRangeKey", RANGE_KEY_CONDITION) - .withIndexName("LSI")); - assertTrue(queryRequest.keyConditions().size() == 2); - assertTrue(queryRequest.keyConditions().containsKey("primaryHashKey")); - assertTrue(queryRequest.keyConditions().containsKey("lsiRangeKey")); - assertEquals("LSI", queryRequest.indexName()); - - // Hash-only query on a LSI - queryRequest = testCreateQueryRequestFromExpression( - LSIRangeKeyClass.class, - new DynamoDbQueryExpression() - .withHashKeyValues(new LSIRangeKeyClass("foo", null)) - .withIndexName("LSI")); - assertTrue(queryRequest.keyConditions().size() == 1); - assertTrue(queryRequest.keyConditions().containsKey("primaryHashKey")); - 
assertEquals("LSI", queryRequest.indexName()); - } - - @DynamoDbTable(tableName = TABLE_NAME) - public final class HashOnlyClass { - - @DynamoDbHashKey - @DynamoDbIndexHashKey( - globalSecondaryIndexNames = "GSI-primary-hash" - ) - private String primaryHashKey; - - @DynamoDbIndexHashKey( - globalSecondaryIndexNames = {"GSI-index-hash-1", "GSI-index-hash-2"} - ) - private String indexHashKey; - - @DynamoDbIndexHashKey( - globalSecondaryIndexNames = {"GSI-another-index-hash"} - ) - private String anotherIndexHashKey; - - public HashOnlyClass(String primaryHashKey, String indexHashKey, String anotherIndexHashKey) { - this.primaryHashKey = primaryHashKey; - this.indexHashKey = indexHashKey; - this.anotherIndexHashKey = anotherIndexHashKey; - } - - public String getPrimaryHashKey() { - return primaryHashKey; - } - - public void setPrimaryHashKey(String primaryHashKey) { - this.primaryHashKey = primaryHashKey; - } - - public String getIndexHashKey() { - return indexHashKey; - } - - public void setIndexHashKey(String indexHashKey) { - this.indexHashKey = indexHashKey; - } - - public String getAnotherIndexHashKey() { - return anotherIndexHashKey; - } - - public void setAnotherIndexHashKey(String anotherIndexHashKey) { - this.anotherIndexHashKey = anotherIndexHashKey; - } - } - - @DynamoDbTable(tableName = TABLE_NAME) - public final class HashRangeClass { - private String primaryHashKey; - private String indexHashKey; - private String primaryRangeKey; - private String indexRangeKey; - private String anotherIndexRangeKey; - - public HashRangeClass(String primaryHashKey, String indexHashKey) { - this.primaryHashKey = primaryHashKey; - this.indexHashKey = indexHashKey; - } - - @DynamoDbHashKey - @DynamoDbIndexHashKey( - globalSecondaryIndexNames = { - "GSI-primary-hash-index-range-1", - "GSI-primary-hash-index-range-2"} - ) - public String getPrimaryHashKey() { - return primaryHashKey; - } - - public void setPrimaryHashKey(String primaryHashKey) { - this.primaryHashKey = primaryHashKey; - } - - @DynamoDbIndexHashKey( - globalSecondaryIndexNames = { - "GSI-index-hash-primary-range", - "GSI-index-hash-index-range-1", - "GSI-index-hash-index-range-2"} - ) - public String getIndexHashKey() { - return indexHashKey; - } - - public void setIndexHashKey(String indexHashKey) { - this.indexHashKey = indexHashKey; - } - - @DynamoDbRangeKey - @DynamoDbIndexRangeKey( - globalSecondaryIndexNames = {"GSI-index-hash-primary-range"}, - localSecondaryIndexName = "LSI-primary-range" - ) - public String getPrimaryRangeKey() { - return primaryRangeKey; - } - - public void setPrimaryRangeKey(String primaryRangeKey) { - this.primaryRangeKey = primaryRangeKey; - } - - @DynamoDbIndexRangeKey( - globalSecondaryIndexNames = { - "GSI-primary-hash-index-range-1", - "GSI-index-hash-index-range-1", - "GSI-index-hash-index-range-2"}, - localSecondaryIndexNames = {"LSI-index-range-1", "LSI-index-range-2"} - ) - public String getIndexRangeKey() { - return indexRangeKey; - } - - public void setIndexRangeKey(String indexRangeKey) { - this.indexRangeKey = indexRangeKey; - } - - @DynamoDbIndexRangeKey( - localSecondaryIndexName = "LSI-index-range-3", - globalSecondaryIndexName = "GSI-primary-hash-index-range-2" - ) - public String getAnotherIndexRangeKey() { - return anotherIndexRangeKey; - } - - public void setAnotherIndexRangeKey(String anotherIndexRangeKey) { - this.anotherIndexRangeKey = anotherIndexRangeKey; - } - } - - @DynamoDbTable(tableName = TABLE_NAME) - public final class LSIRangeKeyClass { - private String primaryHashKey; - 
private String primaryRangeKey; - private String lsiRangeKey; - - public LSIRangeKeyClass(String primaryHashKey, String primaryRangeKey) { - this.primaryHashKey = primaryHashKey; - this.primaryRangeKey = primaryRangeKey; - } - - @DynamoDbHashKey - public String getPrimaryHashKey() { - return primaryHashKey; - } - - public void setPrimaryHashKey(String primaryHashKey) { - this.primaryHashKey = primaryHashKey; - } - - @DynamoDbRangeKey - public String getPrimaryRangeKey() { - return primaryRangeKey; - } - - public void setPrimaryRangeKey(String primaryRangeKey) { - this.primaryRangeKey = primaryRangeKey; - } - - @DynamoDbIndexRangeKey(localSecondaryIndexName = "LSI") - public String getLsiRangeKey() { - return lsiRangeKey; - } - - public void setLsiRangeKey(String lsiRangeKey) { - this.lsiRangeKey = lsiRangeKey; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/V2CompatibleBooleansTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/V2CompatibleBooleansTest.java deleted file mode 100644 index e4aa0dab9cab..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/mapper/V2CompatibleBooleansTest.java +++ /dev/null @@ -1,404 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.mapper; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.Arrays; -import java.util.List; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; -import software.amazon.awssdk.utils.ImmutableMap; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.datamodeling.ConversionSchema; -import software.amazon.awssdk.services.dynamodb.datamodeling.ConversionSchemas; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapper; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperConfig; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMapperFieldModel; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMappingException; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbNativeBoolean; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTyped; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; -import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; -import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; - -/** - * The default converters should be able to unmarshall both V1 booleans (numeric 0/1) and native Dynamo booleans. When using the - * standard converters booleans should be marshalled as numeric attribute values. - */ -@RunWith(MockitoJUnitRunner.class) -public class V2CompatibleBooleansTest { - - private static final String HASH_KEY = "1234"; - - @Mock - private DynamoDbClient ddb; - - /** - * Mapper with default config. - */ - private DynamoDbMapper defaultMapper; - - /** - * Mapper explicitly using {@link ConversionSchemas#V2_COMPATIBLE} - */ - private DynamoDbMapper v2CompatMapper; - - /** - * Mapper explicitly using {@link ConversionSchemas#V1} - */ - private DynamoDbMapper v1Mapper; - - /** - * Mapper explicitly using {@link ConversionSchemas#V2} - */ - private DynamoDbMapper v2Mapper; - - @Before - public void setup() { - defaultMapper = new DynamoDbMapper(ddb); - v2CompatMapper = buildMapper(ConversionSchemas.V2_COMPATIBLE); - v1Mapper = buildMapper(ConversionSchemas.V1); - v2Mapper = buildMapper(ConversionSchemas.V2); - // Just stub dummy response for all save related tests - when(ddb.updateItem(any(UpdateItemRequest.class))).thenReturn(UpdateItemResponse.builder().build()); - } - - private DynamoDbMapper buildMapper(ConversionSchema schema) { - return new DynamoDbMapper(ddb, DynamoDbMapperConfig.builder() - .withConversionSchema(schema) - .build()); - } - - /** - * Without coercion from an annotation the default mapping should marshall booleans as a number. 
- */ - @Test - public void saveBooleanUsingDefaultConverters_MarshallsIntoNumber() { - defaultMapper.save(new UnitTestPojo().setHashKey(HASH_KEY).setBooleanAttr(true)); - verifyAttributeUpdatedWithValue("booleanAttr", AttributeValue.builder().n("1").build()); - } - - @Test - public void saveBooleanUsingV1Schema_MarshallsIntoNumber() { - v1Mapper.save(new UnitTestPojo().setHashKey(HASH_KEY).setBooleanAttr(true)); - verifyAttributeUpdatedWithValue("booleanAttr", AttributeValue.builder().n("1").build()); - } - - @Test - public void saveBooleanUsingV2Compat_MarshallsIntoNumber() { - v2CompatMapper.save(new UnitTestPojo().setHashKey(HASH_KEY).setBooleanAttr(true)); - verifyAttributeUpdatedWithValue("booleanAttr", AttributeValue.builder().n("1").build()); - } - - @Test - public void saveBooleanUsingV2Schema_MarshallsIntoNativeBool() { - v2Mapper.save(new UnitTestPojo().setHashKey(HASH_KEY).setBooleanAttr(true)); - verifyAttributeUpdatedWithValue("booleanAttr", AttributeValue.builder().bool(true).build()); - } - - /** - * {@link DynamoDbNativeBoolean} or {@link DynamoDbTyped} can force - * native - * boolean marshalling. - */ - @Test - public void saveCoercedNativeBooleanUsingDefaultConverters_MarshallsIntoNativeBool() { - saveCoercedNativeBoolean_MarshallsIntoNativeBoolean(defaultMapper); - } - - @Test - public void saveCoercedNativeBooleanUsingV1Schema_MarshallsIntoNativeBool() { - saveCoercedNativeBoolean_MarshallsIntoNativeBoolean(v1Mapper); - } - - @Test - public void saveCoercedNativeBooleanUsingV2CompatSchema_MarshallsIntoNativeBool() { - saveCoercedNativeBoolean_MarshallsIntoNativeBoolean(v2CompatMapper); - } - - @Test - public void saveCoercedNativeBooleanUsingV2_MarshallsIntoNativeBool() { - saveCoercedNativeBoolean_MarshallsIntoNativeBoolean(v2Mapper); - } - - private void saveCoercedNativeBoolean_MarshallsIntoNativeBoolean(DynamoDbMapper mapper) { - mapper.save(new UnitTestPojo().setNativeBoolean(true).setHashKey(HASH_KEY)); - verifyAttributeUpdatedWithValue("nativeBoolean", AttributeValue.builder().bool(true).build()); - } - - /** - * {@link DynamoDbTyped} can force numeric boolean conversion even when using V2 schema. 
- */ - @Test - public void saveCoercedNumericBooleanUsingDefaultConverters_MarshallsIntoNumericBool() { - saveCoercedNumericBoolean_MarshallsIntoNumericBoolean(defaultMapper); - } - - @Test - public void saveCoercedNumericBooleanUsingV1Schema_MarshallsIntoNumericBool() { - saveCoercedNumericBoolean_MarshallsIntoNumericBoolean(v1Mapper); - } - - @Test - public void saveCoercedNumericBooleanUsingV2CompatSchema_MarshallsIntoNumericBool() { - saveCoercedNumericBoolean_MarshallsIntoNumericBoolean(v2CompatMapper); - } - - @Test - public void saveCoercedNumericBooleanUsingV2_MarshallsIntoNumericBool() { - saveCoercedNumericBoolean_MarshallsIntoNumericBoolean(v2Mapper); - } - - private void saveCoercedNumericBoolean_MarshallsIntoNumericBoolean(DynamoDbMapper mapper) { - mapper.save(new UnitTestPojo().setNumericBoolean(true).setHashKey(HASH_KEY)); - verifyAttributeUpdatedWithValue("numericBoolean", AttributeValue.builder().n("1").build()); - } - - @Test - public void saveBooleanListUsingDefaultConverters_MarshallsIntoListOfNumbers() { - defaultMapper.save(new UnitTestPojoWithList() - .setBooleanList(Arrays.asList(Boolean.FALSE, Boolean.TRUE)) - .setHashKey(HASH_KEY)); - verifyAttributeUpdatedWithValue("booleanList", AttributeValue.builder() - .l( - AttributeValue.builder().n("0").build(), - AttributeValue.builder().n("1").build()) - .build()); - } - - /** - * Verifies the mapper results in an update item call that has an update for the appropriate attribute. - * - * @param attributeName Attribute expected to be updated. - * @param expected Expected value of update action. - */ - private void verifyAttributeUpdatedWithValue(String attributeName, AttributeValue expected) { - ArgumentCaptor updateItemRequestCaptor = ArgumentCaptor.forClass(UpdateItemRequest.class); - verify(ddb).updateItem(updateItemRequestCaptor.capture()); - assertEquals(expected, updateItemRequestCaptor.getValue().attributeUpdates().get(attributeName).value()); - } - - @Test - public void loadNumericBooleanUsingDefaultConverters_UnmarshallsCorrectly() { - stubGetItemRequest("booleanAttr", AttributeValue.builder().n("1").build()); - final UnitTestPojo pojo = loadPojo(defaultMapper); - assertTrue(pojo.getBooleanAttr()); - } - - @Test - public void loadNumericBooleanUsingV1Schema_UnmarshallsCorrectly() { - stubGetItemRequest("booleanAttr", AttributeValue.builder().n("1").build()); - final UnitTestPojo pojo = loadPojo(v1Mapper); - assertTrue(pojo.getBooleanAttr()); - } - - @Test - public void loadNumericBooleanUsingV2CompatSchema_UnmarshallsCorrectly() { - stubGetItemRequest("booleanAttr", AttributeValue.builder().n("1").build()); - final UnitTestPojo pojo = loadPojo(v2CompatMapper); - assertTrue(pojo.getBooleanAttr()); - } - - @Test - public void loadNumericBooleanUsingV2_UnmarshallsCorrectly() { - stubGetItemRequest("booleanAttr", AttributeValue.builder().n("1").build()); - final UnitTestPojo pojo = loadPojo(v2Mapper); - assertTrue(pojo.getBooleanAttr()); - } - - @Test - public void loadNativeBooleanUsingDefaultConverters_UnmarshallsCorrectly() { - stubGetItemRequest("booleanAttr", AttributeValue.builder().bool(true).build()); - final UnitTestPojo pojo = loadPojo(defaultMapper); - assertTrue(pojo.getBooleanAttr()); - } -// - /** - * V1 schema does not handle native bool types by default - */ - @Test(expected = DynamoDbMappingException.class) - public void loadNativeBooleanUsingV1Schema_FailsToUnmarshall() { - stubGetItemRequest("booleanAttr", AttributeValue.builder().bool(true).build()); - loadPojo(v1Mapper); - } - - /** - * Native 
bool support can be forced in V1 schema with @{@link DynamoDbTyped}. - */ - @Test - public void loadCoercedNativeBooleanUsingV1Schema_UnmarshallsCorrectly() { - stubGetItemRequest("nativeBoolean", AttributeValue.builder().bool(true).build()); - final UnitTestPojo pojo = loadPojo(v1Mapper); - assertTrue(pojo.getNativeBoolean()); - } - - @Test - public void loadNativeBooleanUsingV2CompatSchema_UnmarshallsCorrectly() { - stubGetItemRequest("booleanAttr", AttributeValue.builder().bool(true).build()); - final UnitTestPojo pojo = loadPojo(v2CompatMapper); - assertTrue(pojo.getBooleanAttr()); - } - - @Test - public void loadNativeBooleanUsingV2_UnmarshallsCorrectly() { - stubGetItemRequest("booleanAttr", AttributeValue.builder().bool(true).build()); - final UnitTestPojo pojo = loadPojo(v2Mapper); - assertTrue(pojo.getBooleanAttr()); - } - - @Test - public void loadNativeBooleanListUsingDefaultConverters_UnmarshallsCorrectly() { - stubGetItemRequest("booleanList", AttributeValue.builder() - .l( - AttributeValue.builder().bool(true).build(), - AttributeValue.builder().bool(false).build() - ).build()); - final UnitTestPojoWithList pojo = loadListPojo(defaultMapper); - - assertTrue(pojo.getBooleanList().get(0)); - assertFalse(pojo.getBooleanList().get(1)); - } - - @Test - public void loadNumericBooleanListUsingDefaultConverters_UnmarshallsCorrectly() { - stubGetItemRequest("booleanList", AttributeValue.builder() - .l( - AttributeValue.builder().n("1").build(), - AttributeValue.builder().n("0").build() - ).build()); - final UnitTestPojoWithList pojo = loadListPojo(defaultMapper); - - assertTrue(pojo.getBooleanList().get(0)); - assertFalse(pojo.getBooleanList().get(1)); - } - - private UnitTestPojoWithList loadListPojo(DynamoDbMapper mapper) { - UnitTestPojoWithList pojo = new UnitTestPojoWithList(); - pojo.setHashKey(HASH_KEY); - return mapper.load(pojo); - } - - private UnitTestPojo loadPojo(DynamoDbMapper mapper) { - return mapper.load(new UnitTestPojo().setHashKey(HASH_KEY)); - } - - /** - * Stub a call to getItem to return a result with the given attribute value in the item. - * - * @param attributeName Attribute name to return in result (in addition to hash key) - * @param attributeValue Attribute value to return in result (in addition to hash key) - */ - private void stubGetItemRequest(String attributeName, AttributeValue attributeValue) { - when(ddb.getItem(any(GetItemRequest.class))).thenReturn(createGetItemResponse(attributeName, attributeValue)); - } - - /** - * Create a {@link GetItemResponse} with the hash key value ({@value #HASH_KEY} and the additional attribute. - * - * @param attributeName Additional attribute to include in created {@link GetItemResponse}. - * @param attributeValue Value of additional attribute. 
- */ - private GetItemResponse createGetItemResponse(String attributeName, AttributeValue attributeValue) { - return GetItemResponse.builder().item( - ImmutableMap.of("hashKey", AttributeValue.builder().s(HASH_KEY).build(), - attributeName, attributeValue)).build(); - } - - @DynamoDbTable(tableName = "UnitTestTable") - public static class UnitTestPojo { - - @DynamoDbHashKey - private String hashKey; - - @DynamoDbAttribute - private Boolean booleanAttr; - - @DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.BOOL) - private Boolean nativeBoolean; - - @DynamoDbTyped(DynamoDbMapperFieldModel.DynamoDbAttributeType.N) - private Boolean numericBoolean; - - - public String getHashKey() { - return hashKey; - } - - public UnitTestPojo setHashKey(String hashKey) { - this.hashKey = hashKey; - return this; - } - - public Boolean getBooleanAttr() { - return booleanAttr; - } - - public UnitTestPojo setBooleanAttr(Boolean booleanAttr) { - this.booleanAttr = booleanAttr; - return this; - } - - public Boolean getNativeBoolean() { - return nativeBoolean; - } - - public UnitTestPojo setNativeBoolean(Boolean nativeBoolean) { - this.nativeBoolean = nativeBoolean; - return this; - } - - public Boolean getNumericBoolean() { - return numericBoolean; - } - - public UnitTestPojo setNumericBoolean(Boolean numericBoolean) { - this.numericBoolean = numericBoolean; - return this; - } - - } - - public static class UnitTestPojoWithList extends UnitTestPojo { - - @DynamoDbAttribute - private List booleanList; - - public List getBooleanList() { - return booleanList; - } - - public UnitTestPojo setBooleanList(List booleanList) { - this.booleanList = booleanList; - return this; - } - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/model/Item.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/model/Item.java deleted file mode 100644 index c1f9d713e591..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/model/Item.java +++ /dev/null @@ -1,379 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.model; - -import static java.util.stream.Collectors.toList; -import static software.amazon.awssdk.utils.CollectionUtils.toMap; - -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.utils.builder.SdkBuilder; - -/** - * Used to build a DynamoDB item map. - * - * For example to pass to a {@link PutItemRequest.Builder#item(Map)}. - */ -//TODO : May want to do this a different way -public final class Item extends HashMap { - - private Item(Builder builder) { - putAll(builder.item); - } - - /** - * Create a new instance of the {@link Builder}. 
- * - * @return a new instance of the {@link Builder} - */ - public static Builder builder() { - return new Builder(); - } - - public static class Builder implements SdkBuilder { - private final Map item = new HashMap<>(); - - private Builder() { - } - - /** - * Build a {@link Map} representing a DynamoDB item. - * - * @return a {@link Map} representing a DynamoDB item - */ - @Override - public Item build() { - return new Item(this); - } - - /** - * Adds an {@link AttributeValue} representing a String to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().s(stringValue).build());
    -         * 
    - * - * @param key the key of this attribute - * @param stringValue the string value of the attribute - * @return the builder for method chaining - */ - public Builder attribute(String key, String stringValue) { - item.put(key, AttributeValue.builder().s(stringValue).build()); - return this; - } - - /** - * Adds an {@link AttributeValue} representing a Boolean to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().bool(booleanValue).build());
    -         * 
    - * - * @param key the key of this attribute - * @param booleanValue the boolean value of the attribute - * @return the builder for method chaining - */ - public Builder attribute(String key, Boolean booleanValue) { - item.put(key, AttributeValue.builder().bool(booleanValue).build()); - return this; - } - - /** - * Adds an {@link AttributeValue} representing a Number to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().n(String.valueOf(numericValue)).build());
    -         * 
    - * - * @param key the key of this attribute - * @param numericValue the numeric value of the attribute - * @return the builder for method chaining - */ - public Builder attribute(String key, Number numericValue) { - item.put(key, AttributeValue.builder().n(String.valueOf(numericValue)).build()); - return this; - } - - /** - * Adds an {@link AttributeValue} representing binary data to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().b(ByteBuffer.wrap(binaryValue)).build());
    -         * 
    - * - * @param key the key of this attribute - * @param binaryValue the binary value of the attribute - * @return the builder for method chaining - */ - public Builder attribute(String key, byte[] binaryValue) { - return attribute(key, ByteBuffer.wrap(binaryValue)); - } - - /** - * Adds an {@link AttributeValue} representing binary data to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().b(binaryValue).build());
    -         * 
    - * - * @param key the key of this attribute - * @param binaryValue the binary value of the attribute - * @return the builder for method chaining - */ - public Builder attribute(String key, ByteBuffer binaryValue) { - item.put(key, AttributeValue.builder().b(SdkBytes.fromByteBuffer(binaryValue)).build()); - return this; - } - - /** - * Adds an {@link AttributeValue} representing a list of AttributeValues to the item with the specified key. - * - * This will attempt to infer the most appropriate {@link AttributeValue} for each given {@link Object} in the list - * based on the type. Supported types are:
-     * <ul>
-     *     <li>{@link AttributeValue} which is unaltered</li>
-     *     <li>{@link String} becomes {@link AttributeValue#s()}</li>
-     *     <li>{@link Number} (and anything that can be automatically boxed to {@link Number} including
-     *     int, long, float etc.) becomes {@link AttributeValue#n()}</li>
-     *     <li>{@link Boolean} (and bool) becomes {@link AttributeValue#bool()}</li>
-     *     <li>byte[] becomes {@link AttributeValue#b()}</li>
-     *     <li>{@link ByteBuffer} becomes {@link AttributeValue#b()}</li>
-     *     <li>{@link List}<Object> (where the containing objects are one of the types in
-     *     this list) becomes {@link AttributeValue#l()}</li>
-     *     <li>{@link Map}<String, Object> (where the containing object values are one of the types
-     *     in this list) becomes {@link AttributeValue#m()}</li>
-     * </ul>
    - * - * @param key the key of this attribute - * @param values the object values - * @return the builder for method chaining - */ - public Builder attribute(String key, List values) { - item.put(key, fromObject(values)); - return this; - } - - /** - * Adds an {@link AttributeValue} representing a map of string to AttributeValues to the item with the specified key. - * - * This will attempt to infer the most appropriate {@link AttributeValue} for each given {@link Object} value in the map - * based on the type. Supported types are: - *
-     * <ul>
-     *     <li>{@link AttributeValue} which is unaltered</li>
-     *     <li>{@link String} becomes {@link AttributeValue#s()}</li>
-     *     <li>{@link Number} (and anything that can be automatically boxed to {@link Number} including
-     *     int, long, float etc.) becomes {@link AttributeValue#n()}</li>
-     *     <li>{@link Boolean} (and bool) becomes {@link AttributeValue#bool()}</li>
-     *     <li>byte[] becomes {@link AttributeValue#b()}</li>
-     *     <li>{@link ByteBuffer} becomes {@link AttributeValue#b()}</li>
-     *     <li>{@link List}<Object> (where the containing objects are one of the types in
-     *     this list) becomes {@link AttributeValue#l()}</li>
-     *     <li>{@link Map}<String, Object> (where the containing object values are one of the types
-     *     in this list) becomes {@link AttributeValue#m()}</li>
-     * </ul>
    - * - * @param key the key of this attribute - * @param values the map of key to object - * @return the builder for method chaining - */ - public Builder attribute(String key, Map values) { - item.put(key, fromObject(values)); - return this; - } - - /** - * Adds an {@link AttributeValue} representing a list of strings to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().ss(stringValue1, stringValue2, ...).build());
    -         * 
    - * - * @param key the key of this attribute - * @param stringValues the string values of the attribute - * @return the builder for method chaining - */ - public Builder strings(String key, String... stringValues) { - return strings(key, Arrays.asList(stringValues)); - } - - /** - * Adds an {@link AttributeValue} representing a list of strings to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().ss(stringValues).build());
    -         * 
    - * - * @param key the key of this attribute - * @param stringValues the string values of the attribute - * @return the builder for method chaining - */ - public Builder strings(String key, Collection stringValues) { - item.put(key, AttributeValue.builder().ss(stringValues).build()); - return this; - } - - /** - * Adds an {@link AttributeValue} representing a list of numbers to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().ns(numberValues1, numberValues2, ...).build());
    -         * 
    - * - * @param key the key of this attribute - * @param numberValues the number values of the attribute - * @return the builder for method chaining - */ - public Builder numbers(String key, Number... numberValues) { - return numbers(key, Arrays.asList(numberValues)); - } - - /** - * Adds an {@link AttributeValue} representing a list of numbers to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().ns(numberValues).build());
    -         * 
    - * - * @param key the key of this attribute - * @param numberValues the number values of the attribute - * @return the builder for method chaining - */ - public Builder numbers(String key, Collection numberValues) { - item.put(key, AttributeValue.builder().ns(numberValues.stream().map(String::valueOf).collect(toList())).build()); - return this; - } - - /** - * Adds an {@link AttributeValue} representing a list of binary data to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().bs(Arrays.stream(byteArrays)
    -         *                                                    .map(ByteBuffer::wrap)
    -         *                                                    .collect(Collectors.toList())).build());
    -         * 
    - * - * @param key the key of this attribute - * @param byteArrays the binary values of the attribute - * @return the builder for method chaining - */ - public Builder byteArrays(String key, byte[]... byteArrays) { - return byteArrays(key, Arrays.asList(byteArrays)); - } - - /** - * Adds an {@link AttributeValue} representing a list of binary data to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().bs(byteArrays.stream()
    -         *                                                        .map(ByteBuffer::wrap)
    -         *                                                        .collect(Collectors.toList())).build());
    -         * 
    - * - * @param key the key of this attribute - * @param byteArrays the binary values of the attribute - * @return the builder for method chaining - */ - public Builder byteArrays(String key, Collection byteArrays) { - return byteBuffers(key, byteArrays.stream().map(ByteBuffer::wrap).collect(Collectors.toList())); - } - - /** - * Adds an {@link AttributeValue} representing a list of binary data to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().bs(binaryValues1, binaryValues2, ...).build());
    -         * 
    - * - * @param key the key of this attribute - * @param binaryValues the binary values of the attribute - * @return the builder for method chaining - */ - public Builder byteBuffers(String key, ByteBuffer... binaryValues) { - return byteBuffers(key, Arrays.asList(binaryValues)); - } - - /** - * Adds an {@link AttributeValue} representing a list of binary data to the item with the specified key. - * - * Equivalent of: - *
    
    -         * itemMap.put(key, AttributeValue.builder().bs(binaryValues).build());
    -         * 
    - * - * @param key the key of this attribute - * @param binaryValues the binary values of the attribute - * @return the builder for method chaining - */ - public Builder byteBuffers(String key, Collection binaryValues) { - item.put(key, AttributeValue.builder().bs(binaryValues.stream().map(SdkBytes::fromByteBuffer).collect(toList())).build()); - return this; - } - - private static AttributeValue fromObject(Object object) { - if (object instanceof AttributeValue) { - return (AttributeValue) object; - } - if (object instanceof String) { - return AttributeValue.builder().s((String) object).build(); - } - if (object instanceof Number) { - return AttributeValue.builder().n(String.valueOf((Number) object)).build(); - } - if (object instanceof byte[]) { - return AttributeValue.builder().b(SdkBytes.fromByteArray((byte[]) object)).build(); - } - if (object instanceof ByteBuffer) { - return AttributeValue.builder().b(SdkBytes.fromByteBuffer((ByteBuffer) object)).build(); - } - if (object instanceof Boolean) { - return AttributeValue.builder().bool((Boolean) object).build(); - } - if (object instanceof List) { - List attributeValues = ((List) object).stream() - .map(Builder::fromObject) - .collect(toList()); - return AttributeValue.builder().l(attributeValues).build(); - } - if (object instanceof Map) { - Map attributeValues = - ((Map) object).entrySet() - .stream() - .map(e -> new SimpleImmutableEntry<>(e.getKey(), fromObject(e.getValue()))) - .collect(toMap()); - return AttributeValue.builder().m(attributeValues).build(); - } - throw new IllegalArgumentException("Unsupported type: " + object.getClass()); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/model/ItemTest.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/model/ItemTest.java deleted file mode 100644 index ad980c4837a5..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/model/ItemTest.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.model; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Collections.singletonMap; -import static org.assertj.core.api.Assertions.assertThat; - -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import org.assertj.core.api.Condition; -import org.junit.Test; - -public class ItemTest { - - public static final byte[] BYTES1 = "value1".getBytes(UTF_8); - public static final byte[] BYTES2 = "value2".getBytes(UTF_8); - - @Test - public void testItemBuilder() { - - Map testMap = new HashMap<>(); - testMap.put("String", "stringValue"); - testMap.put("int", 15); - testMap.put("byteArray", BYTES1); - testMap.put("ByteBuffer", ByteBuffer.wrap(BYTES2)); - testMap.put("bool", true); - - List longs = Arrays.asList(1l, 2l, 3l); - - Map item = - Item.builder() - .attribute("String", "stringValue") - .attribute("int", 15) - .attribute("long", 200L) - .attribute("float", 15.5) - .attribute("Integer", new Integer(15)) - .attribute("Long", new Long(200)) - .attribute("Float", new Float(15.5)) - .attribute("bool", true) - .attribute("Boolean", Boolean.TRUE) - .attribute("bytes", BYTES1) - .attribute("ByteBuffer", ByteBuffer.wrap(BYTES1)) - .strings("StringsVarArgs", "value1", "value2") - .strings("StringsCollection", Arrays.asList("value1", "value2")) - .numbers("NumbersVarArgs", 1, 2.0, 3L) - .numbers("NumbersCollection", Arrays.asList(1, 2.0, 3L)) - .numbers("LongCollection", longs) - .byteArrays("bytesVarArgs", BYTES1, BYTES2) - .byteArrays("bytesCollection", Arrays.asList(BYTES1, BYTES2)) - .byteBuffers("ByteBuffersVarArgs", ByteBuffer.wrap(BYTES1), ByteBuffer.wrap(BYTES2)) - .byteBuffers("ByteBuffersCollection", Arrays.asList(ByteBuffer.wrap(BYTES1), ByteBuffer.wrap(BYTES2))) - .attribute("list", Arrays.asList(15, 200L, 15.5, "stringValue", BYTES1, ByteBuffer.wrap(BYTES2), true)) - .attribute("map", testMap) - .attribute("nested", Arrays.asList(15, singletonMap("nestedList", Arrays.asList("stringValue", BYTES1)))) - .build(); - - - assertThat(item).containsEntry("String", AttributeValue.builder().s("stringValue").build()); - assertThat(item).containsEntry("int", AttributeValue.builder().n("15").build()); - assertThat(item).containsEntry("long", AttributeValue.builder().n("200").build()); - assertThat(item).containsEntry("float", AttributeValue.builder().n("15.5").build()); - assertThat(item).containsEntry("Integer", AttributeValue.builder().n("15").build()); - assertThat(item).containsEntry("Long", AttributeValue.builder().n("200").build()); - assertThat(item).containsEntry("Float", AttributeValue.builder().n("15.5").build()); - assertThat(item).containsEntry("bool", AttributeValue.builder().bool(true).build()); - assertThat(item).containsEntry("Boolean", AttributeValue.builder().bool(true).build()); - assertThat(item).containsEntry("StringsVarArgs", AttributeValue.builder().ss("value1", "value2").build()); - assertThat(item).containsEntry("StringsCollection", AttributeValue.builder().ss("value1", "value2").build()); - assertThat(item).containsEntry("NumbersVarArgs", AttributeValue.builder().ns("1", "2.0", "3").build()); - assertThat(item).containsEntry("NumbersCollection", AttributeValue.builder().ns("1", "2.0", "3").build()); - assertThat(item).containsEntry("LongCollection", AttributeValue.builder().ns("1", "2", "3").build()); - - assertThat(item).hasEntrySatisfying("bytes", bytesMatching(BYTES1)); - 
assertThat(item).hasEntrySatisfying("ByteBuffer", bytesMatching(BYTES1)); - assertThat(item).hasEntrySatisfying("bytesVarArgs", bytesMatching(BYTES1, BYTES2)); - assertThat(item).hasEntrySatisfying("bytesCollection", bytesMatching(BYTES1, BYTES2)); - assertThat(item).hasEntrySatisfying("ByteBuffersVarArgs", bytesMatching(BYTES1, BYTES2)); - assertThat(item).hasEntrySatisfying("ByteBuffersCollection", bytesMatching(BYTES1, BYTES2)); - - assertThat(item.get("list").l()).hasSize(7); - assertThat(item.get("list").l().get(0)).isEqualTo(AttributeValue.builder().n("15").build()); - assertThat(item.get("list").l().get(1)).isEqualTo(AttributeValue.builder().n("200").build()); - assertThat(item.get("list").l().get(2)).isEqualTo(AttributeValue.builder().n("15.5").build()); - assertThat(item.get("list").l().get(3)).isEqualTo(AttributeValue.builder().s("stringValue").build()); - assertThat(item.get("list").l().get(4)).has(bytesMatching(BYTES1)); - assertThat(item.get("list").l().get(5)).has(bytesMatching(BYTES2)); - assertThat(item.get("list").l().get(6)).isEqualTo(AttributeValue.builder().bool(true).build()); - - assertThat(item.get("map").m().entrySet()).hasSize(5); - assertThat(item.get("map").m().get("String")).isEqualTo(AttributeValue.builder().s("stringValue").build()); - assertThat(item.get("map").m().get("int")).isEqualTo(AttributeValue.builder().n("15").build()); - assertThat(item.get("map").m().get("byteArray")).has(bytesMatching(BYTES1)); - assertThat(item.get("map").m().get("ByteBuffer")).has(bytesMatching(BYTES2)); - assertThat(item.get("map").m().get("bool")).isEqualTo(AttributeValue.builder().bool(true).build()); - - assertThat(item.get("nested").l()).hasSize(2); - assertThat(item.get("nested").l().get(0)).isEqualTo(AttributeValue.builder().n("15").build()); - assertThat(item.get("nested").l().get(1).m().entrySet()).hasSize(1); - assertThat(item.get("nested").l().get(1).m().get("nestedList").l()).hasSize(2); - assertThat(item.get("nested").l().get(1).m().get("nestedList").l().get(0)).isEqualTo(AttributeValue.builder().s("stringValue").build()); - assertThat(item.get("nested").l().get(1).m().get("nestedList").l().get(1)).has(bytesMatching(BYTES1)); - } - - private static Condition bytesMatching(byte[] bytes) { - return new Condition<>(item -> byteBufferEquals(item.b().asByteBuffer(), bytes), "bytes matching"); - } - - private static Condition bytesMatching(byte[] firstByteArray, byte[] secondByteArray) { - return new Condition<>(item -> { - return item.bs().size() == 2 && - byteBufferEquals(item.bs().get(0).asByteBuffer(), firstByteArray) && - byteBufferEquals(item.bs().get(1).asByteBuffer(), secondByteArray); - }, "List<" + String.valueOf(firstByteArray) + ", " + String.valueOf(secondByteArray) + ">"); - } - - private static boolean byteBufferEquals(ByteBuffer byteBuffer, byte[] bytes) { - if (byteBuffer.remaining() != bytes.length) { - return false; - } - byte[] actual = new byte[bytes.length]; - byteBuffer.duplicate().get(actual); - return Arrays.equals(actual, bytes); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/AuditRecord.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/AuditRecord.java deleted file mode 100644 index 3cd0a6efb1be..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/AuditRecord.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import java.util.Date; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGenerateStrategy; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedTimestamp; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbFlattened; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbVersionAttribute; - -@DynamoDbFlattened(attributes = {@DynamoDbAttribute(mappedBy = "createdDate", attributeName = "RCD"), - @DynamoDbAttribute(mappedBy = "lastModifiedDate", attributeName = "RMD"), - @DynamoDbAttribute(mappedBy = "versionNumber", attributeName = "RVN")}) -public class AuditRecord { - - private Date createdDate; - private Date lastModifiedDate; - private Long versionNumber; - - @DynamoDbAutoGeneratedTimestamp(strategy = DynamoDbAutoGenerateStrategy.CREATE) - public Date getCreatedDate() { - return this.createdDate; - } - - public void setCreatedDate(final Date createdDate) { - this.createdDate = createdDate; - } - - @DynamoDbAutoGeneratedTimestamp(strategy = DynamoDbAutoGenerateStrategy.ALWAYS) - public Date getLastModifiedDate() { - return this.lastModifiedDate; - } - - public void setLastModifiedDate(final Date lastModifiedDate) { - this.lastModifiedDate = lastModifiedDate; - } - - @DynamoDbVersionAttribute - public Long getVersionNumber() { - return this.versionNumber; - } - - public void setVersionNumber(final Long versionNumber) { - this.versionNumber = versionNumber; - } - - @Override - public final boolean equals(final Object o) { - if (o == this) { - return true; - } - if (!(o instanceof AuditRecord)) { - return false; - } - AuditRecord that = (AuditRecord) o; - return (createdDate == null ? that.createdDate == null : createdDate.equals(that.createdDate)) && - (lastModifiedDate == null ? that.lastModifiedDate == null : lastModifiedDate.equals(that.lastModifiedDate)) && - (versionNumber == null ? that.versionNumber == null : versionNumber.equals(that.versionNumber)); - } - - @Override - public final int hashCode() { - return 1 + (createdDate == null ? 0 : createdDate.hashCode()) + - (lastModifiedDate == null ? 0 : lastModifiedDate.hashCode()) + - (versionNumber == null ? 
0 : versionNumber.hashCode()); - } - - @Override - public final String toString() { - return getClass().getName() + "{" + - "createdDate=" + createdDate + "," + - "lastModifiedDate=" + lastModifiedDate + "," + - "versionNumber=" + versionNumber + - "}"; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/AutoKeyAndVal.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/AutoKeyAndVal.java deleted file mode 100644 index 18806bb29d15..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/AutoKeyAndVal.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -/** - * Auto-generated string key and value. - */ -@DynamoDbTable(tableName = "aws-java-sdk-util") -public class AutoKeyAndVal extends KeyAndVal { - @Override - @DynamoDbHashKey - @DynamoDbAutoGeneratedKey - public String getKey() { - return super.getKey(); - } - - @Override - public void setKey(final String key) { - super.setKey(key); - } - - @Override - public V getVal() { - return super.getVal(); - } - - @Override - public void setVal(final V val) { - super.setVal(val); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/BinaryAttributeByteArrayClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/BinaryAttributeByteArrayClass.java deleted file mode 100644 index e4f575ee529e..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/BinaryAttributeByteArrayClass.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
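AuditRecord combines @DynamoDbAutoGeneratedTimestamp (CREATE and ALWAYS strategies), @DynamoDbVersionAttribute, and per-property renames (RCD, RMD, RVN). The version attribute implies optimistic locking: each mapped save is a conditional write that only succeeds while the stored version still matches the value that was read. A hedged sketch of that conditional update expressed with the raw v2 client (table name and key are hypothetical; RVN and RMD are the renamed version and last-modified attributes from the class above):

import java.util.Map;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest;

public class VersionedUpdateSketch {
    public static UpdateItemRequest nextVersion(String hashKey, long expectedVersion) {
        return UpdateItemRequest.builder()
                .tableName("example-table") // hypothetical
                .key(Map.of("key", AttributeValue.builder().s(hashKey).build()))
                .updateExpression("SET RVN = :next, RMD = :now")
                .conditionExpression("RVN = :expected") // fails if another writer saved first
                .expressionAttributeValues(Map.of(
                        ":expected", AttributeValue.builder().n(Long.toString(expectedVersion)).build(),
                        ":next", AttributeValue.builder().n(Long.toString(expectedVersion + 1)).build(),
                        ":now", AttributeValue.builder().n(Long.toString(System.currentTimeMillis())).build()))
                .build();
    }
}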
- */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -/** - * Test domain class with byte[] attribute, byte[] set and a string key - */ -@DynamoDbTable(tableName = "aws-java-sdk-util") -public class BinaryAttributeByteArrayClass { - - private String key; - private byte[] binaryAttribute; - private Set binarySetAttribute; - - @DynamoDbHashKey(attributeName = "key") - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAttribute(attributeName = "binaryAttribute") - public byte[] getBinaryAttribute() { - return binaryAttribute; - } - - public void setBinaryAttribute(byte[] binaryAttribute) { - this.binaryAttribute = binaryAttribute; - } - - @DynamoDbAttribute(attributeName = "binarySetAttribute") - public Set getBinarySetAttribute() { - return binarySetAttribute; - } - - public void setBinarySetAttribute(Set binarySetAttribute) { - this.binarySetAttribute = binarySetAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((binaryAttribute == null) ? 0 : binaryAttribute.hashCode()); - result = prime * result + ((binarySetAttribute == null) ? 0 : binarySetAttribute.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - BinaryAttributeByteArrayClass other = (BinaryAttributeByteArrayClass) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (binaryAttribute == null) { - if (other.binaryAttribute != null) { - return false; - } - } else if (!binaryAttribute.equals(other.binaryAttribute)) { - return false; - } - if (binarySetAttribute == null) { - if (other.binarySetAttribute != null) { - return false; - } - } else if (!binarySetAttribute.equals(other.binarySetAttribute)) { - return false; - } - return true; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/BinaryAttributeByteBufferClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/BinaryAttributeByteBufferClass.java deleted file mode 100644 index 4f4433fd6c37..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/BinaryAttributeByteBufferClass.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import java.nio.ByteBuffer; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -/** - * Test domain class with byteBuffer attribute, byteBuffer set and a string key - */ -@DynamoDbTable(tableName = "aws-java-sdk-util") -public class BinaryAttributeByteBufferClass { - - private String key; - private ByteBuffer binaryAttribute; - private Set binarySetAttribute; - - @DynamoDbHashKey(attributeName = "key") - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAttribute(attributeName = "binaryAttribute") - public ByteBuffer getBinaryAttribute() { - return binaryAttribute; - } - - public void setBinaryAttribute(ByteBuffer binaryAttribute) { - this.binaryAttribute = binaryAttribute; - } - - @DynamoDbAttribute(attributeName = "binarySetAttribute") - public Set getBinarySetAttribute() { - return binarySetAttribute; - } - - public void setBinarySetAttribute(Set binarySetAttribute) { - this.binarySetAttribute = binarySetAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((binaryAttribute == null) ? 0 : binaryAttribute.hashCode()); - result = prime * result + ((binarySetAttribute == null) ? 0 : binarySetAttribute.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - BinaryAttributeByteBufferClass other = (BinaryAttributeByteBufferClass) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (binaryAttribute == null) { - if (other.binaryAttribute != null) { - return false; - } - } else if (!binaryAttribute.equals(other.binaryAttribute)) { - return false; - } - if (binarySetAttribute == null) { - if (other.binarySetAttribute != null) { - return false; - } - } else if (!binarySetAttribute.equals(other.binarySetAttribute)) { - return false; - } - return true; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/CrossSdkVerificationClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/CrossSdkVerificationClass.java deleted file mode 100644 index 8f52e8af1289..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/CrossSdkVerificationClass.java +++ /dev/null @@ -1,490 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
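The two binary-attribute POJOs just removed model the same shape with byte[] and ByteBuffer; either way the mapped values land in DynamoDB's binary (B) and binary-set (BS) types. A small illustrative sketch of those raw representations using the v2 SdkBytes wrapper:

import java.util.Arrays;
import java.util.stream.Collectors;
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

final class BinaryValueSketch {
    // Single binary attribute -> AttributeValue of type B.
    static AttributeValue binary(byte[] value) {
        return AttributeValue.builder().b(SdkBytes.fromByteArray(value)).build();
    }

    // Binary-set attribute -> AttributeValue of type BS.
    static AttributeValue binarySet(byte[]... values) {
        return AttributeValue.builder()
                             .bs(Arrays.stream(values)
                                       .map(SdkBytes::fromByteArray)
                                       .collect(Collectors.toList()))
                             .build();
    }
}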
- */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.Calendar; -import java.util.Date; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbVersionAttribute; - -/** - * Exhaustive exercise of DynamoDB domain mapping, exercising every supported - * data type. - */ -@DynamoDbTable(tableName = "aws-xsdk") -public class CrossSdkVerificationClass { - - private String key; - private String rangeKey; - private Long version; - private String lastUpdater; - - private Integer integerAttribute; - private Long longAttribute; - private Double doubleAttribute; - private Float floatAttribute; - private BigDecimal bigDecimalAttribute; - private BigInteger bigIntegerAttribute; - private Byte byteAttribute; - private Date dateAttribute; - private Calendar calendarAttribute; - private Boolean booleanAttribute; - - private Set stringSetAttribute; - private Set integerSetAttribute; - private Set doubleSetAttribute; - private Set floatSetAttribute; - private Set bigDecimalSetAttribute; - private Set bigIntegerSetAttribute; - private Set longSetAttribute; - private Set byteSetAttribute; - private Set dateSetAttribute; - private Set calendarSetAttribute; - - // these are kind of pointless, but here for completeness - private Set booleanSetAttribute; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbRangeKey - public String getRangeKey() { - return rangeKey; - } - - public void setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - } - - @DynamoDbVersionAttribute - public Long getVersion() { - return version; - } - - public void setVersion(Long version) { - this.version = version; - } - - public String lastUpdater() { - return lastUpdater; - } - - public void setLastUpdater(String lastUpdater) { - this.lastUpdater = lastUpdater; - } - - public Integer getIntegerAttribute() { - return integerAttribute; - } - - public void setIntegerAttribute(Integer integerAttribute) { - this.integerAttribute = integerAttribute; - } - - public Long longAttribute() { - return longAttribute; - } - - public void setLongAttribute(Long longAttribute) { - this.longAttribute = longAttribute; - } - - public Double getDoubleAttribute() { - return doubleAttribute; - } - - public void setDoubleAttribute(Double doubleAttribute) { - this.doubleAttribute = doubleAttribute; - } - - public Float getFloatAttribute() { - return floatAttribute; - } - - public void setFloatAttribute(Float floatAttribute) { - this.floatAttribute = floatAttribute; - } - - public BigDecimal bigDecimalAttribute() { - return bigDecimalAttribute; - } - - public void setBigDecimalAttribute(BigDecimal bigDecimalAttribute) { - this.bigDecimalAttribute = bigDecimalAttribute; - } - - public BigInteger bigIntegerAttribute() { - return bigIntegerAttribute; - } - - public void setBigIntegerAttribute(BigInteger bigIntegerAttribute) { - this.bigIntegerAttribute = bigIntegerAttribute; - } - - public Byte byteAttribute() { - return byteAttribute; - } - - public void setByteAttribute(Byte byteAttribute) { - this.byteAttribute = byteAttribute; - } - - public Date getDateAttribute() { - return dateAttribute; - } - - public void 
setDateAttribute(Date dateAttribute) { - this.dateAttribute = dateAttribute; - } - - public Calendar getCalendarAttribute() { - return calendarAttribute; - } - - public void setCalendarAttribute(Calendar calendarAttribute) { - this.calendarAttribute = calendarAttribute; - } - - public Boolean booleanAttribute() { - return booleanAttribute; - } - - public void setBooleanAttribute(Boolean booleanAttribute) { - this.booleanAttribute = booleanAttribute; - } - - public Set getIntegerSetAttribute() { - return integerSetAttribute; - } - - public void setIntegerSetAttribute(Set integerSetAttribute) { - this.integerSetAttribute = integerSetAttribute; - } - - public Set getDoubleSetAttribute() { - return doubleSetAttribute; - } - - public void setDoubleSetAttribute(Set doubleSetAttribute) { - this.doubleSetAttribute = doubleSetAttribute; - } - - public Set getFloatSetAttribute() { - return floatSetAttribute; - } - - public void setFloatSetAttribute(Set floatSetAttribute) { - this.floatSetAttribute = floatSetAttribute; - } - - public Set bigDecimalSetAttribute() { - return bigDecimalSetAttribute; - } - - public void setBigDecimalSetAttribute(Set bigDecimalSetAttribute) { - this.bigDecimalSetAttribute = bigDecimalSetAttribute; - } - - public Set bigIntegerSetAttribute() { - return bigIntegerSetAttribute; - } - - public void setBigIntegerSetAttribute(Set bigIntegerSetAttribute) { - this.bigIntegerSetAttribute = bigIntegerSetAttribute; - } - - public Set longSetAttribute() { - return longSetAttribute; - } - - public void setLongSetAttribute(Set longSetAttribute) { - this.longSetAttribute = longSetAttribute; - } - - public Set byteSetAttribute() { - return byteSetAttribute; - } - - public void setByteSetAttribute(Set byteSetAttribute) { - this.byteSetAttribute = byteSetAttribute; - } - - public Set getDateSetAttribute() { - return dateSetAttribute; - } - - public void setDateSetAttribute(Set dateSetAttribute) { - this.dateSetAttribute = dateSetAttribute; - } - - public Set getCalendarSetAttribute() { - return calendarSetAttribute; - } - - public void setCalendarSetAttribute(Set calendarSetAttribute) { - this.calendarSetAttribute = calendarSetAttribute; - } - - public Set booleanSetAttribute() { - return booleanSetAttribute; - } - - public void setBooleanSetAttribute(Set booleanSetAttribute) { - this.booleanSetAttribute = booleanSetAttribute; - } - - public Set stringSetAttribute() { - return stringSetAttribute; - } - - public void setStringSetAttribute(Set stringSetAttribute) { - this.stringSetAttribute = stringSetAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((bigDecimalAttribute == null) ? 0 : bigDecimalAttribute.hashCode()); - result = prime * result + ((bigDecimalSetAttribute == null) ? 0 : bigDecimalSetAttribute.hashCode()); - result = prime * result + ((bigIntegerAttribute == null) ? 0 : bigIntegerAttribute.hashCode()); - result = prime * result + ((bigIntegerSetAttribute == null) ? 0 : bigIntegerSetAttribute.hashCode()); - result = prime * result + ((booleanAttribute == null) ? 0 : booleanAttribute.hashCode()); - result = prime * result + ((booleanSetAttribute == null) ? 0 : booleanSetAttribute.hashCode()); - result = prime * result + ((byteAttribute == null) ? 0 : byteAttribute.hashCode()); - result = prime * result + ((byteSetAttribute == null) ? 0 : byteSetAttribute.hashCode()); - result = prime * result + ((calendarAttribute == null) ? 
0 : calendarAttribute.hashCode()); - result = prime * result + ((calendarSetAttribute == null) ? 0 : calendarSetAttribute.hashCode()); - result = prime * result + ((dateAttribute == null) ? 0 : dateAttribute.hashCode()); - result = prime * result + ((dateSetAttribute == null) ? 0 : dateSetAttribute.hashCode()); - result = prime * result + ((doubleAttribute == null) ? 0 : doubleAttribute.hashCode()); - result = prime * result + ((doubleSetAttribute == null) ? 0 : doubleSetAttribute.hashCode()); - result = prime * result + ((floatAttribute == null) ? 0 : floatAttribute.hashCode()); - result = prime * result + ((floatSetAttribute == null) ? 0 : floatSetAttribute.hashCode()); - result = prime * result + ((integerAttribute == null) ? 0 : integerAttribute.hashCode()); - result = prime * result + ((integerSetAttribute == null) ? 0 : integerSetAttribute.hashCode()); - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((lastUpdater == null) ? 0 : lastUpdater.hashCode()); - result = prime * result + ((longAttribute == null) ? 0 : longAttribute.hashCode()); - result = prime * result + ((longSetAttribute == null) ? 0 : longSetAttribute.hashCode()); - result = prime * result + ((rangeKey == null) ? 0 : rangeKey.hashCode()); - result = prime * result + ((stringSetAttribute == null) ? 0 : stringSetAttribute.hashCode()); - result = prime * result + ((version == null) ? 0 : version.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - CrossSdkVerificationClass other = (CrossSdkVerificationClass) obj; - if (bigDecimalAttribute == null) { - if (other.bigDecimalAttribute != null) { - return false; - } - } else if (!bigDecimalAttribute.equals(other.bigDecimalAttribute)) { - return false; - } - if (bigDecimalSetAttribute == null) { - if (other.bigDecimalSetAttribute != null) { - return false; - } - } else if (!bigDecimalSetAttribute.equals(other.bigDecimalSetAttribute)) { - return false; - } - if (bigIntegerAttribute == null) { - if (other.bigIntegerAttribute != null) { - return false; - } - } else if (!bigIntegerAttribute.equals(other.bigIntegerAttribute)) { - return false; - } - if (bigIntegerSetAttribute == null) { - if (other.bigIntegerSetAttribute != null) { - return false; - } - } else if (!bigIntegerSetAttribute.equals(other.bigIntegerSetAttribute)) { - return false; - } - if (booleanAttribute == null) { - if (other.booleanAttribute != null) { - return false; - } - } else if (!booleanAttribute.equals(other.booleanAttribute)) { - return false; - } - if (booleanSetAttribute == null) { - if (other.booleanSetAttribute != null) { - return false; - } - } else if (!booleanSetAttribute.equals(other.booleanSetAttribute)) { - return false; - } - if (byteAttribute == null) { - if (other.byteAttribute != null) { - return false; - } - } else if (!byteAttribute.equals(other.byteAttribute)) { - return false; - } - if (byteSetAttribute == null) { - if (other.byteSetAttribute != null) { - return false; - } - } else if (!byteSetAttribute.equals(other.byteSetAttribute)) { - return false; - } - if (calendarAttribute == null) { - if (other.calendarAttribute != null) { - return false; - } - } else if (!calendarAttribute.equals(other.calendarAttribute)) { - return false; - } - if (calendarSetAttribute == null) { - if (other.calendarSetAttribute != null) { - return false; - } - } else if 
(!calendarSetAttribute.equals(other.calendarSetAttribute)) { - return false; - } - if (dateAttribute == null) { - if (other.dateAttribute != null) { - return false; - } - } else if (!dateAttribute.equals(other.dateAttribute)) { - return false; - } - if (dateSetAttribute == null) { - if (other.dateSetAttribute != null) { - return false; - } - } else if (!dateSetAttribute.equals(other.dateSetAttribute)) { - return false; - } - if (doubleAttribute == null) { - if (other.doubleAttribute != null) { - return false; - } - } else if (!doubleAttribute.equals(other.doubleAttribute)) { - return false; - } - if (doubleSetAttribute == null) { - if (other.doubleSetAttribute != null) { - return false; - } - } else if (!doubleSetAttribute.equals(other.doubleSetAttribute)) { - return false; - } - if (floatAttribute == null) { - if (other.floatAttribute != null) { - return false; - } - } else if (!floatAttribute.equals(other.floatAttribute)) { - return false; - } - if (floatSetAttribute == null) { - if (other.floatSetAttribute != null) { - return false; - } - } else if (!floatSetAttribute.equals(other.floatSetAttribute)) { - return false; - } - if (integerAttribute == null) { - if (other.integerAttribute != null) { - return false; - } - } else if (!integerAttribute.equals(other.integerAttribute)) { - return false; - } - if (integerSetAttribute == null) { - if (other.integerSetAttribute != null) { - return false; - } - } else if (!integerSetAttribute.equals(other.integerSetAttribute)) { - return false; - } - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (lastUpdater == null) { - if (other.lastUpdater != null) { - return false; - } - } else if (!lastUpdater.equals(other.lastUpdater)) { - return false; - } - if (longAttribute == null) { - if (other.longAttribute != null) { - return false; - } - } else if (!longAttribute.equals(other.longAttribute)) { - return false; - } - if (longSetAttribute == null) { - if (other.longSetAttribute != null) { - return false; - } - } else if (!longSetAttribute.equals(other.longSetAttribute)) { - return false; - } - if (rangeKey == null) { - if (other.rangeKey != null) { - return false; - } - } else if (!rangeKey.equals(other.rangeKey)) { - return false; - } - if (stringSetAttribute == null) { - if (other.stringSetAttribute != null) { - return false; - } - } else if (!stringSetAttribute.equals(other.stringSetAttribute)) { - return false; - } - if (version == null) { - if (other.version != null) { - return false; - } - } else if (!version.equals(other.version)) { - return false; - } - return true; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/Currency.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/Currency.java deleted file mode 100644 index 8b5936b6f6c5..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/Currency.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
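The null-guarded equals and hashCode blocks repeated across these POJOs are the pre-java.util.Objects idiom; the same contract can be written far more compactly. Purely as an illustrative aside (not part of the change), the equivalent for two of the fields:

import java.util.Objects;

final class EqualsHashCodeSketch {
    private final String key;
    private final String rangeKey;

    EqualsHashCodeSketch(String key, String rangeKey) {
        this.key = key;
        this.rangeKey = rangeKey;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        EqualsHashCodeSketch that = (EqualsHashCodeSketch) o;
        return Objects.equals(key, that.key) && Objects.equals(rangeKey, that.rangeKey);
    }

    @Override
    public int hashCode() {
        return Objects.hash(key, rangeKey);
    }
}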
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -/** - * A currency object. - */ -public class Currency { - private KeyAndVal kav; - - public Currency(final Double amount, final String unit) { - this.kav = new KeyAndVal(amount, unit); - } - - public Currency() { - this((Double) null, (String) null); - } - - public Double getAmount() { - return kav.getKey(); - } - - public void setAmount(final Double amount) { - kav.setKey(amount); - } - - public String getUnit() { - return kav.getVal(); - } - - public void setUnit(final String unit) { - kav.setVal(unit); - } - - @Override - public final boolean equals(final Object o) { - return (o == this || (o instanceof Currency && kav.equals(((Currency) o).kav))); - } - - @Override - public final int hashCode() { - return kav.hashCode(); - } - - @Override - public final String toString() { - return kav.toString(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/DateRange.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/DateRange.java deleted file mode 100644 index b40269c8021d..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/DateRange.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import java.util.Date; - -/** - * A date range object. - */ -public class DateRange { - private KeyAndVal kav; - - public DateRange(final Date start, final Date end) { - this.kav = new KeyAndVal(start, end); - } - - public DateRange(final Date date, final Long start, final Long end) { - this(start == null ? null : new Date(date.getTime() + start), end == null ? 
null : new Date(date.getTime() + end)); - } - - public DateRange() { - this((Date) null, (Date) null); - } - - public Date getStart() { - return kav.getKey(); - } - - public void setStart(final Date start) { - kav.setKey(start); - } - - public Date getEnd() { - return kav.getVal(); - } - - public void setEnd(final Date end) { - kav.setVal(end); - } - - @Override - public final boolean equals(final Object o) { - return (o == this || (o instanceof DateRange && kav.equals(((DateRange) o).kav))); - } - - @Override - public final int hashCode() { - return kav.hashCode(); - } - - @Override - public final String toString() { - return kav.toString(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/GsiWithAlwaysUpdateTimestamp.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/GsiWithAlwaysUpdateTimestamp.java deleted file mode 100644 index 84f1179cd02a..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/GsiWithAlwaysUpdateTimestamp.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGenerateStrategy; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAutoGeneratedTimestamp; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbIndexHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -@DynamoDbTable(tableName = "Foo") -public class GsiWithAlwaysUpdateTimestamp { - - @DynamoDbHashKey - private String hashKey; - - @DynamoDbRangeKey - private String rangeKey; - - @DynamoDbAutoGeneratedTimestamp(strategy = DynamoDbAutoGenerateStrategy.ALWAYS) - @DynamoDbIndexHashKey(globalSecondaryIndexName = "last-mod-date") - private Long lastModifiedDate; - - public String getHashKey() { - return hashKey; - } - - public GsiWithAlwaysUpdateTimestamp setHashKey(String hashKey) { - this.hashKey = hashKey; - return this; - } - - public String getRangeKey() { - return rangeKey; - } - - public GsiWithAlwaysUpdateTimestamp setRangeKey(String rangeKey) { - this.rangeKey = rangeKey; - return this; - } - - public Long getLastModifiedDate() { - return lastModifiedDate; - } - - public GsiWithAlwaysUpdateTimestamp setLastModifiedDate(Long lastModifiedDate) { - this.lastModifiedDate = lastModifiedDate; - return this; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/KeyAndVal.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/KeyAndVal.java deleted file mode 100644 index fed9940c8b08..000000000000 --- 
a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/KeyAndVal.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -/** - * Simple key-val domain class. - */ -public class KeyAndVal { - private K key; - private V val; - - public KeyAndVal(final K key, final V val) { - this.key = key; - this.val = val; - } - - public KeyAndVal() { - this((K) null, (V) null); - } - - public K getKey() { - return this.key; - } - - public void setKey(final K key) { - this.key = key; - } - - public V getVal() { - return this.val; - } - - public void setVal(final V val) { - this.val = val; - } - - public final boolean keyEquals(final Object o) { - return key == o || (key != null && key.equals(o)); - } - - public final boolean valEquals(final Object o) { - return val == o || (val != null && val.equals(o)); - } - - @Override - public final boolean equals(final Object o) { - return o == this || (o instanceof KeyAndVal && keyEquals(((KeyAndVal) o).key) && valEquals(((KeyAndVal) o).val)); - } - - @Override - public final int hashCode() { - return 1 + (key == null ? 0 : key.hashCode()) + (val == null ? 0 : val.hashCode()); - } - - @Override - public final String toString() { - return "[" + key + "," + val + "]"; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/PhoneNumber.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/PhoneNumber.java deleted file mode 100644 index 5c023a9f84bf..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/PhoneNumber.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -/** - * A phone number object. 
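Currency and DateRange above both delegate their state to the KeyAndVal pair type. A tiny usage sketch, assuming the class is declared with the two type parameters its key/val fields imply (KeyAndVal<K, V>):

public class KeyAndValSketch {
    public static void main(String[] args) {
        KeyAndVal<Double, String> price = new KeyAndVal<>(10.5, "USD");
        price.setVal("EUR");
        System.out.println(price); // prints [10.5,EUR] per the toString above
    }
}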
- */ -public class PhoneNumber { - - private String areaCode; - private String exchange; - private String subscriber; - - public PhoneNumber(final String areaCode, final String exchange, final String subscriber) { - setAreaCode(areaCode); - setExchange(exchange); - setSubscriber(subscriber); - } - - public PhoneNumber() { - this(null, null, null); - } - - public String getAreaCode() { - return this.areaCode; - } - - public void setAreaCode(final String areaCode) { - this.areaCode = areaCode; - } - - public String getExchange() { - return this.exchange; - } - - public void setExchange(final String exchange) { - this.exchange = exchange; - } - - public String getSubscriber() { - return this.subscriber; - } - - public void setSubscriber(String subscriber) { - this.subscriber = subscriber; - } - - public final boolean areaCodeEquals(final Object o) { - return (areaCode == o || (areaCode != null && areaCode.equals(o))); - } - - public final boolean exchangeEquals(final Object o) { - return (exchange == o || (exchange != null && exchange.equals(o))); - } - - public final boolean subscriberEquals(final Object o) { - return (subscriber == o || (subscriber != null && subscriber.equals(o))); - } - - @Override - public final boolean equals(final Object o) { - return (o == this || (o instanceof PhoneNumber && areaCodeEquals(((PhoneNumber) o).areaCode) && exchangeEquals(((PhoneNumber) o).exchange) && subscriberEquals(((PhoneNumber) o).subscriber))); - } - - @Override - public final int hashCode() { - return (1 + (areaCode == null ? 0 : areaCode.hashCode()) + (exchange == null ? 0 : exchange.hashCode()) + (subscriber == null ? 0 : subscriber.hashCode())); - } - - @Override - public final String toString() { - return (getClass().getName() + "{areaCode=" + areaCode + ",exchange=" + exchange + ",subscriber=" + subscriber + "}"); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/RangeKeyClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/RangeKeyClass.java deleted file mode 100644 index fdeb5940d0a3..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/RangeKeyClass.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import java.math.BigDecimal; -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbRangeKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbVersionAttribute; - -/** - * Comprehensive domain class - */ -@DynamoDbTable(tableName = "aws-java-sdk-range-test") -public class RangeKeyClass { - - private long key; - private double rangeKey; - private Long version; - - private Set integerSetAttribute; - private Set stringSetAttribute; - private BigDecimal bigDecimalAttribute; - private String stringAttribute; - - @DynamoDbHashKey - public long getKey() { - return key; - } - - public void setKey(long key) { - this.key = key; - } - - @DynamoDbRangeKey - public double getRangeKey() { - return rangeKey; - } - - public void setRangeKey(double rangeKey) { - this.rangeKey = rangeKey; - } - - @DynamoDbAttribute(attributeName = "integerSetAttribute") - public Set getIntegerAttribute() { - return integerSetAttribute; - } - - public void setIntegerAttribute(Set integerAttribute) { - this.integerSetAttribute = integerAttribute; - } - - @DynamoDbAttribute - public Set getStringSetAttribute() { - return stringSetAttribute; - } - - public void setStringSetAttribute(Set stringSetAttribute) { - this.stringSetAttribute = stringSetAttribute; - } - - @DynamoDbAttribute - public BigDecimal getBigDecimalAttribute() { - return bigDecimalAttribute; - } - - public void setBigDecimalAttribute(BigDecimal bigDecimalAttribute) { - this.bigDecimalAttribute = bigDecimalAttribute; - } - - @DynamoDbAttribute - public String getStringAttribute() { - return stringAttribute; - } - - public void setStringAttribute(String stringAttribute) { - this.stringAttribute = stringAttribute; - } - - @DynamoDbVersionAttribute - public Long getVersion() { - return version; - } - - public void setVersion(Long version) { - this.version = version; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((bigDecimalAttribute == null) ? 0 : bigDecimalAttribute.hashCode()); - result = prime * result + ((integerSetAttribute == null) ? 0 : integerSetAttribute.hashCode()); - result = prime * result + (int) (key ^ (key >>> 32)); - long temp; - temp = Double.doubleToLongBits(rangeKey); - result = prime * result + (int) (temp ^ (temp >>> 32)); - result = prime * result + ((stringAttribute == null) ? 0 : stringAttribute.hashCode()); - result = prime * result + ((stringSetAttribute == null) ? 0 : stringSetAttribute.hashCode()); - result = prime * result + ((version == null) ? 
0 : version.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - RangeKeyClass other = (RangeKeyClass) obj; - if (bigDecimalAttribute == null) { - if (other.bigDecimalAttribute != null) { - return false; - } - } else if (!bigDecimalAttribute.equals(other.bigDecimalAttribute)) { - return false; - } - if (integerSetAttribute == null) { - if (other.integerSetAttribute != null) { - return false; - } - } else if (!integerSetAttribute.equals(other.integerSetAttribute)) { - return false; - } - if (key != other.key) { - return false; - } - if (Double.doubleToLongBits(rangeKey) != Double.doubleToLongBits(other.rangeKey)) { - return false; - } - if (stringAttribute == null) { - if (other.stringAttribute != null) { - return false; - } - } else if (!stringAttribute.equals(other.stringAttribute)) { - return false; - } - if (stringSetAttribute == null) { - if (other.stringSetAttribute != null) { - return false; - } - } else if (!stringSetAttribute.equals(other.stringSetAttribute)) { - return false; - } - if (version == null) { - if (other.version != null) { - return false; - } - } else if (!version.equals(other.version)) { - return false; - } - return true; - } - - @Override - public String toString() { - return "RangeKeyClass [key=" + key + ", rangeKey=" + rangeKey + ", version=" + version - + ", integerSetAttribute=" + integerSetAttribute + ", stringSetAttribute=" + stringSetAttribute - + ", bigDecimalAttribute=" + bigDecimalAttribute + ", stringAttribute=" + stringAttribute + "]"; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/S3LinksTestClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/S3LinksTestClass.java deleted file mode 100644 index 11ef37ee2eb9..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/S3LinksTestClass.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
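RangeKeyClass maps a numeric hash key ("key") together with a double range key ("rangeKey") onto the aws-java-sdk-range-test table, plus a version attribute. A hedged sketch of the equivalent composite-key lookup at the raw v2 client level (the key values are made up):

import java.util.Map;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.GetItemRequest;
import software.amazon.awssdk.services.dynamodb.model.GetItemResponse;

public class CompositeKeyLookupSketch {
    public static void main(String[] args) {
        try (DynamoDbClient dynamo = DynamoDbClient.create()) {
            Map<String, AttributeValue> key = Map.of(
                    "key", AttributeValue.builder().n("42").build(),        // hash key, numeric
                    "rangeKey", AttributeValue.builder().n("1.5").build()); // range key, numeric

            GetItemResponse response = dynamo.getItem(GetItemRequest.builder()
                                                                    .tableName("aws-java-sdk-range-test")
                                                                    .key(key)
                                                                    .build());
            System.out.println(response.item());
        }
    }
}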
- */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.S3Link; - -/** - * Test domain class with a single string key, and two S3Links - */ -@DynamoDbTable(tableName = "aws-java-sdk-util") -public class S3LinksTestClass { - - private String key; - private S3Link s3LinkWest; - private S3Link s3LinkEast; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public S3LinksTestClass withKey(String key) { - setKey(key); - return this; - } - - public S3Link s3LinkWest() { - return s3LinkWest; - } - - public void setS3LinkWest(S3Link s3LinkAttribute) { - this.s3LinkWest = s3LinkAttribute; - } - - public S3Link s3LinkEast() { - return s3LinkEast; - } - - public void setS3LinkEast(S3Link s3LinkEast) { - this.s3LinkEast = s3LinkEast; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/StringAttributeClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/StringAttributeClass.java deleted file mode 100644 index 9df6e653868d..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/StringAttributeClass.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -/** - * Test domain class with a single string attribute and a string key - */ -@DynamoDbTable(tableName = "aws-java-sdk-util") -public class StringAttributeClass { - - private String key; - private String stringAttribute; - private String renamedAttribute; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAttribute - public String getStringAttribute() { - return stringAttribute; - } - - public void setStringAttribute(String stringAttribute) { - this.stringAttribute = stringAttribute; - } - - @DynamoDbAttribute(attributeName = "originalName") - public String getRenamedAttribute() { - return renamedAttribute; - } - - public void setRenamedAttribute(String renamedAttribute) { - this.renamedAttribute = renamedAttribute; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((renamedAttribute == null) ? 0 : renamedAttribute.hashCode()); - result = prime * result + ((stringAttribute == null) ? 
0 : stringAttribute.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - StringAttributeClass other = (StringAttributeClass) obj; - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (renamedAttribute == null) { - if (other.renamedAttribute != null) { - return false; - } - } else if (!renamedAttribute.equals(other.renamedAttribute)) { - return false; - } - if (stringAttribute == null) { - if (other.stringAttribute != null) { - return false; - } - } else if (!stringAttribute.equals(other.stringAttribute)) { - return false; - } - return true; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/StringSetAttributeClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/StringSetAttributeClass.java deleted file mode 100644 index 716c3512c19e..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/StringSetAttributeClass.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import java.util.Set; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbAttribute; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; - -/** - * Test domain class with a string set attribute and a string key - */ -@DynamoDbTable(tableName = "aws-java-sdk-util") -public class StringSetAttributeClass { - - private String key; - private Set stringSetAttribute; - private Set StringSetAttributeRenamed; - - @DynamoDbHashKey - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - @DynamoDbAttribute - public Set getStringSetAttribute() { - return stringSetAttribute; - } - - public void setStringSetAttribute(Set stringSetAttribute) { - this.stringSetAttribute = stringSetAttribute; - } - - @DynamoDbAttribute(attributeName = "originalName") - public Set getStringSetAttributeRenamed() { - return StringSetAttributeRenamed; - } - - public void setStringSetAttributeRenamed(Set stringSetAttributeRenamed) { - StringSetAttributeRenamed = stringSetAttributeRenamed; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((StringSetAttributeRenamed == null) ? 0 : StringSetAttributeRenamed.hashCode()); - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((stringSetAttribute == null) ? 
0 : stringSetAttribute.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - StringSetAttributeClass other = (StringSetAttributeClass) obj; - if (StringSetAttributeRenamed == null) { - if (other.StringSetAttributeRenamed != null) { - return false; - } - } else if (!StringSetAttributeRenamed.equals(other.StringSetAttributeRenamed)) { - return false; - } - if (key == null) { - if (other.key != null) { - return false; - } - } else if (!key.equals(other.key)) { - return false; - } - if (stringSetAttribute == null) { - if (other.stringSetAttribute != null) { - return false; - } - } else if (!stringSetAttribute.equals(other.stringSetAttribute)) { - return false; - } - return true; - } - - -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/SubClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/SubClass.java deleted file mode 100644 index a0eec5bd30fb..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/SubClass.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import java.nio.ByteBuffer; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbDocument; - -@DynamoDbDocument -public class SubClass { - - private String name; - private Integer value; - private ByteBuffer bb; - - public SubClass() { - name = "name"; - value = 123; - } - - public static boolean equals(T one, T two) { - if (one == null) { - return (two == null); - } else { - return one.equals(two); - } - } - - public String getName() { - return name; - } - - public void setName(String n) { - name = n; - } - - public Integer getValue() { - return value; - } - - public void setValue(Integer i) { - value = i; - } - - public ByteBuffer getNull() { - return bb; - } - - public void setNull(ByteBuffer b) { - bb = b; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof SubClass)) { - return false; - } - - SubClass that = (SubClass) o; - - return equals(this.name, that.name) - && equals(this.value, that.value) - && equals(this.bb, that.bb); - } - - @Override - public String toString() { - return "{name=" + name + ", value=" + value + ", bb=" + bb + "}"; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/TestClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/TestClass.java deleted file mode 100644 index c426da394976..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/TestClass.java +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. 
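SubClass is annotated @DynamoDbDocument, so the mapper persists it as a nested DynamoDB map (M) attribute rather than flattening its fields into the parent item. A hedged sketch of a parent class carrying such a document attribute (class name and table are hypothetical):

import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable;

@DynamoDbTable(tableName = "example-table") // hypothetical table
public class DocumentHolderSketch {
    private String id;
    private SubClass document; // stored as a nested M attribute

    @DynamoDbHashKey
    public String getId() { return id; }
    public void setId(String id) { this.id = id; }

    public SubClass getDocument() { return document; }
    public void setDocument(SubClass document) { this.document = document; }
}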
All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.Calendar; -import java.util.Date; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMarshalling; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbNativeBoolean; -import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable; -import software.amazon.awssdk.services.dynamodb.datamodeling.RandomUuidMarshaller; -import software.amazon.awssdk.services.dynamodb.datamodeling.S3Link; - -@DynamoDbTable(tableName = "nonexisting-test-tablename") -public class TestClass { - - @DynamoDbHashKey - public String getId() { - return null; - } - - public void setId(String id) { - } - - public boolean getBoolean() { - return false; - } - - public void setBoolean(boolean value) { - } - - public Boolean getBoxedBoolean() { - return false; - } - - public void setBoxedBoolean(Boolean value) { - } - - @DynamoDbNativeBoolean - public boolean getNativeBoolean() { - return false; - } - - public void setNativeBoolean(boolean value) { - } - - public String getString() { - return null; - } - - public void setString(String value) { - } - - public UUID getUuid() { - return null; - } - - public void setUuid(UUID u) { - } - - @DynamoDbMarshalling(marshallerClass = RandomUuidMarshaller.class) - public String getCustomString() { - return null; - } - - public void setCustomString(String s) { - } - - public Date getDate() { - return null; - } - - public void setDate(Date d) { - } - - public Calendar getCalendar() { - return null; - } - - public void setCalendar(Calendar c) { - } - - public byte getByte() { - return 0; - } - - public void setByte(byte b) { - } - - public Byte getBoxedByte() { - return 0; - } - - public void setBoxedByte(Byte b) { - } - - public short getShort() { - return 0; - } - - public void setShort(short s) { - } - - public Short getBoxedShort() { - return 0; - } - - public void setBoxedShort(Short s) { - } - - public int getInt() { - return 0; - } - - public void setInt(int i) { - } - - public Integer getBoxedInt() { - return 0; - } - - public void setBoxedInt(Integer i) { - } - - public long getLong() { - return 0; - } - - public void setLong(long l) { - } - - public Long getBoxedLong() { - return 0l; - } - - public void setBoxedLong(Long l) { - } - - public BigInteger getBigInt() { - return BigInteger.ZERO; - } - - public void setBigInt(BigInteger i) { - } - - public float getFloat() { - return 0; - } - - public void setFloat(float f) { - } - - public Float getBoxedFloat() { - return 0f; - } - - public void setBoxedFloat(Float f) { - } - - public double getDouble() { - return 0; - } - - public void setDouble(double d) { - } - - public Double getBoxedDouble() { - return 0d; - } - - public void 
setBoxedDouble(Double d) { - } - - public BigDecimal getBigDecimal() { - return BigDecimal.ZERO; - } - - public void setBigDecimal(BigDecimal d) { - } - - public byte[] getByteArray() { - return null; - } - - public void setByteArray(byte[] b) { - } - - public ByteBuffer getByteBuffer() { - return null; - } - - public void setByteBuffer(ByteBuffer b) { - } - - public Set getBooleanSet() { - return null; - } - - public void setBooleanSet(Set s) { - } - - public Set getUuidSet() { - return null; - } - - public void setUuidSet(Set s) { - } - - public Set getStringSet() { - return null; - } - - public void setStringSet(Set s) { - } - - public Set getDateSet() { - return null; - } - - public void setDateSet(Set d) { - } - - public Set getCalendarSet() { - return null; - } - - public void setCalendarSet(Set c) { - } - - public Set getByteSet() { - return null; - } - - public void setByteSet(Set s) { - } - - public Set getShortSet() { - return null; - } - - public void setShortSet(Set s) { - } - - public Set getIntSet() { - return null; - } - - public void setIntSet(Set s) { - } - - public Set getLongSet() { - return null; - } - - public void setLongSet(Set s) { - } - - public Set getBigIntegerSet() { - return null; - } - - public void setBigIntegerSet(Set s) { - } - - public Set getFloatSet() { - return null; - } - - public void setFloatSet(Set s) { - } - - public Set getDoubleSet() { - return null; - } - - public void setDoubleSet(Set s) { - } - - public Set getBigDecimalSet() { - return null; - } - - public void setBigDecimalSet(Set s) { - } - - public Set getByteArraySet() { - return null; - } - - public void setByteArraySet(Set s) { - } - - public Set getByteBufferSet() { - return null; - } - - public void setByteBufferSet(Set s) { - } - - public Set getObjectSet() { - return null; - } - - public void setObjectSet(Set s) { - } - - public List getList() { - return null; - } - - public void setList(List l) { - } - - public List getObjectList() { - return null; - } - - public void setObjectList(List l) { - } - - public List> getSetList() { - return null; - } - - public void setSetList(List> l) { - } - - public Map getMap() { - return null; - } - - public void setMap(Map m) { - } - - public Map> getSetMap() { - return null; - } - - public void setSetMap(Map> m) { - } - - public Map getBogusMap() { - return null; - } - - public void setBogusMap(Map m) { - } - - public SubClass getObject() { - return null; - } - - public void setObject(SubClass c) { - } - - public S3Link getS3Link() { - return null; - } - - public void setS3Link(S3Link link) { - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/UnannotatedSubClass.java b/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/UnannotatedSubClass.java deleted file mode 100644 index bb6247319954..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/software/amazon/awssdk/services/dynamodb/pojos/UnannotatedSubClass.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
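The TestClass fixture deleted above is a catch-all POJO for the v1-style mapper's type-conversion tests: annotations such as @DynamoDbTable, @DynamoDbHashKey and @DynamoDbMarshalling are read from getters rather than fields. A minimal fixture in the same style is sketched below; the class and property names are invented, and only the annotation package is taken from the deleted file.

```java
// Hypothetical fixture in the style of the deleted TestClass; names are invented.
package software.amazon.awssdk.services.dynamodb.pojos;

import java.util.Set;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbHashKey;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbMarshalling;
import software.amazon.awssdk.services.dynamodb.datamodeling.DynamoDbTable;
import software.amazon.awssdk.services.dynamodb.datamodeling.RandomUuidMarshaller;

@DynamoDbTable(tableName = "nonexisting-test-tablename")
public class MinimalPojoFixture {

    private String id;
    private Set<String> tags;
    private String custom;

    @DynamoDbHashKey                              // hash key, stored as a DynamoDB string (S)
    public String getId() { return id; }
    public void setId(String id) { this.id = id; }

    public Set<String> getTags() { return tags; } // string set, stored as SS
    public void setTags(Set<String> tags) { this.tags = tags; }

    @DynamoDbMarshalling(marshallerClass = RandomUuidMarshaller.class)
    public String getCustom() { return custom; }  // custom marshaller, as on TestClass.getCustomString()
    public void setCustom(String custom) { this.custom = custom; }
}
```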
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.dynamodb.pojos; - -public class UnannotatedSubClass { - public UnannotatedSubClass getChild() { - return null; - } - - public void setChild(UnannotatedSubClass c) { - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/resources/RequiredResources.java b/test/dynamodbmapper-v1/src/test/java/utils/resources/RequiredResources.java deleted file mode 100644 index 71d3f3f6434f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/resources/RequiredResources.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package utils.resources; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation for resources required for the test case. It could be applied to - * either a type (test class) or a method (test method). - */ -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.METHOD, ElementType.TYPE}) -public @interface RequiredResources { - - /** - * An array of RequiredResource annotations - */ - RequiredResource[] value() default {}; - - enum ResourceCreationPolicy { - /** - * Existing resource will be reused if it matches the required resource - * definition (i.e. TestResource.getResourceStatus() returns AVAILABLE). - */ - REUSE_EXISTING, - /** - * Always destroy existing resources (if any) and then recreate new ones for test. - */ - ALWAYS_RECREATE; - } - - enum ResourceRetentionPolicy { - /** - * Do not delete the created resource after test. - */ - KEEP, - /** - * When used for @RequiredAnnota - */ - DESTROY_IMMEDIATELY, - DESTROY_AFTER_ALL_TESTS; - } - - @interface RequiredResource { - - /** - * The Class object of the TestResource class - */ - Class resource(); - - /** - * How the resource should be created before the test starts. - */ - ResourceCreationPolicy creationPolicy(); - - /** - * Retention policy after the test is done. - */ - ResourceRetentionPolicy retentionPolicy(); - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/resources/ResourceCentricBlockJUnit4ClassRunner.java b/test/dynamodbmapper-v1/src/test/java/utils/resources/ResourceCentricBlockJUnit4ClassRunner.java deleted file mode 100644 index 0cb576b15a18..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/resources/ResourceCentricBlockJUnit4ClassRunner.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package utils.resources; - -import java.util.HashSet; -import java.util.Set; -import org.junit.Ignore; -import org.junit.runner.Description; -import org.junit.runner.notification.Failure; -import org.junit.runner.notification.RunNotifier; -import org.junit.runners.BlockJUnit4ClassRunner; -import org.junit.runners.model.FrameworkMethod; -import org.junit.runners.model.InitializationError; -import org.junit.runners.model.Statement; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import utils.resources.RequiredResources.RequiredResource; -import utils.resources.RequiredResources.ResourceRetentionPolicy; - -public class ResourceCentricBlockJUnit4ClassRunner extends BlockJUnit4ClassRunner { - - private final Set resourcesToBeDestroyedAfterAllTests; - - private final RequiredResources classRequiredResourcesAnnotation; - - private final Logger log = LoggerFactory.getLogger(ResourceCentricBlockJUnit4ClassRunner.class); - - public ResourceCentricBlockJUnit4ClassRunner(Class klass) - throws InitializationError { - super(klass); - - classRequiredResourcesAnnotation = klass.getAnnotation(RequiredResources.class); - resourcesToBeDestroyedAfterAllTests = new HashSet(); - } - - /** - * - */ - private static TestResource createResourceInstance(RequiredResource resourceAnnotation) - throws InstantiationException, IllegalAccessException { - Class resourceClazz = resourceAnnotation.resource(); - if (resourceClazz == null) { - throw new IllegalArgumentException( - "resource parameter is missing for the @RequiredResource annotation."); - } - return resourceClazz.newInstance(); - } - - @Override - protected void runChild(final FrameworkMethod method, RunNotifier notifier) { - Description description = describeChild(method); - if (method.getAnnotation(Ignore.class) != null) { - notifier.fireTestIgnored(description); - } else { - RequiredResources annotation = method.getAnnotation(RequiredResources.class); - if (annotation != null) { - try { - beforeRunLeaf(annotation.value()); - } catch (Exception e) { - notifier.fireTestFailure(new Failure(description, e)); - } - - } - - runLeaf(methodBlock(method), description, notifier); - - if (annotation != null) { - try { - afterRunLeaf(annotation.value()); - } catch (Exception e) { - notifier.fireTestFailure(new Failure(description, e)); - } - } - } - } - - /** - * Override the withBeforeClasses method to inject executing resource - * creation between @BeforeClass methods and test methods. - */ - @Override - protected Statement withBeforeClasses(final Statement statement) { - Statement withRequiredResourcesCreation = new Statement() { - - @Override - public void evaluate() throws Throwable { - if (classRequiredResourcesAnnotation != null) { - beforeRunClass(classRequiredResourcesAnnotation.value()); - } - statement.evaluate(); - } - }; - return super.withBeforeClasses(withRequiredResourcesCreation); - } - - /** - * Override the withAfterClasses method to inject executing resource - * creation between test methods and the @AfterClass methods. 
- */ - @Override - protected Statement withAfterClasses(final Statement statement) { - Statement withRequiredResourcesDeletion = new Statement() { - - @Override - public void evaluate() throws Throwable { - statement.evaluate(); - afterRunClass(); - } - }; - return super.withAfterClasses(withRequiredResourcesDeletion); - } - - private void beforeRunClass(RequiredResource[] resourcesAnnotation) - throws InstantiationException, IllegalAccessException, InterruptedException { - log.debug("Processing @RequiredResources before running the test class..."); - for (RequiredResource resourceAnnotation : resourcesAnnotation) { - TestResource resource = createResourceInstance(resourceAnnotation); - TestResourceUtils.createResource(resource, resourceAnnotation.creationPolicy()); - - if (resourceAnnotation.retentionPolicy() != ResourceRetentionPolicy.KEEP) { - resourcesToBeDestroyedAfterAllTests.add(resource); - } - } - } - - private void afterRunClass() - throws InstantiationException, IllegalAccessException, InterruptedException { - log.debug("Processing @RequiredResources after running the test class..."); - for (TestResource resource : resourcesToBeDestroyedAfterAllTests) { - TestResourceUtils.deleteResource(resource); - } - } - - private void beforeRunLeaf(RequiredResource[] resourcesAnnotation) - throws InstantiationException, IllegalAccessException, InterruptedException { - log.debug("Processing @RequiredResources before running the test..."); - for (RequiredResource resourceAnnotation : resourcesAnnotation) { - TestResource resource = createResourceInstance(resourceAnnotation); - TestResourceUtils.createResource(resource, resourceAnnotation.creationPolicy()); - - if (resourceAnnotation.retentionPolicy() == ResourceRetentionPolicy.DESTROY_AFTER_ALL_TESTS) { - resourcesToBeDestroyedAfterAllTests.add(resource); - } - } - } - - private void afterRunLeaf(RequiredResource[] resourcesAnnotation) - throws InstantiationException, IllegalAccessException, InterruptedException { - log.debug("Processing @RequiredResources after running the test..."); - for (RequiredResource resourceAnnotation : resourcesAnnotation) { - TestResource resource = createResourceInstance(resourceAnnotation); - - if (resourceAnnotation.retentionPolicy() == ResourceRetentionPolicy.DESTROY_IMMEDIATELY) { - TestResourceUtils.deleteResource(resource); - } - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/resources/TestResource.java b/test/dynamodbmapper-v1/src/test/java/utils/resources/TestResource.java deleted file mode 100644 index 98736d15a401..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/resources/TestResource.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package utils.resources; - -/** - * An interface which represents a resource to be used in a test case. - *
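Taken together, the deleted runner and annotation let an integration test declare the tables it needs and have them created before the test methods run and torn down afterwards. The class below is a hypothetical illustration of that wiring; the test name and body are invented, while the runner, annotation, policies, BasicTempTable and DynamoDBTestBase all come from files removed in this diff.

```java
import static org.junit.Assert.assertNotNull;

import org.junit.Test;
import org.junit.runner.RunWith;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import utils.resources.RequiredResources;
import utils.resources.RequiredResources.RequiredResource;
import utils.resources.RequiredResources.ResourceCreationPolicy;
import utils.resources.RequiredResources.ResourceRetentionPolicy;
import utils.resources.ResourceCentricBlockJUnit4ClassRunner;
import utils.resources.tables.BasicTempTable;
import utils.test.util.DynamoDBTestBase;

@RunWith(ResourceCentricBlockJUnit4ClassRunner.class)
@RequiredResources(@RequiredResource(resource = BasicTempTable.class,
                                     creationPolicy = ResourceCreationPolicy.REUSE_EXISTING,
                                     retentionPolicy = ResourceRetentionPolicy.DESTROY_AFTER_ALL_TESTS))
public class TempTableUsageTest {

    @Test
    public void tableIsActiveWhenTestRuns() {
        // withBeforeClasses() has already executed beforeRunClass(), so the declared table exists here.
        DynamoDbClient dynamo = DynamoDBTestBase.getClient();
        assertNotNull(dynamo.describeTable(r -> r.tableName(BasicTempTable.TEMP_TABLE_NAME)).table());
    }
}
```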

    - * Note that sub-classes implementing this interface must provide a no-arg - * constructor. - */ -public interface TestResource { - - /** - * Create/initialize the resource which this TestResource represents. - * - * @param waitTillFinished Whether this method should block until the resource is fully - * initialized. - */ - void create(boolean waitTillFinished); - - /** - * Delete the resource which this TestResource represents. - * - * @param waitTillFinished Whether this method should block until the resource is fully - * initialized. - */ - void delete(boolean waitTillFinished); - - /** - * Returns the current status of the resource which this TestResource - * represents. - */ - ResourceStatus getResourceStatus(); - - /** - * Enum of all the generalized resource statuses. - */ - enum ResourceStatus { - /** - * The resource is currently available, and it is compatible with the - * required resource. - */ - AVAILABLE, - /** - * The resource does not exist and there is no existing resource that is - * incompatible. - */ - NOT_EXIST, - /** - * There is an existing resource that has to be removed before creating - * the required resource. For example, DDB table with the same name but - * different table schema. - */ - EXIST_INCOMPATIBLE_RESOURCE, - /** - * The resource is in transient state (e.g. creating/deleting/updating) - */ - TRANSIENT, - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/resources/TestResourceUtils.java b/test/dynamodbmapper-v1/src/test/java/utils/resources/TestResourceUtils.java deleted file mode 100644 index 5f11cb7244c5..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/resources/TestResourceUtils.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
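The contract above is deliberately small: create, delete, and a status probe that callers poll until the resource leaves the TRANSIENT state. As a purely illustrative sketch (no such class exists in the removed code), an in-memory implementation of the interface could look like this:

```java
package utils.resources;

import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical in-memory resource; it exists only to illustrate the TestResource contract,
// including the no-arg constructor the interface documentation requires.
public class InMemoryTestResource implements TestResource {

    private final AtomicBoolean exists = new AtomicBoolean(false);

    @Override
    public void create(boolean waitTillFinished) {
        exists.set(true);   // nothing asynchronous to wait for in memory
    }

    @Override
    public void delete(boolean waitTillFinished) {
        exists.set(false);
    }

    @Override
    public ResourceStatus getResourceStatus() {
        // Never TRANSIENT, so waiters finalize immediately.
        return exists.get() ? ResourceStatus.AVAILABLE : ResourceStatus.NOT_EXIST;
    }
}
```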
- */ - -package utils.resources; - -import software.amazon.awssdk.testutils.Waiter; -import software.amazon.awssdk.utils.Logger; -import utils.resources.RequiredResources.ResourceCreationPolicy; -import utils.resources.TestResource.ResourceStatus; - - -public class TestResourceUtils { - private static final Logger log = Logger.loggerFor(TestResourceUtils.class); - - public static void createResource(TestResource resource, ResourceCreationPolicy policy) throws InterruptedException { - TestResource.ResourceStatus finalizedStatus = waitForFinalizedStatus(resource); - if (policy == ResourceCreationPolicy.ALWAYS_RECREATE) { - if (finalizedStatus != ResourceStatus.NOT_EXIST) { - resource.delete(true); - } - resource.create(true); - } else if (policy == ResourceCreationPolicy.REUSE_EXISTING) { - switch (finalizedStatus) { - case AVAILABLE: - log.info(() -> "Found existing resource " + resource + " that could be reused..."); - return; - case EXIST_INCOMPATIBLE_RESOURCE: - resource.delete(true); - resource.create(true); - // fallthru - case NOT_EXIST: - resource.create(true); - break; - default: - break; - } - } - } - - public static void deleteResource(TestResource resource) throws InterruptedException { - ResourceStatus finalizedStatus = waitForFinalizedStatus(resource); - if (finalizedStatus != ResourceStatus.NOT_EXIST) { - resource.delete(false); - } - } - - public static ResourceStatus waitForFinalizedStatus(TestResource resource) throws InterruptedException { - return Waiter.run(resource::getResourceStatus) - .until(status -> status != ResourceStatus.TRANSIENT) - .orFail(); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/BasicTempTable.java b/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/BasicTempTable.java deleted file mode 100644 index 979df80e14a2..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/BasicTempTable.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
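The helper above can also be driven outside the JUnit runner. A hedged sketch of a manual lifecycle, using the BasicTempTable resource deleted further below in this diff (the surrounding class and method are invented):

```java
import utils.resources.RequiredResources.ResourceCreationPolicy;
import utils.resources.TestResource;
import utils.resources.TestResourceUtils;
import utils.resources.tables.BasicTempTable;

public class ManualResourceLifecycle {
    public static void main(String[] args) throws InterruptedException {
        TestResource table = new BasicTempTable();

        // ALWAYS_RECREATE: waitForFinalizedStatus() settles any TRANSIENT state,
        // then the existing table (if any) is dropped and recreated.
        TestResourceUtils.createResource(table, ResourceCreationPolicy.ALWAYS_RECREATE);
        try {
            // ... exercise the table ...
        } finally {
            TestResourceUtils.deleteResource(table);
        }
    }
}
```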
- */ - -package utils.resources.tables; - -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import utils.test.resources.DynamoDBTableResource; -import utils.test.util.DynamoDBTestBase; - -public class BasicTempTable extends DynamoDBTableResource { - - public static final String TEMP_TABLE_NAME = "java-sdk-" + System.currentTimeMillis(); - public static final String HASH_KEY_NAME = "hash"; - public static final Long READ_CAPACITY = 10L; - public static final Long WRITE_CAPACITY = 5L; - public static final ProvisionedThroughput DEFAULT_PROVISIONED_THROUGHPUT = - ProvisionedThroughput.builder().readCapacityUnits(READ_CAPACITY).writeCapacityUnits(WRITE_CAPACITY).build(); - - @Override - protected DynamoDbClient getClient() { - return DynamoDBTestBase.getClient(); - } - - @Override - protected CreateTableRequest getCreateTableRequest() { - CreateTableRequest request = CreateTableRequest.builder() - .tableName(TEMP_TABLE_NAME) - .keySchema( - KeySchemaElement.builder().attributeName(HASH_KEY_NAME) - .keyType(KeyType.HASH).build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName( - HASH_KEY_NAME).attributeType( - ScalarAttributeType.S).build()) - .provisionedThroughput(DEFAULT_PROVISIONED_THROUGHPUT).build(); - return request; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/BasicTempTableWithLowThroughput.java b/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/BasicTempTableWithLowThroughput.java deleted file mode 100644 index e5bfbe2ee97f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/BasicTempTableWithLowThroughput.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
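Once a table like the one above is ACTIVE, its public constants are all a test needs to round-trip an item with the plain v2 client. A minimal sketch (the attribute value and class name are invented):

```java
import java.util.HashMap;
import java.util.Map;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.GetItemResponse;
import utils.resources.tables.BasicTempTable;
import utils.test.util.DynamoDBTestBase;

public class BasicTempTableSmokeTest {
    public static void main(String[] args) {
        DynamoDbClient dynamo = DynamoDBTestBase.getClient();

        // Single-attribute item keyed on the table's hash key.
        Map<String, AttributeValue> item = new HashMap<>();
        item.put(BasicTempTable.HASH_KEY_NAME, AttributeValue.builder().s("example-hash").build());

        dynamo.putItem(r -> r.tableName(BasicTempTable.TEMP_TABLE_NAME).item(item));
        GetItemResponse response = dynamo.getItem(r -> r.tableName(BasicTempTable.TEMP_TABLE_NAME).key(item));

        System.out.println(response.item());
    }
}
```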
- */ - -package utils.resources.tables; - -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import utils.test.resources.DynamoDBTableResource; -import utils.test.util.DynamoDBTestBase; - -/** - * DynamoDB table used by {@link ProvisionedThroughputThrottlingIntegrationTest} - */ -public class BasicTempTableWithLowThroughput extends DynamoDBTableResource { - - public static final String TEMP_TABLE_NAME = "java-sdk-low-throughput-" + System.currentTimeMillis(); - public static final String HASH_KEY_NAME = "hash"; - public static final Long READ_CAPACITY = 1L; - public static final Long WRITE_CAPACITY = 1L; - public static final ProvisionedThroughput DEFAULT_PROVISIONED_THROUGHPUT = - ProvisionedThroughput.builder().readCapacityUnits(READ_CAPACITY).writeCapacityUnits(WRITE_CAPACITY).build(); - - @Override - protected DynamoDbClient getClient() { - return DynamoDBTestBase.getClient(); - } - - @Override - protected CreateTableRequest getCreateTableRequest() { - CreateTableRequest request = CreateTableRequest.builder() - .tableName(TEMP_TABLE_NAME) - .keySchema( - KeySchemaElement.builder().attributeName(HASH_KEY_NAME) - .keyType(KeyType.HASH).build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName( - HASH_KEY_NAME).attributeType( - ScalarAttributeType.S).build()) - .provisionedThroughput(DEFAULT_PROVISIONED_THROUGHPUT) - .build(); - return request; - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/TempTableWithBinaryKey.java b/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/TempTableWithBinaryKey.java deleted file mode 100644 index 1e1c38d58629..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/TempTableWithBinaryKey.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package utils.resources.tables; - -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import utils.test.resources.DynamoDBTableResource; -import utils.test.util.DynamoDBTestBase; - -public class TempTableWithBinaryKey extends DynamoDBTableResource { - - public static final String TEMP_BINARY_TABLE_NAME = "java-sdk-binary-" + System.currentTimeMillis(); - public static final String HASH_KEY_NAME = "hash"; - public static final Long READ_CAPACITY = 10L; - public static final Long WRITE_CAPACITY = 5L; - public static final ProvisionedThroughput DEFAULT_PROVISIONED_THROUGHPUT = - ProvisionedThroughput.builder().readCapacityUnits(READ_CAPACITY).writeCapacityUnits(WRITE_CAPACITY).build(); - - @Override - protected DynamoDbClient getClient() { - return DynamoDBTestBase.getClient(); - } - - @Override - protected CreateTableRequest getCreateTableRequest() { - CreateTableRequest request = CreateTableRequest.builder() - .tableName(TEMP_BINARY_TABLE_NAME) - .keySchema( - KeySchemaElement.builder().attributeName(HASH_KEY_NAME) - .keyType(KeyType.HASH).build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName( - HASH_KEY_NAME).attributeType( - ScalarAttributeType.B).build()) - .provisionedThroughput(DEFAULT_PROVISIONED_THROUGHPUT) - .build(); - return request; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/TempTableWithSecondaryIndexes.java b/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/TempTableWithSecondaryIndexes.java deleted file mode 100644 index 07d827c164db..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/TempTableWithSecondaryIndexes.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
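TempTableWithBinaryKey above differs from BasicTempTable only in its key type (ScalarAttributeType.B), so writes against it must supply binary attribute values via SdkBytes. A hedged sketch (the key bytes and class name are invented):

```java
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import utils.resources.tables.TempTableWithBinaryKey;
import utils.test.util.DynamoDBTestBase;

public class BinaryKeyPutExample {
    public static void main(String[] args) {
        DynamoDbClient dynamo = DynamoDBTestBase.getClient();

        byte[] keyBytes = "binary-key-example".getBytes(StandardCharsets.UTF_8);
        AttributeValue binaryKey = AttributeValue.builder()
                                                 .b(SdkBytes.fromByteArray(keyBytes)) // B-typed, matching the table's key schema
                                                 .build();

        dynamo.putItem(r -> r.tableName(TempTableWithBinaryKey.TEMP_BINARY_TABLE_NAME)
                             .item(Collections.singletonMap(TempTableWithBinaryKey.HASH_KEY_NAME, binaryKey)));
    }
}
```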
- */ - -package utils.resources.tables; - -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.Projection; -import software.amazon.awssdk.services.dynamodb.model.ProjectionType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import utils.test.resources.DynamoDBTableResource; -import utils.test.util.DynamoDBTestBase; - -/** - * The table used by SecondaryIndexesIntegrationTest - */ -public class TempTableWithSecondaryIndexes extends DynamoDBTableResource { - - public static final String TEMP_TABLE_NAME = "java-sdk-indexes-" + System.currentTimeMillis(); - public static final String HASH_KEY_NAME = "hash_key"; - public static final String RANGE_KEY_NAME = "range_key"; - public static final String LSI_NAME = "local_secondary_index"; - public static final String LSI_RANGE_KEY_NAME = "local_secondary_index_attribute"; - public static final String GSI_NAME = "global_secondary_index"; - public static final String GSI_HASH_KEY_NAME = "global_secondary_index_hash_attribute"; - public static final String GSI_RANGE_KEY_NAME = "global_secondary_index_range_attribute"; - public static final ProvisionedThroughput GSI_PROVISIONED_THROUGHPUT = ProvisionedThroughput.builder() - .readCapacityUnits(5L) - .writeCapacityUnits(5L) - .build(); - - @Override - protected DynamoDbClient getClient() { - return DynamoDBTestBase.getClient(); - } - - /** - * Table schema: - * Hash Key : HASH_KEY_NAME (S) - * Range Key : RANGE_KEY_NAME (N) - * LSI schema: - * Hash Key : HASH_KEY_NAME (S) - * Range Key : LSI_RANGE_KEY_NAME (N) - * GSI schema: - * Hash Key : GSI_HASH_KEY_NAME (N) - * Range Key : GSI_RANGE_KEY_NAME (N) - */ - @Override - protected CreateTableRequest getCreateTableRequest() { - CreateTableRequest createTableRequest = CreateTableRequest.builder() - .tableName(TEMP_TABLE_NAME) - .keySchema( - KeySchemaElement.builder() - .attributeName(HASH_KEY_NAME) - .keyType(KeyType.HASH) - .build(), - KeySchemaElement.builder() - .attributeName(RANGE_KEY_NAME) - .keyType(KeyType.RANGE) - .build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName( - HASH_KEY_NAME).attributeType( - ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName( - RANGE_KEY_NAME).attributeType( - ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName( - LSI_RANGE_KEY_NAME).attributeType( - ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName( - GSI_HASH_KEY_NAME).attributeType( - ScalarAttributeType.S).build(), - AttributeDefinition.builder().attributeName( - GSI_RANGE_KEY_NAME).attributeType( - ScalarAttributeType.N).build()) - .provisionedThroughput(BasicTempTable.DEFAULT_PROVISIONED_THROUGHPUT) - .localSecondaryIndexes( - LocalSecondaryIndex.builder() - .indexName(LSI_NAME) - .keySchema( - KeySchemaElement.builder() - .attributeName( - HASH_KEY_NAME) - .keyType(KeyType.HASH).build(), - KeySchemaElement.builder() - .attributeName( - LSI_RANGE_KEY_NAME) 
- .keyType(KeyType.RANGE).build()) - .projection( - Projection.builder() - .projectionType(ProjectionType.KEYS_ONLY).build()).build()) - .globalSecondaryIndexes( - GlobalSecondaryIndex.builder().indexName(GSI_NAME) - .keySchema( - KeySchemaElement.builder() - .attributeName( - GSI_HASH_KEY_NAME) - .keyType(KeyType.HASH).build(), - KeySchemaElement.builder() - .attributeName( - GSI_RANGE_KEY_NAME) - .keyType(KeyType.RANGE).build()) - .projection( - Projection.builder() - .projectionType(ProjectionType.KEYS_ONLY).build()) - .provisionedThroughput( - GSI_PROVISIONED_THROUGHPUT).build()) - .build(); - return createTableRequest; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/TestTableForParallelScan.java b/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/TestTableForParallelScan.java deleted file mode 100644 index e35c885e225f..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/resources/tables/TestTableForParallelScan.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package utils.resources.tables; - -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import utils.test.resources.DynamoDBTableResource; -import utils.test.util.DynamoDBTestBase; - -/** - * Test table for {@link ParallelScanIntegrationTest} - */ -public class TestTableForParallelScan extends DynamoDBTableResource { - - public static final String TABLE_NAME = "java-sdk-parallel-scan"; - public static final String HASH_KEY_NAME = "hash"; - public static final Long READ_CAPACITY = 10L; - public static final Long WRITE_CAPACITY = 5L; - public static final ProvisionedThroughput DEFAULT_PROVISIONED_THROUGHPUT = - ProvisionedThroughput.builder().readCapacityUnits(READ_CAPACITY).writeCapacityUnits(WRITE_CAPACITY).build(); - - @Override - protected DynamoDbClient getClient() { - return DynamoDBTestBase.getClient(); - } - - @Override - protected CreateTableRequest getCreateTableRequest() { - CreateTableRequest createTableRequest = CreateTableRequest.builder() - .tableName(TABLE_NAME) - .keySchema( - KeySchemaElement.builder().attributeName(HASH_KEY_NAME) - .keyType(KeyType.HASH).build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName( - HASH_KEY_NAME).attributeType( - ScalarAttributeType.N).build()) - .provisionedThroughput(DEFAULT_PROVISIONED_THROUGHPUT) - .build(); - return createTableRequest; - } - -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/test/resources/DynamoDBTableResource.java 
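With the schema of TempTableWithSecondaryIndexes in place, index reads go through QueryRequest.indexName(). The sketch below queries the global secondary index by its string hash attribute; the value bound to :v and the class name are invented.

```java
import java.util.Collections;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.QueryRequest;
import software.amazon.awssdk.services.dynamodb.model.QueryResponse;
import utils.resources.tables.TempTableWithSecondaryIndexes;
import utils.test.util.DynamoDBTestBase;

public class QueryGsiExample {
    public static void main(String[] args) {
        DynamoDbClient dynamo = DynamoDBTestBase.getClient();

        QueryRequest request = QueryRequest.builder()
                .tableName(TempTableWithSecondaryIndexes.TEMP_TABLE_NAME)
                .indexName(TempTableWithSecondaryIndexes.GSI_NAME)
                .keyConditionExpression("#h = :v")
                .expressionAttributeNames(Collections.singletonMap("#h", TempTableWithSecondaryIndexes.GSI_HASH_KEY_NAME))
                .expressionAttributeValues(Collections.singletonMap(":v", AttributeValue.builder().s("some-hash-value").build()))
                .build();

        QueryResponse response = dynamo.query(request);
        // KEYS_ONLY projection: each returned item carries only table and index key attributes.
        response.items().forEach(System.out::println);
    }
}
```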
b/test/dynamodbmapper-v1/src/test/java/utils/test/resources/DynamoDBTableResource.java deleted file mode 100644 index 9a822568601a..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/test/resources/DynamoDBTableResource.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package utils.test.resources; - -import java.util.List; - -import software.amazon.awssdk.awscore.exception.AwsServiceException; -import software.amazon.awssdk.core.exception.SdkServiceException; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.TableUtils; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndexDescription; -import software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndexDescription; -import software.amazon.awssdk.services.dynamodb.model.Projection; -import software.amazon.awssdk.services.dynamodb.model.TableDescription; -import software.amazon.awssdk.services.dynamodb.model.TableStatus; -import software.amazon.awssdk.testutils.UnorderedCollectionComparator; -import software.amazon.awssdk.utils.Logger; -import utils.resources.TestResource; -import utils.test.util.DynamoDBTestBase; - -public abstract class DynamoDBTableResource implements TestResource { - private static final Logger log = Logger.loggerFor(DynamoDBTableResource.class); - - /** - * Returns true if the two lists of GlobalSecondaryIndex and - * GlobalSecondaryIndexDescription share the same set of: - * 1) indexName - * 2) projection - * 3) keySchema (compared as unordered lists) - */ - static boolean equalUnorderedGsiLists(List listA, List listB) { - return UnorderedCollectionComparator.equalUnorderedCollections( - listA, listB, - new UnorderedCollectionComparator.CrossTypeComparator() { - @Override - public boolean equals(GlobalSecondaryIndex a, GlobalSecondaryIndexDescription b) { - return a.indexName().equals(b.indexName()) - && equalProjections(a.projection(), b.projection()) - && UnorderedCollectionComparator.equalUnorderedCollections(a.keySchema(), b.keySchema()); - } - }); - } - - /** - * Returns true if the two lists of LocalSecondaryIndex and - * LocalSecondaryIndexDescription share the same set of: - * 1) indexName - * 2) projection - * 3) keySchema (compared as unordered lists) - */ - static boolean equalUnorderedLsiLists(List listA, List listB) { - return UnorderedCollectionComparator.equalUnorderedCollections( - listA, listB, - new UnorderedCollectionComparator.CrossTypeComparator() { - @Override - public boolean equals(LocalSecondaryIndex a, LocalSecondaryIndexDescription b) { - // 
Project parameter might not be specified in the - // CreateTableRequest. But it should be treated as equal - // to the default projection type - KEYS_ONLY. - return a.indexName().equals(b.indexName()) - && equalProjections(a.projection(), b.projection()) - && UnorderedCollectionComparator.equalUnorderedCollections(a.keySchema(), b.keySchema()); - } - }); - } - - /** - * Compares the Projection parameter included in the CreateTableRequest, - * with the one returned from DescribeTableResponse. - */ - static boolean equalProjections(Projection fromCreateTableRequest, Projection fromDescribeTableResponse) { - if (fromCreateTableRequest == null || fromDescribeTableResponse == null) { - throw new IllegalStateException("The projection parameter should never be null."); - } - - return fromCreateTableRequest.projectionType().equals( - fromDescribeTableResponse.projectionType()) - && UnorderedCollectionComparator.equalUnorderedCollections( - fromCreateTableRequest.nonKeyAttributes(), - fromDescribeTableResponse.nonKeyAttributes()); - } - - protected abstract DynamoDbClient getClient(); - - protected abstract CreateTableRequest getCreateTableRequest(); - - /** - * Implementation of TestResource interfaces - */ - - @Override - public void create(boolean waitTillFinished) { - log.info(() -> "Creating " + this + "..."); - getClient().createTable(getCreateTableRequest()); - - if (waitTillFinished) { - log.info(() -> "Waiting for " + this + " to become active..."); - try { - TableUtils.waitUntilActive(getClient(), getCreateTableRequest().tableName()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } - - @Override - public void delete(boolean waitTillFinished) { - log.info(() -> "Deleting " + this + "..."); - getClient().deleteTable(DeleteTableRequest.builder().tableName(getCreateTableRequest().tableName()).build()); - - if (waitTillFinished) { - log.info(() -> "Waiting for " + this + " to become deleted..."); - DynamoDBTestBase.waitForTableToBecomeDeleted(getClient(), getCreateTableRequest().tableName()); - } - } - - @Override - public ResourceStatus getResourceStatus() { - CreateTableRequest createRequest = getCreateTableRequest(); - TableDescription table = null; - try { - table = getClient().describeTable(DescribeTableRequest.builder().tableName( - createRequest.tableName()).build()).table(); - } catch (AwsServiceException exception) { - if (exception.awsErrorDetails().errorCode().equalsIgnoreCase("ResourceNotFoundException")) { - return ResourceStatus.NOT_EXIST; - } - } - - TableStatus tableStatus = table.tableStatus(); - - if (tableStatus == TableStatus.ACTIVE) { - // returns AVAILABLE only if table KeySchema + LSIs + GSIs all match. 
- if (UnorderedCollectionComparator.equalUnorderedCollections(createRequest.keySchema(), table.keySchema()) - && equalUnorderedGsiLists(createRequest.globalSecondaryIndexes(), table.globalSecondaryIndexes()) - && equalUnorderedLsiLists(createRequest.localSecondaryIndexes(), table.localSecondaryIndexes())) { - return ResourceStatus.AVAILABLE; - } else { - return ResourceStatus.EXIST_INCOMPATIBLE_RESOURCE; - } - } else if (tableStatus == TableStatus.CREATING - || tableStatus == TableStatus.UPDATING - || tableStatus == TableStatus.DELETING) { - return ResourceStatus.TRANSIENT; - } else { - return ResourceStatus.NOT_EXIST; - } - } - - /** - * Object interfaces - */ - @Override - public String toString() { - return "DynamoDB Table [" + getCreateTableRequest().tableName() + "]"; - } - - @Override - public int hashCode() { - return getCreateTableRequest().hashCode(); - } - - @Override - public boolean equals(Object other) { - if (!(other instanceof DynamoDBTableResource)) { - return false; - } - return getCreateTableRequest().equals( - ((DynamoDBTableResource) other).getCreateTableRequest()); - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/test/util/DynamoDBIntegrationTestBase.java b/test/dynamodbmapper-v1/src/test/java/utils/test/util/DynamoDBIntegrationTestBase.java deleted file mode 100644 index 2ff2029118b2..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/test/util/DynamoDBIntegrationTestBase.java +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
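The compatibility check above deliberately ignores ordering: a table whose DescribeTable output lists key schema elements or index definitions in a different order than the CreateTableRequest is still considered the same resource. A small illustration of that property (the attribute names are invented), assuming the same test utility:

```java
import java.util.Arrays;
import java.util.List;
import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement;
import software.amazon.awssdk.services.dynamodb.model.KeyType;
import software.amazon.awssdk.testutils.UnorderedCollectionComparator;

public class UnorderedSchemaComparison {
    public static void main(String[] args) {
        KeySchemaElement hash = KeySchemaElement.builder().attributeName("hash_key").keyType(KeyType.HASH).build();
        KeySchemaElement range = KeySchemaElement.builder().attributeName("range_key").keyType(KeyType.RANGE).build();

        List<KeySchemaElement> requested = Arrays.asList(hash, range);
        List<KeySchemaElement> described = Arrays.asList(range, hash);

        // Element order is irrelevant to the comparison, so this prints true.
        System.out.println(UnorderedCollectionComparator.equalUnorderedCollections(requested, described));
    }
}
```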
- */ - -package utils.test.util; - -import org.junit.BeforeClass; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.TableUtils; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ListTablesRequest; -import software.amazon.awssdk.services.dynamodb.model.ListTablesResponse; -import software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex; -import software.amazon.awssdk.services.dynamodb.model.Projection; -import software.amazon.awssdk.services.dynamodb.model.ProjectionType; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; - -public class DynamoDBIntegrationTestBase extends DynamoDBTestBase { - - protected static final String KEY_NAME = "key"; - protected static final String TABLE_NAME = "aws-java-sdk-util"; - protected static final String TABLE_WITH_RANGE_ATTRIBUTE = "aws-java-sdk-range-test"; - protected static final String TABLE_WITH_INDEX_RANGE_ATTRIBUTE = "aws-java-sdk-index-range-test"; - protected static long startKey = System.currentTimeMillis(); - - @BeforeClass - public static void setUp() throws Exception { - setUpCredentials(); - dynamo = DynamoDbClient.builder().region(Region.US_EAST_1).credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); - - // Create a table - String keyName = KEY_NAME; - CreateTableRequest createTableRequest = CreateTableRequest.builder() - .tableName(TABLE_NAME) - .keySchema(KeySchemaElement.builder() - .attributeName(keyName) - .keyType(KeyType.HASH) - .build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName(keyName) - .attributeType(ScalarAttributeType.S) - .build()) - .provisionedThroughput(ProvisionedThroughput.builder() - .readCapacityUnits(10L) - .writeCapacityUnits(5L).build()) - .build(); - - if (TableUtils.createTableIfNotExists(dynamo, createTableRequest)) { - TableUtils.waitUntilActive(dynamo, TABLE_NAME); - } - } - - /** - * Quick utility method to delete all tables when we have too much capacity - * reserved for the region. 
- */ - public static void deleteAllTables() { - ListTablesResponse listTables = dynamo.listTables(ListTablesRequest.builder().build()); - for (String name : listTables.tableNames()) { - dynamo.deleteTable(DeleteTableRequest.builder().tableName(name).build()); - } - } - - protected static void setUpTableWithRangeAttribute() throws Exception { - setUp(); - - String keyName = DynamoDBIntegrationTestBase.KEY_NAME; - String rangeKeyAttributeName = "rangeKey"; - CreateTableRequest createTableRequest = CreateTableRequest.builder() - .tableName(TABLE_WITH_RANGE_ATTRIBUTE) - .keySchema( - KeySchemaElement.builder() - .attributeName(keyName) - .keyType(KeyType.HASH) - .build(), - KeySchemaElement.builder() - .attributeName(rangeKeyAttributeName) - .keyType(KeyType.RANGE) - .build()) - .attributeDefinitions( - AttributeDefinition.builder() - .attributeName(keyName) - .attributeType(ScalarAttributeType.N) - .build(), - AttributeDefinition.builder() - .attributeName(rangeKeyAttributeName) - .attributeType(ScalarAttributeType.N) - .build()) - .provisionedThroughput(ProvisionedThroughput.builder() - .readCapacityUnits(10L) - .writeCapacityUnits(5L).build()) - .build(); - - if (TableUtils.createTableIfNotExists(dynamo, createTableRequest)) { - TableUtils.waitUntilActive(dynamo, TABLE_WITH_RANGE_ATTRIBUTE); - } - } - - protected static void setUpTableWithIndexRangeAttribute(boolean recreateTable) throws Exception { - setUp(); - if (recreateTable) { - dynamo.deleteTable(DeleteTableRequest.builder().tableName(TABLE_WITH_INDEX_RANGE_ATTRIBUTE).build()); - waitForTableToBecomeDeleted(TABLE_WITH_INDEX_RANGE_ATTRIBUTE); - } - - String keyName = DynamoDBIntegrationTestBase.KEY_NAME; - String rangeKeyAttributeName = "rangeKey"; - String indexFooRangeKeyAttributeName = "indexFooRangeKey"; - String indexBarRangeKeyAttributeName = "indexBarRangeKey"; - String multipleIndexRangeKeyAttributeName = "multipleIndexRangeKey"; - String fooAttributeName = "fooAttribute"; - String barAttributeName = "barAttribute"; - String indexFooName = "index_foo"; - String indexBarName = "index_bar"; - String indexFooCopyName = "index_foo_copy"; - String indexBarCopyName = "index_bar_copy"; - - CreateTableRequest createTableRequest = CreateTableRequest.builder() - .tableName(TABLE_WITH_INDEX_RANGE_ATTRIBUTE) - .keySchema( - KeySchemaElement.builder() - .attributeName(keyName) - .keyType(KeyType.HASH) - .build(), - KeySchemaElement.builder() - .attributeName(rangeKeyAttributeName) - .keyType(KeyType.RANGE) - .build()) - .localSecondaryIndexes( - LocalSecondaryIndex.builder() - .indexName(indexFooName) - .keySchema( - KeySchemaElement.builder() - .attributeName(keyName) - .keyType(KeyType.HASH) - .build(), - KeySchemaElement.builder() - .attributeName(indexFooRangeKeyAttributeName) - .keyType(KeyType.RANGE) - .build()) - .projection(Projection.builder() - .projectionType(ProjectionType.INCLUDE) - .nonKeyAttributes(fooAttributeName) - .build()) - .build(), - LocalSecondaryIndex.builder() - .indexName(indexBarName) - .keySchema( - KeySchemaElement.builder() - .attributeName(keyName) - .keyType(KeyType.HASH) - .build(), - KeySchemaElement.builder() - .attributeName(indexBarRangeKeyAttributeName) - .keyType(KeyType.RANGE) - .build()) - .projection(Projection.builder() - .projectionType(ProjectionType.INCLUDE) - .nonKeyAttributes(barAttributeName) - .build()) - .build(), - LocalSecondaryIndex.builder() - .indexName(indexFooCopyName) - .keySchema( - KeySchemaElement.builder() - .attributeName(keyName) - .keyType(KeyType.HASH) - .build(), - 
KeySchemaElement.builder() - .attributeName(multipleIndexRangeKeyAttributeName) - .keyType(KeyType.RANGE) - .build()) - .projection(Projection.builder() - .projectionType(ProjectionType.INCLUDE) - .nonKeyAttributes(fooAttributeName) - .build()) - .build(), - LocalSecondaryIndex.builder() - .indexName(indexBarCopyName) - .keySchema( - KeySchemaElement.builder() - .attributeName(keyName) - .keyType(KeyType.HASH) - .build(), - KeySchemaElement.builder() - .attributeName(multipleIndexRangeKeyAttributeName) - .keyType(KeyType.RANGE) - .build()) - .projection(Projection.builder() - .projectionType(ProjectionType.INCLUDE) - .nonKeyAttributes(barAttributeName) - .build()) - .build()) - .attributeDefinitions( - AttributeDefinition.builder().attributeName(keyName).attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName(rangeKeyAttributeName) - .attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName(indexFooRangeKeyAttributeName) - .attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName(indexBarRangeKeyAttributeName) - .attributeType(ScalarAttributeType.N).build(), - AttributeDefinition.builder().attributeName(multipleIndexRangeKeyAttributeName) - .attributeType(ScalarAttributeType.N).build()) - .provisionedThroughput(ProvisionedThroughput.builder() - .readCapacityUnits(10L) - .writeCapacityUnits(5L).build()) - .build(); - - if (TableUtils.createTableIfNotExists(dynamo, createTableRequest)) { - TableUtils.waitUntilActive(dynamo, TABLE_WITH_INDEX_RANGE_ATTRIBUTE); - } - } -} diff --git a/test/dynamodbmapper-v1/src/test/java/utils/test/util/DynamoDBTestBase.java b/test/dynamodbmapper-v1/src/test/java/utils/test/util/DynamoDBTestBase.java deleted file mode 100644 index 5dd0d283a7bd..000000000000 --- a/test/dynamodbmapper-v1/src/test/java/utils/test/util/DynamoDBTestBase.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package utils.test.util; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.math.BigDecimal; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbClient; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; -import software.amazon.awssdk.testutils.Waiter; -import software.amazon.awssdk.testutils.service.AwsTestBase; -import software.amazon.awssdk.utils.Logger; - -public class DynamoDBTestBase extends AwsTestBase { - protected static final String ENDPOINT = "http://dynamodb.us-east-1.amazonaws.com/"; - - protected static DynamoDbClient dynamo; - - private static final Logger log = Logger.loggerFor(DynamoDBTestBase.class); - - public static void setUpTestBase() { - try { - setUpCredentials(); - } catch (Exception e) { - throw SdkClientException.builder().message("Unable to load credential property file.").cause(e).build(); - } - - dynamo = DynamoDbClient.builder().region(Region.US_EAST_1).credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); - } - - public static DynamoDbClient getClient() { - if (dynamo == null) { - setUpTestBase(); - } - return dynamo; - } - - protected static void waitForTableToBecomeDeleted(String tableName) { - waitForTableToBecomeDeleted(dynamo, tableName); - } - - public static void waitForTableToBecomeDeleted(DynamoDbClient dynamo, String tableName) { - log.info(() -> "Waiting for " + tableName + " to become Deleted..."); - - Waiter.run(() -> dynamo.describeTable(r -> r.tableName(tableName))) - .untilException(ResourceNotFoundException.class) - .orFail(); - } - - protected static void assertSetsEqual(Collection expected, Collection given) { - Set givenCopy = new HashSet(given); - for (T e : expected) { - if (!givenCopy.remove(e)) { - fail("Expected element not found: " + e); - } - } - - assertTrue("Unexpected elements found: " + givenCopy, givenCopy.isEmpty()); - } - - /** - * Only valid for whole numbers - */ - protected static void assertNumericSetsEquals(Set expected, Collection given) { - Set givenCopy = new HashSet(); - for (String s : given) { - BigDecimal bd = new BigDecimal(s); - givenCopy.add(bd.setScale(0)); - } - - Set expectedCopy = new HashSet(); - for (Number n : expected) { - BigDecimal bd = new BigDecimal(n.toString()); - expectedCopy.add(bd.setScale(0)); - } - - assertSetsEqual(expectedCopy, givenCopy); - } - - protected static Set toSet(T... array) { - Set set = new HashSet(); - for (T t : array) { - set.add(t); - } - return set; - } - - protected static Set toSet(Collection collection) { - Set set = new HashSet(); - for (T t : collection) { - set.add(t); - } - return set; - } - - protected static byte[] generateByteArray(int length) { - byte[] bytes = new byte[length]; - for (int i = 0; i < length; i++) { - bytes[i] = (byte) (i % Byte.MAX_VALUE); - } - return bytes; - } - - /** - * Gets a map of key values for the single hash key attribute value given. 
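waitForTableToBecomeDeleted() above is the Waiter idiom callers use after dropping a table: keep calling DescribeTable until it throws ResourceNotFoundException. A hedged sketch of that caller side (the table name and class are placeholders):

```java
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest;
import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException;
import software.amazon.awssdk.testutils.Waiter;
import utils.test.util.DynamoDBTestBase;

public class DropTableAndWait {
    public static void main(String[] args) {
        DynamoDbClient dynamo = DynamoDBTestBase.getClient();
        String tableName = "some-temporary-table";

        dynamo.deleteTable(DeleteTableRequest.builder().tableName(tableName).build());

        // Poll DescribeTable until the service reports the table gone.
        Waiter.run(() -> dynamo.describeTable(r -> r.tableName(tableName)))
              .untilException(ResourceNotFoundException.class)
              .orFail();
    }
}
```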
- */ - protected Map mapKey(String attributeName, AttributeValue value) { - HashMap map = new HashMap(); - map.put(attributeName, value); - return map; - } - -} diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index 6dc9aa314178..aeee44ca9aa5 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -1,6 +1,6 @@ - netty-nio-client - software.amazon.awssdk - - software.amazon.awssdk protocol-tests ${awsjavasdk.version} - - - - netty-nio-client - software.amazon.awssdk - - software.amazon.awssdk apache-client ${awsjavasdk.version} + + software.amazon.awssdk + netty-nio-client + ${awsjavasdk.version} + software.amazon.awssdk url-connection-client diff --git a/test/module-path-tests/src/main/java/module-info.java b/test/module-path-tests/src/main/java/module-info.java index 6e01ede4e7e7..9b2d7c36c49d 100644 --- a/test/module-path-tests/src/main/java/module-info.java +++ b/test/module-path-tests/src/main/java/module-info.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ requires software.amazon.awssdk.regions; requires software.amazon.awssdk.http.urlconnection; requires software.amazon.awssdk.http.apache; + requires software.amazon.awssdk.http.nio.netty; requires software.amazon.awssdk.http; requires software.amazon.awssdk.core; requires software.amazon.awssdk.awscore; @@ -27,9 +28,6 @@ requires software.amazon.awssdk.utils; requires software.amazon.awssdk.testutils.service; - // This is fine because those are just used in unit test. - // https://jira.qos.ch/browse/SLF4J-420 - requires slf4j.api; + requires org.slf4j; requires slf4j.simple; - } diff --git a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/IntegTestsRunner.java b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/IntegTestsRunner.java index 07a5d381b5aa..5716f26d525a 100644 --- a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/IntegTestsRunner.java +++ b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/IntegTestsRunner.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/MockTestsRunner.java b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/MockTestsRunner.java index d7d3ed04bfa5..5dcb2799b76f 100644 --- a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/MockTestsRunner.java +++ b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/MockTestsRunner.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
diff --git a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/integtests/BaseApiCall.java b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/integtests/BaseApiCall.java index d06303550ca4..8329874379f6 100644 --- a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/integtests/BaseApiCall.java +++ b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/integtests/BaseApiCall.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/integtests/S3ApiCall.java b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/integtests/S3ApiCall.java index 45fc5d72cd4a..0278ba0dd059 100644 --- a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/integtests/S3ApiCall.java +++ b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/integtests/S3ApiCall.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -15,15 +15,14 @@ package software.amazon.awssdk.modulepath.tests.integtests; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient; import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3Client; public class S3ApiCall extends BaseApiCall { - private static final Logger logger = LoggerFactory.getLogger(S3ApiCall.class); private S3Client s3Client = S3Client.builder() .region(Region.US_WEST_2) @@ -36,6 +35,11 @@ public class S3ApiCall extends BaseApiCall { .httpClient(UrlConnectionHttpClient.builder().build()) .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); + private S3AsyncClient s3ClientWithNettyClient = S3AsyncClient.builder() + .region(Region.US_WEST_2) + .httpClient(NettyNioAsyncHttpClient.builder().build()) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); + public S3ApiCall() { super("s3"); } @@ -50,9 +54,8 @@ public Runnable urlHttpConnectionClientRunnable() { return () -> s3ClientWithHttpUrlConnection.listBuckets(); } - //TODO: testing netty client once it's fixed @Override public Runnable nettyClientRunnable() { - return () -> logger.info("Skipping testing s3 client with netty client"); + return () -> s3ClientWithNettyClient.listBuckets().join(); } } diff --git a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/BaseMockApiCall.java b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/BaseMockApiCall.java index 6593707d0275..51f42446f4df 100644 --- a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/BaseMockApiCall.java +++ 
b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/BaseMockApiCall.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/JsonProtocolApiCall.java b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/JsonProtocolApiCall.java index bafade1a561a..30aa945fc82e 100644 --- a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/JsonProtocolApiCall.java +++ b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/JsonProtocolApiCall.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/MockAyncHttpClient.java b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/MockAyncHttpClient.java index 021e9253e106..6634bda0c0f1 100644 --- a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/MockAyncHttpClient.java +++ b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/MockAyncHttpClient.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/MockHttpClient.java b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/MockHttpClient.java index e1aa9f76fb1f..3cbc7582c884 100644 --- a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/MockHttpClient.java +++ b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/MockHttpClient.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/XmlProtocolApiCall.java b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/XmlProtocolApiCall.java index cb2ff9e7d6e9..11864c6e0abf 100644 --- a/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/XmlProtocolApiCall.java +++ b/test/module-path-tests/src/main/java/software/amazon/awssdk/modulepath/tests/mocktests/XmlProtocolApiCall.java @@ -1,5 +1,5 @@ /* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml index 46e5f0bfca68..b8ad70898187 100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -1,6 +1,6 @@